Merge "Extend forkAndSpecialize to take additional inputs and to set the SELinux security context."
diff --git a/dx/src/com/android/dx/command/dexer/Main.java b/dx/src/com/android/dx/command/dexer/Main.java
index d127550..80ddbd0 100644
--- a/dx/src/com/android/dx/command/dexer/Main.java
+++ b/dx/src/com/android/dx/command/dexer/Main.java
@@ -186,6 +186,9 @@
         // Reset the error/warning count to start fresh.
         warnings = 0;
         errors = 0;
+        // Empty the list so that tools that load dx and keep it around
+        // for multiple runs don't reuse stale buffers.
+        libraryDexBuffers.clear();
 
         args = arguments;
         args.makeOptionsObjects();
@@ -297,6 +300,7 @@
             DexBuffer ab = new DexMerger(a, b, CollisionPolicy.FAIL).merge();
             outArray = ab.getBytes();
         }
+
         return outArray;
     }
 
diff --git a/dx/src/com/android/dx/io/DexBuffer.java b/dx/src/com/android/dx/io/DexBuffer.java
index d10b08c..e6f908b 100644
--- a/dx/src/com/android/dx/io/DexBuffer.java
+++ b/dx/src/com/android/dx/io/DexBuffer.java
@@ -304,10 +304,11 @@
         private final String name;
         private int position;
         private final int limit;
+        private final int initialPosition;
 
         private Section(String name, int position, int limit) {
             this.name = name;
-            this.position = position;
+            this.position = this.initialPosition = position;
             this.limit = limit;
         }
 
@@ -361,6 +362,10 @@
             return Leb128Utils.readUnsignedLeb128(this);
         }
 
+        public int readUleb128p1() {
+            return Leb128Utils.readUnsignedLeb128(this) - 1;
+        }
+
         public int readSleb128() {
             return Leb128Utils.readSignedLeb128(this);
         }
@@ -610,6 +615,10 @@
             }
         }
 
+        public void writeUleb128p1(int i) {
+            writeUleb128(i + 1);
+        }
+
         public void writeSleb128(int i) {
             try {
                 Leb128Utils.writeSignedLeb128(this, i);
@@ -645,5 +654,12 @@
         public int remaining() {
             return limit - position;
         }
+
+        /**
+         * Returns the number of bytes used by this section.
+         */
+        public int used() {
+            return position - initialPosition;
+        }
     }
 }
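
For context on the two helpers added above: uleb128p1 stores a value as value + 1 in ordinary unsigned LEB128, so that -1 (dex's NO_INDEX) encodes as the single byte 0x00. A minimal, self-contained round-trip sketch, assuming nothing beyond java.io (the helper names are illustrative, not DexBuffer's API):

    import java.io.ByteArrayOutputStream;

    // Sketch: how readUleb128p1/writeUleb128p1 compose with plain uleb128.
    public class Uleb128p1Demo {
        static void writeUleb128(ByteArrayOutputStream out, int value) {
            int remaining = value >>> 7;
            while (remaining != 0) {
                out.write((value & 0x7f) | 0x80); // low 7 bits + continuation bit
                value = remaining;
                remaining >>>= 7;
            }
            out.write(value & 0x7f); // final byte, continuation bit clear
        }

        static int readUleb128(byte[] in, int[] pos) {
            int result = 0, shift = 0, b;
            do {
                b = in[pos[0]++] & 0xff;
                result |= (b & 0x7f) << shift;
                shift += 7;
            } while ((b & 0x80) != 0);
            return result;
        }

        public static void main(String[] args) {
            ByteArrayOutputStream out = new ByteArrayOutputStream();
            writeUleb128(out, -1 + 1);  // writeUleb128p1(-1): NO_INDEX is 0x00
            writeUleb128(out, 127 + 1); // writeUleb128p1(127): 0x80 0x01
            byte[] bytes = out.toByteArray();
            int[] pos = { 0 };
            System.out.println(readUleb128(bytes, pos) - 1); // -1
            System.out.println(readUleb128(bytes, pos) - 1); // 127
        }
    }
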
diff --git a/dx/src/com/android/dx/merge/DexMerger.java b/dx/src/com/android/dx/merge/DexMerger.java
index b807117..8078f64 100644
--- a/dx/src/com/android/dx/merge/DexMerger.java
+++ b/dx/src/com/android/dx/merge/DexMerger.java
@@ -190,8 +190,7 @@
          * result in too many bytes wasted, compact the result. To compact,
          * simply merge the result with itself.
          */
-        WriterSizes compactedSizes = writerSizes.clone();
-        compactedSizes.minusWaste(this);
+        WriterSizes compactedSizes = new WriterSizes(this);
         int wastedByteCount = writerSizes.size() - compactedSizes.size();
         if (wastedByteCount > compactWasteThreshold) {
             DexMerger compacter = new DexMerger(
@@ -787,9 +786,13 @@
         Code.Try[] tries = code.getTries();
         codeOut.writeUnsignedShort(tries.length);
 
-        // TODO: retain debug info
-        // code.getDebugInfoOffset();
-        codeOut.writeInt(0);
+        int debugInfoOffset = code.getDebugInfoOffset();
+        if (debugInfoOffset != 0) {
+            codeOut.writeInt(debugInfoOut.getPosition());
+            transformDebugInfoItem(in.open(debugInfoOffset), indexMap);
+        } else {
+            codeOut.writeInt(0);
+        }
 
         short[] instructions = code.getInstructions();
         InstructionTransformer transformer = (in == dexA)
@@ -816,6 +819,87 @@
         }
     }
 
+    private static final byte DBG_END_SEQUENCE = 0x00;
+    private static final byte DBG_ADVANCE_PC = 0x01;
+    private static final byte DBG_ADVANCE_LINE = 0x02;
+    private static final byte DBG_START_LOCAL = 0x03;
+    private static final byte DBG_START_LOCAL_EXTENDED = 0x04;
+    private static final byte DBG_END_LOCAL = 0x05;
+    private static final byte DBG_RESTART_LOCAL = 0x06;
+    private static final byte DBG_SET_PROLOGUE_END = 0x07;
+    private static final byte DBG_SET_EPILOGUE_BEGIN = 0x08;
+    private static final byte DBG_SET_FILE = 0x09;
+
+    private void transformDebugInfoItem(DexBuffer.Section in, IndexMap indexMap) {
+        int lineStart = in.readUleb128();
+        debugInfoOut.writeUleb128(lineStart);
+
+        int parametersSize = in.readUleb128();
+        debugInfoOut.writeUleb128(parametersSize);
+
+        for (int p = 0; p < parametersSize; p++) {
+            int parameterName = in.readUleb128p1();
+            debugInfoOut.writeUleb128p1(indexMap.adjustString(parameterName));
+        }
+
+        int addrDiff;    // uleb128   address delta.
+        int lineDiff;    // sleb128   line delta.
+        int registerNum; // uleb128   register number.
+        int nameIndex;   // uleb128p1 string index.    Needs indexMap adjustment.
+        int typeIndex;   // uleb128p1 type index.      Needs indexMap adjustment.
+        int sigIndex;    // uleb128p1 string index.    Needs indexMap adjustment.
+
+        while (true) {
+            int opcode = in.readByte();
+            debugInfoOut.writeByte(opcode);
+
+            switch (opcode) {
+            case DBG_END_SEQUENCE:
+                return;
+
+            case DBG_ADVANCE_PC:
+                addrDiff = in.readUleb128();
+                debugInfoOut.writeUleb128(addrDiff);
+                break;
+
+            case DBG_ADVANCE_LINE:
+                lineDiff = in.readSleb128();
+                debugInfoOut.writeSleb128(lineDiff);
+                break;
+
+            case DBG_START_LOCAL:
+            case DBG_START_LOCAL_EXTENDED:
+                registerNum = in.readUleb128();
+                debugInfoOut.writeUleb128(registerNum);
+                nameIndex = in.readUleb128p1();
+                debugInfoOut.writeUleb128p1(indexMap.adjustString(nameIndex));
+                typeIndex = in.readUleb128p1();
+                debugInfoOut.writeUleb128p1(indexMap.adjustType(typeIndex));
+                if (opcode == DBG_START_LOCAL_EXTENDED) {
+                    sigIndex = in.readUleb128p1();
+                    debugInfoOut.writeUleb128p1(indexMap.adjustString(sigIndex));
+                }
+                break;
+
+            case DBG_END_LOCAL:
+            case DBG_RESTART_LOCAL:
+                registerNum = in.readUleb128();
+                debugInfoOut.writeUleb128(registerNum);
+                break;
+
+            case DBG_SET_FILE:
+                nameIndex = in.readUleb128p1();
+                debugInfoOut.writeUleb128p1(indexMap.adjustString(nameIndex));
+                break;
+
+            case DBG_SET_PROLOGUE_END:
+            case DBG_SET_EPILOGUE_BEGIN:
+            default:
+                break;
+            }
+        }
+    }
+
     private void transformEncodedCatchHandler(Code.CatchHandler catchHandler, IndexMap indexMap) {
         int catchAllAddress = catchHandler.getCatchAllAddress();
         int[] typeIndexes = catchHandler.getTypeIndexes();
@@ -853,7 +937,7 @@
      * <li>By exactly measuring an existing dex.
      * </ul>
      */
-    private static class WriterSizes implements Cloneable {
+    private static class WriterSizes {
         private int header = SizeOf.HEADER_ITEM;
         private int idsDefs;
         private int mapList;
@@ -876,12 +960,20 @@
             plus(b.getTableOfContents(), false);
         }
 
-        @Override public WriterSizes clone() {
-            try {
-                return (WriterSizes) super.clone();
-            } catch (CloneNotSupportedException e) {
-                throw new AssertionError();
-            }
+        public WriterSizes(DexMerger dexMerger) {
+            header = dexMerger.headerOut.used();
+            idsDefs = dexMerger.idsDefsOut.used();
+            mapList = dexMerger.mapListOut.used();
+            typeList = dexMerger.typeListOut.used();
+            classData = dexMerger.classDataOut.used();
+            code = dexMerger.codeOut.used();
+            stringData = dexMerger.stringDataOut.used();
+            debugInfo = dexMerger.debugInfoOut.used();
+            encodedArray = dexMerger.encodedArrayOut.used();
+            annotationsDirectory = dexMerger.annotationsDirectoryOut.used();
+            annotationsSet = dexMerger.annotationSetOut.used();
+            annotationsSetRefList = dexMerger.annotationSetRefListOut.used();
+            annotation = dexMerger.annotationOut.used();
         }
 
         public void plus(TableOfContents contents, boolean exact) {
@@ -894,7 +986,6 @@
             mapList = SizeOf.UINT + (contents.sections.length * SizeOf.MAP_ITEM);
             typeList += contents.typeLists.byteCount;
             stringData += contents.stringDatas.byteCount;
-            debugInfo += contents.debugInfos.byteCount;
             annotationsDirectory += contents.annotationsDirectories.byteCount;
             annotationsSet += contents.annotationSets.byteCount;
             annotationsSetRefList += contents.annotationSetRefLists.byteCount;
@@ -904,6 +995,7 @@
                 classData += contents.classDatas.byteCount;
                 encodedArray += contents.encodedArrays.byteCount;
                 annotation += contents.annotations.byteCount;
+                debugInfo += contents.debugInfos.byteCount;
             } else {
                 // at most 1/4 of the bytes in a code section are uleb/sleb
                 code += (int) Math.ceil(contents.codes.byteCount * 1.25);
@@ -913,25 +1005,11 @@
                 encodedArray += contents.encodedArrays.byteCount * 2;
                 // at most 1/3 of the bytes in an annotations section are uleb/sleb
                 annotation += (int) Math.ceil(contents.annotations.byteCount * 1.34);
+                // all of the bytes in a debug info section may be uleb/sleb
+                debugInfo += contents.debugInfos.byteCount * 2;
             }
         }
 
-        public void minusWaste(DexMerger dexMerger) {
-            header -= dexMerger.headerOut.remaining();
-            idsDefs -= dexMerger.idsDefsOut.remaining();
-            mapList -= dexMerger.mapListOut.remaining();
-            typeList -= dexMerger.typeListOut.remaining();
-            classData -= dexMerger.classDataOut.remaining();
-            code -= dexMerger.codeOut.remaining();
-            stringData -= dexMerger.stringDataOut.remaining();
-            debugInfo -= dexMerger.debugInfoOut.remaining();
-            encodedArray -= dexMerger.encodedArrayOut.remaining();
-            annotationsDirectory -= dexMerger.annotationsDirectoryOut.remaining();
-            annotationsSet -= dexMerger.annotationSetOut.remaining();
-            annotationsSetRefList -= dexMerger.annotationSetRefListOut.remaining();
-            annotation -= dexMerger.annotationOut.remaining();
-        }
-
         public int size() {
             return header + idsDefs + mapList + typeList + classData + code + stringData + debugInfo
                     + encodedArray + annotationsDirectory + annotationsSet + annotationsSetRefList
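
A note on the sizing change above: the first merge pass reserves pessimistic room for each output section, and debugInfo moves into the inexact branch at a factor of 2 because, after index remapping, every byte of the stream may be a uleb/sleb whose value grows. The new WriterSizes(DexMerger) constructor then measures what was actually written via Section.used(), and if the slack exceeds the threshold the result is merged with itself into exactly-sized buffers. A toy sketch of just that decision, with made-up numbers (the 1 MiB threshold is assumed from DexMerger's compactWasteThreshold field, which is not shown in this hunk):

    // Illustrative only: pessimistic estimate vs. bytes actually consumed.
    public class CompactionMath {
        public static void main(String[] args) {
            int estimated = 40000 * 2;               // debugInfos.byteCount * 2
            int used = 43500;                        // Section.used() after pass 1
            int wasted = estimated - used;
            int compactWasteThreshold = 1024 * 1024; // assumed 1 MiB default
            System.out.println("wasted=" + wasted
                    + ", compact=" + (wasted > compactWasteThreshold));
        }
    }
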
diff --git a/libdex/SysUtil.h b/libdex/SysUtil.h
index 100c312..90a63ca 100644
--- a/libdex/SysUtil.h
+++ b/libdex/SysUtil.h
@@ -31,7 +31,11 @@
  *
  * Must be a power of 2.
  */
+#ifdef PAGE_SHIFT
+#define SYSTEM_PAGE_SIZE        (1<<PAGE_SHIFT)
+#else
 #define SYSTEM_PAGE_SIZE        4096
+#endif
 
 /*
  * Use this to keep track of mapped segments.
diff --git a/tests/029-assert/src/Main.java b/tests/029-assert/src/Main.java
index 1e5cc7c..3b1f8da 100644
--- a/tests/029-assert/src/Main.java
+++ b/tests/029-assert/src/Main.java
@@ -12,5 +12,8 @@
         } catch (AssertionError ae) {
             System.out.println("caught expected assert exception");
         }
+
+        // exercise this code path
+        ClassLoader.getSystemClassLoader().setDefaultAssertionStatus(true);
     }
 }
diff --git a/tests/030-bad-finalizer/src/Main.java b/tests/030-bad-finalizer/src/Main.java
index c063476..db80a87 100644
--- a/tests/030-bad-finalizer/src/Main.java
+++ b/tests/030-bad-finalizer/src/Main.java
@@ -14,7 +14,7 @@
         System.gc();
 
         for (int i = 0; i < 8; i++) {
-            BadFinalizer.snooze(5000);
+            BadFinalizer.snooze(4000);
             System.out.println("Requesting another GC.");
             System.gc();
         }
diff --git a/tests/031-class-attributes/expected.txt b/tests/031-class-attributes/expected.txt
index 47eaeee..afa3416 100644
--- a/tests/031-class-attributes/expected.txt
+++ b/tests/031-class-attributes/expected.txt
@@ -4,6 +4,7 @@
   simple: ClassAttrs
   genericSignature: null
   super: class java.lang.Object
+  genericSuperclass: class java.lang.Object
   declaring: null
   enclosing: null
   enclosingCon: null
@@ -21,12 +22,15 @@
   isMemberClass: false
   isPrimitive: false
   isSynthetic: false
+  genericInterfaces: [0]
+  typeParameters: [0]
 ***** class OtherClass:
   name: OtherClass
   canonical: OtherClass
   simple: OtherClass
   genericSignature: null
   super: class java.lang.Object
+  genericSuperclass: class java.lang.Object
   declaring: null
   enclosing: null
   enclosingCon: null
@@ -44,12 +48,15 @@
   isMemberClass: false
   isPrimitive: false
   isSynthetic: false
+  genericInterfaces: [0]
+  typeParameters: [0]
 ***** class otherpackage.OtherPackageClass:
   name: otherpackage.OtherPackageClass
   canonical: otherpackage.OtherPackageClass
   simple: OtherPackageClass
   genericSignature: null
   super: class java.lang.Object
+  genericSuperclass: class java.lang.Object
   declaring: null
   enclosing: null
   enclosingCon: null
@@ -67,12 +74,15 @@
   isMemberClass: false
   isPrimitive: false
   isSynthetic: false
+  genericInterfaces: [0]
+  typeParameters: [0]
 ***** class ClassAttrs$1InnerNamed:
   name: ClassAttrs$1InnerNamed
   canonical: null
   simple: InnerNamed
   genericSignature: null
   super: class java.lang.Object
+  genericSuperclass: class java.lang.Object
   declaring: null
   enclosing: class ClassAttrs
   enclosingCon: null
@@ -90,12 +100,15 @@
   isMemberClass: false
   isPrimitive: false
   isSynthetic: false
+  genericInterfaces: [0]
+  typeParameters: [0]
 ***** class ClassAttrs$1ConsInnerNamed:
   name: ClassAttrs$1ConsInnerNamed
   canonical: null
   simple: ConsInnerNamed
   genericSignature: null
   super: class java.lang.Object
+  genericSuperclass: class java.lang.Object
   declaring: null
   enclosing: class ClassAttrs
   enclosingCon: ClassAttrs()
@@ -113,12 +126,15 @@
   isMemberClass: false
   isPrimitive: false
   isSynthetic: false
+  genericInterfaces: [0]
+  typeParameters: [0]
 ***** class ClassAttrs$1:
   name: ClassAttrs$1
   canonical: null
   simple: 
   genericSignature: null
   super: class OtherClass
+  genericSuperclass: class OtherClass
   declaring: null
   enclosing: class ClassAttrs
   enclosingCon: null
@@ -136,12 +152,15 @@
   isMemberClass: false
   isPrimitive: false
   isSynthetic: false
+  genericInterfaces: [0]
+  typeParameters: [0]
 ***** class ClassAttrs$MemberClass:
   name: ClassAttrs$MemberClass
   canonical: ClassAttrs.MemberClass
   simple: MemberClass
   genericSignature: <XYZ:Ljava/lang/Object;>Ljava/lang/Object;
   super: class java.lang.Object
+  genericSuperclass: class java.lang.Object
   declaring: class ClassAttrs
   enclosing: class ClassAttrs
   enclosingCon: null
@@ -159,6 +178,34 @@
   isMemberClass: true
   isPrimitive: false
   isSynthetic: false
+  genericInterfaces: [0]
+  typeParameters: [1] XYZ
+***** class FancyClass:
+  name: FancyClass
+  canonical: FancyClass
+  simple: FancyClass
+  genericSignature: <K:Ljava/lang/Object;V:Ljava/lang/Object;>Ljava/util/HashMap<TK;TV;>;Ljava/util/Map<TK;TV;>;
+  super: class java.util.HashMap
+  genericSuperclass: java.util.HashMap<K, V>
+  declaring: null
+  enclosing: null
+  enclosingCon: null
+  enclosingMeth: null
+  modifiers: 1
+  package: null
+  declaredClasses: [0]
+  member classes: [2] class java.util.AbstractMap$SimpleEntry, class java.util.AbstractMap$SimpleImmutableEntry
+  isAnnotation: false
+  isAnonymous: false
+  isArray: false
+  isEnum: false
+  isInterface: false
+  isLocalClass: false
+  isMemberClass: false
+  isPrimitive: false
+  isSynthetic: false
+  genericInterfaces: [1] java.util.Map<K, V>
+  typeParameters: [2] K, V
 constructor signature: (LClassAttrs$MemberClass<TXYZ;>;)V
 method signature: ()Ljava/lang/Class<TXYZ;>;
 field signature: LClassAttrs$MemberClass<TXYZ;>;
diff --git a/tests/031-class-attributes/src/ClassAttrs.java b/tests/031-class-attributes/src/ClassAttrs.java
index c1407bd..d93a925 100644
--- a/tests/031-class-attributes/src/ClassAttrs.java
+++ b/tests/031-class-attributes/src/ClassAttrs.java
@@ -6,6 +6,7 @@
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.lang.reflect.Type;
+import java.lang.reflect.TypeVariable;
 
 public class ClassAttrs {
     ClassAttrs() {
@@ -42,6 +43,9 @@
         /* member, not anonymous, not local */
         printClassAttrs(MemberClass.class);
 
+        /* fancy */
+        printClassAttrs(FancyClass.class);
+
         try {
             Constructor cons;
             cons = MemberClass.class.getConstructor(
@@ -82,8 +86,8 @@
             }
             method.setAccessible(true);
         } catch (NoSuchMethodException ex) {
-            System.err.println("getSignatureAttribute() not defined.");
-            ex.printStackTrace();
+            //System.err.println("getSignatureAttribute() not defined.");
+            //ex.printStackTrace();
             return "<unknown>";
         }
 
@@ -118,7 +122,6 @@
      * Dump a variety of class attributes.
      */
     public static void printClassAttrs(Class clazz) {
-        final boolean WORKING = false;
         Class clazz2;
 
         System.out.println("***** " + clazz + ":");
@@ -134,7 +137,7 @@
 
         System.out.println("  super: "
             + clazz.getSuperclass());
-        if (WORKING) System.out.println("  genericSuperclass: "
+        System.out.println("  genericSuperclass: "
             + clazz.getGenericSuperclass());
         System.out.println("  declaring: "
             + clazz.getDeclaringClass());
@@ -173,8 +176,12 @@
         System.out.println("  isSynthetic: "
             + clazz.isSynthetic());
 
-        if (WORKING) System.out.println("  genericInterfaces: "
+        System.out.println("  genericInterfaces: "
             + stringifyTypeArray(clazz.getGenericInterfaces()));
+
+        TypeVariable<Class<?>>[] typeParameters = clazz.getTypeParameters();
+        System.out.println("  typeParameters: "
+            + stringifyTypeArray(typeParameters));
     }
 
     /*
diff --git a/tests/031-class-attributes/src/FancyClass.java b/tests/031-class-attributes/src/FancyClass.java
new file mode 100644
index 0000000..a58b6a6
--- /dev/null
+++ b/tests/031-class-attributes/src/FancyClass.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.HashMap;
+import java.util.Map;
+
+public class FancyClass<K,V> extends HashMap<K,V> implements Map<K,V> {
+}
+
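
The FancyClass expectations above come straight from java.lang.reflect's generic introspection. A standalone check of the same three calls (the class name Demo is hypothetical):

    import java.lang.reflect.Type;
    import java.lang.reflect.TypeVariable;
    import java.util.HashMap;
    import java.util.Map;

    // Mirrors what printClassAttrs now prints for FancyClass.
    public class Demo<K, V> extends HashMap<K, V> implements Map<K, V> {
        public static void main(String[] args) {
            Class<?> c = Demo.class;
            System.out.println(c.getGenericSuperclass()); // java.util.HashMap<K, V>
            for (Type t : c.getGenericInterfaces()) {
                System.out.println(t);                    // java.util.Map<K, V>
            }
            for (TypeVariable<?> tv : c.getTypeParameters()) {
                System.out.println(tv.getName());         // K, then V
            }
        }
    }
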
diff --git a/tests/033-class-init-deadlock/expected.txt b/tests/033-class-init-deadlock/expected.txt
index 387a426..182d0da 100644
--- a/tests/033-class-init-deadlock/expected.txt
+++ b/tests/033-class-init-deadlock/expected.txt
@@ -1,7 +1,7 @@
 Deadlock test starting.
 A initializing...
 B initializing...
-Deadlock test interupting threads.
+Deadlock test interrupting threads.
 Deadlock test main thread bailing.
 A initialized: false
 B initialized: false
diff --git a/tests/033-class-init-deadlock/src/Main.java b/tests/033-class-init-deadlock/src/Main.java
index 27c4922..1e3f897 100644
--- a/tests/033-class-init-deadlock/src/Main.java
+++ b/tests/033-class-init-deadlock/src/Main.java
@@ -16,11 +16,13 @@
         thread1 = new Thread() { public void run() { new A(); } };
         thread2 = new Thread() { public void run() { new B(); } };
         thread1.start();
+        // Give thread1 a chance to start before starting thread2.
+        try { Thread.sleep(1000); } catch (InterruptedException ie) { }
         thread2.start();
 
         try { Thread.sleep(6000); } catch (InterruptedException ie) { }
 
-        System.out.println("Deadlock test interupting threads.");
+        System.out.println("Deadlock test interrupting threads.");
         thread1.interrupt();
         thread2.interrupt();
         System.out.println("Deadlock test main thread bailing.");
diff --git a/tests/034-call-null/expected.txt b/tests/034-call-null/expected.txt
index 5ffbe05..19f86f4 100644
--- a/tests/034-call-null/expected.txt
+++ b/tests/034-call-null/expected.txt
@@ -1,3 +1 @@
-java.lang.NullPointerException
-	at Main.main(Main.java:12)
-	at dalvik.system.NativeStart.main(Native Method)
+done
diff --git a/tests/034-call-null/src/Main.java b/tests/034-call-null/src/Main.java
index a0a129e..8fe88bd 100644
--- a/tests/034-call-null/src/Main.java
+++ b/tests/034-call-null/src/Main.java
@@ -9,6 +9,11 @@
 
     public static void main(String[] args) {
         Main instance = null;
-        instance.doStuff();
+        try {
+            instance.doStuff();
+            throw new RuntimeException("fail");
+        } catch (NullPointerException npe) { }
+
+        System.out.println("done");
     }
 }
diff --git a/tests/038-inner-null/expected.txt b/tests/038-inner-null/expected.txt
index 0be8ffd..29c1e42 100644
--- a/tests/038-inner-null/expected.txt
+++ b/tests/038-inner-null/expected.txt
@@ -1,5 +1,2 @@
 new Special()
-java.lang.NullPointerException
-	at Main$Special.callInner(Main.java:17)
-	at Main.main(Main.java:6)
-	at dalvik.system.NativeStart.main(Native Method)
+done
diff --git a/tests/038-inner-null/src/Main.java b/tests/038-inner-null/src/Main.java
index acc8764..1239248 100644
--- a/tests/038-inner-null/src/Main.java
+++ b/tests/038-inner-null/src/Main.java
@@ -4,6 +4,7 @@
     public static void main(String[] args) {
         Special special = new Special();
         special.callInner();
+        System.out.println("done");
     }
 
     public static class Special {
@@ -14,7 +15,10 @@
         }
 
         public void callInner() {
-            mBlort.repaint();
+            try {
+                mBlort.repaint();
+                throw new RuntimeException("fail");
+            } catch (NullPointerException npe) {}
         }
     }
 
diff --git a/tests/042-new-instance/expected.txt b/tests/042-new-instance/expected.txt
index 53447db..bb1b80c 100644
--- a/tests/042-new-instance/expected.txt
+++ b/tests/042-new-instance/expected.txt
@@ -6,3 +6,4 @@
 Cons LocalClass2 succeeded
 Cons got expected PackageAccess complaint
 Cons got expected InstantationException
+Cons got expected PackageAccess2 complaint
diff --git a/tests/042-new-instance/src/Main.java b/tests/042-new-instance/src/Main.java
index 8faef13..e43c5a5 100644
--- a/tests/042-new-instance/src/Main.java
+++ b/tests/042-new-instance/src/Main.java
@@ -16,12 +16,12 @@
 
 import java.lang.reflect.Constructor;
 
-import java.lang.reflect.Constructor;
-
 /**
  * Test instance creation.
  */
 public class Main {
+    private static boolean FULL_ACCESS_CHECKS = false;  // b/5861201
+
     public static void main(String[] args) {
         testClassNewInstance();
         testConstructorNewInstance();
@@ -98,6 +98,7 @@
             Constructor cons = c.getConstructor(new Class[0] /*(Class[])null*/);
             System.err.println("ERROR: Cons PackageAccess succeeded unexpectedly");
         } catch (NoSuchMethodException nsme) {
+            // constructor isn't public
             System.out.println("Cons got expected PackageAccess complaint");
         } catch (Exception ex) {
             System.err.println("Cons got unexpected PackageAccess failure");
@@ -117,6 +118,22 @@
             System.err.println("Cons got unexpected MaybeAbstract failure");
             ex.printStackTrace();
         }
+
+        // should fail
+        try {
+            Class c = Class.forName("otherpackage.PackageAccess2");
+            Constructor cons = c.getConstructor((Class[]) null);
+            if (!FULL_ACCESS_CHECKS) { throw new IllegalAccessException(); }
+            Object obj = cons.newInstance();
+            System.err.println("ERROR: Cons PackageAccess2 succeeded unexpectedly");
+        } catch (IllegalAccessException iae) {
+            // constructor is public, but class has package scope
+            System.out.println("Cons got expected PackageAccess2 complaint");
+        } catch (Exception ex) {
+            System.err.println("Cons got unexpected PackageAccess2 failure");
+            ex.printStackTrace();
+        }
+
     }
 }
 
diff --git a/tests/042-new-instance/src/otherpackage/PackageAccess.java b/tests/042-new-instance/src/otherpackage/PackageAccess.java
index 0749d67..f4541f2 100644
--- a/tests/042-new-instance/src/otherpackage/PackageAccess.java
+++ b/tests/042-new-instance/src/otherpackage/PackageAccess.java
@@ -1,6 +1,29 @@
+/*
+ * Copyright (C) 2007 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 package otherpackage;
 
 class PackageAccess {
     /*package*/ PackageAccess() {
+        System.out.println("created PackageAccess");
+    }
+}
+
+class PackageAccess2 {
+    public PackageAccess2() {
+        System.out.println("created PackageAccess2");
     }
 }
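
The new PackageAccess2 case fails at a different point than PackageAccess: for PackageAccess the getConstructor lookup itself throws NoSuchMethodException (the constructor is package-private), while for PackageAccess2 the lookup succeeds (public constructor) and it is newInstance that must reject the call because the class itself has package scope. The test fakes that exception via FULL_ACCESS_CHECKS while b/5861201 is open; on a fully conforming VM the path looks like this sketch, reusing the test's class name:

    import java.lang.reflect.Constructor;

    // Invoking a public constructor of a package-private class from another
    // package; a conforming VM ends in IllegalAccessException.
    public class PackageAccess2Sketch {
        public static void main(String[] args) throws Exception {
            Class<?> c = Class.forName("otherpackage.PackageAccess2");
            Constructor<?> cons = c.getConstructor(); // lookup succeeds
            try {
                cons.newInstance();
                System.err.println("ERROR: instance created unexpectedly");
            } catch (IllegalAccessException iae) {
                System.out.println("got expected complaint"); // class not visible
            }
        }
    }
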
diff --git a/tests/045-reflect-array/expected.txt b/tests/045-reflect-array/expected.txt
index 5c609b5..5990b34 100644
--- a/tests/045-reflect-array/expected.txt
+++ b/tests/045-reflect-array/expected.txt
@@ -1,4 +1,7 @@
 ReflectArrayTest.testSingleInt passed
+ReflectArrayTest.testSingleChar passed
+ReflectArrayTest.testSingleShort passed
+ReflectArrayTest.testSingleLong passed
 ReflectArrayTest.testSingle passed
 ReflectArrayTest.testMultiInt passed
 zero one two ++
diff --git a/tests/045-reflect-array/src/Main.java b/tests/045-reflect-array/src/Main.java
index c70e291..36f8ac3 100644
--- a/tests/045-reflect-array/src/Main.java
+++ b/tests/045-reflect-array/src/Main.java
@@ -10,6 +10,9 @@
 public class Main {
     public static void main(String[] args) {
         testSingleInt();
+        testSingleChar();
+        testSingleShort();
+        testSingleLong();
         testSingle();
         testMultiInt();
         testMulti();
@@ -33,22 +36,31 @@
         try {
             array[2] = 27;
             throw new RuntimeException("store should have failed");
-        }
-        catch (ArrayIndexOutOfBoundsException abe) {
-        }
+        } catch (ArrayIndexOutOfBoundsException abe) { }
+        try {
+            Array.setInt(intArray, 2, 27);
+            throw new RuntimeException("store should have failed");
+        } catch (ArrayIndexOutOfBoundsException abe) { }
         if (array.length != Array.getLength(intArray) ||
             array.length != 2)
         {
             throw new RuntimeException("bad len");
         }
 
+        Integer x123 = Integer.valueOf(123);
+        Integer x456 = Integer.valueOf(456);
+
+        Array.set(intArray, 0, x123);
+        Array.set(intArray, 1, x456);
+        if (!Array.get(intArray, 0).equals(x123) || !Array.get(intArray, 1).equals(x456)) {
+            throw new RuntimeException("bad 123 or 456");
+        }
+
         int[][] wrongArray;
         try {
             wrongArray = (int[][]) intArray;
             throw new RuntimeException("cast should have failed");
-        }
-        catch (ClassCastException cce) {
-        }
+        } catch (ClassCastException cce) { }
 
         intArray = Array.newInstance(Integer.TYPE, 0);
         if (Array.getLength(intArray) != 0)
@@ -56,6 +68,96 @@
         System.out.println("ReflectArrayTest.testSingleInt passed");
     }
 
+    static void testSingleChar() {
+        Object charArray = Array.newInstance(Character.TYPE, 7);
+
+        char[] array = (char[]) charArray;
+        array[0] = '0';
+        array[1] = 'W';
+        array[2] = '2';
+        array[3] = '3';
+        array[4] = 'X';
+        array[5] = '5';
+        array[6] = '6';
+        Array.setChar(charArray, 1, '1');
+        Array.setChar(charArray, 4, '4');
+        try {
+            Array.setShort(charArray, 3, (short) 'Y');
+            throw new RuntimeException("shouldn't allow short in char array");
+        } catch (IllegalArgumentException iae) {}
+        try {
+            Array.setInt(charArray, 5, 'Z');
+            throw new RuntimeException("shouldn't allow int in char array");
+        } catch (IllegalArgumentException iae) {}
+
+        try {
+            for (int i = 0; i < array.length; i++) {
+                if (Array.getInt(charArray, i) - '0' != i) {
+                    throw new RuntimeException("mismatch: " + i + " is " + array[i]);
+                }
+            }
+
+            if (Array.getInt(charArray, 4) != '4') {
+                throw new RuntimeException("load should have worked");
+            }
+        } catch (IllegalArgumentException iae) {
+            System.err.println("Couldn't Array.getInt(charArray)");
+        }
+        try {
+            Array.getByte(charArray, 2);
+            throw new RuntimeException("shouldn't allow read of char as byte");
+        } catch (IllegalArgumentException iae) {}
+
+        Array.setChar(charArray, 3, (char) 0xffff);
+        try {
+            if (Array.getInt(charArray, 3) != 0xffff) {
+                throw new RuntimeException("char got sign-extended? "
+                    + Array.getInt(charArray, 3));
+            }
+        } catch (IllegalArgumentException iae) {
+            System.err.println("Couldn't Array.getInt(charArray)");
+        }
+
+        System.out.println("ReflectArrayTest.testSingleChar passed");
+    }
+
+    static void testSingleShort() {
+        Object shortArray = Array.newInstance(Short.TYPE, 1);
+        Array.setShort(shortArray, 0, (short) -1);
+        if (Array.getInt(shortArray, 0) != -1) {
+            throw new RuntimeException("short didn't get sign-extended");
+        }
+
+        Short box = (Short) Array.get(shortArray, 0);
+
+        System.out.println("ReflectArrayTest.testSingleShort passed");
+    }
+
+    static void testSingleLong() {
+        Object longArray = Array.newInstance(Long.TYPE, 2);
+        Array.setInt(longArray, 0, 123);
+        Array.setLong(longArray, 1, 0x1122334455667788L);
+        try {
+            Array.getInt(longArray, 0);
+            throw new RuntimeException("shouldn't allow read of long as int");
+        } catch (IllegalArgumentException iae) {}
+
+        long[] array = (long[]) longArray;
+        if (array[0] != 123 || array[1] != 0x1122334455667788L) {
+            throw new RuntimeException();
+        }
+
+        float f = Array.getFloat(longArray, 0);
+        if (f < 122.9 || f > 123.1) {
+            throw new RuntimeException("long-as-float failed - " + f);
+        }
+        if (Array.getLong(longArray, 1) != 0x1122334455667788L) {
+            throw new RuntimeException("long1 failed");
+        }
+
+        System.out.println("ReflectArrayTest.testSingleLong passed");
+    }
+
     static void testSingle() {
         Object strArray;
 
@@ -64,6 +166,10 @@
         String[] array = (String[]) strArray;
         array[0] = "entry zero";
         Array.set(strArray, 1, "entry one");
+        try {
+            Array.set(strArray, 2, "entry two");
+            throw new RuntimeException("store should have failed");
+        } catch (ArrayIndexOutOfBoundsException abe) { }
 
         //System.out.println("array: " + array);
 
@@ -77,6 +183,11 @@
         {
             throw new RuntimeException("bad len");
         }
+
+        try {
+            Array.set(strArray, 0, new Integer(5));
+            throw new RuntimeException("store of Integer should have failed");
+        } catch (IllegalArgumentException iae) {}
         System.out.println("ReflectArrayTest.testSingle passed");
     }
 
diff --git a/tests/046-reflect/expected.txt b/tests/046-reflect/expected.txt
index 55b0eca..309b076 100644
--- a/tests/046-reflect/expected.txt
+++ b/tests/046-reflect/expected.txt
@@ -90,8 +90,13 @@
 myMethod (I)I
  arg=17 anInt=7
 ReflectTest done!
+public method
 checkType invoking null
 checkType got expected exception
 got methods
 NoisyInitUser is initializing
 NoisyInit is initializing
+
+generic field: java.util.List<java.lang.String>
+generic method fancyMethod params='[1] java.util.ArrayList<java.lang.String>' ret='java.util.Map<java.lang.Integer, java.lang.String>'
+generic ctor Main params='[1] java.util.ArrayList<java.lang.Integer>'
diff --git a/tests/046-reflect/src/Main.java b/tests/046-reflect/src/Main.java
index e604979..e2a3929 100644
--- a/tests/046-reflect/src/Main.java
+++ b/tests/046-reflect/src/Main.java
@@ -3,11 +3,18 @@
 import java.lang.reflect.*;
 import java.io.IOException;
 import java.util.Collections;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
 
 /**
  * Reflection test.
  */
 public class Main {
+    private static boolean FULL_ACCESS_CHECKS = false;  // b/5861201
+    public Main() {}
+    public Main(ArrayList<Integer> stuff) {}
+
     void printMethodInfo(Method meth) {
         Class[] params, exceptions;
         int i;
@@ -55,6 +62,48 @@
         System.out.println("  ::: " + one + ":" + two + ":" + three);
     }
 
+    public static void checkAccess() {
+        try {
+            Class target = otherpackage.Other.class;
+            Object instance = new otherpackage.Other();
+            Method meth;
+
+            meth = target.getMethod("publicMethod", (Class[]) null);
+            meth.invoke(instance);
+
+            try {
+                meth = target.getMethod("packageMethod", (Class[]) null);
+                System.err.println("succeeded on package-scope method");
+            } catch (NoSuchMethodException nsme) {
+                // good
+            }
+
+
+            instance = otherpackage.Other.getInnerClassInstance();
+            target = instance.getClass();
+            meth = target.getMethod("innerMethod", (Class[]) null);
+            try {
+                if (!FULL_ACCESS_CHECKS) { throw new IllegalAccessException(); }
+                meth.invoke(instance);
+                System.err.println("inner-method invoke unexpectedly worked");
+            } catch (IllegalAccessException iae) {
+                // good
+            }
+
+            Field field = target.getField("innerField");
+            try {
+                int x = field.getInt(instance);
+                if (!FULL_ACCESS_CHECKS) { throw new IllegalAccessException(); }
+                System.err.println("field get unexpectedly worked: " + x);
+            } catch (IllegalAccessException iae) {
+                // good
+            }
+        } catch (Exception ex) {
+            System.out.println("----- unexpected exception -----");
+            ex.printStackTrace();
+        }
+    }
+
     public void run() {
         Class target = Target.class;
         Method meth = null;
@@ -295,8 +344,7 @@
             targ = cons.newInstance(args);
             targ.myMethod(17);
 
-        }
-        catch (Exception ex) {
+        } catch (Exception ex) {
             System.out.println("----- unexpected exception -----");
             ex.printStackTrace();
         }
@@ -347,14 +395,80 @@
         /* neither NoisyInit nor NoisyInitUser should be initialized yet */
         NoisyInitUser niu = new NoisyInitUser();
         NoisyInit ni = new NoisyInit();
+
+        System.out.println("");
     }
 
+
+    /*
+     * Test some generic type stuff.
+     */
+    public List<String> dummy;
+    public Map<Integer,String> fancyMethod(ArrayList<String> blah) { return null; }
+    public static void checkGeneric() {
+        Field field;
+        try {
+            field = Main.class.getField("dummy");
+        } catch (NoSuchFieldException nsfe) {
+            throw new RuntimeException(nsfe);
+        }
+        Type listType = field.getGenericType();
+        System.out.println("generic field: " + listType);
+
+        Method method;
+        try {
+            method = Main.class.getMethod("fancyMethod",
+                new Class[] { ArrayList.class });
+        } catch (NoSuchMethodException nsme) {
+            throw new RuntimeException(nsme);
+        }
+        Type[] parmTypes = method.getGenericParameterTypes();
+        Type ret = method.getGenericReturnType();
+        System.out.println("generic method " + method.getName() + " params='"
+            + stringifyTypeArray(parmTypes) + "' ret='" + ret + "'");
+
+        Constructor ctor;
+        try {
+            ctor = Main.class.getConstructor(new Class[] { ArrayList.class });
+        } catch (NoSuchMethodException nsme) {
+            throw new RuntimeException(nsme);
+        }
+        parmTypes = ctor.getGenericParameterTypes();
+        System.out.println("generic ctor " + ctor.getName() + " params='"
+            + stringifyTypeArray(parmTypes) + "'");
+    }
+
+    /*
+     * Convert an array of Type into a string.  Start with an array count.
+     */
+    private static String stringifyTypeArray(Type[] types) {
+        StringBuilder stb = new StringBuilder();
+        boolean first = true;
+
+        stb.append("[" + types.length + "]");
+
+        for (Type t: types) {
+            if (first) {
+                stb.append(" ");
+                first = false;
+            } else {
+                stb.append(", ");
+            }
+            stb.append(t.toString());
+        }
+
+        return stb.toString();
+    }
+
+
     public static void main(String[] args) {
         Main test = new Main();
         test.run();
 
+        checkAccess();
         checkType();
         checkInit();
+        checkGeneric();
     }
 }
 
diff --git a/tests/046-reflect/src/otherpackage/Other.java b/tests/046-reflect/src/otherpackage/Other.java
new file mode 100644
index 0000000..702ab6d
--- /dev/null
+++ b/tests/046-reflect/src/otherpackage/Other.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package otherpackage;
+
+public class Other {
+    public void publicMethod() {
+        System.out.println("public method");
+    }
+    void packageMethod() {
+        System.out.println("package method");
+    }
+
+    public static InnerOther getInnerClassInstance() {
+        return new InnerOther();
+    }
+
+    private static class InnerOther {
+        public void innerMethod() {
+            System.out.println("inner method");
+        }
+
+        public int innerField = 7;
+    }
+}
diff --git a/tests/050-sync-test/src/Main.java b/tests/050-sync-test/src/Main.java
index c2ea192..418f5f4 100644
--- a/tests/050-sync-test/src/Main.java
+++ b/tests/050-sync-test/src/Main.java
@@ -23,8 +23,7 @@
         System.out.println("GOING");
         try {
             Thread.sleep(1000);
-        }
-        catch (InterruptedException ie) {
+        } catch (InterruptedException ie) {
             System.out.println("INTERRUPT!");
             ie.printStackTrace();
         }
@@ -38,23 +37,22 @@
         two = new CpuThread(2);
 
         one.start();
-        two.start();
 
         try {
             Thread.sleep(100);
-        }
-        catch (InterruptedException ie) {
+        } catch (InterruptedException ie) {
             System.out.println("INTERRUPT!");
             ie.printStackTrace();
         }
 
+        two.start();
+
         //System.out.println("main: off and running");
 
         try {
             one.join();
             two.join();
-        }
-        catch (InterruptedException ie) {
+        } catch (InterruptedException ie) {
             System.out.println("INTERRUPT!");
             ie.printStackTrace();
         }
@@ -88,34 +86,29 @@
         //System.out.print("thread running -- ");
         //System.out.println(Thread.currentThread().getName());
 
-        for (int i = 0; i < 10; i++) {
-            output(mNumber);
-        }
+        synchronized (mSyncable) {
+            for (int i = 0; i < 10; i++) {
+                output(mNumber);
+            }
 
-        System.out.print("Final result: ");
-        System.out.println(mCount);
+            System.out.print("Final result: ");
+            System.out.println(mCount);
+        }
     }
 
     void output(int num) {
-        /*
-         * Delete the next line; last "final result" should != 20.
-         */
-        synchronized (mSyncable)
-        {
-            int i, count;
+        int count = mCount;
 
-            count = mCount;
+        System.out.print("going: ");
+        System.out.println(num);
 
-            System.out.print("going: ");
-            System.out.println(num);
-
-            /* burn CPU; adjust end value so we exceed scheduler quantum */
-            for (int j = 0; j < 5000; j++)
-                ;
-
-            count++;
-            mCount = count;
+        /* burn CPU; adjust end value so we exceed scheduler quantum */
+        for (int j = 0; j < 5000; j++) {
+            ;
         }
+
+        count++;
+        mCount = count;
     }
 }
 
@@ -150,14 +143,12 @@
                 synchronized (mWaitOnMe) {
                     mWaitOnMe.wait(9000);
                 }
-            }
-            catch (InterruptedException ie) {
+            } catch (InterruptedException ie) {
                 // Expecting this; interrupted should be false.
                 System.out.println(Thread.currentThread().getName() +
                         " interrupted, flag=" + Thread.interrupted());
                 intr = true;
-            }
-            catch (Exception ex) {
+            } catch (Exception ex) {
                 ex.printStackTrace();
             }
 
@@ -166,8 +157,7 @@
         } else {
             try {
                 Thread.sleep(2000);
-            }
-            catch (InterruptedException ie) {
+            } catch (InterruptedException ie) {
                 System.out.println("PESKY INTERRUPTED?");
             }
 
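
The restructuring above changes the lock's granularity: previously each output() call synchronized on mSyncable individually, so the two CpuThreads could interleave between calls; now each thread holds mSyncable across its entire loop plus its "Final result" print, making both the count and the output order deterministic. A stripped-down model of the new shape, in the test's idiom:

    // Coarse locking: each thread's whole burst is atomic w.r.t. the other.
    public class CoarseLock {
        static final Object lock = new Object();
        static int count = 0;

        public static void main(String[] args) throws InterruptedException {
            Runnable burst = new Runnable() {
                public void run() {
                    synchronized (lock) { // held across the whole loop
                        for (int i = 0; i < 10; i++) {
                            count++;
                        }
                        System.out.println("Final result: " + count);
                    }
                }
            };
            Thread one = new Thread(burst);
            Thread two = new Thread(burst);
            one.start();
            two.start();
            one.join();
            two.join(); // whichever thread runs second prints 20
        }
    }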
diff --git a/tests/051-thread/expected.txt b/tests/051-thread/expected.txt
index fbe32f6..8e6b153 100644
--- a/tests/051-thread/expected.txt
+++ b/tests/051-thread/expected.txt
@@ -1,515 +1,5 @@
-running 0
-running 1
-running 2
-running 3
-running 4
-running 5
-running 6
-running 7
-running 8
-running 9
-running 10
-running 11
-running 12
-running 13
-running 14
-running 15
-running 16
-running 17
-running 18
-running 19
-running 20
-running 21
-running 22
-running 23
-running 24
-running 25
-running 26
-running 27
-running 28
-running 29
-running 30
-running 31
-running 32
-running 33
-running 34
-running 35
-running 36
-running 37
-running 38
-running 39
-running 40
-running 41
-running 42
-running 43
-running 44
-running 45
-running 46
-running 47
-running 48
-running 49
-running 50
-running 51
-running 52
-running 53
-running 54
-running 55
-running 56
-running 57
-running 58
-running 59
-running 60
-running 61
-running 62
-running 63
-running 64
-running 65
-running 66
-running 67
-running 68
-running 69
-running 70
-running 71
-running 72
-running 73
-running 74
-running 75
-running 76
-running 77
-running 78
-running 79
-running 80
-running 81
-running 82
-running 83
-running 84
-running 85
-running 86
-running 87
-running 88
-running 89
-running 90
-running 91
-running 92
-running 93
-running 94
-running 95
-running 96
-running 97
-running 98
-running 99
-running 100
-running 101
-running 102
-running 103
-running 104
-running 105
-running 106
-running 107
-running 108
-running 109
-running 110
-running 111
-running 112
-running 113
-running 114
-running 115
-running 116
-running 117
-running 118
-running 119
-running 120
-running 121
-running 122
-running 123
-running 124
-running 125
-running 126
-running 127
-running 128
-running 129
-running 130
-running 131
-running 132
-running 133
-running 134
-running 135
-running 136
-running 137
-running 138
-running 139
-running 140
-running 141
-running 142
-running 143
-running 144
-running 145
-running 146
-running 147
-running 148
-running 149
-running 150
-running 151
-running 152
-running 153
-running 154
-running 155
-running 156
-running 157
-running 158
-running 159
-running 160
-running 161
-running 162
-running 163
-running 164
-running 165
-running 166
-running 167
-running 168
-running 169
-running 170
-running 171
-running 172
-running 173
-running 174
-running 175
-running 176
-running 177
-running 178
-running 179
-running 180
-running 181
-running 182
-running 183
-running 184
-running 185
-running 186
-running 187
-running 188
-running 189
-running 190
-running 191
-running 192
-running 193
-running 194
-running 195
-running 196
-running 197
-running 198
-running 199
-running 200
-running 201
-running 202
-running 203
-running 204
-running 205
-running 206
-running 207
-running 208
-running 209
-running 210
-running 211
-running 212
-running 213
-running 214
-running 215
-running 216
-running 217
-running 218
-running 219
-running 220
-running 221
-running 222
-running 223
-running 224
-running 225
-running 226
-running 227
-running 228
-running 229
-running 230
-running 231
-running 232
-running 233
-running 234
-running 235
-running 236
-running 237
-running 238
-running 239
-running 240
-running 241
-running 242
-running 243
-running 244
-running 245
-running 246
-running 247
-running 248
-running 249
-running 250
-running 251
-running 252
-running 253
-running 254
-running 255
-running 256
-running 257
-running 258
-running 259
-running 260
-running 261
-running 262
-running 263
-running 264
-running 265
-running 266
-running 267
-running 268
-running 269
-running 270
-running 271
-running 272
-running 273
-running 274
-running 275
-running 276
-running 277
-running 278
-running 279
-running 280
-running 281
-running 282
-running 283
-running 284
-running 285
-running 286
-running 287
-running 288
-running 289
-running 290
-running 291
-running 292
-running 293
-running 294
-running 295
-running 296
-running 297
-running 298
-running 299
-running 300
-running 301
-running 302
-running 303
-running 304
-running 305
-running 306
-running 307
-running 308
-running 309
-running 310
-running 311
-running 312
-running 313
-running 314
-running 315
-running 316
-running 317
-running 318
-running 319
-running 320
-running 321
-running 322
-running 323
-running 324
-running 325
-running 326
-running 327
-running 328
-running 329
-running 330
-running 331
-running 332
-running 333
-running 334
-running 335
-running 336
-running 337
-running 338
-running 339
-running 340
-running 341
-running 342
-running 343
-running 344
-running 345
-running 346
-running 347
-running 348
-running 349
-running 350
-running 351
-running 352
-running 353
-running 354
-running 355
-running 356
-running 357
-running 358
-running 359
-running 360
-running 361
-running 362
-running 363
-running 364
-running 365
-running 366
-running 367
-running 368
-running 369
-running 370
-running 371
-running 372
-running 373
-running 374
-running 375
-running 376
-running 377
-running 378
-running 379
-running 380
-running 381
-running 382
-running 383
-running 384
-running 385
-running 386
-running 387
-running 388
-running 389
-running 390
-running 391
-running 392
-running 393
-running 394
-running 395
-running 396
-running 397
-running 398
-running 399
-running 400
-running 401
-running 402
-running 403
-running 404
-running 405
-running 406
-running 407
-running 408
-running 409
-running 410
-running 411
-running 412
-running 413
-running 414
-running 415
-running 416
-running 417
-running 418
-running 419
-running 420
-running 421
-running 422
-running 423
-running 424
-running 425
-running 426
-running 427
-running 428
-running 429
-running 430
-running 431
-running 432
-running 433
-running 434
-running 435
-running 436
-running 437
-running 438
-running 439
-running 440
-running 441
-running 442
-running 443
-running 444
-running 445
-running 446
-running 447
-running 448
-running 449
-running 450
-running 451
-running 452
-running 453
-running 454
-running 455
-running 456
-running 457
-running 458
-running 459
-running 460
-running 461
-running 462
-running 463
-running 464
-running 465
-running 466
-running 467
-running 468
-running 469
-running 470
-running 471
-running 472
-running 473
-running 474
-running 475
-running 476
-running 477
-running 478
-running 479
-running 480
-running 481
-running 482
-running 483
-running 484
-running 485
-running 486
-running 487
-running 488
-running 489
-running 490
-running 491
-running 492
-running 493
-running 494
-running 495
-running 496
-running 497
-running 498
-running 499
-running 500
-running 501
-running 502
-running 503
-running 504
-running 505
-running 506
-running 507
-running 508
-running 509
-running 510
-running 511
+Initializing System.out...
+Thread count: 512
 Starting thread 'Thready'
 @ Thread running
 @ Got expected setDaemon exception
diff --git a/tests/051-thread/src/Main.java b/tests/051-thread/src/Main.java
index 9acc89e..ea587af 100644
--- a/tests/051-thread/src/Main.java
+++ b/tests/051-thread/src/Main.java
@@ -1,20 +1,42 @@
-// Copyright 2006 The Android Open Source Project
+/*
+ * Copyright (C) 2006 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.ArrayList;
 
 /**
  * Test some basic thread stuff.
  */
 public class Main {
-    public static void main(String[] args) {
+    public static void main(String[] args) throws Exception {
+        System.out.println("Initializing System.out...");
+
+        MyThread[] threads = new MyThread[512];
         for (int i = 0; i < 512; i++) {
-            MyThread myThread = new MyThread();
-            myThread.start();
-            try {
-                Thread.sleep(1);
-            } catch (InterruptedException ie) {
-                ie.printStackTrace();
-            }
+            threads[i] = new MyThread();
         }
 
+        for (MyThread thread : threads) {
+            thread.start();
+        }
+        for (MyThread thread : threads) {
+            thread.join();
+        }
+
+        System.out.println("Thread count: " + MyThread.mCount);
+
         go();
         System.out.println("thread test done");
     }
@@ -40,9 +62,11 @@
      * Simple thread capacity test.
      */
     static class MyThread extends Thread {
-        private static int mCount = 0;
+        static int mCount = 0;
         public void run() {
-            System.out.println("running " + (mCount++));
+            synchronized (MyThread.class) {
+                ++mCount;
+            }
         }
     }
 }
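
The 051-thread rewrite drops the order-sensitive "running N" prints: all 512 threads are created first, started, then joined, and only the final count is printed. join() supplies the happens-before edge so main reads the fully updated mCount, and synchronized (MyThread.class) keeps each increment atomic. An equivalent sketch using AtomicInteger instead of a class-level lock (an alternative technique, not what the test uses):

    import java.util.concurrent.atomic.AtomicInteger;

    public class ThreadCount {
        static final AtomicInteger count = new AtomicInteger();

        public static void main(String[] args) throws InterruptedException {
            Thread[] threads = new Thread[512];
            for (int i = 0; i < threads.length; i++) {
                threads[i] = new Thread(new Runnable() {
                    public void run() { count.incrementAndGet(); }
                });
            }
            for (Thread t : threads) t.start();
            for (Thread t : threads) t.join(); // happens-before for the read below
            System.out.println("Thread count: " + count.get()); // 512
        }
    }
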
diff --git a/tests/061-out-of-memory/src/Main.java b/tests/061-out-of-memory/src/Main.java
index b5999b3..c812c81 100644
--- a/tests/061-out-of-memory/src/Main.java
+++ b/tests/061-out-of-memory/src/Main.java
@@ -43,18 +43,13 @@
     private static void testOomeLarge() {
         System.out.println("testOomeLarge beginning");
 
-        /* Just shy of the typical max heap size so that it will actually
-         * try to allocate it instead of short-circuiting.
-         *
-         * TODO: stop assuming the VM defaults to 16MB max
-         */
-        final int SIXTEEN_MB = (16 * 1024 * 1024 - 32);
-
         Boolean sawEx = false;
-        byte a[];
+        byte[] a;
 
         try {
-            a = new byte[SIXTEEN_MB];
+            // Just shy of the typical max heap size so that it will actually
+            // try to allocate it instead of short-circuiting.
+            a = new byte[(int) Runtime.getRuntime().maxMemory() - 32];
         } catch (OutOfMemoryError oom) {
             //Log.i(TAG, "HeapTest/OomeLarge caught " + oom);
             sawEx = true;
@@ -71,11 +66,8 @@
     /* Do this in another method so that the GC has a chance of freeing the
      * list afterwards.  Even if we null out list when we're done, the conservative
      * GC may see a stale pointer to it in a register.
-     *
-     * TODO: stop assuming the VM defaults to 16MB max
      */
     private static boolean testOomeSmallInternal() {
-        final int SIXTEEN_MB = (16 * 1024 * 1024);
         final int LINK_SIZE = 6 * 4; // estimated size of a LinkedList's node
 
         LinkedList<Object> list = new LinkedList<Object>();
@@ -86,7 +78,7 @@
         while (objSize >= LINK_SIZE) {
             boolean sawEx = false;
             try {
-                for (int i = 0; i < SIXTEEN_MB / objSize; i++) {
+                for (int i = 0; i < Runtime.getRuntime().maxMemory() / objSize; i++) {
                     list.add((Object)new byte[objSize]);
                 }
             } catch (OutOfMemoryError oom) {
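
Replacing the hardcoded 16MB constants with Runtime.getRuntime().maxMemory() lets the OOM tests track whatever heap cap the VM was actually started with. The API in isolation (output is VM-dependent):

    // maxMemory() is the ceiling the allocator enforces; allocating just
    // under it is what drives the test's OutOfMemoryError path.
    public class MaxMemoryProbe {
        public static void main(String[] args) {
            long max = Runtime.getRuntime().maxMemory();
            System.out.println("maxMemory = " + max + " bytes");
            System.out.println("large allocation would request " + (max - 32) + " bytes");
        }
    }
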
diff --git a/tests/068-classloader/src/FancyLoader.java b/tests/068-classloader/src/FancyLoader.java
index 173b08f..d04083a 100644
--- a/tests/068-classloader/src/FancyLoader.java
+++ b/tests/068-classloader/src/FancyLoader.java
@@ -53,7 +53,7 @@
         super(parent);
 
         try {
-            mDexClass = parent.loadClass("dalvik/system/DexFile");
+            mDexClass = parent.loadClass("dalvik.system.DexFile");
         } catch (ClassNotFoundException cnfe) {
             // ignore -- not running Dalvik
         }
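
The one-character FancyLoader fix matters because ClassLoader.loadClass takes a binary name with dots ("dalvik.system.DexFile"), not the slash-separated internal form; with slashes the call throws ClassNotFoundException and mDexClass silently stays null. A quick illustration against a class present on any JVM:

    // loadClass wants dot-separated binary names; slash forms fail.
    public class NameForms {
        public static void main(String[] args) throws Exception {
            ClassLoader cl = NameForms.class.getClassLoader();
            System.out.println(cl.loadClass("java.util.ArrayList")); // resolves
            try {
                cl.loadClass("java/util/ArrayList"); // internal form, wrong here
            } catch (ClassNotFoundException expected) {
                System.out.println("slash-separated name rejected");
            }
        }
    }
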
diff --git a/tests/070-nio-buffer/expected.txt b/tests/070-nio-buffer/expected.txt
index e271001..ddb45af 100644
--- a/tests/070-nio-buffer/expected.txt
+++ b/tests/070-nio-buffer/expected.txt
@@ -1,3 +1,6 @@
+Direct byte buffer has array: true
 Got expected buffer overflow exception
 Got expected out-of-bounds exception
 Got expected buffer overflow exception
+00fbfb2ec03000001234567840490fd01122334455667788000000000000000100000000
+ccfb2efb30c0cccc78563412d00f494088776655443322110100000000000000cccccccc
diff --git a/tests/070-nio-buffer/src/Main.java b/tests/070-nio-buffer/src/Main.java
index bfcab3a..a7433b8 100644
--- a/tests/070-nio-buffer/src/Main.java
+++ b/tests/070-nio-buffer/src/Main.java
@@ -17,14 +17,21 @@
 import java.nio.BufferOverflowException;
 import java.nio.ByteBuffer;
 import java.nio.ByteOrder;
+import java.nio.CharBuffer;
+import java.nio.DoubleBuffer;
 import java.nio.FloatBuffer;
 import java.nio.IntBuffer;
+import java.nio.LongBuffer;
 import java.nio.ShortBuffer;
 
 public class Main {
     public static void main(String[] args) {
-         intFloatTest();
-         basicShortTest();
+        ByteBuffer buf = ByteBuffer.allocateDirect(16);
+        System.out.println("Direct byte buffer has array: " + buf.hasArray());
+
+        intFloatTest();
+        basicShortTest();
+        primTest();
     }
 
     /*
@@ -94,4 +101,77 @@
         int1.put (data);
         int1.position (0);
     }
+
+    /*
+     * Exercise all "view buffer" classes, in both byte orders.
+     */
+    public static void primTest() {
+        ByteBuffer directBuf = ByteBuffer.allocateDirect(36);
+        directBuf.order(ByteOrder.BIG_ENDIAN);
+        storeValues(directBuf);
+
+        for (int i = 0; i < 36; i++) {
+            directBuf.put(i, (byte) 0xcc);
+        }
+
+        directBuf.order(ByteOrder.LITTLE_ENDIAN);
+        storeValues(directBuf);
+    }
+
+    static void storeValues(ByteBuffer directBuf) {
+        directBuf.position(0);
+        ShortBuffer shortBuf = directBuf.asShortBuffer();
+        CharBuffer charBuf = directBuf.asCharBuffer();
+        IntBuffer intBuf = directBuf.asIntBuffer();
+        FloatBuffer floatBuf = directBuf.asFloatBuffer();
+        LongBuffer longBuf = directBuf.asLongBuffer();
+        DoubleBuffer doubleBuf = directBuf.asDoubleBuffer();
+
+        final byte byteValue = -5;
+        final short shortValue = -1234;
+        final char charValue = 49200;
+        final int intValue = 0x12345678;
+        final float floatValue = 3.14159f;
+        final long longValue = 0x1122334455667788L;
+        final double doubleValue = Double.MIN_VALUE;
+
+        if (directBuf.put(1, byteValue).get(1) != byteValue) {
+            throw new RuntimeException("byte get/store failed");
+        }
+        if (shortBuf.put(1, shortValue).get(1) != shortValue) {
+            throw new RuntimeException("short get/store failed");
+        }
+        if (charBuf.put(2, charValue).get(2) != charValue) {
+            throw new RuntimeException("char get/store failed");
+        }
+        if (intBuf.put(2, intValue).get(2) != intValue) {
+            throw new RuntimeException("int get/store failed");
+        }
+        if (floatBuf.put(3, floatValue).get(3) != floatValue) {
+            throw new RuntimeException("float get/store failed");
+        }
+        if (longBuf.put(2, longValue).get(2) != longValue) {
+            throw new RuntimeException("long get/store failed");
+        }
+        if (doubleBuf.put(3, doubleValue).get(3) != doubleValue) {
+            throw new RuntimeException("double get/store failed");
+        }
+
+        directBuf.position(0);
+        char[] outBuf = new char[directBuf.limit() * 2];
+        for (int i = 0; i < directBuf.limit(); i++) {
+            byte b = directBuf.get();
+            outBuf[i*2] = hexChar((byte) ((b >> 4) & 0x0f));
+            outBuf[i*2+1] = hexChar((byte) (b & 0x0f));
+        }
+        System.out.println(new String(outBuf));
+    }
+
+    static char hexChar(byte b) {
+        if (b < 10) {
+            return (char) ('0' + b);
+        } else {
+            return (char) ('a' + b - 10);
+        }
+    }
 }
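
For context on what primTest() exercises: a view buffer shares storage with
the byte buffer it was created from, and the byte order in effect when the
view is created fixes how multi-byte values map onto the underlying bytes.
A minimal sketch (heap rather than direct buffer, purely for illustration):

    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;
    import java.nio.IntBuffer;

    public class ViewDemo {
        public static void main(String[] args) {
            ByteBuffer bytes = ByteBuffer.allocate(4).order(ByteOrder.BIG_ENDIAN);
            IntBuffer ints = bytes.asIntBuffer();  // view over the same storage
            ints.put(0, 0x12345678);
            // Big-endian order: most significant byte first.
            System.out.printf("%02x %02x %02x %02x%n",
                    bytes.get(0), bytes.get(1), bytes.get(2), bytes.get(3));
            // prints: 12 34 56 78
        }
    }
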
diff --git a/tests/083-jit-regressions/expected.txt b/tests/083-jit-regressions/expected.txt
index 1f30d21..4b9ad5b 100644
--- a/tests/083-jit-regressions/expected.txt
+++ b/tests/083-jit-regressions/expected.txt
@@ -1,3 +1,4 @@
 b2296099 passes
 b2302318 passes
 b2487514 passes
+b5884080 passes
diff --git a/tests/083-jit-regressions/info.txt b/tests/083-jit-regressions/info.txt
index b791aba..00c24ee 100644
--- a/tests/083-jit-regressions/info.txt
+++ b/tests/083-jit-regressions/info.txt
@@ -8,3 +8,4 @@
 2296099 JIT shift bug
 2302318 Crash during spin-on-suspend testing
 2487514 Missed exception in PriorityBlockingQueueTest.testToArray1_BadArg
+5884080 ICS JIT regression in nested loop formation
diff --git a/tests/083-jit-regressions/src/Main.java b/tests/083-jit-regressions/src/Main.java
index 1f1dee3..3b596db 100644
--- a/tests/083-jit-regressions/src/Main.java
+++ b/tests/083-jit-regressions/src/Main.java
@@ -24,6 +24,7 @@
         b2296099Test();
         b2302318Test();
         b2487514Test();
+        b5884080Test();
     }
 
     static void b2296099Test() throws Exception {
@@ -105,6 +106,26 @@
                                " (expecting 1000)");
         }
     }
+
+    static void b5884080Test() {
+        int vA = 1;
+
+        int l = 0;
+        do
+        {
+            int k = 0;
+            do
+                vA += 1;
+            while(++k < 100);
+        } while(++l < 1000);
+        if (vA == 100001) {
+            System.out.println("b5884080 passes");
+        }
+        else {
+            System.out.println("b5884080 fails: vA is " + vA +
+                               " (expecting 100001)");
+        }
+    }
 }
 
 class SpinThread extends Thread {
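
(For the record, the expected constant in b5884080Test follows from simple
arithmetic: vA starts at 1, the inner loop adds 100 per outer iteration, and
the outer loop runs 1000 times, so vA = 1 + 1000 * 100 = 100001.)
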
diff --git a/tests/084-class-init/src/Main.java b/tests/084-class-init/src/Main.java
index f777113..de28ed9 100644
--- a/tests/084-class-init/src/Main.java
+++ b/tests/084-class-init/src/Main.java
@@ -76,8 +76,8 @@
 
     static class FieldThread extends Thread {
         public void run() {
-            /* allow class init to start */
-            Main.sleep(200);
+            /* allow SlowInit's <clinit> to start */
+            Main.sleep(1000);
 
             /* collect fields; should delay until class init completes */
             int field0, field1, field2, field3;
@@ -87,7 +87,7 @@
             field3 = SlowInit.FIELD3.getValue();
 
             /* let MethodThread print first */
-            Main.sleep(400);
+            Main.sleep(5000);
             System.out.println("Fields (child thread): " +
                 field0 + field1 + field2 + field3);
         }
@@ -95,8 +95,8 @@
 
     static class MethodThread extends Thread {
         public void run() {
-            /* allow class init to start */
-            Main.sleep(200);
+            /* allow SlowInit's <clinit> to start */
+            Main.sleep(1000);
 
             /* use a method that shouldn't be accessible yet */
             SlowInit.printMsg("MethodThread message");
diff --git a/tests/084-class-init/src/SlowInit.java b/tests/084-class-init/src/SlowInit.java
index 8ac72be..f0c6919 100644
--- a/tests/084-class-init/src/SlowInit.java
+++ b/tests/084-class-init/src/SlowInit.java
@@ -32,7 +32,7 @@
         FIELD0.setValue(111);
         FIELD1.setValue(222);
         printMsg("SlowInit static block pre-sleep");
-        Main.sleep(600);
+        Main.sleep(4000);
         printMsg("SlowInit static block post-sleep");
         FIELD2.setValue(333);
         FIELD3.setValue(444);
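
The longer sleeps give SlowInit's <clinit> time to start before the worker
threads touch the class. The guarantee under test (JLS 12.4.2) is that a
thread referencing a class whose initializer is running in another thread
blocks until initialization completes. A self-contained sketch (names and
timings are illustrative):

    public class ClinitBlockDemo {
        static class Slow {
            static final int VALUE;
            static {
                try { Thread.sleep(2000); } catch (InterruptedException ie) {}
                VALUE = 42;
            }
        }

        public static void main(String[] args) throws Exception {
            new Thread() {
                public void run() {
                    int unused = Slow.VALUE;  // kicks off <clinit>
                }
            }.start();
            Thread.sleep(100);                // let <clinit> get under way
            long start = System.currentTimeMillis();
            int v = Slow.VALUE;               // blocks until <clinit> finishes
            System.out.println("saw " + v + " after "
                    + (System.currentTimeMillis() - start) + " ms");
        }
    }
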
diff --git a/tests/086-null-super/src/Main.java b/tests/086-null-super/src/Main.java
index 6decb20..82237c5 100644
--- a/tests/086-null-super/src/Main.java
+++ b/tests/086-null-super/src/Main.java
@@ -76,7 +76,7 @@
                 * through reflection, then call loadClass on it.
                  */
                 Class mDexClass = ClassLoader.getSystemClassLoader().
-                        loadClass("dalvik/system/DexFile");
+                        loadClass("dalvik.system.DexFile");
                 Constructor ctor = mDexClass.
                         getConstructor(new Class[] {String.class});
                 Object mDexFile = ctor.newInstance(DEX_FILE);
diff --git a/tests/087-gc-after-link/src/Main.java b/tests/087-gc-after-link/src/Main.java
index dc68f9f..11fb2d3 100644
--- a/tests/087-gc-after-link/src/Main.java
+++ b/tests/087-gc-after-link/src/Main.java
@@ -79,7 +79,7 @@
                      * through reflection, then call loadClass on it.
                      */
                     dexClass = ClassLoader.getSystemClassLoader().
-                            loadClass("dalvik/system/DexFile");
+                            loadClass("dalvik.system.DexFile");
                     Constructor ctor = dexClass.
                             getConstructor(new Class[] {String.class});
                     dexFile = ctor.newInstance(DEX_FILE);
diff --git a/tests/092-locale/expected.txt b/tests/092-locale/expected.txt
new file mode 100644
index 0000000..0a955e7
--- /dev/null
+++ b/tests/092-locale/expected.txt
@@ -0,0 +1,12 @@
+USA(GMT): Sunday, January 1, 2012
+USA: first=1, name=Sunday
+France(GMT): Monday, January 2, 2012
+France: first=2, name=lundi
+USA dfs: [AM, PM]
+en_US: USD $2
+jp_JP: JPY ¥0
+Normalizer passed
+loc: en_US
+ iso3=eng
+loc: eng_USA
+ iso3=eng
diff --git a/tests/092-locale/info.txt b/tests/092-locale/info.txt
new file mode 100644
index 0000000..e3c3a98
--- /dev/null
+++ b/tests/092-locale/info.txt
@@ -0,0 +1 @@
+Exercise some locale-specific classes.
diff --git a/tests/092-locale/src/Main.java b/tests/092-locale/src/Main.java
new file mode 100644
index 0000000..8916a29
--- /dev/null
+++ b/tests/092-locale/src/Main.java
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.text.DateFormat;
+import java.text.DateFormatSymbols;
+import java.text.Normalizer;
+import java.util.Arrays;
+import java.util.Calendar;
+import java.util.Currency;
+import java.util.Date;
+import java.util.Locale;
+import java.util.MissingResourceException;
+import java.util.TimeZone;
+
+/**
+ * Exercise some locale-table-driven stuff.
+ */
+public class Main {
+
+    public static void main(String[] args) {
+        try {
+            testCalendar();
+        } catch (Exception ex) {
+            ex.printStackTrace();
+        }
+
+        try {
+            testDateFormatSymbols();
+        } catch (Exception ex) {
+            ex.printStackTrace();
+        }
+
+        try {
+            testCurrency();
+        } catch (Exception ex) {
+            ex.printStackTrace();
+        }
+
+        try {
+            testNormalizer();
+        } catch (Exception ex) {
+            ex.printStackTrace();
+        }
+
+        try {
+            testIso3();
+        } catch (Exception ex) {
+            ex.printStackTrace();
+        }
+    }
+
+    static void testCalendar() {
+        TimeZone tz = TimeZone.getTimeZone("GMT");
+
+        Locale usa = new Locale("en", "US");
+        Calendar usaCal = Calendar.getInstance(tz, usa);
+        usaCal.clear();     // don't want current date/time
+        usaCal.set(2012, Calendar.JANUARY, 1);
+
+        Date when = usaCal.getTime();
+        DateFormat fmt = DateFormat.getDateInstance(DateFormat.FULL, usa);
+        fmt.setTimeZone(tz);    // defaults to local TZ; force GMT
+        System.out.println("USA(" + fmt.getTimeZone().getID() + "): "
+            + fmt.format(when));
+
+        System.out.println("USA: first="
+            + usaCal.getFirstDayOfWeek() + ", name="
+            + usaCal.getDisplayName(Calendar.DAY_OF_WEEK, Calendar.LONG, usa));
+
+
+        Locale france = new Locale("fr", "FR");
+        Calendar franceCal = Calendar.getInstance(tz, france);
+        franceCal.clear();
+        franceCal.set(2012, Calendar.JANUARY, 2);
+
+        when = franceCal.getTime();
+        // Deliberately format France's date with the US locale; the
+        // French localization is exercised via getDisplayName() below.
+        fmt = DateFormat.getDateInstance(DateFormat.FULL, usa);
+        fmt.setTimeZone(tz);    // defaults to local TZ; force GMT
+        System.out.println("France(" + fmt.getTimeZone().getID() + "): "
+            + fmt.format(when));
+
+        System.out.println("France: first="
+            + franceCal.getFirstDayOfWeek() + ", name="
+            + franceCal.getDisplayName(Calendar.DAY_OF_WEEK, Calendar.LONG, france));
+    }
+
+    static void testDateFormatSymbols() {
+        Locale usa = new Locale("en", "US");
+        DateFormatSymbols syms = DateFormatSymbols.getInstance(usa);
+        String[] list = syms.getAmPmStrings();
+        System.out.println("USA dfs: " + Arrays.deepToString(list));
+    }
+
+    static void testCurrency() {
+        Locale usa = new Locale("en", "US");
+        Currency dollars = Currency.getInstance(usa);
+
+        System.out.println(usa.toString() + ": " + dollars.toString()
+            + " " + dollars.getSymbol() + dollars.getDefaultFractionDigits());
+
+        // Note: "ja" is the ISO 639 code for Japanese, but the expected
+        // output relies on the "jp" spelling; Currency.getInstance()
+        // only consults the locale's country anyway.
+        Locale japan = new Locale("jp", "JP");
+        Currency yen = Currency.getInstance(japan);
+
+        System.out.println(japan.toString() + ": " + yen.toString()
+            + " " + yen.getSymbol() + yen.getDefaultFractionDigits());
+    }
+
+    static void testNormalizer() {
+        String composed = "Bl\u00c1ah";
+        String decomposed = "Bl\u0041\u0301ah";
+        String res;
+
+        res = Normalizer.normalize(composed, Normalizer.Form.NFD);
+        if (!decomposed.equals(res)) {
+            System.err.println("Bad decompose: '" + composed + "' --> '"
+                + res + "'");
+        }
+
+        res = Normalizer.normalize(decomposed, Normalizer.Form.NFC);
+        if (!composed.equals(res)) {
+            System.err.println("Bad compose: '" + decomposed + "' --> '"
+                + res + "'");
+        }
+
+        System.out.println("Normalizer passed");
+    }
+
+    /*
+     * Test that we can set and get an ISO3 language code.  Support for this
+     * is expected by the Android framework.
+     */
+    static void testIso3() {
+        Locale loc;
+        loc = new Locale("en", "US");
+        System.out.println("loc: " + loc);
+        System.out.println(" iso3=" + loc.getISO3Language());
+
+        loc = new Locale("eng", "USA");
+        System.out.println("loc: " + loc);
+        try {
+            System.out.println(" iso3=" + loc.getISO3Language());
+        } catch (MissingResourceException mre) {
+            System.err.println("couldn't get iso3 language");
+        }
+    }
+}
diff --git a/tests/093-serialization/expected.txt b/tests/093-serialization/expected.txt
new file mode 100644
index 0000000..60c64f8
--- /dev/null
+++ b/tests/093-serialization/expected.txt
@@ -0,0 +1 @@
+one=true two=2 three=three four=4.0 five=5.0 six=6 seven=7 eight=8 nine=9 thing=X
diff --git a/tests/093-serialization/info.txt b/tests/093-serialization/info.txt
new file mode 100644
index 0000000..effe3d8
--- /dev/null
+++ b/tests/093-serialization/info.txt
@@ -0,0 +1 @@
+Tests object serialization.
diff --git a/tests/093-serialization/src/Main.java b/tests/093-serialization/src/Main.java
new file mode 100644
index 0000000..ca3dc9f
--- /dev/null
+++ b/tests/093-serialization/src/Main.java
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.io.Serializable;
+
+/**
+ * Exercise serialization.
+ */
+public class Main {
+
+    public static void main(String[] args) {
+        testObjectSerialization();
+    }
+
+    static void testObjectSerialization() {
+        byte[] serialData;
+
+        try {
+            serialData = createStream();
+            checkStream(serialData);
+        } catch (IOException ioe) {
+            throw new RuntimeException(ioe);
+        }
+    }
+
+    static byte[] createStream() throws IOException {
+        ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
+        ObjectOutputStream objStream = new ObjectOutputStream(byteStream);
+
+        Sub sub = new Sub('X');
+        objStream.writeObject(sub);
+        byte[] bytes = byteStream.toByteArray();
+
+        objStream.close();
+        byteStream.close();
+        return bytes;
+    }
+
+    static void checkStream(byte[] input) throws IOException {
+        ByteArrayInputStream byteStream = new ByteArrayInputStream(input);
+        ObjectInputStream objStream = new ObjectInputStream(byteStream);
+
+        Sub sub;
+        try {
+            sub = (Sub) objStream.readObject();
+        } catch (ClassNotFoundException cnfe) {
+            throw new RuntimeException(cnfe);
+        }
+
+        objStream.close();
+        byteStream.close();
+
+        sub.check();
+    }
+}
+
+class Base implements Serializable {
+    private static final long serialVersionUID = 12345;
+
+    Boolean one;
+    Integer two;
+    String three;
+
+    public Base() {
+        one = true;
+        two = Integer.valueOf(2);
+        three = "three";
+    }
+}
+
+class Sub extends Base {
+    private static final long serialVersionUID = 54321;
+
+    Double four;
+    Float five;
+    private Byte six = 26;
+    Character seven = '7';
+    Short eight;
+    long nine;
+    public Character thing;
+
+    public Sub(char thing) {
+        four = 4.0;
+        five = 5.0f;
+        six = 6;
+        eight = 8;
+        nine = 9;
+        this.thing = thing;
+    }
+
+    public void check() {
+        System.out.println("one=" + one + " two=" + two + " three=" + three
+            + " four=" + four + " five=" + five + " six=" + six
+            + " seven=" + seven + " eight=" + eight + " nine=" + nine
+            + " thing=" + thing);
+    }
+}
+
diff --git a/tests/094-pattern/expected.txt b/tests/094-pattern/expected.txt
new file mode 100644
index 0000000..4af0c66
--- /dev/null
+++ b/tests/094-pattern/expected.txt
@@ -0,0 +1,3 @@
+str1 matches: true
+str2 matches: false
+str3 matches: true
diff --git a/tests/094-pattern/info.txt b/tests/094-pattern/info.txt
new file mode 100644
index 0000000..c1ade33
--- /dev/null
+++ b/tests/094-pattern/info.txt
@@ -0,0 +1,4 @@
+A simple test to exercise pattern matching.
+
+The test may throw a StackOverflowError if the stack size is too small.  With
+some regex libs, -Xss65k is the minimum allowable size.
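
Besides raising -Xss, a regex that recurses deeply can also be run on a
thread constructed with an explicit stack size; the value is only a hint
that the VM may round or ignore. A sketch (names and sizes illustrative):

    public class BigStackMatch {
        public static void main(String[] args) throws Exception {
            Runnable work = new Runnable() {
                public void run() {
                    // perform the deep regex match here
                }
            };
            // Request a 256 KB stack for the matching thread.
            Thread t = new Thread(null, work, "regex-thread", 256 * 1024);
            t.start();
            t.join();
        }
    }
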
diff --git a/tests/094-pattern/src/Main.java b/tests/094-pattern/src/Main.java
new file mode 100644
index 0000000..4d7e1a3
--- /dev/null
+++ b/tests/094-pattern/src/Main.java
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+public class Main {
+    // from android.util.Patterns
+    public static final String GOOD_IRI_CHAR =
+        "a-zA-Z0-9\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF";
+
+    public static final String TOP_LEVEL_DOMAIN_STR_FOR_WEB_URL =
+        "(?:"
+        + "(?:aero|arpa|asia|a[cdefgilmnoqrstuwxz])"
+        + "|(?:biz|b[abdefghijmnorstvwyz])"
+        + "|(?:cat|com|coop|c[acdfghiklmnoruvxyz])"
+        + "|d[ejkmoz]"
+        + "|(?:edu|e[cegrstu])"
+        + "|f[ijkmor]"
+        + "|(?:gov|g[abdefghilmnpqrstuwy])"
+        + "|h[kmnrtu]"
+        + "|(?:info|int|i[delmnoqrst])"
+        + "|(?:jobs|j[emop])"
+        + "|k[eghimnprwyz]"
+        + "|l[abcikrstuvy]"
+        + "|(?:mil|mobi|museum|m[acdeghklmnopqrstuvwxyz])"
+        + "|(?:name|net|n[acefgilopruz])"
+        + "|(?:org|om)"
+        + "|(?:pro|p[aefghklmnrstwy])"
+        + "|qa"
+        + "|r[eosuw]"
+        + "|s[abcdeghijklmnortuvyz]"
+        + "|(?:tel|travel|t[cdfghjklmnoprtvwz])"
+        + "|u[agksyz]"
+        + "|v[aceginu]"
+        + "|w[fs]"
+        + "|(?:\u03b4\u03bf\u03ba\u03b9\u03bc\u03ae|\u0438\u0441\u043f\u044b\u0442\u0430\u043d\u0438\u0435|\u0440\u0444|\u0441\u0440\u0431|\u05d8\u05e2\u05e1\u05d8|\u0622\u0632\u0645\u0627\u06cc\u0634\u06cc|\u0625\u062e\u062a\u0628\u0627\u0631|\u0627\u0644\u0627\u0631\u062f\u0646|\u0627\u0644\u062c\u0632\u0627\u0626\u0631|\u0627\u0644\u0633\u0639\u0648\u062f\u064a\u0629|\u0627\u0644\u0645\u063a\u0631\u0628|\u0627\u0645\u0627\u0631\u0627\u062a|\u0628\u06be\u0627\u0631\u062a|\u062a\u0648\u0646\u0633|\u0633\u0648\u0631\u064a\u0629|\u0641\u0644\u0633\u0637\u064a\u0646|\u0642\u0637\u0631|\u0645\u0635\u0631|\u092a\u0930\u0940\u0915\u094d\u0937\u093e|\u092d\u093e\u0930\u0924|\u09ad\u09be\u09b0\u09a4|\u0a2d\u0a3e\u0a30\u0a24|\u0aad\u0abe\u0ab0\u0aa4|\u0b87\u0ba8\u0bcd\u0ba4\u0bbf\u0baf\u0bbe|\u0b87\u0bb2\u0b99\u0bcd\u0b95\u0bc8|\u0b9a\u0bbf\u0b99\u0bcd\u0b95\u0baa\u0bcd\u0baa\u0bc2\u0bb0\u0bcd|\u0baa\u0bb0\u0bbf\u0b9f\u0bcd\u0b9a\u0bc8|\u0c2d\u0c3e\u0c30\u0c24\u0c4d|\u0dbd\u0d82\u0d9a\u0dcf|\u0e44\u0e17\u0e22|\u30c6\u30b9\u30c8|\u4e2d\u56fd|\u4e2d\u570b|\u53f0\u6e7e|\u53f0\u7063|\u65b0\u52a0\u5761|\u6d4b\u8bd5|\u6e2c\u8a66|\u9999\u6e2f|\ud14c\uc2a4\ud2b8|\ud55c\uad6d|xn\\-\\-0zwm56d|xn\\-\\-11b5bs3a9aj6g|xn\\-\\-3e0b707e|xn\\-\\-45brj9c|xn\\-\\-80akhbyknj4f|xn\\-\\-90a3ac|xn\\-\\-9t4b11yi5a|xn\\-\\-clchc0ea0b2g2a9gcd|xn\\-\\-deba0ad|xn\\-\\-fiqs8s|xn\\-\\-fiqz9s|xn\\-\\-fpcrj9c3d|xn\\-\\-fzc2c9e2c|xn\\-\\-g6w251d|xn\\-\\-gecrj9c|xn\\-\\-h2brj9c|xn\\-\\-hgbk6aj7f53bba|xn\\-\\-hlcj6aya9esc7a|xn\\-\\-j6w193g|xn\\-\\-jxalpdlp|xn\\-\\-kgbechtv|xn\\-\\-kprw13d|xn\\-\\-kpry57d|xn\\-\\-lgbbat1ad8j|xn\\-\\-mgbaam7a8h|xn\\-\\-mgbayh7gpa|xn\\-\\-mgbbh1a71e|xn\\-\\-mgbc0a9azcg|xn\\-\\-mgberp4a5d4ar|xn\\-\\-o3cw4h|xn\\-\\-ogbpf8fl|xn\\-\\-p1ai|xn\\-\\-pgbs0dh|xn\\-\\-s9brj9c|xn\\-\\-wgbh1c|xn\\-\\-wgbl6a|xn\\-\\-xkc2al3hye2a|xn\\-\\-xkc2dl3a5ee0h|xn\\-\\-yfro4i67o|xn\\-\\-ygbi2ammx|xn\\-\\-zckzah|xxx)"
+        + "|y[et]"
+        + "|z[amw]))";
+
+    public static final String WEB_URL_STR =
+        "((?:(http|https|Http|Https|rtsp|Rtsp):\\/\\/(?:(?:[a-zA-Z0-9\\$\\-\\_\\.\\+\\!\\*\\'\\(\\)"
+        + "\\,\\;\\?\\&\\=]|(?:\\%[a-fA-F0-9]{2})){1,64}(?:\\:(?:[a-zA-Z0-9\\$\\-\\_"
+        + "\\.\\+\\!\\*\\'\\(\\)\\,\\;\\?\\&\\=]|(?:\\%[a-fA-F0-9]{2})){1,25})?\\@)?)?"
+        + "((?:(?:[" + GOOD_IRI_CHAR + "][" + GOOD_IRI_CHAR + "\\-]{0,64}\\.)+"   // named host
+        + TOP_LEVEL_DOMAIN_STR_FOR_WEB_URL
+        + "|(?:(?:25[0-5]|2[0-4]" // or ip address
+        + "[0-9]|[0-1][0-9]{2}|[1-9][0-9]|[1-9])\\.(?:25[0-5]|2[0-4][0-9]"
+        + "|[0-1][0-9]{2}|[1-9][0-9]|[1-9]|0)\\.(?:25[0-5]|2[0-4][0-9]|[0-1]"
+        + "[0-9]{2}|[1-9][0-9]|[1-9]|0)\\.(?:25[0-5]|2[0-4][0-9]|[0-1][0-9]{2}"
+        + "|[1-9][0-9]|[0-9])))"
+        + "(?:\\:\\d{1,5})?)" // plus optional port number
+        + "(\\/(?:(?:[" + GOOD_IRI_CHAR + "\\;\\/\\?\\:\\@\\&\\=\\#\\~"  // plus optional query params
+        + "\\-\\.\\+\\!\\*\\'\\(\\)\\,\\_])|(?:\\%[a-fA-F0-9]{2}))*)?"
+        + "(?:\\b|$)"; // and finally, a word boundary or end of
+                        // input.  This is to stop foo.sure from
+                        // matching as foo.su
+
+    public static final Pattern WEB_URL = Pattern.compile(WEB_URL_STR);
+
+    public static final String testStr1 =
+        "http://www.google.com/blah?client=tablet-android&source=android-home";
+    public static final String testStr2 = "http:///www.google.com/";
+    public static final String testStr3 =
+        "http://www.google.com/search?hl=en&redir_esc=&client=tablet-android-verizon&source=android-browser-type&v=141000000&qsubts=1327020479959&action=devloc&q=cnn";
+
+    public static void main(String[] args) {
+        System.out.println("str1 matches: " + WEB_URL.matcher(testStr1).matches());
+        System.out.println("str2 matches: " + WEB_URL.matcher(testStr2).matches());
+        System.out.println("str3 matches: " + WEB_URL.matcher(testStr3).matches());
+    }
+
+    static String getStringAsHex(String text) {
+        StringBuilder sb = new StringBuilder(text.length() * 4);
+
+        for (int i = 0; i < text.length(); i++) {
+            sb.append(Integer.toHexString((int) text.charAt(i)));
+        }
+
+        return sb.toString();
+    }
+}
diff --git a/tests/095-switch-MAX_INT/expected.txt b/tests/095-switch-MAX_INT/expected.txt
new file mode 100644
index 0000000..12799cc
--- /dev/null
+++ b/tests/095-switch-MAX_INT/expected.txt
@@ -0,0 +1 @@
+good
diff --git a/tests/095-switch-MAX_INT/info.txt b/tests/095-switch-MAX_INT/info.txt
new file mode 100644
index 0000000..bb901db
--- /dev/null
+++ b/tests/095-switch-MAX_INT/info.txt
@@ -0,0 +1 @@
+Bug: http://code.google.com/p/android/issues/detail?id=22344
diff --git a/tests/095-switch-MAX_INT/src/Main.java b/tests/095-switch-MAX_INT/src/Main.java
new file mode 100644
index 0000000..d1171ea
--- /dev/null
+++ b/tests/095-switch-MAX_INT/src/Main.java
@@ -0,0 +1,11 @@
+public class Main {
+  static public void main(String[] args) throws Exception {
+    switch (0x7fffffff) {
+    case 0x7fffffff:
+      System.err.println("good");
+      break;
+    default:
+      throw new AssertionError();
+    }
+  }
+}
diff --git a/tests/096-array-copy-concurrent-gc/expected.txt b/tests/096-array-copy-concurrent-gc/expected.txt
new file mode 100644
index 0000000..23b9dab
--- /dev/null
+++ b/tests/096-array-copy-concurrent-gc/expected.txt
@@ -0,0 +1,3 @@
+Initializing...
+Starting the test
+Test OK
diff --git a/tests/096-array-copy-concurrent-gc/info.txt b/tests/096-array-copy-concurrent-gc/info.txt
new file mode 100644
index 0000000..37dd8be
--- /dev/null
+++ b/tests/096-array-copy-concurrent-gc/info.txt
@@ -0,0 +1,2 @@
+This is a test to verify that System.arraycopy works nicely together with
+the concurrent GC.
diff --git a/tests/096-array-copy-concurrent-gc/src/Main.java b/tests/096-array-copy-concurrent-gc/src/Main.java
new file mode 100644
index 0000000..c8e538b
--- /dev/null
+++ b/tests/096-array-copy-concurrent-gc/src/Main.java
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Runs a concurrent GC while performing System.arraycopy on an object array.
+ * Several threads are created in order to increase the probability
+ * of thread switches at critical points; without multiple threads the
+ * test usually passed even when there were bugs. The size of the array
+ * and the amount of garbage created are based on experimental numbers
+ * and are a tradeoff between the time the test takes when it succeeds
+ * and the probability that it discovers a problem.
+ */
+public class Main {
+    public static void main(String args[]) {
+        new ObjectCreatorThread(true).start();
+        new ObjectCreatorThread(false).start();
+        new ObjectCreatorThread(false).start();
+    }
+
+    static class ObjectCreatorThread extends Thread {
+        boolean mDoLog;
+        public ObjectCreatorThread(boolean doLog) {
+            mDoLog = doLog;
+        }
+
+        @Override
+        public void run() {
+            new Main().stressArray(mDoLog);
+        }
+    }
+
+    Object [] array = new Object[10000];
+
+    void stressArray(boolean doLog) {
+        // We want many references in the array.
+        // We also want the referenced objects to be far apart in address,
+        // so fill only every 2nd slot and leave the rest null.
+        if (doLog) {
+            System.out.println("Initializing...");
+        }
+        for (int i = 0; i < array.length; i+=2) {
+            array[i] = new String("Creating some garbage" + i);
+        }
+
+        if (doLog) {
+            System.out.println("Starting the test");
+        }
+
+        for (int j = 0; j < array.length; j++) {
+            Object obj = array[array.length - 1];
+            System.arraycopy(array, 0, array, 1, array.length - 1);
+            array[0] = obj;
+            new String("Creating some garbage" + Math.random());
+            new String("Creating some garbage" + Math.random());
+            new String("Creating some garbage" + Math.random());
+            new String("Creating some garbage" + Math.random());
+        }
+
+        for (int j = 0; j < array.length; j++) {
+            Object obj = array[0];
+            System.arraycopy(array, 1, array, 0, array.length - 1);
+            array[array.length - 1] = obj;
+            new String("Creating some garbage" + Math.random());
+            new String("Creating some garbage" + Math.random());
+            new String("Creating some garbage" + Math.random());
+            new String("Creating some garbage" + Math.random());
+        }
+
+        if (doLog) {
+            System.out.println("Test OK");
+        }
+    }
+}
diff --git a/vm/Android.mk b/vm/Android.mk
index 17b5a04..081acab 100644
--- a/vm/Android.mk
+++ b/vm/Android.mk
@@ -62,6 +62,7 @@
     LOCAL_MODULE := libdvm_assert
     include $(BUILD_SHARED_LIBRARY)
 
+  ifneq ($(dvm_arch),mips)    # MIPS support for self-verification is incomplete
     # Derivation #2
     # Enable assert and self-verification
     include $(LOCAL_PATH)/ReconfigureDvm.mk
@@ -71,6 +72,7 @@
                     -DWITH_SELF_VERIFICATION $(target_smp_flag)
     LOCAL_MODULE := libdvm_sv
     include $(BUILD_SHARED_LIBRARY)
+  endif # dvm_arch!=mips
 
     # Derivation #3
     # Compile out the JIT
diff --git a/vm/Atomic.cpp b/vm/Atomic.cpp
index 98ff7d0..4d376cf 100644
--- a/vm/Atomic.cpp
+++ b/vm/Atomic.cpp
@@ -18,74 +18,40 @@
 
 #include <cutils/atomic.h>
 
-/*
- * Quasi-atomic 64-bit operations, for platforms that lack the real thing.
- *
- * TODO: unify ARMv6/x86/sh implementations using the to-be-written
- * spin lock implementation.  We don't want to rely on mutex innards,
- * and it would be great if all platforms were running the same code.
- */
-
-#if defined(HAVE_MACOSX_IPC)
-
-#include <libkern/OSAtomic.h>
-
-#if defined(__ppc__)        \
-    || defined(__PPC__)     \
-    || defined(__powerpc__) \
-    || defined(__powerpc)   \
-    || defined(__POWERPC__) \
-    || defined(_M_PPC)      \
-    || defined(__PPC)
-#define NEED_QUASIATOMICS 1
-#else
-
-int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
-    volatile int64_t* addr)
-{
-    return OSAtomicCompareAndSwap64Barrier(oldvalue, newvalue,
-            (int64_t*)addr) == 0;
-}
-
-
-static inline int64_t dvmQuasiAtomicSwap64Body(int64_t value,
-                                               volatile int64_t* addr)
-{
-    int64_t oldValue;
-    do {
-        oldValue = *addr;
-    } while (dvmQuasiAtomicCas64(oldValue, value, addr));
-    return oldValue;
-}
-
-int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t* addr)
-{
-    return dvmQuasiAtomicSwap64Body(value, addr);
-}
-
-int64_t dvmQuasiAtomicSwap64Sync(int64_t value, volatile int64_t* addr)
-{
-    int64_t oldValue;
-    ANDROID_MEMBAR_STORE();
-    oldValue = dvmQuasiAtomicSwap64Body(value, addr);
-    /* TUNING: barriers can be avoided on some architectures */
-    ANDROID_MEMBAR_FULL();
-    return oldValue;
-}
-
-int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
-{
-    return OSAtomicAdd64Barrier(0, addr);
-}
+#if defined(__arm__)
+#include <machine/cpu-features.h>
 #endif
 
+/*****************************************************************************/
+
+#if defined(HAVE_MACOSX_IPC)
+#define NEED_MAC_QUASI_ATOMICS 1
+
 #elif defined(__i386__) || defined(__x86_64__)
-#define NEED_QUASIATOMICS 1
+#define NEED_PTHREADS_QUASI_ATOMICS 1
 
-#elif __arm__
-#include <machine/cpu-features.h>
+#elif defined(__mips__)
+#define NEED_PTHREADS_QUASI_ATOMICS 1
 
-#ifdef __ARM_HAVE_LDREXD
+#elif defined(__arm__)
+
+#if defined(__ARM_HAVE_LDREXD)
+#define NEED_ARM_LDREXD_QUASI_ATOMICS 1
+#else
+#define NEED_PTHREADS_QUASI_ATOMICS 1
+#endif /*__ARM_HAVE_LDREXD*/
+
+#elif defined(__sh__)
+#define NEED_PTHREADS_QUASI_ATOMICS 1
+
+#else
+#error "Unsupported atomic operations for this platform"
+#endif
+
+/*****************************************************************************/
+
+#if NEED_ARM_LDREXD_QUASI_ATOMICS
+
 static inline int64_t dvmQuasiAtomicSwap64Body(int64_t newvalue,
                                                volatile int64_t* addr)
 {
@@ -144,37 +110,93 @@
         : "r" (addr));
     return value;
 }
+#endif
 
-#else
+/*****************************************************************************/
 
-// on the device, we implement the 64-bit atomic operations through
-// mutex locking. normally, this is bad because we must initialize
-// a pthread_mutex_t before being able to use it, and this means
-// having to do an initialization check on each function call, and
-// that's where really ugly things begin...
-//
-// BUT, as a special twist, we take advantage of the fact that in our
-// pthread library, a mutex is simply a volatile word whose value is always
-// initialized to 0. In other words, simply declaring a static mutex
-// object initializes it !
-//
+#if NEED_MAC_QUASI_ATOMICS
+
+#include <libkern/OSAtomic.h>
+
+int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
+    volatile int64_t* addr)
+{
+    return OSAtomicCompareAndSwap64Barrier(oldvalue, newvalue,
+            (int64_t*)addr) == 0;
+}
+
+
+static inline int64_t dvmQuasiAtomicSwap64Body(int64_t value,
+                                               volatile int64_t* addr)
+{
+    int64_t oldValue;
+    do {
+        oldValue = *addr;
+    } while (dvmQuasiAtomicCas64(oldValue, value, addr));
+    return oldValue;
+}
+
+int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t* addr)
+{
+    return dvmQuasiAtomicSwap64Body(value, addr);
+}
+
+int64_t dvmQuasiAtomicSwap64Sync(int64_t value, volatile int64_t* addr)
+{
+    int64_t oldValue;
+    ANDROID_MEMBAR_STORE();
+    oldValue = dvmQuasiAtomicSwap64Body(value, addr);
+    /* TUNING: barriers can be avoided on some architectures */
+    ANDROID_MEMBAR_FULL();
+    return oldValue;
+}
+
+int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
+{
+    return OSAtomicAdd64Barrier(0, addr);
+}
+#endif
+
+/*****************************************************************************/
+
+#if NEED_PTHREADS_QUASI_ATOMICS
+
+// In the absence of a better implementation, we implement the 64-bit atomic
+// operations through mutex locking.
+
 // another twist is that we use a small array of mutexes to dispatch
 // the contention locks from different memory addresses
-//
 
 #include <pthread.h>
 
-#define  SWAP_LOCK_COUNT  32U
-static pthread_mutex_t  _swap_locks[SWAP_LOCK_COUNT];
+static const size_t kSwapLockCount = 32;
+static pthread_mutex_t* gSwapLocks[kSwapLockCount];
 
-#define  SWAP_LOCK(addr)   \
-   &_swap_locks[((unsigned)(void*)(addr) >> 3U) % SWAP_LOCK_COUNT]
+void dvmQuasiAtomicsStartup() {
+    for (size_t i = 0; i < kSwapLockCount; ++i) {
+        pthread_mutex_t* m = new pthread_mutex_t;
+        dvmInitMutex(m);
+        gSwapLocks[i] = m;
+    }
+}
 
+void dvmQuasiAtomicsShutdown() {
+    for (size_t i = 0; i < kSwapLockCount; ++i) {
+        pthread_mutex_t* m = gSwapLocks[i];
+        gSwapLocks[i] = NULL;
+        dvmDestroyMutex(m);
+        delete m;
+    }
+}
+
+static inline pthread_mutex_t* GetSwapLock(const volatile int64_t* addr) {
+    return gSwapLocks[((unsigned)(void*)(addr) >> 3U) % kSwapLockCount];
+}
 
 int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t* addr)
 {
     int64_t oldValue;
-    pthread_mutex_t*  lock = SWAP_LOCK(addr);
+    pthread_mutex_t* lock = GetSwapLock(addr);
 
     pthread_mutex_lock(lock);
 
@@ -195,7 +217,7 @@
     volatile int64_t* addr)
 {
     int result;
-    pthread_mutex_t*  lock = SWAP_LOCK(addr);
+    pthread_mutex_t* lock = GetSwapLock(addr);
 
     pthread_mutex_lock(lock);
 
@@ -212,7 +234,7 @@
 int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
 {
     int64_t result;
-    pthread_mutex_t*  lock = SWAP_LOCK(addr);
+    pthread_mutex_t* lock = GetSwapLock(addr);
 
     pthread_mutex_lock(lock);
     result = *addr;
@@ -220,107 +242,10 @@
     return result;
 }
 
-#endif /*__ARM_HAVE_LDREXD*/
-
-/*****************************************************************************/
-#elif __sh__
-#define NEED_QUASIATOMICS 1
-
 #else
-#error "Unsupported atomic operations for this platform"
-#endif
 
+// The other implementations don't need any special setup.
+void dvmQuasiAtomicsStartup() {}
+void dvmQuasiAtomicsShutdown() {}
 
-#if NEED_QUASIATOMICS
-
-/* Note that a spinlock is *not* a good idea in general
- * since they can introduce subtle issues. For example,
- * a real-time thread trying to acquire a spinlock already
- * acquired by another thread will never yeld, making the
- * CPU loop endlessly!
- *
- * However, this code is only used on the Linux simulator
- * so it's probably ok for us.
- *
- * The alternative is to use a pthread mutex, but
- * these must be initialized before being used, and
- * then you have the problem of lazily initializing
- * a mutex without any other synchronization primitive.
- *
- * TODO: these currently use sched_yield(), which is not guaranteed to
- * do anything at all.  We need to use dvmIterativeSleep or a wait /
- * notify mechanism if the initial attempt fails.
- */
-
-/* global spinlock for all 64-bit quasiatomic operations */
-static int32_t quasiatomic_spinlock = 0;
-
-int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
-    volatile int64_t* addr)
-{
-    int result;
-
-    while (android_atomic_acquire_cas(0, 1, &quasiatomic_spinlock)) {
-#ifdef HAVE_WIN32_THREADS
-        Sleep(0);
-#else
-        sched_yield();
-#endif
-    }
-
-    if (*addr == oldvalue) {
-        *addr = newvalue;
-        result = 0;
-    } else {
-        result = 1;
-    }
-
-    android_atomic_release_store(0, &quasiatomic_spinlock);
-
-    return result;
-}
-
-int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
-{
-    int64_t result;
-
-    while (android_atomic_acquire_cas(0, 1, &quasiatomic_spinlock)) {
-#ifdef HAVE_WIN32_THREADS
-        Sleep(0);
-#else
-        sched_yield();
-#endif
-    }
-
-    result = *addr;
-    android_atomic_release_store(0, &quasiatomic_spinlock);
-
-    return result;
-}
-
-int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t* addr)
-{
-    int64_t result;
-
-    while (android_atomic_acquire_cas(0, 1, &quasiatomic_spinlock)) {
-#ifdef HAVE_WIN32_THREADS
-        Sleep(0);
-#else
-        sched_yield();
-#endif
-    }
-
-    result = *addr;
-    *addr = value;
-    android_atomic_release_store(0, &quasiatomic_spinlock);
-
-    return result;
-}
-
-/* Same as dvmQuasiAtomicSwap64 - syscall handles barrier */
-int64_t dvmQuasiAtomicSwap64Sync(int64_t value, volatile int64_t* addr)
-{
-    return dvmQuasiAtomicSwap64(value, addr);
-}
-
-#endif /*NEED_QUASIATOMICS*/
+#endif /*NEED_PTHREADS_QUASI_ATOMICS*/
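
The pthread-based fallback above dispatches each address to one of 32
mutexes instead of funneling everything through a single global lock. The
same striping idea, sketched in Java purely for illustration (the class
name and the identity-hash stand-in for a native address are assumptions):

    import java.util.concurrent.locks.ReentrantLock;

    public class StripedLocks {
        private static final int COUNT = 32;
        private static final ReentrantLock[] LOCKS = new ReentrantLock[COUNT];
        static {
            for (int i = 0; i < COUNT; i++) {
                LOCKS[i] = new ReentrantLock();
            }
        }

        // Mirrors GetSwapLock(): hash the "address" down to one stripe so
        // that unrelated cells rarely contend on the same lock.
        static ReentrantLock lockFor(Object cell) {
            return LOCKS[(System.identityHashCode(cell) >>> 3) % COUNT];
        }

        static long swap(long[] cell, long value) {
            ReentrantLock lock = lockFor(cell);
            lock.lock();
            try {
                long old = cell[0];
                cell[0] = value;
                return old;
            } finally {
                lock.unlock();
            }
        }
    }
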
diff --git a/vm/Atomic.h b/vm/Atomic.h
index 6f7100b..becbeeb 100644
--- a/vm/Atomic.h
+++ b/vm/Atomic.h
@@ -23,6 +23,9 @@
 #include <cutils/atomic.h>          /* use common Android atomic ops */
 #include <cutils/atomic-inline.h>   /* and some uncommon ones */
 
+void dvmQuasiAtomicsStartup();
+void dvmQuasiAtomicsShutdown();
+
 /*
  * NOTE: Two "quasiatomic" operations on the exact same memory address
  * are guaranteed to operate atomically with respect to each other,
diff --git a/vm/Common.h b/vm/Common.h
index 43c7500..1eae2ab 100644
--- a/vm/Common.h
+++ b/vm/Common.h
@@ -102,6 +102,7 @@
 struct Object;
 
 union JValue {
+#if defined(HAVE_LITTLE_ENDIAN)
     u1      z;
     s1      b;
     u2      c;
@@ -111,6 +112,30 @@
     float   f;
     double  d;
     Object* l;
+#endif
+#if defined(HAVE_BIG_ENDIAN)
+    struct {
+        u1    _z[3];
+        u1    z;
+    };
+    struct {
+        s1    _b[3];
+        s1    b;
+    };
+    struct {
+        u2    _c;
+        u2    c;
+    };
+    struct {
+        s2    _s;
+        s2    s;
+    };
+    s4      i;
+    s8      j;
+    float   f;
+    double  d;
+    Object* l;
+#endif
 };
 
 #define OFFSETOF_MEMBER(t, f)         \
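
The big-endian JValue variant pads each narrow member so that it overlays
the low-order bytes of the full word: on a big-endian machine the least
significant byte of a 32-bit value sits at the highest offset. The effect,
shown with ByteBuffer instead of a C union (illustration only):

    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;

    public class EndianDemo {
        public static void main(String[] args) {
            ByteBuffer be = ByteBuffer.allocate(4).order(ByteOrder.BIG_ENDIAN);
            be.putInt(0, 7);
            // Low-order byte lands at the end of the word.
            System.out.println("big-endian:    offset 3 holds " + be.get(3));

            ByteBuffer le = ByteBuffer.allocate(4).order(ByteOrder.LITTLE_ENDIAN);
            le.putInt(0, 7);
            // Low-order byte lands at the start of the word.
            System.out.println("little-endian: offset 0 holds " + le.get(0));
        }
    }
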
diff --git a/vm/Dvm.mk b/vm/Dvm.mk
index 4aa054d..5e34641 100644
--- a/vm/Dvm.mk
+++ b/vm/Dvm.mk
@@ -185,7 +185,7 @@
 	test/TestIndirectRefTable.cpp
 
 # TODO: this is the wrong test, but what's the right one?
-ifeq ($(dvm_arch),arm)
+ifneq ($(filter arm mips,$(dvm_arch)),)
   LOCAL_SRC_FILES += os/android.cpp
 else
   LOCAL_SRC_FILES += os/linux.cpp
@@ -256,6 +256,31 @@
   endif
 endif
 
+ifeq ($(dvm_arch),mips)
+  MTERP_ARCH_KNOWN := true
+  LOCAL_C_INCLUDES += external/libffi/$(TARGET_OS)-$(TARGET_ARCH)
+  LOCAL_SHARED_LIBRARIES += libffi
+  LOCAL_SRC_FILES += \
+		arch/mips/CallO32.S \
+		arch/mips/HintsO32.cpp \
+		arch/generic/Call.cpp \
+		mterp/out/InterpC-mips.cpp \
+		mterp/out/InterpAsm-mips.S
+
+  ifeq ($(WITH_JIT),true)
+    dvm_arch_variant := mips
+    LOCAL_SRC_FILES += \
+		compiler/codegen/mips/RallocUtil.cpp \
+		compiler/codegen/mips/$(dvm_arch_variant)/Codegen.cpp \
+		compiler/codegen/mips/$(dvm_arch_variant)/CallingConvention.S \
+		compiler/codegen/mips/Assemble.cpp \
+		compiler/codegen/mips/ArchUtility.cpp \
+		compiler/codegen/mips/LocalOptimizations.cpp \
+		compiler/codegen/mips/GlobalOptimizations.cpp \
+		compiler/template/out/CompilerTemplateAsm-$(dvm_arch_variant).S
+  endif
+endif
+
 ifeq ($(dvm_arch),x86)
   ifeq ($(dvm_os),linux)
     MTERP_ARCH_KNOWN := true
diff --git a/vm/Init.cpp b/vm/Init.cpp
index 36ac269..96a7c2a 100644
--- a/vm/Init.cpp
+++ b/vm/Init.cpp
@@ -1231,6 +1231,7 @@
     /*
      * Initialize components.
      */
+    dvmQuasiAtomicsStartup();
     if (!dvmAllocTrackerStartup()) {
         return "dvmAllocTrackerStartup failed";
     }
@@ -1719,6 +1720,8 @@
 
     freeAssertionCtrl();
 
+    dvmQuasiAtomicsShutdown();
+
     /*
      * We want valgrind to report anything we forget to free as "definitely
      * lost".  If there's a pointer in the global chunk, it would be reported
diff --git a/vm/Sync.cpp b/vm/Sync.cpp
index 80cc6af..eea5116 100644
--- a/vm/Sync.cpp
+++ b/vm/Sync.cpp
@@ -276,6 +276,11 @@
     size_t len;
     int fd;
 
+    /* When a thread is being destroyed it is normal for the frame depth to be zero */
+    if (self->interpSave.curFrame == NULL) {
+        return;
+    }
+
     saveArea = SAVEAREA_FROM_FP(self->interpSave.curFrame);
     meth = saveArea->method;
     cp = eventBuffer;
diff --git a/vm/Thread.cpp b/vm/Thread.cpp
index 5122adf..28182d6 100644
--- a/vm/Thread.cpp
+++ b/vm/Thread.cpp
@@ -3340,7 +3340,10 @@
 
     StringObject* nameObj = (StringObject*)
         dvmGetFieldObject(thread->threadObj, gDvm.offJavaLangThread_name);
-    return dvmCreateCstrFromString(nameObj);
+    char* name = dvmCreateCstrFromString(nameObj);
+    std::string result(name);
+    free(name);
+    return result;
 }
 
 /*
@@ -3431,15 +3434,26 @@
      * The target thread can continue to execute between the two signals.
      * (The first just causes debuggerd to attach to it.)
      */
-    LOGD("threadid=%d: sending two SIGSTKFLTs to threadid=%d (tid=%d) to"
+
+#ifdef SIGSTKFLT
+#define SIG SIGSTKFLT
+#define SIGNAME "SIGSTKFLT"
+#elif defined(SIGEMT)
+#define SIG SIGEMT
+#define SIGNAME "SIGEMT"
+#else
+#error No signal available for dvmNukeThread
+#endif
+
+    LOGD("threadid=%d: sending two " SIGNAME "s to threadid=%d (tid=%d) to"
          " cause debuggerd dump",
         dvmThreadSelf()->threadId, thread->threadId, thread->systemTid);
-    killResult = pthread_kill(thread->handle, SIGSTKFLT);
+    killResult = pthread_kill(thread->handle, SIG);
     if (killResult != 0) {
         LOGD("NOTE: pthread_kill #1 failed: %s", strerror(killResult));
     }
     usleep(2 * 1000 * 1000);    // TODO: timed-wait until debuggerd attaches
-    killResult = pthread_kill(thread->handle, SIGSTKFLT);
+    killResult = pthread_kill(thread->handle, SIG);
     if (killResult != 0) {
         LOGD("NOTE: pthread_kill #2 failed: %s", strerror(killResult));
     }
diff --git a/vm/Thread.h b/vm/Thread.h
index 7f14ce5..9fa2e03 100644
--- a/vm/Thread.h
+++ b/vm/Thread.h
@@ -92,7 +92,7 @@
 #ifndef DVM_NO_ASM_INTERP
         void* curHandlerTable;
 #else
-        void* unused;
+        int32_t    unused1;
 #endif
     } ctl;
 };
diff --git a/vm/alloc/HeapSource.cpp b/vm/alloc/HeapSource.cpp
index f61724a..6552680 100644
--- a/vm/alloc/HeapSource.cpp
+++ b/vm/alloc/HeapSource.cpp
@@ -42,10 +42,10 @@
 #define HEAP_IDEAL_FREE             (2 * 1024 * 1024)
 #define HEAP_MIN_FREE               (HEAP_IDEAL_FREE / 4)
 
-/* Number of seconds to wait after a GC before performing a heap trim
+/* How long to wait after a GC before performing a heap trim
  * operation to reclaim unused pages.
  */
-#define HEAP_TRIM_IDLE_TIME_SECONDS 5
+#define HEAP_TRIM_IDLE_TIME_MS (5 * 1000)
 
 /* Start a concurrent collection when free memory falls under this
  * many bytes.
@@ -410,7 +410,7 @@
         bool trim = false;
         if (gHs->gcThreadTrimNeeded) {
             int result = dvmRelativeCondWait(&gHs->gcThreadCond, &gHs->gcThreadMutex,
-                    HEAP_TRIM_IDLE_TIME_SECONDS, 0);
+                    HEAP_TRIM_IDLE_TIME_MS, 0);
             if (result == ETIMEDOUT) {
                 /* Timed out waiting for a GC request, schedule a heap trim. */
                 trim = true;
diff --git a/vm/analysis/CodeVerify.cpp b/vm/analysis/CodeVerify.cpp
index 7656ce9..1149307 100644
--- a/vm/analysis/CodeVerify.cpp
+++ b/vm/analysis/CodeVerify.cpp
@@ -272,7 +272,7 @@
         /* chk: 0  1  Z  y  Y  h  H  c  i  b  B  s  S  C  I  F */
         { /*0*/ 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 },
         { /*1*/ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 },
-        { /*Z*/ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0 },
+        { /*Z*/ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 },
         { /*y*/ 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 },
         { /*Y*/ 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1 },
         { /*h*/ 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1 },
diff --git a/vm/arch/generic/Call.cpp b/vm/arch/generic/Call.cpp
index c23e7c8..28783cb 100644
--- a/vm/arch/generic/Call.cpp
+++ b/vm/arch/generic/Call.cpp
@@ -47,6 +47,14 @@
     }
 }
 
+/* We will call this generic function if there are no hints */
+#ifdef __mips__
+#define dvmPlatformInvoke dvmPlatformInvokeFFI
+
+extern "C" void dvmPlatformInvoke(void* pEnv, ClassObject* clazz, int argInfo,
+    int argc, const u4* argv, const char* signature, void* func, JValue* pResult);
+#endif
+
 /*
  * Call "func" with the specified arguments.
  *
diff --git a/vm/arch/mips/CallO32.S b/vm/arch/mips/CallO32.S
new file mode 100644
index 0000000..e436d1e
--- /dev/null
+++ b/vm/arch/mips/CallO32.S
@@ -0,0 +1,282 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * JNI method invocation.  This is used to call a C/C++ JNI method.  The
+ * argument list has to be pushed onto the native stack according to
+ * local calling conventions.
+ *
+ * This version supports the MIPS O32 ABI.
+ */
+
+/*
+Function prototype:
+
+void dvmPlatformInvoke(void* pEnv, ClassObject* clazz, int argInfo, int argc,
+    const u4* argv, const char* signature, void* func, JValue* pReturn)
+
+The method we are calling has the form:
+
+  return_type func(JNIEnv* pEnv, ClassObject* clazz, ...)
+    -or-
+  return_type func(JNIEnv* pEnv, Object* this, ...)
+
+We receive a collection of 32-bit values which correspond to arguments from
+the interpreter (e.g. float occupies one, double occupies two).  It's up to
+us to convert these into local calling conventions.
+
+Note that argc in dvmPlatformInvoke does NOT include pEnv or clazz/this.
+*/
+
+    .text
+    .align  2
+    .globl dvmPlatformInvoke
+    .ent dvmPlatformInvoke
+/*
+ * On entry:
+ *   a0  JNIEnv (can be left alone)
+ *   a1  clazz (NULL for virtual method calls, non-NULL for static)
+ *   a2  argInfo
+ *   a3  argc (number of 32-bit values in argv)
+ *   MIPS reserves 16 bytes on the stack even though the first 4 args are
+ *   passed in registers a0-a3. That's different from ARM.
+ *   [sp + 16]  argv
+ *   [sp + 20]  short signature
+ *   [sp + 24]  func
+ *   [sp + 28]  pReturn
+ *
+ * For a virtual method call, the "this" reference is in argv[0].
+ *
+ * argInfo (32-bit int) layout:
+ *   SRRRLLLL FFFFFFFF FFFFFFFF FFFFFFFF
+ *
+ *   S - if set, do things the hard way (scan the signature)
+ *   R - return type enumeration, really only important for hardware FP
+ *   L - number of double-words (64 bits!) of storage required on stack (0-30 words)
+ *   F - pad flag -- if set, write a pad word to the stack
+ *
+ * With this arrangement we can efficiently push up to 24 words of arguments
+ * onto the stack.  Anything requiring more than that -- which should happen
+ * rarely to never -- can do the slow signature scan.
+ *
+ * (We could pack the Fs more efficiently -- we know we never push two pads
+ * in a row, and the first word can never be a pad -- but there's really
+ * no need for it.)
+ *
+ * NOTE: if the called function has more than 4 words of arguments, gdb
+ * will not be able to unwind the stack past this method.  The only way
+ * around this is to convince gdb to respect an explicit frame pointer.
+ */
+
+ /* Stack:
+  *                     High
+  *                 ____________
+  *                 |__28______| pReturn
+  *                 |__24______| func
+  *                 |__20______| short signature
+  *                 |__16______| argv
+  *                 |__12______| reserved (a3: argc)
+  *                 |__8_______| reserved (a2: argInfo)
+  *                 |__4_______| reserved (a1: clazz)
+  *__sp on entry_->_|__0_______|_reserved (a0: JNIEnv)
+  *                 |__________| saved ra
+  *                 |__________| saved fp
+  *                 |__________| saved s0
+  *                 |__________| spare
+  *                 |__________| saved s2
+  *"framepointer"->_|__________| pad for 8-byte alignment
+  *                 |__________| other argv or pad
+  *                 |__________| other argv or pad
+  *                 |__________| other argv or pad
+  *                 |__________| other argv or pad
+  *                 |__________| other argv or pad
+  *                 |__________| other argv or pad
+  *                 |__________| reserved for a3
+  *                 |__________| reserved for a2
+  *                 |__________| reserved for a1
+  *_____new sp___-> |__________| reserved for a0
+  * (new sp: sp when call native method)
+  */
+
+ /* Register usage:
+  *
+  *  s0: pReturn
+  *  s2: Return type
+  * These registers should be saved to and restored from stack.
+  *
+  *  t0: argv
+  *  t9: func
+  * These registers do not need to be saved.
+  *
+  * We put the stack size into register s1 because we cannot know the
+  * size of the stack at the beginning. This size can be calculated
+  * with the help of the hints in jniArgInfo.
+  *
+  */
+
+dvmPlatformInvoke:
+	.set noreorder
+	.cpload $t9
+	.set reorder
+
+	/*  Do we have arg padding flags in "argInfo"? Check bit 31 */
+	bltz	$a2,.Lno_arginfo
+
+	/* Fast path. We have hints. */
+	/* save fp and ra to stack */
+#define FSIZE 24
+	subu	$sp,FSIZE
+	sw	$ra,20($sp)
+	sw	$fp,16($sp)
+	sw	$s0,12($sp)
+	sw	$s2,4($sp)
+	move	$fp,$sp
+
+	lw	$t0,FSIZE+16($sp)	/* t0 <- argv */
+	lw	$t9,FSIZE+24($sp)	/* t9 <- func */
+	lw	$s0,FSIZE+28($sp)	/* s0 <- pReturn */
+
+	/* Is the method static? */
+	bnez	$a1,1f
+	/* Not static: a1 <- *argv++ ("this"), argc-- */
+	lw	$a1,($t0)
+	addiu	$t0,4
+	addiu	$a3,-1
+1:
+	/* expand the stack for args */
+	srl	$s2,$a2,28	/* s2 <- returnType */
+	srl	$t1,$a2,21
+	andi	$t1,0x78	/* t1 <- stackSize in bytes */
+
+	addiu	$t1,16		/* include space for a0/a1/a2/a3 */
+	subu	$sp,$t1
+	addiu	$t1,$sp,8
+
+	/*
+	 * t0 :argv
+	 * t1 :sp+8 (first arg position on the stack, not counting pEnv and clazz/this)
+	 * a2 :argInfo
+	 * a3 :argc
+	 * sp :new stack bottom
+	 */
+
+	/* first two args, or one arg and a pad */
+	blez	$a3,.Largs_done
+	lw	$t2,($t0)
+	addiu	$t0,4
+	addiu	$a3,-1
+	sw	$t2,($t1)
+	addiu	$t1,4
+	srl	$a2,1
+	blez	$a3,.Largs_done
+
+	andi	$t3,$a2,0x1	/* the second position is a pad? */
+	bnez	$t3,.Lpad0
+
+	lw	$t2,($t0)
+	addiu	$t0,4
+	addiu	$a3,-1
+	sw	$t2,($t1)
+.Lpad0:
+	addiu	$t1,4
+	srl	$a2,1
+	blez	$a3,.Largs_done
+
+.Lloop1:
+	/* copy other args
+	 * $fp: sp top for args
+	 * $t1: sp for next arg
+	 */
+	beq	$t1,$fp,.Largs_done
+	andi	$t3,$a2,0x1
+	srl	$a2,1
+	bnez	$t3,.Lpad
+	lw	$t2,($t0)
+	addiu	$t0,4
+	sw	$t2,($t1)
+.Lpad:
+	addiu	$t1,4
+	b	.Lloop1
+
+.Largs_done:
+
+	/*
+	 * We have copied the args onto the stack. Now copy argv[0]/argv[1]
+	 * into regs a2/a3. If argv[0] is 32 bits and argv[1] is 64 bits,
+	 * we do not strictly need to set reg a3 since it is a pad, but
+	 * copying a3 from argv is harmless. We do not need to set
+	 * a0 (pEnv)/a1 (clazz/this) since they are already in place.
+	 */
+
+	/*
+	 * sp: new stack
+	 * s0: pReturn
+	 * s2: Return type
+	 *
+	 */
+	lw	$a2,8($sp)
+	lw	$a3,12($sp)
+
+	/* Linux/PIC needs $t9 to point to the function address.
+	 * Call the function.
+	 */
+	jalr $t9
+
+	/* function call return */
+	/* 1. check the return type
+	 * 2. if the return type is not DALVIK_JNI_RETURN_VOID then copy v0/v1
+	 *    to pReturn
+	 */
+	beqz	$s2,.Lend	/* don't set result if return type is void */
+
+#ifdef __mips_hard_float
+	mfc1	$t0,$f0		/* Get float ($f0) or double ($f1$f0) result */
+	mfc1	$t1,$f1
+	sltiu	$t2,$s2,3	/* set t2 if return type is float or double */
+#ifdef HAVE_LITTLE_ENDIAN
+        /* Note: for little endian, the double result is in $v1:$v0 and float result is in $v0 */
+	movn	$v0,$t0,$t2	/* If the result type is float or double overwrite $v1/$v0 */
+	movn	$v1,$t1,$t2
+#else
+        /* Note: for big endian, the double result is in $v0:$v1 and float result is in $v0 */
+	movn	$v1,$t0,$t2	/* If the result type is float or double overwrite $v0/$v1 */
+	movn	$v0,$t1,$t2
+	sltiu	$t3,$s2,2	/* set t3 if return type is float */
+	movn	$v0,$t0,$t3	/* If the result type is float overwrite $v0 */
+#endif
+#endif
+
+	/* Store the result */
+	sw	$v0,0($s0)
+	sw	$v1,4($s0)
+
+.Lend:
+	/* restore saved registers */
+	move	$sp,$fp
+	lw	$ra,20($sp)
+	lw	$fp,16($sp)
+	lw	$s0,12($sp)
+	lw	$s2,4($sp)
+	addiu	$sp,FSIZE
+	jr	$ra
+
+/* Slow path - just tail call the generic routine */
+.Lno_arginfo:
+
+	la $t9,dvmPlatformInvokeFFI
+	j $t9
+
+.end dvmPlatformInvoke
diff --git a/vm/arch/mips/HintsO32.cpp b/vm/arch/mips/HintsO32.cpp
new file mode 100644
index 0000000..77fdfd4
--- /dev/null
+++ b/vm/arch/mips/HintsO32.cpp
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * JNI method invocation.  This is used to call a C/C++ JNI method.  The
+ * argument list has to be pushed onto the native stack according to
+ * local calling conventions.
+ *
+ * This version supports the MIPS O32 ABI.
+ */
+
+/* TODO: this is a candidate for consolidation with the similar ARM code. */
+
+#include "Dalvik.h"
+#include "libdex/DexClass.h"
+
+#include <stdlib.h>
+#include <stddef.h>
+#include <sys/stat.h>
+
+
+/*
+ * The class loader will associate with each method a 32-bit info word
+ * (jniArgInfo) to support JNI calls.  The high order 4 bits of this word
+ * are the same for all targets, while the lower 28 are used for hints to
+ * allow accelerated JNI bridge transfers.
+ *
+ * jniArgInfo (32-bit int) layout:
+ *
+ *    SRRRHHHH HHHHHHHH HHHHHHHH HHHHHHHH
+ *
+ *    S - if set, ignore the hints and do things the hard way (scan signature)
+ *    R - return-type enumeration
+ *    H - target-specific hints (see below for details)
+ *
+ * This function produces mips-specific hints - specifically a description
+ * of padding required to keep all 64-bit parameters properly aligned.
+ *
+ * MIPS JNI hint format (same as ARM):
+ *
+ *       LLLL FFFFFFFF FFFFFFFF FFFFFFFF
+ *
+ *   L - number of double-words of storage required on the stack (0-30 words)
+ *   F - pad flag -- if set, the stack grows by 8 bytes instead of 4 when the
+ *                   corresponding 32-bit arg is copied onto the stack
+ *                   (slightly different from ARM)
+ *
+ * If there are too many arguments to construct valid hints, this function will
+ * return a result with the S bit set.
+ */
+u4 dvmPlatformInvokeHints(const DexProto* proto)
+{
+
+    const char* sig = dexProtoGetShorty(proto);
+    int padFlags, jniHints;
+    char sigByte;
+    int stackOffset, padMask, hints;
+
+    stackOffset = padFlags = 0;
+    padMask = 0x00000001;
+
+    /* Skip past the return type */
+    sig++;
+
+    while (true) {
+        sigByte = *(sig++);
+
+        if (sigByte == '\0')
+            break;
+
+        if (sigByte == 'D' || sigByte == 'J') {
+            if ((stackOffset & 1) != 0) {
+                padFlags |= padMask;
+                stackOffset++;
+                padMask <<= 1;
+            }
+            stackOffset += 2;
+            padMask <<= 2;
+        } else {
+            stackOffset++;
+            padMask <<= 1;
+        }
+    }
+
+    jniHints = 0;
+
+    if (stackOffset > DALVIK_JNI_COUNT_SHIFT) {
+        /* too big for "fast" version */
+        jniHints = DALVIK_JNI_NO_ARG_INFO;
+    } else {
+        assert((padFlags & (0xffffffff << DALVIK_JNI_COUNT_SHIFT)) == 0);
+        /*
+         * stackOffset includes the space for a2/a3. However, we have already
+         * reserved 16 bytes on the stack in CallO32.S, so subtract 2 from
+         * stackOffset here.
+         */
+        stackOffset -= 2;
+        if (stackOffset < 0)
+            stackOffset = 0;
+        jniHints |= ((stackOffset+1) / 2) << DALVIK_JNI_COUNT_SHIFT;
+        jniHints |= padFlags;
+    }
+
+    return jniHints;
+}
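
To make the hint computation concrete, here is a hand-traced example through
the loop above, assuming DALVIK_JNI_COUNT_SHIFT == 24 (its usual value in the
jniArgInfo definitions):

    /*
     * Worked example for shorty "VIJI" (void return; int, long, int args):
     *   'I' -> stackOffset = 1, padMask = 0x2
     *   'J' -> stackOffset is odd: padFlags |= 0x2, stackOffset = 2;
     *          then stackOffset = 4, padMask = 0x10
     *   'I' -> stackOffset = 5
     * Final: stackOffset - 2 = 3, double-word count = (3 + 1) / 2 = 2,
     * so jniHints = (2 << 24) | 0x2 = 0x02000002.
     */
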
diff --git a/vm/compiler/Compiler.cpp b/vm/compiler/Compiler.cpp
index c08d42d..83ed5b8 100644
--- a/vm/compiler/Compiler.cpp
+++ b/vm/compiler/Compiler.cpp
@@ -313,9 +313,9 @@
      * Wipe out the code cache content to force immediate crashes if
      * stale JIT'ed code is invoked.
      */
-    memset((char *) gDvmJit.codeCache + gDvmJit.templateSize,
-           0,
-           gDvmJit.codeCacheByteUsed - gDvmJit.templateSize);
+    dvmCompilerCacheClear((char *) gDvmJit.codeCache + gDvmJit.templateSize,
+                          gDvmJit.codeCacheByteUsed - gDvmJit.templateSize);
+
     dvmCompilerCacheFlush((intptr_t) gDvmJit.codeCache,
                           (intptr_t) gDvmJit.codeCache +
                           gDvmJit.codeCacheByteUsed, 0);
diff --git a/vm/compiler/Compiler.h b/vm/compiler/Compiler.h
index fc23254..f844e3c 100644
--- a/vm/compiler/Compiler.h
+++ b/vm/compiler/Compiler.h
@@ -74,7 +74,8 @@
     DALVIK_JIT_ARM,
     DALVIK_JIT_THUMB,
     DALVIK_JIT_THUMB2,
-    DALVIK_JIT_IA32
+    DALVIK_JIT_IA32,
+    DALVIK_JIT_MIPS
 } JitInstructionSetType;
 
 /* Description of a compiled trace. */
@@ -107,6 +108,9 @@
 /* Chain cell for predicted method invocation */
 typedef struct PredictedChainingCell {
     u4 branch;                  /* Branch to chained destination */
+#ifdef __mips__
+    u4 delay_slot;              /* nop goes here */
+#endif
     const ClassObject *clazz;   /* key for prediction */
     const Method *method;       /* to lookup native PC from dalvik PC */
     const ClassObject *stagedClazz;   /* possible next key for prediction */
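
The extra word is needed because every MIPS branch executes the instruction in
its delay slot, so the cell's branch must be followed by a nop before the data
fields begin. A sketch of the resulting layout shift (offsets assume 32-bit
pointers and are illustrative, not part of the patch):

    /*
     * ARM/x86: branch @ +0, clazz @ +4, method @ +8, stagedClazz @ +12
     * MIPS:    branch @ +0, delay-slot nop @ +4, clazz @ +8,
     *          method @ +12, stagedClazz @ +16
     * Any code that patches predicted chaining cells must use the wider
     * MIPS offsets.
     */
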
diff --git a/vm/compiler/CompilerUtility.h b/vm/compiler/CompilerUtility.h
index 8a674b8..ccf22a7 100644
--- a/vm/compiler/CompilerUtility.h
+++ b/vm/compiler/CompilerUtility.h
@@ -74,6 +74,7 @@
                            const BitVector *bv, int length);
 void dvmGetBlockName(struct BasicBlock *bb, char *name);
 int dvmCompilerCacheFlush(long start, long end, long flags);
+void dvmCompilerCacheClear(char *start, size_t size);
 
 
 #endif  // DALVIK_COMPILER_UTILITY_H_
diff --git a/vm/compiler/Frontend.cpp b/vm/compiler/Frontend.cpp
index 24ec6c7..723de86 100644
--- a/vm/compiler/Frontend.cpp
+++ b/vm/compiler/Frontend.cpp
@@ -536,7 +536,8 @@
 /* Split an existing block from the specified code offset into two */
 static BasicBlock *splitBlock(CompilationUnit *cUnit,
                               unsigned int codeOffset,
-                              BasicBlock *origBlock)
+                              BasicBlock *origBlock,
+                              BasicBlock **immedPredBlockP)
 {
     MIR *insn = origBlock->firstMIRInsn;
     while (insn) {
@@ -598,16 +599,29 @@
 
     insn->prev->next = NULL;
     insn->prev = NULL;
+
+    /*
+     * Update the immediate predecessor block pointer so that outgoing edges
+     * can be applied to the proper block.
+     */
+    if (immedPredBlockP) {
+        assert(*immedPredBlockP == origBlock);
+        *immedPredBlockP = bottomBlock;
+    }
     return bottomBlock;
 }
 
 /*
  * Given a code offset, find out the block that starts with it. If the offset
- * is in the middle of an existing block, split it into two.
+ * is in the middle of an existing block, split it into two. If immedPredBlockP
+ * is non-null and is the block being split, update *immedPredBlockP to point
+ * to the bottom block so that outgoing edges can be setup properly (by the
+ * caller).
  */
 static BasicBlock *findBlock(CompilationUnit *cUnit,
                              unsigned int codeOffset,
-                             bool split, bool create)
+                             bool split, bool create,
+                             BasicBlock **immedPredBlockP)
 {
     GrowableList *blockList = &cUnit->blockList;
     BasicBlock *bb;
@@ -621,7 +635,9 @@
         if ((split == true) && (codeOffset > bb->startOffset) &&
             (bb->lastMIRInsn != NULL) &&
             (codeOffset <= bb->lastMIRInsn->offset)) {
-            BasicBlock *newBB = splitBlock(cUnit, codeOffset, bb);
+            BasicBlock *newBB = splitBlock(cUnit, codeOffset, bb,
+                                           bb == *immedPredBlockP ?
+                                               immedPredBlockP : NULL);
             return newBB;
         }
     }
@@ -898,7 +914,9 @@
                       /* split */
                       false,
                       /* create */
-                      true);
+                      true,
+                      /* immedPredBlockP */
+                      NULL);
         }
 
         offset = dexCatchIteratorGetEndOffset(&iterator, pCode);
@@ -942,7 +960,9 @@
                                        /* split */
                                        true,
                                        /* create */
-                                       true);
+                                       true,
+                                       /* immedPredBlockP */
+                                       &curBlock);
     curBlock->taken = takenBlock;
     dvmCompilerSetBit(takenBlock->predecessors, curBlock->id);
 
@@ -964,7 +984,9 @@
                                                   */
                                                  true,
                                                  /* create */
-                                                 true);
+                                                 true,
+                                                 /* immedPredBlockP */
+                                                 &curBlock);
         curBlock->fallThrough = fallthroughBlock;
         dvmCompilerSetBit(fallthroughBlock->predecessors, curBlock->id);
     } else if (codePtr < codeEnd) {
@@ -974,7 +996,9 @@
                       /* split */
                       false,
                       /* create */
-                      true);
+                      true,
+                      /* immedPredBlockP */
+                      NULL);
         }
     }
 }
@@ -1038,7 +1062,9 @@
                                           /* split */
                                           true,
                                           /* create */
-                                          true);
+                                          true,
+                                          /* immedPredBlockP */
+                                          &curBlock);
         SuccessorBlockInfo *successorBlockInfo =
             (SuccessorBlockInfo *) dvmCompilerNew(sizeof(SuccessorBlockInfo),
                                                   false);
@@ -1056,7 +1082,9 @@
                                              /* split */
                                              false,
                                              /* create */
-                                             true);
+                                             true,
+                                             /* immedPredBlockP */
+                                             NULL);
     curBlock->fallThrough = fallthroughBlock;
     dvmCompilerSetBit(fallthroughBlock->predecessors, curBlock->id);
 }
@@ -1099,7 +1127,9 @@
                                                /* split */
                                                false,
                                                /* create */
-                                               false);
+                                               false,
+                                               /* immedPredBlockP */
+                                               NULL);
 
             SuccessorBlockInfo *successorBlockInfo =
               (SuccessorBlockInfo *) dvmCompilerNew(sizeof(SuccessorBlockInfo),
@@ -1133,7 +1163,9 @@
                                                      /* split */
                                                      false,
                                                      /* create */
-                                                     true);
+                                                     true,
+                                                     /* immedPredBlockP */
+                                                     NULL);
             /*
              * OP_THROW and OP_THROW_VERIFICATION_ERROR are unconditional
              * branches.
@@ -1251,7 +1283,9 @@
                               /* split */
                               false,
                               /* create */
-                              true);
+                              true,
+                              /* immedPredBlockP */
+                              NULL);
                 }
             }
         } else if (flags & kInstrCanThrow) {
@@ -1265,7 +1299,9 @@
                                           /* split */
                                           false,
                                           /* create */
-                                          false);
+                                          false,
+                                          /* immedPredBlockP */
+                                          NULL);
         if (nextBlock) {
             /*
              * The next instruction could be the target of a previously parsed
@@ -1405,7 +1441,9 @@
                                           /* split */
                                           false,
                                           /* create */
-                                          false);
+                                          false,
+                                          /* immedPredBlockP */
+                                          NULL);
         if (nextBlock) {
             /*
              * The next instruction could be the target of a previously parsed
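
Reconstructed from the call sites above, the hazard that the new
immedPredBlockP parameter fixes can be sketched as follows (names
hypothetical; this is not a verbatim excerpt from the patch):

    static void exampleSetTakenEdge(CompilationUnit *cUnit, unsigned int target,
                                    BasicBlock *curBlock)
    {
        /* If findBlock() has to split curBlock itself, it rewrites curBlock
         * to point at the bottom half, so the edges set up below attach to
         * the block that actually ends at the branch. */
        BasicBlock *takenBlock = findBlock(cUnit, target,
                                           /* split */ true,
                                           /* create */ true,
                                           /* immedPredBlockP */ &curBlock);
        curBlock->taken = takenBlock;
        dvmCompilerSetBit(takenBlock->predecessors, curBlock->id);
    }
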
diff --git a/vm/compiler/Loop.cpp b/vm/compiler/Loop.cpp
index 90c97d7..301df7a 100644
--- a/vm/compiler/Loop.cpp
+++ b/vm/compiler/Loop.cpp
@@ -678,7 +678,7 @@
     cUnit->loopAnalysis = loopAnalysis;
 
     /* Constant propagation */
-    cUnit->isConstantV = dvmAllocBitVector(cUnit->numSSARegs, false);
+    cUnit->isConstantV = dvmCompilerAllocBitVector(cUnit->numSSARegs, false);
     cUnit->constantValues =
         (int *)dvmCompilerNew(sizeof(int) * cUnit->numSSARegs,
                               true);
@@ -692,7 +692,7 @@
     loopAnalysis->ivList =
         (GrowableList *)dvmCompilerNew(sizeof(GrowableList), true);
     dvmInitGrowableList(loopAnalysis->ivList, 4);
-    loopAnalysis->isIndVarV = dvmAllocBitVector(cUnit->numSSARegs, false);
+    loopAnalysis->isIndVarV = dvmCompilerAllocBitVector(cUnit->numSSARegs, false);
     dvmCompilerDataFlowAnalysisDispatcher(cUnit,
                                           dvmCompilerFindInductionVariables,
                                           kAllNodes,
diff --git a/vm/compiler/codegen/arm/ArchUtility.cpp b/vm/compiler/codegen/arm/ArchUtility.cpp
index 0bbb875..a1cb741 100644
--- a/vm/compiler/codegen/arm/ArchUtility.cpp
+++ b/vm/compiler/codegen/arm/ArchUtility.cpp
@@ -425,3 +425,10 @@
 {
     return cacheflush(start, end, flags);
 }
+
+/* Target-specific cache clearing */
+void dvmCompilerCacheClear(char *start, size_t size)
+{
+    /* 0 is an invalid opcode for arm. */
+    memset(start, 0, size);
+}
diff --git a/vm/compiler/codegen/mips/ArchUtility.cpp b/vm/compiler/codegen/mips/ArchUtility.cpp
new file mode 100644
index 0000000..df7d008
--- /dev/null
+++ b/vm/compiler/codegen/mips/ArchUtility.cpp
@@ -0,0 +1,356 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "../../CompilerInternals.h"
+#include "libdex/DexOpcodes.h"
+#include "MipsLIR.h"
+
+/* For dumping instructions */
+#define MIPS_REG_COUNT 32
+static const char *mipsRegName[MIPS_REG_COUNT] = {
+    "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3",
+    "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7",
+    "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+    "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra"
+};
+
+/*
+ * Interpret a format string and build a string no longer than size bytes.
+ * See the format key in Assemble.cpp.
+ */
+static void buildInsnString(const char *fmt, MipsLIR *lir, char* buf,
+                            unsigned char *baseAddr, int size)
+{
+    int i;
+    char *bufEnd = &buf[size-1];
+    const char *fmtEnd = &fmt[strlen(fmt)];
+    char tbuf[256];
+    char nc;
+    while (fmt < fmtEnd) {
+        int operand;
+        if (*fmt == '!') {
+            fmt++;
+            assert(fmt < fmtEnd);
+            nc = *fmt++;
+            if (nc=='!') {
+                strcpy(tbuf, "!");
+            } else {
+               assert(fmt < fmtEnd);
+               assert((unsigned)(nc-'0') < 4);
+               operand = lir->operands[nc-'0'];
+               switch(*fmt++) {
+                   case 'b':
+                       strcpy(tbuf,"0000");
+                       for (i=3; i>= 0; i--) {
+                           tbuf[i] += operand & 1;
+                           operand >>= 1;
+                       }
+                       break;
+                   case 's':
+                       sprintf(tbuf,"$f%d",operand & FP_REG_MASK);
+                       break;
+                   case 'S':
+                       assert(((operand & FP_REG_MASK) & 1) == 0);
+                       sprintf(tbuf,"$f%d",operand & FP_REG_MASK);
+                       break;
+                   case 'h':
+                       sprintf(tbuf,"%04x", operand);
+                       break;
+                   case 'M':
+                   case 'd':
+                       sprintf(tbuf,"%d", operand);
+                       break;
+                   case 'D':
+                       sprintf(tbuf,"%d", operand+1);
+                       break;
+                   case 'E':
+                       sprintf(tbuf,"%d", operand*4);
+                       break;
+                   case 'F':
+                       sprintf(tbuf,"%d", operand*2);
+                       break;
+                   case 'c':
+                       switch (operand) {
+                           case kMipsCondEq:
+                               strcpy(tbuf, "eq");
+                               break;
+                           case kMipsCondNe:
+                               strcpy(tbuf, "ne");
+                               break;
+                           case kMipsCondLt:
+                               strcpy(tbuf, "lt");
+                               break;
+                           case kMipsCondGe:
+                               strcpy(tbuf, "ge");
+                               break;
+                           case kMipsCondGt:
+                               strcpy(tbuf, "gt");
+                               break;
+                           case kMipsCondLe:
+                               strcpy(tbuf, "le");
+                               break;
+                           case kMipsCondCs:
+                               strcpy(tbuf, "cs");
+                               break;
+                           case kMipsCondMi:
+                               strcpy(tbuf, "mi");
+                               break;
+                           default:
+                               strcpy(tbuf, "");
+                               break;
+                       }
+                       break;
+                   case 't':
+                       sprintf(tbuf,"0x%08x (L%p)",
+                               (int) baseAddr + lir->generic.offset + 4 +
+                               (operand << 2),
+                               lir->generic.target);
+                       break;
+                   case 'T':
+                       sprintf(tbuf,"0x%08x",
+                               (int) (operand << 2));
+                       break;
+                   case 'u': {
+                       int offset_1 = lir->operands[0];
+                       int offset_2 = NEXT_LIR(lir)->operands[0];
+                       intptr_t target =
+                           ((((intptr_t) baseAddr + lir->generic.offset + 4) &
+                            ~3) + (offset_1 << 21 >> 9) + (offset_2 << 1)) &
+                           0xfffffffc;
+                       sprintf(tbuf, "%p", (void *) target);
+                       break;
+                    }
+
+                   /* Nothing to print for BLX_2 */
+                   case 'v':
+                       strcpy(tbuf, "see above");
+                       break;
+                   case 'r':
+                       assert(operand >= 0 && operand < MIPS_REG_COUNT);
+                       strcpy(tbuf, mipsRegName[operand]);
+                       break;
+                   default:
+                       strcpy(tbuf,"DecodeError");
+                       break;
+               }
+               if (buf+strlen(tbuf) <= bufEnd) {
+                   strcpy(buf, tbuf);
+                   buf += strlen(tbuf);
+               } else {
+                   break;
+               }
+            }
+        } else {
+           *buf++ = *fmt++;
+        }
+        if (buf == bufEnd)
+            break;
+    }
+    *buf = 0;
+}
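
As an example of the format machinery, the kMipsAddu entry in Assemble.cpp
uses fmt "!0r,!1r,!2r"; feeding it operands {2, 4, 5} prints the registers via
mipsRegName. A hypothetical call (a real MipsLIR would come from the
compilation unit's LIR list):

    static void exampleDumpAddu(void)
    {
        char buf[256];
        MipsLIR lir;
        memset(&lir, 0, sizeof(lir));       /* illustrative setup only */
        lir.opcode = kMipsAddu;
        lir.operands[0] = 2;                /* v0 */
        lir.operands[1] = 4;                /* a0 */
        lir.operands[2] = 5;                /* a1 */
        buildInsnString(EncodingMap[kMipsAddu].fmt, &lir, buf, NULL,
                        sizeof(buf));
        /* buf now holds "v0,a0,a1" */
    }
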
+
+void dvmDumpResourceMask(LIR *lir, u8 mask, const char *prefix)
+{
+    char buf[256];
+    buf[0] = 0;
+    MipsLIR *mipsLIR = (MipsLIR *) lir;
+
+    if (mask == ENCODE_ALL) {
+        strcpy(buf, "all");
+    } else {
+        char num[8];
+        int i;
+
+        for (i = 0; i < kRegEnd; i++) {
+            if (mask & (1ULL << i)) {
+                sprintf(num, "%d ", i);
+                strcat(buf, num);
+            }
+        }
+
+        if (mask & ENCODE_CCODE) {
+            strcat(buf, "cc ");
+        }
+        if (mask & ENCODE_FP_STATUS) {
+            strcat(buf, "fpcc ");
+        }
+        /* Memory bits */
+        if (mipsLIR && (mask & ENCODE_DALVIK_REG)) {
+            sprintf(buf + strlen(buf), "dr%d%s", mipsLIR->aliasInfo & 0xffff,
+                    (mipsLIR->aliasInfo & 0x80000000) ? "(+1)" : "");
+        }
+        if (mask & ENCODE_LITERAL) {
+            strcat(buf, "lit ");
+        }
+
+        if (mask & ENCODE_HEAP_REF) {
+            strcat(buf, "heap ");
+        }
+        if (mask & ENCODE_MUST_NOT_ALIAS) {
+            strcat(buf, "noalias ");
+        }
+    }
+    if (buf[0]) {
+        LOGD("%s: %s", prefix, buf);
+    }
+}
+
+/*
+ * Debugging macros
+ */
+#define DUMP_RESOURCE_MASK(X)
+#define DUMP_SSA_REP(X)
+
+/* Pretty-print a LIR instruction */
+void dvmDumpLIRInsn(LIR *arg, unsigned char *baseAddr)
+{
+    MipsLIR *lir = (MipsLIR *) arg;
+    char buf[256];
+    char opName[256];
+    int offset = lir->generic.offset;
+    int dest = lir->operands[0];
+    const bool dumpNop = false;
+
+    /* Handle pseudo-ops individually, and all regular insns as a group */
+    switch(lir->opcode) {
+        case kMipsChainingCellBottom:
+            LOGD("-------- end of chaining cells (0x%04x)", offset);
+            break;
+        case kMipsPseudoBarrier:
+            LOGD("-------- BARRIER");
+            break;
+        case kMipsPseudoExtended:
+            /* intentional fallthrough */
+        case kMipsPseudoSSARep:
+            DUMP_SSA_REP(LOGD("-------- %s", (char *) dest));
+            break;
+        case kMipsPseudoChainingCellBackwardBranch:
+            LOGD("L%p:", lir);
+            LOGD("-------- chaining cell (backward branch): 0x%04x", dest);
+            break;
+        case kMipsPseudoChainingCellNormal:
+            LOGD("L%p:", lir);
+            LOGD("-------- chaining cell (normal): 0x%04x", dest);
+            break;
+        case kMipsPseudoChainingCellHot:
+            LOGD("L%p:", lir);
+            LOGD("-------- chaining cell (hot): 0x%04x", dest);
+            break;
+        case kMipsPseudoChainingCellInvokePredicted:
+            LOGD("L%p:", lir);
+            LOGD("-------- chaining cell (predicted): %s%s",
+                 dest ? ((Method *) dest)->clazz->descriptor : "",
+                 dest ? ((Method *) dest)->name : "N/A");
+            break;
+        case kMipsPseudoChainingCellInvokeSingleton:
+            LOGD("L%p:", lir);
+            LOGD("-------- chaining cell (invoke singleton): %s%s/%p",
+                 ((Method *)dest)->clazz->descriptor,
+                 ((Method *)dest)->name,
+                 ((Method *)dest)->insns);
+            break;
+        case kMipsPseudoEntryBlock:
+            LOGD("-------- entry offset: 0x%04x", dest);
+            break;
+        case kMipsPseudoDalvikByteCodeBoundary:
+            LOGD("-------- dalvik offset: 0x%04x @ %s", dest,
+                 (char *) lir->operands[1]);
+            break;
+        case kMipsPseudoExitBlock:
+            LOGD("-------- exit offset: 0x%04x", dest);
+            break;
+        case kMipsPseudoPseudoAlign4:
+            LOGD("%p (%04x): .align4", baseAddr + offset, offset);
+            break;
+        case kMipsPseudoPCReconstructionCell:
+            LOGD("L%p:", lir);
+            LOGD("-------- reconstruct dalvik PC : 0x%04x @ +0x%04x", dest,
+                 lir->operands[1]);
+            break;
+        case kMipsPseudoPCReconstructionBlockLabel:
+            /* Do nothing */
+            break;
+        case kMipsPseudoEHBlockLabel:
+            LOGD("Exception_Handling:");
+            break;
+        case kMipsPseudoTargetLabel:
+        case kMipsPseudoNormalBlockLabel:
+            LOGD("L%p:", lir);
+            break;
+        default:
+            if (lir->flags.isNop && !dumpNop) {
+                break;
+            }
+            buildInsnString(EncodingMap[lir->opcode].name, lir, opName,
+                            baseAddr, 256);
+            buildInsnString(EncodingMap[lir->opcode].fmt, lir, buf, baseAddr,
+                            256);
+            LOGD("%p (%04x): %08x %-9s%s%s",
+                 baseAddr + offset, offset, *(u4 *)(baseAddr + offset), opName, buf,
+                 lir->flags.isNop ? "(nop)" : "");
+            break;
+    }
+
+    if (lir->useMask && (!lir->flags.isNop || dumpNop)) {
+        DUMP_RESOURCE_MASK(dvmDumpResourceMask((LIR *) lir,
+                                               lir->useMask, "use"));
+    }
+    if (lir->defMask && (!lir->flags.isNop || dumpNop)) {
+        DUMP_RESOURCE_MASK(dvmDumpResourceMask((LIR *) lir,
+                                               lir->defMask, "def"));
+    }
+}
+
+/* Dump instructions and constant pool contents */
+void dvmCompilerCodegenDump(CompilationUnit *cUnit)
+{
+    LOGD("Dumping LIR insns");
+    LIR *lirInsn;
+    MipsLIR *mipsLIR;
+
+    LOGD("installed code is at %p", cUnit->baseAddr);
+    LOGD("total size is %d bytes", cUnit->totalSize);
+    for (lirInsn = cUnit->firstLIRInsn; lirInsn; lirInsn = lirInsn->next) {
+        dvmDumpLIRInsn(lirInsn, (unsigned char *) cUnit->baseAddr);
+    }
+    for (lirInsn = cUnit->classPointerList; lirInsn; lirInsn = lirInsn->next) {
+        mipsLIR = (MipsLIR *) lirInsn;
+        LOGD("%p (%04x): .class (%s)",
+             (char*)cUnit->baseAddr + mipsLIR->generic.offset,
+             mipsLIR->generic.offset,
+             ((CallsiteInfo *) mipsLIR->operands[0])->classDescriptor);
+    }
+    for (lirInsn = cUnit->literalList; lirInsn; lirInsn = lirInsn->next) {
+        mipsLIR = (MipsLIR *) lirInsn;
+        LOGD("%p (%04x): .word (%#x)",
+             (char*)cUnit->baseAddr + mipsLIR->generic.offset,
+             mipsLIR->generic.offset,
+             mipsLIR->operands[0]);
+    }
+}
+
+/* Target-specific cache flushing */
+int dvmCompilerCacheFlush(long start, long end, long flags)
+{
+    return cacheflush(start, end, flags);
+}
+
+/* Target-specific cache clearing */
+void dvmCompilerCacheClear(char *start, size_t size)
+{
+    /* 0x66 is an invalid opcode for mips. */
+    memset(start, 0x66, size);
+}
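
Both the ARM and MIPS versions follow the same recipe: fill the reclaimed
region with a pattern that does not decode to useful code on that ISA (0x00
words on ARM, 0x66 on MIPS, per the comments), so a stray jump into cleared
cache space fails fast rather than running stale code. A minimal sketch of the
shared shape (the helper name and fillByte parameter are illustrative):

    #include <cstring>

    static void clearWithPoisonByte(char *start, size_t size,
                                    unsigned char fillByte)
    {
        memset(start, fillByte, size);
        /* Callers must still flush, as Compiler.cpp does via
         * dvmCompilerCacheFlush(), so the i-cache sees the new bytes. */
    }
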
diff --git a/vm/compiler/codegen/mips/Assemble.cpp b/vm/compiler/codegen/mips/Assemble.cpp
new file mode 100644
index 0000000..a97857d
--- /dev/null
+++ b/vm/compiler/codegen/mips/Assemble.cpp
@@ -0,0 +1,2324 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Dalvik.h"
+#include "libdex/DexOpcodes.h"
+
+#include "../../CompilerInternals.h"
+#include "MipsLIR.h"
+#include "Codegen.h"
+#include <unistd.h>             /* for cacheflush */
+#include <sys/mman.h>           /* for protection change */
+
+#define MAX_ASSEMBLER_RETRIES 10
+
+/*
+ * opcode: MipsOpCode enum
+ * skeleton: pre-designated bit-pattern for this opcode
+ * k0: key to applying ds/de
+ * ds: dest start bit position
+ * de: dest end bit position
+ * k1: key to applying s1s/s1e
+ * s1s: src1 start bit position
+ * s1e: src1 end bit position
+ * k2: key to applying s2s/s2e
+ * s2s: src2 start bit position
+ * s2e: src2 end bit position
+ * operands: number of operands (for sanity check purposes)
+ * name: mnemonic name
+ * fmt: for pretty-printing
+ */
+#define ENCODING_MAP(opcode, skeleton, k0, ds, de, k1, s1s, s1e, k2, s2s, s2e, \
+                     k3, k3s, k3e, flags, name, fmt, size) \
+        {skeleton, {{k0, ds, de}, {k1, s1s, s1e}, {k2, s2s, s2e}, \
+                    {k3, k3s, k3e}}, opcode, flags, name, fmt, size}
+
+/* Instruction dump string format keys: !pf, where "!" is the start
+ * of the key, "p" is which numeric operand to use and "f" is the
+ * print format.
+ *
+ * [p]ositions:
+ *     0 -> operands[0] (dest)
+ *     1 -> operands[1] (src1)
+ *     2 -> operands[2] (src2)
+ *     3 -> operands[3] (extra)
+ *
+ * [f]ormats:
+ *     h -> 4-digit hex
+ *     d -> decimal
+ *     E -> decimal*4
+ *     F -> decimal*2
+ *     c -> branch condition (beq, bne, etc.)
+ *     t -> pc-relative target
+ *     T -> pc-region target
+ *     u -> 1st half of bl[x] target
+ *     v -> 2nd half of bl[x] target
+ *     R -> register list
+ *     s -> single precision floating point register
+ *     S -> double precision floating point register
+ *     m -> Thumb2 modified immediate
+ *     n -> complemented Thumb2 modified immediate
+ *     M -> Thumb2 16-bit zero-extended immediate
+ *     b -> 4-digit binary
+ *
+ *  [!] escape.  To insert "!", use "!!"
+ */
+/* NOTE: must be kept in sync with enum MipsOpcode from MipsLIR.h */
+MipsEncodingMap EncodingMap[kMipsLast] = {
+    ENCODING_MAP(kMips32BitData, 0x00000000,
+                 kFmtBitBlt, 31, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP,
+                 "data", "0x!0h(!0d)", 2),
+    ENCODING_MAP(kMipsAddiu, 0x24000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "addiu", "!0r,!1r,0x!2h(!2d)", 2),
+    ENCODING_MAP(kMipsAddu, 0x00000021,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "addu", "!0r,!1r,!2r", 2),
+    ENCODING_MAP(kMipsAnd, 0x00000024,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "and", "!0r,!1r,!2r", 2),
+    ENCODING_MAP(kMipsAndi, 0x30000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "andi", "!0r,!1r,0x!2h(!2d)", 2),
+    ENCODING_MAP(kMipsB, 0x10000000,
+                 kFmtBitBlt, 15, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, NO_OPERAND | IS_BRANCH,
+                 "b", "!0t", 2),
+    ENCODING_MAP(kMipsBal, 0x04110000,
+                 kFmtBitBlt, 15, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, NO_OPERAND | IS_BRANCH | REG_DEF_LR,
+                 "bal", "!0t", 2),
+    ENCODING_MAP(kMipsBeq, 0x10000000,
+                 kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_USE01,
+                 "beq", "!0r,!1r,!2t", 2),
+    ENCODING_MAP(kMipsBeqz, 0x10000000, /* same as beq above with t = $zero */
+                 kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0,
+                 "beqz", "!0r,!1t", 2),
+    ENCODING_MAP(kMipsBgez, 0x04010000,
+                 kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0,
+                 "bgez", "!0r,!1t", 2),
+    ENCODING_MAP(kMipsBgtz, 0x1C000000,
+                 kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0,
+                 "bgtz", "!0r,!1t", 2),
+    ENCODING_MAP(kMipsBlez, 0x18000000,
+                 kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0,
+                 "blez", "!0r,!1t", 2),
+    ENCODING_MAP(kMipsBltz, 0x04000000,
+                 kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0,
+                 "bltz", "!0r,!1t", 2),
+    ENCODING_MAP(kMipsBnez, 0x14000000, /* same as bne below with t = $zero */
+                 kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0,
+                 "bnez", "!0r,!1t", 2),
+    ENCODING_MAP(kMipsBne, 0x14000000,
+                 kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_USE01,
+                 "bne", "!0r,!1r,!2t", 2),
+    ENCODING_MAP(kMipsDiv, 0x0000001a,
+                 kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtBitBlt, 25, 21,
+                 kFmtBitBlt, 20, 16, IS_QUAD_OP | REG_DEF01 | REG_USE23,
+                 "div", "!2r,!3r", 2),
+#if __mips_isa_rev>=2
+    ENCODING_MAP(kMipsExt, 0x7c000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 10, 6,
+                 kFmtBitBlt, 15, 11, IS_QUAD_OP | REG_DEF0 | REG_USE1,
+                 "ext", "!0r,!1r,!2d,!3D", 2),
+#endif
+    ENCODING_MAP(kMipsJal, 0x0c000000,
+                 kFmtBitBlt, 25, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_DEF_LR,
+                 "jal", "!0T(!0E)", 2),
+    ENCODING_MAP(kMipsJalr, 0x00000009,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_DEF0_USE1,
+                 "jalr", "!0r,!1r", 2),
+    ENCODING_MAP(kMipsJr, 0x00000008,
+                 kFmtBitBlt, 25, 21, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0,
+                 "jr", "!0r", 2),
+    ENCODING_MAP(kMipsLahi, 0x3C000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
+                 "lahi/lui", "!0r,0x!1h(!1d)", 2),
+    ENCODING_MAP(kMipsLalo, 0x34000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "lalo/ori", "!0r,!1r,0x!2h(!2d)", 2),
+    ENCODING_MAP(kMipsLui, 0x3C000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
+                 "lui", "!0r,0x!1h(!1d)", 2),
+    ENCODING_MAP(kMipsLb, 0x80000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+                 "lb", "!0r,!1d(!2r)", 2),
+    ENCODING_MAP(kMipsLbu, 0x90000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+                 "lbu", "!0r,!1d(!2r)", 2),
+    ENCODING_MAP(kMipsLh, 0x84000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+                 "lh", "!0r,!1d(!2r)", 2),
+    ENCODING_MAP(kMipsLhu, 0x94000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+                 "lhu", "!0r,!1d(!2r)", 2),
+    ENCODING_MAP(kMipsLw, 0x8C000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+                 "lw", "!0r,!1d(!2r)", 2),
+    ENCODING_MAP(kMipsMfhi, 0x00000010,
+                 kFmtBitBlt, 15, 11, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "mfhi", "!0r", 2),
+    ENCODING_MAP(kMipsMflo, 0x00000012,
+                 kFmtBitBlt, 15, 11, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "mflo", "!0r", 2),
+    ENCODING_MAP(kMipsMove, 0x00000025, /* or using zero reg */
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "move", "!0r,!1r", 2),
+    ENCODING_MAP(kMipsMovz, 0x0000000a,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "movz", "!0r,!1r,!2r", 2),
+    ENCODING_MAP(kMipsMul, 0x70000002,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "mul", "!0r,!1r,!2r", 2),
+    ENCODING_MAP(kMipsNop, 0x00000000,
+                 kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, NO_OPERAND,
+                 "nop", "", 2),
+    ENCODING_MAP(kMipsNor, 0x00000027, /* used for "not" too */
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "nor", "!0r,!1r,!2r", 2),
+    ENCODING_MAP(kMipsOr, 0x00000025,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "or", "!0r,!1r,!2r", 2),
+    ENCODING_MAP(kMipsOri, 0x34000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "ori", "!0r,!1r,0x!2h(!2d)", 2),
+    ENCODING_MAP(kMipsPref, 0xCC000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE2,
+                 "pref", "!0d,!1d(!2r)", 2),
+    ENCODING_MAP(kMipsSb, 0xA0000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+                 "sb", "!0r,!1d(!2r)", 2),
+#if __mips_isa_rev>=2
+    ENCODING_MAP(kMipsSeb, 0x7c000420,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "seb", "!0r,!1r", 2),
+    ENCODING_MAP(kMipsSeh, 0x7c000620,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "seh", "!0r,!1r", 2),
+#endif
+    ENCODING_MAP(kMipsSh, 0xA4000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+                 "sh", "!0r,!1d(!2r)", 2),
+    ENCODING_MAP(kMipsSll, 0x00000000,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "sll", "!0r,!1r,0x!2h(!2d)", 2),
+    ENCODING_MAP(kMipsSllv, 0x00000004,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "sllv", "!0r,!1r,!2r", 2),
+    ENCODING_MAP(kMipsSlt, 0x0000002a,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "slt", "!0r,!1r,!2r", 2),
+    ENCODING_MAP(kMipsSlti, 0x28000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "slti", "!0r,!1r,0x!2h(!2d)", 2),
+    ENCODING_MAP(kMipsSltu, 0x0000002b,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "sltu", "!0r,!1r,!2r", 2),
+    ENCODING_MAP(kMipsSra, 0x00000003,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "sra", "!0r,!1r,0x!2h(!2d)", 2),
+    ENCODING_MAP(kMipsSrav, 0x00000007,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "srav", "!0r,!1r,!2r", 2),
+    ENCODING_MAP(kMipsSrl, 0x00000002,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "srl", "!0r,!1r,0x!2h(!2d)", 2),
+    ENCODING_MAP(kMipsSrlv, 0x00000006,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "srlv", "!0r,!1r,!2r", 2),
+    ENCODING_MAP(kMipsSubu, 0x00000023, /* used for "neg" too */
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "subu", "!0r,!1r,!2r", 2),
+    ENCODING_MAP(kMipsSw, 0xAC000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+                 "sw", "!0r,!1d(!2r)", 2),
+    ENCODING_MAP(kMipsXor, 0x00000026,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "xor", "!0r,!1r,!2r", 2),
+    ENCODING_MAP(kMipsXori, 0x38000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "xori", "!0r,!1r,0x!2h(!2d)", 2),
+#ifdef __mips_hard_float
+    ENCODING_MAP(kMipsFadds, 0x46000000,
+                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "add.s", "!0s,!1s,!2s", 2),
+    ENCODING_MAP(kMipsFsubs, 0x46000001,
+                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "sub.s", "!0s,!1s,!2s", 2),
+    ENCODING_MAP(kMipsFmuls, 0x46000002,
+                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "mul.s", "!0s,!1s,!2s", 2),
+    ENCODING_MAP(kMipsFdivs, 0x46000003,
+                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "div.s", "!0s,!1s,!2s", 2),
+    ENCODING_MAP(kMipsFaddd, 0x46200000,
+                 kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "add.d", "!0S,!1S,!2S", 2),
+    ENCODING_MAP(kMipsFsubd, 0x46200001,
+                 kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "sub.d", "!0S,!1S,!2S", 2),
+    ENCODING_MAP(kMipsFmuld, 0x46200002,
+                 kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "mul.d", "!0S,!1S,!2S", 2),
+    ENCODING_MAP(kMipsFdivd, 0x46200003,
+                 kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "div.d", "!0S,!1S,!2S", 2),
+    ENCODING_MAP(kMipsFcvtsd, 0x46200020,
+                 kFmtSfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "cvt.s.d", "!0s,!1S", 2),
+    ENCODING_MAP(kMipsFcvtsw, 0x46800020,
+                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "cvt.s.w", "!0s,!1s", 2),
+    ENCODING_MAP(kMipsFcvtds, 0x46000021,
+                 kFmtDfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "cvt.d.s", "!0S,!1s", 2),
+    ENCODING_MAP(kMipsFcvtdw, 0x46800021,
+                 kFmtDfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "cvt.d.w", "!0S,!1s", 2),
+    ENCODING_MAP(kMipsFcvtws, 0x46000024,
+                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "cvt.w.s", "!0s,!1s", 2),
+    ENCODING_MAP(kMipsFcvtwd, 0x46200024,
+                 kFmtSfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "cvt.w.d", "!0s,!1S", 2),
+    ENCODING_MAP(kMipsFmovs, 0x46000006,
+                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "mov.s", "!0s,!1s", 2),
+    ENCODING_MAP(kMipsFmovd, 0x46200006,
+                 kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "mov.d", "!0S,!1S", 2),
+    ENCODING_MAP(kMipsFlwc1, 0xC4000000,
+                 kFmtSfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+                 "lwc1", "!0s,!1d(!2r)", 2),
+    ENCODING_MAP(kMipsFldc1, 0xD4000000,
+                 kFmtDfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+                 "ldc1", "!0S,!1d(!2r)", 2),
+    ENCODING_MAP(kMipsFswc1, 0xE4000000,
+                 kFmtSfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+                 "swc1", "!0s,!1d(!2r)", 2),
+    ENCODING_MAP(kMipsFsdc1, 0xF4000000,
+                 kFmtDfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+                 "sdc1", "!0S,!1d(!2r)", 2),
+    ENCODING_MAP(kMipsMfc1, 0x44000000,
+                 kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "mfc1", "!0r,!1s", 2),
+    ENCODING_MAP(kMipsMtc1, 0x44800000,
+                 kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | REG_DEF1,
+                 "mtc1", "!0r,!1s", 2),
+#endif
+    ENCODING_MAP(kMipsUndefined, 0x64000000,
+                 kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, NO_OPERAND,
+                 "undefined", "", 2),
+};
+
+/* Track the number of times that the code cache is patched */
+#if defined(WITH_JIT_TUNING)
+#define UPDATE_CODE_CACHE_PATCHES()    (gDvmJit.codeCachePatches++)
+#else
+#define UPDATE_CODE_CACHE_PATCHES()
+#endif
+
+/* Write the numbers in the constant and class pool to the output stream */
+static void installLiteralPools(CompilationUnit *cUnit)
+{
+    int *dataPtr = (int *) ((char *) cUnit->baseAddr + cUnit->dataOffset);
+    /* Install number of class pointer literals */
+    *dataPtr++ = cUnit->numClassPointers;
+    MipsLIR *dataLIR = (MipsLIR *) cUnit->classPointerList;
+    while (dataLIR) {
+        /*
+         * Install the callsiteinfo pointers into the cells for now. They will
+         * be converted into real pointers in dvmJitInstallClassObjectPointers.
+         */
+        *dataPtr++ = dataLIR->operands[0];
+        dataLIR = NEXT_LIR(dataLIR);
+    }
+    dataLIR = (MipsLIR *) cUnit->literalList;
+    while (dataLIR) {
+        *dataPtr++ = dataLIR->operands[0];
+        dataLIR = NEXT_LIR(dataLIR);
+    }
+}
+
+/*
+ * Assemble the LIR into binary instruction format.  Note that we may
+ * discover that pc-relative displacements may not fit the selected
+ * instruction.  In those cases we will try to substitute a new code
+ * sequence or request that the trace be shortened and retried.
+ */
+static AssemblerStatus assembleInstructions(CompilationUnit *cUnit,
+                                            intptr_t startAddr)
+{
+    int *bufferAddr = (int *) cUnit->codeBuffer;
+    MipsLIR *lir;
+
+    for (lir = (MipsLIR *) cUnit->firstLIRInsn; lir; lir = NEXT_LIR(lir)) {
+        if (lir->opcode < 0) {
+            continue;
+        }
+
+        if (lir->flags.isNop) {
+            continue;
+        }
+
+        if (lir->opcode == kMipsB || lir->opcode == kMipsBal) {
+            MipsLIR *targetLIR = (MipsLIR *) lir->generic.target;
+            intptr_t pc = lir->generic.offset + 4;
+            intptr_t target = targetLIR->generic.offset;
+            int delta = target - pc;
+            if (delta & 0x3) {
+                LOGE("PC-rel distance is not multiple of 4: %d", delta);
+                dvmAbort();
+            }
+            if (delta > 131068 || delta < -131069) {
+                LOGE("Unconditional branch distance out of range: %d", delta);
+                dvmAbort();
+            }
+            lir->operands[0] = delta >> 2;
+        } else if (lir->opcode >= kMipsBeqz && lir->opcode <= kMipsBnez) {
+            MipsLIR *targetLIR = (MipsLIR *) lir->generic.target;
+            intptr_t pc = lir->generic.offset + 4;
+            intptr_t target = targetLIR->generic.offset;
+            int delta = target - pc;
+            if (delta & 0x3) {
+                LOGE("PC-rel distance is not multiple of 4: %d", delta);
+                dvmAbort();
+            }
+            if (delta > 131068 || delta < -131069) {
+                LOGE("Conditional branch distance out of range: %d", delta);
+                dvmAbort();
+            }
+            lir->operands[1] = delta >> 2;
+        } else if (lir->opcode == kMipsBeq || lir->opcode == kMipsBne) {
+            MipsLIR *targetLIR = (MipsLIR *) lir->generic.target;
+            intptr_t pc = lir->generic.offset + 4;
+            intptr_t target = targetLIR->generic.offset;
+            int delta = target - pc;
+            if (delta & 0x3) {
+                LOGE("PC-rel distance is not multiple of 4: %d", delta);
+                dvmAbort();
+            }
+            if (delta > 131068 || delta < -131069) {
+                LOGE("Conditional branch distance out of range: %d", delta);
+                dvmAbort();
+            }
+            lir->operands[2] = delta >> 2;
+        } else if (lir->opcode == kMipsJal) {
+            intptr_t curPC = (startAddr + lir->generic.offset + 4) & ~3;
+            intptr_t target = lir->operands[0];
+            /* ensure PC-region branch can be used */
+            assert((curPC & 0xF0000000) == (target & 0xF0000000));
+            if (target & 0x3) {
+                LOGE("Jump target is not multiple of 4: %d", target);
+                dvmAbort();
+            }
+            lir->operands[0] =  target >> 2;
+        } else if (lir->opcode == kMipsLahi) { /* load address hi (via lui) */
+            MipsLIR *targetLIR = (MipsLIR *) lir->generic.target;
+            intptr_t target = startAddr + targetLIR->generic.offset;
+            lir->operands[1] = target >> 16;
+        } else if (lir->opcode == kMipsLalo) { /* load address lo (via ori) */
+            MipsLIR *targetLIR = (MipsLIR *) lir->generic.target;
+            intptr_t target = startAddr + targetLIR->generic.offset;
+            lir->operands[2] = lir->operands[2] + target;
+        }
+
+        MipsEncodingMap *encoder = &EncodingMap[lir->opcode];
+        u4 bits = encoder->skeleton;
+        int i;
+        for (i = 0; i < 4; i++) {
+            u4 operand;
+            u4 value;
+            operand = lir->operands[i];
+            switch(encoder->fieldLoc[i].kind) {
+                case kFmtUnused:
+                    break;
+                case kFmtBitBlt:
+                    if (encoder->fieldLoc[i].start == 0 && encoder->fieldLoc[i].end == 31) {
+                        value = operand;
+                    } else {
+                        value = (operand << encoder->fieldLoc[i].start) &
+                                ((1 << (encoder->fieldLoc[i].end + 1)) - 1);
+                    }
+                    bits |= value;
+                    break;
+                case kFmtDfp: {
+                    assert(DOUBLEREG(operand));
+                    assert((operand & 0x1) == 0);
+                    value = ((operand & FP_REG_MASK) << encoder->fieldLoc[i].start) &
+                            ((1 << (encoder->fieldLoc[i].end + 1)) - 1);
+                    bits |= value;
+                    break;
+                }
+                case kFmtSfp:
+                    assert(SINGLEREG(operand));
+                    value = ((operand & FP_REG_MASK) << encoder->fieldLoc[i].start) &
+                            ((1 << (encoder->fieldLoc[i].end + 1)) - 1);
+                    bits |= value;
+                    break;
+                default:
+                    assert(0);
+            }
+        }
+        assert(encoder->size == 2);
+        *bufferAddr++ = bits;
+    }
+    return kSuccess;
+}
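
To make the field-insertion loop concrete: for kMipsAddu (skeleton 0x00000021;
rs in bits 25..21, rt in 20..16, rd in 15..11), assembling addu v0,a0,a1 from
operands {2, 4, 5} reduces to a hand-checkable OR (illustrative sketch, not
patch code):

    static void exampleEncodeAddu(void)
    {
        u4 bits = 0x00000021;         /* kMipsAddu skeleton */
        bits |= 4u << 21;             /* rs = a0 */
        bits |= 5u << 16;             /* rt = a1 */
        bits |= 2u << 11;             /* rd = v0 */
        assert(bits == 0x00851021);   /* addu v0,a0,a1 */
        (void) bits;
    }
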
+
+static int assignLiteralOffsetCommon(LIR *lir, int offset)
+{
+    for (;lir != NULL; lir = lir->next) {
+        lir->offset = offset;
+        offset += 4;
+    }
+    return offset;
+}
+
+/* Determine the offset of each literal field */
+static int assignLiteralOffset(CompilationUnit *cUnit, int offset)
+{
+    /* Reserved for the size field of class pointer pool */
+    offset += 4;
+    offset = assignLiteralOffsetCommon(cUnit->classPointerList, offset);
+    offset = assignLiteralOffsetCommon(cUnit->literalList, offset);
+    return offset;
+}
+
+/*
+ * Translation layout in the code cache.  Note that the codeAddress pointer
+ * in JitTable will point directly to the code body (field codeAddress).  The
+ * chain cell offset is at codeAddress - 4, and the address of the trace
+ * profile counter is at codeAddress - 8.
+ *
+ *      +----------------------------+
+ *      | Trace Profile Counter addr |  -> 4 bytes (PROF_COUNTER_ADDR_SIZE)
+ *      +----------------------------+
+ *   +--| Offset to chain cell counts|  -> 4 bytes (CHAIN_CELL_OFFSET_SIZE)
+ *   |  +----------------------------+
+ *   |  | Trace profile code         |  <- entry point when profiling
+ *   |  .  -   -   -   -   -   -   - .
+ *   |  | Code body                  |  <- entry point when not profiling
+ *   |  .                            .
+ *   |  |                            |
+ *   |  +----------------------------+
+ *   |  | Chaining Cells             |  -> 16/20 bytes, 4 byte aligned
+ *   |  .                            .
+ *   |  .                            .
+ *   |  |                            |
+ *   |  +----------------------------+
+ *   |  | Gap for large switch stmt  |  -> # cases >= MAX_CHAINED_SWITCH_CASES
+ *   |  +----------------------------+
+ *   +->| Chaining cell counts       |  -> 8 bytes, chain cell counts by type
+ *      +----------------------------+
+ *      | Trace description          |  -> variable sized
+ *      .                            .
+ *      |                            |
+ *      +----------------------------+
+ *      | # Class pointer pool size  |  -> 4 bytes
+ *      +----------------------------+
+ *      | Class pointer pool         |  -> 4-byte aligned, variable size
+ *      .                            .
+ *      .                            .
+ *      |                            |
+ *      +----------------------------+
+ *      | Literal pool               |  -> 4-byte aligned, variable size
+ *      .                            .
+ *      .                            .
+ *      |                            |
+ *      +----------------------------+
+ *
+ */
+
+#define PROF_COUNTER_ADDR_SIZE 4
+#define CHAIN_CELL_OFFSET_SIZE 4
+
+/*
+ * Utility functions to navigate various parts in a trace. If we change the
+ * layout/offset in the future, we just modify these functions and we don't need
+ * to propagate the changes to all the use cases.
+ */
+static inline char *getTraceBase(const JitEntry *p)
+{
+    return (char*)p->codeAddress -
+        (PROF_COUNTER_ADDR_SIZE + CHAIN_CELL_OFFSET_SIZE);
+}
+
+/* Handy function to retrieve the profile count */
+static inline JitTraceCounter_t getProfileCount(const JitEntry *entry)
+{
+    if (entry->dPC == 0 || entry->codeAddress == 0 ||
+        entry->codeAddress == dvmCompilerGetInterpretTemplate())
+        return 0;
+
+    JitTraceCounter_t **p = (JitTraceCounter_t **) getTraceBase(entry);
+
+    return **p;
+}
+
+/* Handy function to reset the profile count */
+static inline void resetProfileCount(const JitEntry *entry)
+{
+    if (entry->dPC == 0 || entry->codeAddress == 0 ||
+        entry->codeAddress == dvmCompilerGetInterpretTemplate())
+        return;
+
+    JitTraceCounter_t **p = (JitTraceCounter_t **) getTraceBase(entry);
+
+    **p = 0;
+}
+
+/* Get the pointer of the chain cell count */
+static inline ChainCellCounts* getChainCellCountsPointer(const char *base)
+{
+    /* Skip over the profile counter address (PROF_COUNTER_ADDR_SIZE bytes) */
+    u4 *chainCellOffsetP = (u4 *) (base + PROF_COUNTER_ADDR_SIZE);
+    u4 chainCellOffset = *chainCellOffsetP;
+    return (ChainCellCounts *) ((char *) chainCellOffsetP + chainCellOffset);
+}
+
+/* Get the size of all chaining cells, in 32-bit words */
+static inline u4 getChainCellSize(const ChainCellCounts* pChainCellCounts)
+{
+    int cellSize = 0;
+    int i;
+
+    /* Accumulate the size of each chain cell type, in 32-bit words */
+    for (i = 0; i < kChainingCellGap; i++) {
+        if (i != kChainingCellInvokePredicted) {
+            cellSize += pChainCellCounts->u.count[i] *
+                        (CHAIN_CELL_NORMAL_SIZE >> 2);
+        } else {
+            cellSize += pChainCellCounts->u.count[i] *
+                (CHAIN_CELL_PREDICTED_SIZE >> 2);
+        }
+    }
+    return cellSize;
+}
+
+/* Get the starting pointer of the trace description section */
+static JitTraceDescription* getTraceDescriptionPointer(const char *base)
+{
+    ChainCellCounts* pCellCounts = getChainCellCountsPointer(base);
+    return (JitTraceDescription*) ((char*)pCellCounts + sizeof(*pCellCounts));
+}
+
+/* Get the size of a trace description */
+static int getTraceDescriptionSize(const JitTraceDescription *desc)
+{
+    int runCount;
+    /* Trace end is always of non-meta type (ie isCode == true) */
+    for (runCount = 0; ; runCount++) {
+        if (desc->trace[runCount].isCode &&
+            desc->trace[runCount].info.frag.runEnd)
+            break;
+    }
+    return sizeof(JitTraceDescription) + ((runCount+1) * sizeof(JitTraceRun));
+}
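+
+/*
+ * Illustrative sketch (editorial, not part of the original change) of how
+ * the helpers above compose to walk the layout pictured in the diagram,
+ * given a JitEntry *entry:
+ *
+ *     const char *base = getTraceBase(entry);
+ *     ChainCellCounts *counts = getChainCellCountsPointer(base);
+ *     JitTraceDescription *desc = getTraceDescriptionPointer(base);
+ *     int descSize = getTraceDescriptionSize(desc);
+ *     // The class pointer pool starts right after the trace description:
+ *     int *classPool = (int *) ((char *) desc + descSize);
+ *     int numClassPointers = *classPool;
+ */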
+
+#if defined(SIGNATURE_BREAKPOINT)
+/* Inspect the assembled instruction stream to find potential matches */
+static void matchSignatureBreakpoint(const CompilationUnit *cUnit,
+                                     unsigned int size)
+{
+    unsigned int i, j;
+    u4 *ptr = (u4 *) cUnit->codeBuffer;
+
+    for (i = 0; i < size - gDvmJit.signatureBreakpointSize + 1; i++) {
+        if (ptr[i] == gDvmJit.signatureBreakpoint[0]) {
+            for (j = 1; j < gDvmJit.signatureBreakpointSize; j++) {
+                if (ptr[i+j] != gDvmJit.signatureBreakpoint[j]) {
+                    break;
+                }
+            }
+            if (j == gDvmJit.signatureBreakpointSize) {
+                LOGD("Signature match starting from offset %#x (%d words)",
+                     i*4, gDvmJit.signatureBreakpointSize);
+                int descSize = getTraceDescriptionSize(cUnit->traceDesc);
+                JitTraceDescription *newCopy =
+                    (JitTraceDescription *) malloc(descSize);
+                memcpy(newCopy, cUnit->traceDesc, descSize);
+                dvmCompilerWorkEnqueue(NULL, kWorkOrderTraceDebug, newCopy);
+                break;
+            }
+        }
+    }
+}
+#endif
+
+/*
+ * Go over each instruction in the list and calculate the offset from the top
+ * before sending them off to the assembler. If an out-of-range branch
+ * distance is seen, rearrange the instructions a bit to correct it.
+ */
+void dvmCompilerAssembleLIR(CompilationUnit *cUnit, JitTranslationInfo *info)
+{
+    MipsLIR *mipsLIR;
+    int offset = 0;
+    int i;
+    ChainCellCounts chainCellCounts;
+    int descSize = (cUnit->jitMode == kJitMethod) ?
+        0 : getTraceDescriptionSize(cUnit->traceDesc);
+    int chainingCellGap = 0;
+
+    info->instructionSet = cUnit->instructionSet;
+
+    /* Beginning offset needs to allow space for chain cell offset */
+    for (mipsLIR = (MipsLIR *) cUnit->firstLIRInsn;
+         mipsLIR;
+         mipsLIR = NEXT_LIR(mipsLIR)) {
+        mipsLIR->generic.offset = offset;
+        if (mipsLIR->opcode >= 0 && !mipsLIR->flags.isNop) {
+            mipsLIR->flags.size = EncodingMap[mipsLIR->opcode].size * 2;
+            offset += mipsLIR->flags.size;
+        }
+        /* Pseudo opcodes don't consume space */
+    }
+
+    /* Const values have to be word aligned */
+    offset = (offset + 3) & ~3;
+
+    u4 chainCellOffset = offset;
+    MipsLIR *chainCellOffsetLIR = NULL;
+
+    if (cUnit->jitMode != kJitMethod) {
+        /*
+         * Get the gap (in u4 words) between the offset of the chaining cell
+         * counts and the bottom of the real chaining cells. If the translation
+         * has chaining cells, the gap is guaranteed to be a multiple of 4.
+         */
+        chainingCellGap = (offset - cUnit->chainingCellBottom->offset) >> 2;
+
+        /* Add space for chain cell counts & trace description */
+        chainCellOffsetLIR = (MipsLIR *) cUnit->chainCellOffsetLIR;
+        assert(chainCellOffsetLIR);
+        assert(chainCellOffset < 0x10000);
+        assert(chainCellOffsetLIR->opcode == kMips32BitData &&
+               chainCellOffsetLIR->operands[0] == CHAIN_CELL_OFFSET_TAG);
+
+        /*
+         * Adjust the CHAIN_CELL_OFFSET_TAG LIR's offset to remove the
+         * space occupied by the pointer to the trace profiling counter.
+         */
+        chainCellOffsetLIR->operands[0] = chainCellOffset - 4;
+
+        offset += sizeof(chainCellCounts) + descSize;
+
+        assert((offset & 0x3) == 0);  /* Should still be word aligned */
+    }
+
+    /* Set up offsets for literals */
+    cUnit->dataOffset = offset;
+
+    /*
+     * Assign each class pointer/constant an offset from the beginning of the
+     * compilation unit.
+     */
+    offset = assignLiteralOffset(cUnit, offset);
+
+    cUnit->totalSize = offset;
+
+    if (gDvmJit.codeCacheByteUsed + cUnit->totalSize > gDvmJit.codeCacheSize) {
+        gDvmJit.codeCacheFull = true;
+        info->discardResult = true;
+        return;
+    }
+
+    /* Allocate enough space for the code block */
+    cUnit->codeBuffer = (unsigned char *)dvmCompilerNew(chainCellOffset, true);
+    if (cUnit->codeBuffer == NULL) {
+        LOGE("Code buffer allocation failure");
+        info->discardResult = true;
+        return;
+    }
+
+    /*
+     * Attempt to assemble the trace.  Note that assembleInstructions
+     * may rewrite the code sequence and request a retry.
+     */
+    cUnit->assemblerStatus = assembleInstructions(cUnit,
+          (intptr_t) gDvmJit.codeCache + gDvmJit.codeCacheByteUsed);
+
+    switch(cUnit->assemblerStatus) {
+        case kSuccess:
+            break;
+        case kRetryAll:
+            if (cUnit->assemblerRetries < MAX_ASSEMBLER_RETRIES) {
+                if (cUnit->jitMode != kJitMethod) {
+                    /* Restore pristine chain cell marker on retry */
+                    chainCellOffsetLIR->operands[0] = CHAIN_CELL_OFFSET_TAG;
+                }
+                return;
+            }
+            /* Too many retries - reset and try cutting the trace in half */
+            cUnit->assemblerRetries = 0;
+            cUnit->assemblerStatus = kRetryHalve;
+            return;
+        case kRetryHalve:
+            return;
+        default:
+            LOGE("Unexpected assembler status: %d", cUnit->assemblerStatus);
+            dvmAbort();
+    }
+
+#if defined(SIGNATURE_BREAKPOINT)
+    if (info->discardResult == false && gDvmJit.signatureBreakpoint != NULL &&
+        chainCellOffset/4 >= gDvmJit.signatureBreakpointSize) {
+        matchSignatureBreakpoint(cUnit, chainCellOffset/4);
+    }
+#endif
+
+    /* Don't go all the way if the goal is just to get the verbose output */
+    if (info->discardResult) return;
+
+    /*
+     * The cache might disappear - acquire lock and check version.
+     * Continue holding lock until translation cache update is complete.
+     * These actions are required here in the compiler thread because
+     * it is unaffected by suspend requests and doesn't know if a
+     * translation cache flush is in progress.
+     */
+    dvmLockMutex(&gDvmJit.compilerLock);
+    if (info->cacheVersion != gDvmJit.cacheVersion) {
+        /* Cache changed - discard current translation */
+        info->discardResult = true;
+        info->codeAddress = NULL;
+        dvmUnlockMutex(&gDvmJit.compilerLock);
+        return;
+    }
+
+    cUnit->baseAddr = (char *) gDvmJit.codeCache + gDvmJit.codeCacheByteUsed;
+    gDvmJit.codeCacheByteUsed += offset;
+
+    UNPROTECT_CODE_CACHE(cUnit->baseAddr, offset);
+
+    /* Install the code block */
+    memcpy((char*)cUnit->baseAddr, cUnit->codeBuffer, chainCellOffset);
+    gDvmJit.numCompilations++;
+
+    if (cUnit->jitMode != kJitMethod) {
+        /* Install the chaining cell counts */
+        for (i = 0; i < kChainingCellGap; i++) {
+            chainCellCounts.u.count[i] = cUnit->numChainingCells[i];
+        }
+
+        /* Set the gap number in the chaining cell count structure */
+        chainCellCounts.u.count[kChainingCellGap] = chainingCellGap;
+
+        memcpy((char*)cUnit->baseAddr + chainCellOffset, &chainCellCounts,
+               sizeof(chainCellCounts));
+
+        /* Install the trace description */
+        memcpy((char*) cUnit->baseAddr + chainCellOffset +
+                       sizeof(chainCellCounts),
+               cUnit->traceDesc, descSize);
+    }
+
+    /* Write the literals directly into the code cache */
+    installLiteralPools(cUnit);
+
+    /* Flush dcache and invalidate the icache to maintain coherence */
+    dvmCompilerCacheFlush((long)cUnit->baseAddr,
+                          (long)((char *) cUnit->baseAddr + offset), 0);
+
+    UPDATE_CODE_CACHE_PATCHES();
+
+    PROTECT_CODE_CACHE(cUnit->baseAddr, offset);
+
+    /* Translation cache update complete - release lock */
+    dvmUnlockMutex(&gDvmJit.compilerLock);
+
+    /* Record code entry point and instruction set */
+    info->codeAddress = (char*)cUnit->baseAddr + cUnit->headerSize;
+    /* transfer the size of the profiling code */
+    info->profileCodeSize = cUnit->profileCodeSize;
+}
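+
+/*
+ * Illustrative sketch (editorial; the real driver lives in the compiler
+ * loop, not in this file) of how a caller might react to the assembler
+ * status set above:
+ *
+ *     dvmCompilerAssembleLIR(cUnit, info);
+ *     if (cUnit->assemblerStatus == kRetryAll) {
+ *         // Regenerate the LIR and assemble again with adjusted sizes.
+ *     } else if (cUnit->assemblerStatus == kRetryHalve) {
+ *         // Recompile a shortened version of the trace.
+ *     }
+ */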
+
+/*
+ * Returns the skeleton bit pattern associated with an opcode.  All
+ * variable fields are zeroed.
+ */
+static u4 getSkeleton(MipsOpCode op)
+{
+    return EncodingMap[op].skeleton;
+}
+
+static u4 assembleChainingBranch(int branchOffset, bool thumbTarget)
+{
+    return getSkeleton(kMipsJal) | ((branchOffset & 0x0FFFFFFF) >> 2);
+}
+
+/*
+ * Perform translation chain operation.
+ * For MIPS, we'll use a JAL instruction to generate an
+ * unconditional chaining branch of up to 256M. The JAL
+ * instruction also has a restriction that the jump target
+ * must be in the same 256M page as the JAL instruction's
+ * delay slot address.
+ * If the target is out of JAL's range, don't chain.
+ * If one or more threads is suspended, don't chain.
+ */
+void* dvmJitChain(void* tgtAddr, u4* branchAddr)
+{
+    u4 newInst;
+
+    /*
+     * Only chain translations when there is no urge to ask all threads to
+     * suspend themselves via the interpreter.
+     */
+    if ((gDvmJit.pProfTable != NULL) && (gDvm.sumThreadSuspendCount == 0) &&
+        (gDvmJit.codeCacheFull == false) &&
+        ((((int) tgtAddr) & 0xF0000000) == (((int) branchAddr+4) & 0xF0000000))) {
+        gDvmJit.translationChains++;
+
+        COMPILER_TRACE_CHAINING(
+            LOGD("Jit Runtime: chaining 0x%x to 0x%x",
+                 (int) branchAddr, (int) tgtAddr & -2));
+
+        newInst = assembleChainingBranch((int) tgtAddr & -2, 0);
+
+        UNPROTECT_CODE_CACHE(branchAddr, sizeof(*branchAddr));
+
+        *branchAddr = newInst;
+        dvmCompilerCacheFlush((long)branchAddr, (long)branchAddr + 4, 0);
+        UPDATE_CODE_CACHE_PATCHES();
+
+        PROTECT_CODE_CACHE(branchAddr, sizeof(*branchAddr));
+
+        gDvmJit.hasNewChain = true;
+    }
+
+    return tgtAddr;
+}
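+
+/*
+ * Worked example (editorial): JAL carries a 26-bit word index, so
+ * assembleChainingBranch() packs bits 27..2 of the target address and the
+ * hardware supplies the top 4 bits from the delay slot PC. Hence the region
+ * check above: 0x2FFFFFFC and 0x30000000 differ in their top nibble and sit
+ * in different 256M regions, so chaining between them would jump to the
+ * wrong address and is skipped.
+ */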
+
+#if !defined(WITH_SELF_VERIFICATION)
+/*
+ * Attempt to enqueue a work order to patch an inline cache for a predicted
+ * chaining cell for virtual/interface calls.
+ */
+static void inlineCachePatchEnqueue(PredictedChainingCell *cellAddr,
+                                    PredictedChainingCell *newContent)
+{
+    /*
+     * Make sure only one thread gets here, since updating the cell (ie the
+     * fast path) and queueing the request (ie the queued path) have to be
+     * done in an atomic fashion.
+     */
+    dvmLockMutex(&gDvmJit.compilerICPatchLock);
+
+    /* Fast path for uninitialized chaining cell */
+    if (cellAddr->clazz == NULL &&
+        cellAddr->branch == PREDICTED_CHAIN_BX_PAIR_INIT) {
+
+        UNPROTECT_CODE_CACHE(cellAddr, sizeof(*cellAddr));
+
+        cellAddr->method = newContent->method;
+        cellAddr->branch = newContent->branch;
+
+        /*
+         * The update order matters - make sure clazz is updated last since it
+         * will bring the uninitialized chaining cell to life.
+         */
+        android_atomic_release_store((int32_t)newContent->clazz,
+            (volatile int32_t *)(void*) &cellAddr->clazz);
+        dvmCompilerCacheFlush((long) cellAddr, (long) (cellAddr+1), 0);
+        UPDATE_CODE_CACHE_PATCHES();
+
+        PROTECT_CODE_CACHE(cellAddr, sizeof(*cellAddr));
+
+#if defined(WITH_JIT_TUNING)
+        gDvmJit.icPatchInit++;
+#endif
+    /* Check if this is a frequently missed clazz */
+    } else if (cellAddr->stagedClazz != newContent->clazz) {
+        /* Not proven to be frequent yet - build up the filter cache */
+        UNPROTECT_CODE_CACHE(cellAddr, sizeof(*cellAddr));
+
+        cellAddr->stagedClazz = newContent->clazz;
+
+        UPDATE_CODE_CACHE_PATCHES();
+        PROTECT_CODE_CACHE(cellAddr, sizeof(*cellAddr));
+
+#if defined(WITH_JIT_TUNING)
+        gDvmJit.icPatchRejected++;
+#endif
+    /*
+     * Different classes but same method implementation - it is safe to just
+     * patch the class value without the need to stop the world.
+     */
+    } else if (cellAddr->method == newContent->method) {
+        UNPROTECT_CODE_CACHE(cellAddr, sizeof(*cellAddr));
+
+        cellAddr->clazz = newContent->clazz;
+        /* No need to flush the cache here since the branch is not patched */
+        UPDATE_CODE_CACHE_PATCHES();
+
+        PROTECT_CODE_CACHE(cellAddr, sizeof(*cellAddr));
+
+#if defined(WITH_JIT_TUNING)
+        gDvmJit.icPatchLockFree++;
+#endif
+    /*
+     * Cannot patch the chaining cell inline - queue it until the next safe
+     * point.
+     */
+    } else if (gDvmJit.compilerICPatchIndex < COMPILER_IC_PATCH_QUEUE_SIZE) {
+        int index = gDvmJit.compilerICPatchIndex++;
+        const ClassObject *clazz = newContent->clazz;
+
+        gDvmJit.compilerICPatchQueue[index].cellAddr = cellAddr;
+        gDvmJit.compilerICPatchQueue[index].cellContent = *newContent;
+        gDvmJit.compilerICPatchQueue[index].classDescriptor = clazz->descriptor;
+        gDvmJit.compilerICPatchQueue[index].classLoader = clazz->classLoader;
+        /* For verification purpose only */
+        gDvmJit.compilerICPatchQueue[index].serialNumber = clazz->serialNumber;
+#if defined(WITH_JIT_TUNING)
+        gDvmJit.icPatchQueued++;
+#endif
+    } else {
+    /* Queue is full - just drop this patch request */
+#if defined(WITH_JIT_TUNING)
+        gDvmJit.icPatchDropped++;
+#endif
+    }
+
+    dvmUnlockMutex(&gDvmJit.compilerICPatchLock);
+}
+#endif
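+
+/*
+ * Editorial summary of the four cases handled by inlineCachePatchEnqueue:
+ *   1. cell uninitialized            -> patch method/branch now, clazz last
+ *   2. stagedClazz != new clazz      -> record in the filter, reject for now
+ *   3. same method, different clazz  -> patch clazz only, no cache flush
+ *   4. otherwise                     -> queue for the next safe point, or
+ *                                      drop the request if the queue is full
+ */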
+
+/*
+ * This method is called from the invoke templates for virtual and interface
+ * methods to speculatively setup a chain to the callee. The templates are
+ * written in assembly and set up method, cell, and clazz in r0, r2, and
+ * r3 respectively, so there is an unused argument in the list. Upon return one
+ * of the following three results may happen:
+ *   1) Chain is not setup because the callee is native. Reset the rechain
+ *      count to a big number so that it will take a long time before the next
+ *      rechain attempt happens.
+ *   2) Chain is not setup because the callee has not been created yet. Reset
+ *      the rechain count to a small number and retry in the near future.
+ *   3) Ask all other threads to stop before patching this chaining cell.
+ *      This is required because another thread may have passed the class check
+ *      but hasn't reached the chaining cell yet to follow the chain. If we
+ *      patch the content before halting the other thread, there is a small
+ *      window in which it may follow the new but wrong chain and invoke a
+ *      different method.
+ */
+const Method *dvmJitToPatchPredictedChain(const Method *method,
+                                          Thread *self,
+                                          PredictedChainingCell *cell,
+                                          const ClassObject *clazz)
+{
+    int newRechainCount = PREDICTED_CHAIN_COUNTER_RECHAIN;
+#if defined(WITH_SELF_VERIFICATION)
+    newRechainCount = PREDICTED_CHAIN_COUNTER_AVOID;
+    goto done;
+#else
+    PredictedChainingCell newCell;
+    int baseAddr, tgtAddr;
+    if (dvmIsNativeMethod(method)) {
+        UNPROTECT_CODE_CACHE(cell, sizeof(*cell));
+
+        /*
+         * Put a non-zero/bogus value in the clazz field so that it won't
+         * trigger immediate patching and will continue to fail to match with
+         * a real clazz pointer.
+         */
+        cell->clazz = (ClassObject *) PREDICTED_CHAIN_FAKE_CLAZZ;
+
+        UPDATE_CODE_CACHE_PATCHES();
+        PROTECT_CODE_CACHE(cell, sizeof(*cell));
+        goto done;
+    }
+
+    tgtAddr = (int) dvmJitGetTraceAddr(method->insns);
+    baseAddr = (int) cell + 4;   // PC is cur_addr + 4
+
+    if ((baseAddr & 0xF0000000) != (tgtAddr & 0xF0000000)) {
+        COMPILER_TRACE_CHAINING(
+            LOGD("Jit Runtime: predicted chain %p to distant target %s ignored",
+                 cell, method->name));
+        goto done;
+    }
+
+    /*
+     * The callee has not been compiled yet. Reset the counter to a small
+     * value and come back to check soon.
+     */
+    if ((tgtAddr == 0) ||
+        ((void*)tgtAddr == dvmCompilerGetInterpretTemplate())) {
+        COMPILER_TRACE_CHAINING(
+            LOGD("Jit Runtime: predicted chain %p to method %s%s delayed",
+                 cell, method->clazz->descriptor, method->name));
+        goto done;
+    }
+
+    if (cell->clazz == NULL) {
+        newRechainCount = self->icRechainCount;
+    }
+
+    newCell.branch = assembleChainingBranch(tgtAddr, true);
+    newCell.delay_slot = getSkeleton(kMipsNop);
+    newCell.clazz = clazz;
+    newCell.method = method;
+    newCell.stagedClazz = NULL;
+
+    /*
+     * Enqueue the work order; the chaining cell will be patched the next
+     * time a safe point is entered.
+     *
+     * If the enqueuing fails, reset the rechain count to a normal value so
+     * that it won't get indefinitely delayed.
+     */
+    inlineCachePatchEnqueue(cell, &newCell);
+#endif
+done:
+    self->icRechainCount = newRechainCount;
+    return method;
+}
+
+/*
+ * Patch the inline cache content based on the content passed from the work
+ * order.
+ */
+void dvmCompilerPatchInlineCache(void)
+{
+    int i;
+    PredictedChainingCell *minAddr, *maxAddr;
+
+    /* Nothing to be done */
+    if (gDvmJit.compilerICPatchIndex == 0) return;
+
+    /*
+     * Since all threads are already stopped we don't really need to acquire
+     * the lock. But race conditions could easily be introduced in the future
+     * without paying attention, so we still acquire the lock here.
+     */
+    dvmLockMutex(&gDvmJit.compilerICPatchLock);
+
+    UNPROTECT_CODE_CACHE(gDvmJit.codeCache, gDvmJit.codeCacheByteUsed);
+
+    //LOGD("Number of IC patch work orders: %d", gDvmJit.compilerICPatchIndex);
+
+    /* Initialize the min/max address range */
+    minAddr = (PredictedChainingCell *)
+        ((char *) gDvmJit.codeCache + gDvmJit.codeCacheSize);
+    maxAddr = (PredictedChainingCell *) gDvmJit.codeCache;
+
+    for (i = 0; i < gDvmJit.compilerICPatchIndex; i++) {
+        ICPatchWorkOrder *workOrder = &gDvmJit.compilerICPatchQueue[i];
+        PredictedChainingCell *cellAddr = workOrder->cellAddr;
+        PredictedChainingCell *cellContent = &workOrder->cellContent;
+        ClassObject *clazz = dvmFindClassNoInit(workOrder->classDescriptor,
+                                                workOrder->classLoader);
+
+        assert(clazz->serialNumber == workOrder->serialNumber);
+
+        /* Use the newly resolved clazz pointer */
+        cellContent->clazz = clazz;
+
+        COMPILER_TRACE_CHAINING(
+            LOGD("Jit Runtime: predicted chain %p from %s to %s (%s) "
+                 "patched",
+                 cellAddr,
+                 cellAddr->clazz->descriptor,
+                 cellContent->clazz->descriptor,
+                 cellContent->method->name));
+
+        /* Patch the chaining cell */
+        *cellAddr = *cellContent;
+        minAddr = (cellAddr < minAddr) ? cellAddr : minAddr;
+        maxAddr = (cellAddr > maxAddr) ? cellAddr : maxAddr;
+    }
+
+    /* Then synchronize the I/D cache */
+    dvmCompilerCacheFlush((long) minAddr, (long) (maxAddr+1), 0);
+    UPDATE_CODE_CACHE_PATCHES();
+
+    PROTECT_CODE_CACHE(gDvmJit.codeCache, gDvmJit.codeCacheByteUsed);
+
+    gDvmJit.compilerICPatchIndex = 0;
+    dvmUnlockMutex(&gDvmJit.compilerICPatchLock);
+}
+
+/*
+ * Unchain a trace given the starting address of the translation
+ * in the code cache.  Refer to the diagram in dvmCompilerAssembleLIR.
+ * Returns the address following the last cell unchained.
+ */
+static u4* unchainSingle(JitEntry *trace)
+{
+    const char *base = getTraceBase(trace);
+    ChainCellCounts *pChainCellCounts = getChainCellCountsPointer(base);
+    int cellSize = getChainCellSize(pChainCellCounts);
+    u4* pChainCells;
+    int i,j;
+    PredictedChainingCell *predChainCell;
+
+    if (cellSize == 0)
+        return (u4 *) pChainCellCounts;
+
+    /* Locate the beginning of the chain cell region */
+    pChainCells = ((u4 *) pChainCellCounts) - cellSize -
+                  pChainCellCounts->u.count[kChainingCellGap];
+
+    /* The cells are sorted in order - walk through them and reset */
+    for (i = 0; i < kChainingCellGap; i++) {
+        int elemSize = CHAIN_CELL_NORMAL_SIZE >> 2;  /* In 32-bit words */
+        if (i == kChainingCellInvokePredicted) {
+            elemSize = CHAIN_CELL_PREDICTED_SIZE >> 2;
+        }
+
+        for (j = 0; j < pChainCellCounts->u.count[i]; j++) {
+            int targetOffset;
+            switch(i) {
+                case kChainingCellNormal:
+                    targetOffset = offsetof(Thread,
+                          jitToInterpEntries.dvmJitToInterpNormal);
+                    break;
+                case kChainingCellHot:
+                case kChainingCellInvokeSingleton:
+                    targetOffset = offsetof(Thread,
+                          jitToInterpEntries.dvmJitToInterpTraceSelect);
+                    break;
+                case kChainingCellInvokePredicted:
+                    targetOffset = 0;
+                    predChainCell = (PredictedChainingCell *) pChainCells;
+                    /*
+                     * Another mutator thread may be racing to use this
+                     * particular predicted cell and may already have passed
+                     * the clazz comparison. So we cannot safely wipe the
+                     * method and branch, but it is safe to clear the clazz,
+                     * which serves as the key.
+                     */
+                    predChainCell->clazz = PREDICTED_CHAIN_CLAZZ_INIT;
+                    break;
+#if defined(WITH_SELF_VERIFICATION)
+                case kChainingCellBackwardBranch:
+                    targetOffset = offsetof(Thread,
+                          jitToInterpEntries.dvmJitToInterpBackwardBranch);
+                    break;
+#else
+                case kChainingCellBackwardBranch:
+                    targetOffset = offsetof(Thread,
+                          jitToInterpEntries.dvmJitToInterpNormal);
+                    break;
+#endif
+                default:
+                    targetOffset = 0; // make gcc happy
+                    LOGE("Unexpected chaining type: %d", i);
+                    dvmAbort();  // dvmAbort OK here - can't safely recover
+            }
+            COMPILER_TRACE_CHAINING(
+                LOGD("Jit Runtime: unchaining %#x", (int)pChainCells));
+            /*
+             * Code sequence for a chaining cell is:
+             *     lw   a0, offset(rSELF)
+             *     jalr ra, a0
+             */
+            if (i != kChainingCellInvokePredicted) {
+                *pChainCells = getSkeleton(kMipsLw) | (r_A0 << 16) |
+                               targetOffset | (rSELF << 21);
+                *(pChainCells+1) = getSkeleton(kMipsJalr) | (r_RA << 11) |
+                                   (r_A0 << 21);
+            }
+            pChainCells += elemSize;  /* Advance by a fixed number of words */
+        }
+    }
+    return pChainCells;
+}
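+
+/*
+ * Editorial note on the two words written above, assuming the standard MIPS
+ * field layout (rs at bits 25:21, rt at 20:16, imm at 15:0, rd at 15:11):
+ *
+ *     lw   a0, offset(rSELF)  ->  skeleton | (rSELF << 21) | (r_A0 << 16) | offset
+ *     jalr ra, a0             ->  skeleton | (r_A0 << 21) | (r_RA << 11)
+ */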
+
+/* Unchain all translations in the cache. */
+void dvmJitUnchainAll()
+{
+    u4* lowAddress = NULL;
+    u4* highAddress = NULL;
+    unsigned int i;
+    if (gDvmJit.pJitEntryTable != NULL) {
+        COMPILER_TRACE_CHAINING(LOGD("Jit Runtime: unchaining all"));
+        dvmLockMutex(&gDvmJit.tableLock);
+
+        UNPROTECT_CODE_CACHE(gDvmJit.codeCache, gDvmJit.codeCacheByteUsed);
+
+        for (i = 0; i < gDvmJit.jitTableSize; i++) {
+            if (gDvmJit.pJitEntryTable[i].dPC &&
+                !gDvmJit.pJitEntryTable[i].u.info.isMethodEntry &&
+                gDvmJit.pJitEntryTable[i].codeAddress &&
+                (gDvmJit.pJitEntryTable[i].codeAddress !=
+                 dvmCompilerGetInterpretTemplate())) {
+                u4* lastAddress;
+                lastAddress = unchainSingle(&gDvmJit.pJitEntryTable[i]);
+                if (lowAddress == NULL ||
+                      (u4*)gDvmJit.pJitEntryTable[i].codeAddress < lowAddress)
+                    lowAddress = (u4*)gDvmJit.pJitEntryTable[i].codeAddress;
+                if (lastAddress > highAddress)
+                    highAddress = lastAddress;
+            }
+        }
+
+        if (lowAddress && highAddress)
+                dvmCompilerCacheFlush((long)lowAddress, (long)highAddress, 0);
+
+        UPDATE_CODE_CACHE_PATCHES();
+
+        PROTECT_CODE_CACHE(gDvmJit.codeCache, gDvmJit.codeCacheByteUsed);
+
+        dvmUnlockMutex(&gDvmJit.tableLock);
+        gDvmJit.translationChains = 0;
+    }
+    gDvmJit.hasNewChain = false;
+}
+
+typedef struct jitProfileAddrToLine {
+    u4 lineNum;
+    u4 bytecodeOffset;
+} jitProfileAddrToLine;
+
+/* Callback function to track the bytecode offset/line number relationship */
+static int addrToLineCb (void *cnxt, u4 bytecodeOffset, u4 lineNum)
+{
+    jitProfileAddrToLine *addrToLine = (jitProfileAddrToLine *) cnxt;
+
+    /* Best match so far for this offset */
+    if (addrToLine->bytecodeOffset >= bytecodeOffset) {
+        addrToLine->lineNum = lineNum;
+    }
+    return 0;
+}
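+
+/*
+ * Worked example (editorial): assuming dexDecodeDebugInfo reports entries in
+ * increasing bytecode-offset order, for the entries {offset 0 -> line 10,
+ * offset 5 -> line 12, offset 9 -> line 13} and a trace starting at offset 7,
+ * the callback accepts lines 10 and 12 and rejects line 13, leaving
+ * lineNum == 12, the best match.
+ */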
+
+/* Dumps profile info for a single trace */
+static int dumpTraceProfile(JitEntry *p, bool silent, bool reset,
+                            unsigned long sum)
+{
+    int idx;
+
+    if (p->codeAddress == NULL) {
+        if (!silent)
+            LOGD("TRACEPROFILE NULL");
+        return 0;
+    }
+    if (p->codeAddress == dvmCompilerGetInterpretTemplate()) {
+        if (!silent)
+            LOGD("TRACEPROFILE INTERPRET_ONLY");
+        return 0;
+    }
+
+    JitTraceCounter_t count = getProfileCount(p);
+    if (reset) {
+        resetProfileCount(p);
+    }
+    if (silent) {
+        return count;
+    }
+    JitTraceDescription *desc = getTraceDescriptionPointer(getTraceBase(p));
+    const Method *method = desc->method;
+    char *methodDesc = dexProtoCopyMethodDescriptor(&method->prototype);
+    jitProfileAddrToLine addrToLine = {0, desc->trace[0].info.frag.startOffset};
+
+    /*
+     * We may end up decoding the debug information for the same method
+     * multiple times, but the tradeoff is we don't need to allocate extra
+     * space to store the addr/line mapping. Since this is a debugging feature
+ * that is used infrequently, the slower but simpler mechanism should work
+     * just fine.
+     */
+    dexDecodeDebugInfo(method->clazz->pDvmDex->pDexFile,
+                       dvmGetMethodCode(method),
+                       method->clazz->descriptor,
+                       method->prototype.protoIdx,
+                       method->accessFlags,
+                       addrToLineCb, NULL, &addrToLine);
+
+    LOGD("TRACEPROFILE 0x%08x % 10d %5.2f%% [%#x(+%d), %d] %s%s;%s",
+         (int) getTraceBase(p),
+         count,
+         ((float ) count) / sum * 100.0,
+         desc->trace[0].info.frag.startOffset,
+         desc->trace[0].info.frag.numInsts,
+         addrToLine.lineNum,
+         method->clazz->descriptor, method->name, methodDesc);
+    free(methodDesc);
+
+    /* Find the last fragment (ie runEnd is set) */
+    for (idx = 0;
+         desc->trace[idx].isCode && !desc->trace[idx].info.frag.runEnd;
+         idx++) {
+    }
+
+    /*
+ * runEnd must come with a JitCodeDesc frag. If isCode is false it must
+     * be a meta info field (only used by callsite info for now).
+     */
+    if (!desc->trace[idx].isCode) {
+        const Method *method = (const Method *)
+            desc->trace[idx+JIT_TRACE_CUR_METHOD-1].info.meta;
+        char *methodDesc = dexProtoCopyMethodDescriptor(&method->prototype);
+        /* Print the callee info in the trace */
+        LOGD("    -> %s%s;%s", method->clazz->descriptor, method->name,
+             methodDesc);
+    }
+
+    return count;
+}
+
+/* Create a copy of the trace descriptor of an existing compilation */
+JitTraceDescription *dvmCopyTraceDescriptor(const u2 *pc,
+                                            const JitEntry *knownEntry)
+{
+    const JitEntry *jitEntry = knownEntry ? knownEntry
+                                          : dvmJitFindEntry(pc, false);
+    if ((jitEntry == NULL) || (jitEntry->codeAddress == 0))
+        return NULL;
+
+    JitTraceDescription *desc =
+        getTraceDescriptionPointer(getTraceBase(jitEntry));
+
+    /* Now make a copy and return */
+    int descSize = getTraceDescriptionSize(desc);
+    JitTraceDescription *newCopy = (JitTraceDescription *) malloc(descSize);
+    memcpy(newCopy, desc, descSize);
+    return newCopy;
+}
+
+/* qsort callback function */
+static int sortTraceProfileCount(const void *entry1, const void *entry2)
+{
+    const JitEntry *jitEntry1 = (const JitEntry *)entry1;
+    const JitEntry *jitEntry2 = (const JitEntry *)entry2;
+
+    JitTraceCounter_t count1 = getProfileCount(jitEntry1);
+    JitTraceCounter_t count2 = getProfileCount(jitEntry2);
+    return (count1 == count2) ? 0 : ((count1 > count2) ? -1 : 1);
+}
+
+/* Sort the trace profile counts and dump them */
+void dvmCompilerSortAndPrintTraceProfiles()
+{
+    JitEntry *sortedEntries;
+    int numTraces = 0;
+    unsigned long sum = 0;
+    unsigned int i;
+
+    /* Make sure that the table is not changing */
+    dvmLockMutex(&gDvmJit.tableLock);
+
+    /* Sort the entries in descending order of profile count */
+    sortedEntries = (JitEntry *)malloc(sizeof(JitEntry) * gDvmJit.jitTableSize);
+    if (sortedEntries == NULL)
+        goto done;
+    memcpy(sortedEntries, gDvmJit.pJitEntryTable,
+           sizeof(JitEntry) * gDvmJit.jitTableSize);
+    qsort(sortedEntries, gDvmJit.jitTableSize, sizeof(JitEntry),
+          sortTraceProfileCount);
+
+    /* Analyze the sorted entries */
+    for (i=0; i < gDvmJit.jitTableSize; i++) {
+        if (sortedEntries[i].dPC != 0) {
+            sum += dumpTraceProfile(&sortedEntries[i],
+                                       true /* silent */,
+                                       false /* reset */,
+                                       0);
+            numTraces++;
+        }
+    }
+    if (numTraces == 0)
+        numTraces = 1;
+    if (sum == 0) {
+        sum = 1;
+    }
+
+    LOGD("JIT: Average execution count -> %d",(int)(sum / numTraces));
+
+    /* Dump the sorted entries. The count of each trace will be reset to 0. */
+    for (i=0; i < gDvmJit.jitTableSize; i++) {
+        if (sortedEntries[i].dPC != 0) {
+            dumpTraceProfile(&sortedEntries[i],
+                             false /* silent */,
+                             true /* reset */,
+                             sum);
+        }
+    }
+
+    for (i=0; i < gDvmJit.jitTableSize && i < 10; i++) {
+        /* Skip interpreter stubs */
+        if (sortedEntries[i].codeAddress == dvmCompilerGetInterpretTemplate()) {
+            continue;
+        }
+        JitTraceDescription* desc =
+            dvmCopyTraceDescriptor(NULL, &sortedEntries[i]);
+        if (desc) {
+            dvmCompilerWorkEnqueue(sortedEntries[i].dPC,
+                                   kWorkOrderTraceDebug, desc);
+        }
+    }
+
+    free(sortedEntries);
+done:
+    dvmUnlockMutex(&gDvmJit.tableLock);
+    return;
+}
+
+static void findClassPointersSingleTrace(char *base, void (*callback)(void *))
+{
+    unsigned int chainTypeIdx, chainIdx;
+    ChainCellCounts *pChainCellCounts = getChainCellCountsPointer(base);
+    int cellSize = getChainCellSize(pChainCellCounts);
+    /* Scan the chaining cells */
+    if (cellSize) {
+        /* Locate the beginning of the chain cell region */
+        u4 *pChainCells = ((u4 *) pChainCellCounts) - cellSize -
+            pChainCellCounts->u.count[kChainingCellGap];
+        /* The cells are sorted in order - walk through them */
+        for (chainTypeIdx = 0; chainTypeIdx < kChainingCellGap;
+             chainTypeIdx++) {
+            if (chainTypeIdx != kChainingCellInvokePredicted) {
+                /* In 32-bit words */
+                pChainCells += (CHAIN_CELL_NORMAL_SIZE >> 2) *
+                    pChainCellCounts->u.count[chainTypeIdx];
+                continue;
+            }
+            for (chainIdx = 0;
+                 chainIdx < pChainCellCounts->u.count[chainTypeIdx];
+                 chainIdx++) {
+                PredictedChainingCell *cell =
+                    (PredictedChainingCell *) pChainCells;
+                /*
+                 * Report the cell if it contains a sane class
+                 * pointer.
+                 */
+                if (cell->clazz != NULL &&
+                    cell->clazz !=
+                      (ClassObject *) PREDICTED_CHAIN_FAKE_CLAZZ) {
+                    callback(&cell->clazz);
+                }
+                pChainCells += CHAIN_CELL_PREDICTED_SIZE >> 2;
+            }
+        }
+    }
+
+    /* Scan the class pointer pool */
+    JitTraceDescription *desc = getTraceDescriptionPointer(base);
+    int descSize = getTraceDescriptionSize(desc);
+    int *classPointerP = (int *) ((char *) desc + descSize);
+    int numClassPointers = *classPointerP++;
+    for (; numClassPointers; numClassPointers--, classPointerP++) {
+        callback(classPointerP);
+    }
+}
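+
+/*
+ * Editorial sketch of the class pointer pool scanned above, matching the
+ * layout diagram in this file:
+ *
+ *     int          count;       // number of class pointers that follow
+ *     ClassObject *ptr[count];  // each slot's address is passed to callback
+ */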
+
+/*
+ * Scan the class pointers in each translation and pass their addresses to the
+ * callback function. Currently such pointers can be found in the class pointer
+ * pool and in the clazz field of the predicted chaining cells.
+ */
+void dvmJitScanAllClassPointers(void (*callback)(void *))
+{
+    UNPROTECT_CODE_CACHE(gDvmJit.codeCache, gDvmJit.codeCacheByteUsed);
+
+    /* Handle the inflight compilation first */
+    if (gDvmJit.inflightBaseAddr)
+        findClassPointersSingleTrace((char *) gDvmJit.inflightBaseAddr,
+                                     callback);
+
+    if (gDvmJit.pJitEntryTable != NULL) {
+        unsigned int traceIdx;
+        dvmLockMutex(&gDvmJit.tableLock);
+        for (traceIdx = 0; traceIdx < gDvmJit.jitTableSize; traceIdx++) {
+            const JitEntry *entry = &gDvmJit.pJitEntryTable[traceIdx];
+            if (entry->dPC &&
+                !entry->u.info.isMethodEntry &&
+                entry->codeAddress &&
+                (entry->codeAddress != dvmCompilerGetInterpretTemplate())) {
+                char *base = getTraceBase(entry);
+                findClassPointersSingleTrace(base, callback);
+            }
+        }
+        dvmUnlockMutex(&gDvmJit.tableLock);
+    }
+    UPDATE_CODE_CACHE_PATCHES();
+
+    PROTECT_CODE_CACHE(gDvmJit.codeCache, gDvmJit.codeCacheByteUsed);
+}
+
+/*
+ * Provide the final touch on the class object pointer pool to install the
+ * actual pointers. The thread has to be in the running state.
+ */
+void dvmJitInstallClassObjectPointers(CompilationUnit *cUnit, char *codeAddress)
+{
+    char *base = codeAddress - cUnit->headerSize;
+
+    /* Scan the class pointer pool */
+    JitTraceDescription *desc = getTraceDescriptionPointer(base);
+    int descSize = getTraceDescriptionSize(desc);
+    intptr_t *classPointerP = (intptr_t *) ((char *) desc + descSize);
+    int numClassPointers = *(int *)classPointerP++;
+    intptr_t *startClassPointerP = classPointerP;
+
+    /*
+     * Change the thread state to THREAD_RUNNING so that GC won't happen
+     * while the assembler looks up the class pointers. May suspend the current
+     * thread if there is a pending request before the state is actually
+     * changed to RUNNING.
+     */
+    dvmChangeStatus(gDvmJit.compilerThread, THREAD_RUNNING);
+
+    /*
+     * Unprotecting the code cache will need to acquire the code cache
+     * protection lock first. Doing so after the state change may increase the
+     * time spent in the RUNNING state (which may delay the next GC request
+     * should there be contention on codeCacheProtectionLock). In practice
+     * this is probably not going to happen often since a GC has just been served.
+     * More importantly, acquiring the lock before the state change will
+     * cause deadlock (b/4192964).
+     */
+    UNPROTECT_CODE_CACHE(startClassPointerP,
+                         numClassPointers * sizeof(intptr_t));
+#if defined(WITH_JIT_TUNING)
+    u8 startTime = dvmGetRelativeTimeUsec();
+#endif
+    for (;numClassPointers; numClassPointers--) {
+        CallsiteInfo *callsiteInfo = (CallsiteInfo *) *classPointerP;
+        ClassObject *clazz = dvmFindClassNoInit(
+            callsiteInfo->classDescriptor, callsiteInfo->classLoader);
+        assert(!strcmp(clazz->descriptor, callsiteInfo->classDescriptor));
+        *classPointerP++ = (intptr_t) clazz;
+    }
+
+    /*
+     * Register the base address so that if GC kicks in after the thread state
+     * has been changed to VMWAIT and before the compiled code is registered
+     * in the JIT table, its content can be patched if class objects are
+     * moved.
+     */
+    gDvmJit.inflightBaseAddr = base;
+
+#if defined(WITH_JIT_TUNING)
+    u8 blockTime = dvmGetRelativeTimeUsec() - startTime;
+    gDvmJit.compilerThreadBlockGCTime += blockTime;
+    if (blockTime > gDvmJit.maxCompilerThreadBlockGCTime)
+        gDvmJit.maxCompilerThreadBlockGCTime = blockTime;
+    gDvmJit.numCompilerThreadBlockGC++;
+#endif
+    UPDATE_CODE_CACHE_PATCHES();
+
+    PROTECT_CODE_CACHE(startClassPointerP, numClassPointers * sizeof(intptr_t));
+
+    /* Change the thread state back to VMWAIT */
+    dvmChangeStatus(gDvmJit.compilerThread, THREAD_VMWAIT);
+}
+
+#if defined(WITH_SELF_VERIFICATION)
+/*
+ * The following are used to keep compiled loads and stores from modifying
+ * memory during self verification mode.
+ *
+ * Stores do not modify memory. Instead, the address and value pair are stored
+ * into heapSpace. Addresses within heapSpace are unique. For accesses smaller
+ * than a word, the word containing the address is loaded first before being
+ * updated.
+ *
+ * Loads check heapSpace first and return data from there if an entry exists.
+ * Otherwise, data is loaded from memory as usual.
+ */
+
+/* Used to specify sizes of memory operations */
+enum {
+    kSVByte,
+    kSVSignedByte,
+    kSVHalfword,
+    kSVSignedHalfword,
+    kSVWord,
+    kSVDoubleword,
+    kSVVariable,
+};
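+
+/*
+ * Worked example (editorial): a kSVByte store to address 0x1003 masks the
+ * address to 0x1000, copies the whole word at 0x1000 into a new ShadowHeap
+ * entry if none exists, and then rewrites only byte 3 of that entry, so
+ * real memory is never modified.
+ */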
+
+/* Load the value of a decoded register from the stack */
+static int selfVerificationMemRegLoad(int* sp, int reg)
+{
+    assert(0); /* MIPSTODO: retarget function */
+    return *(sp + reg);
+}
+
+/* Load the value of a decoded doubleword register from the stack */
+static s8 selfVerificationMemRegLoadDouble(int* sp, int reg)
+{
+    assert(0); /* MIPSTODO: retarget function */
+    return *((s8*)(sp + reg));
+}
+
+/* Store the value of a decoded register out to the stack */
+static void selfVerificationMemRegStore(int* sp, int data, int reg)
+{
+    assert(0); /* MIPSTODO: retarget function */
+    *(sp + reg) = data;
+}
+
+/* Store the value of a decoded doubleword register out to the stack */
+static void selfVerificationMemRegStoreDouble(int* sp, s8 data, int reg)
+{
+    assert(0); /* MIPSTODO: retarget function */
+    *((s8*)(sp + reg)) = data;
+}
+
+/*
+ * Load the specified size of data from the specified address, checking
+ * heapSpace first if Self Verification mode wrote to it previously, and
+ * falling back to actual memory otherwise.
+ */
+static int selfVerificationLoad(int addr, int size)
+{
+    assert(0); /* MIPSTODO: retarget function */
+    Thread *self = dvmThreadSelf();
+    ShadowSpace *shadowSpace = self->shadowSpace;
+    ShadowHeap *heapSpacePtr;
+
+    int data;
+    int maskedAddr = addr & 0xFFFFFFFC;
+    int alignment = addr & 0x3;
+
+    for (heapSpacePtr = shadowSpace->heapSpace;
+         heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
+        if (heapSpacePtr->addr == maskedAddr) {
+            addr = ((unsigned int) &(heapSpacePtr->data)) | alignment;
+            break;
+        }
+    }
+
+    switch (size) {
+        case kSVByte:
+            data = *((u1*) addr);
+            break;
+        case kSVSignedByte:
+            data = *((s1*) addr);
+            break;
+        case kSVHalfword:
+            data = *((u2*) addr);
+            break;
+        case kSVSignedHalfword:
+            data = *((s2*) addr);
+            break;
+        case kSVWord:
+            data = *((u4*) addr);
+            break;
+        default:
+            LOGE("*** ERROR: BAD SIZE IN selfVerificationLoad: %d", size);
+            data = 0;
+            dvmAbort();
+    }
+
+    //LOGD("*** HEAP LOAD: Addr: %#x Data: %#x Size: %d", addr, data, size);
+    return data;
+}
+
+/* Like selfVerificationLoad, but specifically for doublewords */
+static s8 selfVerificationLoadDoubleword(int addr)
+{
+    assert(0); /* MIPSTODO: retarget function */
+    Thread *self = dvmThreadSelf();
+    ShadowSpace* shadowSpace = self->shadowSpace;
+    ShadowHeap* heapSpacePtr;
+
+    int addr2 = addr+4;
+    unsigned int data = *((unsigned int*) addr);
+    unsigned int data2 = *((unsigned int*) addr2);
+
+    for (heapSpacePtr = shadowSpace->heapSpace;
+         heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
+        if (heapSpacePtr->addr == addr) {
+            data = heapSpacePtr->data;
+        } else if (heapSpacePtr->addr == addr2) {
+            data2 = heapSpacePtr->data;
+        }
+    }
+
+    //LOGD("*** HEAP LOAD DOUBLEWORD: Addr: %#x Data: %#x Data2: %#x",
+    //    addr, data, data2);
+    return (((s8) data2) << 32) | data;
+}
+
+/*
+ * Handles a store of a specified size of data to a specified address.
+ * This gets logged as an addr/data pair in heapSpace instead of modifying
+ * memory.  Addresses in heapSpace are unique, and accesses smaller than a
+ * word pull the entire word from memory first before updating.
+ */
+static void selfVerificationStore(int addr, int data, int size)
+{
+    assert(0); /* MIPSTODO: retarget function */
+    Thread *self = dvmThreadSelf();
+    ShadowSpace *shadowSpace = self->shadowSpace;
+    ShadowHeap *heapSpacePtr;
+
+    int maskedAddr = addr & 0xFFFFFFFC;
+    int alignment = addr & 0x3;
+
+    //LOGD("*** HEAP STORE: Addr: %#x Data: %#x Size: %d", addr, data, size);
+
+    for (heapSpacePtr = shadowSpace->heapSpace;
+         heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
+        if (heapSpacePtr->addr == maskedAddr) break;
+    }
+
+    if (heapSpacePtr == shadowSpace->heapSpaceTail) {
+        heapSpacePtr->addr = maskedAddr;
+        heapSpacePtr->data = *((unsigned int*) maskedAddr);
+        shadowSpace->heapSpaceTail++;
+    }
+
+    addr = ((unsigned int) &(heapSpacePtr->data)) | alignment;
+    switch (size) {
+        case kSVByte:
+            *((u1*) addr) = data;
+            break;
+        case kSVSignedByte:
+            *((s1*) addr) = data;
+            break;
+        case kSVHalfword:
+            *((u2*) addr) = data;
+            break;
+        case kSVSignedHalfword:
+            *((s2*) addr) = data;
+            break;
+        case kSVWord:
+            *((u4*) addr) = data;
+            break;
+        default:
+            LOGE("*** ERROR: BAD SIZE IN selfVerificationSave: %d", size);
+            dvmAbort();
+    }
+}
+
+/* Like selfVerificationStore, but specifically for doublewords */
+static void selfVerificationStoreDoubleword(int addr, s8 double_data)
+{
+    assert(0); /* MIPSTODO: retarget function */
+    Thread *self = dvmThreadSelf();
+    ShadowSpace *shadowSpace = self->shadowSpace;
+    ShadowHeap *heapSpacePtr;
+
+    int addr2 = addr+4;
+    int data = double_data;
+    int data2 = double_data >> 32;
+    bool store1 = false, store2 = false;
+
+    //LOGD("*** HEAP STORE DOUBLEWORD: Addr: %#x Data: %#x, Data2: %#x",
+    //    addr, data, data2);
+
+    for (heapSpacePtr = shadowSpace->heapSpace;
+         heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
+        if (heapSpacePtr->addr == addr) {
+            heapSpacePtr->data = data;
+            store1 = true;
+        } else if (heapSpacePtr->addr == addr2) {
+            heapSpacePtr->data = data2;
+            store2 = true;
+        }
+    }
+
+    if (!store1) {
+        shadowSpace->heapSpaceTail->addr = addr;
+        shadowSpace->heapSpaceTail->data = data;
+        shadowSpace->heapSpaceTail++;
+    }
+    if (!store2) {
+        shadowSpace->heapSpaceTail->addr = addr2;
+        shadowSpace->heapSpaceTail->data = data2;
+        shadowSpace->heapSpaceTail++;
+    }
+}
+
+/*
+ * Decodes the memory instruction at the address specified in the link
+ * register. All registers (r0-r12,lr) and fp registers (d0-d15) are stored
+ * consecutively on the stack beginning at the specified stack pointer.
+ * Calls the proper Self Verification handler for the memory instruction and
+ * updates the link register to point past the decoded memory instruction.
+ */
+void dvmSelfVerificationMemOpDecode(int lr, int* sp)
+{
+    assert(0); /* MIPSTODO: retarget function */
+    enum {
+        kMemOpLdrPcRel = 0x09, // ldr(3)  [01001] rd[10..8] imm_8[7..0]
+        kMemOpRRR      = 0x0A, // Full opcode is 7 bits
+        kMemOp2Single  = 0x0A, // Used for Vstrs and Vldrs
+        kMemOpRRR2     = 0x0B, // Full opcode is 7 bits
+        kMemOp2Double  = 0x0B, // Used for Vstrd and Vldrd
+        kMemOpStrRRI5  = 0x0C, // str(1)  [01100] imm_5[10..6] rn[5..3] rd[2..0]
+        kMemOpLdrRRI5  = 0x0D, // ldr(1)  [01101] imm_5[10..6] rn[5..3] rd[2..0]
+        kMemOpStrbRRI5 = 0x0E, // strb(1) [01110] imm_5[10..6] rn[5..3] rd[2..0]
+        kMemOpLdrbRRI5 = 0x0F, // ldrb(1) [01111] imm_5[10..6] rn[5..3] rd[2..0]
+        kMemOpStrhRRI5 = 0x10, // strh(1) [10000] imm_5[10..6] rn[5..3] rd[2..0]
+        kMemOpLdrhRRI5 = 0x11, // ldrh(1) [10001] imm_5[10..6] rn[5..3] rd[2..0]
+        kMemOpLdrSpRel = 0x13, // ldr(4)  [10011] rd[10..8] imm_8[7..0]
+        kMemOpStmia    = 0x18, // stmia   [11000] rn[10..8] reglist [7..0]
+        kMemOpLdmia    = 0x19, // ldmia   [11001] rn[10..8] reglist [7..0]
+        kMemOpStrRRR   = 0x28, // str(2)  [0101000] rm[8..6] rn[5..3] rd[2..0]
+        kMemOpStrhRRR  = 0x29, // strh(2) [0101001] rm[8..6] rn[5..3] rd[2..0]
+        kMemOpStrbRRR  = 0x2A, // strb(2) [0101010] rm[8..6] rn[5..3] rd[2..0]
+        kMemOpLdrsbRRR = 0x2B, // ldrsb   [0101011] rm[8..6] rn[5..3] rd[2..0]
+        kMemOpLdrRRR   = 0x2C, // ldr(2)  [0101100] rm[8..6] rn[5..3] rd[2..0]
+        kMemOpLdrhRRR  = 0x2D, // ldrh(2) [0101101] rm[8..6] rn[5..3] rd[2..0]
+        kMemOpLdrbRRR  = 0x2E, // ldrb(2) [0101110] rm[8..6] rn[5..3] rd[2..0]
+        kMemOpLdrshRRR = 0x2F, // ldrsh   [0101111] rm[8..6] rn[5..3] rd[2..0]
+        kMemOp2Stmia   = 0xE88, // stmia  [111010001000[ rn[19..16] mask[15..0]
+        kMemOp2Ldmia   = 0xE89, // ldmia  [111010001001[ rn[19..16] mask[15..0]
+        kMemOp2Stmia2  = 0xE8A, // stmia  [111010001010[ rn[19..16] mask[15..0]
+        kMemOp2Ldmia2  = 0xE8B, // ldmia  [111010001011[ rn[19..16] mask[15..0]
+        kMemOp2Vstr    = 0xED8, // Used for Vstrs and Vstrd
+        kMemOp2Vldr    = 0xED9, // Used for Vldrs and Vldrd
+        kMemOp2Vstr2   = 0xEDC, // Used for Vstrs and Vstrd
+        kMemOp2Vldr2   = 0xEDD, // Used for Vstrs and Vstrd
+        kMemOp2StrbRRR = 0xF80, /* str rt,[rn,rm,LSL #imm] [111110000000]
+                                rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0] */
+        kMemOp2LdrbRRR = 0xF81, /* ldrb rt,[rn,rm,LSL #imm] [111110000001]
+                                rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0] */
+        kMemOp2StrhRRR = 0xF82, /* str rt,[rn,rm,LSL #imm] [111110000010]
+                                rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0] */
+        kMemOp2LdrhRRR = 0xF83, /* ldrh rt,[rn,rm,LSL #imm] [111110000011]
+                                rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0] */
+        kMemOp2StrRRR  = 0xF84, /* str rt,[rn,rm,LSL #imm] [111110000100]
+                                rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0] */
+        kMemOp2LdrRRR  = 0xF85, /* ldr rt,[rn,rm,LSL #imm] [111110000101]
+                                rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0] */
+        kMemOp2StrbRRI12 = 0xF88, /* strb rt,[rn,#imm12] [111110001000]
+                                       rt[15..12] rn[19..16] imm12[11..0] */
+        kMemOp2LdrbRRI12 = 0xF89, /* ldrb rt,[rn,#imm12] [111110001001]
+                                       rt[15..12] rn[19..16] imm12[11..0] */
+        kMemOp2StrhRRI12 = 0xF8A, /* strh rt,[rn,#imm12] [111110001010]
+                                       rt[15..12] rn[19..16] imm12[11..0] */
+        kMemOp2LdrhRRI12 = 0xF8B, /* ldrh rt,[rn,#imm12] [111110001011]
+                                       rt[15..12] rn[19..16] imm12[11..0] */
+        kMemOp2StrRRI12 = 0xF8C, /* str(Imm,T3) rd,[rn,#imm12] [111110001100]
+                                       rn[19..16] rt[15..12] imm12[11..0] */
+        kMemOp2LdrRRI12 = 0xF8D, /* ldr(Imm,T3) rd,[rn,#imm12] [111110001101]
+                                       rn[19..16] rt[15..12] imm12[11..0] */
+        kMemOp2LdrsbRRR = 0xF91, /* ldrsb rt,[rn,rm,LSL #imm] [111110010001]
+                                rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0] */
+        kMemOp2LdrshRRR = 0xF93, /* ldrsh rt,[rn,rm,LSL #imm] [111110010011]
+                                rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0] */
+        kMemOp2LdrsbRRI12 = 0xF99, /* ldrsb rt,[rn,#imm12] [111110011001]
+                                       rt[15..12] rn[19..16] imm12[11..0] */
+        kMemOp2LdrshRRI12 = 0xF9B, /* ldrsh rt,[rn,#imm12] [111110011011]
+                                       rt[15..12] rn[19..16] imm12[11..0] */
+        kMemOp2        = 0xE000, // top 3 bits set indicates Thumb2
+    };
+
+    int addr, offset, data;
+    long long double_data;
+    int size = kSVWord;
+    bool store = false;
+    unsigned int *lr_masked = (unsigned int *) (lr & 0xFFFFFFFE);
+    unsigned int insn = *lr_masked;
+
+    int old_lr = selfVerificationMemRegLoad(sp, 13);
+
+    if ((insn & kMemOp2) == kMemOp2) {
+        insn = (insn << 16) | (insn >> 16);
+        //LOGD("*** THUMB2 - Addr: %#x Insn: %#x", lr, insn);
+
+        int opcode12 = (insn >> 20) & 0xFFF;
+        int opcode6 = (insn >> 6) & 0x3F;
+        int opcode4 = (insn >> 8) & 0xF;
+        int imm2 = (insn >> 4) & 0x3;
+        int imm8 = insn & 0xFF;
+        int imm12 = insn & 0xFFF;
+        int rd = (insn >> 12) & 0xF;
+        int rm = insn & 0xF;
+        int rn = (insn >> 16) & 0xF;
+        int rt = (insn >> 12) & 0xF;
+        bool wBack = true;
+
+        // Update the link register
+        selfVerificationMemRegStore(sp, old_lr+4, 13);
+
+        // Determine whether the mem op is a store or load
+        switch (opcode12) {
+            case kMemOp2Stmia:
+            case kMemOp2Stmia2:
+            case kMemOp2Vstr:
+            case kMemOp2Vstr2:
+            case kMemOp2StrbRRR:
+            case kMemOp2StrhRRR:
+            case kMemOp2StrRRR:
+            case kMemOp2StrbRRI12:
+            case kMemOp2StrhRRI12:
+            case kMemOp2StrRRI12:
+                store = true;
+        }
+
+        // Determine the size of the mem access
+        switch (opcode12) {
+            case kMemOp2StrbRRR:
+            case kMemOp2LdrbRRR:
+            case kMemOp2StrbRRI12:
+            case kMemOp2LdrbRRI12:
+                size = kSVByte;
+                break;
+            case kMemOp2LdrsbRRR:
+            case kMemOp2LdrsbRRI12:
+                size = kSVSignedByte;
+                break;
+            case kMemOp2StrhRRR:
+            case kMemOp2LdrhRRR:
+            case kMemOp2StrhRRI12:
+            case kMemOp2LdrhRRI12:
+                size = kSVHalfword;
+                break;
+            case kMemOp2LdrshRRR:
+            case kMemOp2LdrshRRI12:
+                size = kSVSignedHalfword;
+                break;
+            case kMemOp2Vstr:
+            case kMemOp2Vstr2:
+            case kMemOp2Vldr:
+            case kMemOp2Vldr2:
+                if (opcode4 == kMemOp2Double) size = kSVDoubleword;
+                break;
+            case kMemOp2Stmia:
+            case kMemOp2Ldmia:
+            case kMemOp2Stmia2:
+            case kMemOp2Ldmia2:
+                size = kSVVariable;
+                break;
+        }
+
+        // Load the value of the address
+        addr = selfVerificationMemRegLoad(sp, rn);
+
+        // Figure out the offset
+        switch (opcode12) {
+            case kMemOp2Vstr:
+            case kMemOp2Vstr2:
+            case kMemOp2Vldr:
+            case kMemOp2Vldr2:
+                offset = imm8 << 2;
+                if (opcode4 == kMemOp2Single) {
+                    rt = rd << 1;
+                    if (insn & 0x400000) rt |= 0x1;
+                } else if (opcode4 == kMemOp2Double) {
+                    if (insn & 0x400000) rt |= 0x10;
+                    rt = rt << 1;
+                } else {
+                    LOGE("*** ERROR: UNRECOGNIZED VECTOR MEM OP: %x", opcode4);
+                    dvmAbort();
+                }
+                rt += 14;
+                break;
+            case kMemOp2StrbRRR:
+            case kMemOp2LdrbRRR:
+            case kMemOp2StrhRRR:
+            case kMemOp2LdrhRRR:
+            case kMemOp2StrRRR:
+            case kMemOp2LdrRRR:
+            case kMemOp2LdrsbRRR:
+            case kMemOp2LdrshRRR:
+                offset = selfVerificationMemRegLoad(sp, rm) << imm2;
+                break;
+            case kMemOp2StrbRRI12:
+            case kMemOp2LdrbRRI12:
+            case kMemOp2StrhRRI12:
+            case kMemOp2LdrhRRI12:
+            case kMemOp2StrRRI12:
+            case kMemOp2LdrRRI12:
+            case kMemOp2LdrsbRRI12:
+            case kMemOp2LdrshRRI12:
+                offset = imm12;
+                break;
+            case kMemOp2Stmia:
+            case kMemOp2Ldmia:
+                wBack = false;
+            case kMemOp2Stmia2:
+            case kMemOp2Ldmia2:
+                offset = 0;
+                break;
+            default:
+                LOGE("*** ERROR: UNRECOGNIZED THUMB2 MEM OP: %x", opcode12);
+                offset = 0;
+                dvmAbort();
+        }
+
+        // Handle the decoded mem op accordingly
+        if (store) {
+            if (size == kSVVariable) {
+                LOGD("*** THUMB2 STMIA CURRENTLY UNUSED (AND UNTESTED)");
+                int i;
+                int regList = insn & 0xFFFF;
+                for (i = 0; i < 16; i++) {
+                    if (regList & 0x1) {
+                        data = selfVerificationMemRegLoad(sp, i);
+                        selfVerificationStore(addr, data, kSVWord);
+                        addr += 4;
+                    }
+                    regList = regList >> 1;
+                }
+                if (wBack) selfVerificationMemRegStore(sp, addr, rn);
+            } else if (size == kSVDoubleword) {
+                double_data = selfVerificationMemRegLoadDouble(sp, rt);
+                selfVerificationStoreDoubleword(addr+offset, double_data);
+            } else {
+                data = selfVerificationMemRegLoad(sp, rt);
+                selfVerificationStore(addr+offset, data, size);
+            }
+        } else {
+            if (size == kSVVariable) {
+                LOGD("*** THUMB2 LDMIA CURRENTLY UNUSED (AND UNTESTED)");
+                int i;
+                int regList = insn & 0xFFFF;
+                for (i = 0; i < 16; i++) {
+                    if (regList & 0x1) {
+                        data = selfVerificationLoad(addr, kSVWord);
+                        selfVerificationMemRegStore(sp, data, i);
+                        addr += 4;
+                    }
+                    regList = regList >> 1;
+                }
+                if (wBack) selfVerificationMemRegStore(sp, addr, rn);
+            } else if (size == kSVDoubleword) {
+                double_data = selfVerificationLoadDoubleword(addr+offset);
+                selfVerificationMemRegStoreDouble(sp, double_data, rt);
+            } else {
+                data = selfVerificationLoad(addr+offset, size);
+                selfVerificationMemRegStore(sp, data, rt);
+            }
+        }
+    } else {
+        //LOGD("*** THUMB - Addr: %#x Insn: %#x", lr, insn);
+
+        // Update the link register
+        selfVerificationMemRegStore(sp, old_lr+2, 13);
+
+        int opcode5 = (insn >> 11) & 0x1F;
+        int opcode7 = (insn >> 9) & 0x7F;
+        int imm = (insn >> 6) & 0x1F;
+        int rd = (insn >> 8) & 0x7;
+        int rm = (insn >> 6) & 0x7;
+        int rn = (insn >> 3) & 0x7;
+        int rt = insn & 0x7;
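+        /*
+         * Worked example (a sketch, assuming the usual Thumb encoding): for
+         * "ldr r0, [r1, #4]" insn is 0x6848, giving opcode5 = kMemOpLdrRRI5,
+         * imm = 1, rn = 1 and rt = 0; the offset computed below is
+         * imm << 2 = 4.
+         */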
+
+        // Determine whether the mem op is a store or load
+        switch (opcode5) {
+            case kMemOpRRR:
+                switch (opcode7) {
+                    case kMemOpStrRRR:
+                    case kMemOpStrhRRR:
+                    case kMemOpStrbRRR:
+                        store = true;
+                }
+                break;
+            case kMemOpStrRRI5:
+            case kMemOpStrbRRI5:
+            case kMemOpStrhRRI5:
+            case kMemOpStmia:
+                store = true;
+        }
+
+        // Determine the size of the mem access
+        switch (opcode5) {
+            case kMemOpRRR:
+            case kMemOpRRR2:
+                switch (opcode7) {
+                    case kMemOpStrbRRR:
+                    case kMemOpLdrbRRR:
+                        size = kSVByte;
+                        break;
+                    case kMemOpLdrsbRRR:
+                        size = kSVSignedByte;
+                        break;
+                    case kMemOpStrhRRR:
+                    case kMemOpLdrhRRR:
+                        size = kSVHalfword;
+                        break;
+                    case kMemOpLdrshRRR:
+                        size = kSVSignedHalfword;
+                        break;
+                }
+                break;
+            case kMemOpStrbRRI5:
+            case kMemOpLdrbRRI5:
+                size = kSVByte;
+                break;
+            case kMemOpStrhRRI5:
+            case kMemOpLdrhRRI5:
+                size = kSVHalfword;
+                break;
+            case kMemOpStmia:
+            case kMemOpLdmia:
+                size = kSVVariable;
+                break;
+        }
+
+        // Load the value of the address
+        if (opcode5 == kMemOpLdrPcRel)
+            addr = selfVerificationMemRegLoad(sp, 4);
+        else if (opcode5 == kMemOpStmia || opcode5 == kMemOpLdmia)
+            addr = selfVerificationMemRegLoad(sp, rd);
+        else
+            addr = selfVerificationMemRegLoad(sp, rn);
+
+        // Figure out the offset
+        switch (opcode5) {
+            case kMemOpLdrPcRel:
+                offset = (insn & 0xFF) << 2;
+                rt = rd;
+                break;
+            case kMemOpRRR:
+            case kMemOpRRR2:
+                offset = selfVerificationMemRegLoad(sp, rm);
+                break;
+            case kMemOpStrRRI5:
+            case kMemOpLdrRRI5:
+                offset = imm << 2;
+                break;
+            case kMemOpStrhRRI5:
+            case kMemOpLdrhRRI5:
+                offset = imm << 1;
+                break;
+            case kMemOpStrbRRI5:
+            case kMemOpLdrbRRI5:
+                offset = imm;
+                break;
+            case kMemOpStmia:
+            case kMemOpLdmia:
+                offset = 0;
+                break;
+            default:
+                LOGE("*** ERROR: UNRECOGNIZED THUMB MEM OP: %x", opcode5);
+                offset = 0;
+                dvmAbort();
+        }
+
+        // Handle the decoded mem op accordingly
+        if (store) {
+            if (size == kSVVariable) {
+                int i;
+                int regList = insn & 0xFF;
+                for (i = 0; i < 8; i++) {
+                    if (regList & 0x1) {
+                        data = selfVerificationMemRegLoad(sp, i);
+                        selfVerificationStore(addr, data, kSVWord);
+                        addr += 4;
+                    }
+                    regList = regList >> 1;
+                }
+                selfVerificationMemRegStore(sp, addr, rd);
+            } else {
+                data = selfVerificationMemRegLoad(sp, rt);
+                selfVerificationStore(addr+offset, data, size);
+            }
+        } else {
+            if (size == kSVVariable) {
+                bool wBack = true;
+                int i;
+                int regList = insn & 0xFF;
+                for (i = 0; i < 8; i++) {
+                    if (regList & 0x1) {
+                        if (i == rd) wBack = false;
+                        data = selfVerificationLoad(addr, kSVWord);
+                        selfVerificationMemRegStore(sp, data, i);
+                        addr += 4;
+                    }
+                    regList = regList >> 1;
+                }
+                if (wBack) selfVerificationMemRegStore(sp, addr, rd);
+            } else {
+                data = selfVerificationLoad(addr+offset, size);
+                selfVerificationMemRegStore(sp, data, rt);
+            }
+        }
+    }
+}
+#endif
diff --git a/vm/compiler/codegen/mips/CalloutHelper.h b/vm/compiler/codegen/mips/CalloutHelper.h
new file mode 100644
index 0000000..6e2343d
--- /dev/null
+++ b/vm/compiler/codegen/mips/CalloutHelper.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DALVIK_VM_COMPILER_CODEGEN_MIPS_CALLOUT_HELPER_H_
+#define DALVIK_VM_COMPILER_CODEGEN_MIPS_CALLOUT_HELPER_H_
+
+#include "Dalvik.h"
+
+/*
+ * Declare/comment prototypes of all native callout functions invoked by the
+ * JIT'ed code here and use the LOAD_FUNC_ADDR macro to load the address into
+ * a register. In this way we have a centralized place to find all native
+ * helper functions, and we can grep for LOAD_FUNC_ADDR to find all the
+ * callsites.
+ */
+
+/* Load a statically compiled function address as a constant */
+#define LOAD_FUNC_ADDR(cUnit, reg, addr) loadConstant(cUnit, reg, addr)
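+
+/*
+ * For illustration, a typical callsite elsewhere in this change pairs
+ * LOAD_FUNC_ADDR with an indirect call through r_T9 and then restores r_GP:
+ *
+ *     LOAD_FUNC_ADDR(cUnit, r_T9, (int)funct);
+ *     opReg(cUnit, kOpBlx, r_T9);
+ *     newLIR3(cUnit, kMipsLw, r_GP, STACK_OFFSET_GP, r_SP);
+ */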
+
+/* Conversions */
+extern "C" float __floatsisf(int op1);             // OP_INT_TO_FLOAT
+extern "C" int __fixsfsi(float op1);               // OP_FLOAT_TO_INT
+extern "C" float __truncdfsf2(double op1);         // OP_DOUBLE_TO_FLOAT
+extern "C" double __extendsfdf2(float op1);        // OP_FLOAT_TO_DOUBLE
+extern "C" double __floatsidf(int op1);            // OP_INT_TO_DOUBLE
+extern "C" int __fixdfsi(double op1);              // OP_DOUBLE_TO_INT
+extern "C" float __floatdisf(long long op1);       // OP_LONG_TO_FLOAT
+extern "C" double __floatdidf(long long op1);      // OP_LONG_TO_DOUBLE
+extern "C" long long __fixsfdi(float op1);         // OP_FLOAT_TO_LONG
+extern "C" long long __fixdfdi(double op1);        // OP_DOUBLE_TO_LONG
+
+/* Single-precision FP arithmetic */
+extern "C" float __addsf3(float a, float b);   // OP_ADD_FLOAT[_2ADDR]
+extern "C" float __subsf3(float a, float b);   // OP_SUB_FLOAT[_2ADDR]
+extern "C" float __divsf3(float a, float b);   // OP_DIV_FLOAT[_2ADDR]
+extern "C" float __mulsf3(float a, float b);   // OP_MUL_FLOAT[_2ADDR]
+extern "C" float fmodf(float a, float b);          // OP_REM_FLOAT[_2ADDR]
+
+/* Double-precision FP arithmetic */
+extern "C" double __adddf3(double a, double b); // OP_ADD_DOUBLE[_2ADDR]
+extern "C" double __subdf3(double a, double b); // OP_SUB_DOUBLE[_2ADDR]
+extern "C" double __divdf3(double a, double b); // OP_DIV_DOUBLE[_2ADDR]
+extern "C" double __muldf3(double a, double b); // OP_MUL_DOUBLE[_2ADDR]
+extern "C" double fmod(double a, double b);         // OP_REM_DOUBLE[_2ADDR]
+
+/* Long long arithmetic - OP_REM_LONG[_2ADDR] & OP_DIV_LONG[_2ADDR] */
+extern "C" long long __divdi3(long long op1, long long op2);
+extern "C" long long __moddi3(long long op1, long long op2);
+
+/* Originally declared in Sync.h */
+bool dvmUnlockObject(struct Thread* self, struct Object* obj); //OP_MONITOR_EXIT
+
+/* Originally declared in oo/TypeCheck.h */
+bool dvmCanPutArrayElement(const ClassObject* elemClass,   // OP_APUT_OBJECT
+                           const ClassObject* arrayClass);
+int dvmInstanceofNonTrivial(const ClassObject* instance,   // OP_CHECK_CAST &&
+                            const ClassObject* clazz);     // OP_INSTANCE_OF
+
+/* Originally declared in oo/Array.h */
+ArrayObject* dvmAllocArrayByClass(ClassObject* arrayClass, // OP_NEW_ARRAY
+                                  size_t length, int allocFlags);
+
+/* Originally declared in interp/InterpDefs.h */
+bool dvmInterpHandleFillArrayData(ArrayObject* arrayObject,// OP_FILL_ARRAY_DATA
+                                  const u2* arrayData);
+
+/* Originally declared in compiler/codegen/mips/Assemble.c */
+const Method *dvmJitToPatchPredictedChain(const Method *method,
+                                          Thread *self,
+                                          PredictedChainingCell *cell,
+                                          const ClassObject *clazz);
+
+/*
+ * Switch dispatch offset calculation for OP_PACKED_SWITCH & OP_SPARSE_SWITCH
+ * Used in CodegenDriver.cpp
+ * static s8 findPackedSwitchIndex(const u2* switchData, int testVal, int pc);
+ * static s8 findSparseSwitchIndex(const u2* switchData, int testVal, int pc);
+ */
+
+/*
+ * Resolve interface callsites - OP_INVOKE_INTERFACE & OP_INVOKE_INTERFACE_RANGE
+ *
+ * Originally declared in mterp/common/FindInterface.h and only commented here
+ * due to the INLINE attribute.
+ *
+ * INLINE Method* dvmFindInterfaceMethodInCache(ClassObject* thisClass,
+ *  u4 methodIdx, const Method* method, DvmDex* methodClassDex)
+ */
+
+/* Originally declared in alloc/Alloc.h */
+Object* dvmAllocObject(ClassObject* clazz, int flags);  // OP_NEW_INSTANCE
+
+/*
+ * Functions declared in gDvmInlineOpsTable[] are used for
+ * OP_EXECUTE_INLINE & OP_EXECUTE_INLINE_RANGE.
+ */
+extern "C" double sqrt(double x);  // INLINE_MATH_SQRT
+
+/*
+ * The following functions are invoked through the compiler templates (declared
+ * in compiler/template/armv5te/footer.S):
+ *
+ *      __aeabi_cdcmple         // CMPG_DOUBLE
+ *      __aeabi_cfcmple         // CMPG_FLOAT
+ *      dvmLockObject           // MONITOR_ENTER
+ */
+
+#endif  // DALVIK_VM_COMPILER_CODEGEN_MIPS_CALLOUT_HELPER_H_
diff --git a/vm/compiler/codegen/mips/Codegen.h b/vm/compiler/codegen/mips/Codegen.h
new file mode 100644
index 0000000..107fa86
--- /dev/null
+++ b/vm/compiler/codegen/mips/Codegen.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This file contains register allocation support and is intended to be
+ * included by:
+ *
+ *        Codegen-$(TARGET_ARCH_VARIANT).c
+ *
+ */
+
+#include "compiler/CompilerIR.h"
+#include "CalloutHelper.h"
+
+#if defined(_CODEGEN_C)
+/*
+ * loadConstant() sometimes needs to add a small imm to a pre-existing constant
+ */
+static MipsLIR *opRegImm(CompilationUnit *cUnit, OpKind op, int rDestSrc1,
+                        int value);
+static MipsLIR *opRegReg(CompilationUnit *cUnit, OpKind op, int rDestSrc1,
+                        int rSrc2);
+
+/* Forward-declare the portable versions due to circular dependency */
+static bool genArithOpFloatPortable(CompilationUnit *cUnit, MIR *mir,
+                                    RegLocation rlDest, RegLocation rlSrc1,
+                                    RegLocation rlSrc2);
+
+static bool genArithOpDoublePortable(CompilationUnit *cUnit, MIR *mir,
+                                     RegLocation rlDest, RegLocation rlSrc1,
+                                     RegLocation rlSrc2);
+
+static bool genConversionPortable(CompilationUnit *cUnit, MIR *mir);
+
+static void genMonitorPortable(CompilationUnit *cUnit, MIR *mir);
+
+static void genInterpSingleStep(CompilationUnit *cUnit, MIR *mir);
+
+#endif
+
+#if defined(WITH_SELF_VERIFICATION)
+/* Self Verification memory instruction decoder */
+extern "C" void dvmSelfVerificationMemOpDecode(int lr, int* sp);
+#endif
+
+/*
+ * Architecture-dependent register allocation routines implemented in
+ * Mips/Ralloc.c
+ */
+extern int dvmCompilerAllocTypedTempPair(CompilationUnit *cUnit,
+                                         bool fpHint, int regClass);
+
+extern int dvmCompilerAllocTypedTemp(CompilationUnit *cUnit, bool fpHint,
+                                     int regClass);
+
+extern MipsLIR* dvmCompilerRegCopyNoInsert(CompilationUnit *cUnit, int rDest,
+                                          int rSrc);
+
+extern MipsLIR* dvmCompilerRegCopy(CompilationUnit *cUnit, int rDest, int rSrc);
+
+extern void dvmCompilerRegCopyWide(CompilationUnit *cUnit, int destLo,
+                                   int destHi, int srcLo, int srcHi);
+
+extern void dvmCompilerSetupResourceMasks(MipsLIR *lir);
+
+extern void dvmCompilerFlushRegImpl(CompilationUnit *cUnit, int rBase,
+                                    int displacement, int rSrc, OpSize size);
+
+extern void dvmCompilerFlushRegWideImpl(CompilationUnit *cUnit, int rBase,
+                                        int displacement, int rSrcLo,
+                                        int rSrcHi);
diff --git a/vm/compiler/codegen/mips/CodegenCommon.cpp b/vm/compiler/codegen/mips/CodegenCommon.cpp
new file mode 100644
index 0000000..287e8c1
--- /dev/null
+++ b/vm/compiler/codegen/mips/CodegenCommon.cpp
@@ -0,0 +1,437 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This file contains codegen and support common to all supported
+ * Mips variants.  It is included by:
+ *
+ *        Codegen-$(TARGET_ARCH_VARIANT).c
+ *
+ * which combines this common code with specific support found in the
+ * applicable directory below this one.
+ */
+
+#include "compiler/Loop.h"
+
+/* Array holding the entry offset of each template relative to the first one */
+static intptr_t templateEntryOffsets[TEMPLATE_LAST_MARK];
+
+/* Track exercised opcodes */
+static int opcodeCoverage[256];
+
+static void setMemRefType(MipsLIR *lir, bool isLoad, int memType)
+{
+    /* MIPSTODO simplify setMemRefType() */
+    u8 *maskPtr;
+    u8 mask = ENCODE_MEM;
+    assert(EncodingMap[lir->opcode].flags & (IS_LOAD | IS_STORE));
+
+    if (isLoad) {
+        maskPtr = &lir->useMask;
+    } else {
+        maskPtr = &lir->defMask;
+    }
+    /* Clear out the memref flags */
+    *maskPtr &= ~mask;
+    /* ..and then add back the one we need */
+    switch(memType) {
+        case kLiteral:
+            assert(isLoad);
+            *maskPtr |= ENCODE_LITERAL;
+            break;
+        case kDalvikReg:
+            *maskPtr |= ENCODE_DALVIK_REG;
+            break;
+        case kHeapRef:
+            *maskPtr |= ENCODE_HEAP_REF;
+            break;
+        case kMustNotAlias:
+            /* Currently only loads can be marked as kMustNotAlias */
+            assert(!(EncodingMap[lir->opcode].flags & IS_STORE));
+            *maskPtr |= ENCODE_MUST_NOT_ALIAS;
+            break;
+        default:
+            LOGE("Jit: invalid memref kind - %d", memType);
+            assert(0);  // Bail if debug build, set worst-case in the field
+            *maskPtr |= ENCODE_ALL;
+    }
+}
+
+/*
+ * Mark load/store instructions that access Dalvik registers through rFP +
+ * offset.
+ */
+static void annotateDalvikRegAccess(MipsLIR *lir, int regId, bool isLoad)
+{
+    /* MIPSTODO simplify annotateDalvikRegAccess() */
+    setMemRefType(lir, isLoad, kDalvikReg);
+
+    /*
+     * Store the Dalvik register id in aliasInfo. Mark the MSB if it is a 64-bit
+     * access.
+     */
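+    /* e.g. a 64-bit access with register id 4 yields aliasInfo 0x80000004 */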
+    lir->aliasInfo = regId;
+    if (DOUBLEREG(lir->operands[0])) {
+        lir->aliasInfo |= 0x80000000;
+    }
+}
+
+/*
+ * Decode the register id
+ */
+static inline u8 getRegMaskCommon(int reg)
+{
+    u8 seed;
+    int shift;
+    int regId = reg & 0x1f;
+
+    /*
+     * Each double register is equal to a pair of single-precision FP registers
+     */
+    if (!DOUBLEREG(reg)) {
+        seed = 1;
+    } else {
+        assert((regId & 1) == 0); /* double registers must be even */
+        seed = 3;
+    }
+
+    if (FPREG(reg)) {
+       assert(regId < 16); /* only 16 fp regs */
+       shift = kFPReg0;
+    } else if (EXTRAREG(reg)) {
+       assert(regId < 3); /* only 3 extra regs */
+       shift = kFPRegEnd;
+    } else {
+       shift = 0;
+    }
+
+    /* Expand the double register id into single offset */
+    shift += regId;
+    return (seed << shift);
+}
+
+/* External version of getRegMaskCommon */
+u8 dvmGetRegResourceMask(int reg)
+{
+    return getRegMaskCommon(reg);
+}
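+
+/*
+ * Illustrative sketch of the encoding above: a core register with id 3
+ * produces the single bit 1 << 3, while a double-precision register with
+ * (even) id 4 produces the two-bit pattern 3 << (kFPReg0 + 4), covering
+ * both halves of the FP pair.
+ */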
+
+/*
+ * Mark the corresponding bit(s).
+ */
+static inline void setupRegMask(u8 *mask, int reg)
+{
+    *mask |= getRegMaskCommon(reg);
+}
+
+/*
+ * Set up the proper fields in the resource mask
+ */
+static void setupResourceMasks(MipsLIR *lir)
+{
+    /* MIPSTODO simplify setupResourceMasks() */
+    int opcode = lir->opcode;
+    int flags;
+
+    if (opcode <= 0) {
+        lir->useMask = lir->defMask = 0;
+        return;
+    }
+
+    flags = EncodingMap[lir->opcode].flags;
+
+    /* Set up the mask for resources that are updated */
+    if (flags & (IS_LOAD | IS_STORE)) {
+        /* Default to heap - will catch specialized classes later */
+        setMemRefType(lir, flags & IS_LOAD, kHeapRef);
+    }
+
+    /*
+     * Conservatively assume the branch here will call out to a function that in
+     * turn will trash everything.
+     */
+    if (flags & IS_BRANCH) {
+        lir->defMask = lir->useMask = ENCODE_ALL;
+        return;
+    }
+
+    if (flags & REG_DEF0) {
+        setupRegMask(&lir->defMask, lir->operands[0]);
+    }
+
+    if (flags & REG_DEF1) {
+        setupRegMask(&lir->defMask, lir->operands[1]);
+    }
+
+    if (flags & REG_DEF_SP) {
+        lir->defMask |= ENCODE_REG_SP;
+    }
+
+    if (flags & REG_DEF_LR) {
+        lir->defMask |= ENCODE_REG_LR;
+    }
+
+    if (flags & REG_DEF_LIST0) {
+        lir->defMask |= ENCODE_REG_LIST(lir->operands[0]);
+    }
+
+    if (flags & REG_DEF_LIST1) {
+        lir->defMask |= ENCODE_REG_LIST(lir->operands[1]);
+    }
+
+    if (flags & SETS_CCODES) {
+        lir->defMask |= ENCODE_CCODE;
+    }
+
+    /* Conservatively treat the IT block */
+    if (flags & IS_IT) {
+        lir->defMask = ENCODE_ALL;
+    }
+
+    if (flags & (REG_USE0 | REG_USE1 | REG_USE2 | REG_USE3)) {
+        int i;
+
+        for (i = 0; i < 4; i++) {
+            if (flags & (1 << (kRegUse0 + i))) {
+                setupRegMask(&lir->useMask, lir->operands[i]);
+            }
+        }
+    }
+
+    if (flags & REG_USE_PC) {
+        lir->useMask |= ENCODE_REG_PC;
+    }
+
+    if (flags & REG_USE_SP) {
+        lir->useMask |= ENCODE_REG_SP;
+    }
+
+    if (flags & REG_USE_LIST0) {
+        lir->useMask |= ENCODE_REG_LIST(lir->operands[0]);
+    }
+
+    if (flags & REG_USE_LIST1) {
+        lir->useMask |= ENCODE_REG_LIST(lir->operands[1]);
+    }
+
+    if (flags & USES_CCODES) {
+        lir->useMask |= ENCODE_CCODE;
+    }
+}
+
+/*
+ * Set up the accurate resource mask for branch instructions
+ */
+static void relaxBranchMasks(MipsLIR *lir)
+{
+    int flags = EncodingMap[lir->opcode].flags;
+
+    /* Make sure only branch instructions are passed here */
+    assert(flags & IS_BRANCH);
+
+    lir->defMask |= ENCODE_REG_PC;
+    lir->useMask |= ENCODE_REG_PC;
+
+    if (flags & REG_DEF_LR) {
+        lir->defMask |= ENCODE_REG_LR;
+    }
+
+    if (flags & (REG_USE0 | REG_USE1 | REG_USE2 | REG_USE3)) {
+        int i;
+
+        for (i = 0; i < 4; i++) {
+            if (flags & (1 << (kRegUse0 + i))) {
+                setupRegMask(&lir->useMask, lir->operands[i]);
+            }
+        }
+    }
+
+    if (flags & USES_CCODES) {
+        lir->useMask |= ENCODE_CCODE;
+    }
+}
+
+/*
+ * The following are building blocks to construct low-level IRs with 0 - 4
+ * operands.
+ */
+static MipsLIR *newLIR0(CompilationUnit *cUnit, MipsOpCode opcode)
+{
+    MipsLIR *insn = (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR), true);
+    assert(isPseudoOpCode(opcode) || (EncodingMap[opcode].flags & NO_OPERAND));
+    insn->opcode = opcode;
+    setupResourceMasks(insn);
+    dvmCompilerAppendLIR(cUnit, (LIR *) insn);
+    return insn;
+}
+
+static MipsLIR *newLIR1(CompilationUnit *cUnit, MipsOpCode opcode,
+                           int dest)
+{
+    MipsLIR *insn = (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR), true);
+    assert(isPseudoOpCode(opcode) || (EncodingMap[opcode].flags & IS_UNARY_OP));
+    insn->opcode = opcode;
+    insn->operands[0] = dest;
+    setupResourceMasks(insn);
+    dvmCompilerAppendLIR(cUnit, (LIR *) insn);
+    return insn;
+}
+
+static MipsLIR *newLIR2(CompilationUnit *cUnit, MipsOpCode opcode,
+                           int dest, int src1)
+{
+    MipsLIR *insn = (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR), true);
+    assert(isPseudoOpCode(opcode) ||
+           (EncodingMap[opcode].flags & IS_BINARY_OP));
+    insn->opcode = opcode;
+    insn->operands[0] = dest;
+    insn->operands[1] = src1;
+    setupResourceMasks(insn);
+    dvmCompilerAppendLIR(cUnit, (LIR *) insn);
+    return insn;
+}
+
+static MipsLIR *newLIR3(CompilationUnit *cUnit, MipsOpCode opcode,
+                           int dest, int src1, int src2)
+{
+    MipsLIR *insn = (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR), true);
+    if (!(EncodingMap[opcode].flags & IS_TERTIARY_OP)) {
+        LOGE("Bad LIR3: %s[%d]",EncodingMap[opcode].name,opcode);
+    }
+    assert(isPseudoOpCode(opcode) ||
+           (EncodingMap[opcode].flags & IS_TERTIARY_OP));
+    insn->opcode = opcode;
+    insn->operands[0] = dest;
+    insn->operands[1] = src1;
+    insn->operands[2] = src2;
+    setupResourceMasks(insn);
+    dvmCompilerAppendLIR(cUnit, (LIR *) insn);
+    return insn;
+}
+
+static MipsLIR *newLIR4(CompilationUnit *cUnit, MipsOpCode opcode,
+                           int dest, int src1, int src2, int info)
+{
+    MipsLIR *insn = (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR), true);
+    assert(isPseudoOpCode(opcode) ||
+           (EncodingMap[opcode].flags & IS_QUAD_OP));
+    insn->opcode = opcode;
+    insn->operands[0] = dest;
+    insn->operands[1] = src1;
+    insn->operands[2] = src2;
+    insn->operands[3] = info;
+    setupResourceMasks(insn);
+    dvmCompilerAppendLIR(cUnit, (LIR *) insn);
+    return insn;
+}
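+
+/*
+ * Usage sketch: the r_GP reload emitted after callouts elsewhere in this
+ * file is built with one of these helpers, e.g.
+ *
+ *     newLIR3(cUnit, kMipsLw, r_GP, STACK_OFFSET_GP, r_SP);
+ *
+ * which appends (roughly) "lw gp, STACK_OFFSET_GP(sp)" to the LIR stream.
+ */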
+
+/*
+ * If the next instruction is a move-result or move-result-long,
+ * return the target Dalvik sReg[s] and convert it to a
+ * nop.  Otherwise, return INVALID_SREG.  Used to optimize method inlining.
+ */
+static RegLocation inlinedTarget(CompilationUnit *cUnit, MIR *mir,
+                                  bool fpHint)
+{
+    if (mir->next &&
+        ((mir->next->dalvikInsn.opcode == OP_MOVE_RESULT) ||
+         (mir->next->dalvikInsn.opcode == OP_MOVE_RESULT_OBJECT))) {
+        mir->next->dalvikInsn.opcode = OP_NOP;
+        return dvmCompilerGetDest(cUnit, mir->next, 0);
+    } else {
+        RegLocation res = LOC_DALVIK_RETURN_VAL;
+        res.fp = fpHint;
+        return res;
+    }
+}
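+
+/*
+ * For example, given the (hypothetical) Dalvik sequence
+ *
+ *     invoke-static {v1}, LFoo;->bar(I)I
+ *     move-result v0
+ *
+ * the move-result is rewritten to OP_NOP and the inlined callee writes its
+ * result directly into v0's location.
+ */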
+
+/*
+ * The following are building blocks to insert constants into the pool or
+ * instruction streams.
+ */
+
+/* Add a 32-bit constant either in the constant pool or mixed with code */
+static MipsLIR *addWordData(CompilationUnit *cUnit, LIR **constantListP,
+                           int value)
+{
+    /* Add the constant to the literal pool */
+    if (constantListP) {
+        MipsLIR *newValue = (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR), true);
+        newValue->operands[0] = value;
+        newValue->generic.next = *constantListP;
+        *constantListP = (LIR *) newValue;
+        return newValue;
+    } else {
+        /* Add the constant in the middle of code stream */
+        newLIR1(cUnit, kMips32BitData, value);
+    }
+    return NULL;
+}
+
+static RegLocation inlinedTargetWide(CompilationUnit *cUnit, MIR *mir,
+                                      bool fpHint)
+{
+    if (mir->next &&
+        (mir->next->dalvikInsn.opcode == OP_MOVE_RESULT_WIDE)) {
+        mir->next->dalvikInsn.opcode = OP_NOP;
+        return dvmCompilerGetDestWide(cUnit, mir->next, 0, 1);
+    } else {
+        RegLocation res = LOC_DALVIK_RETURN_VAL_WIDE;
+        res.fp = fpHint;
+        return res;
+    }
+}
+
+/*
+ * Generate a kMipsPseudoBarrier marker to indicate the boundary of special
+ * blocks.
+ */
+static void genBarrier(CompilationUnit *cUnit)
+{
+    MipsLIR *barrier = newLIR0(cUnit, kMipsPseudoBarrier);
+    /* Mark all resources as being clobbered */
+    barrier->defMask = -1;
+}
+
+/* Create the PC reconstruction slot if not already done */
+extern MipsLIR *genCheckCommon(CompilationUnit *cUnit, int dOffset,
+                              MipsLIR *branch,
+                              MipsLIR *pcrLabel)
+{
+    /* Forget all def info (because we might rollback here).  Bug #2367397 */
+    dvmCompilerResetDefTracking(cUnit);
+
+    /* Set up the place holder to reconstruct this Dalvik PC */
+    if (pcrLabel == NULL) {
+        int dPC = (int) (cUnit->method->insns + dOffset);
+        pcrLabel = (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR), true);
+        pcrLabel->opcode = kMipsPseudoPCReconstructionCell;
+        pcrLabel->operands[0] = dPC;
+        pcrLabel->operands[1] = dOffset;
+        /* Insert the place holder to the growable list */
+        dvmInsertGrowableList(&cUnit->pcReconstructionList,
+                              (intptr_t) pcrLabel);
+    }
+    /* Branch to the PC reconstruction code */
+    branch->generic.target = (LIR *) pcrLabel;
+
+    /* Clear the conservative flags for branches that punt to the interpreter */
+    relaxBranchMasks(branch);
+
+    return pcrLabel;
+}
diff --git a/vm/compiler/codegen/mips/CodegenDriver.cpp b/vm/compiler/codegen/mips/CodegenDriver.cpp
new file mode 100644
index 0000000..6ef2ce4
--- /dev/null
+++ b/vm/compiler/codegen/mips/CodegenDriver.cpp
@@ -0,0 +1,4938 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This file contains codegen and support common to all supported
+ * Mips variants.  It is included by:
+ *
+ *        Codegen-$(TARGET_ARCH_VARIANT).c
+ *
+ * which combines this common code with specific support found in the
+ * applicable directory below this one.
+ */
+
+/*
+ * Mark garbage collection card. Skip if the value we're storing is null.
+ */
+static void markCard(CompilationUnit *cUnit, int valReg, int tgtAddrReg)
+{
+    int regCardBase = dvmCompilerAllocTemp(cUnit);
+    int regCardNo = dvmCompilerAllocTemp(cUnit);
+    MipsLIR *branchOver = opCompareBranch(cUnit, kMipsBeq, valReg, r_ZERO);
+    loadWordDisp(cUnit, rSELF, offsetof(Thread, cardTable),
+                 regCardBase);
+    opRegRegImm(cUnit, kOpLsr, regCardNo, tgtAddrReg, GC_CARD_SHIFT);
+    storeBaseIndexed(cUnit, regCardBase, regCardNo, regCardBase, 0,
+                     kUnsignedByte);
+    MipsLIR *target = newLIR0(cUnit, kMipsPseudoTargetLabel);
+    target->defMask = ENCODE_ALL;
+    branchOver->generic.target = (LIR *)target;
+    dvmCompilerFreeTemp(cUnit, regCardBase);
+    dvmCompilerFreeTemp(cUnit, regCardNo);
+}
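+
+/*
+ * A minimal sketch of the math above, assuming Dalvik's usual card-table
+ * parameters: with GC_CARD_SHIFT == 7 each card covers 128 bytes, so the
+ * sequence dirties card (tgtAddr >> 7) by storing the card-table base byte
+ * at cardTable + (tgtAddr >> 7).
+ */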
+
+static bool genConversionCall(CompilationUnit *cUnit, MIR *mir, void *funct,
+                                     int srcSize, int tgtSize)
+{
+    /*
+     * Don't optimize the register usage since it calls out to template
+     * functions
+     */
+    RegLocation rlSrc;
+    RegLocation rlDest;
+    int srcReg = 0;
+    int srcRegHi = 0;
+    dvmCompilerFlushAllRegs(cUnit);   /* Send everything to home location */
+
+    if (srcSize == kWord) {
+        srcReg = r_A0;
+    } else if (srcSize == kSingle) {
+#ifdef __mips_hard_float
+        srcReg = r_F12;
+#else
+        srcReg = r_A0;
+#endif
+    } else if (srcSize == kLong) {
+        srcReg = r_ARG0;
+        srcRegHi = r_ARG1;
+    } else if (srcSize == kDouble) {
+#ifdef __mips_hard_float
+        srcReg = r_FARG0;
+        srcRegHi = r_FARG1;
+#else
+        srcReg = r_ARG0;
+        srcRegHi = r_ARG1;
+#endif
+    } else {
+        assert(0);
+    }
+
+    if (srcSize == kWord || srcSize == kSingle) {
+        rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
+        loadValueDirectFixed(cUnit, rlSrc, srcReg);
+    } else {
+        rlSrc = dvmCompilerGetSrcWide(cUnit, mir, 0, 1);
+        loadValueDirectWideFixed(cUnit, rlSrc, srcReg, srcRegHi);
+    }
+    LOAD_FUNC_ADDR(cUnit, r_T9, (int)funct);
+    opReg(cUnit, kOpBlx, r_T9);
+    newLIR3(cUnit, kMipsLw, r_GP, STACK_OFFSET_GP, r_SP);
+    dvmCompilerClobberCallRegs(cUnit);
+    if (tgtSize == kWord || tgtSize == kSingle) {
+        RegLocation rlResult;
+        rlDest = dvmCompilerGetDest(cUnit, mir, 0);
+#ifdef __mips_hard_float
+        if (tgtSize == kSingle)
+            rlResult = dvmCompilerGetReturnAlt(cUnit);
+        else
+            rlResult = dvmCompilerGetReturn(cUnit);
+#else
+        rlResult = dvmCompilerGetReturn(cUnit);
+#endif
+        storeValue(cUnit, rlDest, rlResult);
+    } else {
+        RegLocation rlResult;
+        rlDest = dvmCompilerGetDestWide(cUnit, mir, 0, 1);
+#ifdef __mips_hard_float
+        if (tgtSize == kDouble)
+            rlResult = dvmCompilerGetReturnWideAlt(cUnit);
+        else
+            rlResult = dvmCompilerGetReturnWide(cUnit);
+#else
+        rlResult = dvmCompilerGetReturnWide(cUnit);
+#endif
+        storeValueWide(cUnit, rlDest, rlResult);
+    }
+    return false;
+}
+
+static bool genArithOpFloatPortable(CompilationUnit *cUnit, MIR *mir,
+                                    RegLocation rlDest, RegLocation rlSrc1,
+                                    RegLocation rlSrc2)
+{
+    RegLocation rlResult;
+    void* funct;
+
+    switch (mir->dalvikInsn.opcode) {
+        case OP_ADD_FLOAT_2ADDR:
+        case OP_ADD_FLOAT:
+            funct = (void*) __addsf3;
+            break;
+        case OP_SUB_FLOAT_2ADDR:
+        case OP_SUB_FLOAT:
+            funct = (void*) __subsf3;
+            break;
+        case OP_DIV_FLOAT_2ADDR:
+        case OP_DIV_FLOAT:
+            funct = (void*) __divsf3;
+            break;
+        case OP_MUL_FLOAT_2ADDR:
+        case OP_MUL_FLOAT:
+            funct = (void*) __mulsf3;
+            break;
+        case OP_REM_FLOAT_2ADDR:
+        case OP_REM_FLOAT:
+            funct = (void*) fmodf;
+            break;
+        case OP_NEG_FLOAT: {
+            genNegFloat(cUnit, rlDest, rlSrc1);
+            return false;
+        }
+        default:
+            return true;
+    }
+
+    dvmCompilerFlushAllRegs(cUnit);   /* Send everything to home location */
+#ifdef __mips_hard_float
+    loadValueDirectFixed(cUnit, rlSrc1, r_F12);
+    loadValueDirectFixed(cUnit, rlSrc2, r_F14);
+#else
+    loadValueDirectFixed(cUnit, rlSrc1, r_A0);
+    loadValueDirectFixed(cUnit, rlSrc2, r_A1);
+#endif
+    LOAD_FUNC_ADDR(cUnit, r_T9, (int)funct);
+    opReg(cUnit, kOpBlx, r_T9);
+    newLIR3(cUnit, kMipsLw, r_GP, STACK_OFFSET_GP, r_SP);
+    dvmCompilerClobberCallRegs(cUnit);
+#ifdef __mips_hard_float
+    rlResult = dvmCompilerGetReturnAlt(cUnit);
+#else
+    rlResult = dvmCompilerGetReturn(cUnit);
+#endif
+    storeValue(cUnit, rlDest, rlResult);
+    return false;
+}
+
+static bool genArithOpDoublePortable(CompilationUnit *cUnit, MIR *mir,
+                                     RegLocation rlDest, RegLocation rlSrc1,
+                                     RegLocation rlSrc2)
+{
+    RegLocation rlResult;
+    void* funct;
+
+    switch (mir->dalvikInsn.opcode) {
+        case OP_ADD_DOUBLE_2ADDR:
+        case OP_ADD_DOUBLE:
+            funct = (void*) __adddf3;
+            break;
+        case OP_SUB_DOUBLE_2ADDR:
+        case OP_SUB_DOUBLE:
+            funct = (void*) __subdf3;
+            break;
+        case OP_DIV_DOUBLE_2ADDR:
+        case OP_DIV_DOUBLE:
+            funct = (void*) __divdf3;
+            break;
+        case OP_MUL_DOUBLE_2ADDR:
+        case OP_MUL_DOUBLE:
+            funct = (void*) __muldf3;
+            break;
+        case OP_REM_DOUBLE_2ADDR:
+        case OP_REM_DOUBLE:
+            funct = (void*) (double (*)(double, double)) fmod;
+            break;
+        case OP_NEG_DOUBLE: {
+            genNegDouble(cUnit, rlDest, rlSrc1);
+            return false;
+        }
+        default:
+            return true;
+    }
+    dvmCompilerFlushAllRegs(cUnit);   /* Send everything to home location */
+    LOAD_FUNC_ADDR(cUnit, r_T9, (int)funct);
+#ifdef __mips_hard_float
+    loadValueDirectWideFixed(cUnit, rlSrc1, r_F12, r_F13);
+    loadValueDirectWideFixed(cUnit, rlSrc2, r_F14, r_F15);
+#else
+    loadValueDirectWideFixed(cUnit, rlSrc1, r_ARG0, r_ARG1);
+    loadValueDirectWideFixed(cUnit, rlSrc2, r_ARG2, r_ARG3);
+#endif
+    opReg(cUnit, kOpBlx, r_T9);
+    newLIR3(cUnit, kMipsLw, r_GP, STACK_OFFSET_GP, r_SP);
+    dvmCompilerClobberCallRegs(cUnit);
+#ifdef __mips_hard_float
+    rlResult = dvmCompilerGetReturnWideAlt(cUnit);
+#else
+    rlResult = dvmCompilerGetReturnWide(cUnit);
+#endif
+    storeValueWide(cUnit, rlDest, rlResult);
+#if defined(WITH_SELF_VERIFICATION)
+    cUnit->usesLinkRegister = true;
+#endif
+    return false;
+}
+
+static bool genConversionPortable(CompilationUnit *cUnit, MIR *mir)
+{
+    Opcode opcode = mir->dalvikInsn.opcode;
+
+    switch (opcode) {
+        case OP_INT_TO_FLOAT:
+            return genConversionCall(cUnit, mir, (void*)__floatsisf, kWord, kSingle);
+        case OP_FLOAT_TO_INT:
+            return genConversionCall(cUnit, mir, (void*)__fixsfsi, kSingle, kWord);
+        case OP_DOUBLE_TO_FLOAT:
+            return genConversionCall(cUnit, mir, (void*)__truncdfsf2, kDouble, kSingle);
+        case OP_FLOAT_TO_DOUBLE:
+            return genConversionCall(cUnit, mir, (void*)__extendsfdf2, kSingle, kDouble);
+        case OP_INT_TO_DOUBLE:
+            return genConversionCall(cUnit, mir, (void*)__floatsidf, kWord, kDouble);
+        case OP_DOUBLE_TO_INT:
+            return genConversionCall(cUnit, mir, (void*)__fixdfsi, kDouble, kWord);
+        case OP_FLOAT_TO_LONG:
+            return genConversionCall(cUnit, mir, (void*)__fixsfdi, kSingle, kLong);
+        case OP_LONG_TO_FLOAT:
+            return genConversionCall(cUnit, mir, (void*)__floatdisf, kLong, kSingle);
+        case OP_DOUBLE_TO_LONG:
+            return genConversionCall(cUnit, mir, (void*)__fixdfdi, kDouble, kLong);
+        case OP_LONG_TO_DOUBLE:
+            return genConversionCall(cUnit, mir, (void*)__floatdidf, kLong, kDouble);
+        default:
+            return true;
+    }
+    return false;
+}
+
+#if defined(WITH_SELF_VERIFICATION)
+static void selfVerificationBranchInsert(LIR *currentLIR, MipsOpCode opcode,
+                          int dest, int src1)
+{
+assert(0); /* MIPSTODO port selfVerificationBranchInsert() */
+    MipsLIR *insn = (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR), true);
+    insn->opcode = opcode;
+    insn->operands[0] = dest;
+    insn->operands[1] = src1;
+    setupResourceMasks(insn);
+    dvmCompilerInsertLIRBefore(currentLIR, (LIR *) insn);
+}
+
+/*
+ * Example where r14 (LR) is preserved around a heap access under
+ * self-verification mode in Thumb2:
+ *
+ * D/dalvikvm( 1538): 0x59414c5e (0026): ldr     r14, [r15pc, #220] <-hoisted
+ * D/dalvikvm( 1538): 0x59414c62 (002a): mla     r4, r0, r8, r4
+ * D/dalvikvm( 1538): 0x59414c66 (002e): adds    r3, r4, r3
+ * D/dalvikvm( 1538): 0x59414c6a (0032): push    <r5, r14>    ---+
+ * D/dalvikvm( 1538): 0x59414c6c (0034): blx_1   0x5940f494      |
+ * D/dalvikvm( 1538): 0x59414c6e (0036): blx_2   see above       <-MEM_OP_DECODE
+ * D/dalvikvm( 1538): 0x59414c70 (0038): ldr     r10, [r9, #0]   |
+ * D/dalvikvm( 1538): 0x59414c74 (003c): pop     <r5, r14>    ---+
+ * D/dalvikvm( 1538): 0x59414c78 (0040): mov     r11, r10
+ * D/dalvikvm( 1538): 0x59414c7a (0042): asr     r12, r11, #31
+ * D/dalvikvm( 1538): 0x59414c7e (0046): movs    r0, r2
+ * D/dalvikvm( 1538): 0x59414c80 (0048): movs    r1, r3
+ * D/dalvikvm( 1538): 0x59414c82 (004a): str     r2, [r5, #16]
+ * D/dalvikvm( 1538): 0x59414c84 (004c): mov     r2, r11
+ * D/dalvikvm( 1538): 0x59414c86 (004e): str     r3, [r5, #20]
+ * D/dalvikvm( 1538): 0x59414c88 (0050): mov     r3, r12
+ * D/dalvikvm( 1538): 0x59414c8a (0052): str     r11, [r5, #24]
+ * D/dalvikvm( 1538): 0x59414c8e (0056): str     r12, [r5, #28]
+ * D/dalvikvm( 1538): 0x59414c92 (005a): blx     r14             <-use of LR
+ *
+ */
+static void selfVerificationBranchInsertPass(CompilationUnit *cUnit)
+{
+assert(0); /* MIPSTODO port selfVerificationBranchInsertPass() */
+    MipsLIR *thisLIR;
+    TemplateOpcode opcode = TEMPLATE_MEM_OP_DECODE;
+
+    for (thisLIR = (MipsLIR *) cUnit->firstLIRInsn;
+         thisLIR != (MipsLIR *) cUnit->lastLIRInsn;
+         thisLIR = NEXT_LIR(thisLIR)) {
+        if (!thisLIR->flags.isNop && thisLIR->flags.insertWrapper) {
+            /*
+             * Push r5(FP) and r14(LR) onto stack. We need to make sure that
+             * SP is 8-byte aligned, and we use r5 as a temp to restore LR
+             * for Thumb-only target since LR cannot be directly accessed in
+             * Thumb mode. Another reason to choose r5 here is it is the Dalvik
+             * frame pointer and cannot be the target of the emulated heap
+             * load.
+             */
+            if (cUnit->usesLinkRegister) {
+                genSelfVerificationPreBranch(cUnit, thisLIR);
+            }
+
+            /* Branch to mem op decode template */
+            selfVerificationBranchInsert((LIR *) thisLIR, kThumbBlx1,
+                       (int) gDvmJit.codeCache + templateEntryOffsets[opcode],
+                       (int) gDvmJit.codeCache + templateEntryOffsets[opcode]);
+            selfVerificationBranchInsert((LIR *) thisLIR, kThumbBlx2,
+                       (int) gDvmJit.codeCache + templateEntryOffsets[opcode],
+                       (int) gDvmJit.codeCache + templateEntryOffsets[opcode]);
+
+            /* Restore LR */
+            if (cUnit->usesLinkRegister) {
+                genSelfVerificationPostBranch(cUnit, thisLIR);
+            }
+        }
+    }
+}
+#endif
+
+/* Generate conditional branch instructions */
+static MipsLIR *genConditionalBranchMips(CompilationUnit *cUnit,
+                                    MipsOpCode opc, int rs, int rt,
+                                    MipsLIR *target)
+{
+    MipsLIR *branch = opCompareBranch(cUnit, opc, rs, rt);
+    branch->generic.target = (LIR *) target;
+    return branch;
+}
+
+/* Generate an unconditional branch to go to the interpreter */
+static inline MipsLIR *genTrap(CompilationUnit *cUnit, int dOffset,
+                                  MipsLIR *pcrLabel)
+{
+    MipsLIR *branch = opNone(cUnit, kOpUncondBr);
+    return genCheckCommon(cUnit, dOffset, branch, pcrLabel);
+}
+
+/* Load a wide field from an object instance */
+static void genIGetWide(CompilationUnit *cUnit, MIR *mir, int fieldOffset)
+{
+    RegLocation rlObj = dvmCompilerGetSrc(cUnit, mir, 0);
+    RegLocation rlDest = dvmCompilerGetDestWide(cUnit, mir, 0, 1);
+    RegLocation rlResult;
+    rlObj = loadValue(cUnit, rlObj, kCoreReg);
+    int regPtr = dvmCompilerAllocTemp(cUnit);
+
+    assert(rlDest.wide);
+
+    genNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, mir->offset,
+                 NULL);/* null object? */
+    opRegRegImm(cUnit, kOpAdd, regPtr, rlObj.lowReg, fieldOffset);
+    rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kAnyReg, true);
+
+    HEAP_ACCESS_SHADOW(true);
+    loadPair(cUnit, regPtr, rlResult.lowReg, rlResult.highReg);
+    HEAP_ACCESS_SHADOW(false);
+
+    dvmCompilerFreeTemp(cUnit, regPtr);
+    storeValueWide(cUnit, rlDest, rlResult);
+}
+
+/* Store a wide field to an object instance */
+static void genIPutWide(CompilationUnit *cUnit, MIR *mir, int fieldOffset)
+{
+    RegLocation rlSrc = dvmCompilerGetSrcWide(cUnit, mir, 0, 1);
+    RegLocation rlObj = dvmCompilerGetSrc(cUnit, mir, 2);
+    rlObj = loadValue(cUnit, rlObj, kCoreReg);
+    int regPtr;
+    rlSrc = loadValueWide(cUnit, rlSrc, kAnyReg);
+    genNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, mir->offset,
+                 NULL);/* null object? */
+    regPtr = dvmCompilerAllocTemp(cUnit);
+    opRegRegImm(cUnit, kOpAdd, regPtr, rlObj.lowReg, fieldOffset);
+
+    HEAP_ACCESS_SHADOW(true);
+    storePair(cUnit, regPtr, rlSrc.lowReg, rlSrc.highReg);
+    HEAP_ACCESS_SHADOW(false);
+
+    dvmCompilerFreeTemp(cUnit, regPtr);
+}
+
+/*
+ * Load a field from an object instance
+ */
+static void genIGet(CompilationUnit *cUnit, MIR *mir, OpSize size,
+                    int fieldOffset, bool isVolatile)
+{
+    RegLocation rlResult;
+    RegisterClass regClass = dvmCompilerRegClassBySize(size);
+    RegLocation rlObj = dvmCompilerGetSrc(cUnit, mir, 0);
+    RegLocation rlDest = dvmCompilerGetDest(cUnit, mir, 0);
+    rlObj = loadValue(cUnit, rlObj, kCoreReg);
+    rlResult = dvmCompilerEvalLoc(cUnit, rlDest, regClass, true);
+    genNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, mir->offset,
+                 NULL);/* null object? */
+
+    HEAP_ACCESS_SHADOW(true);
+    loadBaseDisp(cUnit, mir, rlObj.lowReg, fieldOffset, rlResult.lowReg,
+                 size, rlObj.sRegLow);
+    HEAP_ACCESS_SHADOW(false);
+    if (isVolatile) {
+        dvmCompilerGenMemBarrier(cUnit, 0);
+    }
+
+    storeValue(cUnit, rlDest, rlResult);
+}
+
+/*
+ * Store a field to an object instance
+ */
+static void genIPut(CompilationUnit *cUnit, MIR *mir, OpSize size,
+                    int fieldOffset, bool isObject, bool isVolatile)
+{
+    RegisterClass regClass = dvmCompilerRegClassBySize(size);
+    RegLocation rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
+    RegLocation rlObj = dvmCompilerGetSrc(cUnit, mir, 1);
+    rlObj = loadValue(cUnit, rlObj, kCoreReg);
+    rlSrc = loadValue(cUnit, rlSrc, regClass);
+    genNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, mir->offset,
+                 NULL);/* null object? */
+
+    if (isVolatile) {
+        dvmCompilerGenMemBarrier(cUnit, 0);
+    }
+    HEAP_ACCESS_SHADOW(true);
+    storeBaseDisp(cUnit, rlObj.lowReg, fieldOffset, rlSrc.lowReg, size);
+    HEAP_ACCESS_SHADOW(false);
+    if (isVolatile) {
+        dvmCompilerGenMemBarrier(cUnit, 0);
+    }
+    if (isObject) {
+        /* NOTE: marking card based on object head */
+        markCard(cUnit, rlSrc.lowReg, rlObj.lowReg);
+    }
+}
+
+/*
+ * Generate array load
+ */
+static void genArrayGet(CompilationUnit *cUnit, MIR *mir, OpSize size,
+                        RegLocation rlArray, RegLocation rlIndex,
+                        RegLocation rlDest, int scale)
+{
+    RegisterClass regClass = dvmCompilerRegClassBySize(size);
+    int lenOffset = OFFSETOF_MEMBER(ArrayObject, length);
+    int dataOffset = OFFSETOF_MEMBER(ArrayObject, contents);
+    RegLocation rlResult;
+    rlArray = loadValue(cUnit, rlArray, kCoreReg);
+    rlIndex = loadValue(cUnit, rlIndex, kCoreReg);
+    int regPtr;
+
+    /* null object? */
+    MipsLIR *pcrLabel = NULL;
+
+    if (!(mir->OptimizationFlags & MIR_IGNORE_NULL_CHECK)) {
+        pcrLabel = genNullCheck(cUnit, rlArray.sRegLow,
+                                rlArray.lowReg, mir->offset, NULL);
+    }
+
+    regPtr = dvmCompilerAllocTemp(cUnit);
+
+    assert(IS_SIMM16(dataOffset));
+    if (scale) {
+        opRegRegImm(cUnit, kOpLsl, regPtr, rlIndex.lowReg, scale);
+    }
+
+    if (!(mir->OptimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
+        int regLen = dvmCompilerAllocTemp(cUnit);
+        /* Get len */
+        loadWordDisp(cUnit, rlArray.lowReg, lenOffset, regLen);
+        genBoundsCheck(cUnit, rlIndex.lowReg, regLen, mir->offset,
+                       pcrLabel);
+        dvmCompilerFreeTemp(cUnit, regLen);
+    }
+
+    if (scale) {
+        opRegReg(cUnit, kOpAdd, regPtr, rlArray.lowReg);
+    } else {
+        opRegRegReg(cUnit, kOpAdd, regPtr, rlArray.lowReg, rlIndex.lowReg);
+    }
+
+    rlResult = dvmCompilerEvalLoc(cUnit, rlDest, regClass, true);
+    if ((size == kLong) || (size == kDouble)) {
+        HEAP_ACCESS_SHADOW(true);
+        loadBaseDispWide(cUnit, mir, regPtr, dataOffset, rlResult.lowReg,
+                         rlResult.highReg, INVALID_SREG);
+        HEAP_ACCESS_SHADOW(false);
+        dvmCompilerFreeTemp(cUnit, regPtr);
+        storeValueWide(cUnit, rlDest, rlResult);
+    } else {
+        HEAP_ACCESS_SHADOW(true);
+        loadBaseDisp(cUnit, mir, regPtr, dataOffset, rlResult.lowReg,
+                     size, INVALID_SREG);
+        HEAP_ACCESS_SHADOW(false);
+        dvmCompilerFreeTemp(cUnit, regPtr);
+        storeValue(cUnit, rlDest, rlResult);
+    }
+}
+
+/*
+ * Generate array store
+ */
+static void genArrayPut(CompilationUnit *cUnit, MIR *mir, OpSize size,
+                        RegLocation rlArray, RegLocation rlIndex,
+                        RegLocation rlSrc, int scale)
+{
+    RegisterClass regClass = dvmCompilerRegClassBySize(size);
+    int lenOffset = OFFSETOF_MEMBER(ArrayObject, length);
+    int dataOffset = OFFSETOF_MEMBER(ArrayObject, contents);
+
+    int regPtr;
+    rlArray = loadValue(cUnit, rlArray, kCoreReg);
+    rlIndex = loadValue(cUnit, rlIndex, kCoreReg);
+
+    if (dvmCompilerIsTemp(cUnit, rlArray.lowReg)) {
+        dvmCompilerClobber(cUnit, rlArray.lowReg);
+        regPtr = rlArray.lowReg;
+    } else {
+        regPtr = dvmCompilerAllocTemp(cUnit);
+        genRegCopy(cUnit, regPtr, rlArray.lowReg);
+    }
+
+    /* null object? */
+    MipsLIR *pcrLabel = NULL;
+
+    if (!(mir->OptimizationFlags & MIR_IGNORE_NULL_CHECK)) {
+        pcrLabel = genNullCheck(cUnit, rlArray.sRegLow, rlArray.lowReg,
+                                mir->offset, NULL);
+    }
+
+    assert(IS_SIMM16(dataOffset));
+    int tReg = dvmCompilerAllocTemp(cUnit);
+    if (scale) {
+        opRegRegImm(cUnit, kOpLsl, tReg, rlIndex.lowReg, scale);
+    }
+
+    if (!(mir->OptimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
+        int regLen = dvmCompilerAllocTemp(cUnit);
+        // NOTE: max live temps (4) here.
+        /* Get len */
+        loadWordDisp(cUnit, rlArray.lowReg, lenOffset, regLen);
+        genBoundsCheck(cUnit, rlIndex.lowReg, regLen, mir->offset,
+                       pcrLabel);
+        dvmCompilerFreeTemp(cUnit, regLen);
+    }
+
+    if (scale) {
+        opRegReg(cUnit, kOpAdd, tReg, rlArray.lowReg);
+    } else {
+        opRegRegReg(cUnit, kOpAdd, tReg, rlArray.lowReg, rlIndex.lowReg);
+    }
+
+    /* at this point, tReg points to array, 2 live temps */
+    if ((size == kLong) || (size == kDouble)) {
+        rlSrc = loadValueWide(cUnit, rlSrc, regClass);
+        HEAP_ACCESS_SHADOW(true);
+        storeBaseDispWide(cUnit, tReg, dataOffset, rlSrc.lowReg, rlSrc.highReg);
+        HEAP_ACCESS_SHADOW(false);
+        dvmCompilerFreeTemp(cUnit, tReg);
+        dvmCompilerFreeTemp(cUnit, regPtr);
+    } else {
+        rlSrc = loadValue(cUnit, rlSrc, regClass);
+        HEAP_ACCESS_SHADOW(true);
+        storeBaseDisp(cUnit, tReg, dataOffset, rlSrc.lowReg, size);
+        dvmCompilerFreeTemp(cUnit, tReg);
+        HEAP_ACCESS_SHADOW(false);
+    }
+}
+
+/*
+ * Generate array object store
+ * Must use explicit register allocation here because of
+ * call-out to dvmCanPutArrayElement
+ */
+static void genArrayObjectPut(CompilationUnit *cUnit, MIR *mir,
+                              RegLocation rlArray, RegLocation rlIndex,
+                              RegLocation rlSrc, int scale)
+{
+    int lenOffset = OFFSETOF_MEMBER(ArrayObject, length);
+    int dataOffset = OFFSETOF_MEMBER(ArrayObject, contents);
+
+    int regLen = r_A0;
+    int regPtr = r_S0;  /* Preserved across call */
+    int regArray = r_A1;
+    int regIndex = r_S4;  /* Preserved across call */
+
+    dvmCompilerFlushAllRegs(cUnit);
+    // moved lock for r_S0 and r_S4 here from below since genBoundsCheck
+    // allocates a temporary that can result in clobbering either of them
+    dvmCompilerLockTemp(cUnit, regPtr);   // r_S0
+    dvmCompilerLockTemp(cUnit, regIndex); // r_S4
+
+    loadValueDirectFixed(cUnit, rlArray, regArray);
+    loadValueDirectFixed(cUnit, rlIndex, regIndex);
+
+    /* null object? */
+    MipsLIR *pcrLabel = NULL;
+
+    if (!(mir->OptimizationFlags & MIR_IGNORE_NULL_CHECK)) {
+        pcrLabel = genNullCheck(cUnit, rlArray.sRegLow, regArray,
+                                mir->offset, NULL);
+    }
+
+    if (!(mir->OptimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
+        /* Get len */
+        loadWordDisp(cUnit, regArray, lenOffset, regLen);
+        /* regPtr -> array data */
+        opRegRegImm(cUnit, kOpAdd, regPtr, regArray, dataOffset);
+        genBoundsCheck(cUnit, regIndex, regLen, mir->offset,
+                       pcrLabel);
+    } else {
+        /* regPtr -> array data */
+        opRegRegImm(cUnit, kOpAdd, regPtr, regArray, dataOffset);
+    }
+
+    /* Get object to store */
+    loadValueDirectFixed(cUnit, rlSrc, r_A0);
+    LOAD_FUNC_ADDR(cUnit, r_T9, (int)dvmCanPutArrayElement);
+
+    /* Are we storing null?  If so, avoid check */
+    MipsLIR *branchOver = opCompareBranch(cUnit, kMipsBeqz, r_A0, -1);
+
+    /* Make sure the types are compatible */
+    loadWordDisp(cUnit, regArray, offsetof(Object, clazz), r_A1);
+    loadWordDisp(cUnit, r_A0, offsetof(Object, clazz), r_A0);
+    opReg(cUnit, kOpBlx, r_T9);
+    newLIR3(cUnit, kMipsLw, r_GP, STACK_OFFSET_GP, r_SP);
+    dvmCompilerClobberCallRegs(cUnit);
+
+    /*
+     * Using fixed registers here, and counting on r_S0 and r_S4 being
+     * preserved across the above call.  Tell the register allocation
+     * utilities about the regs we are using directly
+     */
+    dvmCompilerLockTemp(cUnit, r_A0);
+    dvmCompilerLockTemp(cUnit, r_A1);
+
+    /* Bad? - roll back and re-execute if so */
+    genRegImmCheck(cUnit, kMipsCondEq, r_V0, 0, mir->offset, pcrLabel);
+
+    /* Resume here - must reload element & array, regPtr & index preserved */
+    loadValueDirectFixed(cUnit, rlSrc, r_A0);
+    loadValueDirectFixed(cUnit, rlArray, r_A1);
+
+    MipsLIR *target = newLIR0(cUnit, kMipsPseudoTargetLabel);
+    target->defMask = ENCODE_ALL;
+    branchOver->generic.target = (LIR *) target;
+
+    HEAP_ACCESS_SHADOW(true);
+    storeBaseIndexed(cUnit, regPtr, regIndex, r_A0,
+                     scale, kWord);
+    HEAP_ACCESS_SHADOW(false);
+
+    dvmCompilerFreeTemp(cUnit, regPtr);
+    dvmCompilerFreeTemp(cUnit, regIndex);
+
+    /* NOTE: marking card here based on object head */
+    markCard(cUnit, r_A0, r_A1);
+}
+
+static bool genShiftOpLong(CompilationUnit *cUnit, MIR *mir,
+                           RegLocation rlDest, RegLocation rlSrc1,
+                           RegLocation rlShift)
+{
+    /*
+     * Don't mess with the registers here as there is a particular calling
+     * convention to the out-of-line handler.
+     */
+    RegLocation rlResult;
+
+    loadValueDirectWideFixed(cUnit, rlSrc1, r_ARG0, r_ARG1);
+    loadValueDirect(cUnit, rlShift, r_A2);
+    switch (mir->dalvikInsn.opcode) {
+        case OP_SHL_LONG:
+        case OP_SHL_LONG_2ADDR:
+            genDispatchToHandler(cUnit, TEMPLATE_SHL_LONG);
+            break;
+        case OP_SHR_LONG:
+        case OP_SHR_LONG_2ADDR:
+            genDispatchToHandler(cUnit, TEMPLATE_SHR_LONG);
+            break;
+        case OP_USHR_LONG:
+        case OP_USHR_LONG_2ADDR:
+            genDispatchToHandler(cUnit, TEMPLATE_USHR_LONG);
+            break;
+        default:
+            return true;
+    }
+    rlResult = dvmCompilerGetReturnWide(cUnit);
+    storeValueWide(cUnit, rlDest, rlResult);
+    return false;
+}
+
+static bool genArithOpLong(CompilationUnit *cUnit, MIR *mir,
+                           RegLocation rlDest, RegLocation rlSrc1,
+                           RegLocation rlSrc2)
+{
+    RegLocation rlResult;
+    OpKind firstOp = kOpBkpt;
+    OpKind secondOp = kOpBkpt;
+    bool callOut = false;
+    void *callTgt;
+
+    switch (mir->dalvikInsn.opcode) {
+        case OP_NOT_LONG:
+            rlSrc2 = loadValueWide(cUnit, rlSrc2, kCoreReg);
+            rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
+            opRegReg(cUnit, kOpMvn, rlResult.lowReg, rlSrc2.lowReg);
+            opRegReg(cUnit, kOpMvn, rlResult.highReg, rlSrc2.highReg);
+            storeValueWide(cUnit, rlDest, rlResult);
+            return false;
+            break;
+        case OP_ADD_LONG:
+        case OP_ADD_LONG_2ADDR:
+            firstOp = kOpAdd;
+            secondOp = kOpAdc;
+            break;
+        case OP_SUB_LONG:
+        case OP_SUB_LONG_2ADDR:
+            firstOp = kOpSub;
+            secondOp = kOpSbc;
+            break;
+        case OP_MUL_LONG:
+        case OP_MUL_LONG_2ADDR:
+            genMulLong(cUnit, rlDest, rlSrc1, rlSrc2);
+            return false;
+        case OP_DIV_LONG:
+        case OP_DIV_LONG_2ADDR:
+            callOut = true;
+            callTgt = (void*)__divdi3;
+            break;
+        case OP_REM_LONG:
+        case OP_REM_LONG_2ADDR:
+            callOut = true;
+            callTgt = (void*)__moddi3;
+            break;
+        case OP_AND_LONG_2ADDR:
+        case OP_AND_LONG:
+            firstOp = kOpAnd;
+            secondOp = kOpAnd;
+            break;
+        case OP_OR_LONG:
+        case OP_OR_LONG_2ADDR:
+            firstOp = kOpOr;
+            secondOp = kOpOr;
+            break;
+        case OP_XOR_LONG:
+        case OP_XOR_LONG_2ADDR:
+            firstOp = kOpXor;
+            secondOp = kOpXor;
+            break;
+        case OP_NEG_LONG: {
+            int tReg = dvmCompilerAllocTemp(cUnit);
+            rlSrc2 = loadValueWide(cUnit, rlSrc2, kCoreReg);
+            rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
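+            /*
+             * 64-bit negate: result.lo = 0 - src.lo; the borrow into the
+             * high word is (0 < result.lo), i.e. 1 whenever the low
+             * subtraction wrapped, so result.hi = (0 - src.hi) - borrow.
+             * E.g. negating 1 (lo=1, hi=0): lo -> 0xffffffff, borrow = 1,
+             * hi -> 0xffffffff, giving -1.
+             */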
+            newLIR3(cUnit, kMipsSubu, rlResult.lowReg, r_ZERO, rlSrc2.lowReg);
+            newLIR3(cUnit, kMipsSubu, tReg, r_ZERO, rlSrc2.highReg);
+            newLIR3(cUnit, kMipsSltu, rlResult.highReg, r_ZERO, rlResult.lowReg);
+            newLIR3(cUnit, kMipsSubu, rlResult.highReg, tReg, rlResult.highReg);
+            dvmCompilerFreeTemp(cUnit, tReg);
+            storeValueWide(cUnit, rlDest, rlResult);
+            return false;
+        }
+        default:
+            LOGE("Invalid long arith op");
+            dvmCompilerAbort(cUnit);
+    }
+    if (!callOut) {
+        genLong3Addr(cUnit, mir, firstOp, secondOp, rlDest, rlSrc1, rlSrc2);
+    } else {
+        dvmCompilerFlushAllRegs(cUnit);   /* Send everything to home location */
+        loadValueDirectWideFixed(cUnit, rlSrc1, r_ARG0, r_ARG1);
+        LOAD_FUNC_ADDR(cUnit, r_T9, (int) callTgt);
+        loadValueDirectWideFixed(cUnit, rlSrc2, r_ARG2, r_ARG3);
+        opReg(cUnit, kOpBlx, r_T9);
+        newLIR3(cUnit, kMipsLw, r_GP, STACK_OFFSET_GP, r_SP);
+        dvmCompilerClobberCallRegs(cUnit);
+        rlResult = dvmCompilerGetReturnWide(cUnit);
+        storeValueWide(cUnit, rlDest, rlResult);
+#if defined(WITH_SELF_VERIFICATION)
+        cUnit->usesLinkRegister = true;
+#endif
+    }
+    return false;
+}
+
+static bool genArithOpInt(CompilationUnit *cUnit, MIR *mir,
+                          RegLocation rlDest, RegLocation rlSrc1,
+                          RegLocation rlSrc2)
+{
+    OpKind op = kOpBkpt;
+    bool checkZero = false;
+    bool unary = false;
+    RegLocation rlResult;
+    bool shiftOp = false;
+    bool isDivRem = false;
+    MipsOpCode opc;
+    int divReg;
+
+    switch (mir->dalvikInsn.opcode) {
+        case OP_NEG_INT:
+            op = kOpNeg;
+            unary = true;
+            break;
+        case OP_NOT_INT:
+            op = kOpMvn;
+            unary = true;
+            break;
+        case OP_ADD_INT:
+        case OP_ADD_INT_2ADDR:
+            op = kOpAdd;
+            break;
+        case OP_SUB_INT:
+        case OP_SUB_INT_2ADDR:
+            op = kOpSub;
+            break;
+        case OP_MUL_INT:
+        case OP_MUL_INT_2ADDR:
+            op = kOpMul;
+            break;
+        case OP_DIV_INT:
+        case OP_DIV_INT_2ADDR:
+            isDivRem = true;
+            checkZero = true;
+            opc = kMipsMflo;
+            divReg = r_LO;
+            break;
+        case OP_REM_INT:
+        case OP_REM_INT_2ADDR:
+            isDivRem = true;
+            checkZero = true;
+            opc = kMipsMfhi;
+            divReg = r_HI;
+            break;
+        case OP_AND_INT:
+        case OP_AND_INT_2ADDR:
+            op = kOpAnd;
+            break;
+        case OP_OR_INT:
+        case OP_OR_INT_2ADDR:
+            op = kOpOr;
+            break;
+        case OP_XOR_INT:
+        case OP_XOR_INT_2ADDR:
+            op = kOpXor;
+            break;
+        case OP_SHL_INT:
+        case OP_SHL_INT_2ADDR:
+            shiftOp = true;
+            op = kOpLsl;
+            break;
+        case OP_SHR_INT:
+        case OP_SHR_INT_2ADDR:
+            shiftOp = true;
+            op = kOpAsr;
+            break;
+        case OP_USHR_INT:
+        case OP_USHR_INT_2ADDR:
+            shiftOp = true;
+            op = kOpLsr;
+            break;
+        default:
+            LOGE("Invalid word arith op: %#x(%d)",
+                 mir->dalvikInsn.opcode, mir->dalvikInsn.opcode);
+            dvmCompilerAbort(cUnit);
+    }
+
+    rlSrc1 = loadValue(cUnit, rlSrc1, kCoreReg);
+    if (unary) {
+        rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
+        opRegReg(cUnit, op, rlResult.lowReg,
+                 rlSrc1.lowReg);
+    } else if (isDivRem) {
+        rlSrc2 = loadValue(cUnit, rlSrc2, kCoreReg);
+        if (checkZero) {
+            genNullCheck(cUnit, rlSrc2.sRegLow, rlSrc2.lowReg, mir->offset, NULL);
+        }
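+        /*
+         * MIPS div leaves the quotient in LO and the remainder in HI;
+         * the mflo/mfhi opcode selected above fetches the one we want.
+         * The zero test above reuses the null-check helper to bail out
+         * before a division by zero.
+         */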
+        newLIR4(cUnit, kMipsDiv, r_HI, r_LO, rlSrc1.lowReg, rlSrc2.lowReg);
+        rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
+        newLIR2(cUnit, opc, rlResult.lowReg, divReg);
+    } else {
+        rlSrc2 = loadValue(cUnit, rlSrc2, kCoreReg);
+        if (shiftOp) {
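+            /* Dalvik int shifts use only the low 5 bits of the count,
+               hence the mask with 31 below */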
+            int tReg = dvmCompilerAllocTemp(cUnit);
+            opRegRegImm(cUnit, kOpAnd, tReg, rlSrc2.lowReg, 31);
+            rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
+            opRegRegReg(cUnit, op, rlResult.lowReg,
+                        rlSrc1.lowReg, tReg);
+            dvmCompilerFreeTemp(cUnit, tReg);
+        } else {
+            rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
+            opRegRegReg(cUnit, op, rlResult.lowReg,
+                        rlSrc1.lowReg, rlSrc2.lowReg);
+        }
+    }
+    storeValue(cUnit, rlDest, rlResult);
+
+    return false;
+}
+
+static bool genArithOp(CompilationUnit *cUnit, MIR *mir)
+{
+    Opcode opcode = mir->dalvikInsn.opcode;
+    RegLocation rlDest;
+    RegLocation rlSrc1;
+    RegLocation rlSrc2;
+    /* Deduce sizes of operands */
+    if (mir->ssaRep->numUses == 2) {
+        rlSrc1 = dvmCompilerGetSrc(cUnit, mir, 0);
+        rlSrc2 = dvmCompilerGetSrc(cUnit, mir, 1);
+    } else if (mir->ssaRep->numUses == 3) {
+        rlSrc1 = dvmCompilerGetSrcWide(cUnit, mir, 0, 1);
+        rlSrc2 = dvmCompilerGetSrc(cUnit, mir, 2);
+    } else {
+        rlSrc1 = dvmCompilerGetSrcWide(cUnit, mir, 0, 1);
+        rlSrc2 = dvmCompilerGetSrcWide(cUnit, mir, 2, 3);
+        assert(mir->ssaRep->numUses == 4);
+    }
+    if (mir->ssaRep->numDefs == 1) {
+        rlDest = dvmCompilerGetDest(cUnit, mir, 0);
+    } else {
+        assert(mir->ssaRep->numDefs == 2);
+        rlDest = dvmCompilerGetDestWide(cUnit, mir, 0, 1);
+    }
+
+    if ((opcode >= OP_ADD_LONG_2ADDR) && (opcode <= OP_XOR_LONG_2ADDR)) {
+        return genArithOpLong(cUnit, mir, rlDest, rlSrc1, rlSrc2);
+    }
+    if ((opcode >= OP_ADD_LONG) && (opcode <= OP_XOR_LONG)) {
+        return genArithOpLong(cUnit, mir, rlDest, rlSrc1, rlSrc2);
+    }
+    if ((opcode >= OP_SHL_LONG_2ADDR) && (opcode <= OP_USHR_LONG_2ADDR)) {
+        return genShiftOpLong(cUnit, mir, rlDest, rlSrc1, rlSrc2);
+    }
+    if ((opcode >= OP_SHL_LONG) && (opcode <= OP_USHR_LONG)) {
+        return genShiftOpLong(cUnit, mir, rlDest, rlSrc1, rlSrc2);
+    }
+    if ((opcode >= OP_ADD_INT_2ADDR) && (opcode <= OP_USHR_INT_2ADDR)) {
+        return genArithOpInt(cUnit, mir, rlDest, rlSrc1, rlSrc2);
+    }
+    if ((opcode >= OP_ADD_INT) && (opcode <= OP_USHR_INT)) {
+        return genArithOpInt(cUnit, mir, rlDest, rlSrc1, rlSrc2);
+    }
+    if ((opcode >= OP_ADD_FLOAT_2ADDR) && (opcode <= OP_REM_FLOAT_2ADDR)) {
+        return genArithOpFloat(cUnit, mir, rlDest, rlSrc1, rlSrc2);
+    }
+    if ((opcode >= OP_ADD_FLOAT) && (opcode <= OP_REM_FLOAT)) {
+        return genArithOpFloat(cUnit, mir, rlDest, rlSrc1, rlSrc2);
+    }
+    if ((opcode >= OP_ADD_DOUBLE_2ADDR) && (opcode <= OP_REM_DOUBLE_2ADDR)) {
+        return genArithOpDouble(cUnit, mir, rlDest, rlSrc1, rlSrc2);
+    }
+    if ((opcode >= OP_ADD_DOUBLE) && (opcode <= OP_REM_DOUBLE)) {
+        return genArithOpDouble(cUnit, mir, rlDest, rlSrc1, rlSrc2);
+    }
+    return true;
+}
+
+/* Generate unconditional branch instructions */
+static MipsLIR *genUnconditionalBranch(CompilationUnit *cUnit, MipsLIR *target)
+{
+    MipsLIR *branch = opNone(cUnit, kOpUncondBr);
+    branch->generic.target = (LIR *) target;
+    return branch;
+}
+
+/* Perform the actual operation for OP_RETURN_* */
+void genReturnCommon(CompilationUnit *cUnit, MIR *mir)
+{
+    genDispatchToHandler(cUnit, gDvmJit.methodTraceSupport ?
+                         TEMPLATE_RETURN_PROF : TEMPLATE_RETURN);
+#if defined(WITH_JIT_TUNING)
+    gDvmJit.returnOp++;
+#endif
+    int dPC = (int) (cUnit->method->insns + mir->offset);
+    /* Insert branch, but defer setting of target */
+    MipsLIR *branch = genUnconditionalBranch(cUnit, NULL);
+    /* Set up the place holder to reconstruct this Dalvik PC */
+    MipsLIR *pcrLabel = (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR), true);
+    pcrLabel->opcode = kMipsPseudoPCReconstructionCell;
+    pcrLabel->operands[0] = dPC;
+    pcrLabel->operands[1] = mir->offset;
+    /* Insert the place holder to the growable list */
+    dvmInsertGrowableList(&cUnit->pcReconstructionList, (intptr_t) pcrLabel);
+    /* Branch to the PC reconstruction code */
+    branch->generic.target = (LIR *) pcrLabel;
+}
+
+static void genProcessArgsNoRange(CompilationUnit *cUnit, MIR *mir,
+                                  DecodedInstruction *dInsn,
+                                  MipsLIR **pcrLabel)
+{
+    unsigned int i;
+    unsigned int regMask = 0;
+    RegLocation rlArg;
+    int numDone = 0;
+
+    /*
+     * Load arguments to r_A0..r_T0.  Note that these registers may contain
+     * live values, so we clobber them immediately after loading to prevent
+     * them from being used as sources for subsequent loads.
+     */
+    dvmCompilerLockAllTemps(cUnit);
+    for (i = 0; i < dInsn->vA; i++) {
+        regMask |= 1 << i;
+        rlArg = dvmCompilerGetSrc(cUnit, mir, numDone++);
+        loadValueDirectFixed(cUnit, rlArg, i+r_A0); /* r_A0 thru r_T0 */
+    }
+    if (regMask) {
+        /* Up to 5 args are pushed on top of FP - sizeofStackSaveArea */
+        opRegRegImm(cUnit, kOpSub, r_S4, rFP,
+                    sizeof(StackSaveArea) + (dInsn->vA << 2));
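+        /*
+         * r_S4 now points at &newFP[0], i.e. rFP - sizeof(StackSaveArea)
+         * - 4 * vA; the argument registers loaded above are flushed there
+         * by the storeMultiple below.
+         */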
+        /* generate null check */
+        if (pcrLabel) {
+            *pcrLabel = genNullCheck(cUnit, dvmCompilerSSASrc(mir, 0), r_A0,
+                                     mir->offset, NULL);
+        }
+        storeMultiple(cUnit, r_S4, regMask);
+    }
+}
+
+static void genProcessArgsRange(CompilationUnit *cUnit, MIR *mir,
+                                DecodedInstruction *dInsn,
+                                MipsLIR **pcrLabel)
+{
+    int srcOffset = dInsn->vC << 2;
+    int numArgs = dInsn->vA;
+    int regMask;
+
+    /*
+     * Note: here, all promoted registers will have been flushed
+     * back to the Dalvik base locations, so register usage restrictions
+     * are lifted.  All parms are loaded from the original Dalvik register
+     * region, even though some might conceivably have valid copies
+     * cached in a preserved register.
+     */
+    dvmCompilerLockAllTemps(cUnit);
+
+    /*
+     * r4PC     : &rFP[vC]
+     * r_S4: &newFP[0]
+     */
+    opRegRegImm(cUnit, kOpAdd, r4PC, rFP, srcOffset);
+    /* load the first (up to 4) args into r_A0..r_A3 */
+    regMask = (1 << ((numArgs < 4) ? numArgs : 4)) - 1;
+    /*
+     * Protect the loadMultiple instruction from being reordered with other
+     * Dalvik stack accesses.
+     *
+     * This code is also shared by the invoke jumbo instructions, and this
+     * does not need to be done if the invoke jumbo has no arguments.
+     */
+    if (numArgs != 0) loadMultiple(cUnit, r4PC, regMask);
+
+    opRegRegImm(cUnit, kOpSub, r_S4, rFP,
+                sizeof(StackSaveArea) + (numArgs << 2));
+    /* generate null check */
+    if (pcrLabel) {
+        *pcrLabel = genNullCheck(cUnit, dvmCompilerSSASrc(mir, 0), r_A0,
+                                 mir->offset, NULL);
+    }
+
+    /*
+     * Handle remaining 4n arguments:
+     * store previously loaded 4 values and load the next 4 values
+     */
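+    /*
+     * Worked example (assuming loadMultiple/storeMultiple advance their
+     * base registers by 4 bytes per register copied): numArgs == 13 loads
+     * args 0-3 above, the loop counter starts at ((13-4)>>2)<<2 == 8, and
+     * each iteration stores one batch of 4 while loading the next, so the
+     * loop runs twice (8 -> 4 -> 0).  The store after the loop flushes
+     * args 8-11, and the modulo-4 epilogue below copies arg 12.
+     */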
+    if (numArgs >= 8) {
+        MipsLIR *loopLabel = NULL;
+        /*
+         * r_A0 contains "this" and it will be used later, so push it to the stack
+         * first. Pushing r_S1 (rFP) is just for stack alignment purposes.
+         */
+
+        newLIR2(cUnit, kMipsMove, r_T0, r_A0);
+        newLIR2(cUnit, kMipsMove, r_T1, r_S1);
+
+        /* No need to generate the loop structure if numArgs <= 11 */
+        if (numArgs > 11) {
+            loadConstant(cUnit, rFP, ((numArgs - 4) >> 2) << 2);
+            loopLabel = newLIR0(cUnit, kMipsPseudoTargetLabel);
+            loopLabel->defMask = ENCODE_ALL;
+        }
+        storeMultiple(cUnit, r_S4, regMask);
+        /*
+         * Protect the loadMultiple instruction from being reordered with other
+         * Dalvik stack accesses.
+         */
+        loadMultiple(cUnit, r4PC, regMask);
+        /* No need to generate the loop structure if numArgs <= 11 */
+        if (numArgs > 11) {
+            opRegImm(cUnit, kOpSub, rFP, 4);
+            genConditionalBranchMips(cUnit, kMipsBne, rFP, r_ZERO, loopLabel);
+        }
+    }
+
+    /* Save the last batch of loaded values */
+    if (numArgs != 0) storeMultiple(cUnit, r_S4, regMask);
+
+    /* Generate the loop epilogue - don't use r_A0 */
+    if ((numArgs > 4) && (numArgs % 4)) {
+        regMask = ((1 << (numArgs & 0x3)) - 1) << 1;
+        /*
+         * Protect the loadMultiple instruction from being reordered with other
+         * Dalvik stack accesses.
+         */
+        loadMultiple(cUnit, r4PC, regMask);
+    }
+    if (numArgs >= 8) {
+        newLIR2(cUnit, kMipsMove, r_A0, r_T0);
+        newLIR2(cUnit, kMipsMove, r_S1, r_T1);
+    }
+
+    /* Save the modulo 4 arguments */
+    if ((numArgs > 4) && (numArgs % 4)) {
+        storeMultiple(cUnit, r_S4, regMask);
+    }
+}
+
+/*
+ * Generate code to setup the call stack then jump to the chaining cell if it
+ * is not a native method.
+ */
+static void genInvokeSingletonCommon(CompilationUnit *cUnit, MIR *mir,
+                                     BasicBlock *bb, MipsLIR *labelList,
+                                     MipsLIR *pcrLabel,
+                                     const Method *calleeMethod)
+{
+    /*
+     * Note: all Dalvik register state should be flushed to
+     * memory by this point, so register usage restrictions no
+     * longer apply.  All temp & preserved registers may be used.
+     */
+    dvmCompilerLockAllTemps(cUnit);
+    MipsLIR *retChainingCell = &labelList[bb->fallThrough->id];
+
+    /* r_A1 = &retChainingCell */
+    dvmCompilerLockTemp(cUnit, r_A1);
+    MipsLIR *addrRetChain = newLIR2(cUnit, kMipsLahi, r_A1, 0);
+    addrRetChain->generic.target = (LIR *) retChainingCell;
+    addrRetChain = newLIR3(cUnit, kMipsLalo, r_A1, r_A1, 0);
+    addrRetChain->generic.target = (LIR *) retChainingCell;
+
+    /* r4PC = dalvikCallsite */
+    loadConstant(cUnit, r4PC,
+                 (int) (cUnit->method->insns + mir->offset));
+    /*
+     * r_A0 = calleeMethod (loaded upon calling genInvokeSingletonCommon)
+     * r_A1 = &ChainingCell
+     * r4PC = callsiteDPC
+     */
+    if (dvmIsNativeMethod(calleeMethod)) {
+        genDispatchToHandler(cUnit, gDvmJit.methodTraceSupport ?
+            TEMPLATE_INVOKE_METHOD_NATIVE_PROF :
+            TEMPLATE_INVOKE_METHOD_NATIVE);
+#if defined(WITH_JIT_TUNING)
+        gDvmJit.invokeNative++;
+#endif
+    } else {
+        genDispatchToHandler(cUnit, gDvmJit.methodTraceSupport ?
+            TEMPLATE_INVOKE_METHOD_CHAIN_PROF :
+            TEMPLATE_INVOKE_METHOD_CHAIN);
+#if defined(WITH_JIT_TUNING)
+        gDvmJit.invokeMonomorphic++;
+#endif
+        /* Branch to the chaining cell */
+        genUnconditionalBranch(cUnit, &labelList[bb->taken->id]);
+    }
+    /* Handle exceptions using the interpreter */
+    genTrap(cUnit, mir->offset, pcrLabel);
+}
+
+/*
+ * Generate code to check the validity of a predicted chain and take actions
+ * based on the result.
+ *
+ * 0x2f1304c4 :  lui      s0,0x2d22(11554)            # s0 <- dalvikPC
+ * 0x2f1304c8 :  ori      s0,s0,0x2d22848c(757236876)
+ * 0x2f1304cc :  lahi/lui a1,0x2f13(12051)            # a1 <- &retChainingCell
+ * 0x2f1304d0 :  lalo/ori a1,a1,0x2f13055c(789775708)
+ * 0x2f1304d4 :  lahi/lui a2,0x2f13(12051)            # a2 <- &predictedChainingCell
+ * 0x2f1304d8 :  lalo/ori a2,a2,0x2f13056c(789775724)
+ * 0x2f1304dc :  jal      0x2f12d1ec(789762540)       # call TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN
+ * 0x2f1304e0 :  nop
+ * 0x2f1304e4 :  b        0x2f13056c (L0x11ec10)      # off to the predicted chain
+ * 0x2f1304e8 :  nop
+ * 0x2f1304ec :  b        0x2f13054c (L0x11fc80)      # punt to the interpreter
+ * 0x2f1304f0 :  lui      a0,0x2d22(11554)
+ * 0x2f1304f4 :  lw       a0,156(s4)                  # a0 <- this->class->vtable[methodIdx]
+ * 0x2f1304f8 :  bgtz     a1,0x2f13051c (L0x11fa40)   # if >0 don't rechain
+ * 0x2f1304fc :  nop
+ * 0x2f130500 :  lui      t9,0x2aba(10938)
+ * 0x2f130504 :  ori      t9,t9,0x2abae3f8(716891128)
+ * 0x2f130508 :  move     a1,s2
+ * 0x2f13050c :  jalr     ra,t9                       # call dvmJitToPatchPredictedChain
+ * 0x2f130510 :  nop
+ * 0x2f130514 :  lw       gp,84(sp)
+ * 0x2f130518 :  move     a0,v0
+ * 0x2f13051c :  lahi/lui a1,0x2f13(12051)            # a1 <- &retChainingCell
+ * 0x2f130520 :  lalo/ori a1,a1,0x2f13055c(789775708)
+ * 0x2f130524 :  jal      0x2f12d0c4(789762244)       # call TEMPLATE_INVOKE_METHOD_NO_OPT
+ * 0x2f130528 :  nop
+ */
+static void genInvokeVirtualCommon(CompilationUnit *cUnit, MIR *mir,
+                                   int methodIndex,
+                                   MipsLIR *retChainingCell,
+                                   MipsLIR *predChainingCell,
+                                   MipsLIR *pcrLabel)
+{
+    /*
+     * Note: all Dalvik register state should be flushed to
+     * memory by this point, so register usage restrictions no
+     * longer apply.  Lock temps to prevent them from being
+     * allocated by utility routines.
+     */
+    dvmCompilerLockAllTemps(cUnit);
+
+    /*
+     * For verbose printing, store the method pointer in operands[1] first as
+     * operands[0] will be clobbered in dvmCompilerMIR2LIR.
+     */
+    predChainingCell->operands[1] = (int) mir->meta.callsiteInfo->method;
+
+    /* "this" is already left in r_A0 by genProcessArgs* */
+
+    /* r4PC = dalvikCallsite */
+    loadConstant(cUnit, r4PC,
+                 (int) (cUnit->method->insns + mir->offset));
+
+    /* r_A1 = &retChainingCell */
+    MipsLIR *addrRetChain = newLIR2(cUnit, kMipsLahi, r_A1, 0);
+    addrRetChain->generic.target = (LIR *) retChainingCell;
+    addrRetChain = newLIR3(cUnit, kMipsLalo, r_A1, r_A1, 0);
+    addrRetChain->generic.target = (LIR *) retChainingCell;
+
+    /* r_A2 = &predictedChainingCell */
+    MipsLIR *predictedChainingCell = newLIR2(cUnit, kMipsLahi, r_A2, 0);
+    predictedChainingCell->generic.target = (LIR *) predChainingCell;
+    predictedChainingCell = newLIR3(cUnit, kMipsLalo, r_A2, r_A2, 0);
+    predictedChainingCell->generic.target = (LIR *) predChainingCell;
+
+    genDispatchToHandler(cUnit, gDvmJit.methodTraceSupport ?
+        TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF :
+        TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN);
+
+    /* return through ra - jump to the chaining cell */
+    genUnconditionalBranch(cUnit, predChainingCell);
+
+    /*
+     * null-check on "this" may have been eliminated, but we still need a PC-
+     * reconstruction label for stack overflow bailout.
+     */
+    if (pcrLabel == NULL) {
+        int dPC = (int) (cUnit->method->insns + mir->offset);
+        pcrLabel = (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR), true);
+        pcrLabel->opcode = kMipsPseudoPCReconstructionCell;
+        pcrLabel->operands[0] = dPC;
+        pcrLabel->operands[1] = mir->offset;
+        /* Insert the place holder to the growable list */
+        dvmInsertGrowableList(&cUnit->pcReconstructionList,
+                              (intptr_t) pcrLabel);
+    }
+
+    /* return through ra+8 - punt to the interpreter */
+    genUnconditionalBranch(cUnit, pcrLabel);
+
+    /*
+     * return through ra+16 - fully resolve the callee method.
+     * r_A1 <- count
+     * r_A2 <- &predictedChainCell
+     * r_A3 <- this->class
+     * r4 <- dPC
+     * r_S4 <- this->class->vtable
+     */
+
+    /* r_A0 <- calleeMethod */
+    loadWordDisp(cUnit, r_S4, methodIndex * 4, r_A0);
+
+    /* Check if rechain limit is reached */
+    MipsLIR *bypassRechaining = opCompareBranch(cUnit, kMipsBgtz, r_A1, -1);
+
+    LOAD_FUNC_ADDR(cUnit, r_T9, (int) dvmJitToPatchPredictedChain);
+
+    genRegCopy(cUnit, r_A1, rSELF);
+
+    /*
+     * r_A0 = calleeMethod
+     * r_A2 = &predictedChainingCell
+     * r_A3 = class
+     *
+     * &returnChainingCell has been loaded into r_A1 but is not needed
+     * when patching the chaining cell; it will be clobbered on return,
+     * so it is reconstructed again below.
+     */
+    opReg(cUnit, kOpBlx, r_T9);
+    newLIR3(cUnit, kMipsLw, r_GP, STACK_OFFSET_GP, r_SP);
+    newLIR2(cUnit, kMipsMove, r_A0, r_V0);
+
+    /* r_A1 = &retChainingCell */
+    addrRetChain = newLIR2(cUnit, kMipsLahi, r_A1, 0);
+    addrRetChain->generic.target = (LIR *) retChainingCell;
+    bypassRechaining->generic.target = (LIR *) addrRetChain;
+    addrRetChain = newLIR3(cUnit, kMipsLalo, r_A1, r_A1, 0);
+    addrRetChain->generic.target = (LIR *) retChainingCell;
+
+    /*
+     * r_A0 = calleeMethod,
+     * r_A1 = &ChainingCell,
+     * r4PC = callsiteDPC,
+     */
+    genDispatchToHandler(cUnit, gDvmJit.methodTraceSupport ?
+        TEMPLATE_INVOKE_METHOD_NO_OPT_PROF :
+        TEMPLATE_INVOKE_METHOD_NO_OPT);
+#if defined(WITH_JIT_TUNING)
+    gDvmJit.invokePolymorphic++;
+#endif
+    /* Handle exceptions using the interpreter */
+    genTrap(cUnit, mir->offset, pcrLabel);
+}
+
+/* "this" pointer is already in r0 */
+static void genInvokeVirtualWholeMethod(CompilationUnit *cUnit,
+                                        MIR *mir,
+                                        void *calleeAddr,
+                                        MipsLIR *retChainingCell)
+{
+    CallsiteInfo *callsiteInfo = mir->meta.callsiteInfo;
+    dvmCompilerLockAllTemps(cUnit);
+
+    loadClassPointer(cUnit, r_A1, (int) callsiteInfo);
+
+    loadWordDisp(cUnit, r_A0, offsetof(Object, clazz), r_A2);
+    /*
+     * Set the misPredBranchOver target so that it will be generated when the
+     * code for the non-optimized invoke is generated.
+     */
+    /* Branch to the slow path if classes are not equal */
+    MipsLIR *classCheck = opCompareBranch(cUnit, kMipsBne, r_A1, r_A2);
+
+    /* a0 = the Dalvik PC of the callsite */
+    loadConstant(cUnit, r_A0, (int) (cUnit->method->insns + mir->offset));
+
+    newLIR1(cUnit, kMipsJal, (int) calleeAddr);
+    genUnconditionalBranch(cUnit, retChainingCell);
+
+    /* Target of slow path */
+    MipsLIR *slowPathLabel = newLIR0(cUnit, kMipsPseudoTargetLabel);
+
+    slowPathLabel->defMask = ENCODE_ALL;
+    classCheck->generic.target = (LIR *) slowPathLabel;
+
+    // FIXME
+    cUnit->printMe = true;
+}
+
+static void genInvokeSingletonWholeMethod(CompilationUnit *cUnit,
+                                          MIR *mir,
+                                          void *calleeAddr,
+                                          MipsLIR *retChainingCell)
+{
+    /* a0 = the Dalvik PC of the callsite */
+    loadConstant(cUnit, r_A0, (int) (cUnit->method->insns + mir->offset));
+
+    newLIR1(cUnit, kMipsJal, (int) calleeAddr);
+    genUnconditionalBranch(cUnit, retChainingCell);
+
+    // FIXME
+    cUnit->printMe = true;
+}
+
+/* Generate a branch to go back to the interpreter */
+static void genPuntToInterp(CompilationUnit *cUnit, unsigned int offset)
+{
+    /* a0 = dalvik pc */
+    dvmCompilerFlushAllRegs(cUnit);
+    loadConstant(cUnit, r_A0, (int) (cUnit->method->insns + offset));
+#if 0 /* MIPSTODO temporary workaround for unaligned access on sigma hardware;
+             this can be removed when we're no longer punting to
+             genInterpSingleStep for opcodes that haven't been activated yet */
+    loadWordDisp(cUnit, r_A0, offsetof(Object, clazz), r_A3);
+#endif
+    loadWordDisp(cUnit, rSELF, offsetof(Thread,
+                 jitToInterpEntries.dvmJitToInterpPunt), r_A1);
+
+    opReg(cUnit, kOpBlx, r_A1);
+}
+
+/*
+ * Attempt to single step one instruction using the interpreter and return
+ * to the compiled code for the next Dalvik instruction
+ */
+static void genInterpSingleStep(CompilationUnit *cUnit, MIR *mir)
+{
+    int flags = dexGetFlagsFromOpcode(mir->dalvikInsn.opcode);
+    int flagsToCheck = kInstrCanBranch | kInstrCanSwitch | kInstrCanReturn;
+
+    // Single stepping is considered a loop mode breaker
+    if (cUnit->jitMode == kJitLoop) {
+        cUnit->quitLoopMode = true;
+        return;
+    }
+
+    // If already optimized out, just ignore it
+    if (mir->dalvikInsn.opcode == OP_NOP)
+        return;
+
+    // Ugly, but necessary: flush all Dalvik regs so the interpreter can find them
+    dvmCompilerFlushAllRegs(cUnit);
+
+    if ((mir->next == NULL) || (flags & flagsToCheck)) {
+       genPuntToInterp(cUnit, mir->offset);
+       return;
+    }
+    int entryAddr = offsetof(Thread,
+                             jitToInterpEntries.dvmJitToInterpSingleStep);
+    loadWordDisp(cUnit, rSELF, entryAddr, r_A2);
+    /* a0 = dalvik pc */
+    loadConstant(cUnit, r_A0, (int) (cUnit->method->insns + mir->offset));
+    /* a1 = dalvik pc of following instruction */
+    loadConstant(cUnit, r_A1, (int) (cUnit->method->insns + mir->next->offset));
+    opReg(cUnit, kOpBlx, r_A2);
+}
+
+/*
+ * To prevent a thread in a monitor wait from blocking the Jit from
+ * resetting the code cache, heavyweight monitor lock will not
+ * be allowed to return to an existing translation.  Instead, we will
+ * handle them by branching to a handler, which will in turn call the
+ * runtime lock routine and then branch directly back to the
+ * interpreter main loop.  Given the high cost of the heavyweight
+ * lock operation, this additional cost should be slight (especially when
+ * considering that we expect the vast majority of lock operations to
+ * use the fast-path thin lock bypass).
+ */
+static void genMonitorPortable(CompilationUnit *cUnit, MIR *mir)
+{
+    bool isEnter = (mir->dalvikInsn.opcode == OP_MONITOR_ENTER);
+    genExportPC(cUnit, mir);
+    dvmCompilerFlushAllRegs(cUnit);   /* Send everything to home location */
+    RegLocation rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
+    loadValueDirectFixed(cUnit, rlSrc, r_A1);
+    genRegCopy(cUnit, r_A0, rSELF);
+    genNullCheck(cUnit, rlSrc.sRegLow, r_A1, mir->offset, NULL);
+    if (isEnter) {
+        /* Get dPC of next insn */
+        loadConstant(cUnit, r4PC, (int)(cUnit->method->insns + mir->offset +
+                 dexGetWidthFromOpcode(OP_MONITOR_ENTER)));
+        genDispatchToHandler(cUnit, TEMPLATE_MONITOR_ENTER);
+    } else {
+        LOAD_FUNC_ADDR(cUnit, r_T9, (int)dvmUnlockObject);
+        /* Do the call */
+        opReg(cUnit, kOpBlx, r_T9);
+        newLIR3(cUnit, kMipsLw, r_GP, STACK_OFFSET_GP, r_SP);
+        /* Did we throw? */
+        MipsLIR *branchOver = opCompareBranch(cUnit, kMipsBne, r_V0, r_ZERO);
+        loadConstant(cUnit, r_A0,
+                     (int) (cUnit->method->insns + mir->offset +
+                     dexGetWidthFromOpcode(OP_MONITOR_EXIT)));
+        genDispatchToHandler(cUnit, TEMPLATE_THROW_EXCEPTION_COMMON);
+        MipsLIR *target = newLIR0(cUnit, kMipsPseudoTargetLabel);
+        target->defMask = ENCODE_ALL;
+        branchOver->generic.target = (LIR *) target;
+        dvmCompilerClobberCallRegs(cUnit);
+    }
+}
+
+/*
+ * Fetch self->interpBreak.ctl.breakFlags. If the breakFlags are non-zero,
+ * punt to the interpreter.
+ */
+static void genSuspendPoll(CompilationUnit *cUnit, MIR *mir)
+{
+    int rTemp = dvmCompilerAllocTemp(cUnit);
+    MipsLIR *ld;
+    ld = loadBaseDisp(cUnit, NULL, rSELF,
+                      offsetof(Thread, interpBreak.ctl.breakFlags),
+                      rTemp, kUnsignedByte, INVALID_SREG);
+    setMemRefType(ld, true /* isLoad */, kMustNotAlias);
+    genRegImmCheck(cUnit, kMipsCondNe, rTemp, 0, mir->offset, NULL);
+}
+
+/*
+ * The following are the first-level codegen routines that analyze the format
+ * of each bytecode then either dispatch special purpose codegen routines
+ * or produce corresponding MIPS instructions directly.
+ */
+
+static bool handleFmt10t_Fmt20t_Fmt30t(CompilationUnit *cUnit, MIR *mir,
+                                       BasicBlock *bb, MipsLIR *labelList)
+{
+    /* backward branch? */
+    bool backwardBranch = (bb->taken->startOffset <= mir->offset);
+
+    if (backwardBranch &&
+        (gDvmJit.genSuspendPoll || cUnit->jitMode == kJitLoop)) {
+        genSuspendPoll(cUnit, mir);
+    }
+
+    int numPredecessors = dvmCountSetBits(bb->taken->predecessors);
+    /*
+     * Things could be hoisted out of the taken block into the predecessor, so
+     * make sure it is dominated by the predecessor.
+     */
+    if (numPredecessors == 1 && bb->taken->visited == false &&
+        bb->taken->blockType == kDalvikByteCode) {
+        cUnit->nextCodegenBlock = bb->taken;
+    } else {
+        /* For OP_GOTO, OP_GOTO_16, and OP_GOTO_32 */
+        genUnconditionalBranch(cUnit, &labelList[bb->taken->id]);
+    }
+    return false;
+}
+
+static bool handleFmt10x(CompilationUnit *cUnit, MIR *mir)
+{
+    Opcode dalvikOpcode = mir->dalvikInsn.opcode;
+    if ((dalvikOpcode >= OP_UNUSED_3E) && (dalvikOpcode <= OP_UNUSED_43)) {
+        LOGE("Codegen: got unused opcode %#x",dalvikOpcode);
+        return true;
+    }
+    switch (dalvikOpcode) {
+        case OP_RETURN_VOID_BARRIER:
+            dvmCompilerGenMemBarrier(cUnit, 0);
+            // Intentional fallthrough
+        case OP_RETURN_VOID:
+            genReturnCommon(cUnit, mir);
+            break;
+        case OP_UNUSED_73:
+        case OP_UNUSED_79:
+        case OP_UNUSED_7A:
+        case OP_DISPATCH_FF:
+            LOGE("Codegen: got unused opcode %#x",dalvikOpcode);
+            return true;
+        case OP_NOP:
+            break;
+        default:
+            return true;
+    }
+    return false;
+}
+
+static bool handleFmt11n_Fmt31i(CompilationUnit *cUnit, MIR *mir)
+{
+    RegLocation rlDest;
+    RegLocation rlResult;
+    if (mir->ssaRep->numDefs == 2) {
+        rlDest = dvmCompilerGetDestWide(cUnit, mir, 0, 1);
+    } else {
+        rlDest = dvmCompilerGetDest(cUnit, mir, 0);
+    }
+
+    switch (mir->dalvikInsn.opcode) {
+        case OP_CONST:
+        case OP_CONST_4: {
+            rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kAnyReg, true);
+            loadConstantNoClobber(cUnit, rlResult.lowReg, mir->dalvikInsn.vB);
+            storeValue(cUnit, rlDest, rlResult);
+            break;
+        }
+        case OP_CONST_WIDE_32: {
+            //TUNING: single routine to load constant pair to support doubles
+            //TUNING: load 0/-1 separately to avoid load dependency
+            rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
+            loadConstantNoClobber(cUnit, rlResult.lowReg, mir->dalvikInsn.vB);
+            opRegRegImm(cUnit, kOpAsr, rlResult.highReg,
+                        rlResult.lowReg, 31);
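+            // The asr by 31 replicates the sign bit, sign-extending the
+            // 32-bit constant into the high word.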
+            storeValueWide(cUnit, rlDest, rlResult);
+            break;
+        }
+        default:
+            return true;
+    }
+    return false;
+}
+
+static bool handleFmt21h(CompilationUnit *cUnit, MIR *mir)
+{
+    RegLocation rlDest;
+    RegLocation rlResult;
+    if (mir->ssaRep->numDefs == 2) {
+        rlDest = dvmCompilerGetDestWide(cUnit, mir, 0, 1);
+    } else {
+        rlDest = dvmCompilerGetDest(cUnit, mir, 0);
+    }
+    rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kAnyReg, true);
+
+    switch (mir->dalvikInsn.opcode) {
+        case OP_CONST_HIGH16: {
+            loadConstantNoClobber(cUnit, rlResult.lowReg,
+                                  mir->dalvikInsn.vB << 16);
+            storeValue(cUnit, rlDest, rlResult);
+            break;
+        }
+        case OP_CONST_WIDE_HIGH16: {
+            loadConstantValueWide(cUnit, rlResult.lowReg, rlResult.highReg,
+                                  0, mir->dalvikInsn.vB << 16);
+            storeValueWide(cUnit, rlDest, rlResult);
+            break;
+        }
+        default:
+            return true;
+    }
+    return false;
+}
+
+static bool handleFmt20bc_Fmt40sc(CompilationUnit *cUnit, MIR *mir)
+{
+    /* For OP_THROW_VERIFICATION_ERROR & OP_THROW_VERIFICATION_ERROR_JUMBO */
+    genInterpSingleStep(cUnit, mir);
+    return false;
+}
+
+static bool handleFmt21c_Fmt31c_Fmt41c(CompilationUnit *cUnit, MIR *mir)
+{
+    RegLocation rlResult;
+    RegLocation rlDest;
+    RegLocation rlSrc;
+
+    switch (mir->dalvikInsn.opcode) {
+        case OP_CONST_STRING_JUMBO:
+        case OP_CONST_STRING: {
+            void *strPtr = (void*)
+              (cUnit->method->clazz->pDvmDex->pResStrings[mir->dalvikInsn.vB]);
+
+            if (strPtr == NULL) {
+                BAIL_LOOP_COMPILATION();
+                LOGE("Unexpected null string");
+                dvmAbort();
+            }
+
+            rlDest = dvmCompilerGetDest(cUnit, mir, 0);
+            rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
+            loadConstantNoClobber(cUnit, rlResult.lowReg, (int) strPtr);
+            storeValue(cUnit, rlDest, rlResult);
+            break;
+        }
+        case OP_CONST_CLASS:
+        case OP_CONST_CLASS_JUMBO: {
+            void *classPtr = (void*)
+              (cUnit->method->clazz->pDvmDex->pResClasses[mir->dalvikInsn.vB]);
+
+            if (classPtr == NULL) {
+                BAIL_LOOP_COMPILATION();
+                LOGE("Unexpected null class");
+                dvmAbort();
+            }
+
+            rlDest = dvmCompilerGetDest(cUnit, mir, 0);
+            rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
+            loadConstantNoClobber(cUnit, rlResult.lowReg, (int) classPtr);
+            storeValue(cUnit, rlDest, rlResult);
+            break;
+        }
+        case OP_SGET:
+        case OP_SGET_VOLATILE:
+        case OP_SGET_VOLATILE_JUMBO:
+        case OP_SGET_JUMBO:
+        case OP_SGET_OBJECT:
+        case OP_SGET_OBJECT_VOLATILE:
+        case OP_SGET_OBJECT_VOLATILE_JUMBO:
+        case OP_SGET_OBJECT_JUMBO:
+        case OP_SGET_BOOLEAN:
+        case OP_SGET_BOOLEAN_JUMBO:
+        case OP_SGET_CHAR:
+        case OP_SGET_CHAR_JUMBO:
+        case OP_SGET_BYTE:
+        case OP_SGET_BYTE_JUMBO:
+        case OP_SGET_SHORT:
+        case OP_SGET_SHORT_JUMBO: {
+            int valOffset = OFFSETOF_MEMBER(StaticField, value);
+            int tReg = dvmCompilerAllocTemp(cUnit);
+            bool isVolatile;
+            const Method *method = (mir->OptimizationFlags & MIR_CALLEE) ?
+                mir->meta.calleeMethod : cUnit->method;
+            void *fieldPtr = (void*)
+              (method->clazz->pDvmDex->pResFields[mir->dalvikInsn.vB]);
+
+            if (fieldPtr == NULL) {
+                BAIL_LOOP_COMPILATION();
+                LOGE("Unexpected null static field");
+                dvmAbort();
+            }
+
+            /*
+             * On SMP systems, Dalvik opcodes found to be referencing
+             * volatile fields are rewritten to their _VOLATILE variant.
+             * However, this does not happen on non-SMP systems. The JIT
+             * still needs to know about volatility to avoid unsafe
+             * optimizations so we determine volatility based on either
+             * the opcode or the field access flags.
+             */
+#if ANDROID_SMP != 0
+            Opcode opcode = mir->dalvikInsn.opcode;
+            isVolatile = (opcode == OP_SGET_VOLATILE) ||
+                         (opcode == OP_SGET_VOLATILE_JUMBO) ||
+                         (opcode == OP_SGET_OBJECT_VOLATILE) ||
+                         (opcode == OP_SGET_OBJECT_VOLATILE_JUMBO);
+            assert(isVolatile == dvmIsVolatileField((Field *) fieldPtr));
+#else
+            isVolatile = dvmIsVolatileField((Field *) fieldPtr);
+#endif
+
+            rlDest = dvmCompilerGetDest(cUnit, mir, 0);
+            rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kAnyReg, true);
+            loadConstant(cUnit, tReg, (int) fieldPtr + valOffset);
+
+            if (isVolatile) {
+                dvmCompilerGenMemBarrier(cUnit, 0);
+            }
+            HEAP_ACCESS_SHADOW(true);
+            loadWordDisp(cUnit, tReg, 0, rlResult.lowReg);
+            HEAP_ACCESS_SHADOW(false);
+
+            storeValue(cUnit, rlDest, rlResult);
+            break;
+        }
+        case OP_SGET_WIDE:
+        case OP_SGET_WIDE_JUMBO: {
+            int valOffset = OFFSETOF_MEMBER(StaticField, value);
+            const Method *method = (mir->OptimizationFlags & MIR_CALLEE) ?
+                mir->meta.calleeMethod : cUnit->method;
+            void *fieldPtr = (void*)
+              (method->clazz->pDvmDex->pResFields[mir->dalvikInsn.vB]);
+
+            if (fieldPtr == NULL) {
+                BAIL_LOOP_COMPILATION();
+                LOGE("Unexpected null static field");
+                dvmAbort();
+            }
+
+            int tReg = dvmCompilerAllocTemp(cUnit);
+            rlDest = dvmCompilerGetDestWide(cUnit, mir, 0, 1);
+            rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kAnyReg, true);
+            loadConstant(cUnit, tReg, (int) fieldPtr + valOffset);
+
+            HEAP_ACCESS_SHADOW(true);
+            loadPair(cUnit, tReg, rlResult.lowReg, rlResult.highReg);
+            HEAP_ACCESS_SHADOW(false);
+
+            storeValueWide(cUnit, rlDest, rlResult);
+            break;
+        }
+        case OP_SPUT:
+        case OP_SPUT_VOLATILE:
+        case OP_SPUT_VOLATILE_JUMBO:
+        case OP_SPUT_JUMBO:
+        case OP_SPUT_OBJECT:
+        case OP_SPUT_OBJECT_VOLATILE:
+        case OP_SPUT_OBJECT_VOLATILE_JUMBO:
+        case OP_SPUT_OBJECT_JUMBO:
+        case OP_SPUT_BOOLEAN:
+        case OP_SPUT_BOOLEAN_JUMBO:
+        case OP_SPUT_CHAR:
+        case OP_SPUT_CHAR_JUMBO:
+        case OP_SPUT_BYTE:
+        case OP_SPUT_BYTE_JUMBO:
+        case OP_SPUT_SHORT:
+        case OP_SPUT_SHORT_JUMBO: {
+            int valOffset = OFFSETOF_MEMBER(StaticField, value);
+            int tReg = dvmCompilerAllocTemp(cUnit);
+            int objHead = 0;
+            bool isVolatile;
+            bool isSputObject;
+            const Method *method = (mir->OptimizationFlags & MIR_CALLEE) ?
+                mir->meta.calleeMethod : cUnit->method;
+            void *fieldPtr = (void*)
+              (method->clazz->pDvmDex->pResFields[mir->dalvikInsn.vB]);
+            Opcode opcode = mir->dalvikInsn.opcode;
+
+            if (fieldPtr == NULL) {
+                BAIL_LOOP_COMPILATION();
+                LOGE("Unexpected null static field");
+                dvmAbort();
+            }
+
+#if ANDROID_SMP != 0
+            isVolatile = (opcode == OP_SPUT_VOLATILE) ||
+                         (opcode == OP_SPUT_VOLATILE_JUMBO) ||
+                         (opcode == OP_SPUT_OBJECT_VOLATILE) ||
+                         (opcode == OP_SPUT_OBJECT_VOLATILE_JUMBO);
+            assert(isVolatile == dvmIsVolatileField((Field *) fieldPtr));
+#else
+            isVolatile = dvmIsVolatileField((Field *) fieldPtr);
+#endif
+
+            isSputObject = (opcode == OP_SPUT_OBJECT) ||
+                           (opcode == OP_SPUT_OBJECT_JUMBO) ||
+                           (opcode == OP_SPUT_OBJECT_VOLATILE) ||
+                           (opcode == OP_SPUT_OBJECT_VOLATILE_JUMBO);
+
+            rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
+            rlSrc = loadValue(cUnit, rlSrc, kAnyReg);
+            loadConstant(cUnit, tReg, (int) fieldPtr);
+            if (isSputObject) {
+                objHead = dvmCompilerAllocTemp(cUnit);
+                loadWordDisp(cUnit, tReg, OFFSETOF_MEMBER(Field, clazz), objHead);
+            }
+            if (isVolatile) {
+                dvmCompilerGenMemBarrier(cUnit, 0);
+            }
+            HEAP_ACCESS_SHADOW(true);
+            storeWordDisp(cUnit, tReg, valOffset, rlSrc.lowReg);
+            dvmCompilerFreeTemp(cUnit, tReg);
+            HEAP_ACCESS_SHADOW(false);
+            if (isVolatile) {
+                dvmCompilerGenMemBarrier(cUnit, 0);
+            }
+            if (isSputObject) {
+                /* NOTE: marking card based sfield->clazz */
+                markCard(cUnit, rlSrc.lowReg, objHead);
+                dvmCompilerFreeTemp(cUnit, objHead);
+            }
+
+            break;
+        }
+        case OP_SPUT_WIDE:
+        case OP_SPUT_WIDE_JUMBO: {
+            int tReg = dvmCompilerAllocTemp(cUnit);
+            int valOffset = OFFSETOF_MEMBER(StaticField, value);
+            const Method *method = (mir->OptimizationFlags & MIR_CALLEE) ?
+                mir->meta.calleeMethod : cUnit->method;
+            void *fieldPtr = (void*)
+              (method->clazz->pDvmDex->pResFields[mir->dalvikInsn.vB]);
+
+            if (fieldPtr == NULL) {
+                BAIL_LOOP_COMPILATION();
+                LOGE("Unexpected null static field");
+                dvmAbort();
+            }
+
+            rlSrc = dvmCompilerGetSrcWide(cUnit, mir, 0, 1);
+            rlSrc = loadValueWide(cUnit, rlSrc, kAnyReg);
+            loadConstant(cUnit, tReg, (int) fieldPtr + valOffset);
+
+            HEAP_ACCESS_SHADOW(true);
+            storePair(cUnit, tReg, rlSrc.lowReg, rlSrc.highReg);
+            HEAP_ACCESS_SHADOW(false);
+            break;
+        }
+        case OP_NEW_INSTANCE:
+        case OP_NEW_INSTANCE_JUMBO: {
+            /*
+             * Obey the calling convention and don't mess with the register
+             * usage.
+             */
+            ClassObject *classPtr = (ClassObject *)
+              (cUnit->method->clazz->pDvmDex->pResClasses[mir->dalvikInsn.vB]);
+
+            if (classPtr == NULL) {
+                BAIL_LOOP_COMPILATION();
+                LOGE("Unexpected null class");
+                dvmAbort();
+            }
+
+            /*
+             * If it is going to throw, it should not have made it into the
+             * trace to begin with.  However, the allocation might throw, so
+             * we still need to genExportPC().
+             */
+            assert((classPtr->accessFlags & (ACC_INTERFACE|ACC_ABSTRACT)) == 0);
+            dvmCompilerFlushAllRegs(cUnit);   /* Everything to home location */
+            genExportPC(cUnit, mir);
+            LOAD_FUNC_ADDR(cUnit, r_T9, (int)dvmAllocObject);
+            loadConstant(cUnit, r_A0, (int) classPtr);
+            loadConstant(cUnit, r_A1, ALLOC_DONT_TRACK);
+            opReg(cUnit, kOpBlx, r_T9);
+            newLIR3(cUnit, kMipsLw, r_GP, STACK_OFFSET_GP, r_SP);
+            dvmCompilerClobberCallRegs(cUnit);
+            /* generate a branch over if allocation is successful */
+            MipsLIR *branchOver = opCompareBranch(cUnit, kMipsBne, r_V0, r_ZERO);
+
+            /*
+             * OOM exception needs to be thrown here and cannot re-execute
+             */
+            loadConstant(cUnit, r_A0,
+                         (int) (cUnit->method->insns + mir->offset));
+            genDispatchToHandler(cUnit, TEMPLATE_THROW_EXCEPTION_COMMON);
+            /* noreturn */
+
+            MipsLIR *target = newLIR0(cUnit, kMipsPseudoTargetLabel);
+            target->defMask = ENCODE_ALL;
+            branchOver->generic.target = (LIR *) target;
+            rlDest = dvmCompilerGetDest(cUnit, mir, 0);
+            rlResult = dvmCompilerGetReturn(cUnit);
+            storeValue(cUnit, rlDest, rlResult);
+            break;
+        }
+        case OP_CHECK_CAST:
+        case OP_CHECK_CAST_JUMBO: {
+            /*
+             * Obey the calling convention and don't mess with the register
+             * usage.
+             */
+            ClassObject *classPtr =
+              (cUnit->method->clazz->pDvmDex->pResClasses[mir->dalvikInsn.vB]);
+            /*
+             * Note: It is possible that classPtr is NULL at this point,
+             * even though this instruction has been successfully interpreted.
+             * If the previous interpretation had a null source, the
+             * interpreter would not have bothered to resolve the clazz.
+             * Bail out to the interpreter in this case, and log it
+             * so that we can tell if it happens frequently.
+             */
+            if (classPtr == NULL) {
+                BAIL_LOOP_COMPILATION();
+                LOGVV("null clazz in OP_CHECK_CAST, single-stepping");
+                genInterpSingleStep(cUnit, mir);
+                return false;
+            }
+            dvmCompilerFlushAllRegs(cUnit);   /* Everything to home location */
+            loadConstant(cUnit, r_A1, (int) classPtr);
+            rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
+            rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
+            MipsLIR *branch1 = opCompareBranch(cUnit, kMipsBeqz, rlSrc.lowReg, -1);
+            /*
+             *  rlSrc.lowReg now contains the object reference.  Note that
+             *  it could have been allocated r_A0, but we're okay so long
+             *  as we don't do anything destructive until r_A0 is loaded
+             *  with clazz.
+             */
+            /* r_A0 now contains object->clazz */
+            loadWordDisp(cUnit, rlSrc.lowReg, offsetof(Object, clazz), r_A0);
+            LOAD_FUNC_ADDR(cUnit, r_T9, (int)dvmInstanceofNonTrivial);
+            MipsLIR *branch2 = opCompareBranch(cUnit, kMipsBeq, r_A0, r_A1);
+            opReg(cUnit, kOpBlx, r_T9);
+            newLIR3(cUnit, kMipsLw, r_GP, STACK_OFFSET_GP, r_SP);
+            dvmCompilerClobberCallRegs(cUnit);
+            /*
+             * If null, check cast failed - punt to the interpreter.  Because
+             * interpreter will be the one throwing, we don't need to
+             * genExportPC() here.
+             */
+            genRegCopy(cUnit, r_A0, r_V0);
+            genZeroCheck(cUnit, r_V0, mir->offset, NULL);
+            /* check cast passed - branch target here */
+            MipsLIR *target = newLIR0(cUnit, kMipsPseudoTargetLabel);
+            target->defMask = ENCODE_ALL;
+            branch1->generic.target = (LIR *)target;
+            branch2->generic.target = (LIR *)target;
+            break;
+        }
+        case OP_SGET_WIDE_VOLATILE:
+        case OP_SGET_WIDE_VOLATILE_JUMBO:
+        case OP_SPUT_WIDE_VOLATILE:
+        case OP_SPUT_WIDE_VOLATILE_JUMBO:
+            genInterpSingleStep(cUnit, mir);
+            break;
+        default:
+            return true;
+    }
+    return false;
+}
+
+static bool handleFmt11x(CompilationUnit *cUnit, MIR *mir)
+{
+    Opcode dalvikOpcode = mir->dalvikInsn.opcode;
+    RegLocation rlResult;
+    switch (dalvikOpcode) {
+        case OP_MOVE_EXCEPTION: {
+            int exOffset = offsetof(Thread, exception);
+            int resetReg = dvmCompilerAllocTemp(cUnit);
+            RegLocation rlDest = dvmCompilerGetDest(cUnit, mir, 0);
+            rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
+            loadWordDisp(cUnit, rSELF, exOffset, rlResult.lowReg);
+            loadConstant(cUnit, resetReg, 0);
+            storeWordDisp(cUnit, rSELF, exOffset, resetReg);
+            storeValue(cUnit, rlDest, rlResult);
+            break;
+        }
+        case OP_MOVE_RESULT:
+        case OP_MOVE_RESULT_OBJECT: {
+            /* An inlined move result is effectively a no-op */
+            if (mir->OptimizationFlags & MIR_INLINED)
+                break;
+            RegLocation rlDest = dvmCompilerGetDest(cUnit, mir, 0);
+            RegLocation rlSrc = LOC_DALVIK_RETURN_VAL;
+            rlSrc.fp = rlDest.fp;
+            storeValue(cUnit, rlDest, rlSrc);
+            break;
+        }
+        case OP_MOVE_RESULT_WIDE: {
+            /* An inlined move result is effectively a no-op */
+            if (mir->OptimizationFlags & MIR_INLINED)
+                break;
+            RegLocation rlDest = dvmCompilerGetDestWide(cUnit, mir, 0, 1);
+            RegLocation rlSrc = LOC_DALVIK_RETURN_VAL_WIDE;
+            rlSrc.fp = rlDest.fp;
+            storeValueWide(cUnit, rlDest, rlSrc);
+            break;
+        }
+        case OP_RETURN_WIDE: {
+            RegLocation rlSrc = dvmCompilerGetSrcWide(cUnit, mir, 0, 1);
+            RegLocation rlDest = LOC_DALVIK_RETURN_VAL_WIDE;
+            rlDest.fp = rlSrc.fp;
+            storeValueWide(cUnit, rlDest, rlSrc);
+            genReturnCommon(cUnit, mir);
+            break;
+        }
+        case OP_RETURN:
+        case OP_RETURN_OBJECT: {
+            RegLocation rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
+            RegLocation rlDest = LOC_DALVIK_RETURN_VAL;
+            rlDest.fp = rlSrc.fp;
+            storeValue(cUnit, rlDest, rlSrc);
+            genReturnCommon(cUnit, mir);
+            break;
+        }
+        case OP_MONITOR_EXIT:
+        case OP_MONITOR_ENTER:
+            genMonitor(cUnit, mir);
+            break;
+        case OP_THROW:
+            genInterpSingleStep(cUnit, mir);
+            break;
+        default:
+            return true;
+    }
+    return false;
+}
+
+static bool handleFmt12x(CompilationUnit *cUnit, MIR *mir)
+{
+    Opcode opcode = mir->dalvikInsn.opcode;
+    RegLocation rlDest;
+    RegLocation rlSrc;
+    RegLocation rlResult;
+
+    if ((opcode >= OP_ADD_INT_2ADDR) && (opcode <= OP_REM_DOUBLE_2ADDR)) {
+        return genArithOp(cUnit, mir);
+    }
+
+    if (mir->ssaRep->numUses == 2)
+        rlSrc = dvmCompilerGetSrcWide(cUnit, mir, 0, 1);
+    else
+        rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
+    if (mir->ssaRep->numDefs == 2)
+        rlDest = dvmCompilerGetDestWide(cUnit, mir, 0, 1);
+    else
+        rlDest = dvmCompilerGetDest(cUnit, mir, 0);
+
+    switch (opcode) {
+        case OP_DOUBLE_TO_INT:
+        case OP_INT_TO_FLOAT:
+        case OP_FLOAT_TO_INT:
+        case OP_DOUBLE_TO_FLOAT:
+        case OP_FLOAT_TO_DOUBLE:
+        case OP_INT_TO_DOUBLE:
+        case OP_FLOAT_TO_LONG:
+        case OP_LONG_TO_FLOAT:
+        case OP_DOUBLE_TO_LONG:
+        case OP_LONG_TO_DOUBLE:
+            return genConversion(cUnit, mir);
+        case OP_NEG_INT:
+        case OP_NOT_INT:
+            return genArithOpInt(cUnit, mir, rlDest, rlSrc, rlSrc);
+        case OP_NEG_LONG:
+        case OP_NOT_LONG:
+            return genArithOpLong(cUnit, mir, rlDest, rlSrc, rlSrc);
+        case OP_NEG_FLOAT:
+            return genArithOpFloat(cUnit, mir, rlDest, rlSrc, rlSrc);
+        case OP_NEG_DOUBLE:
+            return genArithOpDouble(cUnit, mir, rlDest, rlSrc, rlSrc);
+        case OP_MOVE_WIDE:
+            storeValueWide(cUnit, rlDest, rlSrc);
+            break;
+        case OP_INT_TO_LONG:
+            rlSrc = dvmCompilerUpdateLoc(cUnit, rlSrc);
+            rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
+            //TUNING: shouldn't loadValueDirect already check for phys reg?
+            if (rlSrc.location == kLocPhysReg) {
+                genRegCopy(cUnit, rlResult.lowReg, rlSrc.lowReg);
+            } else {
+                loadValueDirect(cUnit, rlSrc, rlResult.lowReg);
+            }
+            opRegRegImm(cUnit, kOpAsr, rlResult.highReg,
+                        rlResult.lowReg, 31);
+            storeValueWide(cUnit, rlDest, rlResult);
+            break;
+        case OP_LONG_TO_INT:
+            rlSrc = dvmCompilerUpdateLocWide(cUnit, rlSrc);
+            rlSrc = dvmCompilerWideToNarrow(cUnit, rlSrc);
+            // Intentional fallthrough
+        case OP_MOVE:
+        case OP_MOVE_OBJECT:
+            storeValue(cUnit, rlDest, rlSrc);
+            break;
+        case OP_INT_TO_BYTE:
+            rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
+            rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
+            opRegReg(cUnit, kOp2Byte, rlResult.lowReg, rlSrc.lowReg);
+            storeValue(cUnit, rlDest, rlResult);
+            break;
+        case OP_INT_TO_SHORT:
+            rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
+            rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
+            opRegReg(cUnit, kOp2Short, rlResult.lowReg, rlSrc.lowReg);
+            storeValue(cUnit, rlDest, rlResult);
+            break;
+        case OP_INT_TO_CHAR:
+            rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
+            rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
+            opRegReg(cUnit, kOp2Char, rlResult.lowReg, rlSrc.lowReg);
+            storeValue(cUnit, rlDest, rlResult);
+            break;
+        case OP_ARRAY_LENGTH: {
+            int lenOffset = OFFSETOF_MEMBER(ArrayObject, length);
+            rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
+            genNullCheck(cUnit, rlSrc.sRegLow, rlSrc.lowReg,
+                         mir->offset, NULL);
+            rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
+            loadWordDisp(cUnit, rlSrc.lowReg, lenOffset,
+                         rlResult.lowReg);
+            storeValue(cUnit, rlDest, rlResult);
+            break;
+        }
+        default:
+            return true;
+    }
+    return false;
+}
+
+static bool handleFmt21s(CompilationUnit *cUnit, MIR *mir)
+{
+    Opcode dalvikOpcode = mir->dalvikInsn.opcode;
+    RegLocation rlDest;
+    RegLocation rlResult;
+    int BBBB = mir->dalvikInsn.vB;
+    if (dalvikOpcode == OP_CONST_WIDE_16) {
+        rlDest = dvmCompilerGetDestWide(cUnit, mir, 0, 1);
+        rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
+        loadConstantNoClobber(cUnit, rlResult.lowReg, BBBB);
+        //TUNING: do high separately to avoid load dependency
+        opRegRegImm(cUnit, kOpAsr, rlResult.highReg, rlResult.lowReg, 31);
+        storeValueWide(cUnit, rlDest, rlResult);
+    } else if (dalvikOpcode == OP_CONST_16) {
+        rlDest = dvmCompilerGetDest(cUnit, mir, 0);
+        rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kAnyReg, true);
+        loadConstantNoClobber(cUnit, rlResult.lowReg, BBBB);
+        storeValue(cUnit, rlDest, rlResult);
+    } else
+        return true;
+    return false;
+}
+
+/* Compare against zero */
+static bool handleFmt21t(CompilationUnit *cUnit, MIR *mir, BasicBlock *bb,
+                         MipsLIR *labelList)
+{
+    Opcode dalvikOpcode = mir->dalvikInsn.opcode;
+    MipsOpCode opc = kMipsNop;
+    int rt = -1;
+    /* backward branch? */
+    bool backwardBranch = (bb->taken->startOffset <= mir->offset);
+
+    if (backwardBranch &&
+        (gDvmJit.genSuspendPoll || cUnit->jitMode == kJitLoop)) {
+        genSuspendPoll(cUnit, mir);
+    }
+
+    RegLocation rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
+    rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
+
+    switch (dalvikOpcode) {
+        case OP_IF_EQZ:
+            opc = kMipsBeqz;
+            break;
+        case OP_IF_NEZ:
+            opc = kMipsBne;
+            rt = r_ZERO;
+            break;
+        case OP_IF_LTZ:
+            opc = kMipsBltz;
+            break;
+        case OP_IF_GEZ:
+            opc = kMipsBgez;
+            break;
+        case OP_IF_GTZ:
+            opc = kMipsBgtz;
+            break;
+        case OP_IF_LEZ:
+            opc = kMipsBlez;
+            break;
+        default:
+            LOGE("Unexpected opcode (%d) for Fmt21t", dalvikOpcode);
+            dvmCompilerAbort(cUnit);
+    }
+    genConditionalBranchMips(cUnit, opc, rlSrc.lowReg, rt, &labelList[bb->taken->id]);
+    /* This most likely will be optimized away in a later phase */
+    genUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
+    return false;
+}
+
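+// Returns true if 'x' has at most one bit set (note: this includes x == 0;
+// callers reject lit < 2 before relying on the result).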
+static bool isPowerOfTwo(int x)
+{
+    return (x & (x - 1)) == 0;
+}
+
+// Returns true if no more than two bits are set in 'x'.
+static bool isPopCountLE2(unsigned int x)
+{
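+    // Clearing the lowest set bit (x &= x - 1) and then re-testing with the
+    // same trick leaves zero iff at most two bits were set,
+    // e.g. 0b0110 -> 0b0100 -> 0.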
+    x &= x - 1;
+    return (x & (x - 1)) == 0;
+}
+
+// Returns the index of the lowest set bit in 'x'.
+static int lowestSetBit(unsigned int x) {
+    int bit_posn = 0;
+    while ((x & 0xf) == 0) {
+        bit_posn += 4;
+        x >>= 4;
+    }
+    while ((x & 1) == 0) {
+        bit_posn++;
+        x >>= 1;
+    }
+    return bit_posn;
+}
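+
+/*
+ * Worked examples for the three helpers above (illustrative only):
+ *   isPowerOfTwo(8)   -> true  (8 & 7 == 0); note this also holds for 0,
+ *                              so callers must reject lit < 2 first.
+ *   isPopCountLE2(10) -> true  (0b1010: clearing the lowest set bit leaves
+ *                              0b1000, a power of two)
+ *   lowestSetBit(24)  -> 3     (24 == 0b11000); x must be non-zero or the
+ *                              loop never terminates.
+ */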
+
+// Returns true if it added instructions to 'cUnit' to divide 'rlSrc' by 'lit'
+// and store the result in 'rlDest'.
+static bool handleEasyDivide(CompilationUnit *cUnit, Opcode dalvikOpcode,
+                             RegLocation rlSrc, RegLocation rlDest, int lit)
+{
+    if (lit < 2 || !isPowerOfTwo(lit)) {
+        return false;
+    }
+    int k = lowestSetBit(lit);
+    if (k >= 30) {
+        // Avoid special cases.
+        return false;
+    }
+    bool div = (dalvikOpcode == OP_DIV_INT_LIT8 || dalvikOpcode == OP_DIV_INT_LIT16);
+    rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
+    RegLocation rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
+    if (div) {
+        int tReg = dvmCompilerAllocTemp(cUnit);
+        if (lit == 2) {
+            // Division by 2 is by far the most common division by constant.
+            opRegRegImm(cUnit, kOpLsr, tReg, rlSrc.lowReg, 32 - k);
+            opRegRegReg(cUnit, kOpAdd, tReg, tReg, rlSrc.lowReg);
+            opRegRegImm(cUnit, kOpAsr, rlResult.lowReg, tReg, k);
+        } else {
+            opRegRegImm(cUnit, kOpAsr, tReg, rlSrc.lowReg, 31);
+            opRegRegImm(cUnit, kOpLsr, tReg, tReg, 32 - k);
+            opRegRegReg(cUnit, kOpAdd, tReg, tReg, rlSrc.lowReg);
+            opRegRegImm(cUnit, kOpAsr, rlResult.lowReg, tReg, k);
+        }
+    } else {
+        int cReg = dvmCompilerAllocTemp(cUnit);
+        loadConstant(cUnit, cReg, lit - 1);
+        int tReg1 = dvmCompilerAllocTemp(cUnit);
+        int tReg2 = dvmCompilerAllocTemp(cUnit);
+        if (lit == 2) {
+            opRegRegImm(cUnit, kOpLsr, tReg1, rlSrc.lowReg, 32 - k);
+            opRegRegReg(cUnit, kOpAdd, tReg2, tReg1, rlSrc.lowReg);
+            opRegRegReg(cUnit, kOpAnd, tReg2, tReg2, cReg);
+            opRegRegReg(cUnit, kOpSub, rlResult.lowReg, tReg2, tReg1);
+        } else {
+            opRegRegImm(cUnit, kOpAsr, tReg1, rlSrc.lowReg, 31);
+            opRegRegImm(cUnit, kOpLsr, tReg1, tReg1, 32 - k);
+            opRegRegReg(cUnit, kOpAdd, tReg2, tReg1, rlSrc.lowReg);
+            opRegRegReg(cUnit, kOpAnd, tReg2, tReg2, cReg);
+            opRegRegReg(cUnit, kOpSub, rlResult.lowReg, tReg2, tReg1);
+        }
+    }
+    storeValue(cUnit, rlDest, rlResult);
+    return true;
+}
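+
+/*
+ * Worked example for the division lowering above (illustrative only),
+ * with lit == 8 (k == 3) and src == -9:
+ *   div:  t = src >> 31            -> -1
+ *         t = (unsigned)t >> 29    -> 7       (the bias, lit - 1)
+ *         t = t + src              -> -2
+ *         q = t >> 3               -> -1      (matches C's -9 / 8)
+ *   rem:  r = ((src + 7) & 7) - 7  -> 6 - 7 == -1
+ *                                             (matches C's -9 % 8)
+ * For non-negative src the bias is 0, so both reduce to a plain shift/mask.
+ */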
+
+// Returns true if it added instructions to 'cUnit' to multiply 'rlSrc' by 'lit'
+// and store the result in 'rlDest'.
+static bool handleEasyMultiply(CompilationUnit *cUnit,
+                               RegLocation rlSrc, RegLocation rlDest, int lit)
+{
+    // Can we simplify this multiplication?
+    bool powerOfTwo = false;
+    bool popCountLE2 = false;
+    bool powerOfTwoMinusOne = false;
+    if (lit < 2) {
+        // Avoid special cases.
+        return false;
+    } else if (isPowerOfTwo(lit)) {
+        powerOfTwo = true;
+    } else if (isPopCountLE2(lit)) {
+        popCountLE2 = true;
+    } else if (isPowerOfTwo(lit + 1)) {
+        powerOfTwoMinusOne = true;
+    } else {
+        return false;
+    }
+    rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
+    RegLocation rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
+    if (powerOfTwo) {
+        // Shift.
+        opRegRegImm(cUnit, kOpLsl, rlResult.lowReg, rlSrc.lowReg,
+                    lowestSetBit(lit));
+    } else if (popCountLE2) {
+        // Shift and add and shift.
+        int firstBit = lowestSetBit(lit);
+        int secondBit = lowestSetBit(lit ^ (1 << firstBit));
+        genMultiplyByTwoBitMultiplier(cUnit, rlSrc, rlResult, lit,
+                                      firstBit, secondBit);
+    } else {
+        // lit == 2^k - 1: the multiply becomes (src << k) - src.
+        assert(powerOfTwoMinusOne);
+        int tReg = dvmCompilerAllocTemp(cUnit);
+        opRegRegImm(cUnit, kOpLsl, tReg, rlSrc.lowReg, lowestSetBit(lit + 1));
+        opRegRegReg(cUnit, kOpSub, rlResult.lowReg, tReg, rlSrc.lowReg);
+    }
+    storeValue(cUnit, rlDest, rlResult);
+    return true;
+}
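+
+/*
+ * Worked examples for the multiply lowering above (illustrative only):
+ *   lit == 8  (power of two):      src * 8  == src << 3
+ *   lit == 10 (two bits set):      src * 10 == (src << 1) + (src << 3)
+ *   lit == 7  (2^k - 1, k == 3):   src * 7  == (src << 3) - src
+ */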
+
+static bool handleFmt22b_Fmt22s(CompilationUnit *cUnit, MIR *mir)
+{
+    Opcode dalvikOpcode = mir->dalvikInsn.opcode;
+    RegLocation rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
+    RegLocation rlDest = dvmCompilerGetDest(cUnit, mir, 0);
+    RegLocation rlResult;
+    int lit = mir->dalvikInsn.vC;
+    OpKind op = (OpKind)0;      /* Make gcc happy */
+    bool shiftOp = false;
+
+    switch (dalvikOpcode) {
+        case OP_RSUB_INT_LIT8:
+        case OP_RSUB_INT: {
+            int tReg;
+            //TUNING: add support for use of Arm rsub op
+            rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
+            tReg = dvmCompilerAllocTemp(cUnit);
+            loadConstant(cUnit, tReg, lit);
+            rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
+            opRegRegReg(cUnit, kOpSub, rlResult.lowReg,
+                        tReg, rlSrc.lowReg);
+            storeValue(cUnit, rlDest, rlResult);
+            return false;
+        }
+
+        case OP_ADD_INT_LIT8:
+        case OP_ADD_INT_LIT16:
+            op = kOpAdd;
+            break;
+        case OP_MUL_INT_LIT8:
+        case OP_MUL_INT_LIT16: {
+            if (handleEasyMultiply(cUnit, rlSrc, rlDest, lit)) {
+                return false;
+            }
+            op = kOpMul;
+            break;
+        }
+        case OP_AND_INT_LIT8:
+        case OP_AND_INT_LIT16:
+            op = kOpAnd;
+            break;
+        case OP_OR_INT_LIT8:
+        case OP_OR_INT_LIT16:
+            op = kOpOr;
+            break;
+        case OP_XOR_INT_LIT8:
+        case OP_XOR_INT_LIT16:
+            op = kOpXor;
+            break;
+        case OP_SHL_INT_LIT8:
+            lit &= 31;
+            shiftOp = true;
+            op = kOpLsl;
+            break;
+        case OP_SHR_INT_LIT8:
+            lit &= 31;
+            shiftOp = true;
+            op = kOpAsr;
+            break;
+        case OP_USHR_INT_LIT8:
+            lit &= 31;
+            shiftOp = true;
+            op = kOpLsr;
+            break;
+
+        case OP_DIV_INT_LIT8:
+        case OP_DIV_INT_LIT16:
+        case OP_REM_INT_LIT8:
+        case OP_REM_INT_LIT16: {
+            if (lit == 0) {
+                /* Let the interpreter deal with div by 0 */
+                genInterpSingleStep(cUnit, mir);
+                return false;
+            }
+            if (handleEasyDivide(cUnit, dalvikOpcode, rlSrc, rlDest, lit)) {
+                return false;
+            }
+
+            MipsOpCode opc;
+            int divReg;
+
+            if ((dalvikOpcode == OP_DIV_INT_LIT8) ||
+                (dalvikOpcode == OP_DIV_INT_LIT16)) {
+                opc = kMipsMflo;
+                divReg = r_LO;
+            } else {
+                opc = kMipsMfhi;
+                divReg = r_HI;
+            }
+
+            rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
+            int tReg = dvmCompilerAllocTemp(cUnit);
+            newLIR3(cUnit, kMipsAddiu, tReg, r_ZERO, lit);
+            newLIR4(cUnit, kMipsDiv, r_HI, r_LO, rlSrc.lowReg, tReg);
+            rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
+            newLIR2(cUnit, opc, rlResult.lowReg, divReg);
+            dvmCompilerFreeTemp(cUnit, tReg);
+            storeValue(cUnit, rlDest, rlResult);
+            return false;
+        }
+        default:
+            return true;
+    }
+    rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
+    rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
+    // A shift by literal 0 is a no-op; generate a register copy instead.
+    if (shiftOp && (lit == 0)) {
+        genRegCopy(cUnit, rlResult.lowReg, rlSrc.lowReg);
+    } else {
+        opRegRegImm(cUnit, op, rlResult.lowReg, rlSrc.lowReg, lit);
+    }
+    storeValue(cUnit, rlDest, rlResult);
+    return false;
+}
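+
+/*
+ * Note on the div/rem path above: MIPS "div" leaves the quotient in LO and
+ * the remainder in HI, so OP_DIV_* reads the result with mflo and OP_REM_*
+ * with mfhi. The literal is first materialized into a register (addiu from
+ * $zero) because div takes register operands only.
+ */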
+
+static bool handleFmt22c_Fmt52c(CompilationUnit *cUnit, MIR *mir)
+{
+    Opcode dalvikOpcode = mir->dalvikInsn.opcode;
+    int fieldOffset = -1;
+    bool isVolatile = false;
+    switch (dalvikOpcode) {
+        /*
+         * Wide volatiles currently handled via single step.
+         * Add them here if generating in-line code.
+         *     case OP_IGET_WIDE_VOLATILE:
+         *     case OP_IGET_WIDE_VOLATILE_JUMBO:
+         *     case OP_IPUT_WIDE_VOLATILE:
+         *     case OP_IPUT_WIDE_VOLATILE_JUMBO:
+         */
+        case OP_IGET_VOLATILE:
+        case OP_IGET_VOLATILE_JUMBO:
+        case OP_IGET_OBJECT_VOLATILE:
+        case OP_IGET_OBJECT_VOLATILE_JUMBO:
+        case OP_IPUT_VOLATILE:
+        case OP_IPUT_VOLATILE_JUMBO:
+        case OP_IPUT_OBJECT_VOLATILE:
+        case OP_IPUT_OBJECT_VOLATILE_JUMBO:
+#if ANDROID_SMP != 0
+            isVolatile = true;
+        // NOTE: intentional fallthrough
+#endif
+        case OP_IGET:
+        case OP_IGET_JUMBO:
+        case OP_IGET_WIDE:
+        case OP_IGET_WIDE_JUMBO:
+        case OP_IGET_OBJECT:
+        case OP_IGET_OBJECT_JUMBO:
+        case OP_IGET_BOOLEAN:
+        case OP_IGET_BOOLEAN_JUMBO:
+        case OP_IGET_BYTE:
+        case OP_IGET_BYTE_JUMBO:
+        case OP_IGET_CHAR:
+        case OP_IGET_CHAR_JUMBO:
+        case OP_IGET_SHORT:
+        case OP_IGET_SHORT_JUMBO:
+        case OP_IPUT:
+        case OP_IPUT_JUMBO:
+        case OP_IPUT_WIDE:
+        case OP_IPUT_WIDE_JUMBO:
+        case OP_IPUT_OBJECT:
+        case OP_IPUT_OBJECT_JUMBO:
+        case OP_IPUT_BOOLEAN:
+        case OP_IPUT_BOOLEAN_JUMBO:
+        case OP_IPUT_BYTE:
+        case OP_IPUT_BYTE_JUMBO:
+        case OP_IPUT_CHAR:
+        case OP_IPUT_CHAR_JUMBO:
+        case OP_IPUT_SHORT:
+        case OP_IPUT_SHORT_JUMBO: {
+            const Method *method = (mir->OptimizationFlags & MIR_CALLEE) ?
+                mir->meta.calleeMethod : cUnit->method;
+            Field *fieldPtr =
+                method->clazz->pDvmDex->pResFields[mir->dalvikInsn.vC];
+
+            if (fieldPtr == NULL) {
+                BAIL_LOOP_COMPILATION();
+                LOGE("Unexpected null instance field");
+                dvmAbort();
+            }
+#if ANDROID_SMP != 0
+            assert(isVolatile == dvmIsVolatileField((Field *) fieldPtr));
+#else
+            isVolatile = dvmIsVolatileField((Field *) fieldPtr);
+#endif
+            fieldOffset = ((InstField *)fieldPtr)->byteOffset;
+            break;
+        }
+        default:
+            break;
+    }
+
+    switch (dalvikOpcode) {
+        case OP_NEW_ARRAY:
+        case OP_NEW_ARRAY_JUMBO: {
+#if 0 /* Test 080 triggers an assert in Interp.c:1290 for the out-of-memory
+         exception. The assert appears to be in error and should be disabled;
+         with asserts disabled, test 080 passes. */
+            genInterpSingleStep(cUnit, mir);
+            return false;
+#endif
+            // Generates a call - use explicit registers
+            RegLocation rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
+            RegLocation rlDest = dvmCompilerGetDest(cUnit, mir, 0);
+            RegLocation rlResult;
+            void *classPtr = (void*)
+              (cUnit->method->clazz->pDvmDex->pResClasses[mir->dalvikInsn.vC]);
+
+            if (classPtr == NULL) {
+                BAIL_LOOP_COMPILATION();
+                LOGE("Unexpected null class");
+                dvmAbort();
+            }
+
+            dvmCompilerFlushAllRegs(cUnit);   /* Everything to home location */
+            genExportPC(cUnit, mir);
+            loadValueDirectFixed(cUnit, rlSrc, r_A1);   /* Len */
+            loadConstant(cUnit, r_A0, (int) classPtr );
+            LOAD_FUNC_ADDR(cUnit, r_T9, (int)dvmAllocArrayByClass);
+            /*
+             * "len < 0": bail to the interpreter to re-execute the
+             * instruction
+             */
+            genRegImmCheck(cUnit, kMipsCondMi, r_A1, 0, mir->offset, NULL);
+            loadConstant(cUnit, r_A2, ALLOC_DONT_TRACK);
+            opReg(cUnit, kOpBlx, r_T9);
+            newLIR3(cUnit, kMipsLw, r_GP, STACK_OFFSET_GP, r_SP);
+            dvmCompilerClobberCallRegs(cUnit);
+            /* generate a branch over if allocation is successful */
+            MipsLIR *branchOver = opCompareBranch(cUnit, kMipsBne, r_V0, r_ZERO);
+            /*
+             * OOM exception needs to be thrown here and cannot re-execute
+             */
+            loadConstant(cUnit, r_A0,
+                         (int) (cUnit->method->insns + mir->offset));
+            genDispatchToHandler(cUnit, TEMPLATE_THROW_EXCEPTION_COMMON);
+            /* noreturn */
+
+            MipsLIR *target = newLIR0(cUnit, kMipsPseudoTargetLabel);
+            target->defMask = ENCODE_ALL;
+            branchOver->generic.target = (LIR *) target;
+            rlResult = dvmCompilerGetReturn(cUnit);
+            storeValue(cUnit, rlDest, rlResult);
+            break;
+        }
+        case OP_INSTANCE_OF:
+        case OP_INSTANCE_OF_JUMBO: {
+            // May generate a call - use explicit registers
+            RegLocation rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
+            RegLocation rlDest = dvmCompilerGetDest(cUnit, mir, 0);
+            RegLocation rlResult;
+            ClassObject *classPtr =
+              (cUnit->method->clazz->pDvmDex->pResClasses[mir->dalvikInsn.vC]);
+            /*
+             * Note: It is possible that classPtr is NULL at this point,
+             * even though this instruction has been successfully interpreted.
+             * If the previous interpretation had a null source, the
+             * interpreter would not have bothered to resolve the clazz.
+             * Bail out to the interpreter in this case, and log it
+             * so that we can tell if it happens frequently.
+             */
+            if (classPtr == NULL) {
+                BAIL_LOOP_COMPILATION();
+                LOGD("null clazz in OP_INSTANCE_OF, single-stepping");
+                genInterpSingleStep(cUnit, mir);
+                break;
+            }
+            dvmCompilerFlushAllRegs(cUnit);   /* Everything to home location */
+            loadValueDirectFixed(cUnit, rlSrc, r_V0);  /* Ref */
+            loadConstant(cUnit, r_A2, (int) classPtr );
+            /* When taken r_V0 has NULL which can be used for store directly */
+            MipsLIR *branch1 = opCompareBranch(cUnit, kMipsBeqz, r_V0, -1);
+            loadWordDisp(cUnit, r_V0, offsetof(Object, clazz), r_A1);
+            /* r_A1 now contains object->clazz */
+            LOAD_FUNC_ADDR(cUnit, r_T9, (int)dvmInstanceofNonTrivial);
+            loadConstant(cUnit, r_V0, 1);                /* Assume true */
+            MipsLIR *branch2 = opCompareBranch(cUnit, kMipsBeq, r_A1, r_A2);
+            genRegCopy(cUnit, r_A0, r_A1);
+            genRegCopy(cUnit, r_A1, r_A2);
+            opReg(cUnit, kOpBlx, r_T9);
+            newLIR3(cUnit, kMipsLw, r_GP, STACK_OFFSET_GP, r_SP);
+            dvmCompilerClobberCallRegs(cUnit);
+            /* branch target here */
+            MipsLIR *target = newLIR0(cUnit, kMipsPseudoTargetLabel);
+            target->defMask = ENCODE_ALL;
+            rlResult = dvmCompilerGetReturn(cUnit);
+            storeValue(cUnit, rlDest, rlResult);
+            branch1->generic.target = (LIR *)target;
+            branch2->generic.target = (LIR *)target;
+            break;
+        }
+        case OP_IGET_WIDE:
+        case OP_IGET_WIDE_JUMBO:
+            genIGetWide(cUnit, mir, fieldOffset);
+            break;
+        case OP_IGET_VOLATILE:
+        case OP_IGET_VOLATILE_JUMBO:
+        case OP_IGET_OBJECT_VOLATILE:
+        case OP_IGET_OBJECT_VOLATILE_JUMBO:
+        case OP_IGET:
+        case OP_IGET_JUMBO:
+        case OP_IGET_OBJECT:
+        case OP_IGET_OBJECT_JUMBO:
+        case OP_IGET_BOOLEAN:
+        case OP_IGET_BOOLEAN_JUMBO:
+        case OP_IGET_BYTE:
+        case OP_IGET_BYTE_JUMBO:
+        case OP_IGET_CHAR:
+        case OP_IGET_CHAR_JUMBO:
+        case OP_IGET_SHORT:
+        case OP_IGET_SHORT_JUMBO:
+            genIGet(cUnit, mir, kWord, fieldOffset, isVolatile);
+            break;
+        case OP_IPUT_WIDE:
+        case OP_IPUT_WIDE_JUMBO:
+            genIPutWide(cUnit, mir, fieldOffset);
+            break;
+        case OP_IPUT_VOLATILE:
+        case OP_IPUT_VOLATILE_JUMBO:
+        case OP_IPUT:
+        case OP_IPUT_JUMBO:
+        case OP_IPUT_BOOLEAN:
+        case OP_IPUT_BOOLEAN_JUMBO:
+        case OP_IPUT_BYTE:
+        case OP_IPUT_BYTE_JUMBO:
+        case OP_IPUT_CHAR:
+        case OP_IPUT_CHAR_JUMBO:
+        case OP_IPUT_SHORT:
+        case OP_IPUT_SHORT_JUMBO:
+            genIPut(cUnit, mir, kWord, fieldOffset, false, isVolatile);
+            break;
+        case OP_IPUT_OBJECT_VOLATILE:
+        case OP_IPUT_OBJECT_VOLATILE_JUMBO:
+        case OP_IPUT_OBJECT:
+        case OP_IPUT_OBJECT_JUMBO:
+            genIPut(cUnit, mir, kWord, fieldOffset, true, isVolatile);
+            break;
+        case OP_IGET_WIDE_VOLATILE:
+        case OP_IGET_WIDE_VOLATILE_JUMBO:
+        case OP_IPUT_WIDE_VOLATILE:
+        case OP_IPUT_WIDE_VOLATILE_JUMBO:
+            genInterpSingleStep(cUnit, mir);
+            break;
+        default:
+            return true;
+    }
+    return false;
+}
+
+static bool handleFmt22cs(CompilationUnit *cUnit, MIR *mir)
+{
+    Opcode dalvikOpcode = mir->dalvikInsn.opcode;
+    int fieldOffset = mir->dalvikInsn.vC;
+    switch (dalvikOpcode) {
+        case OP_IGET_QUICK:
+        case OP_IGET_OBJECT_QUICK:
+            genIGet(cUnit, mir, kWord, fieldOffset, false);
+            break;
+        case OP_IPUT_QUICK:
+            genIPut(cUnit, mir, kWord, fieldOffset, false, false);
+            break;
+        case OP_IPUT_OBJECT_QUICK:
+            genIPut(cUnit, mir, kWord, fieldOffset, true, false);
+            break;
+        case OP_IGET_WIDE_QUICK:
+            genIGetWide(cUnit, mir, fieldOffset);
+            break;
+        case OP_IPUT_WIDE_QUICK:
+            genIPutWide(cUnit, mir, fieldOffset);
+            break;
+        default:
+            return true;
+    }
+    return false;
+}
+
+/* Compare two registers and branch */
+static bool handleFmt22t(CompilationUnit *cUnit, MIR *mir, BasicBlock *bb,
+                         MipsLIR *labelList)
+{
+    Opcode dalvikOpcode = mir->dalvikInsn.opcode;
+    MipsOpCode opc = kMipsNop;
+    MipsLIR *test = NULL;
+    /* backward branch? */
+    bool backwardBranch = (bb->taken->startOffset <= mir->offset);
+
+    if (backwardBranch &&
+        (gDvmJit.genSuspendPoll || cUnit->jitMode == kJitLoop)) {
+        genSuspendPoll(cUnit, mir);
+    }
+
+    RegLocation rlSrc1 = dvmCompilerGetSrc(cUnit, mir, 0);
+    RegLocation rlSrc2 = dvmCompilerGetSrc(cUnit, mir, 1);
+    rlSrc1 = loadValue(cUnit, rlSrc1, kCoreReg);
+    rlSrc2 = loadValue(cUnit, rlSrc2, kCoreReg);
+    int reg1 = rlSrc1.lowReg;
+    int reg2 = rlSrc2.lowReg;
+    int tReg;
+
+    switch (dalvikOpcode) {
+        case OP_IF_EQ:
+            opc = kMipsBeq;
+            break;
+        case OP_IF_NE:
+            opc = kMipsBne;
+            break;
+        case OP_IF_LT:
+            opc = kMipsBne;
+            tReg = dvmCompilerAllocTemp(cUnit);
+            test = newLIR3(cUnit, kMipsSlt, tReg, reg1, reg2);
+            reg1 = tReg;
+            reg2 = r_ZERO;
+            break;
+        case OP_IF_LE:
+            opc = kMipsBeqz;
+            tReg = dvmCompilerAllocTemp(cUnit);
+            test = newLIR3(cUnit, kMipsSlt, tReg, reg2, reg1);
+            reg1 = tReg;
+            reg2 = -1;
+            break;
+        case OP_IF_GT:
+            opc = kMipsBne;
+            tReg = dvmCompilerAllocTemp(cUnit);
+            test = newLIR3(cUnit, kMipsSlt, tReg, reg2, reg1);
+            reg1 = tReg;
+            reg2 = r_ZERO;
+            break;
+        case OP_IF_GE:
+            opc = kMipsBeqz;
+            tReg = dvmCompilerAllocTemp(cUnit);
+            test = newLIR3(cUnit, kMipsSlt, tReg, reg1, reg2);
+            reg1 = tReg;
+            reg2 = -1;
+            break;
+        default:
+            LOGE("Unexpected opcode (%d) for Fmt22t", dalvikOpcode);
+            dvmCompilerAbort(cUnit);
+    }
+
+    genConditionalBranchMips(cUnit, opc, reg1, reg2, &labelList[bb->taken->id]);
+    /* This most likely will be optimized away in a later phase */
+    genUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
+    return false;
+}
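+
+/*
+ * Worked example for the compare lowering above (illustrative only):
+ * MIPS has no two-register blt/bge branch, so "if-lt vA, vB" becomes
+ *   slt  t, rA, rB        # t = (rA < rB) ? 1 : 0
+ *   bne  t, zero, taken
+ * and "if-ge vA, vB" reuses the same slt but branches with beqz t.
+ */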
+
+static bool handleFmt22x_Fmt32x(CompilationUnit *cUnit, MIR *mir)
+{
+    Opcode opcode = mir->dalvikInsn.opcode;
+
+    switch (opcode) {
+        case OP_MOVE_16:
+        case OP_MOVE_OBJECT_16:
+        case OP_MOVE_FROM16:
+        case OP_MOVE_OBJECT_FROM16: {
+            storeValue(cUnit, dvmCompilerGetDest(cUnit, mir, 0),
+                       dvmCompilerGetSrc(cUnit, mir, 0));
+            break;
+        }
+        case OP_MOVE_WIDE_16:
+        case OP_MOVE_WIDE_FROM16: {
+            storeValueWide(cUnit, dvmCompilerGetDestWide(cUnit, mir, 0, 1),
+                           dvmCompilerGetSrcWide(cUnit, mir, 0, 1));
+            break;
+        }
+        default:
+            return true;
+    }
+    return false;
+}
+
+static bool handleFmt23x(CompilationUnit *cUnit, MIR *mir)
+{
+    Opcode opcode = mir->dalvikInsn.opcode;
+    RegLocation rlSrc1;
+    RegLocation rlSrc2;
+    RegLocation rlDest;
+
+    if ((opcode >= OP_ADD_INT) && (opcode <= OP_REM_DOUBLE)) {
+        return genArithOp( cUnit, mir );
+    }
+
+    /* APUTs have 3 sources and no targets */
+    if (mir->ssaRep->numDefs == 0) {
+        if (mir->ssaRep->numUses == 3) {
+            rlDest = dvmCompilerGetSrc(cUnit, mir, 0);
+            rlSrc1 = dvmCompilerGetSrc(cUnit, mir, 1);
+            rlSrc2 = dvmCompilerGetSrc(cUnit, mir, 2);
+        } else {
+            assert(mir->ssaRep->numUses == 4);
+            rlDest = dvmCompilerGetSrcWide(cUnit, mir, 0, 1);
+            rlSrc1 = dvmCompilerGetSrc(cUnit, mir, 2);
+            rlSrc2 = dvmCompilerGetSrc(cUnit, mir, 3);
+        }
+    } else {
+        /* Two sources and 1 dest.  Deduce the operand sizes */
+        if (mir->ssaRep->numUses == 4) {
+            rlSrc1 = dvmCompilerGetSrcWide(cUnit, mir, 0, 1);
+            rlSrc2 = dvmCompilerGetSrcWide(cUnit, mir, 2, 3);
+        } else {
+            assert(mir->ssaRep->numUses == 2);
+            rlSrc1 = dvmCompilerGetSrc(cUnit, mir, 0);
+            rlSrc2 = dvmCompilerGetSrc(cUnit, mir, 1);
+        }
+        if (mir->ssaRep->numDefs == 2) {
+            rlDest = dvmCompilerGetDestWide(cUnit, mir, 0, 1);
+        } else {
+            assert(mir->ssaRep->numDefs == 1);
+            rlDest = dvmCompilerGetDest(cUnit, mir, 0);
+        }
+    }
+
+    switch (opcode) {
+        case OP_CMPL_FLOAT:
+        case OP_CMPG_FLOAT:
+        case OP_CMPL_DOUBLE:
+        case OP_CMPG_DOUBLE:
+            return genCmpFP(cUnit, mir, rlDest, rlSrc1, rlSrc2);
+        case OP_CMP_LONG:
+            genCmpLong(cUnit, mir, rlDest, rlSrc1, rlSrc2);
+            break;
+        case OP_AGET_WIDE:
+            genArrayGet(cUnit, mir, kLong, rlSrc1, rlSrc2, rlDest, 3);
+            break;
+        case OP_AGET:
+        case OP_AGET_OBJECT:
+            genArrayGet(cUnit, mir, kWord, rlSrc1, rlSrc2, rlDest, 2);
+            break;
+        case OP_AGET_BOOLEAN:
+            genArrayGet(cUnit, mir, kUnsignedByte, rlSrc1, rlSrc2, rlDest, 0);
+            break;
+        case OP_AGET_BYTE:
+            genArrayGet(cUnit, mir, kSignedByte, rlSrc1, rlSrc2, rlDest, 0);
+            break;
+        case OP_AGET_CHAR:
+            genArrayGet(cUnit, mir, kUnsignedHalf, rlSrc1, rlSrc2, rlDest, 1);
+            break;
+        case OP_AGET_SHORT:
+            genArrayGet(cUnit, mir, kSignedHalf, rlSrc1, rlSrc2, rlDest, 1);
+            break;
+        case OP_APUT_WIDE:
+            genArrayPut(cUnit, mir, kLong, rlSrc1, rlSrc2, rlDest, 3);
+            break;
+        case OP_APUT:
+            genArrayPut(cUnit, mir, kWord, rlSrc1, rlSrc2, rlDest, 2);
+            break;
+        case OP_APUT_OBJECT:
+            genArrayObjectPut(cUnit, mir, rlSrc1, rlSrc2, rlDest, 2);
+            break;
+        case OP_APUT_SHORT:
+        case OP_APUT_CHAR:
+            genArrayPut(cUnit, mir, kUnsignedHalf, rlSrc1, rlSrc2, rlDest, 1);
+            break;
+        case OP_APUT_BYTE:
+        case OP_APUT_BOOLEAN:
+            genArrayPut(cUnit, mir, kUnsignedByte, rlSrc1, rlSrc2, rlDest, 0);
+            break;
+        default:
+            return true;
+    }
+    return false;
+}
+
+/*
+ * Find the matching case.
+ *
+ * return values:
+ * r_RESULT0 (low 32 bits): offset, relative to ra, of the chaining cell
+ *    corresponding to the resolved case, including default, which is placed
+ *    at MIN(size, MAX_CHAINED_SWITCH_CASES).
+ * r_RESULT1 (high 32 bits): the Dalvik branch offset of the matching case
+ *    (only for indexes at or above MAX_CHAINED_SWITCH_CASES).
+ *
+ * Instructions around the call are:
+ *
+ * jalr &findPackedSwitchIndex
+ * nop
+ * lw gp, 84(sp) |
+ * addu          | 20 bytes for these 5 instructions
+ * move          | (NOTE: if this sequence is shortened or lengthened, then
+ * jr            |  the 20 byte offset added below in 3 places must be changed
+ * nop           |  accordingly.)
+ * chaining cell for case 0 [16 bytes]
+ * chaining cell for case 1 [16 bytes]
+ *               :
+ * chaining cell for case MIN(size, MAX_CHAINED_SWITCH_CASES)-1 [16 bytes]
+ * chaining cell for case default [16 bytes]
+ * noChain exit
+ */
+static s8 findPackedSwitchIndex(const u2* switchData, int testVal)
+{
+    int size;
+    int firstKey;
+    const int *entries;
+    int index;
+    int jumpIndex;
+    int caseDPCOffset = 0;
+
+    /*
+     * Packed switch data format:
+     *  ushort ident = 0x0100   magic value
+     *  ushort size             number of entries in the table
+     *  int first_key           first (and lowest) switch case value
+     *  int targets[size]       branch targets, relative to switch opcode
+     *
+     * Total size is (4+size*2) 16-bit code units.
+     */
+    size = switchData[1];
+    assert(size > 0);
+
+    firstKey = switchData[2];
+    firstKey |= switchData[3] << 16;
+
+    /* The entries are guaranteed to be aligned on a 32-bit boundary;
+     * we can treat them as a native int array.
+     */
+    entries = (const int*) &switchData[4];
+    assert(((u4)entries & 0x3) == 0);
+
+    index = testVal - firstKey;
+
+    /* Jump to the default cell */
+    if (index < 0 || index >= size) {
+        jumpIndex = MIN(size, MAX_CHAINED_SWITCH_CASES);
+    /* Jump to the non-chaining exit point */
+    } else if (index >= MAX_CHAINED_SWITCH_CASES) {
+        jumpIndex = MAX_CHAINED_SWITCH_CASES + 1;
+#ifdef HAVE_LITTLE_ENDIAN
+        caseDPCOffset = entries[index];
+#else
+        caseDPCOffset = (unsigned int)entries[index] >> 16 | entries[index] << 16;
+#endif
+    /* Jump to the inline chaining cell */
+    } else {
+        jumpIndex = index;
+    }
+
+    return (((s8) caseDPCOffset) << 32) | (u8) (jumpIndex * CHAIN_CELL_NORMAL_SIZE + 20);
+}
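+
+/*
+ * Worked trace (illustrative only): with size == 3, first_key == 10 and
+ * testVal == 11, index == 1; assuming 1 < MAX_CHAINED_SWITCH_CASES, the
+ * return value is (0 << 32) | (1 * CHAIN_CELL_NORMAL_SIZE + 20). The +20
+ * skips the 5-instruction return sequence shown above, so the jr-computed
+ * target lands on chaining cell #1; the high word stays 0 because the
+ * chaining cell itself supplies the Dalvik target.
+ */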
+
+/* See comments for findPackedSwitchIndex */
+static s8 findSparseSwitchIndex(const u2* switchData, int testVal)
+{
+    int size;
+    const int *keys;
+    const int *entries;
+    int i;
+
+    /*
+     * Sparse switch data format:
+     *  ushort ident = 0x0200   magic value
+     *  ushort size             number of entries in the table; > 0
+     *  int keys[size]          keys, sorted low-to-high; 32-bit aligned
+     *  int targets[size]       branch targets, relative to switch opcode
+     *
+     * Total size is (2+size*4) 16-bit code units.
+     */
+
+    size = switchData[1];
+    assert(size > 0);
+
+    /* The keys are guaranteed to be aligned on a 32-bit boundary;
+     * we can treat them as a native int array.
+     */
+    keys = (const int*) &switchData[2];
+    assert(((u4)keys & 0x3) == 0);
+
+    /* The entries are guaranteed to be aligned on a 32-bit boundary;
+     * we can treat them as a native int array.
+     */
+    entries = keys + size;
+    assert(((u4)entries & 0x3) == 0);
+
+    /*
+     * Run through the list of keys, which are guaranteed to
+     * be sorted low-to-high.
+     *
+     * Most tables have 3-4 entries.  Few have more than 10.  A binary
+     * search here is probably not useful.
+     */
+    for (i = 0; i < size; i++) {
+#ifdef HAVE_LITTLE_ENDIAN
+        int k = keys[i];
+        if (k == testVal) {
+            /* MAX_CHAINED_SWITCH_CASES + 1 is the start of the overflow case */
+            int jumpIndex = (i < MAX_CHAINED_SWITCH_CASES) ?
+                           i : MAX_CHAINED_SWITCH_CASES + 1;
+            return (((s8) entries[i]) << 32) | (u8) (jumpIndex * CHAIN_CELL_NORMAL_SIZE + 20);
+#else
+        int k = (unsigned int)keys[i] >> 16 | keys[i] << 16;
+        if (k == testVal) {
+            /* MAX_CHAINED_SWITCH_CASES + 1 is the start of the overflow case */
+            int jumpIndex = (i < MAX_CHAINED_SWITCH_CASES) ?
+                           i : MAX_CHAINED_SWITCH_CASES + 1;
+            int temp = (unsigned int)entries[i] >> 16 | entries[i] << 16;
+            return (((s8) temp) << 32) | (u8) (jumpIndex * CHAIN_CELL_NORMAL_SIZE + 20);
+#endif
+        } else if (k > testVal) {
+            break;
+        }
+    }
+    return MIN(size, MAX_CHAINED_SWITCH_CASES) * CHAIN_CELL_NORMAL_SIZE + 20;
+}
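+
+/*
+ * Illustrative contrast with the packed case: the keys array is searched
+ * linearly (keys are sorted, so the scan stops early once k > testVal), and
+ * a miss falls through to the default cell at
+ * MIN(size, MAX_CHAINED_SWITCH_CASES), exactly like an out-of-range packed
+ * index.
+ */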
+
+static bool handleFmt31t(CompilationUnit *cUnit, MIR *mir)
+{
+    Opcode dalvikOpcode = mir->dalvikInsn.opcode;
+    switch (dalvikOpcode) {
+        case OP_FILL_ARRAY_DATA: {
+            RegLocation rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
+            // Making a call - use explicit registers
+            dvmCompilerFlushAllRegs(cUnit);   /* Everything to home location */
+            genExportPC(cUnit, mir);
+            loadValueDirectFixed(cUnit, rlSrc, r_A0);
+            LOAD_FUNC_ADDR(cUnit, r_T9, (int)dvmInterpHandleFillArrayData);
+            loadConstant(cUnit, r_A1,
+               (int) (cUnit->method->insns + mir->offset + mir->dalvikInsn.vB));
+            opReg(cUnit, kOpBlx, r_T9);
+            newLIR3(cUnit, kMipsLw, r_GP, STACK_OFFSET_GP, r_SP);
+            dvmCompilerClobberCallRegs(cUnit);
+            /* generate a branch over if successful */
+            MipsLIR *branchOver = opCompareBranch(cUnit, kMipsBne, r_V0, r_ZERO);
+            loadConstant(cUnit, r_A0,
+                         (int) (cUnit->method->insns + mir->offset));
+            genDispatchToHandler(cUnit, TEMPLATE_THROW_EXCEPTION_COMMON);
+            MipsLIR *target = newLIR0(cUnit, kMipsPseudoTargetLabel);
+            target->defMask = ENCODE_ALL;
+            branchOver->generic.target = (LIR *) target;
+            break;
+        }
+        /*
+         * Compute the goto target of up to
+         * MIN(switchSize, MAX_CHAINED_SWITCH_CASES) + 1 chaining cells.
+         * See the comment before findPackedSwitchIndex for the code layout.
+         */
+        case OP_PACKED_SWITCH:
+        case OP_SPARSE_SWITCH: {
+            RegLocation rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
+            dvmCompilerFlushAllRegs(cUnit);   /* Everything to home location */
+            loadValueDirectFixed(cUnit, rlSrc, r_A1);
+            dvmCompilerLockAllTemps(cUnit);
+
+            if (dalvikOpcode == OP_PACKED_SWITCH) {
+                LOAD_FUNC_ADDR(cUnit, r_T9, (int)findPackedSwitchIndex);
+            } else {
+                LOAD_FUNC_ADDR(cUnit, r_T9, (int)findSparseSwitchIndex);
+            }
+            /* r_A0 <- Addr of the switch data */
+            loadConstant(cUnit, r_A0,
+               (int) (cUnit->method->insns + mir->offset + mir->dalvikInsn.vB));
+            opReg(cUnit, kOpBlx, r_T9);
+            newLIR3(cUnit, kMipsLw, r_GP, STACK_OFFSET_GP, r_SP);
+            dvmCompilerClobberCallRegs(cUnit);
+            /* pc <- computed goto target using value in RA */
+            newLIR3(cUnit, kMipsAddu, r_A0, r_RA, r_RESULT0);
+            newLIR2(cUnit, kMipsMove, r_A1, r_RESULT1);
+            newLIR1(cUnit, kMipsJr, r_A0);
+            newLIR0(cUnit, kMipsNop); /* for maintaining 20 byte offset */
+            break;
+        }
+        default:
+            return true;
+    }
+    return false;
+}
+
+/*
+ * See the example of predicted inlining listed before the
+ * genValidationForPredictedInline function. The function here takes care of
+ * the branch over at 0x4858de78 and the misprediction target at 0x4858de7a.
+ */
+static void genLandingPadForMispredictedCallee(CompilationUnit *cUnit, MIR *mir,
+                                               BasicBlock *bb,
+                                               MipsLIR *labelList)
+{
+    BasicBlock *fallThrough = bb->fallThrough;
+
+    /* Bypass the move-result block if there is one */
+    if (fallThrough->firstMIRInsn) {
+        assert(fallThrough->firstMIRInsn->OptimizationFlags & MIR_INLINED_PRED);
+        fallThrough = fallThrough->fallThrough;
+    }
+    /* Generate a branch over if the predicted inlining is correct */
+    genUnconditionalBranch(cUnit, &labelList[fallThrough->id]);
+
+    /* Reset the register state */
+    dvmCompilerResetRegPool(cUnit);
+    dvmCompilerClobberAllRegs(cUnit);
+    dvmCompilerResetNullCheck(cUnit);
+
+    /* Target for the slow invoke path */
+    MipsLIR *target = newLIR0(cUnit, kMipsPseudoTargetLabel);
+    target->defMask = ENCODE_ALL;
+    /* Hook up the target to the verification branch */
+    mir->meta.callsiteInfo->misPredBranchOver->target = (LIR *) target;
+}
+
+static bool handleFmt35c_3rc_5rc(CompilationUnit *cUnit, MIR *mir,
+                             BasicBlock *bb, MipsLIR *labelList)
+{
+    MipsLIR *retChainingCell = NULL;
+    MipsLIR *pcrLabel = NULL;
+
+    /* An invoke with the MIR_INLINED is effectively a no-op */
+    if (mir->OptimizationFlags & MIR_INLINED)
+        return false;
+
+    if (bb->fallThrough != NULL)
+        retChainingCell = &labelList[bb->fallThrough->id];
+
+    DecodedInstruction *dInsn = &mir->dalvikInsn;
+    switch (mir->dalvikInsn.opcode) {
+        /*
+         * calleeMethod = this->clazz->vtable[
+         *     method->clazz->pDvmDex->pResMethods[BBBB]->methodIndex
+         * ]
+         */
+        case OP_INVOKE_VIRTUAL:
+        case OP_INVOKE_VIRTUAL_RANGE:
+        case OP_INVOKE_VIRTUAL_JUMBO: {
+            MipsLIR *predChainingCell = &labelList[bb->taken->id];
+            int methodIndex =
+                cUnit->method->clazz->pDvmDex->pResMethods[dInsn->vB]->
+                methodIndex;
+
+            /*
+             * If the invoke has non-null misPredBranchOver, we need to generate
+             * the non-inlined version of the invoke here to handle the
+             * mispredicted case.
+             */
+            if (mir->meta.callsiteInfo->misPredBranchOver) {
+                genLandingPadForMispredictedCallee(cUnit, mir, bb, labelList);
+            }
+
+            if (mir->dalvikInsn.opcode == OP_INVOKE_VIRTUAL)
+                genProcessArgsNoRange(cUnit, mir, dInsn, &pcrLabel);
+            else
+                genProcessArgsRange(cUnit, mir, dInsn, &pcrLabel);
+
+            genInvokeVirtualCommon(cUnit, mir, methodIndex,
+                                   retChainingCell,
+                                   predChainingCell,
+                                   pcrLabel);
+            break;
+        }
+        /*
+         * calleeMethod = method->clazz->super->vtable[method->clazz->pDvmDex
+         *                ->pResMethods[BBBB]->methodIndex]
+         */
+        case OP_INVOKE_SUPER:
+        case OP_INVOKE_SUPER_RANGE:
+        case OP_INVOKE_SUPER_JUMBO: {
+            /* Grab the method ptr directly from what the interpreter sees */
+            const Method *calleeMethod = mir->meta.callsiteInfo->method;
+            assert(calleeMethod == cUnit->method->clazz->super->vtable[
+                                     cUnit->method->clazz->pDvmDex->
+                                       pResMethods[dInsn->vB]->methodIndex]);
+
+            if (mir->dalvikInsn.opcode == OP_INVOKE_SUPER)
+                genProcessArgsNoRange(cUnit, mir, dInsn, &pcrLabel);
+            else
+                genProcessArgsRange(cUnit, mir, dInsn, &pcrLabel);
+
+            if (mir->OptimizationFlags & MIR_INVOKE_METHOD_JIT) {
+                const Method *calleeMethod = mir->meta.callsiteInfo->method;
+                void *calleeAddr = dvmJitGetMethodAddr(calleeMethod->insns);
+                assert(calleeAddr);
+                genInvokeSingletonWholeMethod(cUnit, mir, calleeAddr,
+                                              retChainingCell);
+            } else {
+                /* r_A0 = calleeMethod */
+                loadConstant(cUnit, r_A0, (int) calleeMethod);
+
+                genInvokeSingletonCommon(cUnit, mir, bb, labelList, pcrLabel,
+                                         calleeMethod);
+            }
+            break;
+        }
+        /* calleeMethod = method->clazz->pDvmDex->pResMethods[BBBB] */
+        case OP_INVOKE_DIRECT:
+        case OP_INVOKE_DIRECT_RANGE:
+        case OP_INVOKE_DIRECT_JUMBO: {
+            /* Grab the method ptr directly from what the interpreter sees */
+            const Method *calleeMethod = mir->meta.callsiteInfo->method;
+            assert(calleeMethod ==
+                   cUnit->method->clazz->pDvmDex->pResMethods[dInsn->vB]);
+
+            if (mir->dalvikInsn.opcode == OP_INVOKE_DIRECT)
+                genProcessArgsNoRange(cUnit, mir, dInsn, &pcrLabel);
+            else
+                genProcessArgsRange(cUnit, mir, dInsn, &pcrLabel);
+
+            /* r_A0 = calleeMethod */
+            loadConstant(cUnit, r_A0, (int) calleeMethod);
+
+            genInvokeSingletonCommon(cUnit, mir, bb, labelList, pcrLabel,
+                                     calleeMethod);
+            break;
+        }
+        /* calleeMethod = method->clazz->pDvmDex->pResMethods[BBBB] */
+        case OP_INVOKE_STATIC:
+        case OP_INVOKE_STATIC_RANGE:
+        case OP_INVOKE_STATIC_JUMBO: {
+            /* Grab the method ptr directly from what the interpreter sees */
+            const Method *calleeMethod = mir->meta.callsiteInfo->method;
+            assert(calleeMethod ==
+                   cUnit->method->clazz->pDvmDex->pResMethods[dInsn->vB]);
+
+            if (mir->dalvikInsn.opcode == OP_INVOKE_STATIC)
+                genProcessArgsNoRange(cUnit, mir, dInsn,
+                                      NULL /* no null check */);
+            else
+                genProcessArgsRange(cUnit, mir, dInsn,
+                                    NULL /* no null check */);
+
+            if (mir->OptimizationFlags & MIR_INVOKE_METHOD_JIT) {
+                const Method *calleeMethod = mir->meta.callsiteInfo->method;
+                void *calleeAddr = dvmJitGetMethodAddr(calleeMethod->insns);
+                assert(calleeAddr);
+                genInvokeSingletonWholeMethod(cUnit, mir, calleeAddr,
+                                              retChainingCell);
+            } else {
+                /* r_A0 = calleeMethod */
+                loadConstant(cUnit, r_A0, (int) calleeMethod);
+
+                genInvokeSingletonCommon(cUnit, mir, bb, labelList, pcrLabel,
+                                         calleeMethod);
+            }
+            break;
+        }
+
+        /*
+         * calleeMethod = dvmFindInterfaceMethodInCache(this->clazz,
+         *                    BBBB, method, method->clazz->pDvmDex)
+         *
+         * The following is an example of generated code for
+         *      "invoke-interface v0"
+         *
+         * -------- dalvik offset: 0x000f @ invoke-interface (PI) v2
+         * 0x2f140c54 : lw       a0,8(s1)                    # genProcessArgsNoRange
+         * 0x2f140c58 : addiu    s4,s1,0xffffffe8(-24)
+         * 0x2f140c5c : beqz     a0,0x2f140d5c (L0x11f864)
+         * 0x2f140c60 : pref     1,0(s4)
+         * -------- BARRIER
+         * 0x2f140c64 : sw       a0,0(s4)
+         * 0x2f140c68 : addiu    s4,s4,0x0004(4)
+         * -------- BARRIER
+         * 0x2f140c6c : lui      s0,0x2d23(11555)            # dalvikPC
+         * 0x2f140c70 : ori      s0,s0,0x2d2365a6(757294502)
+         * 0x2f140c74 : lahi/lui a1,0x2f14(12052)            # a1 <- &retChainingCell
+         * 0x2f140c78 : lalo/ori a1,a1,0x2f140d38(789843256)
+         * 0x2f140c7c : lahi/lui a2,0x2f14(12052)            # a2 <- &predictedChainingCell
+         * 0x2f140c80 : lalo/ori a2,a2,0x2f140d80(789843328)
+         * 0x2f140c84 : jal      0x2f1311ec(789778924)       # call TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN
+         * 0x2f140c88 : nop
+         * 0x2f140c8c : b        0x2f140d80 (L0x11efc0)      # off to the predicted chain
+         * 0x2f140c90 : nop
+         * 0x2f140c94 : b        0x2f140d60 (L0x12457c)      # punt to the interpreter
+         * 0x2f140c98 : lui      a0,0x2d23(11555)
+         * 0x2f140c9c : move     s5,a1                       # prepare for dvmFindInterfaceMethodInCache
+         * 0x2f140ca0 : move     s6,a2
+         * 0x2f140ca4 : move     s7,a3
+         * 0x2f140ca8 : move     a0,a3
+         * 0x2f140cac : ori      a1,zero,0x2b42(11074)
+         * 0x2f140cb0 : lui      a2,0x2c92(11410)
+         * 0x2f140cb4 : ori      a2,a2,0x2c92adf8(747810296)
+         * 0x2f140cb8 : lui      a3,0x0009(9)
+         * 0x2f140cbc : ori      a3,a3,0x924b8(599224)
+         * 0x2f140cc0 : lui      t9,0x2ab2(10930)
+         * 0x2f140cc4 : ori      t9,t9,0x2ab2a48c(716350604)
+         * 0x2f140cc8 : jalr     ra,t9                       # call dvmFindInterfaceMethodInCache
+         * 0x2f140ccc : nop
+         * 0x2f140cd0 : lw       gp,84(sp)
+         * 0x2f140cd4 : move     a0,v0
+         * 0x2f140cd8 : bne      v0,zero,0x2f140cf0 (L0x120064)
+         * 0x2f140cdc : nop
+         * 0x2f140ce0 : lui      a0,0x2d23(11555)            # a0 <- dalvikPC
+         * 0x2f140ce4 : ori      a0,a0,0x2d2365a6(757294502)
+         * 0x2f140ce8 : jal      0x2f131720(789780256)       # call TEMPLATE_THROW_EXCEPTION_COMMON
+         * 0x2f140cec : nop
+         * 0x2f140cf0 : move     a1,s5                       # a1 <- &retChainingCell
+         * 0x2f140cf4 : bgtz     s5,0x2f140d20 (L0x120324)   # >0? don't rechain
+         * 0x2f140cf8 : nop
+         * 0x2f140cfc : lui      t9,0x2aba(10938)            # prepare for dvmJitToPatchPredictedChain
+         * 0x2f140d00 : ori      t9,t9,0x2abae3c4(716891076)
+         * 0x2f140d04 : move     a1,s2
+         * 0x2f140d08 : move     a2,s6
+         * 0x2f140d0c : move     a3,s7
+         * 0x2f140d10 : jalr     ra,t9                       # call dvmJitToPatchPredictedChain
+         * 0x2f140d14 : nop
+         * 0x2f140d18 : lw       gp,84(sp)
+         * 0x2f140d1c : move     a0,v0
+         * 0x2f140d20 : lahi/lui a1,0x2f14(12052)
+         * 0x2f140d24 : lalo/ori a1,a1,0x2f140d38(789843256) # a1 <- &retChainingCell
+         * 0x2f140d28 : jal      0x2f1310c4(789778628)       # call TEMPLATE_INVOKE_METHOD_NO_OPT
+         * 0x2f140d2c : nop
+         * 0x2f140d30 : b        0x2f140d60 (L0x12457c)
+         * 0x2f140d34 : lui      a0,0x2d23(11555)
+         * 0x2f140d38 : .align4
+         * -------- dalvik offset: 0x0012 @ move-result (PI) v1, (#0), (#0)
+         * 0x2f140d38 : lw       a2,16(s2)
+         * 0x2f140d3c : sw       a2,4(s1)
+         * 0x2f140d40 : b        0x2f140d74 (L0x1246fc)
+         * 0x2f140d44 : lw       a0,116(s2)
+         * 0x2f140d48 : undefined
+         * -------- reconstruct dalvik PC : 0x2d2365a6 @ +0x000f
+         * 0x2f140d4c : lui      a0,0x2d23(11555)
+         * 0x2f140d50 : ori      a0,a0,0x2d2365a6(757294502)
+         * 0x2f140d54 : b        0x2f140d68 (L0x12463c)
+         * 0x2f140d58 : lw       a1,108(s2)
+         * -------- reconstruct dalvik PC : 0x2d2365a6 @ +0x000f
+         * 0x2f140d5c : lui      a0,0x2d23(11555)
+         * 0x2f140d60 : ori      a0,a0,0x2d2365a6(757294502)
+         * Exception_Handling:
+         * 0x2f140d64 : lw       a1,108(s2)
+         * 0x2f140d68 : jalr     ra,a1
+         * 0x2f140d6c : nop
+         * 0x2f140d70 : .align4
+         * -------- chaining cell (hot): 0x0013
+         * 0x2f140d70 : lw       a0,116(s2)
+         * 0x2f140d74 : jalr     ra,a0
+         * 0x2f140d78 : nop
+         * 0x2f140d7c : data     0x2d2365ae(757294510)
+         * 0x2f140d80 : .align4
+         * -------- chaining cell (predicted): N/A
+         * 0x2f140d80 : data     0xe7fe(59390)
+         * 0x2f140d84 : data     0x0000(0)
+         * 0x2f140d88 : data     0x0000(0)
+         * 0x2f140d8c : data     0x0000(0)
+         * 0x2f140d90 : data     0x0000(0)
+         * -------- end of chaining cells (0x0190)
+         */
+        case OP_INVOKE_INTERFACE:
+        case OP_INVOKE_INTERFACE_RANGE:
+        case OP_INVOKE_INTERFACE_JUMBO: {
+            MipsLIR *predChainingCell = &labelList[bb->taken->id];
+
+            /*
+             * If the invoke has non-null misPredBranchOver, we need to generate
+             * the non-inlined version of the invoke here to handle the
+             * mispredicted case.
+             */
+            if (mir->meta.callsiteInfo->misPredBranchOver) {
+                genLandingPadForMispredictedCallee(cUnit, mir, bb, labelList);
+            }
+
+            if (mir->dalvikInsn.opcode == OP_INVOKE_INTERFACE)
+                genProcessArgsNoRange(cUnit, mir, dInsn, &pcrLabel);
+            else
+                genProcessArgsRange(cUnit, mir, dInsn, &pcrLabel);
+
+            /* "this" is already left in r_A0 by genProcessArgs* */
+
+            /* r4PC = dalvikCallsite */
+            loadConstant(cUnit, r4PC,
+                         (int) (cUnit->method->insns + mir->offset));
+
+            /* r_A1 = &retChainingCell */
+            MipsLIR *addrRetChain = newLIR2(cUnit, kMipsLahi, r_A1, 0);
+            addrRetChain->generic.target = (LIR *) retChainingCell;
+            addrRetChain = newLIR3(cUnit, kMipsLalo, r_A1, r_A1, 0);
+            addrRetChain->generic.target = (LIR *) retChainingCell;
+
+            /* r_A2 = &predictedChainingCell */
+            MipsLIR *predictedChainingCell = newLIR2(cUnit, kMipsLahi, r_A2, 0);
+            predictedChainingCell->generic.target = (LIR *) predChainingCell;
+            predictedChainingCell = newLIR3(cUnit, kMipsLalo, r_A2, r_A2, 0);
+            predictedChainingCell->generic.target = (LIR *) predChainingCell;
+
+            genDispatchToHandler(cUnit, gDvmJit.methodTraceSupport ?
+                TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF :
+                TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN);
+
+            /* return through ra - jump to the chaining cell */
+            genUnconditionalBranch(cUnit, predChainingCell);
+
+            /*
+             * null-check on "this" may have been eliminated, but we still need
+             * a PC-reconstruction label for stack overflow bailout.
+             */
+            if (pcrLabel == NULL) {
+                int dPC = (int) (cUnit->method->insns + mir->offset);
+                pcrLabel = (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR), true);
+                pcrLabel->opcode = kMipsPseudoPCReconstructionCell;
+                pcrLabel->operands[0] = dPC;
+                pcrLabel->operands[1] = mir->offset;
+                /* Insert the place holder to the growable list */
+                dvmInsertGrowableList(&cUnit->pcReconstructionList,
+                                      (intptr_t) pcrLabel);
+            }
+
+            /* return through ra+8 - punt to the interpreter */
+            genUnconditionalBranch(cUnit, pcrLabel);
+
+            /*
+             * return through ra+16 - fully resolve the callee method.
+             * r_A1 <- count
+             * r_A2 <- &predictedChainCell
+             * r_A3 <- this->class
+             * r4PC <- dPC
+             * r_S4 <- this->class->vtable
+             */
+
+            /* Save count, &predictedChainCell, and class to high regs first */
+            genRegCopy(cUnit, r_S5, r_A1);
+            genRegCopy(cUnit, r_S6, r_A2);
+            genRegCopy(cUnit, r_S7, r_A3);
+
+            /* r_A0 now contains this->clazz */
+            genRegCopy(cUnit, r_A0, r_A3);
+
+            /* r_A1 = BBBB */
+            loadConstant(cUnit, r_A1, dInsn->vB);
+
+            /* r_A2 = method (caller) */
+            loadConstant(cUnit, r_A2, (int) cUnit->method);
+
+            /* r_A3 = pDvmDex */
+            loadConstant(cUnit, r_A3, (int) cUnit->method->clazz->pDvmDex);
+
+            LOAD_FUNC_ADDR(cUnit, r_T9,
+                           (intptr_t) dvmFindInterfaceMethodInCache);
+            opReg(cUnit, kOpBlx, r_T9);
+            newLIR3(cUnit, kMipsLw, r_GP, STACK_OFFSET_GP, r_SP);
+            /* r_V0 = calleeMethod (returned from dvmFindInterfaceMethodInCache) */
+            genRegCopy(cUnit, r_A0, r_V0);
+
+            dvmCompilerClobberCallRegs(cUnit);
+            /* generate a branch over if the interface method is resolved */
+            MipsLIR *branchOver = opCompareBranch(cUnit, kMipsBne, r_V0, r_ZERO);
+            /*
+             * calleeMethod == NULL -> throw
+             */
+            loadConstant(cUnit, r_A0,
+                         (int) (cUnit->method->insns + mir->offset));
+            genDispatchToHandler(cUnit, TEMPLATE_THROW_EXCEPTION_COMMON);
+            /* noreturn */
+
+            MipsLIR *target = newLIR0(cUnit, kMipsPseudoTargetLabel);
+            target->defMask = ENCODE_ALL;
+            branchOver->generic.target = (LIR *) target;
+
+            genRegCopy(cUnit, r_A1, r_S5);
+
+            /* Check if rechain limit is reached */
+            MipsLIR *bypassRechaining = opCompareBranch(cUnit, kMipsBgtz, r_S5, -1);
+
+            LOAD_FUNC_ADDR(cUnit, r_T9, (int) dvmJitToPatchPredictedChain);
+
+            genRegCopy(cUnit, r_A1, rSELF);
+            genRegCopy(cUnit, r_A2, r_S6);
+            genRegCopy(cUnit, r_A3, r_S7);
+
+            /*
+             * r_A0 = calleeMethod
+             * r_A2 = &predictedChainingCell
+             * r_A3 = class
+             *
+             * &returnChainingCell has been loaded into r_A1 but is not needed
+             * when patching the chaining cell and will be clobbered upon
+             * returning so it will be reconstructed again.
+             */
+            opReg(cUnit, kOpBlx, r_T9);
+            newLIR3(cUnit, kMipsLw, r_GP, STACK_OFFSET_GP, r_SP);
+            genRegCopy(cUnit, r_A0, r_V0);
+
+            /* r_A1 = &retChainingCell */
+            addrRetChain = newLIR2(cUnit, kMipsLahi, r_A1, 0);
+            addrRetChain->generic.target = (LIR *) retChainingCell;
+            bypassRechaining->generic.target = (LIR *) addrRetChain;
+            addrRetChain = newLIR3(cUnit, kMipsLalo, r_A1, r_A1, 0);
+            addrRetChain->generic.target = (LIR *) retChainingCell;
+
+            /*
+             * r_A0 = calleeMethod,
+             * r_A1 = &retChainingCell,
+             * r4PC = callsiteDPC
+             */
+            genDispatchToHandler(cUnit, gDvmJit.methodTraceSupport ?
+                TEMPLATE_INVOKE_METHOD_NO_OPT_PROF :
+                TEMPLATE_INVOKE_METHOD_NO_OPT);
+
+#if defined(WITH_JIT_TUNING)
+            gDvmJit.invokePolymorphic++;
+#endif
+            /* Handle exceptions using the interpreter */
+            genTrap(cUnit, mir->offset, pcrLabel);
+            break;
+        }
+        case OP_INVOKE_OBJECT_INIT_JUMBO:
+        case OP_INVOKE_OBJECT_INIT_RANGE:
+        case OP_FILLED_NEW_ARRAY:
+        case OP_FILLED_NEW_ARRAY_RANGE:
+        case OP_FILLED_NEW_ARRAY_JUMBO: {
+            /* Just let the interpreter deal with these */
+            genInterpSingleStep(cUnit, mir);
+            break;
+        }
+        default:
+            return true;
+    }
+    return false;
+}
+
+static bool handleFmt35ms_3rms(CompilationUnit *cUnit, MIR *mir,
+                               BasicBlock *bb, MipsLIR *labelList)
+{
+    MipsLIR *pcrLabel = NULL;
+
+    /* An invoke with the MIR_INLINED is effectively a no-op */
+    if (mir->OptimizationFlags & MIR_INLINED)
+        return false;
+
+    DecodedInstruction *dInsn = &mir->dalvikInsn;
+    switch (mir->dalvikInsn.opcode) {
+        /* calleeMethod = this->clazz->vtable[BBBB] */
+        case OP_INVOKE_VIRTUAL_QUICK_RANGE:
+        case OP_INVOKE_VIRTUAL_QUICK: {
+            int methodIndex = dInsn->vB;
+            MipsLIR *retChainingCell = &labelList[bb->fallThrough->id];
+            MipsLIR *predChainingCell = &labelList[bb->taken->id];
+
+            /*
+             * If the invoke has non-null misPredBranchOver, we need to generate
+             * the non-inlined version of the invoke here to handle the
+             * mispredicted case.
+             */
+            if (mir->meta.callsiteInfo->misPredBranchOver) {
+                genLandingPadForMispredictedCallee(cUnit, mir, bb, labelList);
+            }
+
+            if (mir->dalvikInsn.opcode == OP_INVOKE_VIRTUAL_QUICK)
+                genProcessArgsNoRange(cUnit, mir, dInsn, &pcrLabel);
+            else
+                genProcessArgsRange(cUnit, mir, dInsn, &pcrLabel);
+
+            if (mir->OptimizationFlags & MIR_INVOKE_METHOD_JIT) {
+                const Method *calleeMethod = mir->meta.callsiteInfo->method;
+                void *calleeAddr = dvmJitGetMethodAddr(calleeMethod->insns);
+                assert(calleeAddr);
+                genInvokeVirtualWholeMethod(cUnit, mir, calleeAddr,
+                                            retChainingCell);
+            }
+
+            genInvokeVirtualCommon(cUnit, mir, methodIndex,
+                                   retChainingCell,
+                                   predChainingCell,
+                                   pcrLabel);
+            break;
+        }
+        /* calleeMethod = method->clazz->super->vtable[BBBB] */
+        case OP_INVOKE_SUPER_QUICK:
+        case OP_INVOKE_SUPER_QUICK_RANGE: {
+            /* Grab the method ptr directly from what the interpreter sees */
+            const Method *calleeMethod = mir->meta.callsiteInfo->method;
+            assert(calleeMethod ==
+                   cUnit->method->clazz->super->vtable[dInsn->vB]);
+
+            if (mir->dalvikInsn.opcode == OP_INVOKE_SUPER_QUICK)
+                genProcessArgsNoRange(cUnit, mir, dInsn, &pcrLabel);
+            else
+                genProcessArgsRange(cUnit, mir, dInsn, &pcrLabel);
+
+            /* r_A0 = calleeMethod */
+            loadConstant(cUnit, r_A0, (int) calleeMethod);
+
+            genInvokeSingletonCommon(cUnit, mir, bb, labelList, pcrLabel,
+                                     calleeMethod);
+            break;
+        }
+        default:
+            return true;
+    }
+    return false;
+}
+
+/*
+ * This operation is complex enough that we'll do it partly inline
+ * and partly with a handler.  NOTE: the handler uses hardcoded
+ * values for string object offsets and must be revisited if the
+ * layout changes.
+ */
+static bool genInlinedCompareTo(CompilationUnit *cUnit, MIR *mir)
+{
+#if defined(USE_GLOBAL_STRING_DEFS)
+    return handleExecuteInlineC(cUnit, mir);
+#else
+    MipsLIR *rollback;
+    RegLocation rlThis = dvmCompilerGetSrc(cUnit, mir, 0);
+    RegLocation rlComp = dvmCompilerGetSrc(cUnit, mir, 1);
+
+    loadValueDirectFixed(cUnit, rlThis, r_A0);
+    loadValueDirectFixed(cUnit, rlComp, r_A1);
+    /* Test objects for NULL */
+    rollback = genNullCheck(cUnit, rlThis.sRegLow, r_A0, mir->offset, NULL);
+    genNullCheck(cUnit, rlComp.sRegLow, r_A1, mir->offset, rollback);
+    /*
+     * TUNING: we could check for object pointer equality before invoking
+     * handler. Unclear whether the gain would be worth the added code size
+     * expansion.
+     */
+    genDispatchToHandler(cUnit, TEMPLATE_STRING_COMPARETO);
+    storeValue(cUnit, inlinedTarget(cUnit, mir, false),
+               dvmCompilerGetReturn(cUnit));
+    return false;
+#endif
+}
+
+static bool genInlinedFastIndexOf(CompilationUnit *cUnit, MIR *mir)
+{
+#if defined(USE_GLOBAL_STRING_DEFS)
+    return handleExecuteInlineC(cUnit, mir);
+#else
+    RegLocation rlThis = dvmCompilerGetSrc(cUnit, mir, 0);
+    RegLocation rlChar = dvmCompilerGetSrc(cUnit, mir, 1);
+
+    loadValueDirectFixed(cUnit, rlThis, r_A0);
+    loadValueDirectFixed(cUnit, rlChar, r_A1);
+
+    RegLocation rlStart = dvmCompilerGetSrc(cUnit, mir, 2);
+    loadValueDirectFixed(cUnit, rlStart, r_A2);
+
+    /* Test objects for NULL */
+    genNullCheck(cUnit, rlThis.sRegLow, r_A0, mir->offset, NULL);
+    genDispatchToHandler(cUnit, TEMPLATE_STRING_INDEXOF);
+    storeValue(cUnit, inlinedTarget(cUnit, mir, false),
+               dvmCompilerGetReturn(cUnit));
+    return false;
+#endif
+}
+
+// Generates an inlined String.isEmpty or String.length.
+static bool genInlinedStringIsEmptyOrLength(CompilationUnit *cUnit, MIR *mir,
+                                            bool isEmpty)
+{
+    // dst = src.length();
+    RegLocation rlObj = dvmCompilerGetSrc(cUnit, mir, 0);
+    RegLocation rlDest = inlinedTarget(cUnit, mir, false);
+    rlObj = loadValue(cUnit, rlObj, kCoreReg);
+    RegLocation rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
+    genNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, mir->offset, NULL);
+    loadWordDisp(cUnit, rlObj.lowReg, gDvm.offJavaLangString_count,
+                 rlResult.lowReg);
+    if (isEmpty) {
+        // dst = (dst == 0);
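+        // sltu sets tReg = (0 < length), i.e. (length != 0); XOR-ing
+        // with 1 then inverts that bit, yielding (length == 0).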
+        int tReg = dvmCompilerAllocTemp(cUnit);
+        newLIR3(cUnit, kMipsSltu, tReg, r_ZERO, rlResult.lowReg);
+        opRegRegImm(cUnit, kOpXor, rlResult.lowReg, tReg, 1);
+    }
+    storeValue(cUnit, rlDest, rlResult);
+    return false;
+}
+
+static bool genInlinedStringLength(CompilationUnit *cUnit, MIR *mir)
+{
+    return genInlinedStringIsEmptyOrLength(cUnit, mir, false);
+}
+
+static bool genInlinedStringIsEmpty(CompilationUnit *cUnit, MIR *mir)
+{
+    return genInlinedStringIsEmptyOrLength(cUnit, mir, true);
+}
+
+static bool genInlinedStringCharAt(CompilationUnit *cUnit, MIR *mir)
+{
+    int contents = OFFSETOF_MEMBER(ArrayObject, contents);
+    RegLocation rlObj = dvmCompilerGetSrc(cUnit, mir, 0);
+    RegLocation rlIdx = dvmCompilerGetSrc(cUnit, mir, 1);
+    RegLocation rlDest = inlinedTarget(cUnit, mir, false);
+    RegLocation rlResult;
+    rlObj = loadValue(cUnit, rlObj, kCoreReg);
+    rlIdx = loadValue(cUnit, rlIdx, kCoreReg);
+    int regMax = dvmCompilerAllocTemp(cUnit);
+    int regOff = dvmCompilerAllocTemp(cUnit);
+    int regPtr = dvmCompilerAllocTemp(cUnit);
+    MipsLIR *pcrLabel = genNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg,
+                                    mir->offset, NULL);
+    loadWordDisp(cUnit, rlObj.lowReg, gDvm.offJavaLangString_count, regMax);
+    loadWordDisp(cUnit, rlObj.lowReg, gDvm.offJavaLangString_offset, regOff);
+    loadWordDisp(cUnit, rlObj.lowReg, gDvm.offJavaLangString_value, regPtr);
+    genBoundsCheck(cUnit, rlIdx.lowReg, regMax, mir->offset, pcrLabel);
+    dvmCompilerFreeTemp(cUnit, regMax);
+    opRegImm(cUnit, kOpAdd, regPtr, contents);
+    opRegReg(cUnit, kOpAdd, regOff, rlIdx.lowReg);
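+    /*
+     * contents is a UTF-16 char array, so loadBaseIndexed below scales the
+     * combined (offset + index) by 2 (shift of 1) for kUnsignedHalf.
+     */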
+    rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
+    loadBaseIndexed(cUnit, regPtr, regOff, rlResult.lowReg, 1, kUnsignedHalf);
+    storeValue(cUnit, rlDest, rlResult);
+    return false;
+}
+
+static bool genInlinedAbsInt(CompilationUnit *cUnit, MIR *mir)
+{
+    RegLocation rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
+    rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
+    RegLocation rlDest = inlinedTarget(cUnit, mir, false);
+    RegLocation rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
+    int signReg = dvmCompilerAllocTemp(cUnit);
+    /*
+     * abs(x): y = x >> 31 (arithmetic shift), abs = (x + y) ^ y.
+     * Thumb2's IT block also yields 3 instructions, but imposes
+     * scheduling constraints.
+     */
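+    /*
+     * Worked example (32-bit two's complement): x = -5 gives
+     * y = 0xffffffff, x + y = -6, and (-6) ^ y = 5.
+     */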
+    opRegRegImm(cUnit, kOpAsr, signReg, rlSrc.lowReg, 31);
+    opRegRegReg(cUnit, kOpAdd, rlResult.lowReg, rlSrc.lowReg, signReg);
+    opRegReg(cUnit, kOpXor, rlResult.lowReg, signReg);
+    storeValue(cUnit, rlDest, rlResult);
+    return false;
+}
+
+static bool genInlinedAbsLong(CompilationUnit *cUnit, MIR *mir)
+{
+    RegLocation rlSrc = dvmCompilerGetSrcWide(cUnit, mir, 0, 1);
+    RegLocation rlDest = inlinedTargetWide(cUnit, mir, false);
+    rlSrc = loadValueWide(cUnit, rlSrc, kCoreReg);
+    RegLocation rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
+    int signReg = dvmCompilerAllocTemp(cUnit);
+    int tReg = dvmCompilerAllocTemp(cUnit);
+    /*
+     * abs(x): y = x >> 31 (arithmetic shift), abs = (x + y) ^ y.
+     * Thumb2 IT block allows slightly shorter sequence,
+     * but introduces a scheduling barrier.  Stick with this
+     * mechanism for now.
+     */
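+    /*
+     * The sltu after the low-word add recovers the carry: unsigned
+     * (low + sign) < sign exactly when the addition wrapped around.
+     */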
+    opRegRegImm(cUnit, kOpAsr, signReg, rlSrc.highReg, 31);
+    opRegRegReg(cUnit, kOpAdd, rlResult.lowReg, rlSrc.lowReg, signReg);
+    newLIR3(cUnit, kMipsSltu, tReg, rlResult.lowReg, signReg);
+    opRegRegReg(cUnit, kOpAdd, rlResult.highReg, rlSrc.highReg, signReg);
+    opRegRegReg(cUnit, kOpAdd, rlResult.highReg, rlResult.highReg, tReg);
+    opRegReg(cUnit, kOpXor, rlResult.lowReg, signReg);
+    opRegReg(cUnit, kOpXor, rlResult.highReg, signReg);
+    dvmCompilerFreeTemp(cUnit, signReg);
+    dvmCompilerFreeTemp(cUnit, tReg);
+    storeValueWide(cUnit, rlDest, rlResult);
+    return false;
+}
+
+static bool genInlinedIntFloatConversion(CompilationUnit *cUnit, MIR *mir)
+{
+    // Just move from source to destination...
+    RegLocation rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
+    RegLocation rlDest = inlinedTarget(cUnit, mir, false);
+    storeValue(cUnit, rlDest, rlSrc);
+    return false;
+}
+
+static bool genInlinedLongDoubleConversion(CompilationUnit *cUnit, MIR *mir)
+{
+    // Just move from source to destination...
+    RegLocation rlSrc = dvmCompilerGetSrcWide(cUnit, mir, 0, 1);
+    RegLocation rlDest = inlinedTargetWide(cUnit, mir, false);
+    storeValueWide(cUnit, rlDest, rlSrc);
+    return false;
+}
+
+/*
+ * JITs a call to a C function.
+ * TODO: use this for faster native method invocation for simple native
+ * methods (http://b/3069458).
+ */
+static bool handleExecuteInlineC(CompilationUnit *cUnit, MIR *mir)
+{
+    DecodedInstruction *dInsn = &mir->dalvikInsn;
+    int operation = dInsn->vB;
+    unsigned int i;
+    const InlineOperation* inLineTable = dvmGetInlineOpsTable();
+    uintptr_t fn = (uintptr_t) inLineTable[operation].func;
+    if (fn == 0) {
+        dvmCompilerAbort(cUnit);
+    }
+    dvmCompilerFlushAllRegs(cUnit);   /* Everything to home location */
+    dvmCompilerClobberCallRegs(cUnit);
+    dvmCompilerClobber(cUnit, r4PC);
+    dvmCompilerClobber(cUnit, rINST);
+    int offset = offsetof(Thread, interpSave.retval);
+    opRegRegImm(cUnit, kOpAdd, r4PC, rSELF, offset);
+    newLIR3(cUnit, kMipsSw, r4PC, 16, r_SP); /* sp has plenty of space */
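+    /*
+     * Assumed o32 calling convention: the inline op takes a result pointer
+     * as its fifth argument, which lands on the stack at 16(sp); r4PC holds
+     * &self->interpSave.retval from the add above.
+     */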
+    genExportPC(cUnit, mir);
+    assert(dInsn->vA <= 4);
+    for (i = 0; i < dInsn->vA; i++) {
+        loadValueDirect(cUnit, dvmCompilerGetSrc(cUnit, mir, i), i+r_A0);
+    }
+    LOAD_FUNC_ADDR(cUnit, r_T9, fn);
+    opReg(cUnit, kOpBlx, r_T9);
+    newLIR3(cUnit, kMipsLw, r_GP, STACK_OFFSET_GP, r_SP);
+    /* If the inline op returned false (0), an exception is pending */
+    MipsLIR *branchOver = opCompareBranch(cUnit, kMipsBne, r_V0, r_ZERO);
+    loadConstant(cUnit, r_A0, (int) (cUnit->method->insns + mir->offset));
+    genDispatchToHandler(cUnit, TEMPLATE_THROW_EXCEPTION_COMMON);
+    MipsLIR *target = newLIR0(cUnit, kMipsPseudoTargetLabel);
+    target->defMask = ENCODE_ALL;
+    branchOver->generic.target = (LIR *) target;
+    return false;
+}
+
+/*
+ * NOTE: Handles both range and non-range versions (arguments
+ * have already been normalized by this point).
+ */
+static bool handleExecuteInline(CompilationUnit *cUnit, MIR *mir)
+{
+    DecodedInstruction *dInsn = &mir->dalvikInsn;
+    assert(dInsn->opcode == OP_EXECUTE_INLINE_RANGE ||
+           dInsn->opcode == OP_EXECUTE_INLINE);
+    switch (dInsn->vB) {
+        case INLINE_EMPTYINLINEMETHOD:
+            return false;  /* Nop */
+
+        /* These ones we potentially JIT inline. */
+        case INLINE_STRING_LENGTH:
+            return genInlinedStringLength(cUnit, mir);
+        case INLINE_STRING_IS_EMPTY:
+            return genInlinedStringIsEmpty(cUnit, mir);
+        case INLINE_MATH_ABS_INT:
+            return genInlinedAbsInt(cUnit, mir);
+        case INLINE_MATH_ABS_LONG:
+            return genInlinedAbsLong(cUnit, mir);
+        case INLINE_MATH_MIN_INT:
+            return genInlinedMinMaxInt(cUnit, mir, true);
+        case INLINE_MATH_MAX_INT:
+            return genInlinedMinMaxInt(cUnit, mir, false);
+        case INLINE_STRING_CHARAT:
+            return genInlinedStringCharAt(cUnit, mir);
+        case INLINE_MATH_SQRT:
+            return genInlineSqrt(cUnit, mir);
+        case INLINE_MATH_ABS_FLOAT:
+            return genInlinedAbsFloat(cUnit, mir);
+        case INLINE_MATH_ABS_DOUBLE:
+            return genInlinedAbsDouble(cUnit, mir);
+        case INLINE_STRING_COMPARETO:
+            return genInlinedCompareTo(cUnit, mir);
+        case INLINE_STRING_FASTINDEXOF_II:
+            return genInlinedFastIndexOf(cUnit, mir);
+        case INLINE_FLOAT_TO_RAW_INT_BITS:
+        case INLINE_INT_BITS_TO_FLOAT:
+            return genInlinedIntFloatConversion(cUnit, mir);
+        case INLINE_DOUBLE_TO_RAW_LONG_BITS:
+        case INLINE_LONG_BITS_TO_DOUBLE:
+            return genInlinedLongDoubleConversion(cUnit, mir);
+
+        /*
+         * These ones we just JIT a call to a C function for.
+         * TODO: special-case these in the other "invoke" call paths.
+         */
+        case INLINE_STRING_EQUALS:
+        case INLINE_MATH_COS:
+        case INLINE_MATH_SIN:
+        case INLINE_FLOAT_TO_INT_BITS:
+        case INLINE_DOUBLE_TO_LONG_BITS:
+            return handleExecuteInlineC(cUnit, mir);
+    }
+    dvmCompilerAbort(cUnit);
+    return false; // Not reachable; keeps compiler happy.
+}
+
+static bool handleFmt51l(CompilationUnit *cUnit, MIR *mir)
+{
+    // TUNING: We're using core regs here - not optimal when target is a double
+    RegLocation rlDest = dvmCompilerGetDestWide(cUnit, mir, 0, 1);
+    RegLocation rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
+    loadConstantNoClobber(cUnit, rlResult.lowReg,
+                          mir->dalvikInsn.vB_wide & 0xFFFFFFFFUL);
+    loadConstantNoClobber(cUnit, rlResult.highReg,
+                          (mir->dalvikInsn.vB_wide>>32) & 0xFFFFFFFFUL);
+    storeValueWide(cUnit, rlDest, rlResult);
+    return false;
+}
+
+/*
+ * The following are special processing routines that handle transfer of
+ * controls between compiled code and the interpreter. Certain VM states like
+ * Dalvik PC and special-purpose registers are reconstructed here.
+ */
+
+/* Chaining cell for code that may need warmup. */
+static void handleNormalChainingCell(CompilationUnit *cUnit,
+                                     unsigned int offset)
+{
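+    /*
+     * Cell layout: load the dvmJitToInterpNormal entry from the Thread
+     * struct, jump to it via jalr, then emit a data word holding the
+     * target Dalvik PC for the handler to pick up.
+     */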
+    newLIR3(cUnit, kMipsLw, r_A0,
+        offsetof(Thread, jitToInterpEntries.dvmJitToInterpNormal),
+        rSELF);
+    newLIR2(cUnit, kMipsJalr, r_RA, r_A0);
+    addWordData(cUnit, NULL, (int) (cUnit->method->insns + offset));
+}
+
+/*
+ * Chaining cell for instructions immediately following already-translated
+ * code.
+ */
+static void handleHotChainingCell(CompilationUnit *cUnit,
+                                  unsigned int offset)
+{
+    newLIR3(cUnit, kMipsLw, r_A0,
+        offsetof(Thread, jitToInterpEntries.dvmJitToInterpTraceSelect),
+        rSELF);
+    newLIR2(cUnit, kMipsJalr, r_RA, r_A0);
+    addWordData(cUnit, NULL, (int) (cUnit->method->insns + offset));
+}
+
+/* Chaining cell for branches that branch back into the same basic block */
+static void handleBackwardBranchChainingCell(CompilationUnit *cUnit,
+                                             unsigned int offset)
+{
+    /*
+     * Use raw instruction constructors to guarantee that the generated
+     * instructions fit the predefined cell size.
+     */
+#if defined(WITH_SELF_VERIFICATION)
+    newLIR3(cUnit, kMipsLw, r_A0,
+        offsetof(Thread, jitToInterpEntries.dvmJitToInterpBackwardBranch),
+        rSELF);
+#else
+    newLIR3(cUnit, kMipsLw, r_A0,
+        offsetof(Thread, jitToInterpEntries.dvmJitToInterpNormal),
+        rSELF);
+#endif
+    newLIR2(cUnit, kMipsJalr, r_RA, r_A0);
+    addWordData(cUnit, NULL, (int) (cUnit->method->insns + offset));
+}
+
+/* Chaining cell for monomorphic method invocations. */
+static void handleInvokeSingletonChainingCell(CompilationUnit *cUnit,
+                                              const Method *callee)
+{
+    newLIR3(cUnit, kMipsLw, r_A0,
+        offsetof(Thread, jitToInterpEntries.dvmJitToInterpTraceSelect),
+        rSELF);
+    newLIR2(cUnit, kMipsJalr, r_RA, r_A0);
+    addWordData(cUnit, NULL, (int) (callee->insns));
+}
+
+/* Chaining cell for predicted (polymorphic) method invocations. */
+static void handleInvokePredictedChainingCell(CompilationUnit *cUnit)
+{
+    /* Should not be executed in the initial state */
+    addWordData(cUnit, NULL, PREDICTED_CHAIN_BX_PAIR_INIT);
+    /* branch delay slot nop */
+    addWordData(cUnit, NULL, PREDICTED_CHAIN_DELAY_SLOT_INIT);
+    /* To be filled: class */
+    addWordData(cUnit, NULL, PREDICTED_CHAIN_CLAZZ_INIT);
+    /* To be filled: method */
+    addWordData(cUnit, NULL, PREDICTED_CHAIN_METHOD_INIT);
+    /*
+     * Rechain count. The initial value of 0 here will trigger chaining upon
+     * the first invocation of this callsite.
+     */
+    addWordData(cUnit, NULL, PREDICTED_CHAIN_COUNTER_INIT);
+}
+
+/* Load the Dalvik PC into a0 and jump to the specified target */
+static void handlePCReconstruction(CompilationUnit *cUnit,
+                                   MipsLIR *targetLabel)
+{
+    MipsLIR **pcrLabel =
+        (MipsLIR **) cUnit->pcReconstructionList.elemList;
+    int numElems = cUnit->pcReconstructionList.numUsed;
+    int i;
+
+    /*
+     * We should never reach here through fall-through code, so insert
+     * a bomb to signal troubles immediately.
+     */
+    if (numElems) {
+        newLIR0(cUnit, kMipsUndefined);
+    }
+
+    for (i = 0; i < numElems; i++) {
+        dvmCompilerAppendLIR(cUnit, (LIR *) pcrLabel[i]);
+        /* a0 = dalvik PC */
+        loadConstant(cUnit, r_A0, pcrLabel[i]->operands[0]);
+        genUnconditionalBranch(cUnit, targetLabel);
+    }
+}
+
+static const char *extendedMIROpNames[kMirOpLast - kMirOpFirst] = {
+    "kMirOpPhi",
+    "kMirOpNullNRangeUpCheck",
+    "kMirOpNullNRangeDownCheck",
+    "kMirOpLowerBound",
+    "kMirOpPunt",
+    "kMirOpCheckInlinePrediction",
+};
+
+/*
+ * vA = arrayReg;
+ * vB = idxReg;
+ * vC = endConditionReg;
+ * arg[0] = maxC
+ * arg[1] = minC
+ * arg[2] = loopBranchConditionCode
+ */
+static void genHoistedChecksForCountUpLoop(CompilationUnit *cUnit, MIR *mir)
+{
+    /*
+     * NOTE: these synthesized blocks don't have ssa names assigned
+     * for Dalvik registers.  However, because they dominate the following
+     * blocks we can simply use the Dalvik name w/ subscript 0 as the
+     * ssa name.
+     */
+    DecodedInstruction *dInsn = &mir->dalvikInsn;
+    const int lenOffset = OFFSETOF_MEMBER(ArrayObject, length);
+    const int maxC = dInsn->arg[0];
+    int regLength;
+    RegLocation rlArray = cUnit->regLocation[mir->dalvikInsn.vA];
+    RegLocation rlIdxEnd = cUnit->regLocation[mir->dalvikInsn.vC];
+
+    /* regArray <- arrayRef */
+    rlArray = loadValue(cUnit, rlArray, kCoreReg);
+    rlIdxEnd = loadValue(cUnit, rlIdxEnd, kCoreReg);
+    genRegImmCheck(cUnit, kMipsCondEq, rlArray.lowReg, 0, 0,
+                   (MipsLIR *) cUnit->loopAnalysis->branchToPCR);
+
+    /* regLength <- len(arrayRef) */
+    regLength = dvmCompilerAllocTemp(cUnit);
+    loadWordDisp(cUnit, rlArray.lowReg, lenOffset, regLength);
+
+    int delta = maxC;
+    /*
+     * If the loop end condition is ">=" instead of ">", then the largest value
+     * of the index is "endCondition - 1".
+     */
+    if (dInsn->arg[2] == OP_IF_GE) {
+        delta--;
+    }
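+    /*
+     * Example: exiting the loop on "i >= end" with maxC == 2 makes the
+     * hoisted check "end - 1 + 2 < len(array)", i.e. delta == maxC - 1.
+     */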
+
+    if (delta) {
+        int tReg = dvmCompilerAllocTemp(cUnit);
+        opRegRegImm(cUnit, kOpAdd, tReg, rlIdxEnd.lowReg, delta);
+        rlIdxEnd.lowReg = tReg;
+        dvmCompilerFreeTemp(cUnit, tReg);
+    }
+    /* Punt if "regIdxEnd < len(Array)" is false */
+    genRegRegCheck(cUnit, kMipsCondGe, rlIdxEnd.lowReg, regLength, 0,
+                   (MipsLIR *) cUnit->loopAnalysis->branchToPCR);
+}
+
+/*
+ * vA = arrayReg;
+ * vB = idxReg;
+ * vC = endConditionReg;
+ * arg[0] = maxC
+ * arg[1] = minC
+ * arg[2] = loopBranchConditionCode
+ */
+static void genHoistedChecksForCountDownLoop(CompilationUnit *cUnit, MIR *mir)
+{
+    DecodedInstruction *dInsn = &mir->dalvikInsn;
+    const int lenOffset = OFFSETOF_MEMBER(ArrayObject, length);
+    const int regLength = dvmCompilerAllocTemp(cUnit);
+    const int maxC = dInsn->arg[0];
+    RegLocation rlArray = cUnit->regLocation[mir->dalvikInsn.vA];
+    RegLocation rlIdxInit = cUnit->regLocation[mir->dalvikInsn.vB];
+
+    /* regArray <- arrayRef */
+    rlArray = loadValue(cUnit, rlArray, kCoreReg);
+    rlIdxInit = loadValue(cUnit, rlIdxInit, kCoreReg);
+    genRegImmCheck(cUnit, kMipsCondEq, rlArray.lowReg, 0, 0,
+                   (MipsLIR *) cUnit->loopAnalysis->branchToPCR);
+
+    /* regLength <- len(arrayRef) */
+    loadWordDisp(cUnit, rlArray.lowReg, lenOffset, regLength);
+
+    if (maxC) {
+        int tReg = dvmCompilerAllocTemp(cUnit);
+        opRegRegImm(cUnit, kOpAdd, tReg, rlIdxInit.lowReg, maxC);
+        rlIdxInit.lowReg = tReg;
+        dvmCompilerFreeTemp(cUnit, tReg);
+    }
+
+    /* Punt if "regIdxInit < len(Array)" is false */
+    genRegRegCheck(cUnit, kMipsCondGe, rlIdxInit.lowReg, regLength, 0,
+                   (MipsLIR *) cUnit->loopAnalysis->branchToPCR);
+}
+
+/*
+ * vA = idxReg;
+ * vB = minC;
+ */
+static void genHoistedLowerBoundCheck(CompilationUnit *cUnit, MIR *mir)
+{
+    DecodedInstruction *dInsn = &mir->dalvikInsn;
+    const int minC = dInsn->vB;
+    RegLocation rlIdx = cUnit->regLocation[mir->dalvikInsn.vA];
+
+    /* regIdx <- initial index value */
+    rlIdx = loadValue(cUnit, rlIdx, kCoreReg);
+
+    /* Punt if "regIdxInit + minC >= 0" is false */
+    genRegImmCheck(cUnit, kMipsCondLt, rlIdx.lowReg, -minC, 0,
+                   (MipsLIR *) cUnit->loopAnalysis->branchToPCR);
+}
+
+/*
+ * vC = this
+ *
+ * A predicted inlining target looks like the following, where instructions
+ * between 0x2f130d24 and 0x2f130d40 are checking if the predicted class
+ * matches "this", and the verification code is generated by this routine.
+ *
+ * (C) means the instruction is inlined from the callee, and (PI) means the
+ * instruction is the predicted inlined invoke, whose corresponding
+ * instructions are still generated to handle the mispredicted case.
+ *
+ * D/dalvikvm( 2377): -------- kMirOpCheckInlinePrediction
+ * D/dalvikvm( 2377): 0x2f130d24 (0020):  lw       v0,16(s1)
+ * D/dalvikvm( 2377): 0x2f130d28 (0024):  lui      v1,0x0011(17)
+ * D/dalvikvm( 2377): 0x2f130d2c (0028):  ori      v1,v1,0x11e418(1172504)
+ * D/dalvikvm( 2377): 0x2f130d30 (002c):  beqz     v0,0x2f130df0 (L0x11f1f0)
+ * D/dalvikvm( 2377): 0x2f130d34 (0030):  pref     0,0(v0)
+ * D/dalvikvm( 2377): 0x2f130d38 (0034):  lw       a0,0(v0)
+ * D/dalvikvm( 2377): 0x2f130d3c (0038):  bne      v1,a0,0x2f130d54 (L0x11f518)
+ * D/dalvikvm( 2377): 0x2f130d40 (003c):  pref     0,8(v0)
+ * D/dalvikvm( 2377): -------- dalvik offset: 0x000a @ +iget-object-quick (C) v3, v4, (#8)
+ * D/dalvikvm( 2377): 0x2f130d44 (0040):  lw       a1,8(v0)
+ * D/dalvikvm( 2377): -------- dalvik offset: 0x000a @ +invoke-virtual-quick (PI) v4
+ * D/dalvikvm( 2377): 0x2f130d48 (0044):  sw       a1,12(s1)
+ * D/dalvikvm( 2377): 0x2f130d4c (0048):  b        0x2f130e18 (L0x120150)
+ * D/dalvikvm( 2377): 0x2f130d50 (004c):  lw       a0,116(s2)
+ * D/dalvikvm( 2377): L0x11f518:
+ * D/dalvikvm( 2377): 0x2f130d54 (0050):  lw       a0,16(s1)
+ * D/dalvikvm( 2377): 0x2f130d58 (0054):  addiu    s4,s1,0xffffffe8(-24)
+ * D/dalvikvm( 2377): 0x2f130d5c (0058):  beqz     a0,0x2f130e00 (L0x11f618)
+ * D/dalvikvm( 2377): 0x2f130d60 (005c):  pref     1,0(s4)
+ * D/dalvikvm( 2377): -------- BARRIER
+ * D/dalvikvm( 2377): 0x2f130d64 (0060):  sw       a0,0(s4)
+ * D/dalvikvm( 2377): 0x2f130d68 (0064):  addiu    s4,s4,0x0004(4)
+ * D/dalvikvm( 2377): -------- BARRIER
+ * D/dalvikvm( 2377): 0x2f130d6c (0068):  lui      s0,0x2d22(11554)
+ * D/dalvikvm( 2377): 0x2f130d70 (006c):  ori      s0,s0,0x2d228464(757236836)
+ * D/dalvikvm( 2377): 0x2f130d74 (0070):  lahi/lui a1,0x2f13(12051)
+ * D/dalvikvm( 2377): 0x2f130d78 (0074):  lalo/ori a1,a1,0x2f130ddc(789777884)
+ * D/dalvikvm( 2377): 0x2f130d7c (0078):  lahi/lui a2,0x2f13(12051)
+ * D/dalvikvm( 2377): 0x2f130d80 (007c):  lalo/ori a2,a2,0x2f130e24(789777956)
+ * D/dalvikvm( 2377): 0x2f130d84 (0080):  jal      0x2f12d1ec(789762540)
+ * D/dalvikvm( 2377): 0x2f130d88 (0084):  nop
+ * D/dalvikvm( 2377): 0x2f130d8c (0088):  b        0x2f130e24 (L0x11ed6c)
+ * D/dalvikvm( 2377): 0x2f130d90 (008c):  nop
+ * D/dalvikvm( 2377): 0x2f130d94 (0090):  b        0x2f130e04 (L0x11ffd0)
+ * D/dalvikvm( 2377): 0x2f130d98 (0094):  lui      a0,0x2d22(11554)
+ * D/dalvikvm( 2377): 0x2f130d9c (0098):  lw       a0,44(s4)
+ * D/dalvikvm( 2377): 0x2f130da0 (009c):  bgtz     a1,0x2f130dc4 (L0x11fb98)
+ * D/dalvikvm( 2377): 0x2f130da4 (00a0):  nop
+ * D/dalvikvm( 2377): 0x2f130da8 (00a4):  lui      t9,0x2aba(10938)
+ * D/dalvikvm( 2377): 0x2f130dac (00a8):  ori      t9,t9,0x2abae3f8(716891128)
+ * D/dalvikvm( 2377): 0x2f130db0 (00ac):  move     a1,s2
+ * D/dalvikvm( 2377): 0x2f130db4 (00b0):  jalr     ra,t9
+ * D/dalvikvm( 2377): 0x2f130db8 (00b4):  nop
+ * D/dalvikvm( 2377): 0x2f130dbc (00b8):  lw       gp,84(sp)
+ * D/dalvikvm( 2377): 0x2f130dc0 (00bc):  move     a0,v0
+ * D/dalvikvm( 2377): 0x2f130dc4 (00c0):  lahi/lui a1,0x2f13(12051)
+ * D/dalvikvm( 2377): 0x2f130dc8 (00c4):  lalo/ori a1,a1,0x2f130ddc(789777884)
+ * D/dalvikvm( 2377): 0x2f130dcc (00c8):  jal      0x2f12d0c4(789762244)
+ * D/dalvikvm( 2377): 0x2f130dd0 (00cc):  nop
+ * D/dalvikvm( 2377): 0x2f130dd4 (00d0):  b        0x2f130e04 (L0x11ffd0)
+ * D/dalvikvm( 2377): 0x2f130dd8 (00d4):  lui      a0,0x2d22(11554)
+ * D/dalvikvm( 2377): 0x2f130ddc (00d8): .align4
+ * D/dalvikvm( 2377): L0x11ed2c:
+ * D/dalvikvm( 2377): -------- dalvik offset: 0x000d @ move-result-object (PI) v3, (#0), (#0)
+ * D/dalvikvm( 2377): 0x2f130ddc (00d8):  lw       a2,16(s2)
+ * D/dalvikvm( 2377): 0x2f130de0 (00dc):  sw       a2,12(s1)
+ * D/dalvikvm( 2377): 0x2f130de4 (00e0):  b        0x2f130e18 (L0x120150)
+ * D/dalvikvm( 2377): 0x2f130de8 (00e4):  lw       a0,116(s2)
+ * D/dalvikvm( 2377): 0x2f130dec (00e8):  undefined
+ * D/dalvikvm( 2377): L0x11f1f0:
+ * D/dalvikvm( 2377): -------- reconstruct dalvik PC : 0x2d228464 @ +0x000a
+ * D/dalvikvm( 2377): 0x2f130df0 (00ec):  lui      a0,0x2d22(11554)
+ * D/dalvikvm( 2377): 0x2f130df4 (00f0):  ori      a0,a0,0x2d228464(757236836)
+ * D/dalvikvm( 2377): 0x2f130df8 (00f4):  b        0x2f130e0c (L0x120090)
+ * D/dalvikvm( 2377): 0x2f130dfc (00f8):  lw       a1,108(s2)
+ * D/dalvikvm( 2377): L0x11f618:
+ * D/dalvikvm( 2377): -------- reconstruct dalvik PC : 0x2d228464 @ +0x000a
+ * D/dalvikvm( 2377): 0x2f130e00 (00fc):  lui      a0,0x2d22(11554)
+ * D/dalvikvm( 2377): 0x2f130e04 (0100):  ori      a0,a0,0x2d228464(757236836)
+ * D/dalvikvm( 2377): Exception_Handling:
+ * D/dalvikvm( 2377): 0x2f130e08 (0104):  lw       a1,108(s2)
+ * D/dalvikvm( 2377): 0x2f130e0c (0108):  jalr     ra,a1
+ * D/dalvikvm( 2377): 0x2f130e10 (010c):  nop
+ * D/dalvikvm( 2377): 0x2f130e14 (0110): .align4
+ * D/dalvikvm( 2377): L0x11edac:
+ * D/dalvikvm( 2377): -------- chaining cell (hot): 0x000e
+ * D/dalvikvm( 2377): 0x2f130e14 (0110):  lw       a0,116(s2)
+ * D/dalvikvm( 2377): 0x2f130e18 (0114):  jalr     ra,a0
+ * D/dalvikvm( 2377): 0x2f130e1c (0118):  nop
+ * D/dalvikvm( 2377): 0x2f130e20 (011c):  data     0x2d22846c(757236844)
+ * D/dalvikvm( 2377): 0x2f130e24 (0120): .align4
+ * D/dalvikvm( 2377): L0x11ed6c:
+ * D/dalvikvm( 2377): -------- chaining cell (predicted)
+ * D/dalvikvm( 2377): 0x2f130e24 (0120):  data     0xe7fe(59390)
+ * D/dalvikvm( 2377): 0x2f130e28 (0124):  data     0x0000(0)
+ * D/dalvikvm( 2377): 0x2f130e2c (0128):  data     0x0000(0)
+ * D/dalvikvm( 2377): 0x2f130e30 (012c):  data     0x0000(0)
+ * D/dalvikvm( 2377): 0x2f130e34 (0130):  data     0x0000(0)
+ */
+static void genValidationForPredictedInline(CompilationUnit *cUnit, MIR *mir)
+{
+    CallsiteInfo *callsiteInfo = mir->meta.callsiteInfo;
+    RegLocation rlThis = cUnit->regLocation[mir->dalvikInsn.vC];
+
+    rlThis = loadValue(cUnit, rlThis, kCoreReg);
+    int regPredictedClass = dvmCompilerAllocTemp(cUnit);
+    loadClassPointer(cUnit, regPredictedClass, (int) callsiteInfo);
+    genNullCheck(cUnit, rlThis.sRegLow, rlThis.lowReg, mir->offset,
+                 NULL);/* null object? */
+    int regActualClass = dvmCompilerAllocTemp(cUnit);
+    loadWordDisp(cUnit, rlThis.lowReg, offsetof(Object, clazz), regActualClass);
+    /*
+     * Set the misPredBranchOver target so that it will be generated when the
+     * code for the non-optimized invoke is generated.
+     */
+    callsiteInfo->misPredBranchOver = (LIR *) opCompareBranch(cUnit, kMipsBne,
+                                                              regPredictedClass,
+                                                              regActualClass);
+}
+
+/* Extended MIR instructions like PHI */
+static void handleExtendedMIR(CompilationUnit *cUnit, MIR *mir)
+{
+    int opOffset = mir->dalvikInsn.opcode - kMirOpFirst;
+    char *msg = (char *)dvmCompilerNew(strlen(extendedMIROpNames[opOffset]) + 1,
+                                       false);
+    strcpy(msg, extendedMIROpNames[opOffset]);
+    newLIR1(cUnit, kMipsPseudoExtended, (int) msg);
+
+    switch ((ExtendedMIROpcode)mir->dalvikInsn.opcode) {
+        case kMirOpPhi: {
+            char *ssaString = dvmCompilerGetSSAString(cUnit, mir->ssaRep);
+            newLIR1(cUnit, kMipsPseudoSSARep, (int) ssaString);
+            break;
+        }
+        case kMirOpNullNRangeUpCheck: {
+            genHoistedChecksForCountUpLoop(cUnit, mir);
+            break;
+        }
+        case kMirOpNullNRangeDownCheck: {
+            genHoistedChecksForCountDownLoop(cUnit, mir);
+            break;
+        }
+        case kMirOpLowerBound: {
+            genHoistedLowerBoundCheck(cUnit, mir);
+            break;
+        }
+        case kMirOpPunt: {
+            genUnconditionalBranch(cUnit,
+                                   (MipsLIR *) cUnit->loopAnalysis->branchToPCR);
+            break;
+        }
+        case kMirOpCheckInlinePrediction: {
+            genValidationForPredictedInline(cUnit, mir);
+            break;
+        }
+        default:
+            break;
+    }
+}
+
+/*
+ * Create a PC-reconstruction cell for the starting offset of this trace.
+ * Since the PCR cell is placed near the end of the compiled code which is
+ * usually out of range for a conditional branch, we put two branches (one
+ * branch over to the loop body and one layover branch to the actual PCR) at the
+ * end of the entry block.
+ */
+static void setupLoopEntryBlock(CompilationUnit *cUnit, BasicBlock *entry,
+                                MipsLIR *bodyLabel)
+{
+    /* Set up the place holder to reconstruct this Dalvik PC */
+    MipsLIR *pcrLabel = (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR), true);
+    pcrLabel->opcode = kMipsPseudoPCReconstructionCell;
+    pcrLabel->operands[0] =
+        (int) (cUnit->method->insns + entry->startOffset);
+    pcrLabel->operands[1] = entry->startOffset;
+    /* Insert the place holder to the growable list */
+    dvmInsertGrowableList(&cUnit->pcReconstructionList, (intptr_t) pcrLabel);
+
+    /*
+     * Next, create two branches - one branch over to the loop body and the
+     * other branch to the PCR cell to punt.
+     */
+    MipsLIR *branchToBody = (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR), true);
+    branchToBody->opcode = kMipsB;
+    branchToBody->generic.target = (LIR *) bodyLabel;
+    setupResourceMasks(branchToBody);
+    cUnit->loopAnalysis->branchToBody = (LIR *) branchToBody;
+
+    MipsLIR *branchToPCR = (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR), true);
+    branchToPCR->opcode = kMipsB;
+    branchToPCR->generic.target = (LIR *) pcrLabel;
+    setupResourceMasks(branchToPCR);
+    cUnit->loopAnalysis->branchToPCR = (LIR *) branchToPCR;
+}
+
+#if defined(WITH_SELF_VERIFICATION)
+static bool selfVerificationPuntOps(MIR *mir)
+{
+    assert(0); /* MIPSTODO port selfVerificationPuntOps() */
+    DecodedInstruction *decInsn = &mir->dalvikInsn;
+
+    /*
+     * All opcodes that can throw exceptions and use the
+     * TEMPLATE_THROW_EXCEPTION_COMMON template should be excluded in the trace
+     * under self-verification mode.
+     */
+    switch (decInsn->opcode) {
+        case OP_MONITOR_ENTER:
+        case OP_MONITOR_EXIT:
+        case OP_NEW_INSTANCE:
+        case OP_NEW_INSTANCE_JUMBO:
+        case OP_NEW_ARRAY:
+        case OP_NEW_ARRAY_JUMBO:
+        case OP_CHECK_CAST:
+        case OP_CHECK_CAST_JUMBO:
+        case OP_MOVE_EXCEPTION:
+        case OP_FILL_ARRAY_DATA:
+        case OP_EXECUTE_INLINE:
+        case OP_EXECUTE_INLINE_RANGE:
+            return true;
+        default:
+            return false;
+    }
+}
+#endif
+
+void dvmCompilerMIR2LIR(CompilationUnit *cUnit)
+{
+    /* Used to hold the labels of each block */
+    MipsLIR *labelList =
+        (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR) * cUnit->numBlocks, true);
+    MipsLIR *headLIR = NULL;
+    GrowableList chainingListByType[kChainingCellGap];
+    int i;
+
+    /*
+     * Initialize the chaining lists for each type of chaining cell.
+     */
+    for (i = 0; i < kChainingCellGap; i++) {
+        dvmInitGrowableList(&chainingListByType[i], 2);
+    }
+
+    /* Clear the visited flag for each block */
+    dvmCompilerDataFlowAnalysisDispatcher(cUnit, dvmCompilerClearVisitedFlag,
+                                          kAllNodes, false /* isIterative */);
+
+    GrowableListIterator iterator;
+    dvmGrowableListIteratorInit(&cUnit->blockList, &iterator);
+
+    /* Traces start with a profiling entry point.  Generate it here. */
+    cUnit->profileCodeSize = genTraceProfileEntry(cUnit);
+
+    /* Handle the content in each basic block */
+    for (i = 0; ; i++) {
+        MIR *mir;
+        BasicBlock *bb = (BasicBlock *) dvmGrowableListIteratorNext(&iterator);
+        if (bb == NULL) break;
+        if (bb->visited == true) continue;
+
+        labelList[i].operands[0] = bb->startOffset;
+
+        if (bb->blockType >= kChainingCellGap) {
+            if (bb->isFallThroughFromInvoke == true) {
+                /* Align this block first since it is a return chaining cell */
+                newLIR0(cUnit, kMipsPseudoPseudoAlign4);
+            }
+            /*
+             * Append the label pseudo LIR first. Chaining cells will be handled
+             * separately afterwards.
+             */
+            dvmCompilerAppendLIR(cUnit, (LIR *) &labelList[i]);
+        }
+
+        if (bb->blockType == kEntryBlock) {
+            labelList[i].opcode = kMipsPseudoEntryBlock;
+            if (bb->firstMIRInsn == NULL) {
+                continue;
+            } else {
+                setupLoopEntryBlock(cUnit, bb,
+                                    &labelList[bb->fallThrough->id]);
+            }
+        } else if (bb->blockType == kExitBlock) {
+            labelList[i].opcode = kMipsPseudoExitBlock;
+            goto gen_fallthrough;
+        } else if (bb->blockType == kDalvikByteCode) {
+            if (bb->hidden == true) continue;
+            labelList[i].opcode = kMipsPseudoNormalBlockLabel;
+            /* Reset the register state */
+            dvmCompilerResetRegPool(cUnit);
+            dvmCompilerClobberAllRegs(cUnit);
+            dvmCompilerResetNullCheck(cUnit);
+        } else {
+            switch (bb->blockType) {
+                case kChainingCellNormal:
+                    labelList[i].opcode = kMipsPseudoChainingCellNormal;
+                    /* handle the codegen later */
+                    dvmInsertGrowableList(
+                        &chainingListByType[kChainingCellNormal], i);
+                    break;
+                case kChainingCellInvokeSingleton:
+                    labelList[i].opcode =
+                        kMipsPseudoChainingCellInvokeSingleton;
+                    labelList[i].operands[0] =
+                        (int) bb->containingMethod;
+                    /* handle the codegen later */
+                    dvmInsertGrowableList(
+                        &chainingListByType[kChainingCellInvokeSingleton], i);
+                    break;
+                case kChainingCellInvokePredicted:
+                    labelList[i].opcode =
+                        kMipsPseudoChainingCellInvokePredicted;
+                    /*
+                     * Move the cached method pointer from operand 1 to 0.
+                     * Operand 0 was clobbered earlier in this routine to store
+                     * the block starting offset, which is not applicable to
+                     * the predicted chaining cell.
+                     */
+                    labelList[i].operands[0] = labelList[i].operands[1];
+                    /* handle the codegen later */
+                    dvmInsertGrowableList(
+                        &chainingListByType[kChainingCellInvokePredicted], i);
+                    break;
+                case kChainingCellHot:
+                    labelList[i].opcode =
+                        kMipsPseudoChainingCellHot;
+                    /* handle the codegen later */
+                    dvmInsertGrowableList(
+                        &chainingListByType[kChainingCellHot], i);
+                    break;
+                case kPCReconstruction:
+                    /* Make sure exception handling block is next */
+                    labelList[i].opcode =
+                        kMipsPseudoPCReconstructionBlockLabel;
+                    handlePCReconstruction(cUnit,
+                                           &labelList[cUnit->puntBlock->id]);
+                    break;
+                case kExceptionHandling:
+                    labelList[i].opcode = kMipsPseudoEHBlockLabel;
+                    if (cUnit->pcReconstructionList.numUsed) {
+                        loadWordDisp(cUnit, rSELF, offsetof(Thread,
+                                     jitToInterpEntries.dvmJitToInterpPunt),
+                                     r_A1);
+                        opReg(cUnit, kOpBlx, r_A1);
+                    }
+                    break;
+                case kChainingCellBackwardBranch:
+                    labelList[i].opcode =
+                        kMipsPseudoChainingCellBackwardBranch;
+                    /* handle the codegen later */
+                    dvmInsertGrowableList(
+                        &chainingListByType[kChainingCellBackwardBranch],
+                        i);
+                    break;
+                default:
+                    break;
+            }
+            continue;
+        }
+
+        /*
+         * Try to build a longer optimization unit. Currently if the previous
+         * block ends with a goto, we continue adding instructions and don't
+         * reset the register allocation pool.
+         */
+        for (BasicBlock *nextBB = bb; nextBB != NULL;
+             nextBB = cUnit->nextCodegenBlock) {
+            bb = nextBB;
+            bb->visited = true;
+            cUnit->nextCodegenBlock = NULL;
+
+            for (mir = bb->firstMIRInsn; mir; mir = mir->next) {
+
+                dvmCompilerResetRegPool(cUnit);
+                if (gDvmJit.disableOpt & (1 << kTrackLiveTemps)) {
+                    dvmCompilerClobberAllRegs(cUnit);
+                }
+
+                if (gDvmJit.disableOpt & (1 << kSuppressLoads)) {
+                    dvmCompilerResetDefTracking(cUnit);
+                }
+
+                if ((int)mir->dalvikInsn.opcode >= (int)kMirOpFirst) {
+                    handleExtendedMIR(cUnit, mir);
+                    continue;
+                }
+
+                Opcode dalvikOpcode = mir->dalvikInsn.opcode;
+                InstructionFormat dalvikFormat =
+                    dexGetFormatFromOpcode(dalvikOpcode);
+                const char *note;
+                if (mir->OptimizationFlags & MIR_INLINED) {
+                    note = " (I)";
+                } else if (mir->OptimizationFlags & MIR_INLINED_PRED) {
+                    note = " (PI)";
+                } else if (mir->OptimizationFlags & MIR_CALLEE) {
+                    note = " (C)";
+                } else {
+                    note = NULL;
+                }
+
+                MipsLIR *boundaryLIR =
+                    newLIR2(cUnit, kMipsPseudoDalvikByteCodeBoundary,
+                            mir->offset,
+                            (int) dvmCompilerGetDalvikDisassembly(&mir->dalvikInsn,
+                                                                  note));
+                if (mir->ssaRep) {
+                    char *ssaString = dvmCompilerGetSSAString(cUnit, mir->ssaRep);
+                    newLIR1(cUnit, kMipsPseudoSSARep, (int) ssaString);
+                }
+
+                /* Remember the first LIR for this block */
+                if (headLIR == NULL) {
+                    headLIR = boundaryLIR;
+                    /* Set the first boundaryLIR as a scheduling barrier */
+                    headLIR->defMask = ENCODE_ALL;
+                }
+
+                bool notHandled;
+                /*
+                 * Debugging: screen the opcode first to see if it is in the
+                 * do[-not]-compile list
+                 */
+                bool singleStepMe = SINGLE_STEP_OP(dalvikOpcode);
+#if defined(WITH_SELF_VERIFICATION)
+                if (singleStepMe == false) {
+                    singleStepMe = selfVerificationPuntOps(mir);
+                }
+#endif
+                if (singleStepMe || cUnit->allSingleStep) {
+                    notHandled = false;
+                    genInterpSingleStep(cUnit, mir);
+                } else {
+                    opcodeCoverage[dalvikOpcode]++;
+                    switch (dalvikFormat) {
+                        case kFmt10t:
+                        case kFmt20t:
+                        case kFmt30t:
+                            notHandled = handleFmt10t_Fmt20t_Fmt30t(cUnit,
+                                      mir, bb, labelList);
+                            break;
+                        case kFmt10x:
+                            notHandled = handleFmt10x(cUnit, mir);
+                            break;
+                        case kFmt11n:
+                        case kFmt31i:
+                            notHandled = handleFmt11n_Fmt31i(cUnit, mir);
+                            break;
+                        case kFmt11x:
+                            notHandled = handleFmt11x(cUnit, mir);
+                            break;
+                        case kFmt12x:
+                            notHandled = handleFmt12x(cUnit, mir);
+                            break;
+                        case kFmt20bc:
+                        case kFmt40sc:
+                            notHandled = handleFmt20bc_Fmt40sc(cUnit, mir);
+                            break;
+                        case kFmt21c:
+                        case kFmt31c:
+                        case kFmt41c:
+                            notHandled = handleFmt21c_Fmt31c_Fmt41c(cUnit, mir);
+                            break;
+                        case kFmt21h:
+                            notHandled = handleFmt21h(cUnit, mir);
+                            break;
+                        case kFmt21s:
+                            notHandled = handleFmt21s(cUnit, mir);
+                            break;
+                        case kFmt21t:
+                            notHandled = handleFmt21t(cUnit, mir, bb,
+                                                      labelList);
+                            break;
+                        case kFmt22b:
+                        case kFmt22s:
+                            notHandled = handleFmt22b_Fmt22s(cUnit, mir);
+                            break;
+                        case kFmt22c:
+                        case kFmt52c:
+                            notHandled = handleFmt22c_Fmt52c(cUnit, mir);
+                            break;
+                        case kFmt22cs:
+                            notHandled = handleFmt22cs(cUnit, mir);
+                            break;
+                        case kFmt22t:
+                            notHandled = handleFmt22t(cUnit, mir, bb,
+                                                      labelList);
+                            break;
+                        case kFmt22x:
+                        case kFmt32x:
+                            notHandled = handleFmt22x_Fmt32x(cUnit, mir);
+                            break;
+                        case kFmt23x:
+                            notHandled = handleFmt23x(cUnit, mir);
+                            break;
+                        case kFmt31t:
+                            notHandled = handleFmt31t(cUnit, mir);
+                            break;
+                        case kFmt3rc:
+                        case kFmt35c:
+                        case kFmt5rc:
+                            notHandled = handleFmt35c_3rc_5rc(cUnit, mir, bb,
+                                                          labelList);
+                            break;
+                        case kFmt3rms:
+                        case kFmt35ms:
+                            notHandled = handleFmt35ms_3rms(cUnit, mir, bb,
+                                                            labelList);
+                            break;
+                        case kFmt35mi:
+                        case kFmt3rmi:
+                            notHandled = handleExecuteInline(cUnit, mir);
+                            break;
+                        case kFmt51l:
+                            notHandled = handleFmt51l(cUnit, mir);
+                            break;
+                        default:
+                            notHandled = true;
+                            break;
+                    }
+                }
+                if (notHandled) {
+                    LOGE("%#06x: Opcode %#x (%s) / Fmt %d not handled",
+                         mir->offset,
+                         dalvikOpcode, dexGetOpcodeName(dalvikOpcode),
+                         dalvikFormat);
+                    dvmCompilerAbort(cUnit);
+                    break;
+                }
+            }
+        }
+
+        if (bb->blockType == kEntryBlock) {
+            dvmCompilerAppendLIR(cUnit,
+                                 (LIR *) cUnit->loopAnalysis->branchToBody);
+            dvmCompilerAppendLIR(cUnit,
+                                 (LIR *) cUnit->loopAnalysis->branchToPCR);
+        }
+
+        if (headLIR) {
+            /*
+             * Eliminate redundant loads/stores and delay stores into later
+             * slots
+             */
+            dvmCompilerApplyLocalOptimizations(cUnit, (LIR *) headLIR,
+                                               cUnit->lastLIRInsn);
+            /* Reset headLIR which is also the optimization boundary */
+            headLIR = NULL;
+        }
+
+gen_fallthrough:
+        /*
+         * If the block was terminated due to the trace length constraint,
+         * insert an unconditional branch to the fall-through chaining cell.
+         */
+        if (bb->needFallThroughBranch) {
+            genUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
+        }
+    }
+
+    /* Handle the chaining cells in predefined order */
+    for (i = 0; i < kChainingCellGap; i++) {
+        size_t j;
+        int *blockIdList = (int *) chainingListByType[i].elemList;
+
+        cUnit->numChainingCells[i] = chainingListByType[i].numUsed;
+
+        /* No chaining cells of this type */
+        if (cUnit->numChainingCells[i] == 0)
+            continue;
+
+        /* Record the first LIR for a new type of chaining cell */
+        cUnit->firstChainingLIR[i] = (LIR *) &labelList[blockIdList[0]];
+
+        for (j = 0; j < chainingListByType[i].numUsed; j++) {
+            int blockId = blockIdList[j];
+            BasicBlock *chainingBlock =
+                (BasicBlock *) dvmGrowableListGetElement(&cUnit->blockList,
+                                                         blockId);
+
+            /* Align this chaining cell first */
+            newLIR0(cUnit, kMipsPseudoPseudoAlign4);
+
+            /* Insert the pseudo chaining instruction */
+            dvmCompilerAppendLIR(cUnit, (LIR *) &labelList[blockId]);
+
+            switch (chainingBlock->blockType) {
+                case kChainingCellNormal:
+                    handleNormalChainingCell(cUnit, chainingBlock->startOffset);
+                    break;
+                case kChainingCellInvokeSingleton:
+                    handleInvokeSingletonChainingCell(cUnit,
+                        chainingBlock->containingMethod);
+                    break;
+                case kChainingCellInvokePredicted:
+                    handleInvokePredictedChainingCell(cUnit);
+                    break;
+                case kChainingCellHot:
+                    handleHotChainingCell(cUnit, chainingBlock->startOffset);
+                    break;
+                case kChainingCellBackwardBranch:
+                    handleBackwardBranchChainingCell(cUnit,
+                        chainingBlock->startOffset);
+                    break;
+                default:
+                    LOGE("Bad blocktype %d", chainingBlock->blockType);
+                    dvmCompilerAbort(cUnit);
+            }
+        }
+    }
+
+    /* Mark the bottom of chaining cells */
+    cUnit->chainingCellBottom = (LIR *) newLIR0(cUnit, kMipsChainingCellBottom);
+
+    /*
+     * Generate the branch to the dvmJitToInterpNoChain entry point at the end
+     * of all chaining cells for the overflow cases.
+     */
+    if (cUnit->switchOverflowPad) {
+        loadConstant(cUnit, r_A0, (int) cUnit->switchOverflowPad);
+        loadWordDisp(cUnit, rSELF, offsetof(Thread,
+                     jitToInterpEntries.dvmJitToInterpNoChain), r_A2);
+        opRegReg(cUnit, kOpAdd, r_A1, r_A1);
+        opRegRegReg(cUnit, kOpAdd, r4PC, r_A0, r_A1);
+#if defined(WITH_JIT_TUNING)
+        loadConstant(cUnit, r_A0, kSwitchOverflow);
+#endif
+        opReg(cUnit, kOpBlx, r_A2);
+    }
+
+    dvmCompilerApplyGlobalOptimizations(cUnit);
+
+#if defined(WITH_SELF_VERIFICATION)
+    selfVerificationBranchInsertPass(cUnit);
+#endif
+}
+
+/*
+ * Accept the work and start compiling.  Returns true if compilation
+ * is attempted.
+ */
+bool dvmCompilerDoWork(CompilerWorkOrder *work)
+{
+    JitTraceDescription *desc;
+    bool isCompile;
+    bool success = true;
+
+    if (gDvmJit.codeCacheFull) {
+        return false;
+    }
+
+    switch (work->kind) {
+        case kWorkOrderTrace:
+            isCompile = true;
+            /* Start compilation with the maximum allowed trace length */
+            desc = (JitTraceDescription *)work->info;
+            success = dvmCompileTrace(desc, JIT_MAX_TRACE_LEN, &work->result,
+                                        work->bailPtr, 0 /* no hints */);
+            break;
+        case kWorkOrderTraceDebug: {
+            bool oldPrintMe = gDvmJit.printMe;
+            gDvmJit.printMe = true;
+            isCompile = true;
+            /* Start compilation with the maximum allowed trace length */
+            desc = (JitTraceDescription *)work->info;
+            success = dvmCompileTrace(desc, JIT_MAX_TRACE_LEN, &work->result,
+                                        work->bailPtr, 0 /* no hints */);
+            gDvmJit.printMe = oldPrintMe;
+            break;
+        }
+        case kWorkOrderProfileMode:
+            dvmJitChangeProfileMode((TraceProfilingModes)(int)work->info);
+            isCompile = false;
+            break;
+        default:
+            isCompile = false;
+            LOGE("Jit: unknown work order type");
+            assert(0);  // Bail if debug build, discard otherwise
+    }
+    if (!success)
+        work->result.codeAddress = NULL;
+    return isCompile;
+}
+
+/* Architecture-specific debugging helpers go here */
+void dvmCompilerArchDump(void)
+{
+    /* Print the opcodes compiled in this VM instance */
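+    /* e.g. coverage of opcodes 0x1, 0x2, 0x3 and 0x7 prints as "1-3,7" */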
+    int i, start, streak;
+    char buf[1024];
+
+    streak = i = 0;
+    buf[0] = 0;
+    while (i < 256 && opcodeCoverage[i] == 0) {
+        i++;
+    }
+    if (i == 256) {
+        return;
+    }
+    for (start = i++, streak = 1; i < 256; i++) {
+        if (opcodeCoverage[i]) {
+            streak++;
+        } else {
+            if (streak == 1) {
+                sprintf(buf+strlen(buf), "%x,", start);
+            } else {
+                sprintf(buf+strlen(buf), "%x-%x,", start, start + streak - 1);
+            }
+            streak = 0;
+            while (i < 256 && opcodeCoverage[i] == 0) {
+                i++;
+            }
+            if (i < 256) {
+                streak = 1;
+                start = i;
+            }
+        }
+    }
+    if (streak) {
+        if (streak == 1) {
+            sprintf(buf+strlen(buf), "%x", start);
+        } else {
+            sprintf(buf+strlen(buf), "%x-%x", start, start + streak - 1);
+        }
+    }
+    if (strlen(buf)) {
+        LOGD("dalvik.vm.jit.op = %s", buf);
+    }
+}
+
+/* Common initialization routine for an architecture family */
+bool dvmCompilerArchInit()
+{
+    int i;
+
+    for (i = 0; i < kMipsLast; i++) {
+        if (EncodingMap[i].opcode != i) {
+            LOGE("Encoding order for %s is wrong: expecting %d, seeing %d",
+                 EncodingMap[i].name, i, EncodingMap[i].opcode);
+            dvmAbort();  // OK to dvmAbort - build error
+        }
+    }
+
+    return dvmCompilerArchVariantInit();
+}
+
+void *dvmCompilerGetInterpretTemplate()
+{
+    return (void *) ((intptr_t) gDvmJit.codeCache +
+                     templateEntryOffsets[TEMPLATE_INTERPRET]);
+}
+
+JitInstructionSetType dvmCompilerGetInterpretTemplateSet()
+{
+    return DALVIK_JIT_MIPS;
+}
+
+/* Needed by the Assembler */
+void dvmCompilerSetupResourceMasks(MipsLIR *lir)
+{
+    setupResourceMasks(lir);
+}
+
+/* Needed by the ld/st optimizations */
+MipsLIR* dvmCompilerRegCopyNoInsert(CompilationUnit *cUnit, int rDest, int rSrc)
+{
+    return genRegCopyNoInsert(cUnit, rDest, rSrc);
+}
+
+/* Needed by the register allocator */
+MipsLIR* dvmCompilerRegCopy(CompilationUnit *cUnit, int rDest, int rSrc)
+{
+    return genRegCopy(cUnit, rDest, rSrc);
+}
+
+/* Needed by the register allocator */
+void dvmCompilerRegCopyWide(CompilationUnit *cUnit, int destLo, int destHi,
+                            int srcLo, int srcHi)
+{
+    genRegCopyWide(cUnit, destLo, destHi, srcLo, srcHi);
+}
+
+void dvmCompilerFlushRegImpl(CompilationUnit *cUnit, int rBase,
+                             int displacement, int rSrc, OpSize size)
+{
+    storeBaseDisp(cUnit, rBase, displacement, rSrc, size);
+}
+
+void dvmCompilerFlushRegWideImpl(CompilationUnit *cUnit, int rBase,
+                                 int displacement, int rSrcLo, int rSrcHi)
+{
+    storeBaseDispWide(cUnit, rBase, displacement, rSrcLo, rSrcHi);
+}
diff --git a/vm/compiler/codegen/mips/CodegenFactory.cpp b/vm/compiler/codegen/mips/CodegenFactory.cpp
new file mode 100644
index 0000000..1b604ec
--- /dev/null
+++ b/vm/compiler/codegen/mips/CodegenFactory.cpp
@@ -0,0 +1,349 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This file contains codegen and support common to all supported
+ * Mips variants.  It is included by:
+ *
+ *        Codegen-$(TARGET_ARCH_VARIANT).c
+ *
+ * which combines this common code with specific support found in the
+ * applicable directory below this one.
+ */
+
+
+/* Load a word at base + displacement.  Displacement must be a word multiple */
+static MipsLIR *loadWordDisp(CompilationUnit *cUnit, int rBase, int displacement,
+                            int rDest)
+{
+    return loadBaseDisp(cUnit, NULL, rBase, displacement, rDest, kWord,
+                        INVALID_SREG);
+}
+
+static MipsLIR *storeWordDisp(CompilationUnit *cUnit, int rBase,
+                             int displacement, int rSrc)
+{
+    return storeBaseDisp(cUnit, rBase, displacement, rSrc, kWord);
+}
+
+/*
+ * Load a Dalvik register into a physical register.  Take care when
+ * using this routine, as it doesn't perform any bookkeeping regarding
+ * register liveness.  That is the responsibility of the caller.
+ */
+static void loadValueDirect(CompilationUnit *cUnit, RegLocation rlSrc,
+                                int reg1)
+{
+    rlSrc = dvmCompilerUpdateLoc(cUnit, rlSrc);
+    if (rlSrc.location == kLocPhysReg) {
+        genRegCopy(cUnit, reg1, rlSrc.lowReg);
+    } else  if (rlSrc.location == kLocRetval) {
+        loadWordDisp(cUnit, rSELF, offsetof(Thread, interpSave.retval), reg1);
+    } else {
+        assert(rlSrc.location == kLocDalvikFrame);
+        loadWordDisp(cUnit, rFP, dvmCompilerS2VReg(cUnit, rlSrc.sRegLow) << 2,
+                     reg1);
+    }
+}
+
+/*
+ * Similar to loadValueDirect, but clobbers and allocates the target
+ * register.  Should be used when loading to a fixed register (for example,
+ * loading arguments to an out-of-line call).
+ */
+static void loadValueDirectFixed(CompilationUnit *cUnit, RegLocation rlSrc,
+                                 int reg1)
+{
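+    /* Clobber any cached copy and pin reg1 so the allocator won't reuse it */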
+    dvmCompilerClobber(cUnit, reg1);
+    dvmCompilerMarkInUse(cUnit, reg1);
+    loadValueDirect(cUnit, rlSrc, reg1);
+}
+
+/*
+ * Load a Dalvik register pair into physical registers.  Take care when
+ * using this routine, as it doesn't perform any bookkeeping regarding
+ * register liveness.  That is the responsibility of the caller.
+ */
+static void loadValueDirectWide(CompilationUnit *cUnit, RegLocation rlSrc,
+                                int regLo, int regHi)
+{
+    rlSrc = dvmCompilerUpdateLocWide(cUnit, rlSrc);
+    if (rlSrc.location == kLocPhysReg) {
+        genRegCopyWide(cUnit, regLo, regHi, rlSrc.lowReg, rlSrc.highReg);
+    } else if (rlSrc.location == kLocRetval) {
+        loadBaseDispWide(cUnit, NULL, rSELF, offsetof(Thread, interpSave.retval),
+                         regLo, regHi, INVALID_SREG);
+    } else {
+        assert(rlSrc.location == kLocDalvikFrame);
+        loadBaseDispWide(cUnit, NULL, rFP,
+                         dvmCompilerS2VReg(cUnit, rlSrc.sRegLow) << 2,
+                         regLo, regHi, INVALID_SREG);
+    }
+}
+
+/*
+ * Similar to loadValueDirectWide, but clobbers and allocates the target
+ * registers.  Should be used when loading to fixed registers (for example,
+ * loading arguments to an out-of-line call).
+ */
+static void loadValueDirectWideFixed(CompilationUnit *cUnit, RegLocation rlSrc,
+                                     int regLo, int regHi)
+{
+    dvmCompilerClobber(cUnit, regLo);
+    dvmCompilerClobber(cUnit, regHi);
+    dvmCompilerMarkInUse(cUnit, regLo);
+    dvmCompilerMarkInUse(cUnit, regHi);
+    loadValueDirectWide(cUnit, rlSrc, regLo, regHi);
+}
+
+static RegLocation loadValue(CompilationUnit *cUnit, RegLocation rlSrc,
+                             RegisterClass opKind)
+{
+    rlSrc = dvmCompilerEvalLoc(cUnit, rlSrc, opKind, false);
+    if (rlSrc.location == kLocDalvikFrame) {
+        loadValueDirect(cUnit, rlSrc, rlSrc.lowReg);
+        rlSrc.location = kLocPhysReg;
+        dvmCompilerMarkLive(cUnit, rlSrc.lowReg, rlSrc.sRegLow);
+    } else if (rlSrc.location == kLocRetval) {
+        loadWordDisp(cUnit, rSELF, offsetof(Thread, interpSave.retval),
+                     rlSrc.lowReg);
+        rlSrc.location = kLocPhysReg;
+        dvmCompilerClobber(cUnit, rlSrc.lowReg);
+    }
+    return rlSrc;
+}
+
+static void storeValue(CompilationUnit *cUnit, RegLocation rlDest,
+                       RegLocation rlSrc)
+{
+    LIR *defStart;
+    LIR *defEnd;
+    assert(!rlDest.wide);
+    assert(!rlSrc.wide);
+    dvmCompilerKillNullCheckedLoc(cUnit, rlDest);
+    rlSrc = dvmCompilerUpdateLoc(cUnit, rlSrc);
+    rlDest = dvmCompilerUpdateLoc(cUnit, rlDest);
+    if (rlSrc.location == kLocPhysReg) {
+        if (dvmCompilerIsLive(cUnit, rlSrc.lowReg) ||
+            (rlDest.location == kLocPhysReg)) {
+            // Src is live or Dest has assigned reg.
+            rlDest = dvmCompilerEvalLoc(cUnit, rlDest, kAnyReg, false);
+            genRegCopy(cUnit, rlDest.lowReg, rlSrc.lowReg);
+        } else {
+            // Just re-assign the registers.  Dest gets Src's regs
+            rlDest.lowReg = rlSrc.lowReg;
+            dvmCompilerClobber(cUnit, rlSrc.lowReg);
+        }
+    } else {
+        // Load Src either into promoted Dest or temps allocated for Dest
+        rlDest = dvmCompilerEvalLoc(cUnit, rlDest, kAnyReg, false);
+        loadValueDirect(cUnit, rlSrc, rlDest.lowReg);
+    }
+
+    // Dest is now live and dirty (until/if we flush it to home location)
+    dvmCompilerMarkLive(cUnit, rlDest.lowReg, rlDest.sRegLow);
+    dvmCompilerMarkDirty(cUnit, rlDest.lowReg);
+
+    if (rlDest.location == kLocRetval) {
+        storeBaseDisp(cUnit, rSELF, offsetof(Thread, interpSave.retval),
+                      rlDest.lowReg, kWord);
+        dvmCompilerClobber(cUnit, rlDest.lowReg);
+    } else {
+        dvmCompilerResetDefLoc(cUnit, rlDest);
+        if (dvmCompilerLiveOut(cUnit, rlDest.sRegLow)) {
+            defStart = (LIR *)cUnit->lastLIRInsn;
+            int vReg = dvmCompilerS2VReg(cUnit, rlDest.sRegLow);
+            storeBaseDisp(cUnit, rFP, vReg << 2, rlDest.lowReg, kWord);
+            dvmCompilerMarkClean(cUnit, rlDest.lowReg);
+            defEnd = (LIR *)cUnit->lastLIRInsn;
+            dvmCompilerMarkDef(cUnit, rlDest, defStart, defEnd);
+        }
+    }
+}
+
+static RegLocation loadValueWide(CompilationUnit *cUnit, RegLocation rlSrc,
+                                 RegisterClass opKind)
+{
+    assert(rlSrc.wide);
+    rlSrc = dvmCompilerEvalLoc(cUnit, rlSrc, opKind, false);
+    if (rlSrc.location == kLocDalvikFrame) {
+        loadValueDirectWide(cUnit, rlSrc, rlSrc.lowReg, rlSrc.highReg);
+        rlSrc.location = kLocPhysReg;
+        dvmCompilerMarkLive(cUnit, rlSrc.lowReg, rlSrc.sRegLow);
+        dvmCompilerMarkLive(cUnit, rlSrc.highReg,
+                            dvmCompilerSRegHi(rlSrc.sRegLow));
+    } else if (rlSrc.location == kLocRetval) {
+        loadBaseDispWide(cUnit, NULL, rSELF, offsetof(Thread, interpSave.retval),
+                         rlSrc.lowReg, rlSrc.highReg, INVALID_SREG);
+        rlSrc.location = kLocPhysReg;
+        dvmCompilerClobber(cUnit, rlSrc.lowReg);
+        dvmCompilerClobber(cUnit, rlSrc.highReg);
+    }
+    return rlSrc;
+}
+
+static void storeValueWide(CompilationUnit *cUnit, RegLocation rlDest,
+                           RegLocation rlSrc)
+{
+    LIR *defStart;
+    LIR *defEnd;
+    assert(FPREG(rlSrc.lowReg) == FPREG(rlSrc.highReg));
+    assert(rlDest.wide);
+    assert(rlSrc.wide);
+    dvmCompilerKillNullCheckedLoc(cUnit, rlDest);
+    if (rlSrc.location == kLocPhysReg) {
+        if (dvmCompilerIsLive(cUnit, rlSrc.lowReg) ||
+            dvmCompilerIsLive(cUnit, rlSrc.highReg) ||
+            (rlDest.location == kLocPhysReg)) {
+            // Src is live or Dest has assigned reg.
+            rlDest = dvmCompilerEvalLoc(cUnit, rlDest, kAnyReg, false);
+            genRegCopyWide(cUnit, rlDest.lowReg, rlDest.highReg,
+                           rlSrc.lowReg, rlSrc.highReg);
+        } else {
+            // Just re-assign the registers.  Dest gets Src's regs
+            rlDest.lowReg = rlSrc.lowReg;
+            rlDest.highReg = rlSrc.highReg;
+            dvmCompilerClobber(cUnit, rlSrc.lowReg);
+            dvmCompilerClobber(cUnit, rlSrc.highReg);
+        }
+    } else {
+        // Load Src either into promoted Dest or temps allocated for Dest
+        rlDest = dvmCompilerEvalLoc(cUnit, rlDest, kAnyReg, false);
+        loadValueDirectWide(cUnit, rlSrc, rlDest.lowReg,
+                            rlDest.highReg);
+    }
+
+    // Dest is now live and dirty (until/if we flush it to home location)
+    dvmCompilerMarkLive(cUnit, rlDest.lowReg, rlDest.sRegLow);
+    dvmCompilerMarkLive(cUnit, rlDest.highReg,
+                        dvmCompilerSRegHi(rlDest.sRegLow));
+    dvmCompilerMarkDirty(cUnit, rlDest.lowReg);
+    dvmCompilerMarkDirty(cUnit, rlDest.highReg);
+    dvmCompilerMarkPair(cUnit, rlDest.lowReg, rlDest.highReg);
+
+    if (rlDest.location == kLocRetval) {
+        storeBaseDispWide(cUnit, rSELF, offsetof(Thread, interpSave.retval),
+                          rlDest.lowReg, rlDest.highReg);
+        dvmCompilerClobber(cUnit, rlDest.lowReg);
+        dvmCompilerClobber(cUnit, rlDest.highReg);
+    } else {
+        dvmCompilerResetDefLocWide(cUnit, rlDest);
+        if (dvmCompilerLiveOut(cUnit, rlDest.sRegLow) ||
+            dvmCompilerLiveOut(cUnit, dvmCompilerSRegHi(rlDest.sRegLow))) {
+            defStart = (LIR *)cUnit->lastLIRInsn;
+            int vReg = dvmCompilerS2VReg(cUnit, rlDest.sRegLow);
+            assert((vReg+1) == dvmCompilerS2VReg(cUnit,
+                                     dvmCompilerSRegHi(rlDest.sRegLow)));
+            storeBaseDispWide(cUnit, rFP, vReg << 2, rlDest.lowReg,
+                              rlDest.highReg);
+            dvmCompilerMarkClean(cUnit, rlDest.lowReg);
+            dvmCompilerMarkClean(cUnit, rlDest.highReg);
+            defEnd = (LIR *)cUnit->lastLIRInsn;
+            dvmCompilerMarkDefWide(cUnit, rlDest, defStart, defEnd);
+        }
+    }
+}
+
+/*
+ * Perform null-check on a register. sReg is the ssa register being checked,
+ * and mReg is the machine register holding the actual value. If internal state
+ * indicates that sReg has been checked before, the check request is ignored.
+ */
+static MipsLIR *genNullCheck(CompilationUnit *cUnit, int sReg, int mReg,
+                                int dOffset, MipsLIR *pcrLabel)
+{
+    /* This particular Dalvik register has been null-checked */
+    if (dvmIsBitSet(cUnit->regPool->nullCheckedRegs, sReg)) {
+        return pcrLabel;
+    }
+    dvmSetBit(cUnit->regPool->nullCheckedRegs, sReg);
+    return genRegImmCheck(cUnit, kMipsCondEq, mReg, 0, dOffset, pcrLabel);
+}
+
+/*
+ * Perform a "reg cmp reg" operation and jump to the PCR region if the
+ * condition is satisfied.
+ */
+static MipsLIR *genRegRegCheck(CompilationUnit *cUnit,
+                              MipsConditionCode cond,
+                              int reg1, int reg2, int dOffset,
+                              MipsLIR *pcrLabel)
+{
+    MipsLIR *res = NULL;
+    if (cond == kMipsCondGe) { /* signed >= case */
+        int tReg = dvmCompilerAllocTemp(cUnit);
+        res = newLIR3(cUnit, kMipsSlt, tReg, reg1, reg2);
+        MipsLIR *branch = opCompareBranch(cUnit, kMipsBeqz, tReg, -1);
+        genCheckCommon(cUnit, dOffset, branch, pcrLabel);
+    } else if (cond == kMipsCondCs) {  /* unsigned >= case */
+        int tReg = dvmCompilerAllocTemp(cUnit);
+        res = newLIR3(cUnit, kMipsSltu, tReg, reg1, reg2);
+        MipsLIR *branch = opCompareBranch(cUnit, kMipsBeqz, tReg, -1);
+        genCheckCommon(cUnit, dOffset, branch, pcrLabel);
+    } else {
+        LOGE("Unexpected condition in genRegRegCheck: %d\n", (int) cond);
+        dvmAbort();
+    }
+    return res;
+}
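+
+/*
+ * For example, the signed ">=" case above expands to roughly (a sketch,
+ * with t0 standing in for the freshly allocated temp):
+ *
+ *     slt   t0, reg1, reg2      # t0 = (reg1 < reg2) ? 1 : 0
+ *     beqz  t0, <pcr region>    # taken when reg1 >= reg2
+ *
+ * Since MIPS has no condition codes, a set-on-less-than plus a branch on
+ * zero stands in for a single conditional branch.
+ */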
+
+/*
+ * Perform zero-check on a register. Similar to genNullCheck but the value being
+ * checked does not have a corresponding Dalvik register.
+ */
+static MipsLIR *genZeroCheck(CompilationUnit *cUnit, int mReg,
+                                int dOffset, MipsLIR *pcrLabel)
+{
+    return genRegImmCheck(cUnit, kMipsCondEq, mReg, 0, dOffset, pcrLabel);
+}
+
+/* Perform bound check on two registers */
+static MipsLIR *genBoundsCheck(CompilationUnit *cUnit, int rIndex,
+                                  int rBound, int dOffset, MipsLIR *pcrLabel)
+{
+    return genRegRegCheck(cUnit, kMipsCondCs, rIndex, rBound, dOffset,
+                            pcrLabel);
+}
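+
+/*
+ * Note the unsigned comparison (kMipsCondCs): a negative index becomes a
+ * very large unsigned value, so the single check "rIndex >=u rBound" also
+ * catches rIndex < 0.  For example, with rIndex = -1 and rBound = 10:
+ *
+ *     0xFFFFFFFF >=u 10  ->  branch to the PCR region
+ */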
+
+/*
+ * Jump to the out-of-line handler to finish executing the
+ * remainder of more complex instructions.
+ */
+static void genDispatchToHandler(CompilationUnit *cUnit, TemplateOpcode opCode)
+{
+    /*
+     * We're jumping from a trace to a template. Using jal is preferable to jalr,
+     * but we need to ensure source and target addresses allow the use of jal.
+     * This should almost always be the case, but if source and target are in
+     * different 256MB regions then use jalr.  The test below is very conservative
+     * since we don't have a source address yet, but this is ok for now given that
+     * we expect this case to be very rare. The test can be made less conservative
+     * as needed in the future in coordination with address assignment during
+     * the assembly process.
+     */
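+    /*
+     * Worked example of the region test: jal encodes a 26-bit word index,
+     * reaching only targets in the same 2^28-byte (256MB) region, so the
+     * top four address bits decide reachability:
+     *
+     *     (0x40001000 & 0xF0000000) == (0x4ffff000 & 0xF0000000)  // jal ok
+     *     (0x4ffff000 & 0xF0000000) != (0x50000000 & 0xF0000000)  // jalr
+     */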
+    dvmCompilerClobberHandlerRegs(cUnit);
+    int targetAddr = (int) gDvmJit.codeCache + templateEntryOffsets[opCode];
+    int maxSourceAddr = (int) gDvmJit.codeCache + gDvmJit.codeCacheSize;
+
+    if ((targetAddr & 0xF0000000) == (maxSourceAddr & 0xF0000000)) {
+        newLIR1(cUnit, kMipsJal, targetAddr);
+    } else {
+        loadConstant(cUnit, r_T9, targetAddr);
+        newLIR2(cUnit, kMipsJalr, r_RA, r_T9);
+    }
+}
diff --git a/vm/compiler/codegen/mips/FP/MipsFP.cpp b/vm/compiler/codegen/mips/FP/MipsFP.cpp
new file mode 100644
index 0000000..cf44b0e
--- /dev/null
+++ b/vm/compiler/codegen/mips/FP/MipsFP.cpp
@@ -0,0 +1,409 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This file is included by Codegen-$(TARGET_ARCH_VARIANT).c, and implements
+ * architecture variant-specific code.
+ */
+
+extern void dvmCompilerFlushRegWideForV5TEVFP(CompilationUnit *cUnit,
+                                              int reg1, int reg2);
+extern void dvmCompilerFlushRegForV5TEVFP(CompilationUnit *cUnit, int reg);
+
+/*
+ * Load the address of a Dalvik value into rDest, first flushing any
+ * physical registers associated with that value.
+ */
+static void loadValueAddress(CompilationUnit *cUnit, RegLocation rlSrc,
+                             int rDest)
+{
+    rlSrc = rlSrc.wide ? dvmCompilerUpdateLocWide(cUnit, rlSrc) :
+                         dvmCompilerUpdateLoc(cUnit, rlSrc);
+    if (rlSrc.location == kLocPhysReg) {
+        if (rlSrc.wide) {
+            dvmCompilerFlushRegWideForV5TEVFP(cUnit, rlSrc.lowReg,
+                                              rlSrc.highReg);
+        } else {
+            dvmCompilerFlushRegForV5TEVFP(cUnit, rlSrc.lowReg);
+        }
+    }
+    opRegRegImm(cUnit, kOpAdd, rDest, rFP,
+                dvmCompilerS2VReg(cUnit, rlSrc.sRegLow) << 2);
+}
+
+static bool genInlineSqrt(CompilationUnit *cUnit, MIR *mir)
+{
+    RegLocation rlSrc = dvmCompilerGetSrcWide(cUnit, mir, 0, 1);
+#ifdef __mips_hard_float
+    RegLocation rlResult = LOC_C_RETURN_WIDE_ALT;
+#else
+    RegLocation rlResult = LOC_C_RETURN_WIDE;
+#endif
+    RegLocation rlDest = LOC_DALVIK_RETURN_VAL_WIDE;
+    loadValueAddress(cUnit, rlSrc, r_A2);
+    genDispatchToHandler(cUnit, TEMPLATE_SQRT_DOUBLE_VFP);
+    storeValueWide(cUnit, rlDest, rlResult);
+    return false;
+}
+
+/*
+ * TUNING: On some implementations, it is quicker to pass addresses
+ * to the handlers rather than load the operands into core registers
+ * and then move the values to FP regs in the handlers.  Other implementations
+ * may prefer passing data in registers (and the latter approach would
+ * yield cleaner register handling - avoiding the requirement that operands
+ * be flushed to memory prior to the call).
+ */
+static bool genArithOpFloat(CompilationUnit *cUnit, MIR *mir,
+                            RegLocation rlDest, RegLocation rlSrc1,
+                            RegLocation rlSrc2)
+{
+#ifdef __mips_hard_float
+    int op = kMipsNop;
+    RegLocation rlResult;
+
+    /*
+     * Don't attempt to optimize register usage since these opcodes call out to
+     * the handlers.
+     */
+    switch (mir->dalvikInsn.opcode) {
+        case OP_ADD_FLOAT_2ADDR:
+        case OP_ADD_FLOAT:
+            op = kMipsFadds;
+            break;
+        case OP_SUB_FLOAT_2ADDR:
+        case OP_SUB_FLOAT:
+            op = kMipsFsubs;
+            break;
+        case OP_DIV_FLOAT_2ADDR:
+        case OP_DIV_FLOAT:
+            op = kMipsFdivs;
+            break;
+        case OP_MUL_FLOAT_2ADDR:
+        case OP_MUL_FLOAT:
+            op = kMipsFmuls;
+            break;
+        case OP_REM_FLOAT_2ADDR:
+        case OP_REM_FLOAT:
+        case OP_NEG_FLOAT: {
+            return genArithOpFloatPortable(cUnit, mir, rlDest, rlSrc1, rlSrc2);
+        }
+        default:
+            return true;
+    }
+    rlSrc1 = loadValue(cUnit, rlSrc1, kFPReg);
+    rlSrc2 = loadValue(cUnit, rlSrc2, kFPReg);
+    rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kFPReg, true);
+    newLIR3(cUnit, (MipsOpCode)op, rlResult.lowReg, rlSrc1.lowReg, rlSrc2.lowReg);
+    storeValue(cUnit, rlDest, rlResult);
+
+    return false;
+#else
+    TemplateOpcode opcode;
+
+    /*
+     * Don't attempt to optimize register usage since these opcodes call out to
+     * the handlers.
+     */
+    switch (mir->dalvikInsn.opcode) {
+        case OP_ADD_FLOAT_2ADDR:
+        case OP_ADD_FLOAT:
+            opcode = TEMPLATE_ADD_FLOAT_VFP;
+            break;
+        case OP_SUB_FLOAT_2ADDR:
+        case OP_SUB_FLOAT:
+            opcode = TEMPLATE_SUB_FLOAT_VFP;
+            break;
+        case OP_DIV_FLOAT_2ADDR:
+        case OP_DIV_FLOAT:
+            opcode = TEMPLATE_DIV_FLOAT_VFP;
+            break;
+        case OP_MUL_FLOAT_2ADDR:
+        case OP_MUL_FLOAT:
+            opcode = TEMPLATE_MUL_FLOAT_VFP;
+            break;
+        case OP_REM_FLOAT_2ADDR:
+        case OP_REM_FLOAT:
+        case OP_NEG_FLOAT: {
+            return genArithOpFloatPortable(cUnit, mir, rlDest, rlSrc1, rlSrc2);
+        }
+        default:
+            return true;
+    }
+    loadValueAddress(cUnit, rlDest, r_A0);
+    dvmCompilerClobber(cUnit, r_A0);
+    loadValueAddress(cUnit, rlSrc1, r_A1);
+    dvmCompilerClobber(cUnit, r_A1);
+    loadValueAddress(cUnit, rlSrc2, r_A2);
+    genDispatchToHandler(cUnit, opcode);
+    rlDest = dvmCompilerUpdateLoc(cUnit, rlDest);
+    if (rlDest.location == kLocPhysReg) {
+        dvmCompilerClobber(cUnit, rlDest.lowReg);
+    }
+    return false;
+#endif
+}
+
+static bool genArithOpDouble(CompilationUnit *cUnit, MIR *mir,
+                             RegLocation rlDest, RegLocation rlSrc1,
+                             RegLocation rlSrc2)
+{
+#ifdef __mips_hard_float
+    int op = kMipsNop;
+    RegLocation rlResult;
+
+    switch (mir->dalvikInsn.opcode) {
+        case OP_ADD_DOUBLE_2ADDR:
+        case OP_ADD_DOUBLE:
+            op = kMipsFaddd;
+            break;
+        case OP_SUB_DOUBLE_2ADDR:
+        case OP_SUB_DOUBLE:
+            op = kMipsFsubd;
+            break;
+        case OP_DIV_DOUBLE_2ADDR:
+        case OP_DIV_DOUBLE:
+            op = kMipsFdivd;
+            break;
+        case OP_MUL_DOUBLE_2ADDR:
+        case OP_MUL_DOUBLE:
+            op = kMipsFmuld;
+            break;
+        case OP_REM_DOUBLE_2ADDR:
+        case OP_REM_DOUBLE:
+        case OP_NEG_DOUBLE: {
+            return genArithOpDoublePortable(cUnit, mir, rlDest, rlSrc1, rlSrc2);
+        }
+        default:
+            return true;
+    }
+    rlSrc1 = loadValueWide(cUnit, rlSrc1, kFPReg);
+    assert(rlSrc1.wide);
+    rlSrc2 = loadValueWide(cUnit, rlSrc2, kFPReg);
+    assert(rlSrc2.wide);
+    rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kFPReg, true);
+    assert(rlDest.wide);
+    assert(rlResult.wide);
+    newLIR3(cUnit, (MipsOpCode)op, S2D(rlResult.lowReg, rlResult.highReg),
+            S2D(rlSrc1.lowReg, rlSrc1.highReg),
+            S2D(rlSrc2.lowReg, rlSrc2.highReg));
+    storeValueWide(cUnit, rlDest, rlResult);
+    return false;
+#else
+    TemplateOpcode opcode;
+
+    switch (mir->dalvikInsn.opcode) {
+        case OP_ADD_DOUBLE_2ADDR:
+        case OP_ADD_DOUBLE:
+            opcode = TEMPLATE_ADD_DOUBLE_VFP;
+            break;
+        case OP_SUB_DOUBLE_2ADDR:
+        case OP_SUB_DOUBLE:
+            opcode = TEMPLATE_SUB_DOUBLE_VFP;
+            break;
+        case OP_DIV_DOUBLE_2ADDR:
+        case OP_DIV_DOUBLE:
+            opcode = TEMPLATE_DIV_DOUBLE_VFP;
+            break;
+        case OP_MUL_DOUBLE_2ADDR:
+        case OP_MUL_DOUBLE:
+            opcode = TEMPLATE_MUL_DOUBLE_VFP;
+            break;
+        case OP_REM_DOUBLE_2ADDR:
+        case OP_REM_DOUBLE:
+        case OP_NEG_DOUBLE: {
+            return genArithOpDoublePortable(cUnit, mir, rlDest, rlSrc1,
+                                               rlSrc2);
+        }
+        default:
+            return true;
+    }
+    loadValueAddress(cUnit, rlDest, r_A0);
+    dvmCompilerClobber(cUnit, r_A0);
+    loadValueAddress(cUnit, rlSrc1, r_A1);
+    dvmCompilerClobber(cUnit, r_A1);
+    loadValueAddress(cUnit, rlSrc2, r_A2);
+    genDispatchToHandler(cUnit, opcode);
+    rlDest = dvmCompilerUpdateLocWide(cUnit, rlDest);
+    if (rlDest.location == kLocPhysReg) {
+        dvmCompilerClobber(cUnit, rlDest.lowReg);
+        dvmCompilerClobber(cUnit, rlDest.highReg);
+    }
+    return false;
+#endif
+}
+
+static bool genConversion(CompilationUnit *cUnit, MIR *mir)
+{
+    Opcode opcode = mir->dalvikInsn.opcode;
+    bool longSrc = false;
+    bool longDest = false;
+    RegLocation rlSrc;
+    RegLocation rlDest;
+#ifdef __mips_hard_float
+    int op = kMipsNop;
+    int srcReg;
+    RegLocation rlResult;
+
+    switch (opcode) {
+        case OP_INT_TO_FLOAT:
+            longSrc = false;
+            longDest = false;
+            op = kMipsFcvtsw;
+            break;
+        case OP_DOUBLE_TO_FLOAT:
+            longSrc = true;
+            longDest = false;
+            op = kMipsFcvtsd;
+            break;
+        case OP_FLOAT_TO_DOUBLE:
+            longSrc = false;
+            longDest = true;
+            op = kMipsFcvtds;
+            break;
+        case OP_INT_TO_DOUBLE:
+            longSrc = false;
+            longDest = true;
+            op = kMipsFcvtdw;
+            break;
+        case OP_FLOAT_TO_INT:
+        case OP_DOUBLE_TO_INT:
+        case OP_LONG_TO_DOUBLE:
+        case OP_FLOAT_TO_LONG:
+        case OP_LONG_TO_FLOAT:
+        case OP_DOUBLE_TO_LONG:
+            return genConversionPortable(cUnit, mir);
+        default:
+            return true;
+    }
+    if (longSrc) {
+        rlSrc = dvmCompilerGetSrcWide(cUnit, mir, 0, 1);
+        rlSrc = loadValueWide(cUnit, rlSrc, kFPReg);
+        srcReg = S2D(rlSrc.lowReg, rlSrc.highReg);
+    } else {
+        rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
+        rlSrc = loadValue(cUnit, rlSrc, kFPReg);
+        srcReg = rlSrc.lowReg;
+    }
+    if (longDest) {
+        rlDest = dvmCompilerGetDestWide(cUnit, mir, 0, 1);
+        rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kFPReg, true);
+        newLIR2(cUnit, (MipsOpCode)op, S2D(rlResult.lowReg, rlResult.highReg), srcReg);
+        storeValueWide(cUnit, rlDest, rlResult);
+    } else {
+        rlDest = dvmCompilerGetDest(cUnit, mir, 0);
+        rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kFPReg, true);
+        newLIR2(cUnit, (MipsOpCode)op, rlResult.lowReg, srcReg);
+        storeValue(cUnit, rlDest, rlResult);
+    }
+    return false;
+#else
+    TemplateOpcode templateOpcode;
+    switch (opcode) {
+        case OP_INT_TO_FLOAT:
+            longSrc = false;
+            longDest = false;
+            templateOpcode = TEMPLATE_INT_TO_FLOAT_VFP;
+            break;
+        case OP_FLOAT_TO_INT:
+            longSrc = false;
+            longDest = false;
+            templateOpcode = TEMPLATE_FLOAT_TO_INT_VFP;
+            break;
+        case OP_DOUBLE_TO_FLOAT:
+            longSrc = true;
+            longDest = false;
+            templateOpcode = TEMPLATE_DOUBLE_TO_FLOAT_VFP;
+            break;
+        case OP_FLOAT_TO_DOUBLE:
+            longSrc = false;
+            longDest = true;
+            templateOpcode = TEMPLATE_FLOAT_TO_DOUBLE_VFP;
+            break;
+        case OP_INT_TO_DOUBLE:
+            longSrc = false;
+            longDest = true;
+            templateOpcode = TEMPLATE_INT_TO_DOUBLE_VFP;
+            break;
+        case OP_DOUBLE_TO_INT:
+            longSrc = true;
+            longDest = false;
+            templateOpcode = TEMPLATE_DOUBLE_TO_INT_VFP;
+            break;
+        case OP_LONG_TO_DOUBLE:
+        case OP_FLOAT_TO_LONG:
+        case OP_LONG_TO_FLOAT:
+        case OP_DOUBLE_TO_LONG:
+            return genConversionPortable(cUnit, mir);
+        default:
+            return true;
+    }
+
+    if (longSrc) {
+        rlSrc = dvmCompilerGetSrcWide(cUnit, mir, 0, 1);
+    } else {
+        rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
+    }
+
+    if (longDest) {
+        rlDest = dvmCompilerGetDestWide(cUnit, mir, 0, 1);
+    } else {
+        rlDest = dvmCompilerGetDest(cUnit, mir, 0);
+    }
+    loadValueAddress(cUnit, rlDest, r_A0);
+    dvmCompilerClobber(cUnit, r_A0);
+    loadValueAddress(cUnit, rlSrc, r_A1);
+    genDispatchToHandler(cUnit, templateOpcode);
+    if (rlDest.wide) {
+        rlDest = dvmCompilerUpdateLocWide(cUnit, rlDest);
+        dvmCompilerClobber(cUnit, rlDest.highReg);
+    } else {
+        rlDest = dvmCompilerUpdateLoc(cUnit, rlDest);
+    }
+    dvmCompilerClobber(cUnit, rlDest.lowReg);
+    return false;
+#endif
+}
+
+static bool genCmpFP(CompilationUnit *cUnit, MIR *mir, RegLocation rlDest,
+                     RegLocation rlSrc1, RegLocation rlSrc2)
+{
+    TemplateOpcode templateOpcode;
+    RegLocation rlResult = dvmCompilerGetReturn(cUnit);
+    bool wide = true;
+
+    switch(mir->dalvikInsn.opcode) {
+        case OP_CMPL_FLOAT:
+            templateOpcode = TEMPLATE_CMPL_FLOAT_VFP;
+            wide = false;
+            break;
+        case OP_CMPG_FLOAT:
+            templateOpcode = TEMPLATE_CMPG_FLOAT_VFP;
+            wide = false;
+            break;
+        case OP_CMPL_DOUBLE:
+            templateOpcode = TEMPLATE_CMPL_DOUBLE_VFP;
+            break;
+        case OP_CMPG_DOUBLE:
+            templateOpcode = TEMPLATE_CMPG_DOUBLE_VFP;
+            break;
+        default:
+            return true;
+    }
+    loadValueAddress(cUnit, rlSrc1, r_A0);
+    dvmCompilerClobber(cUnit, r_A0);
+    loadValueAddress(cUnit, rlSrc2, r_A1);
+    genDispatchToHandler(cUnit, templateOpcode);
+    storeValue(cUnit, rlDest, rlResult);
+    return false;
+}
diff --git a/vm/compiler/codegen/mips/GlobalOptimizations.cpp b/vm/compiler/codegen/mips/GlobalOptimizations.cpp
new file mode 100644
index 0000000..189d818
--- /dev/null
+++ b/vm/compiler/codegen/mips/GlobalOptimizations.cpp
@@ -0,0 +1,422 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Dalvik.h"
+#include "vm/compiler/CompilerInternals.h"
+#include "MipsLIR.h"
+
+/*
+ * Identify unconditional branches that jump to the immediate successor of the
+ * branch itself, and mark them as nops.
+ */
+static void applyRedundantBranchElimination(CompilationUnit *cUnit)
+{
+    MipsLIR *thisLIR;
+
+    for (thisLIR = (MipsLIR *) cUnit->firstLIRInsn;
+         thisLIR != (MipsLIR *) cUnit->lastLIRInsn;
+         thisLIR = NEXT_LIR(thisLIR)) {
+
+        /* Branch to the next instruction */
+        if (!thisLIR->flags.isNop && thisLIR->opcode == kMipsB) {
+            MipsLIR *nextLIR = thisLIR;
+
+            while (true) {
+                nextLIR = NEXT_LIR(nextLIR);
+
+                /*
+                 * Is the branch target the next instruction?
+                 */
+                if (nextLIR == (MipsLIR *) thisLIR->generic.target) {
+                    thisLIR->flags.isNop = true;
+                    break;
+                }
+
+                /*
+                 * Found real useful stuff between the branch and the target.
+                 * Need to explicitly check the lastLIRInsn here since with
+                 * method-based JIT the branch might be the last real
+                 * instruction.
+                 */
+                if (!isPseudoOpCode(nextLIR->opcode) ||
+                    (nextLIR == (MipsLIR *) cUnit->lastLIRInsn))
+                    break;
+            }
+        }
+    }
+}
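+
+/*
+ * Illustrative before/after (pseudo-LIR sketch):
+ *
+ *     b .Lnext           ; branches to its immediate successor -> nop'ed
+ *   .Lnext:              ; label is a pseudo op, skipped by the scan
+ *     addu v0, a0, a1
+ */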
+
+/*
+ * Do a simple form of copy propagation and elimination.
+ */
+static void applyCopyPropagation(CompilationUnit *cUnit)
+{
+    MipsLIR *thisLIR;
+
+    /* look for copies to possibly eliminate */
+    for (thisLIR = (MipsLIR *) cUnit->firstLIRInsn;
+         thisLIR != (MipsLIR *) cUnit->lastLIRInsn;
+         thisLIR = NEXT_LIR(thisLIR)) {
+
+        if (thisLIR->flags.isNop || thisLIR->opcode != kMipsMove)
+            continue;
+
+        const int max_insns = 10;
+        MipsLIR *savedLIR[max_insns];
+        int srcRedefined = 0;
+        int insnCount = 0;
+        MipsLIR *nextLIR;
+
+        /* look for and record all uses of reg defined by the copy */
+        for (nextLIR = (MipsLIR *) NEXT_LIR(thisLIR);
+             nextLIR != (MipsLIR *) cUnit->lastLIRInsn;
+             nextLIR = NEXT_LIR(nextLIR)) {
+
+            if (nextLIR->flags.isNop || nextLIR->opcode == kMips32BitData)
+                continue;
+
+            if (isPseudoOpCode(nextLIR->opcode)) {
+                if (nextLIR->opcode == kMipsPseudoDalvikByteCodeBoundary ||
+                    nextLIR->opcode == kMipsPseudoBarrier ||
+                    nextLIR->opcode == kMipsPseudoExtended ||
+                    nextLIR->opcode == kMipsPseudoSSARep)
+                    continue; /* these pseudos don't pose problems */
+                else if (nextLIR->opcode == kMipsPseudoTargetLabel ||
+                         nextLIR->opcode == kMipsPseudoEntryBlock ||
+                         nextLIR->opcode == kMipsPseudoExitBlock)
+                    insnCount = 0;  /* give up for these pseudos */
+                break; /* reached end for copy propagation */
+            }
+
+            /*
+             * Since instructions with the IS_BRANCH flag set have their
+             * useMask and defMask set to ENCODE_ALL, any checking of
+             * these flags must come after the branching checks.
+             */
+
+            /* don't propagate across a branch/jump-and-link
+               or a jump via register */
+            if (EncodingMap[nextLIR->opcode].flags & REG_DEF_LR ||
+                nextLIR->opcode == kMipsJalr ||
+                nextLIR->opcode == kMipsJr) {
+                insnCount = 0;
+                break;
+            }
+
+            /* branches to certain targets are ok while others aren't */
+            if (EncodingMap[nextLIR->opcode].flags & IS_BRANCH) {
+                MipsLIR *targetLIR =  (MipsLIR *) nextLIR->generic.target;
+                if (targetLIR->opcode != kMipsPseudoEHBlockLabel &&
+                    targetLIR->opcode != kMipsPseudoChainingCellHot &&
+                    targetLIR->opcode != kMipsPseudoChainingCellNormal &&
+                    targetLIR->opcode != kMipsPseudoChainingCellInvokePredicted &&
+                    targetLIR->opcode != kMipsPseudoChainingCellInvokeSingleton &&
+                    targetLIR->opcode != kMipsPseudoPCReconstructionBlockLabel &&
+                    targetLIR->opcode != kMipsPseudoPCReconstructionCell) {
+                    insnCount = 0;
+                    break;
+                }
+                /* FIXME - for now don't propagate across any branch/jump. */
+                insnCount = 0;
+                break;
+            }
+
+            /* copy def reg used here, so record insn for copy propagation */
+            if (thisLIR->defMask & nextLIR->useMask) {
+                if (insnCount == max_insns || srcRedefined) {
+                    insnCount = 0;
+                    break; /* just give up if too many or not possible */
+                }
+                savedLIR[insnCount++] = nextLIR;
+            }
+
+            if (thisLIR->defMask & nextLIR->defMask) {
+                /* movz relies on thisLIR setting the dst reg,
+                   so abandon propagation */
+                if (nextLIR->opcode == kMipsMovz)
+                    insnCount = 0;
+                break;
+            }
+
+            /* copy src reg redefined here, so can't propagate further */
+            if (thisLIR->useMask & nextLIR->defMask) {
+                if (insnCount == 0)
+                    break; /* nothing to propagate */
+                srcRedefined = 1;
+            }
+        }
+
+        /* conditions allow propagation and copy elimination */
+        if (insnCount) {
+            int i;
+            for (i = 0; i < insnCount; i++) {
+                int flags = EncodingMap[savedLIR[i]->opcode].flags;
+                savedLIR[i]->useMask &= ~(1 << thisLIR->operands[0]);
+                savedLIR[i]->useMask |= 1 << thisLIR->operands[1];
+                if ((flags & REG_USE0) &&
+                    savedLIR[i]->operands[0] == thisLIR->operands[0])
+                    savedLIR[i]->operands[0] = thisLIR->operands[1];
+                if ((flags & REG_USE1) &&
+                    savedLIR[i]->operands[1] == thisLIR->operands[0])
+                    savedLIR[i]->operands[1] = thisLIR->operands[1];
+                if ((flags & REG_USE2) &&
+                    savedLIR[i]->operands[2] == thisLIR->operands[0])
+                    savedLIR[i]->operands[2] = thisLIR->operands[1];
+                if ((flags & REG_USE3) &&
+                    savedLIR[i]->operands[3] == thisLIR->operands[0])
+                    savedLIR[i]->operands[3] = thisLIR->operands[1];
+            }
+            thisLIR->flags.isNop = true;
+        }
+    }
+}
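+
+/*
+ * Illustrative before/after (pseudo-LIR sketch):
+ *
+ *     move t0, a1        ; copy becomes a nop once every use is rewritten
+ *     addu v0, t0, t1    ; use of t0 rewritten to:  addu v0, a1, t1
+ */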
+
+#ifdef __mips_hard_float
+/*
+ * Look for pairs of mov.s instructions that can be combined into mov.d
+ */
+static void mergeMovs(CompilationUnit *cUnit)
+{
+    MipsLIR *movsLIR = NULL;
+    MipsLIR *thisLIR;
+
+    for (thisLIR = (MipsLIR *) cUnit->firstLIRInsn;
+         thisLIR != (MipsLIR *) cUnit->lastLIRInsn;
+         thisLIR = NEXT_LIR(thisLIR)) {
+        if (thisLIR->flags.isNop)
+            continue;
+
+        if (isPseudoOpCode(thisLIR->opcode)) {
+            if (thisLIR->opcode == kMipsPseudoDalvikByteCodeBoundary ||
+                thisLIR->opcode == kMipsPseudoExtended ||
+                thisLIR->opcode == kMipsPseudoSSARep)
+                continue;  /* ok to move across these pseudos */
+            movsLIR = NULL; /* don't merge across other pseudos */
+            continue;
+        }
+
+        /* merge pairs of mov.s instructions */
+        if (thisLIR->opcode == kMipsFmovs) {
+            if (movsLIR == NULL) {
+                movsLIR = thisLIR;
+            } else if (((movsLIR->operands[0] & 1) == 0) &&
+                       ((movsLIR->operands[1] & 1) == 0) &&
+                       ((movsLIR->operands[0] + 1) == thisLIR->operands[0]) &&
+                       ((movsLIR->operands[1] + 1) == thisLIR->operands[1])) {
+                /* movsLIR is handling the even register - upgrade to mov.d */
+                movsLIR->opcode = kMipsFmovd;
+                movsLIR->operands[0] = S2D(movsLIR->operands[0],
+                                           movsLIR->operands[0]+1);
+                movsLIR->operands[1] = S2D(movsLIR->operands[1],
+                                           movsLIR->operands[1]+1);
+                thisLIR->flags.isNop = true;
+                movsLIR = NULL;
+            } else if (((movsLIR->operands[0] & 1) == 1) &&
+                       ((movsLIR->operands[1] & 1) == 1) &&
+                       ((movsLIR->operands[0] - 1) == thisLIR->operands[0]) &&
+                       ((movsLIR->operands[1] - 1) == thisLIR->operands[1])) {
+                /* thisLIR is handling the even register - upgrade to mov.d */
+                thisLIR->opcode = kMipsFmovd;
+                thisLIR->operands[0] = S2D(thisLIR->operands[0],
+                                           thisLIR->operands[0]+1);
+                thisLIR->operands[1] = S2D(thisLIR->operands[1],
+                                           thisLIR->operands[1]+1);
+                movsLIR->flags.isNop = true;
+                movsLIR = NULL;
+            } else {
+                /* carry on searching from here */
+                movsLIR = thisLIR;
+            }
+            continue;
+        }
+
+        /* intervening instruction - start search from scratch */
+        movsLIR = NULL;
+    }
+}
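+
+/*
+ * Illustrative merge (sketch): an even/odd pair of single-precision moves
+ * collapses into one double-precision move:
+ *
+ *     mov.s f0, f2       ; movsLIR, even registers
+ *     mov.s f1, f3       ; thisLIR  ->  merged into:  mov.d f0, f2
+ */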
+#endif
+
+/*
+ * Look back first and then ahead to try to find an instruction to move into
+ * the branch delay slot.  If the analysis can be done cheaply enough, it may
+ * be possible to tune this routine to be more beneficial (e.g., being more
+ * particular about what instruction is speculated).
+ */
+static MipsLIR *delaySlotLIR(MipsLIR *firstLIR, MipsLIR *branchLIR)
+{
+    int isLoad;
+    int loadVisited = 0;
+    int isStore;
+    int storeVisited = 0;
+    u8 useMask = branchLIR->useMask;
+    u8 defMask = branchLIR->defMask;
+    MipsLIR *thisLIR;
+    MipsLIR *newLIR = (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR), true);
+
+    for (thisLIR = PREV_LIR(branchLIR);
+         thisLIR != firstLIR;
+         thisLIR = PREV_LIR(thisLIR)) {
+        if (thisLIR->flags.isNop)
+            continue;
+
+        if (isPseudoOpCode(thisLIR->opcode)) {
+            if (thisLIR->opcode == kMipsPseudoDalvikByteCodeBoundary ||
+                thisLIR->opcode == kMipsPseudoExtended ||
+                thisLIR->opcode == kMipsPseudoSSARep)
+                continue;  /* ok to move across these pseudos */
+            break; /* don't move across all other pseudos */
+        }
+
+        /* give up on moving previous instruction down into slot */
+        if (thisLIR->opcode == kMipsNop ||
+            thisLIR->opcode == kMips32BitData ||
+            EncodingMap[thisLIR->opcode].flags & IS_BRANCH)
+            break;
+
+        /* don't reorder loads/stores (the alias info could
+           possibly be used to allow as a future enhancement) */
+        isLoad = EncodingMap[thisLIR->opcode].flags & IS_LOAD;
+        isStore = EncodingMap[thisLIR->opcode].flags & IS_STORE;
+
+        if (!(thisLIR->useMask & defMask) &&
+            !(thisLIR->defMask & useMask) &&
+            !(thisLIR->defMask & defMask) &&
+            !(isLoad && storeVisited) &&
+            !(isStore && loadVisited) &&
+            !(isStore && storeVisited)) {
+            *newLIR = *thisLIR;
+            thisLIR->flags.isNop = true;
+            return newLIR; /* move into delay slot succeeded */
+        }
+
+        loadVisited |= isLoad;
+        storeVisited |= isStore;
+
+        /* accumulate def/use constraints */
+        useMask |= thisLIR->useMask;
+        defMask |= thisLIR->defMask;
+    }
+
+    /* for unconditional branches try to copy the instruction at the
+       branch target up into the delay slot and adjust the branch */
+    if (branchLIR->opcode == kMipsB) {
+        MipsLIR *targetLIR;
+        for (targetLIR = (MipsLIR *) branchLIR->generic.target;
+             targetLIR;
+             targetLIR = NEXT_LIR(targetLIR)) {
+            if (!targetLIR->flags.isNop &&
+                (!isPseudoOpCode(targetLIR->opcode) || /* can't pull predicted up */
+                 targetLIR->opcode == kMipsPseudoChainingCellInvokePredicted))
+                break; /* try to get to next real op at branch target */
+        }
+        if (targetLIR && !isPseudoOpCode(targetLIR->opcode) &&
+            !(EncodingMap[targetLIR->opcode].flags & IS_BRANCH)) {
+            *newLIR = *targetLIR;
+            branchLIR->generic.target = (LIR *) NEXT_LIR(targetLIR);
+            return newLIR;
+        }
+    } else if (branchLIR->opcode >= kMipsBeq && branchLIR->opcode <= kMipsBne) {
+        /* for conditional branches try to fill branch delay slot
+           via speculative execution when safe */
+        MipsLIR *targetLIR;
+        for (targetLIR = (MipsLIR *) branchLIR->generic.target;
+             targetLIR;
+             targetLIR = NEXT_LIR(targetLIR)) {
+            if (!targetLIR->flags.isNop && !isPseudoOpCode(targetLIR->opcode))
+                break; /* try to get to next real op at branch target */
+        }
+
+        MipsLIR *nextLIR;
+        for (nextLIR = NEXT_LIR(branchLIR);
+             nextLIR;
+             nextLIR = NEXT_LIR(nextLIR)) {
+            if (!nextLIR->flags.isNop && !isPseudoOpCode(nextLIR->opcode))
+                break; /* try to get to next real op for fall thru */
+        }
+
+        if (nextLIR && targetLIR) {
+            int flags = EncodingMap[nextLIR->opcode].flags;
+            int isLoad = flags & IS_LOAD;
+
+            /* common branch and fall thru to normal chaining cells case */
+            if (isLoad && nextLIR->opcode == targetLIR->opcode &&
+                nextLIR->operands[0] == targetLIR->operands[0] &&
+                nextLIR->operands[1] == targetLIR->operands[1] &&
+                nextLIR->operands[2] == targetLIR->operands[2]) {
+                *newLIR = *targetLIR;
+                branchLIR->generic.target = (LIR *) NEXT_LIR(targetLIR);
+                return newLIR;
+            }
+
+            /* try prefetching (maybe try speculating instructions along the
+               trace like dalvik frame load which is common and may be safe) */
+            int isStore = flags & IS_STORE;
+            if (isLoad || isStore) {
+                newLIR->opcode = kMipsPref;
+                newLIR->operands[0] = isLoad ? 0 : 1;
+                newLIR->operands[1] = nextLIR->operands[1];
+                newLIR->operands[2] = nextLIR->operands[2];
+                newLIR->defMask = nextLIR->defMask;
+                newLIR->useMask = nextLIR->useMask;
+                return newLIR;
+            }
+        }
+    }
+
+    /* couldn't find a useful instruction to move into the delay slot */
+    newLIR->opcode = kMipsNop;
+    return newLIR;
+}
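+
+/*
+ * Illustrative before/after (pseudo-assembly sketch):
+ *
+ *     addu t0, a0, a1              b    .Ltarget
+ *     b    .Ltarget         =>     addu t0, a0, a1   ; fills the delay slot
+ *
+ * When no safe candidate exists, a plain nop fills the slot instead.
+ */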
+
+/*
+ * The branch delay slot has been ignored thus far.  This is the point where
+ * a useful instruction is moved into it or a nop is inserted.  Leave existing
+ * NOPs alone -- these came from sparse and packed switch ops and are needed
+ * to maintain the proper offset to the jump table.
+ */
+static void introduceBranchDelaySlot(CompilationUnit *cUnit)
+{
+    MipsLIR *thisLIR;
+    MipsLIR *firstLIR = (MipsLIR *) cUnit->firstLIRInsn;
+    MipsLIR *lastLIR = (MipsLIR *) cUnit->lastLIRInsn;
+
+    for (thisLIR = lastLIR; thisLIR != firstLIR; thisLIR = PREV_LIR(thisLIR)) {
+        if (thisLIR->flags.isNop ||
+            isPseudoOpCode(thisLIR->opcode) ||
+            !(EncodingMap[thisLIR->opcode].flags & IS_BRANCH)) {
+            continue;
+        } else if (thisLIR == lastLIR) {
+            dvmCompilerAppendLIR(cUnit,
+                (LIR *) delaySlotLIR(firstLIR, thisLIR));
+        } else if (NEXT_LIR(thisLIR)->opcode != kMipsNop) {
+            dvmCompilerInsertLIRAfter((LIR *) thisLIR,
+                (LIR *) delaySlotLIR(firstLIR, thisLIR));
+        }
+    }
+
+    if (!thisLIR->flags.isNop &&
+        !isPseudoOpCode(thisLIR->opcode) &&
+        EncodingMap[thisLIR->opcode].flags & IS_BRANCH) {
+        /* nothing available to move, so insert nop */
+        MipsLIR *nopLIR = (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR), true);
+        nopLIR->opcode = kMipsNop;
+        dvmCompilerInsertLIRAfter((LIR *) thisLIR, (LIR *) nopLIR);
+    }
+}
+
+void dvmCompilerApplyGlobalOptimizations(CompilationUnit *cUnit)
+{
+    applyRedundantBranchElimination(cUnit);
+    applyCopyPropagation(cUnit);
+#ifdef __mips_hard_float
+    mergeMovs(cUnit);
+#endif
+    introduceBranchDelaySlot(cUnit);
+}
diff --git a/vm/compiler/codegen/mips/LocalOptimizations.cpp b/vm/compiler/codegen/mips/LocalOptimizations.cpp
new file mode 100644
index 0000000..2ccd40d
--- /dev/null
+++ b/vm/compiler/codegen/mips/LocalOptimizations.cpp
@@ -0,0 +1,460 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Dalvik.h"
+#include "vm/compiler/CompilerInternals.h"
+#include "MipsLIR.h"
+#include "Codegen.h"
+
+#define DEBUG_OPT(X)
+
+/* Check RAW, WAR, and WAW dependency on the register operands */
+#define CHECK_REG_DEP(use, def, check) ((def & check->useMask) || \
+                                        ((use | def) & check->defMask))
+
+/* Scheduler heuristics */
+#define MAX_HOIST_DISTANCE 20
+#define LDLD_DISTANCE 4
+#define LD_LATENCY 2
+
+static inline bool isDalvikRegisterClobbered(MipsLIR *lir1, MipsLIR *lir2)
+{
+    int reg1Lo = DECODE_ALIAS_INFO_REG(lir1->aliasInfo);
+    int reg1Hi = reg1Lo + DECODE_ALIAS_INFO_WIDE(lir1->aliasInfo);
+    int reg2Lo = DECODE_ALIAS_INFO_REG(lir2->aliasInfo);
+    int reg2Hi = reg2Lo + DECODE_ALIAS_INFO_WIDE(lir2->aliasInfo);
+
+    return (reg1Lo == reg2Lo) || (reg1Lo == reg2Hi) || (reg1Hi == reg2Lo);
+}
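+
+/*
+ * Worked example: if lir1 is a wide access covering Dalvik regs [v4, v5]
+ * and lir2 covers [v5, v6], then reg1Hi == reg2Lo (5 == 5), so the two
+ * accesses partially overlap and are reported as clobbering.
+ */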
+
+#if 0
+/* Debugging utility routine */
+static void dumpDependentInsnPair(MipsLIR *thisLIR, MipsLIR *checkLIR,
+                                  const char *optimization)
+{
+    LOGD("************ %s ************", optimization);
+    dvmDumpLIRInsn((LIR *) thisLIR, 0);
+    dvmDumpLIRInsn((LIR *) checkLIR, 0);
+}
+#endif
+
+/* Convert a more expensive instruction (e.g., a load) into a move */
+static void convertMemOpIntoMove(CompilationUnit *cUnit, MipsLIR *origLIR,
+                                 int dest, int src)
+{
+    /* Insert a move to replace the load */
+    MipsLIR *moveLIR;
+    moveLIR = dvmCompilerRegCopyNoInsert(cUnit, dest, src);
+    /*
+     * Insert the converted instruction after the original since the
+     * optimization is scanning in the top-down order and the new instruction
+     * will need to be re-checked (e.g., the new dest clobbers the src used in
+     * thisLIR).
+     */
+    dvmCompilerInsertLIRAfter((LIR *) origLIR, (LIR *) moveLIR);
+}
+
+/*
+ * Perform a pass of top-down walk, from the second-last instruction in the
+ * superblock, to eliminate redundant loads and stores.
+ *
+ * An earlier load can eliminate a later load iff
+ *   1) They are must-aliases
+ *   2) The native register is not clobbered in between
+ *   3) The memory location is not written to in between
+ *
+ * An earlier store can eliminate a later load iff
+ *   1) They are must-aliases
+ *   2) The native register is not clobbered in between
+ *   3) The memory location is not written to in between
+ *
+ * A later store can be eliminated by an earlier store iff
+ *   1) They are must-aliases
+ *   2) The memory location is not written to in between
+ */
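+/*
+ * For instance, with Dalvik v2 homed at 8(rFP) (pseudo-LIR sketch):
+ *
+ *     lw   t0, 8(fp)     ; load v2
+ *     addu t1, t0, a0    ; t0 not clobbered, 8(fp) not written
+ *     lw   t2, 8(fp)     ; must-alias load  ->  becomes:  move t2, t0
+ */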
+static void applyLoadStoreElimination(CompilationUnit *cUnit,
+                                      MipsLIR *headLIR,
+                                      MipsLIR *tailLIR)
+{
+    MipsLIR *thisLIR;
+
+    if (headLIR == tailLIR) return;
+
+    for (thisLIR = PREV_LIR(tailLIR);
+         thisLIR != headLIR;
+         thisLIR = PREV_LIR(thisLIR)) {
+        int sinkDistance = 0;
+
+        /* Skip non-interesting instructions */
+        if ((thisLIR->flags.isNop == true) ||
+            isPseudoOpCode(thisLIR->opcode) ||
+            !(EncodingMap[thisLIR->opcode].flags & (IS_LOAD | IS_STORE))) {
+            continue;
+        }
+
+        int nativeRegId = thisLIR->operands[0];
+        bool isThisLIRLoad = EncodingMap[thisLIR->opcode].flags & IS_LOAD;
+        MipsLIR *checkLIR;
+        /* Use the mem mask to determine the rough memory location */
+        u8 thisMemMask = (thisLIR->useMask | thisLIR->defMask) & ENCODE_MEM;
+
+        /*
+         * Currently only eliminate redundant ld/st for constant and Dalvik
+         * register accesses.
+         */
+        if (!(thisMemMask & (ENCODE_LITERAL | ENCODE_DALVIK_REG))) continue;
+
+        /*
+         * Add the PC to the resource mask to prevent this instruction
+         * from sinking past branch instructions. Also take out the memory
+         * region bits since stopMask is used to check data/control
+         * dependencies.
+         */
+        u8 stopUseRegMask = (ENCODE_REG_PC | thisLIR->useMask) &
+                            ~ENCODE_MEM;
+        u8 stopDefRegMask = thisLIR->defMask & ~ENCODE_MEM;
+
+        for (checkLIR = NEXT_LIR(thisLIR);
+             checkLIR != tailLIR;
+             checkLIR = NEXT_LIR(checkLIR)) {
+
+            /*
+             * Skip already dead instructions (whose dataflow information is
+             * outdated and misleading).
+             */
+            if (checkLIR->flags.isNop) continue;
+
+            u8 checkMemMask = (checkLIR->useMask | checkLIR->defMask) &
+                              ENCODE_MEM;
+            u8 aliasCondition = thisMemMask & checkMemMask;
+            bool stopHere = false;
+
+            /*
+             * Potential aliases seen - check the alias relations
+             */
+            if (checkMemMask != ENCODE_MEM && aliasCondition != 0) {
+                bool isCheckLIRLoad = EncodingMap[checkLIR->opcode].flags &
+                                      IS_LOAD;
+                if  (aliasCondition == ENCODE_LITERAL) {
+                    /*
+                     * Should only see literal loads in the instruction
+                     * stream.
+                     */
+                    assert(!(EncodingMap[checkLIR->opcode].flags &
+                             IS_STORE));
+                    /* Same value && same register type */
+                    if (checkLIR->aliasInfo == thisLIR->aliasInfo &&
+                        REGTYPE(checkLIR->operands[0]) == REGTYPE(nativeRegId)){
+                        /*
+                         * Different destination register - insert
+                         * a move
+                         */
+                        if (checkLIR->operands[0] != nativeRegId) {
+                            convertMemOpIntoMove(cUnit, checkLIR,
+                                                 checkLIR->operands[0],
+                                                 nativeRegId);
+                        }
+                        checkLIR->flags.isNop = true;
+                    }
+                } else if (aliasCondition == ENCODE_DALVIK_REG) {
+                    /* Must alias */
+                    if (checkLIR->aliasInfo == thisLIR->aliasInfo) {
+                        /* Only optimize compatible registers */
+                        bool regCompatible =
+                            REGTYPE(checkLIR->operands[0]) ==
+                            REGTYPE(nativeRegId);
+                        if ((isThisLIRLoad && isCheckLIRLoad) ||
+                            (!isThisLIRLoad && isCheckLIRLoad)) {
+                            /* RAR or RAW */
+                            if (regCompatible) {
+                                /*
+                                 * Different destination register -
+                                 * insert a move
+                                 */
+                                if (checkLIR->operands[0] !=
+                                    nativeRegId) {
+                                    convertMemOpIntoMove(cUnit,
+                                                 checkLIR,
+                                                 checkLIR->operands[0],
+                                                 nativeRegId);
+                                }
+                                checkLIR->flags.isNop = true;
+                            } else {
+                                /*
+                                 * Destinations are of different types -
+                                 * something complicated going on so
+                                 * stop looking now.
+                                 */
+                                stopHere = true;
+                            }
+                        } else if (isThisLIRLoad && !isCheckLIRLoad) {
+                            /* WAR - register value is killed */
+                            stopHere = true;
+                        } else if (!isThisLIRLoad && !isCheckLIRLoad) {
+                            /* WAW - nuke the earlier store */
+                            thisLIR->flags.isNop = true;
+                            stopHere = true;
+                        }
+                    /* Partial overlap */
+                    } else if (isDalvikRegisterClobbered(thisLIR, checkLIR)) {
+                        /*
+                         * It is actually ok to continue if checkLIR
+                         * is a read. But it is hard to make a test
+                         * case for this so we just stop here to be
+                         * conservative.
+                         */
+                        stopHere = true;
+                    }
+                }
+                /* Memory content may be updated. Stop looking now. */
+                if (stopHere) {
+                    break;
+                /* The checkLIR has been transformed - check the next one */
+                } else if (checkLIR->flags.isNop) {
+                    continue;
+                }
+            }
+
+            /*
+             * this and check LIRs have no memory dependency. Now check if
+             * their register operands have any RAW, WAR, and WAW
+             * dependencies. If so, stop looking.
+             */
+            if (stopHere == false) {
+                stopHere = CHECK_REG_DEP(stopUseRegMask, stopDefRegMask,
+                                         checkLIR);
+            }
+
+            if (stopHere == true) {
+                DEBUG_OPT(dumpDependentInsnPair(thisLIR, checkLIR,
+                                                "REG CLOBBERED"));
+                /* Only sink store instructions */
+                if (sinkDistance && !isThisLIRLoad) {
+                    MipsLIR *newStoreLIR =
+                        (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR), true);
+                    *newStoreLIR = *thisLIR;
+                    /*
+                     * Stop point found - insert *before* the checkLIR
+                     * since the instruction list is scanned in the
+                     * top-down order.
+                     */
+                    dvmCompilerInsertLIRBefore((LIR *) checkLIR,
+                                               (LIR *) newStoreLIR);
+                    thisLIR->flags.isNop = true;
+                }
+                break;
+            } else if (!checkLIR->flags.isNop) {
+                sinkDistance++;
+            }
+        }
+    }
+}
+
+/*
+ * Perform a pass of bottom-up walk, from the second instruction in the
+ * superblock, to try to hoist loads to earlier slots.
+ */
+static void applyLoadHoisting(CompilationUnit *cUnit,
+                              MipsLIR *headLIR,
+                              MipsLIR *tailLIR)
+{
+    MipsLIR *thisLIR, *checkLIR;
+    /*
+     * Store the list of independent instructions that can be hoisted past.
+     * Will decide the best place to insert later.
+     */
+    MipsLIR *prevInstList[MAX_HOIST_DISTANCE];
+
+    /* Empty block */
+    if (headLIR == tailLIR) return;
+
+    /* Start from the second instruction */
+    for (thisLIR = NEXT_LIR(headLIR);
+         thisLIR != tailLIR;
+         thisLIR = NEXT_LIR(thisLIR)) {
+
+        /* Skip non-interesting instructions */
+        if ((thisLIR->flags.isNop == true) ||
+            isPseudoOpCode(thisLIR->opcode) ||
+            !(EncodingMap[thisLIR->opcode].flags & IS_LOAD)) {
+            continue;
+        }
+
+        u8 stopUseAllMask = thisLIR->useMask;
+
+        /*
+         * Branches for null/range checks are marked with the true resource
+         * bits, and loads to Dalvik registers, constant pools, and non-alias
+         * locations are safe to be hoisted. So only mark the heap references
+         * conservatively here.
+         */
+        if (stopUseAllMask & ENCODE_HEAP_REF) {
+            stopUseAllMask |= ENCODE_REG_PC;
+        }
+
+        /* Similar to the above, but just check for pure register dependency */
+        u8 stopUseRegMask = stopUseAllMask & ~ENCODE_MEM;
+        u8 stopDefRegMask = thisLIR->defMask & ~ENCODE_MEM;
+
+        int nextSlot = 0;
+        bool stopHere = false;
+
+        /* Try to hoist the load to a good spot */
+        for (checkLIR = PREV_LIR(thisLIR);
+             checkLIR != headLIR;
+             checkLIR = PREV_LIR(checkLIR)) {
+
+            /*
+             * Skip already dead instructions (whose dataflow information is
+             * outdated and misleading).
+             */
+            if (checkLIR->flags.isNop) continue;
+
+            u8 checkMemMask = checkLIR->defMask & ENCODE_MEM;
+            u8 aliasCondition = stopUseAllMask & checkMemMask;
+            stopHere = false;
+
+            /* Potential WAR alias seen - check the exact relation */
+            if (checkMemMask != ENCODE_MEM && aliasCondition != 0) {
+                /* We can fully disambiguate Dalvik references */
+                if (aliasCondition == ENCODE_DALVIK_REG) {
+                    /* Must alias or partially overlap */
+                    if ((checkLIR->aliasInfo == thisLIR->aliasInfo) ||
+                        isDalvikRegisterClobbered(thisLIR, checkLIR)) {
+                        stopHere = true;
+                    }
+                /* Conservatively treat all heap refs as may-alias */
+                } else {
+                    assert(aliasCondition == ENCODE_HEAP_REF);
+                    stopHere = true;
+                }
+                /* Memory content may be updated. Stop looking now. */
+                if (stopHere) {
+                    prevInstList[nextSlot++] = checkLIR;
+                    break;
+                }
+            }
+
+            if (stopHere == false) {
+                stopHere = CHECK_REG_DEP(stopUseRegMask, stopDefRegMask,
+                                         checkLIR);
+            }
+
+            /*
+             * Store the dependent or non-pseudo/independent instruction to the
+             * list.
+             */
+            if (stopHere || !isPseudoOpCode(checkLIR->opcode)) {
+                prevInstList[nextSlot++] = checkLIR;
+                if (nextSlot == MAX_HOIST_DISTANCE) break;
+            }
+
+            /* Found the stop point - the load will be hoisted to near here */
+            if (stopHere == true) {
+                DEBUG_OPT(dumpDependentInsnPair(checkLIR, thisLIR,
+                                                "HOIST STOP"));
+                break;
+            }
+        }
+
+        /*
+         * Reached the top - use headLIR as the dependent marker as all labels
+         * are barriers.
+         */
+        if (stopHere == false && nextSlot < MAX_HOIST_DISTANCE) {
+            prevInstList[nextSlot++] = headLIR;
+        }
+
+        /*
+         * At least one independent instruction is found. Scan in the reversed
+         * direction to find a beneficial slot.
+         */
+        if (nextSlot >= 2) {
+            int firstSlot = nextSlot - 2;
+            int slot;
+            MipsLIR *depLIR = prevInstList[nextSlot-1];
+            /* If there is ld-ld dependency, wait LDLD_DISTANCE cycles */
+            if (!isPseudoOpCode(depLIR->opcode) &&
+                (EncodingMap[depLIR->opcode].flags & IS_LOAD)) {
+                firstSlot -= LDLD_DISTANCE;
+            }
+            /*
+             * Make sure we check slot >= 0 since firstSlot may be negative
+             * when the loop is first entered.
+             */
+            for (slot = firstSlot; slot >= 0; slot--) {
+                MipsLIR *curLIR = prevInstList[slot];
+                MipsLIR *prevLIR = prevInstList[slot+1];
+
+                /* Check the highest instruction */
+                if (prevLIR->defMask == ENCODE_ALL) {
+                    /*
+                     * If the first instruction is a load, don't hoist anything
+                     * above it since it is unlikely to be beneficial.
+                     */
+                    if (EncodingMap[curLIR->opcode].flags & IS_LOAD) continue;
+                    /*
+                     * If the remaining number of slots is less than LD_LATENCY,
+                     * insert the hoisted load here.
+                     */
+                    if (slot < LD_LATENCY) break;
+                }
+
+                /*
+                 * NOTE: now prevLIR is guaranteed to be a non-pseudo
+                 * instruction (ie accessing EncodingMap[prevLIR->opcode] is
+                 * safe).
+                 *
+                 * Try to find two instructions with load/use dependency until
+                 * the remaining instructions are less than LD_LATENCY.
+                 */
+                if (((curLIR->useMask & prevLIR->defMask) &&
+                     (EncodingMap[prevLIR->opcode].flags & IS_LOAD)) ||
+                    (slot < LD_LATENCY)) {
+                    break;
+                }
+            }
+
+            /* Found a slot to hoist to */
+            if (slot >= 0) {
+                MipsLIR *curLIR = prevInstList[slot];
+                MipsLIR *newLoadLIR = (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR),
+                                                               true);
+                *newLoadLIR = *thisLIR;
+                /*
+                 * Insertion is guaranteed to succeed since checkLIR
+                 * is never the first LIR on the list
+                 */
+                dvmCompilerInsertLIRBefore((LIR *) curLIR,
+                                           (LIR *) newLoadLIR);
+                thisLIR->flags.isNop = true;
+            }
+        }
+    }
+}
+
+void dvmCompilerApplyLocalOptimizations(CompilationUnit *cUnit, LIR *headLIR,
+                                        LIR *tailLIR)
+{
+    if (!(gDvmJit.disableOpt & (1 << kLoadStoreElimination))) {
+        applyLoadStoreElimination(cUnit, (MipsLIR *) headLIR,
+                                  (MipsLIR *) tailLIR);
+    }
+    if (!(gDvmJit.disableOpt & (1 << kLoadHoisting))) {
+        applyLoadHoisting(cUnit, (MipsLIR *) headLIR, (MipsLIR *) tailLIR);
+    }
+}
diff --git a/vm/compiler/codegen/mips/Mips32/Factory.cpp b/vm/compiler/codegen/mips/Mips32/Factory.cpp
new file mode 100644
index 0000000..9a158b4
--- /dev/null
+++ b/vm/compiler/codegen/mips/Mips32/Factory.cpp
@@ -0,0 +1,1015 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This file contains codegen for the Mips ISA and is intended to be
+ * included by:
+ *
+ *        Codegen-$(TARGET_ARCH_VARIANT).c
+ *
+ */
+
+static int coreTemps[] = {r_V0, r_V1, r_A0, r_A1, r_A2, r_A3, r_T0, r_T1, r_T2,
+                          r_T3, r_T4, r_T5, r_T6, r_T7, r_T8, r_T9, r_S0, r_S4};
+#ifdef __mips_hard_float
+static int fpTemps[] = {r_F0, r_F1, r_F2, r_F3, r_F4, r_F5, r_F6, r_F7,
+                        r_F8, r_F9, r_F10, r_F11, r_F12, r_F13, r_F14, r_F15};
+#endif
+
+static void storePair(CompilationUnit *cUnit, int base, int lowReg,
+                      int highReg);
+static void loadPair(CompilationUnit *cUnit, int base, int lowReg, int highReg);
+static MipsLIR *loadWordDisp(CompilationUnit *cUnit, int rBase, int displacement,
+                            int rDest);
+static MipsLIR *storeWordDisp(CompilationUnit *cUnit, int rBase,
+                             int displacement, int rSrc);
+static MipsLIR *genRegRegCheck(CompilationUnit *cUnit,
+                              MipsConditionCode cond,
+                              int reg1, int reg2, int dOffset,
+                              MipsLIR *pcrLabel);
+static MipsLIR *loadConstant(CompilationUnit *cUnit, int rDest, int value);
+
+#ifdef __mips_hard_float
+static MipsLIR *fpRegCopy(CompilationUnit *cUnit, int rDest, int rSrc)
+{
+    MipsLIR* res = (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR), true);
+    res->operands[0] = rDest;
+    res->operands[1] = rSrc;
+    if (rDest == rSrc) {
+        res->flags.isNop = true;
+    } else {
+        /* must be both DOUBLE or both not DOUBLE */
+        assert(DOUBLEREG(rDest) == DOUBLEREG(rSrc));
+        if (DOUBLEREG(rDest)) {
+            res->opcode = kMipsFmovd;
+        } else {
+            if (SINGLEREG(rDest)) {
+                if (SINGLEREG(rSrc)) {
+                    res->opcode = kMipsFmovs;
+                } else {
+                    /* note the operands are swapped for the mtc1 instr */
+                    res->opcode = kMipsMtc1;
+                    res->operands[0] = rSrc;
+                    res->operands[1] = rDest;
+                }
+            } else {
+                assert(SINGLEREG(rSrc));
+                res->opcode = kMipsMfc1;
+            }
+        }
+    }
+    setupResourceMasks(res);
+    return res;
+}
+#endif
+
+/*
+ * Load an immediate using a shortcut if possible; otherwise
+ * grab from the per-translation literal pool.  If the target is
+ * a high register, build the constant into a low register and copy.
+ *
+ * No additional register clobbering is performed. Use this version when
+ * 1) rDest is freshly returned from dvmCompilerAllocTemp or
+ * 2) the codegen is under fixed register usage
+ */
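+/*
+ * For illustration, the shortcut selection below yields (the long-form
+ * ori conceptually keeps only the low 16 bits of the constant):
+ *   value == 0          -> move  rDest, zero
+ *   value == 0x1234     -> ori   rDest, zero, 0x1234
+ *   value == -8         -> addiu rDest, zero, -8
+ *   value == 0x12345678 -> lui   rDest, 0x1234
+ *                          ori   rDest, rDest, 0x5678
+ */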
+static MipsLIR *loadConstantNoClobber(CompilationUnit *cUnit, int rDest,
+                                     int value)
+{
+    MipsLIR *res;
+
+#ifdef __mips_hard_float
+    int rDestSave = rDest;
+    int isFpReg = FPREG(rDest);
+    if (isFpReg) {
+        assert(SINGLEREG(rDest));
+        rDest = dvmCompilerAllocTemp(cUnit);
+    }
+#endif
+
+    /* See if the value can be constructed cheaply */
+    if (value == 0) {
+        res = newLIR2(cUnit, kMipsMove, rDest, r_ZERO);
+    } else if ((value > 0) && (value <= 65535)) {
+        res = newLIR3(cUnit, kMipsOri, rDest, r_ZERO, value);
+    } else if ((value < 0) && (value >= -32768)) {
+        res = newLIR3(cUnit, kMipsAddiu, rDest, r_ZERO, value);
+    } else {
+        res = newLIR2(cUnit, kMipsLui, rDest, value>>16);
+        if (value & 0xffff)
+            newLIR3(cUnit, kMipsOri, rDest, rDest, value);
+    }
+
+#ifdef __mips_hard_float
+    if (isFpReg) {
+        newLIR2(cUnit, kMipsMtc1, rDest, rDestSave);
+        dvmCompilerFreeTemp(cUnit, rDest);
+    }
+#endif
+
+    return res;
+}
+
+/*
+ * Load an immediate value into a fixed or temp register.  Target
+ * register is clobbered, and marked inUse.
+ */
+static MipsLIR *loadConstant(CompilationUnit *cUnit, int rDest, int value)
+{
+    if (dvmCompilerIsTemp(cUnit, rDest)) {
+        dvmCompilerClobber(cUnit, rDest);
+        dvmCompilerMarkInUse(cUnit, rDest);
+    }
+    return loadConstantNoClobber(cUnit, rDest, value);
+}
+
+/*
+ * Load a class pointer value into a fixed or temp register.  Target
+ * register is clobbered, and marked inUse.
+ */
+static MipsLIR *loadClassPointer(CompilationUnit *cUnit, int rDest, int value)
+{
+    MipsLIR *res;
+    if (dvmCompilerIsTemp(cUnit, rDest)) {
+        dvmCompilerClobber(cUnit, rDest);
+        dvmCompilerMarkInUse(cUnit, rDest);
+    }
+    res = newLIR2(cUnit, kMipsLui, rDest, value>>16);
+    if (value & 0xffff)
+        newLIR3(cUnit, kMipsOri, rDest, rDest, value);
+    return res;
+}
+
+static MipsLIR *opNone(CompilationUnit *cUnit, OpKind op)
+{
+    MipsLIR *res;
+    MipsOpCode opcode = kMipsNop;
+    switch (op) {
+        case kOpUncondBr:
+            opcode = kMipsB;
+            break;
+        default:
+            LOGE("Jit: bad case in opNone");
+            dvmCompilerAbort(cUnit);
+    }
+    res = newLIR0(cUnit, opcode);
+    return res;
+}
+
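+/*
+ * For illustration, assuming the usual MIPS mnemonics:
+ *   opCompareBranch(cUnit, kMipsBnez, rs, -1) -> "bnez rs, <target>"
+ *   opCompareBranch(cUnit, kMipsBeq, rs, rt)  -> "beq rs, rt, <target>"
+ * (the branch target is typically patched in later by the caller).
+ */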
+static MipsLIR *opCompareBranch(CompilationUnit *cUnit, MipsOpCode opc, int rs, int rt)
+{
+    MipsLIR *res;
+    if (rt < 0) {
+        assert(opc >= kMipsBeqz && opc <= kMipsBnez);
+        res = newLIR1(cUnit, opc, rs);
+    } else {
+        assert(opc == kMipsBeq || opc == kMipsBne);
+        res = newLIR2(cUnit, opc, rs, rt);
+    }
+    return res;
+}
+
+static MipsLIR *loadMultiple(CompilationUnit *cUnit, int rBase, int rMask);
+
+static MipsLIR *opReg(CompilationUnit *cUnit, OpKind op, int rDestSrc)
+{
+    MipsOpCode opcode = kMipsNop;
+    switch (op) {
+        case kOpBlx:
+            opcode = kMipsJalr;
+            break;
+        default:
+            assert(0);
+    }
+    return newLIR2(cUnit, opcode, r_RA, rDestSrc);
+}
+
+static MipsLIR *opRegRegImm(CompilationUnit *cUnit, OpKind op, int rDest,
+                           int rSrc1, int value);
+static MipsLIR *opRegImm(CompilationUnit *cUnit, OpKind op, int rDestSrc1,
+                        int value)
+{
+    MipsLIR *res;
+    bool neg = (value < 0);
+    int absValue = (neg) ? -value : value;
+    bool shortForm = (absValue & 0xff) == absValue;
+    MipsOpCode opcode = kMipsNop;
+    switch (op) {
+        case kOpAdd:
+        case kOpSub:
+            return opRegRegImm(cUnit, op, rDestSrc1, rDestSrc1, value);
+        default:
+            LOGE("Jit: bad case in opRegImm");
+            dvmCompilerAbort(cUnit);
+            break;
+    }
+    if (shortForm)
+        res = newLIR2(cUnit, opcode, rDestSrc1, absValue);
+    else {
+        int rScratch = dvmCompilerAllocTemp(cUnit);
+        res = loadConstant(cUnit, rScratch, value);
+        if (op == kOpCmp)
+            newLIR2(cUnit, opcode, rDestSrc1, rScratch);
+        else
+            newLIR3(cUnit, opcode, rDestSrc1, rDestSrc1, rScratch);
+    }
+    return res;
+}
+
+static MipsLIR *opRegRegReg(CompilationUnit *cUnit, OpKind op, int rDest,
+                           int rSrc1, int rSrc2)
+{
+    MipsOpCode opcode = kMipsNop;
+    switch (op) {
+        case kOpAdd:
+            opcode = kMipsAddu;
+            break;
+        case kOpSub:
+            opcode = kMipsSubu;
+            break;
+        case kOpAnd:
+            opcode = kMipsAnd;
+            break;
+        case kOpMul:
+            opcode = kMipsMul;
+            break;
+        case kOpOr:
+            opcode = kMipsOr;
+            break;
+        case kOpXor:
+            opcode = kMipsXor;
+            break;
+        case kOpLsl:
+            opcode = kMipsSllv;
+            break;
+        case kOpLsr:
+            opcode = kMipsSrlv;
+            break;
+        case kOpAsr:
+            opcode = kMipsSrav;
+            break;
+        default:
+            LOGE("Jit: bad case in opRegRegReg");
+            dvmCompilerAbort(cUnit);
+            break;
+    }
+    return newLIR3(cUnit, opcode, rDest, rSrc1, rSrc2);
+}
+
+static MipsLIR *opRegRegImm(CompilationUnit *cUnit, OpKind op, int rDest,
+                           int rSrc1, int value)
+{
+    MipsLIR *res;
+    MipsOpCode opcode = kMipsNop;
+    bool shortForm = true;
+
+    switch (op) {
+        case kOpAdd:
+            if (IS_SIMM16(value)) {
+                opcode = kMipsAddiu;
+            } else {
+                shortForm = false;
+                opcode = kMipsAddu;
+            }
+            break;
+        case kOpSub:
+            if (IS_SIMM16((-value))) {
+                value = -value;
+                opcode = kMipsAddiu;
+            } else {
+                shortForm = false;
+                opcode = kMipsSubu;
+            }
+            break;
+        case kOpLsl:
+            assert(value >= 0 && value <= 31);
+            opcode = kMipsSll;
+            break;
+        case kOpLsr:
+            assert(value >= 0 && value <= 31);
+            opcode = kMipsSrl;
+            break;
+        case kOpAsr:
+            assert(value >= 0 && value <= 31);
+            opcode = kMipsSra;
+            break;
+        case kOpAnd:
+            if (IS_UIMM16((value))) {
+                opcode = kMipsAndi;
+            } else {
+                shortForm = false;
+                opcode = kMipsAnd;
+            }
+            break;
+        case kOpOr:
+            if (IS_UIMM16((value))) {
+                opcode = kMipsOri;
+            } else {
+                shortForm = false;
+                opcode = kMipsOr;
+            }
+            break;
+        case kOpXor:
+            if (IS_UIMM16((value))) {
+                opcode = kMipsXori;
+            } else {
+                shortForm = false;
+                opcode = kMipsXor;
+            }
+            break;
+        case kOpMul:
+            shortForm = false;
+            opcode = kMipsMul;
+            break;
+        default:
+            LOGE("Jit: bad case in opRegRegImm");
+            dvmCompilerAbort(cUnit);
+            break;
+    }
+
+    if (shortForm)
+        res = newLIR3(cUnit, opcode, rDest, rSrc1, value);
+    else {
+        if (rDest != rSrc1) {
+            res = loadConstant(cUnit, rDest, value);
+            newLIR3(cUnit, opcode, rDest, rSrc1, rDest);
+        } else {
+            int rScratch = dvmCompilerAllocTemp(cUnit);
+            res = loadConstant(cUnit, rScratch, value);
+            newLIR3(cUnit, opcode, rDest, rSrc1, rScratch);
+        }
+    }
+    return res;
+}
+
+static MipsLIR *opRegReg(CompilationUnit *cUnit, OpKind op, int rDestSrc1,
+                        int rSrc2)
+{
+    MipsOpCode opcode = kMipsNop;
+    MipsLIR *res;
+    switch (op) {
+        case kOpMov:
+            opcode = kMipsMove;
+            break;
+        case kOpMvn:
+            return newLIR3(cUnit, kMipsNor, rDestSrc1, rSrc2, r_ZERO);
+        case kOpNeg:
+            return newLIR3(cUnit, kMipsSubu, rDestSrc1, r_ZERO, rSrc2);
+        case kOpAdd:
+        case kOpAnd:
+        case kOpMul:
+        case kOpOr:
+        case kOpSub:
+        case kOpXor:
+            return opRegRegReg(cUnit, op, rDestSrc1, rDestSrc1, rSrc2);
+        case kOp2Byte:
+#if __mips_isa_rev>=2
+            res = newLIR2(cUnit, kMipsSeb, rDestSrc1, rSrc2);
+#else
+            res = opRegRegImm(cUnit, kOpLsl, rDestSrc1, rSrc2, 24);
+            opRegRegImm(cUnit, kOpAsr, rDestSrc1, rDestSrc1, 24);
+#endif
+            return res;
+        case kOp2Short:
+#if __mips_isa_rev>=2
+            res = newLIR2(cUnit, kMipsSeh, rDestSrc1, rSrc2);
+#else
+            res = opRegRegImm(cUnit, kOpLsl, rDestSrc1, rSrc2, 16);
+            opRegRegImm(cUnit, kOpAsr, rDestSrc1, rDestSrc1, 16);
+#endif
+            return res;
+        case kOp2Char:
+             return newLIR3(cUnit, kMipsAndi, rDestSrc1, rSrc2, 0xFFFF);
+        default:
+            LOGE("Jit: bad case in opRegReg");
+            dvmCompilerAbort(cUnit);
+            break;
+    }
+    return newLIR2(cUnit, opcode, rDestSrc1, rSrc2);
+}
+
+static MipsLIR *loadConstantValueWide(CompilationUnit *cUnit, int rDestLo,
+                                     int rDestHi, int valLo, int valHi)
+{
+    MipsLIR *res;
+    res = loadConstantNoClobber(cUnit, rDestLo, valLo);
+    loadConstantNoClobber(cUnit, rDestHi, valHi);
+    return res;
+}
+
+/* Load value from base + scaled index. */
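+/*
+ * Illustrative expansion for a word load with scale == 2:
+ *   sll  tReg, rIndex, 2
+ *   addu tReg, rBase, tReg
+ *   lw   rDest, 0(tReg)
+ */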
+static MipsLIR *loadBaseIndexed(CompilationUnit *cUnit, int rBase,
+                               int rIndex, int rDest, int scale, OpSize size)
+{
+    MipsLIR *first = NULL;
+    MipsLIR *res;
+    MipsOpCode opcode = kMipsNop;
+    int tReg = dvmCompilerAllocTemp(cUnit);
+
+#ifdef __mips_hard_float
+    if (FPREG(rDest)) {
+        assert(SINGLEREG(rDest));
+        assert((size == kWord) || (size == kSingle));
+        size = kSingle;
+    } else {
+        if (size == kSingle)
+            size = kWord;
+    }
+#endif
+
+    if (!scale) {
+        first = newLIR3(cUnit, kMipsAddu, tReg, rBase, rIndex);
+    } else {
+        first = opRegRegImm(cUnit, kOpLsl, tReg, rIndex, scale);
+        newLIR3(cUnit, kMipsAddu, tReg, rBase, tReg);
+    }
+
+    switch (size) {
+#ifdef __mips_hard_float
+        case kSingle:
+            opcode = kMipsFlwc1;
+            break;
+#endif
+        case kWord:
+            opcode = kMipsLw;
+            break;
+        case kUnsignedHalf:
+            opcode = kMipsLhu;
+            break;
+        case kSignedHalf:
+            opcode = kMipsLh;
+            break;
+        case kUnsignedByte:
+            opcode = kMipsLbu;
+            break;
+        case kSignedByte:
+            opcode = kMipsLb;
+            break;
+        default:
+            LOGE("Jit: bad case in loadBaseIndexed");
+            dvmCompilerAbort(cUnit);
+    }
+
+    res = newLIR3(cUnit, opcode, rDest, 0, tReg);
+#if defined(WITH_SELF_VERIFICATION)
+    if (cUnit->heapMemOp)
+        res->flags.insertWrapper = true;
+#endif
+    dvmCompilerFreeTemp(cUnit, tReg);
+    return (first) ? first : res;
+}
+
+/* Store value to base + scaled index. */
+static MipsLIR *storeBaseIndexed(CompilationUnit *cUnit, int rBase,
+                                int rIndex, int rSrc, int scale, OpSize size)
+{
+    MipsLIR *first = NULL;
+    MipsLIR *res;
+    MipsOpCode opcode = kMipsNop;
+    int tReg = dvmCompilerAllocTemp(cUnit);
+
+#ifdef __mips_hard_float
+    if (FPREG(rSrc)) {
+        assert(SINGLEREG(rSrc));
+        assert((size == kWord) || (size == kSingle));
+        size = kSingle;
+    } else {
+        if (size == kSingle)
+            size = kWord;
+    }
+#endif
+
+    if (!scale) {
+        first = newLIR3(cUnit, kMipsAddu, tReg, rBase, rIndex);
+    } else {
+        first = opRegRegImm(cUnit, kOpLsl, tReg, rIndex, scale);
+        newLIR3(cUnit, kMipsAddu, tReg, rBase, tReg);
+    }
+
+    switch (size) {
+#ifdef __mips_hard_float
+        case kSingle:
+            opcode = kMipsFswc1;
+            break;
+#endif
+        case kWord:
+            opcode = kMipsSw;
+            break;
+        case kUnsignedHalf:
+        case kSignedHalf:
+            opcode = kMipsSh;
+            break;
+        case kUnsignedByte:
+        case kSignedByte:
+            opcode = kMipsSb;
+            break;
+        default:
+            LOGE("Jit: bad case in storeBaseIndexed");
+            dvmCompilerAbort(cUnit);
+    }
+    res = newLIR3(cUnit, opcode, rSrc, 0, tReg);
+#if defined(WITH_SELF_VERIFICATION)
+    if (cUnit->heapMemOp)
+        res->flags.insertWrapper = true;
+#endif
+    dvmCompilerFreeTemp(cUnit, tReg);
+    return first;
+}
+
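+/*
+ * For illustration, loadMultiple with rMask == 0x3 expands to:
+ *   lw    a0, 0(rBase)
+ *   lw    a1, 4(rBase)
+ *   addiu rBase, rBase, 8
+ */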
+static MipsLIR *loadMultiple(CompilationUnit *cUnit, int rBase, int rMask)
+{
+    int i;
+    int loadCnt = 0;
+    MipsLIR *res = NULL;
+    genBarrier(cUnit);
+
+    for (i = 0; i < 8; i++, rMask >>= 1) {
+        if (rMask & 0x1) { /* map r0 to MIPS r_A0 */
+            newLIR3(cUnit, kMipsLw, i+r_A0, loadCnt*4, rBase);
+            loadCnt++;
+        }
+    }
+
+    if (loadCnt) {/* increment after */
+        newLIR3(cUnit, kMipsAddiu, rBase, rBase, loadCnt*4);
+    }
+
+#if defined(WITH_SELF_VERIFICATION)
+    if (res != NULL && cUnit->heapMemOp)
+        res->flags.insertWrapper = true;
+#endif
+    genBarrier(cUnit);
+    return res; /* always NULL; no caller uses the return value */
+}
+
+static MipsLIR *storeMultiple(CompilationUnit *cUnit, int rBase, int rMask)
+{
+    int i;
+    int storeCnt = 0;
+    MipsLIR *res = NULL;
+    genBarrier(cUnit);
+
+    for (i = 0; i < 8; i++, rMask >>= 1) {
+        if (rMask & 0x1) { /* map r0 to MIPS r_A0 */
+            newLIR3(cUnit, kMipsSw, i+r_A0, storeCnt*4, rBase);
+            storeCnt++;
+        }
+    }
+
+    if (storeCnt) { /* increment after */
+        newLIR3(cUnit, kMipsAddiu, rBase, rBase, storeCnt*4);
+    }
+
+#if defined(WITH_SELF_VERIFICATION)
+    if (res != NULL && cUnit->heapMemOp)
+        res->flags.insertWrapper = true;
+#endif
+    genBarrier(cUnit);
+    return res; /* always NULL; no caller uses the return value */
+}
+
+static MipsLIR *loadBaseDispBody(CompilationUnit *cUnit, MIR *mir, int rBase,
+                                int displacement, int rDest, int rDestHi,
+                                OpSize size, int sReg)
+/*
+ * Load value from base + displacement.  Optionally perform a null check
+ * on base (which must have an associated sReg and MIR).  If not
+ * performing the null check, the incoming MIR can be null. IMPORTANT: this
+ * code must not allocate any new temps.  If a new register is needed
+ * and base and dest are the same, spill some other register to
+ * rlp and then restore.
+ */
+{
+    MipsLIR *res;
+    MipsLIR *load = NULL;
+    MipsLIR *load2 = NULL;
+    MipsOpCode opcode = kMipsNop;
+    bool shortForm = IS_SIMM16(displacement);
+    bool pair = false;
+
+    switch (size) {
+        case kLong:
+        case kDouble:
+            pair = true;
+            opcode = kMipsLw;
+#ifdef __mips_hard_float
+            if (FPREG(rDest)) {
+                opcode = kMipsFlwc1;
+                if (DOUBLEREG(rDest)) {
+                    rDest = rDest - FP_DOUBLE;
+                } else {
+                    assert(FPREG(rDestHi));
+                    assert(rDest == (rDestHi - 1));
+                }
+                rDestHi = rDest + 1;
+            }
+#endif
+            shortForm = IS_SIMM16_2WORD(displacement);
+            assert((displacement & 0x3) == 0);
+            break;
+        case kWord:
+        case kSingle:
+            opcode = kMipsLw;
+#ifdef __mips_hard_float
+            if (FPREG(rDest)) {
+                opcode = kMipsFlwc1;
+                assert(SINGLEREG(rDest));
+            }
+#endif
+            assert((displacement & 0x3) == 0);
+            break;
+        case kUnsignedHalf:
+            opcode = kMipsLhu;
+            assert((displacement & 0x1) == 0);
+            break;
+        case kSignedHalf:
+            opcode = kMipsLh;
+            assert((displacement & 0x1) == 0);
+            break;
+        case kUnsignedByte:
+            opcode = kMipsLbu;
+            break;
+        case kSignedByte:
+            opcode = kMipsLb;
+            break;
+        default:
+            LOGE("Jit: bad case in loadBaseIndexedBody");
+            dvmCompilerAbort(cUnit);
+    }
+
+    if (shortForm) {
+        if (!pair) {
+            load = res = newLIR3(cUnit, opcode, rDest, displacement, rBase);
+        } else {
+            load = res = newLIR3(cUnit, opcode, rDest, displacement + LOWORD_OFFSET, rBase);
+            load2 = newLIR3(cUnit, opcode, rDestHi, displacement + HIWORD_OFFSET, rBase);
+        }
+    } else {
+        if (pair) {
+            int rTmp = dvmCompilerAllocFreeTemp(cUnit);
+            res = opRegRegImm(cUnit, kOpAdd, rTmp, rBase, displacement);
+            load = newLIR3(cUnit, opcode, rDest, LOWORD_OFFSET, rTmp);
+            load2 = newLIR3(cUnit, opcode, rDestHi, HIWORD_OFFSET, rTmp);
+            dvmCompilerFreeTemp(cUnit, rTmp);
+        } else {
+            int rTmp = (rBase == rDest) ? dvmCompilerAllocFreeTemp(cUnit)
+                                        : rDest;
+            res = loadConstant(cUnit, rTmp, displacement);
+            load = newLIR3(cUnit, opcode, rDest, rBase, rTmp);
+            if (rTmp != rDest)
+                dvmCompilerFreeTemp(cUnit, rTmp);
+        }
+    }
+
+    if (rBase == rFP) {
+        if (load != NULL)
+            annotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
+                                    true /* isLoad */);
+        if (load2 != NULL)
+            annotateDalvikRegAccess(load2, (displacement + HIWORD_OFFSET) >> 2,
+                                    true /* isLoad */);
+    }
+#if defined(WITH_SELF_VERIFICATION)
+    if (load != NULL && cUnit->heapMemOp)
+        load->flags.insertWrapper = true;
+    if (load2 != NULL && cUnit->heapMemOp)
+        load2->flags.insertWrapper = true;
+#endif
+    return load;
+}
+
+static MipsLIR *loadBaseDisp(CompilationUnit *cUnit, MIR *mir, int rBase,
+                            int displacement, int rDest, OpSize size,
+                            int sReg)
+{
+    return loadBaseDispBody(cUnit, mir, rBase, displacement, rDest, -1,
+                            size, sReg);
+}
+
+static MipsLIR *loadBaseDispWide(CompilationUnit *cUnit, MIR *mir, int rBase,
+                                int displacement, int rDestLo, int rDestHi,
+                                int sReg)
+{
+    return loadBaseDispBody(cUnit, mir, rBase, displacement, rDestLo, rDestHi,
+                            kLong, sReg);
+}
+
+static MipsLIR *storeBaseDispBody(CompilationUnit *cUnit, int rBase,
+                                 int displacement, int rSrc, int rSrcHi,
+                                 OpSize size)
+{
+    MipsLIR *res;
+    MipsLIR *store = NULL;
+    MipsLIR *store2 = NULL;
+    MipsOpCode opcode = kMipsNop;
+    bool shortForm = IS_SIMM16(displacement);
+    bool pair = false;
+
+    switch (size) {
+        case kLong:
+        case kDouble:
+            pair = true;
+            opcode = kMipsSw;
+#ifdef __mips_hard_float
+            if (FPREG(rSrc)) {
+                opcode = kMipsFswc1;
+                if (DOUBLEREG(rSrc)) {
+                    rSrc = rSrc - FP_DOUBLE;
+                } else {
+                    assert(FPREG(rSrcHi));
+                    assert(rSrc == (rSrcHi - 1));
+                }
+                rSrcHi = rSrc + 1;
+            }
+#endif
+            shortForm = IS_SIMM16_2WORD(displacement);
+            assert((displacement & 0x3) == 0);
+            break;
+        case kWord:
+        case kSingle:
+            opcode = kMipsSw;
+#ifdef __mips_hard_float
+            if (FPREG(rSrc)) {
+                opcode = kMipsFswc1;
+                assert(SINGLEREG(rSrc));
+            }
+#endif
+            assert((displacement & 0x3) == 0);
+            break;
+        case kUnsignedHalf:
+        case kSignedHalf:
+            opcode = kMipsSh;
+            assert((displacement & 0x1) == 0);
+            break;
+        case kUnsignedByte:
+        case kSignedByte:
+            opcode = kMipsSb;
+            break;
+        default:
+            LOGE("Jit: bad case in storeBaseIndexedBody");
+            dvmCompilerAbort(cUnit);
+    }
+
+    if (shortForm) {
+        if (!pair) {
+            store = res = newLIR3(cUnit, opcode, rSrc, displacement, rBase);
+        } else {
+            store = res = newLIR3(cUnit, opcode, rSrc, displacement + LOWORD_OFFSET, rBase);
+            store2 = newLIR3(cUnit, opcode, rSrcHi, displacement + HIWORD_OFFSET, rBase);
+        }
+    } else {
+        int rScratch = dvmCompilerAllocTemp(cUnit);
+        res = opRegRegImm(cUnit, kOpAdd, rScratch, rBase, displacement);
+        if (!pair) {
+            store =  newLIR3(cUnit, opcode, rSrc, 0, rScratch);
+        } else {
+            store =  newLIR3(cUnit, opcode, rSrc, LOWORD_OFFSET, rScratch);
+            store2 = newLIR3(cUnit, opcode, rSrcHi, HIWORD_OFFSET, rScratch);
+        }
+        dvmCompilerFreeTemp(cUnit, rScratch);
+    }
+
+    if (rBase == rFP) {
+        if (store != NULL)
+            annotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
+                                    false /* isLoad */);
+        if (store2 != NULL)
+            annotateDalvikRegAccess(store2, (displacement + HIWORD_OFFSET) >> 2,
+                                    false /* isLoad */);
+    }
+
+#if defined(WITH_SELF_VERIFICATION)
+    if (store != NULL && cUnit->heapMemOp)
+        store->flags.insertWrapper = true;
+    if (store2 != NULL && cUnit->heapMemOp)
+        store2->flags.insertWrapper = true;
+#endif
+    return res;
+}
+
+static MipsLIR *storeBaseDisp(CompilationUnit *cUnit, int rBase,
+                             int displacement, int rSrc, OpSize size)
+{
+    return storeBaseDispBody(cUnit, rBase, displacement, rSrc, -1, size);
+}
+
+static MipsLIR *storeBaseDispWide(CompilationUnit *cUnit, int rBase,
+                                 int displacement, int rSrcLo, int rSrcHi)
+{
+    return storeBaseDispBody(cUnit, rBase, displacement, rSrcLo, rSrcHi, kLong);
+}
+
+static void storePair(CompilationUnit *cUnit, int base, int lowReg, int highReg)
+{
+    storeWordDisp(cUnit, base, LOWORD_OFFSET, lowReg);
+    storeWordDisp(cUnit, base, HIWORD_OFFSET, highReg);
+}
+
+static void loadPair(CompilationUnit *cUnit, int base, int lowReg, int highReg)
+{
+    loadWordDisp(cUnit, base, LOWORD_OFFSET, lowReg);
+    loadWordDisp(cUnit, base, HIWORD_OFFSET, highReg);
+}
+
+static MipsLIR* genRegCopyNoInsert(CompilationUnit *cUnit, int rDest, int rSrc)
+{
+    MipsLIR* res;
+    MipsOpCode opcode;
+#ifdef __mips_hard_float
+    if (FPREG(rDest) || FPREG(rSrc))
+        return fpRegCopy(cUnit, rDest, rSrc);
+#endif
+    res = (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR), true);
+    opcode = kMipsMove;
+    assert(LOWREG(rDest) && LOWREG(rSrc));
+    res->operands[0] = rDest;
+    res->operands[1] = rSrc;
+    res->opcode = opcode;
+    setupResourceMasks(res);
+    if (rDest == rSrc) {
+        res->flags.isNop = true;
+    }
+    return res;
+}
+
+static MipsLIR* genRegCopy(CompilationUnit *cUnit, int rDest, int rSrc)
+{
+    MipsLIR *res = genRegCopyNoInsert(cUnit, rDest, rSrc);
+    dvmCompilerAppendLIR(cUnit, (LIR*)res);
+    return res;
+}
+
+static void genRegCopyWide(CompilationUnit *cUnit, int destLo, int destHi,
+                           int srcLo, int srcHi)
+{
+#ifdef __mips_hard_float
+    bool destFP = FPREG(destLo) && FPREG(destHi);
+    bool srcFP = FPREG(srcLo) && FPREG(srcHi);
+    assert(FPREG(srcLo) == FPREG(srcHi));
+    assert(FPREG(destLo) == FPREG(destHi));
+    if (destFP) {
+        if (srcFP) {
+            genRegCopy(cUnit, S2D(destLo, destHi), S2D(srcLo, srcHi));
+        } else {
+           /* note the operands are swapped for the mtc1 instr */
+            newLIR2(cUnit, kMipsMtc1, srcLo, destLo);
+            newLIR2(cUnit, kMipsMtc1, srcHi, destHi);
+        }
+    } else {
+        if (srcFP) {
+            newLIR2(cUnit, kMipsMfc1, destLo, srcLo);
+            newLIR2(cUnit, kMipsMfc1, destHi, srcHi);
+        } else {
+            // Handle overlap
+            if (srcHi == destLo) {
+                genRegCopy(cUnit, destHi, srcHi);
+                genRegCopy(cUnit, destLo, srcLo);
+            } else {
+                genRegCopy(cUnit, destLo, srcLo);
+                genRegCopy(cUnit, destHi, srcHi);
+            }
+        }
+    }
+#else
+    // Handle overlap
+    if (srcHi == destLo) {
+        genRegCopy(cUnit, destHi, srcHi);
+        genRegCopy(cUnit, destLo, srcLo);
+    } else {
+        genRegCopy(cUnit, destLo, srcLo);
+        genRegCopy(cUnit, destHi, srcHi);
+    }
+#endif
+}
+
+static inline MipsLIR *genRegImmCheck(CompilationUnit *cUnit,
+                                     MipsConditionCode cond, int reg,
+                                     int checkValue, int dOffset,
+                                     MipsLIR *pcrLabel)
+{
+    MipsLIR *branch = NULL;
+
+    if (checkValue == 0) {
+        MipsOpCode opc = kMipsNop;
+        if (cond == kMipsCondEq) {
+            opc = kMipsBeqz;
+        } else if (cond == kMipsCondNe) {
+            opc = kMipsBnez;
+        } else if (cond == kMipsCondLt || cond == kMipsCondMi) {
+            opc = kMipsBltz;
+        } else if (cond == kMipsCondLe) {
+            opc = kMipsBlez;
+        } else if (cond == kMipsCondGt) {
+            opc = kMipsBgtz;
+        } else if (cond == kMipsCondGe) {
+            opc = kMipsBgez;
+        } else {
+            LOGE("Jit: bad case in genRegImmCheck");
+            dvmCompilerAbort(cUnit);
+        }
+        branch = opCompareBranch(cUnit, opc, reg, -1);
+    } else if (IS_SIMM16(checkValue)) {
+        if (cond == kMipsCondLt) {
+            int tReg = dvmCompilerAllocTemp(cUnit);
+            newLIR3(cUnit, kMipsSlti, tReg, reg, checkValue);
+            branch = opCompareBranch(cUnit, kMipsBne, tReg, r_ZERO);
+            dvmCompilerFreeTemp(cUnit, tReg);
+        } else {
+            LOGE("Jit: bad case in genRegImmCheck");
+            dvmCompilerAbort(cUnit);
+        }
+    } else {
+        LOGE("Jit: bad case in genRegImmCheck");
+        dvmCompilerAbort(cUnit);
+    }
+
+    if (cUnit->jitMode == kJitMethod) {
+        BasicBlock *bb = cUnit->curBlock;
+        if (bb->taken) {
+            MipsLIR  *exceptionLabel = (MipsLIR *) cUnit->blockLabelList;
+            exceptionLabel += bb->taken->id;
+            branch->generic.target = (LIR *) exceptionLabel;
+            return exceptionLabel;
+        } else {
+            LOGE("Catch blocks not handled yet");
+            dvmAbort();
+            return NULL;
+        }
+    } else {
+        return genCheckCommon(cUnit, dOffset, branch, pcrLabel);
+    }
+}
+
+#if defined(WITH_SELF_VERIFICATION)
+static void genSelfVerificationPreBranch(CompilationUnit *cUnit,
+                                         MipsLIR *origLIR) {
+// DOUGLAS - this still needs to be implemented for MIPS.
+#if 0
+    /*
+     * We need two separate pushes, since we want r5 to be pushed first.
+     * Store multiple will push LR first.
+     */
+    MipsLIR *pushFP = (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR), true);
+    pushFP->opcode = kThumbPush;
+    pushFP->operands[0] = 1 << r5FP;
+    setupResourceMasks(pushFP);
+    dvmCompilerInsertLIRBefore((LIR *) origLIR, (LIR *) pushFP);
+
+    MipsLIR *pushLR = (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR), true);
+    pushLR->opcode = kThumbPush;
+    /* Thumb push can handle LR, but is encoded differently at bit 8 */
+    pushLR->operands[0] = 1 << 8;
+    setupResourceMasks(pushLR);
+    dvmCompilerInsertLIRBefore((LIR *) origLIR, (LIR *) pushLR);
+#endif
+}
+
+static void genSelfVerificationPostBranch(CompilationUnit *cUnit,
+                                         MipsLIR *origLIR) {
+// DOUGLAS - this still needs to be implemented for MIPS.
+#if 0
+    /*
+     * Since Thumb cannot pop memory content into LR, we have to pop LR
+     * to a temp first (r5 in this case). Then we move r5 to LR, then pop the
+     * original r5 from stack.
+     */
+    /* Pop memory content(LR) into r5 first */
+    MipsLIR *popForLR = (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR), true);
+    popForLR->opcode = kThumbPop;
+    popForLR->operands[0] = 1 << r5FP;
+    setupResourceMasks(popForLR);
+    dvmCompilerInsertLIRAfter((LIR *) origLIR, (LIR *) popForLR);
+
+    MipsLIR *copy = genRegCopyNoInsert(cUnit, r14lr, r5FP);
+    dvmCompilerInsertLIRAfter((LIR *) popForLR, (LIR *) copy);
+
+    /* Now restore the original r5 */
+    MipsLIR *popFP = (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR), true);
+    popFP->opcode = kThumbPop;
+    popFP->operands[0] = 1 << r5FP;
+    setupResourceMasks(popFP);
+    dvmCompilerInsertLIRAfter((LIR *) copy, (LIR *) popFP);
+#endif
+}
+#endif
diff --git a/vm/compiler/codegen/mips/Mips32/Gen.cpp b/vm/compiler/codegen/mips/Mips32/Gen.cpp
new file mode 100644
index 0000000..29c7c5f
--- /dev/null
+++ b/vm/compiler/codegen/mips/Mips32/Gen.cpp
@@ -0,0 +1,313 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This file contains codegen for the Mips ISA and is intended to be
+ * included by:
+ *
+ *        Codegen-$(TARGET_ARCH_VARIANT).c
+ *
+ */
+
+/*
+ * Reserve 8 bytes at the beginning of the trace
+ *        +----------------------------+
+ *        | prof count addr (4 bytes)  |
+ *        +----------------------------+
+ *        | chain cell offset (4 bytes)|
+ *        +----------------------------+
+ *
+ * ...and then code to increment the execution
+ *
+ * For continuous profiling (24 bytes)
+ *       lahi  a0, addr    # get ptr to prof count addr into a0
+ *       lalo  a0, addr
+ *       lw    a0, 0(a0)   # read prof count addr into a0
+ *       lw    a1, 0(a0)   # read prof count into a1
+ *       addiu a1, a1, 1   # increment count
+ *       sw    a1, 0(a0)   # store count
+ *
+ * For periodic profiling (8 bytes)
+ *       call  TEMPLATE_PERIODIC_PROFILING
+ *       nop
+ *
+ * and return the size (in bytes) of the generated code.
+ */
+static int genTraceProfileEntry(CompilationUnit *cUnit)
+{
+    intptr_t addr = (intptr_t)dvmJitNextTraceCounter();
+    assert(__BYTE_ORDER == __LITTLE_ENDIAN);
+    MipsLIR *executionCount = newLIR1(cUnit, kMips32BitData, addr);
+    cUnit->chainCellOffsetLIR =
+        (LIR *) newLIR1(cUnit, kMips32BitData, CHAIN_CELL_OFFSET_TAG);
+    cUnit->headerSize = 8;
+    if ((gDvmJit.profileMode == kTraceProfilingContinuous) ||
+        (gDvmJit.profileMode == kTraceProfilingDisabled)) {
+        MipsLIR *loadAddr = newLIR2(cUnit, kMipsLahi, r_A0, 0);
+        loadAddr->generic.target = (LIR *) executionCount;
+        loadAddr = newLIR3(cUnit, kMipsLalo, r_A0, r_A0, 0);
+        loadAddr->generic.target = (LIR *) executionCount;
+        newLIR3(cUnit, kMipsLw, r_A0, 0, r_A0);
+        newLIR3(cUnit, kMipsLw, r_A1, 0, r_A0);
+        newLIR3(cUnit, kMipsAddiu, r_A1, r_A1, 1);
+        newLIR3(cUnit, kMipsSw, r_A1, 0, r_A0);
+        return 24;
+    } else {
+        int opcode = TEMPLATE_PERIODIC_PROFILING;
+        newLIR1(cUnit, kMipsJal,
+            (int) gDvmJit.codeCache + templateEntryOffsets[opcode]);
+        newLIR0(cUnit, kMipsNop); /* delay slot */
+        return 8;
+    }
+}
+
+/*
+ * Negate a float: adding 0x80000000 to the raw bits flips the sign bit
+ * and leaves the other bits untouched.
+ */
+static void genNegFloat(CompilationUnit *cUnit, RegLocation rlDest,
+                        RegLocation rlSrc)
+{
+    RegLocation rlResult;
+    rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
+    rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
+    opRegRegImm(cUnit, kOpAdd, rlResult.lowReg,
+                rlSrc.lowReg, 0x80000000);
+    storeValue(cUnit, rlDest, rlResult);
+}
+
+static void genNegDouble(CompilationUnit *cUnit, RegLocation rlDest,
+                         RegLocation rlSrc)
+{
+    RegLocation rlResult;
+    rlSrc = loadValueWide(cUnit, rlSrc, kCoreReg);
+    rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
+    opRegRegImm(cUnit, kOpAdd, rlResult.highReg, rlSrc.highReg,
+                        0x80000000);
+    genRegCopy(cUnit, rlResult.lowReg, rlSrc.lowReg);
+    storeValueWide(cUnit, rlDest, rlResult);
+}
+
+static void genMulLong(CompilationUnit *cUnit, RegLocation rlDest,
+                       RegLocation rlSrc1, RegLocation rlSrc2)
+{
+    RegLocation rlResult;
+    loadValueDirectWideFixed(cUnit, rlSrc1, r_ARG0, r_ARG1);
+    loadValueDirectWideFixed(cUnit, rlSrc2, r_ARG2, r_ARG3);
+    genDispatchToHandler(cUnit, TEMPLATE_MUL_LONG);
+    rlResult = dvmCompilerGetReturnWide(cUnit);
+    storeValueWide(cUnit, rlDest, rlResult);
+}
+
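+/*
+ * For example, a wide pair based at v2 (covering v2/v3) partially
+ * overlaps a wide pair based at v3 (covering v3/v4).
+ */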
+static bool partialOverlap(int sreg1, int sreg2)
+{
+    return abs(sreg1 - sreg2) == 1;
+}
+
+static void withCarryHelper(CompilationUnit *cUnit, MipsOpCode opc,
+                            RegLocation rlDest, RegLocation rlSrc1,
+                            RegLocation rlSrc2, int sltuSrc1, int sltuSrc2)
+{
+    int tReg = dvmCompilerAllocTemp(cUnit);
+    newLIR3(cUnit, opc, rlDest.lowReg, rlSrc1.lowReg, rlSrc2.lowReg);
+    newLIR3(cUnit, kMipsSltu, tReg, sltuSrc1, sltuSrc2);
+    newLIR3(cUnit, opc, rlDest.highReg, rlSrc1.highReg, rlSrc2.highReg);
+    newLIR3(cUnit, opc, rlDest.highReg, rlDest.highReg, tReg);
+    dvmCompilerFreeTemp(cUnit, tReg);
+}
+
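+/*
+ * For illustration, in the add-with-carry paths below the low-word
+ * carry is recovered as
+ *   sltu tReg, resultLo, addendLo   # 1 iff the low-word add wrapped
+ * (addendLo here stands for whichever source low word is passed) and
+ * then added into the high word.
+ */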
+static void genLong3Addr(CompilationUnit *cUnit, MIR *mir, OpKind firstOp,
+                         OpKind secondOp, RegLocation rlDest,
+                         RegLocation rlSrc1, RegLocation rlSrc2)
+{
+    RegLocation rlResult;
+    int carryOp = (secondOp == kOpAdc || secondOp == kOpSbc);
+
+    if (partialOverlap(rlSrc1.sRegLow,rlSrc2.sRegLow) ||
+        partialOverlap(rlSrc1.sRegLow,rlDest.sRegLow) ||
+        partialOverlap(rlSrc2.sRegLow,rlDest.sRegLow)) {
+        // Rare case - not enough registers to properly handle
+        genInterpSingleStep(cUnit, mir);
+    } else if (rlDest.sRegLow == rlSrc1.sRegLow) {
+        rlResult = loadValueWide(cUnit, rlDest, kCoreReg);
+        rlSrc2 = loadValueWide(cUnit, rlSrc2, kCoreReg);
+        if (!carryOp) {
+            opRegRegReg(cUnit, firstOp, rlResult.lowReg, rlResult.lowReg, rlSrc2.lowReg);
+            opRegRegReg(cUnit, secondOp, rlResult.highReg, rlResult.highReg, rlSrc2.highReg);
+        } else if (secondOp == kOpAdc) {
+            withCarryHelper(cUnit, kMipsAddu, rlResult, rlResult, rlSrc2,
+                            rlResult.lowReg, rlSrc2.lowReg);
+        } else {
+            int tReg = dvmCompilerAllocTemp(cUnit);
+            newLIR2(cUnit, kMipsMove, tReg, rlResult.lowReg);
+            withCarryHelper(cUnit, kMipsSubu, rlResult, rlResult, rlSrc2,
+                            tReg, rlResult.lowReg);
+            dvmCompilerFreeTemp(cUnit, tReg);
+        }
+        storeValueWide(cUnit, rlDest, rlResult);
+    } else if (rlDest.sRegLow == rlSrc2.sRegLow) {
+        rlResult = loadValueWide(cUnit, rlDest, kCoreReg);
+        rlSrc1 = loadValueWide(cUnit, rlSrc1, kCoreReg);
+        if (!carryOp) {
+            opRegRegReg(cUnit, firstOp, rlResult.lowReg, rlSrc1.lowReg, rlResult.lowReg);
+            opRegRegReg(cUnit, secondOp, rlResult.highReg, rlSrc1.highReg, rlResult.highReg);
+        } else if (secondOp == kOpAdc) {
+            withCarryHelper(cUnit, kMipsAddu, rlResult, rlSrc1, rlResult,
+                            rlResult.lowReg, rlSrc1.lowReg);
+        } else {
+            withCarryHelper(cUnit, kMipsSubu, rlResult, rlSrc1, rlResult,
+                            rlSrc1.lowReg, rlResult.lowReg);
+        }
+        storeValueWide(cUnit, rlDest, rlResult);
+    } else {
+        rlSrc1 = loadValueWide(cUnit, rlSrc1, kCoreReg);
+        rlSrc2 = loadValueWide(cUnit, rlSrc2, kCoreReg);
+        rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
+        if (!carryOp) {
+            opRegRegReg(cUnit, firstOp, rlResult.lowReg, rlSrc1.lowReg, rlSrc2.lowReg);
+            opRegRegReg(cUnit, secondOp, rlResult.highReg, rlSrc1.highReg, rlSrc2.highReg);
+        } else if (secondOp == kOpAdc) {
+            withCarryHelper(cUnit, kMipsAddu, rlResult, rlSrc1, rlSrc2,
+                            rlResult.lowReg, rlSrc1.lowReg);
+        } else {
+            withCarryHelper(cUnit, kMipsSubu, rlResult, rlSrc1, rlSrc2,
+                            rlSrc1.lowReg, rlResult.lowReg);
+        }
+        storeValueWide(cUnit, rlDest, rlResult);
+    }
+}
+
+void dvmCompilerInitializeRegAlloc(CompilationUnit *cUnit)
+{
+    int numTemps = sizeof(coreTemps)/sizeof(int);
+    RegisterPool *pool = (RegisterPool *) dvmCompilerNew(sizeof(*pool), true);
+    cUnit->regPool = pool;
+    pool->numCoreTemps = numTemps;
+    pool->coreTemps =
+            (RegisterInfo *) dvmCompilerNew(numTemps * sizeof(*pool->coreTemps), true);
+    dvmCompilerInitPool(pool->coreTemps, coreTemps, pool->numCoreTemps);
+#ifdef __mips_hard_float
+    int numFPTemps = sizeof(fpTemps)/sizeof(int);
+    pool->numFPTemps = numFPTemps;
+    pool->FPTemps =
+            (RegisterInfo *) dvmCompilerNew(numFPTemps * sizeof(*pool->FPTemps), true);
+    dvmCompilerInitPool(pool->FPTemps, fpTemps, pool->numFPTemps);
+#else
+    pool->numFPTemps = 0;
+    pool->FPTemps = NULL;
+    dvmCompilerInitPool(pool->FPTemps, NULL, 0);
+#endif
+    pool->nullCheckedRegs =
+        dvmCompilerAllocBitVector(cUnit->numSSARegs, false);
+}
+
+/* Export the Dalvik PC associated with an instruction to the StackSave area */
+static MipsLIR *genExportPC(CompilationUnit *cUnit, MIR *mir)
+{
+    MipsLIR *res;
+    int rDPC = dvmCompilerAllocTemp(cUnit);
+    int rAddr = dvmCompilerAllocTemp(cUnit);
+    int offset = offsetof(StackSaveArea, xtra.currentPc);
+    res = loadConstant(cUnit, rDPC, (int) (cUnit->method->insns + mir->offset));
+    newLIR3(cUnit, kMipsAddiu, rAddr, rFP, -(sizeof(StackSaveArea) - offset));
+    storeWordDisp(cUnit, rAddr, 0, rDPC);
+    return res;
+}
+
+static void genMonitor(CompilationUnit *cUnit, MIR *mir)
+{
+    genMonitorPortable(cUnit, mir);
+}
+
+static void genCmpLong(CompilationUnit *cUnit, MIR *mir, RegLocation rlDest,
+                       RegLocation rlSrc1, RegLocation rlSrc2)
+{
+    RegLocation rlResult;
+    loadValueDirectWideFixed(cUnit, rlSrc1, r_ARG0, r_ARG1);
+    loadValueDirectWideFixed(cUnit, rlSrc2, r_ARG2, r_ARG3);
+    genDispatchToHandler(cUnit, TEMPLATE_CMP_LONG);
+    rlResult = dvmCompilerGetReturn(cUnit);
+    storeValue(cUnit, rlDest, rlResult);
+}
+
+static bool genInlinedAbsFloat(CompilationUnit *cUnit, MIR *mir)
+{
+    int offset = offsetof(Thread, interpSave.retval);
+    RegLocation rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
+    int reg0 = loadValue(cUnit, rlSrc, kCoreReg).lowReg;
+#if __mips_isa_rev>=2
+    newLIR4(cUnit, kMipsExt, reg0, reg0, 0, 31-1 /* size-1 */);
+#else
+    /* clear the sign bit: shift it out and back in */
+    newLIR3(cUnit, kMipsSll, reg0, reg0, 1);
+    newLIR3(cUnit, kMipsSrl, reg0, reg0, 1);
+#endif
+    storeWordDisp(cUnit, rSELF, offset, reg0);
+    //TUNING: rewrite this to not clobber
+    dvmCompilerClobber(cUnit, reg0);
+    return false;
+}
+
+static bool genInlinedAbsDouble(CompilationUnit *cUnit, MIR *mir)
+{
+    int offset = offsetof(Thread, interpSave.retval);
+    RegLocation rlSrc = dvmCompilerGetSrcWide(cUnit, mir, 0, 1);
+    RegLocation regSrc = loadValueWide(cUnit, rlSrc, kCoreReg);
+    int reglo = regSrc.lowReg;
+    int reghi = regSrc.highReg;
+    storeWordDisp(cUnit, rSELF, offset + LOWORD_OFFSET, reglo);
+#if __mips_isa_rev>=2
+    newLIR4(cUnit, kMipsExt, reghi, reghi, 0, 31-1 /* size-1 */);
+#else
+    newLIR3(cUnit, kMipsSll, reghi, reghi, 1);
+    newLIR3(cUnit, kMipsSrl, reghi, reghi, 1);
+#endif
+    storeWordDisp(cUnit, rSELF, offset + HIWORD_OFFSET, reghi);
+    //TUNING: rewrite this to not clobber
+    dvmCompilerClobber(cUnit, reghi);
+    return false;
+}
+
+/* MIPS has the movz conditional move, so no branch is needed here */
+static bool genInlinedMinMaxInt(CompilationUnit *cUnit, MIR *mir, bool isMin)
+{
+    int offset = offsetof(Thread, interpSave.retval);
+    RegLocation rlSrc1 = dvmCompilerGetSrc(cUnit, mir, 0);
+    RegLocation rlSrc2 = dvmCompilerGetSrc(cUnit, mir, 1);
+    int reg0 = loadValue(cUnit, rlSrc1, kCoreReg).lowReg;
+    int reg1 = loadValue(cUnit, rlSrc2, kCoreReg).lowReg;
+    int tReg = dvmCompilerAllocTemp(cUnit);
+    if (isMin) {
+        newLIR3(cUnit, kMipsSlt, tReg, reg0, reg1);
+    } else {
+        newLIR3(cUnit, kMipsSlt, tReg, reg1, reg0);
+    }
+    newLIR3(cUnit, kMipsMovz, reg0, reg1, tReg);
+    dvmCompilerFreeTemp(cUnit, tReg);
+    newLIR3(cUnit, kMipsSw, reg0, offset, rSELF);
+    //TUNING: rewrite this to not clobber
+    dvmCompilerClobber(cUnit, reg0);
+    return false;
+}
+
+static void genMultiplyByTwoBitMultiplier(CompilationUnit *cUnit,
+        RegLocation rlSrc, RegLocation rlResult, int lit,
+        int firstBit, int secondBit)
+{
+    // MIPS has no combined shift-and-add instruction, so do a regular
+    // multiply.
+    opRegRegImm(cUnit, kOpMul, rlResult.lowReg, rlSrc.lowReg, lit);
+}
diff --git a/vm/compiler/codegen/mips/Mips32/Ralloc.cpp b/vm/compiler/codegen/mips/Mips32/Ralloc.cpp
new file mode 100644
index 0000000..6810131
--- /dev/null
+++ b/vm/compiler/codegen/mips/Mips32/Ralloc.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This file contains codegen for the Mips ISA and is intended to be
+ * included by:
+ *
+ *        Codegen-$(TARGET_ARCH_VARIANT).c
+ *
+ */
+
+/*
+ * Alloc a pair of core registers, or a double.  Low reg in low byte,
+ * high reg in next byte.
+ */
+int dvmCompilerAllocTypedTempPair(CompilationUnit *cUnit, bool fpHint,
+                                  int regClass)
+{
+    int highReg;
+    int lowReg;
+    int res = 0;
+
+#ifdef __mips_hard_float
+    if (((regClass == kAnyReg) && fpHint) || (regClass == kFPReg)) {
+        lowReg = dvmCompilerAllocTempDouble(cUnit);
+        highReg = lowReg + 1;
+        res = (lowReg & 0xff) | ((highReg & 0xff) << 8);
+        return res;
+    }
+#endif
+
+    lowReg = dvmCompilerAllocTemp(cUnit);
+    highReg = dvmCompilerAllocTemp(cUnit);
+    res = (lowReg & 0xff) | ((highReg & 0xff) << 8);
+    return res;
+}
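+/*
+ * Callers unpack the returned pair as, for example:
+ *   int lowReg  = res & 0xff;
+ *   int highReg = (res >> 8) & 0xff;
+ */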
+
+int dvmCompilerAllocTypedTemp(CompilationUnit *cUnit, bool fpHint, int regClass)
+{
+#ifdef __mips_hard_float
+    if (((regClass == kAnyReg) && fpHint) || (regClass == kFPReg)) {
+        return dvmCompilerAllocTempFloat(cUnit);
+    }
+#endif
+    return dvmCompilerAllocTemp(cUnit);
+}
diff --git a/vm/compiler/codegen/mips/MipsLIR.h b/vm/compiler/codegen/mips/MipsLIR.h
new file mode 100644
index 0000000..fc82da2
--- /dev/null
+++ b/vm/compiler/codegen/mips/MipsLIR.h
@@ -0,0 +1,644 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DALVIK_VM_COMPILER_CODEGEN_MIPS_MIPSLIR_H_
+#define DALVIK_VM_COMPILER_CODEGEN_MIPS_MIPSLIR_H_
+
+#include "Dalvik.h"
+#include "compiler/CompilerInternals.h"
+
+/*
+ * zero is always the value 0
+ * at is scratch for Jit (normally used as temp reg by assembler)
+ * v0, v1 are scratch for Jit (normally hold subroutine return values)
+ * a0-a3 are scratch for Jit (normally hold subroutine arguments)
+ * t0-t7 are scratch for Jit
+ * t8 is scratch for Jit
+ * t9 is scratch for Jit (normally used for function calls)
+ * s0 (rFP) is reserved [holds Dalvik frame pointer]
+ * s1 (rSELF) is reserved [holds current &Thread]
+ * s2 (rINST) is scratch for Jit
+ * s3 (rIBASE) is scratch for Jit
+ * s4-s7 are scratch for Jit
+ * k0, k1 are reserved for use by interrupt handlers
+ * gp is reserved for global pointer
+ * sp is reserved
+ * s8 is scratch for Jit
+ * ra is scratch for Jit (normally holds the return addr)
+ *
+ * Preserved across C calls: s0-s8
+ * Trashed across C calls: at, v0-v1, a0-a3, t0-t9, gp, ra
+ *
+ * Floating pointer registers
+ * NOTE: there are 32 fp registers (16 df pairs), but the current Jit
+ *       code only supports 16 fp registers (8 df pairs).
+ * f0-f15
+ * df0-df7, where df0={f0,f1}, df1={f2,f3}, ... , df7={f14,f15}
+ *
+ * f0-f15 (df0-df7) trashed across C calls
+ *
+ * For mips32 code use:
+ *      a0-a3 to hold operands
+ *      v0-v1 to hold results
+ *      t0-t9 for temps
+ *
+ * Every jump/branch instruction is followed by a delay slot.
+ *
+ */
+
+/* Offset to distinguish FP regs */
+#define FP_REG_OFFSET 32
+/* Offset to distinguish DP FP regs */
+#define FP_DOUBLE 64
+/* Offset to distinguish the extra regs */
+#define EXTRA_REG_OFFSET 128
+/* Reg types */
+#define REGTYPE(x) ((x) & (FP_REG_OFFSET | FP_DOUBLE))
+#define FPREG(x) (((x) & FP_REG_OFFSET) == FP_REG_OFFSET)
+#define EXTRAREG(x) (((x) & EXTRA_REG_OFFSET) == EXTRA_REG_OFFSET)
+#define LOWREG(x) (((x) & 0x1f) == (x))
+#define DOUBLEREG(x) (((x) & FP_DOUBLE) == FP_DOUBLE)
+#define SINGLEREG(x) (FPREG(x) && !DOUBLEREG(x))
+/*
+ * Note: the low register of a floating point pair is sufficient to
+ * create the name of a double, but require both names to be passed to
+ * allow for asserts to verify that the pair is consecutive if significant
+ * rework is done in this area.  Also, it is a good reminder in the calling
+ * code that reg locations always describe doubles as a pair of singles.
+ */
+#define S2D(x,y) ((x) | FP_DOUBLE)
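+/* e.g. S2D(r_F0, r_F1) == r_DF0, since r_DF0 == r_F0 + FP_DOUBLE below */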
+/* Mask to strip off fp flags */
+#define FP_REG_MASK (FP_REG_OFFSET-1)
+/* non-existent Dalvik register */
+#define vNone   (-1)
+/* non-existent physical register */
+#define rNone   (-1)
+
+#ifdef HAVE_LITTLE_ENDIAN
+#define LOWORD_OFFSET 0
+#define HIWORD_OFFSET 4
+#define r_ARG0 r_A0
+#define r_ARG1 r_A1
+#define r_ARG2 r_A2
+#define r_ARG3 r_A3
+#define r_RESULT0 r_V0
+#define r_RESULT1 r_V1
+#else
+#define LOWORD_OFFSET 4
+#define HIWORD_OFFSET 0
+#define r_ARG0 r_A1
+#define r_ARG1 r_A0
+#define r_ARG2 r_A3
+#define r_ARG3 r_A2
+#define r_RESULT0 r_V1
+#define r_RESULT1 r_V0
+#endif
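+/*
+ * For example, on a little-endian target the low word of a 64-bit value
+ * lives at offset 0 and is returned in v0; on a big-endian target the
+ * word offsets and register pairings swap, as encoded above.
+ */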
+
+/* These are the same for both big and little endian. */
+#define r_FARG0 r_F12
+#define r_FARG1 r_F13
+#define r_FRESULT0 r_F0
+#define r_FRESULT1 r_F1
+
+/* RegisterLocation templates return values (r_V0, or r_V0/r_V1) */
+#define LOC_C_RETURN {kLocPhysReg, 0, 0, r_V0, 0, -1}
+#define LOC_C_RETURN_WIDE {kLocPhysReg, 1, 0, r_RESULT0, r_RESULT1, -1}
+#define LOC_C_RETURN_ALT {kLocPhysReg, 0, 1, r_F0, 0, -1}
+#define LOC_C_RETURN_WIDE_ALT {kLocPhysReg, 1, 1, r_FRESULT0, r_FRESULT1, -1}
+/* RegisterLocation templates for interpState->retVal; */
+#define LOC_DALVIK_RETURN_VAL {kLocRetval, 0, 0, 0, 0, -1}
+#define LOC_DALVIK_RETURN_VAL_WIDE {kLocRetval, 1, 0, 0, 0, -1}
+
+/*
+ * Data structure tracking the mapping between a Dalvik register (pair) and a
+ * native register (pair). The idea is to reuse the previously loaded value
+ * if possible, otherwise to keep the value in a native register as long as
+ * possible.
+ */
+typedef struct RegisterInfo {
+    int reg;                    // Reg number
+    bool inUse;                 // Has it been allocated?
+    bool pair;                  // Part of a register pair?
+    int partner;                // If pair, other reg of pair
+    bool live;                  // Is there an associated SSA name?
+    bool dirty;                 // If live, is it dirty?
+    int sReg;                   // Name of live value
+    struct LIR *defStart;       // Starting inst in last def sequence
+    struct LIR *defEnd;         // Ending inst in last def sequence
+} RegisterInfo;
+
+typedef struct RegisterPool {
+    BitVector *nullCheckedRegs; // Track which registers have been null-checked
+    int numCoreTemps;
+    RegisterInfo *coreTemps;
+    int nextCoreTemp;
+    int numFPTemps;
+    RegisterInfo *FPTemps;
+    int nextFPTemp;
+} RegisterPool;
+
+typedef enum ResourceEncodingPos {
+    kGPReg0     = 0,
+    kRegSP      = 29,
+    kRegLR      = 31,
+    kFPReg0     = 32, /* only 16 fp regs supported currently */
+    kFPRegEnd   = 48,
+    kRegHI      = kFPRegEnd,
+    kRegLO,
+    kRegPC,
+    kRegEnd     = 51,
+    kCCode      = kRegEnd,
+    kFPStatus,          // FP status word
+    // The following four bits are for memory disambiguation
+    kDalvikReg,         // 1 Dalvik Frame (can be fully disambiguated)
+    kLiteral,           // 2 Literal pool (can be fully disambiguated)
+    kHeapRef,           // 3 Somewhere on the heap (alias with any other heap)
+    kMustNotAlias,      // 4 Guaranteed to be non-alias (eg *(r6+x))
+} ResourceEncodingPos;
+
+#define ENCODE_REG_LIST(N)      ((u8) N)
+#define ENCODE_REG_SP           (1ULL << kRegSP)
+#define ENCODE_REG_LR           (1ULL << kRegLR)
+#define ENCODE_REG_PC           (1ULL << kRegPC)
+#define ENCODE_CCODE            (1ULL << kCCode)
+#define ENCODE_FP_STATUS        (1ULL << kFPStatus)
+
+/* Abstract memory locations */
+#define ENCODE_DALVIK_REG       (1ULL << kDalvikReg)
+#define ENCODE_LITERAL          (1ULL << kLiteral)
+#define ENCODE_HEAP_REF         (1ULL << kHeapRef)
+#define ENCODE_MUST_NOT_ALIAS   (1ULL << kMustNotAlias)
+
+#define ENCODE_ALL              (~0ULL)
+#define ENCODE_MEM              (ENCODE_DALVIK_REG | ENCODE_LITERAL | \
+                                 ENCODE_HEAP_REF | ENCODE_MUST_NOT_ALIAS)
+
+#define DECODE_ALIAS_INFO_REG(X)        ((X) & 0xffff)
+#define DECODE_ALIAS_INFO_WIDE(X)       (((X) & 0x80000000) ? 1 : 0)
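+/* e.g. DECODE_ALIAS_INFO_REG(0x80000002) == 2 and the wide bit is set */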
+
+typedef enum OpSize {
+    kWord,
+    kLong,
+    kSingle,
+    kDouble,
+    kUnsignedHalf,
+    kSignedHalf,
+    kUnsignedByte,
+    kSignedByte,
+} OpSize;
+
+typedef enum OpKind {
+    kOpMov,
+    kOpMvn,
+    kOpCmp,
+    kOpLsl,
+    kOpLsr,
+    kOpAsr,
+    kOpRor,
+    kOpNot,
+    kOpAnd,
+    kOpOr,
+    kOpXor,
+    kOpNeg,
+    kOpAdd,
+    kOpAdc,
+    kOpSub,
+    kOpSbc,
+    kOpRsub,
+    kOpMul,
+    kOpDiv,
+    kOpRem,
+    kOpBic,
+    kOpCmn,
+    kOpTst,
+    kOpBkpt,
+    kOpBlx,
+    kOpPush,
+    kOpPop,
+    kOp2Char,
+    kOp2Short,
+    kOp2Byte,
+    kOpCondBr,
+    kOpUncondBr,
+} OpKind;
+
+/*
+ * Annotate special-purpose core registers:
+ *
+ * rPC, rFP, and rSELF are for architecture-independent code to use.
+ */
+typedef enum NativeRegisterPool {
+    r_ZERO = 0,
+    r_AT = 1,
+    r_V0 = 2,
+    r_V1 = 3,
+    r_A0 = 4,
+    r_A1 = 5,
+    r_A2 = 6,
+    r_A3 = 7,
+    r_T0 = 8,
+    r_T1 = 9,
+    r_T2 = 10,
+    r_T3 = 11,
+    r_T4 = 12,
+    r_T5 = 13,
+    r_T6 = 14,
+    r_T7 = 15,
+    r_S0 = 16,
+    r_S1 = 17,
+    r_S2 = 18,
+    r_S3 = 19,
+    r_S4 = 20,
+    r_S5 = 21,
+    r_S6 = 22,
+    r_S7 = 23,
+    r_T8 = 24,
+    r_T9 = 25,
+    r_K0 = 26,
+    r_K1 = 27,
+    r_GP = 28,
+    r_SP = 29,
+    r_FP = 30,
+    r_RA = 31,
+
+    r_F0 = 0 + FP_REG_OFFSET,
+    r_F1,
+    r_F2,
+    r_F3,
+    r_F4,
+    r_F5,
+    r_F6,
+    r_F7,
+    r_F8,
+    r_F9,
+    r_F10,
+    r_F11,
+    r_F12,
+    r_F13,
+    r_F14,
+    r_F15,
+#if 0 /* only 16 fp regs supported currently */
+    r_F16,
+    r_F17,
+    r_F18,
+    r_F19,
+    r_F20,
+    r_F21,
+    r_F22,
+    r_F23,
+    r_F24,
+    r_F25,
+    r_F26,
+    r_F27,
+    r_F28,
+    r_F29,
+    r_F30,
+    r_F31,
+#endif
+    r_DF0 = r_F0 + FP_DOUBLE,
+    r_DF1 = r_F2 + FP_DOUBLE,
+    r_DF2 = r_F4 + FP_DOUBLE,
+    r_DF3 = r_F6 + FP_DOUBLE,
+    r_DF4 = r_F8 + FP_DOUBLE,
+    r_DF5 = r_F10 + FP_DOUBLE,
+    r_DF6 = r_F12 + FP_DOUBLE,
+    r_DF7 = r_F14 + FP_DOUBLE,
+#if 0 /* only 16 fp regs supported currently */
+    r_DF8 = r_F16 + FP_DOUBLE,
+    r_DF9 = r_F18 + FP_DOUBLE,
+    r_DF10 = r_F20 + FP_DOUBLE,
+    r_DF11 = r_F22 + FP_DOUBLE,
+    r_DF12 = r_F24 + FP_DOUBLE,
+    r_DF13 = r_F26 + FP_DOUBLE,
+    r_DF14 = r_F28 + FP_DOUBLE,
+    r_DF15 = r_F30 + FP_DOUBLE,
+#endif
+    r_HI = EXTRA_REG_OFFSET,
+    r_LO,
+    r_PC,
+} NativeRegisterPool;
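+
+/*
+ * Editor's note (illustrative, not part of this change): a double such as
+ * r_DF1 is encoded as its even-numbered low single (r_F2) plus FP_DOUBLE,
+ * so the low single can be recovered by subtracting the flag back out.
+ */
+#if 0
+static int exampleLowSingleOfDouble(int doubleReg)
+{
+    return doubleReg - FP_DOUBLE;   /* e.g. r_DF1 -> r_F2 */
+}
+#endif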
+
+
+/* must match the gp offset used in the mterp/mips files */
+#define STACK_OFFSET_GP 84
+
+/* MIPSTODO: properly remap arm regs (dPC, dFP, dGLUE) and remove these mappings */
+#define r4PC r_S0
+#define rFP r_S1
+#define rSELF r_S2
+#define rINST r_S4
+
+/* Shift encodings */
+typedef enum MipsShiftEncodings {
+    kMipsLsl = 0x0,
+    kMipsLsr = 0x1,
+    kMipsAsr = 0x2,
+    kMipsRor = 0x3
+} MipsShiftEncodings;
+
+/* Condition encodings */
+typedef enum MipsConditionCode {
+    kMipsCondEq = 0x0,    /* 0000 */
+    kMipsCondNe = 0x1,    /* 0001 */
+    kMipsCondCs = 0x2,    /* 0010 */
+    kMipsCondCc = 0x3,    /* 0011 */
+    kMipsCondMi = 0x4,    /* 0100 */
+    kMipsCondPl = 0x5,    /* 0101 */
+    kMipsCondVs = 0x6,    /* 0110 */
+    kMipsCondVc = 0x7,    /* 0111 */
+    kMipsCondHi = 0x8,    /* 1000 */
+    kMipsCondLs = 0x9,    /* 1001 */
+    kMipsCondGe = 0xa,    /* 1010 */
+    kMipsCondLt = 0xb,    /* 1011 */
+    kMipsCondGt = 0xc,    /* 1100 */
+    kMipsCondLe = 0xd,    /* 1101 */
+    kMipsCondAl = 0xe,    /* 1110 */
+    kMipsCondNv = 0xf,    /* 1111 */
+} MipsConditionCode;
+
+#define isPseudoOpCode(opCode) ((int)(opCode) < 0)
+
+/*
+ * The following enum defines the list of MIPS instructions supported by the
+ * assembler. Their corresponding snippet positions will be defined in
+ * Assemble.c.
+ */
+typedef enum MipsOpCode {
+    kMipsChainingCellBottom = -18,
+    kMipsPseudoBarrier = -17,
+    kMipsPseudoExtended = -16,
+    kMipsPseudoSSARep = -15,
+    kMipsPseudoEntryBlock = -14,
+    kMipsPseudoExitBlock = -13,
+    kMipsPseudoTargetLabel = -12,
+    kMipsPseudoChainingCellBackwardBranch = -11,
+    kMipsPseudoChainingCellHot = -10,
+    kMipsPseudoChainingCellInvokePredicted = -9,
+    kMipsPseudoChainingCellInvokeSingleton = -8,
+    kMipsPseudoChainingCellNormal = -7,
+    kMipsPseudoDalvikByteCodeBoundary = -6,
+    kMipsPseudoPseudoAlign4 = -5,
+    kMipsPseudoPCReconstructionCell = -4,
+    kMipsPseudoPCReconstructionBlockLabel = -3,
+    kMipsPseudoEHBlockLabel = -2,
+    kMipsPseudoNormalBlockLabel = -1,
+
+    kMipsFirst,
+    kMips32BitData = kMipsFirst, /* data [31..0] */
+    kMipsAddiu,   /* addiu t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0] */
+    kMipsAddu,    /* addu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100001] */
+    kMipsAnd,     /* and d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100100] */
+    kMipsAndi,    /* andi t,s,imm16 [001100] s[25..21] t[20..16] imm16[15..0] */
+    kMipsB,       /* b o   [0001000000000000] o[15..0] */
+    kMipsBal,     /* bal o [0000010000010001] o[15..0] */
+    /* NOTE: the code tests the range kMipsBeq thru kMipsBne, so
+             adding an instruction in this range may require updates */
+    kMipsBeq,     /* beq s,t,o [000100] s[25..21] t[20..16] o[15..0] */
+    kMipsBeqz,    /* beqz s,o [000100] s[25..21] [00000] o[15..0] */
+    kMipsBgez,    /* bgez s,o [000001] s[25..21] [00001] o[15..0] */
+    kMipsBgtz,    /* bgtz s,o [000111] s[25..21] [00000] o[15..0] */
+    kMipsBlez,    /* blez s,o [000110] s[25..21] [00000] o[15..0] */
+    kMipsBltz,    /* bltz s,o [000001] s[25..21] [00000] o[15..0] */
+    kMipsBnez,    /* bnez s,o [000101] s[25..21] [00000] o[15..0] */
+    kMipsBne,     /* bne s,t,o [000101] s[25..21] t[20..16] o[15..0] */
+    kMipsDiv,     /* div s,t [000000] s[25..21] t[20..16] [0000000000011010] */
+#if __mips_isa_rev>=2
+    kMipsExt,     /* ext t,s,p,z [011111] s[25..21] t[20..16] z[15..11] p[10..6] [000000] */
+#endif
+    kMipsJal,     /* jal t [000011] t[25..0] */
+    kMipsJalr,    /* jalr d,s [000000] s[25..21] [00000] d[15..11]
+                                  hint[10..6] [001001] */
+    kMipsJr,      /* jr s [000000] s[25..21] [0000000000] hint[10..6] [001000] */
+    kMipsLahi,    /* lui t,imm16 [00111100000] t[20..16] imm16[15..0] load addr hi */
+    kMipsLalo,    /* ori t,s,imm16 [001101] s[25..21] t[20..16] imm16[15..0] load addr lo */
+    kMipsLui,     /* lui t,imm16 [00111100000] t[20..16] imm16[15..0] */
+    kMipsLb,      /* lb t,o(b) [100000] b[25..21] t[20..16] o[15..0] */
+    kMipsLbu,     /* lbu t,o(b) [100100] b[25..21] t[20..16] o[15..0] */
+    kMipsLh,      /* lh t,o(b) [100001] b[25..21] t[20..16] o[15..0] */
+    kMipsLhu,     /* lhu t,o(b) [100101] b[25..21] t[20..16] o[15..0] */
+    kMipsLw,      /* lw t,o(b) [100011] b[25..21] t[20..16] o[15..0] */
+    kMipsMfhi,    /* mfhi d [0000000000000000] d[15..11] [00000010000] */
+    kMipsMflo,    /* mflo d [0000000000000000] d[15..11] [00000010010] */
+    kMipsMove,    /* move d,s [000000] s[25..21] [00000] d[15..11] [00000100101] */
+    kMipsMovz,    /* movz d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000001010] */
+    kMipsMul,     /* mul d,s,t [011100] s[25..21] t[20..16] d[15..11] [00000000010] */
+    kMipsNop,     /* nop [00000000000000000000000000000000] */
+    kMipsNor,     /* nor d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100111] */
+    kMipsOr,      /* or d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100101] */
+    kMipsOri,     /* ori t,s,imm16 [001101] s[25..21] t[20..16] imm16[15..0] */
+    kMipsPref,    /* pref h,o(b) [110011] b[25..21] h[20..16] o[15..0] */
+    kMipsSb,      /* sb t,o(b) [101000] b[25..21] t[20..16] o[15..0] */
+#if __mips_isa_rev>=2
+    kMipsSeb,     /* seb d,t [01111100000] t[20..16] d[15..11] [10000100000] */
+    kMipsSeh,     /* seh d,t [01111100000] t[20..16] d[15..11] [11000100000] */
+#endif
+    kMipsSh,      /* sh t,o(b) [101001] b[25..21] t[20..16] o[15..0] */
+    kMipsSll,     /* sll d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000000] */
+    kMipsSllv,    /* sllv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000100] */
+    kMipsSlt,     /* slt d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101010] */
+    kMipsSlti,    /* slti t,s,imm16 [001010] s[25..21] t[20..16] imm16[15..0] */
+    kMipsSltu,    /* sltu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101011] */
+    kMipsSra,     /* sra d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000011] */
+    kMipsSrav,    /* srav d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000111] */
+    kMipsSrl,     /* srl d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000010] */
+    kMipsSrlv,    /* srlv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000110] */
+    kMipsSubu,    /* subu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100011] */
+    kMipsSw,      /* sw t,o(b) [101011] b[25..21] t[20..16] o[15..0] */
+    kMipsXor,     /* xor d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100110] */
+    kMipsXori,    /* xori t,s,imm16 [001110] s[25..21] t[20..16] imm16[15..0] */
+#ifdef __mips_hard_float
+    kMipsFadds,   /* add.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000000] */
+    kMipsFsubs,   /* sub.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000001] */
+    kMipsFmuls,   /* mul.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000010] */
+    kMipsFdivs,   /* div.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000011] */
+    kMipsFaddd,   /* add.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000000] */
+    kMipsFsubd,   /* sub.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000001] */
+    kMipsFmuld,   /* mul.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000010] */
+    kMipsFdivd,   /* div.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000011] */
+    kMipsFcvtsd,  /* cvt.s.d d,s [01000110001] [00000] s[15..11] d[10..6] [100000] */
+    kMipsFcvtsw,  /* cvt.s.w d,s [01000110100] [00000] s[15..11] d[10..6] [100000] */
+    kMipsFcvtds,  /* cvt.d.s d,s [01000110000] [00000] s[15..11] d[10..6] [100001] */
+    kMipsFcvtdw,  /* cvt.d.w d,s [01000110100] [00000] s[15..11] d[10..6] [100001] */
+    kMipsFcvtws,  /* cvt.w.s d,s [01000110000] [00000] s[15..11] d[10..6] [100100] */
+    kMipsFcvtwd,  /* cvt.w.d d,s [01000110001] [00000] s[15..11] d[10..6] [100100] */
+    kMipsFmovs,   /* mov.s d,s [01000110000] [00000] s[15..11] d[10..6] [000110] */
+    kMipsFmovd,   /* mov.d d,s [01000110001] [00000] s[15..11] d[10..6] [000110] */
+    kMipsFlwc1,   /* lwc1 t,o(b) [110001] b[25..21] t[20..16] o[15..0] */
+    kMipsFldc1,   /* ldc1 t,o(b) [110101] b[25..21] t[20..16] o[15..0] */
+    kMipsFswc1,   /* swc1 t,o(b) [111001] b[25..21] t[20..16] o[15..0] */
+    kMipsFsdc1,   /* sdc1 t,o(b) [111101] b[25..21] t[20..16] o[15..0] */
+    kMipsMfc1,    /* mfc1 t,s [01000100000] t[20..16] s[15..11] [00000000000] */
+    kMipsMtc1,    /* mtc1 t,s [01000100100] t[20..16] s[15..11] [00000000000] */
+#endif
+    kMipsUndefined,  /* undefined [011001xxxxxxxxxxxxxxxx] */
+    kMipsLast
+} MipsOpCode;
+
+/* Bit flags describing the behavior of each native opcode */
+typedef enum MipsOpFeatureFlags {
+    kIsBranch = 0,
+    kRegDef0,
+    kRegDef1,
+    kRegDefSP,
+    kRegDefLR,
+    kRegDefList0,
+    kRegDefList1,
+    kRegUse0,
+    kRegUse1,
+    kRegUse2,
+    kRegUse3,
+    kRegUseSP,
+    kRegUsePC,
+    kRegUseList0,
+    kRegUseList1,
+    kNoOperand,
+    kIsUnaryOp,
+    kIsBinaryOp,
+    kIsTertiaryOp,
+    kIsQuadOp,
+    kIsIT,
+    kSetsCCodes,
+    kUsesCCodes,
+    kMemLoad,
+    kMemStore,
+} MipsOpFeatureFlags;
+
+#define IS_LOAD         (1 << kMemLoad)
+#define IS_STORE        (1 << kMemStore)
+#define IS_BRANCH       (1 << kIsBranch)
+#define REG_DEF0        (1 << kRegDef0)
+#define REG_DEF1        (1 << kRegDef1)
+#define REG_DEF_SP      (1 << kRegDefSP)
+#define REG_DEF_LR      (1 << kRegDefLR)
+#define REG_DEF_LIST0   (1 << kRegDefList0)
+#define REG_DEF_LIST1   (1 << kRegDefList1)
+#define REG_USE0        (1 << kRegUse0)
+#define REG_USE1        (1 << kRegUse1)
+#define REG_USE2        (1 << kRegUse2)
+#define REG_USE3        (1 << kRegUse3)
+#define REG_USE_SP      (1 << kRegUseSP)
+#define REG_USE_PC      (1 << kRegUsePC)
+#define REG_USE_LIST0   (1 << kRegUseList0)
+#define REG_USE_LIST1   (1 << kRegUseList1)
+#define NO_OPERAND      (1 << kNoOperand)
+#define IS_UNARY_OP     (1 << kIsUnaryOp)
+#define IS_BINARY_OP    (1 << kIsBinaryOp)
+#define IS_TERTIARY_OP  (1 << kIsTertiaryOp)
+#define IS_QUAD_OP      (1 << kIsQuadOp)
+#define IS_IT           (1 << kIsIT)
+#define SETS_CCODES     (1 << kSetsCCodes)
+#define USES_CCODES     (1 << kUsesCCodes)
+
+/* Common combo register usage patterns */
+#define REG_USE01       (REG_USE0 | REG_USE1)
+#define REG_USE02       (REG_USE0 | REG_USE2)
+#define REG_USE012      (REG_USE01 | REG_USE2)
+#define REG_USE12       (REG_USE1 | REG_USE2)
+#define REG_USE23       (REG_USE2 | REG_USE3)
+#define REG_DEF01       (REG_DEF0 | REG_DEF1)
+#define REG_DEF0_USE0   (REG_DEF0 | REG_USE0)
+#define REG_DEF0_USE1   (REG_DEF0 | REG_USE1)
+#define REG_DEF0_USE2   (REG_DEF0 | REG_USE2)
+#define REG_DEF0_USE01  (REG_DEF0 | REG_USE01)
+#define REG_DEF0_USE12  (REG_DEF0 | REG_USE12)
+#define REG_DEF01_USE2  (REG_DEF0 | REG_DEF1 | REG_USE2)
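+
+/*
+ * Illustrative sketch (editor's addition, not part of this change): the kind
+ * of flag word a load like kMipsLw could carry in the encoding map -- a load
+ * that defines operand 0 and uses its base register in operand 2.  The
+ * authoritative flags live in Assemble.c; this value is only an example.
+ */
+#if 0
+static const int exampleLoadFlags = IS_LOAD | IS_TERTIARY_OP | REG_DEF0_USE2;
+#endif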
+
+/* Instruction assembly fieldLoc kind */
+typedef enum MipsEncodingKind {
+    kFmtUnused,
+    kFmtBitBlt,        /* Bit string using end/start */
+    kFmtDfp,           /* Double FP reg */
+    kFmtSfp,           /* Single FP reg */
+} MipsEncodingKind;
+
+/* Struct used to define the snippet positions for each MIPS opcode */
+typedef struct MipsEncodingMap {
+    u4 skeleton;
+    struct {
+        MipsEncodingKind kind;
+        int end;   /* end for kFmtBitBlt, 1-bit slice end for FP regs */
+        int start; /* start for kFmtBitBlt, 4-bit slice end for FP regs */
+    } fieldLoc[4];
+    MipsOpCode opcode;
+    int flags;
+    const char *name;
+    const char* fmt;
+    int size;
+} MipsEncodingMap;
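+
+/*
+ * Illustrative sketch (editor's addition, not part of this change): roughly
+ * what an EncodingMap entry for kMipsAddiu could look like.  The real table
+ * lives in Assemble.c; the format string here is only indicative.
+ */
+#if 0
+static const MipsEncodingMap exampleAddiuEntry = {
+    0x24000000,                       /* [001001] opcode skeleton */
+    {{kFmtBitBlt, 20, 16},            /* t: destination register */
+     {kFmtBitBlt, 25, 21},            /* s: source register */
+     {kFmtBitBlt, 15, 0},             /* imm16 */
+     {kFmtUnused, -1, -1}},
+    kMipsAddiu,
+    IS_TERTIARY_OP | REG_DEF0_USE1,
+    "addiu",
+    "!0r,!1r,0x!2h",
+    4,                                /* all MIPS32 instructions are 4 bytes */
+};
+#endif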
+
+/* Keys for target-specific scheduling and other optimization hints */
+typedef enum MipsTargetOptHints {
+    kMaxHoistDistance,
+} MipsTargetOptHints;
+
+extern MipsEncodingMap EncodingMap[kMipsLast];
+
+/*
+ * Each instance of this struct holds a pseudo or real LIR instruction:
+ * - pseudo ones (eg labels and marks) will be discarded by the assembler.
+ * - real ones will be assembled into MIPS instructions.
+ *
+ * Machine resources are encoded into a 64-bit vector, where the encodings
+ * follow ResourceEncodingPos above:
+ * - [ 0..31]: general purpose registers including SP (29) and RA (31)
+ * - [32..47]: floating-point registers
+ * - [48..50]: HI, LO, and PC
+ * - [51]: integer condition code
+ * - [52]: floating-point status word
+ */
+typedef struct MipsLIR {
+    LIR generic;
+    MipsOpCode opcode;
+    int operands[4];            // [0..3] = [dest, src1, src2, extra]
+    struct {
+        bool isNop:1;           // LIR is optimized away
+        bool insertWrapper:1;   // insert branch to emulate memory accesses
+        unsigned int age:4;     // default is 0, set lazily by the optimizer
+        unsigned int size:3;    // instruction size in bytes (4 on MIPS)
+        unsigned int unused:23;
+    } flags;
+    int aliasInfo;              // For Dalvik register access & litpool disambiguation
+    u8 useMask;                 // Resource mask for use
+    u8 defMask;                 // Resource mask for def
+} MipsLIR;
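+
+/*
+ * Illustrative sketch (editor's addition, not part of this change): filling
+ * in a MipsLIR node for "addiu a0,a0,4" by hand; real code goes through the
+ * compiler's LIR-construction helpers.
+ */
+#if 0
+static void exampleFillLIR(MipsLIR *lir)
+{
+    lir->opcode = kMipsAddiu;
+    lir->operands[0] = r_A0;                    /* dest */
+    lir->operands[1] = r_A0;                    /* src */
+    lir->operands[2] = 4;                       /* immediate */
+    lir->flags.isNop = false;
+    lir->useMask = ENCODE_REG_LIST(1 << r_A0);  /* reads a0 */
+    lir->defMask = ENCODE_REG_LIST(1 << r_A0);  /* writes a0 */
+}
+#endif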
+
+/* Init values when a predicted chain is initially assembled */
+/* E7FE is branch to self */
+#define PREDICTED_CHAIN_BX_PAIR_INIT     0xe7fe
+#define PREDICTED_CHAIN_DELAY_SLOT_INIT  0
+#define PREDICTED_CHAIN_CLAZZ_INIT       0
+#define PREDICTED_CHAIN_METHOD_INIT      0
+#define PREDICTED_CHAIN_COUNTER_INIT     0
+
+/* Utility macros to traverse the LIR/MipsLIR list */
+#define NEXT_LIR(lir) ((MipsLIR *) lir->generic.next)
+#define PREV_LIR(lir) ((MipsLIR *) lir->generic.prev)
+
+#define NEXT_LIR_LVALUE(lir) (lir)->generic.next
+#define PREV_LIR_LVALUE(lir) (lir)->generic.prev
+
+#define CHAIN_CELL_OFFSET_TAG   0xcdabcdabL
+
+#define IS_UIMM16(v) ((0 <= (v)) && ((v) <= 65535))
+#define IS_SIMM16(v) ((-32768 <= (v)) && ((v) <= 32766))
+#define IS_SIMM16_2WORD(v) ((-32764 <= (v)) && ((v) <= 32763)) /* 2 offsets must fit */
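+
+/*
+ * Illustrative sketch (editor's addition, not part of this change): a
+ * constant that passes IS_SIMM16 fits in one addiu; anything wider needs a
+ * lui/ori (kMipsLahi/kMipsLalo) pair.
+ */
+#if 0
+static bool exampleFitsOneInstruction(int value)
+{
+    return IS_SIMM16(value);
+}
+#endif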
+
+#define CHAIN_CELL_NORMAL_SIZE    16
+#define CHAIN_CELL_PREDICTED_SIZE 20
+
+
+#endif  // DALVIK_VM_COMPILER_CODEGEN_MIPS_MIPSLIR_H_
diff --git a/vm/compiler/codegen/mips/Ralloc.h b/vm/compiler/codegen/mips/Ralloc.h
new file mode 100644
index 0000000..33ad2fb
--- /dev/null
+++ b/vm/compiler/codegen/mips/Ralloc.h
@@ -0,0 +1,206 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This file contains register allocation support and is intended to be
+ * included by:
+ *
+ *        Codegen-$(TARGET_ARCH_VARIANT).c
+ *
+ */
+
+#include "compiler/CompilerUtility.h"
+#include "compiler/CompilerIR.h"
+#include "compiler/Dataflow.h"
+#include "compiler/codegen/mips/MipsLIR.h"
+
+/*
+ * Return most flexible allowed register class based on size.
+ * Bug: 2813841
+ * Must use a core register for data types narrower than a word (due
+ * to possible unaligned load/store).
+ */
+static inline RegisterClass dvmCompilerRegClassBySize(OpSize size)
+{
+    return (size == kUnsignedHalf ||
+            size == kSignedHalf ||
+            size == kUnsignedByte ||
+            size == kSignedByte ) ? kCoreReg : kAnyReg;
+}
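+
+/*
+ * Illustrative sketch (editor's addition, not part of this change): a byte
+ * load must stay in a core register, while a full word may take either
+ * class.
+ */
+#if 0
+static void exampleRegClass(void)
+{
+    RegisterClass byteClass = dvmCompilerRegClassBySize(kSignedByte); /* kCoreReg */
+    RegisterClass wordClass = dvmCompilerRegClassBySize(kWord);       /* kAnyReg */
+    (void) byteClass;
+    (void) wordClass;
+}
+#endif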
+
+static inline int dvmCompilerS2VReg(CompilationUnit *cUnit, int sReg)
+{
+    assert(sReg != INVALID_SREG);
+    return DECODE_REG(dvmConvertSSARegToDalvik(cUnit, sReg));
+}
+
+/* Reset the tracker to unknown state */
+static inline void dvmCompilerResetNullCheck(CompilationUnit *cUnit)
+{
+    dvmClearAllBits(cUnit->regPool->nullCheckedRegs);
+}
+
+/*
+ * Get the "real" sreg number associated with an sReg slot.  In general,
+ * sReg values passed through codegen are the SSA names created by
+ * dataflow analysis and refer to slot numbers in the cUnit->regLocation
+ * array.  However, renaming is accomplished by simply replacing RegLocation
+ * entries in the cUnit->regLocation[] array.  Therefore, when location
+ * records for operands are first created, we need to ask the locRecord
+ * identified by the dataflow pass what its new name is.
+ */
+
+static inline int dvmCompilerSRegHi(int lowSreg) {
+    return (lowSreg == INVALID_SREG) ? INVALID_SREG : lowSreg + 1;
+}
+
+
+static inline bool dvmCompilerLiveOut(CompilationUnit *cUnit, int sReg)
+{
+    //TODO: fully implement
+    return true;
+}
+
+static inline int dvmCompilerSSASrc(MIR *mir, int num)
+{
+    assert(mir->ssaRep->numUses > num);
+    return mir->ssaRep->uses[num];
+}
+
+extern RegLocation dvmCompilerEvalLoc(CompilationUnit *cUnit, RegLocation loc,
+                                      int regClass, bool update);
+/* Mark a temp register as dead.  Does not affect allocation state. */
+extern void dvmCompilerClobber(CompilationUnit *cUnit, int reg);
+
+extern RegLocation dvmCompilerUpdateLoc(CompilationUnit *cUnit,
+                                        RegLocation loc);
+
+/* see comments for updateLoc */
+extern RegLocation dvmCompilerUpdateLocWide(CompilationUnit *cUnit,
+                                            RegLocation loc);
+
+/* Clobber all of the temps that might be used by a handler. */
+extern void dvmCompilerClobberHandlerRegs(CompilationUnit *cUnit);
+
+extern void dvmCompilerMarkLive(CompilationUnit *cUnit, int reg, int sReg);
+
+extern void dvmCompilerMarkDirty(CompilationUnit *cUnit, int reg);
+
+extern void dvmCompilerMarkPair(CompilationUnit *cUnit, int lowReg,
+                                int highReg);
+
+extern void dvmCompilerMarkClean(CompilationUnit *cUnit, int reg);
+
+extern void dvmCompilerResetDef(CompilationUnit *cUnit, int reg);
+
+extern void dvmCompilerResetDefLoc(CompilationUnit *cUnit, RegLocation rl);
+
+/* Set up temp & preserved register pools specialized by target */
+extern void dvmCompilerInitPool(RegisterInfo *regs, int *regNums, int num);
+
+/*
+ * Mark the beginning and end LIR of a def sequence.  Note that
+ * on entry start points to the LIR prior to the beginning of the
+ * sequence.
+ */
+extern void dvmCompilerMarkDef(CompilationUnit *cUnit, RegLocation rl,
+                               LIR *start, LIR *finish);
+/*
+ * Mark the beginning and end LIR of a def sequence.  Note that
+ * on entry start points to the LIR prior to the beginning of the
+ * sequence.
+ */
+extern void dvmCompilerMarkDefWide(CompilationUnit *cUnit, RegLocation rl,
+                                   LIR *start, LIR *finish);
+
+extern RegLocation dvmCompilerGetSrcWide(CompilationUnit *cUnit, MIR *mir,
+                                         int low, int high);
+
+extern RegLocation dvmCompilerGetDestWide(CompilationUnit *cUnit, MIR *mir,
+                                          int low, int high);
+// Get the LocRecord associated with an SSA name use.
+extern RegLocation dvmCompilerGetSrc(CompilationUnit *cUnit, MIR *mir, int num);
+
+// Get the LocRecord associated with an SSA name def.
+extern RegLocation dvmCompilerGetDest(CompilationUnit *cUnit, MIR *mir,
+                                      int num);
+
+extern RegLocation dvmCompilerGetReturnWide(CompilationUnit *cUnit);
+
+/* Clobber all regs that might be used by an external C call */
+extern void dvmCompilerClobberCallRegs(CompilationUnit *cUnit);
+
+extern RegisterInfo *dvmCompilerIsTemp(CompilationUnit *cUnit, int reg);
+
+extern void dvmCompilerMarkInUse(CompilationUnit *cUnit, int reg);
+
+extern int dvmCompilerAllocTemp(CompilationUnit *cUnit);
+
+extern int dvmCompilerAllocTempFloat(CompilationUnit *cUnit);
+
+//REDO: too many assumptions.
+extern int dvmCompilerAllocTempDouble(CompilationUnit *cUnit);
+
+extern void dvmCompilerFreeTemp(CompilationUnit *cUnit, int reg);
+
+extern void dvmCompilerResetDefLocWide(CompilationUnit *cUnit, RegLocation rl);
+
+extern void dvmCompilerResetDefTracking(CompilationUnit *cUnit);
+
+/* Kill the corresponding bit in the null-checked register list */
+extern void dvmCompilerKillNullCheckedLoc(CompilationUnit *cUnit,
+                                          RegLocation loc);
+
+//FIXME - this needs to also check the preserved pool.
+extern RegisterInfo *dvmCompilerIsLive(CompilationUnit *cUnit, int reg);
+
+/* To be used when explicitly managing register use */
+extern void dvmCompilerLockAllTemps(CompilationUnit *cUnit);
+
+extern void dvmCompilerFlushAllRegs(CompilationUnit *cUnit);
+
+extern RegLocation dvmCompilerGetReturnWideAlt(CompilationUnit *cUnit);
+
+extern RegLocation dvmCompilerGetReturn(CompilationUnit *cUnit);
+
+extern RegLocation dvmCompilerGetReturnAlt(CompilationUnit *cUnit);
+
+/* Clobber any temp associated with an sReg.  Could be in either class */
+extern void dvmCompilerClobberSReg(CompilationUnit *cUnit, int sReg);
+
+/* Return a temp if one is available, -1 otherwise */
+extern int dvmCompilerAllocFreeTemp(CompilationUnit *cUnit);
+
+/*
+ * Similar to dvmCompilerAllocTemp(), but forces the allocation of a specific
+ * register.  No check is made to see if the register was previously
+ * allocated.  Use with caution.
+ */
+extern void dvmCompilerLockTemp(CompilationUnit *cUnit, int reg);
+
+extern RegLocation dvmCompilerWideToNarrow(CompilationUnit *cUnit,
+                                           RegLocation rl);
+
+/*
+ * Free all allocated temps in the temp pools.  Note that this does
+ * not affect the "liveness" of a temp register, which will stay
+ * live until it is either explicitly killed or reallocated.
+ */
+extern void dvmCompilerResetRegPool(CompilationUnit *cUnit);
+
+extern void dvmCompilerClobberAllRegs(CompilationUnit *cUnit);
+
+extern void dvmCompilerResetDefTracking(CompilationUnit *cUnit);
diff --git a/vm/compiler/codegen/mips/RallocUtil.cpp b/vm/compiler/codegen/mips/RallocUtil.cpp
new file mode 100644
index 0000000..1904373
--- /dev/null
+++ b/vm/compiler/codegen/mips/RallocUtil.cpp
@@ -0,0 +1,1025 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This file contains register allocation support and is intended to be
+ * included by:
+ *
+ *        Codegen-$(TARGET_ARCH_VARIANT).c
+ *
+ */
+
+#include "compiler/CompilerUtility.h"
+#include "compiler/CompilerIR.h"
+#include "compiler/Dataflow.h"
+#include "MipsLIR.h"
+#include "Codegen.h"
+#include "Ralloc.h"
+
+#define SREG(c, s) ((c)->regLocation[(s)].sRegLow)
+/*
+ * Get the "real" sreg number associated with an sReg slot.  In general,
+ * sReg values passed through codegen are the SSA names created by
+ * dataflow analysis and refer to slot numbers in the cUnit->regLocation
+ * array.  However, renaming is accomplished by simply replacing RegLocation
+ * entries in the cUnit->regLocation[] array.  Therefore, when location
+ * records for operands are first created, we need to ask the locRecord
+ * identified by the dataflow pass what its new name is.
+ */
+
+/*
+ * Free all allocated temps in the temp pools.  Note that this does
+ * not affect the "liveness" of a temp register, which will stay
+ * live until it is either explicitly killed or reallocated.
+ */
+extern void dvmCompilerResetRegPool(CompilationUnit *cUnit)
+{
+    int i;
+    for (i=0; i < cUnit->regPool->numCoreTemps; i++) {
+        cUnit->regPool->coreTemps[i].inUse = false;
+    }
+    for (i=0; i < cUnit->regPool->numFPTemps; i++) {
+        cUnit->regPool->FPTemps[i].inUse = false;
+    }
+}
+
+/* Set up temp & preserved register pools specialized by target */
+extern void dvmCompilerInitPool(RegisterInfo *regs, int *regNums, int num)
+{
+    int i;
+    for (i=0; i < num; i++) {
+        regs[i].reg = regNums[i];
+        regs[i].inUse = false;
+        regs[i].pair = false;
+        regs[i].live = false;
+        regs[i].dirty = false;
+        regs[i].sReg = INVALID_SREG;
+    }
+}
+
+static void dumpRegPool(RegisterInfo *p, int numRegs)
+{
+    int i;
+    LOGE("================================================");
+    for (i=0; i < numRegs; i++) {
+        LOGE("R[%d]: U:%d, P:%d, part:%d, LV:%d, D:%d, SR:%d, ST:%x, EN:%x",
+           p[i].reg, p[i].inUse, p[i].pair, p[i].partner, p[i].live,
+           p[i].dirty, p[i].sReg,(int)p[i].defStart, (int)p[i].defEnd);
+    }
+    LOGE("================================================");
+}
+
+static RegisterInfo *getRegInfo(CompilationUnit *cUnit, int reg)
+{
+    int numTemps = cUnit->regPool->numCoreTemps;
+    RegisterInfo *p = cUnit->regPool->coreTemps;
+    int i;
+    for (i=0; i< numTemps; i++) {
+        if (p[i].reg == reg) {
+            return &p[i];
+        }
+    }
+    p = cUnit->regPool->FPTemps;
+    numTemps = cUnit->regPool->numFPTemps;
+    for (i=0; i< numTemps; i++) {
+        if (p[i].reg == reg) {
+            return &p[i];
+        }
+    }
+    LOGE("Tried to get info on a non-existant temp: r%d",reg);
+    dvmCompilerAbort(cUnit);
+    return NULL;
+}
+
+static void flushRegWide(CompilationUnit *cUnit, int reg1, int reg2)
+{
+    RegisterInfo *info1 = getRegInfo(cUnit, reg1);
+    RegisterInfo *info2 = getRegInfo(cUnit, reg2);
+    assert(info1 && info2 && info1->pair && info2->pair &&
+           (info1->partner == info2->reg) &&
+           (info2->partner == info1->reg));
+    if ((info1->live && info1->dirty) || (info2->live && info2->dirty)) {
+        info1->dirty = false;
+        info2->dirty = false;
+        if (dvmCompilerS2VReg(cUnit, info2->sReg) <
+            dvmCompilerS2VReg(cUnit, info1->sReg))
+            info1 = info2;
+        dvmCompilerFlushRegWideImpl(cUnit, rFP,
+                                    dvmCompilerS2VReg(cUnit, info1->sReg) << 2,
+                                    info1->reg, info1->partner);
+    }
+}
+
+static void flushReg(CompilationUnit *cUnit, int reg)
+{
+    RegisterInfo *info = getRegInfo(cUnit, reg);
+    if (info->live && info->dirty) {
+        info->dirty = false;
+        dvmCompilerFlushRegImpl(cUnit, rFP,
+                                dvmCompilerS2VReg(cUnit, info->sReg) << 2,
+                                reg, kWord);
+    }
+}
+
+/* return true if found reg to clobber */
+static bool clobberRegBody(CompilationUnit *cUnit, RegisterInfo *p,
+                           int numTemps, int reg)
+{
+    int i;
+    for (i=0; i< numTemps; i++) {
+        if (p[i].reg == reg) {
+            if (p[i].live && p[i].dirty) {
+                if (p[i].pair) {
+                    flushRegWide(cUnit, p[i].reg, p[i].partner);
+                } else {
+                    flushReg(cUnit, p[i].reg);
+                }
+            }
+            p[i].live = false;
+            p[i].sReg = INVALID_SREG;
+            p[i].defStart = NULL;
+            p[i].defEnd = NULL;
+            if (p[i].pair) {
+                p[i].pair = false;
+                /* partners should be in same pool */
+                clobberRegBody(cUnit, p, numTemps, p[i].partner);
+            }
+            return true;
+        }
+    }
+    return false;
+}
+
+/* Mark a temp register as dead.  Does not affect allocation state. */
+void dvmCompilerClobber(CompilationUnit *cUnit, int reg)
+{
+    if (!clobberRegBody(cUnit, cUnit->regPool->coreTemps,
+                        cUnit->regPool->numCoreTemps, reg)) {
+        clobberRegBody(cUnit, cUnit->regPool->FPTemps,
+                       cUnit->regPool->numFPTemps, reg);
+    }
+}
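+
+/*
+ * Illustrative sketch (editor's addition, not part of this change): the
+ * usual pattern before calling out to a C helper -- spill everything live
+ * and dirty, then forget the caller-save registers.
+ */
+#if 0
+static void exampleBeforeHelperCall(CompilationUnit *cUnit)
+{
+    dvmCompilerFlushAllRegs(cUnit);
+    dvmCompilerClobberCallRegs(cUnit);
+}
+#endif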
+
+static void clobberSRegBody(RegisterInfo *p, int numTemps, int sReg)
+{
+    int i;
+    for (i=0; i< numTemps; i++) {
+        if (p[i].sReg == sReg) {
+            p[i].live = false;
+            p[i].defStart = NULL;
+            p[i].defEnd = NULL;
+        }
+    }
+}
+
+/* Clobber any temp associated with an sReg.  Could be in either class */
+extern void dvmCompilerClobberSReg(CompilationUnit *cUnit, int sReg)
+{
+    clobberSRegBody(cUnit->regPool->coreTemps, cUnit->regPool->numCoreTemps,
+                    sReg);
+    clobberSRegBody(cUnit->regPool->FPTemps, cUnit->regPool->numFPTemps,
+                    sReg);
+}
+
+static int allocTempBody(CompilationUnit *cUnit, RegisterInfo *p, int numTemps,
+                         int *nextTemp, bool required)
+{
+    int i;
+    int next = *nextTemp;
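+    /* First pass: prefer a temp that is neither allocated nor holding a
+     * live value, so no spill is needed. */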
+    for (i=0; i< numTemps; i++) {
+        if (next >= numTemps)
+            next = 0;
+        if (!p[next].inUse && !p[next].live) {
+            dvmCompilerClobber(cUnit, p[next].reg);
+            p[next].inUse = true;
+            p[next].pair = false;
+            *nextTemp = next + 1;
+            return p[next].reg;
+        }
+        next++;
+    }
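+    /* Second pass: nothing was both free and dead; settle for any temp
+     * not currently in use, clobbering (and spilling) its live value. */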
+    next = *nextTemp;
+    for (i=0; i< numTemps; i++) {
+        if (next >= numTemps)
+            next = 0;
+        if (!p[next].inUse) {
+            dvmCompilerClobber(cUnit, p[next].reg);
+            p[next].inUse = true;
+            p[next].pair = false;
+            *nextTemp = next + 1;
+            return p[next].reg;
+        }
+        next++;
+    }
+    if (required) {
+        LOGE("No free temp registers");
+        dvmCompilerAbort(cUnit);
+    }
+    return -1;  // No register available
+}
+
+//REDO: too many assumptions.
+extern int dvmCompilerAllocTempDouble(CompilationUnit *cUnit)
+{
+    RegisterInfo *p = cUnit->regPool->FPTemps;
+    int numTemps = cUnit->regPool->numFPTemps;
+    /* Cleanup - not all targets need aligned regs */
+    int start = cUnit->regPool->nextFPTemp + (cUnit->regPool->nextFPTemp & 1);
+    int next = start;
+    int i;
+
+    for (i=0; i < numTemps; i+=2) {
+        if (next >= numTemps)
+            next = 0;
+        if ((!p[next].inUse && !p[next].live) &&
+            (!p[next+1].inUse && !p[next+1].live)) {
+            dvmCompilerClobber(cUnit, p[next].reg);
+            dvmCompilerClobber(cUnit, p[next+1].reg);
+            p[next].inUse = true;
+            p[next+1].inUse = true;
+            assert((p[next].reg+1) == p[next+1].reg);
+            assert((p[next].reg & 0x1) == 0);
+            cUnit->regPool->nextFPTemp += 2;
+            return p[next].reg;
+        }
+        next += 2;
+    }
+    next = start;
+    for (i=0; i < numTemps; i+=2) {
+        if (next >= numTemps)
+            next = 0;
+        if (!p[next].inUse && !p[next+1].inUse) {
+            dvmCompilerClobber(cUnit, p[next].reg);
+            dvmCompilerClobber(cUnit, p[next+1].reg);
+            p[next].inUse = true;
+            p[next+1].inUse = true;
+            assert((p[next].reg+1) == p[next+1].reg);
+            assert((p[next].reg & 0x1) == 0);
+            cUnit->regPool->nextFPTemp += 2;
+            return p[next].reg;
+        }
+        next += 2;
+    }
+    LOGE("No free temp registers");
+    dvmCompilerAbort(cUnit);
+    return -1;
+}
+
+/* Return a temp if one is available, -1 otherwise */
+extern int dvmCompilerAllocFreeTemp(CompilationUnit *cUnit)
+{
+    return allocTempBody(cUnit, cUnit->regPool->coreTemps,
+                         cUnit->regPool->numCoreTemps,
+                         &cUnit->regPool->nextCoreTemp, true);
+}
+
+extern int dvmCompilerAllocTemp(CompilationUnit *cUnit)
+{
+    return allocTempBody(cUnit, cUnit->regPool->coreTemps,
+                         cUnit->regPool->numCoreTemps,
+                         &cUnit->regPool->nextCoreTemp, true);
+}
+
+extern int dvmCompilerAllocTempFloat(CompilationUnit *cUnit)
+{
+    return allocTempBody(cUnit, cUnit->regPool->FPTemps,
+                         cUnit->regPool->numFPTemps,
+                         &cUnit->regPool->nextFPTemp, true);
+}
+
+static RegisterInfo *allocLiveBody(RegisterInfo *p, int numTemps, int sReg)
+{
+    int i;
+    if (sReg == -1)
+        return NULL;
+    for (i=0; i < numTemps; i++) {
+        if (p[i].live && (p[i].sReg == sReg)) {
+            p[i].inUse = true;
+            return &p[i];
+        }
+    }
+    return NULL;
+}
+
+static RegisterInfo *allocLive(CompilationUnit *cUnit, int sReg,
+                               int regClass)
+{
+    RegisterInfo *res = NULL;
+    switch(regClass) {
+        case kAnyReg:
+            res = allocLiveBody(cUnit->regPool->FPTemps,
+                                cUnit->regPool->numFPTemps, sReg);
+            if (res)
+                break;
+            /* Intentional fallthrough */
+        case kCoreReg:
+            res = allocLiveBody(cUnit->regPool->coreTemps,
+                                cUnit->regPool->numCoreTemps, sReg);
+            break;
+        case kFPReg:
+            res = allocLiveBody(cUnit->regPool->FPTemps,
+                                cUnit->regPool->numFPTemps, sReg);
+            break;
+        default:
+            LOGE("Invalid register type");
+            dvmCompilerAbort(cUnit);
+    }
+    return res;
+}
+
+extern void dvmCompilerFreeTemp(CompilationUnit *cUnit, int reg)
+{
+    RegisterInfo *p = cUnit->regPool->coreTemps;
+    int numTemps = cUnit->regPool->numCoreTemps;
+    int i;
+    for (i=0; i< numTemps; i++) {
+        if (p[i].reg == reg) {
+            p[i].inUse = false;
+            p[i].pair = false;
+            return;
+        }
+    }
+    p = cUnit->regPool->FPTemps;
+    numTemps = cUnit->regPool->numFPTemps;
+    for (i=0; i< numTemps; i++) {
+        if (p[i].reg == reg) {
+            p[i].inUse = false;
+            p[i].pair = false;
+            return;
+        }
+    }
+    LOGE("Tried to free a non-existant temp: r%d",reg);
+    dvmCompilerAbort(cUnit);
+}
+
+/*
+ * FIXME - this needs to also check the preserved pool once we start
+ * using preserved registers.
+ */
+extern RegisterInfo *dvmCompilerIsLive(CompilationUnit *cUnit, int reg)
+{
+    RegisterInfo *p = cUnit->regPool->coreTemps;
+    int numTemps = cUnit->regPool->numCoreTemps;
+    int i;
+    for (i=0; i< numTemps; i++) {
+        if (p[i].reg == reg) {
+            return p[i].live ? &p[i] : NULL;
+        }
+    }
+    p = cUnit->regPool->FPTemps;
+    numTemps = cUnit->regPool->numFPTemps;
+    for (i=0; i< numTemps; i++) {
+        if (p[i].reg == reg) {
+            return p[i].live ? &p[i] : NULL;
+        }
+    }
+    return NULL;
+}
+
+extern RegisterInfo *dvmCompilerIsTemp(CompilationUnit *cUnit, int reg)
+{
+    RegisterInfo *p = cUnit->regPool->coreTemps;
+    int numTemps = cUnit->regPool->numCoreTemps;
+    int i;
+    for (i=0; i< numTemps; i++) {
+        if (p[i].reg == reg) {
+            return &p[i];
+        }
+    }
+    p = cUnit->regPool->FPTemps;
+    numTemps = cUnit->regPool->numFPTemps;
+    for (i=0; i< numTemps; i++) {
+        if (p[i].reg == reg) {
+            return &p[i];
+        }
+    }
+    return NULL;
+}
+
+/*
+ * Similar to dvmCompilerAllocTemp(), but forces the allocation of a specific
+ * register.  No check is made to see if the register was previously
+ * allocated.  Use with caution.
+ */
+extern void dvmCompilerLockTemp(CompilationUnit *cUnit, int reg)
+{
+    RegisterInfo *p = cUnit->regPool->coreTemps;
+    int numTemps = cUnit->regPool->numCoreTemps;
+    int i;
+    for (i=0; i< numTemps; i++) {
+        if (p[i].reg == reg) {
+            p[i].inUse = true;
+            p[i].live = false;
+            return;
+        }
+    }
+    p = cUnit->regPool->FPTemps;
+    numTemps = cUnit->regPool->numFPTemps;
+    for (i=0; i< numTemps; i++) {
+        if (p[i].reg == reg) {
+            p[i].inUse = true;
+            p[i].live = false;
+            return;
+        }
+    }
+    LOGE("Tried to lock a non-existant temp: r%d",reg);
+    dvmCompilerAbort(cUnit);
+}
+
+/* Clobber all regs that might be used by an external C call */
+extern void dvmCompilerClobberCallRegs(CompilationUnit *cUnit)
+{
+    dvmCompilerClobber(cUnit, r_ZERO);
+    dvmCompilerClobber(cUnit, r_AT);
+    dvmCompilerClobber(cUnit, r_V0);
+    dvmCompilerClobber(cUnit, r_V1);
+    dvmCompilerClobber(cUnit, r_A0);
+    dvmCompilerClobber(cUnit, r_A1);
+    dvmCompilerClobber(cUnit, r_A2);
+    dvmCompilerClobber(cUnit, r_A3);
+    dvmCompilerClobber(cUnit, r_T0);
+    dvmCompilerClobber(cUnit, r_T1);
+    dvmCompilerClobber(cUnit, r_T2);
+    dvmCompilerClobber(cUnit, r_T3);
+    dvmCompilerClobber(cUnit, r_T4);
+    dvmCompilerClobber(cUnit, r_T5);
+    dvmCompilerClobber(cUnit, r_T6);
+    dvmCompilerClobber(cUnit, r_T7);
+    dvmCompilerClobber(cUnit, r_T8);
+    dvmCompilerClobber(cUnit, r_T9);
+    dvmCompilerClobber(cUnit, r_K0);
+    dvmCompilerClobber(cUnit, r_K1);
+    dvmCompilerClobber(cUnit, r_GP);
+    dvmCompilerClobber(cUnit, r_FP);
+    dvmCompilerClobber(cUnit, r_RA);
+    dvmCompilerClobber(cUnit, r_HI);
+    dvmCompilerClobber(cUnit, r_LO);
+    dvmCompilerClobber(cUnit, r_F0);
+    dvmCompilerClobber(cUnit, r_F1);
+    dvmCompilerClobber(cUnit, r_F2);
+    dvmCompilerClobber(cUnit, r_F3);
+    dvmCompilerClobber(cUnit, r_F4);
+    dvmCompilerClobber(cUnit, r_F5);
+    dvmCompilerClobber(cUnit, r_F6);
+    dvmCompilerClobber(cUnit, r_F7);
+    dvmCompilerClobber(cUnit, r_F8);
+    dvmCompilerClobber(cUnit, r_F9);
+    dvmCompilerClobber(cUnit, r_F10);
+    dvmCompilerClobber(cUnit, r_F11);
+    dvmCompilerClobber(cUnit, r_F12);
+    dvmCompilerClobber(cUnit, r_F13);
+    dvmCompilerClobber(cUnit, r_F14);
+    dvmCompilerClobber(cUnit, r_F15);
+}
+
+/* Clobber all of the temps that might be used by a handler. */
+extern void dvmCompilerClobberHandlerRegs(CompilationUnit *cUnit)
+{
+    //TUNING: reduce the set of regs used by handlers.  Only a few need lots.
+    dvmCompilerClobberCallRegs(cUnit);
+    dvmCompilerClobber(cUnit, r_S0);
+    dvmCompilerClobber(cUnit, r_S1);
+    dvmCompilerClobber(cUnit, r_S2);
+    dvmCompilerClobber(cUnit, r_S3);
+    dvmCompilerClobber(cUnit, r_S4);
+    dvmCompilerClobber(cUnit, r_S5);
+    dvmCompilerClobber(cUnit, r_S6);
+    dvmCompilerClobber(cUnit, r_S7);
+}
+
+extern void dvmCompilerResetDef(CompilationUnit *cUnit, int reg)
+{
+    RegisterInfo *p = getRegInfo(cUnit, reg);
+    p->defStart = NULL;
+    p->defEnd = NULL;
+}
+
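+/*
+ * Editor's note (descriptive, not part of this change): rather than
+ * unlinking a dead def sequence, nullifyRange turns every LIR between
+ * start and finish into a nop so the instruction list stays intact.
+ */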
+static void nullifyRange(CompilationUnit *cUnit, LIR *start, LIR *finish,
+                         int sReg1, int sReg2)
+{
+    if (start && finish) {
+        LIR *p;
+        assert(sReg1 == sReg2);
+        for (p = start; ;p = p->next) {
+            ((MipsLIR *)p)->flags.isNop = true;
+            if (p == finish)
+                break;
+        }
+    }
+}
+
+/*
+ * Mark the beginning and end LIR of a def sequence.  Note that
+ * on entry start points to the LIR prior to the beginning of the
+ * sequence.
+ */
+extern void dvmCompilerMarkDef(CompilationUnit *cUnit, RegLocation rl,
+                    LIR *start, LIR *finish)
+{
+    assert(!rl.wide);
+    assert(start && start->next);
+    assert(finish);
+    RegisterInfo *p = getRegInfo(cUnit, rl.lowReg);
+    p->defStart = start->next;
+    p->defEnd = finish;
+}
+
+/*
+ * Mark the beginning and end LIR of a def sequence.  Note that
+ * on entry start points to the LIR prior to the beginning of the
+ * sequence.
+ */
+extern void dvmCompilerMarkDefWide(CompilationUnit *cUnit, RegLocation rl,
+                        LIR *start, LIR *finish)
+{
+    assert(rl.wide);
+    assert(start && start->next);
+    assert(finish);
+    RegisterInfo *p = getRegInfo(cUnit, rl.lowReg);
+    dvmCompilerResetDef(cUnit, rl.highReg);  // Only track low of pair
+    p->defStart = start->next;
+    p->defEnd = finish;
+}
+
+extern RegLocation dvmCompilerWideToNarrow(CompilationUnit *cUnit,
+                                           RegLocation rl)
+{
+    assert(rl.wide);
+    if (rl.location == kLocPhysReg) {
+        RegisterInfo *infoLo = getRegInfo(cUnit, rl.lowReg);
+        RegisterInfo *infoHi = getRegInfo(cUnit, rl.highReg);
+        if (!infoLo->pair) {
+            dumpRegPool(cUnit->regPool->coreTemps,
+                        cUnit->regPool->numCoreTemps);
+            assert(infoLo->pair);
+        }
+        if (!infoHi->pair) {
+            dumpRegPool(cUnit->regPool->coreTemps,
+                        cUnit->regPool->numCoreTemps);
+            assert(infoHi->pair);
+        }
+        assert(infoLo->pair);
+        assert(infoHi->pair);
+        assert(infoLo->partner == infoHi->reg);
+        assert(infoHi->partner == infoLo->reg);
+        infoLo->pair = false;
+        infoHi->pair = false;
+        infoLo->defStart = NULL;
+        infoLo->defEnd = NULL;
+        infoHi->defStart = NULL;
+        infoHi->defEnd = NULL;
+    }
+#ifndef HAVE_LITTLE_ENDIAN
+    else if (rl.location == kLocDalvikFrame) {
+        rl.sRegLow = dvmCompilerSRegHi(rl.sRegLow);
+    }
+#endif
+
+    rl.wide = false;
+    return rl;
+}
+
+extern void dvmCompilerResetDefLoc(CompilationUnit *cUnit, RegLocation rl)
+{
+    assert(!rl.wide);
+    if (!(gDvmJit.disableOpt & (1 << kSuppressLoads))) {
+        RegisterInfo *p = getRegInfo(cUnit, rl.lowReg);
+        assert(!p->pair);
+        nullifyRange(cUnit, p->defStart, p->defEnd,
+                     p->sReg, rl.sRegLow);
+    }
+    dvmCompilerResetDef(cUnit, rl.lowReg);
+}
+
+extern void dvmCompilerResetDefLocWide(CompilationUnit *cUnit, RegLocation rl)
+{
+    assert(rl.wide);
+    if (!(gDvmJit.disableOpt & (1 << kSuppressLoads))) {
+        RegisterInfo *p = getRegInfo(cUnit, rl.lowReg);
+        assert(p->pair);
+        nullifyRange(cUnit, p->defStart, p->defEnd,
+                     p->sReg, rl.sRegLow);
+    }
+    dvmCompilerResetDef(cUnit, rl.lowReg);
+    dvmCompilerResetDef(cUnit, rl.highReg);
+}
+
+extern void dvmCompilerResetDefTracking(CompilationUnit *cUnit)
+{
+    int i;
+    for (i=0; i< cUnit->regPool->numCoreTemps; i++) {
+        dvmCompilerResetDef(cUnit, cUnit->regPool->coreTemps[i].reg);
+    }
+    for (i=0; i< cUnit->regPool->numFPTemps; i++) {
+        dvmCompilerResetDef(cUnit, cUnit->regPool->FPTemps[i].reg);
+    }
+}
+
+extern void dvmCompilerClobberAllRegs(CompilationUnit *cUnit)
+{
+    int i;
+    for (i=0; i< cUnit->regPool->numCoreTemps; i++) {
+        dvmCompilerClobber(cUnit, cUnit->regPool->coreTemps[i].reg);
+    }
+    for (i=0; i< cUnit->regPool->numFPTemps; i++) {
+        dvmCompilerClobber(cUnit, cUnit->regPool->FPTemps[i].reg);
+    }
+}
+
+/* To be used when explicitly managing register use */
+extern void dvmCompilerLockAllTemps(CompilationUnit *cUnit)
+{
+    int i;
+    for (i=0; i< cUnit->regPool->numCoreTemps; i++) {
+        dvmCompilerLockTemp(cUnit, cUnit->regPool->coreTemps[i].reg);
+    }
+}
+
+// Make sure nothing is live and dirty
+static void flushAllRegsBody(CompilationUnit *cUnit, RegisterInfo *info,
+                             int numRegs)
+{
+    int i;
+    for (i=0; i < numRegs; i++) {
+        if (info[i].live && info[i].dirty) {
+            if (info[i].pair) {
+                flushRegWide(cUnit, info[i].reg, info[i].partner);
+            } else {
+                flushReg(cUnit, info[i].reg);
+            }
+        }
+    }
+}
+
+extern void dvmCompilerFlushAllRegs(CompilationUnit *cUnit)
+{
+    flushAllRegsBody(cUnit, cUnit->regPool->coreTemps,
+                     cUnit->regPool->numCoreTemps);
+    flushAllRegsBody(cUnit, cUnit->regPool->FPTemps,
+                     cUnit->regPool->numFPTemps);
+    dvmCompilerClobberAllRegs(cUnit);
+}
+
+
+//TUNING: rewrite all of this reg stuff.  Probably use an attribute table
+static bool regClassMatches(int regClass, int reg)
+{
+    if (regClass == kAnyReg) {
+        return true;
+    } else if (regClass == kCoreReg) {
+        return !FPREG(reg);
+    } else {
+        return FPREG(reg);
+    }
+}
+
+extern void dvmCompilerMarkLive(CompilationUnit *cUnit, int reg, int sReg)
+{
+    RegisterInfo *info = getRegInfo(cUnit, reg);
+    if ((info->reg == reg) && (info->sReg == sReg) && info->live) {
+        return;  /* already live */
+    } else if (sReg != INVALID_SREG) {
+        dvmCompilerClobberSReg(cUnit, sReg);
+        info->live = true;
+    } else {
+        /* Can't be live if no associated sReg */
+        info->live = false;
+    }
+    info->sReg = sReg;
+}
+
+extern void dvmCompilerMarkPair(CompilationUnit *cUnit, int lowReg, int highReg)
+{
+    RegisterInfo *infoLo = getRegInfo(cUnit, lowReg);
+    RegisterInfo *infoHi = getRegInfo(cUnit, highReg);
+    infoLo->pair = infoHi->pair = true;
+    infoLo->partner = highReg;
+    infoHi->partner = lowReg;
+}
+
+extern void dvmCompilerMarkClean(CompilationUnit *cUnit, int reg)
+{
+    RegisterInfo *info = getRegInfo(cUnit, reg);
+    info->dirty = false;
+}
+
+extern void dvmCompilerMarkDirty(CompilationUnit *cUnit, int reg)
+{
+    RegisterInfo *info = getRegInfo(cUnit, reg);
+    info->dirty = true;
+}
+
+extern void dvmCompilerMarkInUse(CompilationUnit *cUnit, int reg)
+{
+    RegisterInfo *info = getRegInfo(cUnit, reg);
+    info->inUse = true;
+}
+
+void copyRegInfo(CompilationUnit *cUnit, int newReg, int oldReg)
+{
+    RegisterInfo *newInfo = getRegInfo(cUnit, newReg);
+    RegisterInfo *oldInfo = getRegInfo(cUnit, oldReg);
+    *newInfo = *oldInfo;
+    newInfo->reg = newReg;
+}
+
+/*
+ * Return an updated location record with current in-register status.
+ * If the value lives in live temps, reflect that fact.  No code
+ * is generated.  If the live value is part of an older pair,
+ * clobber both low and high.
+ * TUNING: clobbering both is a bit heavy-handed, but the alternative
+ * is a bit complex when dealing with FP regs.  Examine code to see
+ * if it's worthwhile trying to be more clever here.
+ */
+extern RegLocation dvmCompilerUpdateLoc(CompilationUnit *cUnit, RegLocation loc)
+{
+    assert(!loc.wide);
+    if (loc.location == kLocDalvikFrame) {
+        RegisterInfo *infoLo = allocLive(cUnit, loc.sRegLow, kAnyReg);
+        if (infoLo) {
+            if (infoLo->pair) {
+                dvmCompilerClobber(cUnit, infoLo->reg);
+                dvmCompilerClobber(cUnit, infoLo->partner);
+            } else {
+                loc.lowReg = infoLo->reg;
+                loc.location = kLocPhysReg;
+            }
+        }
+    }
+
+    return loc;
+}
+
+/* see comments for updateLoc */
+extern RegLocation dvmCompilerUpdateLocWide(CompilationUnit *cUnit,
+                                            RegLocation loc)
+{
+    assert(loc.wide);
+    if (loc.location == kLocDalvikFrame) {
+        // Are the dalvik regs already live in physical registers?
+        RegisterInfo *infoLo = allocLive(cUnit, loc.sRegLow, kAnyReg);
+        RegisterInfo *infoHi = allocLive(cUnit,
+              dvmCompilerSRegHi(loc.sRegLow), kAnyReg);
+        bool match = true;
+        match = match && (infoLo != NULL);
+        match = match && (infoHi != NULL);
+        // Are they both core or both FP?
+        match = match && (FPREG(infoLo->reg) == FPREG(infoHi->reg));
+        // If a pair of floating point singles, are they properly aligned?
+        if (match && FPREG(infoLo->reg)) {
+            match &= ((infoLo->reg & 0x1) == 0);
+            match &= ((infoHi->reg - infoLo->reg) == 1);
+        }
+        // If previously used as a pair, it is the same pair?
+        if (match && (infoLo->pair || infoHi->pair)) {
+            match = (infoLo->pair == infoHi->pair);
+            match &= ((infoLo->reg == infoHi->partner) &&
+                      (infoHi->reg == infoLo->partner));
+        }
+        if (match) {
+            // Can reuse - update the register usage info
+            loc.lowReg = infoLo->reg;
+            loc.highReg = infoHi->reg;
+            loc.location = kLocPhysReg;
+            dvmCompilerMarkPair(cUnit, loc.lowReg, loc.highReg);
+            assert(!FPREG(loc.lowReg) || ((loc.lowReg & 0x1) == 0));
+            return loc;
+        }
+        // Can't easily reuse - clobber any overlaps
+        if (infoLo) {
+            dvmCompilerClobber(cUnit, infoLo->reg);
+            if (infoLo->pair)
+                dvmCompilerClobber(cUnit, infoLo->partner);
+        }
+        if (infoHi) {
+            dvmCompilerClobber(cUnit, infoHi->reg);
+            if (infoHi->pair)
+                dvmCompilerClobber(cUnit, infoHi->partner);
+        }
+    }
+
+    return loc;
+}
+
+static RegLocation evalLocWide(CompilationUnit *cUnit, RegLocation loc,
+                               int regClass, bool update)
+{
+    assert(loc.wide);
+    int newRegs;
+    int lowReg;
+    int highReg;
+
+    loc = dvmCompilerUpdateLocWide(cUnit, loc);
+
+    /* If already in registers, we can assume proper form.  Right reg class? */
+    if (loc.location == kLocPhysReg) {
+        assert(FPREG(loc.lowReg) == FPREG(loc.highReg));
+        assert(!FPREG(loc.lowReg) || ((loc.lowReg & 0x1) == 0));
+        if (!regClassMatches(regClass, loc.lowReg)) {
+            /* Wrong register class.  Reallocate and copy */
+            newRegs = dvmCompilerAllocTypedTempPair(cUnit, loc.fp, regClass);
+            lowReg = newRegs & 0xff;
+            highReg = (newRegs >> 8) & 0xff;
+            dvmCompilerRegCopyWide(cUnit, lowReg, highReg, loc.lowReg,
+                                   loc.highReg);
+            copyRegInfo(cUnit, lowReg, loc.lowReg);
+            copyRegInfo(cUnit, highReg, loc.highReg);
+            dvmCompilerClobber(cUnit, loc.lowReg);
+            dvmCompilerClobber(cUnit, loc.highReg);
+            loc.lowReg = lowReg;
+            loc.highReg = highReg;
+            dvmCompilerMarkPair(cUnit, loc.lowReg, loc.highReg);
+            assert(!FPREG(loc.lowReg) || ((loc.lowReg & 0x1) == 0));
+        }
+        return loc;
+    }
+
+    assert((loc.location != kLocRetval) || (loc.sRegLow == INVALID_SREG));
+    assert((loc.location != kLocRetval) ||
+           (dvmCompilerSRegHi(loc.sRegLow) == INVALID_SREG));
+
+    newRegs = dvmCompilerAllocTypedTempPair(cUnit, loc.fp, regClass);
+    loc.lowReg = newRegs & 0xff;
+    loc.highReg = (newRegs >> 8) & 0xff;
+
+    dvmCompilerMarkPair(cUnit, loc.lowReg, loc.highReg);
+    if (update) {
+        loc.location = kLocPhysReg;
+        dvmCompilerMarkLive(cUnit, loc.lowReg, loc.sRegLow);
+        dvmCompilerMarkLive(cUnit, loc.highReg, dvmCompilerSRegHi(loc.sRegLow));
+    }
+    assert(!FPREG(loc.lowReg) || ((loc.lowReg & 0x1) == 0));
+    return loc;
+}
+
+extern RegLocation dvmCompilerEvalLoc(CompilationUnit *cUnit, RegLocation loc,
+                                      int regClass, bool update)
+{
+    int newReg;
+    if (loc.wide)
+        return evalLocWide(cUnit, loc, regClass, update);
+    loc = dvmCompilerUpdateLoc(cUnit, loc);
+
+    if (loc.location == kLocPhysReg) {
+        if (!regClassMatches(regClass, loc.lowReg)) {
+            /* Wrong register class.  Realloc, copy and transfer ownership */
+            newReg = dvmCompilerAllocTypedTemp(cUnit, loc.fp, regClass);
+            dvmCompilerRegCopy(cUnit, newReg, loc.lowReg);
+            copyRegInfo(cUnit, newReg, loc.lowReg);
+            dvmCompilerClobber(cUnit, loc.lowReg);
+            loc.lowReg = newReg;
+        }
+        return loc;
+    }
+
+    assert((loc.location != kLocRetval) || (loc.sRegLow == INVALID_SREG));
+
+    newReg = dvmCompilerAllocTypedTemp(cUnit, loc.fp, regClass);
+    loc.lowReg = newReg;
+
+    if (update) {
+        loc.location = kLocPhysReg;
+        dvmCompilerMarkLive(cUnit, loc.lowReg, loc.sRegLow);
+    }
+    return loc;
+}
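+
+/*
+ * Illustrative sketch (editor's addition, not part of this change): the
+ * typical way codegen would combine these helpers for a narrow unary op.
+ * Real code layers load/store helpers on top; this only shows the location
+ * bookkeeping.
+ */
+#if 0
+static void exampleUnaryOp(CompilationUnit *cUnit, MIR *mir)
+{
+    RegLocation rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
+    RegLocation rlDest = dvmCompilerGetDest(cUnit, mir, 0);
+    rlSrc = dvmCompilerEvalLoc(cUnit, rlSrc, kCoreReg, true);
+    rlDest = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
+    /* ... emit the operation on rlDest.lowReg / rlSrc.lowReg ... */
+    dvmCompilerMarkLive(cUnit, rlDest.lowReg, rlDest.sRegLow);
+    dvmCompilerMarkDirty(cUnit, rlDest.lowReg);
+}
+#endif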
+
+static inline int getDestSSAName(MIR *mir, int num)
+{
+    assert(mir->ssaRep->numDefs > num);
+    return mir->ssaRep->defs[num];
+}
+
+// Get the LocRecord associated with an SSA name use.
+extern RegLocation dvmCompilerGetSrc(CompilationUnit *cUnit, MIR *mir, int num)
+{
+    RegLocation loc = cUnit->regLocation[
+         SREG(cUnit, dvmCompilerSSASrc(mir, num))];
+    loc.fp = cUnit->regLocation[dvmCompilerSSASrc(mir, num)].fp;
+    loc.wide = false;
+    return loc;
+}
+
+// Get the LocRecord associated with an SSA name def.
+extern RegLocation dvmCompilerGetDest(CompilationUnit *cUnit, MIR *mir,
+                                      int num)
+{
+    RegLocation loc = cUnit->regLocation[SREG(cUnit, getDestSSAName(mir, num))];
+    loc.fp = cUnit->regLocation[getDestSSAName(mir, num)].fp;
+    loc.wide = false;
+    return loc;
+}
+
+static RegLocation getLocWide(CompilationUnit *cUnit, MIR *mir,
+                              int low, int high, bool isSrc)
+{
+    RegLocation lowLoc;
+    RegLocation highLoc;
+    /* Copy loc record for low word and patch in data from high word */
+    if (isSrc) {
+        lowLoc = dvmCompilerGetSrc(cUnit, mir, low);
+        highLoc = dvmCompilerGetSrc(cUnit, mir, high);
+    } else {
+        lowLoc = dvmCompilerGetDest(cUnit, mir, low);
+        highLoc = dvmCompilerGetDest(cUnit, mir, high);
+    }
+    /* Avoid this case by either promoting both or neither. */
+    assert(lowLoc.location == highLoc.location);
+    if (lowLoc.location == kLocPhysReg) {
+        /* This case shouldn't happen if we've named correctly */
+        assert(lowLoc.fp == highLoc.fp);
+    }
+    lowLoc.wide = true;
+    lowLoc.highReg = highLoc.lowReg;
+    return lowLoc;
+}
+
+extern RegLocation dvmCompilerGetDestWide(CompilationUnit *cUnit, MIR *mir,
+                                          int low, int high)
+{
+    return getLocWide(cUnit, mir, low, high, false);
+}
+
+extern RegLocation dvmCompilerGetSrcWide(CompilationUnit *cUnit, MIR *mir,
+                                         int low, int high)
+{
+    return getLocWide(cUnit, mir, low, high, true);
+}
+
+extern RegLocation dvmCompilerGetReturnWide(CompilationUnit *cUnit)
+{
+    RegLocation res = LOC_C_RETURN_WIDE;
+    dvmCompilerClobber(cUnit, r_V0);
+    dvmCompilerClobber(cUnit, r_V1);
+    dvmCompilerMarkInUse(cUnit, r_V0);
+    dvmCompilerMarkInUse(cUnit, r_V1);
+    dvmCompilerMarkPair(cUnit, res.lowReg, res.highReg);
+    return res;
+}
+
+extern RegLocation dvmCompilerGetReturn(CompilationUnit *cUnit)
+{
+    RegLocation res = LOC_C_RETURN;
+    dvmCompilerClobber(cUnit, r_V0);
+    dvmCompilerMarkInUse(cUnit, r_V0);
+    return res;
+}
+
+extern RegLocation dvmCompilerGetReturnWideAlt(CompilationUnit *cUnit)
+{
+    RegLocation res = LOC_C_RETURN_WIDE_ALT;
+    dvmCompilerClobber(cUnit, r_F0);
+    dvmCompilerClobber(cUnit, r_F1);
+    dvmCompilerMarkInUse(cUnit, r_F0);
+    dvmCompilerMarkInUse(cUnit, r_F1);
+    dvmCompilerMarkPair(cUnit, res.lowReg, res.highReg);
+    return res;
+}
+
+extern RegLocation dvmCompilerGetReturnAlt(CompilationUnit *cUnit)
+{
+    RegLocation res = LOC_C_RETURN_ALT;
+    dvmCompilerClobber(cUnit, r_F0);
+    dvmCompilerMarkInUse(cUnit, r_F0);
+    return res;
+}
+
+/* Kill the corresponding bit in the null-checked register list */
+extern void dvmCompilerKillNullCheckedLoc(CompilationUnit *cUnit,
+                                          RegLocation loc)
+{
+    if (loc.location != kLocRetval) {
+        assert(loc.sRegLow != INVALID_SREG);
+        dvmClearBit(cUnit->regPool->nullCheckedRegs, loc.sRegLow);
+        if (loc.wide) {
+            assert(dvmCompilerSRegHi(loc.sRegLow) != INVALID_SREG);
+            dvmClearBit(cUnit->regPool->nullCheckedRegs,
+                        dvmCompilerSRegHi(loc.sRegLow));
+        }
+    }
+}
+
+extern void dvmCompilerFlushRegWideForV5TEVFP(CompilationUnit *cUnit,
+                                              int reg1, int reg2)
+{
+    flushRegWide(cUnit, reg1, reg2);
+}
+
+extern void dvmCompilerFlushRegForV5TEVFP(CompilationUnit *cUnit, int reg)
+{
+    flushReg(cUnit, reg);
+}
diff --git a/vm/compiler/codegen/mips/mips/ArchVariant.cpp b/vm/compiler/codegen/mips/mips/ArchVariant.cpp
new file mode 100644
index 0000000..51a590a
--- /dev/null
+++ b/vm/compiler/codegen/mips/mips/ArchVariant.cpp
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+extern "C" void dvmCompilerTemplateStart(void);
+
+/*
+ * This file is included by Codegen.cpp, and implements architecture
+ * variant-specific code.
+ */
+
+/*
+ * Determine the initial instruction set to be used for this trace.
+ * Later components may decide to change this.
+ */
+JitInstructionSetType dvmCompilerInstructionSet(void)
+{
+    return DALVIK_JIT_MIPS;
+}
+
+/* First, declare dvmCompiler_TEMPLATE_XXX for each template */
+#define JIT_TEMPLATE(X) extern "C" void dvmCompiler_TEMPLATE_##X();
+#include "../../../template/mips/TemplateOpList.h"
+#undef JIT_TEMPLATE
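+
+/*
+ * Illustrative expansion (assuming TemplateOpList.h lists, e.g.,
+ * JIT_TEMPLATE(CMP_LONG) and JIT_TEMPLATE(RETURN)):
+ *
+ *     extern "C" void dvmCompiler_TEMPLATE_CMP_LONG();
+ *     extern "C" void dvmCompiler_TEMPLATE_RETURN();
+ */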
+
+/* Architecture-specific initializations and checks go here */
+bool dvmCompilerArchVariantInit(void)
+{
+    int i = 0;
+
+    /*
+     * Then, populate the templateEntryOffsets array with the offsets from
+     * the dvmCompilerTemplateStart symbol for each template.
+     */
+#define JIT_TEMPLATE(X) templateEntryOffsets[i++] = \
+    (intptr_t) dvmCompiler_TEMPLATE_##X - (intptr_t) dvmCompilerTemplateStart;
+#include "../../../template/mips/TemplateOpList.h"
+#undef JIT_TEMPLATE
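+
+    /*
+     * Illustrative expansion of the include above (same example entries):
+     *
+     *     templateEntryOffsets[i++] = (intptr_t) dvmCompiler_TEMPLATE_CMP_LONG
+     *             - (intptr_t) dvmCompilerTemplateStart;
+     */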
+
+    /* Target-specific configuration */
+    gDvmJit.jitTableSize = 1 << 9; // 512
+    gDvmJit.jitTableMask = gDvmJit.jitTableSize - 1;
+    gDvmJit.threshold = 200;
+    gDvmJit.codeCacheSize = 512*1024;
+
+#if defined(WITH_SELF_VERIFICATION)
+    /* Force into blocking mode */
+    gDvmJit.blockingMode = true;
+    gDvm.nativeDebuggerActive = true;
+#endif
+
+    /* Codegen-specific assumptions */
+    assert(OFFSETOF_MEMBER(ClassObject, vtable) < 128 &&
+           (OFFSETOF_MEMBER(ClassObject, vtable) & 0x3) == 0);
+    assert(OFFSETOF_MEMBER(ArrayObject, length) < 128 &&
+           (OFFSETOF_MEMBER(ArrayObject, length) & 0x3) == 0);
+    assert(OFFSETOF_MEMBER(ArrayObject, contents) < 256);
+
+    /* Up to 5 args are pushed on top of FP - sizeofStackSaveArea */
+    assert(sizeof(StackSaveArea) < 236);
+
+    /*
+     * The EA is calculated as "Rn + (imm5 << 2)"; make sure that the last
+     * offset in the struct is less than 128.
+     */
+    assert((offsetof(Thread, jitToInterpEntries) +
+            sizeof(struct JitToInterpEntries)) < 128);
+
+    /* FIXME - comment out the following to enable method-based JIT */
+    gDvmJit.disableOpt |= (1 << kMethodJit);
+
+    // Make sure all threads have current values
+    dvmJitUpdateThreadStateAll();
+
+    return true;
+}
+
+int dvmCompilerTargetOptHint(int key)
+{
+    int res;
+    switch (key) {
+        case kMaxHoistDistance:
+            res = 2;
+            break;
+        default:
+            LOGE("Unknown target optimization hint key: %d",key);
+            res = 0;
+    }
+    return res;
+}
+
+void dvmCompilerGenMemBarrier(CompilationUnit *cUnit, int barrierKind)
+{
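+    /*
+     * Note: this emits no instructions; it is a compiler scheduling barrier
+     * only, not a hardware memory barrier (no MIPS "sync" is issued).
+     */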
+    __asm__ __volatile__ ("" : : : "memory");
+}
diff --git a/vm/compiler/codegen/mips/mips/ArchVariant.h b/vm/compiler/codegen/mips/mips/ArchVariant.h
new file mode 100644
index 0000000..ec04dd8
--- /dev/null
+++ b/vm/compiler/codegen/mips/mips/ArchVariant.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DALVIK_VM_COMPILER_CODEGEN_MIPS_ARCHVARIANT_H_
+#define DALVIK_VM_COMPILER_CODEGEN_MIPS_ARCHVARIANT_H_
+
+/* Create the TemplateOpcode enum */
+#define JIT_TEMPLATE(X) TEMPLATE_##X,
+enum TemplateOpcode {
+#include "../../../template/mips/TemplateOpList.h"
+/*
+ * For example,
+ *     TEMPLATE_CMP_LONG,
+ *     TEMPLATE_RETURN,
+ *     ...
+ */
+    TEMPLATE_LAST_MARK,
+};
+#undef JIT_TEMPLATE
+
+#endif  // DALVIK_VM_COMPILER_CODEGEN_MIPS_ARCHVARIANT_H_
diff --git a/vm/compiler/codegen/mips/mips/CallingConvention.S b/vm/compiler/codegen/mips/mips/CallingConvention.S
new file mode 100644
index 0000000..ab97655
--- /dev/null
+++ b/vm/compiler/codegen/mips/mips/CallingConvention.S
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Save & restore for callee-save FP registers.
+ * On entry:
+ *    a0 : pointer to save area of JIT_CALLEE_SAVE_WORD_SIZE
+ */
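+
+/*
+ * Note: both entry points below are currently no-ops on this target; no FP
+ * registers are actually saved or restored, and each simply returns.
+ */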
+    .text
+    .align 2
+    .global dvmJitCalleeSave
+    .type dvmJitCalleeSave, %function
+dvmJitCalleeSave:
+    jr $31
+
+    .global dvmJitCalleeRestore
+    .type dvmJitCalleeRestore, %function
+dvmJitCalleeRestore:
+    jr $31
diff --git a/vm/compiler/codegen/mips/mips/Codegen.cpp b/vm/compiler/codegen/mips/mips/Codegen.cpp
new file mode 100644
index 0000000..2c7456e
--- /dev/null
+++ b/vm/compiler/codegen/mips/mips/Codegen.cpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define _CODEGEN_C
+
+#include "Dalvik.h"
+#include "interp/InterpDefs.h"
+#include "libdex/DexOpcodes.h"
+#include "compiler/CompilerInternals.h"
+#include "compiler/codegen/mips/MipsLIR.h"
+#include "mterp/common/FindInterface.h"
+#include "compiler/codegen/mips/Ralloc.h"
+#include "compiler/codegen/mips/Codegen.h"
+#include "compiler/Loop.h"
+#include "ArchVariant.h"
+
+/* Architecture-independent building blocks */
+#include "../CodegenCommon.cpp"
+
+/* MIPS32-specific factory utilities */
+#include "../Mips32/Factory.cpp"
+/* Factory utilities dependent on arch-specific features */
+#include "../CodegenFactory.cpp"
+
+/* MIPS32-specific codegen routines */
+#include "../Mips32/Gen.cpp"
+/* MIPS32 + portable FP codegen routines */
+#include "../FP/MipsFP.cpp"
+
+/* MIPS32-specific register allocation */
+#include "../Mips32/Ralloc.cpp"
+
+/* MIR2LIR dispatcher and architectural independent codegen routines */
+#include "../CodegenDriver.cpp"
+
+/* Dummy driver for method-based JIT */
+#include "MethodCodegenDriver.cpp"
+
+/* Architecture manifest */
+#include "ArchVariant.cpp"
diff --git a/vm/compiler/codegen/mips/mips/MethodCodegenDriver.cpp b/vm/compiler/codegen/mips/mips/MethodCodegenDriver.cpp
new file mode 100644
index 0000000..735a478
--- /dev/null
+++ b/vm/compiler/codegen/mips/mips/MethodCodegenDriver.cpp
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+void dvmCompilerMethodMIR2LIR(CompilationUnit *cUnit)
+{
+    LOGE("Method-based JIT not supported for the Mips target");
+    dvmAbort();
+}
diff --git a/vm/compiler/codegen/x86/ArchUtility.cpp b/vm/compiler/codegen/x86/ArchUtility.cpp
index f7c48d6..e7b7d70 100644
--- a/vm/compiler/codegen/x86/ArchUtility.cpp
+++ b/vm/compiler/codegen/x86/ArchUtility.cpp
@@ -28,3 +28,10 @@
 {
     return 0;
 }
+
+/* Target-specific cache clearing */
+void dvmCompilerCacheClear(char *start, size_t size)
+{
+    /* 0 is an invalid opcode for x86. */
+    memset(start, 0, size);
+}
diff --git a/vm/compiler/template/armv5te/TEMPLATE_CMPL_DOUBLE.S b/vm/compiler/template/armv5te/TEMPLATE_CMPL_DOUBLE.S
index 4fd5a71..23614e9 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_CMPL_DOUBLE.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_CMPL_DOUBLE.S
@@ -16,8 +16,8 @@
     /* op vAA, vBB, vCC */
     push    {r0-r3}                     @ save operands
     mov     r11, lr                     @ save return address
-    mov     lr, pc
-    ldr     pc, .L__aeabi_cdcmple       @ PIC way of "bl __aeabi_cdcmple"
+    ldr     ip, .L__aeabi_cdcmple       @ PIC way of "bl __aeabi_cdcmple"
+    blx     ip
     bhi     .L${opcode}_gt_or_nan       @ C set and Z clear, disambiguate
     mvncc   r0, #0                      @ (less than) r1<- -1
     moveq   r0, #0                      @ (equal) r1<- 0, trumps less than
@@ -30,8 +30,8 @@
 .L${opcode}_gt_or_nan:
     pop     {r2-r3}                     @ restore operands in reverse order
     pop     {r0-r1}                     @ restore operands in reverse order
-    mov     lr, pc
-    ldr     pc, .L__aeabi_cdcmple       @ r0<- Z set if eq, C clear if <
+    ldr     ip, .L__aeabi_cdcmple       @ r0<- Z set if eq, C clear if <
+    blx     ip
     movcc   r0, #1                      @ (greater than) r1<- 1
     bxcc    r11
     $naninst                            @ r1<- 1 or -1 for NaN
diff --git a/vm/compiler/template/armv5te/TEMPLATE_CMPL_FLOAT.S b/vm/compiler/template/armv5te/TEMPLATE_CMPL_FLOAT.S
index d0f2bec..f9293e6 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_CMPL_FLOAT.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_CMPL_FLOAT.S
@@ -36,8 +36,8 @@
     mov     r9, r0                      @ Save copies - we may need to redo
     mov     r10, r1
     mov     r11, lr                     @ save return address
-    mov     lr, pc
-    ldr     pc, .L__aeabi_cfcmple       @ cmp <=: C clear if <, Z set if eq
+    ldr     ip, .L__aeabi_cfcmple       @ cmp <=: C clear if <, Z set if eq
+    blx     ip
     bhi     .L${opcode}_gt_or_nan       @ C set and Z clear, disambiguate
     mvncc   r0, #0                      @ (less than) r0<- -1
     moveq   r0, #0                      @ (equal) r0<- 0, trumps less than
@@ -48,8 +48,8 @@
 .L${opcode}_gt_or_nan:
     mov     r0, r10                     @ restore in reverse order
     mov     r1, r9
-    mov     lr, pc
-    ldr     pc, .L__aeabi_cfcmple       @ r0<- Z set if eq, C clear if <
+    ldr     ip, .L__aeabi_cfcmple       @ r0<- Z set if eq, C clear if <
+    blx     ip
     movcc   r0, #1                      @ (greater than) r1<- 1
     bxcc    r11
     $naninst                            @ r1<- 1 or -1 for NaN
diff --git a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S
index 03b97a4..99a17ab 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S
@@ -41,8 +41,8 @@
     stmfd   sp!, {r0-r2,lr}             @ preserve clobbered live registers
     mov     r1, r6
     @ r0=methodToCall, r1=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceEnter
+    ldr     ip, .LdvmFastMethodTraceEnter
+    blx     ip
     ldmfd   sp!, {r0-r2,lr}             @ restore registers
 #endif
 
diff --git a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S
index 2a73c22..d8661d9 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S
@@ -44,8 +44,8 @@
     mov     r0, r2
     mov     r1, r6
     @ r0=JNIMethod, r1=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceEnter
+    ldr     ip, .LdvmFastMethodTraceEnter
+    blx     ip
     ldmfd   sp!, {r0-r3}                @ restore r0-r3
 #endif
 
@@ -54,8 +54,8 @@
 #if defined(TEMPLATE_INLINE_PROFILING)
     ldmfd   sp!, {r0-r1}                @ restore r2 and r6
     @ r0=JNIMethod, r1=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastNativeMethodTraceExit
+    ldr     ip, .LdvmFastNativeMethodTraceExit
+    blx     ip
 #endif
     @ native return; r10=newSaveArea
     @ equivalent to dvmPopJniLocals
diff --git a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S
index a7a0961..b7015eb 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S
@@ -48,8 +48,8 @@
     stmfd   sp!, {r0-r3}                    @ preserve r0-r3
     mov     r1, r6
     @ r0=methodToCall, r1=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceEnter
+    ldr     ip, .LdvmFastMethodTraceEnter
+    blx     ip
     ldmfd   sp!, {r0-r3}                    @ restore r0-r3
 #endif
 
@@ -57,4 +57,4 @@
 #if defined(WITH_JIT_TUNING)
     mov     r0, #kInlineCacheMiss
 #endif
-    mov     pc, r10                         @ dvmJitToInterpTraceSelectNoChain
+    bx      r10                         @ dvmJitToInterpTraceSelectNoChain
diff --git a/vm/compiler/template/armv5te/TEMPLATE_RETURN.S b/vm/compiler/template/armv5te/TEMPLATE_RETURN.S
index d074c9e..b10afcf 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_RETURN.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_RETURN.S
@@ -9,8 +9,8 @@
     stmfd   sp!, {r0-r2,lr}             @ preserve live registers
     mov     r0, r6
     @ r0=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceExit
+    ldr     ip, .LdvmFastMethodTraceExit
+    blx     ip
     ldmfd   sp!, {r0-r2,lr}             @ restore live registers
 #endif
     SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
diff --git a/vm/compiler/template/armv5te/footer.S b/vm/compiler/template/armv5te/footer.S
index 16660ae..001b80b 100644
--- a/vm/compiler/template/armv5te/footer.S
+++ b/vm/compiler/template/armv5te/footer.S
@@ -29,20 +29,20 @@
     stmfd   sp!, {r0-r3}
     mov     r0, r2
     mov     r1, r6
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceEnter
+    ldr     ip, .LdvmFastMethodTraceEnter
+    blx     ip
     ldmfd   sp!, {r0-r3}
 
-    mov     lr, pc
-    ldr     pc, [r2, #offMethod_nativeFunc]
+    ldr     ip, [r2, #offMethod_nativeFunc]
+    blx     ip
 
     ldmfd   sp!, {r0-r1}
-    mov     lr, pc
-    ldr     pc, .LdvmFastNativeMethodTraceExit
+    ldr     ip, .LdvmFastNativeMethodTraceExit
+    blx     ip
     b       212f
 121:
-    mov     lr, pc
-    ldr     pc, [r2, #offMethod_nativeFunc]
+    ldr     ip, [r2, #offMethod_nativeFunc]
+    blx     ip
 212:
 
     @ native return; r10=newSaveArea
@@ -68,7 +68,7 @@
 #if defined(WITH_JIT_TUNING)
     mov     r0, #kCallsiteInterpreted
 #endif
-    mov     pc, r1
+    bx      r1
 
 /*
  * On entry:
@@ -85,7 +85,7 @@
     ldr     r1, .LdvmMterpCommonExceptionThrown @ PIC way of getting &func
     ldr     rIBASE, .LdvmAsmInstructionStart    @ same as above
     mov     rPC, r0                 @ reload the faulting Dalvik address
-    mov     pc, r1                  @ branch to dvmMterpCommonExceptionThrown
+    bx      r1                  @ branch to dvmMterpCommonExceptionThrown
 
     .align  2
 .LdvmAsmInstructionStart:
diff --git a/vm/compiler/template/config-mips b/vm/compiler/template/config-mips
new file mode 100644
index 0000000..f212150
--- /dev/null
+++ b/vm/compiler/template/config-mips
@@ -0,0 +1,93 @@
+
+# Copyright (C) 2009 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Configuration for MIPS architecture targets.
+#
+
+# file header and basic definitions
+#import c/header.c
+import mips/header.S
+
+# C pre-processor defines for stub C instructions
+#import cstubs/stubdefs.c
+
+# highly-platform-specific defs
+import mips/platform.S
+
+# common defs for the C helpers; include this before the instruction handlers
+#import c/opcommon.c
+
+# opcode list; argument to op-start is default directory
+op-start mips
+
+    op TEMPLATE_SHL_LONG mips
+    op TEMPLATE_SHR_LONG mips
+    op TEMPLATE_USHR_LONG mips
+    op TEMPLATE_INT_TO_DOUBLE_VFP mips
+    op TEMPLATE_FLOAT_TO_DOUBLE_VFP mips
+    op TEMPLATE_ADD_DOUBLE_VFP mips
+    op TEMPLATE_DIV_DOUBLE_VFP mips
+    op TEMPLATE_MUL_DOUBLE_VFP mips
+    op TEMPLATE_SUB_DOUBLE_VFP mips
+    op TEMPLATE_ADD_FLOAT_VFP mips
+    op TEMPLATE_DIV_FLOAT_VFP mips
+    op TEMPLATE_MUL_FLOAT_VFP mips
+    op TEMPLATE_SUB_FLOAT_VFP mips
+    op TEMPLATE_FLOAT_TO_INT_VFP mips
+    op TEMPLATE_INT_TO_FLOAT_VFP mips
+    op TEMPLATE_DOUBLE_TO_FLOAT_VFP mips
+    op TEMPLATE_DOUBLE_TO_INT_VFP mips
+    op TEMPLATE_CMP_LONG mips
+    op TEMPLATE_CMPL_FLOAT_VFP mips
+    op TEMPLATE_CMPL_DOUBLE_VFP mips
+    op TEMPLATE_CMPG_FLOAT_VFP mips
+    op TEMPLATE_CMPG_DOUBLE_VFP mips
+    op TEMPLATE_MUL_LONG mips
+    op TEMPLATE_INTERPRET mips
+    op TEMPLATE_THROW_EXCEPTION_COMMON mips
+    op TEMPLATE_SQRT_DOUBLE_VFP mips
+    op TEMPLATE_SAVE_STATE mips
+    op TEMPLATE_RESTORE_STATE mips
+    op TEMPLATE_RETURN mips
+    op TEMPLATE_STRING_COMPARETO mips
+    op TEMPLATE_STRING_INDEXOF mips
+    op TEMPLATE_MEM_OP_DECODE mips
+    op TEMPLATE_MONITOR_ENTER mips
+    op TEMPLATE_MONITOR_ENTER_DEBUG mips
+    op TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN mips
+    op TEMPLATE_INVOKE_METHOD_CHAIN mips
+    op TEMPLATE_INVOKE_METHOD_NATIVE mips
+    op TEMPLATE_INVOKE_METHOD_NO_OPT mips
+
+    # New templates for ICS
+    op TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF mips
+    op TEMPLATE_INVOKE_METHOD_CHAIN_PROF mips
+    op TEMPLATE_INVOKE_METHOD_NATIVE_PROF mips
+    op TEMPLATE_INVOKE_METHOD_NO_OPT_PROF mips
+    op TEMPLATE_PERIODIC_PROFILING mips
+    op TEMPLATE_RETURN_PROF mips
+
+op-end
+
+# "helper" code for C; include if you use any of the C stubs (this generates
+# object code, so it's normally excluded)
+##import c/gotoTargets.c
+
+# end of defs; include this when cstubs/stubdefs.c is included
+#import cstubs/enddefs.c
+
+# common subroutines for asm
+import mips/footer.S
diff --git a/vm/compiler/template/mips/TEMPLATE_ADD_DOUBLE_VFP.S b/vm/compiler/template/mips/TEMPLATE_ADD_DOUBLE_VFP.S
new file mode 100644
index 0000000..56a02e3
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_ADD_DOUBLE_VFP.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/fbinopWide.S" {"instr":"JAL(__adddf3)","instr_f":"add.d fv0, fa0, fa1"}
diff --git a/vm/compiler/template/mips/TEMPLATE_ADD_FLOAT_VFP.S b/vm/compiler/template/mips/TEMPLATE_ADD_FLOAT_VFP.S
new file mode 100644
index 0000000..b0cbb31
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_ADD_FLOAT_VFP.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/fbinop.S" {"instr":"JAL(__addsf3)", "instr_f":"add.s fv0, fa0, fa1"}
diff --git a/vm/compiler/template/mips/TEMPLATE_CMPG_DOUBLE_VFP.S b/vm/compiler/template/mips/TEMPLATE_CMPG_DOUBLE_VFP.S
new file mode 100644
index 0000000..f5fa114
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_CMPG_DOUBLE_VFP.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/TEMPLATE_CMPL_DOUBLE_VFP.S" { "naninst":"li            rTEMP, 1" }
diff --git a/vm/compiler/template/mips/TEMPLATE_CMPG_FLOAT_VFP.S b/vm/compiler/template/mips/TEMPLATE_CMPG_FLOAT_VFP.S
new file mode 100644
index 0000000..c239a75
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_CMPG_FLOAT_VFP.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/TEMPLATE_CMPL_FLOAT_VFP.S" { "naninst":"li     rTEMP, 1" }
diff --git a/vm/compiler/template/mips/TEMPLATE_CMPL_DOUBLE_VFP.S b/vm/compiler/template/mips/TEMPLATE_CMPL_DOUBLE_VFP.S
new file mode 100644
index 0000000..0a1dd68
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_CMPL_DOUBLE_VFP.S
@@ -0,0 +1,68 @@
+%default { "naninst":"li     rTEMP, -1" }
+%verify "executed"
+%verify "basic lt, gt, eq */
+%verify "left arg NaN"
+%verify "right arg NaN"
+    /*
+     * Compare two double precision floating-point values.  Puts 0, 1, or -1 into the
+     * destination register based on the results of the comparison.
+     *
+     * Provide a "naninst" instruction that puts 1 or -1 into a1 depending
+     * on what value we'd like to return when one of the operands is NaN.
+     *
+     * The operation we're implementing is:
+     *   if (x == y)
+     *     return 0;
+     *   else if (x < y)
+     *     return -1;
+     *   else if (x > y)
+     *     return 1;
+     *   else
+     *     return {-1,1};  // one or both operands was NaN
+     *
+     * On entry:
+     *    a0 = &op1 [vBB]
+     *    a1 = &op2 [vCC]
+     *
+     * for: cmpl-double, cmpg-double
+     */
+    /* op vAA, vBB, vCC */
+
+    /* "clasic" form */
+#ifdef  SOFT_FLOAT
+    move rOBJ, a0                       # save a0
+    move rBIX, a1                       # save a1
+    LOAD64(rARG0, rARG1, rOBJ)          # a0/a1<- vBB/vBB+1
+    LOAD64(rARG2, rARG3, rBIX)          # a2/a3<- vCC/vCC+1
+    JAL(__eqdf2)                        # v0<- (vBB == vCC)
+    li       rTEMP, 0                   # vAA<- 0
+    beqz     v0, ${opcode}_finish
+    LOAD64(rARG0, rARG1, rOBJ)          # a0/a1<- vBB/vBB+1
+    LOAD64(rARG2, rARG3, rBIX)          # a2/a3<- vCC/vCC+1
+    JAL(__ltdf2)                        # a0<- (vBB < vCC)
+    li       rTEMP, -1                  # vAA<- -1
+    bltz     v0, ${opcode}_finish
+    LOAD64(rARG0, rARG1, rOBJ)          # a0/a1<- vBB/vBB+1
+    LOAD64(rARG2, rARG3, rBIX)          # a2/a3<- vCC/vCC+1
+    JAL(__gtdf2)                        # v0<- (vBB > vCC)
+    li      rTEMP, 1                    # vAA<- 1
+    bgtz    v0, ${opcode}_finish
+#else
+    LOAD64_F(fs0, fs0f, a0)             # fs0<- vBB
+    LOAD64_F(fs1, fs1f, a1)             # fs1<- vCC
+    c.olt.d     fcc0, fs0, fs1          # Is fs0 < fs1
+    li          rTEMP, -1
+    bc1t        fcc0, ${opcode}_finish
+    c.olt.d     fcc0, fs1, fs0
+    li          rTEMP, 1
+    bc1t        fcc0, ${opcode}_finish
+    c.eq.d      fcc0, fs0, fs1
+    li          rTEMP, 0
+    bc1t        fcc0, ${opcode}_finish
+#endif
+
+    $naninst
+
+${opcode}_finish:
+    move     v0, rTEMP                  # v0<- vAA
+    RETURN
diff --git a/vm/compiler/template/mips/TEMPLATE_CMPL_FLOAT_VFP.S b/vm/compiler/template/mips/TEMPLATE_CMPL_FLOAT_VFP.S
new file mode 100644
index 0000000..7ef7723
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_CMPL_FLOAT_VFP.S
@@ -0,0 +1,68 @@
+%default { "naninst":"li     rTEMP, -1" }
+%verify "executed"
+%verify "basic lt, gt, eq */
+%verify "left arg NaN"
+%verify "right arg NaN"
+    /*
+     * Compare two floating-point values.  Puts 0, 1, or -1 into the
+     * destination register based on the results of the comparison.
+     *
+     * Provide a "naninst" instruction that puts 1 or -1 into a1 depending
+     * on what value we'd like to return when one of the operands is NaN.
+     *
+     * The operation we're implementing is:
+     *   if (x == y)
+     *     return 0;
+     *   else if (x < y)
+     *     return -1;
+     *   else if (x > y)
+     *     return 1;
+     *   else
+     *     return {-1,1};  // one or both operands was NaN
+     *
+     * On entry:
+     *    a0 = &op1 [vBB]
+     *    a1 = &op2 [vCC]
+     *
+     * for: cmpl-float, cmpg-float
+     */
+    /* op vAA, vBB, vCC */
+
+    /* "clasic" form */
+#ifdef  SOFT_FLOAT
+    LOAD(rOBJ, a0)                      # rOBJ<- vBB
+    LOAD(rBIX, a1)                      # rBIX<- vCC
+    move     a0, rOBJ                   # a0<- vBB
+    move     a1, rBIX                   # a1<- vCC
+    JAL(__eqsf2)                        # v0<- (vBB == vCC)
+    li       rTEMP, 0                   # vAA<- 0
+    beqz     v0, ${opcode}_finish
+    move     a0, rOBJ                   # a0<- vBB
+    move     a1, rBIX                   # a1<- vCC
+    JAL(__ltsf2)                        # a0<- (vBB < vCC)
+    li       rTEMP, -1                  # vAA<- -1
+    bltz     v0, ${opcode}_finish
+    move     a0, rOBJ                   # a0<- vBB
+    move     a1, rBIX                   # a1<- vCC
+    JAL(__gtsf2)                        # v0<- (vBB > vCC)
+    li      rTEMP, 1                    # vAA<- 1
+    bgtz    v0, ${opcode}_finish
+#else
+    LOAD_F(fs0, a0)                     # fs0<- vBB
+    LOAD_F(fs1, a1)                     # fs1<- vCC
+    c.olt.s     fcc0, fs0, fs1          # Is fs0 < fs1
+    li          rTEMP, -1
+    bc1t        fcc0, ${opcode}_finish
+    c.olt.s     fcc0, fs1, fs0
+    li          rTEMP, 1
+    bc1t        fcc0, ${opcode}_finish
+    c.eq.s      fcc0, fs0, fs1
+    li          rTEMP, 0
+    bc1t        fcc0, ${opcode}_finish
+#endif
+
+    $naninst
+
+${opcode}_finish:
+    move     v0, rTEMP                  # v0<- vAA
+    RETURN
diff --git a/vm/compiler/template/mips/TEMPLATE_CMP_LONG.S b/vm/compiler/template/mips/TEMPLATE_CMP_LONG.S
new file mode 100644
index 0000000..9ecb069
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_CMP_LONG.S
@@ -0,0 +1,27 @@
+%verify "endianess"
+    /*
+     * Compare two 64-bit values
+     *    x = y     return  0
+     *    x < y     return -1
+     *    x > y     return  1
+     *
+     * I think I can improve on the ARM code by the following observation
+     *    slt   t0,  x.hi, y.hi;        # (x.hi < y.hi) ? 1:0
+     *    sgt   t1,  x.hi, y.hi;        # (x.hi > y.hi) ? 1:0
+     *    subu  v0, t1, t0              # v0= -1:1:0 for [ < > = ]
+     *
+     * This code assumes the register pair ordering will depend on endianness
+     * (a1:a0 or a0:a1).
+     *    a1:a0 => vBB
+     *    a3:a2 => vCC
+     */
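+    /*
+     * Equivalent C sketch of the sequence below (illustrative only; hi is
+     * the signed high half, lo the unsigned low half):
+     *     int hi = (x.hi > y.hi) - (x.hi < y.hi);
+     *     if (hi != 0) return hi;
+     *     return (x.lo > y.lo) - (x.lo < y.lo);
+     */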
+    /* cmp-long vAA, vBB, vCC */
+    slt    t0, rARG1, rARG3             # compare hi
+    sgt    t1, rARG1, rARG3
+    subu   v0, t1, t0                   # v0<- (-1,1,0)
+    bnez   v0, .L${opcode}_finish
+                                        # at this point x.hi==y.hi
+    sltu   t0, rARG0, rARG2             # compare lo
+    sgtu   t1, rARG0, rARG2
+    subu   v0, t1, t0                   # v0<- (-1,1,0) for [< > =]
+.L${opcode}_finish:
+    RETURN
diff --git a/vm/compiler/template/mips/TEMPLATE_DIV_DOUBLE_VFP.S b/vm/compiler/template/mips/TEMPLATE_DIV_DOUBLE_VFP.S
new file mode 100644
index 0000000..a951f93
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_DIV_DOUBLE_VFP.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/fbinopWide.S" {"instr":"JAL(__divdf3)","instr_f":"div.d fv0, fa0, fa1"}
diff --git a/vm/compiler/template/mips/TEMPLATE_DIV_FLOAT_VFP.S b/vm/compiler/template/mips/TEMPLATE_DIV_FLOAT_VFP.S
new file mode 100644
index 0000000..11b3da6
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_DIV_FLOAT_VFP.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/fbinop.S" {"instr":"JAL(__divsf3)", "instr_f":"div.s fv0, fa0, fa1"}
diff --git a/vm/compiler/template/mips/TEMPLATE_DOUBLE_TO_FLOAT_VFP.S b/vm/compiler/template/mips/TEMPLATE_DOUBLE_TO_FLOAT_VFP.S
new file mode 100644
index 0000000..51b1e96
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_DOUBLE_TO_FLOAT_VFP.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/funopNarrower.S" {"instr":"JAL(__truncdfsf2)","instr_f":"cvt.s.d  fv0,fa0"}
diff --git a/vm/compiler/template/mips/TEMPLATE_DOUBLE_TO_INT_VFP.S b/vm/compiler/template/mips/TEMPLATE_DOUBLE_TO_INT_VFP.S
new file mode 100644
index 0000000..4774bb1
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_DOUBLE_TO_INT_VFP.S
@@ -0,0 +1,79 @@
+%verify "executed"
+%include "mips/funopNarrower.S" {"instr":"b    d2i_doconv","instr_f":"b    d2i_doconv"}
+
+/*
+ * Convert the double in a0/a1 (fa0 for hard float) to an int in v0 (fv0).
+ *
+ * We have to clip values to int min/max per the specification.  The
+ * expected common case is a "reasonable" value that converts directly
+ * to a modest integer.  The EABI convert function isn't doing this for us.
+ * Use rBIX / rOBJ to hold the argument across the helper calls (they are
+ * not bound to a virtual register here).
+ */
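+
+/*
+ * C sketch of the clipping contract implemented below (illustrative only):
+ *     if (d >= 2147483647.0)  return 0x7fffffff;        // maxint
+ *     if (d <= -2147483648.0) return (int) 0x80000000;  // minint
+ *     if (d != d)             return 0;                 // NaN
+ *     return (int) d;
+ */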
+
+d2i_doconv:
+#ifdef SOFT_FLOAT
+    la          t0, .LDOUBLE_TO_INT_max
+    LOAD64(rARG2, rARG3, t0)
+    move        rBIX, rARG0                       # save a0
+    move        rOBJ, rARG1                       #  and a1
+    JAL(__gedf2)                               # is arg >= maxint?
+
+    move        t0, v0
+    li          v0, ~0x80000000                # return maxint (7fffffff)
+    bgez        t0, .L${opcode}_set_vreg       # nonzero == yes
+
+    move        rARG0, rBIX                       # recover arg
+    move        rARG1, rOBJ
+    la          t0, .LDOUBLE_TO_INT_min
+    LOAD64(rARG2, rARG3, t0)
+    JAL(__ledf2)                               # is arg <= minint?
+
+    move        t0, v0
+    li          v0, 0x80000000                 # return minint (80000000)
+    blez        t0, .L${opcode}_set_vreg       # nonzero == yes
+
+    move        rARG0, rBIX                  # recover arg
+    move        rARG1, rOBJ
+    move        rARG2, rBIX                  # compare against self
+    move        rARG3, rOBJ
+    JAL(__nedf2)                        # is arg == self?
+
+    move        t0, v0                  # zero == no
+    li          v0, 0
+    bnez        t0, .L${opcode}_set_vreg        # return zero for NaN
+
+    move        rARG0, rBIX                  # recover arg
+    move        rARG1, rOBJ
+    JAL(__fixdfsi)                      # convert double to int
+    b           .L${opcode}_set_vreg
+#else
+    la          t0, .LDOUBLE_TO_INT_max
+    LOAD64_F(fa1, fa1f, t0)
+    c.ole.d     fcc0, fa1, fa0
+    l.s         fv0, .LDOUBLE_TO_INT_maxret
+    bc1t        .L${opcode}_set_vreg_f
+
+    la          t0, .LDOUBLE_TO_INT_min
+    LOAD64_F(fa1, fa1f, t0)
+    c.ole.d     fcc0, fa0, fa1
+    l.s         fv0, .LDOUBLE_TO_INT_minret
+    bc1t        .L${opcode}_set_vreg_f
+
+    mov.d       fa1, fa0
+    c.un.d      fcc0, fa0, fa1
+    li.s        fv0, 0
+    bc1t        .L${opcode}_set_vreg_f
+
+    trunc.w.d   fv0, fa0
+    b           .L${opcode}_set_vreg_f
+#endif
+
+
+.LDOUBLE_TO_INT_max:
+    .dword   0x41dfffffffc00000
+.LDOUBLE_TO_INT_min:
+    .dword   0xc1e0000000000000                  # minint, as a double
+.LDOUBLE_TO_INT_maxret:
+    .word   0x7fffffff
+.LDOUBLE_TO_INT_minret:
+    .word   0x80000000
diff --git a/vm/compiler/template/mips/TEMPLATE_FLOAT_TO_DOUBLE_VFP.S b/vm/compiler/template/mips/TEMPLATE_FLOAT_TO_DOUBLE_VFP.S
new file mode 100644
index 0000000..66d14ce
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_FLOAT_TO_DOUBLE_VFP.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/funopWider.S" {"instr":"JAL(__extendsfdf2)","instr_f":"cvt.d.s fv0, fa0"}
diff --git a/vm/compiler/template/mips/TEMPLATE_FLOAT_TO_INT_VFP.S b/vm/compiler/template/mips/TEMPLATE_FLOAT_TO_INT_VFP.S
new file mode 100644
index 0000000..6eaaab9
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_FLOAT_TO_INT_VFP.S
@@ -0,0 +1,62 @@
+%verify "executed"
+%include "mips/funop.S" {"instr":"b    f2i_doconv","instr_f":"b        f2i_doconv"}
+
+/*
+ * Not a separate entry point; it is used only once.
+ */
+f2i_doconv:
+#ifdef SOFT_FLOAT
+        li      a1, 0x4f000000  # (float)maxint
+        move    rBIX, a0
+        JAL(__gesf2)            # is arg >= maxint?
+        move    t0, v0
+        li      v0, ~0x80000000 # return maxint (7fffffff)
+        bgez    t0, .L${opcode}_set_vreg
+
+        move    a0, rBIX                # recover arg
+        li      a1, 0xcf000000  # (float)minint
+        JAL(__lesf2)
+
+        move    t0, v0
+        li      v0, 0x80000000  # return minint (80000000)
+        blez    t0, .L${opcode}_set_vreg
+        move    a0, rBIX
+        move    a1, rBIX
+        JAL(__nesf2)
+
+        move    t0, v0
+        li      v0, 0           # return zero for NaN
+        bnez    t0, .L${opcode}_set_vreg
+
+        move    a0, rBIX
+        JAL(__fixsfsi)
+        b .L${opcode}_set_vreg
+#else
+        l.s             fa1, .LFLOAT_TO_INT_max
+        c.ole.s         fcc0, fa1, fa0
+        l.s             fv0, .LFLOAT_TO_INT_ret_max
+        bc1t            .L${opcode}_set_vreg_f
+
+        l.s             fa1, .LFLOAT_TO_INT_min
+        c.ole.s         fcc0, fa0, fa1
+        l.s             fv0, .LFLOAT_TO_INT_ret_min
+        bc1t            .L${opcode}_set_vreg_f
+
+        mov.s           fa1, fa0
+        c.un.s          fcc0, fa0, fa1
+        li.s            fv0, 0
+        bc1t            .L${opcode}_set_vreg_f
+
+        trunc.w.s       fv0, fa0
+        b .L${opcode}_set_vreg_f
+#endif
+
+.LFLOAT_TO_INT_max:
+        .word   0x4f000000
+.LFLOAT_TO_INT_min:
+        .word   0xcf000000
+.LFLOAT_TO_INT_ret_max:
+        .word   0x7fffffff
+.LFLOAT_TO_INT_ret_min:
+        .word   0x80000000
+
diff --git a/vm/compiler/template/mips/TEMPLATE_INTERPRET.S b/vm/compiler/template/mips/TEMPLATE_INTERPRET.S
new file mode 100644
index 0000000..1284621
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_INTERPRET.S
@@ -0,0 +1,21 @@
+    /*
+     * This handler transfers control to the interpreter without performing
+     * any lookups.  It may be called either as part of a normal chaining
+     * operation, or from the transition code in header.S.  We distinguish
+     * the two cases by looking at the link register.  If called from a
+     * translation chain, it will point to the chaining Dalvik PC.
+     * On entry:
+     *    ra - if NULL:
+     *        a1 - the Dalvik PC to begin interpretation.
+     *    else
+     *        [ra] contains Dalvik PC to begin interpretation
+     *    rSELF - pointer to thread
+     *    rFP - Dalvik frame pointer
+     */
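+    /*
+     * C sketch of the dispatch above (illustrative only):
+     *     dalvikPC = (ra == NULL) ? a1 : *(u4 *) ra;
+     *     dvmJitToInterpPunt(dalvikPC);
+     */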
+    la      t0, dvmJitToInterpPunt
+    move    a0, a1
+    beq     ra, zero, 1f
+    lw      a0, 0(ra)
+1:
+    jr      t0
+    # doesn't return
diff --git a/vm/compiler/template/mips/TEMPLATE_INT_TO_DOUBLE_VFP.S b/vm/compiler/template/mips/TEMPLATE_INT_TO_DOUBLE_VFP.S
new file mode 100644
index 0000000..0dca600
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_INT_TO_DOUBLE_VFP.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/funopWider.S" {"instr":"JAL(__floatsidf)","instr_f":"cvt.d.w    fv0, fa0"}
diff --git a/vm/compiler/template/mips/TEMPLATE_INT_TO_FLOAT_VFP.S b/vm/compiler/template/mips/TEMPLATE_INT_TO_FLOAT_VFP.S
new file mode 100644
index 0000000..384c207
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_INT_TO_FLOAT_VFP.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/funop.S" {"instr":"JAL(__floatsisf)","instr_f":"cvt.s.w fv0, fa0"}
diff --git a/vm/compiler/template/mips/TEMPLATE_INVOKE_METHOD_CHAIN.S b/vm/compiler/template/mips/TEMPLATE_INVOKE_METHOD_CHAIN.S
new file mode 100644
index 0000000..c1e03ce
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_INVOKE_METHOD_CHAIN.S
@@ -0,0 +1,67 @@
+%default { "chaintgt" : ".LinvokeChain" }
+    /*
+     * For a monomorphic callsite, set up the Dalvik frame and return to the
+     * MIPS code through the link register to transfer control to the callee
+     * method through a dedicated chaining cell.
+     */
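+    /*
+     * Stack-overflow check sketch (illustrative only):
+     *     newFp  = SAVEAREA_FROM_FP(fp) - methodToCall->registersSize * 4;
+     *     bottom = SAVEAREA_FROM_FP(newFp) - methodToCall->outsSize * 4;
+     *     if (bottom < self->interpStackEnd) punt back to the interpreter;
+     */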
+    # a0 = methodToCall, a1 = returnCell, rPC = dalvikCallsite
+    # methodToCall is guaranteed to be non-native
+$chaintgt:
+    lh     t7, offMethod_registersSize(a0)        # t7<- methodToCall->regsSize
+    lh     a2, offMethod_outsSize(a0)             # a2<- methodToCall->outsSize
+    lw     t9, offThread_interpStackEnd(rSELF)    # t9<- interpStackEnd
+    lbu    t8, offThread_breakFlags(rSELF)        # t8<- breakFlags
+    move   a3, a1                                 # a3<- returnCell
+    SAVEAREA_FROM_FP(a1, rFP)                     # a1<- stack save area
+    sll    t6, t7, 2                              # multiply regsSize by 4 (4 bytes per reg)
+    sub    a1, a1, t6                             # a1<- newFp(old savearea-regsSize)
+    SAVEAREA_FROM_FP(t0, a1)                      # t0<- stack save area
+    add    t2, ra, 8                              # setup the punt-to-interp address
+                                                  # 8 bytes skips branch and delay slot
+    sll    t6, a2, 2                              # multiply outsSize by 4 (4 bytes per reg)
+    sub    t0, t0, t6                             # t0<- bottom (newsave-outsSize)
+    bgeu   t0, t9, 1f                             # bottom < interpStackEnd?
+    jr     t2                                     # return to raise stack overflow excep.
+
+1:
+    # a1 = newFP, a0 = methodToCall, a3 = returnCell, rPC = dalvikCallsite
+    lw     t9, offMethod_clazz(a0)                # t9<- methodToCall->clazz
+    sw     rPC, (offStackSaveArea_currentPc - sizeofStackSaveArea)(rFP)
+    sw     rPC, (offStackSaveArea_savedPc - sizeofStackSaveArea)(a1)
+    lw     rPC, offMethod_insns(a0)               # rPC<- methodToCall->insns
+
+    # set up newSaveArea
+    sw     rFP, (offStackSaveArea_prevFrame - sizeofStackSaveArea)(a1)
+    sw     a3, (offStackSaveArea_returnAddr - sizeofStackSaveArea)(a1)
+    sw     a0, (offStackSaveArea_method - sizeofStackSaveArea)(a1)
+    beqz   t8, 2f                                 # breakFlags != 0
+    jr     t2                                     # bail to the interpreter
+
+2:
+    lw     a3, offClassObject_pDvmDex(t9)         # a3<- methodToCall->clazz->pDvmDex
+
+    # Update "thread" values for the new method
+    sw     a0, offThread_method(rSELF)            # self->method = methodToCall
+    sw     a3, offThread_methodClassDex(rSELF)    # self->methodClassDex = ...
+    move   rFP, a1                                # fp = newFp
+    sw     rFP, offThread_curFrame(rSELF)         # self->curFrame = newFp
+#if defined(TEMPLATE_INLINE_PROFILING)
+    # preserve a0-a2 and ra
+    SCRATCH_STORE(a0, 0)
+    SCRATCH_STORE(a1, 4)
+    SCRATCH_STORE(a2, 8)
+    SCRATCH_STORE(ra, 12)
+
+    move   a1, rSELF
+    # a0=methodToCall, a1=rSELF
+    la     t9, dvmFastMethodTraceEnter
+    jalr   t9
+    lw     gp, STACK_OFFSET_GP(sp)
+
+    # restore a0-a2 and ra
+    SCRATCH_LOAD(ra, 12)
+    SCRATCH_LOAD(a2, 8)
+    SCRATCH_LOAD(a1, 4)
+    SCRATCH_LOAD(a0, 0)
+#endif
+    RETURN                                        # return to the callee-chaining cell
diff --git a/vm/compiler/template/mips/TEMPLATE_INVOKE_METHOD_CHAIN_PROF.S b/vm/compiler/template/mips/TEMPLATE_INVOKE_METHOD_CHAIN_PROF.S
new file mode 100644
index 0000000..797ff03
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_INVOKE_METHOD_CHAIN_PROF.S
@@ -0,0 +1,3 @@
+#define TEMPLATE_INLINE_PROFILING
+%include "mips/TEMPLATE_INVOKE_METHOD_CHAIN.S" { "chaintgt" : ".LinvokeChainProf" }
+#undef TEMPLATE_INLINE_PROFILING
diff --git a/vm/compiler/template/mips/TEMPLATE_INVOKE_METHOD_NATIVE.S b/vm/compiler/template/mips/TEMPLATE_INVOKE_METHOD_NATIVE.S
new file mode 100644
index 0000000..2579ff9
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_INVOKE_METHOD_NATIVE.S
@@ -0,0 +1,104 @@
+    # a0 = methodToCall, a1 = returnCell, rPC = dalvikCallsite
+    lh     t7, offMethod_registersSize(a0)        # t7<- methodToCall->regsSize
+    lw     t9, offThread_interpStackEnd(rSELF)    # t9<- interpStackEnd
+    lbu    t8, offThread_breakFlags(rSELF)        # t8<- breakFlags
+    move   a3, a1                                 # a3<- returnCell
+    SAVEAREA_FROM_FP(a1, rFP)                     # a1<- stack save area
+    sll    t6, t7, 2                              # multiply regsSize by 4 (4 bytes per reg)
+    sub    a1, a1, t6                             # a1<- newFp(old savearea-regsSize)
+    SAVEAREA_FROM_FP(t0, a1)                      # t0<- stack save area
+    bgeu   t0, t9, 1f                             # bottom < interpStackEnd?
+    RETURN                                        # return to raise stack overflow excep.
+
+1:
+    # a1 = newFP, a0 = methodToCall, a3 = returnCell, rPC = dalvikCallsite
+    sw     rPC, (offStackSaveArea_currentPc - sizeofStackSaveArea)(rFP)
+    sw     rPC, (offStackSaveArea_savedPc - sizeofStackSaveArea)(a1)
+    lw     rPC, offMethod_insns(a0)               # rPC<- methodToCall->insns
+
+    # set up newSaveArea
+    sw     rFP, (offStackSaveArea_prevFrame - sizeofStackSaveArea)(a1)
+    sw     a3, (offStackSaveArea_returnAddr - sizeofStackSaveArea)(a1)
+    sw     a0, (offStackSaveArea_method - sizeofStackSaveArea)(a1)
+    lw     rTEMP, offMethod_nativeFunc(a0)        # t9<- method->nativeFunc
+#if !defined(WITH_SELF_VERIFICATION)
+    beqz   t8, 2f                                 # breakFlags != 0
+    RETURN                                        # bail to the interpreter
+2:
+#else
+    RETURN                                        # bail to the interpreter unconditionally
+#endif
+
+    # go ahead and transfer control to the native code
+    lw     t6, offThread_jniLocal_topCookie(rSELF)  # t6<- thread->localRef->...
+    sw     a1, offThread_curFrame(rSELF)          # self->curFrame = newFp
+    sw     zero, offThread_inJitCodeCache(rSELF)  # not in the jit code cache
+    sw     t6, (offStackSaveArea_localRefCookie - sizeofStackSaveArea)(a1)
+                                                  # newFp->localRefCookie=top
+    SAVEAREA_FROM_FP(rBIX, a1)                    # rBIX<- new stack save area
+    move   a2, a0                                 # a2<- methodToCall
+    move   a0, a1                                 # a0<- newFp
+    add    a1, rSELF, offThread_retval            # a1<- &retval
+    move   a3, rSELF                              # a3<- self
+#if defined(TEMPLATE_INLINE_PROFILING)
+    # a2: methodToCall
+    # preserve a0-a3
+    SCRATCH_STORE(a0, 0)
+    SCRATCH_STORE(a1, 4)
+    SCRATCH_STORE(a2, 8)
+    SCRATCH_STORE(a3, 12)
+
+    move   a0, a2
+    move   a1, rSELF
+    # a0=JNIMethod, a1=rSELF
+    la      t9, dvmFastMethodTraceEnter
+    JALR(t9)                                      # off to the native code
+    lw     gp, STACK_OFFSET_GP(sp)
+
+    # restore a0-a3
+    SCRATCH_LOAD(a3, 12)
+    SCRATCH_LOAD(a2, 8)
+    SCRATCH_LOAD(a1, 4)
+    SCRATCH_LOAD(a0, 0)
+
+    move   rOBJ, a2                               # save a2
+#endif
+
+    JALR(rTEMP)                                   # off to the native code
+    lw     gp, STACK_OFFSET_GP(sp)
+
+#if defined(TEMPLATE_INLINE_PROFILING)
+    move   a0, rOBJ
+    move   a1, rSELF
+    # a0=JNIMethod, a1=rSELF
+    la      t9, dvmFastNativeMethodTraceExit
+    JALR(t9)
+    lw     gp, STACK_OFFSET_GP(sp)
+#endif
+
+    # native return; rBIX=newSaveArea
+    # equivalent to dvmPopJniLocals
+    lw     a2, offStackSaveArea_returnAddr(rBIX)     # a2 = chaining cell ret addr
+    lw     a0, offStackSaveArea_localRefCookie(rBIX) # a0<- saved->top
+    lw     a1, offThread_exception(rSELF)            # check for exception
+    sw     rFP, offThread_curFrame(rSELF)            # self->curFrame = fp
+    sw     a0, offThread_jniLocal_topCookie(rSELF)   # new top <- old top
+    lw     a0, (offStackSaveArea_currentPc - sizeofStackSaveArea)(rFP)
+
+    # a0 = dalvikCallsitePC
+    bnez   a1, .LhandleException                     # handle exception if any
+
+    sw     a2, offThread_inJitCodeCache(rSELF)       # set the mode properly
+    beqz   a2, 3f
+    jr     a2                                        # go if return chaining cell still exists
+
+3:
+    # continue executing the next instruction through the interpreter
+    la     a1, .LdvmJitToInterpTraceSelectNoChain    # defined in footer.S
+    lw     a1, (a1)
+    add    rPC, a0, 3*2                              # reconstruct new rPC (advance 3 dalvik code units)
+
+#if defined(WITH_JIT_TUNING)
+    li     a0, kCallsiteInterpreted
+#endif
+    jr     a1
diff --git a/vm/compiler/template/mips/TEMPLATE_INVOKE_METHOD_NATIVE_PROF.S b/vm/compiler/template/mips/TEMPLATE_INVOKE_METHOD_NATIVE_PROF.S
new file mode 100644
index 0000000..e167996
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_INVOKE_METHOD_NATIVE_PROF.S
@@ -0,0 +1,3 @@
+#define TEMPLATE_INLINE_PROFILING
+%include "mips/TEMPLATE_INVOKE_METHOD_NATIVE.S"
+#undef TEMPLATE_INLINE_PROFILING
diff --git a/vm/compiler/template/mips/TEMPLATE_INVOKE_METHOD_NO_OPT.S b/vm/compiler/template/mips/TEMPLATE_INVOKE_METHOD_NO_OPT.S
new file mode 100644
index 0000000..d513d1c
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_INVOKE_METHOD_NO_OPT.S
@@ -0,0 +1,80 @@
+    /*
+     * For polymorphic callsites, set up the Dalvik frame and load the Dalvik PC
+     * into rPC then jump to dvmJitToInterpNoChain to dispatch the
+     * runtime-resolved callee.
+     */
+    # a0 = methodToCall, a1 = returnCell, rPC = dalvikCallsite
+    lh     t7, offMethod_registersSize(a0)        # t7<- methodToCall->regsSize
+    lh     a2, offMethod_outsSize(a0)             # a2<- methodToCall->outsSize
+    lw     t9, offThread_interpStackEnd(rSELF)    # t9<- interpStackEnd
+    lbu    t8, offThread_breakFlags(rSELF)        # t8<- breakFlags
+    move   a3, a1                                 # a3<- returnCell
+    SAVEAREA_FROM_FP(a1, rFP)                     # a1<- stack save area
+    sll    t6, t7, 2                              # multiply regsSize by 4 (4 bytes per reg)
+    sub    a1, a1, t6                             # a1<- newFp(old savearea-regsSize)
+    SAVEAREA_FROM_FP(t0, a1)                      # t0<- stack save area
+    sll    t6, a2, 2                              # multiply outsSize by 4 (4 bytes per reg)
+    sub    t0, t0, t6                             # t0<- bottom (newsave-outsSize)
+    bgeu   t0, t9, 1f                             # bottom < interpStackEnd?
+    RETURN                                        # return to raise stack overflow excep.
+
+1:
+    # a1 = newFP, a0 = methodToCall, a3 = returnCell, rPC = dalvikCallsite
+    lw     t9, offMethod_clazz(a0)                # t9<- methodToCall->clazz
+    lw     t0, offMethod_accessFlags(a0)          # t0<- methodToCall->accessFlags
+    sw     rPC, (offStackSaveArea_currentPc - sizeofStackSaveArea)(rFP)
+    sw     rPC, (offStackSaveArea_savedPc - sizeofStackSaveArea)(a1)
+    lw     rPC, offMethod_insns(a0)               # rPC<- methodToCall->insns
+
+    # set up newSaveArea
+    sw     rFP, (offStackSaveArea_prevFrame - sizeofStackSaveArea)(a1)
+    sw     a3, (offStackSaveArea_returnAddr - sizeofStackSaveArea)(a1)
+    sw     a0, (offStackSaveArea_method - sizeofStackSaveArea)(a1)
+    beqz   t8, 2f                                 # breakFlags != 0
+    RETURN                                        # bail to the interpreter
+
+2:
+    and    t6, t0, ACC_NATIVE
+    beqz   t6, 3f
+#if !defined(WITH_SELF_VERIFICATION)
+    j      .LinvokeNative
+#else
+    RETURN                                        # bail to the interpreter
+#endif
+
+3:
+    # continue executing the next instruction through the interpreter
+    la     t0, .LdvmJitToInterpTraceSelectNoChain # defined in footer.S
+    lw     rTEMP, (t0)
+    lw     a3, offClassObject_pDvmDex(t9)         # a3<- method->clazz->pDvmDex
+
+    # Update "thread" values for the new method
+    sw     a0, offThread_method(rSELF)            # self->method = methodToCall
+    sw     a3, offThread_methodClassDex(rSELF)    # self->methodClassDex = ...
+    move   rFP, a1                                # fp = newFp
+    sw     rFP, offThread_curFrame(rSELF)         # self->curFrame = newFp
+#if defined(TEMPLATE_INLINE_PROFILING)
+    # preserve a0-a3
+    SCRATCH_STORE(a0, 0)
+    SCRATCH_STORE(a1, 4)
+    SCRATCH_STORE(a2, 8)
+    SCRATCH_STORE(a3, 12)
+
+    # a0=methodToCall, a1=rSELF
+    move   a1, rSELF
+    la     t9, dvmFastMethodTraceEnter
+    JALR(t9)
+    lw     gp, STACK_OFFSET_GP(sp)
+
+    # restore a0-a3
+    SCRATCH_LOAD(a3, 12)
+    SCRATCH_LOAD(a2, 8)
+    SCRATCH_LOAD(a1, 4)
+    SCRATCH_LOAD(a0, 0)
+#endif
+
+    # Start executing the callee
+#if defined(WITH_JIT_TUNING)
+    li     a0, kInlineCacheMiss
+#endif
+    jr     rTEMP                                  # dvmJitToInterpTraceSelectNoChain
diff --git a/vm/compiler/template/mips/TEMPLATE_INVOKE_METHOD_NO_OPT_PROF.S b/vm/compiler/template/mips/TEMPLATE_INVOKE_METHOD_NO_OPT_PROF.S
new file mode 100644
index 0000000..386ce63
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_INVOKE_METHOD_NO_OPT_PROF.S
@@ -0,0 +1,3 @@
+#define TEMPLATE_INLINE_PROFILING
+%include "mips/TEMPLATE_INVOKE_METHOD_NO_OPT.S"
+#undef TEMPLATE_INLINE_PROFILING
diff --git a/vm/compiler/template/mips/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN.S b/vm/compiler/template/mips/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN.S
new file mode 100644
index 0000000..e95ab32
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN.S
@@ -0,0 +1,59 @@
+%default { "chaintgt" : ".LinvokeChain" }
+    /*
+     * For a polymorphic callsite, check whether the cached class pointer
+     * matches the current one. If so, set up the Dalvik frame and return to
+     * the MIPS code through the link register to transfer control to the
+     * callee method through a dedicated chaining cell.
+     *
+     * The predicted chaining cell is declared in MipsLIR.h with the
+     * following layout:
+     *
+     *  typedef struct PredictedChainingCell {
+     *      u4 branch;
+     *      u4 delay_slot;
+     *      const ClassObject *clazz;
+     *      const Method *method;
+     *      u4 counter;
+     *  } PredictedChainingCell;
+     *
+     * Upon returning to the callsite:
+     *    - ra   : to branch to the chaining cell
+     *    - ra+8 : to punt to the interpreter
+     *    - ra+16: to fully resolve the callee and possibly rechain.
+     *             a3 <- class
+     */
+    # a0 = this, a1 = returnCell, a2 = predictedChainCell, rPC = dalvikCallsite
+    lw      a3, offObject_clazz(a0)     # a3 <- this->class
+    lw      rIBASE, 8(a2)               # rIBASE <- predictedChainCell->clazz
+    lw      a0, 12(a2)                  # a0 <- predictedChainCell->method
+    lw      t1, offThread_icRechainCount(rSELF)    # t1 <- shared rechainCount
+
+#if defined(WITH_JIT_TUNING)
+    la      rINST, .LdvmICHitCount
+    #add     t2, t2, 1
+    bne    a3, rIBASE, 1f
+    nop
+    lw      t2, 0(rINST)
+    add     t2, t2, 1
+    sw      t2, 0(rINST)
+1:
+    #add     t2, t2, 1
+#endif
+    beq     a3, rIBASE, $chaintgt       # branch if predicted chain is valid
+    lw      rINST, offClassObject_vtable(a3)     # rINST <- this->class->vtable
+    beqz    rIBASE, 2f                      # initialized class or not
+    sub     a1, t1, 1                   # count--
+    sw      a1, offThread_icRechainCount(rSELF)   # write back to InterpState
+    b       3f
+2:
+    move    a1, zero
+3:
+    add     ra, ra, 16                  # return to fully-resolve landing pad
+    /*
+     * a1 <- count
+     * a2 <- &predictedChainCell
+     * a3 <- this->class
+     * rPC <- dPC
+     * rINST <- this->class->vtable
+     */
+    RETURN
diff --git a/vm/compiler/template/mips/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF.S b/vm/compiler/template/mips/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF.S
new file mode 100644
index 0000000..39d6452
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF.S
@@ -0,0 +1,3 @@
+#define TEMPLATE_INLINE_PROFILING
+%include "mips/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN.S" { "chaintgt" : ".LinvokeChainProf" }
+#undef TEMPLATE_INLINE_PROFILING
diff --git a/vm/compiler/template/mips/TEMPLATE_MEM_OP_DECODE.S b/vm/compiler/template/mips/TEMPLATE_MEM_OP_DECODE.S
new file mode 100644
index 0000000..038ccfd
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_MEM_OP_DECODE.S
@@ -0,0 +1,165 @@
+#if defined(WITH_SELF_VERIFICATION)
+    /*
+     * This handler encapsulates heap memory ops for selfVerification mode.
+     *
+     * The call to the handler is inserted prior to a heap memory operation.
+     * This handler then calls a function to decode the memory op, and process
+     * it accordingly. Afterwards, the handler changes the return address to
+     * skip the memory op so it never gets executed.
+     */
+#ifdef HARD_FLOAT
+    /* push f0-f31 onto stack */
+    sw      f0, fr0*-4(sp)              # push f0
+    sw      f1, fr1*-4(sp)              # push f1
+    sw      f2, fr2*-4(sp)              # push f2
+    sw      f3, fr3*-4(sp)              # push f3
+    sw      f4, fr4*-4(sp)              # push f4
+    sw      f5, fr5*-4(sp)              # push f5
+    sw      f6, fr6*-4(sp)              # push f6
+    sw      f7, fr7*-4(sp)              # push f7
+    sw      f8, fr8*-4(sp)              # push f8
+    sw      f9, fr9*-4(sp)              # push f9
+    sw      f10, fr10*-4(sp)            # push f10
+    sw      f11, fr11*-4(sp)            # push f11
+    sw      f12, fr12*-4(sp)            # push f12
+    sw      f13, fr13*-4(sp)            # push f13
+    sw      f14, fr14*-4(sp)            # push f14
+    sw      f15, fr15*-4(sp)            # push f15
+    sw      f16, fr16*-4(sp)            # push f16
+    sw      f17, fr17*-4(sp)            # push f17
+    sw      f18, fr18*-4(sp)            # push f18
+    sw      f19, fr19*-4(sp)            # push f19
+    sw      f20, fr20*-4(sp)            # push f20
+    sw      f21, fr21*-4(sp)            # push f21
+    sw      f22, fr22*-4(sp)            # push f22
+    sw      f23, fr23*-4(sp)            # push f23
+    sw      f24, fr24*-4(sp)            # push f24
+    sw      f25, fr25*-4(sp)            # push f25
+    sw      f26, fr26*-4(sp)            # push f26
+    sw      f27, fr27*-4(sp)            # push f27
+    sw      f28, fr28*-4(sp)            # push f28
+    sw      f29, fr29*-4(sp)            # push f29
+    sw      f30, fr30*-4(sp)            # push f30
+    sw      f31, fr31*-4(sp)            # push f31
+
+    sub     sp, (32-0)*4                # adjust stack pointer
+#endif
+
+    /* push gp registers (except zero, gp, sp, and fp) */
+    .set noat
+    sw      AT, r_AT*-4(sp)             # push at
+    .set at
+    sw      v0, r_V0*-4(sp)             # push v0
+    sw      v1, r_V1*-4(sp)             # push v1
+    sw      a0, r_A0*-4(sp)             # push a0
+    sw      a1, r_A1*-4(sp)             # push a1
+    sw      a2, r_A2*-4(sp)             # push a2
+    sw      a3, r_A3*-4(sp)             # push a3
+    sw      t0, r_T0*-4(sp)             # push t0
+    sw      t1, r_T1*-4(sp)             # push t1
+    sw      t2, r_T2*-4(sp)             # push t2
+    sw      t3, r_T3*-4(sp)             # push t3
+    sw      t4, r_T4*-4(sp)             # push t4
+    sw      t5, r_T5*-4(sp)             # push t5
+    sw      t6, r_T6*-4(sp)             # push t6
+    sw      t7, r_T7*-4(sp)             # push t7
+    sw      s0, r_S0*-4(sp)             # push s0
+    sw      s1, r_S1*-4(sp)             # push s1
+    sw      s2, r_S2*-4(sp)             # push s2
+    sw      s3, r_S3*-4(sp)             # push s3
+    sw      s4, r_S4*-4(sp)             # push s4
+    sw      s5, r_S5*-4(sp)             # push s5
+    sw      s6, r_S6*-4(sp)             # push s6
+    sw      s7, r_S7*-4(sp)             # push s7
+    sw      t8, r_T8*-4(sp)             # push t8
+    sw      t9, r_T9*-4(sp)             # push t9
+    sw      k0, r_K0*-4(sp)             # push k0
+    sw      k1, r_K1*-4(sp)             # push k1
+    sw      ra, r_RA*-4(sp)             # push RA
+
+    # Note: even if we don't save all 32 registers, we still need to
+    #       adjust SP by 32 registers due to the way we are storing
+    #       the registers on the stack.
+    sub     sp, (32-0)*4                # adjust stack pointer
+
+    la     a2, .LdvmSelfVerificationMemOpDecode  # defined in footer.S
+    lw     a2, (a2)
+    move   a0, ra                       # a0<- link register
+    move   a1, sp                       # a1<- stack pointer
+    JALR(a2)
+
+    /* pop gp registers (except zero, gp, sp, and fp) */
+    # Note: even if we don't save all 32 registers, we still need to
+    #       adjust SP by 32 registers due to the way we are storing
+    #       the registers on the stack.
+    add     sp, (32-0)*4                # adjust stack pointer
+    .set noat
+    lw      AT, r_AT*-4(sp)             # pop at
+    .set at
+    lw      v0, r_V0*-4(sp)             # pop v0
+    lw      v1, r_V1*-4(sp)             # pop v1
+    lw      a0, r_A0*-4(sp)             # pop a0
+    lw      a1, r_A1*-4(sp)             # pop a1
+    lw      a2, r_A2*-4(sp)             # pop a2
+    lw      a3, r_A3*-4(sp)             # pop a3
+    lw      t0, r_T0*-4(sp)             # pop t0
+    lw      t1, r_T1*-4(sp)             # pop t1
+    lw      t2, r_T2*-4(sp)             # pop t2
+    lw      t3, r_T3*-4(sp)             # pop t3
+    lw      t4, r_T4*-4(sp)             # pop t4
+    lw      t5, r_T5*-4(sp)             # pop t5
+    lw      t6, r_T6*-4(sp)             # pop t6
+    lw      t7, r_T7*-4(sp)             # pop t7
+    lw      s0, r_S0*-4(sp)             # pop s0
+    lw      s1, r_S1*-4(sp)             # pop s1
+    lw      s2, r_S2*-4(sp)             # pop s2
+    lw      s3, r_S3*-4(sp)             # pop s3
+    lw      s4, r_S4*-4(sp)             # pop s4
+    lw      s5, r_S5*-4(sp)             # pop s5
+    lw      s6, r_S6*-4(sp)             # pop s6
+    lw      s7, r_S7*-4(sp)             # pop s7
+    lw      t8, r_T8*-4(sp)             # pop t8
+    lw      t9, r_T9*-4(sp)             # pop t9
+    lw      k0, r_K0*-4(sp)             # pop k0
+    lw      k1, r_K1*-4(sp)             # pop k1
+    lw      ra, r_RA*-4(sp)             # pop RA
+
+#ifdef HARD_FLOAT
+    /* pop f0-f31 from stack */
+    add     sp, (32-0)*4                # adjust stack pointer
+    lw      f0, fr0*-4(sp)              # pop f0
+    lw      f1, fr1*-4(sp)              # pop f1
+    lw      f2, fr2*-4(sp)              # pop f2
+    lw      f3, fr3*-4(sp)              # pop f3
+    lw      f4, fr4*-4(sp)              # pop f4
+    lw      f5, fr5*-4(sp)              # pop f5
+    lw      f6, fr6*-4(sp)              # pop f6
+    lw      f7, fr7*-4(sp)              # pop f7
+    lw      f8, fr8*-4(sp)              # pop f8
+    lw      f9, fr9*-4(sp)              # pop f9
+    lw      f10, fr10*-4(sp)            # pop f10
+    lw      f11, fr11*-4(sp)            # pop f11
+    lw      f12, fr12*-4(sp)            # pop f12
+    lw      f13, fr13*-4(sp)            # pop f13
+    lw      f14, fr14*-4(sp)            # pop f14
+    lw      f15, fr15*-4(sp)            # pop f15
+    lw      f16, fr16*-4(sp)            # pop f16
+    lw      f17, fr17*-4(sp)            # pop f17
+    lw      f18, fr18*-4(sp)            # pop f18
+    lw      f19, fr19*-4(sp)            # pop f19
+    lw      f20, fr20*-4(sp)            # pop f20
+    lw      f21, fr21*-4(sp)            # pop f21
+    lw      f22, fr22*-4(sp)            # pop f22
+    lw      f23, fr23*-4(sp)            # pop f23
+    lw      f24, fr24*-4(sp)            # pop f24
+    lw      f25, fr25*-4(sp)            # pop f25
+    lw      f26, fr26*-4(sp)            # pop f26
+    lw      f27, fr27*-4(sp)            # pop f27
+    lw      f28, fr28*-4(sp)            # pop f28
+    lw      f29, fr29*-4(sp)            # pop f29
+    lw      f30, fr30*-4(sp)            # pop f30
+    lw      f31, fr31*-4(sp)            # pop f31
+#endif
+
+    RETURN
+#endif
diff --git a/vm/compiler/template/mips/TEMPLATE_MONITOR_ENTER.S b/vm/compiler/template/mips/TEMPLATE_MONITOR_ENTER.S
new file mode 100644
index 0000000..902cdb7
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_MONITOR_ENTER.S
@@ -0,0 +1,25 @@
+    /*
+     * Call out to the runtime to lock an object.  Because this thread
+     * may have been suspended in THREAD_MONITOR state and the Jit's
+     * translation cache subsequently cleared, we cannot return directly.
+     * Instead, unconditionally transition to the interpreter to resume.
+     *
+     * On entry:
+     *    a0 - self pointer
+     *    a1 - the object (which has already been null-checked by the caller)
+     *    rPC - the Dalvik PC of the following instruction.
+     */
+    la     a2, .LdvmLockObject
+    lw     t9, (a2)
+    sw     zero, offThread_inJitCodeCache(a0)   # record that we're not returning
+    JALR(t9)                                    # dvmLockObject(self, obj)
+    lw     gp, STACK_OFFSET_GP(sp)
+
+    la     a2, .LdvmJitToInterpNoChain
+    lw     a2, (a2)
+
+    # Bail to interpreter - no chain [note - rPC still contains dPC]
+#if defined(WITH_JIT_TUNING)
+    li      a0, kHeavyweightMonitor
+#endif
+    jr      a2
diff --git a/vm/compiler/template/mips/TEMPLATE_MONITOR_ENTER_DEBUG.S b/vm/compiler/template/mips/TEMPLATE_MONITOR_ENTER_DEBUG.S
new file mode 100644
index 0000000..23bf661
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_MONITOR_ENTER_DEBUG.S
@@ -0,0 +1,30 @@
+    /*
+     * To support deadlock prediction, this version of MONITOR_ENTER
+     * will always call the heavyweight dvmLockObject, check for an
+     * exception and then bail out to the interpreter.
+     *
+     * On entry:
+     *    a0 - self pointer
+     *    a1 - the object (which has already been null-checked by the caller)
+     *    rPC - the Dalvik PC of the following instruction.
+     *
+     */
+    la     a2, .LdvmLockObject
+    lw     t9, (a2)
+    sw     zero, offThread_inJitCodeCache(a0)   # record that we're not returning
+    JALR(t9)                                    # dvmLockObject(self, obj)
+    lw     gp, STACK_OFFSET_GP(sp)
+
+    # test for exception
+    lw     a1, offThread_exception(rSELF)
+    beqz   a1, 1f
+    sub    a0, rPC, 2                           # roll dPC back to this monitor instruction
+    j      .LhandleException
+1:
+    # Bail to interpreter - no chain [note - rPC still contains dPC]
+#if defined(WITH_JIT_TUNING)
+    li     a0, kHeavyweightMonitor
+#endif
+    la     a2, .LdvmJitToInterpNoChain
+    lw     a2, (a2)
+    jr     a2
diff --git a/vm/compiler/template/mips/TEMPLATE_MUL_DOUBLE_VFP.S b/vm/compiler/template/mips/TEMPLATE_MUL_DOUBLE_VFP.S
new file mode 100644
index 0000000..9254d76
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_MUL_DOUBLE_VFP.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/fbinopWide.S" {"instr":"JAL(__muldf3)","instr_f":"mul.d fv0, fa0, fa1"}
diff --git a/vm/compiler/template/mips/TEMPLATE_MUL_FLOAT_VFP.S b/vm/compiler/template/mips/TEMPLATE_MUL_FLOAT_VFP.S
new file mode 100644
index 0000000..c1517b3
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_MUL_FLOAT_VFP.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/fbinop.S" {"instr":"JAL(__mulsf3)","instr_f":"mul.s fv0, fa0, fa1"}
diff --git a/vm/compiler/template/mips/TEMPLATE_MUL_LONG.S b/vm/compiler/template/mips/TEMPLATE_MUL_LONG.S
new file mode 100644
index 0000000..d91dcb8
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_MUL_LONG.S
@@ -0,0 +1,27 @@
+%verify "executed"
+    /*
+     * Signed 64-bit integer multiply.
+     *
+     * For JIT: op1 in a0/a1, op2 in a2/a3, return in v0/v1
+     *
+     * Consider WXxYZ (a1a0 x a3a2) with a long multiply:
+     *
+     *         a1   a0
+     *   x     a3   a2
+     *   -------------
+     *       a2a1 a2a0
+     *       a3a0
+     *  a3a1 (<= unused)
+     *  ---------------
+     *         v1   v0
+     *
+     */
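+    /*
+     * Note (illustrative): only the low 64 bits of the product are kept, so
+     * the a3 x a1 partial product, which contributes exclusively to bits 64
+     * and above, never needs to be computed.
+     */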
+    /* mul-long vAA, vBB, vCC */
+    mul     rRESULT1,rARG3,rARG0              #  v1= a3a0
+    multu   rARG2,rARG0
+    mfhi    t1
+    mflo    rRESULT0                          #  v0= a2a0
+    mul     t0,rARG2,rARG1                    #  t0= a2a1
+    addu    rRESULT1,rRESULT1,t1              #  v1= a3a0 + hi(a2a0)
+    addu    rRESULT1,rRESULT1,t0              #  v1= a3a0 + hi(a2a0) + a2a1;
+    RETURN
diff --git a/vm/compiler/template/mips/TEMPLATE_PERIODIC_PROFILING.S b/vm/compiler/template/mips/TEMPLATE_PERIODIC_PROFILING.S
new file mode 100644
index 0000000..89031fd
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_PERIODIC_PROFILING.S
@@ -0,0 +1,28 @@
+    /*
+     * Increment profile counter for this trace, and decrement
+     * sample counter.  If sample counter goes below zero, turn
+     * off profiling.
+     *
+     * On entry:
+     * (ra-16) is the address of a pointer to the counter.  Note: on MIPS the
+     *    counter address actually lives 16 bytes before the return target:
+     *     - 4 bytes for prof count addr.
+     *     - 4 bytes for chain cell offset (2 bytes, 32-bit aligned).
+     *     - 4 bytes for call TEMPLATE_PERIODIC_PROFILING.
+     *     - 4 bytes for call delay slot.
+     */
+     lw     a0, -16(ra)
+     lw     a1, offThread_pProfileCountdown(rSELF)
+     lw     a2, 0(a0)                   # get counter
+     lw     a3, 0(a1)                   # get countdown timer
+     addu   a2, 1
+     sub    a3, 1                       # FIXME - bug in ARM code???
+     bltz   a3, .L${opcode}_disable_profiling
+     sw     a2, 0(a0)
+     sw     a3, 0(a1)
+     RETURN
+.L${opcode}_disable_profiling:
+     move   rTEMP, ra                   # preserve ra
+     la     a0, dvmJitTraceProfilingOff
+     JALR(a0)
+     jr     rTEMP
diff --git a/vm/compiler/template/mips/TEMPLATE_RESTORE_STATE.S b/vm/compiler/template/mips/TEMPLATE_RESTORE_STATE.S
new file mode 100644
index 0000000..a4c505b
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_RESTORE_STATE.S
@@ -0,0 +1,91 @@
+    /*
+     * This handler restores state following a selfVerification memory access.
+     * On entry:
+     *    a0 - offset from rSELF to the 1st element of the coreRegs save array.
+     * Note: the following registers are not restored
+     *       zero, AT, gp, sp, fp, ra
+     */
+
+    add     a0, a0, rSELF               # pointer to heapArgSpace.coreRegs[0]
+#if 0
+    lw      zero, r_ZERO*4(a0)          # restore zero
+#endif
+    .set noat
+    lw      AT, r_AT*4(a0)              # restore at
+    .set at
+    lw      v0, r_V0*4(a0)              # restore v0
+    lw      v1, r_V1*4(a0)              # restore v1
+
+    lw      a1, r_A1*4(a0)              # restore a1
+    lw      a2, r_A2*4(a0)              # restore a2
+    lw      a3, r_A3*4(a0)              # restore a3
+
+    lw      t0, r_T0*4(a0)              # restore t0
+    lw      t1, r_T1*4(a0)              # restore t1
+    lw      t2, r_T2*4(a0)              # restore t2
+    lw      t3, r_T3*4(a0)              # restore t3
+    lw      t4, r_T4*4(a0)              # restore t4
+    lw      t5, r_T5*4(a0)              # restore t5
+    lw      t6, r_T6*4(a0)              # restore t6
+    lw      t7, r_T7*4(a0)              # restore t7
+
+    lw      s0, r_S0*4(a0)              # restore s0
+    lw      s1, r_S1*4(a0)              # restore s1
+    lw      s2, r_S2*4(a0)              # restore s2
+    lw      s3, r_S3*4(a0)              # restore s3
+    lw      s4, r_S4*4(a0)              # restore s4
+    lw      s5, r_S5*4(a0)              # restore s5
+    lw      s6, r_S6*4(a0)              # restore s6
+    lw      s7, r_S7*4(a0)              # restore s7
+
+    lw      t8, r_T8*4(a0)              # restore t8
+    lw      t9, r_T9*4(a0)              # restore t9
+
+    lw      k0, r_K0*4(a0)              # restore k0
+    lw      k1, r_K1*4(a0)              # restore k1
+
+#if 0
+    lw      gp, r_GP*4(a0)              # restore gp
+    lw      sp, r_SP*4(a0)              # restore sp
+    lw      fp, r_FP*4(a0)              # restore fp
+    lw      ra, r_RA*4(a0)              # restore ra
+#endif
+
+/* #ifdef HARD_FLOAT */
+#if 0
+    lw      f0, fr0*4(a0)               # restore f0
+    lw      f1, fr1*4(a0)               # restore f1
+    lw      f2, fr2*4(a0)               # restore f2
+    lw      f3, fr3*4(a0)               # restore f3
+    lw      f4, fr4*4(a0)               # restore f4
+    lw      f5, fr5*4(a0)               # restore f5
+    lw      f6, fr6*4(a0)               # restore f6
+    lw      f7, fr7*4(a0)               # restore f7
+    lw      f8, fr8*4(a0)               # restore f8
+    lw      f9, fr9*4(a0)               # restore f9
+    lw      f10, fr10*4(a0)             # restore f10
+    lw      f11, fr11*4(a0)             # restore f11
+    lw      f12, fr12*4(a0)             # restore f12
+    lw      f13, fr13*4(a0)             # restore f13
+    lw      f14, fr14*4(a0)             # restore f14
+    lw      f15, fr15*4(a0)             # restore f15
+    lw      f16, fr16*4(a0)             # restore f16
+    lw      f17, fr17*4(a0)             # restore f17
+    lw      f18, fr18*4(a0)             # restore f18
+    lw      f19, fr19*4(a0)             # restore f19
+    lw      f20, fr20*4(a0)             # restore f20
+    lw      f21, fr21*4(a0)             # restore f21
+    lw      f22, fr22*4(a0)             # restore f22
+    lw      f23, fr23*4(a0)             # restore f23
+    lw      f24, fr24*4(a0)             # restore f24
+    lw      f25, fr25*4(a0)             # restore f25
+    lw      f26, fr26*4(a0)             # restore f26
+    lw      f27, fr27*4(a0)             # restore f27
+    lw      f28, fr28*4(a0)             # restore f28
+    lw      f29, fr29*4(a0)             # restore f29
+    lw      f30, fr30*4(a0)             # restore f30
+    lw      f31, fr31*4(a0)             # restore f31
+#endif
+
+    lw      a0, r_A0*4(a0)              # restore a0 (do this last; a0 is the base)
+    RETURN
diff --git a/vm/compiler/template/mips/TEMPLATE_RETURN.S b/vm/compiler/template/mips/TEMPLATE_RETURN.S
new file mode 100644
index 0000000..e9cee05
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_RETURN.S
@@ -0,0 +1,77 @@
+    /*
+     * Unwind a frame from the Dalvik stack for compiled OP_RETURN_XXX.
+     * If the stored value in returnAddr is non-zero, the caller was compiled
+     * by the JIT, so return to the address in the code cache following the
+     * invoke instruction.  Otherwise, return to the special
+     * dvmJitToInterpNoChain entry point.
+     */
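+    /*
+     * Note: TEMPLATE_RETURN_PROF.S re-includes this file with
+     * TEMPLATE_INLINE_PROFILING defined, which enables the method-trace
+     * prologue guarded immediately below.
+     */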
+#if defined(TEMPLATE_INLINE_PROFILING)
+    # preserve a0-a2 and ra
+    SCRATCH_STORE(a0, 0)
+    SCRATCH_STORE(a1, 4)
+    SCRATCH_STORE(a2, 8)
+    SCRATCH_STORE(ra, 12)
+
+    # a0=rSELF
+    move    a0, rSELF
+    la      t9, dvmFastMethodTraceExit
+    JALR(t9)
+    lw      gp, STACK_OFFSET_GP(sp)
+
+    # restore a0-a2 and ra
+    SCRATCH_LOAD(ra, 12)
+    SCRATCH_LOAD(a2, 8)
+    SCRATCH_LOAD(a1, 4)
+    SCRATCH_LOAD(a0, 0)
+#endif
+    SAVEAREA_FROM_FP(a0, rFP)           # a0<- saveArea (old)
+    lw      t0, offStackSaveArea_prevFrame(a0)     # t0<- saveArea->prevFrame
+    lbu     t1, offThread_breakFlags(rSELF)        # t1<- breakFlags
+    lw      rPC, offStackSaveArea_savedPc(a0)      # rPC<- saveArea->savedPc
+#if !defined(WITH_SELF_VERIFICATION)
+    lw      t2,  offStackSaveArea_returnAddr(a0)   # t2<- chaining cell ret
+#else
+    move    t2, zero                               # disable chaining
+#endif
+    lw      a2, offStackSaveArea_method - sizeofStackSaveArea(t0)
+                                                   # a2<- method we're returning to
+#if !defined(WITH_SELF_VERIFICATION)
+    beq     a2, zero, 1f                           # bail to interpreter
+#else
+    bne     a2, zero, 2f
+    JALR(ra)                                       # punt to interpreter and compare state
+    # DOUG: assume this does not return ???
+2:
+#endif
+    la      t4, .LdvmJitToInterpNoChainNoProfile   # defined in footer.S
+    lw      a1, (t4)
+    move    rFP, t0                                # publish new FP
+    beq     a2, zero, 4f
+    lw      t0, offMethod_clazz(a2)                # t0<- method->clazz
+4:
+
+    sw      a2, offThread_method(rSELF)            # self->method = newSave->method
+    lw      a0, offClassObject_pDvmDex(t0)         # a0<- method->clazz->pDvmDex
+    sw      rFP, offThread_curFrame(rSELF)         # self->curFrame = fp
+    add     rPC, rPC, 3*2                          # publish new rPC
+    sw      a0, offThread_methodClassDex(rSELF)
+    movn    t2, zero, t1                           # check the breakFlags and
+                                                   # clear the chaining cell address
+    sw      t2, offThread_inJitCodeCache(rSELF)    # in code cache or not
+    beq     t2, zero, 3f                           # chaining cell exists?
+    JALR(t2)                                       # jump to the chaining cell
+    # DOUG: assume this does not return ???
+3:
+#if defined(WITH_JIT_TUNING)
+    li      a0, kCallsiteInterpreted
+#endif
+    j       a1                                     # callsite is interpreted
+1:
+    sw      zero, offThread_inJitCodeCache(rSELF)  # reset inJitCodeCache
+    SAVE_PC_TO_SELF()                              # SAVE_PC_FP_TO_SELF()
+    SAVE_FP_TO_SELF()
+    la      t4, .LdvmMterpStdBail                  # defined in footer.S
+    lw      a2, (t4)
+    move    a0, rSELF                              # Expecting rSELF in a0
+    JALR(a2)                                       # exit the interpreter
+    # DOUG: assume this does not return ???
diff --git a/vm/compiler/template/mips/TEMPLATE_RETURN_PROF.S b/vm/compiler/template/mips/TEMPLATE_RETURN_PROF.S
new file mode 100644
index 0000000..b4e0754
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_RETURN_PROF.S
@@ -0,0 +1,3 @@
+#define TEMPLATE_INLINE_PROFILING
+%include "mips/TEMPLATE_RETURN.S"
+#undef TEMPLATE_INLINE_PROFILING
diff --git a/vm/compiler/template/mips/TEMPLATE_SAVE_STATE.S b/vm/compiler/template/mips/TEMPLATE_SAVE_STATE.S
new file mode 100644
index 0000000..2e74481
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_SAVE_STATE.S
@@ -0,0 +1,105 @@
+    /*
+     * This handler performs a register save for selfVerification mode.
+     * On entry:
+     *    Top of stack + 4: a1 value to save
+     *    Top of stack + 0: a0 value to save
+     *    a0 - offset from rSELF to the beginning of the heapArgSpace record
+     *    a1 - the value of regMap
+     *
+     * The handler must save regMap, r0-r31, f0-f31 if FPU, and then return with
+     * r0-r31 with their original values (note that this means a0 and a1 must take
+     * the values on the stack - not the ones in those registers on entry).
+     * Finally, the two registers previously pushed must be popped.
+     * Note: the following registers are not saved
+     *       zero, AT, gp, sp, fp, ra
+     */
+    add     a0, a0, rSELF               # pointer to heapArgSpace
+    sw      a1, 0(a0)                   # save regMap
+    add     a0, a0, 4                   # pointer to coreRegs
+#if 0
+    sw      zero, r_ZERO*4(a0)          # save zero
+#endif
+    .set noat
+    sw      AT, r_AT*4(a0)              # save at
+    .set at
+    sw      v0, r_V0*4(a0)              # save v0
+    sw      v1, r_V1*4(a0)              # save v1
+
+    lw      a1, 0(sp)                   # recover a0 value
+    sw      a1, r_A0*4(a0)              # save a0
+    lw      a1, 4(sp)                   # recover a1 value
+    sw      a1, r_A1*4(a0)              # save a1
+    sw      a2, r_A2*4(a0)              # save a2
+    sw      a3, r_A3*4(a0)              # save a3
+
+    sw      t0, r_T0*4(a0)              # save t0
+    sw      t1, r_T1*4(a0)              # save t1
+    sw      t2, r_T2*4(a0)              # save t2
+    sw      t3, r_T3*4(a0)              # save t3
+    sw      t4, r_T4*4(a0)              # save t4
+    sw      t5, r_T5*4(a0)              # save t5
+    sw      t6, r_T6*4(a0)              # save t6
+    sw      t7, r_T7*4(a0)              # save t7
+
+    sw      s0, r_S0*4(a0)              # save s0
+    sw      s1, r_S1*4(a0)              # save s1
+    sw      s2, r_S2*4(a0)              # save s2
+    sw      s3, r_S3*4(a0)              # save s3
+    sw      s4, r_S4*4(a0)              # save s4
+    sw      s5, r_S5*4(a0)              # save s5
+    sw      s6, r_S6*4(a0)              # save s6
+    sw      s7, r_S7*4(a0)              # save s7
+
+    sw      t8, r_T8*4(a0)              # save t8
+    sw      t9, r_T9*4(a0)              # save t9
+
+    sw      k0, r_K0*4(a0)              # save k0
+    sw      k1, r_K1*4(a0)              # save k1
+
+#if 0
+    sw      gp, r_GP*4(a0)              # save gp
+    sw      sp, r_SP*4(a0)              # save sp (need to adjust??? )
+    sw      fp, r_FP*4(a0)              # save fp
+    sw      ra, r_RA*4(a0)              # save ra
+#endif
+
+/* #ifdef HARD_FLOAT */
+#if 0
+    sw      f0, fr0*4(a0)               # save f0
+    sw      f1, fr1*4(a0)               # save f1
+    sw      f2, fr2*4(a0)               # save f2
+    sw      f3, fr3*4(a0)               # save f3
+    sw      f4, fr4*4(a0)               # save f4
+    sw      f5, fr5*4(a0)               # save f5
+    sw      f6, fr6*4(a0)               # save f6
+    sw      f7, fr7*4(a0)               # save f7
+    sw      f8, fr8*4(a0)               # save f8
+    sw      f9, fr9*4(a0)               # save f9
+    sw      f10, fr10*4(a0)             # save f10
+    sw      f11, fr11*4(a0)             # save f11
+    sw      f12, fr12*4(a0)             # save f12
+    sw      f13, fr13*4(a0)             # save f13
+    sw      f14, fr14*4(a0)             # save f14
+    sw      f15, fr15*4(a0)             # save f15
+    sw      f16, fr16*4(a0)             # save f16
+    sw      f17, fr17*4(a0)             # save f17
+    sw      f18, fr18*4(a0)             # save f18
+    sw      f19, fr19*4(a0)             # save f19
+    sw      f20, fr20*4(a0)             # save f20
+    sw      f21, fr21*4(a0)             # save f21
+    sw      f22, fr22*4(a0)             # save f22
+    sw      f23, fr23*4(a0)             # save f23
+    sw      f24, fr24*4(a0)             # save f24
+    sw      f25, fr25*4(a0)             # save f25
+    sw      f26, fr26*4(a0)             # save f26
+    sw      f27, fr27*4(a0)             # save f27
+    sw      f28, fr28*4(a0)             # save f28
+    sw      f29, fr29*4(a0)             # save f29
+    sw      f30, fr30*4(a0)             # save f30
+    sw      f31, fr31*4(a0)             # save f31
+#endif
+
+    lw      a0, 0(sp)                   # recover a0 value
+    lw      a1, 4(sp)                   # recover a1 value
+    add     sp, sp, 8                   # pop the two saved values
+    RETURN
diff --git a/vm/compiler/template/mips/TEMPLATE_SHL_LONG.S b/vm/compiler/template/mips/TEMPLATE_SHL_LONG.S
new file mode 100644
index 0000000..b15f0c3
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_SHL_LONG.S
@@ -0,0 +1,17 @@
+    /*
+     * Long integer shift.  This is different from the generic 32/64-bit
+     * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit.  Also, Dalvik requires us to ignore all but the low
+     * 6 bits.
+     */
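+    /*
+     * Note (illustrative): MIPS shift instructions use only the low 5 bits
+     * of the shift amount, so shifts of 32..63 are handled separately: bit 5
+     * (0x20) of vCC selects the "big shift" results via the conditional
+     * moves below.  For example, shl-long by 33 yields rlo = 0 and
+     * rhi = alo << 1.
+     */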
+    /* shl-long vAA:vBB(rARG1:rARG0), vCC(a2) - result in (rRESULT1:rRESULT0) */
+    sll     rRESULT0, rARG0, a2		#  rlo<- alo << (shift&31)
+    not     rRESULT1, a2		#  rhi<- 31-shift  (shift is 5b)
+    srl     rARG0, 1
+    srl     rARG0, rRESULT1		#  alo<- alo >> (32-(shift&31))
+    sll     rRESULT1, rARG1, a2		#  rhi<- ahi << (shift&31)
+    or      rRESULT1, rARG0		#  rhi<- rhi | alo
+    andi    a2, 0x20			#  shift<- shift & 0x20
+    movn    rRESULT1, rRESULT0, a2	#  rhi<- rlo (if shift&0x20)
+    movn    rRESULT0, zero, a2		#  rlo<- 0  (if shift&0x20)
+    RETURN
diff --git a/vm/compiler/template/mips/TEMPLATE_SHR_LONG.S b/vm/compiler/template/mips/TEMPLATE_SHR_LONG.S
new file mode 100644
index 0000000..e59686d
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_SHR_LONG.S
@@ -0,0 +1,18 @@
+    /*
+     * Long integer shift.  This is different from the generic 32/64-bit
+     * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit.  Also, Dalvik requires us to ignore all but the low
+     * 6 bits.
+     */
+    /* shr-long vAA:vBB(rARG1:rARG0), vCC(a2) - result in (rRESULT1:rRESULT0) */
+    sra     rRESULT1, rARG1, a2		#  rhi<- ahi >> (shift&31)
+    srl     rRESULT0, rARG0, a2		#  rlo<- alo >> (shift&31)
+    sra     a3, rARG1, 31		#  a3<- sign(ah)
+    not     rARG0, a2			#  alo<- 31-shift (shift is 5b)
+    sll     rARG1, 1
+    sll     rARG1, rARG0		#  ahi<- ahi << (32-(shift&31))
+    or      rRESULT0, rARG1		#  rlo<- rlo | ahi
+    andi    a2, 0x20			#  shift & 0x20
+    movn    rRESULT0, rRESULT1, a2	#  rlo<- rhi (if shift&0x20)
+    movn    rRESULT1, a3, a2		#  rhi<- sign(ahi) (if shift&0x20)
+    RETURN
diff --git a/vm/compiler/template/mips/TEMPLATE_SQRT_DOUBLE_VFP.S b/vm/compiler/template/mips/TEMPLATE_SQRT_DOUBLE_VFP.S
new file mode 100644
index 0000000..253a4a4
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_SQRT_DOUBLE_VFP.S
@@ -0,0 +1,23 @@
+%verify "executed"
+
+    /*
+     * 64-bit floating point sqrt operation.
+     * If the result is a NaN, bail out to library code to do
+     * the right thing.
+     *
+     * On entry:
+     *     a2 src addr of op1
+     * On exit:
+     *     v0,v1/fv0 = res
+     */
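+    /*
+     * Note: "c.eq.d fv0, fv0" below is the standard NaN self-test; a NaN is
+     * the only value that compares unequal to itself, so the library sqrt is
+     * invoked only when the hardware sqrt.d produced a NaN.
+     */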
+#ifdef  SOFT_FLOAT
+    LOAD64(rARG0, rARG1, a2)        # a0/a1<- vBB/vBB+1
+#else
+    LOAD64_F(fa0, fa0f, a2)         # fa0/fa0f<- vBB/vBB+1
+    sqrt.d	fv0, fa0
+    c.eq.d	fv0, fv0
+    bc1t	1f
+#endif
+    JAL(sqrt)
+1:
+    RETURN
diff --git a/vm/compiler/template/mips/TEMPLATE_STRING_COMPARETO.S b/vm/compiler/template/mips/TEMPLATE_STRING_COMPARETO.S
new file mode 100644
index 0000000..a514351
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_STRING_COMPARETO.S
@@ -0,0 +1,146 @@
+    /*
+     * String's compareTo.
+     *
+     * Requires a0/a1 to have been previously checked for null.  Will
+     * return negative if this string is < comp, 0 if they are the
+     * same, and positive if >.
+     *
+     * IMPORTANT NOTE:
+     *
+     * This code relies on hard-coded offsets for string objects, and must be
+     * kept in sync with definitions in UtfString.h.  See asm-constants.h
+     *
+     * On entry:
+     *    a0:   this object pointer
+     *    a1:   comp object pointer
+     *
+     */
+
+     subu  v0, a0, a1                # Same?
+     bnez  v0, 1f
+     RETURN
+1:
+     lw    t0, STRING_FIELDOFF_OFFSET(a0)
+     lw    t1, STRING_FIELDOFF_OFFSET(a1)
+     lw    t2, STRING_FIELDOFF_COUNT(a0)
+     lw    a2, STRING_FIELDOFF_COUNT(a1)
+     lw    a0, STRING_FIELDOFF_VALUE(a0)
+     lw    a1, STRING_FIELDOFF_VALUE(a1)
+
+    /*
+     * At this point, we have this/comp:
+     *    offset: t0/t1
+     *    count:  t2/a2
+     *    value:  a0/a1
+     * We're going to compute
+     *    a3 <- countDiff
+     *    a2 <- minCount
+     */
+     subu  a3, t2, a2                # a3<- countDiff
+     sleu  t7, t2, a2
+     movn  a2, t2, t7                # a2<- minCount
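+     # sleu sets t7 when t2 <= a2, and movn copies t2 into a2 only when t7
+     # is nonzero, so together they compute a2 <- min(t2, a2) branch-free.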
+
+     /*
+      * Note: data pointers point to first element.
+      */
+     addu  a0, 16                    # point to contents[0]
+     addu  a1, 16                    # point to contents[0]
+
+     /* Now, build pointers to the string data */
+     sll   t7, t0, 1                 # multiply offset by 2
+     addu  a0, a0, t7
+     sll   t7, t1, 1                 # multiply offset by 2
+     addu  a1, a1, t7
+
+     /*
+      * At this point we have:
+      *   a0: *this string data
+      *   a1: *comp string data
+      *   a2: iteration count for comparison
+      *   a3: value to return if the first part of the string is equal
+      *   v0: reserved for result
+      *   t0-t5 available for loading string data
+      */
+
+     subu  a2, 2
+     bltz  a2, do_remainder2
+
+     /*
+      * Unroll the first two checks so we can quickly catch early mismatch
+      * on long strings (but preserve incoming alignment)
+      */
+     lhu   t0, 0(a0)
+     lhu   t1, 0(a1)
+     subu  v0, t0, t1
+     beqz  v0, 1f
+     RETURN
+1:
+     lhu   t2, 2(a0)
+     lhu   t3, 2(a1)
+     subu  v0, t2, t3
+     beqz  v0, 2f
+     RETURN
+2:
+     addu  a0, 4                     # offset to contents[2]
+     addu  a1, 4                     # offset to contents[2]
+     li    t7, 28
+     bgt   a2, t7, do_memcmp16
+     subu  a2, 3
+     bltz  a2, do_remainder
+
+loopback_triple:
+     lhu   t0, 0(a0)
+     lhu   t1, 0(a1)
+     subu  v0, t0, t1
+     beqz  v0, 1f
+     RETURN
+1:
+     lhu   t2, 2(a0)
+     lhu   t3, 2(a1)
+     subu  v0, t2, t3
+     beqz  v0, 2f
+     RETURN
+2:
+     lhu   t4, 4(a0)
+     lhu   t5, 4(a1)
+     subu  v0, t4, t5
+     beqz  v0, 3f
+     RETURN
+3:
+     addu  a0, 6                     # offset to contents[i+3]
+     addu  a1, 6                     # offset to contents[i+3]
+     subu  a2, 3
+     bgez  a2, loopback_triple
+
+do_remainder:
+     addu  a2, 3
+     beqz  a2, returnDiff
+
+loopback_single:
+     lhu   t0, 0(a0)
+     lhu   t1, 0(a1)
+     subu  v0, t0, t1
+     bnez  v0, 1f
+     addu  a0, 2                     # offset to contents[i+1]
+     addu  a1, 2                     # offset to contents[i+1]
+     subu  a2, 1
+     bnez  a2, loopback_single
+
+returnDiff:
+     move  v0, a3
+1:
+     RETURN
+
+do_remainder2:
+     addu  a2, 2
+     bnez  a2, loopback_single
+     move  v0, a3
+     RETURN
+
+    /* Long string case */
+do_memcmp16:
+     move  rOBJ, a3                  # save return value if strings are equal
+     JAL(__memcmp16)
+     seq   t0, v0, zero
+     movn  v0, rOBJ, t0              # overwrite return value if strings are equal
+     RETURN
diff --git a/vm/compiler/template/mips/TEMPLATE_STRING_INDEXOF.S b/vm/compiler/template/mips/TEMPLATE_STRING_INDEXOF.S
new file mode 100644
index 0000000..9d9cd60
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_STRING_INDEXOF.S
@@ -0,0 +1,111 @@
+    /*
+     * String's indexOf.
+     *
+     * Requires a0 to have been previously checked for null.  Will
+     * return the index of the first match of a1 in v0.
+     *
+     * IMPORTANT NOTE:
+     *
+     * This code relies on hard-coded offsets for string objects, and must be
+     * kept in sync with definitions in UtfString.h.  See asm-constants.h
+     *
+     * On entry:
+     *    a0:   string object pointer
+     *    a1:   char to match
+     *    a2:   Starting offset in string data
+     */
+
+     lw    t0, STRING_FIELDOFF_OFFSET(a0)
+     lw    t1, STRING_FIELDOFF_COUNT(a0)
+     lw    v0, STRING_FIELDOFF_VALUE(a0)
+
+    /*
+     * At this point, we have:
+     *    v0: object pointer
+     *    a1: char to match
+     *    a2: starting offset
+     *    t0: offset
+     *    t1: string length
+     */
+
+    /* Point to first element */
+     addu  v0, 16                    # point to contents[0]
+
+    /* Build pointer to start of string data */
+     sll   t7, t0, 1                 # multiply offset by 2
+     addu  v0, v0, t7
+
+    /* Save a copy of starting data in v1 */
+     move  v1, v0
+
+    /* Clamp start to [0..count] */
+     slt   t7, a2, zero
+     movn  a2, zero, t7
+     sgt   t7, a2, t1
+     movn  a2, t1, t7
+
+    /* Build pointer to start of data to compare */
+     sll   t7, a2, 1                # multiply offset by 2
+     addu  v0, v0, t7
+
+    /* Compute iteration count */
+     subu  a3, t1, a2
+
+    /*
+     * At this point we have:
+     *   v0: start of data to test
+     *   a1: char to compare
+     *   a3: iteration count
+     *   v1: original start of string
+     *   t0-t7 available for loading string data
+     */
+     subu  a3, 4
+     bltz  a3, indexof_remainder
+
+indexof_loop4:
+     lhu   t0, 0(v0)
+     beq   t0, a1, match_0
+     lhu   t0, 2(v0)
+     beq   t0, a1, match_1
+     lhu   t0, 4(v0)
+     beq   t0, a1, match_2
+     lhu   t0, 6(v0)
+     beq   t0, a1, match_3
+     addu  v0, 8                     # offset to contents[i+4]
+     subu  a3, 4
+     bgez  a3, indexof_loop4
+
+indexof_remainder:
+     addu  a3, 4
+     beqz  a3, indexof_nomatch
+
+indexof_loop1:
+     lhu   t0, 0(v0)
+     beq   t0, a1, match_0
+     addu  v0, 2                     # offset to contents[i+1]
+     subu  a3, 1
+     bnez  a3, indexof_loop1
+
+indexof_nomatch:
+     li    v0, -1
+     RETURN
+
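+    /*
+     * Each match label below adds the matched element's byte offset,
+     * subtracts the original data pointer, and halves the byte difference
+     * to convert it into a UTF-16 char index.
+     */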
+match_0:
+     subu  v0, v1
+     sra   v0, v0, 1                 # divide by 2
+     RETURN
+match_1:
+     addu  v0, 2
+     subu  v0, v1
+     sra   v0, v0, 1                 # divide by 2
+     RETURN
+match_2:
+     addu  v0, 4
+     subu  v0, v1
+     sra   v0, v0, 1                 # divide by 2
+     RETURN
+match_3:
+     addu  v0, 6
+     subu  v0, v1
+     sra   v0, v0, 1                 # divide by 2
+     RETURN
diff --git a/vm/compiler/template/mips/TEMPLATE_SUB_DOUBLE_VFP.S b/vm/compiler/template/mips/TEMPLATE_SUB_DOUBLE_VFP.S
new file mode 100644
index 0000000..b07bf44
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_SUB_DOUBLE_VFP.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/fbinopWide.S" {"instr":"JAL(__subdf3)","instr_f":"sub.d fv0, fa0, fa1"}
diff --git a/vm/compiler/template/mips/TEMPLATE_SUB_FLOAT_VFP.S b/vm/compiler/template/mips/TEMPLATE_SUB_FLOAT_VFP.S
new file mode 100644
index 0000000..b0333cd
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_SUB_FLOAT_VFP.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/fbinop.S" {"instr":"JAL(__subsf3)","instr_f":"sub.s fv0, fa0, fa1"}
diff --git a/vm/compiler/template/mips/TEMPLATE_THROW_EXCEPTION_COMMON.S b/vm/compiler/template/mips/TEMPLATE_THROW_EXCEPTION_COMMON.S
new file mode 100644
index 0000000..d6e8b2e
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_THROW_EXCEPTION_COMMON.S
@@ -0,0 +1,6 @@
+    /*
+     * Throw an exception from JIT'ed code.
+     * On entry:
+     *    a0    Dalvik PC that raises the exception
+     */
+    j      .LhandleException
diff --git a/vm/compiler/template/mips/TEMPLATE_USHR_LONG.S b/vm/compiler/template/mips/TEMPLATE_USHR_LONG.S
new file mode 100644
index 0000000..f2a9ddb
--- /dev/null
+++ b/vm/compiler/template/mips/TEMPLATE_USHR_LONG.S
@@ -0,0 +1,17 @@
+    /*
+     * Long integer shift.  This is different from the generic 32/64-bit
+     * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit.  Also, Dalvik requires us to ignore all but the low
+     * 6 bits.
+     */
+    /* ushr-long vAA:vBB(rARG1:rARG0), vCC(a2) - result in (rRESULT1:rRESULT0) */
+    srl     rRESULT1, rARG1, a2		#  rhi<- ahi >> (shift&31)
+    srl     rRESULT0, rARG0, a2		#  rlo<- alo >> (shift&31)
+    not     rARG0, a2			#  alo<- 31-n  (shift is 5b)
+    sll     rARG1, 1
+    sll     rARG1, rARG0		#  ahi<- ahi << (32-(shift&31))
+    or      rRESULT0, rARG1		#  rlo<- rlo | ahi
+    andi    a2, 0x20			#  shift & 0x20
+    movn    rRESULT0, rRESULT1, a2	#  rlo<- rhi (if shift&0x20)
+    movn    rRESULT1, zero, a2		#  rhi<- 0 (if shift&0x20)
+    RETURN
diff --git a/vm/compiler/template/mips/TemplateOpList.h b/vm/compiler/template/mips/TemplateOpList.h
new file mode 100644
index 0000000..6aabdba
--- /dev/null
+++ b/vm/compiler/template/mips/TemplateOpList.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Dalvik opcode list that uses additional templates to complete JIT execution.
+ */
+#ifndef JIT_TEMPLATE
+#define JIT_TEMPLATE(X)
+#endif
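+
+/*
+ * X-macro usage (illustrative): a consumer defines JIT_TEMPLATE(X) to emit
+ * an enum entry, a label, or a handler-table slot, then #includes this file
+ * so every template name is expanded in one place.
+ */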
+
+JIT_TEMPLATE(CMP_LONG)
+JIT_TEMPLATE(RETURN)
+JIT_TEMPLATE(INVOKE_METHOD_NO_OPT)
+JIT_TEMPLATE(INVOKE_METHOD_CHAIN)
+JIT_TEMPLATE(INVOKE_METHOD_PREDICTED_CHAIN)
+JIT_TEMPLATE(INVOKE_METHOD_NATIVE)
+JIT_TEMPLATE(MUL_LONG)
+JIT_TEMPLATE(SHL_LONG)
+JIT_TEMPLATE(SHR_LONG)
+JIT_TEMPLATE(USHR_LONG)
+JIT_TEMPLATE(ADD_FLOAT_VFP)
+JIT_TEMPLATE(SUB_FLOAT_VFP)
+JIT_TEMPLATE(MUL_FLOAT_VFP)
+JIT_TEMPLATE(DIV_FLOAT_VFP)
+JIT_TEMPLATE(ADD_DOUBLE_VFP)
+JIT_TEMPLATE(SUB_DOUBLE_VFP)
+JIT_TEMPLATE(MUL_DOUBLE_VFP)
+JIT_TEMPLATE(DIV_DOUBLE_VFP)
+JIT_TEMPLATE(DOUBLE_TO_FLOAT_VFP)
+JIT_TEMPLATE(DOUBLE_TO_INT_VFP)
+JIT_TEMPLATE(FLOAT_TO_DOUBLE_VFP)
+JIT_TEMPLATE(FLOAT_TO_INT_VFP)
+JIT_TEMPLATE(INT_TO_DOUBLE_VFP)
+JIT_TEMPLATE(INT_TO_FLOAT_VFP)
+JIT_TEMPLATE(CMPG_DOUBLE_VFP)
+JIT_TEMPLATE(CMPL_DOUBLE_VFP)
+JIT_TEMPLATE(CMPG_FLOAT_VFP)
+JIT_TEMPLATE(CMPL_FLOAT_VFP)
+JIT_TEMPLATE(SQRT_DOUBLE_VFP)
+JIT_TEMPLATE(THROW_EXCEPTION_COMMON)
+JIT_TEMPLATE(MEM_OP_DECODE)
+JIT_TEMPLATE(STRING_COMPARETO)
+JIT_TEMPLATE(STRING_INDEXOF)
+JIT_TEMPLATE(INTERPRET)
+JIT_TEMPLATE(MONITOR_ENTER)
+JIT_TEMPLATE(MONITOR_ENTER_DEBUG)
+JIT_TEMPLATE(RESTORE_STATE)
+JIT_TEMPLATE(SAVE_STATE)
+JIT_TEMPLATE(PERIODIC_PROFILING)
+JIT_TEMPLATE(RETURN_PROF)
+JIT_TEMPLATE(INVOKE_METHOD_NO_OPT_PROF)
+JIT_TEMPLATE(INVOKE_METHOD_CHAIN_PROF)
+JIT_TEMPLATE(INVOKE_METHOD_PREDICTED_CHAIN_PROF)
+JIT_TEMPLATE(INVOKE_METHOD_NATIVE_PROF)
diff --git a/vm/compiler/template/mips/fbinop.S b/vm/compiler/template/mips/fbinop.S
new file mode 100644
index 0000000..e9ccf0a
--- /dev/null
+++ b/vm/compiler/template/mips/fbinop.S
@@ -0,0 +1,38 @@
+%default {"preinstr":"", "chkzero":"0"}
+    /*
+     * Generic 32-bit binary float operation. a0 = a1 op a2.
+     *
+     * For: add-fp, sub-fp, mul-fp, div-fp
+     *
+     * On entry:
+     *     a0 = target dalvik register address
+     *     a1 = op1 address
+     *     a2 = op2 address
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     *
+     */
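+    /*
+     * Usage note: callers bind "instr"/"instr_f" at %include time; e.g.
+     * TEMPLATE_MUL_FLOAT_VFP.S supplies JAL(__mulsf3) and
+     * "mul.s fv0, fa0, fa1" for the soft- and hard-float paths respectively.
+     */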
+    move rOBJ, a0                       # save a0
+#ifdef  SOFT_FLOAT
+    LOAD(a0, a1)                        # a0<- vBB
+    LOAD(a1, a2)                        # a1<- vCC
+    .if $chkzero
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+    $preinstr                           # optional op
+    $instr                              # v0 = result
+    STORE(v0, rOBJ)                     # vAA <- v0
+#else
+    LOAD_F(fa0, a1)                     # fa0<- vBB
+    LOAD_F(fa1, a2)                     # fa1<- vCC
+    .if $chkzero
+    # is second operand zero?
+    li.s        ft0, 0
+    c.eq.s      fcc0, ft0, fa1          # condition bit and comparison with 0
+    bc1t        fcc0, common_errDivideByZero
+    .endif
+    $preinstr                           # optional op
+    $instr_f                            # fv0 = result
+    STORE_F(fv0, rOBJ)                  # vAA <- fv0
+#endif
+    RETURN
diff --git a/vm/compiler/template/mips/fbinopWide.S b/vm/compiler/template/mips/fbinopWide.S
new file mode 100644
index 0000000..0e31b87
--- /dev/null
+++ b/vm/compiler/template/mips/fbinopWide.S
@@ -0,0 +1,45 @@
+%default {"preinstr":"", "chkzero":"0"}
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+     *      xor-long, add-double, sub-double, mul-double, div-double,
+     *      rem-double
+     *
+     * On entry:
+     *     a0 = target dalvik register address
+     *     a1 = op1 address
+     *     a2 = op2 address
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     */
+    move rOBJ, a0                       # save a0
+#ifdef  SOFT_FLOAT
+    move t0, a1                         # save a1
+    move t1, a2                         # save a2
+    LOAD64(rARG0, rARG1, t0)            # a0/a1<- vBB/vBB+1
+    LOAD64(rARG2, rARG3, t1)            # a2/a3<- vCC/vCC+1
+    .if $chkzero
+    or          t0, rARG2, rARG3        # second arg (a2-a3) is zero?
+    beqz        t0, common_errDivideByZero
+    .endif
+    $preinstr                           # optional op
+    $instr                              # result<- op, a0-a3 changed
+    STORE64(rRESULT0, rRESULT1, rOBJ)
+#else
+    LOAD64_F(fa0, fa0f, a1)
+    LOAD64_F(fa1, fa1f, a2)
+    .if $chkzero
+    li.d        ft0, 0
+    c.eq.d      fcc0, fa1, ft0
+    bc1t        fcc0, common_errDivideByZero
+    .endif
+    $preinstr                           # optional op
+    $instr_f
+    STORE64_F(fv0, fv0f, rOBJ)
+#endif
+    RETURN
diff --git a/vm/compiler/template/mips/footer.S b/vm/compiler/template/mips/footer.S
new file mode 100644
index 0000000..42dc3dd
--- /dev/null
+++ b/vm/compiler/template/mips/footer.S
@@ -0,0 +1,138 @@
+/*
+ * ===========================================================================
+ *  Common subroutines and data
+ * ===========================================================================
+ */
+
+    .text
+    .align  4
+.LinvokeNative:
+    # Prep for the native call
+    # a1 = newFP, a0 = methodToCall
+    lw     t9, offThread_jniLocal_topCookie(rSELF)  # t9<- thread->localRef->...
+    sw     zero, offThread_inJitCodeCache(rSELF)    # not in jit code cache
+    sw     a1, offThread_curFrame(rSELF)            # self->curFrame = newFp
+    sw     t9, (offStackSaveArea_localRefCookie - sizeofStackSaveArea)(a1)
+                                                 # newFp->localRefCookie=top
+    lhu     ra, offThread_subMode(rSELF)
+    SAVEAREA_FROM_FP(rBIX, a1)                   # rBIX<- new stack save area
+
+    move    a2, a0                               # a2<- methodToCall
+    move    a0, a1                               # a0<- newFp
+    add     a1, rSELF, offThread_retval          # a1<- &retval
+    move    a3, rSELF                            # a3<- self
+    andi    ra, kSubModeMethodTrace
+    beqz    ra, 121f
+    # a2: methodToCall
+    # preserve a0-a3
+    SCRATCH_STORE(a0, 0)
+    SCRATCH_STORE(a1, 4)
+    SCRATCH_STORE(a2, 8)
+    SCRATCH_STORE(a3, 12)
+    move    rTEMP, a2                            # preserve a2
+
+    move    a0, rTEMP
+    move    a1, rSELF
+    la      t9, dvmFastMethodTraceEnter
+    JALR(t9)
+    lw      gp, STACK_OFFSET_GP(sp)
+
+    # restore a0-a3
+    SCRATCH_LOAD(a3, 12)
+    SCRATCH_LOAD(a2, 8)
+    SCRATCH_LOAD(a1, 4)
+    SCRATCH_LOAD(a0, 0)
+
+    lw      t9, offMethod_nativeFunc(a2)
+    JALR(t9)                                      # call methodToCall->nativeFunc
+    lw      gp, STACK_OFFSET_GP(sp)
+
+    move    a0, rTEMP
+    move    a1, rSELF
+    la      t9, dvmFastNativeMethodTraceExit
+    JALR(t9)
+    lw      gp, STACK_OFFSET_GP(sp)
+    b       212f
+
+121:
+    lw      t9, offMethod_nativeFunc(a2)
+    JALR(t9)                                     # call methodToCall->nativeFunc
+    lw      gp, STACK_OFFSET_GP(sp)
+
+212:
+    # native return; rBIX=newSaveArea
+    # equivalent to dvmPopJniLocals
+    lw     a2, offStackSaveArea_returnAddr(rBIX)     # a2 = chaining cell ret addr
+    lw     a0, offStackSaveArea_localRefCookie(rBIX) # a0<- saved->top
+    lw     a1, offThread_exception(rSELF)            # check for exception
+    sw     rFP, offThread_curFrame(rSELF)            # self->curFrame = fp
+    sw     a0, offThread_jniLocal_topCookie(rSELF)   # new top <- old top
+    lw     a0, offStackSaveArea_savedPc(rBIX)        # reload rPC
+
+    # a0 = dalvikCallsitePC
+    bnez   a1, .LhandleException                     # handle exception if any
+
+    sw     a2, offThread_inJitCodeCache(rSELF)       # set the mode properly
+    beqz   a2, 3f
+    jr     a2                                        # go if return chaining cell still exists
+
+3:
+    # continue executing the next instruction through the interpreter
+    la     a1, .LdvmJitToInterpTraceSelectNoChain    # defined in footer.S
+    lw     a1, (a1)
+    add    rPC, a0, 3*2                              # reconstruct new rPC
+
+#if defined(WITH_JIT_TUNING)
+    li     a0, kCallsiteInterpreted
+#endif
+    jr     a1
+
+
+/*
+ * On entry:
+ * a0  Faulting Dalvik PC
+ */
+.LhandleException:
+#if defined(WITH_SELF_VERIFICATION)
+    la     t0, .LdeadFood
+    lw     t0, (t0)                  # should not see this under self-verification mode
+    jr     t0
+.LdeadFood:
+    .word   0xdeadf00d
+#endif
+    sw     zero, offThread_inJitCodeCache(rSELF)  # in interpreter land
+    la     a1, .LdvmMterpCommonExceptionThrown  # PIC way of getting &func
+    lw     a1, (a1)
+    la     rIBASE, .LdvmAsmInstructionStart     # PIC way of getting &func
+    lw     rIBASE, (rIBASE)
+    move   rPC, a0                              # reload the faulting Dalvik address
+    jr     a1                                   # branch to dvmMterpCommonExceptionThrown
+
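+/*
+ * Literal pool: absolute addresses of external entry points, loaded via the
+ * la/lw pairs above so the templates remain position-independent.
+ */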
+    .align  4
+.LdvmAsmInstructionStart:
+    .word   dvmAsmInstructionStart
+.LdvmJitToInterpNoChainNoProfile:
+    .word   dvmJitToInterpNoChainNoProfile
+.LdvmJitToInterpTraceSelectNoChain:
+    .word   dvmJitToInterpTraceSelectNoChain
+.LdvmJitToInterpNoChain:
+    .word   dvmJitToInterpNoChain
+.LdvmMterpStdBail:
+    .word   dvmMterpStdBail
+.LdvmMterpCommonExceptionThrown:
+    .word   dvmMterpCommonExceptionThrown
+.LdvmLockObject:
+    .word   dvmLockObject
+#if defined(WITH_JIT_TUNING)
+.LdvmICHitCount:
+    .word   gDvmICHitCount
+#endif
+#if defined(WITH_SELF_VERIFICATION)
+.LdvmSelfVerificationMemOpDecode:
+    .word   dvmSelfVerificationMemOpDecode
+#endif
+
+    .global dmvCompilerTemplateEnd
+dmvCompilerTemplateEnd:
+
+#endif /* WITH_JIT */
diff --git a/vm/compiler/template/mips/funop.S b/vm/compiler/template/mips/funop.S
new file mode 100644
index 0000000..0a984d7
--- /dev/null
+++ b/vm/compiler/template/mips/funop.S
@@ -0,0 +1,30 @@
+%default {"preinstr":""}
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op a0".
+     * This could be a MIPS instruction or a function call.
+     *
+     * for: int-to-float, float-to-int
+     *
+     * On entry:
+     *     a0 = target dalvik register address
+     *     a1 = src dalvik register address
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     *
+     */
+    move rOBJ, a0                       # save a0
+#ifdef SOFT_FLOAT
+    LOAD(a0, a1)                        # a0<- vBB
+    $preinstr                           # optional op
+    $instr                              # v0<- op, a0-a3 changed
+.L${opcode}_set_vreg:
+    STORE(v0, rOBJ)                     # vAA<- v0
+#else
+    LOAD_F(fa0, a1)                     # fa0<- vBB
+    $preinstr                           # optional op
+    $instr_f                            # fv0 = result
+.L${opcode}_set_vreg_f:
+    STORE_F(fv0, rOBJ)                  # vAA <- fv0
+#endif
+    RETURN
diff --git a/vm/compiler/template/mips/funopNarrower.S b/vm/compiler/template/mips/funopNarrower.S
new file mode 100644
index 0000000..7fbaf7b
--- /dev/null
+++ b/vm/compiler/template/mips/funopNarrower.S
@@ -0,0 +1,33 @@
+%default {"preinstr":"", "load":"LOAD64_F(fa0, fa0f, a1)"}
+    /*
+     * Generic 64bit-to-32bit unary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = op a0/a1", where
+     * "result" is a 32-bit quantity in a0.
+     *
+     * For: long-to-float, double-to-int, double-to-float
+     * If hard floating point support is available, use fa0 as the parameter, except for
+     * long-to-float opcode.
+     * (This would work for long-to-int, but that instruction is actually
+     * an exact match for OP_MOVE.)
+     *
+     * On entry:
+     *     a0 = target dalvik register address
+     *     a1 = src dalvik register address
+     *
+     */
+    move rINST, a0                      # save a0
+#ifdef  SOFT_FLOAT
+    move t0, a1                         # save a1
+    LOAD64(rARG0, rARG1, t0)            # a0/a1<- vB/vB+1
+    $preinstr                           # optional op
+    $instr                              # v0<- op, a0-a3 changed
+.L${opcode}_set_vreg:
+    STORE(v0, rINST)                    # vA<- v0
+#else
+    $load
+    $preinstr                           # optional op
+    $instr_f                            # fv0 = result
+.L${opcode}_set_vreg_f:
+    STORE_F(fv0, rINST)                 # vA<- fv0
+#endif
+    RETURN
diff --git a/vm/compiler/template/mips/funopWider.S b/vm/compiler/template/mips/funopWider.S
new file mode 100644
index 0000000..887e171
--- /dev/null
+++ b/vm/compiler/template/mips/funopWider.S
@@ -0,0 +1,29 @@
+%default {"preinstr":"", "st_result":"STORE64_F(fv0, fv0f, rOBJ)"}
+    /*
+     * Generic 32bit-to-64bit floating point unary operation.  Provide an
+     * "instr" line that specifies an instruction that performs "d0 = op s0".
+     *
+     * For: int-to-double, float-to-double
+     *
+     * On entry:
+     *     a0 = target dalvik register address
+     *     a1 = src dalvik register address
+     */
+    /* unop vA, vB */
+    move rOBJ, a0                       # save a0
+#ifdef  SOFT_FLOAT
+    LOAD(a0, a1)                        # a0<- vB
+    $preinstr                           # optional op
+    $instr                              # result<- op, a0-a3 changed
+
+.L${opcode}_set_vreg:
+    STORE64(rRESULT0, rRESULT1, rOBJ)   # vA/vA+1<- v0/v1
+#else
+    LOAD_F(fa0, a1)                     # fa0<- vB
+    $preinstr                           # optional op
+    $instr_f
+
+.L${opcode}_set_vreg:
+    $st_result                          # vA/vA+1<- fv0/fv0f
+#endif
+    RETURN
diff --git a/vm/compiler/template/mips/header.S b/vm/compiler/template/mips/header.S
new file mode 100644
index 0000000..de4a051
--- /dev/null
+++ b/vm/compiler/template/mips/header.S
@@ -0,0 +1,406 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if defined(WITH_JIT)
+
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "../../../mterp/common/asm-constants.h"
+#include "../../../mterp/common/mips-defines.h"
+#include "../../../mterp/common/jit-config.h"
+#include <asm/regdef.h>
+#include <asm/fpregdef.h>
+
+#ifdef	__mips_hard_float
+#define		HARD_FLOAT
+#else
+#define		SOFT_FLOAT
+#endif
+
+/* MIPS definitions and declarations
+
+   reg	nick		purpose
+   s0	rPC		interpreted program counter, used for fetching instructions
+   s1	rFP		interpreted frame pointer, used for accessing locals and args
+   s2	rSELF		pointer to thread
+   s3	rIBASE		interpreted instruction base pointer, used for computed goto
+   s4	rINST		first 16-bit code unit of current instruction
+*/
+
+/* register offsets */
+#define r_ZERO      0
+#define r_AT        1
+#define r_V0        2
+#define r_V1        3
+#define r_A0        4
+#define r_A1        5
+#define r_A2        6
+#define r_A3        7
+#define r_T0        8
+#define r_T1        9
+#define r_T2        10
+#define r_T3        11
+#define r_T4        12
+#define r_T5        13
+#define r_T6        14
+#define r_T7        15
+#define r_S0        16
+#define r_S1        17
+#define r_S2        18
+#define r_S3        19
+#define r_S4        20
+#define r_S5        21
+#define r_S6        22
+#define r_S7        23
+#define r_T8        24
+#define r_T9        25
+#define r_K0        26
+#define r_K1        27
+#define r_GP        28
+#define r_SP        29
+#define r_FP        30
+#define r_RA        31
+#define r_F0        32
+#define r_F1        33
+#define r_F2        34
+#define r_F3        35
+#define r_F4        36
+#define r_F5        37
+#define r_F6        38
+#define r_F7        39
+#define r_F8        40
+#define r_F9        41
+#define r_F10       42
+#define r_F11       43
+#define r_F12       44
+#define r_F13       45
+#define r_F14       46
+#define r_F15       47
+#define r_F16       48
+#define r_F17       49
+#define r_F18       50
+#define r_F19       51
+#define r_F20       52
+#define r_F21       53
+#define r_F22       54
+#define r_F23       55
+#define r_F24       56
+#define r_F25       57
+#define r_F26       58
+#define r_F27       59
+#define r_F28       60
+#define r_F29       61
+#define r_F30       62
+#define r_F31       63
+
+/* single-purpose registers, given names for clarity */
+#define rPC	s0
+#define rFP	s1
+#define rSELF	s2
+#define rIBASE	s3
+#define rINST	s4
+#define rOBJ	s5
+#define rBIX	s6
+#define rTEMP	s7
+
+/* The two words of a long argument must be swapped between the big-endian and
+little-endian calling conventions.  In other words, a long passed as a0(MSW),
+a1(LSW) in little-endian mode must be passed as a1, a0 in big-endian mode; the
+rARG* and rRESULT* aliases below absorb that swap. */
+
+#ifdef HAVE_LITTLE_ENDIAN
+#define rARG0     a0
+#define rARG1     a1
+#define rARG2     a2
+#define rARG3     a3
+#define rRESULT0  v0
+#define rRESULT1  v1
+#else
+#define rARG0     a1
+#define rARG1     a0
+#define rARG2     a3
+#define rARG3     a2
+#define rRESULT0  v1
+#define rRESULT1  v0
+#endif
+
+
+/* save/restore the PC and/or FP from the thread struct */
+#define LOAD_PC_FROM_SELF()	lw	rPC, offThread_pc(rSELF)
+#define SAVE_PC_TO_SELF()	sw	rPC, offThread_pc(rSELF)
+#define LOAD_FP_FROM_SELF()	lw	rFP, offThread_curFrame(rSELF)
+#define SAVE_FP_TO_SELF()	sw	rFP, offThread_curFrame(rSELF)
+
+#define EXPORT_PC() \
+	sw	rPC, (offStackSaveArea_currentPc - sizeofStackSaveArea)(rFP)
+
+#define SAVEAREA_FROM_FP(rd, _fpreg) \
+	subu	rd, _fpreg, sizeofStackSaveArea
+
+#define FETCH_INST()			lhu	rINST, (rPC)
+
+#define FETCH_ADVANCE_INST(_count)	lhu     rINST, (_count*2)(rPC); \
+					addu	rPC, rPC, (_count * 2)
+
+#define FETCH_ADVANCE_INST_RB(rd)	addu	rPC, rPC, rd;	\
+					lhu     rINST, (rPC)
+
+#define FETCH(rd, _count)		lhu	rd, (_count * 2)(rPC)
+#define FETCH_S(rd, _count)		lh	rd, (_count * 2)(rPC)
+
+#ifdef HAVE_LITTLE_ENDIAN
+
+#define FETCH_B(rd, _count)            lbu     rd, (_count * 2)(rPC)
+#define FETCH_C(rd, _count)            lbu     rd, (_count * 2 + 1)(rPC)
+
+#else
+
+#define FETCH_B(rd, _count)            lbu     rd, (_count * 2 + 1)(rPC)
+#define FETCH_C(rd, _count)            lbu     rd, (_count * 2)(rPC)
+
+#endif
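+
+/*
+ * Usage sketch (editor's addition): decoding "const/16 vAA, #+BBBB".
+ * Offsets are in 16-bit Dalvik code units relative to rPC:
+ *
+ *     GET_OPA(a0)            # a0 <- AA (GET_OPA is defined below)
+ *     FETCH_S(a1, 1)         # a1 <- sign-extended BBBB from code unit 1
+ *     FETCH_ADVANCE_INST(2)  # rINST <- next opcode word, rPC += 4 bytes
+ */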
+
+#define GET_INST_OPCODE(rd)		and	rd, rINST, 0xFF
+
+#define GOTO_OPCODE(rd)			sll  rd, rd, ${handler_size_bits};	\
+					addu rd, rIBASE, rd;	\
+					jr  rd
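+
+/*
+ * Dispatch sketch (editor's addition): mterp lays handlers out as
+ * equal-sized blocks starting at rIBASE, so a handler's address is
+ * rIBASE + (opcode << handler_size_bits).  A typical handler tail is:
+ *
+ *     FETCH_ADVANCE_INST(1)  # rINST <- next instruction, advance rPC
+ *     GET_INST_OPCODE(t0)    # t0 <- opcode byte
+ *     GOTO_OPCODE(t0)        # computed goto into the handler table
+ */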
+
+
+#define LOAD(rd, rbase)			lw  rd, 0(rbase)
+#define LOAD_F(rd, rbase)		l.s rd, (rbase)
+#define STORE(rd, rbase)		sw  rd, 0(rbase)
+#define STORE_F(rd, rbase)		s.s rd, (rbase)
+
+#define GET_VREG(rd, rix)		LOAD_eas2(rd,rFP,rix)
+
+#define GET_VREG_F(rd, rix)		EAS2(AT, rFP, rix);		\
+					.set noat;  l.s rd, (AT); .set at
+
+#define SET_VREG(rd, rix)		STORE_eas2(rd, rFP, rix)
+
+#define SET_VREG_GOTO(rd, rix, dst)	.set noreorder;		\
+					sll  dst, dst, ${handler_size_bits};	\
+					addu dst, rIBASE, dst;			\
+					sll  t8, rix, 2;	\
+					addu t8, t8, rFP;	\
+					jr  dst;		\
+					sw  rd, 0(t8);		\
+					.set reorder
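+
+/* Note (editor's addition): the final "sw" above sits in the "jr dst" delay
+ * slot, so the vreg store still executes before control reaches the next
+ * handler; ".set noreorder" keeps the assembler from rescheduling it. */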
+
+#define SET_VREG_F(rd, rix)		EAS2(AT, rFP, rix);		\
+					.set noat;  s.s	rd, (AT); .set at
+
+
+#define GET_OPA(rd)			srl     rd, rINST, 8
+#ifndef		MIPS32R2
+#define GET_OPA4(rd)			GET_OPA(rd);  and  rd, 0xf
+#else
+#define GET_OPA4(rd)			ext	rd, rINST, 8, 4
+#endif
+#define GET_OPB(rd)			srl     rd, rINST, 12
+
+#define LOAD_rSELF_OFF(rd,off)		lw    rd, offThread_##off##(rSELF)
+
+#define LOAD_rSELF_method(rd)		LOAD_rSELF_OFF(rd, method)
+#define LOAD_rSELF_methodClassDex(rd)	LOAD_rSELF_OFF(rd, methodClassDex)
+#define LOAD_rSELF_interpStackEnd(rd)	LOAD_rSELF_OFF(rd, interpStackEnd)
+#define LOAD_rSELF_retval(rd)		LOAD_rSELF_OFF(rd, retval)
+#define LOAD_rSELF_pActiveProfilers(rd)	LOAD_rSELF_OFF(rd, pActiveProfilers)
+#define LOAD_rSELF_bailPtr(rd)		LOAD_rSELF_OFF(rd, bailPtr)
+
+#define GET_JIT_PROF_TABLE(rd)		LOAD_rSELF_OFF(rd,pJitProfTable)
+#define GET_JIT_THRESHOLD(rd)		LOAD_rSELF_OFF(rd,jitThreshold)
+
+/*
+ * Form an Effective Address rd = rbase + roff<<n;
+ * Uses reg AT
+ */
+#define EASN(rd,rbase,roff,rshift)	.set noat;		\
+					sll  AT, roff, rshift;	\
+					addu rd, rbase, AT;	\
+					.set at
+
+#define EAS1(rd,rbase,roff)		EASN(rd,rbase,roff,1)
+#define EAS2(rd,rbase,roff)		EASN(rd,rbase,roff,2)
+#define EAS3(rd,rbase,roff)		EASN(rd,rbase,roff,3)
+#define EAS4(rd,rbase,roff)		EASN(rd,rbase,roff,4)
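+
+/*
+ * Example (editor's addition): Dalvik vregs are 4 bytes wide, so EAS2 forms
+ * the address of a vreg relative to the frame pointer:
+ *
+ *     EAS2(t0, rFP, a0)      # t0 <- rFP + (a0 << 2), i.e. &fp[vA]
+ *     lw   t1, 0(t0)         # t1 <- vA (what GET_VREG does, via AT)
+ */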
+
+/*
+ * Form an Effective Shift Right rd = rbase + roff>>n;
+ * Uses reg AT
+ */
+#define ESRN(rd,rbase,roff,rshift)	.set noat;		\
+					srl  AT, roff, rshift;	\
+					addu rd, rbase, AT;	\
+					.set at
+
+#define LOAD_eas2(rd,rbase,roff)	EAS2(AT, rbase, roff);  \
+					.set noat;  lw  rd, 0(AT); .set at
+
+#define STORE_eas2(rd,rbase,roff)	EAS2(AT, rbase, roff);  \
+					.set noat;  sw  rd, 0(AT); .set at
+
+#define LOAD_RB_OFF(rd,rbase,off)	lw	rd, off(rbase)
+#define LOADu2_RB_OFF(rd,rbase,off)	lhu	rd, off(rbase)
+#define STORE_RB_OFF(rd,rbase,off)	sw	rd, off(rbase)
+
+#ifdef HAVE_LITTLE_ENDIAN
+
+#define STORE64_off(rlo,rhi,rbase,off)	        sw	rlo, off(rbase);	\
+					        sw	rhi, (off+4)(rbase)
+#define LOAD64_off(rlo,rhi,rbase,off)	        lw	rlo, off(rbase);	\
+					        lw	rhi, (off+4)(rbase)
+
+#define STORE64_off_F(rlo,rhi,rbase,off)	s.s	rlo, off(rbase);	\
+						s.s	rhi, (off+4)(rbase)
+#define LOAD64_off_F(rlo,rhi,rbase,off)		l.s	rlo, off(rbase);	\
+						l.s	rhi, (off+4)(rbase)
+#else
+
+#define STORE64_off(rlo,rhi,rbase,off)	        sw	rlo, (off+4)(rbase);	\
+					        sw	rhi, (off)(rbase)
+#define LOAD64_off(rlo,rhi,rbase,off)	        lw	rlo, (off+4)(rbase);	\
+					        lw	rhi, (off)(rbase)
+#define STORE64_off_F(rlo,rhi,rbase,off)	s.s	rlo, (off+4)(rbase);	\
+						s.s	rhi, (off)(rbase)
+#define LOAD64_off_F(rlo,rhi,rbase,off)		l.s	rlo, (off+4)(rbase);	\
+						l.s	rhi, (off)(rbase)
+#endif
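+
+/*
+ * Example (editor's addition): loading a wide vreg pair into rARG0/rARG1.
+ * LOAD64 plus the rARG aliases compose to give an endian-correct (lo, hi)
+ * pair for a C callee:
+ *
+ *     EAS2(t0, rFP, a2)          # t0 <- &fp[vBB]
+ *     LOAD64(rARG0, rARG1, t0)   # rARG0 <- low word, rARG1 <- high word
+ */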
+
+#define STORE64(rlo,rhi,rbase)		STORE64_off(rlo,rhi,rbase,0)
+#define LOAD64(rlo,rhi,rbase)		LOAD64_off(rlo,rhi,rbase,0)
+
+#define STORE64_F(rlo,rhi,rbase)	STORE64_off_F(rlo,rhi,rbase,0)
+#define LOAD64_F(rlo,rhi,rbase)		LOAD64_off_F(rlo,rhi,rbase,0)
+
+#define STORE64_lo(rd,rbase)		sw	rd, 0(rbase)
+#define STORE64_hi(rd,rbase)		sw	rd, 4(rbase)
+
+
+#define LOAD_offThread_exception(rd,rbase)		LOAD_RB_OFF(rd,rbase,offThread_exception)
+#define LOAD_base_offArrayObject_length(rd,rbase)	LOAD_RB_OFF(rd,rbase,offArrayObject_length)
+#define LOAD_base_offClassObject_accessFlags(rd,rbase)	LOAD_RB_OFF(rd,rbase,offClassObject_accessFlags)
+#define LOAD_base_offClassObject_descriptor(rd,rbase)   LOAD_RB_OFF(rd,rbase,offClassObject_descriptor)
+#define LOAD_base_offClassObject_super(rd,rbase)	LOAD_RB_OFF(rd,rbase,offClassObject_super)
+
+#define LOAD_base_offClassObject_vtable(rd,rbase)	LOAD_RB_OFF(rd,rbase,offClassObject_vtable)
+#define LOAD_base_offClassObject_vtableCount(rd,rbase)	LOAD_RB_OFF(rd,rbase,offClassObject_vtableCount)
+#define LOAD_base_offDvmDex_pResClasses(rd,rbase)	LOAD_RB_OFF(rd,rbase,offDvmDex_pResClasses)
+#define LOAD_base_offDvmDex_pResFields(rd,rbase)	LOAD_RB_OFF(rd,rbase,offDvmDex_pResFields)
+
+#define LOAD_base_offDvmDex_pResMethods(rd,rbase)	LOAD_RB_OFF(rd,rbase,offDvmDex_pResMethods)
+#define LOAD_base_offDvmDex_pResStrings(rd,rbase)	LOAD_RB_OFF(rd,rbase,offDvmDex_pResStrings)
+#define LOAD_base_offInstField_byteOffset(rd,rbase)	LOAD_RB_OFF(rd,rbase,offInstField_byteOffset)
+#define LOAD_base_offStaticField_value(rd,rbase)	LOAD_RB_OFF(rd,rbase,offStaticField_value)
+#define LOAD_base_offMethod_clazz(rd,rbase)		LOAD_RB_OFF(rd,rbase,offMethod_clazz)
+
+#define LOAD_base_offMethod_name(rd,rbase)		LOAD_RB_OFF(rd,rbase,offMethod_name)
+#define LOAD_base_offObject_clazz(rd,rbase)		LOAD_RB_OFF(rd,rbase,offObject_clazz)
+
+#define LOADu2_offMethod_methodIndex(rd,rbase)		LOADu2_RB_OFF(rd,rbase,offMethod_methodIndex)
+
+
+#define STORE_offThread_exception(rd,rbase)		STORE_RB_OFF(rd,rbase,offThread_exception)
+
+
+#define	STACK_STORE(rd,off)	sw   rd, off(sp)
+#define	STACK_LOAD(rd,off)	lw   rd, off(sp)
+#define CREATE_STACK(n)	 	subu sp, sp, n
+#define DELETE_STACK(n)	 	addu sp, sp, n
+
+#define SAVE_RA(offset)	 	STACK_STORE(ra, offset)
+#define LOAD_RA(offset)	 	STACK_LOAD(ra, offset)
+
+#define LOAD_ADDR(dest,addr)	la   dest, addr
+#define LOAD_IMM(dest, imm)	li   dest, imm
+#define MOVE_REG(dest,src)	move dest, src
+#define	RETURN			jr   ra
+#define	STACK_SIZE		128
+
+#define STACK_OFFSET_ARG04	16
+#define STACK_OFFSET_GP		84
+#define STACK_OFFSET_rFP	112
+
+/* This directive ensures every subsequent jal restores gp from a known stack offset */
+        .cprestore STACK_OFFSET_GP
+
+#define JAL(func)		move rTEMP, ra;				\
+				jal  func;				\
+				move ra, rTEMP
+
+#define JALR(reg)		move rTEMP, ra;				\
+				jalr ra, reg;				\
+				move ra, rTEMP
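+
+/* Note (editor's addition): templates are entered with ra holding the
+ * compiled trace's return point, so JAL/JALR park ra in rTEMP across the
+ * nested call and restore it afterwards; rTEMP must therefore not be live
+ * across these macros. */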
+
+#define BAL(n)			bal  n
+
+#define	STACK_STORE_RA()  	CREATE_STACK(STACK_SIZE);		\
+				STACK_STORE(gp, STACK_OFFSET_GP);	\
+				STACK_STORE(ra, 124)
+
+#define	STACK_STORE_S0()  	STACK_STORE_RA();			\
+				STACK_STORE(s0, 116)
+
+#define	STACK_STORE_S0S1()  	STACK_STORE_S0();			\
+				STACK_STORE(s1, STACK_OFFSET_rFP)
+
+#define	STACK_LOAD_RA()		STACK_LOAD(ra, 124);			\
+				STACK_LOAD(gp, STACK_OFFSET_GP);	\
+				DELETE_STACK(STACK_SIZE)
+
+#define	STACK_LOAD_S0()  	STACK_LOAD(s0, 116);			\
+				STACK_LOAD_RA()
+
+#define	STACK_LOAD_S0S1()  	STACK_LOAD(s1, STACK_OFFSET_rFP);	\
+				STACK_LOAD_S0()
+
+#define STACK_STORE_FULL()	CREATE_STACK(STACK_SIZE);	\
+				STACK_STORE(ra, 124);		\
+				STACK_STORE(fp, 120);		\
+				STACK_STORE(s0, 116);		\
+				STACK_STORE(s1, STACK_OFFSET_rFP);	\
+				STACK_STORE(s2, 108);		\
+				STACK_STORE(s3, 104);		\
+				STACK_STORE(s4, 100);		\
+				STACK_STORE(s5, 96);		\
+				STACK_STORE(s6, 92);		\
+				STACK_STORE(s7, 88);
+
+#define STACK_LOAD_FULL()	STACK_LOAD(gp, STACK_OFFSET_GP);	\
+				STACK_LOAD(s7, 88);	\
+				STACK_LOAD(s6, 92);	\
+				STACK_LOAD(s5, 96);	\
+				STACK_LOAD(s4, 100);	\
+				STACK_LOAD(s3, 104);	\
+				STACK_LOAD(s2, 108);	\
+				STACK_LOAD(s1, STACK_OFFSET_rFP);	\
+				STACK_LOAD(s0, 116);	\
+				STACK_LOAD(fp, 120);	\
+				STACK_LOAD(ra, 124);	\
+				DELETE_STACK(STACK_SIZE)
+
+/*
+ * The first 8 words of the frame are reserved for outgoing function-call
+ * arguments; scratch slots start above them.  The maximum scratch offset is
+ * STACK_OFFSET_SCRMX - STACK_OFFSET_SCR.
+ */
+#define STACK_OFFSET_SCR   32
+#define SCRATCH_STORE(r,off) \
+    STACK_STORE(r, STACK_OFFSET_SCR+off);
+#define SCRATCH_LOAD(r,off) \
+    STACK_LOAD(r, STACK_OFFSET_SCR+off);
diff --git a/vm/compiler/template/mips/platform.S b/vm/compiler/template/mips/platform.S
new file mode 100644
index 0000000..8b0d23e
--- /dev/null
+++ b/vm/compiler/template/mips/platform.S
@@ -0,0 +1,6 @@
+/*
+ * ===========================================================================
+ *  CPU-version-specific defines and utility
+ * ===========================================================================
+ */
+
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S b/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S
index 331d902..93a677e 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S
@@ -170,8 +170,8 @@
     stmfd   sp!, {r0-r2,lr}             @ preserve live registers
     mov     r0, r6
     @ r0=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceExit
+    ldr     ip, .LdvmFastMethodTraceExit
+    blx     ip
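+    @ Editor's note: "ldr ip, ...; blx ip" replaces "mov lr, pc; ldr pc, ...".
+    @ blx sets lr and interworks if the callee is Thumb, and it keeps the
+    @ return-address predictor usable; ip (r12) is call-clobbered under the
+    @ AAPCS, so it is free scratch at a call site.  The same reasoning
+    @ applies to the "mov pc, rN" -> "bx rN" tail-jump changes below.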
     ldmfd   sp!, {r0-r2,lr}             @ restore live registers
 #endif
     SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
@@ -272,8 +272,8 @@
     stmfd   sp!, {r0-r3}                    @ preserve r0-r3
     mov     r1, r6
     @ r0=methodToCall, r1=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceEnter
+    ldr     ip, .LdvmFastMethodTraceEnter
+    blx     ip
     ldmfd   sp!, {r0-r3}                    @ restore r0-r3
 #endif
 
@@ -281,7 +281,7 @@
 #if defined(WITH_JIT_TUNING)
     mov     r0, #kInlineCacheMiss
 #endif
-    mov     pc, r10                         @ dvmJitToInterpTraceSelectNoChain
+    bx      r10                         @ dvmJitToInterpTraceSelectNoChain
 
 /* ------------------------------ */
     .balign 4
@@ -330,8 +330,8 @@
     stmfd   sp!, {r0-r2,lr}             @ preserve clobbered live registers
     mov     r1, r6
     @ r0=methodToCall, r1=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceEnter
+    ldr     ip, .LdvmFastMethodTraceEnter
+    blx     ip
     ldmfd   sp!, {r0-r2,lr}             @ restore registers
 #endif
 
@@ -453,8 +453,8 @@
     mov     r0, r2
     mov     r1, r6
     @ r0=JNIMethod, r1=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceEnter
+    ldr     ip, .LdvmFastMethodTraceEnter
+    blx     ip
     ldmfd   sp!, {r0-r3}                @ restore r0-r3
 #endif
 
@@ -463,8 +463,8 @@
 #if defined(TEMPLATE_INLINE_PROFILING)
     ldmfd   sp!, {r0-r1}                @ restore r2 and r6
     @ r0=JNIMethod, r1=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastNativeMethodTraceExit
+    ldr     ip, .LdvmFastNativeMethodTraceExit
+    blx     ip
 #endif
     @ native return; r10=newSaveArea
     @ equivalent to dvmPopJniLocals
@@ -1508,8 +1508,8 @@
     stmfd   sp!, {r0-r2,lr}             @ preserve live registers
     mov     r0, r6
     @ r0=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceExit
+    ldr     ip, .LdvmFastMethodTraceExit
+    blx     ip
     ldmfd   sp!, {r0-r2,lr}             @ restore live registers
 #endif
     SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
@@ -1614,8 +1614,8 @@
     stmfd   sp!, {r0-r3}                    @ preserve r0-r3
     mov     r1, r6
     @ r0=methodToCall, r1=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceEnter
+    ldr     ip, .LdvmFastMethodTraceEnter
+    blx     ip
     ldmfd   sp!, {r0-r3}                    @ restore r0-r3
 #endif
 
@@ -1623,7 +1623,7 @@
 #if defined(WITH_JIT_TUNING)
     mov     r0, #kInlineCacheMiss
 #endif
-    mov     pc, r10                         @ dvmJitToInterpTraceSelectNoChain
+    bx      r10                         @ dvmJitToInterpTraceSelectNoChain
 
 #undef TEMPLATE_INLINE_PROFILING
 
@@ -1676,8 +1676,8 @@
     stmfd   sp!, {r0-r2,lr}             @ preserve clobbered live registers
     mov     r1, r6
     @ r0=methodToCall, r1=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceEnter
+    ldr     ip, .LdvmFastMethodTraceEnter
+    blx     ip
     ldmfd   sp!, {r0-r2,lr}             @ restore registers
 #endif
 
@@ -1807,8 +1807,8 @@
     mov     r0, r2
     mov     r1, r6
     @ r0=JNIMethod, r1=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceEnter
+    ldr     ip, .LdvmFastMethodTraceEnter
+    blx     ip
     ldmfd   sp!, {r0-r3}                @ restore r0-r3
 #endif
 
@@ -1817,8 +1817,8 @@
 #if defined(TEMPLATE_INLINE_PROFILING)
     ldmfd   sp!, {r0-r1}                @ restore r2 and r6
     @ r0=JNIMethod, r1=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastNativeMethodTraceExit
+    ldr     ip, .LdvmFastNativeMethodTraceExit
+    blx     ip
 #endif
     @ native return; r10=newSaveArea
     @ equivalent to dvmPopJniLocals
@@ -1880,20 +1880,20 @@
     stmfd   sp!, {r0-r3}
     mov     r0, r2
     mov     r1, r6
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceEnter
+    ldr     ip, .LdvmFastMethodTraceEnter
+    blx     ip
     ldmfd   sp!, {r0-r3}
 
-    mov     lr, pc
-    ldr     pc, [r2, #offMethod_nativeFunc]
+    ldr     ip, [r2, #offMethod_nativeFunc]
+    blx     ip
 
     ldmfd   sp!, {r0-r1}
-    mov     lr, pc
-    ldr     pc, .LdvmFastNativeMethodTraceExit
+    ldr     ip, .LdvmFastNativeMethodTraceExit
+    blx     ip
     b       212f
 121:
-    mov     lr, pc
-    ldr     pc, [r2, #offMethod_nativeFunc]
+    ldr     ip, [r2, #offMethod_nativeFunc]
+    blx     ip
 212:
 
     @ native return; r10=newSaveArea
@@ -1919,7 +1919,7 @@
 #if defined(WITH_JIT_TUNING)
     mov     r0, #kCallsiteInterpreted
 #endif
-    mov     pc, r1
+    bx      r1
 
 /*
  * On entry:
@@ -1936,7 +1936,7 @@
     ldr     r1, .LdvmMterpCommonExceptionThrown @ PIC way of getting &func
     ldr     rIBASE, .LdvmAsmInstructionStart    @ same as above
     mov     rPC, r0                 @ reload the faulting Dalvik address
-    mov     pc, r1                  @ branch to dvmMterpCommonExceptionThrown
+    bx      r1                  @ branch to dvmMterpCommonExceptionThrown
 
     .align  2
 .LdvmAsmInstructionStart:
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S b/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S
index 044843e..b9de01f 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S
@@ -170,8 +170,8 @@
     stmfd   sp!, {r0-r2,lr}             @ preserve live registers
     mov     r0, r6
     @ r0=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceExit
+    ldr     ip, .LdvmFastMethodTraceExit
+    blx     ip
     ldmfd   sp!, {r0-r2,lr}             @ restore live registers
 #endif
     SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
@@ -272,8 +272,8 @@
     stmfd   sp!, {r0-r3}                    @ preserve r0-r3
     mov     r1, r6
     @ r0=methodToCall, r1=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceEnter
+    ldr     ip, .LdvmFastMethodTraceEnter
+    blx     ip
     ldmfd   sp!, {r0-r3}                    @ restore r0-r3
 #endif
 
@@ -281,7 +281,7 @@
 #if defined(WITH_JIT_TUNING)
     mov     r0, #kInlineCacheMiss
 #endif
-    mov     pc, r10                         @ dvmJitToInterpTraceSelectNoChain
+    bx      r10                         @ dvmJitToInterpTraceSelectNoChain
 
 /* ------------------------------ */
     .balign 4
@@ -330,8 +330,8 @@
     stmfd   sp!, {r0-r2,lr}             @ preserve clobbered live registers
     mov     r1, r6
     @ r0=methodToCall, r1=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceEnter
+    ldr     ip, .LdvmFastMethodTraceEnter
+    blx     ip
     ldmfd   sp!, {r0-r2,lr}             @ restore registers
 #endif
 
@@ -453,8 +453,8 @@
     mov     r0, r2
     mov     r1, r6
     @ r0=JNIMethod, r1=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceEnter
+    ldr     ip, .LdvmFastMethodTraceEnter
+    blx     ip
     ldmfd   sp!, {r0-r3}                @ restore r0-r3
 #endif
 
@@ -463,8 +463,8 @@
 #if defined(TEMPLATE_INLINE_PROFILING)
     ldmfd   sp!, {r0-r1}                @ restore r2 and r6
     @ r0=JNIMethod, r1=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastNativeMethodTraceExit
+    ldr     ip, .LdvmFastNativeMethodTraceExit
+    blx     ip
 #endif
     @ native return; r10=newSaveArea
     @ equivalent to dvmPopJniLocals
@@ -514,8 +514,8 @@
     /* op vAA, vBB, vCC */
     push    {r0-r3}                     @ save operands
     mov     r11, lr                     @ save return address
-    mov     lr, pc
-    ldr     pc, .L__aeabi_cdcmple       @ PIC way of "bl __aeabi_cdcmple"
+    ldr     ip, .L__aeabi_cdcmple       @ PIC way of "bl __aeabi_cdcmple"
+    blx     ip
     bhi     .LTEMPLATE_CMPG_DOUBLE_gt_or_nan       @ C set and Z clear, disambiguate
     mvncc   r0, #0                      @ (less than) r1<- -1
     moveq   r0, #0                      @ (equal) r1<- 0, trumps less than
@@ -528,8 +528,8 @@
 .LTEMPLATE_CMPG_DOUBLE_gt_or_nan:
     pop     {r2-r3}                     @ restore operands in reverse order
     pop     {r0-r1}                     @ restore operands in reverse order
-    mov     lr, pc
-    ldr     pc, .L__aeabi_cdcmple       @ r0<- Z set if eq, C clear if <
+    ldr     ip, .L__aeabi_cdcmple       @ r0<- Z set if eq, C clear if <
+    blx     ip
     movcc   r0, #1                      @ (greater than) r1<- 1
     bxcc    r11
     mov     r0, #1                            @ r1<- 1 or -1 for NaN
@@ -558,8 +558,8 @@
     /* op vAA, vBB, vCC */
     push    {r0-r3}                     @ save operands
     mov     r11, lr                     @ save return address
-    mov     lr, pc
-    ldr     pc, .L__aeabi_cdcmple       @ PIC way of "bl __aeabi_cdcmple"
+    ldr     ip, .L__aeabi_cdcmple       @ PIC way of "bl __aeabi_cdcmple"
+    blx     ip
     bhi     .LTEMPLATE_CMPL_DOUBLE_gt_or_nan       @ C set and Z clear, disambiguate
     mvncc   r0, #0                      @ (less than) r1<- -1
     moveq   r0, #0                      @ (equal) r1<- 0, trumps less than
@@ -572,8 +572,8 @@
 .LTEMPLATE_CMPL_DOUBLE_gt_or_nan:
     pop     {r2-r3}                     @ restore operands in reverse order
     pop     {r0-r1}                     @ restore operands in reverse order
-    mov     lr, pc
-    ldr     pc, .L__aeabi_cdcmple       @ r0<- Z set if eq, C clear if <
+    ldr     ip, .L__aeabi_cdcmple       @ r0<- Z set if eq, C clear if <
+    blx     ip
     movcc   r0, #1                      @ (greater than) r1<- 1
     bxcc    r11
     mvn     r0, #0                            @ r1<- 1 or -1 for NaN
@@ -622,8 +622,8 @@
     mov     r9, r0                      @ Save copies - we may need to redo
     mov     r10, r1
     mov     r11, lr                     @ save return address
-    mov     lr, pc
-    ldr     pc, .L__aeabi_cfcmple       @ cmp <=: C clear if <, Z set if eq
+    ldr     ip, .L__aeabi_cfcmple       @ cmp <=: C clear if <, Z set if eq
+    blx     ip
     bhi     .LTEMPLATE_CMPG_FLOAT_gt_or_nan       @ C set and Z clear, disambiguate
     mvncc   r0, #0                      @ (less than) r0<- -1
     moveq   r0, #0                      @ (equal) r0<- 0, trumps less than
@@ -634,8 +634,8 @@
 .LTEMPLATE_CMPG_FLOAT_gt_or_nan:
     mov     r0, r10                     @ restore in reverse order
     mov     r1, r9
-    mov     lr, pc
-    ldr     pc, .L__aeabi_cfcmple       @ r0<- Z set if eq, C clear if <
+    ldr     ip, .L__aeabi_cfcmple       @ r0<- Z set if eq, C clear if <
+    blx     ip
     movcc   r0, #1                      @ (greater than) r1<- 1
     bxcc    r11
     mov     r0, #1                            @ r1<- 1 or -1 for NaN
@@ -684,8 +684,8 @@
     mov     r9, r0                      @ Save copies - we may need to redo
     mov     r10, r1
     mov     r11, lr                     @ save return address
-    mov     lr, pc
-    ldr     pc, .L__aeabi_cfcmple       @ cmp <=: C clear if <, Z set if eq
+    ldr     ip, .L__aeabi_cfcmple       @ cmp <=: C clear if <, Z set if eq
+    blx     ip
     bhi     .LTEMPLATE_CMPL_FLOAT_gt_or_nan       @ C set and Z clear, disambiguate
     mvncc   r0, #0                      @ (less than) r0<- -1
     moveq   r0, #0                      @ (equal) r0<- 0, trumps less than
@@ -696,8 +696,8 @@
 .LTEMPLATE_CMPL_FLOAT_gt_or_nan:
     mov     r0, r10                     @ restore in reverse order
     mov     r1, r9
-    mov     lr, pc
-    ldr     pc, .L__aeabi_cfcmple       @ r0<- Z set if eq, C clear if <
+    ldr     ip, .L__aeabi_cfcmple       @ r0<- Z set if eq, C clear if <
+    blx     ip
     movcc   r0, #1                      @ (greater than) r1<- 1
     bxcc    r11
     mvn     r0, #0                            @ r1<- 1 or -1 for NaN
@@ -1239,8 +1239,8 @@
     stmfd   sp!, {r0-r2,lr}             @ preserve live registers
     mov     r0, r6
     @ r0=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceExit
+    ldr     ip, .LdvmFastMethodTraceExit
+    blx     ip
     ldmfd   sp!, {r0-r2,lr}             @ restore live registers
 #endif
     SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
@@ -1345,8 +1345,8 @@
     stmfd   sp!, {r0-r3}                    @ preserve r0-r3
     mov     r1, r6
     @ r0=methodToCall, r1=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceEnter
+    ldr     ip, .LdvmFastMethodTraceEnter
+    blx     ip
     ldmfd   sp!, {r0-r3}                    @ restore r0-r3
 #endif
 
@@ -1354,7 +1354,7 @@
 #if defined(WITH_JIT_TUNING)
     mov     r0, #kInlineCacheMiss
 #endif
-    mov     pc, r10                         @ dvmJitToInterpTraceSelectNoChain
+    bx      r10                         @ dvmJitToInterpTraceSelectNoChain
 
 #undef TEMPLATE_INLINE_PROFILING
 
@@ -1407,8 +1407,8 @@
     stmfd   sp!, {r0-r2,lr}             @ preserve clobbered live registers
     mov     r1, r6
     @ r0=methodToCall, r1=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceEnter
+    ldr     ip, .LdvmFastMethodTraceEnter
+    blx     ip
     ldmfd   sp!, {r0-r2,lr}             @ restore registers
 #endif
 
@@ -1538,8 +1538,8 @@
     mov     r0, r2
     mov     r1, r6
     @ r0=JNIMethod, r1=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceEnter
+    ldr     ip, .LdvmFastMethodTraceEnter
+    blx     ip
     ldmfd   sp!, {r0-r3}                @ restore r0-r3
 #endif
 
@@ -1548,8 +1548,8 @@
 #if defined(TEMPLATE_INLINE_PROFILING)
     ldmfd   sp!, {r0-r1}                @ restore r2 and r6
     @ r0=JNIMethod, r1=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastNativeMethodTraceExit
+    ldr     ip, .LdvmFastNativeMethodTraceExit
+    blx     ip
 #endif
     @ native return; r10=newSaveArea
     @ equivalent to dvmPopJniLocals
@@ -1611,20 +1611,20 @@
     stmfd   sp!, {r0-r3}
     mov     r0, r2
     mov     r1, r6
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceEnter
+    ldr     ip, .LdvmFastMethodTraceEnter
+    blx     ip
     ldmfd   sp!, {r0-r3}
 
-    mov     lr, pc
-    ldr     pc, [r2, #offMethod_nativeFunc]
+    ldr     ip, [r2, #offMethod_nativeFunc]
+    blx     ip
 
     ldmfd   sp!, {r0-r1}
-    mov     lr, pc
-    ldr     pc, .LdvmFastNativeMethodTraceExit
+    ldr     ip, .LdvmFastNativeMethodTraceExit
+    blx     ip
     b       212f
 121:
-    mov     lr, pc
-    ldr     pc, [r2, #offMethod_nativeFunc]
+    ldr     ip, [r2, #offMethod_nativeFunc]
+    blx     ip
 212:
 
     @ native return; r10=newSaveArea
@@ -1650,7 +1650,7 @@
 #if defined(WITH_JIT_TUNING)
     mov     r0, #kCallsiteInterpreted
 #endif
-    mov     pc, r1
+    bx      r1
 
 /*
  * On entry:
@@ -1667,7 +1667,7 @@
     ldr     r1, .LdvmMterpCommonExceptionThrown @ PIC way of getting &func
     ldr     rIBASE, .LdvmAsmInstructionStart    @ same as above
     mov     rPC, r0                 @ reload the faulting Dalvik address
-    mov     pc, r1                  @ branch to dvmMterpCommonExceptionThrown
+    bx      r1                  @ branch to dvmMterpCommonExceptionThrown
 
     .align  2
 .LdvmAsmInstructionStart:
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S
index ba798e0..23f2812 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S
@@ -170,8 +170,8 @@
     stmfd   sp!, {r0-r2,lr}             @ preserve live registers
     mov     r0, r6
     @ r0=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceExit
+    ldr     ip, .LdvmFastMethodTraceExit
+    blx     ip
     ldmfd   sp!, {r0-r2,lr}             @ restore live registers
 #endif
     SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
@@ -272,8 +272,8 @@
     stmfd   sp!, {r0-r3}                    @ preserve r0-r3
     mov     r1, r6
     @ r0=methodToCall, r1=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceEnter
+    ldr     ip, .LdvmFastMethodTraceEnter
+    blx     ip
     ldmfd   sp!, {r0-r3}                    @ restore r0-r3
 #endif
 
@@ -281,7 +281,7 @@
 #if defined(WITH_JIT_TUNING)
     mov     r0, #kInlineCacheMiss
 #endif
-    mov     pc, r10                         @ dvmJitToInterpTraceSelectNoChain
+    bx      r10                         @ dvmJitToInterpTraceSelectNoChain
 
 /* ------------------------------ */
     .balign 4
@@ -330,8 +330,8 @@
     stmfd   sp!, {r0-r2,lr}             @ preserve clobbered live registers
     mov     r1, r6
     @ r0=methodToCall, r1=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceEnter
+    ldr     ip, .LdvmFastMethodTraceEnter
+    blx     ip
     ldmfd   sp!, {r0-r2,lr}             @ restore registers
 #endif
 
@@ -453,8 +453,8 @@
     mov     r0, r2
     mov     r1, r6
     @ r0=JNIMethod, r1=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceEnter
+    ldr     ip, .LdvmFastMethodTraceEnter
+    blx     ip
     ldmfd   sp!, {r0-r3}                @ restore r0-r3
 #endif
 
@@ -463,8 +463,8 @@
 #if defined(TEMPLATE_INLINE_PROFILING)
     ldmfd   sp!, {r0-r1}                @ restore r2 and r6
     @ r0=JNIMethod, r1=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastNativeMethodTraceExit
+    ldr     ip, .LdvmFastNativeMethodTraceExit
+    blx     ip
 #endif
     @ native return; r10=newSaveArea
     @ equivalent to dvmPopJniLocals
@@ -1508,8 +1508,8 @@
     stmfd   sp!, {r0-r2,lr}             @ preserve live registers
     mov     r0, r6
     @ r0=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceExit
+    ldr     ip, .LdvmFastMethodTraceExit
+    blx     ip
     ldmfd   sp!, {r0-r2,lr}             @ restore live registers
 #endif
     SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
@@ -1614,8 +1614,8 @@
     stmfd   sp!, {r0-r3}                    @ preserve r0-r3
     mov     r1, r6
     @ r0=methodToCall, r1=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceEnter
+    ldr     ip, .LdvmFastMethodTraceEnter
+    blx     ip
     ldmfd   sp!, {r0-r3}                    @ restore r0-r3
 #endif
 
@@ -1623,7 +1623,7 @@
 #if defined(WITH_JIT_TUNING)
     mov     r0, #kInlineCacheMiss
 #endif
-    mov     pc, r10                         @ dvmJitToInterpTraceSelectNoChain
+    bx      r10                         @ dvmJitToInterpTraceSelectNoChain
 
 #undef TEMPLATE_INLINE_PROFILING
 
@@ -1676,8 +1676,8 @@
     stmfd   sp!, {r0-r2,lr}             @ preserve clobbered live registers
     mov     r1, r6
     @ r0=methodToCall, r1=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceEnter
+    ldr     ip, .LdvmFastMethodTraceEnter
+    blx     ip
     ldmfd   sp!, {r0-r2,lr}             @ restore registers
 #endif
 
@@ -1807,8 +1807,8 @@
     mov     r0, r2
     mov     r1, r6
     @ r0=JNIMethod, r1=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceEnter
+    ldr     ip, .LdvmFastMethodTraceEnter
+    blx     ip
     ldmfd   sp!, {r0-r3}                @ restore r0-r3
 #endif
 
@@ -1817,8 +1817,8 @@
 #if defined(TEMPLATE_INLINE_PROFILING)
     ldmfd   sp!, {r0-r1}                @ restore r2 and r6
     @ r0=JNIMethod, r1=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastNativeMethodTraceExit
+    ldr     ip, .LdvmFastNativeMethodTraceExit
+    blx     ip
 #endif
     @ native return; r10=newSaveArea
     @ equivalent to dvmPopJniLocals
@@ -1880,20 +1880,20 @@
     stmfd   sp!, {r0-r3}
     mov     r0, r2
     mov     r1, r6
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceEnter
+    ldr     ip, .LdvmFastMethodTraceEnter
+    blx     ip
     ldmfd   sp!, {r0-r3}
 
-    mov     lr, pc
-    ldr     pc, [r2, #offMethod_nativeFunc]
+    ldr     ip, [r2, #offMethod_nativeFunc]
+    blx     ip
 
     ldmfd   sp!, {r0-r1}
-    mov     lr, pc
-    ldr     pc, .LdvmFastNativeMethodTraceExit
+    ldr     ip, .LdvmFastNativeMethodTraceExit
+    blx     ip
     b       212f
 121:
-    mov     lr, pc
-    ldr     pc, [r2, #offMethod_nativeFunc]
+    ldr     ip, [r2, #offMethod_nativeFunc]
+    blx     ip
 212:
 
     @ native return; r10=newSaveArea
@@ -1919,7 +1919,7 @@
 #if defined(WITH_JIT_TUNING)
     mov     r0, #kCallsiteInterpreted
 #endif
-    mov     pc, r1
+    bx      r1
 
 /*
  * On entry:
@@ -1936,7 +1936,7 @@
     ldr     r1, .LdvmMterpCommonExceptionThrown @ PIC way of getting &func
     ldr     rIBASE, .LdvmAsmInstructionStart    @ same as above
     mov     rPC, r0                 @ reload the faulting Dalvik address
-    mov     pc, r1                  @ branch to dvmMterpCommonExceptionThrown
+    bx      r1                  @ branch to dvmMterpCommonExceptionThrown
 
     .align  2
 .LdvmAsmInstructionStart:
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S
index 825ac40..360ebfa 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S
@@ -170,8 +170,8 @@
     stmfd   sp!, {r0-r2,lr}             @ preserve live registers
     mov     r0, r6
     @ r0=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceExit
+    ldr     ip, .LdvmFastMethodTraceExit
+    blx     ip
     ldmfd   sp!, {r0-r2,lr}             @ restore live registers
 #endif
     SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
@@ -272,8 +272,8 @@
     stmfd   sp!, {r0-r3}                    @ preserve r0-r3
     mov     r1, r6
     @ r0=methodToCall, r1=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceEnter
+    ldr     ip, .LdvmFastMethodTraceEnter
+    blx     ip
     ldmfd   sp!, {r0-r3}                    @ restore r0-r3
 #endif
 
@@ -281,7 +281,7 @@
 #if defined(WITH_JIT_TUNING)
     mov     r0, #kInlineCacheMiss
 #endif
-    mov     pc, r10                         @ dvmJitToInterpTraceSelectNoChain
+    bx      r10                         @ dvmJitToInterpTraceSelectNoChain
 
 /* ------------------------------ */
     .balign 4
@@ -330,8 +330,8 @@
     stmfd   sp!, {r0-r2,lr}             @ preserve clobbered live registers
     mov     r1, r6
     @ r0=methodToCall, r1=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceEnter
+    ldr     ip, .LdvmFastMethodTraceEnter
+    blx     ip
     ldmfd   sp!, {r0-r2,lr}             @ restore registers
 #endif
 
@@ -453,8 +453,8 @@
     mov     r0, r2
     mov     r1, r6
     @ r0=JNIMethod, r1=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceEnter
+    ldr     ip, .LdvmFastMethodTraceEnter
+    blx     ip
     ldmfd   sp!, {r0-r3}                @ restore r0-r3
 #endif
 
@@ -463,8 +463,8 @@
 #if defined(TEMPLATE_INLINE_PROFILING)
     ldmfd   sp!, {r0-r1}                @ restore r2 and r6
     @ r0=JNIMethod, r1=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastNativeMethodTraceExit
+    ldr     ip, .LdvmFastNativeMethodTraceExit
+    blx     ip
 #endif
     @ native return; r10=newSaveArea
     @ equivalent to dvmPopJniLocals
@@ -1508,8 +1508,8 @@
     stmfd   sp!, {r0-r2,lr}             @ preserve live registers
     mov     r0, r6
     @ r0=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceExit
+    ldr     ip, .LdvmFastMethodTraceExit
+    blx     ip
     ldmfd   sp!, {r0-r2,lr}             @ restore live registers
 #endif
     SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
@@ -1614,8 +1614,8 @@
     stmfd   sp!, {r0-r3}                    @ preserve r0-r3
     mov     r1, r6
     @ r0=methodToCall, r1=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceEnter
+    ldr     ip, .LdvmFastMethodTraceEnter
+    blx     ip
     ldmfd   sp!, {r0-r3}                    @ restore r0-r3
 #endif
 
@@ -1623,7 +1623,7 @@
 #if defined(WITH_JIT_TUNING)
     mov     r0, #kInlineCacheMiss
 #endif
-    mov     pc, r10                         @ dvmJitToInterpTraceSelectNoChain
+    bx      r10                         @ dvmJitToInterpTraceSelectNoChain
 
 #undef TEMPLATE_INLINE_PROFILING
 
@@ -1676,8 +1676,8 @@
     stmfd   sp!, {r0-r2,lr}             @ preserve clobbered live registers
     mov     r1, r6
     @ r0=methodToCall, r1=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceEnter
+    ldr     ip, .LdvmFastMethodTraceEnter
+    blx     ip
     ldmfd   sp!, {r0-r2,lr}             @ restore registers
 #endif
 
@@ -1807,8 +1807,8 @@
     mov     r0, r2
     mov     r1, r6
     @ r0=JNIMethod, r1=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceEnter
+    ldr     ip, .LdvmFastMethodTraceEnter
+    blx     ip
     ldmfd   sp!, {r0-r3}                @ restore r0-r3
 #endif
 
@@ -1817,8 +1817,8 @@
 #if defined(TEMPLATE_INLINE_PROFILING)
     ldmfd   sp!, {r0-r1}                @ restore r2 and r6
     @ r0=JNIMethod, r1=rSELF
-    mov     lr, pc
-    ldr     pc, .LdvmFastNativeMethodTraceExit
+    ldr     ip, .LdvmFastNativeMethodTraceExit
+    blx     ip
 #endif
     @ native return; r10=newSaveArea
     @ equivalent to dvmPopJniLocals
@@ -1880,20 +1880,20 @@
     stmfd   sp!, {r0-r3}
     mov     r0, r2
     mov     r1, r6
-    mov     lr, pc
-    ldr     pc, .LdvmFastMethodTraceEnter
+    ldr     ip, .LdvmFastMethodTraceEnter
+    blx     ip
     ldmfd   sp!, {r0-r3}
 
-    mov     lr, pc
-    ldr     pc, [r2, #offMethod_nativeFunc]
+    ldr     ip, [r2, #offMethod_nativeFunc]
+    blx     ip
 
     ldmfd   sp!, {r0-r1}
-    mov     lr, pc
-    ldr     pc, .LdvmFastNativeMethodTraceExit
+    ldr     ip, .LdvmFastNativeMethodTraceExit
+    blx     ip
     b       212f
 121:
-    mov     lr, pc
-    ldr     pc, [r2, #offMethod_nativeFunc]
+    ldr     ip, [r2, #offMethod_nativeFunc]
+    blx     ip
 212:
 
     @ native return; r10=newSaveArea
@@ -1919,7 +1919,7 @@
 #if defined(WITH_JIT_TUNING)
     mov     r0, #kCallsiteInterpreted
 #endif
-    mov     pc, r1
+    bx      r1
 
 /*
  * On entry:
@@ -1936,7 +1936,7 @@
     ldr     r1, .LdvmMterpCommonExceptionThrown @ PIC way of getting &func
     ldr     rIBASE, .LdvmAsmInstructionStart    @ same as above
     mov     rPC, r0                 @ reload the faulting Dalvik address
-    mov     pc, r1                  @ branch to dvmMterpCommonExceptionThrown
+    bx      r1                  @ branch to dvmMterpCommonExceptionThrown
 
     .align  2
 .LdvmAsmInstructionStart:
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-mips.S b/vm/compiler/template/out/CompilerTemplateAsm-mips.S
new file mode 100644
index 0000000..85a373e
--- /dev/null
+++ b/vm/compiler/template/out/CompilerTemplateAsm-mips.S
@@ -0,0 +1,3401 @@
+/*
+ * This file was generated automatically by gen-template.py for 'mips'.
+ *
+ * --> DO NOT EDIT <--
+ */
+
+/* File: mips/header.S */
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if defined(WITH_JIT)
+
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "../../../mterp/common/asm-constants.h"
+#include "../../../mterp/common/mips-defines.h"
+#include "../../../mterp/common/jit-config.h"
+#include <asm/regdef.h>
+#include <asm/fpregdef.h>
+
+#ifdef	__mips_hard_float
+#define		HARD_FLOAT
+#else
+#define		SOFT_FLOAT
+#endif
+
+/* MIPS definitions and declarations
+
+   reg	nick		purpose
+   s0	rPC		interpreted program counter, used for fetching instructions
+   s1	rFP		interpreted frame pointer, used for accessing locals and args
+   s2	rSELF		pointer to thread
+   s3	rIBASE		interpreted instruction base pointer, used for computed goto
+   s4	rINST		first 16-bit code unit of current instruction
+*/
+
+/* register offsets */
+#define r_ZERO      0
+#define r_AT        1
+#define r_V0        2
+#define r_V1        3
+#define r_A0        4
+#define r_A1        5
+#define r_A2        6
+#define r_A3        7
+#define r_T0        8
+#define r_T1        9
+#define r_T2        10
+#define r_T3        11
+#define r_T4        12
+#define r_T5        13
+#define r_T6        14
+#define r_T7        15
+#define r_S0        16
+#define r_S1        17
+#define r_S2        18
+#define r_S3        19
+#define r_S4        20
+#define r_S5        21
+#define r_S6        22
+#define r_S7        23
+#define r_T8        24
+#define r_T9        25
+#define r_K0        26
+#define r_K1        27
+#define r_GP        28
+#define r_SP        29
+#define r_FP        30
+#define r_RA        31
+#define r_F0        32
+#define r_F1        33
+#define r_F2        34
+#define r_F3        35
+#define r_F4        36
+#define r_F5        37
+#define r_F6        38
+#define r_F7        39
+#define r_F8        40
+#define r_F9        41
+#define r_F10       42
+#define r_F11       43
+#define r_F12       44
+#define r_F13       45
+#define r_F14       46
+#define r_F15       47
+#define r_F16       48
+#define r_F17       49
+#define r_F18       50
+#define r_F19       51
+#define r_F20       52
+#define r_F21       53
+#define r_F22       54
+#define r_F23       55
+#define r_F24       56
+#define r_F25       57
+#define r_F26       58
+#define r_F27       59
+#define r_F28       60
+#define r_F29       61
+#define r_F30       62
+#define r_F31       63
+
+/* single-purpose registers, given names for clarity */
+#define rPC	s0
+#define rFP	s1
+#define rSELF	s2
+#define rIBASE	s3
+#define rINST	s4
+#define rOBJ	s5
+#define rBIX	s6
+#define rTEMP	s7
+
+/* The rARG/rRESULT aliases name the registers carrying the low and high words
+of a 64-bit argument or result. The o32 ABI places the low word in a0 (v0) on
+little-endian but in a1 (v1) on big-endian, so the pair is swapped for
+big-endian builds. */
+
+#ifdef HAVE_LITTLE_ENDIAN
+#define rARG0     a0
+#define rARG1     a1
+#define rARG2     a2
+#define rARG3     a3
+#define rRESULT0  v0
+#define rRESULT1  v1
+#else
+#define rARG0     a1
+#define rARG1     a0
+#define rARG2     a3
+#define rARG3     a2
+#define rRESULT0  v1
+#define rRESULT1  v0
+#endif
+
+
+/* save/restore the PC and/or FP from the thread struct */
+#define LOAD_PC_FROM_SELF()	lw	rPC, offThread_pc(rSELF)
+#define SAVE_PC_TO_SELF()	sw	rPC, offThread_pc(rSELF)
+#define LOAD_FP_FROM_SELF()	lw	rFP, offThread_curFrame(rSELF)
+#define SAVE_FP_TO_SELF()	sw	rFP, offThread_curFrame(rSELF)
+
+#define EXPORT_PC() \
+	sw	rPC, (offStackSaveArea_currentPc - sizeofStackSaveArea)(rFP)
+
+#define SAVEAREA_FROM_FP(rd, _fpreg) \
+	subu	rd, _fpreg, sizeofStackSaveArea
+
+#define FETCH_INST()			lhu	rINST, (rPC)
+
+#define FETCH_ADVANCE_INST(_count)	lhu     rINST, (_count * 2)(rPC); \
+					addu	rPC, rPC, (_count * 2)
+
+#define FETCH_ADVANCE_INST_RB(rd)	addu	rPC, rPC, rd;	\
+					lhu     rINST, (rPC)
+
+#define FETCH(rd, _count)		lhu	rd, (_count * 2)(rPC)
+#define FETCH_S(rd, _count)		lh	rd, (_count * 2)(rPC)
+
+#ifdef HAVE_LITTLE_ENDIAN
+
+#define FETCH_B(rd, _count)            lbu     rd, (_count * 2)(rPC)
+#define FETCH_C(rd, _count)            lbu     rd, (_count * 2 + 1)(rPC)
+
+#else
+
+#define FETCH_B(rd, _count)            lbu     rd, (_count * 2 + 1)(rPC)
+#define FETCH_C(rd, _count)            lbu     rd, (_count * 2)(rPC)
+
+#endif
+
+#define GET_INST_OPCODE(rd)		and	rd, rINST, 0xFF
+
+#define GOTO_OPCODE(rd)			sll  rd, rd, -1000;	\
+					addu rd, rIBASE, rd;	\
+					jr  rd
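+
+/* Note (editor's addition): "-1000" appears to be gen-template.py's
+ * deliberately-invalid placeholder for handler_size_bits; the compiler
+ * templates never expand GOTO_OPCODE or SET_VREG_GOTO (below), and the
+ * out-of-range shift would fail loudly at assembly time if they did. */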
+
+
+#define LOAD(rd, rbase)			lw  rd, 0(rbase)
+#define LOAD_F(rd, rbase)		l.s rd, (rbase)
+#define STORE(rd, rbase)		sw  rd, 0(rbase)
+#define STORE_F(rd, rbase)		s.s rd, (rbase)
+
+#define GET_VREG(rd, rix)		LOAD_eas2(rd,rFP,rix)
+
+#define GET_VREG_F(rd, rix)		EAS2(AT, rFP, rix);		\
+					.set noat;  l.s rd, (AT); .set at
+
+#define SET_VREG(rd, rix)		STORE_eas2(rd, rFP, rix)
+
+#define SET_VREG_GOTO(rd, rix, dst)	.set noreorder;		\
+					sll  dst, dst, -1000;	\
+					addu dst, rIBASE, dst;			\
+					sll  t8, rix, 2;	\
+					addu t8, t8, rFP;	\
+					jr  dst;		\
+					sw  rd, 0(t8);		\
+					.set reorder
+
+#define SET_VREG_F(rd, rix)		EAS2(AT, rFP, rix);		\
+					.set noat;  s.s	rd, (AT); .set at
+
+
+#define GET_OPA(rd)			srl     rd, rINST, 8
+#ifndef		MIPS32R2
+#define GET_OPA4(rd)			GET_OPA(rd);  and  rd, 0xf
+#else
+#define GET_OPA4(rd)			ext	rd, rINST, 8, 4
+#endif
+#define GET_OPB(rd)			srl     rd, rINST, 12
+
+#define LOAD_rSELF_OFF(rd,off)		lw    rd, offThread_##off##(rSELF)
+
+#define LOAD_rSELF_method(rd)		LOAD_rSELF_OFF(rd, method)
+#define LOAD_rSELF_methodClassDex(rd)	LOAD_rSELF_OFF(rd, methodClassDex)
+#define LOAD_rSELF_interpStackEnd(rd)	LOAD_rSELF_OFF(rd, interpStackEnd)
+#define LOAD_rSELF_retval(rd)		LOAD_rSELF_OFF(rd, retval)
+#define LOAD_rSELF_pActiveProfilers(rd)	LOAD_rSELF_OFF(rd, pActiveProfilers)
+#define LOAD_rSELF_bailPtr(rd)		LOAD_rSELF_OFF(rd, bailPtr)
+
+#define GET_JIT_PROF_TABLE(rd)		LOAD_rSELF_OFF(rd,pJitProfTable)
+#define GET_JIT_THRESHOLD(rd)		LOAD_rSELF_OFF(rd,jitThreshold)
+
+/*
+ * Form an Effective Address rd = rbase + roff<<n;
+ * Uses reg AT
+ */
+#define EASN(rd,rbase,roff,rshift)	.set noat;		\
+					sll  AT, roff, rshift;	\
+					addu rd, rbase, AT;	\
+					.set at
+
+#define EAS1(rd,rbase,roff)		EASN(rd,rbase,roff,1)
+#define EAS2(rd,rbase,roff)		EASN(rd,rbase,roff,2)
+#define EAS3(rd,rbase,roff)		EASN(rd,rbase,roff,3)
+#define EAS4(rd,rbase,roff)		EASN(rd,rbase,roff,4)
+
+/*
+ * Form an Effective Shift Right rd = rbase + roff>>n;
+ * Uses reg AT
+ */
+#define ESRN(rd,rbase,roff,rshift)	.set noat;		\
+					srl  AT, roff, rshift;	\
+					addu rd, rbase, AT;	\
+					.set at
+
+#define LOAD_eas2(rd,rbase,roff)	EAS2(AT, rbase, roff);  \
+					.set noat;  lw  rd, 0(AT); .set at
+
+#define STORE_eas2(rd,rbase,roff)	EAS2(AT, rbase, roff);  \
+					.set noat;  sw  rd, 0(AT); .set at
+
+#define LOAD_RB_OFF(rd,rbase,off)	lw	rd, off(rbase)
+#define LOADu2_RB_OFF(rd,rbase,off)	lhu	rd, off(rbase)
+#define STORE_RB_OFF(rd,rbase,off)	sw	rd, off(rbase)
+
+#ifdef HAVE_LITTLE_ENDIAN
+
+#define STORE64_off(rlo,rhi,rbase,off)	        sw	rlo, off(rbase);	\
+					        sw	rhi, (off+4)(rbase)
+#define LOAD64_off(rlo,rhi,rbase,off)	        lw	rlo, off(rbase);	\
+					        lw	rhi, (off+4)(rbase)
+
+#define STORE64_off_F(rlo,rhi,rbase,off)	s.s	rlo, off(rbase);	\
+						s.s	rhi, (off+4)(rbase)
+#define LOAD64_off_F(rlo,rhi,rbase,off)		l.s	rlo, off(rbase);	\
+						l.s	rhi, (off+4)(rbase)
+#else
+
+#define STORE64_off(rlo,rhi,rbase,off)	        sw	rlo, (off+4)(rbase);	\
+					        sw	rhi, (off)(rbase)
+#define LOAD64_off(rlo,rhi,rbase,off)	        lw	rlo, (off+4)(rbase);	\
+					        lw	rhi, (off)(rbase)
+#define STORE64_off_F(rlo,rhi,rbase,off)	s.s	rlo, (off+4)(rbase);	\
+						s.s	rhi, (off)(rbase)
+#define LOAD64_off_F(rlo,rhi,rbase,off)		l.s	rlo, (off+4)(rbase);	\
+						l.s	rhi, (off)(rbase)
+#endif
+
+#define STORE64(rlo,rhi,rbase)		STORE64_off(rlo,rhi,rbase,0)
+#define LOAD64(rlo,rhi,rbase)		LOAD64_off(rlo,rhi,rbase,0)
+
+#define STORE64_F(rlo,rhi,rbase)	STORE64_off_F(rlo,rhi,rbase,0)
+#define LOAD64_F(rlo,rhi,rbase)		LOAD64_off_F(rlo,rhi,rbase,0)
+
+#define STORE64_lo(rd,rbase)		sw	rd, 0(rbase)
+#define STORE64_hi(rd,rbase)		sw	rd, 4(rbase)
+
+
+#define LOAD_offThread_exception(rd,rbase)		LOAD_RB_OFF(rd,rbase,offThread_exception)
+#define LOAD_base_offArrayObject_length(rd,rbase)	LOAD_RB_OFF(rd,rbase,offArrayObject_length)
+#define LOAD_base_offClassObject_accessFlags(rd,rbase)	LOAD_RB_OFF(rd,rbase,offClassObject_accessFlags)
+#define LOAD_base_offClassObject_descriptor(rd,rbase)   LOAD_RB_OFF(rd,rbase,offClassObject_descriptor)
+#define LOAD_base_offClassObject_super(rd,rbase)	LOAD_RB_OFF(rd,rbase,offClassObject_super)
+
+#define LOAD_base_offClassObject_vtable(rd,rbase)	LOAD_RB_OFF(rd,rbase,offClassObject_vtable)
+#define LOAD_base_offClassObject_vtableCount(rd,rbase)	LOAD_RB_OFF(rd,rbase,offClassObject_vtableCount)
+#define LOAD_base_offDvmDex_pResClasses(rd,rbase)	LOAD_RB_OFF(rd,rbase,offDvmDex_pResClasses)
+#define LOAD_base_offDvmDex_pResFields(rd,rbase)	LOAD_RB_OFF(rd,rbase,offDvmDex_pResFields)
+
+#define LOAD_base_offDvmDex_pResMethods(rd,rbase)	LOAD_RB_OFF(rd,rbase,offDvmDex_pResMethods)
+#define LOAD_base_offDvmDex_pResStrings(rd,rbase)	LOAD_RB_OFF(rd,rbase,offDvmDex_pResStrings)
+#define LOAD_base_offInstField_byteOffset(rd,rbase)	LOAD_RB_OFF(rd,rbase,offInstField_byteOffset)
+#define LOAD_base_offStaticField_value(rd,rbase)	LOAD_RB_OFF(rd,rbase,offStaticField_value)
+#define LOAD_base_offMethod_clazz(rd,rbase)		LOAD_RB_OFF(rd,rbase,offMethod_clazz)
+
+#define LOAD_base_offMethod_name(rd,rbase)		LOAD_RB_OFF(rd,rbase,offMethod_name)
+#define LOAD_base_offObject_clazz(rd,rbase)		LOAD_RB_OFF(rd,rbase,offObject_clazz)
+
+#define LOADu2_offMethod_methodIndex(rd,rbase)		LOADu2_RB_OFF(rd,rbase,offMethod_methodIndex)
+
+
+#define STORE_offThread_exception(rd,rbase)		STORE_RB_OFF(rd,rbase,offThread_exception)
+
+
+#define	STACK_STORE(rd,off)	sw   rd, off(sp)
+#define	STACK_LOAD(rd,off)	lw   rd, off(sp)
+#define CREATE_STACK(n)	 	subu sp, sp, n
+#define DELETE_STACK(n)	 	addu sp, sp, n
+
+#define SAVE_RA(offset)	 	STACK_STORE(ra, offset)
+#define LOAD_RA(offset)	 	STACK_LOAD(ra, offset)
+
+#define LOAD_ADDR(dest,addr)	la   dest, addr
+#define LOAD_IMM(dest, imm)	li   dest, imm
+#define MOVE_REG(dest,src)	move dest, src
+#define	RETURN			jr   ra
+#define	STACK_SIZE		128
+
+#define STACK_OFFSET_ARG04	16
+#define STACK_OFFSET_GP		84
+#define STACK_OFFSET_rFP	112
+
+/* This directive ensures every subsequent jal restores gp from a known stack offset */
+        .cprestore STACK_OFFSET_GP
+
+#define JAL(func)		move rTEMP, ra;				\
+				jal  func;				\
+				move ra, rTEMP
+
+#define JALR(reg)		move rTEMP, ra;				\
+				jalr ra, reg;				\
+				move ra, rTEMP
+
+#define BAL(n)			bal  n
+
+#define	STACK_STORE_RA()  	CREATE_STACK(STACK_SIZE);		\
+				STACK_STORE(gp, STACK_OFFSET_GP);	\
+				STACK_STORE(ra, 124)
+
+#define	STACK_STORE_S0()  	STACK_STORE_RA();			\
+				STACK_STORE(s0, 116)
+
+#define	STACK_STORE_S0S1()  	STACK_STORE_S0();			\
+				STACK_STORE(s1, STACK_OFFSET_rFP)
+
+#define	STACK_LOAD_RA()		STACK_LOAD(ra, 124);			\
+				STACK_LOAD(gp, STACK_OFFSET_GP);	\
+				DELETE_STACK(STACK_SIZE)
+
+#define	STACK_LOAD_S0()  	STACK_LOAD(s0, 116);			\
+				STACK_LOAD_RA()
+
+#define	STACK_LOAD_S0S1()  	STACK_LOAD(s1, STACK_OFFSET_rFP);	\
+				STACK_LOAD_S0()
+
+#define STACK_STORE_FULL()	CREATE_STACK(STACK_SIZE);	\
+				STACK_STORE(ra, 124);		\
+				STACK_STORE(fp, 120);		\
+				STACK_STORE(s0, 116);		\
+				STACK_STORE(s1, STACK_OFFSET_rFP);	\
+				STACK_STORE(s2, 108);		\
+				STACK_STORE(s3, 104);		\
+				STACK_STORE(s4, 100);		\
+				STACK_STORE(s5, 96);		\
+				STACK_STORE(s6, 92);		\
+				STACK_STORE(s7, 88);
+
+#define STACK_LOAD_FULL()	STACK_LOAD(gp, STACK_OFFSET_GP);	\
+				STACK_LOAD(s7, 88);	\
+				STACK_LOAD(s6, 92);	\
+				STACK_LOAD(s5, 96);	\
+				STACK_LOAD(s4, 100);	\
+				STACK_LOAD(s3, 104);	\
+				STACK_LOAD(s2, 108);	\
+				STACK_LOAD(s1, STACK_OFFSET_rFP);	\
+				STACK_LOAD(s0, 116);	\
+				STACK_LOAD(fp, 120);	\
+				STACK_LOAD(ra, 124);	\
+				DELETE_STACK(STACK_SIZE)
+
+/*
+ * The first 8 words of the frame are reserved for outgoing function-call
+ * arguments; scratch slots start above them.  The maximum scratch offset is
+ * STACK_OFFSET_SCRMX - STACK_OFFSET_SCR.
+ */
+#define STACK_OFFSET_SCR   32
+#define SCRATCH_STORE(r,off) \
+    STACK_STORE(r, STACK_OFFSET_SCR+off);
+#define SCRATCH_LOAD(r,off) \
+    STACK_LOAD(r, STACK_OFFSET_SCR+off);
+
+/* File: mips/platform.S */
+/*
+ * ===========================================================================
+ *  CPU-version-specific defines and utility
+ * ===========================================================================
+ */
+
+
+
+    .global dvmCompilerTemplateStart
+    .type   dvmCompilerTemplateStart, %function
+    .text
+
+dvmCompilerTemplateStart:
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_CMP_LONG
+dvmCompiler_TEMPLATE_CMP_LONG:
+/* File: mips/TEMPLATE_CMP_LONG.S */
+    /*
+     * Compare two 64-bit values
+     *    x = y     return  0
+     *    x < y     return -1
+     *    x > y     return  1
+     *
+     * The ARM code can be improved on with the following observation:
+     *    slt   t0,  x.hi, y.hi;        # (x.hi < y.hi) ? 1:0
+     *    sgt   t1,  x.hi, y.hi;        # (x.hi > y.hi) ? 1:0
+     *    subu  v0,  t1, t0             # v0 = -1/1/0 for [< > =]
+     *
+     * This code assumes the register pair ordering depends on endianness (a1:a0 or a0:a1):
+     *    a1:a0 => vBB
+     *    a3:a2 => vCC
+     */
+    /* cmp-long vAA, vBB, vCC */
+    slt    t0, rARG1, rARG3             # compare hi
+    sgt    t1, rARG1, rARG3
+    subu   v0, t1, t0                   # v0<- (-1,1,0)
+    bnez   v0, .LTEMPLATE_CMP_LONG_finish
+                                        # at this point x.hi==y.hi
+    sltu   t0, rARG0, rARG2             # compare lo
+    sgtu   t1, rARG0, rARG2
+    subu   v0, t1, t0                   # v0<- (-1,1,0) for [< > =]
+.LTEMPLATE_CMP_LONG_finish:
+    RETURN
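+    # Worked example (editor's addition): x = 0x00000001_00000000 vs
+    # y = 0x00000000_ffffffff: the high words differ (1 > 0), so the first
+    # slt/sgt/subu sequence yields v0 = 1 and we branch straight to finish
+    # without ever comparing the (unsigned) low words.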
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_RETURN
+dvmCompiler_TEMPLATE_RETURN:
+/* File: mips/TEMPLATE_RETURN.S */
+    /*
+     * Unwind a frame from the Dalvik stack for compiled OP_RETURN_XXX.
+     * If the stored value in returnAddr is non-zero, the caller was compiled
+     * by the JIT, so return to the address in the code cache following the
+     * invoke instruction.  Otherwise return to the special
+     * dvmJitToInterpNoChain entry point.
+     */
+#if defined(TEMPLATE_INLINE_PROFILING)
+    # preserve a0-a2 and ra
+    SCRATCH_STORE(a0, 0)
+    SCRATCH_STORE(a1, 4)
+    SCRATCH_STORE(a2, 8)
+    SCRATCH_STORE(ra, 12)
+
+    # a0=rSELF
+    move    a0, rSELF
+    la      t9, dvmFastMethodTraceExit
+    JALR(t9)
+    lw      gp, STACK_OFFSET_GP(sp)
+
+    # restore a0-a2 and ra
+    SCRATCH_LOAD(ra, 12)
+    SCRATCH_LOAD(a2, 8)
+    SCRATCH_LOAD(a1, 4)
+    SCRATCH_LOAD(a0, 0)
+#endif
+    SAVEAREA_FROM_FP(a0, rFP)           # a0<- saveArea (old)
+    lw      t0, offStackSaveArea_prevFrame(a0)     # t0<- saveArea->prevFrame
+    lbu     t1, offThread_breakFlags(rSELF)        # t1<- breakFlags
+    lw      rPC, offStackSaveArea_savedPc(a0)      # rPC<- saveArea->savedPc
+#if !defined(WITH_SELF_VERIFICATION)
+    lw      t2,  offStackSaveArea_returnAddr(a0)   # t2<- chaining cell ret
+#else
+    move    t2, zero                               # disable chaining
+#endif
+    lw      a2, offStackSaveArea_method - sizeofStackSaveArea(t0)
+                                                   # a2<- method we're returning to
+#if !defined(WITH_SELF_VERIFICATION)
+    beq     a2, zero, 1f                           # bail to interpreter
+#else
+    bne     a2, zero, 2f
+    JALR(ra)                                       # punt to interpreter and compare state
+    # DOUG: assume this does not return ???
+2:
+#endif
+    la      t4, .LdvmJitToInterpNoChainNoProfile   # defined in footer.S
+    lw      a1, (t4)
+    move    rFP, t0                                # publish new FP
+    beq     a2, zero, 4f
+    lw      t0, offMethod_clazz(a2)                # t0<- method->clazz
+4:
+
+    sw      a2, offThread_method(rSELF)            # self->method = newSave->method
+    lw      a0, offClassObject_pDvmDex(t0)         # a0<- method->clazz->pDvmDex
+    sw      rFP, offThread_curFrame(rSELF)         # self->curFrame = fp
+    add     rPC, rPC, 3*2                          # publish new rPC
+    sw      a0, offThread_methodClassDex(rSELF)
+    movn    t2, zero, t1                           # check the breakFlags and
+                                                   # clear the chaining cell address
+    sw      t2, offThread_inJitCodeCache(rSELF)    # in code cache or not
+    beq     t2, zero, 3f                           # chaining cell exists?
+    JALR(t2)                                       # jump to the chaining cell
+    # DOUG: assume this does not return ???
+3:
+#if defined(WITH_JIT_TUNING)
+    li      a0, kCallsiteInterpreted
+#endif
+    j       a1                                     # callsite is interpreted
+1:
+    sw      zero, offThread_inJitCodeCache(rSELF)  # reset inJitCodeCache
+    SAVE_PC_TO_SELF()                              # SAVE_PC_FP_TO_SELF()
+    SAVE_FP_TO_SELF()
+    la      t4, .LdvmMterpStdBail                  # defined in footer.S
+    lw      a2, (t4)
+    move    a0, rSELF                              # Expecting rSELF in a0
+    JALR(a2)                                       # exit the interpreter
+    # DOUG: assume this does not return ???
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT
+dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT:
+/* File: mips/TEMPLATE_INVOKE_METHOD_NO_OPT.S */
+    /*
+     * For polymorphic callsites: set up the Dalvik frame, load the Dalvik PC
+     * into rPC, then jump to dvmJitToInterpNoChain to dispatch the
+     * runtime-resolved callee.
+     */
+    # a0 = methodToCall, a1 = returnCell, rPC = dalvikCallsite
+    lh     t7, offMethod_registersSize(a0)        # t7<- methodToCall->regsSize
+    lh     a2, offMethod_outsSize(a0)             # a2<- methodToCall->outsSize
+    lw     t9, offThread_interpStackEnd(rSELF)    # t9<- interpStackEnd
+    lbu    t8, offThread_breakFlags(rSELF)        # t8<- breakFlags
+    move   a3, a1                                 # a3<- returnCell
+    SAVEAREA_FROM_FP(a1, rFP)                     # a1<- stack save area
+    sll    t6, t7, 2                              # multiply regsSize by 4 (4 bytes per reg)
+    sub    a1, a1, t6                             # a1<- newFp(old savearea-regsSize)
+    SAVEAREA_FROM_FP(t0, a1)                      # t0<- stack save area
+    sll    t6, a2, 2                              # multiply outsSize by 4 (4 bytes per reg)
+    sub    t0, t0, t6                             # t0<- bottom (newsave-outsSize)
+    bgeu   t0, t9, 1f                             # bottom < interpStackEnd?
+    RETURN                                        # return to raise stack overflow excep.
+
+1:
+    # a1 = newFP, a0 = methodToCall, a3 = returnCell, rPC = dalvikCallsite
+    lw     t9, offMethod_clazz(a0)                # t9<- methodToCall->clazz
+    lw     t0, offMethod_accessFlags(a0)          # t0<- methodToCall->accessFlags
+    sw     rPC, (offStackSaveArea_currentPc - sizeofStackSaveArea)(rFP)
+    sw     rPC, (offStackSaveArea_savedPc - sizeofStackSaveArea)(a1)
+    lw     rPC, offMethod_insns(a0)               # rPC<- methodToCall->insns
+
+    # set up newSaveArea
+    sw     rFP, (offStackSaveArea_prevFrame - sizeofStackSaveArea)(a1)
+    sw     a3, (offStackSaveArea_returnAddr - sizeofStackSaveArea)(a1)
+    sw     a0, (offStackSaveArea_method - sizeofStackSaveArea)(a1)
+    beqz   t8, 2f                                 # breakFlags != 0
+    RETURN                                        # bail to the interpreter
+
+2:
+    and    t6, t0, ACC_NATIVE
+    beqz   t6, 3f
+#if !defined(WITH_SELF_VERIFICATION)
+    j      .LinvokeNative
+#else
+    RETURN                                        # bail to the interpreter
+#endif
+
+3:
+    # continue executing the next instruction through the interpreter
+    la     t0, .LdvmJitToInterpTraceSelectNoChain # defined in footer.S
+    lw     rTEMP, (t0)
+    lw     a3, offClassObject_pDvmDex(t9)         # a3<- method->clazz->pDvmDex
+
+    # Update "thread" values for the new method
+    sw     a0, offThread_method(rSELF)            # self->method = methodToCall
+    sw     a3, offThread_methodClassDex(rSELF)    # self->methodClassDex = ...
+    move   rFP, a1                                # fp = newFp
+    sw     rFP, offThread_curFrame(rSELF)         # self->curFrame = newFp
+#if defined(TEMPLATE_INLINE_PROFILING)
+    # preserve a0-a3
+    SCRATCH_STORE(a0, 0)
+    SCRATCH_STORE(a1, 4)
+    SCRATCH_STORE(a2, 8)
+    SCRATCH_STORE(a3, 12)
+
+    # a0=methodToCall, a1=rSELF
+    move   a1, rSELF
+    la     t9, dvmFastMethodTraceEnter
+    JALR(t9)
+    lw     gp, STACK_OFFSET_GP(sp)
+
+    # restore a0-a3
+    SCRATCH_LOAD(a3, 12)
+    SCRATCH_LOAD(a2, 8)
+    SCRATCH_LOAD(a1, 4)
+    SCRATCH_LOAD(a0, 0)
+#endif
+
+    # Start executing the callee
+#if defined(WITH_JIT_TUNING)
+    li     a0, kInlineCacheMiss
+#endif
+    jr     rTEMP                                  # dvmJitToInterpTraceSelectNoChain
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN
+dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN:
+/* File: mips/TEMPLATE_INVOKE_METHOD_CHAIN.S */
+    /*
+     * For a monomorphic callsite, set up the Dalvik frame and return to the
+     * JIT'ed code through ra (the link register) to transfer control to the
+     * callee method through a dedicated chaining cell.
+     */
+    # a0 = methodToCall, a1 = returnCell, rPC = dalvikCallsite
+    # methodToCall is guaranteed to be non-native
+.LinvokeChain:
+    lh     t7, offMethod_registersSize(a0)        # t7<- methodToCall->regsSize
+    lh     a2, offMethod_outsSize(a0)             # a2<- methodToCall->outsSize
+    lw     t9, offThread_interpStackEnd(rSELF)    # t9<- interpStackEnd
+    lbu    t8, offThread_breakFlags(rSELF)        # t8<- breakFlags
+    move   a3, a1                                 # a3<- returnCell
+    SAVEAREA_FROM_FP(a1, rFP)                     # a1<- stack save area
+    sll    t6, t7, 2                              # multiply regsSize by 4 (4 bytes per reg)
+    sub    a1, a1, t6                             # a1<- newFp(old savearea-regsSize)
+    SAVEAREA_FROM_FP(t0, a1)                      # t0<- stack save area
+    add    t2, ra, 8                              # setup the punt-to-interp address
+                                                  # 8 bytes skips branch and delay slot
+    sll    t6, a2, 2                              # multiply outsSize by 4 (4 bytes per reg)
+    sub    t0, t0, t6                             # t0<- bottom (newsave-outsSize)
+    bgeu   t0, t9, 1f                             # bottom < interpStackEnd?
+    jr     t2                                     # return to raise stack overflow excep.
+
+1:
+    # a1 = newFP, a0 = methodToCall, a3 = returnCell, rPC = dalvikCallsite
+    lw     t9, offMethod_clazz(a0)                # t9<- methodToCall->clazz
+    sw     rPC, (offStackSaveArea_currentPc - sizeofStackSaveArea)(rFP)
+    sw     rPC, (offStackSaveArea_savedPc - sizeofStackSaveArea)(a1)
+    lw     rPC, offMethod_insns(a0)               # rPC<- methodToCall->insns
+
+    # set up newSaveArea
+    sw     rFP, (offStackSaveArea_prevFrame - sizeofStackSaveArea)(a1)
+    sw     a3, (offStackSaveArea_returnAddr - sizeofStackSaveArea)(a1)
+    sw     a0, (offStackSaveArea_method - sizeofStackSaveArea)(a1)
+    beqz   t8, 2f                                 # continue at 2f if breakFlags == 0
+    jr     t2                                     # bail to the interpreter
+
+2:
+    lw     a3, offClassObject_pDvmDex(t9)         # a3<- methodToCall->clazz->pDvmDex
+
+    # Update "thread" values for the new method
+    sw     a0, offThread_method(rSELF)            # self->method = methodToCall
+    sw     a3, offThread_methodClassDex(rSELF)    # self->methodClassDex = ...
+    move   rFP, a1                                # fp = newFp
+    sw     rFP, offThread_curFrame(rSELF)         # self->curFrame = newFp
+#if defined(TEMPLATE_INLINE_PROFILING)
+    # preserve a0-a2 and ra
+    SCRATCH_STORE(a0, 0)
+    SCRATCH_STORE(a1, 4)
+    SCRATCH_STORE(a2, 8)
+    SCRATCH_STORE(ra, 12)
+
+    move   a1, rSELF
+    # a0=methodToCall, a1=rSELF
+    la     t9, dvmFastMethodTraceEnter
+    jalr   t9
+    lw     gp, STACK_OFFSET_GP(sp)
+
+    # restore a0-a2 and ra
+    SCRATCH_LOAD(ra, 12)
+    SCRATCH_LOAD(a2, 8)
+    SCRATCH_LOAD(a1, 4)
+    SCRATCH_LOAD(a0, 0)
+#endif
+    RETURN                                        # return to the callee-chaining cell
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN
+dvmCompiler_TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN:
+/* File: mips/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN.S */
+    /*
+     * For a polymorphic callsite, check whether the cached class pointer
+     * matches the current one. If so, set up the Dalvik frame and return to
+     * the JIT'ed code through ra (the link register) to transfer control to
+     * the callee method through a dedicated chaining cell.
+     *
+     * The predicted chaining cell is declared in MipsLIR.h with the
+     * following layout:
+     *
+     *  typedef struct PredictedChainingCell {
+     *      u4 branch;
+     *      u4 delay_slot;
+     *      const ClassObject *clazz;
+     *      const Method *method;
+     *      u4 counter;
+     *  } PredictedChainingCell;
+     *
+     * Upon returning to the callsite:
+     *    - ra   : to branch to the chaining cell
+     *    - ra+8 : to punt to the interpreter
+     *    - ra+16: to fully resolve the callee and possibly rechain;
+     *             a3 <- class
+     */
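+    /*
+     * Note: the fixed offsets used below follow from the layout above:
+     * branch and delay_slot occupy bytes 0-7, so clazz is loaded from +8
+     * and method from +12.
+     */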
+    # a0 = this, a1 = returnCell, a2 = predictedChainCell, rPC = dalvikCallsite
+    lw      a3, offObject_clazz(a0)     # a3 <- this->class
+    lw      rIBASE, 8(a2)                   # rIBASE <- predictedChainCell->clazz
+    lw      a0, 12(a2)                  # a0 <- predictedChainCell->method
+    lw      t1, offThread_icRechainCount(rSELF)    # t1 <- shared rechainCount
+
+#if defined(WITH_JIT_TUNING)
+    la      rINST, .LdvmICHitCount
+    bne    a3, rIBASE, 1f
+    nop
+    lw      t2, 0(rINST)
+    add     t2, t2, 1
+    sw      t2, 0(rINST)
+1:
+#endif
+    beq     a3, rIBASE, .LinvokeChain       # branch if predicted chain is valid
+    lw      rINST, offClassObject_vtable(a3)     # rINST <- this->class->vtable
+    beqz    rIBASE, 2f                      # branch if cell is uninitialized (clazz == NULL)
+    sub     a1, t1, 1                   # count--
+    sw      a1, offThread_icRechainCount(rSELF)   # write back to InterpState
+    b       3f
+2:
+    move    a1, zero
+3:
+    add     ra, ra, 16                  # return to fully-resolve landing pad
+    /*
+     * a1 <- count
+     * a2 <- &predictedChainCell
+     * a3 <- this->class
+     * rPC <- dPC
+     * rINST <- this->class->vtable
+     */
+    RETURN
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE
+dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE:
+/* File: mips/TEMPLATE_INVOKE_METHOD_NATIVE.S */
+    # a0 = methodToCall, a1 = returnCell, rPC = dalvikCallsite
+    lh     t7, offMethod_registersSize(a0)        # t7<- methodToCall->regsSize
+    lw     t9, offThread_interpStackEnd(rSELF)    # t9<- interpStackEnd
+    lbu    t8, offThread_breakFlags(rSELF)        # t8<- breakFlags
+    move   a3, a1                                 # a3<- returnCell
+    SAVEAREA_FROM_FP(a1, rFP)                     # a1<- stack save area
+    sll    t6, t7, 2                              # multiply regsSize by 4 (4 bytes per reg)
+    sub    a1, a1, t6                             # a1<- newFp(old savearea-regsSize)
+    SAVEAREA_FROM_FP(t0, a1)                      # t0<- stack save area
+    bgeu   t0, t9, 1f                             # bottom < interpStackEnd?
+    RETURN                                        # return to raise stack overflow excep.
+
+1:
+    # a1 = newFP, a0 = methodToCall, a3 = returnCell, rPC = dalvikCallsite
+    sw     rPC, (offStackSaveArea_currentPc - sizeofStackSaveArea)(rFP)
+    sw     rPC, (offStackSaveArea_savedPc - sizeofStackSaveArea)(a1)
+    lw     rPC, offMethod_insns(a0)               # rPC<- methodToCall->insns
+
+    # set up newSaveArea
+    sw     rFP, (offStackSaveArea_prevFrame - sizeofStackSaveArea)(a1)
+    sw     a3, (offStackSaveArea_returnAddr - sizeofStackSaveArea)(a1)
+    sw     a0, (offStackSaveArea_method - sizeofStackSaveArea)(a1)
+    lw     rTEMP, offMethod_nativeFunc(a0)        # rTEMP<- method->nativeFunc
+#if !defined(WITH_SELF_VERIFICATION)
+    beqz   t8, 2f                                 # continue at 2f if breakFlags == 0
+    RETURN                                        # bail to the interpreter
+2:
+#else
+    RETURN                                        # bail to the interpreter unconditionally
+#endif
+
+    # go ahead and transfer control to the native code
+    lw     t6, offThread_jniLocal_topCookie(rSELF)  # t6<- thread->localRef->...
+    sw     a1, offThread_curFrame(rSELF)          # self->curFrame = newFp
+    sw     zero, offThread_inJitCodeCache(rSELF)  # not in the jit code cache
+    sw     t6, (offStackSaveArea_localRefCookie - sizeofStackSaveArea)(a1)
+                                                  # newFp->localRefCookie=top
+    SAVEAREA_FROM_FP(rBIX, a1)                    # rBIX<- new stack save area
+    move   a2, a0                                 # a2<- methodToCall
+    move   a0, a1                                 # a0<- newFp
+    add    a1, rSELF, offThread_retval            # a1<- &retval
+    move   a3, rSELF                              # a3<- self
+#if defined(TEMPLATE_INLINE_PROFILING)
+    # a2: methodToCall
+    # preserve a0-a3
+    SCRATCH_STORE(a0, 0)
+    SCRATCH_STORE(a1, 4)
+    SCRATCH_STORE(a2, 8)
+    SCRATCH_STORE(a3, 12)
+
+    move   a0, a2
+    move   a1, rSELF
+    # a0=JNIMethod, a1=rSELF
+    la      t9, dvmFastMethodTraceEnter
+    JALR(t9)                                      # call the method-trace-enter hook
+    lw     gp, STACK_OFFSET_GP(sp)
+
+    # restore a0-a3
+    SCRATCH_LOAD(a3, 12)
+    SCRATCH_LOAD(a2, 8)
+    SCRATCH_LOAD(a1, 4)
+    SCRATCH_LOAD(a0, 0)
+
+    move   rOBJ, a2                               # save a2
+#endif
+
+    JALR(rTEMP)                                   # off to the native code
+    lw     gp, STACK_OFFSET_GP(sp)
+
+#if defined(TEMPLATE_INLINE_PROFILING)
+    move   a0, rOBJ
+    move   a1, rSELF
+    # a0=JNIMethod, a1=rSELF
+    la      t9, dvmFastNativeMethodTraceExit
+    JALR(t9)
+    lw     gp, STACK_OFFSET_GP(sp)
+#endif
+
+    # native return; rBIX=newSaveArea
+    # equivalent to dvmPopJniLocals
+    lw     a2, offStackSaveArea_returnAddr(rBIX)     # a2 = chaining cell ret addr
+    lw     a0, offStackSaveArea_localRefCookie(rBIX) # a0<- saved->top
+    lw     a1, offThread_exception(rSELF)            # check for exception
+    sw     rFP, offThread_curFrame(rSELF)            # self->curFrame = fp
+    sw     a0, offThread_jniLocal_topCookie(rSELF)   # new top <- old top
+    lw     a0, (offStackSaveArea_currentPc - sizeofStackSaveArea)(rFP)
+
+    # a0 = dalvikCallsitePC
+    bnez   a1, .LhandleException                     # handle exception if any
+
+    sw     a2, offThread_inJitCodeCache(rSELF)       # set the mode properly
+    beqz   a2, 3f
+    jr     a2                                        # go there if the return chaining cell still exists
+
+3:
+    # continue executing the next instruction through the interpreter
+    la     a1, .LdvmJitToInterpTraceSelectNoChain    # defined in footer.S
+    lw     a1, (a1)
+    add    rPC, a0, 3*2                              # reconstruct new rPC (advance past the 3-code-unit invoke)
+
+#if defined(WITH_JIT_TUNING)
+    li     a0, kCallsiteInterpreted
+#endif
+    jr     a1
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_MUL_LONG
+dvmCompiler_TEMPLATE_MUL_LONG:
+/* File: mips/TEMPLATE_MUL_LONG.S */
+    /*
+     * Signed 64-bit integer multiply.
+     *
+     * For JIT: op1 in a0/a1, op2 in a2/a3, return in v0/v1
+     *
+     * Consider WXxYZ (a1a0 x a3a2) with a long multiply:
+     *
+     *         a1   a0
+     *   x     a3   a2
+     *   -------------
+     *       a2a1 a2a0
+     *       a3a0
+     *  a3a1 (<= unused)
+     *  ---------------
+     *         v1   v0
+     *
+     */
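+    /*
+     * Editor's sketch (C; illustrative only, with x = x_hi:x_lo in a1:a0 and
+     * y = y_hi:y_lo in a3:a2 as uint32_t halves):
+     *
+     *     uint64_t p  = (uint64_t)x_lo * y_lo;      // a2a0, full 32x32 product
+     *     uint32_t lo = (uint32_t)p;                // v0
+     *     uint32_t hi = (uint32_t)(p >> 32)         // hi(a2a0)
+     *                 + x_hi * y_lo                 // a2a1, low 32 bits
+     *                 + x_lo * y_hi;                // a3a0, low 32 bits
+     *     // result = ((uint64_t)hi << 32) | lo, i.e. v1:v0
+     */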
+    /* mul-long vAA, vBB, vCC */
+    mul     rRESULT1,rARG3,rARG0              #  v1= a3a0
+    multu   rARG2,rARG0
+    mfhi    t1
+    mflo    rRESULT0                          #  v0= a2a0
+    mul     t0,rARG2,rARG1                    #  t0= a2a1
+    addu    rRESULT1,rRESULT1,t1              #  v1= a3a0 + hi(a2a0)
+    addu    rRESULT1,rRESULT1,t0              #  v1= a3a0 + hi(a2a0) + a2a1;
+    RETURN
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_SHL_LONG
+dvmCompiler_TEMPLATE_SHL_LONG:
+/* File: mips/TEMPLATE_SHL_LONG.S */
+    /*
+     * Long integer shift.  This is different from the generic 32/64-bit
+     * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit.  Also, Dalvik requires us to ignore all but the low
+     * 6 bits.
+     */
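+    /*
+     * Editor's sketch (C; illustrative, n = shift & 63 on uint32_t halves):
+     *
+     *     uint32_t rlo = alo << (n & 31);
+     *     uint32_t rhi = (ahi << (n & 31)) | ((alo >> 1) >> (31 - (n & 31)));
+     *     if (n & 0x20) { rhi = rlo; rlo = 0; }
+     *
+     * The double srl below (by 1, then by ~n, i.e. 31-n) realizes the
+     * (alo >> 1) >> (31-n) term and avoids an undefined shift by 32 when
+     * n == 0.  The shr/ushr templates below follow the same pattern with a
+     * sign-extended or zero high word.
+     */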
+    /* shl-long vAA:vBB(rARG1:rARG0), vCC(a2) - result in (rRESULT1:rRESULT0) */
+    sll     rRESULT0, rARG0, a2		#  rlo<- alo << (shift&31)
+    not     rRESULT1, a2		#  rhi<- 31-shift  (shift is 5b)
+    srl     rARG0, 1
+    srl     rARG0, rRESULT1		#  alo<- alo >> (32-(shift&31))
+    sll     rRESULT1, rARG1, a2		#  rhi<- ahi << (shift&31)
+    or      rRESULT1, rARG0		#  rhi<- rhi | alo
+    andi    a2, 0x20			#  shift<- shift & 0x20
+    movn    rRESULT1, rRESULT0, a2	#  rhi<- rlo (if shift&0x20)
+    movn    rRESULT0, zero, a2		#  rlo<- 0  (if shift&0x20)
+    RETURN
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_SHR_LONG
+dvmCompiler_TEMPLATE_SHR_LONG:
+/* File: mips/TEMPLATE_SHR_LONG.S */
+    /*
+     * Long integer shift.  This is different from the generic 32/64-bit
+     * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit.  Also, Dalvik requires us to ignore all but the low
+     * 6 bits.
+     */
+    /* shr-long vAA:vBB(rARG1:rARG0), vCC(a2) - result in (rRESULT1:rRESULT0) */
+    sra     rRESULT1, rARG1, a2		#  rhi<- ahi >> (shift&31)
+    srl     rRESULT0, rARG0, a2		#  rlo<- alo >> (shift&31)
+    sra     a3, rARG1, 31		#  a3<- sign(ah)
+    not     rARG0, a2			#  alo<- 31-shift (shift is 5b)
+    sll     rARG1, 1
+    sll     rARG1, rARG0		#  ahi<- ahi << (32-(shift&31))
+    or      rRESULT0, rARG1		#  rlo<- rlo | ahi
+    andi    a2, 0x20			#  shift & 0x20
+    movn    rRESULT0, rRESULT1, a2	#  rlo<- rhi (if shift&0x20)
+    movn    rRESULT1, a3, a2		#  rhi<- sign(ahi) (if shift&0x20)
+    RETURN
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_USHR_LONG
+dvmCompiler_TEMPLATE_USHR_LONG:
+/* File: mips/TEMPLATE_USHR_LONG.S */
+    /*
+     * Long integer shift.  This is different from the generic 32/64-bit
+     * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit.  Also, Dalvik requires us to ignore all but the low
+     * 6 bits.
+     */
+    /* ushr-long vAA:vBB(rARG1:rARG0), vCC(a2) - result in (rRESULT1:rRESULT0) */
+    srl     rRESULT1, rARG1, a2		#  rhi<- ahi >> (shift&31)
+    srl     rRESULT0, rARG0, a2		#  rlo<- alo >> (shift&31)
+    not     rARG0, a2			#  alo<- 31-n  (shift is 5b)
+    sll     rARG1, 1
+    sll     rARG1, rARG0		#  ahi<- ahi << (32-(shift&31))
+    or      rRESULT0, rARG1		#  rlo<- rlo | ahi
+    andi    a2, 0x20			#  shift & 0x20
+    movn    rRESULT0, rRESULT1, a2	#  rlo<- rhi (if shift&0x20)
+    movn    rRESULT1, zero, a2		#  rhi<- 0 (if shift&0x20)
+    RETURN
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_ADD_FLOAT_VFP
+dvmCompiler_TEMPLATE_ADD_FLOAT_VFP:
+/* File: mips/TEMPLATE_ADD_FLOAT_VFP.S */
+/* File: mips/fbinop.S */
+    /*
+     * Generic 32-bit binary float operation. a0 = a1 op a2.
+     *
+     * For: add-fp, sub-fp, mul-fp, div-fp
+     *
+     * On entry:
+     *     a0 = target dalvik register address
+     *     a1 = op1 address
+     *     a2 = op2 address
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     *
+     */
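+    /*
+     * Editor's sketch (C; illustrative): each fbinop template is in effect
+     *
+     *     void fbinop(float *dst, const float *op1, const float *op2) {
+     *         *dst = *op1 OP *op2;   // OP is +, -, *, or / per template
+     *     }
+     *
+     * with the SOFT_FLOAT path routing OP through the soft-fp helpers
+     * (__addsf3 and friends).  The sub/mul/div variants below differ only
+     * in OP.
+     */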
+    move rOBJ, a0                       # save a0
+#ifdef  SOFT_FLOAT
+    LOAD(a0, a1)                        # a0<- vBB
+    LOAD(a1, a2)                        # a1<- vCC
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+                               # optional op
+    JAL(__addsf3)                              # v0 = result
+    STORE(v0, rOBJ)                     # vAA <- v0
+#else
+    LOAD_F(fa0, a1)                     # fa0<- vBB
+    LOAD_F(fa1, a2)                     # fa1<- vCC
+    .if 0
+    # is second operand zero?
+    li.s        ft0, 0
+    c.eq.s      fcc0, ft0, fa1          # compare with 0 and set condition bit
+    bc1t        fcc0, common_errDivideByZero
+    .endif
+                               # optional op
+    add.s fv0, fa0, fa1                            # fv0 = result
+    STORE_F(fv0, rOBJ)                  # vAA <- fv0
+#endif
+    RETURN
+
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_SUB_FLOAT_VFP
+dvmCompiler_TEMPLATE_SUB_FLOAT_VFP:
+/* File: mips/TEMPLATE_SUB_FLOAT_VFP.S */
+/* File: mips/fbinop.S */
+    /*
+     * Generic 32-bit binary float operation. a0 = a1 op a2.
+     *
+     * For: add-fp, sub-fp, mul-fp, div-fp
+     *
+     * On entry:
+     *     a0 = target dalvik register address
+     *     a1 = op1 address
+     *     a2 = op2 address
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     *
+     */
+    move rOBJ, a0                       # save a0
+#ifdef  SOFT_FLOAT
+    LOAD(a0, a1)                        # a0<- vBB
+    LOAD(a1, a2)                        # a1<- vCC
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+                               # optional op
+    JAL(__subsf3)                              # v0 = result
+    STORE(v0, rOBJ)                     # vAA <- v0
+#else
+    LOAD_F(fa0, a1)                     # fa0<- vBB
+    LOAD_F(fa1, a2)                     # fa1<- vCC
+    .if 0
+    # is second operand zero?
+    li.s        ft0, 0
+    c.eq.s      fcc0, ft0, fa1          # compare with 0 and set condition bit
+    bc1t        fcc0, common_errDivideByZero
+    .endif
+                               # optional op
+    sub.s fv0, fa0, fa1                            # fv0 = result
+    STORE_F(fv0, rOBJ)                  # vAA <- fv0
+#endif
+    RETURN
+
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_MUL_FLOAT_VFP
+dvmCompiler_TEMPLATE_MUL_FLOAT_VFP:
+/* File: mips/TEMPLATE_MUL_FLOAT_VFP.S */
+/* File: mips/fbinop.S */
+    /*
+     * Generic 32-bit binary float operation. a0 = a1 op a2.
+     *
+     * For: add-fp, sub-fp, mul-fp, div-fp
+     *
+     * On entry:
+     *     a0 = target dalvik register address
+     *     a1 = op1 address
+     *     a2 = op2 address
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     *
+     */
+    move rOBJ, a0                       # save a0
+#ifdef  SOFT_FLOAT
+    LOAD(a0, a1)                        # a0<- vBB
+    LOAD(a1, a2)                        # a1<- vCC
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+                               # optional op
+    JAL(__mulsf3)                              # v0 = result
+    STORE(v0, rOBJ)                     # vAA <- v0
+#else
+    LOAD_F(fa0, a1)                     # fa0<- vBB
+    LOAD_F(fa1, a2)                     # fa1<- vCC
+    .if 0
+    # is second operand zero?
+    li.s        ft0, 0
+    c.eq.s      fcc0, ft0, fa1          # compare with 0 and set condition bit
+    bc1t        fcc0, common_errDivideByZero
+    .endif
+                               # optional op
+    mul.s fv0, fa0, fa1                            # fv0 = result
+    STORE_F(fv0, rOBJ)                  # vAA <- fv0
+#endif
+    RETURN
+
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_DIV_FLOAT_VFP
+dvmCompiler_TEMPLATE_DIV_FLOAT_VFP:
+/* File: mips/TEMPLATE_DIV_FLOAT_VFP.S */
+/* File: mips/fbinop.S */
+    /*
+     * Generic 32-bit binary float operation. a0 = a1 op a2.
+     *
+     * For: add-fp, sub-fp, mul-fp, div-fp
+     *
+     * On entry:
+     *     a0 = target dalvik register address
+     *     a1 = op1 address
+     *     a2 = op2 address
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     *
+     */
+    move rOBJ, a0                       # save a0
+#ifdef  SOFT_FLOAT
+    LOAD(a0, a1)                        # a0<- vBB
+    LOAD(a1, a2)                        # a1<- vCC
+    .if 0
+    beqz    a1, common_errDivideByZero  # is second operand zero?
+    .endif
+                               # optional op
+    JAL(__divsf3)                              # v0 = result
+    STORE(v0, rOBJ)                     # vAA <- v0
+#else
+    LOAD_F(fa0, a1)                     # fa0<- vBB
+    LOAD_F(fa1, a2)                     # fa1<- vCC
+    .if 0
+    # is second operand zero?
+    li.s        ft0, 0
+    c.eq.s      fcc0, ft0, fa1          # compare with 0 and set condition bit
+    bc1t        fcc0, common_errDivideByZero
+    .endif
+                               # optional op
+    div.s fv0, fa0, fa1                            # fv0 = result
+    STORE_F(fv0, rOBJ)                  # vAA <- fv0
+#endif
+    RETURN
+
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_ADD_DOUBLE_VFP
+dvmCompiler_TEMPLATE_ADD_DOUBLE_VFP:
+/* File: mips/TEMPLATE_ADD_DOUBLE_VFP.S */
+/* File: mips/fbinopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+     *      xor-long, add-double, sub-double, mul-double, div-double,
+     *      rem-double
+     *
+     * On entry:
+     *     a0 = target dalvik register address
+     *     a1 = op1 address
+     *     a2 = op2 address
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     */
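+    /*
+     * Note: this is the double-precision analogue of the fbinop sketch
+     * above; LOAD64_F/STORE64_F move each double as an even/odd register
+     * pair (fa0/fa0f, fv0/fv0f), presumably the o32 FR=0 layout.
+     */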
+    move rOBJ, a0                       # save a0
+#ifdef  SOFT_FLOAT
+    move t0, a1                         # save a1
+    move t1, a2                         # save a2
+    LOAD64(rARG0, rARG1, t0)            # a0/a1<- vBB/vBB+1
+    LOAD64(rARG2, rARG3, t1)            # a2/a3<- vCC/vCC+1
+    .if 0
+    or          t0, rARG2, rARG3        # second arg (a2-a3) is zero?
+    beqz        t0, common_errDivideByZero
+    .endif
+                               # optional op
+    JAL(__adddf3)                              # result<- op, a0-a3 changed
+    STORE64(rRESULT0, rRESULT1, rOBJ)
+#else
+    LOAD64_F(fa0, fa0f, a1)
+    LOAD64_F(fa1, fa1f, a2)
+    .if 0
+    li.d        ft0, 0
+    c.eq.d      fcc0, fa1, ft0
+    bc1t        fcc0, common_errDivideByZero
+    .endif
+                               # optional op
+    add.d fv0, fa0, fa1
+    STORE64_F(fv0, fv0f, rOBJ)
+#endif
+    RETURN
+
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_SUB_DOUBLE_VFP
+dvmCompiler_TEMPLATE_SUB_DOUBLE_VFP:
+/* File: mips/TEMPLATE_SUB_DOUBLE_VFP.S */
+/* File: mips/fbinopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+     *      xor-long, add-double, sub-double, mul-double, div-double,
+     *      rem-double
+     *
+     * On entry:
+     *     a0 = target dalvik register address
+     *     a1 = op1 address
+     *     a2 = op2 address
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     */
+    move rOBJ, a0                       # save a0
+#ifdef  SOFT_FLOAT
+    move t0, a1                         # save a1
+    move t1, a2                         # save a2
+    LOAD64(rARG0, rARG1, t0)            # a0/a1<- vBB/vBB+1
+    LOAD64(rARG2, rARG3, t1)            # a2/a3<- vCC/vCC+1
+    .if 0
+    or          t0, rARG2, rARG3        # second arg (a2-a3) is zero?
+    beqz        t0, common_errDivideByZero
+    .endif
+                               # optional op
+    JAL(__subdf3)                              # result<- op, a0-a3 changed
+    STORE64(rRESULT0, rRESULT1, rOBJ)
+#else
+    LOAD64_F(fa0, fa0f, a1)
+    LOAD64_F(fa1, fa1f, a2)
+    .if 0
+    li.d        ft0, 0
+    c.eq.d      fcc0, fa1, ft0
+    bc1t        fcc0, common_errDivideByZero
+    .endif
+                               # optional op
+    sub.d fv0, fa0, fa1
+    STORE64_F(fv0, fv0f, rOBJ)
+#endif
+    RETURN
+
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_MUL_DOUBLE_VFP
+dvmCompiler_TEMPLATE_MUL_DOUBLE_VFP:
+/* File: mips/TEMPLATE_MUL_DOUBLE_VFP.S */
+/* File: mips/fbinopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+     *      xor-long, add-double, sub-double, mul-double, div-double,
+     *      rem-double
+     *
+     * On entry:
+     *     a0 = target dalvik register address
+     *     a1 = op1 address
+     *     a2 = op2 address
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     */
+    move rOBJ, a0                       # save a0
+#ifdef  SOFT_FLOAT
+    move t0, a1                         # save a1
+    move t1, a2                         # save a2
+    LOAD64(rARG0, rARG1, t0)            # a0/a1<- vBB/vBB+1
+    LOAD64(rARG2, rARG3, t1)            # a2/a3<- vCC/vCC+1
+    .if 0
+    or          t0, rARG2, rARG3        # second arg (a2-a3) is zero?
+    beqz        t0, common_errDivideByZero
+    .endif
+                               # optional op
+    JAL(__muldf3)                              # result<- op, a0-a3 changed
+    STORE64(rRESULT0, rRESULT1, rOBJ)
+#else
+    LOAD64_F(fa0, fa0f, a1)
+    LOAD64_F(fa1, fa1f, a2)
+    .if 0
+    li.d        ft0, 0
+    c.eq.d      fcc0, fa1, ft0
+    bc1t        fcc0, common_errDivideByZero
+    .endif
+                               # optional op
+    mul.d fv0, fa0, fa1
+    STORE64_F(fv0, fv0f, rOBJ)
+#endif
+    RETURN
+
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_DIV_DOUBLE_VFP
+dvmCompiler_TEMPLATE_DIV_DOUBLE_VFP:
+/* File: mips/TEMPLATE_DIV_DOUBLE_VFP.S */
+/* File: mips/fbinopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+     *      xor-long, add-double, sub-double, mul-double, div-double,
+     *      rem-double
+     *
+     * On entry:
+     *     a0 = target dalvik register address
+     *     a1 = op1 address
+     *     a2 = op2 address
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     */
+    move rOBJ, a0                       # save a0
+#ifdef  SOFT_FLOAT
+    move t0, a1                         # save a1
+    move t1, a2                         # save a2
+    LOAD64(rARG0, rARG1, t0)            # a0/a1<- vBB/vBB+1
+    LOAD64(rARG2, rARG3, t1)            # a2/a3<- vCC/vCC+1
+    .if 0
+    or          t0, rARG2, rARG3        # second arg (a2-a3) is zero?
+    beqz        t0, common_errDivideByZero
+    .endif
+                               # optional op
+    JAL(__divdf3)                              # result<- op, a0-a3 changed
+    STORE64(rRESULT0, rRESULT1, rOBJ)
+#else
+    LOAD64_F(fa0, fa0f, a1)
+    LOAD64_F(fa1, fa1f, a2)
+    .if 0
+    li.d        ft0, 0
+    c.eq.d      fcc0, fa1, ft0
+    bc1t        fcc0, common_errDivideByZero
+    .endif
+                               # optional op
+    div.d fv0, fa0, fa1
+    STORE64_F(fv0, fv0f, rOBJ)
+#endif
+    RETURN
+
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_DOUBLE_TO_FLOAT_VFP
+dvmCompiler_TEMPLATE_DOUBLE_TO_FLOAT_VFP:
+/* File: mips/TEMPLATE_DOUBLE_TO_FLOAT_VFP.S */
+/* File: mips/funopNarrower.S */
+    /*
+     * Generic 64bit-to-32bit unary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = op a0/a1", where
+     * "result" is a 32-bit quantity in a0.
+     *
+     * For: long-to-float, double-to-int, double-to-float
+     * If hard floating point support is available, use fa0 as the parameter,
+     * except for the long-to-float opcode.
+     * (This would work for long-to-int, but that instruction is actually
+     * an exact match for OP_MOVE.)
+     *
+     * On entry:
+     *     a0 = target dalvik register address
+     *     a1 = src dalvik register address
+     *
+     */
+    move rINST, a0                      # save a0
+#ifdef  SOFT_FLOAT
+    move t0, a1                         # save a1
+    LOAD64(rARG0, rARG1, t0)            # a0/a1<- vB/vB+1
+                               # optional op
+    JAL(__truncdfsf2)                              # v0<- op, a0-a3 changed
+.LTEMPLATE_DOUBLE_TO_FLOAT_VFP_set_vreg:
+    STORE(v0, rINST)                    # vA<- v0
+#else
+    LOAD64_F(fa0, fa0f, a1)
+                               # optional op
+    cvt.s.d  fv0,fa0                            # fv0 = result
+.LTEMPLATE_DOUBLE_TO_FLOAT_VFP_set_vreg_f:
+    STORE_F(fv0, rINST)                 # vA<- fv0
+#endif
+    RETURN
+
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_DOUBLE_TO_INT_VFP
+dvmCompiler_TEMPLATE_DOUBLE_TO_INT_VFP:
+/* File: mips/TEMPLATE_DOUBLE_TO_INT_VFP.S */
+/* File: mips/funopNarrower.S */
+    /*
+     * Generic 64bit-to-32bit unary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = op a0/a1", where
+     * "result" is a 32-bit quantity in a0.
+     *
+     * For: long-to-float, double-to-int, double-to-float
+     * If hard floating point support is available, use fa0 as the parameter,
+     * except for the long-to-float opcode.
+     * (This would work for long-to-int, but that instruction is actually
+     * an exact match for OP_MOVE.)
+     *
+     * On entry:
+     *     a0 = target dalvik register address
+     *     a1 = src dalvik register address
+     *
+     */
+    move rINST, a0                      # save a0
+#ifdef  SOFT_FLOAT
+    move t0, a1                         # save a1
+    LOAD64(rARG0, rARG1, t0)            # a0/a1<- vB/vB+1
+                               # optional op
+    b    d2i_doconv                              # v0<- op, a0-a3 changed
+.LTEMPLATE_DOUBLE_TO_INT_VFP_set_vreg:
+    STORE(v0, rINST)                    # vA<- v0
+#else
+    LOAD64_F(fa0, fa0f, a1)
+                               # optional op
+    b    d2i_doconv                            # fv0 = result
+.LTEMPLATE_DOUBLE_TO_INT_VFP_set_vreg_f:
+    STORE_F(fv0, rINST)                 # vA<- fv0
+#endif
+    RETURN
+
+
+/*
+ * Convert the double in a0/a1 to an int in a0.
+ *
+ * We have to clip values to int min/max per the specification.  The
+ * expected common case is a "reasonable" value that converts directly
+ * to a modest integer.  The EABI convert function doesn't do this for us.
+ * rBIX/rOBJ are used to hold the arguments across calls (they are not bound
+ * to any VM global).
+ */
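+/*
+ * Editor's sketch (C; illustrative, assumes <stdint.h>):
+ *
+ *     int32_t d2i(double d) {
+ *         if (d >= 2147483647.0)  return INT32_MAX;  // 0x7fffffff
+ *         if (d <= -2147483648.0) return INT32_MIN;  // 0x80000000
+ *         if (d != d)             return 0;          // NaN
+ *         return (int32_t)d;
+ *     }
+ */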
+
+d2i_doconv:
+#ifdef SOFT_FLOAT
+    la          t0, .LDOUBLE_TO_INT_max
+    LOAD64(rARG2, rARG3, t0)
+    move        rBIX, rARG0                       # save a0
+    move        rOBJ, rARG1                       #  and a1
+    JAL(__gedf2)                               # is arg >= maxint?
+
+    move        t0, v0
+    li          v0, ~0x80000000                # return maxint (7fffffff)
+    bgez        t0, .LTEMPLATE_DOUBLE_TO_INT_VFP_set_vreg       # >= 0 means yes
+
+    move        rARG0, rBIX                       # recover arg
+    move        rARG1, rOBJ
+    la          t0, .LDOUBLE_TO_INT_min
+    LOAD64(rARG2, rARG3, t0)
+    JAL(__ledf2)                               # is arg <= minint?
+
+    move        t0, v0
+    li          v0, 0x80000000                 # return minint (80000000)
+    blez        t0, .LTEMPLATE_DOUBLE_TO_INT_VFP_set_vreg       # <= 0 means yes
+
+    move        rARG0, rBIX                  # recover arg
+    move        rARG1, rOBJ
+    move        rARG2, rBIX                  # compare against self
+    move        rARG3, rOBJ
+    JAL(__nedf2)                        # is arg == self?
+
+    move        t0, v0                  # zero == no
+    li          v0, 0
+    bnez        t0, .LTEMPLATE_DOUBLE_TO_INT_VFP_set_vreg        # return zero for NaN
+
+    move        rARG0, rBIX                  # recover arg
+    move        rARG1, rOBJ
+    JAL(__fixdfsi)                      # convert double to int
+    b           .LTEMPLATE_DOUBLE_TO_INT_VFP_set_vreg
+#else
+    la          t0, .LDOUBLE_TO_INT_max
+    LOAD64_F(fa1, fa1f, t0)
+    c.ole.d     fcc0, fa1, fa0
+    l.s         fv0, .LDOUBLE_TO_INT_maxret
+    bc1t        .LTEMPLATE_DOUBLE_TO_INT_VFP_set_vreg_f
+
+    la          t0, .LDOUBLE_TO_INT_min
+    LOAD64_F(fa1, fa1f, t0)
+    c.ole.d     fcc0, fa0, fa1
+    l.s         fv0, .LDOUBLE_TO_INT_minret
+    bc1t        .LTEMPLATE_DOUBLE_TO_INT_VFP_set_vreg_f
+
+    mov.d       fa1, fa0
+    c.un.d      fcc0, fa0, fa1
+    li.s        fv0, 0
+    bc1t        .LTEMPLATE_DOUBLE_TO_INT_VFP_set_vreg_f
+
+    trunc.w.d   fv0, fa0
+    b           .LTEMPLATE_DOUBLE_TO_INT_VFP_set_vreg_f
+#endif
+
+
+.LDOUBLE_TO_INT_max:
+    .dword   0x41dfffffffc00000                  # maxint, as a double
+.LDOUBLE_TO_INT_min:
+    .dword   0xc1e0000000000000                  # minint, as a double
+.LDOUBLE_TO_INT_maxret:
+    .word   0x7fffffff
+.LDOUBLE_TO_INT_minret:
+    .word   0x80000000
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_FLOAT_TO_DOUBLE_VFP
+dvmCompiler_TEMPLATE_FLOAT_TO_DOUBLE_VFP:
+/* File: mips/TEMPLATE_FLOAT_TO_DOUBLE_VFP.S */
+/* File: mips/funopWider.S */
+    /*
+     * Generic 32bit-to-64bit floating point unary operation.  Provide an
+     * "instr" line that specifies an instruction that performs "d0 = op s0".
+     *
+     * For: int-to-double, float-to-double
+     *
+     * On entry:
+     *     a0 = target dalvik register address
+     *     a1 = src dalvik register address
+     */
+    /* unop vA, vB */
+    move rOBJ, a0                       # save a0
+#ifdef  SOFT_FLOAT
+    LOAD(a0, a1)                        # a0<- vB
+                               # optional op
+    JAL(__extendsfdf2)                              # result<- op, a0-a3 changed
+
+.LTEMPLATE_FLOAT_TO_DOUBLE_VFP_set_vreg:
+    STORE64(rRESULT0, rRESULT1, rOBJ)   # vA/vA+1<- v0/v1
+#else
+    LOAD_F(fa0, a1)                     # fa0<- vB
+                               # optional op
+    cvt.d.s fv0, fa0
+
+.LTEMPLATE_FLOAT_TO_DOUBLE_VFP_set_vreg:
+    STORE64_F(fv0, fv0f, rOBJ)                          # vA/vA+1<- fv0/fv0f
+#endif
+    RETURN
+
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_FLOAT_TO_INT_VFP
+dvmCompiler_TEMPLATE_FLOAT_TO_INT_VFP:
+/* File: mips/TEMPLATE_FLOAT_TO_INT_VFP.S */
+/* File: mips/funop.S */
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op a0".
+     * This could be a MIPS instruction or a function call.
+     *
+     * for: int-to-float, float-to-int
+     *
+     * On entry:
+     *     a0 = target dalvik register address
+     *     a1 = src dalvik register address
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     *
+     */
+    move rOBJ, a0                       # save a0
+#ifdef SOFT_FLOAT
+    LOAD(a0, a1)                        # a0<- vBB
+                               # optional op
+    b    f2i_doconv                              # v0<- op, a0-a3 changed
+.LTEMPLATE_FLOAT_TO_INT_VFP_set_vreg:
+    STORE(v0, rOBJ)                     # vAA<- v0
+#else
+    LOAD_F(fa0, a1)                     # fa0<- vBB
+                               # optional op
+    b        f2i_doconv                            # fv0 = result
+.LTEMPLATE_FLOAT_TO_INT_VFP_set_vreg_f:
+    STORE_F(fv0, rOBJ)                  # vAA <- fv0
+#endif
+    RETURN
+
+
+/*
+ * Not an entry point; used only by the float-to-int template above.
+ */
+f2i_doconv:
+#ifdef SOFT_FLOAT
+        li      a1, 0x4f000000  # (float)maxint
+        move    rBIX, a0
+        JAL(__gesf2)            # is arg >= maxint?
+        move    t0, v0
+        li      v0, ~0x80000000 # return maxint (7fffffff)
+        bgez    t0, .LTEMPLATE_FLOAT_TO_INT_VFP_set_vreg
+
+        move    a0, rBIX                # recover arg
+        li      a1, 0xcf000000  # (float)minint
+        JAL(__lesf2)
+
+        move    t0, v0
+        li      v0, 0x80000000  # return minint (80000000)
+        blez    t0, .LTEMPLATE_FLOAT_TO_INT_VFP_set_vreg
+        move    a0, rBIX
+        move    a1, rBIX
+        JAL(__nesf2)
+
+        move    t0, v0
+        li      v0, 0           # return zero for NaN
+        bnez    t0, .LTEMPLATE_FLOAT_TO_INT_VFP_set_vreg
+
+        move    a0, rBIX
+        JAL(__fixsfsi)
+        b .LTEMPLATE_FLOAT_TO_INT_VFP_set_vreg
+#else
+        l.s             fa1, .LFLOAT_TO_INT_max
+        c.ole.s         fcc0, fa1, fa0
+        l.s             fv0, .LFLOAT_TO_INT_ret_max
+        bc1t            .LTEMPLATE_FLOAT_TO_INT_VFP_set_vreg_f
+
+        l.s             fa1, .LFLOAT_TO_INT_min
+        c.ole.s         fcc0, fa0, fa1
+        l.s             fv0, .LFLOAT_TO_INT_ret_min
+        bc1t            .LTEMPLATE_FLOAT_TO_INT_VFP_set_vreg_f
+
+        mov.s           fa1, fa0
+        c.un.s          fcc0, fa0, fa1
+        li.s            fv0, 0
+        bc1t            .LTEMPLATE_FLOAT_TO_INT_VFP_set_vreg_f
+
+        trunc.w.s       fv0, fa0
+        b .LTEMPLATE_FLOAT_TO_INT_VFP_set_vreg_f
+#endif
+
+.LFLOAT_TO_INT_max:
+        .word   0x4f000000
+.LFLOAT_TO_INT_min:
+        .word   0xcf000000
+.LFLOAT_TO_INT_ret_max:
+        .word   0x7fffffff
+.LFLOAT_TO_INT_ret_min:
+        .word   0x80000000
+
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_INT_TO_DOUBLE_VFP
+dvmCompiler_TEMPLATE_INT_TO_DOUBLE_VFP:
+/* File: mips/TEMPLATE_INT_TO_DOUBLE_VFP.S */
+/* File: mips/funopWider.S */
+    /*
+     * Generic 32bit-to-64bit floating point unary operation.  Provide an
+     * "instr" line that specifies an instruction that performs "d0 = op s0".
+     *
+     * For: int-to-double, float-to-double
+     *
+     * On entry:
+     *     a0 = target dalvik register address
+     *     a1 = src dalvik register address
+     */
+    /* unop vA, vB */
+    move rOBJ, a0                       # save a0
+#ifdef  SOFT_FLOAT
+    LOAD(a0, a1)                        # a0<- vB
+                               # optional op
+    JAL(__floatsidf)                              # result<- op, a0-a3 changed
+
+.LTEMPLATE_INT_TO_DOUBLE_VFP_set_vreg:
+    STORE64(rRESULT0, rRESULT1, rOBJ)   # vA/vA+1<- v0/v1
+#else
+    LOAD_F(fa0, a1)                     # fa0<- vB
+                               # optional op
+    cvt.d.w    fv0, fa0
+
+.LTEMPLATE_INT_TO_DOUBLE_VFP_set_vreg:
+    STORE64_F(fv0, fv0f, rOBJ)                          # vA/vA+1<- fv0/fv0f
+#endif
+    RETURN
+
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_INT_TO_FLOAT_VFP
+dvmCompiler_TEMPLATE_INT_TO_FLOAT_VFP:
+/* File: mips/TEMPLATE_INT_TO_FLOAT_VFP.S */
+/* File: mips/funop.S */
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op a0".
+     * This could be a MIPS instruction or a function call.
+     *
+     * for: int-to-float, float-to-int
+     *
+     * On entry:
+     *     a0 = target dalvik register address
+     *     a1 = src dalvik register address
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     *
+     */
+    move rOBJ, a0                       # save a0
+#ifdef SOFT_FLOAT
+    LOAD(a0, a1)                        # a0<- vBB
+                               # optional op
+    JAL(__floatsisf)                              # v0<- op, a0-a3 changed
+.LTEMPLATE_INT_TO_FLOAT_VFP_set_vreg:
+    STORE(v0, rOBJ)                     # vAA<- v0
+#else
+    LOAD_F(fa0, a1)                     # fa0<- vBB
+                               # optional op
+    cvt.s.w fv0, fa0                            # fv0 = result
+.LTEMPLATE_INT_TO_FLOAT_VFP_set_vreg_f:
+    STORE_F(fv0, rOBJ)                  # vAA <- fv0
+#endif
+    RETURN
+
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_CMPG_DOUBLE_VFP
+dvmCompiler_TEMPLATE_CMPG_DOUBLE_VFP:
+/* File: mips/TEMPLATE_CMPG_DOUBLE_VFP.S */
+/* File: mips/TEMPLATE_CMPL_DOUBLE_VFP.S */
+    /*
+     * Compare two double precision floating-point values.  Puts 0, 1, or -1 into the
+     * destination register based on the results of the comparison.
+     *
+     * Provide a "naninst" instruction that puts 1 or -1 into a1 depending
+     * on what value we'd like to return when one of the operands is NaN.
+     *
+     * The operation we're implementing is:
+     *   if (x == y)
+     *     return 0;
+     *   else if (x < y)
+     *     return -1;
+     *   else if (x > y)
+     *     return 1;
+     *   else
+     *     return {-1,1};  // one or both operands were NaN
+     *
+     * On entry:
+     *    a0 = &op1 [vBB]
+     *    a1 = &op2 [vCC]
+     *
+     * for: cmpl-double, cmpg-double
+     */
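+    /*
+     * Note: this cmpg variant falls through to load +1 when an operand is
+     * NaN; the cmpl template below differs only in loading -1 there.
+     */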
+    /* op vAA, vBB, vCC */
+
+    /* "clasic" form */
+#ifdef  SOFT_FLOAT
+    move rOBJ, a0                       # save a0
+    move rBIX, a1                       # save a1
+    LOAD64(rARG0, rARG1, rOBJ)          # a0/a1<- vBB/vBB+1
+    LOAD64(rARG2, rARG3, rBIX)          # a2/a3<- vCC/vCC+1
+    JAL(__eqdf2)                        # v0<- (vBB == vCC)
+    li       rTEMP, 0                   # vAA<- 0
+    beqz     v0, TEMPLATE_CMPG_DOUBLE_VFP_finish
+    LOAD64(rARG0, rARG1, rOBJ)          # a0/a1<- vBB/vBB+1
+    LOAD64(rARG2, rARG3, rBIX)          # a2/a3<- vCC/vCC+1
+    JAL(__ltdf2)                        # v0<- (vBB < vCC)
+    li       rTEMP, -1                  # vAA<- -1
+    bltz     v0, TEMPLATE_CMPG_DOUBLE_VFP_finish
+    LOAD64(rARG0, rARG1, rOBJ)          # a0/a1<- vBB/vBB+1
+    LOAD64(rARG2, rARG3, rBIX)          # a2/a3<- vCC/vCC+1
+    JAL(__gtdf2)                        # v0<- (vBB > vCC)
+    li      rTEMP, 1                    # vAA<- 1
+    bgtz    v0, TEMPLATE_CMPG_DOUBLE_VFP_finish
+#else
+    LOAD64_F(fs0, fs0f, a0)             # fs0<- vBB
+    LOAD64_F(fs1, fs1f, a1)             # fs1<- vCC
+    c.olt.d     fcc0, fs0, fs1          # Is fs0 < fs1
+    li          rTEMP, -1
+    bc1t        fcc0, TEMPLATE_CMPG_DOUBLE_VFP_finish
+    c.olt.d     fcc0, fs1, fs0
+    li          rTEMP, 1
+    bc1t        fcc0, TEMPLATE_CMPG_DOUBLE_VFP_finish
+    c.eq.d      fcc0, fs0, fs1
+    li          rTEMP, 0
+    bc1t        fcc0, TEMPLATE_CMPG_DOUBLE_VFP_finish
+#endif
+
+    li            rTEMP, 1
+
+TEMPLATE_CMPG_DOUBLE_VFP_finish:
+    move     v0, rTEMP                  # v0<- vAA
+    RETURN
+
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_CMPL_DOUBLE_VFP
+dvmCompiler_TEMPLATE_CMPL_DOUBLE_VFP:
+/* File: mips/TEMPLATE_CMPL_DOUBLE_VFP.S */
+    /*
+     * Compare two double precision floating-point values.  Puts 0, 1, or -1 into the
+     * destination register based on the results of the comparison.
+     *
+     * Provide a "naninst" instruction that puts 1 or -1 into a1 depending
+     * on what value we'd like to return when one of the operands is NaN.
+     *
+     * The operation we're implementing is:
+     *   if (x == y)
+     *     return 0;
+     *   else if (x < y)
+     *     return -1;
+     *   else if (x > y)
+     *     return 1;
+     *   else
+     *     return {-1,1};  // one or both operands were NaN
+     *
+     * On entry:
+     *    a0 = &op1 [vBB]
+     *    a1 = &op2 [vCC]
+     *
+     * for: cmpl-double, cmpg-double
+     */
+    /* op vAA, vBB, vCC */
+
+    /* "clasic" form */
+#ifdef  SOFT_FLOAT
+    move rOBJ, a0                       # save a0
+    move rBIX, a1                       # save a1
+    LOAD64(rARG0, rARG1, rOBJ)          # a0/a1<- vBB/vBB+1
+    LOAD64(rARG2, rARG3, rBIX)          # a2/a3<- vCC/vCC+1
+    JAL(__eqdf2)                        # v0<- (vBB == vCC)
+    li       rTEMP, 0                   # vAA<- 0
+    beqz     v0, TEMPLATE_CMPL_DOUBLE_VFP_finish
+    LOAD64(rARG0, rARG1, rOBJ)          # a0/a1<- vBB/vBB+1
+    LOAD64(rARG2, rARG3, rBIX)          # a2/a3<- vCC/vCC+1
+    JAL(__ltdf2)                        # v0<- (vBB < vCC)
+    li       rTEMP, -1                  # vAA<- -1
+    bltz     v0, TEMPLATE_CMPL_DOUBLE_VFP_finish
+    LOAD64(rARG0, rARG1, rOBJ)          # a0/a1<- vBB/vBB+1
+    LOAD64(rARG2, rARG3, rBIX)          # a2/a3<- vCC/vCC+1
+    JAL(__gtdf2)                        # v0<- (vBB > vCC)
+    li      rTEMP, 1                    # vAA<- 1
+    bgtz    v0, TEMPLATE_CMPL_DOUBLE_VFP_finish
+#else
+    LOAD64_F(fs0, fs0f, a0)             # fs0<- vBB
+    LOAD64_F(fs1, fs1f, a1)             # fs1<- vCC
+    c.olt.d     fcc0, fs0, fs1          # Is fs0 < fs1
+    li          rTEMP, -1
+    bc1t        fcc0, TEMPLATE_CMPL_DOUBLE_VFP_finish
+    c.olt.d     fcc0, fs1, fs0
+    li          rTEMP, 1
+    bc1t        fcc0, TEMPLATE_CMPL_DOUBLE_VFP_finish
+    c.eq.d      fcc0, fs0, fs1
+    li          rTEMP, 0
+    bc1t        fcc0, TEMPLATE_CMPL_DOUBLE_VFP_finish
+#endif
+
+    li     rTEMP, -1
+
+TEMPLATE_CMPL_DOUBLE_VFP_finish:
+    move     v0, rTEMP                  # v0<- vAA
+    RETURN
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_CMPG_FLOAT_VFP
+dvmCompiler_TEMPLATE_CMPG_FLOAT_VFP:
+/* File: mips/TEMPLATE_CMPG_FLOAT_VFP.S */
+/* File: mips/TEMPLATE_CMPL_FLOAT_VFP.S */
+    /*
+     * Compare two floating-point values.  Puts 0, 1, or -1 into the
+     * destination register based on the results of the comparison.
+     *
+     * Provide a "naninst" instruction that puts 1 or -1 into a1 depending
+     * on what value we'd like to return when one of the operands is NaN.
+     *
+     * The operation we're implementing is:
+     *   if (x == y)
+     *     return 0;
+     *   else if (x < y)
+     *     return -1;
+     *   else if (x > y)
+     *     return 1;
+     *   else
+     *     return {-1,1};  // one or both operands were NaN
+     *
+     * On entry:
+     *    a0 = &op1 [vBB]
+     *    a1 = &op2 [vCC]
+     *
+     * for: cmpl-float, cmpg-float
+     */
+    /* op vAA, vBB, vCC */
+
+    /* "clasic" form */
+#ifdef  SOFT_FLOAT
+    LOAD(rOBJ, a0)                      # rOBJ<- vBB
+    LOAD(rBIX, a1)                      # rBIX<- vCC
+    move     a0, rOBJ                   # a0<- vBB
+    move     a1, rBIX                   # a1<- vCC
+    JAL(__eqsf2)                        # v0<- (vBB == vCC)
+    li       rTEMP, 0                   # vAA<- 0
+    beqz     v0, TEMPLATE_CMPG_FLOAT_VFP_finish
+    move     a0, rOBJ                   # a0<- vBB
+    move     a1, rBIX                   # a1<- vCC
+    JAL(__ltsf2)                        # v0<- (vBB < vCC)
+    li       rTEMP, -1                  # vAA<- -1
+    bltz     v0, TEMPLATE_CMPG_FLOAT_VFP_finish
+    move     a0, rOBJ                   # a0<- vBB
+    move     a1, rBIX                   # a1<- vCC
+    JAL(__gtsf2)                        # v0<- (vBB > vCC)
+    li      rTEMP, 1                    # vAA<- 1
+    bgtz    v0, TEMPLATE_CMPG_FLOAT_VFP_finish
+#else
+    LOAD_F(fs0, a0)                     # fs0<- vBB
+    LOAD_F(fs1, a1)                     # fs1<- vCC
+    c.olt.s     fcc0, fs0, fs1          # is fs0 < fs1?
+    li          rTEMP, -1
+    bc1t        fcc0, TEMPLATE_CMPG_FLOAT_VFP_finish
+    c.olt.s     fcc0, fs1, fs0
+    li          rTEMP, 1
+    bc1t        fcc0, TEMPLATE_CMPG_FLOAT_VFP_finish
+    c.eq.s      fcc0, fs0, fs1
+    li          rTEMP, 0
+    bc1t        fcc0, TEMPLATE_CMPG_FLOAT_VFP_finish
+#endif
+
+    li     rTEMP, 1
+
+TEMPLATE_CMPG_FLOAT_VFP_finish:
+    move     v0, rTEMP                  # v0<- vAA
+    RETURN
+
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_CMPL_FLOAT_VFP
+dvmCompiler_TEMPLATE_CMPL_FLOAT_VFP:
+/* File: mips/TEMPLATE_CMPL_FLOAT_VFP.S */
+    /*
+     * Compare two floating-point values.  Puts 0, 1, or -1 into the
+     * destination register based on the results of the comparison.
+     *
+     * Provide a "naninst" instruction that puts 1 or -1 into a1 depending
+     * on what value we'd like to return when one of the operands is NaN.
+     *
+     * The operation we're implementing is:
+     *   if (x == y)
+     *     return 0;
+     *   else if (x < y)
+     *     return -1;
+     *   else if (x > y)
+     *     return 1;
+     *   else
+     *     return {-1,1};  // one or both operands were NaN
+     *
+     * On entry:
+     *    a0 = &op1 [vBB]
+     *    a1 = &op2 [vCC]
+     *
+     * for: cmpl-float, cmpg-float
+     */
+    /* op vAA, vBB, vCC */
+
+    /* "clasic" form */
+#ifdef  SOFT_FLOAT
+    LOAD(rOBJ, a0)                      # rOBJ<- vBB
+    LOAD(rBIX, a1)                      # rBIX<- vCC
+    move     a0, rOBJ                   # a0<- vBB
+    move     a1, rBIX                   # a1<- vCC
+    JAL(__eqsf2)                        # v0<- (vBB == vCC)
+    li       rTEMP, 0                   # vAA<- 0
+    beqz     v0, TEMPLATE_CMPL_FLOAT_VFP_finish
+    move     a0, rOBJ                   # a0<- vBB
+    move     a1, rBIX                   # a1<- vCC
+    JAL(__ltsf2)                        # v0<- (vBB < vCC)
+    li       rTEMP, -1                  # vAA<- -1
+    bltz     v0, TEMPLATE_CMPL_FLOAT_VFP_finish
+    move     a0, rOBJ                   # a0<- vBB
+    move     a1, rBIX                   # a1<- vCC
+    JAL(__gtsf2)                        # v0<- (vBB > vCC)
+    li      rTEMP, 1                    # vAA<- 1
+    bgtz    v0, TEMPLATE_CMPL_FLOAT_VFP_finish
+#else
+    LOAD_F(fs0, a0)                     # fs0<- vBB
+    LOAD_F(fs1, a1)                     # fs1<- vCC
+    c.olt.s     fcc0, fs0, fs1          # is fs0 < fs1?
+    li          rTEMP, -1
+    bc1t        fcc0, TEMPLATE_CMPL_FLOAT_VFP_finish
+    c.olt.s     fcc0, fs1, fs0
+    li          rTEMP, 1
+    bc1t        fcc0, TEMPLATE_CMPL_FLOAT_VFP_finish
+    c.eq.s      fcc0, fs0, fs1
+    li          rTEMP, 0
+    bc1t        fcc0, TEMPLATE_CMPL_FLOAT_VFP_finish
+#endif
+
+    li     rTEMP, -1
+
+TEMPLATE_CMPL_FLOAT_VFP_finish:
+    move     v0, rTEMP                  # v0<- vAA
+    RETURN
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_SQRT_DOUBLE_VFP
+dvmCompiler_TEMPLATE_SQRT_DOUBLE_VFP:
+/* File: mips/TEMPLATE_SQRT_DOUBLE_VFP.S */
+
+    /*
+     * 64-bit floating point sqrt operation.
+     * If the result is a NaN, bail out to library code to do
+     * the right thing.
+     *
+     * On entry:
+     *     a2 src addr of op1
+     * On exit:
+     *     v0,v1/fv0 = res
+     */
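+    /*
+     * Note: the c.eq.d self-compare below is false only for NaN (since
+     * NaN != NaN), so the bc1t skips the fallback call to the libm sqrt()
+     * for ordinary results.
+     */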
+#ifdef  SOFT_FLOAT
+    LOAD64(rARG0, rARG1, a2)        # a0/a1<- vBB/vBB+1
+#else
+    LOAD64_F(fa0, fa0f, a2)         # fa0/fa0f<- vBB/vBB+1
+    sqrt.d	fv0, fa0
+    c.eq.d	fv0, fv0
+    bc1t	1f
+#endif
+    JAL(sqrt)
+1:
+    RETURN
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_THROW_EXCEPTION_COMMON
+dvmCompiler_TEMPLATE_THROW_EXCEPTION_COMMON:
+/* File: mips/TEMPLATE_THROW_EXCEPTION_COMMON.S */
+    /*
+     * Throw an exception from JIT'ed code.
+     * On entry:
+     *    a0    Dalvik PC that raises the exception
+     */
+    j      .LhandleException
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_MEM_OP_DECODE
+dvmCompiler_TEMPLATE_MEM_OP_DECODE:
+/* File: mips/TEMPLATE_MEM_OP_DECODE.S */
+#if defined(WITH_SELF_VERIFICATION)
+    /*
+     * This handler encapsulates heap memory ops for selfVerification mode.
+     *
+     * The call to the handler is inserted prior to a heap memory operation.
+     * This handler then calls a function to decode the memory op, and process
+     * it accordingly. Afterwards, the handler changes the return address to
+     * skip the memory op so it never gets executed.
+     */
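+    /*
+     * The decode routine is reached indirectly via
+     * .LdvmSelfVerificationMemOpDecode (loaded below) with a0 = ra (the
+     * address of the skipped memory op) and a1 = sp (the base of the
+     * register save area built here).
+     */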
+#ifdef HARD_FLOAT
+    /* push f0-f31 onto stack */
+    sw      f0, fr0*-4(sp)              # push f0
+    sw      f1, fr1*-4(sp)              # push f1
+    sw      f2, fr2*-4(sp)              # push f2
+    sw      f3, fr3*-4(sp)              # push f3
+    sw      f4, fr4*-4(sp)              # push f4
+    sw      f5, fr5*-4(sp)              # push f5
+    sw      f6, fr6*-4(sp)              # push f6
+    sw      f7, fr7*-4(sp)              # push f7
+    sw      f8, fr8*-4(sp)              # push f8
+    sw      f9, fr9*-4(sp)              # push f9
+    sw      f10, fr10*-4(sp)            # push f10
+    sw      f11, fr11*-4(sp)            # push f11
+    sw      f12, fr12*-4(sp)            # push f12
+    sw      f13, fr13*-4(sp)            # push f13
+    sw      f14, fr14*-4(sp)            # push f14
+    sw      f15, fr15*-4(sp)            # push f15
+    sw      f16, fr16*-4(sp)            # push f16
+    sw      f17, fr17*-4(sp)            # push f17
+    sw      f18, fr18*-4(sp)            # push f18
+    sw      f19, fr19*-4(sp)            # push f19
+    sw      f20, fr20*-4(sp)            # push f20
+    sw      f21, fr21*-4(sp)            # push f21
+    sw      f22, fr22*-4(sp)            # push f22
+    sw      f23, fr23*-4(sp)            # push f23
+    sw      f24, fr24*-4(sp)            # push f24
+    sw      f25, fr25*-4(sp)            # push f25
+    sw      f26, fr26*-4(sp)            # push f26
+    sw      f27, fr27*-4(sp)            # push f27
+    sw      f28, fr28*-4(sp)            # push f28
+    sw      f29, fr29*-4(sp)            # push f29
+    sw      f30, fr30*-4(sp)            # push f30
+    sw      f31, fr31*-4(sp)            # push f31
+
+    sub     sp, (32-0)*4                # adjust stack pointer
+#endif
+
+    /* push gp registers (except zero, gp, sp, and fp) */
+    .set noat
+    sw      AT, r_AT*-4(sp)             # push at
+    .set at
+    sw      v0, r_V0*-4(sp)             # push v0
+    sw      v1, r_V1*-4(sp)             # push v1
+    sw      a0, r_A0*-4(sp)             # push a0
+    sw      a1, r_A1*-4(sp)             # push a1
+    sw      a2, r_A2*-4(sp)             # push a2
+    sw      a3, r_A3*-4(sp)             # push a3
+    sw      t0, r_T0*-4(sp)             # push t0
+    sw      t1, r_T1*-4(sp)             # push t1
+    sw      t2, r_T2*-4(sp)             # push t2
+    sw      t3, r_T3*-4(sp)             # push t3
+    sw      t4, r_T4*-4(sp)             # push t4
+    sw      t5, r_T5*-4(sp)             # push t5
+    sw      t6, r_T6*-4(sp)             # push t6
+    sw      t7, r_T7*-4(sp)             # push t7
+    sw      s0, r_S0*-4(sp)             # push s0
+    sw      s1, r_S1*-4(sp)             # push s1
+    sw      s2, r_S2*-4(sp)             # push s2
+    sw      s3, r_S3*-4(sp)             # push s3
+    sw      s4, r_S4*-4(sp)             # push s4
+    sw      s5, r_S5*-4(sp)             # push s5
+    sw      s6, r_S6*-4(sp)             # push s6
+    sw      s7, r_S7*-4(sp)             # push s7
+    sw      t8, r_T8*-4(sp)             # push t8
+    sw      t9, r_T9*-4(sp)             # push t9
+    sw      k0, r_K0*-4(sp)             # push k0
+    sw      k1, r_K1*-4(sp)             # push k1
+    sw      ra, r_RA*-4(sp)             # push RA
+
+    # Note: even if we don't save all 32 registers, we still need to
+    #       adjust SP by 32 registers due to the way we are storing
+    #       the registers on the stack.
+    sub     sp, (32-0)*4                # adjust stack pointer
+
+    la     a2, .LdvmSelfVerificationMemOpDecode  # defined in footer.S
+    lw     a2, (a2)
+    move   a0, ra                       # a0<- link register
+    move   a1, sp                       # a1<- stack pointer
+    JALR(a2)
+
+    /* pop gp registers (except zero, gp, sp, and fp) */
+    # Note: even if we don't save all 32 registers, we still need to
+    #       adjust SP by 32 registers due to the way we are storing
+    #       the registers on the stack.
+    add     sp, (32-0)*4                # adjust stack pointer
+    .set noat
+    lw      AT, r_AT*-4(sp)             # pop at
+    .set at
+    lw      v0, r_V0*-4(sp)             # pop v0
+    lw      v1, r_V1*-4(sp)             # pop v1
+    lw      a0, r_A0*-4(sp)             # pop a0
+    lw      a1, r_A1*-4(sp)             # pop a1
+    lw      a2, r_A2*-4(sp)             # pop a2
+    lw      a3, r_A3*-4(sp)             # pop a3
+    lw      t0, r_T0*-4(sp)             # pop t0
+    lw      t1, r_T1*-4(sp)             # pop t1
+    lw      t2, r_T2*-4(sp)             # pop t2
+    lw      t3, r_T3*-4(sp)             # pop t3
+    lw      t4, r_T4*-4(sp)             # pop t4
+    lw      t5, r_T5*-4(sp)             # pop t5
+    lw      t6, r_T6*-4(sp)             # pop t6
+    lw      t7, r_T7*-4(sp)             # pop t7
+    lw      s0, r_S0*-4(sp)             # pop s0
+    lw      s1, r_S1*-4(sp)             # pop s1
+    lw      s2, r_S2*-4(sp)             # pop s2
+    lw      s3, r_S3*-4(sp)             # pop s3
+    lw      s4, r_S4*-4(sp)             # pop s4
+    lw      s5, r_S5*-4(sp)             # pop s5
+    lw      s6, r_S6*-4(sp)             # pop s6
+    lw      s7, r_S7*-4(sp)             # pop s7
+    lw      t8, r_T8*-4(sp)             # pop t8
+    lw      t9, r_T9*-4(sp)             # pop t9
+    lw      k0, r_K0*-4(sp)             # pop k0
+    lw      k1, r_K1*-4(sp)             # pop k1
+    lw      ra, r_RA*-4(sp)             # pop RA
+
+#ifdef HARD_FLOAT
+    /* pop f0-f31 from stack */
+    add     sp, (32-0)*4                # adjust stack pointer
+    lw      f0, fr0*-4(sp)              # pop f0
+    lw      f1, fr1*-4(sp)              # pop f1
+    lw      f2, fr2*-4(sp)              # pop f2
+    lw      f3, fr3*-4(sp)              # pop f3
+    lw      f4, fr4*-4(sp)              # pop f4
+    lw      f5, fr5*-4(sp)              # pop f5
+    lw      f6, fr6*-4(sp)              # pop f6
+    lw      f7, fr7*-4(sp)              # pop f7
+    lw      f8, fr8*-4(sp)              # pop f8
+    lw      f9, fr9*-4(sp)              # pop f9
+    lw      f10, fr10*-4(sp)            # pop f10
+    lw      f11, fr11*-4(sp)            # pop f11
+    lw      f12, fr12*-4(sp)            # pop f12
+    lw      f13, fr13*-4(sp)            # pop f13
+    lw      f14, fr14*-4(sp)            # pop f14
+    lw      f15, fr15*-4(sp)            # pop f15
+    lw      f16, fr16*-4(sp)            # pop f16
+    lw      f17, fr17*-4(sp)            # pop f17
+    lw      f18, fr18*-4(sp)            # pop f18
+    lw      f19, fr19*-4(sp)            # pop f19
+    lw      f20, fr20*-4(sp)            # pop f20
+    lw      f21, fr21*-4(sp)            # pop f21
+    lw      f22, fr22*-4(sp)            # pop f22
+    lw      f23, fr23*-4(sp)            # pop f23
+    lw      f24, fr24*-4(sp)            # pop f24
+    lw      f25, fr25*-4(sp)            # pop f25
+    lw      f26, fr26*-4(sp)            # pop f26
+    lw      f27, fr27*-4(sp)            # pop f27
+    lw      f28, fr28*-4(sp)            # pop f28
+    lw      f29, fr29*-4(sp)            # pop f29
+    lw      f30, fr30*-4(sp)            # pop f30
+    lw      f31, fr31*-4(sp)            # pop f31
+#endif
+
+    RETURN
+#endif
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_STRING_COMPARETO
+dvmCompiler_TEMPLATE_STRING_COMPARETO:
+/* File: mips/TEMPLATE_STRING_COMPARETO.S */
+    /*
+     * String's compareTo.
+     *
+     * Requires a0/a1 to have been previously checked for null.  Will
+     * return a negative value if this string is < comp, 0 if they are
+     * the same, and a positive value if >.
+     *
+     * IMPORTANT NOTE:
+     *
+     * This code relies on hard-coded offsets for string objects, and must be
+     * kept in sync with definitions in UtfString.h.  See asm-constants.h
+     *
+     * On entry:
+     *    a0:   this object pointer
+     *    a1:   comp object pointer
+     *
+     */
+
+     subu  v0, a0, a1                # Same?
+     bnez  v0, 1f
+     RETURN
+1:
+     lw    t0, STRING_FIELDOFF_OFFSET(a0)
+     lw    t1, STRING_FIELDOFF_OFFSET(a1)
+     lw    t2, STRING_FIELDOFF_COUNT(a0)
+     lw    a2, STRING_FIELDOFF_COUNT(a1)
+     lw    a0, STRING_FIELDOFF_VALUE(a0)
+     lw    a1, STRING_FIELDOFF_VALUE(a1)
+
+    /*
+     * At this point, we have this/comp:
+     *    offset: t0/t1
+     *    count:  t2/a2
+     *    value:  a0/a1
+     * We're going to compute
+     *    a3 <- countDiff
+     *    a2 <- minCount
+     */
+     subu  a3, t2, a2                # a3<- countDiff
+     sleu  t7, t2, a2
+     movn  a2, t2, t7                # a2<- minCount
+
+     /*
+      * Note: data pointers point to first element.
+      */
+     addu  a0, 16                    # point to contents[0]
+     addu  a1, 16                    # point to contents[0]
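+     /*
+      * The 16 bytes skipped here are the char[] array header (the
+      * element data begins at a fixed offset); the string "offset"
+      * field counts chars, hence the shift-by-1 below to get bytes.
+      */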
+
+     /* Now, build pointers to the string data */
+     sll   t7, t0, 1                 # multiply offset by 2
+     addu  a0, a0, t7
+     sll   t7, t1, 1                 # multiply offset by 2
+     addu  a1, a1, t7
+
+     /*
+      * At this point we have:
+      *   a0: *this string data
+      *   a1: *comp string data
+      *   a2: iteration count for comparison
+      *   a3: value to return if the first part of the string is equal
+      *   v0: reserved for result
+      *   t0-t5 available for loading string data
+      */
+
+     subu  a2, 2
+     bltz  a2, do_remainder2
+
+     /*
+      * Unroll the first two checks so we can quickly catch early mismatch
+      * on long strings (but preserve incoming alignment)
+      */
+     lhu   t0, 0(a0)
+     lhu   t1, 0(a1)
+     subu  v0, t0, t1
+     beqz  v0, 1f
+     RETURN
+1:
+     lhu   t2, 2(a0)
+     lhu   t3, 2(a1)
+     subu  v0, t2, t3
+     beqz  v0, 2f
+     RETURN
+2:
+     addu  a0, 4                     # offset to contents[2]
+     addu  a1, 4                     # offset to contents[2]
+     li    t7, 28
+     bgt   a2, t7, do_memcmp16
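+     # with more than 28 chars still to compare, hand off to the
+     # tuned __memcmp16 helper (do_memcmp16 below)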
+     subu  a2, 3
+     bltz  a2, do_remainder
+
+loopback_triple:
+     lhu   t0, 0(a0)
+     lhu   t1, 0(a1)
+     subu  v0, t0, t1
+     beqz  v0, 1f
+     RETURN
+1:
+     lhu   t2, 2(a0)
+     lhu   t3, 2(a1)
+     subu  v0, t2, t3
+     beqz  v0, 2f
+     RETURN
+2:
+     lhu   t4, 4(a0)
+     lhu   t5, 4(a1)
+     subu  v0, t4, t5
+     beqz  v0, 3f
+     RETURN
+3:
+     addu  a0, 6                     # offset to contents[i+3]
+     addu  a1, 6                     # offset to contents[i+3]
+     subu  a2, 3
+     bgez  a2, loopback_triple
+
+do_remainder:
+     addu  a2, 3
+     beqz  a2, returnDiff
+
+loopback_single:
+     lhu   t0, 0(a0)
+     lhu   t1, 0(a1)
+     subu  v0, t0, t1
+     bnez  v0, 1f
+     addu  a0, 2                     # offset to contents[i+1]
+     addu  a1, 2                     # offset to contents[i+1]
+     subu  a2, 1
+     bnez  a2, loopback_single
+
+returnDiff:
+     move  v0, a3
+1:
+     RETURN
+
+do_remainder2:
+     addu  a2, 2
+     bnez  a2, loopback_single
+     move  v0, a3
+     RETURN
+
+    /* Long string case */
+do_memcmp16:
+     move  rOBJ, a3                  # save return value if strings are equal
+     JAL(__memcmp16)
+     seq   t0, v0, zero
+     movn  v0, rOBJ, t0              # overwrite return value if strings are equal
+     RETURN
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_STRING_INDEXOF
+dvmCompiler_TEMPLATE_STRING_INDEXOF:
+/* File: mips/TEMPLATE_STRING_INDEXOF.S */
+    /*
+     * String's indexOf.
+     *
+     * Requires a0 to have been previously checked for null.  Will
+     * return the index of the first match in v0, or -1 if the char
+     * in a1 is not found.
+     *
+     * IMPORTANT NOTE:
+     *
+     * This code relies on hard-coded offsets for string objects, and must be
+     * kept in sync with definitions in UtfString.h.  See asm-constants.h
+     *
+     * On entry:
+     *    a0:   string object pointer
+     *    a1:   char to match
+     *    a2:   Starting offset in string data
+     */
+
+     lw    t0, STRING_FIELDOFF_OFFSET(a0)
+     lw    t1, STRING_FIELDOFF_COUNT(a0)
+     lw    v0, STRING_FIELDOFF_VALUE(a0)
+
+    /*
+     * At this point, we have:
+     *    v0: object pointer
+     *    a1: char to match
+     *    a2: starting offset
+     *    t0: offset
+     *    t1: string length
+     */
+
+    /* Point to first element */
+     addu  v0, 16                    # point to contents[0]
+
+    /* Build pointer to start of string data */
+     sll   t7, t0, 1                 # multiply offset by 2
+     addu  v0, v0, t7
+
+    /* Save a copy of starting data in v1 */
+     move  v1, v0
+
+    /* Clamp start to [0..count] */
+     slt   t7, a2, zero
+     movn  a2, zero, t7
+     sgt   t7, a2, t1
+     movn  a2, t1, t7
+
+    /* Build pointer to start of data to compare */
+     sll   t7, a2, 1                # multiply offset by 2
+     addu  v0, v0, t7
+
+    /* Compute iteration count */
+     subu  a3, t1, a2
+
+    /*
+     * At this point we have:
+     *   v0: start of data to test
+     *   a1: char to compare
+     *   a3: iteration count
+     *   v1: original start of string
+     *   t0-t7 available for loading string data
+     */
+     subu  a3, 4
+     bltz  a3, indexof_remainder
+
+indexof_loop4:
+     lhu   t0, 0(v0)
+     beq   t0, a1, match_0
+     lhu   t0, 2(v0)
+     beq   t0, a1, match_1
+     lhu   t0, 4(v0)
+     beq   t0, a1, match_2
+     lhu   t0, 6(v0)
+     beq   t0, a1, match_3
+     addu  v0, 8                     # offset to contents[i+4]
+     subu  a3, 4
+     bgez  a3, indexof_loop4
+
+indexof_remainder:
+     addu  a3, 4
+     beqz  a3, indexof_nomatch
+
+indexof_loop1:
+     lhu   t0, 0(v0)
+     beq   t0, a1, match_0
+     addu  v0, 2                     # offset to contents[i+1]
+     subu  a3, 1
+     bnez  a3, indexof_loop1
+
+indexof_nomatch:
+     li    v0, -1
+     RETURN
+
+match_0:
+     subu  v0, v1
+     sra   v0, v0, 1                 # divide by 2
+     RETURN
+match_1:
+     addu  v0, 2
+     subu  v0, v1
+     sra   v0, v0, 1                 # divide by 2
+     RETURN
+match_2:
+     addu  v0, 4
+     subu  v0, v1
+     sra   v0, v0, 1                 # divide by 2
+     RETURN
+match_3:
+     addu  v0, 6
+     subu  v0, v1
+     sra   v0, v0, 1                 # divide by 2
+     RETURN
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_INTERPRET
+dvmCompiler_TEMPLATE_INTERPRET:
+/* File: mips/TEMPLATE_INTERPRET.S */
+    /*
+     * This handler transfers control to the interpreter without performing
+     * any lookups.  It may be called either as part of a normal chaining
+     * operation, or from the transition code in header.S.  We distinguish
+     * the two cases by looking at the link register.  If called from a
+     * translation chain, it will point to the chaining Dalvik PC.
+     * On entry:
+     *    ra - if NULL:
+     *        a1 - the Dalvik PC to begin interpretation.
+     *    else
+     *        [ra] contains Dalvik PC to begin interpretation
+     *    rSELF - pointer to thread
+     *    rFP - Dalvik frame pointer
+     */
+    la      t0, dvmJitToInterpPunt
+    move    a0, a1
+    beq     ra, zero, 1f
+    lw      a0, 0(ra)
+1:
+    jr      t0
+    # doesn't return
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_MONITOR_ENTER
+dvmCompiler_TEMPLATE_MONITOR_ENTER:
+/* File: mips/TEMPLATE_MONITOR_ENTER.S */
+    /*
+     * Call out to the runtime to lock an object.  Because this thread
+     * may have been suspended in THREAD_MONITOR state and the Jit's
+     * translation cache subsequently cleared, we cannot return directly.
+     * Instead, unconditionally transition to the interpreter to resume.
+     *
+     * On entry:
+     *    a0 - self pointer
+     *    a1 - the object (which has already been null-checked by the caller)
+     *    rPC - the Dalvik PC of the following instruction.
+     */
+    la     a2, .LdvmLockObject
+    lw     t9, (a2)
+    sw     zero, offThread_inJitCodeCache(a0)   # record that we're not returning
+    JALR(t9)                                    # dvmLockObject(self, obj)
+    lw     gp, STACK_OFFSET_GP(sp)
+
+    la     a2, .LdvmJitToInterpNoChain
+    lw     a2, (a2)
+
+    # Bail to interpreter - no chain [note - rPC still contains dPC]
+#if defined(WITH_JIT_TUNING)
+    li      a0, kHeavyweightMonitor
+#endif
+    jr      a2
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_MONITOR_ENTER_DEBUG
+dvmCompiler_TEMPLATE_MONITOR_ENTER_DEBUG:
+/* File: mips/TEMPLATE_MONITOR_ENTER_DEBUG.S */
+    /*
+     * To support deadlock prediction, this version of MONITOR_ENTER
+     * will always call the heavyweight dvmLockObject, check for an
+     * exception and then bail out to the interpreter.
+     *
+     * On entry:
+     *    a0 - self pointer
+     *    a1 - the object (which has already been null-checked by the caller)
+     *    rPC - the Dalvik PC of the following instruction.
+     *
+     */
+    la     a2, .LdvmLockObject
+    lw     t9, (a2)
+    sw     zero, offThread_inJitCodeCache(a0)   # record that we're not returning
+    JALR(t9)                                    # dvmLockObject(self, obj)
+    lw     gp, STACK_OFFSET_GP(sp)
+
+    # test for exception
+    lw     a1, offThread_exception(rSELF)
+    beqz   a1, 1f
+    sub    a0, rPC, 2                           # roll dPC back to this monitor instruction
+    j      .LhandleException
+1:
+    # Bail to interpreter - no chain [note - rPC still contains dPC]
+#if defined(WITH_JIT_TUNING)
+    li     a0, kHeavyweightMonitor
+#endif
+    la     a2, .LdvmJitToInterpNoChain
+    lw     a2, (a2)
+    jr     a2
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_RESTORE_STATE
+dvmCompiler_TEMPLATE_RESTORE_STATE:
+/* File: mips/TEMPLATE_RESTORE_STATE.S */
+    /*
+     * This handler restores state following a selfVerification memory access.
+     * On entry:
+     *    a0 - offset from rSELF to the 1st element of the coreRegs save array.
+     * Note: the following registers are not restored
+     *       zero, AT, gp, sp, fp, ra
+     */
+
+    add     a0, a0, rSELF               # pointer to heapArgSpace.coreRegs[0]
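+    # a0 is the base pointer for every load below, so its own saved
+    # value is restored last, just before returning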
+#if 0
+    lw      zero, r_ZERO*4(a0)          # restore zero
+#endif
+    .set noat
+    lw      AT, r_AT*4(a0)              # restore at
+    .set at
+    lw      v0, r_V0*4(a0)              # restore v0
+    lw      v1, r_V1*4(a0)              # restore v1
+
+    lw      a1, r_A1*4(a0)              # restore a1
+    lw      a2, r_A2*4(a0)              # restore a2
+    lw      a3, r_A3*4(a0)              # restore a3
+
+    lw      t0, r_T0*4(a0)              # restore t0
+    lw      t1, r_T1*4(a0)              # restore t1
+    lw      t2, r_T2*4(a0)              # restore t2
+    lw      t3, r_T3*4(a0)              # restore t3
+    lw      t4, r_T4*4(a0)              # restore t4
+    lw      t5, r_T5*4(a0)              # restore t5
+    lw      t6, r_T6*4(a0)              # restore t6
+    lw      t7, r_T7*4(a0)              # restore t7
+
+    lw      s0, r_S0*4(a0)              # restore s0
+    lw      s1, r_S1*4(a0)              # restore s1
+    lw      s2, r_S2*4(a0)              # restore s2
+    lw      s3, r_S3*4(a0)              # restore s3
+    lw      s4, r_S4*4(a0)              # restore s4
+    lw      s5, r_S5*4(a0)              # restore s5
+    lw      s6, r_S6*4(a0)              # restore s6
+    lw      s7, r_S7*4(a0)              # restore s7
+
+    lw      t8, r_T8*4(a0)              # restore t8
+    lw      t9, r_T9*4(a0)              # restore t9
+
+    lw      k0, r_K0*4(a0)              # restore k0
+    lw      k1, r_K1*4(a0)              # restore k1
+
+#if 0
+    lw      gp, r_GP*4(a0)              # restore gp
+    lw      sp, r_SP*4(a0)              # restore sp
+    lw      fp, r_FP*4(a0)              # restore fp
+    lw      ra, r_RA*4(a0)              # restore ra
+#endif
+
+/* #ifdef HARD_FLOAT */
+#if 0
+    lw      f0, fr0*4(a0)               # restore f0
+    lw      f1, fr1*4(a0)               # restore f1
+    lw      f2, fr2*4(a0)               # restore f2
+    lw      f3, fr3*4(a0)               # restore f3
+    lw      f4, fr4*4(a0)               # restore f4
+    lw      f5, fr5*4(a0)               # restore f5
+    lw      f6, fr6*4(a0)               # restore f6
+    lw      f7, fr7*4(a0)               # restore f7
+    lw      f8, fr8*4(a0)               # restore f8
+    lw      f9, fr9*4(a0)               # restore f9
+    lw      f10, fr10*4(a0)             # restore f10
+    lw      f11, fr11*4(a0)             # restore f11
+    lw      f12, fr12*4(a0)             # restore f12
+    lw      f13, fr13*4(a0)             # restore f13
+    lw      f14, fr14*4(a0)             # restore f14
+    lw      f15, fr15*4(a0)             # restore f15
+    lw      f16, fr16*4(a0)             # restore f16
+    lw      f17, fr17*4(a0)             # restore f17
+    lw      f18, fr18*4(a0)             # restore f18
+    lw      f19, fr19*4(a0)             # restore f19
+    lw      f20, fr20*4(a0)             # restore f20
+    lw      f21, fr21*4(a0)             # restore f21
+    lw      f22, fr22*4(a0)             # restore f22
+    lw      f23, fr23*4(a0)             # restore f23
+    lw      f24, fr24*4(a0)             # restore f24
+    lw      f25, fr25*4(a0)             # restore f25
+    lw      f26, fr26*4(a0)             # restore f26
+    lw      f27, fr27*4(a0)             # restore f27
+    lw      f28, fr28*4(a0)             # restore f28
+    lw      f29, fr29*4(a0)             # restore f29
+    lw      f30, fr30*4(a0)             # restore f30
+    lw      f31, fr31*4(a0)             # restore f31
+#endif
+
+    lw      a0, r_A0*4(a0)              # restore a0 (saved at r_A0)
+    RETURN
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_SAVE_STATE
+dvmCompiler_TEMPLATE_SAVE_STATE:
+/* File: mips/TEMPLATE_SAVE_STATE.S */
+    /*
+     * This handler performs a register save for selfVerification mode.
+     * On entry:
+     *    Top of stack + 4: a1 value to save
+     *    Top of stack + 0: a0 value to save
+     *    a0 - offset from rSELF to the beginning of the heapArgSpace record
+     *    a1 - the value of regMap
+     *
+     * The handler must save regMap, r0-r31, and f0-f31 if there is an FPU,
+     * then return with r0-r31 holding their original values (note that this
+     * means a0 and a1 must take the values on the stack - not the ones in
+     * those registers on entry).
+     * Finally, the two registers previously pushed must be popped.
+     * Note: the following registers are not saved
+     *       zero, AT, gp, sp, fp, ra
+     */
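+    /*
+     * The two words at 0(sp)/4(sp) were pushed by the instrumented
+     * call site; they hold the live a0/a1 values that this handler
+     * clobbers, and are reloaded (and popped) before RETURN.
+     */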
+    add     a0, a0, rSELF               # pointer to heapArgSpace
+    sw      a1, 0(a0)                   # save regMap
+    add     a0, a0, 4                   # pointer to coreRegs
+#if 0
+    sw      zero, r_ZERO*4(a0)          # save zero
+#endif
+    .set noat
+    sw      AT, r_AT*4(a0)              # save at
+    .set at
+    sw      v0, r_V0*4(a0)              # save v0
+    sw      v1, r_V1*4(a0)              # save v1
+
+    lw      a1, 0(sp)                   # recover a0 value
+    sw      a1, r_A0*4(a0)              # save a0
+    lw      a1, 4(sp)                   # recover a1 value
+    sw      a1, r_A1*4(a0)              # save a1
+    sw      a2, r_A2*4(a0)              # save a2
+    sw      a3, r_A3*4(a0)              # save a3
+
+    sw      t0, r_T0*4(a0)              # save t0
+    sw      t1, r_T1*4(a0)              # save t1
+    sw      t2, r_T2*4(a0)              # save t2
+    sw      t3, r_T3*4(a0)              # save t3
+    sw      t4, r_T4*4(a0)              # save t4
+    sw      t5, r_T5*4(a0)              # save t5
+    sw      t6, r_T6*4(a0)              # save t6
+    sw      t7, r_T7*4(a0)              # save t7
+
+    sw      s0, r_S0*4(a0)              # save s0
+    sw      s1, r_S1*4(a0)              # save s1
+    sw      s2, r_S2*4(a0)              # save s2
+    sw      s3, r_S3*4(a0)              # save s3
+    sw      s4, r_S4*4(a0)              # save s4
+    sw      s5, r_S5*4(a0)              # save s5
+    sw      s6, r_S6*4(a0)              # save s6
+    sw      s7, r_S7*4(a0)              # save s7
+
+    sw      t8, r_T8*4(a0)              # save t8
+    sw      t9, r_T9*4(a0)              # save t9
+
+    sw      k0, r_K0*4(a0)              # save k0
+    sw      k1, r_K1*4(a0)              # save k1
+
+#if 0
+    sw      gp, r_GP*4(a0)              # save gp
+    sw      sp, r_SP*4(a0)              # save sp (need to adjust??? )
+    sw      fp, r_FP*4(a0)              # save fp
+    sw      ra, r_RA*4(a0)              # save ra
+#endif
+
+/* #ifdef HARD_FLOAT */
+#if 0
+    sw      f0, fr0*4(a0)               # save f0
+    sw      f1, fr1*4(a0)               # save f1
+    sw      f2, fr2*4(a0)               # save f2
+    sw      f3, fr3*4(a0)               # save f3
+    sw      f4, fr4*4(a0)               # save f4
+    sw      f5, fr5*4(a0)               # save f5
+    sw      f6, fr6*4(a0)               # save f6
+    sw      f7, fr7*4(a0)               # save f7
+    sw      f8, fr8*4(a0)               # save f8
+    sw      f9, fr9*4(a0)               # save f9
+    sw      f10, fr10*4(a0)             # save f10
+    sw      f11, fr11*4(a0)             # save f11
+    sw      f12, fr12*4(a0)             # save f12
+    sw      f13, fr13*4(a0)             # save f13
+    sw      f14, fr14*4(a0)             # save f14
+    sw      f15, fr15*4(a0)             # save f15
+    sw      f16, fr16*4(a0)             # save f16
+    sw      f17, fr17*4(a0)             # save f17
+    sw      f18, fr18*4(a0)             # save f18
+    sw      f19, fr19*4(a0)             # save f19
+    sw      f20, fr20*4(a0)             # save f20
+    sw      f21, fr21*4(a0)             # save f21
+    sw      f22, fr22*4(a0)             # save f22
+    sw      f23, fr23*4(a0)             # save f23
+    sw      f24, fr24*4(a0)             # save f24
+    sw      f25, fr25*4(a0)             # save f25
+    sw      f26, fr26*4(a0)             # save f26
+    sw      f27, fr27*4(a0)             # save f27
+    sw      f28, fr28*4(a0)             # save f28
+    sw      f29, fr29*4(a0)             # save f29
+    sw      f30, fr30*4(a0)             # save f30
+    sw      f31, fr31*4(a0)             # save f31
+#endif
+
+    lw      a0, 0(sp)                   # recover a0 value
+    lw      a1, 4(sp)                   # recover a1 value
+    add     sp, sp, 8                   # pop the two saved words
+    RETURN
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_PERIODIC_PROFILING
+dvmCompiler_TEMPLATE_PERIODIC_PROFILING:
+/* File: mips/TEMPLATE_PERIODIC_PROFILING.S */
+    /*
+     * Increment profile counter for this trace, and decrement
+     * sample counter.  If sample counter goes below zero, turn
+     * off profiling.
+     *
+     * On entry:
+     * (ra-16) is the address of the pointer to the counter.  Note: the
+     *    counter address actually sits 16 bytes before the return target
+     *    on mips:
+     *     - 4 bytes for the prof count addr.
+     *     - 4 bytes for the chain cell offset (2 bytes, 32-bit aligned).
+     *     - 4 bytes for the call to TEMPLATE_PERIODIC_PROFILING.
+     *     - 4 bytes for the call's delay slot.
+     */
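+     /*
+      * Sketch of the call site this layout implies (offsets relative
+      * to ra; the exact encoding is produced by the trace compiler):
+      *   ra-16:  .word  <&profile counter>
+      *   ra-12:  chain cell offset word
+      *   ra-8 :  jal    dvmCompiler_TEMPLATE_PERIODIC_PROFILING
+      *   ra-4 :  <branch delay slot>
+      *   ra   :  first instruction after the call
+      */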
+     lw     a0, -16(ra)
+     lw     a1, offThread_pProfileCountdown(rSELF)
+     lw     a2, 0(a0)                   # get counter
+     lw     a3, 0(a1)                   # get countdown timer
+     addu   a2, 1
+     sub    a3, 1                       # FIXME - bug in ARM code???
+     bltz   a3, .LTEMPLATE_PERIODIC_PROFILING_disable_profiling
+     sw     a2, 0(a0)
+     sw     a3, 0(a1)
+     RETURN
+.LTEMPLATE_PERIODIC_PROFILING_disable_profiling:
+     move   rTEMP, ra                   # preserve ra
+     la     a0, dvmJitTraceProfilingOff
+     JALR(a0)
+     jr     rTEMP
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_RETURN_PROF
+dvmCompiler_TEMPLATE_RETURN_PROF:
+/* File: mips/TEMPLATE_RETURN_PROF.S */
+#define TEMPLATE_INLINE_PROFILING
+/* File: mips/TEMPLATE_RETURN.S */
+    /*
+     * Unwind a frame from the Dalvik stack for compiled OP_RETURN_XXX.
+     * If the stored value in returnAddr is non-zero, the caller was
+     * compiled by the JIT, so return to the address in the code cache
+     * following the invoke instruction. Otherwise return to the special
+     * dvmJitToInterpNoChain entry point.
+     */
+#if defined(TEMPLATE_INLINE_PROFILING)
+    # preserve a0-a2 and ra
+    SCRATCH_STORE(a0, 0)
+    SCRATCH_STORE(a1, 4)
+    SCRATCH_STORE(a2, 8)
+    SCRATCH_STORE(ra, 12)
+
+    # a0=rSELF
+    move    a0, rSELF
+    la      t9, dvmFastMethodTraceExit
+    JALR(t9)
+    lw      gp, STACK_OFFSET_GP(sp)
+
+    # restore a0-a2 and ra
+    SCRATCH_LOAD(ra, 12)
+    SCRATCH_LOAD(a2, 8)
+    SCRATCH_LOAD(a1, 4)
+    SCRATCH_LOAD(a0, 0)
+#endif
+    SAVEAREA_FROM_FP(a0, rFP)           # a0<- saveArea (old)
+    lw      t0, offStackSaveArea_prevFrame(a0)     # t0<- saveArea->prevFrame
+    lbu     t1, offThread_breakFlags(rSELF)        # t1<- breakFlags
+    lw      rPC, offStackSaveArea_savedPc(a0)      # rPC<- saveArea->savedPc
+#if !defined(WITH_SELF_VERIFICATION)
+    lw      t2,  offStackSaveArea_returnAddr(a0)   # t2<- chaining cell ret
+#else
+    move    t2, zero                               # disable chaining
+#endif
+    lw      a2, offStackSaveArea_method - sizeofStackSaveArea(t0)
+                                                   # a2<- method we're returning to
+#if !defined(WITH_SELF_VERIFICATION)
+    beq     a2, zero, 1f                           # bail to interpreter
+#else
+    bne     a2, zero, 2f
+    JALR(ra)                                       # punt to interpreter and compare state
+    # DOUG: assume this does not return ???
+2:
+#endif
+    la      t4, .LdvmJitToInterpNoChainNoProfile   # defined in footer.S
+    lw      a1, (t4)
+    move    rFP, t0                                # publish new FP
+    beq     a2, zero, 4f
+    lw      t0, offMethod_clazz(a2)                # t0<- method->clazz
+4:
+
+    sw      a2, offThread_method(rSELF)            # self->method = newSave->method
+    lw      a0, offClassObject_pDvmDex(t0)         # a0<- method->clazz->pDvmDex
+    sw      rFP, offThread_curFrame(rSELF)         # self->curFrame = fp
+    add     rPC, rPC, 3*2                          # publish new rPC
+    sw      a0, offThread_methodClassDex(rSELF)
+    movn    t2, zero, t1                           # check the breakFlags and
+                                                   # clear the chaining cell address
+    sw      t2, offThread_inJitCodeCache(rSELF)    # in code cache or not
+    beq     t2, zero, 3f                           # chaining cell exists?
+    JALR(t2)                                       # jump to the chaining cell
+    # DOUG: assume this does not return ???
+3:
+#if defined(WITH_JIT_TUNING)
+    li      a0, kCallsiteInterpreted
+#endif
+    j       a1                                     # callsite is interpreted
+1:
+    sw      zero, offThread_inJitCodeCache(rSELF)  # reset inJitCodeCache
+    SAVE_PC_TO_SELF()                              # SAVE_PC_FP_TO_SELF()
+    SAVE_FP_TO_SELF()
+    la      t4, .LdvmMterpStdBail                  # defined in footer.S
+    lw      a2, (t4)
+    move    a0, rSELF                              # Expecting rSELF in a0
+    JALR(a2)                                       # exit the interpreter
+    # DOUG: assume this does not return ???
+
+#undef TEMPLATE_INLINE_PROFILING
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT_PROF
+dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT_PROF:
+/* File: mips/TEMPLATE_INVOKE_METHOD_NO_OPT_PROF.S */
+#define TEMPLATE_INLINE_PROFILING
+/* File: mips/TEMPLATE_INVOKE_METHOD_NO_OPT.S */
+    /*
+     * For polymorphic callsites - setup the Dalvik frame and load Dalvik PC
+     * into rPC then jump to dvmJitToInterpNoChain to dispatch the
+     * runtime-resolved callee.
+     */
+    # a0 = methodToCall, a1 = returnCell, rPC = dalvikCallsite
+    lh     t7, offMethod_registersSize(a0)        # t7<- methodToCall->regsSize
+    lh     a2, offMethod_outsSize(a0)             # a2<- methodToCall->outsSize
+    lw     t9, offThread_interpStackEnd(rSELF)    # t9<- interpStackEnd
+    lbu    t8, offThread_breakFlags(rSELF)        # t8<- breakFlags
+    move   a3, a1                                 # a3<- returnCell
+    SAVEAREA_FROM_FP(a1, rFP)                     # a1<- stack save area
+    sll    t6, t7, 2                              # multiply regsSize by 4 (4 bytes per reg)
+    sub    a1, a1, t6                             # a1<- newFp(old savearea-regsSize)
+    SAVEAREA_FROM_FP(t0, a1)                      # t0<- stack save area
+    sll    t6, a2, 2                              # multiply outsSize by 4 (4 bytes per reg)
+    sub    t0, t0, t6                             # t0<- bottom (newsave-outsSize)
+    bgeu   t0, t9, 1f                             # bottom < interpStackEnd?
+    RETURN                                        # return to raise stack overflow excep.
+
+1:
+    # a1 = newFP, a0 = methodToCall, a3 = returnCell, rPC = dalvikCallsite
+    lw     t9, offMethod_clazz(a0)                # t9<- methodToCall->clazz
+    lw     t0, offMethod_accessFlags(a0)          # t0<- methodToCall->accessFlags
+    sw     rPC, (offStackSaveArea_currentPc - sizeofStackSaveArea)(rFP)
+    sw     rPC, (offStackSaveArea_savedPc - sizeofStackSaveArea)(a1)
+    lw     rPC, offMethod_insns(a0)               # rPC<- methodToCall->insns
+
+    # set up newSaveArea
+    sw     rFP, (offStackSaveArea_prevFrame - sizeofStackSaveArea)(a1)
+    sw     a3, (offStackSaveArea_returnAddr - sizeofStackSaveArea)(a1)
+    sw     a0, (offStackSaveArea_method - sizeofStackSaveArea)(a1)
+    beqz   t8, 2f                                 # breakFlags != 0
+    RETURN                                        # bail to the interpreter
+
+2:
+    and    t6, t0, ACC_NATIVE
+    beqz   t6, 3f
+#if !defined(WITH_SELF_VERIFICATION)
+    j      .LinvokeNative
+#else
+    RETURN                                        # bail to the interpreter
+#endif
+
+3:
+    # continue executing the next instruction through the interpreter
+    la     t0, .LdvmJitToInterpTraceSelectNoChain # defined in footer.S
+    lw     rTEMP, (t0)
+    lw     a3, offClassObject_pDvmDex(t9)         # a3<- method->clazz->pDvmDex
+
+    # Update "thread" values for the new method
+    sw     a0, offThread_method(rSELF)            # self->method = methodToCall
+    sw     a3, offThread_methodClassDex(rSELF)    # self->methodClassDex = ...
+    move   rFP, a1                                # fp = newFp
+    sw     rFP, offThread_curFrame(rSELF)         # self->curFrame = newFp
+#if defined(TEMPLATE_INLINE_PROFILING)
+    # preserve a0-a3
+    SCRATCH_STORE(a0, 0)
+    SCRATCH_STORE(a1, 4)
+    SCRATCH_STORE(a2, 8)
+    SCRATCH_STORE(a3, 12)
+
+    # a0=methodToCall, a1=rSELF
+    move   a1, rSELF
+    la     t9, dvmFastMethodTraceEnter
+    JALR(t9)
+    lw     gp, STACK_OFFSET_GP(sp)
+
+    # restore a0-a3
+    SCRATCH_LOAD(a3, 12)
+    SCRATCH_LOAD(a2, 8)
+    SCRATCH_LOAD(a1, 4)
+    SCRATCH_LOAD(a0, 0)
+#endif
+
+    # Start executing the callee
+#if defined(WITH_JIT_TUNING)
+    li     a0, kInlineCacheMiss
+#endif
+    jr     rTEMP                                  # dvmJitToInterpTraceSelectNoChain
+
+#undef TEMPLATE_INLINE_PROFILING
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN_PROF
+dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN_PROF:
+/* File: mips/TEMPLATE_INVOKE_METHOD_CHAIN_PROF.S */
+#define TEMPLATE_INLINE_PROFILING
+/* File: mips/TEMPLATE_INVOKE_METHOD_CHAIN.S */
+    /*
+     * For monomorphic callsite, setup the Dalvik frame and return to the
+     * Thumb code through the link register to transfer control to the callee
+     * method through a dedicated chaining cell.
+     */
+    # a0 = methodToCall, a1 = returnCell, rPC = dalvikCallsite
+    # methodToCall is guaranteed to be non-native
+.LinvokeChainProf:
+    lh     t7, offMethod_registersSize(a0)        # t7<- methodToCall->regsSize
+    lh     a2, offMethod_outsSize(a0)             # a2<- methodToCall->outsSize
+    lw     t9, offThread_interpStackEnd(rSELF)    # t9<- interpStackEnd
+    lbu    t8, offThread_breakFlags(rSELF)        # t8<- breakFlags
+    move   a3, a1                                 # a3<- returnCell
+    SAVEAREA_FROM_FP(a1, rFP)                     # a1<- stack save area
+    sll    t6, t7, 2                              # multiply regsSize by 4 (4 bytes per reg)
+    sub    a1, a1, t6                             # a1<- newFp(old savearea-regsSize)
+    SAVEAREA_FROM_FP(t0, a1)                      # t0<- stack save area
+    add    t2, ra, 8                              # setup the punt-to-interp address
+                                                  # 8 bytes skips branch and delay slot
+    sll    t6, a2, 2                              # multiply outsSize by 4 (4 bytes per reg)
+    sub    t0, t0, t6                             # t0<- bottom (newsave-outsSize)
+    bgeu   t0, t9, 1f                             # bottom < interpStackEnd?
+    jr     t2                                     # return to raise stack overflow excep.
+
+1:
+    # a1 = newFP, a0 = methodToCall, a3 = returnCell, rPC = dalvikCallsite
+    lw     t9, offMethod_clazz(a0)                # t9<- methodToCall->clazz
+    sw     rPC, (offStackSaveArea_currentPc - sizeofStackSaveArea)(rFP)
+    sw     rPC, (offStackSaveArea_savedPc - sizeofStackSaveArea)(a1)
+    lw     rPC, offMethod_insns(a0)               # rPC<- methodToCall->insns
+
+    # set up newSaveArea
+    sw     rFP, (offStackSaveArea_prevFrame - sizeofStackSaveArea)(a1)
+    sw     a3, (offStackSaveArea_returnAddr - sizeofStackSaveArea)(a1)
+    sw     a0, (offStackSaveArea_method - sizeofStackSaveArea)(a1)
+    beqz   t8, 2f                                 # breakFlags != 0
+    jr     t2                                     # bail to the interpreter
+
+2:
+    lw     a3, offClassObject_pDvmDex(t9)         # a3<- methodToCall->clazz->pDvmDex
+
+    # Update "thread" values for the new method
+    sw     a0, offThread_method(rSELF)            # self->method = methodToCall
+    sw     a3, offThread_methodClassDex(rSELF)    # self->methodClassDex = ...
+    move   rFP, a1                                # fp = newFp
+    sw     rFP, offThread_curFrame(rSELF)         # self->curFrame = newFp
+#if defined(TEMPLATE_INLINE_PROFILING)
+    # preserve a0-a2 and ra
+    SCRATCH_STORE(a0, 0)
+    SCRATCH_STORE(a1, 4)
+    SCRATCH_STORE(a2, 8)
+    SCRATCH_STORE(ra, 12)
+
+    move   a1, rSELF
+    # a0=methodToCall, a1=rSELF
+    la     t9, dvmFastMethodTraceEnter
+    jalr   t9
+    lw     gp, STACK_OFFSET_GP(sp)
+
+    # restore a0-a2 and ra
+    SCRATCH_LOAD(ra, 12)
+    SCRATCH_LOAD(a2, 8)
+    SCRATCH_LOAD(a1, 4)
+    SCRATCH_LOAD(a0, 0)
+#endif
+    RETURN                                        # return to the callee-chaining cell
+
+#undef TEMPLATE_INLINE_PROFILING
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF
+dvmCompiler_TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF:
+/* File: mips/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF.S */
+#define TEMPLATE_INLINE_PROFILING
+/* File: mips/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN.S */
+    /*
+     * For polymorphic callsite, check whether the cached class pointer matches
+     * the current one. If so setup the Dalvik frame and return to the
+     * Thumb code through the link register to transfer control to the callee
+     * method through a dedicated chaining cell.
+     *
+     * The predicted chaining cell is declared in MipsLIR.h with the
+     * following layout:
+     *
+     *  typedef struct PredictedChainingCell {
+     *      u4 branch;
+     *      u4 delay_slot;
+     *      const ClassObject *clazz;
+     *      const Method *method;
+     *      u4 counter;
+     *  } PredictedChainingCell;
+     *
+     * Upon returning to the callsite:
+     *    - ra   : to branch to the chaining cell
+     *    - ra+8 : to punt to the interpreter
+     *    - ra+16: to fully resolve the callee and possibly rechain.
+     *             a3 <- class
+     */
+    # a0 = this, a1 = returnCell, a2 = predictedChainCell, rPC = dalvikCallsite
+    lw      a3, offObject_clazz(a0)     # a3 <- this->class
+    lw      rIBASE, 8(a2)               # rIBASE <- predictedChainCell->clazz
+    lw      a0, 12(a2)                  # a0 <- predictedChainCell->method
+    lw      t1, offThread_icRechainCount(rSELF)    # t1 <- shared rechainCount
+
+#if defined(WITH_JIT_TUNING)
+    la      rINST, .LdvmICHitCount
+    #add     t2, t2, 1
+    bne    a3, rIBASE, 1f
+    nop
+    lw      t2, 0(rINST)
+    add     t2, t2, 1
+    sw      t2, 0(rINST)
+1:
+    #add     t2, t2, 1
+#endif
+    beq     a3, rIBASE, .LinvokeChainProf       # branch if predicted chain is valid
+    lw      rINST, offClassObject_vtable(a3)     # rINST <- this->class->vtable
+    beqz    rIBASE, 2f                      # initialized class or not
+    sub     a1, t1, 1                   # count--
+    sw      a1, offThread_icRechainCount(rSELF)   # write back to InterpState
+    b       3f
+2:
+    move    a1, zero
+3:
+    add     ra, ra, 16                  # return to fully-resolve landing pad
+    /*
+     * a1 <- count
+     * a2 <- &predictedChainCell
+     * a3 <- this->class
+     * rPC <- dPC
+     * rINST <- this->class->vtable
+     */
+    RETURN
+
+#undef TEMPLATE_INLINE_PROFILING
+
+/* ------------------------------ */
+    .balign 4
+    .global dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF
+dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
+/* File: mips/TEMPLATE_INVOKE_METHOD_NATIVE_PROF.S */
+#define TEMPLATE_INLINE_PROFILING
+/* File: mips/TEMPLATE_INVOKE_METHOD_NATIVE.S */
+    # a0 = methodToCall, a1 = returnCell, rPC = dalvikCallsite
+    lh     t7, offMethod_registersSize(a0)        # t7<- methodToCall->regsSize
+    lw     t9, offThread_interpStackEnd(rSELF)    # t9<- interpStackEnd
+    lbu    t8, offThread_breakFlags(rSELF)        # t8<- breakFlags
+    move   a3, a1                                 # a3<- returnCell
+    SAVEAREA_FROM_FP(a1, rFP)                     # a1<- stack save area
+    sll    t6, t7, 2                              # multiply regsSize by 4 (4 bytes per reg)
+    sub    a1, a1, t6                             # a1<- newFp(old savearea-regsSize)
+    SAVEAREA_FROM_FP(t0, a1)                      # t0<- stack save area
+    bgeu   t0, t9, 1f                             # bottom < interpStackEnd?
+    RETURN                                        # return to raise stack overflow excep.
+
+1:
+    # a1 = newFP, a0 = methodToCall, a3 = returnCell, rPC = dalvikCallsite
+    sw     rPC, (offStackSaveArea_currentPc - sizeofStackSaveArea)(rFP)
+    sw     rPC, (offStackSaveArea_savedPc - sizeofStackSaveArea)(a1)
+    lw     rPC, offMethod_insns(a0)               # rPC<- methodToCall->insns
+
+    # set up newSaveArea
+    sw     rFP, (offStackSaveArea_prevFrame - sizeofStackSaveArea)(a1)
+    sw     a3, (offStackSaveArea_returnAddr - sizeofStackSaveArea)(a1)
+    sw     a0, (offStackSaveArea_method - sizeofStackSaveArea)(a1)
+    lw     rTEMP, offMethod_nativeFunc(a0)        # rTEMP<- method->nativeFunc
+#if !defined(WITH_SELF_VERIFICATION)
+    beqz   t8, 2f                                 # breakFlags != 0
+    RETURN                                        # bail to the interpreter
+2:
+#else
+    RETURN                                        # bail to the interpreter unconditionally
+#endif
+
+    # go ahead and transfer control to the native code
+    lw     t6, offThread_jniLocal_topCookie(rSELF)  # t6<- thread->localRef->...
+    sw     a1, offThread_curFrame(rSELF)          # self->curFrame = newFp
+    sw     zero, offThread_inJitCodeCache(rSELF)  # not in the jit code cache
+    sw     t6, (offStackSaveArea_localRefCookie - sizeofStackSaveArea)(a1)
+                                                  # newFp->localRefCookie=top
+    SAVEAREA_FROM_FP(rBIX, a1)                    # rBIX<- new stack save area
+    move   a2, a0                                 # a2<- methodToCall
+    move   a0, a1                                 # a0<- newFp
+    add    a1, rSELF, offThread_retval            # a1<- &retval
+    move   a3, rSELF                              # a3<- self
+#if defined(TEMPLATE_INLINE_PROFILING)
+    # a2: methodToCall
+    # preserve a0-a3
+    SCRATCH_STORE(a0, 0)
+    SCRATCH_STORE(a1, 4)
+    SCRATCH_STORE(a2, 8)
+    SCRATCH_STORE(a3, 12)
+
+    move   a0, a2
+    move   a1, rSELF
+    # a0=JNIMethod, a1=rSELF
+    la      t9, dvmFastMethodTraceEnter
+    JALR(t9)                                      # off to the native code
+    lw     gp, STACK_OFFSET_GP(sp)
+
+    # restore a0-a3
+    SCRATCH_LOAD(a3, 12)
+    SCRATCH_LOAD(a2, 8)
+    SCRATCH_LOAD(a1, 4)
+    SCRATCH_LOAD(a0, 0)
+
+    move   rOBJ, a2                               # save a2
+#endif
+
+    JALR(rTEMP)                                   # off to the native code
+    lw     gp, STACK_OFFSET_GP(sp)
+
+#if defined(TEMPLATE_INLINE_PROFILING)
+    move   a0, rOBJ
+    move   a1, rSELF
+    # a0=JNIMethod, a1=rSELF
+    la      t9, dvmFastNativeMethodTraceExit
+    JALR(t9)
+    lw     gp, STACK_OFFSET_GP(sp)
+#endif
+
+    # native return; rBIX=newSaveArea
+    # equivalent to dvmPopJniLocals
+    lw     a2, offStackSaveArea_returnAddr(rBIX)     # a2 = chaining cell ret addr
+    lw     a0, offStackSaveArea_localRefCookie(rBIX) # a0<- saved->top
+    lw     a1, offThread_exception(rSELF)            # check for exception
+    sw     rFP, offThread_curFrame(rSELF)            # self->curFrame = fp
+    sw     a0, offThread_jniLocal_topCookie(rSELF)   # new top <- old top
+    lw     a0, (offStackSaveArea_currentPc - sizeofStackSaveArea)(rFP)
+
+    # a0 = dalvikCallsitePC
+    bnez   a1, .LhandleException                     # handle exception if any
+
+    sw     a2, offThread_inJitCodeCache(rSELF)       # set the mode properly
+    beqz   a2, 3f
+    jr     a2                                        # go if return chaining cell still exists
+
+3:
+    # continue executing the next instruction through the interpreter
+    la     a1, .LdvmJitToInterpTraceSelectNoChain    # defined in footer.S
+    lw     a1, (a1)
+    add    rPC, a0, 3*2                              # reconstruct new rPC (skip 3 dalvik code units)
+
+#if defined(WITH_JIT_TUNING)
+    li     a0, kCallsiteInterpreted
+#endif
+    jr     a1
+
+#undef TEMPLATE_INLINE_PROFILING
+
+    .size   dvmCompilerTemplateStart, .-dvmCompilerTemplateStart
+/* File: mips/footer.S */
+/*
+ * ===========================================================================
+ *  Common subroutines and data
+ * ===========================================================================
+ */
+
+    .text
+    .align  4
+.LinvokeNative:
+    # Prep for the native call
+    # a1 = newFP, a0 = methodToCall
+    lw     t9, offThread_jniLocal_topCookie(rSELF)  # t9<- thread->localRef->...
+    sw     zero, offThread_inJitCodeCache(rSELF)    # not in jit code cache
+    sw     a1, offThread_curFrame(rSELF)            # self->curFrame = newFp
+    sw     t9, (offStackSaveArea_localRefCookie - sizeofStackSaveArea)(a1)
+                                                 # newFp->localRefCookie=top
+    lhu     ra, offThread_subMode(rSELF)
+    SAVEAREA_FROM_FP(rBIX, a1)                   # rBIX<- new stack save area
+
+    move    a2, a0                               # a2<- methodToCall
+    move    a0, a1                               # a0<- newFp
+    add     a1, rSELF, offThread_retval          # a1<- &retval
+    move    a3, rSELF                            # a3<- self
+    andi    ra, kSubModeMethodTrace
+    beqz    ra, 121f
+    # a2: methodToCall
+    # preserve a0-a3
+    SCRATCH_STORE(a0, 0)
+    SCRATCH_STORE(a1, 4)
+    SCRATCH_STORE(a2, 8)
+    SCRATCH_STORE(a3, 12)
+    move    rTEMP, a2                            # preserve a2
+
+    move    a0, rTEMP
+    move    a1, rSELF
+    la      t9, dvmFastMethodTraceEnter
+    JALR(t9)
+    lw      gp, STACK_OFFSET_GP(sp)
+
+    # restore a0-a3
+    SCRATCH_LOAD(a3, 12)
+    SCRATCH_LOAD(a2, 8)
+    SCRATCH_LOAD(a1, 4)
+    SCRATCH_LOAD(a0, 0)
+
+    lw      t9, offMethod_nativeFunc(a2)
+    JALR(t9)                                      # call methodToCall->nativeFunc
+    lw      gp, STACK_OFFSET_GP(sp)
+
+    move    a0, rTEMP
+    move    a1, rSELF
+    la      t9, dvmFastNativeMethodTraceExit
+    JALR(t9)
+    lw      gp, STACK_OFFSET_GP(sp)
+    b       212f
+
+121:
+    lw      t9, offMethod_nativeFunc(a2)
+    JALR(t9)                                     # call methodToCall->nativeFunc
+    lw      gp, STACK_OFFSET_GP(sp)
+
+212:
+    # native return; rBIX=newSaveArea
+    # equivalent to dvmPopJniLocals
+    lw     a2, offStackSaveArea_returnAddr(rBIX)     # a2 = chaining cell ret addr
+    lw     a0, offStackSaveArea_localRefCookie(rBIX) # a0<- saved->top
+    lw     a1, offThread_exception(rSELF)            # check for exception
+    sw     rFP, offThread_curFrame(rSELF)            # self->curFrame = fp
+    sw     a0, offThread_jniLocal_topCookie(rSELF)   # new top <- old top
+    lw     a0, offStackSaveArea_savedPc(rBIX)        # reload rPC
+
+    # a0 = dalvikCallsitePC
+    bnez   a1, .LhandleException                     # handle exception if any
+
+    sw     a2, offThread_inJitCodeCache(rSELF)       # set the mode properly
+    beqz   a2, 3f
+    jr     a2                                        # go if return chaining cell still exists
+
+3:
+    # continue executing the next instruction through the interpreter
+    la     a1, .LdvmJitToInterpTraceSelectNoChain    # defined in footer.S
+    lw     a1, (a1)
+    add    rPC, a0, 3*2                              # reconstruct new rPC
+
+#if defined(WITH_JIT_TUNING)
+    li     a0, kCallsiteInterpreted
+#endif
+    jr     a1
+
+
+/*
+ * On entry:
+ * a0  Faulting Dalvik PC
+ */
+.LhandleException:
+#if defined(WITH_SELF_VERIFICATION)
+    la     t0, .LdeadFood
+    lw     t0, (t0)                  # should not see this under self-verification mode
+    jr     t0
+.LdeadFood:
+    .word   0xdeadf00d
+#endif
+    sw     zero, offThread_inJitCodeCache(rSELF)  # in interpreter land
+    la     a1, .LdvmMterpCommonExceptionThrown  # PIC way of getting &func
+    lw     a1, (a1)
+    la     rIBASE, .LdvmAsmInstructionStart     # PIC way of getting &func
+    lw     rIBASE, (rIBASE)
+    move   rPC, a0                              # reload the faulting Dalvik address
+    jr     a1                                   # branch to dvmMterpCommonExceptionThrown
+
+    .align  4
+.LdvmAsmInstructionStart:
+    .word   dvmAsmInstructionStart
+.LdvmJitToInterpNoChainNoProfile:
+    .word   dvmJitToInterpNoChainNoProfile
+.LdvmJitToInterpTraceSelectNoChain:
+    .word   dvmJitToInterpTraceSelectNoChain
+.LdvmJitToInterpNoChain:
+    .word   dvmJitToInterpNoChain
+.LdvmMterpStdBail:
+    .word   dvmMterpStdBail
+.LdvmMterpCommonExceptionThrown:
+    .word   dvmMterpCommonExceptionThrown
+.LdvmLockObject:
+    .word   dvmLockObject
+#if defined(WITH_JIT_TUNING)
+.LdvmICHitCount:
+    .word   gDvmICHitCount
+#endif
+#if defined(WITH_SELF_VERIFICATION)
+.LdvmSelfVerificationMemOpDecode:
+    .word   dvmSelfVerificationMemOpDecode
+#endif
+
+    .global dmvCompilerTemplateEnd
+dmvCompilerTemplateEnd:
+
+#endif /* WITH_JIT */
+
diff --git a/vm/compiler/template/rebuild.sh b/vm/compiler/template/rebuild.sh
index f04d097..60f45be 100755
--- a/vm/compiler/template/rebuild.sh
+++ b/vm/compiler/template/rebuild.sh
@@ -19,4 +19,4 @@
 # generated as part of the build.
 #
 set -e
-for arch in ia32 armv5te armv5te-vfp armv7-a armv7-a-neon; do TARGET_ARCH_EXT=$arch make -f Makefile-template; done
+for arch in ia32 armv5te armv5te-vfp armv7-a armv7-a-neon mips; do TARGET_ARCH_EXT=$arch make -f Makefile-template; done
diff --git a/vm/interp/Interp.cpp b/vm/interp/Interp.cpp
index de85fa1..bb88870 100644
--- a/vm/interp/Interp.cpp
+++ b/vm/interp/Interp.cpp
@@ -1483,8 +1483,10 @@
             newValue.ctl.breakFlags |= kInterpSingleStep;
         if (newValue.ctl.subMode & SAFEPOINT_BREAK_MASK)
             newValue.ctl.breakFlags |= kInterpSafePoint;
+#ifndef DVM_NO_ASM_INTERP
         newValue.ctl.curHandlerTable = (newValue.ctl.breakFlags) ?
             thread->altHandlerTable : thread->mainHandlerTable;
+#endif
     } while (dvmQuasiAtomicCas64(oldValue.all, newValue.all,
              &thread->interpBreak.all) != 0);
 }
@@ -1556,12 +1558,16 @@
     Thread* thread;
     uint8_t breakFlags;
     uint8_t subMode;
+#ifndef DVM_NO_ASM_INTERP
     void* handlerTable;
+#endif
 
     dvmLockThreadList(self);
     breakFlags = self->interpBreak.ctl.breakFlags;
     subMode = self->interpBreak.ctl.subMode;
+#ifndef DVM_NO_ASM_INTERP
     handlerTable = self->interpBreak.ctl.curHandlerTable;
+#endif
     for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
         if (subMode != thread->interpBreak.ctl.subMode) {
             LOGD("Warning: subMode mismatch - %#x:%#x, tid[%d]",
@@ -1571,11 +1577,13 @@
             LOGD("Warning: breakFlags mismatch - %#x:%#x, tid[%d]",
                 breakFlags,thread->interpBreak.ctl.breakFlags,thread->threadId);
          }
+#ifndef DVM_NO_ASM_INTERP
         if (handlerTable != thread->interpBreak.ctl.curHandlerTable) {
             LOGD("Warning: curHandlerTable mismatch - %#x:%#x, tid[%d]",
                 (int)handlerTable,(int)thread->interpBreak.ctl.curHandlerTable,
                 thread->threadId);
          }
+#endif
 #if defined(WITH_JIT)
          if (thread->pJitProfTable != gDvmJit.pProfTable) {
              LOGD("Warning: pJitProfTable mismatch - %#x:%#x, tid[%d]",
diff --git a/vm/jdwp/JdwpEvent.cpp b/vm/jdwp/JdwpEvent.cpp
index f11777b..cbb181d 100644
--- a/vm/jdwp/JdwpEvent.cpp
+++ b/vm/jdwp/JdwpEvent.cpp
@@ -921,7 +921,7 @@
 {
     JdwpSuspendPolicy suspendPolicy = SP_NONE;
 
-    assert(threadId = dvmDbgGetThreadSelfId());
+    assert(threadId == dvmDbgGetThreadSelfId());
 
     /*
      * I don't think this can happen.
diff --git a/vm/mterp/Mterp.cpp b/vm/mterp/Mterp.cpp
index 6220e81..0cd30fd 100644
--- a/vm/mterp/Mterp.cpp
+++ b/vm/mterp/Mterp.cpp
@@ -52,7 +52,11 @@
      * which one did, but if any one is too big the total size will
      * overflow.
      */
+#if defined(__mips__)
+    const int width = 128;
+#else
     const int width = 64;
+#endif
     int interpSize = (uintptr_t) dvmAsmInstructionEnd -
                      (uintptr_t) dvmAsmInstructionStart;
     if (interpSize != 0 && interpSize != kNumPackedOpcodes*width) {
diff --git a/vm/mterp/Mterp.h b/vm/mterp/Mterp.h
index b27e4f7..6762f67 100644
--- a/vm/mterp/Mterp.h
+++ b/vm/mterp/Mterp.h
@@ -35,15 +35,9 @@
 /*
  * Local entry and exit points.  The platform-specific implementation must
  * provide these two.
- *
- * dvmMterpStdRun() returns the "changeInterp" argument from dvmMterpStdBail(),
- * indicating whether we want to bail out of the interpreter or just switch
- * between "standard" and "debug" mode.
- *
- * The "mterp" interpreter is always "standard".
  */
-extern "C" bool dvmMterpStdRun(Thread* self);
-extern "C" void dvmMterpStdBail(Thread* self, bool changeInterp);
+extern "C" void dvmMterpStdRun(Thread* self);
+extern "C" void dvmMterpStdBail(Thread* self);
 
 /*
  * Helper for common_printMethod(), invoked from the assembly
diff --git a/vm/mterp/armv5te/footer.S b/vm/mterp/armv5te/footer.S
index 4afe471..5c7062f 100644
--- a/vm/mterp/armv5te/footer.S
+++ b/vm/mterp/armv5te/footer.S
@@ -684,8 +684,8 @@
 
     cmp     lr, #0                      @ any special SubModes active?
     bne     11f                         @ go handle them if so
-    mov     lr, pc                      @ set return addr
-    ldr     pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+    ldr     ip, [r2, #offMethod_nativeFunc] @ ip<- methodToCall->nativeFunc
+    blx     ip
 7:
 
     @ native return; r10=newSaveArea
@@ -711,8 +711,8 @@
     ldmfd   sp, {r0-r3}                 @ refresh.  NOTE: no sp autoincrement
 
     @ Call the native method
-    mov     lr, pc                      @ set return addr
-    ldr     pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+    ldr     ip, [r2, #offMethod_nativeFunc] @ ip<- methodToCall->nativeFunc
+    blx     ip
 
     @ Restore the pre-call arguments
     ldmfd   sp!, {r0-r3}                @ r2<- methodToCall (others unneeded)
diff --git a/vm/mterp/c/OP_PACKED_SWITCH.cpp b/vm/mterp/c/OP_PACKED_SWITCH.cpp
index 3922e46..c820e80 100644
--- a/vm/mterp/c/OP_PACKED_SWITCH.cpp
+++ b/vm/mterp/c/OP_PACKED_SWITCH.cpp
@@ -6,7 +6,7 @@
 
         vsrc1 = INST_AA(inst);
         offset = FETCH(1) | (((s4) FETCH(2)) << 16);
-        ILOGV("|packed-switch v%d +0x%04x", vsrc1, vsrc2);
+        ILOGV("|packed-switch v%d +0x%04x", vsrc1, offset);
         switchData = pc + offset;       // offset in 16-bit units
 #ifndef NDEBUG
         if (switchData < curMethod->insns ||
diff --git a/vm/mterp/c/OP_SPARSE_SWITCH.cpp b/vm/mterp/c/OP_SPARSE_SWITCH.cpp
index f48d06e..9ca16ad 100644
--- a/vm/mterp/c/OP_SPARSE_SWITCH.cpp
+++ b/vm/mterp/c/OP_SPARSE_SWITCH.cpp
@@ -6,7 +6,7 @@
 
         vsrc1 = INST_AA(inst);
         offset = FETCH(1) | (((s4) FETCH(2)) << 16);
-        ILOGV("|sparse-switch v%d +0x%04x", vsrc1, vsrc2);
+        ILOGV("|sparse-switch v%d +0x%04x", vsrc1, offset);
         switchData = pc + offset;       // offset in 16-bit units
 #ifndef NDEBUG
         if (switchData < curMethod->insns ||
diff --git a/vm/mterp/c/gotoTargets.cpp b/vm/mterp/c/gotoTargets.cpp
index 9d90046..8b6c18f 100644
--- a/vm/mterp/c/gotoTargets.cpp
+++ b/vm/mterp/c/gotoTargets.cpp
@@ -961,7 +961,8 @@
             self->interpSave.method = curMethod;
             methodClassDex = curMethod->clazz->pDvmDex;
             pc = methodToCall->insns;
-            self->interpSave.curFrame = fp = newFp;
+            self->interpSave.curFrame = newFp;
+            fp = newFp;
 #ifdef EASY_GDB
             debugSaveArea = SAVEAREA_FROM_FP(newFp);
 #endif
@@ -979,7 +980,7 @@
             DUMP_REGS(methodToCall, newFp, true);   // show input args
 
             if (self->interpBreak.ctl.subMode != 0) {
-                dvmReportPreNativeInvoke(methodToCall, self, fp);
+                dvmReportPreNativeInvoke(methodToCall, self, newSaveArea->prevFrame);
             }
 
             ILOGD("> native <-- %s.%s %s", methodToCall->clazz->descriptor,
@@ -993,12 +994,13 @@
             (*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self);
 
             if (self->interpBreak.ctl.subMode != 0) {
-                dvmReportPostNativeInvoke(methodToCall, self, fp);
+                dvmReportPostNativeInvoke(methodToCall, self, newSaveArea->prevFrame);
             }
 
             /* pop frame off */
             dvmPopJniLocals(self, newSaveArea);
-            self->interpSave.curFrame = fp;
+            self->interpSave.curFrame = newSaveArea->prevFrame;
+            fp = newSaveArea->prevFrame;
 
             /*
              * If the native code threw an exception, or interpreted code
diff --git a/vm/mterp/c/header.cpp b/vm/mterp/c/header.cpp
index c7e727e..2787188 100644
--- a/vm/mterp/c/header.cpp
+++ b/vm/mterp/c/header.cpp
@@ -61,6 +61,14 @@
 #if defined(__ARM_EABI__)
 # define NO_UNALIGN_64__UNION
 #endif
+/*
+ * MIPS ABI requires 64-bit alignment for access to 64-bit data types.
+ *
+ * Use memcpy() to do the transfer
+ */
+#if defined(__mips__)
+/* # define NO_UNALIGN_64__UNION */
+#endif
 
 
 //#define LOG_INSTR                   /* verbose debugging */
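[Editor's note] The memcpy() transfer the new comment refers to, sketched as a hypothetical helper (u4/s8 are the VM's 32/64-bit typedefs): reading a 64-bit value through a union can still emit a doubleword load against a 32-bit-aligned register pair, while memcpy() lets the compiler emit safe word-sized loads.

    #include <string.h>

    static inline s8 getLongSafely(const u4* regPair) {
        s8 val;
        memcpy(&val, regPair, sizeof(val));  /* no 64-bit unaligned access */
        return val;
    }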
diff --git a/vm/mterp/c/opcommon.cpp b/vm/mterp/c/opcommon.cpp
index 104d856..f2e0ecb 100644
--- a/vm/mterp/c/opcommon.cpp
+++ b/vm/mterp/c/opcommon.cpp
@@ -525,7 +525,7 @@
         }                                                                   \
         SET_REGISTER##_regsize(vdst,                                        \
             dvmGetField##_ftype(obj, ifield->byteOffset));                  \
-        ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name,                   \
+        ILOGV("+ IGET '%s'=0x%08llx", ifield->name,                         \
             (u8) GET_REGISTER##_regsize(vdst));                             \
     }                                                                       \
     FINISH(2);
@@ -552,7 +552,7 @@
         }                                                                   \
         SET_REGISTER##_regsize(vdst,                                        \
             dvmGetField##_ftype(obj, ifield->byteOffset));                  \
-        ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name,                   \
+        ILOGV("+ IGET '%s'=0x%08llx", ifield->name,                         \
             (u8) GET_REGISTER##_regsize(vdst));                             \
     }                                                                       \
     FINISH(5);
@@ -596,7 +596,7 @@
         }                                                                   \
         dvmSetField##_ftype(obj, ifield->byteOffset,                        \
             GET_REGISTER##_regsize(vdst));                                  \
-        ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name,                   \
+        ILOGV("+ IPUT '%s'=0x%08llx", ifield->name,                         \
             (u8) GET_REGISTER##_regsize(vdst));                             \
     }                                                                       \
     FINISH(2);
@@ -623,7 +623,7 @@
         }                                                                   \
         dvmSetField##_ftype(obj, ifield->byteOffset,                        \
             GET_REGISTER##_regsize(vdst));                                  \
-        ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name,                   \
+        ILOGV("+ IPUT '%s'=0x%08llx", ifield->name,                         \
             (u8) GET_REGISTER##_regsize(vdst));                             \
     }                                                                       \
     FINISH(5);
@@ -673,7 +673,7 @@
         }                                                                   \
         SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
         ILOGV("+ SGET '%s'=0x%08llx",                                       \
-            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
     }                                                                       \
     FINISH(2);
 
@@ -696,7 +696,7 @@
         }                                                                   \
         SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
         ILOGV("+ SGET '%s'=0x%08llx",                                       \
-            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
     }                                                                       \
     FINISH(4);
 
@@ -719,7 +719,7 @@
         }                                                                   \
         dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
         ILOGV("+ SPUT '%s'=0x%08llx",                                       \
-            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
     }                                                                       \
     FINISH(2);
 
@@ -742,6 +742,6 @@
         }                                                                   \
         dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
         ILOGV("+ SPUT '%s'=0x%08llx",                                       \
-            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
     }                                                                       \
     FINISH(4);
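[Editor's note] The ifield->field.name edits above track a struct change: the common Field members were evidently flattened into InstField/StaticField, so name is now a direct member. Roughly (a sketch only, not the real headers):

    /* Before: the shared Field data was an embedded struct. */
    struct InstFieldOld { Field field; int byteOffset; };    /* ->field.name */
    /* After: the shared members live inline in the struct.  */
    struct InstFieldNew { /* Field members inlined ... */
                          const char* name; int byteOffset; };  /* ->name */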
diff --git a/vm/mterp/common/asm-constants.h b/vm/mterp/common/asm-constants.h
index 1313de6..d39afe8 100644
--- a/vm/mterp/common/asm-constants.h
+++ b/vm/mterp/common/asm-constants.h
@@ -40,7 +40,7 @@
  * data structures.  Some versions of gcc will hold small enumerated types
  * in a char instead of an int.
  */
-#if defined(__ARM_EABI__)
+#if defined(__ARM_EABI__) || defined(__mips__)
 # define MTERP_NO_UNALIGN_64
 #endif
 #if defined(HAVE_SHORT_ENUMS)
@@ -152,7 +152,11 @@
 MTERP_OFFSET(offThread_methodClassDex,    Thread, interpSave.methodClassDex, 12)
 /* make sure all JValue union members are stored at the same offset */
 MTERP_OFFSET(offThread_retval,            Thread, interpSave.retval, 16)
+#ifdef HAVE_BIG_ENDIAN
+MTERP_OFFSET(offThread_retval_z,          Thread, interpSave.retval.z, 19)
+#else
 MTERP_OFFSET(offThread_retval_z,          Thread, interpSave.retval.z, 16)
+#endif
 MTERP_OFFSET(offThread_retval_i,          Thread, interpSave.retval.i, 16)
 MTERP_OFFSET(offThread_retval_j,          Thread, interpSave.retval.j, 16)
 MTERP_OFFSET(offThread_retval_l,          Thread, interpSave.retval.l, 16)
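[Editor's note] The big-endian special case above is plain byte arithmetic: retval is a JValue union at offset 16, and the boolean member z overlays only the least-significant byte of the first 32-bit word, which sits at the end of the word on a big-endian target. A sketch:

    #include <stdbool.h>
    #include <stddef.h>

    /* Assuming offsetof(Thread, interpSave.retval) == 16: z occupies the
     * word's LSB, i.e. byte 3 of the word when big-endian. */
    static size_t retvalZOffset(bool bigEndian) {
        return bigEndian ? 16 + 3 : 16;     /* 19 on BE, 16 on LE */
    }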
@@ -193,7 +197,7 @@
 MTERP_OFFSET(offThread_jniLocal_topCookie, \
                                 Thread, jniLocalRefTable.segmentState.all, 168)
 #if defined(WITH_SELF_VERIFICATION)
-MTERP_OFFSET(offThread_shadowSpace,       Thread, shadowSpace, 192)
+MTERP_OFFSET(offThread_shadowSpace,       Thread, shadowSpace, 188)
 #endif
 #else
 MTERP_OFFSET(offThread_jniLocal_topCookie, \
diff --git a/vm/mterp/common/mips-defines.h b/vm/mterp/common/mips-defines.h
new file mode 100644
index 0000000..1e11a30
--- /dev/null
+++ b/vm/mterp/common/mips-defines.h
@@ -0,0 +1,3 @@
+#define fcc0    $fcc0
+#define fcc1    $fcc1
+
diff --git a/vm/mterp/config-mips b/vm/mterp/config-mips
new file mode 100644
index 0000000..8c50858
--- /dev/null
+++ b/vm/mterp/config-mips
@@ -0,0 +1,68 @@
+# Copyright (C) 2008 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Configuration for MIPS architecture targets.
+#
+
+handler-style computed-goto
+handler-size 128
+# Need to specify split-ops to generate alt-ops at the end after
+# importing other files.
+split-ops
+
+# source for the instruction table stub
+asm-stub mips/stub.S
+
+# source for alternate entry stub
+asm-alt-stub mips/alt_stub.S
+
+# file header and basic definitions
+import c/header.cpp
+import mips/header.S
+
+# C pre-processor defines for stub C instructions
+import cstubs/stubdefs.cpp
+
+# highly-platform-specific defs
+import mips/platform.S
+
+# common defs for the C helpers; include this before the instruction handlers
+import c/opcommon.cpp
+
+# arch-specific entry point to interpreter
+import mips/entry.S
+
+# opcode list; argument to op-start is default directory
+op-start mips
+
+# OP_BREAKPOINT needs explicit testing
+    op OP_BREAKPOINT c
+
+# OP_DISPATCH_FF needs explicit testing
+    op OP_DISPATCH_FF c
+
+op-end
+
+# "helper" code for C; include if you use any of the C stubs (this generates
+# object code, so it's normally excluded)
+import c/gotoTargets.cpp
+
+# end of defs; include this when cstubs/stubdefs.cpp is included
+import cstubs/enddefs.cpp
+
+# common subroutines for asm
+import mips/footer.S
+import mips/debug.cpp
+alt-ops
diff --git a/vm/mterp/cstubs/entry.cpp b/vm/mterp/cstubs/entry.cpp
index 350bd86..90b6cea 100644
--- a/vm/mterp/cstubs/entry.cpp
+++ b/vm/mterp/cstubs/entry.cpp
@@ -2,7 +2,7 @@
  * Handler function table, one entry per opcode.
  */
 #undef H
-#define H(_op) dvmMterp_##_op
+#define H(_op) (const void*) dvmMterp_##_op
 DEFINE_GOTO_TABLE(gDvmMterpHandlers)
 
 #undef H
@@ -21,12 +21,12 @@
 {
     jmp_buf jmpBuf;
 
-    self->bailPtr = &jmpBuf;
+    self->interpSave.bailPtr = &jmpBuf;
 
     /* We exit via a longjmp */
     if (setjmp(jmpBuf)) {
         LOGVV("mterp threadid=%d returning", dvmThreadSelf()->threadId);
-        return
+        return;
     }
 
     /* run until somebody longjmp()s out */
@@ -40,8 +40,8 @@
          * FINISH code.  For allstubs, we must do an explicit check
          * in the interpretation loop.
          */
-        if (self-interpBreak.ctl.subMode) {
-            dvmCheckBefore(pc, fp, self, curMethod);
+        if (self->interpBreak.ctl.subMode) {
+            dvmCheckBefore(pc, fp, self);
         }
         Handler handler = (Handler) gDvmMterpHandlers[inst & 0xff];
         (void) gDvmMterpHandlerNames;   /* avoid gcc "defined but not used" */
@@ -56,6 +56,6 @@
  */
 void dvmMterpStdBail(Thread* self)
 {
-    jmp_buf* pJmpBuf = self->bailPtr;
+    jmp_buf* pJmpBuf = (jmp_buf*) self->interpSave.bailPtr;
     longjmp(*pJmpBuf, 1);
 }
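[Editor's note] The bailPtr plumbing above is the standard setjmp()/longjmp() escape hatch; a minimal standalone sketch of the pattern (hypothetical names, not the VM's API):

    #include <setjmp.h>

    static jmp_buf* gBailPtr;               /* stands in for interpSave.bailPtr */

    static void interpLoop(void) {
        jmp_buf jmpBuf;
        gBailPtr = &jmpBuf;
        if (setjmp(jmpBuf)) {
            return;                         /* re-entered via longjmp: bail out */
        }
        for (;;) { /* dispatch opcode handlers; one of them eventually bails */ }
    }

    static void bail(void) {
        longjmp(*gBailPtr, 1);              /* what dvmMterpStdBail() does */
    }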
diff --git a/vm/mterp/cstubs/stubdefs.cpp b/vm/mterp/cstubs/stubdefs.cpp
index 0bffd49..1a851b4 100644
--- a/vm/mterp/cstubs/stubdefs.cpp
+++ b/vm/mterp/cstubs/stubdefs.cpp
@@ -82,6 +82,8 @@
     }
 #endif
 
+#define FINISH_BKPT(_opcode)       /* FIXME? */
+#define DISPATCH_EXTENDED(_opcode) /* FIXME? */
 
 /*
  * The "goto label" statements turn into function calls followed by
@@ -118,7 +120,7 @@
  * As a special case, "goto bail" turns into a longjmp.
  */
 #define GOTO_bail()                                                         \
-    dvmMterpStdBail(self, false);
+    dvmMterpStdBail(self)
 
 /*
  * Periodically check for thread suspension.
diff --git a/vm/mterp/gen-mterp.py b/vm/mterp/gen-mterp.py
index e0b1b2d..ec7acaf 100755
--- a/vm/mterp/gen-mterp.py
+++ b/vm/mterp/gen-mterp.py
@@ -25,6 +25,7 @@
 interp_defs_file = "../../libdex/DexOpcodes.h" # need opcode list
 kNumPackedOpcodes = 512 # TODO: Derive this from DexOpcodes.h.
 
+splitops = False
 verbose = False
 handler_size_bits = -1000
 handler_size_bytes = -1000
@@ -217,7 +218,18 @@
     in_op_start = 2
 
     loadAndEmitOpcodes()
+    if not splitops:
+        if generate_alt_table:
+            loadAndEmitAltOpcodes()
+            if style == "jump-table":
+                emitJmpTable("dvmAsmInstructionStart", label_prefix)
+                emitJmpTable("dvmAsmAltInstructionStart", alt_label_prefix)
 
+def genaltop(tokens):
+    if in_op_start != 2:
+        raise DataParseError("alt-ops can be specified only after op-end")
+    if len(tokens) != 1:
+        raise DataParseError("alt-ops takes no arguments")
     if generate_alt_table:
         loadAndEmitAltOpcodes()
         if style == "jump-table":
@@ -307,7 +319,6 @@
         asm_fp.write("    .balign 4\n")
         asm_fp.write("dvmAsmSisterStart:\n")
         asm_fp.writelines(sister_list)
-
         asm_fp.write("\n    .size   dvmAsmSisterStart, .-dvmAsmSisterStart\n")
         asm_fp.write("    .global dvmAsmSisterEnd\n")
         asm_fp.write("dvmAsmSisterEnd:\n\n")
@@ -593,6 +604,10 @@
                 opEntry(tokens)
             elif tokens[0] == "handler-style":
                 setHandlerStyle(tokens)
+            elif tokens[0] == "alt-ops":
+                genaltop(tokens)
+            elif tokens[0] == "split-ops":
+                splitops = True
             else:
                 raise DataParseError, "unrecognized command '%s'" % tokens[0]
             if style == None:
diff --git a/vm/mterp/mips/ALT_OP_DISPATCH_FF.S b/vm/mterp/mips/ALT_OP_DISPATCH_FF.S
new file mode 100644
index 0000000..0c542a0
--- /dev/null
+++ b/vm/mterp/mips/ALT_OP_DISPATCH_FF.S
@@ -0,0 +1,10 @@
+%verify "executed"
+/*
+ * Unlike other alt stubs, we don't want to call dvmCheckBefore() here.
+ * Instead, just treat this as a trampoline to reach the real alt
+ * handler (which will do the dvmCheckBefore() call).
+ */
+    srl     t0, rINST, 8                #  t0 <- extended opcode
+    addu    t0, t0, 256                 #  add offset for extended opcodes
+    GOTO_OPCODE(t0)                     #  go to proper extended handler
+
diff --git a/vm/mterp/mips/OP_ADD_DOUBLE.S b/vm/mterp/mips/OP_ADD_DOUBLE.S
new file mode 100644
index 0000000..1d5cebc
--- /dev/null
+++ b/vm/mterp/mips/OP_ADD_DOUBLE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflopWide.S" {"instr":"JAL(__adddf3)", "instr_f":"add.d fv0, fa0, fa1"}
diff --git a/vm/mterp/mips/OP_ADD_DOUBLE_2ADDR.S b/vm/mterp/mips/OP_ADD_DOUBLE_2ADDR.S
new file mode 100644
index 0000000..499961f
--- /dev/null
+++ b/vm/mterp/mips/OP_ADD_DOUBLE_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflopWide2addr.S" {"instr":"JAL(__adddf3)", "instr_f":"add.d fv0, fa0, fa1"}
diff --git a/vm/mterp/mips/OP_ADD_FLOAT.S b/vm/mterp/mips/OP_ADD_FLOAT.S
new file mode 100644
index 0000000..18c94f4
--- /dev/null
+++ b/vm/mterp/mips/OP_ADD_FLOAT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflop.S" {"instr":"JAL(__addsf3)", "instr_f":"add.s fv0, fa0, fa1"}
diff --git a/vm/mterp/mips/OP_ADD_FLOAT_2ADDR.S b/vm/mterp/mips/OP_ADD_FLOAT_2ADDR.S
new file mode 100644
index 0000000..0a39770
--- /dev/null
+++ b/vm/mterp/mips/OP_ADD_FLOAT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflop2addr.S" {"instr":"JAL(__addsf3)", "instr_f":"add.s fv0, fa0, fa1"}
diff --git a/vm/mterp/mips/OP_ADD_INT.S b/vm/mterp/mips/OP_ADD_INT.S
new file mode 100644
index 0000000..dcbbb7e
--- /dev/null
+++ b/vm/mterp/mips/OP_ADD_INT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop.S" {"instr":"addu a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_ADD_INT_2ADDR.S b/vm/mterp/mips/OP_ADD_INT_2ADDR.S
new file mode 100644
index 0000000..8bb3b0c
--- /dev/null
+++ b/vm/mterp/mips/OP_ADD_INT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop2addr.S" {"instr":"addu a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_ADD_INT_LIT16.S b/vm/mterp/mips/OP_ADD_INT_LIT16.S
new file mode 100644
index 0000000..de45f81
--- /dev/null
+++ b/vm/mterp/mips/OP_ADD_INT_LIT16.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopLit16.S" {"instr":"addu a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_ADD_INT_LIT8.S b/vm/mterp/mips/OP_ADD_INT_LIT8.S
new file mode 100644
index 0000000..feaaaa2
--- /dev/null
+++ b/vm/mterp/mips/OP_ADD_INT_LIT8.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopLit8.S" {"instr":"addu a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_ADD_LONG.S b/vm/mterp/mips/OP_ADD_LONG.S
new file mode 100644
index 0000000..d57e1cf
--- /dev/null
+++ b/vm/mterp/mips/OP_ADD_LONG.S
@@ -0,0 +1,10 @@
+%verify "executed"
+/*
+ *  The compiler generates the following sequence for
+ *  [v1 v0] =  [a1 a0] + [a3 a2];
+ *    addu v0,a2,a0
+ *    addu a1,a3,a1
+ *    sltu v1,v0,a2
+ *    addu v1,v1,a1
+ */
+%include "mips/binopWide.S" { "result0":"v0", "result1":"v1", "preinstr":"addu v0, a2, a0", "instr":"addu a1, a3, a1; sltu v1, v0, a2; addu v1, v1, a1" }
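[Editor's note] The addu/sltu carry trick restated in C (a sketch; uintN_t used in place of the VM typedefs):

    #include <stdint.h>

    /* [v1 v0] = [a1 a0] + [a3 a2]: low-word overflow is detected by an
     * unsigned compare of the sum against one addend (the sltu). */
    static void add64(uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3,
                      uint32_t* v0, uint32_t* v1) {
        *v0 = a0 + a2;                  /* addu v0, a2, a0 */
        *v1 = a1 + a3 + (*v0 < a2);     /* sltu plus two addu */
    }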
diff --git a/vm/mterp/mips/OP_ADD_LONG_2ADDR.S b/vm/mterp/mips/OP_ADD_LONG_2ADDR.S
new file mode 100644
index 0000000..6a87119
--- /dev/null
+++ b/vm/mterp/mips/OP_ADD_LONG_2ADDR.S
@@ -0,0 +1,5 @@
+%verify "executed"
+/*
+ * See OP_ADD_LONG.S for details.
+ */
+%include "mips/binopWide2addr.S" { "result0":"v0", "result1":"v1", "preinstr":"addu v0, a2, a0", "instr":"addu a1, a3, a1; sltu v1, v0, a2; addu v1, v1, a1" }
diff --git a/vm/mterp/mips/OP_AGET.S b/vm/mterp/mips/OP_AGET.S
new file mode 100644
index 0000000..e1b182a
--- /dev/null
+++ b/vm/mterp/mips/OP_AGET.S
@@ -0,0 +1,31 @@
+%default { "load":"lw", "shift":"2" }
+%verify "executed"
+    /*
+     * Array get, 32 bits or less.  vAA <- vBB[vCC].
+     *
+     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+     * instructions.  We use a pair of FETCH_Bs instead.
+     *
+     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(a2, 1)                         #  a2 <- BB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    FETCH_C(a3, 1)                         #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offArrayObject_length(a3, a0) #  a3 <- arrayObj->length
+    .if $shift
+    EASN(a0, a0, a1, $shift)               #  a0 <- arrayObj + index*width
+    .else
+    addu      a0, a0, a1
+    .endif
+    # a1 >= a3; compare unsigned index
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    $load a2, offArrayObject_contents(a0)  #  a2 <- vBB[vCC]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a2, rOBJ, t0)            #  vAA <- a2
+
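[Editor's note] The handler's address math, restated in C (a sketch assuming the ArrayObject layout implied by offArrayObject_contents):

    #include <stddef.h>
    #include <stdint.h>

    /* element address = arrayObj + (index << shift) + contents offset */
    static uint32_t agetWord(const uint8_t* arrayObj, uint32_t index,
                             unsigned shift, size_t contentsOff) {
        return *(const uint32_t*) (arrayObj + (index << shift) + contentsOff);
    }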
diff --git a/vm/mterp/mips/OP_AGET_BOOLEAN.S b/vm/mterp/mips/OP_AGET_BOOLEAN.S
new file mode 100644
index 0000000..d38c466
--- /dev/null
+++ b/vm/mterp/mips/OP_AGET_BOOLEAN.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_AGET.S" { "load":"lbu", "shift":"0" }
diff --git a/vm/mterp/mips/OP_AGET_BYTE.S b/vm/mterp/mips/OP_AGET_BYTE.S
new file mode 100644
index 0000000..2c0b0be
--- /dev/null
+++ b/vm/mterp/mips/OP_AGET_BYTE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_AGET.S" { "load":"lb", "shift":"0" }
diff --git a/vm/mterp/mips/OP_AGET_CHAR.S b/vm/mterp/mips/OP_AGET_CHAR.S
new file mode 100644
index 0000000..9146b97
--- /dev/null
+++ b/vm/mterp/mips/OP_AGET_CHAR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_AGET.S" { "load":"lhu", "shift":"1" }
diff --git a/vm/mterp/mips/OP_AGET_OBJECT.S b/vm/mterp/mips/OP_AGET_OBJECT.S
new file mode 100644
index 0000000..16d500d
--- /dev/null
+++ b/vm/mterp/mips/OP_AGET_OBJECT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_AGET.S"
diff --git a/vm/mterp/mips/OP_AGET_SHORT.S b/vm/mterp/mips/OP_AGET_SHORT.S
new file mode 100644
index 0000000..ba4c939
--- /dev/null
+++ b/vm/mterp/mips/OP_AGET_SHORT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_AGET.S" { "load":"lh", "shift":"1" }
diff --git a/vm/mterp/mips/OP_AGET_WIDE.S b/vm/mterp/mips/OP_AGET_WIDE.S
new file mode 100644
index 0000000..896ea4f
--- /dev/null
+++ b/vm/mterp/mips/OP_AGET_WIDE.S
@@ -0,0 +1,27 @@
+%verify "executed"
+    /*
+     * Array get, 64 bits.  vAA <- vBB[vCC].
+     *
+     * Arrays of long/double are 64-bit aligned.
+     */
+    /* aget-wide vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offArrayObject_length(a3, a0) #  a3 <- arrayObj->length
+    EAS3(a0, a0, a1)                       #  a0 <- arrayObj + index*width
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+
+.L${opcode}_finish:
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    LOAD64_off(a2, a3, a0, offArrayObject_contents)
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[AA]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(a2, a3, rOBJ)                  #  vAA/vAA+1 <- a2/a3
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
diff --git a/vm/mterp/mips/OP_AND_INT.S b/vm/mterp/mips/OP_AND_INT.S
new file mode 100644
index 0000000..721129b
--- /dev/null
+++ b/vm/mterp/mips/OP_AND_INT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop.S" {"instr":"and a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_AND_INT_2ADDR.S b/vm/mterp/mips/OP_AND_INT_2ADDR.S
new file mode 100644
index 0000000..4563705
--- /dev/null
+++ b/vm/mterp/mips/OP_AND_INT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop2addr.S" {"instr":"and a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_AND_INT_LIT16.S b/vm/mterp/mips/OP_AND_INT_LIT16.S
new file mode 100644
index 0000000..81c0a04
--- /dev/null
+++ b/vm/mterp/mips/OP_AND_INT_LIT16.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopLit16.S" {"instr":"and a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_AND_INT_LIT8.S b/vm/mterp/mips/OP_AND_INT_LIT8.S
new file mode 100644
index 0000000..61c1c9d
--- /dev/null
+++ b/vm/mterp/mips/OP_AND_INT_LIT8.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopLit8.S" {"instr":"and a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_AND_LONG.S b/vm/mterp/mips/OP_AND_LONG.S
new file mode 100644
index 0000000..8249617
--- /dev/null
+++ b/vm/mterp/mips/OP_AND_LONG.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopWide.S" {"preinstr":"and a0, a0, a2", "instr":"and a1, a1, a3"}
diff --git a/vm/mterp/mips/OP_AND_LONG_2ADDR.S b/vm/mterp/mips/OP_AND_LONG_2ADDR.S
new file mode 100644
index 0000000..f9bf88f
--- /dev/null
+++ b/vm/mterp/mips/OP_AND_LONG_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopWide2addr.S" {"preinstr":"and a0, a0, a2", "instr":"and a1, a1, a3"}
diff --git a/vm/mterp/mips/OP_APUT.S b/vm/mterp/mips/OP_APUT.S
new file mode 100644
index 0000000..7839b69
--- /dev/null
+++ b/vm/mterp/mips/OP_APUT.S
@@ -0,0 +1,27 @@
+%default { "store":"sw", "shift":"2" }
+%verify "executed"
+    /*
+     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(a2, 1)                         #  a2 <- BB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    FETCH_C(a3, 1)                         #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offArrayObject_length(a3, a0) #  a3 <- arrayObj->length
+    .if $shift
+    EASN(a0, a0, a1, $shift)               #  a0 <- arrayObj + index*width
+    .else
+    addu      a0, a0, a1
+    .endif
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_VREG(a2, rOBJ)                     #  a2 <- vAA
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    $store a2, offArrayObject_contents(a0) #  vBB[vCC] <- a2
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
diff --git a/vm/mterp/mips/OP_APUT_BOOLEAN.S b/vm/mterp/mips/OP_APUT_BOOLEAN.S
new file mode 100644
index 0000000..eeb9747
--- /dev/null
+++ b/vm/mterp/mips/OP_APUT_BOOLEAN.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_APUT.S" { "store":"sb", "shift":"0" }
diff --git a/vm/mterp/mips/OP_APUT_BYTE.S b/vm/mterp/mips/OP_APUT_BYTE.S
new file mode 100644
index 0000000..eeb9747
--- /dev/null
+++ b/vm/mterp/mips/OP_APUT_BYTE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_APUT.S" { "store":"sb", "shift":"0" }
diff --git a/vm/mterp/mips/OP_APUT_CHAR.S b/vm/mterp/mips/OP_APUT_CHAR.S
new file mode 100644
index 0000000..4c57fb1
--- /dev/null
+++ b/vm/mterp/mips/OP_APUT_CHAR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_APUT.S" { "store":"sh", "shift":"1" }
diff --git a/vm/mterp/mips/OP_APUT_OBJECT.S b/vm/mterp/mips/OP_APUT_OBJECT.S
new file mode 100644
index 0000000..1d5b06e
--- /dev/null
+++ b/vm/mterp/mips/OP_APUT_OBJECT.S
@@ -0,0 +1,50 @@
+%verify "executed"
+    /*
+     * Store an object into an array.  vBB[vCC] <- vAA.
+     *
+     */
+    /* op vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(t1)                            #  t1 <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    GET_VREG(rINST, a2)                    #  rINST <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    GET_VREG(rBIX, t1)                     #  rBIX <- vAA
+    # null array object?
+    beqz      rINST, common_errNullObject  #  yes, bail
+
+    LOAD_base_offArrayObject_length(a3, rINST) #  a3 <- arrayObj->length
+    EAS2(rOBJ, rINST, a1)                  #  rOBJ <- arrayObj + index*width
+    # compare unsigned index, length
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+    /*
+     * On entry:
+     *  rINST = vBB (arrayObj)
+     *  rBIX = vAA (obj)
+     *  rOBJ = offset into array (vBB + vCC * width)
+     */
+    bnez      rBIX, .L${opcode}_checks     #  yes, skip type checks
+.L${opcode}_finish:
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    sw        rBIX, offArrayObject_contents(rOBJ) #  vBB[vCC] <- vAA
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+%break
+.L${opcode}_checks:
+    LOAD_base_offObject_clazz(a0, rBIX)    #  a0 <- obj->clazz
+    LOAD_base_offObject_clazz(a1, rINST)   #  a1 <- arrayObj->clazz
+    JAL(dvmCanPutArrayElement)             #  test object type vs. array type
+    beqz      v0, .L${opcode}_throw        #  okay ?
+    lw        a2, offThread_cardTable(rSELF)
+    srl       t1, rINST, GC_CARD_SHIFT
+    addu      t2, a2, t1
+    sb        a2, (t2)
+    b         .L${opcode}_finish           #  yes, skip type checks
+.L${opcode}_throw:
+    LOAD_base_offObject_clazz(a0, rBIX)    #  a0 <- obj->clazz
+    LOAD_base_offObject_clazz(a1, rINST)   #  a1 <- arrayObj->clazz
+    EXPORT_PC()
+    JAL(dvmThrowArrayStoreExceptionIncompatibleElement)
+    b         common_exceptionThrown
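[Editor's note] The four instructions after the instanceof check are the GC card-marking write barrier. Dalvik biases the card table so that the base pointer's own low byte is the "dirty" value, which is why the code stores a2 (the base) into the card. Sketched in C:

    /* Sketch: dirty the card covering arrayObj after an object store. */
    u1* cardTable = self->cardTable;                          /* lw       */
    u1* card = cardTable + (((uintptr_t) arrayObj) >> GC_CARD_SHIFT); /* srl/addu */
    *card = (u1)(uintptr_t) cardTable;                        /* sb       */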
diff --git a/vm/mterp/mips/OP_APUT_SHORT.S b/vm/mterp/mips/OP_APUT_SHORT.S
new file mode 100644
index 0000000..4c57fb1
--- /dev/null
+++ b/vm/mterp/mips/OP_APUT_SHORT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_APUT.S" { "store":"sh", "shift":"1" }
diff --git a/vm/mterp/mips/OP_APUT_WIDE.S b/vm/mterp/mips/OP_APUT_WIDE.S
new file mode 100644
index 0000000..0046cd5
--- /dev/null
+++ b/vm/mterp/mips/OP_APUT_WIDE.S
@@ -0,0 +1,27 @@
+%verify "executed"
+    /*
+     * Array put, 64 bits.  vBB[vCC] <- vAA.
+     *
+     * Arrays of long/double are 64-bit aligned, so it's okay to use a 64-bit store.
+     */
+    /* aput-wide vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(t0)                            #  t0 <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offArrayObject_length(a3, a0) #  a3 <- arrayObj->length
+    EAS3(a0, a0, a1)                       #  a0 <- arrayObj + index*width
+    EAS2(rOBJ, rFP, t0)                    #  rOBJ <- &fp[AA]
+    # compare unsigned index, length
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    LOAD64(a2, a3, rOBJ)                   #  a2/a3 <- vAA/vAA+1
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64_off(a2, a3, a0, offArrayObject_contents) #  a2/a3 <- vBB[vCC]
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
diff --git a/vm/mterp/mips/OP_ARRAY_LENGTH.S b/vm/mterp/mips/OP_ARRAY_LENGTH.S
new file mode 100644
index 0000000..9416011
--- /dev/null
+++ b/vm/mterp/mips/OP_ARRAY_LENGTH.S
@@ -0,0 +1,14 @@
+%verify "executed"
+    /*
+     * Return the length of an array.
+     */
+    GET_OPB(a1)                            #  a1 <- B
+    GET_OPA4(a2)                           #  a2 <- A+
+    GET_VREG(a0, a1)                       #  a0 <- vB (object ref)
+    # is object null?
+    beqz      a0, common_errNullObject     #  yup, fail
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    LOAD_base_offArrayObject_length(a3, a0) #  a3 <- array length
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a3, a2, t0)              #  vA <- length
+
diff --git a/vm/mterp/mips/OP_BREAKPOINT.S b/vm/mterp/mips/OP_BREAKPOINT.S
new file mode 100644
index 0000000..3624810
--- /dev/null
+++ b/vm/mterp/mips/OP_BREAKPOINT.S
@@ -0,0 +1,15 @@
+%verify "executed"
+    /*
+     * Breakpoint handler.
+     *
+     * Restart this instruction with the original opcode.  By
+     * the time we get here, the breakpoint will have already been
+     * handled.
+     */
+    move    a0, rPC
+    JAL(dvmGetOriginalOpcode)           # (rPC)
+    FETCH(rINST, 0)                     # reload OP_BREAKPOINT + rest of inst
+    lw      a1, offThread_mainHandlerTable(rSELF)
+    and     rINST, 0xff00
+    or      rINST, rINST, a0
+    GOTO_OPCODE_BASE(a1, a0)
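[Editor's note] The and/or pair above rebuilds the instruction word: keep the operand byte of the fetched word, splice the original opcode back into the low byte, then dispatch on it. In C:

    #include <stdint.h>

    static uint16_t spliceOpcode(uint16_t inst, uint8_t originalOpcode) {
        return (uint16_t) ((inst & 0xff00) | originalOpcode);
    }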
diff --git a/vm/mterp/mips/OP_CHECK_CAST.S b/vm/mterp/mips/OP_CHECK_CAST.S
new file mode 100644
index 0000000..f29a51f
--- /dev/null
+++ b/vm/mterp/mips/OP_CHECK_CAST.S
@@ -0,0 +1,71 @@
+%verify "executed"
+%verify "null object"
+%verify "class cast exception thrown, with correct class name"
+%verify "class cast exception not thrown on same class"
+%verify "class cast exception not thrown on subclass"
+%verify "class not resolved"
+%verify "class already resolved"
+    /*
+     * Check to see if a cast from one class to another is allowed.
+     */
+    # check-cast vAA, class                /* BBBB */
+    GET_OPA(a3)                            #  a3 <- AA
+    FETCH(a2, 1)                           #  a2 <- BBBB
+    GET_VREG(rOBJ, a3)                     #  rOBJ <- object
+    LOAD_rSELF_methodClassDex(a0)          #  a0 <- pDvmDex
+    LOAD_base_offDvmDex_pResClasses(a0, a0) #  a0 <- pDvmDex->pResClasses
+    # is object null?
+    beqz      rOBJ, .L${opcode}_okay       #  null obj, cast always succeeds
+    LOAD_eas2(a1, a0, a2)                  #  a1 <- resolved class
+    LOAD_base_offObject_clazz(a0, rOBJ)    #  a0 <- obj->clazz
+    # have we resolved this before?
+    beqz      a1, .L${opcode}_resolve      #  not resolved, do it now
+.L${opcode}_resolved:
+    # same class (trivial success)?
+    bne       a0, a1, .L${opcode}_fullcheck #  no, do full check
+.L${opcode}_okay:
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+    /*
+     * Trivial test failed, need to perform full check.  This is common.
+     *  a0 holds obj->clazz
+     *  a1 holds class resolved from BBBB
+     *  rOBJ holds object
+     */
+.L${opcode}_fullcheck:
+    move      rBIX, a1                     #  avoid ClassObject getting clobbered
+    JAL(dvmInstanceofNonTrivial)           #  v0 <- boolean result
+    # failed?
+    bnez      v0, .L${opcode}_okay         #  no, success
+    b         .L${opcode}_castfailure
+%break
+
+.L${opcode}_castfailure:
+    # A cast has failed. We need to throw a ClassCastException with the
+    # class of the object that failed to be cast.
+    EXPORT_PC()                            #  about to throw
+    LOAD_base_offObject_clazz(a0, rOBJ)    #  a0 <- obj->clazz
+    move      a1, rBIX                     #  a1 <- desired class
+    JAL(dvmThrowClassCastException)
+    b         common_exceptionThrown
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     *  a2   holds BBBB
+     *  rOBJ holds object
+     */
+.L${opcode}_resolve:
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_rSELF_method(a3)                  #  a3 <- self->method
+    move      a1, a2                       #  a1 <- BBBB
+    li        a2, 0                        #  a2 <- false
+    LOAD_base_offMethod_clazz(a0, a3)      #  a0 <- method->clazz
+    JAL(dvmResolveClass)                   #  v0 <- resolved ClassObject ptr
+    # got null?
+    beqz      v0, common_exceptionThrown   #  yes, handle exception
+    move      a1, v0                       #  a1 <- class resolved from BBBB
+    LOAD_base_offObject_clazz(a0, rOBJ)    #  a0 <- obj->clazz
+    b         .L${opcode}_resolved         #  pick up where we left off
diff --git a/vm/mterp/mips/OP_CHECK_CAST_JUMBO.S b/vm/mterp/mips/OP_CHECK_CAST_JUMBO.S
new file mode 100644
index 0000000..966ffab
--- /dev/null
+++ b/vm/mterp/mips/OP_CHECK_CAST_JUMBO.S
@@ -0,0 +1,84 @@
+%verify "executed"
+%verify "null object"
+%verify "class cast exception thrown, with correct class name"
+%verify "class cast exception not thrown on same class"
+%verify "class cast exception not thrown on subclass"
+%verify "class not resolved"
+%verify "class already resolved"
+    /*
+     * Check to see if a cast from one class to another is allowed.
+     */
+    /* check-cast/jumbo vBBBB, class@AAAAAAAA */
+    FETCH(a0, 1)                           #  a0 <- aaaa (lo)
+    FETCH(a2, 2)                           #  a2 <- AAAA (hi)
+    FETCH(a3, 3)                           #  a3 <- BBBB
+    sll       a2, a2, 16
+    or        a2, a0, a2                   #  a2 <- AAAAaaaa
+
+    GET_VREG(rOBJ, a3)                     #  rOBJ <- object
+    LOAD_rSELF_methodClassDex(a0)          #  a0 <- pDvmDex
+    LOAD_base_offDvmDex_pResClasses(a0, a0) #  a0 <- pDvmDex->pResClasses
+    # is object null?
+    beqz      rOBJ, .L${opcode}_okay       #  null obj, cast always succeeds
+    LOAD_eas2(a1, a0, a2)                  #  a1 <- resolved class
+    LOAD_base_offObject_clazz(a0, rOBJ)    #  a0 <- obj->clazz
+    # have we resolved this before?
+    beqz      a1, .L${opcode}_resolve      #  not resolved, do it now
+.L${opcode}_resolved:
+    # same class (trivial success)?
+    bne       a0, a1, .L${opcode}_fullcheck #  no, do full check
+    b         .L${opcode}_okay             #  yes, finish up
+
+    /*
+     * Trivial test failed, need to perform full check.  This is common.
+     *  a0 holds obj->clazz
+     *  a1 holds class resolved from AAAAAAAA
+     *  rOBJ holds object
+     */
+.L${opcode}_fullcheck:
+    move      rBIX, a1                     #  avoid ClassObject getting clobbered
+    JAL(dvmInstanceofNonTrivial)           #  v0 <- boolean result
+    # failed?
+    bnez      v0, .L${opcode}_okay         #  no, success
+    b         .L${opcode}_castfailure
+
+%break
+
+
+.L${opcode}_castfailure:
+    # A cast has failed.  We need to throw a ClassCastException with the
+    # class of the object that failed to be cast.
+    EXPORT_PC()                            #  about to throw
+    LOAD_base_offObject_clazz(a0, rOBJ)    #  a0 <- obj->clazz
+    move      a1, rBIX                     #  a1 <- desired class
+    JAL(dvmThrowClassCastException)
+    b         common_exceptionThrown
+
+    /*
+     * Advance the PC and fetch the next opcode.
+     *
+     */
+.L${opcode}_okay:
+    FETCH_ADVANCE_INST(4)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     *  a2   holds AAAAAAAA
+     *  rOBJ holds object
+     */
+.L${opcode}_resolve:
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_rSELF_method(a3)                  #  a3 <- self->method
+    move      a1, a2                       #  a1 <- AAAAAAAA
+    li        a2, 0                        #  a2 <- false
+    LOAD_base_offMethod_clazz(a0, a3)      #  a0 <- method->clazz
+    JAL(dvmResolveClass)                   #  v0 <- resolved ClassObject ptr
+    # got null?
+    beqz      v0, common_exceptionThrown   #  yes, handle exception
+    move      a1, v0                       #  a1 <- class resolved from AAAAAAAA
+    LOAD_base_offObject_clazz(a0, rOBJ)    #  a0 <- obj->clazz
+    b         .L${opcode}_resolved         #  pick up where we left off
+
+
diff --git a/vm/mterp/mips/OP_CMPG_DOUBLE.S b/vm/mterp/mips/OP_CMPG_DOUBLE.S
new file mode 100644
index 0000000..8e740e3
--- /dev/null
+++ b/vm/mterp/mips/OP_CMPG_DOUBLE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_CMPL_DOUBLE.S" { "naninst":"li rTEMP, 1" }
diff --git a/vm/mterp/mips/OP_CMPG_FLOAT.S b/vm/mterp/mips/OP_CMPG_FLOAT.S
new file mode 100644
index 0000000..2c4e97b
--- /dev/null
+++ b/vm/mterp/mips/OP_CMPG_FLOAT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_CMPL_FLOAT.S" { "naninst":"li rTEMP, 1" }
diff --git a/vm/mterp/mips/OP_CMPL_DOUBLE.S b/vm/mterp/mips/OP_CMPL_DOUBLE.S
new file mode 100644
index 0000000..63bb005
--- /dev/null
+++ b/vm/mterp/mips/OP_CMPL_DOUBLE.S
@@ -0,0 +1,70 @@
+%default { "naninst":"li rTEMP, -1" }
+%verify "executed"
+%verify "basic lt, gt, eq */
+%verify "left arg NaN"
+%verify "right arg NaN"
+    /*
+     * Compare two floating-point values.  Puts 0, 1, or -1 into the
+     * destination register based on the results of the comparison.
+     *
+     * Provide a "naninst" instruction that puts 1 or -1 into a1 depending
+     * on what value we'd like to return when one of the operands is NaN.
+     *
+     * See OP_CMPL_FLOAT for an explanation.
+     *
+     * For: cmpl-double, cmpg-double
+     */
+    /* op vAA, vBB, vCC */
+
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    and       rOBJ, a0, 255                #  s0 <- BB
+    srl       rBIX, a0, 8                  #  t0 <- CC
+    EAS2(rOBJ, rFP, rOBJ)                  #  s0 <- &fp[BB]
+    EAS2(rBIX, rFP, rBIX)                  #  t0 <- &fp[CC]
+#ifdef SOFT_FLOAT
+    LOAD64(rARG0, rARG1, rOBJ)             #  a0/a1 <- vBB/vBB+1
+    LOAD64(rARG2, rARG3, rBIX)             #  a2/a3 <- vCC/vCC+1
+    JAL(__eqdf2)                           #  v0 <- 0 if vBB == vCC
+    li        rTEMP, 0
+    beqz      v0, ${opcode}_finish
+
+    LOAD64(rARG0, rARG1, rOBJ)             #  a0/a1 <- vBB/vBB+1
+    LOAD64(rARG2, rARG3, rBIX)             #  a2/a3 <- vCC/vCC+1
+    JAL(__ltdf2)
+    li        rTEMP, -1
+    bltz      v0, ${opcode}_finish
+    LOAD64(rARG0, rARG1, rOBJ)             #  a0/a1 <- vBB/vBB+1
+    b         ${opcode}_continue
+#else
+    LOAD64_F(fs0, fs0f, rOBJ)
+    LOAD64_F(fs1, fs1f, rBIX)
+    c.olt.d   fcc0, fs0, fs1
+    li        rTEMP, -1
+    bc1t      fcc0, ${opcode}_finish
+    c.olt.d   fcc0, fs1, fs0
+    li        rTEMP, 1
+    bc1t      fcc0, ${opcode}_finish
+    c.eq.d    fcc0, fs0, fs1
+    li        rTEMP, 0
+    bc1t      fcc0, ${opcode}_finish
+    b         ${opcode}_nan
+#endif
+%break
+
+${opcode}_nan:
+    $naninst
+    b         ${opcode}_finish
+
+#ifdef SOFT_FLOAT
+${opcode}_continue:
+    LOAD64(rARG2, rARG3, rBIX)             #  a2/a3 <- vCC/vCC+1
+    JAL(__gtdf2)                           #  v0 > 0 if vBB > vCC
+    li        rTEMP, 1                     #  rTEMP <- 1 (greater-than result)
+    blez      v0, ${opcode}_nan            #  not greater: must be NaN
+#endif
+
+${opcode}_finish:
+    GET_OPA(rOBJ)
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(rTEMP, rOBJ, t0)         #  vAA <- rTEMP
diff --git a/vm/mterp/mips/OP_CMPL_FLOAT.S b/vm/mterp/mips/OP_CMPL_FLOAT.S
new file mode 100644
index 0000000..6e07084
--- /dev/null
+++ b/vm/mterp/mips/OP_CMPL_FLOAT.S
@@ -0,0 +1,82 @@
+%default { "naninst":"li rTEMP, -1" }
+%verify "executed"
+%verify "basic lt, gt, eq */
+%verify "left arg NaN"
+%verify "right arg NaN"
+    /*
+     * Compare two floating-point values.  Puts 0, 1, or -1 into the
+     * destination register based on the results of the comparison.
+     *
+     * Provide a "naninst" instruction that puts 1 or -1 into a1 depending
+     * on what value we'd like to return when one of the operands is NaN.
+     *
+     * The operation we're implementing is:
+     *   if (x == y)
+     *     return 0;
+     *   else if (x < y)
+     *     return -1;
+     *   else if (x > y)
+     *     return 1;
+     *   else
+     *     return {-1,1};  // one or both operands was NaN
+     *
+     * for: cmpl-float, cmpg-float
+     */
+    /* op vAA, vBB, vCC */
+
+    /* "clasic" form */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8
+#ifdef SOFT_FLOAT
+    GET_VREG(rOBJ, a2)                     #  rOBJ <- vBB
+    GET_VREG(rBIX, a3)                     #  rBIX <- vCC
+    move      a0, rOBJ                     #  a0 <- vBB
+    move      a1, rBIX                     #  a1 <- vCC
+    JAL(__eqsf2)                           #  v0 <- 0 if vBB == vCC
+    li        rTEMP, 0                     # set rTEMP to 0
+    beqz      v0, ${opcode}_finish
+    move      a0, rOBJ                     #  a0 <- vBB
+    move      a1, rBIX                     #  a1 <- vCC
+    JAL(__ltsf2)                           #  v0 < 0 if vBB < vCC
+    li        rTEMP, -1
+    bltz      v0, ${opcode}_finish
+    move      a0, rOBJ                     #  a0 <- vBB
+    move      a1, rBIX                     #  a1 <- vCC
+    b         ${opcode}_continue
+#else
+    GET_VREG_F(fs0, a2)
+    GET_VREG_F(fs1, a3)
+    c.olt.s   fcc0, fs0, fs1               # Is fs0 < fs1
+    li        rTEMP, -1
+    bc1t      fcc0, ${opcode}_finish
+    c.olt.s   fcc0, fs1, fs0
+    li        rTEMP, 1
+    bc1t      fcc0, ${opcode}_finish
+    c.eq.s    fcc0, fs0, fs1
+    li        rTEMP, 0
+    bc1t      fcc0, ${opcode}_finish
+    b         ${opcode}_nan
+
+#endif
+
+%break
+
+${opcode}_nan:
+    $naninst
+    b         ${opcode}_finish
+
+#ifdef SOFT_FLOAT
+${opcode}_continue:
+    JAL(__gtsf2)                           #  v0 > 0 if vBB > vCC
+    li        rTEMP, 1                     #  rTEMP = 1 if v0 != 0
+    bgtz      v0, ${opcode}_finish
+    b         ${opcode}_nan
+#endif
+
+${opcode}_finish:
+    GET_OPA(t0)
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    SET_VREG(rTEMP, t0)                    #  vAA <- rTEMP
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)
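[Editor's note] The naninst parameter is the only difference between the cmpl and cmpg flavors; the whole comparison, restated in C (sketch):

    /* naninst == -1 gives cmpl-float, naninst == +1 gives cmpg-float. */
    static int cmpFloat(float x, float y, int naninst) {
        if (x < y)  return -1;
        if (x > y)  return  1;
        if (x == y) return  0;
        return naninst;                 /* one or both operands was NaN */
    }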
diff --git a/vm/mterp/mips/OP_CMP_LONG.S b/vm/mterp/mips/OP_CMP_LONG.S
new file mode 100644
index 0000000..fcdfce7
--- /dev/null
+++ b/vm/mterp/mips/OP_CMP_LONG.S
@@ -0,0 +1,40 @@
+%verify "executed"
+%verify "basic lt, gt, eq"
+%verify "hi equal, lo <=>"
+%verify "lo equal, hi <=>"
+    /*
+     * Compare two 64-bit values
+     *    x = y     return  0
+     *    x < y     return -1
+     *    x > y     return  1
+     *
+     * I think I can improve on the ARM code by the following observation
+     *    slt   t0, x.hi, y.hi          # (x.hi < y.hi) ? 1:0
+     *    sgt   t1, x.hi, y.hi          # (x.hi > y.hi) ? 1:0
+     *    subu  v0, t1, t0              # v0 <- -1:1:0 for [ < > = ]
+     */
+    /* cmp-long vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[CC]
+    LOAD64(a0, a1, a2)                     #  a0/a1 <- vBB/vBB+1
+    LOAD64(a2, a3, a3)                     #  a2/a3 <- vCC/vCC+1
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    slt       t0, a1, a3                   #  compare hi
+    sgt       t1, a1, a3
+    subu      v0, t1, t0                   #  v0 <- (-1, 1, 0)
+    bnez      v0, .L${opcode}_finish
+    # at this point x.hi==y.hi
+    sltu      t0, a0, a2                   #  compare lo
+    sgtu      t1, a0, a2
+    subu      v0, t1, t0                   #  v0 <- (-1, 1, 0) for [< > =]
+
+.L${opcode}_finish:
+    SET_VREG(v0, rOBJ)                     #  vAA <- v0
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
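[Editor's note] The slt/sgt observation from the header comment, restated in C (sketch):

    #include <stdint.h>

    static int cmpLong(int64_t x, int64_t y) {
        int32_t  xh = (int32_t) (x >> 32), yh = (int32_t) (y >> 32);
        uint32_t xl = (uint32_t) x,        yl = (uint32_t) y;
        int v = (xh > yh) - (xh < yh);        /* sgt - slt on high words  */
        return v ? v : (xl > yl) - (xl < yl); /* unsigned on low words    */
    }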
diff --git a/vm/mterp/mips/OP_CONST.S b/vm/mterp/mips/OP_CONST.S
new file mode 100644
index 0000000..309b52a
--- /dev/null
+++ b/vm/mterp/mips/OP_CONST.S
@@ -0,0 +1,11 @@
+%verify "executed"
+    # const vAA,                           /* +BBBBbbbb */
+    GET_OPA(a3)                            #  a3 <- AA
+    FETCH(a0, 1)                           #  a0 <- bbbb (low)
+    FETCH(a1, 2)                           #  a1 <- BBBB (high)
+    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
+    sll       a1, a1, 16
+    or        a0, a1, a0                   #  a0 <- BBBBbbbb
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, a3, t0)              #  vAA <- a0
+
diff --git a/vm/mterp/mips/OP_CONST_16.S b/vm/mterp/mips/OP_CONST_16.S
new file mode 100644
index 0000000..69732f4
--- /dev/null
+++ b/vm/mterp/mips/OP_CONST_16.S
@@ -0,0 +1,8 @@
+%verify "executed"
+    # const/16 vAA,                        /* +BBBB */
+    FETCH_S(a0, 1)                         #  a0 <- ssssBBBB (sign-extended)
+    GET_OPA(a3)                            #  a3 <- AA
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, a3, t0)              #  vAA <- a0
+
diff --git a/vm/mterp/mips/OP_CONST_4.S b/vm/mterp/mips/OP_CONST_4.S
new file mode 100644
index 0000000..833e373
--- /dev/null
+++ b/vm/mterp/mips/OP_CONST_4.S
@@ -0,0 +1,10 @@
+%verify "executed"
+    # const/4 vA,                          /* +B */
+    sll       a1, rINST, 16                #  a1 <- Bxxx0000
+    GET_OPA(a0)                            #  a0 <- A+
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    sra       a1, a1, 28                   #  a1 <- sssssssB (sign-extended)
+    and       a0, a0, 15
+    GET_INST_OPCODE(t0)                    #  t0 <- opcode from rINST
+    SET_VREG_GOTO(a1, a0, t0)              #  fp[A] <- a1
+
diff --git a/vm/mterp/mips/OP_CONST_CLASS.S b/vm/mterp/mips/OP_CONST_CLASS.S
new file mode 100644
index 0000000..f63d7c3
--- /dev/null
+++ b/vm/mterp/mips/OP_CONST_CLASS.S
@@ -0,0 +1,31 @@
+%verify "executed"
+%verify "Class already resolved"
+%verify "Class not yet resolved"
+%verify "Class cannot be resolved"
+    # const/class vAA, Class               /* BBBB */
+    FETCH(a1, 1)                           #  a1 <- BBBB
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- self->methodClassDex
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    LOAD_base_offDvmDex_pResClasses(a2, a2) #  a2 <- dvmDex->pResClasses
+    LOAD_eas2(v0, a2, a1)                  #  v0 <- pResClasses[BBBB]
+
+    bnez      v0, .L${opcode}_resolve      #  v0!=0 => resolved-ok
+    /*
+     * Continuation if the Class has not yet been resolved.
+     *  a1: BBBB (Class ref)
+     *  rOBJ: target register
+     */
+    EXPORT_PC()
+    LOAD_rSELF_method(a0)                  #  a0 <- self->method
+    li        a2, 1                        #  a2 <- true
+    LOAD_base_offMethod_clazz(a0, a0)      #  a0 <- method->clazz
+    JAL(dvmResolveClass)                   #  v0 <- Class reference
+    # failed==0?
+    beqz      v0, common_exceptionThrown   #  yup, handle the exception
+
+.L${opcode}_resolve:
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(v0, rOBJ, t0)            #  vAA <- v0
+
+
diff --git a/vm/mterp/mips/OP_CONST_CLASS_JUMBO.S b/vm/mterp/mips/OP_CONST_CLASS_JUMBO.S
new file mode 100644
index 0000000..05604b9
--- /dev/null
+++ b/vm/mterp/mips/OP_CONST_CLASS_JUMBO.S
@@ -0,0 +1,34 @@
+%verify "executed"
+%verify "Class already resolved"
+%verify "Class not yet resolved"
+%verify "Class cannot be resolved"
+    /* const-class/jumbo vBBBB, Class@AAAAAAAA */
+    FETCH(a0, 1)                           #  a0 <- aaaa (lo)
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- self->methodClassDex
+    FETCH(a1, 2)                           #  a1 <- AAAA (hi)
+    LOAD_base_offDvmDex_pResClasses(a2, a2) #  a2 <- dvmDex->pResClasses
+    sll       a1, a1, 16
+    or        a1, a0, a1                   #  a1 <- AAAAaaaa
+    FETCH(rOBJ, 3)                         #  rOBJ <- BBBB
+    LOAD_eas2(v0, a2, a1)                  #  v0 <- pResClasses[AAAAaaaa]
+
+    bnez      v0, .L${opcode}_resolve      #  v0!=0 => resolved-ok
+    /*
+     * Continuation if the Class has not yet been resolved.
+     *  a1: AAAAAAAA (Class ref)
+     *  rOBJ: target register
+     */
+    EXPORT_PC()
+    LOAD_rSELF_method(a0)                  #  a0 <- self->method
+    li        a2, 1                        #  a2 <- true
+    LOAD_base_offMethod_clazz(a0, a0)      #  a0 <- method->clazz
+    JAL(dvmResolveClass)                   #  v0 <- Class reference
+    # failed==0?
+    beqz      v0, common_exceptionThrown   #  yup, handle the exception
+
+.L${opcode}_resolve:
+    FETCH_ADVANCE_INST(4)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(v0, rOBJ, t0)            #  vBBBB <- v0
+
+
diff --git a/vm/mterp/mips/OP_CONST_HIGH16.S b/vm/mterp/mips/OP_CONST_HIGH16.S
new file mode 100644
index 0000000..04c6d5d
--- /dev/null
+++ b/vm/mterp/mips/OP_CONST_HIGH16.S
@@ -0,0 +1,9 @@
+%verify "executed"
+    # const/high16 vAA,                    /* +BBBB0000 */
+    FETCH(a0, 1)                           #  a0 <- 0000BBBB (zero-extended)
+    GET_OPA(a3)                            #  a3 <- AA
+    sll       a0, a0, 16                   #  a0 <- BBBB0000
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, a3, t0)              #  vAA <- a0
+
diff --git a/vm/mterp/mips/OP_CONST_STRING.S b/vm/mterp/mips/OP_CONST_STRING.S
new file mode 100644
index 0000000..f59b1d6
--- /dev/null
+++ b/vm/mterp/mips/OP_CONST_STRING.S
@@ -0,0 +1,33 @@
+%verify "executed"
+%verify "String already resolved"
+%verify "String not yet resolved"
+%verify "String cannot be resolved"
+    # const/string vAA, String             /* BBBB */
+    FETCH(a1, 1)                           #  a1 <- BBBB
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- self->methodClassDex
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    LOAD_base_offDvmDex_pResStrings(a2, a2) #  a2 <- dvmDex->pResStrings
+    LOAD_eas2(v0, a2, a1)                  #  v0 <- pResStrings[BBBB]
+    # not yet resolved?
+    bnez      v0, .L${opcode}_resolve
+    /*
+     * Continuation if the String has not yet been resolved.
+     *  a1:   BBBB (String ref)
+     *  rOBJ: target register
+     */
+    EXPORT_PC()
+    LOAD_rSELF_method(a0)                  #  a0 <- self->method
+    LOAD_base_offMethod_clazz(a0, a0)      #  a0 <- method->clazz
+    JAL(dvmResolveString)                  #  v0 <- String reference
+    # failed?
+    beqz      v0, common_exceptionThrown   #  yup, handle the exception
+
+.L${opcode}_resolve:
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(v0, rOBJ, t0)            #  vAA <- v0
+
+
+
+
+
diff --git a/vm/mterp/mips/OP_CONST_STRING_JUMBO.S b/vm/mterp/mips/OP_CONST_STRING_JUMBO.S
new file mode 100644
index 0000000..0c3d0bd
--- /dev/null
+++ b/vm/mterp/mips/OP_CONST_STRING_JUMBO.S
@@ -0,0 +1,32 @@
+%verify "executed"
+%verify "String already resolved"
+%verify "String not yet resolved"
+%verify "String cannot be resolved"
+    # const/string vAA, String             /* BBBBBBBB */
+    FETCH(a0, 1)                           #  a0 <- bbbb (low)
+    FETCH(a1, 2)                           #  a1 <- BBBB (high)
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- self->methodClassDex
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    LOAD_base_offDvmDex_pResStrings(a2, a2) #  a2 <- dvmDex->pResStrings
+    sll       a1, a1, 16
+    or        a1, a1, a0                   #  a1 <- BBBBbbbb
+    LOAD_eas2(v0, a2, a1)                  #  v0 <- pResStrings[BBBBbbbb]
+    bnez      v0, .L${opcode}_resolve
+
+    /*
+     * Continuation if the String has not yet been resolved.
+     *  a1: BBBBBBBB (String ref)
+     *  rOBJ: target register
+     */
+    EXPORT_PC()
+    LOAD_rSELF_method(a0)                  #  a0 <- self->method
+    LOAD_base_offMethod_clazz(a0, a0)      #  a0 <- method->clazz
+    JAL(dvmResolveString)                  #  v0 <- String reference
+    # failed?
+    beqz      v0, common_exceptionThrown   #  yup, handle the exception
+
+.L${opcode}_resolve:
+    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    SET_VREG_GOTO(v0, rOBJ, t1)            #  vAA <- v0
+
diff --git a/vm/mterp/mips/OP_CONST_WIDE.S b/vm/mterp/mips/OP_CONST_WIDE.S
new file mode 100644
index 0000000..ba1c462
--- /dev/null
+++ b/vm/mterp/mips/OP_CONST_WIDE.S
@@ -0,0 +1,17 @@
+%verify "executed"
+    # const-wide vAA,                      /* +HHHHhhhhBBBBbbbb */
+    FETCH(a0, 1)                           #  a0 <- bbbb (low)
+    FETCH(a1, 2)                           #  a1 <- BBBB (low middle)
+    FETCH(a2, 3)                           #  a2 <- hhhh (high middle)
+    sll       a1, 16
+    or        a0, a1                       #  a0 <- BBBBbbbb (low word)
+    FETCH(a3, 4)                           #  a3 <- HHHH (high)
+    GET_OPA(t1)                            #  t1 <- AA
+    sll       a3, 16
+    or        a1, a3, a2                   #  a1 <- HHHHhhhh (high word)
+    FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
+    EAS2(t1, rFP, t1)                      #  t1 <- &fp[AA]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(a0, a1, t1)                    #  vAA <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
diff --git a/vm/mterp/mips/OP_CONST_WIDE_16.S b/vm/mterp/mips/OP_CONST_WIDE_16.S
new file mode 100644
index 0000000..d431529
--- /dev/null
+++ b/vm/mterp/mips/OP_CONST_WIDE_16.S
@@ -0,0 +1,11 @@
+%verify "executed"
+    # const-wide/16 vAA,                   /* +BBBB */
+    FETCH_S(a0, 1)                         #  a0 <- ssssBBBB (sign-extended)
+    GET_OPA(a3)                            #  a3 <- AA
+    sra       a1, a0, 31                   #  a1 <- ssssssss
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[AA]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(a0, a1, a3)                    #  vAA <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
diff --git a/vm/mterp/mips/OP_CONST_WIDE_32.S b/vm/mterp/mips/OP_CONST_WIDE_32.S
new file mode 100644
index 0000000..9cb9a3f
--- /dev/null
+++ b/vm/mterp/mips/OP_CONST_WIDE_32.S
@@ -0,0 +1,14 @@
+%verify "executed"
+    # const-wide/32 vAA,                   /* +BBBBbbbb */
+    FETCH(a0, 1)                           #  a0 <- 0000bbbb (low)
+    GET_OPA(a3)                            #  a3 <- AA
+    FETCH_S(a2, 2)                         #  a2 <- ssssBBBB (high)
+    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
+    sll       a2, a2, 16
+    or        a0, a0, a2                   #  a0 <- BBBBbbbb
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[AA]
+    sra       a1, a0, 31                   #  a1 <- ssssssss
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(a0, a1, a3)                    #  vAA <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
diff --git a/vm/mterp/mips/OP_CONST_WIDE_HIGH16.S b/vm/mterp/mips/OP_CONST_WIDE_HIGH16.S
new file mode 100644
index 0000000..c56cd26
--- /dev/null
+++ b/vm/mterp/mips/OP_CONST_WIDE_HIGH16.S
@@ -0,0 +1,12 @@
+%verify "executed"
+    # const-wide/high16 vAA,               /* +BBBB000000000000 */
+    FETCH(a1, 1)                           #  a1 <- 0000BBBB (zero-extended)
+    GET_OPA(a3)                            #  a3 <- AA
+    li        a0, 0                        #  a0 <- 00000000
+    sll       a1, 16                       #  a1 <- BBBB0000
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[AA]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(a0, a1, a3)                    #  vAA <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
diff --git a/vm/mterp/mips/OP_DISPATCH_FF.S b/vm/mterp/mips/OP_DISPATCH_FF.S
new file mode 100644
index 0000000..0503c33
--- /dev/null
+++ b/vm/mterp/mips/OP_DISPATCH_FF.S
@@ -0,0 +1,4 @@
+%verify "executed"
+    srl     t0, rINST, 8                # t0<- extended opcode
+    addu    t0, t0, 256                 # add offset for extended opcodes
+    GOTO_OPCODE(t0)                     # go to proper extended handler
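The dispatch math above in one line of C: the high byte of the 16-bit unit selects a handler in the upper half of a 512-entry table (a sketch, not VM code):

    #include <stdint.h>

    static unsigned extended_handler_index(uint16_t inst) {
        return (unsigned)(inst >> 8) + 256;   /* handlers 0x100..0x1ff */
    }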
diff --git a/vm/mterp/mips/OP_DIV_DOUBLE.S b/vm/mterp/mips/OP_DIV_DOUBLE.S
new file mode 100644
index 0000000..a7e0302
--- /dev/null
+++ b/vm/mterp/mips/OP_DIV_DOUBLE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflopWide.S" {"instr":"JAL(__divdf3)", "instr_f":"div.d fv0, fa0, fa1"}
diff --git a/vm/mterp/mips/OP_DIV_DOUBLE_2ADDR.S b/vm/mterp/mips/OP_DIV_DOUBLE_2ADDR.S
new file mode 100644
index 0000000..18e28d7
--- /dev/null
+++ b/vm/mterp/mips/OP_DIV_DOUBLE_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflopWide2addr.S" {"instr":"JAL(__divdf3)", "instr_f":"div.d fv0, fa0, fa1"}
diff --git a/vm/mterp/mips/OP_DIV_FLOAT.S b/vm/mterp/mips/OP_DIV_FLOAT.S
new file mode 100644
index 0000000..59bb8d6
--- /dev/null
+++ b/vm/mterp/mips/OP_DIV_FLOAT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflop.S" {"instr":"JAL(__divsf3)", "instr_f":"div.s fv0, fa0, fa1"}
diff --git a/vm/mterp/mips/OP_DIV_FLOAT_2ADDR.S b/vm/mterp/mips/OP_DIV_FLOAT_2ADDR.S
new file mode 100644
index 0000000..a0a546f
--- /dev/null
+++ b/vm/mterp/mips/OP_DIV_FLOAT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflop2addr.S" {"instr":"JAL(__divsf3)", "instr_f":"div.s fv0, fa0, fa1"}
diff --git a/vm/mterp/mips/OP_DIV_INT.S b/vm/mterp/mips/OP_DIV_INT.S
new file mode 100644
index 0000000..b845475
--- /dev/null
+++ b/vm/mterp/mips/OP_DIV_INT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop.S" {"instr":"div zero, a0, a1; mflo a0", "chkzero":"1"}
diff --git a/vm/mterp/mips/OP_DIV_INT_2ADDR.S b/vm/mterp/mips/OP_DIV_INT_2ADDR.S
new file mode 100644
index 0000000..1f13ad8
--- /dev/null
+++ b/vm/mterp/mips/OP_DIV_INT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop2addr.S" {"instr":"div zero, a0, a1; mflo a0", "chkzero":"1"}
diff --git a/vm/mterp/mips/OP_DIV_INT_LIT16.S b/vm/mterp/mips/OP_DIV_INT_LIT16.S
new file mode 100644
index 0000000..d75d210
--- /dev/null
+++ b/vm/mterp/mips/OP_DIV_INT_LIT16.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopLit16.S" {"instr":"div zero, a0, a1; mflo a0", "chkzero":"1"}
diff --git a/vm/mterp/mips/OP_DIV_INT_LIT8.S b/vm/mterp/mips/OP_DIV_INT_LIT8.S
new file mode 100644
index 0000000..384eb0d
--- /dev/null
+++ b/vm/mterp/mips/OP_DIV_INT_LIT8.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopLit8.S" {"instr":"div zero, a0, a1; mflo a0", "chkzero":"1"}
diff --git a/vm/mterp/mips/OP_DIV_LONG.S b/vm/mterp/mips/OP_DIV_LONG.S
new file mode 100644
index 0000000..bb39d2a
--- /dev/null
+++ b/vm/mterp/mips/OP_DIV_LONG.S
@@ -0,0 +1,6 @@
+%verify "executed"
+#ifdef HAVE_LITTLE_ENDIAN
+%include "mips/binopWide.S" {"result0":"v0", "result1":"v1", "instr":"JAL(__divdi3)", "chkzero":"1"}
+#else
+%include "mips/binopWide.S" { "arg0":"a1", "arg1":"a0", "arg2":"a3", "arg3":"a2", "result0":"v1", "result1":"v0", "instr":"JAL(__divdi3)", "chkzero":"1"}
+#endif
diff --git a/vm/mterp/mips/OP_DIV_LONG_2ADDR.S b/vm/mterp/mips/OP_DIV_LONG_2ADDR.S
new file mode 100644
index 0000000..8e751b6
--- /dev/null
+++ b/vm/mterp/mips/OP_DIV_LONG_2ADDR.S
@@ -0,0 +1,6 @@
+%verify "executed"
+#ifdef HAVE_LITTLE_ENDIAN
+%include "mips/binopWide2addr.S" {"result0":"v0", "result1":"v1", "instr":"JAL(__divdi3)", "chkzero":"1"}
+#else
+%include "mips/binopWide2addr.S" {"arg0":"a1", "arg1":"a0", "arg2":"a3", "arg3":"a2", "result0":"v1", "result1":"v0", "instr":"JAL(__divdi3)", "chkzero":"1"}
+#endif
diff --git a/vm/mterp/mips/OP_DOUBLE_TO_FLOAT.S b/vm/mterp/mips/OP_DOUBLE_TO_FLOAT.S
new file mode 100644
index 0000000..f1e04ea
--- /dev/null
+++ b/vm/mterp/mips/OP_DOUBLE_TO_FLOAT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/unopNarrower.S" {"instr":"JAL(__truncdfsf2)", "instr_f":"cvt.s.d fv0, fa0"}
diff --git a/vm/mterp/mips/OP_DOUBLE_TO_INT.S b/vm/mterp/mips/OP_DOUBLE_TO_INT.S
new file mode 100644
index 0000000..33199c4
--- /dev/null
+++ b/vm/mterp/mips/OP_DOUBLE_TO_INT.S
@@ -0,0 +1,80 @@
+%verify "executed"
+%include "mips/unopNarrower.S" {"instr":"b d2i_doconv", "instr_f":"b d2i_doconv"}
+/*
+ * Convert the double in a0/a1 to an int in a0.
+ *
+ * We have to clip values to int min/max per the specification.  The
+ * expected common case is a "reasonable" value that converts directly
+ * to a modest integer.  The EABI convert function doesn't do this for us.
+ * rBIX / rTEMP hold the arguments across calls (they are not bound to a global var).
+ */
+%break
+
+
+d2i_doconv:
+#ifdef SOFT_FLOAT
+    la        t0, .LDOUBLE_TO_INT_max
+    LOAD64(rARG2, rARG3, t0)
+    move      rBIX, rARG0                  #  save a0
+    move      rTEMP, rARG1                 #  and a1
+    JAL(__gedf2)                           #  is arg >= maxint?
+
+    move      t0, v0
+    li        v0, ~0x80000000              #  return maxint (7fffffff)
+    bgez      t0, .L${opcode}_set_vreg     #  t0 >= 0 means yes
+
+    move      rARG0, rBIX                  #  recover arg
+    move      rARG1, rTEMP
+    la        t0, .LDOUBLE_TO_INT_min
+    LOAD64(rARG2, rARG3, t0)
+    JAL(__ledf2)                           #  is arg <= minint?
+
+    move      t0, v0
+    li        v0, 0x80000000               #  return minint (80000000)
+    blez      t0, .L${opcode}_set_vreg     #  t0 <= 0 means yes
+
+    move      rARG0, rBIX                  #  recover arg
+    move      rARG1, rTEMP
+    move      rARG2, rBIX                  #  compare against self
+    move      rARG3, rTEMP
+    JAL(__nedf2)                           #  is arg == self?
+
+    move      t0, v0                       #  zero == no
+    li        v0, 0
+    bnez      t0, .L${opcode}_set_vreg     #  return zero for NaN
+
+    move      rARG0, rBIX                  #  recover arg
+    move      rARG1, rTEMP
+    JAL(__fixdfsi)                         #  convert double to int
+    b         .L${opcode}_set_vreg
+#else
+    la        t0, .LDOUBLE_TO_INT_max
+    LOAD64_F(fa1, fa1f, t0)
+    c.ole.d   fcc0, fa1, fa0
+    l.s       fv0, .LDOUBLE_TO_INT_maxret
+    bc1t      .L${opcode}_set_vreg_f
+
+    la        t0, .LDOUBLE_TO_INT_min
+    LOAD64_F(fa1, fa1f, t0)
+    c.ole.d   fcc0, fa0, fa1
+    l.s       fv0, .LDOUBLE_TO_INT_minret
+    bc1t      .L${opcode}_set_vreg_f
+
+    mov.d     fa1, fa0
+    c.un.d    fcc0, fa0, fa1
+    li.s      fv0, 0
+    bc1t      .L${opcode}_set_vreg_f
+
+    trunc.w.d  fv0, fa0
+    b         .L${opcode}_set_vreg_f
+#endif
+
+
+.LDOUBLE_TO_INT_max:
+    .dword 0x41dfffffffc00000              #  maxint, as a double
+.LDOUBLE_TO_INT_min:
+    .dword 0xc1e0000000000000              #  minint, as a double
+.LDOUBLE_TO_INT_maxret:
+    .word 0x7fffffff
+.LDOUBLE_TO_INT_minret:
+    .word 0x80000000
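Both conversion helpers (this one and d2l_doconv in OP_DOUBLE_TO_LONG.S below) implement the same contract, which the bare EABI conversion does not: clamp to the target type's min/max and map NaN to zero. A C sketch of the int case (helper name is hypothetical):

    #include <stdint.h>

    static int32_t d2i(double d) {
        if (d >= 2147483647.0)   return INT32_MAX;  /* the 0x41dfffffffc00000 bound */
        if (d <= -2147483648.0)  return INT32_MIN;
        if (d != d)              return 0;          /* NaN compares unequal to itself */
        return (int32_t)d;
    }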
diff --git a/vm/mterp/mips/OP_DOUBLE_TO_LONG.S b/vm/mterp/mips/OP_DOUBLE_TO_LONG.S
new file mode 100644
index 0000000..153d557
--- /dev/null
+++ b/vm/mterp/mips/OP_DOUBLE_TO_LONG.S
@@ -0,0 +1,76 @@
+%verify "executed"
+%include "mips/unflopWide.S" {"instr":"b d2l_doconv", "st_result":"STORE64(rRESULT0, rRESULT1, rOBJ)"}
+%break
+
+d2l_doconv:
+#ifdef SOFT_FLOAT
+    la        t0, .LDOUBLE_TO_LONG_max
+    LOAD64(rARG2, rARG3, t0)
+    move      rBIX, rARG0                  #  save a0
+    move      rTEMP, rARG1                 #  and a1
+    JAL(__gedf2)
+
+    move      t1, v0
+    la        t0, .LDOUBLE_TO_LONG_ret_max
+    LOAD64(rRESULT0, rRESULT1, t0)
+    bgez      t1, .L${opcode}_set_vreg
+
+    move      rARG0, rBIX
+    move      rARG1, rTEMP
+    la        t0, .LDOUBLE_TO_LONG_min
+    LOAD64(rARG2, rARG3, t0)
+    JAL(__ledf2)
+
+    move      t1, v0
+    la        t0, .LDOUBLE_TO_LONG_ret_min
+    LOAD64(rRESULT0, rRESULT1, t0)
+    blez      t1, .L${opcode}_set_vreg
+
+    move      rARG0, rBIX
+    move      rARG1, rTEMP
+    move      rARG2, rBIX
+    move      rARG3, rTEMP
+    JAL(__nedf2)
+
+    move      t0, v0
+    li        rRESULT0, 0
+    li        rRESULT1, 0
+    bnez      t0, .L${opcode}_set_vreg
+
+    move      rARG0, rBIX
+    move      rARG1, rTEMP
+    JAL(__fixdfdi)
+
+#else
+    la        t0, .LDOUBLE_TO_LONG_max
+    LOAD64_F(fa1, fa1f, t0)
+    c.ole.d   fcc0, fa1, fa0
+    la        t0, .LDOUBLE_TO_LONG_ret_max
+    LOAD64(rRESULT0, rRESULT1, t0)
+    bc1t      .L${opcode}_set_vreg
+
+    la        t0, .LDOUBLE_TO_LONG_min
+    LOAD64_F(fa1, fa1f, t0)
+    c.ole.d   fcc0, fa0, fa1
+    la        t0, .LDOUBLE_TO_LONG_ret_min
+    LOAD64(rRESULT0, rRESULT1, t0)
+    bc1t      .L${opcode}_set_vreg
+
+    mov.d     fa1, fa0
+    c.un.d    fcc0, fa0, fa1
+    li        rRESULT0, 0
+    li        rRESULT1, 0
+    bc1t      .L${opcode}_set_vreg
+    JAL(__fixdfdi)
+#endif
+    b         .L${opcode}_set_vreg
+
+
+.LDOUBLE_TO_LONG_max:
+    .dword 0x43e0000000000000              #  maxlong, as a double
+.LDOUBLE_TO_LONG_min:
+    .dword 0xc3e0000000000000              #  minlong, as a double
+.LDOUBLE_TO_LONG_ret_max:
+    .dword 0x7fffffffffffffff
+.LDOUBLE_TO_LONG_ret_min:
+    .dword 0x8000000000000000
diff --git a/vm/mterp/mips/OP_EXECUTE_INLINE.S b/vm/mterp/mips/OP_EXECUTE_INLINE.S
new file mode 100644
index 0000000..cbc8917
--- /dev/null
+++ b/vm/mterp/mips/OP_EXECUTE_INLINE.S
@@ -0,0 +1,104 @@
+%verify "executed"
+%verify "exception handled"
+    /*
+     * Execute a "native inline" instruction.
+     *
+     * We need to call an InlineOp4Func:
+     *  bool (func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult)
+     *
+     * The first four args are in a0-a3, pointer to return value storage
+     * is on the stack.  The function's return value is a flag that tells
+     * us if an exception was thrown.
+     *
+     * TUNING: could maintain two tables, pointer in Thread and
+     * swap if profiler/debugger active.
+     */
+    /* [opt] execute-inline vAA, {vC, vD, vE, vF}, inline@BBBB */
+    lhu       a2, offThread_subMode(rSELF)
+    FETCH(rBIX, 1)                         #  rBIX <- BBBB
+    EXPORT_PC()                            #  can throw
+    and       a2, kSubModeDebugProfile     #  Any going on?
+    bnez      a2, .L${opcode}_debugmode    #  yes - take slow path
+.L${opcode}_resume:
+    addu      a1, rSELF, offThread_retval  #  a1 <- &self->retval
+    GET_OPB(a0)                            #  a0 <- B
+    # Stack should have 16/20 available
+    sw        a1, STACK_OFFSET_ARG04(sp)   #  push &self->retval
+    BAL(.L${opcode}_continue)              #  make call; will return after
+    lw        gp, STACK_OFFSET_GP(sp)      #  restore gp
+    # test boolean result of inline
+    beqz      v0, common_exceptionThrown   #  returned false, handle exception
+    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+%break
+
+    /*
+     * Extract args, call function.
+     *  a0 = #of args (0-4)
+     *  rBIX = call index
+     *
+     * Other ideas:
+     * - Use a jump table from the main piece to jump directly into the
+     *   AND/LW pairs.  Costs a data load, saves a branch.
+     * - Have five separate pieces that do the loading, so we can work the
+     *   interleave a little better.  Increases code size.
+     */
+.L${opcode}_continue:
+    FETCH(rINST, 2)                        #  rINST <- FEDC
+    beq       a0, 0, 0f
+    beq       a0, 1, 1f
+    beq       a0, 2, 2f
+    beq       a0, 3, 3f
+    beq       a0, 4, 4f
+    JAL(common_abort)                      #  too many arguments
+
+4:
+    and       t0, rINST, 0xf000            #  isolate F
+    ESRN(t1, rFP, t0, 10)
+    lw        a3, 0(t1)                    #  a3 <- vF (shift right 12, left 2)
+3:
+    and       t0, rINST, 0x0f00            #  isolate E
+    ESRN(t1, rFP, t0, 6)
+    lw        a2, 0(t1)                    #  a2 <- vE
+2:
+    and       t0, rINST, 0x00f0            #  isolate D
+    ESRN(t1, rFP, t0, 2)
+    lw        a1, 0(t1)                    #  a1 <- vD
+1:
+    and       t0, rINST, 0x000f            #  isolate C
+    EASN(t1, rFP, t0, 2)
+    lw        a0, 0(t1)                    #  a0 <- vC
+0:
+    la        rINST, gDvmInlineOpsTable    #  table of InlineOperation
+    EAS4(t1, rINST, rBIX)                  #  t1 <- rINST + rBIX<<4
+    lw        t9, 0(t1)
+    jr        t9                           #  sizeof=16, "func" is first entry
+    # (not reached)
+
+    /*
+     * We're debugging or profiling.
+     * rBIX: opIndex
+     */
+.L${opcode}_debugmode:
+    move      a0, rBIX
+    JAL(dvmResolveInlineNative)
+    beqz      v0, .L${opcode}_resume       #  did it resolve? no, just move on
+    move      rOBJ, v0                     #  remember method
+    move      a0, v0
+    move      a1, rSELF
+    JAL(dvmFastMethodTraceEnter)           #  (method, self)
+    addu      a1, rSELF, offThread_retval  #  a1<- &self->retval
+    GET_OPB(a0)                            #  a0 <- B
+    # Stack should have 16/20 available
+    sw        a1, 16(sp)                   #  push &self->retval
+    BAL(.L${opcode}_continue)              #  make call; will return after
+    lw        gp, STACK_OFFSET_GP(sp)      #  restore gp
+    move      rINST, v0                    #  save result of inline
+    move      a0, rOBJ                     #  a0<- method
+    move      a1, rSELF                    #  a1<- self
+    JAL(dvmFastMethodTraceExit)            #  (method, self)
+    beqz      v0, common_exceptionThrown   #  returned false, handle exception
+    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
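The indirect call above, restated in C using the signature from the comment block; the table-entry layout matches the EAS4 indexing ("sizeof=16, func is first entry"). The struct and typedef names sketch Dalvik's InlineNative declarations and are assumptions here:

    #include <stdbool.h>

    typedef unsigned int u4;
    typedef union JValue JValue;   /* the VM's return-value union */
    typedef bool (*InlineOp4Func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
                                  JValue *pResult);
    struct InlineOperation {
        InlineOp4Func func;              /* first entry, as the asm assumes */
        const char   *classDescriptor;
        const char   *methodName;
        const char   *methodSignature;   /* 16 bytes total on 32-bit */
    };
    extern struct InlineOperation gDvmInlineOpsTable[];

    /* returns false if the inline op threw an exception */
    static bool callInlineOp(unsigned opIndex, u4 a0, u4 a1, u4 a2, u4 a3,
                             JValue *res) {
        return gDvmInlineOpsTable[opIndex].func(a0, a1, a2, a3, res);
    }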
diff --git a/vm/mterp/mips/OP_EXECUTE_INLINE_RANGE.S b/vm/mterp/mips/OP_EXECUTE_INLINE_RANGE.S
new file mode 100644
index 0000000..3c95a8c
--- /dev/null
+++ b/vm/mterp/mips/OP_EXECUTE_INLINE_RANGE.S
@@ -0,0 +1,92 @@
+%verify "executed"
+%verify "exception handled"
+    /*
+     * Execute a "native inline" instruction, using "/range" semantics.
+     * Same idea as execute-inline, but we get the args differently.
+     *
+     * We need to call an InlineOp4Func:
+     *  bool (func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult)
+     *
+     * The first four args are in a0-a3, pointer to return value storage
+     * is on the stack.  The function's return value is a flag that tells
+     * us if an exception was thrown.
+     */
+    /* [opt] execute-inline/range {vCCCC..v(CCCC+AA-1)}, inline@BBBB */
+    lhu       a2, offThread_subMode(rSELF)
+    FETCH(rBIX, 1)                       # rBIX<- BBBB
+    EXPORT_PC()                          # can throw
+    and       a2, kSubModeDebugProfile   # Any going on?
+    bnez      a2, .L${opcode}_debugmode  # yes - take slow path
+.L${opcode}_resume:
+    addu      a1, rSELF, offThread_retval # a1<- &self->retval
+    GET_OPA(a0)
+    sw        a1, STACK_OFFSET_ARG04(sp)  # push &self->retval
+    BAL(.L${opcode}_continue)             # make call; will return after
+    lw        gp, STACK_OFFSET_GP(sp)     #  restore gp
+    beqz      v0, common_exceptionThrown  # returned false, handle exception
+    FETCH_ADVANCE_INST(3)                 # advance rPC, load rINST
+    GET_INST_OPCODE(t0)                   # extract opcode from rINST
+    GOTO_OPCODE(t0)                       # jump to next instruction
+
+%break
+
+    /*
+     * Extract args, call function.
+     *  a0 = #of args (0-4)
+     *  rBIX = call index
+     *  ra = return addr, above  [DO NOT JAL out of here w/o preserving ra]
+     */
+.L${opcode}_continue:
+    FETCH(rOBJ, 2)                       # rOBJ <- CCCC
+    beq       a0, 0, 0f
+    beq       a0, 1, 1f
+    beq       a0, 2, 2f
+    beq       a0, 3, 3f
+    beq       a0, 4, 4f
+    JAL(common_abort)                      #  too many arguments
+
+4:
+    add       t0, rOBJ, 3
+    GET_VREG(a3, t0)
+3:
+    add       t0, rOBJ, 2
+    GET_VREG(a2, t0)
+2:
+    add       t0, rOBJ, 1
+    GET_VREG(a1, t0)
+1:
+    GET_VREG(a0, rOBJ)
+0:
+    la        rOBJ, gDvmInlineOpsTable      # table of InlineOperation
+    EAS4(t1, rOBJ, rBIX)                    # t1 <- rOBJ + rBIX<<4
+    lw        t9, 0(t1)
+    jr        t9                            # sizeof=16, "func" is first entry
+    # not reached
+
+    /*
+     * We're debugging or profiling.
+     * rBIX: opIndex
+     */
+.L${opcode}_debugmode:
+    move      a0, rBIX
+    JAL(dvmResolveInlineNative)
+    beqz      v0, .L${opcode}_resume       #  did it resolve? no, just move on
+    move      rOBJ, v0                     #  remember method
+    move      a0, v0
+    move      a1, rSELF
+    JAL(dvmFastMethodTraceEnter)           #  (method, self)
+    addu      a1, rSELF, offThread_retval  #  a1<- &self->retval
+    GET_OPA(a0)                            #  a0 <- A
+    # Stack should have 16/20 available
+    sw        a1, 16(sp)                   #  push &self->retval
+    move      rINST, rOBJ                  #  rINST<- method
+    BAL(.L${opcode}_continue)              #  make call; will return after
+    lw        gp, STACK_OFFSET_GP(sp)      #  restore gp
+    move      rOBJ, v0                     #  save result of inline
+    move      a0, rINST                    #  a0<- method
+    move      a1, rSELF                    #  a1<- self
+    JAL(dvmFastNativeMethodTraceExit)      #  (method, self)
+    beqz      rOBJ, common_exceptionThrown #  returned false, handle exception
+    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/vm/mterp/mips/OP_FILLED_NEW_ARRAY.S b/vm/mterp/mips/OP_FILLED_NEW_ARRAY.S
new file mode 100644
index 0000000..2cb225d
--- /dev/null
+++ b/vm/mterp/mips/OP_FILLED_NEW_ARRAY.S
@@ -0,0 +1,120 @@
+%default { "isrange":"0" }
+%verify "executed"
+%verify "unimplemented array type"
+    /*
+     * Create a new array with elements filled from registers.
+     *
+     * for: filled-new-array, filled-new-array/range
+     */
+    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
+    # op {vCCCC..v(CCCC+AA-1)}, type       /* BBBB */
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- pDvmDex
+    FETCH(a1, 1)                           #  a1 <- BBBB
+    LOAD_base_offDvmDex_pResClasses(a3, a3) #  a3 <- pDvmDex->pResClasses
+    EXPORT_PC()                            #  need for resolve and alloc
+    LOAD_eas2(a0, a3, a1)                  #  a0 <- resolved class
+    GET_OPA(rOBJ)                          #  rOBJ <- AA or BA
+    # already resolved?
+    bnez      a0, .L${opcode}_continue     #  yes, continue on
+    LOAD_rSELF_method(a3)                  #  a3 <- self->method
+    li        a2, 0                        #  a2 <- false
+    LOAD_base_offMethod_clazz(a0, a3)      #  a0 <- method->clazz
+    JAL(dvmResolveClass)                   #  v0 <- call(clazz, ref)
+    move      a0, v0
+    # got null?
+    beqz      v0, common_exceptionThrown   #  yes, handle exception
+    b         .L${opcode}_continue
+%break
+
+    /*
+     * On entry:
+     *  a0 holds array class
+     *  rOBJ holds AA or BA
+     */
+.L${opcode}_continue:
+    LOAD_base_offClassObject_descriptor(a3, a0) #  a3 <- arrayClass->descriptor
+    li        a2, ALLOC_DONT_TRACK         #  a2 <- alloc flags
+    lbu       rINST, 1(a3)                 #  rINST <- descriptor[1]
+    .if $isrange
+    move      a1, rOBJ                     #  a1 <- AA (length)
+    .else
+    srl       a1, rOBJ, 4                  #  a1 <- B (length)
+    .endif
+    seq       t0, rINST, 'I'               #  array of ints?
+    seq       t1, rINST, 'L'               #  array of objects?
+    or        t0, t1
+    seq       t1, rINST, '['               #  array of arrays?
+    or        t0, t1
+    move      rBIX, a1                     #  save length in rBIX
+    beqz      t0, .L${opcode}_notimpl      #  no, not handled yet
+    JAL(dvmAllocArrayByClass)              #  v0 <- call(arClass, length, flags)
+    # null return?
+    beqz      v0, common_exceptionThrown   #  alloc failed, handle exception
+
+    FETCH(a1, 2)                           #  a1 <- FEDC or CCCC
+    sw        v0, offThread_retval(rSELF)  #  retval.l <- new array
+    sw        rINST, (offThread_retval+4)(rSELF) #  retval.h <- type
+    addu      a0, v0, offArrayObject_contents #  a0 <- newArray->contents
+    subu      rBIX, rBIX, 1                #  length--, check for neg
+    FETCH_ADVANCE_INST(3)                  #  advance to next instr, load rINST
+    bltz      rBIX, 2f                     #  was zero, bail
+
+    # copy values from registers into the array
+    # a0=array, a1=CCCC/FEDC, t0=length (from AA or B), rOBJ=AA/BA
+    move      t0, rBIX
+    .if $isrange
+    EAS2(a2, rFP, a1)                      #  a2 <- &fp[CCCC]
+1:
+    lw        a3, 0(a2)                    #  a3 <- *a2++
+    addu      a2, 4
+    subu      t0, t0, 1                    #  count--
+    sw        a3, (a0)                     #  *contents++ = vX
+    addu      a0, 4
+    bgez      t0, 1b
+
+    # continue at 2
+    .else
+    slt       t1, t0, 4                    #  length was initially 5?
+    and       a2, rOBJ, 15                 #  a2 <- A
+    bnez      t1, 1f                       #  <= 4 args, branch
+    GET_VREG(a3, a2)                       #  a3 <- vA
+    subu      t0, t0, 1                    #  count--
+    sw        a3, 16(a0)                   #  contents[4] = vA
+1:
+    and       a2, a1, 15                   #  a2 <- F/E/D/C
+    GET_VREG(a3, a2)                       #  a3 <- vF/vE/vD/vC
+    srl       a1, a1, 4                    #  a1 <- next reg in low 4
+    subu      t0, t0, 1                    #  count--
+    sw        a3, 0(a0)                    #  *contents++ = vX
+    addu      a0, a0, 4
+    bgez      t0, 1b
+    # continue at 2
+    .endif
+
+2:
+    lw        a0, offThread_retval(rSELF)  #  a0 <- object
+    lw        a1, (offThread_retval+4)(rSELF) #  a1 <- type
+    seq       t1, a1, 'I'                  #  Is int array?
+    bnez      t1, 3f
+    lw        a2, offThread_cardTable(rSELF) #  a2 <- card table base
+    srl       t3, a0, GC_CARD_SHIFT
+    addu      t2, a2, t3
+    sb        a2, (t2)
+3:
+    GET_INST_OPCODE(t0)                    #  ip <- opcode from rINST
+    GOTO_OPCODE(t0)                        #  execute it
+
+
+    /*
+     * Throw an exception indicating that we have not implemented this
+     * mode of filled-new-array.
+     */
+.L${opcode}_notimpl:
+    la        a0, .LstrFilledNewArrayNotImpl
+    JAL(dvmThrowInternalError)
+    b         common_exceptionThrown
+
+    /*
+     * Ideally we'd only define this once, but depending on layout we can
+     * exceed the range of the load above.
+     */
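The store at label 2 above is the GC card mark: after object references are written into the new array, the card covering the array is dirtied so a concurrent collector rescans it. Note the asm stores the card-table base register itself as the dirty value; Dalvik biases the base so its low byte is the dirty marker. A C sketch (the names and the shift value are assumptions mirroring the asm):

    #include <stdint.h>

    #define GC_CARD_SHIFT 7   /* assumed 128-byte cards, as in Dalvik */

    static void markCard(uint8_t *cardTableBase, const void *addr) {
        uint8_t *card = cardTableBase + ((uintptr_t)addr >> GC_CARD_SHIFT);
        *card = (uint8_t)(uintptr_t)cardTableBase;   /* low byte == dirty value */
    }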
diff --git a/vm/mterp/mips/OP_FILLED_NEW_ARRAY_JUMBO.S b/vm/mterp/mips/OP_FILLED_NEW_ARRAY_JUMBO.S
new file mode 100644
index 0000000..a546db2
--- /dev/null
+++ b/vm/mterp/mips/OP_FILLED_NEW_ARRAY_JUMBO.S
@@ -0,0 +1,95 @@
+%default { "isrange":"0" }
+%verify "executed"
+%verify "unimplemented array type"
+    /*
+     * Create a new array with elements filled from registers.
+     *
+     * TODO: convert most of this into a common subroutine, shared with
+     *       OP_FILLED_NEW_ARRAY.S.
+     */
+     /* filled-new-array/jumbo {vCCCC..v(CCCC+BBBB-1)}, type@AAAAAAAA */
+
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- pDvmDex
+    FETCH(a0, 1)                           #  a0 <- aaaa (lo)
+    FETCH(a1, 2)                           #  a1 <- AAAA (hi)
+    LOAD_base_offDvmDex_pResClasses(a3, a3) #  a3 <- pDvmDex->pResClasses
+    sll       a1, a1, 16
+    or        a1, a0, a1                   #  a1 <- AAAAaaaa
+    LOAD_eas2(a0, a3, a1)                  #  a0 <- resolved class
+    GET_OPA(rOBJ)                          #  rOBJ <- AA or BA
+    EXPORT_PC()                            #  need for resolve and alloc
+    # already resolved?
+    bnez      a0, .L${opcode}_continue     #  yes, continue on
+    LOAD_rSELF_method(a3)                  #  a3 <- self->method
+    li        a2, 0                        #  a2 <- false
+    LOAD_base_offMethod_clazz(a0, a3)      #  a0 <- method->clazz
+    JAL(dvmResolveClass)                   #  v0 <- call(clazz, ref)
+    move      a0, v0
+    # got null?
+    beqz      v0, common_exceptionThrown   #  yes, handle exception
+    b         .L${opcode}_continue
+%break
+
+    /*
+     * On entry:
+     *  a0 holds array class
+     *  rOBJ holds AA or BA
+     */
+.L${opcode}_continue:
+    LOAD_base_offClassObject_descriptor(a3, a0) #  a3 <- arrayClass->descriptor
+    li        a2, ALLOC_DONT_TRACK         #  a2 <- alloc flags
+    lbu       rINST, 1(a3)                 #  rINST <- descriptor[1]
+    FETCH(a1, 3)                           # a1<- BBBB (length)
+    seq       t0, rINST, 'I'               #  array of ints?
+    seq       t1, rINST, 'L'               #  array of objects?
+    or        t0, t1
+    seq       t1, rINST, '['               #  array of arrays?
+    or        t0, t1
+    move      rBIX, a1                     #  save length in rBIX
+    beqz      t0, .L${opcode}_notimpl      #  no, not handled yet
+    JAL(dvmAllocArrayByClass)              #  v0 <- call(arClass, length, flags)
+    # null return?
+    beqz      v0, common_exceptionThrown   #  alloc failed, handle exception
+
+    FETCH(a1, 4)                           #  a1 <- CCCC
+    sw        v0, offThread_retval(rSELF)  #  retval.l <- new array
+    sw        rINST, (offThread_retval+4)(rSELF) #  retval.h <- type
+    addu      a0, v0, offArrayObject_contents #  a0 <- newArray->contents
+    subu      rBIX, rBIX, 1                #  length--, check for neg
+    FETCH_ADVANCE_INST(5)                  #  advance to next instr, load rINST
+    bltz      rBIX, 2f                     #  was zero, bail
+
+    # copy values from registers into the array
+    # a0=array, a1=CCCC, t0=BBBB(length)
+    move      t0, rBIX
+    EAS2(a2, rFP, a1)                      #  a2 <- &fp[CCCC]
+1:
+    lw        a3, 0(a2)                    #  a3 <- *a2++
+    addu      a2, 4
+    subu      t0, t0, 1                    #  count--
+    sw        a3, (a0)                     #  *contents++ = vX
+    addu      a0, 4
+    bgez      t0, 1b
+
+2:
+    lw        a0, offThread_retval(rSELF)  #  a0 <- object
+    lw        a1, (offThread_retval+4)(rSELF) #  a1 <- type
+    seq       t1, a1, 'I'                  #  Is int array?
+    bnez      t1, 3f
+    lw        a2, offThread_cardTable(rSELF) #  a2 <- card table base
+    srl       t3, a0, GC_CARD_SHIFT
+    addu      t2, a2, t3
+    sb        a2, (t2)
+3:
+    GET_INST_OPCODE(t0)                    #  ip <- opcode from rINST
+    GOTO_OPCODE(t0)                        #  execute it
+
+
+    /*
+     * Throw an exception indicating that we have not implemented this
+     * mode of filled-new-array.
+     */
+.L${opcode}_notimpl:
+    la        a0, .LstrFilledNewArrayNotImpl
+    JAL(dvmThrowInternalError)
+    b         common_exceptionThrown
diff --git a/vm/mterp/mips/OP_FILLED_NEW_ARRAY_RANGE.S b/vm/mterp/mips/OP_FILLED_NEW_ARRAY_RANGE.S
new file mode 100644
index 0000000..9611796
--- /dev/null
+++ b/vm/mterp/mips/OP_FILLED_NEW_ARRAY_RANGE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_FILLED_NEW_ARRAY.S" { "isrange":"1" }
diff --git a/vm/mterp/mips/OP_FILL_ARRAY_DATA.S b/vm/mterp/mips/OP_FILL_ARRAY_DATA.S
new file mode 100644
index 0000000..7a97799
--- /dev/null
+++ b/vm/mterp/mips/OP_FILL_ARRAY_DATA.S
@@ -0,0 +1,16 @@
+%verify "executed"
+    /* fill-array-data vAA, +BBBBBBBB */
+    FETCH(a0, 1)                           #  a0 <- bbbb (lo)
+    FETCH(a1, 2)                           #  a1 <- BBBB (hi)
+    GET_OPA(a3)                            #  a3 <- AA
+    sll       a1, a1, 16                   #  a1 <- BBBB0000
+    or        a1, a0, a1                   #  a1 <- BBBBbbbb
+    GET_VREG(a0, a3)                       #  a0 <- vAA (array object)
+    EAS1(a1, rPC, a1)                      #  a1 <- PC + BBBBbbbb*2 (array data off.)
+    EXPORT_PC()
+    JAL(dvmInterpHandleFillArrayData)      #  fill the array with predefined data
+    # 0 means an exception is thrown
+    beqz      v0, common_exceptionThrown   #  has exception
+    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
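The EAS1 above scales the signed code-unit offset by two to reach the array-data payload. An equivalent C sketch (illustrative):

    #include <stdint.h>

    /* pc points at the fill-array-data instruction, in 16-bit code units */
    static const uint16_t *arrayDataPayload(const uint16_t *pc, int32_t offset) {
        return pc + offset;   /* u2* arithmetic already multiplies by 2 */
    }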
diff --git a/vm/mterp/mips/OP_FLOAT_TO_DOUBLE.S b/vm/mterp/mips/OP_FLOAT_TO_DOUBLE.S
new file mode 100644
index 0000000..1e2120d
--- /dev/null
+++ b/vm/mterp/mips/OP_FLOAT_TO_DOUBLE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/unflopWider.S" {"instr":"JAL(__extendsfdf2)", "instr_f":"cvt.d.s fv0, fa0"}
diff --git a/vm/mterp/mips/OP_FLOAT_TO_INT.S b/vm/mterp/mips/OP_FLOAT_TO_INT.S
new file mode 100644
index 0000000..166d685
--- /dev/null
+++ b/vm/mterp/mips/OP_FLOAT_TO_INT.S
@@ -0,0 +1,63 @@
+%verify "executed"
+%include "mips/unflop.S" {"instr":"b f2i_doconv", "instr_f":"b f2i_doconv"}
+%break
+
+/*
+ * Not a separate entry point; this conversion helper is used only by this handler.
+ */
+f2i_doconv:
+#ifdef SOFT_FLOAT
+    li        a1, 0x4f000000               #  (float)maxint
+    move      rBIX, a0
+    JAL(__gesf2)                           #  is arg >= maxint?
+    move      t0, v0
+    li        v0, ~0x80000000              #  return maxint (7fffffff)
+    bgez      t0, .L${opcode}_set_vreg
+
+    move      a0, rBIX                     #  recover arg
+    li        a1, 0xcf000000               #  (float)minint
+    JAL(__lesf2)
+
+    move      t0, v0
+    li        v0, 0x80000000               #  return minint (80000000)
+    blez      t0, .L${opcode}_set_vreg
+    move      a0, rBIX
+    move      a1, rBIX
+    JAL(__nesf2)
+
+    move      t0, v0
+    li        v0, 0                        #  return zero for NaN
+    bnez      t0, .L${opcode}_set_vreg
+
+    move      a0, rBIX
+    JAL(__fixsfsi)
+    b         .L${opcode}_set_vreg
+#else
+    l.s       fa1, .LFLOAT_TO_INT_max
+    c.ole.s   fcc0, fa1, fa0
+    l.s       fv0, .LFLOAT_TO_INT_ret_max
+    bc1t      .L${opcode}_set_vreg_f
+
+    l.s       fa1, .LFLOAT_TO_INT_min
+    c.ole.s   fcc0, fa0, fa1
+    l.s       fv0, .LFLOAT_TO_INT_ret_min
+    bc1t      .L${opcode}_set_vreg_f
+
+    mov.s     fa1, fa0
+    c.un.s    fcc0, fa0, fa1
+    li.s      fv0, 0
+    bc1t      .L${opcode}_set_vreg_f
+
+    trunc.w.s  fv0, fa0
+    b         .L${opcode}_set_vreg_f
+#endif
+
+.LFLOAT_TO_INT_max:
+    .word 0x4f000000
+.LFLOAT_TO_INT_min:
+    .word 0xcf000000
+.LFLOAT_TO_INT_ret_max:
+    .word 0x7fffffff
+.LFLOAT_TO_INT_ret_min:
+    .word 0x80000000
+
diff --git a/vm/mterp/mips/OP_FLOAT_TO_LONG.S b/vm/mterp/mips/OP_FLOAT_TO_LONG.S
new file mode 100644
index 0000000..3e76027
--- /dev/null
+++ b/vm/mterp/mips/OP_FLOAT_TO_LONG.S
@@ -0,0 +1,65 @@
+%verify "executed"
+%include "mips/unflopWider.S" {"instr":"b f2l_doconv", "instr_f":"b f2l_doconv", "st_result":"STORE64(rRESULT0, rRESULT1, rOBJ)"}
+%break
+
+f2l_doconv:
+#ifdef SOFT_FLOAT
+    li        a1, 0x5f000000
+    move      rBIX, a0
+    JAL(__gesf2)
+
+    move      t0, v0
+    li        rRESULT0, ~0
+    li        rRESULT1, ~0x80000000
+    bgez      t0, .L${opcode}_set_vreg
+
+    move      a0, rBIX
+    li        a1, 0xdf000000
+    JAL(__lesf2)
+
+    move      t0, v0
+    li        rRESULT0, 0
+    li        rRESULT1, 0x80000000
+    blez      t0, .L${opcode}_set_vreg
+
+    move      a0, rBIX
+    move      a1, rBIX
+    JAL(__nesf2)
+
+    move      t0, v0
+    li        rRESULT0, 0
+    li        rRESULT1, 0
+    bnez      t0, .L${opcode}_set_vreg
+
+    move      a0, rBIX
+    JAL(__fixsfdi)
+
+#else
+    l.s       fa1, .LLONG_TO_max
+    c.ole.s   fcc0, fa1, fa0
+    li        rRESULT0, ~0
+    li        rRESULT1, ~0x80000000
+    bc1t      .L${opcode}_set_vreg
+
+    l.s       fa1, .LLONG_TO_min
+    c.ole.s   fcc0, fa0, fa1
+    li        rRESULT0, 0
+    li        rRESULT1, 0x80000000
+    bc1t      .L${opcode}_set_vreg
+
+    mov.s     fa1, fa0
+    c.un.s    fcc0, fa0, fa1
+    li        rRESULT0, 0
+    li        rRESULT1, 0
+    bc1t      .L${opcode}_set_vreg
+
+    JAL(__fixsfdi)
+#endif
+
+    b         .L${opcode}_set_vreg
+
+.LLONG_TO_max:
+    .word 0x5f000000
+
+.LLONG_TO_min:
+    .word 0xdf000000
diff --git a/vm/mterp/mips/OP_GOTO.S b/vm/mterp/mips/OP_GOTO.S
new file mode 100644
index 0000000..27c20e3
--- /dev/null
+++ b/vm/mterp/mips/OP_GOTO.S
@@ -0,0 +1,23 @@
+%verify "executed"
+%verify "forward and backward"
+    /*
+     * Unconditional branch, 8-bit offset.
+     *
+     * The branch distance is a signed code-unit offset, which we need to
+     * double to get a byte offset.
+     */
+    /* goto +AA */
+    sll       a0, rINST, 16                #  a0 <- AAxx0000
+    sra       a1, a0, 24                   #  a1 <- ssssssAA (sign-extended)
+    addu      a2, a1, a1                   #  a2 <- byte offset
+    /* If backward branch, refresh rIBASE */
+    bgez      a1, 1f
+    lw        rIBASE, offThread_curHandlerTable(rSELF) #  refresh handler base
+1:
+    FETCH_ADVANCE_INST_RB(a2)              #  update rPC, load rINST
+#if defined(WITH_JIT)
+    lw        a0, offThread_pJitProfTable(rSELF)
+    bltz      a1, common_testUpdateProfile #  (a0) check for trace hotness
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
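The sll/sra pair above isolates and sign-extends the AA byte of rINST, and the self-add doubles it into a byte offset. In C (a sketch only; the shift-based sign extension is implementation-defined but universal on two's-complement targets):

    #include <stdint.h>

    static int32_t gotoByteOffset(uint16_t inst) {
        int32_t aa = (int32_t)((uint32_t)inst << 16) >> 24;   /* ssssssAA */
        return aa + aa;   /* code-unit offset -> byte offset */
    }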
diff --git a/vm/mterp/mips/OP_GOTO_16.S b/vm/mterp/mips/OP_GOTO_16.S
new file mode 100644
index 0000000..22c29da
--- /dev/null
+++ b/vm/mterp/mips/OP_GOTO_16.S
@@ -0,0 +1,21 @@
+%verify "executed"
+%verify "forward and backward"
+    /*
+     * Unconditional branch, 16-bit offset.
+     *
+     * The branch distance is a signed code-unit offset, which we need to
+     * double to get a byte offset.
+     */
+    /* goto/16 +AAAA */
+    FETCH_S(a0, 1)                         #  a0 <- ssssAAAA (sign-extended)
+    addu      a1, a0, a0                   #  a1 <- byte offset, flags set
+    FETCH_ADVANCE_INST_RB(a1)              #  update rPC, load rINST
+    bgez      a1, 1f
+    lw        rIBASE, offThread_curHandlerTable(rSELF) #  refresh handler base
+1:
+#if defined(WITH_JIT)
+    lw        a0, offThread_pJitProfTable(rSELF)
+    bltz      a1, common_testUpdateProfile #  (a0) hot trace head?
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/vm/mterp/mips/OP_GOTO_32.S b/vm/mterp/mips/OP_GOTO_32.S
new file mode 100644
index 0000000..84598c2
--- /dev/null
+++ b/vm/mterp/mips/OP_GOTO_32.S
@@ -0,0 +1,32 @@
+%verify "executed"
+%verify "forward, backward, self"
+    /*
+     * Unconditional branch, 32-bit offset.
+     *
+     * The branch distance is a signed code-unit offset, which we need to
+     * double to get a byte offset.
+     *
+     * Unlike most opcodes, this one is allowed to branch to itself, so
+     * our "backward branch" test must be "<=0" instead of "<0".
+     */
+    /* goto/32 +AAAAAAAA */
+    FETCH(a0, 1)                           #  a0 <- aaaa (lo)
+    FETCH(a1, 2)                           #  a1 <- AAAA (hi)
+    sll       a1, a1, 16
+    or        a0, a0, a1                   #  a0 <- AAAAaaaa
+    addu      a1, a0, a0                   #  a1 <- byte offset
+#if defined(WITH_JIT)
+    lw        a0, offThread_pJitProfTable(rSELF)
+    bgtz      a1, 1f
+    lw        rIBASE, offThread_curHandlerTable(rSELF) #  refresh handler base
+1:
+    FETCH_ADVANCE_INST_RB(a1)              #  update rPC, load rINST
+    blez      a1, common_testUpdateProfile # (a0) hot trace head?
+#else
+    FETCH_ADVANCE_INST_RB(a1)              #  update rPC, load rINST
+    bgtz      a0, 2f
+    lw        rIBASE, offThread_curHandlerTable(rSELF) #  refresh handler base
+2:
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/vm/mterp/mips/OP_IF_EQ.S b/vm/mterp/mips/OP_IF_EQ.S
new file mode 100644
index 0000000..183ec1b
--- /dev/null
+++ b/vm/mterp/mips/OP_IF_EQ.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/bincmp.S" { "revcmp":"ne" }
diff --git a/vm/mterp/mips/OP_IF_EQZ.S b/vm/mterp/mips/OP_IF_EQZ.S
new file mode 100644
index 0000000..5587291
--- /dev/null
+++ b/vm/mterp/mips/OP_IF_EQZ.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/zcmp.S" { "revcmp":"ne" }
diff --git a/vm/mterp/mips/OP_IF_GE.S b/vm/mterp/mips/OP_IF_GE.S
new file mode 100644
index 0000000..19bc86f
--- /dev/null
+++ b/vm/mterp/mips/OP_IF_GE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/bincmp.S" { "revcmp":"lt" }
diff --git a/vm/mterp/mips/OP_IF_GEZ.S b/vm/mterp/mips/OP_IF_GEZ.S
new file mode 100644
index 0000000..5d4fa0f
--- /dev/null
+++ b/vm/mterp/mips/OP_IF_GEZ.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/zcmp.S" { "revcmp":"lt" }
diff --git a/vm/mterp/mips/OP_IF_GT.S b/vm/mterp/mips/OP_IF_GT.S
new file mode 100644
index 0000000..8335bd3
--- /dev/null
+++ b/vm/mterp/mips/OP_IF_GT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/bincmp.S" { "revcmp":"le" }
diff --git a/vm/mterp/mips/OP_IF_GTZ.S b/vm/mterp/mips/OP_IF_GTZ.S
new file mode 100644
index 0000000..3c70c35
--- /dev/null
+++ b/vm/mterp/mips/OP_IF_GTZ.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/zcmp.S" { "revcmp":"le" }
diff --git a/vm/mterp/mips/OP_IF_LE.S b/vm/mterp/mips/OP_IF_LE.S
new file mode 100644
index 0000000..c1524f9
--- /dev/null
+++ b/vm/mterp/mips/OP_IF_LE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/bincmp.S" { "revcmp":"gt" }
diff --git a/vm/mterp/mips/OP_IF_LEZ.S b/vm/mterp/mips/OP_IF_LEZ.S
new file mode 100644
index 0000000..fa930aa
--- /dev/null
+++ b/vm/mterp/mips/OP_IF_LEZ.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/zcmp.S" { "revcmp":"gt" }
diff --git a/vm/mterp/mips/OP_IF_LT.S b/vm/mterp/mips/OP_IF_LT.S
new file mode 100644
index 0000000..fbda8bc
--- /dev/null
+++ b/vm/mterp/mips/OP_IF_LT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/bincmp.S" { "revcmp":"ge" }
diff --git a/vm/mterp/mips/OP_IF_LTZ.S b/vm/mterp/mips/OP_IF_LTZ.S
new file mode 100644
index 0000000..e93dd62
--- /dev/null
+++ b/vm/mterp/mips/OP_IF_LTZ.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/zcmp.S" { "revcmp":"ge" }
diff --git a/vm/mterp/mips/OP_IF_NE.S b/vm/mterp/mips/OP_IF_NE.S
new file mode 100644
index 0000000..c484ede
--- /dev/null
+++ b/vm/mterp/mips/OP_IF_NE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/bincmp.S" { "revcmp":"eq" }
diff --git a/vm/mterp/mips/OP_IF_NEZ.S b/vm/mterp/mips/OP_IF_NEZ.S
new file mode 100644
index 0000000..24cbb6b
--- /dev/null
+++ b/vm/mterp/mips/OP_IF_NEZ.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/zcmp.S" { "revcmp":"eq" }
diff --git a/vm/mterp/mips/OP_IGET.S b/vm/mterp/mips/OP_IGET.S
new file mode 100644
index 0000000..ba4fada
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET.S
@@ -0,0 +1,49 @@
+%default { "load":"lw", "barrier":"     # noop", "sqnum":"0" }
+%verify "executed"
+%verify "null object"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * General 32-bit instance field get.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    # op vA, vB, field                     /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .L${opcode}_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # test results
+    move      a0, v0
+    bnez      v0, .L${opcode}_finish
+    b         common_exceptionThrown
+%break
+
+    /*
+     * Currently:
+     *  v0 holds resolved field
+     *  rOBJ holds object (caller saved)
+     */
+.L${opcode}_finish:
+    #BAL(common_squeak${sqnum})
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    addu      a3, a3, rOBJ                 #  form address
+    $load a0, (a3)                         #  a0 <- obj.field (8/16/32 bits)
+    $barrier                               #  acquiring load
+    GET_OPA4(a2)                           #  a2 <- A+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG(a0, a2)                       #  fp[A] <- a0
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
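The fast path above is a cached lookup: pResFields[CCCC] holds the resolved InstField after the first execution, so dvmResolveInstField runs at most once per site. Once resolved, the load is plain base-plus-offset; a hedged C sketch (the struct and field names are assumptions, not the VM headers):

    #include <stdint.h>

    struct InstField { int byteOffset; /* ... */ };

    static uint32_t igetCore(const char *obj, const struct InstField *f) {
        /* the handler branches to common_errNullObject when obj is NULL */
        return *(const uint32_t *)(obj + f->byteOffset);
    }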
diff --git a/vm/mterp/mips/OP_IGET_BOOLEAN.S b/vm/mterp/mips/OP_IGET_BOOLEAN.S
new file mode 100644
index 0000000..4f32dbf
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_BOOLEAN.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IGET.S"
diff --git a/vm/mterp/mips/OP_IGET_BOOLEAN_JUMBO.S b/vm/mterp/mips/OP_IGET_BOOLEAN_JUMBO.S
new file mode 100644
index 0000000..1bb6233
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_BOOLEAN_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IGET_JUMBO.S"
diff --git a/vm/mterp/mips/OP_IGET_BYTE.S b/vm/mterp/mips/OP_IGET_BYTE.S
new file mode 100644
index 0000000..f699e87
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_BYTE.S
@@ -0,0 +1,3 @@
+%verify "executed"
+%verify "negative value is sign-extended"
+%include "mips/OP_IGET.S"
diff --git a/vm/mterp/mips/OP_IGET_BYTE_JUMBO.S b/vm/mterp/mips/OP_IGET_BYTE_JUMBO.S
new file mode 100644
index 0000000..a59ee92
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_BYTE_JUMBO.S
@@ -0,0 +1,3 @@
+%verify "executed"
+%verify "negative value is sign-extended"
+%include "mips/OP_IGET_JUMBO.S"
diff --git a/vm/mterp/mips/OP_IGET_CHAR.S b/vm/mterp/mips/OP_IGET_CHAR.S
new file mode 100644
index 0000000..cb3a03b
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_CHAR.S
@@ -0,0 +1,3 @@
+%verify "executed"
+%verify "large values are not sign-extended"
+%include "mips/OP_IGET.S"
diff --git a/vm/mterp/mips/OP_IGET_CHAR_JUMBO.S b/vm/mterp/mips/OP_IGET_CHAR_JUMBO.S
new file mode 100644
index 0000000..408daca
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_CHAR_JUMBO.S
@@ -0,0 +1,3 @@
+%verify "executed"
+%verify "large values are not sign-extended"
+%include "mips/OP_IGET_JUMBO.S"
diff --git a/vm/mterp/mips/OP_IGET_JUMBO.S b/vm/mterp/mips/OP_IGET_JUMBO.S
new file mode 100644
index 0000000..49920b9
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_JUMBO.S
@@ -0,0 +1,55 @@
+%default { "load":"lw", "barrier":"     # noop", "sqnum":"0" }
+%verify "executed"
+%verify "null object"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(a1, 1)                           # a1<- aaaa (lo)
+    FETCH(a2, 2)                           # a2<- AAAA (hi)
+    FETCH(a0, 4)                           # a0<- CCCC
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    sll       a2, a2, 16
+    or        a1, a1, a2                   #  a1 <- AAAAaaaa
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[CCCC], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .L${opcode}_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    b         .L${opcode}_resolved         #  resolved, continue
+
+%break
+
+.L${opcode}_resolved:
+     # test results
+    move      a0, v0
+    beqz      a0, common_exceptionThrown
+    /*
+     * Currently:
+     *  v0 holds resolved field
+     *  rOBJ holds object (caller saved)
+     */
+.L${opcode}_finish:
+    #BAL(common_squeak${sqnum})
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    addu      a3, a3, rOBJ                 #  form address
+    $load a0, (a3)                         #  a0 <- obj.field (8/16/32 bits)
+    $barrier                               #  acquiring load
+    FETCH(a2, 3)                           #  a2<- BBBB
+    FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
+    SET_VREG(a0, a2)                       #  fp[BBBB]<- a0
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
diff --git a/vm/mterp/mips/OP_IGET_OBJECT.S b/vm/mterp/mips/OP_IGET_OBJECT.S
new file mode 100644
index 0000000..4f32dbf
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_OBJECT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IGET.S"
diff --git a/vm/mterp/mips/OP_IGET_OBJECT_JUMBO.S b/vm/mterp/mips/OP_IGET_OBJECT_JUMBO.S
new file mode 100644
index 0000000..1bb6233
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_OBJECT_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IGET_JUMBO.S"
diff --git a/vm/mterp/mips/OP_IGET_OBJECT_QUICK.S b/vm/mterp/mips/OP_IGET_OBJECT_QUICK.S
new file mode 100644
index 0000000..e4f7d00
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_OBJECT_QUICK.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IGET_QUICK.S"
diff --git a/vm/mterp/mips/OP_IGET_OBJECT_VOLATILE.S b/vm/mterp/mips/OP_IGET_OBJECT_VOLATILE.S
new file mode 100644
index 0000000..30c6774
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_OBJECT_VOLATILE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IGET.S" {"barrier":"SMP_DMB"}
diff --git a/vm/mterp/mips/OP_IGET_OBJECT_VOLATILE_JUMBO.S b/vm/mterp/mips/OP_IGET_OBJECT_VOLATILE_JUMBO.S
new file mode 100644
index 0000000..00bab92
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_OBJECT_VOLATILE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IGET_OBJECT_JUMBO.S" {"barrier":"SMP_DMB"}
diff --git a/vm/mterp/mips/OP_IGET_QUICK.S b/vm/mterp/mips/OP_IGET_QUICK.S
new file mode 100644
index 0000000..4490796
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_QUICK.S
@@ -0,0 +1,17 @@
+%verify "executed"
+%verify "null object"
+    /* For: iget-quick, iget-object-quick */
+    # op vA, vB, offset                    /* CCCC */
+    GET_OPB(a2)                            #  a2 <- B
+    GET_VREG(a3, a2)                       #  a3 <- object we're operating on
+    FETCH(a1, 1)                           #  a1 <- field byte offset
+    GET_OPA4(a2)                           #  a2 <- A(+)
+    # check object for null
+    beqz      a3, common_errNullObject     #  object was null
+    addu      t0, a3, a1                   #  t0 <- obj + field byte offset
+    lw        a0, 0(t0)                    #  a0 <- obj.field (always 32 bits)
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG(a0, a2)                       #  fp[A] <- a0
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
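iget-quick skips resolution entirely: dexopt has already rewritten the instruction so the CCCC operand is the field's byte offset. A sketch under that assumption:

    #include <stdint.h>

    static uint32_t igetQuick(const char *obj, uint16_t byteOffset) {
        return *(const uint32_t *)(obj + byteOffset);   /* always 32 bits */
    }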
diff --git a/vm/mterp/mips/OP_IGET_SHORT.S b/vm/mterp/mips/OP_IGET_SHORT.S
new file mode 100644
index 0000000..f699e87
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_SHORT.S
@@ -0,0 +1,3 @@
+%verify "executed"
+%verify "negative value is sign-extended"
+%include "mips/OP_IGET.S"
diff --git a/vm/mterp/mips/OP_IGET_SHORT_JUMBO.S b/vm/mterp/mips/OP_IGET_SHORT_JUMBO.S
new file mode 100644
index 0000000..a59ee92
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_SHORT_JUMBO.S
@@ -0,0 +1,3 @@
+%verify "executed"
+%verify "negative value is sign-extended"
+%include "mips/OP_IGET_JUMBO.S"
diff --git a/vm/mterp/mips/OP_IGET_VOLATILE.S b/vm/mterp/mips/OP_IGET_VOLATILE.S
new file mode 100644
index 0000000..30c6774
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_VOLATILE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IGET.S" {"barrier":"SMP_DMB"}
diff --git a/vm/mterp/mips/OP_IGET_VOLATILE_JUMBO.S b/vm/mterp/mips/OP_IGET_VOLATILE_JUMBO.S
new file mode 100644
index 0000000..ed06737
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_VOLATILE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IGET_JUMBO.S" {"barrier":"SMP_DMB"}
diff --git a/vm/mterp/mips/OP_IGET_WIDE.S b/vm/mterp/mips/OP_IGET_WIDE.S
new file mode 100644
index 0000000..2cdf80c
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_WIDE.S
@@ -0,0 +1,49 @@
+%default {"volatile":"0"}
+%verify "executed"
+%verify "null object"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * General 64-bit instance field get.
+     */
+    # iget-wide vA, vB, field              /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .L${opcode}_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # test return code
+    move      a0, v0
+    bnez      v0, .L${opcode}_finish
+    b         common_exceptionThrown
+%break
+
+    /*
+     * Currently:
+     *  a0   holds resolved field
+     *  rOBJ holds object
+     */
+.L${opcode}_finish:
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    beqz      rOBJ, common_errNullObject   #  object was null
+    GET_OPA4(a2)                           #  a2 <- A+
+    addu      rOBJ, rOBJ, a3               #  form address
+    .if $volatile
+    vLOAD64(a0, a1, rOBJ)                  #  a0/a1 <- obj.field (64-bit align ok)
+    .else
+    LOAD64(a0, a1, rOBJ)                   #  a0/a1 <- obj.field (64-bit align ok)
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    EAS2(a3, rFP, a2)                      #  a3 <- &fp[A]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(a0, a1, a3)                    #  fp[A] <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
diff --git a/vm/mterp/mips/OP_IGET_WIDE_JUMBO.S b/vm/mterp/mips/OP_IGET_WIDE_JUMBO.S
new file mode 100644
index 0000000..97819a7
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_WIDE_JUMBO.S
@@ -0,0 +1,57 @@
+%default {"volatile":"0"}
+%verify "executed"
+%verify "null object"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * Jumbo 64-bit instance field get.
+     */
+    /* iget-wide/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(a1, 1)                           # a1<- aaaa (lo)
+    FETCH(a2, 2)                           # a2<- AAAA (hi)
+    FETCH(a0, 4)                           # a0<- CCCC
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    sll       a2, a2, 16
+    or        a1, a1, a2                   #  a1 <- AAAAaaaa
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[CCCC], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .L${opcode}_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    b       .L${opcode}_resolved           # resolved, continue
+
+%break
+
+.L${opcode}_resolved:
+    # test return code
+    move      a0, v0
+    bnez      v0, .L${opcode}_finish
+    b         common_exceptionThrown
+
+    /*
+     * Currently:
+     *  a0   holds resolved field
+     *  rOBJ holds object
+     */
+.L${opcode}_finish:
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    beqz      rOBJ, common_errNullObject   #  object was null
+    GET_OPA4(a2)                           #  a2 <- A+
+    addu      rOBJ, rOBJ, a3               #  form address
+    .if $volatile
+    vLOAD64(a0, a1, rOBJ)                  #  a0/a1 <- obj.field (64-bit align ok)
+    .else
+    LOAD64(a0, a1, rOBJ)                   #  a0/a1 <- obj.field (64-bit align ok)
+    .endif
+    FETCH(a2, 3)                           #  a2 <- BBBB
+    FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
+    EAS2(a3, rFP, a2)                      #  a3 <- &fp[BBBB]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(a0, a1, a3)                    #  fp[BBBB] <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
diff --git a/vm/mterp/mips/OP_IGET_WIDE_QUICK.S b/vm/mterp/mips/OP_IGET_WIDE_QUICK.S
new file mode 100644
index 0000000..f4d8fdb
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_WIDE_QUICK.S
@@ -0,0 +1,17 @@
+%verify "executed"
+%verify "null object"
+    # iget-wide-quick vA, vB, offset       /* CCCC */
+    GET_OPB(a2)                            #  a2 <- B
+    GET_VREG(a3, a2)                       #  a3 <- object we're operating on
+    FETCH(a1, 1)                           #  a1 <- field byte offset
+    GET_OPA4(a2)                           #  a2 <- A(+)
+    # check object for null
+    beqz      a3, common_errNullObject     #  object was null
+    addu      t0, a3, a1                   #  t0 <- a3 + a1
+    LOAD64(a0, a1, t0)                     #  a0 <- obj.field (64 bits, aligned)
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    EAS2(a3, rFP, a2)
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(a0, a1, a3)                    #  fp[A] <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
diff --git a/vm/mterp/mips/OP_IGET_WIDE_VOLATILE.S b/vm/mterp/mips/OP_IGET_WIDE_VOLATILE.S
new file mode 100644
index 0000000..1804fb1
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_WIDE_VOLATILE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IGET_WIDE.S" {"volatile":"1"}
diff --git a/vm/mterp/mips/OP_IGET_WIDE_VOLATILE_JUMBO.S b/vm/mterp/mips/OP_IGET_WIDE_VOLATILE_JUMBO.S
new file mode 100644
index 0000000..801aa84
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_WIDE_VOLATILE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IGET_WIDE_JUMBO.S" {"volatile":"1"}
diff --git a/vm/mterp/mips/OP_INSTANCE_OF.S b/vm/mterp/mips/OP_INSTANCE_OF.S
new file mode 100644
index 0000000..f296d44
--- /dev/null
+++ b/vm/mterp/mips/OP_INSTANCE_OF.S
@@ -0,0 +1,82 @@
+%verify "executed"
+%verify "null object"
+%verify "class cast exception thrown, with correct class name"
+%verify "class cast exception not thrown on same class"
+%verify "class cast exception not thrown on subclass"
+%verify "class not resolved"
+%verify "class already resolved"
+    /*
+     * Check to see if an object reference is an instance of a class.
+     *
+     * Most common situation is a non-null object, being compared against
+     * an already-resolved class.
+     */
+    # instance-of vA, vB, class            /* CCCC */
+    GET_OPB(a3)                            #  a3 <- B
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_VREG(a0, a3)                       #  a0 <- vB (object)
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- pDvmDex
+    # is object null?
+    beqz      a0, .L${opcode}_store        #  null obj, not an instance, store a0
+    FETCH(a3, 1)                           #  a3 <- CCCC
+    LOAD_base_offDvmDex_pResClasses(a2, a2) #  a2 <- pDvmDex->pResClasses
+    LOAD_eas2(a1, a2, a3)                  #  a1 <- resolved class
+    LOAD_base_offObject_clazz(a0, a0)      #  a0 <- obj->clazz
+    # have we resolved this before?
+    beqz      a1, .L${opcode}_resolve      #  not resolved, do it now
+.L${opcode}_resolved:                   #  a0=obj->clazz, a1=resolved class
+    # same class (trivial success)?
+    beq       a0, a1, .L${opcode}_trivial  #  yes, trivial finish
+    b         .L${opcode}_fullcheck        #  no, do full check
+
+    /*
+     * Trivial test succeeded, save and bail.
+     *  rOBJ holds A
+     */
+.L${opcode}_trivial:
+    li        a0, 1                        #  indicate success
+    # fall thru
+    /*
+     * a0   holds boolean result
+     * rOBJ holds A
+     */
+.L${opcode}_store:
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    SET_VREG(a0, rOBJ)                     #  vA <- a0
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+%break
+
+    /*
+     * Trivial test failed, need to perform full check.  This is common.
+     *  a0   holds obj->clazz
+     *  a1   holds class resolved from CCCC
+     *  rOBJ holds A
+     */
+.L${opcode}_fullcheck:
+    JAL(dvmInstanceofNonTrivial)           #  v0 <- boolean result
+    move      a0, v0                       #  a0 <- boolean result
+    b         .L${opcode}_store
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     *  a3   holds CCCC
+     *  rOBJ holds A
+     */
+.L${opcode}_resolve:
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_rSELF_method(a0)                  #  a0 <- self->method
+    move      a1, a3                       #  a1 <- CCCC
+    li        a2, 1                        #  a2 <- true
+    LOAD_base_offMethod_clazz(a0, a0)      #  a0 <- method->clazz
+    JAL(dvmResolveClass)                   #  v0 <- resolved ClassObject ptr
+    # got null?
+    move      a1, v0                       #  a1 <- class resolved from CCCC
+    beqz      v0, common_exceptionThrown   #  yes, handle exception
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a0, a3)                       #  a0 <- vB (object)
+    LOAD_base_offObject_clazz(a0, a0)      #  a0 <- obj->clazz
+    b         .L${opcode}_resolved         #  pick up where we left off
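+    /*
+     * C-like sketch of the whole handler (illustration only; cls is the
+     * class resolved from the constant pool index):
+     *
+     *   if (obj == NULL)            vA = 0;
+     *   else if (obj->clazz == cls) vA = 1;     // trivial success
+     *   else vA = dvmInstanceofNonTrivial(obj->clazz, cls);
+     */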
+
diff --git a/vm/mterp/mips/OP_INSTANCE_OF_JUMBO.S b/vm/mterp/mips/OP_INSTANCE_OF_JUMBO.S
new file mode 100644
index 0000000..c55a30c
--- /dev/null
+++ b/vm/mterp/mips/OP_INSTANCE_OF_JUMBO.S
@@ -0,0 +1,96 @@
+%verify "executed"
+%verify "null object"
+%verify "class cast exception thrown, with correct class name"
+%verify "class cast exception not thrown on same class"
+%verify "class cast exception not thrown on subclass"
+%verify "class not resolved"
+%verify "class already resolved"
+    /*
+     * Check to see if an object reference is an instance of a class.
+     *
+     * Most common situation is a non-null object, being compared against
+     * an already-resolved class.
+     *
+     * TODO: convert most of this into a common subroutine, shared with
+     *       OP_INSTANCE_OF.S.
+     */
+    /* instance-of/jumbo vBBBB, vCCCC, class@AAAAAAAA */
+    FETCH(a3, 4)                           # a3<- vCCCC
+    FETCH(rOBJ, 3)                         # rOBJ<- vBBBB
+    GET_VREG(a0, a3)                       #  a0 <- vCCCC (object)
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- pDvmDex
+    # is object null?
+    beqz      a0, .L${opcode}_store        #  null obj, not an instance, store a0
+    FETCH(a1, 1)                           #  a1 <- aaaa (lo)
+    FETCH(a3, 2)                           #  a3 <- AAAA (hi)
+    LOAD_base_offDvmDex_pResClasses(a2, a2) #  a2 <- pDvmDex->pResClasses
+    sll     a3,a3,16
+    or      a3, a1, a3                     # a3<- AAAAaaaa
+
+    LOAD_eas2(a1, a2, a3)                  #  a1 <- resolved class
+    LOAD_base_offObject_clazz(a0, a0)      #  a0 <- obj->clazz
+    # have we resolved this before?
+    beqz      a1, .L${opcode}_resolve      #  not resolved, do it now
+    b       .L${opcode}_resolved           # resolved, continue
+
+%break
+
+    /*
+     * Class resolved, determine type of check necessary.  This is common.
+     *  a0   holds obj->clazz
+     *  a1   holds class resolved from AAAAAAAA
+     *  rOBJ holds BBBB
+     */
+
+.L${opcode}_resolved:                   #  a0=obj->clazz, a1=resolved class
+    # same class (trivial success)?
+    beq       a0, a1, .L${opcode}_trivial  #  yes, trivial finish
+    # fall through to ${opcode}_fullcheck
+
+    /*
+     * Trivial test failed, need to perform full check.  This is common.
+     *  a0 holds obj->clazz
+     *  a1 holds class resolved from AAAAAAAA
+     *  rOBJ holds BBBB
+     */
+.L${opcode}_fullcheck:
+    JAL(dvmInstanceofNonTrivial)           #  v0 <- boolean result
+    move      a0, v0
+    b         .L${opcode}_store            #  go to ${opcode}_store
+
+.L${opcode}_trivial:
+    li        a0, 1                        #  indicate success
+    # fall thru
+    /*
+     * a0   holds boolean result
+     * rOBJ holds BBBB
+     */
+.L${opcode}_store:
+    FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
+    SET_VREG(a0, rOBJ)                     #  vBBBB <- a0
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     *  a3   holds AAAAAAAA
+     *  rOBJ holds BBBB
+     */
+.L${opcode}_resolve:
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_rSELF_method(a0)                  #  a0 <- self->method
+    move      a1, a3                       #  a1 <- AAAAAAAA
+    li        a2, 1                        #  a2 <- true
+    LOAD_base_offMethod_clazz(a0, a0)      #  a0 <- method->clazz
+    JAL(dvmResolveClass)                   #  v0 <- resolved ClassObject ptr
+    # got null?
+    move      a1, v0                       #  a1 <- class resolved from AAAAAAAA
+    beqz      v0, common_exceptionThrown   #  yes, handle exception
+    FETCH(a3, 4)                           #  a3 <- vCCCC
+
+    GET_VREG(a0, a3)                       #  a0 <- vCCCC (object)
+    LOAD_base_offObject_clazz(a0, a0)      #  a0 <- obj->clazz
+    b         .L${opcode}_resolved         #  pick up where we left off
+
diff --git a/vm/mterp/mips/OP_INT_TO_BYTE.S b/vm/mterp/mips/OP_INT_TO_BYTE.S
new file mode 100644
index 0000000..e9edb97
--- /dev/null
+++ b/vm/mterp/mips/OP_INT_TO_BYTE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/unop.S" {"preinstr":"sll a0, a0, 24", "instr":"sra a0, a0, 24"}
diff --git a/vm/mterp/mips/OP_INT_TO_CHAR.S b/vm/mterp/mips/OP_INT_TO_CHAR.S
new file mode 100644
index 0000000..5da74da
--- /dev/null
+++ b/vm/mterp/mips/OP_INT_TO_CHAR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/unop.S" {"preinstr":"", "instr":"and a0, 0xffff"}
diff --git a/vm/mterp/mips/OP_INT_TO_DOUBLE.S b/vm/mterp/mips/OP_INT_TO_DOUBLE.S
new file mode 100644
index 0000000..5ee4813
--- /dev/null
+++ b/vm/mterp/mips/OP_INT_TO_DOUBLE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/unflopWider.S" {"instr":"JAL(__floatsidf)", "instr_f":"cvt.d.w fv0, fa0"}
diff --git a/vm/mterp/mips/OP_INT_TO_FLOAT.S b/vm/mterp/mips/OP_INT_TO_FLOAT.S
new file mode 100644
index 0000000..9cf7c48
--- /dev/null
+++ b/vm/mterp/mips/OP_INT_TO_FLOAT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/unflop.S" {"instr":"JAL(__floatsisf)", "instr_f":"cvt.s.w fv0, fa0"}
diff --git a/vm/mterp/mips/OP_INT_TO_LONG.S b/vm/mterp/mips/OP_INT_TO_LONG.S
new file mode 100644
index 0000000..5691ea5
--- /dev/null
+++ b/vm/mterp/mips/OP_INT_TO_LONG.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/unopWider.S" {"instr":"sra a1, a0, 31"}
diff --git a/vm/mterp/mips/OP_INT_TO_SHORT.S b/vm/mterp/mips/OP_INT_TO_SHORT.S
new file mode 100644
index 0000000..d1fc349
--- /dev/null
+++ b/vm/mterp/mips/OP_INT_TO_SHORT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/unop.S" {"preinstr":"sll a0, 16", "instr":"sra a0, 16"}
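+# Note (sketch): mterp substitutes the arguments above into the unop.S
+# template, so the core of the expanded handler is just:
+#
+#   sll a0, 16          # preinstr: shift the low 16 bits up...
+#   sra a0, 16          # instr: ...then arithmetic-shift back to sign-extend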
diff --git a/vm/mterp/mips/OP_INVOKE_DIRECT.S b/vm/mterp/mips/OP_INVOKE_DIRECT.S
new file mode 100644
index 0000000..9bbf334
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_DIRECT.S
@@ -0,0 +1,42 @@
+%default { "isrange":"0", "routine":"NoRange" }
+%verify "executed"
+%verify "unknown method"
+    /*
+     * Handle a direct method call.
+     *
+     * (We could defer the "is 'this' pointer null" test to the common
+     * method invocation code, and use a flag to indicate that static
+     * calls don't count.  If we do this as part of copying the arguments
+     * out we could avoid loading the first arg twice.)
+     *
+     * for: invoke-direct, invoke-direct/range
+     */
+    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
+    # op {vCCCC..v(CCCC+AA-1)}, meth       /* BBBB */
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- pDvmDex
+    FETCH(a1, 1)                           #  a1 <- BBBB
+    LOAD_base_offDvmDex_pResMethods(a3, a3) #  a3 <- pDvmDex->pResMethods
+    FETCH(rBIX, 2)                         #  rBIX <- GFED or CCCC
+    LOAD_eas2(a0, a3, a1)                  #  a0 <- resolved methodToCall
+    .if (!$isrange)
+    and       rBIX, rBIX, 15               #  rBIX <- D (or stays CCCC)
+    .endif
+    EXPORT_PC()                            #  must export for invoke
+    GET_VREG(rOBJ, rBIX)                   #  rOBJ <- "this" ptr
+    # already resolved?
+    bnez      a0, 1f                       #  resolved, call the function
+
+    lw        a3, offThread_method(rSELF)  #  a3 <- self->method
+    LOAD_base_offMethod_clazz(a0, a3)      #  a0 <- method->clazz
+    li        a2, METHOD_DIRECT            #  resolver method type
+    JAL(dvmResolveMethod)                  #  v0 <- call(clazz, ref, flags)
+    move      a0, v0
+    # got null?
+    beqz      v0, common_exceptionThrown   #  yes, handle exception
+
+1:
+    bnez      rOBJ, common_invokeMethod${routine} #  a0=method, rOBJ="this"
+    b         common_errNullObject         #  yes, throw exception
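+    /*
+     * Flow summary in C-like pseudocode (illustration only):
+     *
+     *   meth = pDvmDex->pResMethods[BBBB];
+     *   if (meth == NULL)
+     *       meth = dvmResolveMethod(method->clazz, BBBB, METHOD_DIRECT);
+     *   if (this == NULL) throw NullPointerException;
+     *   invoke(meth, this);
+     */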
+
+
+
diff --git a/vm/mterp/mips/OP_INVOKE_DIRECT_JUMBO.S b/vm/mterp/mips/OP_INVOKE_DIRECT_JUMBO.S
new file mode 100644
index 0000000..afe70b7
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_DIRECT_JUMBO.S
@@ -0,0 +1,43 @@
+%default { "isrange":"0", "routine":"NoRange" }
+%verify "executed"
+%verify "unknown method"
+    /*
+     * Handle a direct method call.
+     *
+     * (We could defer the "is 'this' pointer null" test to the common
+     * method invocation code, and use a flag to indicate that static
+     * calls don't count.  If we do this as part of copying the arguments
+     * out we could avoid loading the first arg twice.)
+     *
+     */
+     /* invoke-direct/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- pDvmDex
+    FETCH(a0, 1)                           # a0<- aaaa (lo)
+    FETCH(a1, 2)                           # a1<- AAAA (hi)
+    LOAD_base_offDvmDex_pResMethods(a3, a3) #  a3 <- pDvmDex->pResMethods
+    sll     a1,a1,16
+    or      a1, a0, a1                     # a1<- AAAAaaaa
+    FETCH(rBIX, 4)                         #  rBIX <- CCCC
+    LOAD_eas2(a0, a3, a1)                  #  a0 <- resolved methodToCall
+    EXPORT_PC()                            #  must export for invoke
+    GET_VREG(rOBJ, rBIX)                   #  rOBJ <- "this" ptr
+    # already resolved?
+    bnez      a0, 1f                       #  resolved, call the function
+
+    lw        a3, offThread_method(rSELF)  #  a3 <- self->method
+    LOAD_base_offMethod_clazz(a0, a3)      #  a0 <- method->clazz
+    li        a2, METHOD_DIRECT            #  resolver method type
+    JAL(dvmResolveMethod)                  #  v0 <- call(clazz, ref, flags)
+    move      a0, v0
+    # got null?
+    beqz      v0, common_exceptionThrown   #  yes, handle exception
+
+1:
+    bnez      rOBJ, common_invokeMethodJumbo #  a0=method, rOBJ="this"
+    b         common_errNullObject         #  yes, throw exception
+
+
+
diff --git a/vm/mterp/mips/OP_INVOKE_DIRECT_RANGE.S b/vm/mterp/mips/OP_INVOKE_DIRECT_RANGE.S
new file mode 100644
index 0000000..ef88011
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_DIRECT_RANGE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_INVOKE_DIRECT.S" { "isrange":"1", "routine":"Range" }
diff --git a/vm/mterp/mips/OP_INVOKE_INTERFACE.S b/vm/mterp/mips/OP_INVOKE_INTERFACE.S
new file mode 100644
index 0000000..0924093
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_INTERFACE.S
@@ -0,0 +1,28 @@
+%default { "isrange":"0", "routine":"NoRange" }
+%verify "executed"
+%verify "unknown method"
+%verify "null object"
+    /*
+     * Handle an interface method call.
+     *
+     * for: invoke-interface, invoke-interface/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    FETCH(a2, 2)                           #  a2 <- FEDC or CCCC
+    FETCH(a1, 1)                           #  a1 <- BBBB
+    .if (!$isrange)
+    and       a2, 15                       #  a2 <- C (or stays CCCC)
+    .endif
+    EXPORT_PC()                            #  must export for invoke
+    GET_VREG(rOBJ, a2)                     #  rOBJ <- first arg ("this")
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- methodClassDex
+    LOAD_rSELF_method(a2)                  #  a2 <- method
+    # null obj?
+    beqz      rOBJ, common_errNullObject   #  yes, fail
+    LOAD_base_offObject_clazz(a0, rOBJ)      #  a0 <- thisPtr->clazz
+    JAL(dvmFindInterfaceMethodInCache)     #  v0 <- call(class, ref, method, dex)
+    move      a0, v0
+    # failed?
+    beqz      v0, common_exceptionThrown   #  yes, handle exception
+    b         common_invokeMethod${routine} #  (a0=method, rOBJ="this")
diff --git a/vm/mterp/mips/OP_INVOKE_INTERFACE_JUMBO.S b/vm/mterp/mips/OP_INVOKE_INTERFACE_JUMBO.S
new file mode 100644
index 0000000..b055d69
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_INTERFACE_JUMBO.S
@@ -0,0 +1,25 @@
+%default { "isrange":"0", "routine":"NoRange" }
+%verify "executed"
+%verify "unknown method"
+%verify "null object"
+    /*
+     * Handle an interface method call.
+     */
+    /* invoke-interface/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    FETCH(a2, 4)                           # a2<- CCCC
+    FETCH(a0, 1)                           # a0<- aaaa (lo)
+    FETCH(a1, 2)                           # a1<- AAAA (hi)
+    EXPORT_PC()                            #  must export for invoke
+    sll       a1,a1,16
+    or        a1, a0, a1                   #  a1<- AAAAaaaa
+    GET_VREG(rOBJ, a2)                     #  rOBJ <- first arg ("this")
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- methodClassDex
+    LOAD_rSELF_method(a2)                  #  a2 <- method
+    # null obj?
+    beqz      rOBJ, common_errNullObject   #  yes, fail
+    LOAD_base_offObject_clazz(a0, rOBJ)      #  a0 <- thisPtr->clazz
+    JAL(dvmFindInterfaceMethodInCache)     #  v0 <- call(class, ref, method, dex)
+    move      a0, v0
+    # failed?
+    beqz      v0, common_exceptionThrown   #  yes, handle exception
+    b         common_invokeMethodJumbo #  (a0=method, rOBJ="this")
diff --git a/vm/mterp/mips/OP_INVOKE_INTERFACE_RANGE.S b/vm/mterp/mips/OP_INVOKE_INTERFACE_RANGE.S
new file mode 100644
index 0000000..6257c8a
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_INTERFACE_RANGE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_INVOKE_INTERFACE.S" { "isrange":"1", "routine":"Range" }
diff --git a/vm/mterp/mips/OP_INVOKE_OBJECT_INIT_JUMBO.S b/vm/mterp/mips/OP_INVOKE_OBJECT_INIT_JUMBO.S
new file mode 100644
index 0000000..bd7c46d
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_OBJECT_INIT_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_INVOKE_OBJECT_INIT_RANGE.S" {"jumbo":"1", "cccc":"4"}
diff --git a/vm/mterp/mips/OP_INVOKE_OBJECT_INIT_RANGE.S b/vm/mterp/mips/OP_INVOKE_OBJECT_INIT_RANGE.S
new file mode 100644
index 0000000..df0d6c9
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_OBJECT_INIT_RANGE.S
@@ -0,0 +1,48 @@
+%default { "jumbo":"0", "cccc":"2" }
+%verify "executed"
+%verify "finalizable class"
+    /*
+     * Invoke Object.<init> on an object.  In practice we know that
+     * Object's nullary constructor doesn't do anything, so we just
+     * skip it unless a debugger is active.
+     */
+    FETCH(a1, ${cccc})                  # a1<- CCCC
+    GET_VREG(a0, a1)                    # a0<- "this" ptr
+    # check for NULL
+    beqz    a0, common_errNullObject    # export PC and throw NPE
+    LOAD_base_offObject_clazz(a1, a0)   # a1<- obj->clazz
+    LOAD_base_offClassObject_accessFlags(a2, a1) # a2<- clazz->accessFlags
+    and     a2, CLASS_ISFINALIZABLE     # is this class finalizable?
+    beqz    a2, .L${opcode}_finish      # no, go
+
+.L${opcode}_setFinal:
+    EXPORT_PC()                         # can throw
+    JAL(dvmSetFinalizable)              # call dvmSetFinalizable(obj)
+    LOAD_offThread_exception(a0, rSELF) #  a0 <- self->exception
+    # exception pending?
+    bnez    a0, common_exceptionThrown  # yes, handle it
+
+.L${opcode}_finish:
+    lhu     a1, offThread_subMode(rSELF)
+    and     a1, kSubModeDebuggerActive  # debugger active?
+    bnez    a1, .L${opcode}_debugger    # Yes - skip optimization
+    FETCH_ADVANCE_INST(${cccc}+1)       # advance to next instr, load rINST
+    GET_INST_OPCODE(t0)                 # t0<- opcode from rINST
+    GOTO_OPCODE(t0)                     # execute it
+
+%break
+    /*
+     * A debugger is attached, so we need to go ahead and do
+     * this.  For simplicity, we'll just jump directly to the
+     * corresponding handler.  Note that we can't use
+     * rIBASE here because it may be in single-step mode.
+     * Load the primary table base directly.
+     */
+.L${opcode}_debugger:
+    lw      a1, offThread_mainHandlerTable(rSELF)
+    .if $jumbo
+    li      t0, OP_INVOKE_DIRECT_JUMBO
+    .else
+    li      t0, OP_INVOKE_DIRECT_RANGE
+    .endif
+    GOTO_OPCODE_BASE(a1, t0)            # execute it
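+    /*
+     * C-like sketch of the handler (illustration only):
+     *
+     *   if (obj->clazz->accessFlags & CLASS_ISFINALIZABLE)
+     *       dvmSetFinalizable(obj);              // may throw
+     *   if (self->subMode & kSubModeDebuggerActive)
+     *       dispatch as a real invoke-direct;    // debugger sees the call
+     *   else
+     *       skip the call;                       // Object.<init> is a no-op
+     */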
diff --git a/vm/mterp/mips/OP_INVOKE_STATIC.S b/vm/mterp/mips/OP_INVOKE_STATIC.S
new file mode 100644
index 0000000..ba2d7cc
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_STATIC.S
@@ -0,0 +1,54 @@
+%default { "routine":"NoRange" }
+%verify "executed"
+%verify "unknown method"
+    /*
+     * Handle a static method call.
+     *
+     * for: invoke-static, invoke-static/range
+     */
+    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
+    # op {vCCCC..v(CCCC+AA-1)}, meth       /* BBBB */
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- pDvmDex
+    FETCH(a1, 1)                           #  a1 <- BBBB
+    LOAD_base_offDvmDex_pResMethods(a3, a3) #  a3 <- pDvmDex->pResMethods
+    li      rOBJ, 0                        #  null "this" in delay slot
+    LOAD_eas2(a0, a3, a1)                  #  a0 <- resolved methodToCall
+#if defined(WITH_JIT)
+    EAS2(rBIX, a3, a1)                     #  rBIX <- &resolved_methodToCall
+#endif
+    EXPORT_PC()                            #  must export for invoke
+    # already resolved?
+    bnez      a0, common_invokeMethod${routine} #  yes, continue on
+    b         .L${opcode}_resolve
+%break
+
+.L${opcode}_resolve:
+    LOAD_rSELF_method(a3)                  #  a3 <- self->method
+    LOAD_base_offMethod_clazz(a0, a3)      #  a0 <- method->clazz
+    li        a2, METHOD_STATIC            #  resolver method type
+    JAL(dvmResolveMethod)                  #  v0 <- call(clazz, ref, flags)
+    move      a0, v0
+#if defined(WITH_JIT)
+    /*
+     * Check to see if we're actively building a trace.  If so,
+     * we need to keep this instruction out of it.
+     * rBIX: &resolved_methodToCall
+     */
+    lhu       a2, offThread_subMode(rSELF)
+    beqz      v0, common_exceptionThrown   #  null, handle exception
+    and       a2, kSubModeJitTraceBuild    #  trace under construction?
+    beqz      a2, common_invokeMethod${routine} #  no, (a0=method, rOBJ="this")
+    lw        a1, 0(rBIX)                  #  reload resolved method
+    # finished resolving?
+    bnez      a1, common_invokeMethod${routine} #  yes, (a0=method, rOBJ="this")
+    move      rBIX, a0                     #  preserve method
+    move      a0, rSELF
+    move      a1, rPC
+    JAL(dvmJitEndTraceSelect)              #  (self, pc)
+    move      a0, rBIX
+    b         common_invokeMethod${routine} #  whew, finally!
+#else
+    # got null?
+    bnez      v0, common_invokeMethod${routine} #  (a0=method, rOBJ="this")
+    b         common_exceptionThrown       #  yes, handle exception
+#endif
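+    /*
+     * Note on the WITH_JIT block above (a sketch; pResSlot stands for the
+     * pResMethods entry that rBIX points at): if a trace is being built and
+     * the slot is still empty, this first-time resolution must stay out of
+     * the trace, so trace selection is ended here:
+     *
+     *   if ((self->subMode & kSubModeJitTraceBuild) && *pResSlot == NULL)
+     *       dvmJitEndTraceSelect(self, pc);
+     */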
diff --git a/vm/mterp/mips/OP_INVOKE_STATIC_JUMBO.S b/vm/mterp/mips/OP_INVOKE_STATIC_JUMBO.S
new file mode 100644
index 0000000..80576a2
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_STATIC_JUMBO.S
@@ -0,0 +1,53 @@
+%verify "executed"
+%verify "unknown method"
+    /*
+     * Handle a static method call.
+     */
+     /* invoke-static/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- pDvmDex
+    FETCH(a0, 1)                           # a0<- aaaa (lo)
+    FETCH(a1, 2)                           # a1<- AAAA (hi)
+    LOAD_base_offDvmDex_pResMethods(a3, a3) #  a3 <- pDvmDex->pResMethods
+    sll     a1,a1,16
+    or      a1, a0, a1                     # a1<- AAAAaaaa
+    li      rOBJ, 0                       #  null "this" in delay slot
+    LOAD_eas2(a0, a3, a1)                  #  a0 <- resolved methodToCall
+#if defined(WITH_JIT)
+    EAS2(rBIX, a3, a1)                     #  rBIX <- &resolved_methodToCall
+#endif
+    EXPORT_PC()                            #  must export for invoke
+    # already resolved?
+    bnez      a0, common_invokeMethodJumboNoThis #  (a0 = method)
+    b         .L${opcode}_resolve
+%break
+
+.L${opcode}_resolve:
+    LOAD_rSELF_method(a3)                  #  a3 <- self->method
+    LOAD_base_offMethod_clazz(a0, a3)      #  a0 <- method->clazz
+    li        a2, METHOD_STATIC            #  resolver method type
+    JAL(dvmResolveMethod)                  #  v0 <- call(clazz, ref, flags)
+    move      a0, v0
+#if defined(WITH_JIT)
+    /*
+     * Check to see if we're actively building a trace.  If so,
+     * we need to keep this instruction out of it.
+     * rBIX: &resolved_methodToCall
+     */
+    lhu        a2, offThread_subMode(rSELF)
+    beqz      v0, common_exceptionThrown   #  null, handle exception
+    and       a2, kSubModeJitTraceBuild    #  trace under construction?
+    beqz      a2, common_invokeMethodJumboNoThis #  no, (a0=method, rOBJ="this")
+    lw        a1, 0(rBIX)                  #  reload resolved method
+    # finished resolving?
+    bnez      a1, common_invokeMethodJumboNoThis #  yes, (a0=method, rOBJ="this")
+    move      rBIX, a0                     #  preserve method
+    move      a0, rSELF
+    move      a1, rPC
+    JAL(dvmJitEndTraceSelect)              #  (self, pc)
+    move      a0, rBIX
+    b         common_invokeMethodJumboNoThis #  whew, finally!
+#else
+    # got null?
+    bnez      v0, common_invokeMethodJumboNoThis #  (a0=method, rOBJ="this")
+    b         common_exceptionThrown       #  yes, handle exception
+#endif
diff --git a/vm/mterp/mips/OP_INVOKE_STATIC_RANGE.S b/vm/mterp/mips/OP_INVOKE_STATIC_RANGE.S
new file mode 100644
index 0000000..9b45216
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_STATIC_RANGE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_INVOKE_STATIC.S" { "routine":"Range" }
diff --git a/vm/mterp/mips/OP_INVOKE_SUPER.S b/vm/mterp/mips/OP_INVOKE_SUPER.S
new file mode 100644
index 0000000..6b44380
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_SUPER.S
@@ -0,0 +1,60 @@
+%default { "isrange":"0", "routine":"NoRange" }
+%verify "executed"
+%verify "unknown method"
+    /*
+     * Handle a "super" method call.
+     *
+     * for: invoke-super, invoke-super/range
+     */
+    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
+    # op vAA, {vCCCC..v(CCCC+AA-1)}, meth  /* BBBB */
+    FETCH(t0, 2)                           #  t0 <- GFED or CCCC
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- pDvmDex
+    .if (!$isrange)
+    and       t0, t0, 15                   #  t0 <- D (or stays CCCC)
+    .endif
+    FETCH(a1, 1)                           #  a1 <- BBBB
+    LOAD_base_offDvmDex_pResMethods(a3, a3) #  a3 <- pDvmDex->pResMethods
+    GET_VREG(rOBJ, t0)                     #  rOBJ <- "this" ptr
+    LOAD_eas2(a0, a3, a1)                  #  a0 <- resolved baseMethod
+    # null "this"?
+    LOAD_rSELF_method(t1)                  #  t1 <- current method
+    beqz      rOBJ, common_errNullObject   #  null "this", throw exception
+    # cmp a0, 0; already resolved?
+    LOAD_base_offMethod_clazz(rBIX, t1)    #  rBIX <- method->clazz
+    EXPORT_PC()                            #  must export for invoke
+    bnez      a0, .L${opcode}_continue     #  resolved, continue on
+
+    move      a0, rBIX                     #  a0 <- method->clazz
+    li        a2, METHOD_VIRTUAL           #  resolver method type
+    JAL(dvmResolveMethod)                  #  v0 <- call(clazz, ref, flags)
+    move      a0, v0
+    # got null?
+    beqz      v0, common_exceptionThrown   #  yes, handle exception
+    b         .L${opcode}_continue
+%break
+
+    /*
+     * At this point:
+     *  a0 = resolved base method
+     *  rBIX = method->clazz
+     */
+.L${opcode}_continue:
+    LOAD_base_offClassObject_super(a1, rBIX) #  a1 <- method->clazz->super
+    LOADu2_offMethod_methodIndex(a2, a0)   #  a2 <- baseMethod->methodIndex
+    LOAD_base_offClassObject_vtableCount(a3, a1) #  a3 <- super->vtableCount
+    EXPORT_PC()                            #  must export for invoke
+    # compare (methodIndex, vtableCount)
+    bgeu      a2, a3, .L${opcode}_nsm      #  method not present in superclass
+    LOAD_base_offClassObject_vtable(a1, a1) #  a1 <- ...clazz->super->vtable
+    LOAD_eas2(a0, a1, a2)                  #  a0 <- vtable[methodIndex]
+    b         common_invokeMethod${routine} #  continue on
+
+    /*
+     * Throw a NoSuchMethodError with the method name as the message.
+     *  a0 = resolved base method
+     */
+.L${opcode}_nsm:
+    LOAD_base_offMethod_name(a1, a0)       #  a1 <- method name
+    b         common_errNoSuchMethod
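+    /*
+     * C-like sketch of the super dispatch above (illustration only):
+     *
+     *   super = method->clazz->super;
+     *   if (baseMethod->methodIndex >= super->vtableCount)
+     *       throw NoSuchMethodError(baseMethod->name);
+     *   meth = super->vtable[baseMethod->methodIndex];
+     */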
+
diff --git a/vm/mterp/mips/OP_INVOKE_SUPER_JUMBO.S b/vm/mterp/mips/OP_INVOKE_SUPER_JUMBO.S
new file mode 100644
index 0000000..5794cb1
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_SUPER_JUMBO.S
@@ -0,0 +1,56 @@
+%verify "executed"
+%verify "unknown method"
+    /*
+     * Handle a "super" method call.
+     */
+    /* invoke-super/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    FETCH(t0, 4)                           # t0<- CCCC
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- pDvmDex
+    FETCH(a0, 1)                           # a0<- aaaa (lo)
+    FETCH(a1, 2)                           # a1<- AAAA (hi)
+    LOAD_base_offDvmDex_pResMethods(a3, a3) #  a3 <- pDvmDex->pResMethods
+    sll       a1,a1,16
+    or        a1, a0, a1                   # a1<- AAAAaaaa
+    GET_VREG(rOBJ, t0)                     #  rOBJ <- "this" ptr
+    LOAD_eas2(a0, a3, a1)                  #  a0 <- resolved baseMethod
+    # null "this"?
+    LOAD_rSELF_method(t1)                  #  t1 <- current method
+    beqz      rOBJ, common_errNullObject   #  null "this", throw exception
+    # cmp a0, 0; already resolved?
+    LOAD_base_offMethod_clazz(rBIX, t1)    #  rBIX <- method->clazz
+    EXPORT_PC()                            #  must export for invoke
+    bnez      a0, .L${opcode}_continue     #  resolved, continue on
+
+    move      a0, rBIX                     #  a0 <- method->clazz
+    li        a2, METHOD_VIRTUAL           #  resolver method type
+    JAL(dvmResolveMethod)                  #  v0 <- call(clazz, ref, flags)
+    move      a0, v0
+    # got null?
+    beqz      v0, common_exceptionThrown   #  yes, handle exception
+    b         .L${opcode}_continue
+%break
+
+    /*
+     * At this point:
+     *  a0 = resolved base method
+     *  rBIX = method->clazz
+     */
+.L${opcode}_continue:
+    LOAD_base_offClassObject_super(a1, rBIX) #  a1 <- method->clazz->super
+    LOADu2_offMethod_methodIndex(a2, a0)   #  a2 <- baseMethod->methodIndex
+    LOAD_base_offClassObject_vtableCount(a3, a1) #  a3 <- super->vtableCount
+    EXPORT_PC()                            #  must export for invoke
+    # compare (methodIndex, vtableCount)
+    bgeu      a2, a3, .L${opcode}_nsm      #  method not present in superclass
+    LOAD_base_offClassObject_vtable(a1, a1) #  a1 <- ...clazz->super->vtable
+    LOAD_eas2(a0, a1, a2)                  #  a0 <- vtable[methodIndex]
+    b         common_invokeMethodJumbo     #  a0=method rOBJ="this"
+
+    /*
+     * Throw a NoSuchMethodError with the method name as the message.
+     *  a0 = resolved base method
+     */
+.L${opcode}_nsm:
+    LOAD_base_offMethod_name(a1, a0)       #  a1 <- method name
+    b         common_errNoSuchMethod
+
diff --git a/vm/mterp/mips/OP_INVOKE_SUPER_QUICK.S b/vm/mterp/mips/OP_INVOKE_SUPER_QUICK.S
new file mode 100644
index 0000000..eb5465a
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_SUPER_QUICK.S
@@ -0,0 +1,26 @@
+%default { "isrange":"0", "routine":"NoRange" }
+%verify "executed"
+%verify "unknown method"
+    /*
+     * Handle an optimized "super" method call.
+     *
+     * for: [opt] invoke-super-quick, invoke-super-quick/range
+     */
+    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
+    # op vAA, {vCCCC..v(CCCC+AA-1)}, meth  /* BBBB */
+    FETCH(t0, 2)                           #  t0 <- GFED or CCCC
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    .if (!$isrange)
+    and       t0, t0, 15                   #  t0 <- D (or stays CCCC)
+    .endif
+    FETCH(a1, 1)                           #  a1 <- BBBB
+    LOAD_base_offMethod_clazz(a2, a2)      #  a2 <- method->clazz
+    EXPORT_PC()                            #  must export for invoke
+    LOAD_base_offClassObject_super(a2, a2) #  a2 <- method->clazz->super
+    GET_VREG(rOBJ, t0)                     #  rOBJ <- "this"
+    LOAD_base_offClassObject_vtable(a2, a2) #  a2 <- ...clazz->super->vtable
+    # is "this" null ?
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- super->vtable[BBBB]
+    beqz      rOBJ, common_errNullObject   #  "this" is null, throw exception
+    b         common_invokeMethod${routine} #  (a0=method, rOBJ="this")
+
diff --git a/vm/mterp/mips/OP_INVOKE_SUPER_QUICK_RANGE.S b/vm/mterp/mips/OP_INVOKE_SUPER_QUICK_RANGE.S
new file mode 100644
index 0000000..ade7bba
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_SUPER_QUICK_RANGE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_INVOKE_SUPER_QUICK.S" { "isrange":"1", "routine":"Range" }
diff --git a/vm/mterp/mips/OP_INVOKE_SUPER_RANGE.S b/vm/mterp/mips/OP_INVOKE_SUPER_RANGE.S
new file mode 100644
index 0000000..7821d31
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_SUPER_RANGE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_INVOKE_SUPER.S" { "isrange":"1", "routine":"Range" }
diff --git a/vm/mterp/mips/OP_INVOKE_VIRTUAL.S b/vm/mterp/mips/OP_INVOKE_VIRTUAL.S
new file mode 100644
index 0000000..9f6d2c3
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_VIRTUAL.S
@@ -0,0 +1,48 @@
+%default { "isrange":"0", "routine":"NoRange" }
+%verify "executed"
+%verify "unknown method"
+%verify "null object"
+    /*
+     * Handle a virtual method call.
+     *
+     * for: invoke-virtual, invoke-virtual/range
+     */
+    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
+    # op vAA, {vCCCC..v(CCCC+AA-1)}, meth  /* BBBB */
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- pDvmDex
+    FETCH(a1, 1)                           #  a1 <- BBBB
+    LOAD_base_offDvmDex_pResMethods(a3, a3) #  a3 <- pDvmDex->pResMethods
+    FETCH(rBIX, 2)                         #  rBIX <- GFED or CCCC
+    LOAD_eas2(a0, a3, a1)                  #  a0 <- resolved baseMethod
+    .if (!$isrange)
+    and       rBIX, rBIX, 15               #  rBIX <- D (or stays CCCC)
+    .endif
+    EXPORT_PC()                            #  must export for invoke
+    # already resolved?
+    bnez      a0, .L${opcode}_continue     #  yes, continue on
+
+    LOAD_rSELF_method(a3)                  #  a3 <- self->method
+    LOAD_base_offMethod_clazz(a0, a3)      #  a0 <- method->clazz
+    li        a2, METHOD_VIRTUAL           #  resolver method type
+    JAL(dvmResolveMethod)                  #  v0 <- call(clazz, ref, flags)
+    move      a0, v0
+    # got null?
+    bnez      v0, .L${opcode}_continue     #  no, continue
+    b         common_exceptionThrown       #  yes, handle exception
+%break
+
+    /*
+     * At this point:
+     *  a0 = resolved base method
+     *  rBIX= C or CCCC (index of first arg, which is the "this" ptr)
+     */
+.L${opcode}_continue:
+    GET_VREG(rOBJ, rBIX)                   #  rOBJ <- "this" ptr
+    LOADu2_offMethod_methodIndex(a2, a0)   #  a2 <- baseMethod->methodIndex
+    # is "this" null?
+    beqz      rOBJ, common_errNullObject   #  null "this", throw exception
+    LOAD_base_offObject_clazz(a3, rOBJ)    #  a3 <- thisPtr->clazz
+    LOAD_base_offClassObject_vtable(a3, a3) #  a3 <- thisPtr->clazz->vtable
+    LOAD_eas2(a0, a3, a2)                  #  a0 <- vtable[methodIndex]
+    b         common_invokeMethod${routine} #  (a0=method, rOBJ="this")
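+    /*
+     * C-like sketch of the virtual dispatch above (illustration only):
+     *
+     *   this = fp[C];
+     *   if (this == NULL) throw NullPointerException;
+     *   meth = this->clazz->vtable[baseMethod->methodIndex];
+     *   invoke(meth, this);
+     */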
+
diff --git a/vm/mterp/mips/OP_INVOKE_VIRTUAL_JUMBO.S b/vm/mterp/mips/OP_INVOKE_VIRTUAL_JUMBO.S
new file mode 100644
index 0000000..6bcde34
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_VIRTUAL_JUMBO.S
@@ -0,0 +1,44 @@
+%verify "executed"
+%verify "unknown method"
+%verify "null object"
+    /*
+     * Handle a virtual method call.
+     */
+     /* invoke-virtual/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- pDvmDex
+    FETCH(a0, 1)                           # a0<- aaaa (lo)
+    FETCH(a1, 2)                           # a1<- AAAA (hi)
+    LOAD_base_offDvmDex_pResMethods(a3, a3) #  a3 <- pDvmDex->pResMethods
+    sll     a1,a1,16
+    or      a1, a0, a1                     # a1<- AAAAaaaa
+    LOAD_eas2(a0, a3, a1)                  #  a0 <- resolved baseMethod
+    EXPORT_PC()                            #  must export for invoke
+    # already resolved?
+    bnez      a0, .L${opcode}_continue     #  yes, continue on
+
+    LOAD_rSELF_method(a3)                  #  a3 <- self->method
+    LOAD_base_offMethod_clazz(a0, a3)      #  a0 <- method->clazz
+    li        a2, METHOD_VIRTUAL           #  resolver method type
+    JAL(dvmResolveMethod)                  #  v0 <- call(clazz, ref, flags)
+    move      a0, v0
+    # got null?
+    bnez      v0, .L${opcode}_continue     #  no, continue
+    b         common_exceptionThrown       #  yes, handle exception
+%break
+
+    /*
+     * At this point:
+     *  a0 = resolved base method
+     */
+.L${opcode}_continue:
+    FETCH(rBIX,4)                          #  rBIX <- CCCC
+    GET_VREG(rOBJ, rBIX)                   #  rOBJ <- "this" ptr
+    LOADu2_offMethod_methodIndex(a2, a0)   #  a2 <- baseMethod->methodIndex
+    # is "this" null?
+    beqz      rOBJ, common_errNullObject   #  null "this", throw exception
+    LOAD_base_offObject_clazz(a3, rOBJ)    #  a3 <- thisPtr->clazz
+    LOAD_base_offClassObject_vtable(a3, a3) #  a3 <- thisPtr->clazz->vtable
+    LOAD_eas2(a0, a3, a2)                  #  a0 <- vtable[methodIndex]
+    b         common_invokeMethodJumbo     #  (a0=method, rOBJ="this")
+
diff --git a/vm/mterp/mips/OP_INVOKE_VIRTUAL_QUICK.S b/vm/mterp/mips/OP_INVOKE_VIRTUAL_QUICK.S
new file mode 100644
index 0000000..1952b70
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_VIRTUAL_QUICK.S
@@ -0,0 +1,23 @@
+%default { "isrange":"0", "routine":"NoRange" }
+%verify "executed"
+%verify "null object"
+    /*
+     * Handle an optimized virtual method call.
+     *
+     * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range
+     */
+    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
+    # op vAA, {vCCCC..v(CCCC+AA-1)}, meth  /* BBBB */
+    FETCH(a3, 2)                           #  a3 <- FEDC or CCCC
+    FETCH(a1, 1)                           #  a1 <- BBBB
+    .if (!$isrange)
+    and       a3, a3, 15                   #  a3 <- C (or stays CCCC)
+    .endif
+    GET_VREG(rOBJ, a3)                     #  rOBJ <- vC ("this" ptr)
+    # is "this" null?
+    beqz      rOBJ, common_errNullObject   #  null "this", throw exception
+    LOAD_base_offObject_clazz(a2, rOBJ)    #  a2 <- thisPtr->clazz
+    LOAD_base_offClassObject_vtable(a2, a2) #  a2 <- thisPtr->clazz->vtable
+    EXPORT_PC()                            #  invoke must export
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- vtable[BBBB]
+    b         common_invokeMethod${routine} #  (a0=method, rOBJ="this")
diff --git a/vm/mterp/mips/OP_INVOKE_VIRTUAL_QUICK_RANGE.S b/vm/mterp/mips/OP_INVOKE_VIRTUAL_QUICK_RANGE.S
new file mode 100644
index 0000000..8048895
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_VIRTUAL_QUICK_RANGE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_INVOKE_VIRTUAL_QUICK.S" { "isrange":"1", "routine":"Range" }
diff --git a/vm/mterp/mips/OP_INVOKE_VIRTUAL_RANGE.S b/vm/mterp/mips/OP_INVOKE_VIRTUAL_RANGE.S
new file mode 100644
index 0000000..5f86b4b
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_VIRTUAL_RANGE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_INVOKE_VIRTUAL.S" { "isrange":"1", "routine":"Range" }
diff --git a/vm/mterp/mips/OP_IPUT.S b/vm/mterp/mips/OP_IPUT.S
new file mode 100644
index 0000000..626cc92
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT.S
@@ -0,0 +1,50 @@
+%default { "store":"sw","postbarrier":"    #  noop", "prebarrier":"    #  noop", "sqnum":"0" }
+%verify "executed"
+%verify "null object"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * General 32-bit instance field put.
+     *
+     * for: iput, iput-boolean, iput-byte, iput-char, iput-short
+     */
+    # op vA, vB, field                     /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .L${opcode}_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # success?
+    move      a0, v0
+    bnez      v0, .L${opcode}_finish       #  yes, finish up
+    b         common_exceptionThrown
+%break
+
+    /*
+     * Currently:
+     *  a0 holds resolved field
+     *  rOBJ holds object
+     */
+.L${opcode}_finish:
+    #BAL(common_squeak${sqnum})
+    GET_OPA4(a1)                           #  a1 <- A+
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    GET_VREG(a0, a1)                       #  a0 <- fp[A]
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    addu      rOBJ, rOBJ, a3               #  form address
+    $prebarrier                            #  releasing store
+    $store a0, (rOBJ)                      #  obj.field (8/16/32 bits) <- a0
+    $postbarrier
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
diff --git a/vm/mterp/mips/OP_IPUT_BOOLEAN.S b/vm/mterp/mips/OP_IPUT_BOOLEAN.S
new file mode 100644
index 0000000..4f09dab
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_BOOLEAN.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IPUT.S"
diff --git a/vm/mterp/mips/OP_IPUT_BOOLEAN_JUMBO.S b/vm/mterp/mips/OP_IPUT_BOOLEAN_JUMBO.S
new file mode 100644
index 0000000..8457c29
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_BOOLEAN_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IPUT_JUMBO.S"
diff --git a/vm/mterp/mips/OP_IPUT_BYTE.S b/vm/mterp/mips/OP_IPUT_BYTE.S
new file mode 100644
index 0000000..4f09dab
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_BYTE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IPUT.S"
diff --git a/vm/mterp/mips/OP_IPUT_BYTE_JUMBO.S b/vm/mterp/mips/OP_IPUT_BYTE_JUMBO.S
new file mode 100644
index 0000000..8457c29
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_BYTE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IPUT_JUMBO.S"
diff --git a/vm/mterp/mips/OP_IPUT_CHAR.S b/vm/mterp/mips/OP_IPUT_CHAR.S
new file mode 100644
index 0000000..4f09dab
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_CHAR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IPUT.S"
diff --git a/vm/mterp/mips/OP_IPUT_CHAR_JUMBO.S b/vm/mterp/mips/OP_IPUT_CHAR_JUMBO.S
new file mode 100644
index 0000000..8457c29
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_CHAR_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IPUT_JUMBO.S"
diff --git a/vm/mterp/mips/OP_IPUT_JUMBO.S b/vm/mterp/mips/OP_IPUT_JUMBO.S
new file mode 100644
index 0000000..2d05984
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_JUMBO.S
@@ -0,0 +1,58 @@
+%default { "store":"sw","postbarrier":"# noop ", "prebarrier":"    #  noop", "sqnum":"0" }
+%verify "executed"
+%verify "null object"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * Jumbo 32-bit instance field put.
+     *
+     * for: iput/jumbo, iput-boolean/jumbo, iput-byte/jumbo, iput-char/jumbo,
+     *      iput-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(a1, 1)                           # a1<- aaaa (lo)
+    FETCH(a2, 2)                           # a2<- AAAA (hi)
+    FETCH(a0, 4)                           # a0<- CCCC
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    sll       a2,a2,16
+    or        a1, a1, a2                   #  a1<- AAAAaaaa
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[CCCC], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .L${opcode}_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    b       .L${opcode}_resolved           # resolved, continue
+
+%break
+
+.L${opcode}_resolved:
+    move      a0, v0
+    beqz      a0, common_exceptionThrown
+    # fall through to ${opcode}_finish
+
+
+    /*
+     * Currently:
+     *  a0 holds resolved field
+     *  rOBJ holds object
+     */
+.L${opcode}_finish:
+    #BAL(common_squeak${sqnum})
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    FETCH(a1, 3)                           # a1<- BBBB
+    GET_VREG(a0, a1)                       #  a0 <- fp[BBBB]
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    addu      rOBJ, rOBJ, a3               #  form address
+    $prebarrier                            #  releasing store
+    $store a0, (rOBJ)                      #  obj.field (8/16/32 bits) <- a0
+    $postbarrier
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
diff --git a/vm/mterp/mips/OP_IPUT_OBJECT.S b/vm/mterp/mips/OP_IPUT_OBJECT.S
new file mode 100644
index 0000000..0382fa8
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_OBJECT.S
@@ -0,0 +1,56 @@
+%default { "store":"sw", "postbarrier":"    #  noop", "prebarrier":"    #  noop", "sqnum":"0" }
+%verify "executed"
+%verify "null object"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * 32-bit instance field put.
+     *
+     * for: iput-object, iput-object-volatile
+     */
+    # op vA, vB, field                     /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .L${opcode}_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # success?
+    move      a0, v0
+    bnez      v0, .L${opcode}_finish       #  yes, finish up
+    b         common_exceptionThrown
+%break
+
+    /*
+     * Currently:
+     *  a0 holds resolved field
+     *  rOBJ holds object
+     */
+.L${opcode}_finish:
+    #BAL(common_squeak${sqnum})
+    GET_OPA4(a1)                           #  a1 <- A+
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    GET_VREG(a0, a1)                       #  a0 <- fp[A]
+    lw        a2, offThread_cardTable(rSELF) #  a2 <- card table base
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    addu      t2, rOBJ, a3                 #  form address
+    $prebarrier                            #  releasing store
+    $store a0, (t2)                        #  obj.field (32 bits) <- a0
+    $postbarrier
+    beqz      a0, 1f                       #  stored a null reference?
+    srl       t1, rOBJ, GC_CARD_SHIFT
+    addu      t2, a2, t1
+    sb        a2, (t2)                     #  mark card if not
+1:
+    GOTO_OPCODE(t0)                        #  jump to next instruction
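+    /*
+     * Card marking, C-like sketch (illustration only).  The byte stored is
+     * the low byte of the biased card-table base, which is set up so that
+     * it equals GC_CARD_DIRTY:
+     *
+     *   if (storedRef != NULL)
+     *       cardTable[(u4) obj >> GC_CARD_SHIFT] = (u1) (u4) cardTable;
+     */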
+
diff --git a/vm/mterp/mips/OP_IPUT_OBJECT_JUMBO.S b/vm/mterp/mips/OP_IPUT_OBJECT_JUMBO.S
new file mode 100644
index 0000000..ce82ff8
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_OBJECT_JUMBO.S
@@ -0,0 +1,60 @@
+%default { "store":"sw", "postbarrier":"    #  noop", "prebarrier":"    #  noop", "sqnum":"0" }
+%verify "executed"
+%verify "null object"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * Jumbo 32-bit instance field put.
+     */
+    /* iput-object/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(a1, 1)                           # a1<- aaaa (lo)
+    FETCH(a2, 2)                           # a2<- AAAA (hi)
+    FETCH(a0, 4)                           # a0<- CCCC
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    sll      a2,a2,16
+    or       a1, a1, a2                    # a1<- AAAAaaaa
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[CCCC], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .L${opcode}_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    b         .L${opcode}_resolved
+
+%break
+
+    /*
+     * Currently:
+     *  a0 holds resolved field
+     *  rOBJ holds object
+     */
+.L${opcode}_resolved:
+    move      a0, v0
+    beqz      a0, common_exceptionThrown
+    # fall through to ${opcode}_finish
+
+.L${opcode}_finish:
+    #BAL(common_squeak${sqnum})
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    FETCH(a1, 3)                              # a1<- BBBB
+    GET_VREG(a0, a1)                       #  a0 <- fp[BBBB]
+    lw        a2, offThread_cardTable(rSELF) #  a2 <- card table base
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    addu      t2, rOBJ, a3                 #  form address
+    $prebarrier                            #  releasing store
+    $store a0, (t2)                        #  obj.field (32 bits) <- a0
+    $postbarrier
+    beqz      a0, 1f                       #  stored a null reference?
+    srl       t1, rOBJ, GC_CARD_SHIFT
+    addu      t2, a2, t1
+    sb        a2, (t2)                     #  mark card if not
+1:
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
diff --git a/vm/mterp/mips/OP_IPUT_OBJECT_QUICK.S b/vm/mterp/mips/OP_IPUT_OBJECT_QUICK.S
new file mode 100644
index 0000000..eb0afb4
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_OBJECT_QUICK.S
@@ -0,0 +1,21 @@
+%verify "executed"
+%verify "null object"
+    /* For: iput-object-quick */
+    # op vA, vB, offset                    /* CCCC */
+    GET_OPB(a2)                            #  a2 <- B
+    GET_VREG(a3, a2)                       #  a3 <- fp[B], the object pointer
+    FETCH(a1, 1)                           #  a1 <- field byte offset
+    GET_OPA4(a2)                           #  a2 <- A(+)
+    beqz      a3, common_errNullObject     #  object was null
+    GET_VREG(a0, a2)                       #  a0 <- fp[A]
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    addu      t0, a3, a1
+    sw        a0, 0(t0)                    #  obj.field (always 32 bits) <- a0
+    beqz      a0, 1f
+    lw        a2, offThread_cardTable(rSELF) #  a2 <- card table base
+    srl       t1, a3, GC_CARD_SHIFT
+    addu      t2, a2, t1
+    sb        a2, 0(t2)
+1:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/vm/mterp/mips/OP_IPUT_OBJECT_VOLATILE.S b/vm/mterp/mips/OP_IPUT_OBJECT_VOLATILE.S
new file mode 100644
index 0000000..8320a7d
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_OBJECT_VOLATILE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IPUT_OBJECT.S" {"prebarrier":"SMP_DMB_ST", "postbarrier":"SMP_DMB"}
diff --git a/vm/mterp/mips/OP_IPUT_OBJECT_VOLATILE_JUMBO.S b/vm/mterp/mips/OP_IPUT_OBJECT_VOLATILE_JUMBO.S
new file mode 100644
index 0000000..48cdb6c
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_OBJECT_VOLATILE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IPUT_OBJECT_JUMBO.S" {"prebarrier":"SMP_DMB_ST", "postbarrier":"SMP_DMB"}
diff --git a/vm/mterp/mips/OP_IPUT_QUICK.S b/vm/mterp/mips/OP_IPUT_QUICK.S
new file mode 100644
index 0000000..8976265
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_QUICK.S
@@ -0,0 +1,16 @@
+%verify "executed"
+%verify "null object"
+    /* For: iput-quick */
+    # op vA, vB, offset                    /* CCCC */
+    GET_OPB(a2)                            #  a2 <- B
+    GET_VREG(a3, a2)                       #  a3 <- fp[B], the object pointer
+    FETCH(a1, 1)                           #  a1 <- field byte offset
+    GET_OPA4(a2)                           #  a2 <- A(+)
+    beqz      a3, common_errNullObject     #  object was null
+    GET_VREG(a0, a2)                       #  a0 <- fp[A]
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    addu      t0, a3, a1
+    sw        a0, 0(t0)                    #  obj.field (always 32 bits) <- a0
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
diff --git a/vm/mterp/mips/OP_IPUT_SHORT.S b/vm/mterp/mips/OP_IPUT_SHORT.S
new file mode 100644
index 0000000..4f09dab
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_SHORT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IPUT.S"
diff --git a/vm/mterp/mips/OP_IPUT_SHORT_JUMBO.S b/vm/mterp/mips/OP_IPUT_SHORT_JUMBO.S
new file mode 100644
index 0000000..8457c29
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_SHORT_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IPUT_JUMBO.S"
diff --git a/vm/mterp/mips/OP_IPUT_VOLATILE.S b/vm/mterp/mips/OP_IPUT_VOLATILE.S
new file mode 100644
index 0000000..4cb365f
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_VOLATILE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IPUT.S" {"prebarrier":"SMP_DMB_ST", "postbarrier":"SMP_DMB"}
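+# Note (sketch): the substitution above replaces the $prebarrier/$postbarrier
+# hooks in OP_IPUT.S with explicit memory barriers, so the store expands
+# roughly as:
+#
+#   SMP_DMB_ST              # release barrier before the store
+#   sw a0, (rOBJ)           # the 32-bit field store itself
+#   SMP_DMB                 # full barrier after the store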
diff --git a/vm/mterp/mips/OP_IPUT_VOLATILE_JUMBO.S b/vm/mterp/mips/OP_IPUT_VOLATILE_JUMBO.S
new file mode 100644
index 0000000..aaf70b7
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_VOLATILE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IPUT_JUMBO.S" {"prebarrier":"SMP_DMB_ST", "postbarrier":"SMP_DMB"}
diff --git a/vm/mterp/mips/OP_IPUT_WIDE.S b/vm/mterp/mips/OP_IPUT_WIDE.S
new file mode 100644
index 0000000..b8d9690
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_WIDE.S
@@ -0,0 +1,48 @@
+%default {"volatile":"0"}
+%verify "executed"
+%verify "null object"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    # iput-wide vA, vB, field              /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .L${opcode}_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # success?
+    move      a0, v0
+    bnez      v0, .L${opcode}_finish       #  yes, finish up
+    b         common_exceptionThrown
+%break
+
+    /*
+     * Currently:
+     *  a0   holds resolved field
+     *  rOBJ holds object
+     */
+.L${opcode}_finish:
+    GET_OPA4(a2)                           #  a2 <- A+
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[A]
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    LOAD64(a0, a1, a2)                     #  a0/a1 <- fp[A]
+    GET_INST_OPCODE(rBIX)                  #  extract opcode from rINST
+    addu      a2, rOBJ, a3                 #  form address
+    .if $volatile
+    JAL(dvmQuasiAtomicSwap64Sync)          #  stores a0/a1 into addr a2
+    .else
+    STORE64(a0, a1, a2)                    #  obj.field (64 bits, aligned) <- a0 a1
+    .endif
+    GOTO_OPCODE(rBIX)                      #  jump to next instruction
+
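On a 32-bit MIPS core a 64-bit store is two sw instructions, which another thread can observe half-done, so the volatile path routes through dvmQuasiAtomicSwap64Sync instead of STORE64. A sketch of one way such a helper can be built, assuming a lock-based strategy (the real implementation may differ):

    #include <stdint.h>
    #include <pthread.h>

    /* Serialize 64-bit stores through a lock so both halves become
     * visible together; a sketch, not dvmQuasiAtomicSwap64Sync itself. */
    static pthread_mutex_t gSwap64Lock = PTHREAD_MUTEX_INITIALIZER;

    static void quasiAtomicStore64(uint64_t value, volatile uint64_t *addr)
    {
        pthread_mutex_lock(&gSwap64Lock);
        *addr = value;
        pthread_mutex_unlock(&gSwap64Lock);
    }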
diff --git a/vm/mterp/mips/OP_IPUT_WIDE_JUMBO.S b/vm/mterp/mips/OP_IPUT_WIDE_JUMBO.S
new file mode 100644
index 0000000..8edc142
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_WIDE_JUMBO.S
@@ -0,0 +1,55 @@
+%default {"volatile":"0"}
+%verify "executed"
+%verify "null object"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /* iput-wide/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(a1, 1)                           # a1<- aaaa (lo)
+    FETCH(a2, 2)                           # a2<- AAAA (hi)
+    FETCH(a0, 4)                           # a0<- CCCC
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    sll       a2, a2, 16
+    or        a1, a1, a2                   # a1<- AAAAaaaa
+
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[CCCC], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .L${opcode}_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    b       .L${opcode}_resolved           # resolved, continue
+
+%break
+
+.L${opcode}_resolved:
+    move      a0, v0
+    beqz      a0, common_exceptionThrown
+    # fall through to ${opcode}_finish
+    /*
+     * Currently:
+     *  a0   holds resolved field
+     *  rOBJ holds object
+     */
+.L${opcode}_finish:
+    FETCH(a2, 3)                           # a2<- BBBB
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BBBB]
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
+    LOAD64(a0, a1, a2)                     #  a0/a1 <- fp[BBBB]
+    GET_INST_OPCODE(rBIX)                  #  extract opcode from rINST
+    addu      a2, rOBJ, a3                 #  form address
+    .if $volatile
+    JAL(dvmQuasiAtomicSwap64Sync)          #  stores a0/a1 into addr a2
+    .else
+    STORE64(a0, a1, a2)                    #  obj.field (64 bits, aligned) <- a0 a1
+    .endif
+    GOTO_OPCODE(rBIX)                      #  jump to next instruction
+
+
diff --git a/vm/mterp/mips/OP_IPUT_WIDE_QUICK.S b/vm/mterp/mips/OP_IPUT_WIDE_QUICK.S
new file mode 100644
index 0000000..f86c403
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_WIDE_QUICK.S
@@ -0,0 +1,17 @@
+%verify "executed"
+%verify "null object"
+    # iput-wide-quick vA, vB, offset       /* CCCC */
+    GET_OPA4(a0)                           #  a0 <- A(+)
+    GET_OPB(a1)                            #  a1 <- B
+    GET_VREG(a2, a1)                       #  a2 <- fp[B], the object pointer
+    EAS2(a3, rFP, a0)                      #  a3 <- &fp[A]
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[A]
+    # check object for null
+    beqz      a2, common_errNullObject     #  object was null
+    FETCH(a3, 1)                           #  a3 <- field byte offset
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    addu      a2, a2, a3                   #  form address: a2 <- obj + byte offset
+    STORE64(a0, a1, a2)                    #  obj.field (64 bits, aligned) <- a0/a1
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
diff --git a/vm/mterp/mips/OP_IPUT_WIDE_VOLATILE.S b/vm/mterp/mips/OP_IPUT_WIDE_VOLATILE.S
new file mode 100644
index 0000000..784be66
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_WIDE_VOLATILE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IPUT_WIDE.S" {"volatile":"1"}
diff --git a/vm/mterp/mips/OP_IPUT_WIDE_VOLATILE_JUMBO.S b/vm/mterp/mips/OP_IPUT_WIDE_VOLATILE_JUMBO.S
new file mode 100644
index 0000000..72436fa
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_WIDE_VOLATILE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IPUT_WIDE_JUMBO.S" {"volatile":"1"}
diff --git a/vm/mterp/mips/OP_LONG_TO_DOUBLE.S b/vm/mterp/mips/OP_LONG_TO_DOUBLE.S
new file mode 100644
index 0000000..fad9ec0
--- /dev/null
+++ b/vm/mterp/mips/OP_LONG_TO_DOUBLE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/unflopWide.S" {"instr":"JAL(__floatdidf)", "ld_arg":"LOAD64(rARG0, rARG1, a3)"}
diff --git a/vm/mterp/mips/OP_LONG_TO_FLOAT.S b/vm/mterp/mips/OP_LONG_TO_FLOAT.S
new file mode 100644
index 0000000..86a143a
--- /dev/null
+++ b/vm/mterp/mips/OP_LONG_TO_FLOAT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/unopNarrower.S" {"instr":"JAL(__floatdisf)", "instr_f":"JAL(__floatdisf)", "load":"LOAD64(rARG0, rARG1, a3)"}
diff --git a/vm/mterp/mips/OP_LONG_TO_INT.S b/vm/mterp/mips/OP_LONG_TO_INT.S
new file mode 100644
index 0000000..fe8f865
--- /dev/null
+++ b/vm/mterp/mips/OP_LONG_TO_INT.S
@@ -0,0 +1,10 @@
+%verify "executed"
+    GET_OPB(a1)                            #  a1 <- B from 15:12
+    GET_OPA4(a0)                           #  a0 <- A from 11:8
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+#ifdef HAVE_BIG_ENDIAN
+    addu      a1, a1, 1                    #  big-endian: low word is in fp[B+1]
+#endif
+    GET_VREG(a2, a1)                       #  a2 <- fp[B]
+    GET_INST_OPCODE(t0)                    #  t0 <- opcode from rINST
+    SET_VREG_GOTO(a2, a0, t0)              #  fp[A] <- a2
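long-to-int keeps only the low-order 32 bits, and the HAVE_BIG_ENDIAN adjustment exists because a long spans two 32-bit vregs whose low word sits in the second slot on big-endian targets. As a C sketch, assuming the frame is a flat array of 32-bit slots:

    #include <stdint.h>

    static uint32_t longToInt(const uint32_t *fp, unsigned vB)
    {
    #if defined(HAVE_BIG_ENDIAN)
        return fp[vB + 1];   /* low word lives in the higher-numbered slot */
    #else
        return fp[vB];       /* low word lives in the lower-numbered slot */
    #endif
    }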
diff --git a/vm/mterp/mips/OP_MONITOR_ENTER.S b/vm/mterp/mips/OP_MONITOR_ENTER.S
new file mode 100644
index 0000000..1f5541e
--- /dev/null
+++ b/vm/mterp/mips/OP_MONITOR_ENTER.S
@@ -0,0 +1,17 @@
+%verify "executed"
+%verify "exception for null object"
+    /*
+     * Synchronize on an object.
+     */
+    /* monitor-enter vAA */
+    GET_OPA(a2)                            #  a2 <- AA
+    GET_VREG(a1, a2)                       #  a1 <- vAA (object)
+    move      a0, rSELF                    #  a0 <- self
+    EXPORT_PC()                            #  export PC so we can grab stack trace
+    # null object?
+    beqz      a1, common_errNullObject     #  null object, throw an exception
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    JAL(dvmLockObject)                     #  call(self, obj)
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
diff --git a/vm/mterp/mips/OP_MONITOR_EXIT.S b/vm/mterp/mips/OP_MONITOR_EXIT.S
new file mode 100644
index 0000000..fc671cb
--- /dev/null
+++ b/vm/mterp/mips/OP_MONITOR_EXIT.S
@@ -0,0 +1,26 @@
+%verify "executed"
+%verify "exception for null object (impossible in javac)"
+%verify "dvmUnlockObject fails"
+    /*
+     * Unlock an object.
+     *
+     * Exceptions that occur when unlocking a monitor need to appear as
+     * if they happened at the following instruction.  See the Dalvik
+     * instruction spec.
+     */
+    /* monitor-exit vAA */
+    GET_OPA(a2)                            #  a2 <- AA
+    EXPORT_PC()                            #  before fetch: export the PC
+    GET_VREG(a1, a2)                       #  a1 <- vAA (object)
+    # null object?
+    beqz      a1, 1f
+    move      a0, rSELF                    #  a0 <- self
+    JAL(dvmUnlockObject)                   #  v0 <- success for unlock(self, obj)
+    # failed?
+    FETCH_ADVANCE_INST(1)                  #  before throw: advance rPC, load rINST
+    beqz      v0, common_exceptionThrown   #  yes, exception is pending
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+1:
+    FETCH_ADVANCE_INST(1)                  #  before throw: advance rPC, load rINST
+    b         common_errNullObject
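The ordering here matters: the PC is advanced before either failure path throws, so the exception reports the following instruction as the spec requires. In C shape (dvmUnlockObject is the real VM call; every other helper is a stand-in for this sketch):

    #include <stdbool.h>

    typedef struct Thread Thread;
    typedef struct Object Object;

    bool dvmUnlockObject(Thread *self, Object *obj);   /* real entry point */
    void exportPC(Thread *self);                       /* stand-ins below */
    void advancePC(Thread *self, int codeUnits);
    void throwNullPointer(Thread *self);
    void rethrowPending(Thread *self);

    static void monitorExit(Thread *self, Object *obj)
    {
        exportPC(self);              /* stack traces need a valid PC */
        if (obj == NULL) {
            advancePC(self, 1);      /* throw as if at the next insn */
            throwNullPointer(self);
            return;
        }
        bool ok = dvmUnlockObject(self, obj);
        advancePC(self, 1);          /* advance before a failed unlock throws */
        if (!ok)
            rethrowPending(self);
    }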
diff --git a/vm/mterp/mips/OP_MOVE.S b/vm/mterp/mips/OP_MOVE.S
new file mode 100644
index 0000000..dbf7ea4
--- /dev/null
+++ b/vm/mterp/mips/OP_MOVE.S
@@ -0,0 +1,10 @@
+%verify "executed"
+    /* for move, move-object, long-to-int */
+    /* op vA, vB */
+    GET_OPB(a1)                            #  a1 <- B from 15:12
+    GET_OPA4(a0)                           #  a0 <- A from 11:8
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    GET_VREG(a2, a1)                       #  a2 <- fp[B]
+    GET_INST_OPCODE(t0)                    #  t0 <- opcode from rINST
+    SET_VREG_GOTO(a2, a0, t0)              #  fp[A] <- a2
+
diff --git a/vm/mterp/mips/OP_MOVE_16.S b/vm/mterp/mips/OP_MOVE_16.S
new file mode 100644
index 0000000..8410b93
--- /dev/null
+++ b/vm/mterp/mips/OP_MOVE_16.S
@@ -0,0 +1,10 @@
+%verify "executed"
+    /* for: move/16, move-object/16 */
+    /* op vAAAA, vBBBB */
+    FETCH(a1, 2)                           #  a1 <- BBBB
+    FETCH(a0, 1)                           #  a0 <- AAAA
+    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
+    GET_VREG(a2, a1)                       #  a2 <- fp[BBBB]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a2, a0, t0)              #  fp[AAAA] <- a2 and jump
+
diff --git a/vm/mterp/mips/OP_MOVE_EXCEPTION.S b/vm/mterp/mips/OP_MOVE_EXCEPTION.S
new file mode 100644
index 0000000..1040155
--- /dev/null
+++ b/vm/mterp/mips/OP_MOVE_EXCEPTION.S
@@ -0,0 +1,11 @@
+%verify "executed"
+    /* move-exception vAA */
+    GET_OPA(a2)                            #  a2 <- AA
+    LOAD_offThread_exception(a3, rSELF)    #  a3 <- dvmGetException bypass
+    li        a1, 0                        #  a1 <- 0
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    SET_VREG(a3, a2)                       #  fp[AA] <- exception obj
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE_offThread_exception(a1, rSELF)   #  dvmClearException bypass
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
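move-exception is a two-step bypass of dvmGetException/dvmClearException: read the thread's pending-exception slot into the vreg, then null it out. As a sketch with an invented Thread layout:

    typedef struct Object Object;
    typedef struct Thread { Object *exception; } Thread;  /* invented layout */

    static Object *moveException(Thread *self)
    {
        Object *exc = self->exception;   /* dvmGetException bypass */
        self->exception = (Object *)0;   /* dvmClearException bypass */
        return exc;                      /* handler stores this to fp[AA] */
    }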
diff --git a/vm/mterp/mips/OP_MOVE_FROM16.S b/vm/mterp/mips/OP_MOVE_FROM16.S
new file mode 100644
index 0000000..d018140
--- /dev/null
+++ b/vm/mterp/mips/OP_MOVE_FROM16.S
@@ -0,0 +1,10 @@
+%verify "executed"
+    /* for: move/from16, move-object/from16 */
+    /* op vAA, vBBBB */
+    FETCH(a1, 1)                           #  a1 <- BBBB
+    GET_OPA(a0)                            #  a0 <- AA
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_VREG(a2, a1)                       #  a2 <- fp[BBBB]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a2, a0, t0)              #  fp[AA] <- a2
+
diff --git a/vm/mterp/mips/OP_MOVE_OBJECT.S b/vm/mterp/mips/OP_MOVE_OBJECT.S
new file mode 100644
index 0000000..7150ed5
--- /dev/null
+++ b/vm/mterp/mips/OP_MOVE_OBJECT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_MOVE.S"
diff --git a/vm/mterp/mips/OP_MOVE_OBJECT_16.S b/vm/mterp/mips/OP_MOVE_OBJECT_16.S
new file mode 100644
index 0000000..c3dfae0
--- /dev/null
+++ b/vm/mterp/mips/OP_MOVE_OBJECT_16.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_MOVE_16.S"
diff --git a/vm/mterp/mips/OP_MOVE_OBJECT_FROM16.S b/vm/mterp/mips/OP_MOVE_OBJECT_FROM16.S
new file mode 100644
index 0000000..1ec1ae9
--- /dev/null
+++ b/vm/mterp/mips/OP_MOVE_OBJECT_FROM16.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_MOVE_FROM16.S"
diff --git a/vm/mterp/mips/OP_MOVE_RESULT.S b/vm/mterp/mips/OP_MOVE_RESULT.S
new file mode 100644
index 0000000..05f40fa
--- /dev/null
+++ b/vm/mterp/mips/OP_MOVE_RESULT.S
@@ -0,0 +1,9 @@
+%verify "executed"
+    /* for: move-result, move-result-object */
+    /* op vAA */
+    GET_OPA(a2)                            #  a2 <- AA
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    LOAD_rSELF_retval(a0)                  #  a0 <- self->retval.i
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, a2, t0)              #  fp[AA] <- a0
+
diff --git a/vm/mterp/mips/OP_MOVE_RESULT_OBJECT.S b/vm/mterp/mips/OP_MOVE_RESULT_OBJECT.S
new file mode 100644
index 0000000..74aa091
--- /dev/null
+++ b/vm/mterp/mips/OP_MOVE_RESULT_OBJECT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_MOVE_RESULT.S"
diff --git a/vm/mterp/mips/OP_MOVE_RESULT_WIDE.S b/vm/mterp/mips/OP_MOVE_RESULT_WIDE.S
new file mode 100644
index 0000000..8a548d1
--- /dev/null
+++ b/vm/mterp/mips/OP_MOVE_RESULT_WIDE.S
@@ -0,0 +1,11 @@
+%verify "executed"
+    /* move-result-wide vAA */
+    GET_OPA(a2)                            #  a2 <- AA
+    addu      a3, rSELF, offThread_retval  #  a3 <- &self->retval
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[AA]
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- retval.j
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(a0, a1, a2)                    #  fp[AA] <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
diff --git a/vm/mterp/mips/OP_MOVE_WIDE.S b/vm/mterp/mips/OP_MOVE_WIDE.S
new file mode 100644
index 0000000..7470061
--- /dev/null
+++ b/vm/mterp/mips/OP_MOVE_WIDE.S
@@ -0,0 +1,13 @@
+%verify "executed"
+    /* move-wide vA, vB */
+    /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
+    GET_OPA4(a2)                           #  a2 <- A(+)
+    GET_OPB(a3)                            #  a3 <- B
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[A]
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[B]
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(a0, a1, a2)                    #  fp[A] <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
diff --git a/vm/mterp/mips/OP_MOVE_WIDE_16.S b/vm/mterp/mips/OP_MOVE_WIDE_16.S
new file mode 100644
index 0000000..bdd9f26
--- /dev/null
+++ b/vm/mterp/mips/OP_MOVE_WIDE_16.S
@@ -0,0 +1,13 @@
+%verify "executed"
+    /* move-wide/16 vAAAA, vBBBB */
+    /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
+    FETCH(a3, 2)                           #  a3 <- BBBB
+    FETCH(a2, 1)                           #  a2 <- AAAA
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BBBB]
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[AAAA]
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[BBBB]
+    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(a0, a1, a2)                    #  fp[AAAA] <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
diff --git a/vm/mterp/mips/OP_MOVE_WIDE_FROM16.S b/vm/mterp/mips/OP_MOVE_WIDE_FROM16.S
new file mode 100644
index 0000000..44251f4
--- /dev/null
+++ b/vm/mterp/mips/OP_MOVE_WIDE_FROM16.S
@@ -0,0 +1,13 @@
+%verify "executed"
+    /* move-wide/from16 vAA, vBBBB */
+    /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
+    FETCH(a3, 1)                           #  a3 <- BBBB
+    GET_OPA(a2)                            #  a2 <- AA
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BBBB]
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[AA]
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[BBBB]
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(a0, a1, a2)                    #  fp[AA] <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
diff --git a/vm/mterp/mips/OP_MUL_DOUBLE.S b/vm/mterp/mips/OP_MUL_DOUBLE.S
new file mode 100644
index 0000000..565ca57
--- /dev/null
+++ b/vm/mterp/mips/OP_MUL_DOUBLE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflopWide.S" {"instr":"JAL(__muldf3)", "instr_f":"mul.d fv0, fa0, fa1"}
diff --git a/vm/mterp/mips/OP_MUL_DOUBLE_2ADDR.S b/vm/mterp/mips/OP_MUL_DOUBLE_2ADDR.S
new file mode 100644
index 0000000..8d1dac1
--- /dev/null
+++ b/vm/mterp/mips/OP_MUL_DOUBLE_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflopWide2addr.S" {"instr":"JAL(__muldf3)", "instr_f":"mul.d fv0, fa0, fa1"}
diff --git a/vm/mterp/mips/OP_MUL_FLOAT.S b/vm/mterp/mips/OP_MUL_FLOAT.S
new file mode 100644
index 0000000..af9bb3b
--- /dev/null
+++ b/vm/mterp/mips/OP_MUL_FLOAT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflop.S" {"instr":"JAL(__mulsf3)", "instr_f":"mul.s fv0, fa0, fa1"}
diff --git a/vm/mterp/mips/OP_MUL_FLOAT_2ADDR.S b/vm/mterp/mips/OP_MUL_FLOAT_2ADDR.S
new file mode 100644
index 0000000..726e8a4
--- /dev/null
+++ b/vm/mterp/mips/OP_MUL_FLOAT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflop2addr.S" {"instr":"JAL(__mulsf3)", "instr_f":"mul.s fv0, fa0, fa1"}
diff --git a/vm/mterp/mips/OP_MUL_INT.S b/vm/mterp/mips/OP_MUL_INT.S
new file mode 100644
index 0000000..d9d6d2a
--- /dev/null
+++ b/vm/mterp/mips/OP_MUL_INT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop.S" {"instr":"mul a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_MUL_INT_2ADDR.S b/vm/mterp/mips/OP_MUL_INT_2ADDR.S
new file mode 100644
index 0000000..bbf4d77
--- /dev/null
+++ b/vm/mterp/mips/OP_MUL_INT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop2addr.S" {"instr":"mul a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_MUL_INT_LIT16.S b/vm/mterp/mips/OP_MUL_INT_LIT16.S
new file mode 100644
index 0000000..654e76d
--- /dev/null
+++ b/vm/mterp/mips/OP_MUL_INT_LIT16.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopLit16.S" {"instr":"mul a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_MUL_INT_LIT8.S b/vm/mterp/mips/OP_MUL_INT_LIT8.S
new file mode 100644
index 0000000..c0278ae
--- /dev/null
+++ b/vm/mterp/mips/OP_MUL_INT_LIT8.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopLit8.S" {"instr":"mul a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_MUL_LONG.S b/vm/mterp/mips/OP_MUL_LONG.S
new file mode 100644
index 0000000..c16a230
--- /dev/null
+++ b/vm/mterp/mips/OP_MUL_LONG.S
@@ -0,0 +1,41 @@
+%verify "executed"
+    /*
+     * Signed 64-bit integer multiply.
+     *         a1   a0
+     *   x     a3   a2
+     *   -------------
+     *       a2a1 a2a0
+     *       a3a0
+     *  a3a1 (<= unused)
+     *  ---------------
+     *         v1   v0
+     */
+    /* mul-long vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    and       t0, a0, 255                  #  t0 <- BB
+    srl       t1, a0, 8                    #  t1 <- CC
+    EAS2(t0, rFP, t0)                      #  t0 <- &fp[BB]
+    LOAD64(a0, a1, t0)                     #  a0/a1 <- vBB/vBB+1
+
+    EAS2(t1, rFP, t1)                      #  t1 <- &fp[CC]
+    LOAD64(a2, a3, t1)                     #  a2/a3 <- vCC/vCC+1
+
+    mul       v1, a3, a0                   #  v1= a3a0
+    multu     a2, a0
+    mfhi      t1
+    mflo      v0                           #  v0= a2a0
+    mul       t0, a2, a1                   #  t0= a2a1
+    addu      v1, v1, t1                   #  v1+= hi(a2a0)
+    addu      v1, v1, t0                   #  v1 = a3a0 + hi(a2a0) + a2a1
+
+    GET_OPA(a0)                            #  a0 <- AA
+    EAS2(a0, rFP, a0)                      #  a0 <- &fp[AA]
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    b         .L${opcode}_finish
+%break
+
+.L${opcode}_finish:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(v0, v1, a0)                    #  vAA::vAA+1 <- v0(low) :: v1(high)
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
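The partial-product diagram above reduces a 64x64 multiply to three 32-bit multiplies plus the carry-out of the unsigned low product; a3a1 only influences bits above 64 and is dropped. The same arithmetic as a C sketch, which also shows why signedness doesn't matter for the low 64 bits:

    #include <stdint.h>

    /* vBB = hi1:lo1 (a1:a0), vCC = hi2:lo2 (a3:a2); low 64 bits of product. */
    static uint64_t mulLong(uint32_t lo1, uint32_t hi1, uint32_t lo2, uint32_t hi2)
    {
        uint64_t low = (uint64_t)lo2 * lo1;        /* multu a2, a0           */
        uint32_t high = hi2 * lo1                  /* mul   v1, a3, a0       */
                      + lo2 * hi1                  /* mul   t0, a2, a1       */
                      + (uint32_t)(low >> 32);     /* mfhi  t1 (carry)       */
        return ((uint64_t)high << 32) | (uint32_t)low;
    }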
diff --git a/vm/mterp/mips/OP_MUL_LONG_2ADDR.S b/vm/mterp/mips/OP_MUL_LONG_2ADDR.S
new file mode 100644
index 0000000..85de7be
--- /dev/null
+++ b/vm/mterp/mips/OP_MUL_LONG_2ADDR.S
@@ -0,0 +1,28 @@
+%verify "executed"
+    /*
+     * See comments in OP_MUL_LONG.S
+     */
+    /* mul-long/2addr vA, vB */
+    GET_OPA4(t0)                           #  t0 <- A+
+
+    EAS2(t0, rFP, t0)                      #  t0 <- &fp[A]
+    LOAD64(a0, a1, t0)                     #  vA.low / high
+
+    GET_OPB(t1)                            #  t1 <- B
+    EAS2(t1, rFP, t1)                      #  t1 <- &fp[B]
+    LOAD64(a2, a3, t1)                     #  vB.low / high
+
+    mul       v1, a3, a0                   #  v1= a3a0
+    multu     a2, a0
+    mfhi      t1
+    mflo      v0                           #  v0= a2a0
+    mul       t2, a2, a1                   #  t2= a2a1
+    addu      v1, v1, t1                   #  v1= a3a0 + hi(a2a0)
+    addu      v1, v1, t2                   #  v1= v1 + a2a1;
+
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    STORE64(v0, v1, t0)                    #  vA/vA+1 <- v0 (low) / v1 (high)
+    GOTO_OPCODE(t1)                        #  jump to next instruction
+
diff --git a/vm/mterp/mips/OP_NEG_DOUBLE.S b/vm/mterp/mips/OP_NEG_DOUBLE.S
new file mode 100644
index 0000000..5707c65
--- /dev/null
+++ b/vm/mterp/mips/OP_NEG_DOUBLE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/unopWide.S" {"instr":"addu a1, a1, 0x80000000"}
diff --git a/vm/mterp/mips/OP_NEG_FLOAT.S b/vm/mterp/mips/OP_NEG_FLOAT.S
new file mode 100644
index 0000000..7e25e55
--- /dev/null
+++ b/vm/mterp/mips/OP_NEG_FLOAT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/unop.S" {"instr":"addu a0, a0, 0x80000000"}
diff --git a/vm/mterp/mips/OP_NEG_INT.S b/vm/mterp/mips/OP_NEG_INT.S
new file mode 100644
index 0000000..da87a6a
--- /dev/null
+++ b/vm/mterp/mips/OP_NEG_INT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/unop.S" {"instr":"negu a0, a0"}
diff --git a/vm/mterp/mips/OP_NEG_LONG.S b/vm/mterp/mips/OP_NEG_LONG.S
new file mode 100644
index 0000000..a562987
--- /dev/null
+++ b/vm/mterp/mips/OP_NEG_LONG.S
@@ -0,0 +1,3 @@
+%verify "executed"
+%include "mips/unopWide.S" {"result0":"v0", "result1":"v1", "preinstr":"negu v0, a0", "instr":"negu v1, a1; sltu a0, zero, v0; subu v1, v1, a0"}
+
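Negating a 64-bit value held in a 32-bit register pair needs a borrow term: -x has low word -lo, and the high word is -hi minus one whenever the low word is nonzero. That is exactly what the negu/sltu/subu triple in the include computes; in C:

    #include <stdint.h>

    static void negLong(uint32_t lo, uint32_t hi, uint32_t *outLo, uint32_t *outHi)
    {
        uint32_t rlo = 0u - lo;      /* negu v0, a0 */
        uint32_t rhi = 0u - hi;      /* negu v1, a1 */
        rhi -= (rlo != 0);           /* sltu a0, zero, v0; subu v1, v1, a0 */
        *outLo = rlo;
        *outHi = rhi;
    }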
diff --git a/vm/mterp/mips/OP_NEW_ARRAY.S b/vm/mterp/mips/OP_NEW_ARRAY.S
new file mode 100644
index 0000000..5d01794
--- /dev/null
+++ b/vm/mterp/mips/OP_NEW_ARRAY.S
@@ -0,0 +1,61 @@
+%verify "executed"
+%verify "negative array length"
+%verify "allocation fails"
+    /*
+     * Allocate an array of objects, specified with the array class
+     * and a count.
+     *
+     * The verifier guarantees that this is an array class, so we don't
+     * check for it here.
+     */
+    /* new-array vA, vB, class@CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    FETCH(a2, 1)                           #  a2 <- CCCC
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- pDvmDex
+    GET_VREG(a1, a0)                       #  a1 <- vB (array length)
+    LOAD_base_offDvmDex_pResClasses(a3, a3) #  a3 <- pDvmDex->pResClasses
+    LOAD_eas2(a0, a3, a2)                  #  a0 <- resolved class
+    # check length
+    bltz      a1, common_errNegativeArraySize #  negative length, bail - len in a1
+    EXPORT_PC()                            #  req'd for resolve, alloc
+    # already resolved?
+    beqz      a0, .L${opcode}_resolve
+
+    /*
+     * Finish allocation.
+     *
+     *  a0 holds class
+     *  a1 holds array length
+     */
+.L${opcode}_finish:
+    li        a2, ALLOC_DONT_TRACK         #  don't track in local refs table
+    JAL(dvmAllocArrayByClass)              #  v0 <- call(clazz, length, flags)
+    GET_OPA4(a2)                           #  a2 <- A+
+    # failed?
+    beqz      v0, common_exceptionThrown   #  yes, handle the exception
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG(v0, a2)                       #  vA <- v0
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+%break
+
+    /*
+     * Resolve class.  (This is an uncommon case.)
+     *
+     *  a1 holds array length
+     *  a2 holds class ref CCCC
+     */
+.L${opcode}_resolve:
+    LOAD_rSELF_method(a3)                  #  a3 <- self->method
+    move      rOBJ, a1                     #  rOBJ <- length (save)
+    move      a1, a2                       #  a1 <- CCCC
+    li        a2, 0                        #  a2 <- false
+    LOAD_base_offMethod_clazz(a0, a3)      #  a0 <- method->clazz
+    JAL(dvmResolveClass)                   #  v0 <- call(clazz, ref)
+    move      a1, rOBJ                     #  a1 <- length (restore)
+    # got null?
+    beqz      v0, common_exceptionThrown   #  yes, handle exception
+    move      a0, v0
+    b         .L${opcode}_finish           #  continue with ${opcode}_finish
+
+
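Stripped of register shuffling, the handler is a cached-lookup fast path with a resolve slow path. dvmResolveClass and dvmAllocArrayByClass are the real entry points it calls; the remaining declarations and the flag value are stand-ins for this sketch:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef struct ClassObject ClassObject;
    typedef struct Object Object;
    typedef struct Method { ClassObject *clazz; } Method;  /* trimmed */

    enum { ALLOC_DONT_TRACK = 2 };  /* placeholder value for the sketch */

    ClassObject *dvmResolveClass(const ClassObject *referrer, uint32_t classIdx,
                                 bool fromUnverifiedConstant);
    Object *dvmAllocArrayByClass(ClassObject *arrayClass, size_t length,
                                 int allocFlags);
    ClassObject *lookupResolvedClass(const Method *m, uint32_t classIdx);

    static Object *newArray(const Method *method, uint32_t classIdx, int32_t len)
    {
        if (len < 0)
            return NULL;                      /* common_errNegativeArraySize */
        ClassObject *clazz = lookupResolvedClass(method, classIdx);
        if (clazz == NULL) {                  /* uncommon: resolve now */
            clazz = dvmResolveClass(method->clazz, classIdx, false);
            if (clazz == NULL)
                return NULL;                  /* exception already pending */
        }
        return dvmAllocArrayByClass(clazz, (size_t)len, ALLOC_DONT_TRACK);
    }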
diff --git a/vm/mterp/mips/OP_NEW_ARRAY_JUMBO.S b/vm/mterp/mips/OP_NEW_ARRAY_JUMBO.S
new file mode 100644
index 0000000..6761505
--- /dev/null
+++ b/vm/mterp/mips/OP_NEW_ARRAY_JUMBO.S
@@ -0,0 +1,69 @@
+%verify "executed"
+%verify "negative array length"
+%verify "allocation fails"
+    /*
+     * Allocate an array of objects, specified with the array class
+     * and a count.
+     *
+     * The verifier guarantees that this is an array class, so we don't
+     * check for it here.
+     */
+    /* new-array/jumbo vBBBB, vCCCC, class@AAAAAAAA */
+    FETCH(a2, 1)                           # a2<- aaaa (lo)
+    FETCH(a3, 2)                           # a3<- AAAA (hi)
+    FETCH(a0, 4)                           # a0<- CCCC
+    sll       a3, a3, 16
+    or      a2, a2, a3                     # a2<- AAAAaaaa
+
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- pDvmDex
+    GET_VREG(a1, a0)                       #  a1 <- vCCCC (array length)
+    LOAD_base_offDvmDex_pResClasses(a3, a3) #  a3 <- pDvmDex->pResClasses
+    LOAD_eas2(a0, a3, a2)                  #  a0 <- resolved class
+    # check length
+    bltz      a1, common_errNegativeArraySize #  negative length, bail - len in a1
+    EXPORT_PC()                            #  req'd for resolve, alloc
+    # already resolved?
+    beqz      a0, .L${opcode}_resolve      #  not resolved,
+    b         .L${opcode}_finish
+%break
+
+    /*
+     * Finish allocation.
+     *
+     *  a0 holds class
+     *  a1 holds array length
+     */
+.L${opcode}_finish:
+    li        a2, ALLOC_DONT_TRACK         #  don't track in local refs table
+    JAL(dvmAllocArrayByClass)              #  v0 <- call(clazz, length, flags)
+    FETCH(a2, 3)                           # a2<- BBBB
+    # failed?
+    beqz      v0, common_exceptionThrown   #  yes, handle the exception
+    FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG(v0, a2)                       #  vBBBB <- v0
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+    /*
+     * Resolve class.  (This is an uncommon case.)
+     *
+     *  a1 holds array length
+     *  a2 holds class ref AAAAAAAA
+     */
+.L${opcode}_resolve:
+    LOAD_rSELF_method(a3)                  #  a3 <- self->method
+    move      rOBJ, a1                     #  rOBJ <- length (save)
+    move      a1, a2                       #  a1 <- AAAAAAAA
+    li        a2, 0                        #  a2 <- false
+    LOAD_base_offMethod_clazz(a0, a3)      #  a0 <- method->clazz
+    JAL(dvmResolveClass)                   #  v0 <- call(clazz, ref)
+    move      a1, rOBJ                     #  a1 <- length (restore)
+    # got null?
+    beqz      v0, common_exceptionThrown   #  yes, handle exception
+    move      a0, v0
+    b         .L${opcode}_finish           #  continue with ${opcode}_finish
+
+
diff --git a/vm/mterp/mips/OP_NEW_INSTANCE.S b/vm/mterp/mips/OP_NEW_INSTANCE.S
new file mode 100644
index 0000000..ca946ad
--- /dev/null
+++ b/vm/mterp/mips/OP_NEW_INSTANCE.S
@@ -0,0 +1,106 @@
+%verify "executed"
+%verify "class not resolved"
+%verify "class cannot be resolved"
+%verify "class not initialized"
+%verify "class fails to initialize"
+%verify "class already resolved/initialized"
+%verify "class is abstract or interface"
+%verify "allocation fails"
+    /*
+     * Create a new instance of a class.
+     */
+    # new-instance vAA, class              /* BBBB */
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- pDvmDex
+    FETCH(a1, 1)                           #  a1 <- BBBB
+    LOAD_base_offDvmDex_pResClasses(a3, a3) #  a3 <- pDvmDex->pResClasses
+    LOAD_eas2(a0, a3, a1)                  #  a0 <- resolved class
+#if defined(WITH_JIT)
+    EAS2(rBIX, a3, a1)                     #  rBIX <- &resolved_class
+#endif
+    EXPORT_PC()                            #  req'd for init, resolve, alloc
+    # already resolved?
+    beqz      a0, .L${opcode}_resolve      #  no, resolve it now
+.L${opcode}_resolved:                      #  a0=class
+    lbu       a1, offClassObject_status(a0) #  a1 <- ClassStatus enum
+    # has class been initialized?
+    li        t0, CLASS_INITIALIZED
+    move      rOBJ, a0                     #  save a0
+    bne       a1, t0, .L${opcode}_needinit #  no, init class now
+
+.L${opcode}_initialized:                   #  a0=class
+    LOAD_base_offClassObject_accessFlags(a3, a0) #  a3 <- clazz->accessFlags
+    li        a1, ALLOC_DONT_TRACK         #  flags for alloc call
+    # a0=class
+    JAL(dvmAllocObject)                    #  v0 <- new object
+    GET_OPA(a3)                            #  a3 <- AA
+#if defined(WITH_JIT)
+    /*
+     * The JIT needs the class to be fully resolved before it can
+     * include this instruction in a trace.
+     */
+    lhu       a1, offThread_subMode(rSELF)
+    beqz      v0, common_exceptionThrown   #  yes, handle the exception
+    and       a1, kSubModeJitTraceBuild    #  under construction?
+    bnez      a1, .L${opcode}_jitCheck
+#else
+    # failed?
+    beqz      v0, common_exceptionThrown   #  yes, handle the exception
+#endif
+    b         .L${opcode}_continue
+
+%break
+
+.L${opcode}_continue:
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG(v0, a3)                       #  vAA <- v0
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+#if defined(WITH_JIT)
+    /*
+     * Check to see if we need to stop the trace building early.
+     * v0: new object
+     * a3: vAA
+     */
+.L${opcode}_jitCheck:
+    lw        a1, 0(rBIX)                  #  reload resolved class
+    # okay?
+    bnez      a1, .L${opcode}_continue     #  yes, finish
+    move      rOBJ, v0                     #  preserve new object
+    move      rBIX, a3                     #  preserve vAA
+    move      a0, rSELF
+    move      a1, rPC
+    JAL(dvmJitEndTraceSelect)              #  (self, pc)
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG(rOBJ, rBIX)                   #  vAA <- new object
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+#endif
+
+    /*
+     * Class initialization required.
+     *
+     *  a0 holds class object
+     */
+.L${opcode}_needinit:
+    JAL(dvmInitClass)                      #  initialize class
+    move      a0, rOBJ                     #  restore a0
+    # check boolean result
+    bnez      v0, .L${opcode}_initialized  #  success, continue
+    b         common_exceptionThrown       #  failed, deal with init exception
+
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     *  a1 holds BBBB
+     */
+.L${opcode}_resolve:
+    LOAD_rSELF_method(a3)                  #  a3 <- self->method
+    li        a2, 0                        #  a2 <- false
+    LOAD_base_offMethod_clazz(a0, a3)      #  a0 <- method->clazz
+    JAL(dvmResolveClass)                   #  v0 <- resolved ClassObject ptr
+    move      a0, v0
+    # got null?
+    bnez      v0, .L${opcode}_resolved     #  no, continue
+    b         common_exceptionThrown       #  yes, handle exception
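The WITH_JIT detour only matters while a trace is being built: if the cached class slot (rBIX) no longer holds a resolved class, the trace is cut short with dvmJitEndTraceSelect before the result is committed. Condensed to a C sketch (dvmJitEndTraceSelect matches the call made above; setVReg is a stand-in):

    #include <stdint.h>

    typedef struct Thread Thread;
    typedef struct ClassObject ClassObject;
    typedef struct Object Object;

    void dvmJitEndTraceSelect(Thread *self, const uint16_t *pc);
    void setVReg(Thread *self, uint32_t vreg, Object *value);  /* stand-in */

    static void newInstanceJitCheck(Thread *self, ClassObject *const *resolvedSlot,
                                    Object *newObj, uint32_t vAA,
                                    const uint16_t *pc)
    {
        if (*resolvedSlot == NULL)            /* class not fully resolved */
            dvmJitEndTraceSelect(self, pc);   /* stop trace building early */
        setVReg(self, vAA, newObj);           /* then finish normally */
    }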
diff --git a/vm/mterp/mips/OP_NEW_INSTANCE_JUMBO.S b/vm/mterp/mips/OP_NEW_INSTANCE_JUMBO.S
new file mode 100644
index 0000000..a00991e
--- /dev/null
+++ b/vm/mterp/mips/OP_NEW_INSTANCE_JUMBO.S
@@ -0,0 +1,108 @@
+%verify "executed"
+%verify "class not resolved"
+%verify "class cannot be resolved"
+%verify "class not initialized"
+%verify "class fails to initialize"
+%verify "class already resolved/initialized"
+%verify "class is abstract or interface"
+%verify "allocation fails"
+    /*
+     * Create a new instance of a class.
+     */
+    /* new-instance/jumbo vBBBB, class@AAAAAAAA */
+    FETCH(a0, 1)                           # a0<- aaaa (lo)
+    FETCH(a1, 2)                           # a1<- AAAA (hi)
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- pDvmDex
+    LOAD_base_offDvmDex_pResClasses(a3, a3) #  a3 <- pDvmDex->pResClasses
+    sll       a1, a1, 16
+    or        a1, a0, a1                   # a1<- AAAAaaaa
+    LOAD_eas2(a0, a3, a1)                  #  a0 <- resolved class
+#if defined(WITH_JIT)
+    EAS2(rBIX, a3, a1)                     #  rBIX <- &resolved_class
+#endif
+    EXPORT_PC()                            #  req'd for init, resolve, alloc
+    # already resolved?
+    beqz      a0, .L${opcode}_resolve      #  no, resolve it now
+.L${opcode}_resolved:                   #  a0=class
+    lbu       a1, offClassObject_status(a0) #  a1 <- ClassStatus enum
+    # has class been initialized?
+    li        t0, CLASS_INITIALIZED
+    move      rOBJ, a0                     #  save a0
+    bne       a1, t0, .L${opcode}_needinit #  no, init class now
+
+.L${opcode}_initialized:                #  a0=class
+    LOAD_base_offClassObject_accessFlags(a3, a0) #  a3 <- clazz->accessFlags
+    li        a1, ALLOC_DONT_TRACK         #  flags for alloc call
+    # a0=class
+    JAL(dvmAllocObject)                    #  v0 <- new object
+    FETCH(a3, 3)                           # a3<- BBBB
+#if defined(WITH_JIT)
+    /*
+     * The JIT needs the class to be fully resolved before it can
+     * include this instruction in a trace.
+     */
+    lhu       a1, offThread_subMode(rSELF)
+    beqz      v0, common_exceptionThrown   #  yes, handle the exception
+    and       a1, kSubModeJitTraceBuild    #  under construction?
+    bnez      a1, .L${opcode}_jitCheck
+#else
+    # failed?
+    beqz      v0, common_exceptionThrown   #  yes, handle the exception
+#endif
+    b         .L${opcode}_continue
+
+%break
+
+.L${opcode}_continue:
+    FETCH_ADVANCE_INST(4)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG(v0, a3)                       #  vBBBB <- v0
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+#if defined(WITH_JIT)
+    /*
+     * Check to see if we need to stop the trace building early.
+     * v0: new object
+     * a3: vBBBB
+     */
+.L${opcode}_jitCheck:
+    lw        a1, 0(rBIX)                  #  reload resolved class
+    # okay?
+    bnez      a1, .L${opcode}_continue     #  yes, finish
+    move      rOBJ, v0                     #  preserve new object
+    move      rBIX, a3                     #  preserve vBBBB
+    move      a0, rSELF
+    move      a1, rPC
+    JAL(dvmJitEndTraceSelect)              #  (self, pc)
+    FETCH_ADVANCE_INST(4)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG(rOBJ, rBIX)                   #  vBBBB <- new object
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+#endif
+
+    /*
+     * Class initialization required.
+     *
+     *  a0 holds class object
+     */
+.L${opcode}_needinit:
+    JAL(dvmInitClass)                      #  initialize class
+    move      a0, rOBJ                     #  restore a0
+    # check boolean result
+    bnez      v0, .L${opcode}_initialized  #  success, continue
+    b         common_exceptionThrown       #  failed, deal with init exception
+
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     *  a1 holds AAAAAAAA
+     */
+.L${opcode}_resolve:
+    LOAD_rSELF_method(a3)                  #  a3 <- self->method
+    li        a2, 0                        #  a2 <- false
+    LOAD_base_offMethod_clazz(a0, a3)      #  a0 <- method->clazz
+    JAL(dvmResolveClass)                   #  v0 <- resolved ClassObject ptr
+    move      a0, v0
+    # got null?
+    bnez      v0, .L${opcode}_resolved     #  no, continue
+    b         common_exceptionThrown       #  yes, handle exception
diff --git a/vm/mterp/mips/OP_NOP.S b/vm/mterp/mips/OP_NOP.S
new file mode 100644
index 0000000..38a5eb4
--- /dev/null
+++ b/vm/mterp/mips/OP_NOP.S
@@ -0,0 +1,13 @@
+%verify "executed"
+    FETCH_ADVANCE_INST(1)                  #  advance to next instr, load rINST
+    GET_INST_OPCODE(t0)
+    GOTO_OPCODE(t0)                        #  execute it
+
+#ifdef ASSIST_DEBUGGER
+    /* insert fake function header to help gdb find the stack frame */
+    .type dalvik_inst, @function
+dalvik_inst:
+    .ent dalvik_inst
+    .end dalvik_inst
+#endif
+
diff --git a/vm/mterp/mips/OP_NOT_INT.S b/vm/mterp/mips/OP_NOT_INT.S
new file mode 100644
index 0000000..3402d19
--- /dev/null
+++ b/vm/mterp/mips/OP_NOT_INT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/unop.S" {"instr":"not a0, a0"}
diff --git a/vm/mterp/mips/OP_NOT_LONG.S b/vm/mterp/mips/OP_NOT_LONG.S
new file mode 100644
index 0000000..8947c4e
--- /dev/null
+++ b/vm/mterp/mips/OP_NOT_LONG.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/unopWide.S" {"preinstr":"not a0, a0", "instr":"not a1, a1"}
diff --git a/vm/mterp/mips/OP_OR_INT.S b/vm/mterp/mips/OP_OR_INT.S
new file mode 100644
index 0000000..683242f
--- /dev/null
+++ b/vm/mterp/mips/OP_OR_INT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop.S" {"instr":"or a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_OR_INT_2ADDR.S b/vm/mterp/mips/OP_OR_INT_2ADDR.S
new file mode 100644
index 0000000..e63835b
--- /dev/null
+++ b/vm/mterp/mips/OP_OR_INT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop2addr.S" {"instr":"or a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_OR_INT_LIT16.S b/vm/mterp/mips/OP_OR_INT_LIT16.S
new file mode 100644
index 0000000..c12495d
--- /dev/null
+++ b/vm/mterp/mips/OP_OR_INT_LIT16.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopLit16.S" {"instr":"or a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_OR_INT_LIT8.S b/vm/mterp/mips/OP_OR_INT_LIT8.S
new file mode 100644
index 0000000..f2ac2d0
--- /dev/null
+++ b/vm/mterp/mips/OP_OR_INT_LIT8.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopLit8.S" {"instr":"or a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_OR_LONG.S b/vm/mterp/mips/OP_OR_LONG.S
new file mode 100644
index 0000000..8b080f6
--- /dev/null
+++ b/vm/mterp/mips/OP_OR_LONG.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopWide.S" {"preinstr":"or a0, a0, a2", "instr":"or a1, a1, a3"}
diff --git a/vm/mterp/mips/OP_OR_LONG_2ADDR.S b/vm/mterp/mips/OP_OR_LONG_2ADDR.S
new file mode 100644
index 0000000..ef37dbf
--- /dev/null
+++ b/vm/mterp/mips/OP_OR_LONG_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopWide2addr.S" {"preinstr":"or a0, a0, a2", "instr":"or a1, a1, a3"}
diff --git a/vm/mterp/mips/OP_PACKED_SWITCH.S b/vm/mterp/mips/OP_PACKED_SWITCH.S
new file mode 100644
index 0000000..add1dac
--- /dev/null
+++ b/vm/mterp/mips/OP_PACKED_SWITCH.S
@@ -0,0 +1,34 @@
+%default { "func":"dvmInterpHandlePackedSwitch" }
%verify "executed"
+    /*
+     * Handle a packed-switch or sparse-switch instruction.  In both cases
+     * we decode it and hand it off to a helper function.
+     *
+     * We don't really expect backward branches in a switch statement, but
+     * they're perfectly legal, so we check for them here.
+     *
+     * When the JIT is present, all targets are treated as potential
+     * trace heads regardless of branch direction.
+     *
+     * for: packed-switch, sparse-switch
+     */
+    /* op vAA, +BBBB */
+    FETCH(a0, 1)                           #  a0 <- bbbb (lo)
+    FETCH(a1, 2)                           #  a1 <- BBBB (hi)
+    GET_OPA(a3)                            #  a3 <- AA
+    sll       t0, a1, 16
+    or        a0, a0, t0                   #  a0 <- BBBBbbbb
+    GET_VREG(a1, a3)                       #  a1 <- vAA
+    EAS1(a0, rPC, a0)                      #  a0 <- PC + BBBBbbbb*2
+    JAL($func)                             #  v0 <- code-unit branch offset
+    addu      a1, v0, v0                   #  a1 <- byte offset
+    bgtz      a1, 1f
+    lw        rIBASE, offThread_curHandlerTable(rSELF) #  refresh handler base
+1:
+    FETCH_ADVANCE_INST_RB(a1)              #  update rPC, load rINST
+#if defined(WITH_JIT)
+    lw        a0, offThread_pJitProfTable(rSELF)
+    bnez      a0, common_updateProfile
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
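The helper receives a pointer to the switch payload and the tested value, and returns a branch offset in 16-bit code units; the handler then doubles it (addu a1, v0, v0) into a byte offset. A C sketch of the payload lookup, derived from the bytecode format rather than the VM's actual source:

    #include <stdint.h>

    /* packed-switch payload: ident, size, 32-bit first_key, then `size`
     * 32-bit relative targets, all laid out in 16-bit code units
     * (4-byte aligned in practice, so the target cast is safe). */
    static int32_t handlePackedSwitch(const uint16_t *data, int32_t testVal)
    {
        uint16_t size = data[1];
        int32_t firstKey = (int32_t)(data[2] | ((uint32_t)data[3] << 16));
        const int32_t *targets = (const int32_t *)&data[4];
        int32_t index = testVal - firstKey;
        if (index < 0 || index >= (int32_t)size)
            return 3;              /* fall through: the instruction's width */
        return targets[index];     /* code-unit offset from the switch insn */
    }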
diff --git a/vm/mterp/mips/OP_REM_DOUBLE.S b/vm/mterp/mips/OP_REM_DOUBLE.S
new file mode 100644
index 0000000..4329ed3
--- /dev/null
+++ b/vm/mterp/mips/OP_REM_DOUBLE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflopWide.S" {"instr":"JAL(fmod)", "instr_f":"JAL(fmod)"}
diff --git a/vm/mterp/mips/OP_REM_DOUBLE_2ADDR.S b/vm/mterp/mips/OP_REM_DOUBLE_2ADDR.S
new file mode 100644
index 0000000..97cd893
--- /dev/null
+++ b/vm/mterp/mips/OP_REM_DOUBLE_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflopWide2addr.S" {"instr":"JAL(fmod)", "instr_f":"JAL(fmod)"}
diff --git a/vm/mterp/mips/OP_REM_FLOAT.S b/vm/mterp/mips/OP_REM_FLOAT.S
new file mode 100644
index 0000000..e68cfb5
--- /dev/null
+++ b/vm/mterp/mips/OP_REM_FLOAT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflop.S" {"instr":"JAL(fmodf)", "instr_f":"JAL(fmodf)"}
diff --git a/vm/mterp/mips/OP_REM_FLOAT_2ADDR.S b/vm/mterp/mips/OP_REM_FLOAT_2ADDR.S
new file mode 100644
index 0000000..f78cbb3
--- /dev/null
+++ b/vm/mterp/mips/OP_REM_FLOAT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflop2addr.S" {"instr":"JAL(fmodf)", "instr_f":"JAL(fmodf)"}
diff --git a/vm/mterp/mips/OP_REM_INT.S b/vm/mterp/mips/OP_REM_INT.S
new file mode 100644
index 0000000..f1dcf37
--- /dev/null
+++ b/vm/mterp/mips/OP_REM_INT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop.S" {"instr":"div zero, a0, a1; mfhi a0", "chkzero":"1"}
diff --git a/vm/mterp/mips/OP_REM_INT_2ADDR.S b/vm/mterp/mips/OP_REM_INT_2ADDR.S
new file mode 100644
index 0000000..85d616b
--- /dev/null
+++ b/vm/mterp/mips/OP_REM_INT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop2addr.S" {"instr":"div zero, a0, a1; mfhi a0", "chkzero":"1"}
diff --git a/vm/mterp/mips/OP_REM_INT_LIT16.S b/vm/mterp/mips/OP_REM_INT_LIT16.S
new file mode 100644
index 0000000..1f31442
--- /dev/null
+++ b/vm/mterp/mips/OP_REM_INT_LIT16.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopLit16.S" {"instr":"div zero, a0, a1; mfhi a0", "chkzero":"1"}
diff --git a/vm/mterp/mips/OP_REM_INT_LIT8.S b/vm/mterp/mips/OP_REM_INT_LIT8.S
new file mode 100644
index 0000000..4b5bb82
--- /dev/null
+++ b/vm/mterp/mips/OP_REM_INT_LIT8.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopLit8.S" {"instr":"div zero, a0, a1; mfhi a0", "chkzero":"1"}
diff --git a/vm/mterp/mips/OP_REM_LONG.S b/vm/mterp/mips/OP_REM_LONG.S
new file mode 100644
index 0000000..d76221a
--- /dev/null
+++ b/vm/mterp/mips/OP_REM_LONG.S
@@ -0,0 +1,7 @@
+%verify "executed"
+/* __moddi3 returns the remainder in v0/v1 */
+#ifdef HAVE_LITTLE_ENDIAN
+%include "mips/binopWide.S" { "result0":"v0", "result1":"v1", "instr":"JAL(__moddi3)", "chkzero":"1"}
+#else
+%include "mips/binopWide.S" { "arg0":"a1", "arg1":"a0", "arg2":"a3", "arg3":"a2", "result0":"v1", "result1":"v0", "instr":"JAL(__moddi3)", "chkzero":"1"}
+#endif
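The two includes differ only in how the 64-bit operands map onto register pairs: the low word rides in the first register of the pair on little-endian and in the second on big-endian, so the template swaps argN/resultN names rather than changing the math. The mapping as a C sketch:

    #include <stdint.h>

    typedef struct { uint32_t first, second; } RegPair;  /* (a0,a1) or (a2,a3) */

    static RegPair toRegPair(int64_t v)
    {
    #if defined(HAVE_LITTLE_ENDIAN)
        return (RegPair){ (uint32_t)v, (uint32_t)((uint64_t)v >> 32) };
    #else
        return (RegPair){ (uint32_t)((uint64_t)v >> 32), (uint32_t)v };
    #endif
    }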
diff --git a/vm/mterp/mips/OP_REM_LONG_2ADDR.S b/vm/mterp/mips/OP_REM_LONG_2ADDR.S
new file mode 100644
index 0000000..be194a5
--- /dev/null
+++ b/vm/mterp/mips/OP_REM_LONG_2ADDR.S
@@ -0,0 +1,6 @@
+%verify "executed"
+#ifdef HAVE_LITTLE_ENDIAN
+%include "mips/binopWide2addr.S" { "result0":"v0", "result1":"v1", "instr":"JAL(__moddi3)", "chkzero":"1"}
+#else
+%include "mips/binopWide2addr.S" {"arg0":"a1", "arg1":"a0", "arg2":"a3", "arg3":"a2", "result0":"v1", "result1":"v0", "instr":"JAL(__moddi3)", "chkzero":"1"}
+#endif
diff --git a/vm/mterp/mips/OP_RETURN.S b/vm/mterp/mips/OP_RETURN.S
new file mode 100644
index 0000000..acc01cf
--- /dev/null
+++ b/vm/mterp/mips/OP_RETURN.S
@@ -0,0 +1,13 @@
+%verify "executed"
+    /*
+     * Return a 32-bit value.  Copies the return value into the "thread"
+     * structure, then jumps to the return handler.
+     *
+     * for: return, return-object
+     */
+    /* op vAA */
+    GET_OPA(a2)                            #  a2 <- AA
+    GET_VREG(a0, a2)                       #  a0 <- vAA
+    sw        a0, offThread_retval(rSELF)  #  retval.i <- vAA
+    b         common_returnFromMethod
+
diff --git a/vm/mterp/mips/OP_RETURN_OBJECT.S b/vm/mterp/mips/OP_RETURN_OBJECT.S
new file mode 100644
index 0000000..4459668
--- /dev/null
+++ b/vm/mterp/mips/OP_RETURN_OBJECT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_RETURN.S"
diff --git a/vm/mterp/mips/OP_RETURN_VOID.S b/vm/mterp/mips/OP_RETURN_VOID.S
new file mode 100644
index 0000000..781f835
--- /dev/null
+++ b/vm/mterp/mips/OP_RETURN_VOID.S
@@ -0,0 +1,3 @@
+%verify "executed"
+    b         common_returnFromMethod
+
diff --git a/vm/mterp/mips/OP_RETURN_VOID_BARRIER.S b/vm/mterp/mips/OP_RETURN_VOID_BARRIER.S
new file mode 100644
index 0000000..4cb5b9b
--- /dev/null
+++ b/vm/mterp/mips/OP_RETURN_VOID_BARRIER.S
@@ -0,0 +1,3 @@
+%verify "executed"
+    SMP_DMB
+    b         common_returnFromMethod
diff --git a/vm/mterp/mips/OP_RETURN_WIDE.S b/vm/mterp/mips/OP_RETURN_WIDE.S
new file mode 100644
index 0000000..bd93d6a
--- /dev/null
+++ b/vm/mterp/mips/OP_RETURN_WIDE.S
@@ -0,0 +1,13 @@
+%verify "executed"
+    /*
+     * Return a 64-bit value.  Copies the return value into the "thread"
+     * structure, then jumps to the return handler.
+     */
+    /* return-wide vAA */
+    GET_OPA(a2)                            #  a2 <- AA
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[AA]
+    addu      a3, rSELF, offThread_retval  #  a3 <- &self->retval
+    LOAD64(a0, a1, a2)                     #  a0/a1 <- vAA/vAA+1
+    STORE64(a0, a1, a3)                    #  retval <- a0/a1
+    b         common_returnFromMethod
+
diff --git a/vm/mterp/mips/OP_RSUB_INT.S b/vm/mterp/mips/OP_RSUB_INT.S
new file mode 100644
index 0000000..03918ea
--- /dev/null
+++ b/vm/mterp/mips/OP_RSUB_INT.S
@@ -0,0 +1,3 @@
+%verify "executed"
+/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
+%include "mips/binopLit16.S" {"instr":"subu a0, a1, a0"}
diff --git a/vm/mterp/mips/OP_RSUB_INT_LIT8.S b/vm/mterp/mips/OP_RSUB_INT_LIT8.S
new file mode 100644
index 0000000..75d3d40
--- /dev/null
+++ b/vm/mterp/mips/OP_RSUB_INT_LIT8.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopLit8.S" {"instr":"subu a0, a1, a0"}
diff --git a/vm/mterp/mips/OP_SGET.S b/vm/mterp/mips/OP_SGET.S
new file mode 100644
index 0000000..80e1913
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET.S
@@ -0,0 +1,50 @@
+%default { "barrier":"                  #  no-op " }
+%verify "executed"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * General 32-bit SGET handler.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    # op vAA, field                        /* BBBB */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref BBBB
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    # is resolved entry !null?
+    bnez      a0, .L${opcode}_finish
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  BBBB field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() could throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    # success?
+    beqz      v0, common_exceptionThrown   #  no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b        .L${opcode}_finish            # resume
+%break
+
+.L${opcode}_finish:
+    LOAD_base_offStaticField_value(a1, a0) #  a1 <- field value
+    $barrier                               #  acquiring load
+    GET_OPA(a2)                            #  a2 <- AA
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a1, a2, t0)              #  fp[AA] <- a1
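As elsewhere, the shape is cache hit, else resolve, then load; the $barrier slot is a no-op for plain sget and SMP_DMB for the volatile variants, ordering the field load before later reads. A C sketch (dvmResolveStaticField is the real call; the types are trimmed stand-ins):

    #include <stdint.h>

    typedef struct ClassObject ClassObject;
    typedef struct StaticField { uint32_t value; } StaticField;   /* trimmed */

    StaticField *dvmResolveStaticField(const ClassObject *referrer,
                                       uint32_t fieldIdx);

    static uint32_t sget32(StaticField **resolvedCache, const ClassObject *referrer,
                           uint32_t fieldIdx, int isVolatile)
    {
        StaticField *field = resolvedCache[fieldIdx];      /* fast path */
        if (field == NULL) {
            field = dvmResolveStaticField(referrer, fieldIdx);
            if (field == NULL)
                return 0;                                  /* exception pending */
        }
        uint32_t v = field->value;
        if (isVolatile)
            __sync_synchronize();    /* the $barrier slot: SMP_DMB */
        return v;
    }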
diff --git a/vm/mterp/mips/OP_SGET_BOOLEAN.S b/vm/mterp/mips/OP_SGET_BOOLEAN.S
new file mode 100644
index 0000000..86024ec
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET_BOOLEAN.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SGET.S"
diff --git a/vm/mterp/mips/OP_SGET_BOOLEAN_JUMBO.S b/vm/mterp/mips/OP_SGET_BOOLEAN_JUMBO.S
new file mode 100644
index 0000000..2a787a2
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET_BOOLEAN_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SGET_JUMBO.S"
diff --git a/vm/mterp/mips/OP_SGET_BYTE.S b/vm/mterp/mips/OP_SGET_BYTE.S
new file mode 100644
index 0000000..86024ec
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET_BYTE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SGET.S"
diff --git a/vm/mterp/mips/OP_SGET_BYTE_JUMBO.S b/vm/mterp/mips/OP_SGET_BYTE_JUMBO.S
new file mode 100644
index 0000000..2a787a2
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET_BYTE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SGET_JUMBO.S"
diff --git a/vm/mterp/mips/OP_SGET_CHAR.S b/vm/mterp/mips/OP_SGET_CHAR.S
new file mode 100644
index 0000000..86024ec
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET_CHAR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SGET.S"
diff --git a/vm/mterp/mips/OP_SGET_CHAR_JUMBO.S b/vm/mterp/mips/OP_SGET_CHAR_JUMBO.S
new file mode 100644
index 0000000..2a787a2
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET_CHAR_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SGET_JUMBO.S"
diff --git a/vm/mterp/mips/OP_SGET_JUMBO.S b/vm/mterp/mips/OP_SGET_JUMBO.S
new file mode 100644
index 0000000..93e7586
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET_JUMBO.S
@@ -0,0 +1,54 @@
+%default { "barrier":"                  #  no-op " }
+%verify "executed"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a0, 1)                           # a0<- aaaa (lo)
+    FETCH(a1, 2)                           # a1<- AAAA (hi)
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    sll       a1, a1, 16
+    or        a1, a0, a1                   # a1<- AAAAaaaa
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    # is resolved entry !null?
+    bnez      a0, .L${opcode}_finish
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  AAAAAAAA field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() could throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    # success?
+    beqz      v0, common_exceptionThrown   #  no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b        .L${opcode}_finish            # resume
+%break
+
+.L${opcode}_finish:
+    LOAD_base_offStaticField_value(a1, a0) #  a1 <- field value
+    $barrier                               #  acquiring load
+    FETCH(a2, 3)                           # a2<- BBBB
+    FETCH_ADVANCE_INST(4)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a1, a2, t0)              #  fp[BBBB] <- a1
diff --git a/vm/mterp/mips/OP_SGET_OBJECT.S b/vm/mterp/mips/OP_SGET_OBJECT.S
new file mode 100644
index 0000000..86024ec
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET_OBJECT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SGET.S"
diff --git a/vm/mterp/mips/OP_SGET_OBJECT_JUMBO.S b/vm/mterp/mips/OP_SGET_OBJECT_JUMBO.S
new file mode 100644
index 0000000..2a787a2
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET_OBJECT_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SGET_JUMBO.S"
diff --git a/vm/mterp/mips/OP_SGET_OBJECT_VOLATILE.S b/vm/mterp/mips/OP_SGET_OBJECT_VOLATILE.S
new file mode 100644
index 0000000..d880f97
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET_OBJECT_VOLATILE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SGET.S" {"barrier":"SMP_DMB"}
diff --git a/vm/mterp/mips/OP_SGET_OBJECT_VOLATILE_JUMBO.S b/vm/mterp/mips/OP_SGET_OBJECT_VOLATILE_JUMBO.S
new file mode 100644
index 0000000..c9975c8
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET_OBJECT_VOLATILE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SGET_OBJECT_JUMBO.S" {"barrier":"SMP_DMB"}
diff --git a/vm/mterp/mips/OP_SGET_SHORT.S b/vm/mterp/mips/OP_SGET_SHORT.S
new file mode 100644
index 0000000..86024ec
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET_SHORT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SGET.S"
diff --git a/vm/mterp/mips/OP_SGET_SHORT_JUMBO.S b/vm/mterp/mips/OP_SGET_SHORT_JUMBO.S
new file mode 100644
index 0000000..2a787a2
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET_SHORT_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SGET_JUMBO.S"
diff --git a/vm/mterp/mips/OP_SGET_VOLATILE.S b/vm/mterp/mips/OP_SGET_VOLATILE.S
new file mode 100644
index 0000000..d880f97
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET_VOLATILE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SGET.S" {"barrier":"SMP_DMB"}
diff --git a/vm/mterp/mips/OP_SGET_VOLATILE_JUMBO.S b/vm/mterp/mips/OP_SGET_VOLATILE_JUMBO.S
new file mode 100644
index 0000000..93a5f41
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET_VOLATILE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SGET_JUMBO.S" {"barrier":"SMP_DMB"}
diff --git a/vm/mterp/mips/OP_SGET_WIDE.S b/vm/mterp/mips/OP_SGET_WIDE.S
new file mode 100644
index 0000000..0e72992
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET_WIDE.S
@@ -0,0 +1,58 @@
+%default {"volatile":"0"}
+%verify "executed"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * 64-bit SGET handler.
+     */
+    # sget-wide vAA, field                 /* BBBB */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref BBBB
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    # is resolved entry null?
+    bnez      a0, .L${opcode}_finish
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  BBBB field ref
+     *  rBIX: dvmDex->pResFields
+     *
+     * Returns StaticField pointer in v0.
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() could throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    # success?
+    beqz      v0, common_exceptionThrown   #  no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+
+    b        .L${opcode}_finish            # resume
+%break
+
+.L${opcode}_finish:
+    GET_OPA(a1)                            #  a1 <- AA
+    .if $volatile
+    vLOAD64_off(a2, a3, a0, offStaticField_value) #  a2/a3 <- field value (aligned)
+    .else
+    LOAD64_off(a2, a3, a0, offStaticField_value) #  a2/a3 <- field value (aligned)
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    EAS2(a1, rFP, a1)                      #  a1 <- &fp[AA]
+    STORE64(a2, a3, a1)                    #  vAA/vAA+1 <- a2/a3
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
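Every SGET/SPUT handler in this change follows the same fast path: index
dvmDex->pResFields by the field ref and only call dvmResolveStaticField on a
cache miss, exporting the PC first because resolution can throw. A C sketch of
that flow, with the resolver abstracted behind a function pointer (hypothetical
signature, for illustration only; the real call takes the referrer class):

    #include <stddef.h>

    typedef struct StaticField StaticField;

    static StaticField *lookup_static_field(StaticField **pResFields, unsigned ref,
                                            StaticField *(*resolve)(unsigned ref))
    {
        StaticField *sf = pResFields[ref];   /* LOAD_eas2(a0, rBIX, a1) */
        if (sf == NULL)                      /* bnez a0, .L..._finish */
            sf = resolve(ref);               /* EXPORT_PC(); JAL(dvmResolveStaticField) */
        return sf;                           /* NULL means an exception is pending */
    }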
diff --git a/vm/mterp/mips/OP_SGET_WIDE_JUMBO.S b/vm/mterp/mips/OP_SGET_WIDE_JUMBO.S
new file mode 100644
index 0000000..7a52889
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET_WIDE_JUMBO.S
@@ -0,0 +1,47 @@
+%default {"volatile":"0"}
+%verify "executed"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * Jumbo 64-bit SGET handler.
+     */
+    /* sget-wide/jumbo vBBBB, field@AAAAAAAA */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a0, 1)                           # a0<- aaaa (lo)
+    FETCH(a1, 2)                           # a1<- AAAA (hi)
+    LOAD_base_offDvmDex_pResFields(a2, a2) #  a2 <- dvmDex->pResFields
+    sll       a1,a1,16
+    or        a1, a0, a1                 # a1<- AAAAaaaa
+    LOAD_eas2(a0, a2, a1)                #  a0 <- resolved StaticField ptr
+    # is resolved entry null?
+    bnez      a0, .L${opcode}_finish
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  AAAAAAAA field ref
+     *
+     * Returns StaticField pointer in v0.
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    # success?
+    beqz      v0, common_exceptionThrown   #  no, handle exception
+    b        .L${opcode}_finish            # resume
+%break
+
+.L${opcode}_finish:
+    FETCH(a1, 3)                           # a1<- BBBB
+    .if $volatile
+    vLOAD64_off(a2, a3, a0, offStaticField_value) #  a2/a3 <- field value (aligned)
+    .else
+    LOAD64_off(a2, a3, a0, offStaticField_value) #  a2/a3 <- field value (aligned)
+    .endif
+    FETCH_ADVANCE_INST(4)                  #  advance rPC, load rINST
+    EAS2(a1, rFP, a1)                      #  a1 <- &fp[BBBB]
+    STORE64(a2, a3, a1)                    #  vBBBB/vBBBB+1 <- a2/a3
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
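The jumbo forms carry a 32-bit field ref split across two 16-bit code units,
reassembled by the sll/or pair above. The equivalent C, assuming a pointer to
the current instruction in the code-unit stream:

    #include <stdint.h>

    static uint32_t jumbo_field_ref(const uint16_t *insns)
    {
        uint32_t lo = insns[1];      /* FETCH(a0, 1): aaaa */
        uint32_t hi = insns[2];      /* FETCH(a1, 2): AAAA */
        return (hi << 16) | lo;      /* sll a1,a1,16; or a1, a0, a1 */
    }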
diff --git a/vm/mterp/mips/OP_SGET_WIDE_VOLATILE.S b/vm/mterp/mips/OP_SGET_WIDE_VOLATILE.S
new file mode 100644
index 0000000..ca2fce4
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET_WIDE_VOLATILE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SGET_WIDE.S" {"volatile":"1"}
diff --git a/vm/mterp/mips/OP_SGET_WIDE_VOLATILE_JUMBO.S b/vm/mterp/mips/OP_SGET_WIDE_VOLATILE_JUMBO.S
new file mode 100644
index 0000000..c6039c3
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET_WIDE_VOLATILE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SGET_WIDE_JUMBO.S" {"volatile":"1"}
diff --git a/vm/mterp/mips/OP_SHL_INT.S b/vm/mterp/mips/OP_SHL_INT.S
new file mode 100644
index 0000000..9981dec
--- /dev/null
+++ b/vm/mterp/mips/OP_SHL_INT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop.S" {"preinstr":"and a1, a1, 31", "instr":"sll a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_SHL_INT_2ADDR.S b/vm/mterp/mips/OP_SHL_INT_2ADDR.S
new file mode 100644
index 0000000..0ac0a8f
--- /dev/null
+++ b/vm/mterp/mips/OP_SHL_INT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop2addr.S" {"preinstr":"and a1, a1, 31", "instr":"sll a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_SHL_INT_LIT8.S b/vm/mterp/mips/OP_SHL_INT_LIT8.S
new file mode 100644
index 0000000..1110037
--- /dev/null
+++ b/vm/mterp/mips/OP_SHL_INT_LIT8.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopLit8.S" {"preinstr":"and a1, a1, 31", "instr":"sll a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_SHL_LONG.S b/vm/mterp/mips/OP_SHL_LONG.S
new file mode 100644
index 0000000..817ac2f
--- /dev/null
+++ b/vm/mterp/mips/OP_SHL_LONG.S
@@ -0,0 +1,33 @@
+%verify "executed"
+    /*
+     * Long integer shift.  This is different from the generic 32/64-bit
+     * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
+     * 6 bits of the shift distance.
+     */
+    /* shl-long vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(t2)                            #  t2 <- AA
+    and       a3, a0, 255                  #  a3 <- BB
+    srl       a0, a0, 8                    #  a0 <- CC
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BB]
+    GET_VREG(a2, a0)                       #  a2 <- vCC
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- vBB/vBB+1
+
+    EAS2(t2, rFP, t2)                      #  t2 <- &fp[AA]
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    sll     v0, a0, a2                     #  rlo<- alo << (shift&31)
+    not     v1, a2                         #  rhi<- 31-shift  (shift is 5b)
+    srl     a0, 1
+    srl     a0, v1                         #  alo<- alo >> (32-(shift&31))
+    sll     v1, a1, a2                     #  rhi<- ahi << (shift&31)
+    or      v1, a0                         #  rhi<- rhi | alo
+    andi    a2, 0x20                       #  shift<- shift & 0x20
+    movn    v1, v0, a2                     #  rhi<- rlo (if shift&0x20)
+    movn    v0, zero, a2                   #  rlo<- 0  (if shift&0x20)
+
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(v0, v1, t2)                    #  vAA/vAA+1 <- v0/v1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
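The branchless 64-bit left shift above is built from 32-bit ops: the extra
"srl a0, 1" keeps the lo >> (32 - s) step well defined when s == 0, and the
movn pair handles shift distances of 32 or more. The same computation in C,
as an illustrative sketch (not part of the change):

    #include <stdint.h>

    static uint64_t shl_long(uint32_t lo, uint32_t hi, uint32_t shift)
    {
        unsigned s = shift & 31;
        uint32_t rlo   = lo << s;                    /* sll v0, a0, a2 */
        uint32_t carry = (lo >> 1) >> (31 - s);      /* not/srl/srl: lo >> (32-s), safe at s == 0 */
        uint32_t rhi   = (hi << s) | carry;          /* sll v1, a1, a2; or v1, a0 */
        if (shift & 0x20) {                          /* andi a2, 0x20; movn pair */
            rhi = rlo;
            rlo = 0;
        }
        return ((uint64_t)rhi << 32) | rlo;
    }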
diff --git a/vm/mterp/mips/OP_SHL_LONG_2ADDR.S b/vm/mterp/mips/OP_SHL_LONG_2ADDR.S
new file mode 100644
index 0000000..1191427
--- /dev/null
+++ b/vm/mterp/mips/OP_SHL_LONG_2ADDR.S
@@ -0,0 +1,28 @@
+%verify "executed"
+    /*
+     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
+     * 32-bit shift distance.
+     */
+    /* shl-long/2addr vA, vB */
+    GET_OPA4(t2)                           #  t2 <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a2, a3)                       #  a2 <- vB
+    EAS2(rOBJ, rFP, t2)                    #  rOBJ <- &fp[A]
+    LOAD64(a0, a1, rOBJ)                   #  a0/a1 <- vAA/vAA+1
+
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+    sll     v0, a0, a2                     #  rlo<- alo << (shift&31)
+    not     v1, a2                         #  rhi<- 31-shift  (shift is 5b)
+    srl     a0, 1
+    srl     a0, v1                         #  alo<- alo >> (32-(shift&31))
+    sll     v1, a1, a2                     #  rhi<- ahi << (shift&31)
+    or      v1, a0                         #  rhi<- rhi | alo
+    andi    a2, 0x20                       #  shift<- shift & 0x20
+    movn    v1, v0, a2                     #  rhi<- rlo (if shift&0x20)
+    movn    v0, zero, a2                   #  rlo<- 0  (if shift&0x20)
+
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(v0, v1, rOBJ)                  #  vAA/vAA+1 <- v0/v1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
diff --git a/vm/mterp/mips/OP_SHR_INT.S b/vm/mterp/mips/OP_SHR_INT.S
new file mode 100644
index 0000000..c5911e7
--- /dev/null
+++ b/vm/mterp/mips/OP_SHR_INT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop.S" {"preinstr":"and a1, a1, 31", "instr":"sra a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_SHR_INT_2ADDR.S b/vm/mterp/mips/OP_SHR_INT_2ADDR.S
new file mode 100644
index 0000000..b979e9f
--- /dev/null
+++ b/vm/mterp/mips/OP_SHR_INT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop2addr.S" {"preinstr":"and a1, a1, 31", "instr":"sra a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_SHR_INT_LIT8.S b/vm/mterp/mips/OP_SHR_INT_LIT8.S
new file mode 100644
index 0000000..6124619
--- /dev/null
+++ b/vm/mterp/mips/OP_SHR_INT_LIT8.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopLit8.S" {"preinstr":"and a1, a1, 31", "instr":"sra a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_SHR_LONG.S b/vm/mterp/mips/OP_SHR_LONG.S
new file mode 100644
index 0000000..6906978
--- /dev/null
+++ b/vm/mterp/mips/OP_SHR_LONG.S
@@ -0,0 +1,33 @@
+%verify "executed"
+    /*
+     * Long integer shift.  This is different from the generic 32/64-bit
+     * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
+     * 6 bits of the shift distance.
+     */
+    /* shr-long vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(t3)                            #  t3 <- AA
+    and       a3, a0, 255                  #  a3 <- BB
+    srl       a0, a0, 8                    #  a0 <- CC
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BB]
+    GET_VREG(a2, a0)                       #  a2 <- vCC
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- vBB/vBB+1
+    EAS2(t3, rFP, t3)                      #  t3 <- &fp[AA]
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    sra     v1, a1, a2                     #  rhi<- ahi >> (shift&31)
+    srl     v0, a0, a2                     #  rlo<- alo >> (shift&31)
+    sra     a3, a1, 31                     #  a3<- sign(ah)
+    not     a0, a2                         #  alo<- 31-shift (shift is 5b)
+    sll     a1, 1
+    sll     a1, a0                         #  ahi<- ahi << (32-(shift&31))
+    or      v0, a1                         #  rlo<- rlo | ahi
+    andi    a2, 0x20                       #  shift & 0x20
+    movn    v0, v1, a2                     #  rlo<- rhi (if shift&0x20)
+    movn    v1, a3, a2                     #  rhi<- sign(ahi) (if shift&0x20)
+
+    STORE64(v0, v1, t3)                    #  vAA/vAA+1 <- v0/v1
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
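The arithmetic right shift mirrors the left-shift trick: bits spilling out of
the high word are folded into the low word, and for distances of 32 or more the
movn pair substitutes the sign-filled high word. A C sketch (assuming the usual
arithmetic behavior of >> on signed values):

    #include <stdint.h>

    static uint64_t shr_long(uint32_t lo, int32_t hi, uint32_t shift)
    {
        unsigned s = shift & 31;
        uint32_t rlo   = lo >> s;                         /* srl v0, a0, a2 */
        int32_t  rhi   = hi >> s;                         /* sra v1, a1, a2 */
        uint32_t spill = ((uint32_t)hi << 1) << (31 - s); /* sll/sll: hi << (32-s), safe at s == 0 */
        rlo |= spill;                                     /* or v0, a1 */
        if (shift & 0x20) {                               /* andi a2, 0x20; movn pair */
            rlo = (uint32_t)rhi;
            rhi = hi >> 31;                               /* sra a3, a1, 31: sign fill */
        }
        return ((uint64_t)(uint32_t)rhi << 32) | rlo;
    }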
diff --git a/vm/mterp/mips/OP_SHR_LONG_2ADDR.S b/vm/mterp/mips/OP_SHR_LONG_2ADDR.S
new file mode 100644
index 0000000..439923e
--- /dev/null
+++ b/vm/mterp/mips/OP_SHR_LONG_2ADDR.S
@@ -0,0 +1,28 @@
+%verify "executed"
+    /*
+     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
+     * 32-bit shift distance.
+     */
+    /* shr-long/2addr vA, vB */
+    GET_OPA4(t2)                           #  t2 <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a2, a3)                       #  a2 <- vB
+    EAS2(t2, rFP, t2)                      #  t2 <- &fp[A]
+    LOAD64(a0, a1, t2)                     #  a0/a1 <- vAA/vAA+1
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+    sra     v1, a1, a2                     #  rhi<- ahi >> (shift&31)
+    srl     v0, a0, a2                     #  rlo<- alo >> (shift&31)
+    sra     a3, a1, 31                     #  a3<- sign(ah)
+    not     a0, a2                         #  alo<- 31-shift (shift is 5b)
+    sll     a1, 1
+    sll     a1, a0                         #  ahi<- ahi << (32-(shift&31))
+    or      v0, a1                         #  rlo<- rlo | ahi
+    andi    a2, 0x20                       #  shift & 0x20
+    movn    v0, v1, a2                     #  rlo<- rhi (if shift&0x20)
+    movn    v1, a3, a2                     #  rhi<- sign(ahi) (if shift&0x20)
+
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(v0, v1, t2)                    #  vAA/vAA+1 <- v0/v1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
diff --git a/vm/mterp/mips/OP_SPARSE_SWITCH.S b/vm/mterp/mips/OP_SPARSE_SWITCH.S
new file mode 100644
index 0000000..32067de
--- /dev/null
+++ b/vm/mterp/mips/OP_SPARSE_SWITCH.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_PACKED_SWITCH.S" { "func":"dvmInterpHandleSparseSwitch" }
diff --git a/vm/mterp/mips/OP_SPUT.S b/vm/mterp/mips/OP_SPUT.S
new file mode 100644
index 0000000..722a12f
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT.S
@@ -0,0 +1,50 @@
+%default { "postbarrier":"#  no-op", "prebarrier":"#  no-op" }
+%verify "executed"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * General 32-bit SPUT handler.
+     *
+     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
+     */
+    # op vAA, field                        /* BBBB */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref BBBB
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    bnez      a0, .L${opcode}_finish       #  is resolved entry null?
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  BBBB field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() may throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    beqz      v0, common_exceptionThrown   #  success? no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b        .L${opcode}_finish            # resume
+%break
+
+.L${opcode}_finish:
+    # field ptr in a0
+    GET_OPA(a2)                            #  a2 <- AA
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_VREG(a1, a2)                       #  a1 <- fp[AA]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    $prebarrier                            #  releasing store
+    sw        a1, offStaticField_value(a0) #  field <- vAA
+    $postbarrier
+    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/vm/mterp/mips/OP_SPUT_BOOLEAN.S b/vm/mterp/mips/OP_SPUT_BOOLEAN.S
new file mode 100644
index 0000000..96434b7
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT_BOOLEAN.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SPUT.S"
diff --git a/vm/mterp/mips/OP_SPUT_BOOLEAN_JUMBO.S b/vm/mterp/mips/OP_SPUT_BOOLEAN_JUMBO.S
new file mode 100644
index 0000000..e183701
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT_BOOLEAN_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SPUT_JUMBO.S"
diff --git a/vm/mterp/mips/OP_SPUT_BYTE.S b/vm/mterp/mips/OP_SPUT_BYTE.S
new file mode 100644
index 0000000..96434b7
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT_BYTE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SPUT.S"
diff --git a/vm/mterp/mips/OP_SPUT_BYTE_JUMBO.S b/vm/mterp/mips/OP_SPUT_BYTE_JUMBO.S
new file mode 100644
index 0000000..e183701
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT_BYTE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SPUT_JUMBO.S"
diff --git a/vm/mterp/mips/OP_SPUT_CHAR.S b/vm/mterp/mips/OP_SPUT_CHAR.S
new file mode 100644
index 0000000..96434b7
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT_CHAR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SPUT.S"
diff --git a/vm/mterp/mips/OP_SPUT_CHAR_JUMBO.S b/vm/mterp/mips/OP_SPUT_CHAR_JUMBO.S
new file mode 100644
index 0000000..e183701
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT_CHAR_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SPUT_JUMBO.S"
diff --git a/vm/mterp/mips/OP_SPUT_JUMBO.S b/vm/mterp/mips/OP_SPUT_JUMBO.S
new file mode 100644
index 0000000..5a4f824
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT_JUMBO.S
@@ -0,0 +1,55 @@
+%default { "postbarrier":"      #  no-op ", "prebarrier":"      #  no-op " }
+%verify "executed"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * Jumbo 32-bit SPUT handler.
+     *
+     * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+     *      sput-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a0, 1)                           # a0<- aaaa (lo)
+    FETCH(a1, 2)                           # a1<- AAAA (hi)
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    sll       a1,a1,16
+    or        a1, a0, a1                   # a1<- AAAAaaaa
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    bnez      a0, .L${opcode}_finish       #  is resolved entry null?
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  AAAAAAAA field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() may throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    beqz      v0, common_exceptionThrown   #  success? no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b        .L${opcode}_finish            # resume
+%break
+
+.L${opcode}_finish:
+    # field ptr in a0
+    FETCH(a2, 3)                           # a2<- BBBB
+    FETCH_ADVANCE_INST(4)                  #  advance rPC, load rINST
+    GET_VREG(a1, a2)                       #  a1 <- fp[BBBB]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    $prebarrier                            #  releasing store
+    sw        a1, offStaticField_value(a0) #  field <- vBBBB
+    $postbarrier
+    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/vm/mterp/mips/OP_SPUT_OBJECT.S b/vm/mterp/mips/OP_SPUT_OBJECT.S
new file mode 100644
index 0000000..0fd3db3
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT_OBJECT.S
@@ -0,0 +1,56 @@
+%default { "postbarrier":"#  no-op", "prebarrier":"#  no-op" }
+%verify "executed"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * General 32-bit SPUT handler.
+     *
+     * for: sput-object, sput-object-volatile
+     */
+    /* op vAA, field@BBBB */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref BBBB
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    bnez      a0, .L${opcode}_finish       #  is resolved entry null?
+
+    /* Continuation if the field has not yet been resolved.
+     * a1:  BBBB field ref
+     * rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() may throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    beqz      v0, common_exceptionThrown   #  success? no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b       .L${opcode}_finish             # resume
+
+%break
+.L${opcode}_finish:                        #  field ptr in a0
+    GET_OPA(a2)                            #  a2 <- AA
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_VREG(a1, a2)                       #  a1 <- fp[AA]
+    lw        a2, offThread_cardTable(rSELF) #  a2 <- card table base
+    lw        t1, offField_clazz(a0)       #  t1 <- field->clazz
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    $prebarrier                            #  releasing store
+    sw        a1, offStaticField_value(a0) #  field <- vAA
+    $postbarrier
+    beqz      a1, 1f
+    srl       t2, t1, GC_CARD_SHIFT
+    addu      t3, a2, t2
+    sb        a2, (t3)
+1:
+    GOTO_OPCODE(t0)                        #  jump to next instruction
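sput-object additionally dirties the card covering the field's declaring class
whenever a non-null reference is stored, so the concurrent GC knows to rescan
it. The asm stores the card-table base register itself because its low byte
equals GC_CARD_DIRTY. A C sketch of the tail, with opaque pointers standing in
for the VM structs (names here are illustrative, not the VM's):

    #include <stddef.h>
    #include <stdint.h>

    static void mark_card_on_ref_store(const void *storedRef, const void *fieldClazz,
                                       uint8_t *cardTable, unsigned gcCardShift)
    {
        if (storedRef != NULL) {                                   /* beqz a1, 1f */
            uint8_t *card = cardTable + ((uintptr_t)fieldClazz >> gcCardShift);
            *card = (uint8_t)(uintptr_t)cardTable;                 /* sb a2, (t3) */
        }
    }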
diff --git a/vm/mterp/mips/OP_SPUT_OBJECT_JUMBO.S b/vm/mterp/mips/OP_SPUT_OBJECT_JUMBO.S
new file mode 100644
index 0000000..22fa450
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT_OBJECT_JUMBO.S
@@ -0,0 +1,58 @@
+%default { "postbarrier":"      #  no-op ", "prebarrier":"      #  no-op " }
+%verify "executed"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * Jumbo 32-bit SPUT handler for objects
+     */
+    /* sput-object/jumbo vBBBB, field@AAAAAAAA */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a0, 1)                           # a0<- aaaa (lo)
+    FETCH(a1, 2)                           # a1<- AAAA (hi)
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    sll     a1,a1,16
+    or      a1,a0,a1                       # a1<- AAAAaaaa
+
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    bnez      a0, .L${opcode}_finish       #  is resolved entry null?
+
+    /* Continuation if the field has not yet been resolved.
+     * a1:  AAAAAAAA field ref
+     * rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() may throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    beqz      v0, common_exceptionThrown   #  success? no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b         .L${opcode}_finish           #  resume
+
+%break
+.L${opcode}_finish:                        #  field ptr in a0
+    FETCH(a2, 3)                           # a2<- BBBB
+    FETCH_ADVANCE_INST(4)                  #  advance rPC, load rINST
+    GET_VREG(a1, a2)                       #  a1 <- fp[BBBB]
+    lw        a2, offThread_cardTable(rSELF) #  a2 <- card table base
+    lw        t1, offField_clazz(a0)       #  t1 <- field->clazz
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    $prebarrier                            #  releasing store
+    sw        a1, offStaticField_value(a0) #  field <- vBBBB
+    $postbarrier
+    beqz      a1, 1f
+    srl       t2, t1, GC_CARD_SHIFT
+    addu      t3, a2, t2
+    sb        a2, (t3)
+1:
+    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/vm/mterp/mips/OP_SPUT_OBJECT_VOLATILE.S b/vm/mterp/mips/OP_SPUT_OBJECT_VOLATILE.S
new file mode 100644
index 0000000..8b6dc14
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT_OBJECT_VOLATILE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SPUT_OBJECT.S" {"prebarrier":"SMP_DMB_ST", "postbarrier":"SMP_DMB"}
diff --git a/vm/mterp/mips/OP_SPUT_OBJECT_VOLATILE_JUMBO.S b/vm/mterp/mips/OP_SPUT_OBJECT_VOLATILE_JUMBO.S
new file mode 100644
index 0000000..fd22e6e
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT_OBJECT_VOLATILE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SPUT_OBJECT_JUMBO.S" {"prebarrier":"SMP_DMB_ST", "postbarrier":"SMP_DMB"}
diff --git a/vm/mterp/mips/OP_SPUT_SHORT.S b/vm/mterp/mips/OP_SPUT_SHORT.S
new file mode 100644
index 0000000..96434b7
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT_SHORT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SPUT.S"
diff --git a/vm/mterp/mips/OP_SPUT_SHORT_JUMBO.S b/vm/mterp/mips/OP_SPUT_SHORT_JUMBO.S
new file mode 100644
index 0000000..e183701
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT_SHORT_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SPUT_JUMBO.S"
diff --git a/vm/mterp/mips/OP_SPUT_VOLATILE.S b/vm/mterp/mips/OP_SPUT_VOLATILE.S
new file mode 100644
index 0000000..9e1f1a5
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT_VOLATILE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SPUT.S" {"prebarrier":"SMP_DMB_ST", "postbarrier":"SMP_DMB"}
diff --git a/vm/mterp/mips/OP_SPUT_VOLATILE_JUMBO.S b/vm/mterp/mips/OP_SPUT_VOLATILE_JUMBO.S
new file mode 100644
index 0000000..7c8e2f4
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT_VOLATILE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SPUT_JUMBO.S" {"prebarrier":"SMP_DMB_ST", "postbarrier":"SMP_DMB"}
diff --git a/vm/mterp/mips/OP_SPUT_WIDE.S b/vm/mterp/mips/OP_SPUT_WIDE.S
new file mode 100644
index 0000000..3e1d042
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT_WIDE.S
@@ -0,0 +1,58 @@
+%default {"volatile":"0"}
+%verify "executed"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * 64-bit SPUT handler.
+     */
+    # sput-wide vAA, field                 /* BBBB */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref BBBB
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    GET_OPA(t0)                            #  t0 <- AA
+    LOAD_eas2(a2, rBIX, a1)                #  a2 <- resolved StaticField ptr
+    EAS2(rOBJ, rFP, t0)                    #  rOBJ<- &fp[AA]
+    # is resolved entry null?
+    beqz      a2, .L${opcode}_resolve      #  yes, do resolve
+.L${opcode}_finish:                        #  field ptr in a2, AA in rOBJ
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    LOAD64(a0, a1, rOBJ)                   #  a0/a1 <- vAA/vAA+1
+    GET_INST_OPCODE(rBIX)                  #  extract opcode from rINST
+    .if $volatile
+    addu    a2, offStaticField_value       #  a2<- pointer to data
+    JAL(dvmQuasiAtomicSwap64Sync)          #  stores a0/a1 into addr a2
+    .else
+    STORE64_off(a0, a1, a2, offStaticField_value) #  field <- vAA/vAA+1
+    .endif
+    GOTO_OPCODE(rBIX)                      #  jump to next instruction
+%break
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  BBBB field ref
+     *  rOBJ:  &fp[AA]
+     *  rBIX: dvmDex->pResFields
+     *
+     * Returns StaticField pointer in a2.
+     */
+.L${opcode}_resolve:
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() could throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    # success ?
+    move      a0, v0
+    beqz      v0, common_exceptionThrown   #  no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    move      a2, v0
+    b         .L${opcode}_finish           # resume
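On MIPS32 STORE64 is two separate sw instructions and cannot serve as an atomic
64-bit store, which is why the volatile variant routes the value through
dvmQuasiAtomicSwap64Sync instead. In C11 terms the intent is roughly the
following (a sketch; the swap's previous value is simply discarded):

    #include <stdatomic.h>
    #include <stdint.h>

    static void sput_wide_volatile(_Atomic uint64_t *field, uint64_t value)
    {
        /* an atomic exchange mirrors the swap; only the store side is used */
        (void)atomic_exchange_explicit(field, value, memory_order_seq_cst);
    }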
diff --git a/vm/mterp/mips/OP_SPUT_WIDE_JUMBO.S b/vm/mterp/mips/OP_SPUT_WIDE_JUMBO.S
new file mode 100644
index 0000000..b12ac62
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT_WIDE_JUMBO.S
@@ -0,0 +1,61 @@
+%default {"volatile":"0"}
+%verify "executed"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+    /*
+     * Jumbo 64-bit SPUT handler.
+     */
+    /* sput-wide/jumbo vBBBB, field@AAAAAAAA */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a1, 1)                           # a1<- aaaa (lo)
+    FETCH(t1, 2)                           # t1<- AAAA (hi)
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    sll     t1,t1,16
+    or      a1, a1, t1                     # a1<- AAAAaaaa
+    FETCH(t0, 3)                           # t0<- BBBB
+    LOAD_eas2(a2, rBIX, a1)                #  a2 <- resolved StaticField ptr
+    EAS2(rOBJ, rFP, t0)                    #  rOBJ<- &fp[BBBB]
+    # is resolved entry null?
+    beqz      a2, .L${opcode}_resolve      #  yes, do resolve
+.L${opcode}_finish:                        #  field ptr in a2, BBBB in rOBJ
+    FETCH_ADVANCE_INST(4)                  #  advance rPC, load rINST
+    LOAD64(a0, a1, rOBJ)                   #  a0/a1 <- vBBBB/vBBBB+1
+    GET_INST_OPCODE(rBIX)                  #  extract opcode from rINST
+    .if $volatile
+    addu    a2, offStaticField_value       #  a2<- pointer to data
+    JAL(dvmQuasiAtomicSwap64Sync)          #  stores a0/a1 into addr a2
+    .else
+    STORE64_off(a0, a1, a2, offStaticField_value) #  field <- vBBBB/vBBBB+1
+    .endif
+    GOTO_OPCODE(rBIX)                      #  jump to next instruction
+%break
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  AAAAAAAA field ref
+     *  rOBJ:  &fp[BBBB]
+     *  rBIX: dvmDex->pResFields
+     *
+     * Returns StaticField pointer in a2.
+     */
+.L${opcode}_resolve:
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() could throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    # success ?
+    move      a0, v0
+    beqz      v0, common_exceptionThrown   #  no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    move      a2, v0
+    b         .L${opcode}_finish           # resume
diff --git a/vm/mterp/mips/OP_SPUT_WIDE_VOLATILE.S b/vm/mterp/mips/OP_SPUT_WIDE_VOLATILE.S
new file mode 100644
index 0000000..359b37f
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT_WIDE_VOLATILE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SPUT_WIDE.S" {"volatile":"1"}
diff --git a/vm/mterp/mips/OP_SPUT_WIDE_VOLATILE_JUMBO.S b/vm/mterp/mips/OP_SPUT_WIDE_VOLATILE_JUMBO.S
new file mode 100644
index 0000000..6dc59e5
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT_WIDE_VOLATILE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SPUT_WIDE_JUMBO.S" {"volatile":"1"}
diff --git a/vm/mterp/mips/OP_SUB_DOUBLE.S b/vm/mterp/mips/OP_SUB_DOUBLE.S
new file mode 100644
index 0000000..3b6fa6d
--- /dev/null
+++ b/vm/mterp/mips/OP_SUB_DOUBLE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflopWide.S" {"instr":"JAL(__subdf3)", "instr_f":"sub.d fv0, fa0, fa1"}
diff --git a/vm/mterp/mips/OP_SUB_DOUBLE_2ADDR.S b/vm/mterp/mips/OP_SUB_DOUBLE_2ADDR.S
new file mode 100644
index 0000000..cdd973e
--- /dev/null
+++ b/vm/mterp/mips/OP_SUB_DOUBLE_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflopWide2addr.S" {"instr":"JAL(__subdf3)", "instr_f":"sub.d fv0, fa0, fa1"}
diff --git a/vm/mterp/mips/OP_SUB_FLOAT.S b/vm/mterp/mips/OP_SUB_FLOAT.S
new file mode 100644
index 0000000..9096267
--- /dev/null
+++ b/vm/mterp/mips/OP_SUB_FLOAT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflop.S" {"instr":"JAL(__subsf3)", "instr_f":"sub.s fv0, fa0, fa1"}
diff --git a/vm/mterp/mips/OP_SUB_FLOAT_2ADDR.S b/vm/mterp/mips/OP_SUB_FLOAT_2ADDR.S
new file mode 100644
index 0000000..143b7e6
--- /dev/null
+++ b/vm/mterp/mips/OP_SUB_FLOAT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflop2addr.S" {"instr":"JAL(__subsf3)", "instr_f":"sub.s fv0, fa0, fa1"}
diff --git a/vm/mterp/mips/OP_SUB_INT.S b/vm/mterp/mips/OP_SUB_INT.S
new file mode 100644
index 0000000..aaa6a7b
--- /dev/null
+++ b/vm/mterp/mips/OP_SUB_INT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop.S" {"instr":"subu a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_SUB_INT_2ADDR.S b/vm/mterp/mips/OP_SUB_INT_2ADDR.S
new file mode 100644
index 0000000..0032229
--- /dev/null
+++ b/vm/mterp/mips/OP_SUB_INT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop2addr.S" {"instr":"subu a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_SUB_LONG.S b/vm/mterp/mips/OP_SUB_LONG.S
new file mode 100644
index 0000000..700d4ea
--- /dev/null
+++ b/vm/mterp/mips/OP_SUB_LONG.S
@@ -0,0 +1,10 @@
+%verify "executed"
+/*
+ * For little endian the code sequence looks as follows:
+ *    subu    v0,a0,a2
+ *    subu    v1,a1,a3
+ *    sltu    a0,a0,v0
+ *    subu    v1,v1,a0
+ */
+%include "mips/binopWide.S" { "result0":"v0", "result1":"v1", "preinstr":"subu v0, a0, a2", "instr":"subu v1, a1, a3; sltu a0, a0, v0; subu v1, v1, a0" }
+
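The comment above spells out the borrow computation: after the low-word
subtraction, sltu detects wraparound and the resulting borrow is subtracted
from the high word. The same sequence in C, for reference:

    #include <stdint.h>

    static uint64_t sub_long(uint32_t alo, uint32_t ahi, uint32_t blo, uint32_t bhi)
    {
        uint32_t vlo = alo - blo;        /* subu v0, a0, a2 */
        uint32_t vhi = ahi - bhi;        /* subu v1, a1, a3 */
        vhi -= (alo < vlo);              /* sltu a0, a0, v0; subu v1, v1, a0 */
        return ((uint64_t)vhi << 32) | vlo;
    }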
diff --git a/vm/mterp/mips/OP_SUB_LONG_2ADDR.S b/vm/mterp/mips/OP_SUB_LONG_2ADDR.S
new file mode 100644
index 0000000..9b12d69
--- /dev/null
+++ b/vm/mterp/mips/OP_SUB_LONG_2ADDR.S
@@ -0,0 +1,5 @@
+%verify "executed"
+/*
+ * See comments in OP_SUB_LONG.S
+ */
+%include "mips/binopWide2addr.S" { "result0":"v0", "result1":"v1", "preinstr":"subu v0, a0, a2", "instr":"subu v1, a1, a3; sltu a0, a0, v0; subu v1, v1, a0" }
diff --git a/vm/mterp/mips/OP_THROW.S b/vm/mterp/mips/OP_THROW.S
new file mode 100644
index 0000000..b879b29
--- /dev/null
+++ b/vm/mterp/mips/OP_THROW.S
@@ -0,0 +1,15 @@
+%verify "executed"
+%verify "exception for null object"
+    /*
+     * Throw an exception object in the current thread.
+     */
+    /* throw vAA */
+    GET_OPA(a2)                            #  a2 <- AA
+    GET_VREG(a1, a2)                       #  a1 <- vAA (exception object)
+    EXPORT_PC()                            #  exception handler can throw
+    # null object?
+    beqz      a1, common_errNullObject     #  yes, throw an NPE instead
+    # bypass dvmSetException, just store it
+    STORE_offThread_exception(a1, rSELF)   #  thread->exception <- obj
+    b         common_exceptionThrown
+
diff --git a/vm/mterp/mips/OP_THROW_VERIFICATION_ERROR.S b/vm/mterp/mips/OP_THROW_VERIFICATION_ERROR.S
new file mode 100644
index 0000000..a68b256
--- /dev/null
+++ b/vm/mterp/mips/OP_THROW_VERIFICATION_ERROR.S
@@ -0,0 +1,15 @@
+%verify "executed"
+    /*
+     * Handle a throw-verification-error instruction.  This throws an
+     * exception for an error discovered during verification.  The
+     * exception is indicated by AA, with some detail provided by BBBB.
+     */
+    /* op AA, ref@BBBB */
+
+    LOAD_rSELF_method(a0)                  #  a0 <- self->method
+    FETCH(a2, 1)                           #  a2 <- BBBB
+    EXPORT_PC()                            #  export the PC
+    GET_OPA(a1)                            #  a1 <- AA
+    JAL(dvmThrowVerificationError)         #  always throws
+    b         common_exceptionThrown       #  handle exception
+
diff --git a/vm/mterp/mips/OP_THROW_VERIFICATION_ERROR_JUMBO.S b/vm/mterp/mips/OP_THROW_VERIFICATION_ERROR_JUMBO.S
new file mode 100644
index 0000000..dbddc42
--- /dev/null
+++ b/vm/mterp/mips/OP_THROW_VERIFICATION_ERROR_JUMBO.S
@@ -0,0 +1,17 @@
+%verify "executed"
+    /*
+     * Handle a jumbo throw-verification-error instruction.  This throws an
+     * exception for an error discovered during verification.  The
+     * exception is indicated by BBBB, with some detail provided by AAAAAAAA.
+     */
+     /* exop BBBB, Class@AAAAAAAA */
+    FETCH(a1, 1)                           # a1<- aaaa (lo)
+    FETCH(a2, 2)                           # a2<- AAAA (hi)
+    LOAD_rSELF_method(a0)                  #  a0 <- self->method
+    sll    a2,a2,16
+    or     a2, a1, a2                      # a2<- AAAAaaaa
+    EXPORT_PC()                            #  export the PC
+    FETCH(a1, 3)                           # a1<- BBBB
+    JAL(dvmThrowVerificationError)         #  always throws
+    b         common_exceptionThrown       #  handle exception
+
diff --git a/vm/mterp/mips/OP_UNUSED_27FF.S b/vm/mterp/mips/OP_UNUSED_27FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_27FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_28FF.S b/vm/mterp/mips/OP_UNUSED_28FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_28FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_29FF.S b/vm/mterp/mips/OP_UNUSED_29FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_29FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_2AFF.S b/vm/mterp/mips/OP_UNUSED_2AFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_2AFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_2BFF.S b/vm/mterp/mips/OP_UNUSED_2BFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_2BFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_2CFF.S b/vm/mterp/mips/OP_UNUSED_2CFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_2CFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_2DFF.S b/vm/mterp/mips/OP_UNUSED_2DFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_2DFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_2EFF.S b/vm/mterp/mips/OP_UNUSED_2EFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_2EFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_2FFF.S b/vm/mterp/mips/OP_UNUSED_2FFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_2FFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_30FF.S b/vm/mterp/mips/OP_UNUSED_30FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_30FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_31FF.S b/vm/mterp/mips/OP_UNUSED_31FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_31FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_32FF.S b/vm/mterp/mips/OP_UNUSED_32FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_32FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_33FF.S b/vm/mterp/mips/OP_UNUSED_33FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_33FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_34FF.S b/vm/mterp/mips/OP_UNUSED_34FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_34FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_35FF.S b/vm/mterp/mips/OP_UNUSED_35FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_35FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_36FF.S b/vm/mterp/mips/OP_UNUSED_36FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_36FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_37FF.S b/vm/mterp/mips/OP_UNUSED_37FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_37FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_38FF.S b/vm/mterp/mips/OP_UNUSED_38FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_38FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_39FF.S b/vm/mterp/mips/OP_UNUSED_39FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_39FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_3AFF.S b/vm/mterp/mips/OP_UNUSED_3AFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_3AFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_3BFF.S b/vm/mterp/mips/OP_UNUSED_3BFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_3BFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_3CFF.S b/vm/mterp/mips/OP_UNUSED_3CFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_3CFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_3DFF.S b/vm/mterp/mips/OP_UNUSED_3DFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_3DFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_3E.S b/vm/mterp/mips/OP_UNUSED_3E.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_3E.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_3EFF.S b/vm/mterp/mips/OP_UNUSED_3EFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_3EFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_3F.S b/vm/mterp/mips/OP_UNUSED_3F.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_3F.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_3FFF.S b/vm/mterp/mips/OP_UNUSED_3FFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_3FFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_40.S b/vm/mterp/mips/OP_UNUSED_40.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_40.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_40FF.S b/vm/mterp/mips/OP_UNUSED_40FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_40FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_41.S b/vm/mterp/mips/OP_UNUSED_41.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_41.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_41FF.S b/vm/mterp/mips/OP_UNUSED_41FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_41FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_42.S b/vm/mterp/mips/OP_UNUSED_42.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_42.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_42FF.S b/vm/mterp/mips/OP_UNUSED_42FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_42FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_43.S b/vm/mterp/mips/OP_UNUSED_43.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_43.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_43FF.S b/vm/mterp/mips/OP_UNUSED_43FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_43FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_44FF.S b/vm/mterp/mips/OP_UNUSED_44FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_44FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_45FF.S b/vm/mterp/mips/OP_UNUSED_45FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_45FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_46FF.S b/vm/mterp/mips/OP_UNUSED_46FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_46FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_47FF.S b/vm/mterp/mips/OP_UNUSED_47FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_47FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_48FF.S b/vm/mterp/mips/OP_UNUSED_48FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_48FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_49FF.S b/vm/mterp/mips/OP_UNUSED_49FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_49FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_4AFF.S b/vm/mterp/mips/OP_UNUSED_4AFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_4AFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_4BFF.S b/vm/mterp/mips/OP_UNUSED_4BFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_4BFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_4CFF.S b/vm/mterp/mips/OP_UNUSED_4CFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_4CFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_4DFF.S b/vm/mterp/mips/OP_UNUSED_4DFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_4DFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_4EFF.S b/vm/mterp/mips/OP_UNUSED_4EFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_4EFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_4FFF.S b/vm/mterp/mips/OP_UNUSED_4FFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_4FFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_50FF.S b/vm/mterp/mips/OP_UNUSED_50FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_50FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_51FF.S b/vm/mterp/mips/OP_UNUSED_51FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_51FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_52FF.S b/vm/mterp/mips/OP_UNUSED_52FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_52FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_53FF.S b/vm/mterp/mips/OP_UNUSED_53FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_53FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_54FF.S b/vm/mterp/mips/OP_UNUSED_54FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_54FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_55FF.S b/vm/mterp/mips/OP_UNUSED_55FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_55FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_56FF.S b/vm/mterp/mips/OP_UNUSED_56FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_56FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_57FF.S b/vm/mterp/mips/OP_UNUSED_57FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_57FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_58FF.S b/vm/mterp/mips/OP_UNUSED_58FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_58FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_59FF.S b/vm/mterp/mips/OP_UNUSED_59FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_59FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_5AFF.S b/vm/mterp/mips/OP_UNUSED_5AFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_5AFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_5BFF.S b/vm/mterp/mips/OP_UNUSED_5BFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_5BFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_5CFF.S b/vm/mterp/mips/OP_UNUSED_5CFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_5CFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_5DFF.S b/vm/mterp/mips/OP_UNUSED_5DFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_5DFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_5EFF.S b/vm/mterp/mips/OP_UNUSED_5EFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_5EFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_5FFF.S b/vm/mterp/mips/OP_UNUSED_5FFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_5FFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_60FF.S b/vm/mterp/mips/OP_UNUSED_60FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_60FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_61FF.S b/vm/mterp/mips/OP_UNUSED_61FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_61FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_62FF.S b/vm/mterp/mips/OP_UNUSED_62FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_62FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_63FF.S b/vm/mterp/mips/OP_UNUSED_63FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_63FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_64FF.S b/vm/mterp/mips/OP_UNUSED_64FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_64FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_65FF.S b/vm/mterp/mips/OP_UNUSED_65FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_65FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_66FF.S b/vm/mterp/mips/OP_UNUSED_66FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_66FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_67FF.S b/vm/mterp/mips/OP_UNUSED_67FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_67FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_68FF.S b/vm/mterp/mips/OP_UNUSED_68FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_68FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_69FF.S b/vm/mterp/mips/OP_UNUSED_69FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_69FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_6AFF.S b/vm/mterp/mips/OP_UNUSED_6AFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_6AFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_6BFF.S b/vm/mterp/mips/OP_UNUSED_6BFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_6BFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_6CFF.S b/vm/mterp/mips/OP_UNUSED_6CFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_6CFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_6DFF.S b/vm/mterp/mips/OP_UNUSED_6DFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_6DFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_6EFF.S b/vm/mterp/mips/OP_UNUSED_6EFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_6EFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_6FFF.S b/vm/mterp/mips/OP_UNUSED_6FFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_6FFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_70FF.S b/vm/mterp/mips/OP_UNUSED_70FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_70FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_71FF.S b/vm/mterp/mips/OP_UNUSED_71FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_71FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_72FF.S b/vm/mterp/mips/OP_UNUSED_72FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_72FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_73.S b/vm/mterp/mips/OP_UNUSED_73.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_73.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_73FF.S b/vm/mterp/mips/OP_UNUSED_73FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_73FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_74FF.S b/vm/mterp/mips/OP_UNUSED_74FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_74FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_75FF.S b/vm/mterp/mips/OP_UNUSED_75FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_75FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_76FF.S b/vm/mterp/mips/OP_UNUSED_76FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_76FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_77FF.S b/vm/mterp/mips/OP_UNUSED_77FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_77FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_78FF.S b/vm/mterp/mips/OP_UNUSED_78FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_78FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_79.S b/vm/mterp/mips/OP_UNUSED_79.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_79.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_79FF.S b/vm/mterp/mips/OP_UNUSED_79FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_79FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_7A.S b/vm/mterp/mips/OP_UNUSED_7A.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_7A.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_7AFF.S b/vm/mterp/mips/OP_UNUSED_7AFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_7AFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_7BFF.S b/vm/mterp/mips/OP_UNUSED_7BFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_7BFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_7CFF.S b/vm/mterp/mips/OP_UNUSED_7CFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_7CFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_7DFF.S b/vm/mterp/mips/OP_UNUSED_7DFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_7DFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_7EFF.S b/vm/mterp/mips/OP_UNUSED_7EFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_7EFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_7FFF.S b/vm/mterp/mips/OP_UNUSED_7FFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_7FFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_80FF.S b/vm/mterp/mips/OP_UNUSED_80FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_80FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_81FF.S b/vm/mterp/mips/OP_UNUSED_81FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_81FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_82FF.S b/vm/mterp/mips/OP_UNUSED_82FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_82FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_83FF.S b/vm/mterp/mips/OP_UNUSED_83FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_83FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_84FF.S b/vm/mterp/mips/OP_UNUSED_84FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_84FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_85FF.S b/vm/mterp/mips/OP_UNUSED_85FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_85FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_86FF.S b/vm/mterp/mips/OP_UNUSED_86FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_86FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_87FF.S b/vm/mterp/mips/OP_UNUSED_87FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_87FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_88FF.S b/vm/mterp/mips/OP_UNUSED_88FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_88FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_89FF.S b/vm/mterp/mips/OP_UNUSED_89FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_89FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_8AFF.S b/vm/mterp/mips/OP_UNUSED_8AFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_8AFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_8BFF.S b/vm/mterp/mips/OP_UNUSED_8BFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_8BFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_8CFF.S b/vm/mterp/mips/OP_UNUSED_8CFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_8CFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_8DFF.S b/vm/mterp/mips/OP_UNUSED_8DFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_8DFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_8EFF.S b/vm/mterp/mips/OP_UNUSED_8EFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_8EFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_8FFF.S b/vm/mterp/mips/OP_UNUSED_8FFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_8FFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_90FF.S b/vm/mterp/mips/OP_UNUSED_90FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_90FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_91FF.S b/vm/mterp/mips/OP_UNUSED_91FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_91FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_92FF.S b/vm/mterp/mips/OP_UNUSED_92FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_92FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_93FF.S b/vm/mterp/mips/OP_UNUSED_93FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_93FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_94FF.S b/vm/mterp/mips/OP_UNUSED_94FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_94FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_95FF.S b/vm/mterp/mips/OP_UNUSED_95FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_95FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_96FF.S b/vm/mterp/mips/OP_UNUSED_96FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_96FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_97FF.S b/vm/mterp/mips/OP_UNUSED_97FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_97FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_98FF.S b/vm/mterp/mips/OP_UNUSED_98FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_98FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_99FF.S b/vm/mterp/mips/OP_UNUSED_99FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_99FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_9AFF.S b/vm/mterp/mips/OP_UNUSED_9AFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_9AFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_9BFF.S b/vm/mterp/mips/OP_UNUSED_9BFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_9BFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_9CFF.S b/vm/mterp/mips/OP_UNUSED_9CFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_9CFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_9DFF.S b/vm/mterp/mips/OP_UNUSED_9DFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_9DFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_9EFF.S b/vm/mterp/mips/OP_UNUSED_9EFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_9EFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_9FFF.S b/vm/mterp/mips/OP_UNUSED_9FFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_9FFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_A0FF.S b/vm/mterp/mips/OP_UNUSED_A0FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_A0FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_A1FF.S b/vm/mterp/mips/OP_UNUSED_A1FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_A1FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_A2FF.S b/vm/mterp/mips/OP_UNUSED_A2FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_A2FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_A3FF.S b/vm/mterp/mips/OP_UNUSED_A3FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_A3FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_A4FF.S b/vm/mterp/mips/OP_UNUSED_A4FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_A4FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_A5FF.S b/vm/mterp/mips/OP_UNUSED_A5FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_A5FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_A6FF.S b/vm/mterp/mips/OP_UNUSED_A6FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_A6FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_A7FF.S b/vm/mterp/mips/OP_UNUSED_A7FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_A7FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_A8FF.S b/vm/mterp/mips/OP_UNUSED_A8FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_A8FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_A9FF.S b/vm/mterp/mips/OP_UNUSED_A9FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_A9FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_AAFF.S b/vm/mterp/mips/OP_UNUSED_AAFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_AAFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_ABFF.S b/vm/mterp/mips/OP_UNUSED_ABFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_ABFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_ACFF.S b/vm/mterp/mips/OP_UNUSED_ACFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_ACFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_ADFF.S b/vm/mterp/mips/OP_UNUSED_ADFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_ADFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_AEFF.S b/vm/mterp/mips/OP_UNUSED_AEFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_AEFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_AFFF.S b/vm/mterp/mips/OP_UNUSED_AFFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_AFFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_B0FF.S b/vm/mterp/mips/OP_UNUSED_B0FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_B0FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_B1FF.S b/vm/mterp/mips/OP_UNUSED_B1FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_B1FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_B2FF.S b/vm/mterp/mips/OP_UNUSED_B2FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_B2FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_B3FF.S b/vm/mterp/mips/OP_UNUSED_B3FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_B3FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_B4FF.S b/vm/mterp/mips/OP_UNUSED_B4FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_B4FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_B5FF.S b/vm/mterp/mips/OP_UNUSED_B5FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_B5FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_B6FF.S b/vm/mterp/mips/OP_UNUSED_B6FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_B6FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_B7FF.S b/vm/mterp/mips/OP_UNUSED_B7FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_B7FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_B8FF.S b/vm/mterp/mips/OP_UNUSED_B8FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_B8FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_B9FF.S b/vm/mterp/mips/OP_UNUSED_B9FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_B9FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_BAFF.S b/vm/mterp/mips/OP_UNUSED_BAFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_BAFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_BBFF.S b/vm/mterp/mips/OP_UNUSED_BBFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_BBFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_BCFF.S b/vm/mterp/mips/OP_UNUSED_BCFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_BCFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_BDFF.S b/vm/mterp/mips/OP_UNUSED_BDFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_BDFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_BEFF.S b/vm/mterp/mips/OP_UNUSED_BEFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_BEFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_BFFF.S b/vm/mterp/mips/OP_UNUSED_BFFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_BFFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_C0FF.S b/vm/mterp/mips/OP_UNUSED_C0FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_C0FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_C1FF.S b/vm/mterp/mips/OP_UNUSED_C1FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_C1FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_C2FF.S b/vm/mterp/mips/OP_UNUSED_C2FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_C2FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_C3FF.S b/vm/mterp/mips/OP_UNUSED_C3FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_C3FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_C4FF.S b/vm/mterp/mips/OP_UNUSED_C4FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_C4FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_C5FF.S b/vm/mterp/mips/OP_UNUSED_C5FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_C5FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_C6FF.S b/vm/mterp/mips/OP_UNUSED_C6FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_C6FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_C7FF.S b/vm/mterp/mips/OP_UNUSED_C7FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_C7FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_C8FF.S b/vm/mterp/mips/OP_UNUSED_C8FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_C8FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_C9FF.S b/vm/mterp/mips/OP_UNUSED_C9FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_C9FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_CAFF.S b/vm/mterp/mips/OP_UNUSED_CAFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_CAFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_CBFF.S b/vm/mterp/mips/OP_UNUSED_CBFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_CBFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_CCFF.S b/vm/mterp/mips/OP_UNUSED_CCFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_CCFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_CDFF.S b/vm/mterp/mips/OP_UNUSED_CDFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_CDFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_CEFF.S b/vm/mterp/mips/OP_UNUSED_CEFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_CEFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_CFFF.S b/vm/mterp/mips/OP_UNUSED_CFFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_CFFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_D0FF.S b/vm/mterp/mips/OP_UNUSED_D0FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_D0FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_D1FF.S b/vm/mterp/mips/OP_UNUSED_D1FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_D1FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_D2FF.S b/vm/mterp/mips/OP_UNUSED_D2FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_D2FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_D3FF.S b/vm/mterp/mips/OP_UNUSED_D3FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_D3FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_D4FF.S b/vm/mterp/mips/OP_UNUSED_D4FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_D4FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_D5FF.S b/vm/mterp/mips/OP_UNUSED_D5FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_D5FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_D6FF.S b/vm/mterp/mips/OP_UNUSED_D6FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_D6FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_D7FF.S b/vm/mterp/mips/OP_UNUSED_D7FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_D7FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_D8FF.S b/vm/mterp/mips/OP_UNUSED_D8FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_D8FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_D9FF.S b/vm/mterp/mips/OP_UNUSED_D9FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_D9FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_DAFF.S b/vm/mterp/mips/OP_UNUSED_DAFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_DAFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_DBFF.S b/vm/mterp/mips/OP_UNUSED_DBFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_DBFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_DCFF.S b/vm/mterp/mips/OP_UNUSED_DCFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_DCFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_DDFF.S b/vm/mterp/mips/OP_UNUSED_DDFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_DDFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_DEFF.S b/vm/mterp/mips/OP_UNUSED_DEFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_DEFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_DFFF.S b/vm/mterp/mips/OP_UNUSED_DFFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_DFFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_E0FF.S b/vm/mterp/mips/OP_UNUSED_E0FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_E0FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_E1FF.S b/vm/mterp/mips/OP_UNUSED_E1FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_E1FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_E2FF.S b/vm/mterp/mips/OP_UNUSED_E2FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_E2FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_E3.S b/vm/mterp/mips/OP_UNUSED_E3.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_E3.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_E3FF.S b/vm/mterp/mips/OP_UNUSED_E3FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_E3FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_E4.S b/vm/mterp/mips/OP_UNUSED_E4.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_E4.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_E4FF.S b/vm/mterp/mips/OP_UNUSED_E4FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_E4FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_E5.S b/vm/mterp/mips/OP_UNUSED_E5.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_E5.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_E5FF.S b/vm/mterp/mips/OP_UNUSED_E5FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_E5FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_E6.S b/vm/mterp/mips/OP_UNUSED_E6.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_E6.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_E6FF.S b/vm/mterp/mips/OP_UNUSED_E6FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_E6FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_E7.S b/vm/mterp/mips/OP_UNUSED_E7.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_E7.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_E7FF.S b/vm/mterp/mips/OP_UNUSED_E7FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_E7FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_E8.S b/vm/mterp/mips/OP_UNUSED_E8.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_E8.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_E8FF.S b/vm/mterp/mips/OP_UNUSED_E8FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_E8FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_E9.S b/vm/mterp/mips/OP_UNUSED_E9.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_E9.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_E9FF.S b/vm/mterp/mips/OP_UNUSED_E9FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_E9FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_EA.S b/vm/mterp/mips/OP_UNUSED_EA.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_EA.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_EAFF.S b/vm/mterp/mips/OP_UNUSED_EAFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_EAFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_EB.S b/vm/mterp/mips/OP_UNUSED_EB.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_EB.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_EBFF.S b/vm/mterp/mips/OP_UNUSED_EBFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_EBFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_EC.S b/vm/mterp/mips/OP_UNUSED_EC.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_EC.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_ECFF.S b/vm/mterp/mips/OP_UNUSED_ECFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_ECFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_ED.S b/vm/mterp/mips/OP_UNUSED_ED.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_ED.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_EDFF.S b/vm/mterp/mips/OP_UNUSED_EDFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_EDFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_EEFF.S b/vm/mterp/mips/OP_UNUSED_EEFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_EEFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_EF.S b/vm/mterp/mips/OP_UNUSED_EF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_EF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_EFFF.S b/vm/mterp/mips/OP_UNUSED_EFFF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_EFFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_F0FF.S b/vm/mterp/mips/OP_UNUSED_F0FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_F0FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_F1.S b/vm/mterp/mips/OP_UNUSED_F1.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_F1.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_F1FF.S b/vm/mterp/mips/OP_UNUSED_F1FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_F1FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_FC.S b/vm/mterp/mips/OP_UNUSED_FC.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_FC.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_FD.S b/vm/mterp/mips/OP_UNUSED_FD.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_FD.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_FE.S b/vm/mterp/mips/OP_UNUSED_FE.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_FE.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_FF.S b/vm/mterp/mips/OP_UNUSED_FF.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_USHR_INT.S b/vm/mterp/mips/OP_USHR_INT.S
new file mode 100644
index 0000000..7b474b6
--- /dev/null
+++ b/vm/mterp/mips/OP_USHR_INT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop.S" {"preinstr":"and a1, a1, 31", "instr":"srl a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_USHR_INT_2ADDR.S b/vm/mterp/mips/OP_USHR_INT_2ADDR.S
new file mode 100644
index 0000000..71b5e36
--- /dev/null
+++ b/vm/mterp/mips/OP_USHR_INT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop2addr.S" {"preinstr":"and a1, a1, 31", "instr":"srl a0, a0, a1 "}
diff --git a/vm/mterp/mips/OP_USHR_INT_LIT8.S b/vm/mterp/mips/OP_USHR_INT_LIT8.S
new file mode 100644
index 0000000..7dbe863
--- /dev/null
+++ b/vm/mterp/mips/OP_USHR_INT_LIT8.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopLit8.S" {"preinstr":"and a1, a1, 31", "instr":"srl a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_USHR_LONG.S b/vm/mterp/mips/OP_USHR_LONG.S
new file mode 100644
index 0000000..acd9d15
--- /dev/null
+++ b/vm/mterp/mips/OP_USHR_LONG.S
@@ -0,0 +1,32 @@
+%verify "executed"
+    /*
+     * Long integer shift.  This is different from the generic 32/64-bit
+     * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
+     * 6 bits of the shift distance.
+     */
+    /* ushr-long vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(t0)                            #  t0 <- AA
+    and       a3, a0, 255                  #  a3 <- BB
+    srl       a0, a0, 8                    #  a0 <- CC
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BB]
+    GET_VREG(a2, a0)                       #  a2 <- vCC
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- vBB/vBB+1
+    EAS2(rOBJ, rFP, t0)                    #  rOBJ <- &fp[AA]
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    srl       v1, a1, a2                   #  rhi<- ahi >> (shift&31)
+    srl       v0, a0, a2                   #  rlo<- alo >> (shift&31)
+    not       a0, a2                       #  a0<- 31-n  (shift is 5b)
+    sll       a1, 1
+    sll       a1, a0                       #  ahi<- ahi << (32-(shift&31))
+    or        v0, a1                       #  rlo<- rlo | ahi
+    andi      a2, 0x20                     #  shift & 0x20
+    movn      v0, v1, a2                   #  rlo<- rhi (if shift&0x20)
+    movn      v1, zero, a2                 #  rhi<- 0 (if shift&0x20)
+
+    STORE64(v0, v1, rOBJ)                  #  vAA/vAA+1 <- v0/v1
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
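The handler above decomposes the 64-bit unsigned shift into 32-bit operations.  One subtlety: MIPS variable shifts honor only the low 5 bits of the amount, so "hi << (32 - (shift&31))" cannot be done as a single shift when shift&31 is 0.  The "sll a1, 1" / "sll a1, a0" pair instead shifts by 1 and then by 31-(shift&31), totalling 32-(shift&31), which genuinely clears the register in the boundary case.  A minimal C model of the whole sequence (ushr_long is a hypothetical name, not a VM function):

#include <stdint.h>

/* Reference model of the ushr-long sequence above; Dalvik masks the
 * shift distance to 6 bits, and shifts of 32..63 mirror the two movn
 * conditional moves. */
static uint64_t ushr_long(uint64_t value, uint32_t shift)
{
    uint32_t lo = (uint32_t) value;
    uint32_t hi = (uint32_t) (value >> 32);
    uint32_t s  = shift & 63;

    if (s & 0x20) {                        /* shift 32..63 */
        lo = hi >> (s & 31);
        hi = 0;
    } else if (s != 0) {                   /* shift 1..31 */
        lo = (lo >> s) | (hi << (32 - s));
        hi >>= s;
    }
    return ((uint64_t) hi << 32) | lo;
}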
diff --git a/vm/mterp/mips/OP_USHR_LONG_2ADDR.S b/vm/mterp/mips/OP_USHR_LONG_2ADDR.S
new file mode 100644
index 0000000..103cc98
--- /dev/null
+++ b/vm/mterp/mips/OP_USHR_LONG_2ADDR.S
@@ -0,0 +1,27 @@
+%verify "executed"
+    /*
+     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
+     * 32-bit shift distance.
+     */
+    /* ushr-long/2addr vA, vB */
+    GET_OPA4(t3)                           #  t3 <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a2, a3)                       #  a2 <- vB
+    EAS2(t3, rFP, t3)                      #  t3 <- &fp[A]
+    LOAD64(a0, a1, t3)                     #  a0/a1 <- vAA/vAA+1
+
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    srl       v1, a1, a2                   #  rhi<- ahi >> (shift&31)
+    srl       v0, a0, a2                   #  rlo<- alo >> (shift&31)
+    not       a0, a2                       #  a0<- 31-n  (shift is 5b)
+    sll       a1, 1
+    sll       a1, a0                       #  ahi<- ahi << (32-(shift&31))
+    or        v0, a1                       #  rlo<- rlo | ahi
+    andi      a2, 0x20                     #  shift & 0x20
+    movn      v0, v1, a2                   #  rlo<- rhi (if shift&0x20)
+    movn      v1, zero, a2                 #  rhi<- 0 (if shift&0x20)
+
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(v0, v1, t3)                    #  vAA/vAA+1 <- v0/v1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
diff --git a/vm/mterp/mips/OP_XOR_INT.S b/vm/mterp/mips/OP_XOR_INT.S
new file mode 100644
index 0000000..6551e75
--- /dev/null
+++ b/vm/mterp/mips/OP_XOR_INT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop.S" {"instr":"xor a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_XOR_INT_2ADDR.S b/vm/mterp/mips/OP_XOR_INT_2ADDR.S
new file mode 100644
index 0000000..f93b782
--- /dev/null
+++ b/vm/mterp/mips/OP_XOR_INT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop2addr.S" {"instr":"xor a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_XOR_INT_LIT16.S b/vm/mterp/mips/OP_XOR_INT_LIT16.S
new file mode 100644
index 0000000..add8ef2
--- /dev/null
+++ b/vm/mterp/mips/OP_XOR_INT_LIT16.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopLit16.S" {"instr":"xor a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_XOR_INT_LIT8.S b/vm/mterp/mips/OP_XOR_INT_LIT8.S
new file mode 100644
index 0000000..31fa360
--- /dev/null
+++ b/vm/mterp/mips/OP_XOR_INT_LIT8.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopLit8.S" {"instr":"xor a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_XOR_LONG.S b/vm/mterp/mips/OP_XOR_LONG.S
new file mode 100644
index 0000000..1f07c84
--- /dev/null
+++ b/vm/mterp/mips/OP_XOR_LONG.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopWide.S" {"preinstr":"xor a0, a0, a2", "instr":"xor a1, a1, a3"}
diff --git a/vm/mterp/mips/OP_XOR_LONG_2ADDR.S b/vm/mterp/mips/OP_XOR_LONG_2ADDR.S
new file mode 100644
index 0000000..dade7a9
--- /dev/null
+++ b/vm/mterp/mips/OP_XOR_LONG_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopWide2addr.S" {"preinstr":"xor a0, a0, a2", "instr":"xor a1, a1, a3"}
diff --git a/vm/mterp/mips/alt_stub.S b/vm/mterp/mips/alt_stub.S
new file mode 100644
index 0000000..edf71a7
--- /dev/null
+++ b/vm/mterp/mips/alt_stub.S
@@ -0,0 +1,20 @@
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is reached with a normal JAL, not a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (${opnum} * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
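The stub finds the real handler arithmetically: the generated handlers sit in fixed 128-byte slots starting at dvmAsmInstructionStart, so the address is a simple multiply-add.  A sketch of that computation (realHandlerFor is a hypothetical helper):

extern char dvmAsmInstructionStart[];   /* start of the generated handler table */

/* Mirrors "la rBIX, dvmAsmInstructionStart + (opnum * 128)" above. */
static void *realHandlerFor(unsigned opnum)
{
    return dvmAsmInstructionStart + opnum * 128;
}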
diff --git a/vm/mterp/mips/bincmp.S b/vm/mterp/mips/bincmp.S
new file mode 100644
index 0000000..e2398d0
--- /dev/null
+++ b/vm/mterp/mips/bincmp.S
@@ -0,0 +1,35 @@
+%verify "branch taken"
+%verify "branch not taken"
+    /*
+     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+     */
+    /* if-cmp vA, vB, +CCCC */
+    GET_OPA4(a0)                           #  a0 <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    GET_VREG(a3, a1)                       #  a3 <- vB
+    GET_VREG(a2, a0)                       #  a2 <- vA
+    b${revcmp} a2, a3, 1f                  #  branch to 1 if comparison failed
+    FETCH_S(a1, 1)                         #  a1<- branch offset, in code units
+    b 2f
+1:
+    li        a1, 2                        #  a1 <- BYTE branch dist for not-taken
+2:
+    addu      a2, a1, a1                   #  convert to bytes
+    FETCH_ADVANCE_INST_RB(a2)              #  update rPC, load rINST
+#if defined(WITH_JIT)
+    lw        a0, offThread_pJitProfTable(rSELF)
+    bgez      a2, 3f
+    lw        rIBASE, offThread_curHandlerTable(rSELF)  # refresh rIBASE
+3:
+    bnez      a0, common_updateProfile
+#else
+    bgez      a2, 4f
+    lw        rIBASE, offThread_curHandlerTable(rSELF)  # refresh rIBASE
+4:
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
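The offset handling above treats taken and not-taken branches uniformly: a taken branch uses the signed 16-bit offset fetched from the instruction stream, a not-taken branch advances by 2 code units, and either count is doubled ("addu a2, a1, a1") into a byte distance for FETCH_ADVANCE_INST_RB.  As a minimal model (hypothetical helper):

#include <stdint.h>

/* Byte distance handed to FETCH_ADVANCE_INST_RB in bincmp.S. */
static int branchDistanceBytes(int taken, int16_t branchOffset)
{
    int codeUnits = taken ? branchOffset : 2;   /* not taken: skip this 2-unit insn */
    return codeUnits * 2;                       /* Dalvik code units are 16 bits */
}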
diff --git a/vm/mterp/mips/binflop.S b/vm/mterp/mips/binflop.S
new file mode 100644
index 0000000..6b02707
--- /dev/null
+++ b/vm/mterp/mips/binflop.S
@@ -0,0 +1,44 @@
+%default {"preinstr":"", "chkzero":"0"}
+    /*
+     * Generic 32-bit binary float operation.
+     *
+     * For: add-fp, sub-fp, mul-fp, div-fp
+     */
+
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  s5 <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+#ifdef SOFT_FLOAT
+    GET_VREG(a1, a3)                       #  a1 <- vCC
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    .if $chkzero
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+#else
+    GET_VREG_F(fa1, a3)                    #  fa1 <- vCC
+    GET_VREG_F(fa0, a2)                    #  fa0 <- vBB
+
+    .if $chkzero
+    # is second operand zero?
+    li.s      ft0, 0
+    c.eq.s    fcc0, ft0, fa1               #  condition bit and comparison with 0
+    bc1t      fcc0, common_errDivideByZero
+    .endif
+#endif
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    $preinstr                              #  optional op
+#ifdef SOFT_FLOAT
+    $instr                                 #  v0 = result
+    SET_VREG(v0, rOBJ)                     #  vAA <- v0
+#else
+    $instr_f                               #  fv0 = result
+    SET_VREG_F(fv0, rOBJ)                  #  vAA <- fv0
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 11-14 instructions */
+
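Note that the two divide-by-zero guards above are not quite equivalent: the SOFT_FLOAT path tests the raw register bits with beqz, which matches only +0.0f, while the FPU path compares against a floating-point zero with c.eq.s, which also matches -0.0f.  In C terms (assumed semantics of the two tests):

#include <stdint.h>
#include <string.h>

/* Bit-pattern test, as in the SOFT_FLOAT "beqz a1" path. */
static int bitsAreZero(float f)
{
    uint32_t bits;
    memcpy(&bits, &f, sizeof bits);
    return bits == 0;                     /* +0.0f only */
}

/* Value test, as in the "c.eq.s fcc0, ft0, fa1" path. */
static int valueIsZero(float f)
{
    return f == 0.0f;                     /* +0.0f and -0.0f */
}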
diff --git a/vm/mterp/mips/binflop2addr.S b/vm/mterp/mips/binflop2addr.S
new file mode 100644
index 0000000..c20a1c6
--- /dev/null
+++ b/vm/mterp/mips/binflop2addr.S
@@ -0,0 +1,45 @@
+%default {"preinstr":"", "chkzero":"0"}
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" and
+     * "instr_f" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
+     * div-float/2addr, rem-float/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  t1 <- A+
+    GET_OPB(a3)                            #  a3 <- B
+#ifdef SOFT_FLOAT
+    GET_VREG(a0, rOBJ)                     #  a0 <- vA
+    GET_VREG(a1, a3)                       #  a1 <- vB
+    .if $chkzero
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+#else
+    GET_VREG_F(fa0, rOBJ)
+    GET_VREG_F(fa1, a3)
+    .if $chkzero
+    # is second operand zero?
+    li.s      ft0, 0
+    c.eq.s    fcc0, ft0, fa1
+    bc1t      fcc0, common_errDivideByZero
+    .endif
+#endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    $preinstr                              #  optional op
+#ifdef SOFT_FLOAT
+    $instr                                 #  result <- op, a0-a3 changed
+    SET_VREG(v0, rOBJ)                     #  vAA <- result
+#else
+    $instr_f
+    SET_VREG_F(fv0, rOBJ)                  #  vAA <- result
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 10-13 instructions */
+
diff --git a/vm/mterp/mips/binflopWide.S b/vm/mterp/mips/binflopWide.S
new file mode 100644
index 0000000..ad61680
--- /dev/null
+++ b/vm/mterp/mips/binflopWide.S
@@ -0,0 +1,52 @@
+%default {"preinstr":"", "chkzero":"0"}
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+     *      xor-long, add-double, sub-double, mul-double, div-double,
+     *      rem-double
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  s5 <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    EAS2(rOBJ, rFP, rOBJ)                  #  s5 <- &fp[AA]
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
+    EAS2(t1, rFP, a3)                      #  a3 <- &fp[CC]
+#ifdef SOFT_FLOAT
+    LOAD64(rARG0, rARG1, a2)               #  a0/a1 <- vBB/vBB+1
+    LOAD64(rARG2, rARG3, t1)               #  a2/a3 <- vCC/vCC+1
+    .if $chkzero
+    or        t0, rARG2, rARG3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+#else
+    LOAD64_F(fa0, fa0f, a2)
+    LOAD64_F(fa1, fa1f, t1)
+    .if $chkzero
+    li.d      ft0, 0
+    c.eq.d    fcc0, fa1, ft0
+    bc1t      fcc0, common_errDivideByZero
+    .endif
+#endif
+1:
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    $preinstr                              #  optional op
+#ifdef SOFT_FLOAT
+    $instr                                 #  result <- op, a0-a3 changed
+    STORE64(rRESULT0, rRESULT1, rOBJ)
+#else
+    $instr_f
+    STORE64_F(fv0, fv0f, rOBJ)
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 14-17 instructions */
+
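For the wide templates, the SOFT_FLOAT zero guard checks a 64-bit divisor held in two 32-bit registers by ORing the halves, since the pair is zero exactly when the OR is.  Sketch:

#include <stdint.h>

/* Model of "or t0, rARG2, rARG3; beqz t0, ..." above. */
static int isZero64(uint32_t lo, uint32_t hi)
{
    return (lo | hi) == 0;
}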
diff --git a/vm/mterp/mips/binflopWide2addr.S b/vm/mterp/mips/binflopWide2addr.S
new file mode 100644
index 0000000..aacd482
--- /dev/null
+++ b/vm/mterp/mips/binflopWide2addr.S
@@ -0,0 +1,46 @@
+%default {"preinstr":"", "chkzero":"0"}
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+     *  div-double/2addr, rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[A]
+#ifdef SOFT_FLOAT
+    LOAD64(rARG2, rARG3, a1)               #  a2/a3 <- vBB/vBB+1
+    LOAD64(rARG0, rARG1, rOBJ)             #  a0/a1 <- vAA/vAA+1
+    .if $chkzero
+    or        t0, rARG2, rARG3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+#else
+    LOAD64_F(fa0, fa0f, rOBJ)
+    LOAD64_F(fa1, fa1f, a1)
+    .if $chkzero
+    li.d      ft0, 0
+    c.eq.d    fcc0, fa1, ft0
+    bc1t      fcc0, common_errDivideByZero
+    .endif
+#endif
+1:
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    $preinstr                              #  optional op
+#ifdef SOFT_FLOAT
+    $instr                                 #  result <- op, a0-a3 changed
+    STORE64(rRESULT0, rRESULT1, rOBJ)
+#else
+    $instr_f
+    STORE64_F(fv0, fv0f, rOBJ)
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 12-15 instructions */
+
diff --git a/vm/mterp/mips/binop.S b/vm/mterp/mips/binop.S
new file mode 100644
index 0000000..8bbe0fb
--- /dev/null
+++ b/vm/mterp/mips/binop.S
@@ -0,0 +1,34 @@
+%default {"preinstr":"", "result":"a0", "chkzero":"0"}
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the underlying math lib
+     * handles it correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+    GET_VREG(a1, a3)                       #  a1 <- vCC
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    .if $chkzero
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    $preinstr                              #  optional op
+    $instr                                 #  $result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO($result, rOBJ, t0)       #  vAA <- $result
+    /* 11-14 instructions */
+
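binop.S is the workhorse skeleton behind the one-line %include directives seen earlier (xor-int, ushr-int, and so on), which splice their "preinstr"/"instr" fragments into it at generation time.  A rough C model of the contract it implements (all names hypothetical):

#include <stdint.h>

typedef int32_t (*BinopFn)(int32_t, int32_t);

/* Fetch vBB and vCC, optionally trap on a zero divisor, apply the
 * substituted operation, and write vAA. */
static void doBinop32(int32_t *fp, unsigned aa, unsigned bb, unsigned cc,
                      BinopFn op, int chkzero)
{
    int32_t a0 = fp[bb];                  /* GET_VREG(a0, a2): vBB */
    int32_t a1 = fp[cc];                  /* GET_VREG(a1, a3): vCC */
    if (chkzero && a1 == 0)
        return;                           /* common_errDivideByZero throws */
    fp[aa] = op(a0, a1);                  /* SET_VREG_GOTO($result, rOBJ, t0) */
}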
diff --git a/vm/mterp/mips/binop2addr.S b/vm/mterp/mips/binop2addr.S
new file mode 100644
index 0000000..acca20d
--- /dev/null
+++ b/vm/mterp/mips/binop2addr.S
@@ -0,0 +1,30 @@
+%default {"preinstr":"", "result":"a0", "chkzero":"0"}
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a0, rOBJ)                     #  a0 <- vA
+    GET_VREG(a1, a3)                       #  a1 <- vB
+    .if $chkzero
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+    $preinstr                              #  optional op
+    $instr                                 #  $result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO($result, rOBJ, t0)       #  vAA <- $result
+    /* 10-13 instructions */
+
diff --git a/vm/mterp/mips/binopLit16.S b/vm/mterp/mips/binopLit16.S
new file mode 100644
index 0000000..74b4533
--- /dev/null
+++ b/vm/mterp/mips/binopLit16.S
@@ -0,0 +1,30 @@
+%default {"result":"a0", "chkzero":"0"}
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    # binop/lit16 vA, vB,                  /* +CCCC */
+    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
+    GET_OPB(a2)                            #  a2 <- B
+    GET_OPA(rOBJ)                          #  rOBJ <- A+
+    GET_VREG(a0, a2)                       #  a0 <- vB
+    and       rOBJ, rOBJ, 15
+    .if $chkzero
+    # cmp a1, 0; is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    $instr                                 #  $result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO($result, rOBJ, t0)       #  vAA <- $result
+    /* 10-13 instructions */
+
diff --git a/vm/mterp/mips/binopLit8.S b/vm/mterp/mips/binopLit8.S
new file mode 100644
index 0000000..c3d7464
--- /dev/null
+++ b/vm/mterp/mips/binopLit8.S
@@ -0,0 +1,32 @@
+%default {"preinstr":"", "result":"a0", "chkzero":"0"}
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    # binop/lit8 vAA, vBB,                 /* +CC */
+    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a3, 255                  #  a2 <- BB
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
+    .if $chkzero
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    $preinstr                              #  optional op
+    $instr                                 #  $result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO($result, rOBJ, t0)       #  vAA <- $result
+    /* 10-12 instructions */
+
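The lit8 decode leans on FETCH_S sign-extending the 16-bit ssssCCBB word: masking with 0xff yields the register index BB, and an arithmetic shift right by 8 yields the signed literal CC (binopLit16 plays the same trick on a full 16-bit CCCC literal).  In C (hypothetical helper):

#include <stdint.h>

/* Model of the lit8 operand decode above. */
static void decodeLit8(int32_t fetched /* ssssCCBB, sign-extended */,
                       unsigned *regBB, int32_t *litCC)
{
    *regBB = fetched & 0xff;                      /* and a2, a3, 255 */
    *litCC = (int8_t) ((uint32_t) fetched >> 8);  /* sra a1, a3, 8 */
}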
diff --git a/vm/mterp/mips/binopWide.S b/vm/mterp/mips/binopWide.S
new file mode 100644
index 0000000..3e47ab9
--- /dev/null
+++ b/vm/mterp/mips/binopWide.S
@@ -0,0 +1,38 @@
+%default {"preinstr":"", "result0":"a0", "result1":"a1", "chkzero":"0", "arg0":"a0", "arg1":"a1", "arg2":"a2", "arg3":"a3"}
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+     *      xor-long
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[AA]
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
+    EAS2(t1, rFP, a3)                      #  a3 <- &fp[CC]
+    LOAD64($arg0, $arg1, a2)               #  a0/a1 <- vBB/vBB+1
+    LOAD64($arg2, $arg3, t1)               #  a2/a3 <- vCC/vCC+1
+    .if $chkzero
+    or        t0, $arg2, $arg3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    $preinstr                              #  optional op
+    $instr                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64($result0, $result1, rOBJ)      #  vAA/vAA+1 <- $result0/$result1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 14-17 instructions */
+
diff --git a/vm/mterp/mips/binopWide2addr.S b/vm/mterp/mips/binopWide2addr.S
new file mode 100644
index 0000000..7494604
--- /dev/null
+++ b/vm/mterp/mips/binopWide2addr.S
@@ -0,0 +1,34 @@
+%default {"preinstr":"", "result0":"a0", "result1":"a1", "chkzero":"0", "arg0":"a0", "arg1":"a1", "arg2":"a2", "arg3":"a3"}
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+     *      and-long/2addr, or-long/2addr, xor-long/2addr,
+     *      rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[A]
+    LOAD64($arg2, $arg3, a1)               #  a2/a3 <- vBB/vBB+1
+    LOAD64($arg0, $arg1, rOBJ)             #  a0/a1 <- vAA/vAA+1
+    .if $chkzero
+    or        t0, $arg2, $arg3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+    $preinstr                              #  optional op
+    $instr                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64($result0, $result1, rOBJ)      #  vA/vA+1 <- $result0/$result1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 12-15 instructions */
+
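+/*
+ * Illustration (hypothetical, not part of the original file): a division
+ * stub such as OP_DIV_LONG_2ADDR.S would set "chkzero" so this template
+ * emits the divide-by-zero test; the helper call shown is a sketch:
+ *
+ *   %include "mips/binopWide2addr.S" { "chkzero":"1",
+ *       "result0":"v0", "result1":"v1", "instr":"JAL(__divdi3)" }
+ */
+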
diff --git a/vm/mterp/mips/debug.cpp b/vm/mterp/mips/debug.cpp
new file mode 100644
index 0000000..0de6b67
--- /dev/null
+++ b/vm/mterp/mips/debug.cpp
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <inttypes.h>
+
+/*
+ * Dump the fixed-purpose MIPS registers, along with some other info.
+ */
+void dvmMterpDumpMipsRegs(uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3)
+{
+    register uint32_t rPC       asm("s0");
+    register uint32_t rFP       asm("s1");
+    register uint32_t rSELF     asm("s2");
+    register uint32_t rIBASE    asm("s3");
+    register uint32_t rINST     asm("s4");
+    register uint32_t rOBJ      asm("s5");
+    register uint32_t rBIX      asm("s6");
+    register uint32_t rTEMP     asm("s7");
+
+    //extern char dvmAsmInstructionStart[];
+
+    printf("REGS: a0=%08x a1=%08x a2=%08x a3=%08x\n", a0, a1, a2, a3);
+    printf("    : rPC=%08x rFP=%08x rSELF=%08x rIBASE=%08x\n",
+        rPC, rFP, rSELF, rIBASE);
+    printf("    : rINST=%08x rOBJ=%08x rBIX=%08x rTEMP=%08x \n", rINST, rOBJ, rBIX, rTEMP);
+
+    //Thread* self = (Thread*) rSELF;
+    //const Method* method = self->method;
+    printf("    + self is %p\n", dvmThreadSelf());
+    //printf("    + currently in %s.%s %s\n",
+    //    method->clazz->descriptor, method->name, method->signature);
+    //printf("    + dvmAsmInstructionStart = %p\n", dvmAsmInstructionStart);
+    //printf("    + next handler for 0x%02x = %p\n",
+    //    rINST & 0xff, dvmAsmInstructionStart + (rINST & 0xff) * 64);
+}
+
+/*
+ * Dump the StackSaveArea for the specified frame pointer.
+ */
+void dvmDumpFp(void* fp, StackSaveArea* otherSaveArea)
+{
+    StackSaveArea* saveArea = SAVEAREA_FROM_FP(fp);
+    printf("StackSaveArea for fp %p [%p/%p]:\n", fp, saveArea, otherSaveArea);
+#ifdef EASY_GDB
+    printf("  prevSave=%p, prevFrame=%p savedPc=%p meth=%p curPc=%p\n",
+        saveArea->prevSave, saveArea->prevFrame, saveArea->savedPc,
+        saveArea->method, saveArea->xtra.currentPc);
+#else
+    printf("  prevFrame=%p savedPc=%p meth=%p curPc=%p fp[0]=0x%08x\n",
+        saveArea->prevFrame, saveArea->savedPc,
+        saveArea->method, saveArea->xtra.currentPc,
+        *(u4*)fp);
+#endif
+}
+
+/*
+ * Does the bulk of the work for common_printMethod().
+ */
+void dvmMterpPrintMethod(Method* method)
+{
+    /*
+     * It is a direct (non-virtual) method if it is static, private,
+     * or a constructor.
+     */
+    bool isDirect =
+        ((method->accessFlags & (ACC_STATIC|ACC_PRIVATE)) != 0) ||
+        (method->name[0] == '<');
+
+    char* desc = dexProtoCopyMethodDescriptor(&method->prototype);
+
+    printf("<%c:%s.%s %s> ",
+            isDirect ? 'D' : 'V',
+            method->clazz->descriptor,
+            method->name,
+            desc);
+
+    free(desc);
+}
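+
+/*
+ * Note: common_printMethod in footer.S routes through dvmMterpPrintMethod,
+ * and these helpers can also be invoked by hand from a debugger, e.g.
+ * (illustrative only, casts assumed):
+ *
+ *   (gdb) call dvmDumpFp((void*) $s1, (StackSaveArea*) 0)
+ *
+ * where $s1 is rFP per the register map in header.S.
+ */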
diff --git a/vm/mterp/mips/entry.S b/vm/mterp/mips/entry.S
new file mode 100644
index 0000000..8a1b61a
--- /dev/null
+++ b/vm/mterp/mips/entry.S
@@ -0,0 +1,107 @@
+
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Interpreter entry point.
+ */
+
+#define ASSIST_DEBUGGER 1
+
+    .text
+    .align 2
+    .global dvmMterpStdRun
+    .ent dvmMterpStdRun
+    .frame sp, STACK_SIZE, ra
+/*
+ * On entry:
+ *  a0  Thread* self
+ *
+ * The return comes via a call to dvmMterpStdBail().
+ */
+
+dvmMterpStdRun:
+    .set noreorder
+    .cpload t9
+    .set reorder
+/* Save to the stack. Frame size = STACK_SIZE */
+    STACK_STORE_FULL()
+/* This directive will make sure all subsequent jal restore gp at a known offset */
+    .cprestore STACK_OFFSET_GP
+
+    addu      fp, sp, STACK_SIZE           #  Move Frame Pointer to the base of frame
+    /* save stack pointer so dvmMterpStdBail can unwind to it */
+    sw        sp, offThread_bailPtr(a0)      #  save SP
+
+    /* set up "named" registers, figure out entry point */
+    move      rSELF, a0                    #  set rSELF
+    LOAD_PC_FROM_SELF()
+    LOAD_FP_FROM_SELF()
+    lw        rIBASE, offThread_curHandlerTable(rSELF)
+
+#if defined(WITH_JIT)
+.LentryInstr:
+    /* Entry is always a possible trace start */
+    lw        a0, offThread_pJitProfTable(rSELF)
+    FETCH_INST()                           #  load rINST from rPC
+    sw        zero, offThread_inJitCodeCache(rSELF)
+#if !defined(WITH_SELF_VERIFICATION)
+    bnez      a0, common_updateProfile     # profiling is enabled
+#else
+    lw       a2, offThread_shadowSpace(rSELF) # to find out the jit exit state
+    beqz     a0, 1f                        # profiling is disabled
+    lw       a3, offShadowSpace_jitExitState(a2) # jit exit state
+    li	     t0, kSVSTraceSelect
+    bne      a3, t0, 2f
+    li       a2, kJitTSelectRequestHot     # ask for trace selection
+    b        common_selectTrace            # go build the trace
+2:
+    li       t1, kSVSNoProfile              # (t1, not a4: o32 has no a4 register)
+    beq      a3, t1, 1f                    # don't profile the next instruction?
+    b        common_updateProfile          # collect profiles
+#endif
+1:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+#else
+    /* start executing the instruction at rPC */
+    FETCH_INST()                           #  load rINST from rPC
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+#endif
+
+.Lbad_arg:
+    la        a0, .LstrBadEntryPoint
+    # a1 holds value of entryPoint
+    JAL(printf)
+    JAL(dvmAbort)
+
+    .end dvmMterpStdRun
+
+    .global dvmMterpStdBail
+    .ent dvmMterpStdBail
+
+/* Restore the stack pointer and all the registers stored at sp from the save
+ * point established  on entry. Return to whoever called dvmMterpStdRun.
+ *
+ * On entry:
+ *   a0    Thread* self
+ */
+dvmMterpStdBail:
+    lw        sp, offThread_bailPtr(a0)      #  Restore sp
+    STACK_LOAD_FULL()
+    jr        ra
+
+    .end dvmMterpStdBail
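+
+/*
+ * Note: the bail works because dvmMterpStdRun stashed sp in self->bailPtr
+ * on entry.  A C-level caller can therefore end the activation with a
+ * plain call (sketch, assuming the declaration matches the entry comment
+ * above):
+ *
+ *   extern "C" void dvmMterpStdBail(Thread* self);
+ *   dvmMterpStdBail(self);   // returns to dvmMterpStdRun's caller
+ */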
diff --git a/vm/mterp/mips/footer.S b/vm/mterp/mips/footer.S
new file mode 100644
index 0000000..b5b53b7
--- /dev/null
+++ b/vm/mterp/mips/footer.S
@@ -0,0 +1,1205 @@
+/*
+ * ===========================================================================
+ *  Common subroutines and data
+ * ===========================================================================
+ */
+
+    .text
+    .align 2
+
+#if defined(WITH_JIT)
+#if defined(WITH_SELF_VERIFICATION)
+
+/*
+ * "longjmp" to a translation after single-stepping.  Before returning
+ * to translation, must save state for self-verification.
+ */
+    .global dvmJitResumeTranslation             # (Thread* self, u4* dFP)
+dvmJitResumeTranslation:
+    move    rSELF, a0                           # restore self
+    move    rPC, a1                             # restore Dalvik pc
+    move    rFP, a2                             # restore Dalvik fp
+    lw      rBIX, offThread_jitResumeNPC(rSELF)
+    sw      zero, offThread_jitResumeNPC(rSELF) # reset resume address
+    lw      sp, offThread_jitResumeNSP(rSELF)   # cut back native stack
+    b       jitSVShadowRunStart                 # resume as if cache hit
+                                                # expects resume addr in rBIX
+
+    .global dvmJitToInterpPunt
+dvmJitToInterpPunt:
+    li        a2, kSVSPunt                 #  a2 <- interpreter entry point
+    sw        zero, offThread_inJitCodeCache(rSELF) #  Back to the interp land
+    b         jitSVShadowRunEnd            #  doesn't return
+
+    .global dvmJitToInterpSingleStep
+dvmJitToInterpSingleStep:
+    move      rPC, a0                      # set up dalvik pc
+    EXPORT_PC()
+    sw        ra, offThread_jitResumeNPC(rSELF)
+    sw        a1, offThread_jitResumeDPC(rSELF)
+    li        a2, kSVSSingleStep           #  a2 <- interpreter entry point
+    b         jitSVShadowRunEnd            #  doesn't return
+
+    .global dvmJitToInterpNoChainNoProfile
+dvmJitToInterpNoChainNoProfile:
+    move      a0, rPC                      #  pass our target PC
+    li        a2, kSVSNoProfile            #  a2 <- interpreter entry point
+    sw        zero, offThread_inJitCodeCache(rSELF) #  Back to the interp land
+    b         jitSVShadowRunEnd            #  doesn't return
+
+    .global dvmJitToInterpTraceSelectNoChain
+dvmJitToInterpTraceSelectNoChain:
+    move      a0, rPC                      #  pass our target PC
+    li        a2, kSVSTraceSelect          #  a2 <- interpreter entry point
+    sw        zero, offThread_inJitCodeCache(rSELF) #  Back to the interp land
+    b         jitSVShadowRunEnd            #  doesn't return
+
+    .global dvmJitToInterpTraceSelect
+dvmJitToInterpTraceSelect:
+    lw        a0, 0(ra)                   #  pass our target PC
+    li        a2, kSVSTraceSelect          #  a2 <- interpreter entry point
+    sw        zero, offThread_inJitCodeCache(rSELF) #  Back to the interp land
+    b         jitSVShadowRunEnd            #  doesn't return
+
+    .global dvmJitToInterpBackwardBranch
+dvmJitToInterpBackwardBranch:
+    lw        a0, 0(ra)                   #  pass our target PC
+    li        a2, kSVSBackwardBranch       #  a2 <- interpreter entry point
+    sw        zero, offThread_inJitCodeCache(rSELF) #  Back to the interp land
+    b         jitSVShadowRunEnd            #  doesn't return
+
+    .global dvmJitToInterpNormal
+dvmJitToInterpNormal:
+    lw        a0, 0(ra)                   #  pass our target PC
+    li        a2, kSVSNormal               #  a2 <- interpreter entry point
+    sw        zero, offThread_inJitCodeCache(rSELF) #  Back to the interp land
+    b         jitSVShadowRunEnd            #  doesn't return
+
+    .global dvmJitToInterpNoChain
+dvmJitToInterpNoChain:
+    move      a0, rPC                      #  pass our target PC
+    li        a2, kSVSNoChain              #  a2 <- interpreter entry point
+    sw        zero, offThread_inJitCodeCache(rSELF) #  Back to the interp land
+    b         jitSVShadowRunEnd            #  doesn't return
+#else                                   /*  WITH_SELF_VERIFICATION */
+
+
+/*
+ * "longjmp" to a translation after single-stepping.
+ */
+    .global dvmJitResumeTranslation             # (Thread* self, u4* dFP)
+dvmJitResumeTranslation:
+    move    rSELF, a0                           # restore self
+    move    rPC, a1                             # restore Dalvik pc
+    move    rFP, a2                             # restore Dalvik fp
+    lw      a0, offThread_jitResumeNPC(rSELF)
+    sw      zero, offThread_jitResumeNPC(rSELF) # reset resume address
+    lw      sp, offThread_jitResumeNSP(rSELF)   # cut back native stack
+    jr      a0                                  # resume translation
+
+
+/*
+ * Return from the translation cache to the interpreter when the compiler is
+ * having issues translating/executing a Dalvik instruction. We have to skip
+ * the code cache lookup; otherwise it is possible to bounce indefinitely
+ * between the interpreter and the code cache if the instruction that fails
+ * to be compiled happens to be at a trace start.
+ */
+    .global dvmJitToInterpPunt
+dvmJitToInterpPunt:
+    lw        gp, STACK_OFFSET_GP(sp)
+    move      rPC, a0
+#if defined(WITH_JIT_TUNING)
+    move      a0, ra
+    JAL(dvmBumpPunt)
+#endif
+    EXPORT_PC()
+    sw        zero, offThread_inJitCodeCache(rSELF) # Back to the interp land
+    lw        rIBASE, offThread_curHandlerTable(rSELF)
+    FETCH_INST()
+    GET_INST_OPCODE(t0)
+    GOTO_OPCODE(t0)
+
+/*
+ * Return to the interpreter to handle a single instruction.
+ * On entry:
+ *    rPC <= Dalvik PC of instruction to interpret
+ *    a1 <= Dalvik PC of resume instruction
+ *    ra <= resume point in translation
+ */
+
+    .global dvmJitToInterpSingleStep
+dvmJitToInterpSingleStep:
+    lw        gp, STACK_OFFSET_GP(sp)
+    move      rPC, a0                       # set up dalvik pc
+    EXPORT_PC()
+    sw        ra, offThread_jitResumeNPC(rSELF)
+    sw        sp, offThread_jitResumeNSP(rSELF)
+    sw        a1, offThread_jitResumeDPC(rSELF)
+    li        a1, 1
+    sw        a1, offThread_singleStepCount(rSELF) # just step once
+    move      a0, rSELF
+    li        a1, kSubModeCountedStep
+    JAL(dvmEnableSubMode)                   # (self, subMode)
+    lw        rIBASE, offThread_curHandlerTable(rSELF)
+    FETCH_INST()
+    GET_INST_OPCODE(t0)
+    GOTO_OPCODE(t0)
+/*
+ * Return from the translation cache and immediately request
+ * a translation for the exit target.  Commonly used for callees.
+ */
+    .global dvmJitToInterpTraceSelectNoChain
+dvmJitToInterpTraceSelectNoChain:
+    lw        gp, STACK_OFFSET_GP(sp)
+#if defined(WITH_JIT_TUNING)
+    JAL(dvmBumpNoChain)
+#endif
+    move      a0, rPC
+    move      a1, rSELF
+    JAL(dvmJitGetTraceAddrThread)          # (pc, self)
+    move      a0, v0
+    sw        a0, offThread_inJitCodeCache(rSELF) # set the inJitCodeCache flag
+    move      a1, rPC                      # arg1 of translation may need this
+    move      ra, zero                     #  in case target is HANDLER_INTERPRET
+    beqz      a0, 2f                       # 0 means translation does not exist
+    jr        a0
+
+/*
+ * Return from the translation cache and immediately request
+ * a translation for the exit target.  Commonly used following
+ * invokes.
+ */
+    .global dvmJitToInterpTraceSelect
+dvmJitToInterpTraceSelect:
+    lw        gp, STACK_OFFSET_GP(sp)
+    lw        rPC, (ra)                    #  get our target PC
+    subu      rINST, ra, 8                 #  save start of chain branch
+    move      a0, rPC
+    move      a1, rSELF
+    JAL(dvmJitGetTraceAddrThread)          # (pc, self)
+    sw        v0, offThread_inJitCodeCache(rSELF) # set the inJitCodeCache flag
+    beqz      v0, 2f
+    move      a0, v0
+    move      a1, rINST
+    JAL(dvmJitChain)                       #  v0 <- dvmJitChain(codeAddr, chainAddr)
+    move      a1, rPC                      #  arg1 of translation may need this
+    move      ra, zero                     #  in case target is HANDLER_INTERPRET
+    move      a0, v0
+    beqz      a0, toInterpreter            #  didn't chain - resume with interpreter
+
+    jr        a0                           #  continue native execution
+
+/* No translation, so request one if profiling isn't disabled */
+2:
+    lw        rIBASE, offThread_curHandlerTable(rSELF)
+    lw        a0, offThread_pJitProfTable(rSELF)
+    FETCH_INST()
+    li        t0, kJitTSelectRequestHot
+    movn      a2, t0, a0                   #  ask for trace selection
+    bnez      a0, common_selectTrace
+    GET_INST_OPCODE(t0)
+    GOTO_OPCODE(t0)
+
+/*
+ * Return from the translation cache to the interpreter.
+ * The return was done with a jal, so the 32-bit word following the
+ * call site contains the target rPC value.
+ *
+ * We recover the new target from (ra), then check to see if there is
+ * a translation available for it.  If so, we do a translation chain
+ * and go back to native execution.  Otherwise, it's back to the
+ * interpreter (after treating this entry as a potential
+ * trace start).
+ */
+    .global dvmJitToInterpNormal
+dvmJitToInterpNormal:
+    lw        gp, STACK_OFFSET_GP(sp)
+    lw        rPC, (ra)                    #  get our target PC
+    subu      rINST, ra, 8                 #  save start of chain branch
+#if defined(WITH_JIT_TUNING)
+    JAL(dvmBumpNormal)
+#endif
+    move      a0, rPC
+    move      a1, rSELF
+    JAL(dvmJitGetTraceAddrThread)           # (pc, self)
+    move      a0, v0
+    sw        a0, offThread_inJitCodeCache(rSELF) #  set the inJitCodeCache flag
+    beqz      a0, toInterpreter            #  go if not, otherwise do chain
+    move      a1, rINST
+    JAL(dvmJitChain)                       #  v0 <- dvmJitChain(codeAddr, chainAddr)
+    move      a1, rPC                      #  arg1 of translation may need this
+    move      ra, zero                     #  in case target is HANDLER_INTERPRET
+    move      a0, v0
+    beqz      a0, toInterpreter            #  didn't chain - resume with interpreter
+
+    jr        a0                           #  continue native execution
+
+/*
+ * Return from the translation cache to the interpreter to do method invocation.
+ * Check if translation exists for the callee, but don't chain to it.
+ */
+    .global dvmJitToInterpNoChainNoProfile
+dvmJitToInterpNoChainNoProfile:
+#if defined(WITH_JIT_TUNING)
+    JAL(dvmBumpNoChain)
+#endif
+    move      a0, rPC
+    move      a1, rSELF
+    JAL(dvmJitGetTraceAddrThread)          # (pc, self)
+    move      a0, v0
+    sw        a0, offThread_inJitCodeCache(rSELF) #  set the inJitCodeCache flag
+    move      a1, rPC                      #  arg1 of translation may need this
+    move      ra, zero                     #  in case target is HANDLER_INTERPRET
+    beqz      a0, footer235
+
+    jr        a0                           #  continue native execution if so
+footer235:
+    EXPORT_PC()
+    lw        rIBASE, offThread_curHandlerTable(rSELF)
+    FETCH_INST()
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/*
+ * Return from the translation cache to the interpreter to do method invocation.
+ * Check if translation exists for the callee, but don't chain to it.
+ */
+
+    .global dvmJitToInterpNoChain
+dvmJitToInterpNoChain:
+    lw        gp, STACK_OFFSET_GP(sp)
+#if defined(WITH_JIT_TUNING)
+    JAL(dvmBumpNoChain)
+#endif
+    move      a0, rPC
+    move      a1, rSELF
+    JAL(dvmJitGetTraceAddrThread)          # (pc, self)
+    move      a0, v0
+    sw        a0, offThread_inJitCodeCache(rSELF) #  set the inJitCodeCache flag
+    move      a1, rPC                      #  arg1 of translation may need this
+    move      ra, zero                     #  in case target is HANDLER_INTERPRET
+    beqz      a0, 1f
+    jr        a0                           #  continue native execution if so
+1:
+#endif                                  /*  WITH_SELF_VERIFICATION */
+
+/*
+ * No translation, restore interpreter regs and start interpreting.
+ * rSELF & rFP were preserved in the translated code, and rPC has
+ * already been restored by the time we get here.  We'll need to set
+ * up rIBASE & rINST, and load the address of the JitProfTable into a0.
+ */
+
+toInterpreter:
+    EXPORT_PC()
+    lw        rIBASE, offThread_curHandlerTable(rSELF)
+    FETCH_INST()
+    lw        a0, offThread_pJitProfTable(rSELF)
+    # NOTE: intended fallthrough
+
+/*
+ * Similar to common_updateProfile, but tests for a null pJitProfTable.
+ * a0 holds pJitProfTable, rINST is loaded, rPC is current and
+ * rIBASE has been recently refreshed.
+ */
+
+common_testUpdateProfile:
+
+    beqz      a0, 4f
+
+/*
+ * Common code to update potential trace start counter, and initiate
+ * a trace-build if appropriate.
+ * On entry here:
+ *    a0    <= pJitProfTable (verified non-NULL)
+ *    rPC   <= Dalvik PC
+ *    rINST <= next instruction
+ */
+common_updateProfile:
+    srl       a3, rPC, 12                  #  cheap, but fast hash function
+    xor       a3, a3, rPC
+    andi      a3, a3, JIT_PROF_SIZE-1      #  eliminate excess bits
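+    # i.e. index = ((rPC >> 12) ^ rPC) & (JIT_PROF_SIZE-1), one counter byte per bucket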
+    addu      t1, a0, a3
+    lbu       a1, (t1)                     #  get counter
+    GET_INST_OPCODE(t0)
+    subu      a1, a1, 1                    #  decrement counter
+    sb        a1, (t1)                     #  and store it
+    beqz      a1, 1f
+    GOTO_OPCODE(t0)                        #  threshold not reached; keep interpreting
+1:
+    /* Looks good, reset the counter */
+    lw        a1, offThread_jitThreshold(rSELF)
+    sb        a1, (t1)
+    EXPORT_PC()
+    move      a0, rPC
+    move      a1, rSELF
+    JAL(dvmJitGetTraceAddrThread)          # (pc, self)
+    move      a0, v0
+    sw        v0, offThread_inJitCodeCache(rSELF) #  set the inJitCodeCache flag
+    move      a1, rPC                      #  arg1 of translation may need this
+    move      ra, zero                     #  in case target is HANDLER_INTERPRET
+
+#if !defined(WITH_SELF_VERIFICATION)
+    li        t0, kJitTSelectRequest       #  ask for trace selection
+    movz      a2, t0, a0
+    beqz      a0, common_selectTrace
+    jr        a0                           #  jump to the translation
+#else
+
+    bne       a0, zero, skip_ask_for_trace_selection
+    li        a2, kJitTSelectRequest       #  ask for trace selection
+    j         common_selectTrace
+
+skip_ask_for_trace_selection:
+    /*
+     * At this point, we have a target translation.  However, if
+     * that translation is actually the interpret-only pseudo-translation
+     * we want to treat it the same as no translation.
+     */
+    move      rBIX, a0                     #  save target
+    jal       dvmCompilerGetInterpretTemplate
+    # special case?
+    bne       v0, rBIX, jitSVShadowRunStart  #  set up self verification shadow space
+    # Need to clear the inJitCodeCache flag
+    sw        zero, offThread_inJitCodeCache(rSELF) #  back to the interp land
+    GET_INST_OPCODE(t0)
+    GOTO_OPCODE(t0)
+    /* no return */
+#endif
+
+/*
+ * On entry:
+ *  a2 is the jit state.
+ */
+
+common_selectTrace:
+    lhu        a0, offThread_subMode(rSELF)
+    andi       a0, (kSubModeJitTraceBuild | kSubModeJitSV)
+    bnez       a0, 3f                      # already doing JIT work, continue
+    sw         a2, offThread_jitState(rSELF)
+    move       a0, rSELF
+
+/*
+ * Call out to validate trace-building request.  If successful,
+ * rIBASE will be swapped to send us into single-stepping trace
+ * building mode, so we need to refresh before we continue.
+ */
+
+    EXPORT_PC()
+    SAVE_PC_TO_SELF()
+    SAVE_FP_TO_SELF()
+    JAL(dvmJitCheckTraceRequest)
+3:
+    FETCH_INST()
+    lw        rIBASE, offThread_curHandlerTable(rSELF)
+4:
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    GOTO_OPCODE(t0)
+    /* no return */
+#endif
+
+#if defined(WITH_SELF_VERIFICATION)
+
+/*
+ * Save PC and registers to shadow memory for self verification mode
+ * before jumping to native translation.
+ * On entry:
+ *    rPC, rFP, rSELF: the values that they should contain
+ *    rBIX: the address of the target translation.
+ */
+jitSVShadowRunStart:
+    move      a0, rPC                      #  a0 <- program counter
+    move      a1, rFP                      #  a1 <- frame pointer
+    move      a2, rSELF                    #  a2 <- self (Thread) pointer
+    move      a3, rBIX                     #  a3 <- target translation
+    jal       dvmSelfVerificationSaveState #  save registers to shadow space
+    lw        rFP, offShadowSpace_shadowFP(v0) #  rFP <- fp in shadow space
+    jr        rBIX                         #  jump to the translation
+
+/*
+ * Restore PC, registers, and interpState to original values
+ * before jumping back to the interpreter.
+ */
+jitSVShadowRunEnd:
+    move      a1, rFP                      #  pass ending fp
+    move      a3, rSELF                    #  pass self ptr for convenience
+    jal       dvmSelfVerificationRestoreState #  restore pc and fp values
+    LOAD_PC_FP_FROM_SELF()                 #  restore pc, fp
+    lw        a1, offShadowSpace_svState(a0) #  get self verification state
+    beq       a1, zero, 1f                 #  check for punt condition
+
+    # Setup SV single-stepping
+    move      a0, rSELF
+    li        a1, kSubModeJitSV
+    JAL(dvmEnableSubMode)                  # (self, subMode)
+    li        a2, kJitSelfVerification     #  ask for self verification
+    sw        a2, offThread_jitState(rSELF)
+    # Intentional fallthrough
+
+1:
+    # exit to interpreter without check
+    EXPORT_PC()
+    lw        rIBASE, offThread_curHandlerTable(rSELF)
+    FETCH_INST()
+    GET_INST_OPCODE(t0)
+    GOTO_OPCODE(t0)
+#endif
+
+/*
+ * The equivalent of "goto bail", this calls through the "bail handler".
+ * It will end this interpreter activation, and return to the caller
+ * of dvmMterpStdRun.
+ *
+ * State registers will be saved to the "thread" area before bailing, for
+ * debugging purposes.
+ */
+    .ent common_gotoBail
+common_gotoBail:
+    SAVE_PC_FP_TO_SELF()                   # export state to "thread"
+    move      a0, rSELF                    # a0 <- self ptr
+    b         dvmMterpStdBail              # call(self)
+    .end common_gotoBail
+
+/*
+ * The JIT's invoke method needs to remember the callsite class and
+ * target pair.  Save them here so that they are available to
+ * dvmCheckJit following the interpretation of this invoke.
+ */
+#if defined(WITH_JIT)
+save_callsiteinfo:
+    beqz    rOBJ, 1f
+    lw      rOBJ, offObject_clazz(rOBJ)
+1:
+    sw      a0, offThread_methodToCall(rSELF)
+    sw      rOBJ, offThread_callsiteClass(rSELF)
+    jr      ra
+#endif
+
+/*
+ * Common code for jumbo method invocation.
+ * NOTE: this adjusts rPC to account for the difference in instruction width.
+ * As a result, the savedPc in the stack frame will not be wholly accurate. So
+ * long as that is only used for source file line number calculations, we're
+ * okay.
+ */
+common_invokeMethodJumboNoThis:
+#if defined(WITH_JIT)
+ /* On entry: a0 is "Method* methodToCall */
+    li       rOBJ, 0                     # clear "this"
+#endif
+common_invokeMethodJumbo:
+ /* On entry: a0 is "Method* methodToCall, rOBJ is "this" */
+.LinvokeNewJumbo:
+#if defined(WITH_JIT)
+    lhu      a1, offThread_subMode(rSELF)
+    andi     a1, kSubModeJitTraceBuild
+    beqz     a1, 1f
+    JAL(save_callsiteinfo)
+#endif
+/* prepare to copy args to "outs" area of current frame */
+1:
+    add      rPC, rPC, 4          # adjust pc to make return consistent
+    FETCH(a2, 1)
+    SAVEAREA_FROM_FP(rBIX, rFP)   # rBIX <- stack save area
+    beqz     a2, .LinvokeArgsDone  # if no args, skip the rest
+    FETCH(a1, 2)                  # a1 <- CCCC
+    b         .LinvokeRangeArgs   # handle args like invoke range
+
+
+/*
+ * Common code for method invocation with range.
+ *
+ * On entry:
+ *  a0 is "Method* methodToCall", the method we're trying to call
+ */
+common_invokeMethodRange:
+.LinvokeNewRange:
+#if defined(WITH_JIT)
+    lhu      a1, offThread_subMode(rSELF)
+    andi     a1, kSubModeJitTraceBuild
+    beqz     a1, 1f
+    JAL(save_callsiteinfo)
+#endif
+    # prepare to copy args to "outs" area of current frame
+1:
+    GET_OPA(a2)
+    SAVEAREA_FROM_FP(rBIX, rFP)              #  rBIX <- stack save area
+    beqz      a2, .LinvokeArgsDone
+    FETCH(a1, 2)                           #  a1 <- CCCC
+.LinvokeRangeArgs:
+    # a0=methodToCall, a1=CCCC, a2=count, rBIX=outs
+    # (very few methods have > 10 args; could unroll for common cases)
+    EAS2(a3, rFP, a1)
+    sll       t0, a2, 2
+    subu      rBIX, rBIX, t0
+
+1:
+    lw        a1, 0(a3)
+    addu      a3, a3, 4
+    subu      a2, a2, 1
+    sw        a1, 0(rBIX)
+    addu      rBIX, 4
+    bnez      a2, 1b
+    b         .LinvokeArgsDone
+
+/*
+ * Common code for method invocation without range.
+ *
+ * On entry:
+ *  a0 is "Method* methodToCall", "rOBJ is this"
+ */
+common_invokeMethodNoRange:
+.LinvokeNewNoRange:
+#if defined(WITH_JIT)
+    lhu      a1, offThread_subMode(rSELF)
+    andi     a1, kSubModeJitTraceBuild
+    beqz     a1, 1f
+    JAL(save_callsiteinfo)
+#endif
+
+    # prepare to copy args to "outs" area of current frame
+1:
+    GET_OPB(a2)
+    SAVEAREA_FROM_FP(rBIX, rFP)
+    beqz      a2, .LinvokeArgsDone
+    FETCH(a1, 2)
+
+    # a0=methodToCall, a1=GFED, a2=count,
+.LinvokeNonRange:
+    beq       a2, 0, 0f
+    beq       a2, 1, 1f
+    beq       a2, 2, 2f
+    beq       a2, 3, 3f
+    beq       a2, 4, 4f
+    beq       a2, 5, 5f
+
+5:
+    and       t0, rINST, 0x0f00
+    ESRN(t2, rFP, t0, 6)
+    lw        a3, (t2)
+    subu      rBIX, 4
+    sw        a3, 0(rBIX)
+
+4:
+    and       t0, a1, 0xf000
+    ESRN(t2, rFP, t0, 10)
+    lw        a3, (t2)
+    subu      rBIX, 4
+    sw        a3, 0(rBIX)
+
+3:
+    and       t0, a1, 0x0f00
+    ESRN(t2, rFP, t0, 6)
+    lw        a3, (t2)
+    subu      rBIX, 4
+    sw        a3, 0(rBIX)
+
+2:
+    and       t0, a1, 0x00f0
+    ESRN(t2, rFP, t0, 2)
+    lw        a3, (t2)
+    subu      rBIX, 4
+    sw        a3, 0(rBIX)
+
+1:
+    and       t0, a1, 0x000f
+    EASN(t2, rFP, t0, 2)
+    lw        a3, (t2)
+    subu      rBIX, 4
+    sw        a3, 0(rBIX)
+
+0:
+    # fall through to .LinvokeArgsDone
+
+
+.LinvokeArgsDone:                          #  a0=methodToCall
+    lhu       rOBJ, offMethod_registersSize(a0)
+    lhu       a3, offMethod_outsSize(a0)
+    lw        a2, offMethod_insns(a0)
+    lw        rINST, offMethod_clazz(a0)
+    # find space for the new stack frame, check for overflow
+    SAVEAREA_FROM_FP(a1, rFP)              # a1 <- stack save area
+    sll       t0, rOBJ, 2                    #  a1 <- newFp (old savearea - regsSize)
+    subu      a1, a1, t0
+    SAVEAREA_FROM_FP(rBIX, a1)
+    lw        rOBJ, offThread_interpStackEnd(rSELF) #  rOBJ <- interpStackEnd
+    sll       t2, a3, 2
+    subu      t0, rBIX, t2
+    lhu       ra, offThread_subMode(rSELF)
+    lw        a3, offMethod_accessFlags(a0) #  a3 <- methodToCall->accessFlags
+    bltu      t0, rOBJ, .LstackOverflow      #  yes, this frame will overflow stack
+
+
+    # set up newSaveArea
+#ifdef EASY_GDB
+    SAVEAREA_FROM_FP(t0, rFP)
+    sw        t0, offStackSaveArea_prevSave(rBIX)
+#endif
+    sw        rFP, (offStackSaveArea_prevFrame)(rBIX)
+    sw        rPC, (offStackSaveArea_savedPc)(rBIX)
+#if defined(WITH_JIT)
+    sw        zero, (offStackSaveArea_returnAddr)(rBIX)
+#endif
+    sw        a0, (offStackSaveArea_method)(rBIX)
+    # Profiling?
+    bnez       ra, 2f
+1:
+    and       t2, a3, ACC_NATIVE
+    bnez      t2, .LinvokeNative
+    lhu       rOBJ, (a2)           # rOBJ <- load Inst from new PC
+    lw        a3, offClassObject_pDvmDex(rINST)
+    move      rPC, a2              # Publish new rPC
+    # Update state values for the new method
+    # a0=methodToCall, a1=newFp, a3=newMethodClass, rOBJ=newINST
+    sw        a0, offThread_method(rSELF)
+    sw        a3, offThread_methodClassDex(rSELF)
+    li        a2, 1
+    sw        a2, offThread_debugIsMethodEntry(rSELF)
+
+#if defined(WITH_JIT)
+    lw        a0, offThread_pJitProfTable(rSELF)
+    move      rFP, a1                    # fp = newFp
+    GET_PREFETCHED_OPCODE(t0, rOBJ)      # extract prefetched opcode from rOBJ
+    move      rINST, rOBJ                # publish new rINST
+    sw        a1, offThread_curFrame(rSELF)
+    bnez      a0, common_updateProfile
+    GOTO_OPCODE(t0)
+#else
+    move      rFP, a1
+    GET_PREFETCHED_OPCODE(t0, rOBJ)
+    move      rINST, rOBJ
+    sw        a1, offThread_curFrame(rSELF)
+    GOTO_OPCODE(t0)
+#endif
+
+2:
+    # Profiling - record method entry.  a0: methodToCall
+    STACK_STORE(a0, 0)
+    STACK_STORE(a1, 4)
+    STACK_STORE(a2, 8)
+    STACK_STORE(a3, 12)
+    sw       rPC, offThread_pc(rSELF)          # update interpSave.pc
+    move     a1, a0
+    move     a0, rSELF
+    JAL(dvmReportInvoke)
+    STACK_LOAD(a3, 12)                         # restore a0-a3
+    STACK_LOAD(a2, 8)
+    STACK_LOAD(a1, 4)
+    STACK_LOAD(a0, 0)
+    b        1b
+.LinvokeNative:
+    # Prep for the native call
+    # a0=methodToCall, a1=newFp, rBIX=newSaveArea
+    lhu       ra, offThread_subMode(rSELF)
+    lw        t3, offThread_jniLocal_topCookie(rSELF)
+    sw        a1, offThread_curFrame(rSELF)
+    sw        t3, offStackSaveArea_localRefCookie(rBIX) # newFp->localRefCookie=top
+    move      a2, a0
+    move      a0, a1
+    addu      a1, rSELF, offThread_retval
+    move      a3, rSELF
+#ifdef ASSIST_DEBUGGER
+    /* insert fake function header to help gdb find the stack frame */
+    b         .Lskip
+    .ent dalvik_mterp
+dalvik_mterp:
+    STACK_STORE_FULL()
+.Lskip:
+#endif
+    bnez      ra, 11f                          # Any special SubModes active?
+    lw        t9, offMethod_nativeFunc(a2)
+    jalr      t9
+    lw        gp, STACK_OFFSET_GP(sp)
+7:
+    # native return; rBIX=newSaveArea
+    # equivalent to dvmPopJniLocals
+    lw        a0, offStackSaveArea_localRefCookie(rBIX)
+    lw        a1, offThread_exception(rSELF)
+    sw        rFP, offThread_curFrame(rSELF)
+    sw        a0, offThread_jniLocal_topCookie(rSELF)    # new top <- old top
+    bnez      a1, common_exceptionThrown
+
+    FETCH_ADVANCE_INST(3)
+    GET_INST_OPCODE(t0)
+    GOTO_OPCODE(t0)
+11:
+    # a0=newFp, a1=&retval, a2=methodToCall, a3=self, ra=subModes
+    SCRATCH_STORE(a0, 0)
+    SCRATCH_STORE(a1, 4)
+    SCRATCH_STORE(a2, 8)
+    SCRATCH_STORE(a3, 12)
+    move      a0, a2                    # a0 <- methodToCall
+    move      a1, rSELF
+    move      a2, rFP
+    JAL(dvmReportPreNativeInvoke)       # (methodToCall, self, fp)
+    SCRATCH_LOAD(a3, 12)                         # restore a0-a3
+    SCRATCH_LOAD(a2, 8)
+    SCRATCH_LOAD(a1, 4)
+    SCRATCH_LOAD(a0, 0)
+
+    # Call the native method
+    lw       t9, offMethod_nativeFunc(a2)      # t9<-methodToCall->nativeFunc
+    jalr     t9
+    lw       gp, STACK_OFFSET_GP(sp)
+
+    # Restore the pre-call arguments
+    SCRATCH_LOAD(a3, 12)                         # restore a0-a3
+    SCRATCH_LOAD(a2, 8)
+    SCRATCH_LOAD(a1, 4)
+    SCRATCH_LOAD(a0, 0)
+
+    # Finish up any post-invoke subMode requirements
+    move      a0, a2
+    move      a1, rSELF
+    move      a2, rFP
+    JAL(dvmReportPostNativeInvoke)      # (methodToCall, self, fp)
+    b         7b
+
+
+.LstackOverflow:       # a0=methodToCall
+    move      a1, a0                    #  a1 <- methodToCall
+    move      a0, rSELF                 # a0 <- self
+    JAL(dvmHandleStackOverflow)         #  dvmHandleStackOverflow(self, methodToCall)
+    b         common_exceptionThrown
+#ifdef ASSIST_DEBUGGER
+    .end dalvik_mterp
+#endif
+
+    /*
+     * Common code for method invocation, calling through "glue code".
+     *
+     * TODO: now that we have range and non-range invoke handlers, this
+     *       needs to be split into two.  Maybe just create entry points
+     *       that set the range flag and jump here?
+     *
+     * On entry:
+     *  a0 is "Method* methodToCall", the method we're trying to call
+     *  a "bool methodCallRange" flag indicates if this is a /range variant
+     */
+
+/*
+ * Common code for handling a return instruction.
+ *
+ * This does not return.
+ */
+common_returnFromMethod:
+.LreturnNew:
+    lhu       t0, offThread_subMode(rSELF)
+    SAVEAREA_FROM_FP(a0, rFP)
+    lw        rOBJ, offStackSaveArea_savedPc(a0) # rOBJ = saveArea->savedPc
+    bnez      t0, 19f
+14:
+    lw        rFP, offStackSaveArea_prevFrame(a0) # fp = saveArea->prevFrame
+    lw        a2, (offStackSaveArea_method - sizeofStackSaveArea)(rFP)
+                                               # a2<- method we're returning to
+    # is this a break frame?
+    beqz      a2, common_gotoBail              # break frame, bail out completely
+
+    lw        rBIX, offMethod_clazz(a2)        # rBIX<- method->clazz
+    lw        rIBASE, offThread_curHandlerTable(rSELF) # refresh rIBASE
+    PREFETCH_ADVANCE_INST(rINST, rOBJ, 3)      # advance rOBJ, update new rINST
+    sw        a2, offThread_method(rSELF)      # self->method = newSave->method
+    lw        a1, offClassObject_pDvmDex(rBIX) # a1<- method->clazz->pDvmDex
+    sw        rFP, offThread_curFrame(rSELF)   # curFrame = fp
+#if defined(WITH_JIT)
+    lw         rBIX, offStackSaveArea_returnAddr(a0)
+    move       rPC, rOBJ                       # publish new rPC
+    sw         a1, offThread_methodClassDex(rSELF)
+    sw         rBIX, offThread_inJitCodeCache(rSELF) # may return to JIT'ed land
+    beqz       rBIX, 15f                       # 0 means caller is not compiled code
+    move       t9, rBIX
+    jalr       t9
+    lw         gp, STACK_OFFSET_GP(sp)
+15:
+    GET_INST_OPCODE(t0)                        # extract opcode from rINST
+    GOTO_OPCODE(t0)                            # jump to next instruction
+#else
+    GET_INST_OPCODE(t0)                        # extract opcode from rINST
+    move       rPC, rOBJ                       # publish new rPC
+    sw         a1, offThread_methodClassDex(rSELF)
+    GOTO_OPCODE(t0)
+#endif
+
+19:
+    # Handle special actions
+    # On entry, a0: StackSaveArea
+    lw         a1, offStackSaveArea_prevFrame(a0) # a1<- prevFP
+    sw         rPC, offThread_pc(rSELF)        # update interpSave.pc
+    sw         a1, offThread_curFrame(rSELF)   # update interpSave.curFrame
+    move       a0, rSELF
+    JAL(dvmReportReturn)
+    SAVEAREA_FROM_FP(a0, rFP)                  # restore StackSaveArea
+    b          14b
+
+    .if 0
+    /*
+     * Return handling, calls through "glue code".
+     */
+.LreturnOld:
+    SAVE_PC_FP_TO_SELF()                       # export state
+    move       a0, rSELF                       # arg to function
+    JAL(dvmMterp_returnFromMethod)
+    b          common_resumeAfterGlueCall
+    .endif
+
+/*
+ * Somebody has thrown an exception.  Handle it.
+ *
+ * If the exception processing code returns to us (instead of falling
+ * out of the interpreter), continue with whatever the next instruction
+ * now happens to be.
+ *
+ * This does not return.
+ */
+    .global dvmMterpCommonExceptionThrown
+dvmMterpCommonExceptionThrown:
+common_exceptionThrown:
+.LexceptionNew:
+
+    EXPORT_PC()
+    move     a0, rSELF
+    JAL(dvmCheckSuspendPending)
+    lw       rOBJ, offThread_exception(rSELF)
+    move     a1, rSELF
+    move     a0, rOBJ
+    JAL(dvmAddTrackedAlloc)
+    lhu      a2, offThread_subMode(rSELF)
+    sw       zero, offThread_exception(rSELF)
+
+    # Special subMode?
+    bnez     a2, 7f                     # any special subMode handling needed?
+8:
+    /* set up args and a local for "&fp" */
+    sw       rFP, 20(sp)                 #  store rFP => tmp
+    addu     t0, sp, 20                  #  compute &tmp
+    sw       t0, STACK_OFFSET_ARG04(sp)  #  save it in arg4 as per ABI
+    li       a3, 0                       #  a3 <- false
+    lw       a1, offThread_method(rSELF)
+    move     a0, rSELF
+    lw       a1, offMethod_insns(a1)
+    lhu      ra, offThread_subMode(rSELF)
+    move     a2, rOBJ
+    subu     a1, rPC, a1
+    sra      a1, a1, 1
+
+    /* call, v0 gets catchRelPc (a code-unit offset) */
+    JAL(dvmFindCatchBlock)           # call(self, relPc, exc, scan?, &fp)
+    lw        rFP, 20(sp)            # retrieve the updated rFP
+
+    /* update frame pointer and check result from dvmFindCatchBlock */
+    move      a0, v0
+    bltz      v0, .LnotCaughtLocally
+
+    /* fix earlier stack overflow if necessary; Preserve a0 */
+    lbu       a1, offThread_stackOverflowed(rSELF)
+    beqz      a1, 1f
+    move      rBIX, a0
+    move      a0, rSELF
+    move      a1, rOBJ
+    JAL(dvmCleanupStackOverflow)
+    move      a0, rBIX
+
+1:
+
+/* adjust locals to match self->interpSave.curFrame and updated PC */
+    SAVEAREA_FROM_FP(a1, rFP)           # a1<- new save area
+    lw        a1, offStackSaveArea_method(a1)
+    sw        a1, offThread_method(rSELF)
+    lw        a2, offMethod_clazz(a1)
+    lw        a3, offMethod_insns(a1)
+    lw        a2, offClassObject_pDvmDex(a2)
+    EAS1(rPC, a3, a0)
+    sw        a2, offThread_methodClassDex(rSELF)
+
+    /* release the tracked alloc on the exception */
+    move      a0, rOBJ
+    move      a1, rSELF
+    JAL(dvmReleaseTrackedAlloc)
+
+    /* restore the exception if the handler wants it */
+    lw        rIBASE, offThread_curHandlerTable(rSELF)
+    FETCH_INST()
+    GET_INST_OPCODE(t0)
+    bne       t0, OP_MOVE_EXCEPTION, 2f
+    sw        rOBJ, offThread_exception(rSELF)
+2:
+    GOTO_OPCODE(t0)
+
+    # Manage debugger bookkeeping
+7:
+    sw        rPC, offThread_pc(rSELF)
+    sw        rFP, offThread_curFrame(rSELF)
+    move      a0, rSELF
+    move      a1, rOBJ
+    JAL(dvmReportExceptionThrow)
+    b         8b
+
+.LnotCaughtLocally:                     #  rOBJ = exception
+    /* fix stack overflow if necessary */
+    lbu       a1, offThread_stackOverflowed(rSELF)
+    beqz      a1, 3f
+    move      a0, rSELF
+    move      a1, rOBJ
+    JAL(dvmCleanupStackOverflow)           #  dvmCleanupStackOverflow(self, exception)
+
+3:
+    # may want to show "not caught locally" debug messages here
+#if DVM_SHOW_EXCEPTION >= 2
+    /* call __android_log_print(prio, tag, format, ...) */
+    /* "Exception %s from %s:%d not caught locally" */
+    lw        a0, offThread_method(rSELF)
+    lw        a1, offMethod_insns(a0)
+    subu      a1, rPC, a1
+    sra       a1, a1, 1
+    JAL(dvmLineNumFromPC)
+    sw        v0, 20(sp)
+    # dvmGetMethodSourceFile(method)
+    lw        a0, offThread_method(rSELF)
+    JAL(dvmGetMethodSourceFile)
+    sw        v0, 16(sp)
+    # exception->clazz->descriptor
+    lw        a3, offObject_clazz(rOBJ)
+    lw        a3, offClassObject_descriptor(a3)
+    la        a2, .LstrExceptionNotCaughtLocally
+    la        a1, .LstrLogTag
+    li        a0, 3
+    JAL(__android_log_print)
+#endif
+    sw        rOBJ, offThread_exception(rSELF)
+    move      a0, rOBJ
+    move      a1, rSELF
+    JAL(dvmReleaseTrackedAlloc)
+    b         common_gotoBail
+
+    /*
+     * Exception handling, calls through "glue code".
+     */
+    .if     0
+.LexceptionOld:
+    SAVE_PC_TO_SELF()                # export state
+    SAVE_FP_TO_SELF()
+    move     a0, rSELF               # arg to function
+    JAL(dvmMterp_exceptionThrown)
+    b       common_resumeAfterGlueCall
+    .endif
+
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including the current
+     * instruction.
+     *
+     * On entry:
+     *     rBIX: &dvmDex->pResFields[field]
+     *     a0:  field pointer (must preserve)
+     */
+common_verifyField:
+     lhu     a3, offThread_subMode(rSELF)
+     andi    a3, kSubModeJitTraceBuild
+     bnez    a3, 1f                 # building a trace?  yes, check the field
+     jr      ra                     # not building a trace, just return
+1:
+     lw      a1, (rBIX)
+     beqz    a1, 2f                 # resolution complete ?
+     jr      ra
+2:
+    SCRATCH_STORE(a0, 0)
+    SCRATCH_STORE(a1, 4)
+    SCRATCH_STORE(a2, 8)
+    SCRATCH_STORE(a3, 12)
+    SCRATCH_STORE(ra, 16)
+    move    a0, rSELF
+    move    a1, rPC
+    JAL(dvmJitEndTraceSelect)        # (self,pc) end trace before this inst
+    SCRATCH_LOAD(a0, 0)
+    SCRATCH_LOAD(a1, 4)
+    SCRATCH_LOAD(a2, 8)
+    SCRATCH_LOAD(a3, 12)
+    SCRATCH_LOAD(ra, 16)
+    jr      ra                       # return
+#endif
+
+/*
+ * After returning from a "glued" function, pull out the updated
+ * values and start executing at the next instruction.
+ */
+common_resumeAfterGlueCall:
+    LOAD_PC_FP_FROM_SELF()           #  pull rPC and rFP out of thread
+    lw      rIBASE, offThread_curHandlerTable(rSELF) # refresh
+    FETCH_INST()                     #  load rINST from rPC
+    GET_INST_OPCODE(t0)              #  extract opcode from rINST
+    GOTO_OPCODE(t0)                  #  jump to next instruction
+
+/*
+ * Invalid array index. Note that our calling convention is strange; we use a1
+ * and a3 because those just happen to be the registers all our callers are
+ * using. We move a3 into a0 before calling the C function; a1 already matches.
+ * a1: index
+ * a3: size
+ */
+common_errArrayIndex:
+    EXPORT_PC()
+    move      a0, a3
+    JAL(dvmThrowArrayIndexOutOfBoundsException)
+    b         common_exceptionThrown
+
+/*
+ * Integer divide or mod by zero.
+ */
+common_errDivideByZero:
+    EXPORT_PC()
+    la     a0, .LstrDivideByZero
+    JAL(dvmThrowArithmeticException)
+    b       common_exceptionThrown
+
+/*
+ * Attempt to allocate an array with a negative size.
+ * On entry: length in a1
+ */
+common_errNegativeArraySize:
+    EXPORT_PC()
+    move    a0, a1                                # arg0 <- len
+    JAL(dvmThrowNegativeArraySizeException)    # (len)
+    b       common_exceptionThrown
+
+/*
+ * Invocation of a non-existent method.
+ * On entry: method name in a1
+ */
+common_errNoSuchMethod:
+    EXPORT_PC()
+    move     a0, a1
+    JAL(dvmThrowNoSuchMethodError)
+    b       common_exceptionThrown
+
+/*
+ * We encountered a null object when we weren't expecting one.  We
+ * export the PC, throw a NullPointerException, and goto the exception
+ * processing code.
+ */
+common_errNullObject:
+    EXPORT_PC()
+    li      a0, 0
+    JAL(dvmThrowNullPointerException)
+    b       common_exceptionThrown
+
+/*
+ * For debugging, cause an immediate fault.  The source address will be in ra.
+ * Use a jal to jump here.
+ */
+common_abort:
+    lw      zero,-4(zero)            #  generate SIGSEGV
+
+/*
+ * Spit out a "we were here", preserving all registers.
+ */
+    .macro SQUEAK num
+common_squeak\num:
+    STACK_STORE_RA();
+    la        a0, .LstrSqueak
+    LOAD_IMM(a1, \num);
+    JAL(printf);
+    STACK_LOAD_RA();
+    RETURN;
+    .endm
+
+    SQUEAK 0
+    SQUEAK 1
+    SQUEAK 2
+    SQUEAK 3
+    SQUEAK 4
+    SQUEAK 5
+
+/*
+ * Spit out the number in a0, preserving registers.
+ */
+common_printNum:
+    STACK_STORE_RA()
+    MOVE_REG(a1, a0)
+    la        a0, .LstrSqueak
+    JAL(printf)
+    STACK_LOAD_RA()
+    RETURN
+
+/*
+ * Print a newline, preserving registers.
+ */
+common_printNewline:
+    STACK_STORE_RA()
+    la        a0, .LstrNewline
+    JAL(printf)
+    STACK_LOAD_RA()
+    RETURN
+
+    /*
+     * Print the 32-bit quantity in a0 as a hex value, preserving registers.
+     */
+common_printHex:
+    STACK_STORE_RA()
+    MOVE_REG(a1, a0)
+    la        a0, .LstrPrintHex
+    JAL(printf)
+    STACK_LOAD_RA()
+    RETURN
+
+/*
+ * Print the 64-bit quantity in a0-a1, preserving registers.
+ */
+common_printLong:
+    STACK_STORE_RA()
+    MOVE_REG(a3, a1)
+    MOVE_REG(a2, a0)
+    la        a0, .LstrPrintLong
+    JAL(printf)
+    STACK_LOAD_RA()
+    RETURN;
+
+/*
+ * Print full method info.  Pass the Method* in a0.  Preserves regs.
+ */
+common_printMethod:
+    STACK_STORE_RA()
+    JAL(dvmMterpPrintMethod)
+    STACK_LOAD_RA()
+    RETURN
+
+/*
+ * Call a C helper function that dumps regs and possibly some
+ * additional info.  Requires the C function to be compiled in.
+ */
+    .if 0
+common_dumpRegs:
+    STACK_STORE_RA()
+    JAL(dvmMterpDumpMipsRegs)
+    STACK_LOAD_RA()
+    RETURN
+    .endif
+
+/*
+ * Zero-terminated ASCII string data.
+ */
+    .data
+
+.LstrBadEntryPoint:
+    .asciiz "Bad entry point %d\n"
+.LstrDivideByZero:
+    .asciiz "divide by zero"
+.LstrFilledNewArrayNotImpl:
+    .asciiz "filled-new-array only implemented for 'int'"
+.LstrLogTag:
+    .asciiz  "mterp"
+.LstrExceptionNotCaughtLocally:
+    .asciiz  "Exception %s from %s:%d not caught locally\n"
+
+.LstrNewline:
+    .asciiz "\n"
+.LstrSqueak:
+    .asciiz "<%d>"
+.LstrPrintHex:
+    .asciiz "<0x%x>"
+.LstrPrintLong:
+    .asciiz "<%lld>"
diff --git a/vm/mterp/mips/header.S b/vm/mterp/mips/header.S
new file mode 100644
index 0000000..0f03599
--- /dev/null
+++ b/vm/mterp/mips/header.S
@@ -0,0 +1,345 @@
+#include "../common/asm-constants.h"
+#include "../common/mips-defines.h"
+#include <asm/regdef.h>
+#include <asm/fpregdef.h>
+
+#ifdef __mips_hard_float
+#define HARD_FLOAT
+#else
+#define SOFT_FLOAT
+#endif
+
+#if (__mips==32) && (__mips_isa_rev>=2)
+#define MIPS32R2
+#endif
+
+/* MIPS definitions and declarations
+
+   reg	nick		purpose
+   s0	rPC		interpreted program counter, used for fetching instructions
+   s1	rFP		interpreted frame pointer, used for accessing locals and args
+   s2	rSELF		self (Thread) pointer
+   s3	rIBASE		interpreted instruction base pointer, used for computed goto
+   s4	rINST		first 16-bit code unit of current instruction
+   s5	rOBJ		object ("this") pointer or other callee-saved scratch
+   s6	rBIX		callee-saved scratch, used heavily by the JIT glue
+   s7	rTEMP		callee-saved temporary
+*/
+
+
+/* single-purpose registers, given names for clarity */
+#define rPC s0
+#define rFP s1
+#define rSELF s2
+#define rIBASE s3
+#define rINST s4
+#define rOBJ s5
+#define rBIX s6
+#define rTEMP s7
+
+/* 64-bit quantities live in register pairs whose order depends on
+   endianness: on a little-endian build the low word of a long argument goes
+   in a0 and the high word in a1, while a big-endian build swaps them.  The
+   rARG* and rRESULT* aliases below always name the low word first, hiding
+   the difference from the handlers. */
+
+#ifdef HAVE_LITTLE_ENDIAN
+#define rARG0 a0
+#define rARG1 a1
+#define rARG2 a2
+#define rARG3 a3
+#define rRESULT0 v0
+#define rRESULT1 v1
+#else
+#define rARG0 a1
+#define rARG1 a0
+#define rARG2 a3
+#define rARG3 a2
+#define rRESULT0 v1
+#define rRESULT1 v0
+#endif
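+
+/* For example (illustrative usage): a handler that hands a Dalvik long to a
+   C helper can load it with LOAD64(rARG0, rARG1, <addr>) and pick up the
+   result from rRESULT0/rRESULT1, without caring about endianness.  LOAD64
+   is defined later in this file. */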
+
+
+/* save/restore the PC and/or FP from the glue struct */
+#define LOAD_PC_FROM_SELF() lw rPC, offThread_pc(rSELF)
+#define SAVE_PC_TO_SELF() sw rPC, offThread_pc(rSELF)
+#define LOAD_FP_FROM_SELF() lw rFP, offThread_curFrame(rSELF)
+#define SAVE_FP_TO_SELF() sw rFP, offThread_curFrame(rSELF)
+#define LOAD_PC_FP_FROM_SELF() \
+	LOAD_PC_FROM_SELF();   \
+	LOAD_FP_FROM_SELF()
+#define SAVE_PC_FP_TO_SELF()   \
+	SAVE_PC_TO_SELF();     \
+	SAVE_FP_TO_SELF()
+
+#define EXPORT_PC() \
+    sw        rPC, (offStackSaveArea_currentPc - sizeofStackSaveArea)(rFP)
+
+#define SAVEAREA_FROM_FP(rd, _fpreg) \
+    subu      rd, _fpreg, sizeofStackSaveArea
+
+#define FETCH_INST() lhu rINST, (rPC)
+
+#define FETCH_ADVANCE_INST(_count) lhu rINST, ((_count)*2)(rPC); \
+    addu      rPC, rPC, ((_count) * 2)
+
+#define PREFETCH_ADVANCE_INST(_dreg, _sreg, _count) \
+    lhu       _dreg, ((_count)*2)(_sreg) ;            \
+    addu      _sreg, _sreg, (_count)*2
+
+#define FETCH_ADVANCE_INST_RB(rd) addu rPC, rPC, rd; \
+    lhu       rINST, (rPC)
+
+#define FETCH(rd, _count) lhu rd, ((_count) * 2)(rPC)
+#define FETCH_S(rd, _count) lh rd, ((_count) * 2)(rPC)
+
+#ifdef HAVE_LITTLE_ENDIAN
+
+#define FETCH_B(rd, _count) lbu rd, ((_count) * 2)(rPC)
+#define FETCH_C(rd, _count) lbu rd, ((_count) * 2 + 1)(rPC)
+
+#else
+
+#define FETCH_B(rd, _count) lbu rd, ((_count) * 2 + 1)(rPC)
+#define FETCH_C(rd, _count) lbu rd, ((_count) * 2)(rPC)
+
+#endif
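+
+/*
+ * Example (see binopWide.S earlier in this change): FETCH(a0, 1) loads the
+ * second code unit of the current instruction (the CC|BB byte pair), and
+ * FETCH_ADVANCE_INST(2) then steps rPC past the two-unit instruction while
+ * prefetching the next rINST.
+ */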
+
+#define GET_INST_OPCODE(rd) and rd, rINST, 0xFF
+
+/*
+ * Put the prefetched instruction's opcode field into the specified register.
+ */
+
+#define GET_PREFETCHED_OPCODE(dreg, sreg)   andi     dreg, sreg, 255
+
+#define GOTO_OPCODE(rd) sll rd, rd, ${handler_size_bits}; \
+    addu      rd, rIBASE, rd; \
+    jr        rd
+
+#define GOTO_OPCODE_BASE(_base, rd)  sll rd, rd, ${handler_size_bits}; \
+    addu      rd, _base, rd; \
+    jr        rd
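+
+/*
+ * Note: the handlers are laid out as a table of fixed-size stubs, each
+ * (1 << ${handler_size_bits}) bytes long, so shifting the opcode left and
+ * adding rIBASE lands directly on executable code rather than going
+ * through a pointer table.
+ */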
+
+#define GET_VREG(rd, rix) LOAD_eas2(rd, rFP, rix)
+
+#define GET_VREG_F(rd, rix) EAS2(AT, rFP, rix); \
+    .set noat; l.s rd, (AT); .set at
+
+#define SET_VREG(rd, rix) STORE_eas2(rd, rFP, rix)
+
+#define SET_VREG_GOTO(rd, rix, dst) .set noreorder; \
+    sll       dst, dst, ${handler_size_bits}; \
+    addu      dst, rIBASE, dst; \
+    sll       t8, rix, 2; \
+    addu      t8, t8, rFP; \
+    jr        dst; \
+    sw        rd, 0(t8); \
+    .set reorder
+
+#define SET_VREG_F(rd, rix) EAS2(AT, rFP, rix); \
+    .set noat; s.s rd, (AT); .set at
+
+
+#define GET_OPA(rd) srl rd, rINST, 8
+#ifndef MIPS32R2
+#define GET_OPA4(rd) GET_OPA(rd); and rd, 0xf
+#else
+#define GET_OPA4(rd) ext rd, rINST, 8, 4
+#endif
+#define GET_OPB(rd) srl rd, rINST, 12
+
+#define LOAD_rSELF_OFF(rd, off) lw rd, offThread_##off(rSELF)
+
+#define LOAD_rSELF_method(rd) LOAD_rSELF_OFF(rd, method)
+#define LOAD_rSELF_methodClassDex(rd) LOAD_rSELF_OFF(rd, methodClassDex)
+#define LOAD_rSELF_interpStackEnd(rd) LOAD_rSELF_OFF(rd, interpStackEnd)
+#define LOAD_rSELF_retval(rd) LOAD_rSELF_OFF(rd, retval)
+#define LOAD_rSELF_pActiveProfilers(rd) LOAD_rSELF_OFF(rd, pActiveProfilers)
+#define LOAD_rSELF_bailPtr(rd) LOAD_rSELF_OFF(rd, bailPtr)
+#define LOAD_rSELF_SelfSuspendCount(rd) LOAD_rSELF_OFF(rd, SelfSuspendCount)
+
+
+/*
+ * Form an Effective Address rd = rbase + roff<<n;
+ * Uses reg AT
+ */
+#define EASN(rd, rbase, roff, rshift) .set noat; \
+    sll       AT, roff, rshift; \
+    addu      rd, rbase, AT; \
+    .set at
+
+#define EAS1(rd, rbase, roff) EASN(rd, rbase, roff, 1)
+#define EAS2(rd, rbase, roff) EASN(rd, rbase, roff, 2)
+#define EAS3(rd, rbase, roff) EASN(rd, rbase, roff, 3)
+#define EAS4(rd, rbase, roff) EASN(rd, rbase, roff, 4)
+
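+/*
+ * For instance, EAS2(a2, rFP, a2) as used by the binop templates expands
+ * via AT to:
+ *
+ *     sll  AT, a2, 2
+ *     addu a2, rFP, AT
+ *
+ * i.e. a2 = &fp[BB], treating the frame as an array of 32-bit vregs.
+ */
+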
+/*
+ * Form an Effective Shift Right rd = rbase + roff>>n;
+ * Uses reg AT
+ */
+#define ESRN(rd, rbase, roff, rshift) .set noat; \
+    srl       AT, roff, rshift; \
+    addu      rd, rbase, AT; \
+    .set at
+
+#define LOAD_eas2(rd, rbase, roff) EAS2(AT, rbase, roff); \
+    .set noat; lw rd, 0(AT); .set at
+
+#define STORE_eas2(rd, rbase, roff) EAS2(AT, rbase, roff); \
+    .set noat; sw rd, 0(AT); .set at
+
+#define LOAD_RB_OFF(rd, rbase, off) lw rd, off(rbase)
+#define LOADu2_RB_OFF(rd, rbase, off) lhu rd, off(rbase)
+#define STORE_RB_OFF(rd, rbase, off) sw rd, off(rbase)
+
+#ifdef HAVE_LITTLE_ENDIAN
+
+#define STORE64_off(rlo, rhi, rbase, off) sw rlo, off(rbase); \
+    sw        rhi, (off+4)(rbase)
+#define LOAD64_off(rlo, rhi, rbase, off) lw rlo, off(rbase); \
+    lw        rhi, (off+4)(rbase)
+
+#define vSTORE64_off(rlo, rhi, rbase, off) sw rlo, off(rbase); \
+    sw        rhi, (off+4)(rbase)
+#define vLOAD64_off(rlo, rhi, rbase, off) lw rlo, off(rbase); \
+    lw        rhi, (off+4)(rbase)
+
+#define STORE64_off_F(rlo, rhi, rbase, off) s.s rlo, off(rbase); \
+    s.s       rhi, (off+4)(rbase)
+#define LOAD64_off_F(rlo, rhi, rbase, off) l.s rlo, off(rbase); \
+    l.s       rhi, (off+4)(rbase)
+#else
+
+#define STORE64_off(rlo, rhi, rbase, off) sw rlo, (off+4)(rbase); \
+    sw        rhi, (off)(rbase)
+#define LOAD64_off(rlo, rhi, rbase, off) lw rlo, (off+4)(rbase); \
+    lw        rhi, (off)(rbase)
+#define vSTORE64_off(rlo, rhi, rbase, off) sw rlo, (off+4)(rbase); \
+    sw        rhi, (off)(rbase)
+#define vLOAD64_off(rlo, rhi, rbase, off) lw rlo, (off+4)(rbase); \
+    lw        rhi, (off)(rbase)
+#define STORE64_off_F(rlo, rhi, rbase, off) s.s rlo, (off+4)(rbase); \
+    s.s       rhi, (off)(rbase)
+#define LOAD64_off_F(rlo, rhi, rbase, off) l.s rlo, (off+4)(rbase); \
+    l.s       rhi, (off)(rbase)
+#endif
+
+#define STORE64(rlo, rhi, rbase) STORE64_off(rlo, rhi, rbase, 0)
+#define LOAD64(rlo, rhi, rbase) LOAD64_off(rlo, rhi, rbase, 0)
+
+#define vSTORE64(rlo, rhi, rbase) vSTORE64_off(rlo, rhi, rbase, 0)
+#define vLOAD64(rlo, rhi, rbase) vLOAD64_off(rlo, rhi, rbase, 0)
+
+#define STORE64_F(rlo, rhi, rbase) STORE64_off_F(rlo, rhi, rbase, 0)
+#define LOAD64_F(rlo, rhi, rbase) LOAD64_off_F(rlo, rhi, rbase, 0)
+
+#define STORE64_lo(rd, rbase) sw rd, 0(rbase)
+#define STORE64_hi(rd, rbase) sw rd, 4(rbase)
+
+
+#define LOAD_offThread_exception(rd, rbase) LOAD_RB_OFF(rd, rbase, offThread_exception)
+#define LOAD_base_offArrayObject_length(rd, rbase) LOAD_RB_OFF(rd, rbase, offArrayObject_length)
+#define LOAD_base_offClassObject_accessFlags(rd, rbase) LOAD_RB_OFF(rd, rbase, offClassObject_accessFlags)
+#define LOAD_base_offClassObject_descriptor(rd, rbase) LOAD_RB_OFF(rd, rbase, offClassObject_descriptor)
+#define LOAD_base_offClassObject_super(rd, rbase) LOAD_RB_OFF(rd, rbase, offClassObject_super)
+
+#define LOAD_base_offClassObject_vtable(rd, rbase) LOAD_RB_OFF(rd, rbase, offClassObject_vtable)
+#define LOAD_base_offClassObject_vtableCount(rd, rbase) LOAD_RB_OFF(rd, rbase, offClassObject_vtableCount)
+#define LOAD_base_offDvmDex_pResClasses(rd, rbase) LOAD_RB_OFF(rd, rbase, offDvmDex_pResClasses)
+#define LOAD_base_offDvmDex_pResFields(rd, rbase) LOAD_RB_OFF(rd, rbase, offDvmDex_pResFields)
+
+#define LOAD_base_offDvmDex_pResMethods(rd, rbase) LOAD_RB_OFF(rd, rbase, offDvmDex_pResMethods)
+#define LOAD_base_offDvmDex_pResStrings(rd, rbase) LOAD_RB_OFF(rd, rbase, offDvmDex_pResStrings)
+#define LOAD_base_offInstField_byteOffset(rd, rbase) LOAD_RB_OFF(rd, rbase, offInstField_byteOffset)
+#define LOAD_base_offStaticField_value(rd, rbase) LOAD_RB_OFF(rd, rbase, offStaticField_value)
+#define LOAD_base_offMethod_clazz(rd, rbase) LOAD_RB_OFF(rd, rbase, offMethod_clazz)
+
+#define LOAD_base_offMethod_name(rd, rbase) LOAD_RB_OFF(rd, rbase, offMethod_name)
+#define LOAD_base_offObject_clazz(rd, rbase) LOAD_RB_OFF(rd, rbase, offObject_clazz)
+
+#define LOADu2_offMethod_methodIndex(rd, rbase) LOADu2_RB_OFF(rd, rbase, offMethod_methodIndex)
+
+
+#define STORE_offThread_exception(rd, rbase) STORE_RB_OFF(rd, rbase, offThread_exception)
+
+
+#define STACK_STORE(rd, off) sw rd, off(sp)
+#define STACK_LOAD(rd, off) lw rd, off(sp)
+#define CREATE_STACK(n) subu sp, sp, n
+#define DELETE_STACK(n) addu sp, sp, n
+
+#define SAVE_RA(offset) STACK_STORE(ra, offset)
+#define LOAD_RA(offset) STACK_LOAD(ra, offset)
+
+#define LOAD_ADDR(dest, addr) la dest, addr
+#define LOAD_IMM(dest, imm) li dest, imm
+#define MOVE_REG(dest, src) move dest, src
+#define RETURN jr ra
+#define STACK_SIZE 128
+
+#define STACK_OFFSET_ARG04 16
+#define STACK_OFFSET_ARG05 20
+#define STACK_OFFSET_ARG06 24
+#define STACK_OFFSET_ARG07 28
+#define STACK_OFFSET_SCR   32
+#define STACK_OFFSET_SCRMX 80
+#define STACK_OFFSET_GP    84
+#define STACK_OFFSET_rFP   112
+
+#define JAL(n) jal n
+#define BAL(n) bal n
+
+#define STACK_STORE_RA() CREATE_STACK(STACK_SIZE); \
+    STACK_STORE(gp, STACK_OFFSET_GP); \
+    STACK_STORE(ra, 124)
+
+#define STACK_STORE_S0() STACK_STORE_RA(); \
+    STACK_STORE(s0, 116)
+
+#define STACK_STORE_S0S1() STACK_STORE_S0(); \
+    STACK_STORE(s1, STACK_OFFSET_rFP)
+
+#define STACK_LOAD_RA() STACK_LOAD(ra, 124); \
+    STACK_LOAD(gp, STACK_OFFSET_GP); \
+    DELETE_STACK(STACK_SIZE)
+
+#define STACK_LOAD_S0() STACK_LOAD(s0, 116); \
+    STACK_LOAD_RA()
+
+#define STACK_LOAD_S0S1() STACK_LOAD(s1, STACK_OFFSET_rFP); \
+    STACK_LOAD_S0()
+
+#define STACK_STORE_FULL() CREATE_STACK(STACK_SIZE); \
+    STACK_STORE(ra, 124); \
+    STACK_STORE(fp, 120); \
+    STACK_STORE(s0, 116); \
+    STACK_STORE(s1, STACK_OFFSET_rFP); \
+    STACK_STORE(s2, 108); \
+    STACK_STORE(s3, 104); \
+    STACK_STORE(s4, 100); \
+    STACK_STORE(s5, 96); \
+    STACK_STORE(s6, 92); \
+    STACK_STORE(s7, 88);
+
+#define STACK_LOAD_FULL() STACK_LOAD(gp, STACK_OFFSET_GP); \
+    STACK_LOAD(s7, 88); \
+    STACK_LOAD(s6, 92); \
+    STACK_LOAD(s5, 96); \
+    STACK_LOAD(s4, 100); \
+    STACK_LOAD(s3, 104); \
+    STACK_LOAD(s2, 108); \
+    STACK_LOAD(s1, STACK_OFFSET_rFP); \
+    STACK_LOAD(s0, 116); \
+    STACK_LOAD(fp, 120); \
+    STACK_LOAD(ra, 124); \
+    DELETE_STACK(STACK_SIZE)
+
+/*
+ * first 8 words are reserved for function calls
+ * Maximum offset is STACK_OFFSET_SCRMX-STACK_OFFSET_SCR
+ */
+#define SCRATCH_STORE(r,off) \
+    STACK_STORE(r, STACK_OFFSET_SCR+off);
+#define SCRATCH_LOAD(r,off) \
+    STACK_LOAD(r, STACK_OFFSET_SCR+off);
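+
+/*
+ * Example (editorial sketch): SCRATCH_STORE(a0, 0) expands to
+ *     sw a0, (STACK_OFFSET_SCR+0)(sp)
+ * spilling a0 to the first scratch slot at sp+32; offsets must stay below
+ * STACK_OFFSET_SCRMX-STACK_OFFSET_SCR (48 bytes of scratch space).
+ */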
+
+#if defined(WITH_JIT)
+#include "../common/jit-config.h"
+#endif
diff --git a/vm/mterp/mips/platform.S b/vm/mterp/mips/platform.S
new file mode 100644
index 0000000..ec1e3ee
--- /dev/null
+++ b/vm/mterp/mips/platform.S
@@ -0,0 +1,32 @@
+/*
+ * ===========================================================================
+ *  CPU-version-specific defines
+ * ===========================================================================
+ */
+
+#if !defined(ANDROID_SMP)
+# error "Must define ANDROID_SMP"
+#endif
+
+/*
+ * Macro for data memory barrier.
+ */
+.macro SMP_DMB
+#if ANDROID_SMP != 0
+    sync
+#else
+    /* not SMP */
+#endif
+.endm
+
+/*
+ * Macro for data memory barrier (store/store variant).
+ */
+.macro  SMP_DMB_ST
+#if ANDROID_SMP != 0
+    // FIXME: Is this really needed?
+    sync
+#else
+    /* not SMP */
+#endif
+.endm
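+
+/*
+ * Editorial note: on SMP builds each macro emits a full "sync", the MIPS
+ * barrier that orders all prior loads/stores before subsequent ones; on
+ * uniprocessor builds both macros expand to nothing.
+ */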
diff --git a/vm/mterp/mips/stub.S b/vm/mterp/mips/stub.S
new file mode 100644
index 0000000..fad2238
--- /dev/null
+++ b/vm/mterp/mips/stub.S
@@ -0,0 +1,10 @@
+    /* (stub) */
+    SAVE_PC_TO_SELF()            # only need to export PC and FP
+    SAVE_FP_TO_SELF()
+    move        a0, rSELF        # self is first arg to function
+    JAL(dvmMterp_${opcode})      # call
+    LOAD_PC_FROM_SELF()          # retrieve updated values
+    LOAD_FP_FROM_SELF()
+    FETCH_INST()                 # load next instruction from rPC
+    GET_INST_OPCODE(t0)          # ...trim down to just the opcode
+    GOTO_OPCODE(t0)              # ...and jump to the handler
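+
+    /*
+     * Illustrative expansion (editorial sketch): for the nop opcode this
+     * template would emit "JAL(dvmMterp_OP_NOP)", i.e. a jal to the C
+     * implementation of the handler, with rSELF passed in a0.
+     */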
diff --git a/vm/mterp/mips/unflop.S b/vm/mterp/mips/unflop.S
new file mode 100644
index 0000000..9018bc9
--- /dev/null
+++ b/vm/mterp/mips/unflop.S
@@ -0,0 +1,32 @@
+%default {"preinstr":""}
+    /*
+     * Generic 32-bit unary floating-point operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op a0".
+     * This could be a MIPS instruction or a function call.
+     *
+     * for: int-to-float, float-to-int
+     */
+    /* unop vA, vB */
+    GET_OPB(a3)                            #  a3 <- B
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+#ifdef SOFT_FLOAT
+    GET_VREG(a0, a3)                       #  a0 <- vB
+#else
+    GET_VREG_F(fa0, a3)
+#endif
+    $preinstr                              #  optional op
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+#ifdef SOFT_FLOAT
+    $instr                                 #  a0 <- op, a0-a3 changed
+
+.L${opcode}_set_vreg:
+    SET_VREG(v0, rOBJ)                     #  vA <- result0
+#else
+    $instr_f
+
+.L${opcode}_set_vreg_f:
+    SET_VREG_F(fv0, rOBJ)
+#endif
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    GOTO_OPCODE(t1)                        #  jump to next instruction
+    /* 9-10 instructions */
diff --git a/vm/mterp/mips/unflopWide.S b/vm/mterp/mips/unflopWide.S
new file mode 100644
index 0000000..3411c2e
--- /dev/null
+++ b/vm/mterp/mips/unflopWide.S
@@ -0,0 +1,32 @@
+%default {"preinstr":"", "ld_arg":"LOAD64_F(fa0, fa0f, a3)", "st_result":"STORE64_F(fv0, fv0f, rOBJ)"}
+    /*
+     * Generic 64-bit unary floating-point operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op a0/a1".
+     * This could be a MIPS instruction or a function call.
+     *
+     * For: long-to-double, double-to-long
+     */
+    /* unop vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[A]
+#ifdef SOFT_FLOAT
+    LOAD64(rARG0, rARG1, a3)               #  a0/a1 <- vB/vB+1
+#else
+    $ld_arg
+#endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    $preinstr                              #  optional op
+    $instr                                 #  a0/a1 <- op, a2-a3 changed
+
+.L${opcode}_set_vreg:
+#ifdef SOFT_FLOAT
+    STORE64(rRESULT0, rRESULT1, rOBJ)      #  vA/vA+1 <- result
+#else
+    $st_result                             #  vA/vA+1 <- result
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 12-13 instructions */
+
diff --git a/vm/mterp/mips/unflopWider.S b/vm/mterp/mips/unflopWider.S
new file mode 100644
index 0000000..f6d5718
--- /dev/null
+++ b/vm/mterp/mips/unflopWider.S
@@ -0,0 +1,33 @@
+%default {"preinstr":"", "st_result":"STORE64_F(fv0, fv0f, rOBJ)"}
+    /*
+     * Generic 32-bit-to-64-bit unary floating-point operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = op a0", where
+     * "result" is a 64-bit quantity in a0/a1.
+     *
+     * For: int-to-double, float-to-long, float-to-double
+     */
+    /* unop vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+#ifdef SOFT_FLOAT
+    GET_VREG(a0, a3)                       #  a0 <- vB
+#else
+    GET_VREG_F(fa0, a3)
+#endif
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[A]
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    $preinstr                              #  optional op
+#ifdef SOFT_FLOAT
+    $instr                                 #  result <- op, a0-a3 changed
+
+.L${opcode}_set_vreg:
+    STORE64(rRESULT0, rRESULT1, rOBJ)      #  vA/vA+1 <- result
+#else
+    $instr_f
+
+.L${opcode}_set_vreg:
+    $st_result                             #  vA/vA+1 <- result
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 10-11 instructions */
diff --git a/vm/mterp/mips/unop.S b/vm/mterp/mips/unop.S
new file mode 100644
index 0000000..52a8f0a
--- /dev/null
+++ b/vm/mterp/mips/unop.S
@@ -0,0 +1,19 @@
+%default {"preinstr":"", "result0":"a0"}
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op a0".
+     * This could be a MIPS instruction or a function call.
+     *
+     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+     *      int-to-byte, int-to-char, int-to-short
+     */
+    /* unop vA, vB */
+    GET_OPB(a3)                            #  a3 <- B
+    GET_OPA4(t0)                           #  t0 <- A+
+    GET_VREG(a0, a3)                       #  a0 <- vB
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    $preinstr                              #  optional op
+    $instr                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    SET_VREG_GOTO($result0, t0, t1)        #  vAA <- result0
+    /* 9-10 instructions */
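+
+    /*
+     * Illustrative instantiation (editorial sketch, assuming the usual
+     * template syntax): neg-int could supply
+     *     %include "mips/unop.S" {"instr":"negu a0, a0"}
+     * so $instr becomes "negu a0, a0" and $result0 stays a0.
+     */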
diff --git a/vm/mterp/mips/unopNarrower.S b/vm/mterp/mips/unopNarrower.S
new file mode 100644
index 0000000..85a94b7
--- /dev/null
+++ b/vm/mterp/mips/unopNarrower.S
@@ -0,0 +1,37 @@
+%default {"preinstr":"", "load":"LOAD64_F(fa0, fa0f, a3)"}
+    /*
+     * Generic 64-bit-to-32-bit unary floating-point operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = op a0/a1", where
+     * "result" is a 32-bit quantity in a0.
+     *
+     * For: long-to-float, double-to-int, double-to-float
+     * When hard floating point support is available, the operand is passed in fa0,
+     * except for the long-to-float opcode.
+     * (This would work for long-to-int, but that instruction is actually
+     * an exact match for OP_MOVE.)
+     */
+    /* unop vA, vB */
+    GET_OPB(a3)                            #  a3 <- B
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
+#ifdef SOFT_FLOAT
+    LOAD64(rARG0, rARG1, a3)               #  a0/a1 <- vB/vB+1
+#else
+    $load
+#endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    $preinstr                              #  optional op
+#ifdef SOFT_FLOAT
+    $instr                                 #  a0 <- op, a0-a3 changed
+
+.L${opcode}_set_vreg:
+    SET_VREG(v0, rOBJ)                     #  vA <- result0
+#else
+    $instr_f
+
+.L${opcode}_set_vreg_f:
+    SET_VREG_F(fv0, rOBJ)                  #  vA <- result0
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 10-11 instructions */
diff --git a/vm/mterp/mips/unopWide.S b/vm/mterp/mips/unopWide.S
new file mode 100644
index 0000000..00e4e17
--- /dev/null
+++ b/vm/mterp/mips/unopWide.S
@@ -0,0 +1,22 @@
+%default {"preinstr":"", "result0":"a0", "result1":"a1"}
+    /*
+     * Generic 64-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op a0/a1".
+     * This could be a MIPS instruction or a function call.
+     *
+     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+     */
+    /* unop vA, vB */
+    GET_OPA4(t1)                           #  t1 <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
+    EAS2(rOBJ, rFP, t1)                    #  rOBJ <- &fp[A]
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- vB/vB+1
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    $preinstr                              #  optional op
+    $instr                                 #  a0/a1 <- op, a2-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64($result0, $result1, rOBJ)      #  vA/vA+1 <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 12-13 instructions */
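+
+    /*
+     * Illustrative instantiation (editorial sketch): not-long could supply
+     *     %include "mips/unopWide.S" {"instr":"not a0, a0; not a1, a1"}
+     * complementing both halves of the 64-bit value in a0/a1.
+     */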
+
diff --git a/vm/mterp/mips/unopWider.S b/vm/mterp/mips/unopWider.S
new file mode 100644
index 0000000..f601c11
--- /dev/null
+++ b/vm/mterp/mips/unopWider.S
@@ -0,0 +1,20 @@
+%default {"preinstr":"", "result0":"a0", "result1":"a1"}
+    /*
+     * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = op a0", where
+     * "result" is a 64-bit quantity in a0/a1.
+     *
+     * For: int-to-long, int-to-double, float-to-long, float-to-double
+     */
+    /* unop vA, vB */
+    GET_OPA4(t1)                           #  t1 <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a0, a3)                       #  a0 <- vB
+    EAS2(rOBJ, rFP, t1)                    #  rOBJ <- &fp[A]
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    $preinstr                              #  optional op
+    $instr                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64($result0, $result1, rOBJ)      #  vA/vA+1 <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 10-11 instructions */
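+
+    /*
+     * Illustrative instantiation (editorial sketch): int-to-long could supply
+     *     %include "mips/unopWider.S" {"instr":"sra a1, a0, 31"}
+     * sign-extending a0 into a1 so the a0/a1 pair holds the 64-bit result.
+     */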
diff --git a/vm/mterp/mips/unused.S b/vm/mterp/mips/unused.S
new file mode 100644
index 0000000..d91dafb
--- /dev/null
+++ b/vm/mterp/mips/unused.S
@@ -0,0 +1,2 @@
+    BAL(common_abort)
+
diff --git a/vm/mterp/mips/zcmp.S b/vm/mterp/mips/zcmp.S
new file mode 100644
index 0000000..aaac52d
--- /dev/null
+++ b/vm/mterp/mips/zcmp.S
@@ -0,0 +1,33 @@
+%verify "branch taken"
+%verify "branch not taken"
+    /*
+     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+     */
+    /* if-cmp vAA, +BBBB */
+    GET_OPA(a0)                            #  a0 <- AA
+    GET_VREG(a2, a0)                       #  a2 <- vAA
+    FETCH_S(a1, 1)                         #  a1 <- branch offset, in code units
+    b${revcmp} a2, zero, 1f                #  branch to 1f if comparison failed
+    b 2f
+1:
+    li        a1, 2                        #  a1 <- branch dist (code units) for not-taken
+2:
+    addu      a1, a1, a1                   #  convert to bytes
+    FETCH_ADVANCE_INST_RB(a1)              #  update rPC, load rINST
+#if defined(WITH_JIT)
+    lw        a0, offThread_pJitProfTable(rSELF)
+    bgez      a1, 3f
+    lw        rIBASE, offThread_curHandlerTable(rSELF)  # refresh table base
+3:
+    bnez      a0, common_updateProfile     #  test for JIT off at target
+#else
+    bgez      a1, 4f
+    lw        rIBASE, offThread_curHandlerTable(rSELF)  # refresh rtable base
+4:
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
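+
+    /*
+     * Illustrative instantiation (editorial sketch): if-eqz supplies the
+     * reverse comparison "ne", so "b${revcmp} a2, zero, 1f" assembles as
+     *     bne a2, zero, 1f
+     * taking the not-taken path whenever vAA != 0.
+     */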
diff --git a/vm/mterp/out/InterpAsm-armv5te-vfp.S b/vm/mterp/out/InterpAsm-armv5te-vfp.S
index a0835f9..cc5a877 100644
--- a/vm/mterp/out/InterpAsm-armv5te-vfp.S
+++ b/vm/mterp/out/InterpAsm-armv5te-vfp.S
@@ -26912,8 +26912,8 @@
 
     cmp     lr, #0                      @ any special SubModes active?
     bne     11f                         @ go handle them if so
-    mov     lr, pc                      @ set return addr
-    ldr     pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+    ldr     ip, [r2, #offMethod_nativeFunc] @ ip<- methodToCall->nativeFunc
+    blx     ip
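+    @ Editorial note: a single blx replaces the "mov lr, pc" / "ldr pc, ..."
+    @ pair; it sets the return address itself and, being a recognized
+    @ procedure call, plays well with return-address prediction and
+    @ ARM/Thumb interworking.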
 7:
 
     @ native return; r10=newSaveArea
@@ -26939,8 +26939,8 @@
     ldmfd   sp, {r0-r3}                 @ refresh.  NOTE: no sp autoincrement
 
     @ Call the native method
-    mov     lr, pc                      @ set return addr
-    ldr     pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+    ldr     ip, [r2, #offMethod_nativeFunc] @ ip<- methodToCall->nativeFunc
+    blx     ip
 
     @ Restore the pre-call arguments
     ldmfd   sp!, {r0-r3}                @ r2<- methodToCall (others unneeded)
diff --git a/vm/mterp/out/InterpAsm-armv5te.S b/vm/mterp/out/InterpAsm-armv5te.S
index 5e4ccd4..6bbd91a 100644
--- a/vm/mterp/out/InterpAsm-armv5te.S
+++ b/vm/mterp/out/InterpAsm-armv5te.S
@@ -27370,8 +27370,8 @@
 
     cmp     lr, #0                      @ any special SubModes active?
     bne     11f                         @ go handle them if so
-    mov     lr, pc                      @ set return addr
-    ldr     pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+    ldr     ip, [r2, #offMethod_nativeFunc] @ ip<- methodToCall->nativeFunc
+    blx     ip
 7:
 
     @ native return; r10=newSaveArea
@@ -27397,8 +27397,8 @@
     ldmfd   sp, {r0-r3}                 @ refresh.  NOTE: no sp autoincrement
 
     @ Call the native method
-    mov     lr, pc                      @ set return addr
-    ldr     pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+    ldr     ip, [r2, #offMethod_nativeFunc] @ ip<- methodToCall->nativeFunc
+    blx     ip
 
     @ Restore the pre-call arguments
     ldmfd   sp!, {r0-r3}                @ r2<- methodToCall (others unneeded)
diff --git a/vm/mterp/out/InterpAsm-armv7-a-neon.S b/vm/mterp/out/InterpAsm-armv7-a-neon.S
index 3a01a83..0203b02 100644
--- a/vm/mterp/out/InterpAsm-armv7-a-neon.S
+++ b/vm/mterp/out/InterpAsm-armv7-a-neon.S
@@ -26849,8 +26849,8 @@
 
     cmp     lr, #0                      @ any special SubModes active?
     bne     11f                         @ go handle them if so
-    mov     lr, pc                      @ set return addr
-    ldr     pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+    ldr     ip, [r2, #offMethod_nativeFunc] @ ip<- methodToCall->nativeFunc
+    blx     ip
 7:
 
     @ native return; r10=newSaveArea
@@ -26876,8 +26876,8 @@
     ldmfd   sp, {r0-r3}                 @ refresh.  NOTE: no sp autoincrement
 
     @ Call the native method
-    mov     lr, pc                      @ set return addr
-    ldr     pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+    ldr     ip, [r2, #offMethod_nativeFunc] @ ip<- methodToCall->nativeFunc
+    blx     ip
 
     @ Restore the pre-call arguments
     ldmfd   sp!, {r0-r3}                @ r2<- methodToCall (others unneeded)
diff --git a/vm/mterp/out/InterpAsm-armv7-a.S b/vm/mterp/out/InterpAsm-armv7-a.S
index e8a9501..efb1ea8 100644
--- a/vm/mterp/out/InterpAsm-armv7-a.S
+++ b/vm/mterp/out/InterpAsm-armv7-a.S
@@ -26849,8 +26849,8 @@
 
     cmp     lr, #0                      @ any special SubModes active?
     bne     11f                         @ go handle them if so
-    mov     lr, pc                      @ set return addr
-    ldr     pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+    ldr     ip, [r2, #offMethod_nativeFunc] @ ip<- methodToCall->nativeFunc
+    blx     ip
 7:
 
     @ native return; r10=newSaveArea
@@ -26876,8 +26876,8 @@
     ldmfd   sp, {r0-r3}                 @ refresh.  NOTE: no sp autoincrement
 
     @ Call the native method
-    mov     lr, pc                      @ set return addr
-    ldr     pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+    ldr     ip, [r2, #offMethod_nativeFunc] @ ip<- methodToCall->nativeFunc
+    blx     ip
 
     @ Restore the pre-call arguments
     ldmfd   sp!, {r0-r3}                @ r2<- methodToCall (others unneeded)
diff --git a/vm/mterp/out/InterpAsm-mips.S b/vm/mterp/out/InterpAsm-mips.S
new file mode 100644
index 0000000..734ae91
--- /dev/null
+++ b/vm/mterp/out/InterpAsm-mips.S
@@ -0,0 +1,29959 @@
+/*
+ * This file was generated automatically by gen-mterp.py for 'mips'.
+ *
+ * --> DO NOT EDIT <--
+ */
+
+/* File: mips/header.S */
+#include "../common/asm-constants.h"
+#include "../common/mips-defines.h"
+#include <asm/regdef.h>
+#include <asm/fpregdef.h>
+
+#ifdef __mips_hard_float
+#define HARD_FLOAT
+#else
+#define SOFT_FLOAT
+#endif
+
+#if (__mips==32) && (__mips_isa_rev>=2)
+#define MIPS32R2
+#endif
+
+/* MIPS definitions and declarations
+
+   reg	nick		purpose
+   s0	rPC		interpreted program counter, used for fetching instructions
+   s1	rFP		interpreted frame pointer, used for accessing locals and args
+   s2	rSELF		self (Thread) pointer
+   s3	rIBASE		interpreted instruction base pointer, used for computed goto
+   s4	rINST		first 16-bit code unit of current instruction
+*/
+
+
+/* single-purpose registers, given names for clarity */
+#define rPC s0
+#define rFP s1
+#define rSELF s2
+#define rIBASE s3
+#define rINST s4
+#define rOBJ s5
+#define rBIX s6
+#define rTEMP s7
+
+/* 64-bit arguments passed in a register pair are word-swapped between
+ * endiannesses: in little-endian mode the pair a0/a1 holds (LSW, MSW),
+ * while in big-endian mode it holds (MSW, LSW).  The rARGn/rRESULTn
+ * aliases below absorb that difference. */
+
+#ifdef HAVE_LITTLE_ENDIAN
+#define rARG0 a0
+#define rARG1 a1
+#define rARG2 a2
+#define rARG3 a3
+#define rRESULT0 v0
+#define rRESULT1 v1
+#else
+#define rARG0 a1
+#define rARG1 a0
+#define rARG2 a3
+#define rARG3 a2
+#define rRESULT0 v1
+#define rRESULT1 v0
+#endif
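+
+/*
+ * Consequence (editorial note): with this mapping rARG0/rRESULT0 always
+ * name the register holding the low word of a 64-bit pair, regardless of
+ * endianness, so callers can use the aliases uniformly.
+ */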
+
+
+/* save/restore the PC and/or FP from the glue struct */
+#define LOAD_PC_FROM_SELF() lw rPC, offThread_pc(rSELF)
+#define SAVE_PC_TO_SELF() sw rPC, offThread_pc(rSELF)
+#define LOAD_FP_FROM_SELF() lw rFP, offThread_curFrame(rSELF)
+#define SAVE_FP_TO_SELF() sw rFP, offThread_curFrame(rSELF)
+#define LOAD_PC_FP_FROM_SELF() \
+	LOAD_PC_FROM_SELF();   \
+	LOAD_FP_FROM_SELF()
+#define SAVE_PC_FP_TO_SELF()   \
+	SAVE_PC_TO_SELF();     \
+	SAVE_FP_TO_SELF()
+
+#define EXPORT_PC() \
+    sw        rPC, (offStackSaveArea_currentPc - sizeofStackSaveArea)(rFP)
+
+#define SAVEAREA_FROM_FP(rd, _fpreg) \
+    subu      rd, _fpreg, sizeofStackSaveArea
+
+#define FETCH_INST() lhu rINST, (rPC)
+
+#define FETCH_ADVANCE_INST(_count) lhu rINST, ((_count)*2)(rPC); \
+    addu      rPC, rPC, ((_count) * 2)
+
+#define PREFETCH_ADVANCE_INST(_dreg, _sreg, _count) \
+    lhu       _dreg, ((_count)*2)(_sreg) ;            \
+    addu      _sreg, _sreg, (_count)*2
+
+#define FETCH_ADVANCE_INST_RB(rd) addu rPC, rPC, rd; \
+    lhu       rINST, (rPC)
+
+#define FETCH(rd, _count) lhu rd, ((_count) * 2)(rPC)
+#define FETCH_S(rd, _count) lh rd, ((_count) * 2)(rPC)
+
+#ifdef HAVE_LITTLE_ENDIAN
+
+#define FETCH_B(rd, _count) lbu rd, ((_count) * 2)(rPC)
+#define FETCH_C(rd, _count) lbu rd, ((_count) * 2 + 1)(rPC)
+
+#else
+
+#define FETCH_B(rd, _count) lbu rd, ((_count) * 2 + 1)(rPC)
+#define FETCH_C(rd, _count) lbu rd, ((_count) * 2)(rPC)
+
+#endif
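+
+/*
+ * Editorial note: FETCH_B always returns the low-order byte of the
+ * selected 16-bit code unit and FETCH_C the high-order byte; only the
+ * byte offset within the unit changes with endianness.
+ */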
+
+#define GET_INST_OPCODE(rd) and rd, rINST, 0xFF
+
+/*
+ * Put the prefetched instruction's opcode field into the specified register.
+ */
+
+#define GET_PREFETCHED_OPCODE(dreg, sreg)   andi     dreg, sreg, 255
+
+#define GOTO_OPCODE(rd) sll rd, rd, 7; \
+    addu      rd, rIBASE, rd; \
+    jr        rd
+
+#define GOTO_OPCODE_BASE(_base, rd)  sll rd, rd, 7; \
+    addu      rd, _base, rd; \
+    jr        rd
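+
+/*
+ * Editorial note: the left shift by 7 relies on every handler being padded
+ * to 128 bytes (see the ".balign 128" before each .L_OP_* label below), so
+ * rIBASE + (opcode << 7) lands on the handler entry point.
+ */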
+
+#define GET_VREG(rd, rix) LOAD_eas2(rd, rFP, rix)
+
+#define GET_VREG_F(rd, rix) EAS2(AT, rFP, rix); \
+    .set noat; l.s rd, (AT); .set at
+
+#define SET_VREG(rd, rix) STORE_eas2(rd, rFP, rix)
+
+#define SET_VREG_GOTO(rd, rix, dst) .set noreorder; \
+    sll       dst, dst, 7; \
+    addu      dst, rIBASE, dst; \
+    sll       t8, rix, 2; \
+    addu      t8, t8, rFP; \
+    jr        dst; \
+    sw        rd, 0(t8); \
+    .set reorder
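+
+/*
+ * Editorial note: under ".set noreorder" the final "sw" sits in the branch
+ * delay slot of "jr dst", so the store to fp[rix] executes before the jump
+ * takes effect even though it appears after it.
+ */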
+
+#define SET_VREG_F(rd, rix) EAS2(AT, rFP, rix); \
+    .set noat; s.s rd, (AT); .set at
+
+
+#define GET_OPA(rd) srl rd, rINST, 8
+#ifndef MIPS32R2
+#define GET_OPA4(rd) GET_OPA(rd); and rd, 0xf
+#else
+#define GET_OPA4(rd) ext rd, rINST, 8, 4
+#endif
+#define GET_OPB(rd) srl rd, rINST, 12
+
+#define LOAD_rSELF_OFF(rd, off) lw rd, offThread_##off## (rSELF)
+
+#define LOAD_rSELF_method(rd) LOAD_rSELF_OFF(rd, method)
+#define LOAD_rSELF_methodClassDex(rd) LOAD_rSELF_OFF(rd, methodClassDex)
+#define LOAD_rSELF_interpStackEnd(rd) LOAD_rSELF_OFF(rd, interpStackEnd)
+#define LOAD_rSELF_retval(rd) LOAD_rSELF_OFF(rd, retval)
+#define LOAD_rSELF_pActiveProfilers(rd) LOAD_rSELF_OFF(rd, pActiveProfilers)
+#define LOAD_rSELF_bailPtr(rd) LOAD_rSELF_OFF(rd, bailPtr)
+#define LOAD_rSELF_SelfSuspendCount(rd) LOAD_rSELF_OFF(rd, SelfSuspendCount)
+
+
+/*
+ * Form an Effective Address rd = rbase + roff<<n;
+ * Uses reg AT
+ */
+#define EASN(rd, rbase, roff, rshift) .set noat; \
+    sll       AT, roff, rshift; \
+    addu      rd, rbase, AT; \
+    .set at
+
+#define EAS1(rd, rbase, roff) EASN(rd, rbase, roff, 1)
+#define EAS2(rd, rbase, roff) EASN(rd, rbase, roff, 2)
+#define EAS3(rd, rbase, roff) EASN(rd, rbase, roff, 3)
+#define EAS4(rd, rbase, roff) EASN(rd, rbase, roff, 4)
+
+/*
+ * Form an Effective Shift Right rd = rbase + roff>>n;
+ * Uses reg AT
+ */
+#define ESRN(rd, rbase, roff, rshift) .set noat; \
+    srl       AT, roff, rshift; \
+    addu      rd, rbase, AT; \
+    .set at
+
+#define LOAD_eas2(rd, rbase, roff) EAS2(AT, rbase, roff); \
+    .set noat; lw rd, 0(AT); .set at
+
+#define STORE_eas2(rd, rbase, roff) EAS2(AT, rbase, roff); \
+    .set noat; sw rd, 0(AT); .set at
+
+#define LOAD_RB_OFF(rd, rbase, off) lw rd, off(rbase)
+#define LOADu2_RB_OFF(rd, rbase, off) lhu rd, off(rbase)
+#define STORE_RB_OFF(rd, rbase, off) sw rd, off(rbase)
+
+#ifdef HAVE_LITTLE_ENDIAN
+
+#define STORE64_off(rlo, rhi, rbase, off) sw rlo, off(rbase); \
+    sw        rhi, (off+4)(rbase)
+#define LOAD64_off(rlo, rhi, rbase, off) lw rlo, off(rbase); \
+    lw        rhi, (off+4)(rbase)
+
+#define vSTORE64_off(rlo, rhi, rbase, off) sw rlo, off(rbase); \
+    sw        rhi, (off+4)(rbase)
+#define vLOAD64_off(rlo, rhi, rbase, off) lw rlo, off(rbase); \
+    lw        rhi, (off+4)(rbase)
+
+#define STORE64_off_F(rlo, rhi, rbase, off) s.s rlo, off(rbase); \
+    s.s       rhi, (off+4)(rbase)
+#define LOAD64_off_F(rlo, rhi, rbase, off) l.s rlo, off(rbase); \
+    l.s       rhi, (off+4)(rbase)
+#else
+
+#define STORE64_off(rlo, rhi, rbase, off) sw rlo, (off+4)(rbase); \
+    sw        rhi, (off)(rbase)
+#define LOAD64_off(rlo, rhi, rbase, off) lw rlo, (off+4)(rbase); \
+    lw        rhi, (off)(rbase)
+#define vSTORE64_off(rlo, rhi, rbase, off) sw rlo, (off+4)(rbase); \
+    sw        rhi, (off)(rbase)
+#define vLOAD64_off(rlo, rhi, rbase, off) lw rlo, (off+4)(rbase); \
+    lw        rhi, (off)(rbase)
+#define STORE64_off_F(rlo, rhi, rbase, off) s.s rlo, (off+4)(rbase); \
+    s.s       rhi, (off)(rbase)
+#define LOAD64_off_F(rlo, rhi, rbase, off) l.s rlo, (off+4)(rbase); \
+    l.s       rhi, (off)(rbase)
+#endif
+
+#define STORE64(rlo, rhi, rbase) STORE64_off(rlo, rhi, rbase, 0)
+#define LOAD64(rlo, rhi, rbase) LOAD64_off(rlo, rhi, rbase, 0)
+
+#define vSTORE64(rlo, rhi, rbase) vSTORE64_off(rlo, rhi, rbase, 0)
+#define vLOAD64(rlo, rhi, rbase) vLOAD64_off(rlo, rhi, rbase, 0)
+
+#define STORE64_F(rlo, rhi, rbase) STORE64_off_F(rlo, rhi, rbase, 0)
+#define LOAD64_F(rlo, rhi, rbase) LOAD64_off_F(rlo, rhi, rbase, 0)
+
+#define STORE64_lo(rd, rbase) sw rd, 0(rbase)
+#define STORE64_hi(rd, rbase) sw rd, 4(rbase)
+
+
+#define LOAD_offThread_exception(rd, rbase) LOAD_RB_OFF(rd, rbase, offThread_exception)
+#define LOAD_base_offArrayObject_length(rd, rbase) LOAD_RB_OFF(rd, rbase, offArrayObject_length)
+#define LOAD_base_offClassObject_accessFlags(rd, rbase) LOAD_RB_OFF(rd, rbase, offClassObject_accessFlags)
+#define LOAD_base_offClassObject_descriptor(rd, rbase) LOAD_RB_OFF(rd, rbase, offClassObject_descriptor)
+#define LOAD_base_offClassObject_super(rd, rbase) LOAD_RB_OFF(rd, rbase, offClassObject_super)
+
+#define LOAD_base_offClassObject_vtable(rd, rbase) LOAD_RB_OFF(rd, rbase, offClassObject_vtable)
+#define LOAD_base_offClassObject_vtableCount(rd, rbase) LOAD_RB_OFF(rd, rbase, offClassObject_vtableCount)
+#define LOAD_base_offDvmDex_pResClasses(rd, rbase) LOAD_RB_OFF(rd, rbase, offDvmDex_pResClasses)
+#define LOAD_base_offDvmDex_pResFields(rd, rbase) LOAD_RB_OFF(rd, rbase, offDvmDex_pResFields)
+
+#define LOAD_base_offDvmDex_pResMethods(rd, rbase) LOAD_RB_OFF(rd, rbase, offDvmDex_pResMethods)
+#define LOAD_base_offDvmDex_pResStrings(rd, rbase) LOAD_RB_OFF(rd, rbase, offDvmDex_pResStrings)
+#define LOAD_base_offInstField_byteOffset(rd, rbase) LOAD_RB_OFF(rd, rbase, offInstField_byteOffset)
+#define LOAD_base_offStaticField_value(rd, rbase) LOAD_RB_OFF(rd, rbase, offStaticField_value)
+#define LOAD_base_offMethod_clazz(rd, rbase) LOAD_RB_OFF(rd, rbase, offMethod_clazz)
+
+#define LOAD_base_offMethod_name(rd, rbase) LOAD_RB_OFF(rd, rbase, offMethod_name)
+#define LOAD_base_offObject_clazz(rd, rbase) LOAD_RB_OFF(rd, rbase, offObject_clazz)
+
+#define LOADu2_offMethod_methodIndex(rd, rbase) LOADu2_RB_OFF(rd, rbase, offMethod_methodIndex)
+
+
+#define STORE_offThread_exception(rd, rbase) STORE_RB_OFF(rd, rbase, offThread_exception)
+
+
+#define STACK_STORE(rd, off) sw rd, off(sp)
+#define STACK_LOAD(rd, off) lw rd, off(sp)
+#define CREATE_STACK(n) subu sp, sp, n
+#define DELETE_STACK(n) addu sp, sp, n
+
+#define SAVE_RA(offset) STACK_STORE(ra, offset)
+#define LOAD_RA(offset) STACK_LOAD(ra, offset)
+
+#define LOAD_ADDR(dest, addr) la dest, addr
+#define LOAD_IMM(dest, imm) li dest, imm
+#define MOVE_REG(dest, src) move dest, src
+#define RETURN jr ra
+#define STACK_SIZE 128
+
+#define STACK_OFFSET_ARG04 16
+#define STACK_OFFSET_ARG05 20
+#define STACK_OFFSET_ARG06 24
+#define STACK_OFFSET_ARG07 28
+#define STACK_OFFSET_SCR   32
+#define STACK_OFFSET_SCRMX 80
+#define STACK_OFFSET_GP    84
+#define STACK_OFFSET_rFP   112
+
+#define JAL(n) jal n
+#define BAL(n) bal n
+
+#define STACK_STORE_RA() CREATE_STACK(STACK_SIZE); \
+    STACK_STORE(gp, STACK_OFFSET_GP); \
+    STACK_STORE(ra, 124)
+
+#define STACK_STORE_S0() STACK_STORE_RA(); \
+    STACK_STORE(s0, 116)
+
+#define STACK_STORE_S0S1() STACK_STORE_S0(); \
+    STACK_STORE(s1, STACK_OFFSET_rFP)
+
+#define STACK_LOAD_RA() STACK_LOAD(ra, 124); \
+    STACK_LOAD(gp, STACK_OFFSET_GP); \
+    DELETE_STACK(STACK_SIZE)
+
+#define STACK_LOAD_S0() STACK_LOAD(s0, 116); \
+    STACK_LOAD_RA()
+
+#define STACK_LOAD_S0S1() STACK_LOAD(s1, STACK_OFFSET_rFP); \
+    STACK_LOAD_S0()
+
+#define STACK_STORE_FULL() CREATE_STACK(STACK_SIZE); \
+    STACK_STORE(ra, 124); \
+    STACK_STORE(fp, 120); \
+    STACK_STORE(s0, 116); \
+    STACK_STORE(s1, STACK_OFFSET_rFP); \
+    STACK_STORE(s2, 108); \
+    STACK_STORE(s3, 104); \
+    STACK_STORE(s4, 100); \
+    STACK_STORE(s5, 96); \
+    STACK_STORE(s6, 92); \
+    STACK_STORE(s7, 88);
+
+#define STACK_LOAD_FULL() STACK_LOAD(gp, STACK_OFFSET_GP); \
+    STACK_LOAD(s7, 88); \
+    STACK_LOAD(s6, 92); \
+    STACK_LOAD(s5, 96); \
+    STACK_LOAD(s4, 100); \
+    STACK_LOAD(s3, 104); \
+    STACK_LOAD(s2, 108); \
+    STACK_LOAD(s1, STACK_OFFSET_rFP); \
+    STACK_LOAD(s0, 116); \
+    STACK_LOAD(fp, 120); \
+    STACK_LOAD(ra, 124); \
+    DELETE_STACK(STACK_SIZE)
+
+/*
+ * first 8 words are reserved for function calls
+ * Maximum offset is STACK_OFFSET_SCRMX-STACK_OFFSET_SCR
+ */
+#define SCRATCH_STORE(r,off) \
+    STACK_STORE(r, STACK_OFFSET_SCR+off);
+#define SCRATCH_LOAD(r,off) \
+    STACK_LOAD(r, STACK_OFFSET_SCR+off);
+
+#if defined(WITH_JIT)
+#include "../common/jit-config.h"
+#endif
+
+/* File: mips/platform.S */
+/*
+ * ===========================================================================
+ *  CPU-version-specific defines
+ * ===========================================================================
+ */
+
+#if !defined(ANDROID_SMP)
+# error "Must define ANDROID_SMP"
+#endif
+
+/*
+ * Macro for data memory barrier.
+ */
+.macro SMP_DMB
+#if ANDROID_SMP != 0
+    sync
+#else
+    /* not SMP */
+#endif
+.endm
+
+/*
+ * Macro for data memory barrier (store/store variant).
+ */
+.macro  SMP_DMB_ST
+#if ANDROID_SMP != 0
+    // FIXME: Is this really needed?
+    sync
+#else
+    /* not SMP */
+#endif
+.endm
+
+/* File: mips/entry.S */
+
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Interpreter entry point.
+ */
+
+#define ASSIST_DEBUGGER 1
+
+    .text
+    .align 2
+    .global dvmMterpStdRun
+    .ent dvmMterpStdRun
+    .frame sp, STACK_SIZE, ra
+/*
+ * On entry:
+ *  a0  Thread* self
+ *
+ * The return comes via a call to dvmMterpStdBail().
+ */
+
+dvmMterpStdRun:
+    .set noreorder
+    .cpload t9
+    .set reorder
+/* Save to the stack. Frame size = STACK_SIZE */
+    STACK_STORE_FULL()
+/* This directive ensures every subsequent jal restores gp from a known stack offset */
+    .cprestore STACK_OFFSET_GP
+
+    addu      fp, sp, STACK_SIZE           #  Move Frame Pointer to the base of frame
+    /* save the stack pointer so dvmMterpStdBail can restore it */
+    sw        sp, offThread_bailPtr(a0)      # Save SP
+
+    /* set up "named" registers, figure out entry point */
+    move      rSELF, a0                    #  set rSELF
+    LOAD_PC_FROM_SELF()
+    LOAD_FP_FROM_SELF()
+    lw        rIBASE, offThread_curHandlerTable(rSELF)
+
+#if defined(WITH_JIT)
+.LentryInstr:
+    /* Entry is always a possible trace start */
+    lw        a0, offThread_pJitProfTable(rSELF)
+    FETCH_INST()                           #  load rINST from rPC
+    sw        zero, offThread_inJitCodeCache(rSELF)
+#if !defined(WITH_SELF_VERIFICATION)
+    bnez      a0, common_updateProfile     # profiling is enabled
+#else
+    lw       a2, offThread_shadowSpace(rSELF) # to find out the jit exit state
+    beqz     a0, 1f                        # profiling is disabled
+    lw       a3, offShadowSpace_jitExitState(a2) # jit exit state
+    li	     t0, kSVSTraceSelect
+    bne      a3, t0, 2f
+    li       a2, kJitTSelectRequestHot     # ask for trace selection
+    b        common_selectTrace            # go build the trace
+2:
+    li       t0, kSVSNoProfile             # (a4 does not exist under o32; t0 is dead here)
+    beq      a3, t0, 1f                    # don't profile the next instruction?
+    b        common_updateProfile          # collect profiles
+#endif
+1:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+#else
+    /* start executing the instruction at rPC */
+    FETCH_INST()                           #  load rINST from rPC
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+#endif
+
+.Lbad_arg:
+    la        a0, .LstrBadEntryPoint
+    #a1 holds value of entryPoint
+    JAL(printf)
+    JAL(dvmAbort)
+
+    .end dvmMterpStdRun
+
+    .global dvmMterpStdBail
+    .ent dvmMterpStdBail
+
+/* Restore the stack pointer and all the registers stored at sp from the save
+ * point established on entry. Return to whoever called dvmMterpStdRun.
+ *
+ * On entry:
+ *   a0    Thread* self
+ */
+dvmMterpStdBail:
+    lw        sp, offThread_bailPtr(a0)      #  Restore sp
+    STACK_LOAD_FULL()
+    jr        ra
+
+    .end dvmMterpStdBail
+
+
+    .global dvmAsmInstructionStart
+    .type   dvmAsmInstructionStart, %function
+dvmAsmInstructionStart = .L_OP_NOP
+    .text
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_NOP: /* 0x00 */
+/* File: mips/OP_NOP.S */
+    FETCH_ADVANCE_INST(1)                  #  advance to next instr, load rINST
+    GET_INST_OPCODE(t0)
+    GOTO_OPCODE(t0)                        #  execute it
+
+#ifdef ASSIST_DEBUGGER
+    /* insert fake function header to help gdb find the stack frame */
+    .type dalvik_inst, @function
+dalvik_inst:
+    .ent dalvik_inst
+    .end dalvik_inst
+#endif
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_MOVE: /* 0x01 */
+/* File: mips/OP_MOVE.S */
+    /* for move, move-object, long-to-int */
+    /* op vA, vB */
+    GET_OPB(a1)                            #  a1 <- B from 15:12
+    GET_OPA4(a0)                           #  a0 <- A from 11:8
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    GET_VREG(a2, a1)                       #  a2 <- fp[B]
+    GET_INST_OPCODE(t0)                    #  t0 <- opcode from rINST
+    SET_VREG_GOTO(a2, a0, t0)              #  fp[A] <- a2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_MOVE_FROM16: /* 0x02 */
+/* File: mips/OP_MOVE_FROM16.S */
+    /* for: move/from16, move-object/from16 */
+    /* op vAA, vBBBB */
+    FETCH(a1, 1)                           #  a1 <- BBBB
+    GET_OPA(a0)                            #  a0 <- AA
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_VREG(a2, a1)                       #  a2 <- fp[BBBB]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a2, a0, t0)              #  fp[AA] <- a2
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_MOVE_16: /* 0x03 */
+/* File: mips/OP_MOVE_16.S */
+    /* for: move/16, move-object/16 */
+    /* op vAAAA, vBBBB */
+    FETCH(a1, 2)                           #  a1 <- BBBB
+    FETCH(a0, 1)                           #  a0 <- AAAA
+    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
+    GET_VREG(a2, a1)                       #  a2 <- fp[BBBB]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a2, a0, t0)              #  fp[AAAA] <- a2 and jump
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_MOVE_WIDE: /* 0x04 */
+/* File: mips/OP_MOVE_WIDE.S */
+    /* move-wide vA, vB */
+    /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
+    GET_OPA4(a2)                           #  a2 <- A(+)
+    GET_OPB(a3)                            #  a3 <- B
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[A]
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[B]
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(a0, a1, a2)                    #  fp[A] <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_MOVE_WIDE_FROM16: /* 0x05 */
+/* File: mips/OP_MOVE_WIDE_FROM16.S */
+    /* move-wide/from16 vAA, vBBBB */
+    /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
+    FETCH(a3, 1)                           #  a3 <- BBBB
+    GET_OPA(a2)                            #  a2 <- AA
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BBBB]
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[AA]
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[BBBB]
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(a0, a1, a2)                    #  fp[AA] <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_MOVE_WIDE_16: /* 0x06 */
+/* File: mips/OP_MOVE_WIDE_16.S */
+    /* move-wide/16 vAAAA, vBBBB */
+    /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
+    FETCH(a3, 2)                           #  a3 <- BBBB
+    FETCH(a2, 1)                           #  a2 <- AAAA
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BBBB]
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[AAAA]
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[BBBB]
+    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(a0, a1, a2)                    #  fp[AAAA] <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_MOVE_OBJECT: /* 0x07 */
+/* File: mips/OP_MOVE_OBJECT.S */
+/* File: mips/OP_MOVE.S */
+    /* for move, move-object, long-to-int */
+    /* op vA, vB */
+    GET_OPB(a1)                            #  a1 <- B from 15:12
+    GET_OPA4(a0)                           #  a0 <- A from 11:8
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    GET_VREG(a2, a1)                       #  a2 <- fp[B]
+    GET_INST_OPCODE(t0)                    #  t0 <- opcode from rINST
+    SET_VREG_GOTO(a2, a0, t0)              #  fp[A] <- a2
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_MOVE_OBJECT_FROM16: /* 0x08 */
+/* File: mips/OP_MOVE_OBJECT_FROM16.S */
+/* File: mips/OP_MOVE_FROM16.S */
+    /* for: move/from16, move-object/from16 */
+    /* op vAA, vBBBB */
+    FETCH(a1, 1)                           #  a1 <- BBBB
+    GET_OPA(a0)                            #  a0 <- AA
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_VREG(a2, a1)                       #  a2 <- fp[BBBB]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a2, a0, t0)              #  fp[AA] <- a2
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_MOVE_OBJECT_16: /* 0x09 */
+/* File: mips/OP_MOVE_OBJECT_16.S */
+/* File: mips/OP_MOVE_16.S */
+    /* for: move/16, move-object/16 */
+    /* op vAAAA, vBBBB */
+    FETCH(a1, 2)                           #  a1 <- BBBB
+    FETCH(a0, 1)                           #  a0 <- AAAA
+    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
+    GET_VREG(a2, a1)                       #  a2 <- fp[BBBB]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a2, a0, t0)              #  fp[AAAA] <- a2 and jump
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_MOVE_RESULT: /* 0x0a */
+/* File: mips/OP_MOVE_RESULT.S */
+    /* for: move-result, move-result-object */
+    /* op vAA */
+    GET_OPA(a2)                            #  a2 <- AA
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    LOAD_rSELF_retval(a0)                  #  a0 <- self->retval.i
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, a2, t0)              #  fp[AA] <- a0
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_MOVE_RESULT_WIDE: /* 0x0b */
+/* File: mips/OP_MOVE_RESULT_WIDE.S */
+    /* move-result-wide vAA */
+    GET_OPA(a2)                            #  a2 <- AA
+    addu      a3, rSELF, offThread_retval  #  a3 <- &self->retval
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[AA]
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- retval.j
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(a0, a1, a2)                    #  fp[AA] <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_MOVE_RESULT_OBJECT: /* 0x0c */
+/* File: mips/OP_MOVE_RESULT_OBJECT.S */
+/* File: mips/OP_MOVE_RESULT.S */
+    /* for: move-result, move-result-object */
+    /* op vAA */
+    GET_OPA(a2)                            #  a2 <- AA
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    LOAD_rSELF_retval(a0)                  #  a0 <- self->retval.i
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, a2, t0)              #  fp[AA] <- a0
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_MOVE_EXCEPTION: /* 0x0d */
+/* File: mips/OP_MOVE_EXCEPTION.S */
+    /* move-exception vAA */
+    GET_OPA(a2)                            #  a2 <- AA
+    LOAD_offThread_exception(a3, rSELF)    #  a3 <- dvmGetException bypass
+    li        a1, 0                        #  a1 <- 0
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    SET_VREG(a3, a2)                       #  fp[AA] <- exception obj
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE_offThread_exception(a1, rSELF)   #  dvmClearException bypass
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_RETURN_VOID: /* 0x0e */
+/* File: mips/OP_RETURN_VOID.S */
+    b         common_returnFromMethod
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_RETURN: /* 0x0f */
+/* File: mips/OP_RETURN.S */
+    /*
+     * Return a 32-bit value.  Copies the return value into the "thread"
+     * structure, then jumps to the return handler.
+     *
+     * for: return, return-object
+     */
+    /* op vAA */
+    GET_OPA(a2)                            #  a2 <- AA
+    GET_VREG(a0, a2)                       #  a0 <- vAA
+    sw        a0, offThread_retval(rSELF)  #  retval.i <- vAA
+    b         common_returnFromMethod
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_RETURN_WIDE: /* 0x10 */
+/* File: mips/OP_RETURN_WIDE.S */
+    /*
+     * Return a 64-bit value.  Copies the return value into the "thread"
+     * structure, then jumps to the return handler.
+     */
+    /* return-wide vAA */
+    GET_OPA(a2)                            #  a2 <- AA
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[AA]
+    addu      a3, rSELF, offThread_retval  #  a3 <- &self->retval
+    LOAD64(a0, a1, a2)                     #  a0/a1 <- vAA/vAA+1
+    STORE64(a0, a1, a3)                    #  retval <- a0/a1
+    b         common_returnFromMethod
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_RETURN_OBJECT: /* 0x11 */
+/* File: mips/OP_RETURN_OBJECT.S */
+/* File: mips/OP_RETURN.S */
+    /*
+     * Return a 32-bit value.  Copies the return value into the "thread"
+     * structure, then jumps to the return handler.
+     *
+     * for: return, return-object
+     */
+    /* op vAA */
+    GET_OPA(a2)                            #  a2 <- AA
+    GET_VREG(a0, a2)                       #  a0 <- vAA
+    sw        a0, offThread_retval(rSELF)  #  retval.i <- vAA
+    b         common_returnFromMethod
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_CONST_4: /* 0x12 */
+/* File: mips/OP_CONST_4.S */
+    # const/4 vA,                          /* +B */
+    sll       a1, rINST, 16                #  a1 <- Bxxx0000
+    GET_OPA(a0)                            #  a0 <- A+
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    sra       a1, a1, 28                   #  a1 <- sssssssB (sign-extended)
+    and       a0, a0, 15
+    GET_INST_OPCODE(t0)                    #  ip <- opcode from rINST
+    SET_VREG_GOTO(a1, a0, t0)              #  fp[A] <- a1
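+
+    /*
+     * Worked example (editorial): for "const/4 v0, #-1" the code unit is
+     * 0xf012 (B=0xf, A=0, opcode=0x12); sll 16 gives 0xf0120000 and sra 28
+     * yields 0xffffffff, the sign-extended -1 stored to v0.
+     */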
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_CONST_16: /* 0x13 */
+/* File: mips/OP_CONST_16.S */
+    # const/16 vAA,                        /* +BBBB */
+    FETCH_S(a0, 1)                         #  a0 <- ssssBBBB (sign-extended)
+    GET_OPA(a3)                            #  a3 <- AA
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, a3, t0)              #  vAA <- a0
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_CONST: /* 0x14 */
+/* File: mips/OP_CONST.S */
+    # const vAA,                           /* +BBBBbbbb */
+    GET_OPA(a3)                            #  a3 <- AA
+    FETCH(a0, 1)                           #  a0 <- bbbb (low)
+    FETCH(a1, 2)                           #  a1 <- BBBB (high)
+    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
+    sll       a1, a1, 16
+    or        a0, a1, a0                   #  a0 <- BBBBbbbb
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, a3, t0)              #  vAA <- a0
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_CONST_HIGH16: /* 0x15 */
+/* File: mips/OP_CONST_HIGH16.S */
+    # const/high16 vAA,                    /* +BBBB0000 */
+    FETCH(a0, 1)                           #  a0 <- 0000BBBB (zero-extended)
+    GET_OPA(a3)                            #  a3 <- AA
+    sll       a0, a0, 16                   #  a0 <- BBBB0000
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, a3, t0)              #  vAA <- a0
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_CONST_WIDE_16: /* 0x16 */
+/* File: mips/OP_CONST_WIDE_16.S */
+    # const-wide/16 vAA,                   /* +BBBB */
+    FETCH_S(a0, 1)                         #  a0 <- ssssBBBB (sign-extended)
+    GET_OPA(a3)                            #  a3 <- AA
+    sra       a1, a0, 31                   #  a1 <- ssssssss
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[AA]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(a0, a1, a3)                    #  vAA <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_CONST_WIDE_32: /* 0x17 */
+/* File: mips/OP_CONST_WIDE_32.S */
+    # const-wide/32 vAA,                   /* +BBBBbbbb */
+    FETCH(a0, 1)                           #  a0 <- 0000bbbb (low)
+    GET_OPA(a3)                            #  a3 <- AA
+    FETCH_S(a2, 2)                         #  a2 <- ssssBBBB (high)
+    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
+    sll       a2, a2, 16
+    or        a0, a0, a2                   #  a0 <- BBBBbbbb
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[AA]
+    sra       a1, a0, 31                   #  a1 <- ssssssss
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(a0, a1, a3)                    #  vAA <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_CONST_WIDE: /* 0x18 */
+/* File: mips/OP_CONST_WIDE.S */
+    # const-wide vAA,                      /* +HHHHhhhhBBBBbbbb */
+    FETCH(a0, 1)                           #  a0 <- bbbb (low)
+    FETCH(a1, 2)                           #  a1 <- BBBB (low middle)
+    FETCH(a2, 3)                           #  a2 <- hhhh (high middle)
+    sll       a1, 16                       #  a1 <- BBBB0000
+    or        a0, a1                       #  a0 <- BBBBbbbb (low word)
+    FETCH(a3, 4)                           #  a3 <- HHHH (high)
+    GET_OPA(t1)                            #  t1 <- AA
+    sll       a3, 16
+    or        a1, a3, a2                   #  a1 <- HHHHhhhh (high word)
+    FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
+    EAS2(t1, rFP, t1)                      #  t1 <- &fp[AA]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(a0, a1, t1)                    #  vAA <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_CONST_WIDE_HIGH16: /* 0x19 */
+/* File: mips/OP_CONST_WIDE_HIGH16.S */
+    # const-wide/high16 vAA,               /* +BBBB000000000000 */
+    FETCH(a1, 1)                           #  a1 <- 0000BBBB (zero-extended)
+    GET_OPA(a3)                            #  a3 <- AA
+    li        a0, 0                        #  a0 <- 00000000
+    sll       a1, 16                       #  a1 <- BBBB0000
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[AA]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(a0, a1, a3)                    #  vAA <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_CONST_STRING: /* 0x1a */
+/* File: mips/OP_CONST_STRING.S */
+    # const/string vAA, String             /* BBBB */
+    FETCH(a1, 1)                           #  a1 <- BBBB
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- self->methodClassDex
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    LOAD_base_offDvmDex_pResStrings(a2, a2) #  a2 <- dvmDex->pResStrings
+    LOAD_eas2(v0, a2, a1)                  #  v0 <- pResStrings[BBBB]
+    # not yet resolved?
+    bnez      v0, .LOP_CONST_STRING_resolve
+    /*
+     * Continuation if the String has not yet been resolved.
+     *  a1:   BBBB (String ref)
+     *  rOBJ: target register
+     */
+    EXPORT_PC()
+    LOAD_rSELF_method(a0)                  #  a0 <- self->method
+    LOAD_base_offMethod_clazz(a0, a0)      #  a0 <- method->clazz
+    JAL(dvmResolveString)                  #  v0 <- String reference
+    # failed?
+    beqz      v0, common_exceptionThrown   #  yup, handle the exception
+
+.LOP_CONST_STRING_resolve:
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(v0, rOBJ, t0)            #  vAA <- v0
+
+
+
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_CONST_STRING_JUMBO: /* 0x1b */
+/* File: mips/OP_CONST_STRING_JUMBO.S */
+    # const/string vAA, String             /* BBBBBBBB */
+    FETCH(a0, 1)                           #  a0 <- bbbb (low)
+    FETCH(a1, 2)                           #  a1 <- BBBB (high)
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- self->methodClassDex
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    LOAD_base_offDvmDex_pResStrings(a2, a2) #  a2 <- dvmDex->pResStrings
+    sll       a1, a1, 16
+    or        a1, a1, a0                   #  a1 <- BBBBbbbb
+    LOAD_eas2(v0, a2, a1)                  #  v0 <- pResStrings[BBBB]
+    bnez      v0, .LOP_CONST_STRING_JUMBO_resolve
+
+    /*
+     * Continuation if the String has not yet been resolved.
+     *  a1: BBBBBBBB (String ref)
+     *  rOBJ: target register
+     */
+    EXPORT_PC()
+    LOAD_rSELF_method(a0)                  #  a0 <- self->method
+    LOAD_base_offMethod_clazz(a0, a0)      #  a0 <- method->clazz
+    JAL(dvmResolveString)                  #  v0 <- String reference
+    # failed?
+    beqz      v0, common_exceptionThrown   #  yup, handle the exception
+
+.LOP_CONST_STRING_JUMBO_resolve:
+    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    SET_VREG_GOTO(v0, rOBJ, t1)            #  vAA <- v0
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_CONST_CLASS: /* 0x1c */
+/* File: mips/OP_CONST_CLASS.S */
+    # const/class vAA, Class               /* BBBB */
+    FETCH(a1, 1)                           #  a1 <- BBBB
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- self->methodClassDex
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    LOAD_base_offDvmDex_pResClasses(a2, a2) #  a2 <- dvmDex->pResClasses
+    LOAD_eas2(v0, a2, a1)                  #  v0 <- pResClasses[BBBB]
+
+    bnez      v0, .LOP_CONST_CLASS_resolve      #  v0!=0 => resolved-ok
+    /*
+     * Continuation if the Class has not yet been resolved.
+     *  a1: BBBB (Class ref)
+     *  rOBJ: target register
+     */
+    EXPORT_PC()
+    LOAD_rSELF_method(a0)                  #  a0 <- self->method
+    li        a2, 1                        #  a2 <- true
+    LOAD_base_offMethod_clazz(a0, a0)      #  a0 <- method->clazz
+    JAL(dvmResolveClass)                   #  v0 <- Class reference
+    # failed?
+    beqz      v0, common_exceptionThrown   #  yup, handle the exception
+
+.LOP_CONST_CLASS_resolve:
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(v0, rOBJ, t0)            #  vAA <- v0
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_MONITOR_ENTER: /* 0x1d */
+/* File: mips/OP_MONITOR_ENTER.S */
+    /*
+     * Synchronize on an object.
+     */
+    /* monitor-enter vAA */
+    GET_OPA(a2)                            #  a2 <- AA
+    GET_VREG(a1, a2)                       #  a1 <- vAA (object)
+    move      a0, rSELF                    #  a0 <- self
+    EXPORT_PC()                            #  export PC so we can grab stack trace
+    # null object?
+    beqz      a1, common_errNullObject     #  null object, throw an exception
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    JAL(dvmLockObject)                     #  call(self, obj)
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_MONITOR_EXIT: /* 0x1e */
+/* File: mips/OP_MONITOR_EXIT.S */
+    /*
+     * Unlock an object.
+     *
+     * Exceptions that occur when unlocking a monitor need to appear as
+     * if they happened at the following instruction.  See the Dalvik
+     * instruction spec.
+     */
+    /* monitor-exit vAA */
+    GET_OPA(a2)                            #  a2 <- AA
+    EXPORT_PC()                            #  before fetch: export the PC
+    GET_VREG(a1, a2)                       #  a1 <- vAA (object)
+    # null object?
+    beqz      a1, 1f
+    move      a0, rSELF                    #  a0 <- self
+    JAL(dvmUnlockObject)                   #  v0 <- success for unlock(self, obj)
+    # failed?
+    FETCH_ADVANCE_INST(1)                  #  before throw: advance rPC, load rINST
+    beqz      v0, common_exceptionThrown   #  yes, exception is pending
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+1:
+    FETCH_ADVANCE_INST(1)                  #  before throw: advance rPC, load rINST
+    b         common_errNullObject
+
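+    /*
+     * Note the ordering above: rPC is advanced before the unlock result
+     * is acted on, so a throw from monitor-exit is attributed to the
+     * following instruction, as the spec requires.  A C sketch (not the
+     * literal handler):
+     *
+     *   Object* obj = (Object*) fp[AA];
+     *   pc += 1;                              // 1 code unit; blame successor
+     *   if (obj == NULL) goto nullObject;
+     *   if (!dvmUnlockObject(self, obj))
+     *       goto exceptionThrown;
+     */
+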
+/* ------------------------------ */
+    .balign 128
+.L_OP_CHECK_CAST: /* 0x1f */
+/* File: mips/OP_CHECK_CAST.S */
+    /*
+     * Check to see if a cast from one class to another is allowed.
+     */
+    # check-cast vAA, class                /* BBBB */
+    GET_OPA(a3)                            #  a3 <- AA
+    FETCH(a2, 1)                           #  a2 <- BBBB
+    GET_VREG(rOBJ, a3)                     #  rOBJ <- object
+    LOAD_rSELF_methodClassDex(a0)          #  a0 <- pDvmDex
+    LOAD_base_offDvmDex_pResClasses(a0, a0) #  a0 <- pDvmDex->pResClasses
+    # is object null?
+    beqz      rOBJ, .LOP_CHECK_CAST_okay       #  null obj, cast always succeeds
+    LOAD_eas2(a1, a0, a2)                  #  a1 <- resolved class
+    LOAD_base_offObject_clazz(a0, rOBJ)    #  a0 <- obj->clazz
+    # have we resolved this before?
+    beqz      a1, .LOP_CHECK_CAST_resolve      #  not resolved, do it now
+.LOP_CHECK_CAST_resolved:
+    # same class (trivial success)?
+    bne       a0, a1, .LOP_CHECK_CAST_fullcheck #  no, do full check
+.LOP_CHECK_CAST_okay:
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+    /*
+     * Trivial test failed, need to perform full check.  This is common.
+     *  a0 holds obj->clazz
+     *  a1 holds class resolved from BBBB
+     *  rOBJ holds object
+     */
+.LOP_CHECK_CAST_fullcheck:
+    move      rBIX,a1                      #  avoid ClassObject getting clobbered
+    JAL(dvmInstanceofNonTrivial)           #  v0 <- boolean result
+    # failed?
+    bnez      v0, .LOP_CHECK_CAST_okay         #  no, success
+    b         .LOP_CHECK_CAST_castfailure
+
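+    /*
+     * Overall shape of check-cast in C (dvmResolveClass and
+     * dvmInstanceofNonTrivial are the real helpers; the glue is a sketch):
+     *
+     *   Object* obj = (Object*) fp[AA];
+     *   if (obj != NULL) {                     // null always passes
+     *       ClassObject* clazz = pResClasses[BBBB];
+     *       if (clazz == NULL)
+     *           clazz = dvmResolveClass(curMethod->clazz, BBBB, false);
+     *       if (obj->clazz != clazz &&         // trivial same-class test
+     *           !dvmInstanceofNonTrivial(obj->clazz, clazz))
+     *           goto castFailure;              // throws ClassCastException
+     *   }
+     */
+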
+/* ------------------------------ */
+    .balign 128
+.L_OP_INSTANCE_OF: /* 0x20 */
+/* File: mips/OP_INSTANCE_OF.S */
+    /*
+     * Check to see if an object reference is an instance of a class.
+     *
+     * Most common situation is a non-null object, being compared against
+     * an already-resolved class.
+     */
+    # instance-of vA, vB, class            /* CCCC */
+    GET_OPB(a3)                            #  a3 <- B
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_VREG(a0, a3)                       #  a0 <- vB (object)
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- pDvmDex
+    # is object null?
+    beqz      a0, .LOP_INSTANCE_OF_store        #  null obj, not an instance, store a0
+    FETCH(a3, 1)                           #  a3 <- CCCC
+    LOAD_base_offDvmDex_pResClasses(a2, a2) #  a2 <- pDvmDex->pResClasses
+    LOAD_eas2(a1, a2, a3)                  #  a1 <- resolved class
+    LOAD_base_offObject_clazz(a0, a0)      #  a0 <- obj->clazz
+    # have we resolved this before?
+    beqz      a1, .LOP_INSTANCE_OF_resolve      #  not resolved, do it now
+.LOP_INSTANCE_OF_resolved:                   #  a0=obj->clazz, a1=resolved class
+    # same class (trivial success)?
+    beq       a0, a1, .LOP_INSTANCE_OF_trivial  #  yes, trivial finish
+    b         .LOP_INSTANCE_OF_fullcheck        #  no, do full check
+
+    /*
+     * Trivial test succeeded, save and bail.
+     *  rOBJ holds A
+     */
+.LOP_INSTANCE_OF_trivial:
+    li        a0, 1                        #  indicate success
+    # fall thru
+    /*
+     * a0   holds boolean result
+     * rOBJ holds A
+     */
+.LOP_INSTANCE_OF_store:
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    SET_VREG(a0, rOBJ)                     #  vA <- a0
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_ARRAY_LENGTH: /* 0x21 */
+/* File: mips/OP_ARRAY_LENGTH.S */
+    /*
+     * Return the length of an array.
+     */
+    GET_OPB(a1)                            #  a1 <- B
+    GET_OPA4(a2)                           #  a2 <- A+
+    GET_VREG(a0, a1)                       #  a0 <- vB (object ref)
+    # is object null?
+    beqz      a0, common_errNullObject     #  yup, fail
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    LOAD_base_offArrayObject_length(a3, a0) #  a3 <- array length
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a3, a2, t0)              #  vA <- length
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_NEW_INSTANCE: /* 0x22 */
+/* File: mips/OP_NEW_INSTANCE.S */
+    /*
+     * Create a new instance of a class.
+     */
+    # new-instance vAA, class              /* BBBB */
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- pDvmDex
+    FETCH(a1, 1)                           #  a1 <- BBBB
+    LOAD_base_offDvmDex_pResClasses(a3, a3) #  a3 <- pDvmDex->pResClasses
+    LOAD_eas2(a0, a3, a1)                  #  a0 <- resolved class
+#if defined(WITH_JIT)
+    EAS2(rBIX, a3, a1)                     #  rBIX <- &resolved_class
+#endif
+    EXPORT_PC()                            #  req'd for init, resolve, alloc
+    # already resolved?
+    beqz      a0, .LOP_NEW_INSTANCE_resolve      #  no, resolve it now
+.LOP_NEW_INSTANCE_resolved:                      #  a0=class
+    lbu       a1, offClassObject_status(a0) #  a1 <- ClassStatus enum
+    # has class been initialized?
+    li        t0, CLASS_INITIALIZED
+    move      rOBJ, a0                     #  save a0
+    bne       a1, t0, .LOP_NEW_INSTANCE_needinit #  no, init class now
+
+.LOP_NEW_INSTANCE_initialized:                   #  a0=class
+    LOAD_base_offClassObject_accessFlags(a3, a0) #  a3 <- clazz->accessFlags
+    li        a1, ALLOC_DONT_TRACK         #  flags for alloc call
+    # a0=class
+    JAL(dvmAllocObject)                    #  v0 <- new object
+    GET_OPA(a3)                            #  a3 <- AA
+#if defined(WITH_JIT)
+    /*
+     * The JIT needs the class to be fully resolved before it can
+     * include this instruction in a trace.
+     */
+    lhu       a1, offThread_subMode(rSELF)
+    beqz      v0, common_exceptionThrown   #  yes, handle the exception
+    and       a1, kSubModeJitTraceBuild    #  under construction?
+    bnez      a1, .LOP_NEW_INSTANCE_jitCheck
+#else
+    # failed?
+    beqz      v0, common_exceptionThrown   #  yes, handle the exception
+#endif
+    b         .LOP_NEW_INSTANCE_continue
+
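+    /*
+     * Fast path in outline (CLASS_INITIALIZED, ALLOC_DONT_TRACK, and the
+     * helpers are real; the C control flow is a sketch):
+     *
+     *   ClassObject* clazz = pResClasses[BBBB];
+     *   if (clazz == NULL)
+     *       clazz = dvmResolveClass(curMethod->clazz, BBBB, false);
+     *   if (clazz->status != CLASS_INITIALIZED && !dvmInitClass(clazz))
+     *       goto exceptionThrown;
+     *   Object* obj = dvmAllocObject(clazz, ALLOC_DONT_TRACK);
+     *   if (obj == NULL) goto exceptionThrown;
+     *   fp[AA] = (u4) obj;
+     */
+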
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_NEW_ARRAY: /* 0x23 */
+/* File: mips/OP_NEW_ARRAY.S */
+    /*
+     * Allocate an array of objects, specified with the array class
+     * and a count.
+     *
+     * The verifier guarantees that this is an array class, so we don't
+     * check for it here.
+     */
+    /* new-array vA, vB, class@CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    FETCH(a2, 1)                           #  a2 <- CCCC
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- pDvmDex
+    GET_VREG(a1, a0)                       #  a1 <- vB (array length)
+    LOAD_base_offDvmDex_pResClasses(a3, a3) #  a3 <- pDvmDex->pResClasses
+    LOAD_eas2(a0, a3, a2)                  #  a0 <- resolved class
+    # check length
+    bltz      a1, common_errNegativeArraySize #  negative length, bail - len in a1
+    EXPORT_PC()                            #  req'd for resolve, alloc
+    # already resolved?
+    beqz      a0, .LOP_NEW_ARRAY_resolve
+
+    /*
+     * Finish allocation.
+     *
+     *  a0 holds class
+     *  a1 holds array length
+     */
+.LOP_NEW_ARRAY_finish:
+    li        a2, ALLOC_DONT_TRACK         #  don't track in local refs table
+    JAL(dvmAllocArrayByClass)              #  v0 <- call(clazz, length, flags)
+    GET_OPA4(a2)                           #  a2 <- A+
+    # failed?
+    beqz      v0, common_exceptionThrown   #  yes, handle the exception
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG(v0, a2)                       #  vA <- v0
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_FILLED_NEW_ARRAY: /* 0x24 */
+/* File: mips/OP_FILLED_NEW_ARRAY.S */
+    /*
+     * Create a new array with elements filled from registers.
+     *
+     * for: filled-new-array, filled-new-array/range
+     */
+    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
+    # op {vCCCC..v(CCCC+AA-1)}, type       /* BBBB */
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- pDvmDex
+    FETCH(a1, 1)                           #  a1 <- BBBB
+    LOAD_base_offDvmDex_pResClasses(a3, a3) #  a3 <- pDvmDex->pResClasses
+    EXPORT_PC()                            #  need for resolve and alloc
+    LOAD_eas2(a0, a3, a1)                  #  a0 <- resolved class
+    GET_OPA(rOBJ)                          #  rOBJ <- AA or BA
+    # already resolved?
+    bnez      a0, .LOP_FILLED_NEW_ARRAY_continue     #  yes, continue on
+    LOAD_rSELF_method(a3)                  #  a3 <- self->method
+    li        a2, 0                        #  a2 <- false
+    LOAD_base_offMethod_clazz(a0, a3)      #  a0 <- method->clazz
+    JAL(dvmResolveClass)                   #  v0 <- call(clazz, ref)
+    move      a0, v0
+    # got null?
+    beqz      v0, common_exceptionThrown   #  yes, handle exception
+    b         .LOP_FILLED_NEW_ARRAY_continue
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_FILLED_NEW_ARRAY_RANGE: /* 0x25 */
+/* File: mips/OP_FILLED_NEW_ARRAY_RANGE.S */
+/* File: mips/OP_FILLED_NEW_ARRAY.S */
+    /*
+     * Create a new array with elements filled from registers.
+     *
+     * for: filled-new-array, filled-new-array/range
+     */
+    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
+    # op {vCCCC..v(CCCC+AA-1)}, type       /* BBBB */
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- pDvmDex
+    FETCH(a1, 1)                           #  a1 <- BBBB
+    LOAD_base_offDvmDex_pResClasses(a3, a3) #  a3 <- pDvmDex->pResClasses
+    EXPORT_PC()                            #  need for resolve and alloc
+    LOAD_eas2(a0, a3, a1)                  #  a0 <- resolved class
+    GET_OPA(rOBJ)                          #  rOBJ <- AA or BA
+    # already resolved?
+    bnez      a0, .LOP_FILLED_NEW_ARRAY_RANGE_continue     #  yes, continue on
+    LOAD_rSELF_method(a3)                  #  a3 <- self->method
+    li        a2, 0                        #  a2 <- false
+    LOAD_base_offMethod_clazz(a0, a3)      #  a0 <- method->clazz
+    JAL(dvmResolveClass)                   #  v0 <- call(clazz, ref)
+    move      a0, v0
+    # got null?
+    beqz      v0, common_exceptionThrown   #  yes, handle exception
+    b         .LOP_FILLED_NEW_ARRAY_RANGE_continue
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_FILL_ARRAY_DATA: /* 0x26 */
+/* File: mips/OP_FILL_ARRAY_DATA.S */
+    /* fill-array-data vAA, +BBBBBBBB */
+    FETCH(a0, 1)                           #  a0 <- bbbb (lo)
+    FETCH(a1, 2)                           #  a1 <- BBBB (hi)
+    GET_OPA(a3)                            #  a3 <- AA
+    sll       a1, a1, 16                   #  a1 <- BBBB0000
+    or        a1, a0, a1                   #  a1 <- BBBBbbbb
+    GET_VREG(a0, a3)                       #  a0 <- vAA (array object)
+    EAS1(a1, rPC, a1)                      #  a1 <- PC + BBBBbbbb*2 (array data off.)
+    EXPORT_PC()
+    JAL(dvmInterpHandleFillArrayData)      #  fill the array with predefined data
+    # 0 means an exception is thrown
+    beqz      v0, common_exceptionThrown   #  has exception
+    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_THROW: /* 0x27 */
+/* File: mips/OP_THROW.S */
+    /*
+     * Throw an exception object in the current thread.
+     */
+    /* throw vAA */
+    GET_OPA(a2)                            #  a2 <- AA
+    GET_VREG(a1, a2)                       #  a1 <- vAA (exception object)
+    EXPORT_PC()                            #  exception handler can throw
+    # null object?
+    beqz      a1, common_errNullObject     #  yes, throw an NPE instead
+    # bypass dvmSetException, just store it
+    STORE_offThread_exception(a1, rSELF)   #  thread->exception <- obj
+    b         common_exceptionThrown
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_GOTO: /* 0x28 */
+/* File: mips/OP_GOTO.S */
+    /*
+     * Unconditional branch, 8-bit offset.
+     *
+     * The branch distance is a signed code-unit offset, which we need to
+     * double to get a byte offset.
+     */
+    /* goto +AA */
+    sll       a0, rINST, 16                #  a0 <- AAxx0000
+    sra       a1, a0, 24                   #  a1 <- ssssssAA (sign-extended)
+    addu      a2, a1, a1                   #  a2 <- byte offset
+    /* If backwards branch, refresh rIBASE */
+    bgez      a1, 1f
+    lw        rIBASE, offThread_curHandlerTable(rSELF) #  refresh handler base
+1:
+    FETCH_ADVANCE_INST_RB(a2)              #  update rPC, load rINST
+#if defined(WITH_JIT)
+    lw        a0, offThread_pJitProfTable(rSELF)
+    bltz      a1, common_testUpdateProfile #  (a0) check for trace hotness
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
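+    /*
+     * The sll/sra pair above is just sign extension of the AA byte held
+     * in rINST; the whole computation, in C:
+     *
+     *   int delta = ((s4)(inst << 16)) >> 24;   #  ssssssAA, in code units
+     *   rPC += delta;                            #  asm doubles it first,
+     *                                            #  since rPC is a byte ptr
+     */
+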
+/* ------------------------------ */
+    .balign 128
+.L_OP_GOTO_16: /* 0x29 */
+/* File: mips/OP_GOTO_16.S */
+    /*
+     * Unconditional branch, 16-bit offset.
+     *
+     * The branch distance is a signed code-unit offset, which we need to
+     * double to get a byte offset.
+     */
+    /* goto/16 +AAAA */
+    FETCH_S(a0, 1)                         #  a0 <- ssssAAAA (sign-extended)
+    addu      a1, a0, a0                   #  a1 <- byte offset
+    FETCH_ADVANCE_INST_RB(a1)              #  update rPC, load rINST
+    bgez      a1, 1f
+    lw        rIBASE, offThread_curHandlerTable(rSELF) #  refresh handler base
+1:
+#if defined(WITH_JIT)
+    lw        a0, offThread_pJitProfTable(rSELF)
+    bltz      a1, common_testUpdateProfile #  (a0) hot trace head?
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_GOTO_32: /* 0x2a */
+/* File: mips/OP_GOTO_32.S */
+    /*
+     * Unconditional branch, 32-bit offset.
+     *
+     * The branch distance is a signed code-unit offset, which we need to
+     * double to get a byte offset.
+     *
+     * Unlike most opcodes, this one is allowed to branch to itself, so
+     * our "backward branch" test must be "<=0" instead of "<0".
+     */
+    /* goto/32 +AAAAAAAA */
+    FETCH(a0, 1)                           #  a0 <- aaaa (lo)
+    FETCH(a1, 2)                           #  a1 <- AAAA (hi)
+    sll       a1, a1, 16
+    or        a0, a0, a1                   #  a0 <- AAAAaaaa
+    addu      a1, a0, a0                   #  a1 <- byte offset
+#if defined(WITH_JIT)
+    lw        a0, offThread_pJitProfTable(rSELF)
+    bgtz      a1, 1f
+    lw        rIBASE, offThread_curHandlerTable(rSELF) #  refresh handler base
+1:
+    FETCH_ADVANCE_INST_RB(a1)              #  update rPC, load rINST
+    blez      a1, common_testUpdateProfile # (a0) hot trace head?
+#else
+    FETCH_ADVANCE_INST_RB(a1)              #  update rPC, load rINST
+    bgtz      a0, 2f
+    lw        rIBASE, offThread_curHandlerTable(rSELF) #  refresh handler base
+2:
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_PACKED_SWITCH: /* 0x2b */
+/* File: mips/OP_PACKED_SWITCH.S */
+    /*
+     * Handle a packed-switch or sparse-switch instruction.  In both cases
+     * we decode it and hand it off to a helper function.
+     *
+     * We don't really expect backward branches in a switch statement, but
+     * they're perfectly legal, so we check for them here.
+     *
+     * When the JIT is present, all branch targets are treated as
+     * potential trace heads regardless of branch direction.
+     *
+     * for: packed-switch, sparse-switch
+     */
+    /* op vAA, +BBBB */
+    FETCH(a0, 1)                           #  a0 <- bbbb (lo)
+    FETCH(a1, 2)                           #  a1 <- BBBB (hi)
+    GET_OPA(a3)                            #  a3 <- AA
+    sll       t0, a1, 16
+    or        a0, a0, t0                   #  a0 <- BBBBbbbb
+    GET_VREG(a1, a3)                       #  a1 <- vAA
+    EAS1(a0, rPC, a0)                      #  a0 <- PC + BBBBbbbb*2
+    JAL(dvmInterpHandlePackedSwitch)       #  v0 <- code-unit branch offset
+    addu      a1, v0, v0                   #  a1 <- byte offset
+    bgtz      a1, 1f
+    lw        rIBASE, offThread_curHandlerTable(rSELF) #  refresh handler base
+1:
+    FETCH_ADVANCE_INST_RB(a1)              #  update rPC, load rINST
+#if defined(WITH_JIT)
+    lw        a0, offThread_pJitProfTable(rSELF)
+    bnez      a0, common_updateProfile
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
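+    /*
+     * dvmInterpHandlePackedSwitch does the table walk; given the
+     * documented packed-switch payload (ident, size, first_key,
+     * targets[size]), its core is roughly:
+     *
+     *   s4 index = testVal - firstKey;
+     *   if ((u4) index >= size)
+     *       return 3;                      // insn width: fall through
+     *   return targets[index];             // code-unit branch offset
+     */
+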
+/* ------------------------------ */
+    .balign 128
+.L_OP_SPARSE_SWITCH: /* 0x2c */
+/* File: mips/OP_SPARSE_SWITCH.S */
+/* File: mips/OP_PACKED_SWITCH.S */
+    /*
+     * Handle a packed-switch or sparse-switch instruction.  In both cases
+     * we decode it and hand it off to a helper function.
+     *
+     * We don't really expect backward branches in a switch statement, but
+     * they're perfectly legal, so we check for them here.
+     *
+     * When the JIT is present, all branch targets are treated as
+     * potential trace heads regardless of branch direction.
+     *
+     * for: packed-switch, sparse-switch
+     */
+    /* op vAA, +BBBB */
+    FETCH(a0, 1)                           #  a0 <- bbbb (lo)
+    FETCH(a1, 2)                           #  a1 <- BBBB (hi)
+    GET_OPA(a3)                            #  a3 <- AA
+    sll       t0, a1, 16
+    or        a0, a0, t0                   #  a0 <- BBBBbbbb
+    GET_VREG(a1, a3)                       #  a1 <- vAA
+    EAS1(a0, rPC, a0)                      #  a0 <- PC + BBBBbbbb*2
+    JAL(dvmInterpHandleSparseSwitch)       #  v0 <- code-unit branch offset
+    addu      a1, v0, v0                   #  a1 <- byte offset
+    bgtz      a1, 1f
+    lw        rIBASE, offThread_curHandlerTable(rSELF) #  refresh handler base
+1:
+    FETCH_ADVANCE_INST_RB(a1)              #  update rPC, load rINST
+#if defined(WITH_JIT)
+    lw        a0, offThread_pJitProfTable(rSELF)
+    bnez      a0, common_updateProfile
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_CMPL_FLOAT: /* 0x2d */
+/* File: mips/OP_CMPL_FLOAT.S */
+    /*
+     * Compare two floating-point values.  Puts 0, 1, or -1 into the
+     * destination register based on the results of the comparison.
+     *
+     * Provide a "naninst" instruction that puts 1 or -1 into a1 depending
+     * on what value we'd like to return when one of the operands is NaN.
+     *
+     * The operation we're implementing is:
+     *   if (x == y)
+     *     return 0;
+     *   else if (x < y)
+     *     return -1;
+     *   else if (x > y)
+     *     return 1;
+     *   else
+     *     return {-1,1};  // one or both operands was NaN
+     *
+     * for: cmpl-float, cmpg-float
+     */
+    /* op vAA, vBB, vCC */
+
+    /* "clasic" form */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8
+#ifdef SOFT_FLOAT
+    GET_VREG(rOBJ, a2)                     #  rOBJ <- vBB
+    GET_VREG(rBIX, a3)                     #  rBIX <- vCC
+    move      a0, rOBJ                     #  a0 <- vBB
+    move      a1, rBIX                     #  a1 <- vCC
+    JAL(__eqsf2)                           #  v0 <- 0 if vBB == vCC
+    li        rTEMP, 0                     # set rTEMP to 0
+    beqz      v0, OP_CMPL_FLOAT_finish
+    move      a0, rOBJ                     #  a0 <- vBB
+    move      a1, rBIX                     #  a1 <- vCC
+    JAL(__ltsf2)                           #  v0 < 0 if vBB < vCC
+    li        rTEMP, -1
+    bltz      v0, OP_CMPL_FLOAT_finish
+    move      a0, rOBJ                     #  a0 <- vBB
+    move      a1, rBIX                     #  a1 <- vCC
+    b         OP_CMPL_FLOAT_continue
+#else
+    GET_VREG_F(fs0, a2)
+    GET_VREG_F(fs1, a3)
+    c.olt.s   fcc0, fs0, fs1               # Is fs0 < fs1
+    li        rTEMP, -1
+    bc1t      fcc0, OP_CMPL_FLOAT_finish
+    c.olt.s   fcc0, fs1, fs0
+    li        rTEMP, 1
+    bc1t      fcc0, OP_CMPL_FLOAT_finish
+    c.eq.s    fcc0, fs0, fs1
+    li        rTEMP, 0
+    bc1t      fcc0, OP_CMPL_FLOAT_finish
+    b         OP_CMPL_FLOAT_nan
+
+#endif
+
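+    /*
+     * cmpl-float and cmpg-float differ only in the value stored when a
+     * NaN is involved; the ordered-compare sequence above implements:
+     *
+     *   int res;
+     *   if      (x < y)  res = -1;
+     *   else if (x > y)  res = 1;
+     *   else if (x == y) res = 0;
+     *   else             res = naninst;    // -1 for cmpl, +1 for cmpg
+     *   fp[AA] = res;
+     */
+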
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_CMPG_FLOAT: /* 0x2e */
+/* File: mips/OP_CMPG_FLOAT.S */
+/* File: mips/OP_CMPL_FLOAT.S */
+    /*
+     * Compare two floating-point values.  Puts 0, 1, or -1 into the
+     * destination register based on the results of the comparison.
+     *
+     * Provide a "naninst" instruction that puts 1 or -1 into a1 depending
+     * on what value we'd like to return when one of the operands is NaN.
+     *
+     * The operation we're implementing is:
+     *   if (x == y)
+     *     return 0;
+     *   else if (x < y)
+     *     return -1;
+     *   else if (x > y)
+     *     return 1;
+     *   else
+     *     return {-1,1};  // one or both operands was NaN
+     *
+     * for: cmpl-float, cmpg-float
+     */
+    /* op vAA, vBB, vCC */
+
+    /* "clasic" form */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8
+#ifdef SOFT_FLOAT
+    GET_VREG(rOBJ, a2)                     #  rOBJ <- vBB
+    GET_VREG(rBIX, a3)                     #  rBIX <- vCC
+    move      a0, rOBJ                     #  a0 <- vBB
+    move      a1, rBIX                     #  a1 <- vCC
+    JAL(__eqsf2)                           #  v0 <- 0 if vBB == vCC
+    li        rTEMP, 0                     # set rTEMP to 0
+    beqz      v0, OP_CMPG_FLOAT_finish
+    move      a0, rOBJ                     #  a0 <- vBB
+    move      a1, rBIX                     #  a1 <- vCC
+    JAL(__ltsf2)                           #  v0 < 0 if vBB < vCC
+    li        rTEMP, -1
+    bltz      v0, OP_CMPG_FLOAT_finish
+    move      a0, rOBJ                     #  a0 <- vBB
+    move      a1, rBIX                     #  a1 <- vCC
+    b         OP_CMPG_FLOAT_continue
+#else
+    GET_VREG_F(fs0, a2)
+    GET_VREG_F(fs1, a3)
+    c.olt.s   fcc0, fs0, fs1               # Is fs0 < fs1
+    li        rTEMP, -1
+    bc1t      fcc0, OP_CMPG_FLOAT_finish
+    c.olt.s   fcc0, fs1, fs0
+    li        rTEMP, 1
+    bc1t      fcc0, OP_CMPG_FLOAT_finish
+    c.eq.s    fcc0, fs0, fs1
+    li        rTEMP, 0
+    bc1t      fcc0, OP_CMPG_FLOAT_finish
+    b         OP_CMPG_FLOAT_nan
+
+#endif
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_CMPL_DOUBLE: /* 0x2f */
+/* File: mips/OP_CMPL_DOUBLE.S */
+    /*
+     * Compare two floating-point values.  Puts 0, 1, or -1 into the
+     * destination register based on the results of the comparison.
+     *
+     * Provide a "naninst" instruction that puts 1 or -1 into a1 depending
+     * on what value we'd like to return when one of the operands is NaN.
+     *
+     * See OP_CMPL_FLOAT for an explanation.
+     *
+     * For: cmpl-double, cmpg-double
+     */
+    /* op vAA, vBB, vCC */
+
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    and       rOBJ, a0, 255                #  rOBJ <- BB
+    srl       rBIX, a0, 8                  #  rBIX <- CC
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[BB]
+    EAS2(rBIX, rFP, rBIX)                  #  rBIX <- &fp[CC]
+#ifdef SOFT_FLOAT
+    LOAD64(rARG0, rARG1, rOBJ)             #  a0/a1 <- vBB/vBB+1
+    LOAD64(rARG2, rARG3, rBIX)             #  a2/a3 <- vCC/vCC+1
+    JAL(__eqdf2)                           #  v0 <- 0 if vBB == vCC
+    li        rTEMP, 0
+    beqz      v0, OP_CMPL_DOUBLE_finish
+
+    LOAD64(rARG0, rARG1, rOBJ)             #  a0/a1 <- vBB/vBB+1
+    LOAD64(rARG2, rARG3, rBIX)             #  a2/a3 <- vCC/vCC+1
+    JAL(__ltdf2)
+    li        rTEMP, -1
+    bltz      v0, OP_CMPL_DOUBLE_finish
+    LOAD64(rARG0, rARG1, rOBJ)             #  a0/a1 <- vBB/vBB+1
+    b         OP_CMPL_DOUBLE_continue
+#else
+    LOAD64_F(fs0, fs0f, rOBJ)
+    LOAD64_F(fs1, fs1f, rBIX)
+    c.olt.d   fcc0, fs0, fs1
+    li        rTEMP, -1
+    bc1t      fcc0, OP_CMPL_DOUBLE_finish
+    c.olt.d   fcc0, fs1, fs0
+    li        rTEMP, 1
+    bc1t      fcc0, OP_CMPL_DOUBLE_finish
+    c.eq.d    fcc0, fs0, fs1
+    li        rTEMP, 0
+    bc1t      fcc0, OP_CMPL_DOUBLE_finish
+    b         OP_CMPL_DOUBLE_nan
+#endif
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_CMPG_DOUBLE: /* 0x30 */
+/* File: mips/OP_CMPG_DOUBLE.S */
+/* File: mips/OP_CMPL_DOUBLE.S */
+    /*
+     * Compare two floating-point values.  Puts 0, 1, or -1 into the
+     * destination register based on the results of the comparison.
+     *
+     * Provide a "naninst" instruction that puts 1 or -1 into a1 depending
+     * on what value we'd like to return when one of the operands is NaN.
+     *
+     * See OP_CMPL_FLOAT for an explanation.
+     *
+     * For: cmpl-double, cmpg-double
+     */
+    /* op vAA, vBB, vCC */
+
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    and       rOBJ, a0, 255                #  rOBJ <- BB
+    srl       rBIX, a0, 8                  #  rBIX <- CC
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[BB]
+    EAS2(rBIX, rFP, rBIX)                  #  rBIX <- &fp[CC]
+#ifdef SOFT_FLOAT
+    LOAD64(rARG0, rARG1, rOBJ)             #  a0/a1 <- vBB/vBB+1
+    LOAD64(rARG2, rARG3, rBIX)             #  a2/a3 <- vCC/vCC+1
+    JAL(__eqdf2)                           #  v0 <- 0 if vBB == vCC
+    li        rTEMP, 0
+    beqz      v0, OP_CMPG_DOUBLE_finish
+
+    LOAD64(rARG0, rARG1, rOBJ)             #  a0/a1 <- vBB/vBB+1
+    LOAD64(rARG2, rARG3, rBIX)             #  a2/a3 <- vCC/vCC+1
+    JAL(__ltdf2)
+    li        rTEMP, -1
+    bltz      v0, OP_CMPG_DOUBLE_finish
+    LOAD64(rARG0, rARG1, rOBJ)             #  a0/a1 <- vBB/vBB+1
+    b         OP_CMPG_DOUBLE_continue
+#else
+    LOAD64_F(fs0, fs0f, rOBJ)
+    LOAD64_F(fs1, fs1f, rBIX)
+    c.olt.d   fcc0, fs0, fs1
+    li        rTEMP, -1
+    bc1t      fcc0, OP_CMPG_DOUBLE_finish
+    c.olt.d   fcc0, fs1, fs0
+    li        rTEMP, 1
+    bc1t      fcc0, OP_CMPG_DOUBLE_finish
+    c.eq.d    fcc0, fs0, fs1
+    li        rTEMP, 0
+    bc1t      fcc0, OP_CMPG_DOUBLE_finish
+    b         OP_CMPG_DOUBLE_nan
+#endif
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_CMP_LONG: /* 0x31 */
+/* File: mips/OP_CMP_LONG.S */
+    /*
+     * Compare two 64-bit values
+     *    x = y     return  0
+     *    x < y     return -1
+     *    x > y     return  1
+     *
+     * I think I can improve on the ARM code with the following observation
+     *    slt   t0,  x.hi, y.hi;	# (x.hi < y.hi) ? 1:0
+     *    sgt   t1,  x.hi, y.hi;	# (x.hi > y.hi) ? 1:0
+     *    subu  v0, t1, t0              # v0 <- -1:1:0 for [ < > = ]
+     */
+    /* cmp-long vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[CC]
+    LOAD64(a0, a1, a2)                     #  a0/a1 <- vBB/vBB+1
+    LOAD64(a2, a3, a3)                     #  a2/a3 <- vCC/vCC+1
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    slt       t0, a1, a3                   #  compare hi
+    sgt       t1, a1, a3
+    subu      v0, t1, t0                   #  v0 <- (-1, 1, 0)
+    bnez      v0, .LOP_CMP_LONG_finish
+    # at this point x.hi==y.hi
+    sltu      t0, a0, a2                   #  compare lo
+    sgtu      t1, a0, a2
+    subu      v0, t1, t0                   #  v0 <- (-1, 1, 0) for [< > =]
+
+.LOP_CMP_LONG_finish:
+    SET_VREG(v0, rOBJ)                     #  vAA <- v0
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
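+    /*
+     * The slt/sgt/subu pairs form a branch-free three-way compare; the
+     * same idea in C (note the low words compare unsigned):
+     *
+     *   int res = (x.hi > y.hi) - (x.hi < y.hi);            // signed
+     *   if (res == 0)
+     *       res = ((u4)x.lo > (u4)y.lo) - ((u4)x.lo < (u4)y.lo);
+     *   fp[AA] = res;                                        // -1, 0, or 1
+     */
+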
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IF_EQ: /* 0x32 */
+/* File: mips/OP_IF_EQ.S */
+/* File: mips/bincmp.S */
+    /*
+     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+     */
+    /* if-cmp vA, vB, +CCCC */
+    GET_OPA4(a0)                           #  a0 <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    GET_VREG(a3, a1)                       #  a3 <- vB
+    GET_VREG(a2, a0)                       #  a2 <- vA
+    bne a2, a3, 1f                  #  branch to 1 if comparison failed
+    FETCH_S(a1, 1)                         #  a1<- branch offset, in code units
+    b 2f
+1:
+    li        a1, 2                        #  a1 <- dist (code units) for not-taken
+2:
+    addu      a2, a1, a1                   #  convert to bytes
+    FETCH_ADVANCE_INST_RB(a2)              #  update rPC, load rINST
+#if defined(WITH_JIT)
+    lw        a0, offThread_pJitProfTable(rSELF)
+    bgez      a2, 3f
+    lw        rIBASE, offThread_curHandlerTable(rSELF)  # refresh rIBASE
+3:
+    bnez      a0, common_updateProfile
+#else
+    bgez      a2, 4f
+    lw        rIBASE, offThread_curHandlerTable(rSELF)  # refresh rIBASE
+4:
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
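+    /*
+     * All six if-cmp handlers are stamped from this template; only the
+     * reversed branch condition changes.  The distance logic, in C:
+     *
+     *   s4 dist = taken ? (s2) FETCH(1)   // signed CCCC, code units
+     *                   : 2;              // skip this 2-unit insn
+     *   rPC += dist;                      // asm doubles dist into bytes;
+     *                                     // a backward branch (dist < 0)
+     *                                     // refreshes rIBASE, and the JIT
+     *                                     // build consults the profiler
+     */
+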
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IF_NE: /* 0x33 */
+/* File: mips/OP_IF_NE.S */
+/* File: mips/bincmp.S */
+    /*
+     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+     */
+    /* if-cmp vA, vB, +CCCC */
+    GET_OPA4(a0)                           #  a0 <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    GET_VREG(a3, a1)                       #  a3 <- vB
+    GET_VREG(a2, a0)                       #  a2 <- vA
+    beq a2, a3, 1f                  #  branch to 1 if comparison failed
+    FETCH_S(a1, 1)                         #  a1<- branch offset, in code units
+    b 2f
+1:
+    li        a1, 2                        #  a1 <- dist (code units) for not-taken
+2:
+    addu      a2, a1, a1                   #  convert to bytes
+    FETCH_ADVANCE_INST_RB(a2)              #  update rPC, load rINST
+#if defined(WITH_JIT)
+    lw        a0, offThread_pJitProfTable(rSELF)
+    bgez      a2, 3f
+    lw        rIBASE, offThread_curHandlerTable(rSELF)  # refresh rIBASE
+3:
+    bnez      a0, common_updateProfile
+#else
+    bgez      a2, 4f
+    lw        rIBASE, offThread_curHandlerTable(rSELF)  # refresh rIBASE
+4:
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IF_LT: /* 0x34 */
+/* File: mips/OP_IF_LT.S */
+/* File: mips/bincmp.S */
+    /*
+     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+     */
+    /* if-cmp vA, vB, +CCCC */
+    GET_OPA4(a0)                           #  a0 <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    GET_VREG(a3, a1)                       #  a3 <- vB
+    GET_VREG(a2, a0)                       #  a2 <- vA
+    bge a2, a3, 1f                  #  branch to 1 if comparison failed
+    FETCH_S(a1, 1)                         #  a1<- branch offset, in code units
+    b 2f
+1:
+    li        a1, 2                        #  a1 <- dist (code units) for not-taken
+2:
+    addu      a2, a1, a1                   #  convert to bytes
+    FETCH_ADVANCE_INST_RB(a2)              #  update rPC, load rINST
+#if defined(WITH_JIT)
+    lw        a0, offThread_pJitProfTable(rSELF)
+    bgez      a2, 3f
+    lw        rIBASE, offThread_curHandlerTable(rSELF)  # refresh rIBASE
+3:
+    bnez      a0, common_updateProfile
+#else
+    bgez      a2, 4f
+    lw        rIBASE, offThread_curHandlerTable(rSELF)  # refresh rIBASE
+4:
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IF_GE: /* 0x35 */
+/* File: mips/OP_IF_GE.S */
+/* File: mips/bincmp.S */
+    /*
+     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+     */
+    /* if-cmp vA, vB, +CCCC */
+    GET_OPA4(a0)                           #  a0 <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    GET_VREG(a3, a1)                       #  a3 <- vB
+    GET_VREG(a2, a0)                       #  a2 <- vA
+    blt a2, a3, 1f                  #  branch to 1 if comparison failed
+    FETCH_S(a1, 1)                         #  a1<- branch offset, in code units
+    b 2f
+1:
+    li        a1, 2                        #  a1 <- dist (code units) for not-taken
+2:
+    addu      a2, a1, a1                   #  convert to bytes
+    FETCH_ADVANCE_INST_RB(a2)              #  update rPC, load rINST
+#if defined(WITH_JIT)
+    lw        a0, offThread_pJitProfTable(rSELF)
+    bgez      a2, 3f
+    lw        rIBASE, offThread_curHandlerTable(rSELF)  # refresh rIBASE
+3:
+    bnez      a0, common_updateProfile
+#else
+    bgez      a2, 4f
+    lw        rIBASE, offThread_curHandlerTable(rSELF)  # refresh rIBASE
+4:
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IF_GT: /* 0x36 */
+/* File: mips/OP_IF_GT.S */
+/* File: mips/bincmp.S */
+    /*
+     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+     */
+    /* if-cmp vA, vB, +CCCC */
+    GET_OPA4(a0)                           #  a0 <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    GET_VREG(a3, a1)                       #  a3 <- vB
+    GET_VREG(a2, a0)                       #  a2 <- vA
+    ble a2, a3, 1f                  #  branch to 1 if comparison failed
+    FETCH_S(a1, 1)                         #  a1<- branch offset, in code units
+    b 2f
+1:
+    li        a1, 2                        #  a1 <- dist (code units) for not-taken
+2:
+    addu      a2, a1, a1                   #  convert to bytes
+    FETCH_ADVANCE_INST_RB(a2)              #  update rPC, load rINST
+#if defined(WITH_JIT)
+    lw        a0, offThread_pJitProfTable(rSELF)
+    bgez      a2, 3f
+    lw        rIBASE, offThread_curHandlerTable(rSELF)  # refresh rIBASE
+3:
+    bnez      a0, common_updateProfile
+#else
+    bgez      a2, 4f
+    lw        rIBASE, offThread_curHandlerTable(rSELF)  # refresh rIBASE
+4:
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IF_LE: /* 0x37 */
+/* File: mips/OP_IF_LE.S */
+/* File: mips/bincmp.S */
+    /*
+     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+     */
+    /* if-cmp vA, vB, +CCCC */
+    GET_OPA4(a0)                           #  a0 <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    GET_VREG(a3, a1)                       #  a3 <- vB
+    GET_VREG(a2, a0)                       #  a2 <- vA
+    bgt a2, a3, 1f                  #  branch to 1 if comparison failed
+    FETCH_S(a1, 1)                         #  a1<- branch offset, in code units
+    b 2f
+1:
+    li        a1, 2                        #  a1 <- dist (code units) for not-taken
+2:
+    addu      a2, a1, a1                   #  convert to bytes
+    FETCH_ADVANCE_INST_RB(a2)              #  update rPC, load rINST
+#if defined(WITH_JIT)
+    lw        a0, offThread_pJitProfTable(rSELF)
+    bgez      a2, 3f
+    lw        rIBASE, offThread_curHandlerTable(rSELF)  # refresh rIBASE
+3:
+    bnez      a0, common_updateProfile
+#else
+    bgez      a2, 4f
+    lw        rIBASE, offThread_curHandlerTable(rSELF)  # refresh rIBASE
+4:
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IF_EQZ: /* 0x38 */
+/* File: mips/OP_IF_EQZ.S */
+/* File: mips/zcmp.S */
+    /*
+     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+     */
+    /* if-cmp vAA, +BBBB */
+    GET_OPA(a0)                            #  a0 <- AA
+    GET_VREG(a2, a0)                       #  a2 <- vAA
+    FETCH_S(a1, 1)                         #  a1 <- branch offset, in code units
+    bne a2, zero, 1f                #  branch to 1 if comparison failed
+    b 2f
+1:
+    li        a1, 2                        #  a1 <- dist (code units) for not-taken
+2:
+    addu      a1, a1, a1                   #  convert to bytes
+    FETCH_ADVANCE_INST_RB(a1)              #  update rPC, load rINST
+#if defined(WITH_JIT)
+    lw        a0, offThread_pJitProfTable(rSELF)
+    bgez      a1, 3f
+    lw        rIBASE, offThread_curHandlerTable(rSELF)  # refresh table base
+3:
+    bnez      a0, common_updateProfile     #  test for JIT off at target
+#else
+    bgez      a1, 4f
+    lw        rIBASE, offThread_curHandlerTable(rSELF)  # refresh table base
+4:
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IF_NEZ: /* 0x39 */
+/* File: mips/OP_IF_NEZ.S */
+/* File: mips/zcmp.S */
+    /*
+     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+     */
+    /* if-cmp vAA, +BBBB */
+    GET_OPA(a0)                            #  a0 <- AA
+    GET_VREG(a2, a0)                       #  a2 <- vAA
+    FETCH_S(a1, 1)                         #  a1 <- branch offset, in code units
+    beq a2, zero, 1f                #  branch to 1 if comparison failed
+    b 2f
+1:
+    li        a1, 2                        #  a1 <- dist (code units) for not-taken
+2:
+    addu      a1, a1, a1                   #  convert to bytes
+    FETCH_ADVANCE_INST_RB(a1)              #  update rPC, load rINST
+#if defined(WITH_JIT)
+    lw        a0, offThread_pJitProfTable(rSELF)
+    bgez      a1, 3f
+    lw        rIBASE, offThread_curHandlerTable(rSELF)  # refresh table base
+3:
+    bnez      a0, common_updateProfile     #  test for JIT off at target
+#else
+    bgez      a1, 4f
+    lw        rIBASE, offThread_curHandlerTable(rSELF)  # refresh table base
+4:
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IF_LTZ: /* 0x3a */
+/* File: mips/OP_IF_LTZ.S */
+/* File: mips/zcmp.S */
+    /*
+     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+     */
+    /* if-cmp vAA, +BBBB */
+    GET_OPA(a0)                            #  a0 <- AA
+    GET_VREG(a2, a0)                       #  a2 <- vAA
+    FETCH_S(a1, 1)                         #  a1 <- branch offset, in code units
+    bge a2, zero, 1f                #  branch to 1 if comparison failed
+    b 2f
+1:
+    li        a1, 2                        #  a1 <- dist (code units) for not-taken
+2:
+    addu      a1, a1, a1                   #  convert to bytes
+    FETCH_ADVANCE_INST_RB(a1)              #  update rPC, load rINST
+#if defined(WITH_JIT)
+    lw        a0, offThread_pJitProfTable(rSELF)
+    bgez      a1, 3f
+    lw        rIBASE, offThread_curHandlerTable(rSELF)  # refresh table base
+3:
+    bnez      a0, common_updateProfile     #  test for JIT off at target
+#else
+    bgez      a1, 4f
+    lw        rIBASE, offThread_curHandlerTable(rSELF)  # refresh table base
+4:
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IF_GEZ: /* 0x3b */
+/* File: mips/OP_IF_GEZ.S */
+/* File: mips/zcmp.S */
+    /*
+     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+     */
+    /* if-cmp vAA, +BBBB */
+    GET_OPA(a0)                            #  a0 <- AA
+    GET_VREG(a2, a0)                       #  a2 <- vAA
+    FETCH_S(a1, 1)                         #  a1 <- branch offset, in code units
+    blt a2, zero, 1f                #  branch to 1 if comparison failed
+    b 2f
+1:
+    li        a1, 2                        #  a1 <- dist (code units) for not-taken
+2:
+    addu      a1, a1, a1                   #  convert to bytes
+    FETCH_ADVANCE_INST_RB(a1)              #  update rPC, load rINST
+#if defined(WITH_JIT)
+    lw        a0, offThread_pJitProfTable(rSELF)
+    bgez      a1, 3f
+    lw        rIBASE, offThread_curHandlerTable(rSELF)  # refresh table base
+3:
+    bnez      a0, common_updateProfile     #  test for JIT off at target
+#else
+    bgez      a1, 4f
+    lw        rIBASE, offThread_curHandlerTable(rSELF)  # refresh table base
+4:
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IF_GTZ: /* 0x3c */
+/* File: mips/OP_IF_GTZ.S */
+/* File: mips/zcmp.S */
+    /*
+     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+     */
+    /* if-cmp vAA, +BBBB */
+    GET_OPA(a0)                            #  a0 <- AA
+    GET_VREG(a2, a0)                       #  a2 <- vAA
+    FETCH_S(a1, 1)                         #  a1 <- branch offset, in code units
+    ble a2, zero, 1f                #  branch to 1 if comparison failed
+    b 2f
+1:
+    li        a1, 2                        #  a1 <- dist (code units) for not-taken
+2:
+    addu      a1, a1, a1                   #  convert to bytes
+    FETCH_ADVANCE_INST_RB(a1)              #  update rPC, load rINST
+#if defined(WITH_JIT)
+    lw        a0, offThread_pJitProfTable(rSELF)
+    bgez      a1, 3f
+    lw        rIBASE, offThread_curHandlerTable(rSELF)  # refresh table base
+3:
+    bnez      a0, common_updateProfile     #  test for JIT off at target
+#else
+    bgez      a1, 4f
+    lw        rIBASE, offThread_curHandlerTable(rSELF)  # refresh table base
+4:
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IF_LEZ: /* 0x3d */
+/* File: mips/OP_IF_LEZ.S */
+/* File: mips/zcmp.S */
+    /*
+     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
+     * fragment that specifies the *reverse* comparison to perform, e.g.
+     * for "if-le" you would use "gt".
+     *
+     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+     */
+    /* if-cmp vAA, +BBBB */
+    GET_OPA(a0)                            #  a0 <- AA
+    GET_VREG(a2, a0)                       #  a2 <- vAA
+    FETCH_S(a1, 1)                         #  a1 <- branch offset, in code units
+    bgt a2, zero, 1f                #  branch to 1 if comparison failed
+    b 2f
+1:
+    li        a1, 2                        #  a1 <- dist (code units) for not-taken
+2:
+    addu      a1, a1, a1                   #  convert to bytes
+    FETCH_ADVANCE_INST_RB(a1)              #  update rPC, load rINST
+#if defined(WITH_JIT)
+    lw        a0, offThread_pJitProfTable(rSELF)
+    bgez      a1, 3f
+    lw        rIBASE, offThread_curHandlerTable(rSELF)  # refresh table base
+3:
+    bnez      a0, common_updateProfile     #  test for JIT off at target
+#else
+    bgez      a1, 4f
+    lw        rIBASE, offThread_curHandlerTable(rSELF)  # refresh table base
+4:
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_3E: /* 0x3e */
+/* File: mips/OP_UNUSED_3E.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_3F: /* 0x3f */
+/* File: mips/OP_UNUSED_3F.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_40: /* 0x40 */
+/* File: mips/OP_UNUSED_40.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_41: /* 0x41 */
+/* File: mips/OP_UNUSED_41.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_42: /* 0x42 */
+/* File: mips/OP_UNUSED_42.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_43: /* 0x43 */
+/* File: mips/OP_UNUSED_43.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_AGET: /* 0x44 */
+/* File: mips/OP_AGET.S */
+    /*
+     * Array get, 32 bits or less.  vAA <- vBB[vCC].
+     *
+     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+     * instructions.  We use a pair of FETCH_Bs instead.
+     *
+     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(a2, 1)                         #  a2 <- BB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    FETCH_C(a3, 1)                         #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offArrayObject_length(a3, a0) #  a3 <- arrayObj->length
+    .if 2
+    EASN(a0, a0, a1, 2)               #  a0 <- arrayObj + index*width
+    .else
+    addu      a0, a0, a1
+    .endif
+    # a1 >= a3; compare unsigned index
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    lw a2, offArrayObject_contents(a0)  #  a2 <- vBB[vCC]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a2, rOBJ, t0)            #  vAA <- a2
+
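+    /*
+     * One unsigned compare (bgeu) covers both bounds checks, since a
+     * negative index wraps to a huge unsigned value:
+     *
+     *   if (arrayObj == NULL) goto nullObject;
+     *   if ((u4) index >= (u4) arrayObj->length)
+     *       goto arrayIndexError;
+     *   fp[AA] = arrayObj->contents[index];   // load width per variant
+     */
+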
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_AGET_WIDE: /* 0x45 */
+/* File: mips/OP_AGET_WIDE.S */
+    /*
+     * Array get, 64 bits.  vAA <- vBB[vCC].
+     *
+     * Arrays of long/double are 64-bit aligned.
+     */
+    /* aget-wide vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offArrayObject_length(a3, a0) #  a3 <- arrayObj->length
+    EAS3(a0, a0, a1)                       #  a0 <- arrayObj + index*width
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+
+.LOP_AGET_WIDE_finish:
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    LOAD64_off(a2, a3, a0, offArrayObject_contents)
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[AA]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(a2, a3, rOBJ)                  #  vAA/vAA+1 <- a2/a3
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_AGET_OBJECT: /* 0x46 */
+/* File: mips/OP_AGET_OBJECT.S */
+/* File: mips/OP_AGET.S */
+    /*
+     * Array get, 32 bits or less.  vAA <- vBB[vCC].
+     *
+     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+     * instructions.  We use a pair of FETCH_Bs instead.
+     *
+     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(a2, 1)                         #  a2 <- BB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    FETCH_C(a3, 1)                         #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offArrayObject_length(a3, a0) #  a3 <- arrayObj->length
+    .if 2
+    EASN(a0, a0, a1, 2)               #  a0 <- arrayObj + index*width
+    .else
+    addu      a0, a0, a1
+    .endif
+    # a1 >= a3; compare unsigned index
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    lw a2, offArrayObject_contents(a0)  #  a2 <- vBB[vCC]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a2, rOBJ, t0)            #  vAA <- a2
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_AGET_BOOLEAN: /* 0x47 */
+/* File: mips/OP_AGET_BOOLEAN.S */
+/* File: mips/OP_AGET.S */
+    /*
+     * Array get, 32 bits or less.  vAA <- vBB[vCC].
+     *
+     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+     * instructions.  We use a pair of FETCH_Bs instead.
+     *
+     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(a2, 1)                         #  a2 <- BB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    FETCH_C(a3, 1)                         #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offArrayObject_length(a3, a0) #  a3 <- arrayObj->length
+    .if 0
+    EASN(a0, a0, a1, 0)               #  a0 <- arrayObj + index*width
+    .else
+    addu      a0, a0, a1
+    .endif
+    # a1 >= a3; compare unsigned index
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    lbu a2, offArrayObject_contents(a0)  #  a2 <- vBB[vCC]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a2, rOBJ, t0)            #  vAA <- a2
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_AGET_BYTE: /* 0x48 */
+/* File: mips/OP_AGET_BYTE.S */
+/* File: mips/OP_AGET.S */
+    /*
+     * Array get, 32 bits or less.  vAA <- vBB[vCC].
+     *
+     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+     * instructions.  We use a pair of FETCH_Bs instead.
+     *
+     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(a2, 1)                         #  a2 <- BB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    FETCH_C(a3, 1)                         #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offArrayObject_length(a3, a0) #  a3 <- arrayObj->length
+    .if 0
+    EASN(a0, a0, a1, 0)               #  a0 <- arrayObj + index*width
+    .else
+    addu      a0, a0, a1
+    .endif
+    # a1 >= a3; compare unsigned index
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    lb a2, offArrayObject_contents(a0)  #  a2 <- vBB[vCC]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a2, rOBJ, t0)            #  vAA <- a2
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_AGET_CHAR: /* 0x49 */
+/* File: mips/OP_AGET_CHAR.S */
+/* File: mips/OP_AGET.S */
+    /*
+     * Array get, 32 bits or less.  vAA <- vBB[vCC].
+     *
+     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+     * instructions.  We use a pair of FETCH_Bs instead.
+     *
+     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(a2, 1)                         #  a2 <- BB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    FETCH_C(a3, 1)                         #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offArrayObject_length(a3, a0) #  a3 <- arrayObj->length
+    .if 1
+    EASN(a0, a0, a1, 1)               #  a0 <- arrayObj + index*width
+    .else
+    addu      a0, a0, a1
+    .endif
+    # a1 >= a3; compare unsigned index
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    lhu a2, offArrayObject_contents(a0)  #  a2 <- vBB[vCC]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a2, rOBJ, t0)            #  vAA <- a2
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_AGET_SHORT: /* 0x4a */
+/* File: mips/OP_AGET_SHORT.S */
+/* File: mips/OP_AGET.S */
+    /*
+     * Array get, 32 bits or less.  vAA <- vBB[vCC].
+     *
+     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+     * instructions.  We use a pair of FETCH_Bs instead.
+     *
+     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(a2, 1)                         #  a2 <- BB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    FETCH_C(a3, 1)                         #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offArrayObject_length(a3, a0) #  a3 <- arrayObj->length
+    .if 1
+    EASN(a0, a0, a1, 1)               #  a0 <- arrayObj + index*width
+    .else
+    addu      a0, a0, a1
+    .endif
+    # a1 >= a3; compare unsigned index
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    lh a2, offArrayObject_contents(a0)  #  a2 <- vBB[vCC]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a2, rOBJ, t0)            #  vAA <- a2
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_APUT: /* 0x4b */
+/* File: mips/OP_APUT.S */
+    /*
+     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(a2, 1)                         #  a2 <- BB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    FETCH_C(a3, 1)                         #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offArrayObject_length(a3, a0) #  a3 <- arrayObj->length
+    .if 2
+    EASN(a0, a0, a1, 2)               #  a0 <- arrayObj + index*width
+    .else
+    addu      a0, a0, a1
+    .endif
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_VREG(a2, rOBJ)                     #  a2 <- vAA
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    sw a2, offArrayObject_contents(a0) #  vBB[vCC] <- a2
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_APUT_WIDE: /* 0x4c */
+/* File: mips/OP_APUT_WIDE.S */
+    /*
+     * Array put, 64 bits.  vBB[vCC] <- vAA.
+     *
+     * Arrays of long/double are 64-bit aligned, so it's okay to use a
+     * single 64-bit store (STORE64 here; "STRD" is an ARM leftover).
+     */
+    /* aput-wide vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(t0)                            #  t0 <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offArrayObject_length(a3, a0) #  a3 <- arrayObj->length
+    EAS3(a0, a0, a1)                       #  a0 <- arrayObj + index*width
+    EAS2(rOBJ, rFP, t0)                    #  rOBJ <- &fp[AA]
+    # compare unsigned index, length
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    LOAD64(a2, a3, rOBJ)                   #  a2/a3 <- vAA/vAA+1
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64_off(a2, a3, a0, offArrayObject_contents) #  a2/a3 <- vBB[vCC]
+    GOTO_OPCODE(t0)                        #  jump to next instruction
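+    /*
+     * Note (editorial, assumed from the surrounding macros): the
+     * FETCH/and/srl sequence above is the open-coded equivalent of the
+     * FETCH_B/FETCH_C pair used by the narrower array ops -- e.g. for
+     * CCBB = 0x0302, "and a2, a0, 255" leaves BB=0x02 and
+     * "srl a3, a0, 8" leaves CC=0x03.
+     */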
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_APUT_OBJECT: /* 0x4d */
+/* File: mips/OP_APUT_OBJECT.S */
+    /*
+     * Store an object into an array.  vBB[vCC] <- vAA.
+     *
+     */
+    /* op vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(t1)                            #  t1 <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    GET_VREG(rINST, a2)                    #  rINST <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    GET_VREG(rBIX, t1)                     #  rBIX <- vAA
+    # null array object?
+    beqz      rINST, common_errNullObject  #  yes, bail
+
+    LOAD_base_offArrayObject_length(a3, rINST) #  a3 <- arrayObj->length
+    EAS2(rOBJ, rINST, a1)                  #  rOBJ <- arrayObj + index*width
+    # compare unsigned index, length
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+    /*
+     * On entry:
+     *  rINST = vBB (arrayObj)
+     *  rBIX = vAA (obj)
+     *  rOBJ = element address (arrayObj + index * width; the contents
+     *         offset is added at the store)
+     */
+    bnez      rBIX, .LOP_APUT_OBJECT_checks     #  non-null, do type checks
+.LOP_APUT_OBJECT_finish:
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    sw        rBIX, offArrayObject_contents(rOBJ) #  vBB[vCC] <- vAA
+    GOTO_OPCODE(t0)                        #  jump to next instruction
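+    /*
+     * Why a null store skips .LOP_APUT_OBJECT_checks (sketch; the stub
+     * body is out of line and not shown in this hunk): storing null can
+     * never violate the array's element type, so the handler falls
+     * straight through to the plain "sw" above, while a non-null vAA is
+     * routed through an assignability test against arrayObj's class.
+     */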
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_APUT_BOOLEAN: /* 0x4e */
+/* File: mips/OP_APUT_BOOLEAN.S */
+/* File: mips/OP_APUT.S */
+    /*
+     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(a2, 1)                         #  a2 <- BB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    FETCH_C(a3, 1)                         #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offArrayObject_length(a3, a0) #  a3 <- arrayObj->length
+    .if 0
+    EASN(a0, a0, a1, 0)               #  a0 <- arrayObj + index*width
+    .else
+    addu      a0, a0, a1
+    .endif
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_VREG(a2, rOBJ)                     #  a2 <- vAA
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    sb a2, offArrayObject_contents(a0) #  vBB[vCC] <- a2
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_APUT_BYTE: /* 0x4f */
+/* File: mips/OP_APUT_BYTE.S */
+/* File: mips/OP_APUT.S */
+    /*
+     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(a2, 1)                         #  a2 <- BB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    FETCH_C(a3, 1)                         #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offArrayObject_length(a3, a0) #  a3 <- arrayObj->length
+    .if 0
+    EASN(a0, a0, a1, 0)               #  a0 <- arrayObj + index*width
+    .else
+    addu      a0, a0, a1
+    .endif
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_VREG(a2, rOBJ)                     #  a2 <- vAA
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    sb a2, offArrayObject_contents(a0) #  vBB[vCC] <- a2
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_APUT_CHAR: /* 0x50 */
+/* File: mips/OP_APUT_CHAR.S */
+/* File: mips/OP_APUT.S */
+    /*
+     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(a2, 1)                         #  a2 <- BB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    FETCH_C(a3, 1)                         #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offArrayObject_length(a3, a0) #  a3 <- arrayObj->length
+    .if 1
+    EASN(a0, a0, a1, 1)               #  a0 <- arrayObj + index*width
+    .else
+    addu      a0, a0, a1
+    .endif
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_VREG(a2, rOBJ)                     #  a2 <- vAA
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    sh a2, offArrayObject_contents(a0) #  vBB[vCC] <- a2
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_APUT_SHORT: /* 0x51 */
+/* File: mips/OP_APUT_SHORT.S */
+/* File: mips/OP_APUT.S */
+    /*
+     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(a2, 1)                         #  a2 <- BB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    FETCH_C(a3, 1)                         #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offArrayObject_length(a3, a0) #  a3 <- arrayObj->length
+    .if 1
+    EASN(a0, a0, a1, 1)               #  a0 <- arrayObj + index*width
+    .else
+    addu      a0, a0, a1
+    .endif
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_VREG(a2, rOBJ)                     #  a2 <- vAA
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    sh a2, offArrayObject_contents(a0) #  vBB[vCC] <- a2
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IGET: /* 0x52 */
+/* File: mips/OP_IGET.S */
+    /*
+     * General 32-bit instance field get.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    # op vA, vB, field                     /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IGET_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # test results
+    move      a0, v0
+    bnez      v0, .LOP_IGET_finish
+    b         common_exceptionThrown
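+    /*
+     * Resolution-cache sketch (behavior implied by the handler above,
+     * not new code): the first execution of a given iget finds a null
+     * entry in pDvmDex->pResFields[CCCC], calls dvmResolveInstField, and
+     * continues to the same _finish path with the resolved InstField in
+     * a0; the resolver is expected to cache the result, so later
+     * executions take the bnez fast path with no call.
+     */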
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IGET_WIDE: /* 0x53 */
+/* File: mips/OP_IGET_WIDE.S */
+    /*
+     * 64-bit instance field get.
+     */
+    # iget-wide vA, vB, field              /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IGET_WIDE_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # test return code
+    move      a0, v0
+    bnez      v0, .LOP_IGET_WIDE_finish
+    b         common_exceptionThrown
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IGET_OBJECT: /* 0x54 */
+/* File: mips/OP_IGET_OBJECT.S */
+/* File: mips/OP_IGET.S */
+    /*
+     * General 32-bit instance field get.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    # op vA, vB, field                     /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IGET_OBJECT_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # test results
+    move      a0, v0
+    bnez      v0, .LOP_IGET_OBJECT_finish
+    b         common_exceptionThrown
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IGET_BOOLEAN: /* 0x55 */
+/* File: mips/OP_IGET_BOOLEAN.S */
+/* File: mips/OP_IGET.S */
+    /*
+     * General 32-bit instance field get.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    # op vA, vB, field                     /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IGET_BOOLEAN_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # test results
+    move      a0, v0
+    bnez      v0, .LOP_IGET_BOOLEAN_finish
+    b         common_exceptionThrown
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IGET_BYTE: /* 0x56 */
+/* File: mips/OP_IGET_BYTE.S */
+/* File: mips/OP_IGET.S */
+    /*
+     * General 32-bit instance field get.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    # op vA, vB, field                     /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IGET_BYTE_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # test results
+    move      a0, v0
+    bnez      v0, .LOP_IGET_BYTE_finish
+    b         common_exceptionThrown
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IGET_CHAR: /* 0x57 */
+/* File: mips/OP_IGET_CHAR.S */
+/* File: mips/OP_IGET.S */
+    /*
+     * General 32-bit instance field get.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    # op vA, vB, field                     /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IGET_CHAR_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # test results
+    move      a0, v0
+    bnez      v0, .LOP_IGET_CHAR_finish
+    b         common_exceptionThrown
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IGET_SHORT: /* 0x58 */
+/* File: mips/OP_IGET_SHORT.S */
+/* File: mips/OP_IGET.S */
+    /*
+     * General 32-bit instance field get.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    # op vA, vB, field                     /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IGET_SHORT_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # test results
+    move      a0, v0
+    bnez      v0, .LOP_IGET_SHORT_finish
+    b         common_exceptionThrown
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IPUT: /* 0x59 */
+/* File: mips/OP_IPUT.S */
+    /*
+     * General 32-bit instance field put.
+     *
+     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+     */
+    # op vA, vB, field                     /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IPUT_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # success?
+    move      a0, v0
+    bnez      v0, .LOP_IPUT_finish       #  yes, finish up
+    b         common_exceptionThrown
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IPUT_WIDE: /* 0x5a */
+/* File: mips/OP_IPUT_WIDE.S */
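+    /*
+     * 64-bit instance field put.
+     */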
+    # iput-wide vA, vB, field              /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IPUT_WIDE_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # success?
+    move      a0, v0
+    bnez      v0, .LOP_IPUT_WIDE_finish       #  yes, finish up
+    b         common_exceptionThrown
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IPUT_OBJECT: /* 0x5b */
+/* File: mips/OP_IPUT_OBJECT.S */
+    /*
+     * 32-bit instance field put.
+     *
+     * for: iput-object, iput-object-volatile
+     */
+    # op vA, vB, field                     /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IPUT_OBJECT_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # success?
+    move      a0, v0
+    bnez      v0, .LOP_IPUT_OBJECT_finish       #  yes, finish up
+    b         common_exceptionThrown
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IPUT_BOOLEAN: /* 0x5c */
+/* File: mips/OP_IPUT_BOOLEAN.S */
+/* File: mips/OP_IPUT.S */
+    /*
+     * General 32-bit instance field put.
+     *
+     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+     */
+    # op vA, vB, field                     /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IPUT_BOOLEAN_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # success?
+    move      a0, v0
+    bnez      v0, .LOP_IPUT_BOOLEAN_finish       #  yes, finish up
+    b         common_exceptionThrown
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IPUT_BYTE: /* 0x5d */
+/* File: mips/OP_IPUT_BYTE.S */
+/* File: mips/OP_IPUT.S */
+    /*
+     * General 32-bit instance field put.
+     *
+     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+     */
+    # op vA, vB, field                     /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IPUT_BYTE_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # success?
+    move      a0, v0
+    bnez      v0, .LOP_IPUT_BYTE_finish       #  yes, finish up
+    b         common_exceptionThrown
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IPUT_CHAR: /* 0x5e */
+/* File: mips/OP_IPUT_CHAR.S */
+/* File: mips/OP_IPUT.S */
+    /*
+     * General 32-bit instance field put.
+     *
+     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+     */
+    # op vA, vB, field                     /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IPUT_CHAR_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # success?
+    move      a0, v0
+    bnez      v0, .LOP_IPUT_CHAR_finish       #  yes, finish up
+    b         common_exceptionThrown
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IPUT_SHORT: /* 0x5f */
+/* File: mips/OP_IPUT_SHORT.S */
+/* File: mips/OP_IPUT.S */
+    /*
+     * General 32-bit instance field put.
+     *
+     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+     */
+    # op vA, vB, field                     /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IPUT_SHORT_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # success?
+    move      a0, v0
+    bnez      v0, .LOP_IPUT_SHORT_finish       #  yes, finish up
+    b         common_exceptionThrown
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SGET: /* 0x60 */
+/* File: mips/OP_SGET.S */
+    /*
+     * General 32-bit SGET handler.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    # op vAA, field                        /* BBBB */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref BBBB
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    # is resolved entry !null?
+    bnez      a0, .LOP_SGET_finish
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  BBBB field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() could throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    # success?
+    beqz      v0, common_exceptionThrown   #  no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b        .LOP_SGET_finish            # resume
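+    /*
+     * Ordering note (general mterp convention, stated as an assumption):
+     * EXPORT_PC() must precede the JAL because dvmResolveStaticField can
+     * throw, and the exception path recovers the Dalvik PC from the
+     * frame's save area rather than from the rPC register.
+     */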
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SGET_WIDE: /* 0x61 */
+/* File: mips/OP_SGET_WIDE.S */
+    /*
+     * 64-bit SGET handler.
+     */
+    # sget-wide vAA, field                 /* BBBB */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref BBBB
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_SGET_WIDE_finish
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  BBBB field ref
+     *  rBIX: dvmDex->pResFields
+     *
+     * Returns StaticField pointer in v0.
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() could throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    # success?
+    beqz      v0, common_exceptionThrown   #  no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+
+    b        .LOP_SGET_WIDE_finish            # resume
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SGET_OBJECT: /* 0x62 */
+/* File: mips/OP_SGET_OBJECT.S */
+/* File: mips/OP_SGET.S */
+    /*
+     * General 32-bit SGET handler.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    # op vAA, field                        /* BBBB */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref BBBB
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    # is resolved entry !null?
+    bnez      a0, .LOP_SGET_OBJECT_finish
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  BBBB field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() could throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    # success?
+    beqz      v0, common_exceptionThrown   #  no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b        .LOP_SGET_OBJECT_finish            # resume
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SGET_BOOLEAN: /* 0x63 */
+/* File: mips/OP_SGET_BOOLEAN.S */
+/* File: mips/OP_SGET.S */
+    /*
+     * General 32-bit SGET handler.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    # op vAA, field                        /* BBBB */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref BBBB
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    # is resolved entry !null?
+    bnez      a0, .LOP_SGET_BOOLEAN_finish
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  BBBB field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() could throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    # success?
+    beqz      v0, common_exceptionThrown   #  no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b        .LOP_SGET_BOOLEAN_finish            # resume
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SGET_BYTE: /* 0x64 */
+/* File: mips/OP_SGET_BYTE.S */
+/* File: mips/OP_SGET.S */
+    /*
+     * General 32-bit SGET handler.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    # op vAA, field                        /* BBBB */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref BBBB
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    # is resolved entry !null?
+    bnez      a0, .LOP_SGET_BYTE_finish
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  BBBB field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() could throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    # success?
+    beqz      v0, common_exceptionThrown   #  no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b        .LOP_SGET_BYTE_finish            # resume
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SGET_CHAR: /* 0x65 */
+/* File: mips/OP_SGET_CHAR.S */
+/* File: mips/OP_SGET.S */
+    /*
+     * General 32-bit SGET handler.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    # op vAA, field                        /* BBBB */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref BBBB
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    # is resolved entry !null?
+    bnez      a0, .LOP_SGET_CHAR_finish
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  BBBB field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() could throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    # success?
+    beqz      v0, common_exceptionThrown   #  no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b        .LOP_SGET_CHAR_finish            # resume
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SGET_SHORT: /* 0x66 */
+/* File: mips/OP_SGET_SHORT.S */
+/* File: mips/OP_SGET.S */
+    /*
+     * General 32-bit SGET handler.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    # op vAA, field                        /* BBBB */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref BBBB
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    # is resolved entry !null?
+    bnez      a0, .LOP_SGET_SHORT_finish
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  BBBB field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() could throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    # success?
+    beqz      v0, common_exceptionThrown   #  no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b        .LOP_SGET_SHORT_finish            # resume
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SPUT: /* 0x67 */
+/* File: mips/OP_SPUT.S */
+    /*
+     * General 32-bit SPUT handler.
+     *
+     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
+     */
+    # op vAA, field                        /* BBBB */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref BBBB
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    bnez      a0, .LOP_SPUT_finish       #  is resolved entry null?
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  BBBB field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() may throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    beqz      v0, common_exceptionThrown   #  success? no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b        .LOP_SPUT_finish            # resume
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SPUT_WIDE: /* 0x68 */
+/* File: mips/OP_SPUT_WIDE.S */
+    /*
+     * 64-bit SPUT handler.
+     */
+    # sput-wide vAA, field                 /* BBBB */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref BBBB
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    GET_OPA(t0)                            #  t0 <- AA
+    LOAD_eas2(a2, rBIX, a1)                #  a2 <- resolved StaticField ptr
+    EAS2(rOBJ, rFP, t0)                    #  rOBJ<- &fp[AA]
+    # is resolved entry null?
+    beqz      a2, .LOP_SPUT_WIDE_resolve      #  yes, do resolve
+.LOP_SPUT_WIDE_finish:                        #  field ptr in a2, AA in rOBJ
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    LOAD64(a0, a1, rOBJ)                   #  a0/a1 <- vAA/vAA+1
+    GET_INST_OPCODE(rBIX)                  #  extract opcode from rINST
+    .if 0
+    addu    a2, offStaticField_value       #  a2<- pointer to data
+    JAL(dvmQuasiAtomicSwap64Sync)          #  stores a0/a1 into addr a2
+    .else
+    STORE64_off(a0, a1, a2, offStaticField_value) #  field <- vAA/vAA+1
+    .endif
+    GOTO_OPCODE(rBIX)                      #  jump to next instruction
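+    /*
+     * Editorial note on the ".if 0" arm above: this template is shared
+     * with the volatile variant; when it is expanded for
+     * sput-wide-volatile the condition is presumably 1, selecting the
+     * dvmQuasiAtomicSwap64Sync call so the 64-bit store is atomic.
+     */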
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SPUT_OBJECT: /* 0x69 */
+/* File: mips/OP_SPUT_OBJECT.S */
+    /*
+     * General 32-bit SPUT handler.
+     *
+     * for: sput-object, sput-object-volatile
+     */
+    /* op vAA, field@BBBB */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref BBBB
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    bnez      a0, .LOP_SPUT_OBJECT_finish       #  is resolved entry null?
+
+    /* Continuation if the field has not yet been resolved.
+     * a1:  BBBB field ref
+     * rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() may throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    beqz      v0, common_exceptionThrown   #  success? no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b       .LOP_SPUT_OBJECT_finish             # resume
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SPUT_BOOLEAN: /* 0x6a */
+/* File: mips/OP_SPUT_BOOLEAN.S */
+/* File: mips/OP_SPUT.S */
+    /*
+     * General 32-bit SPUT handler.
+     *
+     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
+     */
+    # op vAA, field                        /* BBBB */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref BBBB
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    bnez      a0, .LOP_SPUT_BOOLEAN_finish       #  is resolved entry null?
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  BBBB field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() may throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    beqz      v0, common_exceptionThrown   #  success? no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b        .LOP_SPUT_BOOLEAN_finish            # resume
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SPUT_BYTE: /* 0x6b */
+/* File: mips/OP_SPUT_BYTE.S */
+/* File: mips/OP_SPUT.S */
+    /*
+     * General 32-bit SPUT handler.
+     *
+     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
+     */
+    # op vAA, field                        /* BBBB */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref BBBB
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    bnez      a0, .LOP_SPUT_BYTE_finish       #  is resolved entry null?
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  BBBB field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() may throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    beqz      v0, common_exceptionThrown   #  success? no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b        .LOP_SPUT_BYTE_finish            # resume
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SPUT_CHAR: /* 0x6c */
+/* File: mips/OP_SPUT_CHAR.S */
+/* File: mips/OP_SPUT.S */
+    /*
+     * General 32-bit SPUT handler.
+     *
+     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
+     */
+    # op vAA, field                        /* BBBB */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref BBBB
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    bnez      a0, .LOP_SPUT_CHAR_finish       #  is resolved entry null?
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  BBBB field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() may throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    beqz      v0, common_exceptionThrown   #  success? no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b        .LOP_SPUT_CHAR_finish            # resume
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SPUT_SHORT: /* 0x6d */
+/* File: mips/OP_SPUT_SHORT.S */
+/* File: mips/OP_SPUT.S */
+    /*
+     * General 32-bit SPUT handler.
+     *
+     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
+     */
+    # op vAA, field                        /* BBBB */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref BBBB
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    bnez      a0, .LOP_SPUT_SHORT_finish       #  is resolved entry null?
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  BBBB field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() may throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    beqz      v0, common_exceptionThrown   #  success? no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b        .LOP_SPUT_SHORT_finish            # resume
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_INVOKE_VIRTUAL: /* 0x6e */
+/* File: mips/OP_INVOKE_VIRTUAL.S */
+    /*
+     * Handle a virtual method call.
+     *
+     * for: invoke-virtual, invoke-virtual/range
+     */
+    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
+    # op vAA, {vCCCC..v(CCCC+AA-1)}, meth  /* BBBB */
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- pDvmDex
+    FETCH(a1, 1)                           #  a1 <- BBBB
+    LOAD_base_offDvmDex_pResMethods(a3, a3) #  a3 <- pDvmDex->pResMethods
+    FETCH(rBIX, 2)                         #  rBIX <- GFED or CCCC
+    LOAD_eas2(a0, a3, a1)                  #  a0 <- resolved baseMethod
+    .if (!0)
+    and       rBIX, rBIX, 15               #  rBIX <- D (or stays CCCC)
+    .endif
+    EXPORT_PC()                            #  must export for invoke
+    # already resolved?
+    bnez      a0, .LOP_INVOKE_VIRTUAL_continue     #  yes, continue on
+
+    LOAD_rSELF_method(a3)                  #  a3 <- self->method
+    LOAD_base_offMethod_clazz(a0, a3)      #  a0 <- method->clazz
+    li        a2, METHOD_VIRTUAL           #  resolver method type
+    JAL(dvmResolveMethod)                  #  v0 <- call(clazz, ref, flags)
+    move      a0, v0
+    # got null?
+    bnez      v0, .LOP_INVOKE_VIRTUAL_continue     #  no, continue
+    b         common_exceptionThrown       #  yes, handle exception
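+    /*
+     * Operand sketch (inferred from the "GFED" comments above, not new
+     * behavior): the third code unit packs the argument registers one
+     * nibble each, D lowest.  For "invoke-virtual {v3, v4}, meth" that
+     * unit is 0x0043, so "and rBIX, rBIX, 15" keeps D=3 -- the first
+     * argument, i.e. the "this" pointer the vtable lookup needs.
+     */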
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_INVOKE_SUPER: /* 0x6f */
+/* File: mips/OP_INVOKE_SUPER.S */
+    /*
+     * Handle a "super" method call.
+     *
+     * for: invoke-super, invoke-super/range
+     */
+    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
+    # op vAA, {vCCCC..v(CCCC+AA-1)}, meth  /* BBBB */
+    FETCH(t0, 2)                           #  t0 <- GFED or CCCC
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- pDvmDex
+    .if (!0)
+    and       t0, t0, 15                   #  t0 <- D (or stays CCCC)
+    .endif
+    FETCH(a1, 1)                           #  a1 <- BBBB
+    LOAD_base_offDvmDex_pResMethods(a3, a3) #  a3 <- pDvmDex->pResMethods
+    GET_VREG(rOBJ, t0)                     #  rOBJ <- "this" ptr
+    LOAD_eas2(a0, a3, a1)                  #  a0 <- resolved baseMethod
+    # null "this"?
+    LOAD_rSELF_method(t1)                  #  t1 <- current method
+    beqz      rOBJ, common_errNullObject   #  null "this", throw exception
+    # cmp a0, 0; already resolved?
+    LOAD_base_offMethod_clazz(rBIX, t1)    #  rBIX <- method->clazz
+    EXPORT_PC()                            #  must export for invoke
+    bnez      a0, .LOP_INVOKE_SUPER_continue     #  resolved, continue on
+
+    move      a0, rBIX                     #  a0 <- method->clazz
+    li        a2, METHOD_VIRTUAL           #  resolver method type
+    JAL(dvmResolveMethod)                  #  v0 <- call(clazz, ref, flags)
+    move      a0, v0
+    # got null?
+    beqz      v0, common_exceptionThrown   #  yes, handle exception
+    b         .LOP_INVOKE_SUPER_continue
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_INVOKE_DIRECT: /* 0x70 */
+/* File: mips/OP_INVOKE_DIRECT.S */
+    /*
+     * Handle a direct method call.
+     *
+     * (We could defer the "is 'this' pointer null" test to the common
+     * method invocation code, and use a flag to indicate that static
+     * calls don't count.  If we do this as part of copying the arguments
+     * out we could avoiding loading the first arg twice.)
+     *
+     * for: invoke-direct, invoke-direct/range
+     */
+    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
+    # op {vCCCC..v(CCCC+AA-1)}, meth       /* BBBB */
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- pDvmDex
+    FETCH(a1, 1)                           #  a1 <- BBBB
+    LOAD_base_offDvmDex_pResMethods(a3, a3) #  a3 <- pDvmDex->pResMethods
+    FETCH(rBIX, 2)                         #  rBIX <- GFED or CCCC
+    LOAD_eas2(a0, a3, a1)                  #  a0 <- resolved methodToCall
+    .if (!0)
+    and       rBIX, rBIX, 15               #  rBIX <- D (or stays CCCC)
+    .endif
+    EXPORT_PC()                            #  must export for invoke
+    GET_VREG(rOBJ, rBIX)                   #  rOBJ <- "this" ptr
+    # already resolved?
+    bnez      a0, 1f                       #  resolved, call the function
+
+    lw        a3, offThread_method(rSELF)  #  a3 <- self->method
+    LOAD_base_offMethod_clazz(a0, a3)      #  a0 <- method->clazz
+    li        a2, METHOD_DIRECT            #  resolver method type
+    JAL(dvmResolveMethod)                  #  v0 <- call(clazz, ref, flags)
+    move      a0, v0
+    # got null?
+    beqz      v0, common_exceptionThrown   #  yes, handle exception
+
+1:
+    bnez      rOBJ, common_invokeMethodNoRange #  a0=method, rOBJ="this"
+    b         common_errNullObject         #  yes, throw exception
+
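+    /*
+     * Equivalent control flow in C (illustrative only; note the null-"this"
+     * test deliberately runs after resolution, matching the code above):
+     *
+     *   Method* m = pDvmDex->pResMethods[BBBB];
+     *   if (m == NULL) {
+     *       m = dvmResolveMethod(self->method->clazz, BBBB, METHOD_DIRECT);
+     *       if (m == NULL) goto exceptionThrown;
+     *   }
+     *   if (this == NULL) goto errNullObject;
+     *   invoke(m, this);                      // common_invokeMethodNoRange
+     */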
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_INVOKE_STATIC: /* 0x71 */
+/* File: mips/OP_INVOKE_STATIC.S */
+    /*
+     * Handle a static method call.
+     *
+     * for: invoke-static, invoke-static/range
+     */
+    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
+    # op {vCCCC..v(CCCC+AA-1)}, meth       /* BBBB */
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- pDvmDex
+    FETCH(a1, 1)                           #  a1 <- BBBB
+    LOAD_base_offDvmDex_pResMethods(a3, a3) #  a3 <- pDvmDex->pResMethods
+    li      rOBJ, 0                        #  null "this" in delay slot
+    LOAD_eas2(a0, a3, a1)                  #  a0 <- resolved methodToCall
+#if defined(WITH_JIT)
+    EAS2(rBIX, a3, a1)                     #  rBIX <- &resolved_methodToCall
+#endif
+    EXPORT_PC()                            #  must export for invoke
+    # already resolved?
+    bnez      a0, common_invokeMethodNoRange #  yes, continue on
+    b         .LOP_INVOKE_STATIC_resolve
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_INVOKE_INTERFACE: /* 0x72 */
+/* File: mips/OP_INVOKE_INTERFACE.S */
+    /*
+     * Handle an interface method call.
+     *
+     * for: invoke-interface, invoke-interface/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    FETCH(a2, 2)                           #  a2 <- FEDC or CCCC
+    FETCH(a1, 1)                           #  a1 <- BBBB
+    .if (!0)
+    and       a2, 15                       #  a2 <- C (or stays CCCC)
+    .endif
+    EXPORT_PC()                            #  must export for invoke
+    GET_VREG(rOBJ, a2)                     #  rOBJ <- first arg ("this")
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- methodClassDex
+    LOAD_rSELF_method(a2)                  #  a2 <- method
+    # null obj?
+    beqz      rOBJ, common_errNullObject   #  yes, fail
+    LOAD_base_offObject_clazz(a0, rOBJ)      #  a0 <- thisPtr->clazz
+    JAL(dvmFindInterfaceMethodInCache)     #  v0 <- call(class, ref, method, dex)
+    move      a0, v0
+    # failed?
+    beqz      v0, common_exceptionThrown   #  yes, handle exception
+    b         common_invokeMethodNoRange #  (a0=method, rOBJ="this")
+
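+    /*
+     * Sketch (illustrative): interface calls skip the pResMethods cache and
+     * look up the concrete target per receiver class instead:
+     *
+     *   if (this == NULL) goto errNullObject;
+     *   Method* m = dvmFindInterfaceMethodInCache(this->clazz, BBBB,
+     *                                             self->method, pDvmDex);
+     *   if (m == NULL) goto exceptionThrown;
+     */
+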
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_73: /* 0x73 */
+/* File: mips/OP_UNUSED_73.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_INVOKE_VIRTUAL_RANGE: /* 0x74 */
+/* File: mips/OP_INVOKE_VIRTUAL_RANGE.S */
+/* File: mips/OP_INVOKE_VIRTUAL.S */
+    /*
+     * Handle a virtual method call.
+     *
+     * for: invoke-virtual, invoke-virtual/range
+     */
+    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
+    # op vAA, {vCCCC..v(CCCC+AA-1)}, meth  /* BBBB */
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- pDvmDex
+    FETCH(a1, 1)                           #  a1 <- BBBB
+    LOAD_base_offDvmDex_pResMethods(a3, a3) #  a3 <- pDvmDex->pResMethods
+    FETCH(rBIX, 2)                         #  rBIX <- GFED or CCCC
+    LOAD_eas2(a0, a3, a1)                  #  a0 <- resolved baseMethod
+    .if (!1)
+    and       rBIX, rBIX, 15               #  rBIX <- D (or stays CCCC)
+    .endif
+    EXPORT_PC()                            #  must export for invoke
+    # already resolved?
+    bnez      a0, .LOP_INVOKE_VIRTUAL_RANGE_continue     #  yes, continue on
+
+    LOAD_rSELF_method(a3)                  #  a3 <- self->method
+    LOAD_base_offMethod_clazz(a0, a3)      #  a0 <- method->clazz
+    li        a2, METHOD_VIRTUAL           #  resolver method type
+    JAL(dvmResolveMethod)                  #  v0 <- call(clazz, ref, flags)
+    move      a0, v0
+    # got null?
+    bnez      v0, .LOP_INVOKE_VIRTUAL_RANGE_continue     #  no, continue
+    b         common_exceptionThrown       #  yes, handle exception
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_INVOKE_SUPER_RANGE: /* 0x75 */
+/* File: mips/OP_INVOKE_SUPER_RANGE.S */
+/* File: mips/OP_INVOKE_SUPER.S */
+    /*
+     * Handle a "super" method call.
+     *
+     * for: invoke-super, invoke-super/range
+     */
+    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
+    # op vAA, {vCCCC..v(CCCC+AA-1)}, meth  /* BBBB */
+    FETCH(t0, 2)                           #  t0 <- GFED or CCCC
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- pDvmDex
+    .if (!1)
+    and       t0, t0, 15                   #  t0 <- D (or stays CCCC)
+    .endif
+    FETCH(a1, 1)                           #  a1 <- BBBB
+    LOAD_base_offDvmDex_pResMethods(a3, a3) #  a3 <- pDvmDex->pResMethods
+    GET_VREG(rOBJ, t0)                     #  rOBJ <- "this" ptr
+    LOAD_eas2(a0, a3, a1)                  #  a0 <- resolved baseMethod
+    # null "this"?
+    LOAD_rSELF_method(t1)                  #  t1 <- current method
+    beqz      rOBJ, common_errNullObject   #  null "this", throw exception
+    # cmp a0, 0; already resolved?
+    LOAD_base_offMethod_clazz(rBIX, t1)    #  rBIX <- method->clazz
+    EXPORT_PC()                            #  must export for invoke
+    bnez      a0, .LOP_INVOKE_SUPER_RANGE_continue     #  resolved, continue on
+
+    move      a0, rBIX                     #  a0 <- method->clazz
+    li        a2, METHOD_VIRTUAL           #  resolver method type
+    JAL(dvmResolveMethod)                  #  v0 <- call(clazz, ref, flags)
+    move      a0, v0
+    # got null?
+    beqz      v0, common_exceptionThrown   #  yes, handle exception
+    b         .LOP_INVOKE_SUPER_RANGE_continue
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_INVOKE_DIRECT_RANGE: /* 0x76 */
+/* File: mips/OP_INVOKE_DIRECT_RANGE.S */
+/* File: mips/OP_INVOKE_DIRECT.S */
+    /*
+     * Handle a direct method call.
+     *
+     * (We could defer the "is 'this' pointer null" test to the common
+     * method invocation code, and use a flag to indicate that static
+     * calls don't count.  If we do this as part of copying the arguments
+     * out we could avoid loading the first arg twice.)
+     *
+     * for: invoke-direct, invoke-direct/range
+     */
+    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
+    # op {vCCCC..v(CCCC+AA-1)}, meth       /* BBBB */
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- pDvmDex
+    FETCH(a1, 1)                           #  a1 <- BBBB
+    LOAD_base_offDvmDex_pResMethods(a3, a3) #  a3 <- pDvmDex->pResMethods
+    FETCH(rBIX, 2)                         #  rBIX <- GFED or CCCC
+    LOAD_eas2(a0, a3, a1)                  #  a0 <- resolved methodToCall
+    .if (!1)
+    and       rBIX, rBIX, 15               #  rBIX <- D (or stays CCCC)
+    .endif
+    EXPORT_PC()                            #  must export for invoke
+    GET_VREG(rOBJ, rBIX)                   #  rOBJ <- "this" ptr
+    # already resolved?
+    bnez      a0, 1f                       #  resolved, call the function
+
+    lw        a3, offThread_method(rSELF)  #  a3 <- self->method
+    LOAD_base_offMethod_clazz(a0, a3)      #  a0 <- method->clazz
+    li        a2, METHOD_DIRECT            #  resolver method type
+    JAL(dvmResolveMethod)                  #  v0 <- call(clazz, ref, flags)
+    move      a0, v0
+    # got null?
+    beqz      v0, common_exceptionThrown   #  yes, handle exception
+
+1:
+    bnez      rOBJ, common_invokeMethodRange #  a0=method, rOBJ="this"
+    b         common_errNullObject         #  yes, throw exception
+
+
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_INVOKE_STATIC_RANGE: /* 0x77 */
+/* File: mips/OP_INVOKE_STATIC_RANGE.S */
+/* File: mips/OP_INVOKE_STATIC.S */
+    /*
+     * Handle a static method call.
+     *
+     * for: invoke-static, invoke-static/range
+     */
+    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
+    # op {vCCCC..v(CCCC+AA-1)}, meth       /* BBBB */
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- pDvmDex
+    FETCH(a1, 1)                           #  a1 <- BBBB
+    LOAD_base_offDvmDex_pResMethods(a3, a3) #  a3 <- pDvmDex->pResMethods
+    li      rOBJ, 0                        #  null "this" in delay slot
+    LOAD_eas2(a0, a3, a1)                  #  a0 <- resolved methodToCall
+#if defined(WITH_JIT)
+    EAS2(rBIX, a3, a1)                     #  rBIX <- &resolved_methodToCall
+#endif
+    EXPORT_PC()                            #  must export for invoke
+    # already resolved?
+    bnez      a0, common_invokeMethodRange #  yes, continue on
+    b         .LOP_INVOKE_STATIC_RANGE_resolve
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_INVOKE_INTERFACE_RANGE: /* 0x78 */
+/* File: mips/OP_INVOKE_INTERFACE_RANGE.S */
+/* File: mips/OP_INVOKE_INTERFACE.S */
+    /*
+     * Handle an interface method call.
+     *
+     * for: invoke-interface, invoke-interface/range
+     */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+    FETCH(a2, 2)                           #  a2 <- FEDC or CCCC
+    FETCH(a1, 1)                           #  a1 <- BBBB
+    .if (!1)
+    and       a2, 15                       #  a2 <- C (or stays CCCC)
+    .endif
+    EXPORT_PC()                            #  must export for invoke
+    GET_VREG(rOBJ, a2)                     #  rOBJ <- first arg ("this")
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- methodClassDex
+    LOAD_rSELF_method(a2)                  #  a2 <- method
+    # null obj?
+    beqz      rOBJ, common_errNullObject   #  yes, fail
+    LOAD_base_offObject_clazz(a0, rOBJ)      #  a0 <- thisPtr->clazz
+    JAL(dvmFindInterfaceMethodInCache)     #  v0 <- call(class, ref, method, dex)
+    move      a0, v0
+    # failed?
+    beqz      v0, common_exceptionThrown   #  yes, handle exception
+    b         common_invokeMethodRange #  (a0=method, rOBJ="this")
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_79: /* 0x79 */
+/* File: mips/OP_UNUSED_79.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_7A: /* 0x7a */
+/* File: mips/OP_UNUSED_7A.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_NEG_INT: /* 0x7b */
+/* File: mips/OP_NEG_INT.S */
+/* File: mips/unop.S */
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op a0".
+     * This could be a MIPS instruction or a function call.
+     *
+     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+     *      int-to-byte, int-to-char, int-to-short
+     */
+    /* unop vA, vB */
+    GET_OPB(a3)                            #  a3 <- B
+    GET_OPA4(t0)                           #  t0 <- A+
+    GET_VREG(a0, a3)                       #  a0 <- vB
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+                                  #  optional op
+    negu a0, a0                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, t0, t1)        #  vAA <- result0
+    /* 9-10 instructions */
+
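+    /*
+     * In effect the unop.S template above computes (illustrative C):
+     *
+     *   vA = op(vB);        // here op is integer negation: vA = -vB
+     *
+     * each consumer substitutes only the one "instr" line (negu, not, ...).
+     */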
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_NOT_INT: /* 0x7c */
+/* File: mips/OP_NOT_INT.S */
+/* File: mips/unop.S */
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op a0".
+     * This could be a MIPS instruction or a function call.
+     *
+     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+     *      int-to-byte, int-to-char, int-to-short
+     */
+    /* unop vA, vB */
+    GET_OPB(a3)                            #  a3 <- B
+    GET_OPA4(t0)                           #  t0 <- A+
+    GET_VREG(a0, a3)                       #  a0 <- vB
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+                                  #  optional op
+    not a0, a0                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, t0, t1)        #  vAA <- result0
+    /* 9-10 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_NEG_LONG: /* 0x7d */
+/* File: mips/OP_NEG_LONG.S */
+/* File: mips/unopWide.S */
+    /*
+     * Generic 64-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op a0/a1".
+     * This could be MIPS instruction or a function call.
+     *
+     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+     */
+    /* unop vA, vB */
+    GET_OPA4(t1)                           #  t1 <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
+    EAS2(rOBJ, rFP, t1)                    #  rOBJ <- &fp[A]
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- vAA
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    negu v0, a0                              #  optional op
+    negu v1, a1; sltu a0, zero, v0; subu v1, v1, a0                                 #  a0/a1 <- op, a2-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(v0, v1, rOBJ)      #  vAA <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 12-13 instructions */
+
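+    /*
+     * Why the three-op negate sequence above works (illustrative): a 64-bit
+     * negate is lo' = -lo, hi' = -hi - (lo != 0), where "sltu a0, zero, v0"
+     * computes the borrow (lo' != 0 exactly when lo != 0).  E.g. negating 1
+     * (hi=0, lo=1): lo' = 0xFFFFFFFF, borrow = 1, hi' = 0xFFFFFFFF, i.e. -1.
+     */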
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_NOT_LONG: /* 0x7e */
+/* File: mips/OP_NOT_LONG.S */
+/* File: mips/unopWide.S */
+    /*
+     * Generic 64-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op a0/a1".
+     * This could be MIPS instruction or a function call.
+     *
+     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+     */
+    /* unop vA, vB */
+    GET_OPA4(t1)                           #  t1 <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
+    EAS2(rOBJ, rFP, t1)                    #  rOBJ <- &fp[A]
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- vAA
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    not a0, a0                              #  optional op
+    not a1, a1                                 #  a0/a1 <- op, a2-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(a0, a1, rOBJ)      #  vAA <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 12-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_NEG_FLOAT: /* 0x7f */
+/* File: mips/OP_NEG_FLOAT.S */
+/* File: mips/unop.S */
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op a0".
+     * This could be a MIPS instruction or a function call.
+     *
+     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+     *      int-to-byte, int-to-char, int-to-short
+     */
+    /* unop vA, vB */
+    GET_OPB(a3)                            #  a3 <- B
+    GET_OPA4(t0)                           #  t0 <- A+
+    GET_VREG(a0, a3)                       #  a0 <- vB
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+                                  #  optional op
+    addu a0, a0, 0x80000000                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, t0, t1)        #  vAA <- result0
+    /* 9-10 instructions */
+
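+    /*
+     * Note: adding 0x80000000 modulo 2^32 toggles only bit 31, the IEEE-754
+     * sign bit, so the float is negated without touching the FPU, e.g.
+     * 0x3F800000 (1.0f) + 0x80000000 = 0xBF800000 (-1.0f).
+     */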
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_NEG_DOUBLE: /* 0x80 */
+/* File: mips/OP_NEG_DOUBLE.S */
+/* File: mips/unopWide.S */
+    /*
+     * Generic 64-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op a0/a1".
+     * This could be MIPS instruction or a function call.
+     *
+     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+     */
+    /* unop vA, vB */
+    GET_OPA4(t1)                           #  t1 <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
+    EAS2(rOBJ, rFP, t1)                    #  rOBJ <- &fp[A]
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- vAA
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+                                  #  optional op
+    addu a1, a1, 0x80000000                                 #  a0/a1 <- op, a2-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(a0, a1, rOBJ)      #  vAA <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 12-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_INT_TO_LONG: /* 0x81 */
+/* File: mips/OP_INT_TO_LONG.S */
+/* File: mips/unopWider.S */
+    /*
+     * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = op a0", where
+     * "result" is a 64-bit quantity in a0/a1.
+     *
+     * For: int-to-long, int-to-double, float-to-long, float-to-double
+     */
+    /* unop vA, vB */
+    GET_OPA4(t1)                           #  t1 <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a0, a3)                       #  a0 <- vB
+    EAS2(rOBJ, rFP, t1)                    #  rOBJ <- &fp[A]
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+                                  #  optional op
+    sra a1, a0, 31                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(a0, a1, rOBJ)      #  vA/vA+1 <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 10-11 instructions */
+
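+    /*
+     * Sign-extension note: "sra a1, a0, 31" replicates vB's sign bit across
+     * the whole high word, e.g. vB = -5 (0xFFFFFFFB) gives hi = 0xFFFFFFFF
+     * while vB = 5 gives hi = 0x00000000.
+     */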
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_INT_TO_FLOAT: /* 0x82 */
+/* File: mips/OP_INT_TO_FLOAT.S */
+/* File: mips/unflop.S */
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op a0".
+     * This could be a MIPS instruction or a function call.
+     *
+     * for: int-to-float, float-to-int
+     */
+    /* unop vA, vB */
+    GET_OPB(a3)                            #  a3 <- B
+    GET_OPA4(rOBJ)                         #  t0 <- A+
+#ifdef SOFT_FLOAT
+    GET_VREG(a0, a3)                       #  a0 <- vB
+#else
+    GET_VREG_F(fa0, a3)
+#endif
+                                  #  optional op
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+#ifdef SOFT_FLOAT
+    JAL(__floatsisf)                                 #  a0 <- op, a0-a3 changed
+
+.LOP_INT_TO_FLOAT_set_vreg:
+    SET_VREG(v0, rOBJ)                     #  vAA <- result0
+#else
+    cvt.s.w fv0, fa0
+
+.LOP_INT_TO_FLOAT_set_vreg_f:
+    SET_VREG_F(fv0, rOBJ)
+#endif
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    GOTO_OPCODE(t1)                        #  jump to next instruction
+    /* 9-10 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_INT_TO_DOUBLE: /* 0x83 */
+/* File: mips/OP_INT_TO_DOUBLE.S */
+/* File: mips/unflopWider.S */
+    /*
+     * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = op a0", where
+     * "result" is a 64-bit quantity in a0/a1.
+     *
+     * For: int-to-double, float-to-long, float-to-double
+     */
+    /* unop vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+#ifdef SOFT_FLOAT
+    GET_VREG(a0, a3)                       #  a0 <- vB
+#else
+    GET_VREG_F(fa0, a3)
+#endif
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[A]
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+                                  #  optional op
+#ifdef SOFT_FLOAT
+    JAL(__floatsidf)                                 #  result <- op, a0-a3 changed
+
+.LOP_INT_TO_DOUBLE_set_vreg:
+    STORE64(rRESULT0, rRESULT1, rOBJ)      #  vA/vA+1 <- a0/a1
+#else
+    cvt.d.w fv0, fa0
+
+.LOP_INT_TO_DOUBLE_set_vreg:
+    STORE64_F(fv0, fv0f, rOBJ)                             #  vA/vA+1 <- a0/a1
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 10-11 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_LONG_TO_INT: /* 0x84 */
+/* File: mips/OP_LONG_TO_INT.S */
+    GET_OPB(a1)                            #  a1 <- B from 15:12
+    GET_OPA4(a0)                           #  a0 <- A from 11:8
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+#ifdef HAVE_BIG_ENDIAN
+    addu      a1, a1, 1
+#endif
+    GET_VREG(a2, a1)                       #  a2 <- fp[B]
+    GET_INST_OPCODE(t0)                    #  t0 <- opcode from rINST
+    SET_VREG_GOTO(a2, a0, t0)              #  fp[A] <- a2
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_LONG_TO_FLOAT: /* 0x85 */
+/* File: mips/OP_LONG_TO_FLOAT.S */
+/* File: mips/unopNarrower.S */
+    /*
+     * Generic 64bit-to-32bit unary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = op a0/a1", where
+     * "result" is a 32-bit quantity in a0.
+     *
+     * For: long-to-float, double-to-int, double-to-float
+     * If hard floating point support is available, use fa0 as the parameter,
+     * except for the long-to-float opcode.
+     * (This would work for long-to-int, but that instruction is actually
+     * an exact match for OP_MOVE.)
+     */
+    /* unop vA, vB */
+    GET_OPB(a3)                            #  a3 <- B
+    GET_OPA4(rOBJ)                         #  t1 <- A+
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
+#ifdef SOFT_FLOAT
+    LOAD64(rARG0, rARG1, a3)               #  a0/a1 <- vB/vB+1
+#else
+    LOAD64(rARG0, rARG1, a3)
+#endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+                                  #  optional op
+#ifdef SOFT_FLOAT
+    JAL(__floatdisf)                                 #  a0 <- op, a0-a3 changed
+
+.LOP_LONG_TO_FLOAT_set_vreg:
+    SET_VREG(v0, rOBJ)                     #  vA <- result0
+#else
+    JAL(__floatdisf)
+
+.LOP_LONG_TO_FLOAT_set_vreg_f:
+    SET_VREG_F(fv0, rOBJ)                  #  vA <- result0
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 10-11 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_LONG_TO_DOUBLE: /* 0x86 */
+/* File: mips/OP_LONG_TO_DOUBLE.S */
+/* File: mips/unflopWide.S */
+    /*
+     * Generic 64-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op a0/a1".
+     * This could be a MIPS instruction or a function call.
+     *
+     * For: long-to-double, double-to-long
+     */
+    /* unop vA, vB */
+    GET_OPA4(rOBJ)                         #  t1 <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
+    EAS2(rOBJ, rFP, rOBJ)                  #  t1 <- &fp[A]
+#ifdef SOFT_FLOAT
+    LOAD64(rARG0, rARG1, a3)               #  a0/a1 <- vAA
+#else
+    LOAD64(rARG0, rARG1, a3)
+#endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+                                  #  optional op
+    JAL(__floatdidf)                                 #  a0/a1 <- op, a2-a3 changed
+
+.LOP_LONG_TO_DOUBLE_set_vreg:
+#ifdef SOFT_FLOAT
+    STORE64(rRESULT0, rRESULT1, rOBJ)      #  vAA <- a0/a1
+#else
+    STORE64_F(fv0, fv0f, rOBJ)                             #  vAA <- a0/a1
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 12-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_FLOAT_TO_INT: /* 0x87 */
+/* File: mips/OP_FLOAT_TO_INT.S */
+/* File: mips/unflop.S */
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op a0".
+     * This could be a MIPS instruction or a function call.
+     *
+     * for: int-to-float, float-to-int
+     */
+    /* unop vA, vB */
+    GET_OPB(a3)                            #  a3 <- B
+    GET_OPA4(rOBJ)                         #  t0 <- A+
+#ifdef SOFT_FLOAT
+    GET_VREG(a0, a3)                       #  a0 <- vB
+#else
+    GET_VREG_F(fa0, a3)
+#endif
+                                  #  optional op
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+#ifdef SOFT_FLOAT
+    b f2i_doconv                                 #  a0 <- op, a0-a3 changed
+
+.LOP_FLOAT_TO_INT_set_vreg:
+    SET_VREG(v0, rOBJ)                     #  vAA <- result0
+#else
+    b f2i_doconv
+
+.LOP_FLOAT_TO_INT_set_vreg_f:
+    SET_VREG_F(fv0, rOBJ)
+#endif
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    GOTO_OPCODE(t1)                        #  jump to next instruction
+    /* 9-10 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_FLOAT_TO_LONG: /* 0x88 */
+/* File: mips/OP_FLOAT_TO_LONG.S */
+/* File: mips/unflopWider.S */
+    /*
+     * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = op a0", where
+     * "result" is a 64-bit quantity in a0/a1.
+     *
+     * For: int-to-double, float-to-long, float-to-double
+     */
+    /* unop vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+#ifdef SOFT_FLOAT
+    GET_VREG(a0, a3)                       #  a0 <- vB
+#else
+    GET_VREG_F(fa0, a3)
+#endif
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[A]
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+                                  #  optional op
+#ifdef SOFT_FLOAT
+    b f2l_doconv                                 #  result <- op, a0-a3 changed
+
+.LOP_FLOAT_TO_LONG_set_vreg:
+    STORE64(rRESULT0, rRESULT1, rOBJ)      #  vA/vA+1 <- a0/a1
+#else
+    b f2l_doconv
+
+.LOP_FLOAT_TO_LONG_set_vreg:
+    STORE64(rRESULT0, rRESULT1, rOBJ)                             #  vA/vA+1 <- a0/a1
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 10-11 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_FLOAT_TO_DOUBLE: /* 0x89 */
+/* File: mips/OP_FLOAT_TO_DOUBLE.S */
+/* File: mips/unflopWider.S */
+    /*
+     * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = op a0", where
+     * "result" is a 64-bit quantity in a0/a1.
+     *
+     * For: int-to-double, float-to-long, float-to-double
+     */
+    /* unop vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+#ifdef SOFT_FLOAT
+    GET_VREG(a0, a3)                       #  a0 <- vB
+#else
+    GET_VREG_F(fa0, a3)
+#endif
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[A]
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+                                  #  optional op
+#ifdef SOFT_FLOAT
+    JAL(__extendsfdf2)                                 #  result <- op, a0-a3 changed
+
+.LOP_FLOAT_TO_DOUBLE_set_vreg:
+    STORE64(rRESULT0, rRESULT1, rOBJ)      #  vA/vA+1 <- a0/a1
+#else
+    cvt.d.s fv0, fa0
+
+.LOP_FLOAT_TO_DOUBLE_set_vreg:
+    STORE64_F(fv0, fv0f, rOBJ)                             #  vA/vA+1 <- a0/a1
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 10-11 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_DOUBLE_TO_INT: /* 0x8a */
+/* File: mips/OP_DOUBLE_TO_INT.S */
+/* File: mips/unopNarrower.S */
+    /*
+     * Generic 64bit-to-32bit unary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = op a0/a1", where
+     * "result" is a 32-bit quantity in a0.
+     *
+     * For: long-to-float, double-to-int, double-to-float
+     * If hard floating point support is available, use fa0 as the parameter,
+     * except for the long-to-float opcode.
+     * (This would work for long-to-int, but that instruction is actually
+     * an exact match for OP_MOVE.)
+     */
+    /* unop vA, vB */
+    GET_OPB(a3)                            #  a3 <- B
+    GET_OPA4(rOBJ)                         #  t1 <- A+
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
+#ifdef SOFT_FLOAT
+    LOAD64(rARG0, rARG1, a3)               #  a0/a1 <- vB/vB+1
+#else
+    LOAD64_F(fa0, fa0f, a3)
+#endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+                                  #  optional op
+#ifdef SOFT_FLOAT
+    b d2i_doconv                                 #  a0 <- op, a0-a3 changed
+
+.LOP_DOUBLE_TO_INT_set_vreg:
+    SET_VREG(v0, rOBJ)                     #  vA <- result0
+#else
+    b d2i_doconv
+
+.LOP_DOUBLE_TO_INT_set_vreg_f:
+    SET_VREG_F(fv0, rOBJ)                  #  vA <- result0
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 10-11 instructions */
+
+/*
+ * Convert the double in a0/a1 to an int in a0.
+ *
+ * We have to clip values to int min/max per the specification.  The
+ * expected common case is a "reasonable" value that converts directly
+ * to a modest integer.  The EABI convert function isn't doing this for us.
+ * rBIX / rTEMP are used as scratch registers to hold the arguments across
+ * the conversion (they are not bound to any global variable).
+ */
+
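+/*
+ * A C sketch of the clamping d2i_doconv must perform (illustrative of the
+ * comment above; the exact comparisons in the helper may differ):
+ *
+ *   if (d >= (double) INT_MAX)      return INT_MAX;
+ *   else if (d <= (double) INT_MIN) return INT_MIN;
+ *   else if (d != d)                return 0;        // NaN
+ *   else                            return (int) d;
+ */
+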
+/* ------------------------------ */
+    .balign 128
+.L_OP_DOUBLE_TO_LONG: /* 0x8b */
+/* File: mips/OP_DOUBLE_TO_LONG.S */
+/* File: mips/unflopWide.S */
+    /*
+     * Generic 64-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op a0/a1".
+     * This could be a MIPS instruction or a function call.
+     *
+     * For: long-to-double, double-to-long
+     */
+    /* unop vA, vB */
+    GET_OPA4(rOBJ)                         #  t1 <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
+    EAS2(rOBJ, rFP, rOBJ)                  #  t1 <- &fp[A]
+#ifdef SOFT_FLOAT
+    LOAD64(rARG0, rARG1, a3)               #  a0/a1 <- vAA
+#else
+    LOAD64_F(fa0, fa0f, a3)
+#endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+                                  #  optional op
+    b d2l_doconv                                 #  a0/a1 <- op, a2-a3 changed
+
+.LOP_DOUBLE_TO_LONG_set_vreg:
+#ifdef SOFT_FLOAT
+    STORE64(rRESULT0, rRESULT1, rOBJ)      #  vAA <- a0/a1
+#else
+    STORE64(rRESULT0, rRESULT1, rOBJ)                             #  vAA <- a0/a1
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 12-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_DOUBLE_TO_FLOAT: /* 0x8c */
+/* File: mips/OP_DOUBLE_TO_FLOAT.S */
+/* File: mips/unopNarrower.S */
+    /*
+     * Generic 64bit-to-32bit unary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = op a0/a1", where
+     * "result" is a 32-bit quantity in a0.
+     *
+     * For: long-to-float, double-to-int, double-to-float
+     * If hard floating point support is available, use fa0 as the parameter,
+     * except for the long-to-float opcode.
+     * (This would work for long-to-int, but that instruction is actually
+     * an exact match for OP_MOVE.)
+     */
+    /* unop vA, vB */
+    GET_OPB(a3)                            #  a3 <- B
+    GET_OPA4(rOBJ)                         #  t1 <- A+
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
+#ifdef SOFT_FLOAT
+    LOAD64(rARG0, rARG1, a3)               #  a0/a1 <- vB/vB+1
+#else
+    LOAD64_F(fa0, fa0f, a3)
+#endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+                                  #  optional op
+#ifdef SOFT_FLOAT
+    JAL(__truncdfsf2)                                 #  a0 <- op, a0-a3 changed
+
+.LOP_DOUBLE_TO_FLOAT_set_vreg:
+    SET_VREG(v0, rOBJ)                     #  vA <- result0
+#else
+    cvt.s.d fv0, fa0
+
+.LOP_DOUBLE_TO_FLOAT_set_vreg_f:
+    SET_VREG_F(fv0, rOBJ)                  #  vA <- result0
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 10-11 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_INT_TO_BYTE: /* 0x8d */
+/* File: mips/OP_INT_TO_BYTE.S */
+/* File: mips/unop.S */
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op a0".
+     * This could be a MIPS instruction or a function call.
+     *
+     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+     *      int-to-byte, int-to-char, int-to-short
+     */
+    /* unop vA, vB */
+    GET_OPB(a3)                            #  a3 <- B
+    GET_OPA4(t0)                           #  t0 <- A+
+    GET_VREG(a0, a3)                       #  a0 <- vB
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    sll a0, a0, 24                              #  optional op
+    sra a0, a0, 24                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, t0, t1)        #  vAA <- result0
+    /* 9-10 instructions */
+
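+    /*
+     * The sll/sra pair above sign-extends the low byte: shifting left by 24
+     * discards bits 8-31, then the arithmetic right shift copies bit 7 back
+     * down, e.g. 0x000001FF -> 0xFF000000 -> 0xFFFFFFFF (-1 as a byte).
+     */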
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_INT_TO_CHAR: /* 0x8e */
+/* File: mips/OP_INT_TO_CHAR.S */
+/* File: mips/unop.S */
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op a0".
+     * This could be a MIPS instruction or a function call.
+     *
+     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+     *      int-to-byte, int-to-char, int-to-short
+     */
+    /* unop vA, vB */
+    GET_OPB(a3)                            #  a3 <- B
+    GET_OPA4(t0)                           #  t0 <- A+
+    GET_VREG(a0, a3)                       #  a0 <- vB
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+                                  #  optional op
+    and a0, 0xffff                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, t0, t1)        #  vAA <- result0
+    /* 9-10 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_INT_TO_SHORT: /* 0x8f */
+/* File: mips/OP_INT_TO_SHORT.S */
+/* File: mips/unop.S */
+    /*
+     * Generic 32-bit unary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = op a0".
+     * This could be a MIPS instruction or a function call.
+     *
+     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+     *      int-to-byte, int-to-char, int-to-short
+     */
+    /* unop vA, vB */
+    GET_OPB(a3)                            #  a3 <- B
+    GET_OPA4(t0)                           #  t0 <- A+
+    GET_VREG(a0, a3)                       #  a0 <- vB
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    sll a0, 16                              #  optional op
+    sra a0, 16                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, t0, t1)        #  vAA <- result0
+    /* 9-10 instructions */
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_ADD_INT: /* 0x90 */
+/* File: mips/OP_ADD_INT.S */
+/* File: mips/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+     * handles it correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+    GET_VREG(a1, a3)                       #  a1 <- vCC
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+                                  #  optional op
+    addu a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 11-14 instructions */
+
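+    /*
+     * Decode sketch for the format above (illustrative): the second code
+     * unit is CCBB, so "srl a3, a0, 8" extracts CC and "and a2, a0, 255"
+     * extracts BB; the template then computes vAA = vBB op vCC, here
+     * vAA = vBB + vCC.
+     */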
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SUB_INT: /* 0x91 */
+/* File: mips/OP_SUB_INT.S */
+/* File: mips/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+     * handles it correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+    GET_VREG(a1, a3)                       #  a1 <- vCC
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+                                  #  optional op
+    subu a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_MUL_INT: /* 0x92 */
+/* File: mips/OP_MUL_INT.S */
+/* File: mips/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+     * handles it correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+    GET_VREG(a1, a3)                       #  a1 <- vCC
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+                                  #  optional op
+    mul a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_DIV_INT: /* 0x93 */
+/* File: mips/OP_DIV_INT.S */
+/* File: mips/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+     * handles it correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+    GET_VREG(a1, a3)                       #  a1 <- vCC
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    .if 1
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+                                  #  optional op
+    div zero, a0, a1; mflo a0                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 11-14 instructions */
+
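+    /*
+     * MIPS "div" leaves the quotient in LO and the remainder in HI, so
+     * div-int reads mflo while rem-int (below) reads mfhi.  The explicit
+     * divide-by-zero branch above is required because a zero divisor makes
+     * "div" unpredictable rather than raising an exception.
+     */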
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_REM_INT: /* 0x94 */
+/* File: mips/OP_REM_INT.S */
+/* File: mips/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+     * handles it correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+    GET_VREG(a1, a3)                       #  a1 <- vCC
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    .if 1
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+                                  #  optional op
+    div zero, a0, a1; mfhi a0                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_AND_INT: /* 0x95 */
+/* File: mips/OP_AND_INT.S */
+/* File: mips/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+     * handles it correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+    GET_VREG(a1, a3)                       #  a1 <- vCC
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+                                  #  optional op
+    and a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_OR_INT: /* 0x96 */
+/* File: mips/OP_OR_INT.S */
+/* File: mips/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+     * handles it correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+    GET_VREG(a1, a3)                       #  a1 <- vCC
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+                                  #  optional op
+    or a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_XOR_INT: /* 0x97 */
+/* File: mips/OP_XOR_INT.S */
+/* File: mips/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+     * handles it correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+    GET_VREG(a1, a3)                       #  a1 <- vCC
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+                                  #  optional op
+    xor a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SHL_INT: /* 0x98 */
+/* File: mips/OP_SHL_INT.S */
+/* File: mips/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+     * handles it correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+    GET_VREG(a1, a3)                       #  a1 <- vCC
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    and a1, a1, 31                              #  optional op
+    sll a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SHR_INT: /* 0x99 */
+/* File: mips/OP_SHR_INT.S */
+/* File: mips/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+     * handles it correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+    GET_VREG(a1, a3)                       #  a1 <- vCC
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    and a1, a1, 31                              #  optional op
+    sra a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_USHR_INT: /* 0x9a */
+/* File: mips/OP_USHR_INT.S */
+/* File: mips/binop.S */
+    /*
+     * Generic 32-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.  Note that we
+     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+     * handles it correctly.
+     *
+     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+     *      xor-int, shl-int, shr-int, ushr-int
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+    GET_VREG(a1, a3)                       #  a1 <- vCC
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    and a1, a1, 31                              #  optional op
+    srl a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_ADD_LONG: /* 0x9b */
+/* File: mips/OP_ADD_LONG.S */
+/*
+ *  The compiler generates the following sequence for
+ *  [v1 v0] =  [a1 a0] + [a3 a2];
+ *    addu v0,a2,a0
+ *    addu a1,a3,a1
+ *    sltu v1,v0,a2
+ *    addu v1,v1,a1
+ */
+/* File: mips/binopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a2-a3).  Useful for integer division and modulus.
+     *
+     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+     *      xor-long
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[AA]
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
+    EAS2(t1, rFP, a3)                      #  t1 <- &fp[CC]
+    LOAD64(a0, a1, a2)               #  a0/a1 <- vBB/vBB+1
+    LOAD64(a2, a3, t1)               #  a2/a3 <- vCC/vCC+1
+    .if 0
+    or        t0, a2, a3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    addu v0, a2, a0                              #  optional op
+    addu a1, a3, a1; sltu v1, v0, a2; addu v1, v1, a1                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(v0, v1, rOBJ)      #  vAA/vAA+1 <- v0/v1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SUB_LONG: /* 0x9c */
+/* File: mips/OP_SUB_LONG.S */
+/*
+ * For little endian the code sequence looks as follows:
+ *    subu    v0,a0,a2
+ *    subu    v1,a1,a3
+ *    sltu    a0,a0,v0
+ *    subu    v1,v1,a0
+ */
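+/*
+ * Illustrative only (hypothetical C, not part of the generated output):
+ * the borrow propagation used above, with the 64-bit values split into
+ * 32-bit halves.
+ *
+ *     #include <stdint.h>
+ *     void sub64(uint32_t *rlo, uint32_t *rhi,
+ *                uint32_t alo, uint32_t ahi,
+ *                uint32_t blo, uint32_t bhi) {
+ *         uint32_t lo = alo - blo;        // subu v0, a0, a2
+ *         uint32_t hi = ahi - bhi;        // subu v1, a1, a3
+ *         uint32_t borrow = alo < lo;     // sltu a0, a0, v0
+ *         *rlo = lo;
+ *         *rhi = hi - borrow;             // subu v1, v1, a0
+ *     }
+ */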
+/* File: mips/binopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a2-a3).  Useful for integer division and modulus.
+     *
+     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+     *      xor-long
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[AA]
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
+    EAS2(t1, rFP, a3)                      #  t1 <- &fp[CC]
+    LOAD64(a0, a1, a2)               #  a0/a1 <- vBB/vBB+1
+    LOAD64(a2, a3, t1)               #  a2/a3 <- vCC/vCC+1
+    .if 0
+    or        t0, a2, a3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    subu v0, a0, a2                              #  optional op
+    subu v1, a1, a3; sltu a0, a0, v0; subu v1, v1, a0                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(v0, v1, rOBJ)      #  vAA/vAA+1 <- v0/v1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 14-17 instructions */
+
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_MUL_LONG: /* 0x9d */
+/* File: mips/OP_MUL_LONG.S */
+    /*
+     * Signed 64-bit integer multiply.
+     *         a1   a0
+     *   x     a3   a2
+     *   -------------
+     *       a2a1 a2a0
+     *       a3a0
+     *  a3a1 (<= unused)
+     *  ---------------
+     *         v1   v0
+     */
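+/*
+ * Illustrative only (hypothetical C, not part of the generated output):
+ * the same partial-product scheme; the a3*a1 term is dropped because it
+ * only affects bits above 64.
+ *
+ *     #include <stdint.h>
+ *     void mul64(uint32_t *rlo, uint32_t *rhi,
+ *                uint32_t alo, uint32_t ahi,
+ *                uint32_t blo, uint32_t bhi) {
+ *         uint64_t lo = (uint64_t)alo * blo;        // multu a2, a0
+ *         uint32_t cross = bhi * alo + blo * ahi;   // mul v1, a3, a0; mul t0, a2, a1
+ *         *rlo = (uint32_t)lo;                      // mflo v0
+ *         *rhi = (uint32_t)(lo >> 32) + cross;      // mfhi t1; two addu
+ *     }
+ */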
+    /* mul-long vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    and       t0, a0, 255                  #  t0 <- BB
+    srl       t1, a0, 8                    #  t1 <- CC
+    EAS2(t0, rFP, t0)                      #  t0 <- &fp[BB]
+    LOAD64(a0, a1, t0)                     #  a0/a1 <- vBB/vBB+1
+
+    EAS2(t1, rFP, t1)                      #  t1 <- &fp[CC]
+    LOAD64(a2, a3, t1)                     #  a2/a3 <- vCC/vCC+1
+
+    mul       v1, a3, a0                   #  v1= a3a0
+    multu     a2, a0
+    mfhi      t1
+    mflo      v0                           #  v0= a2a0
+    mul       t0, a2, a1                   #  t0= a2a1
+    addu      v1, v1, t1                   #  v1+= hi(a2a0)
+    addu      v1, v1, t0                   #  v1= a3a0 + a2a1;
+
+    GET_OPA(a0)                            #  a0 <- AA
+    EAS2(a0, rFP, a0)                      #  a0 <- &fp[AA]
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    b         .LOP_MUL_LONG_finish
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_DIV_LONG: /* 0x9e */
+/* File: mips/OP_DIV_LONG.S */
+#ifdef HAVE_LITTLE_ENDIAN
+/* File: mips/binopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a2-a3).  Useful for integer division and modulus.
+     *
+     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+     *      xor-long
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[AA]
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
+    EAS2(t1, rFP, a3)                      #  t1 <- &fp[CC]
+    LOAD64(a0, a1, a2)               #  a0/a1 <- vBB/vBB+1
+    LOAD64(a2, a3, t1)               #  a2/a3 <- vCC/vCC+1
+    .if 1
+    or        t0, a2, a3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    JAL(__divdi3)                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(v0, v1, rOBJ)      #  vAA/vAA+1 <- v0/v1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 14-17 instructions */
+
+
+#else
+/* File: mips/binopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a2-a3).  Useful for integer division and modulus.
+     *
+     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+     *      xor-long
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[AA]
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
+    EAS2(t1, rFP, a3)                      #  t1 <- &fp[CC]
+    LOAD64(a1, a0, a2)               #  a0/a1 <- vBB/vBB+1
+    LOAD64(a3, a2, t1)               #  a2/a3 <- vCC/vCC+1
+    .if 1
+    or        t0, a3, a2             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    JAL(__divdi3)                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(v1, v0, rOBJ)      #  vAA/vAA+1 <- v1/v0
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 14-17 instructions */
+
+
+#endif
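+/*
+ * Illustrative only (hypothetical C, not part of the generated output):
+ * both variants above reduce to the sketch below; the #ifdef just swaps
+ * the word order of each register pair so that __divdi3 receives
+ * well-formed 64-bit arguments on either endianness.
+ * (throw_divide_by_zero is a hypothetical stand-in for the branch to
+ * common_errDivideByZero.)
+ *
+ *     #include <stdint.h>
+ *     extern void throw_divide_by_zero(void);
+ *     int64_t div_long(int64_t vBB, int64_t vCC) {
+ *         if (vCC == 0)
+ *             throw_divide_by_zero();   // or t0, a2, a3; beqz t0, ...
+ *         return vBB / vCC;             // JAL(__divdi3)
+ *     }
+ */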
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_REM_LONG: /* 0x9f */
+/* File: mips/OP_REM_LONG.S */
+/* __moddi3 returns the 64-bit remainder in v0/v1 */
+#ifdef HAVE_LITTLE_ENDIAN
+/* File: mips/binopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a2-a3).  Useful for integer division and modulus.
+     *
+     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+     *      xor-long
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[AA]
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
+    EAS2(t1, rFP, a3)                      #  t1 <- &fp[CC]
+    LOAD64(a0, a1, a2)               #  a0/a1 <- vBB/vBB+1
+    LOAD64(a2, a3, t1)               #  a2/a3 <- vCC/vCC+1
+    .if 1
+    or        t0, a2, a3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    JAL(__moddi3)                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(v0, v1, rOBJ)      #  vAA/vAA+1 <- v0/v1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 14-17 instructions */
+
+
+#else
+/* File: mips/binopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a2-a3).  Useful for integer division and modulus.
+     *
+     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+     *      xor-long
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[AA]
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
+    EAS2(t1, rFP, a3)                      #  t1 <- &fp[CC]
+    LOAD64(a1, a0, a2)               #  a0/a1 <- vBB/vBB+1
+    LOAD64(a3, a2, t1)               #  a2/a3 <- vCC/vCC+1
+    .if 1
+    or        t0, a3, a2             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    JAL(__moddi3)                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(v1, v0, rOBJ)      #  vAA/vAA+1 <- v1/v0
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 14-17 instructions */
+
+
+#endif
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_AND_LONG: /* 0xa0 */
+/* File: mips/OP_AND_LONG.S */
+/* File: mips/binopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a2-a3).  Useful for integer division and modulus.
+     *
+     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+     *      xor-long
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[AA]
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
+    EAS2(t1, rFP, a3)                      #  t1 <- &fp[CC]
+    LOAD64(a0, a1, a2)               #  a0/a1 <- vBB/vBB+1
+    LOAD64(a2, a3, t1)               #  a2/a3 <- vCC/vCC+1
+    .if 0
+    or        t0, a2, a3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    and a0, a0, a2                              #  optional op
+    and a1, a1, a3                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(a0, a1, rOBJ)      #  vAA/vAA+1 <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_OR_LONG: /* 0xa1 */
+/* File: mips/OP_OR_LONG.S */
+/* File: mips/binopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a2-a3).  Useful for integer division and modulus.
+     *
+     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+     *      xor-long
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[AA]
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
+    EAS2(t1, rFP, a3)                      #  t1 <- &fp[CC]
+    LOAD64(a0, a1, a2)               #  a0/a1 <- vBB/vBB+1
+    LOAD64(a2, a3, t1)               #  a2/a3 <- vCC/vCC+1
+    .if 0
+    or        t0, a2, a3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    or a0, a0, a2                              #  optional op
+    or a1, a1, a3                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(a0, a1, rOBJ)      #  vAA/vAA+1 <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_XOR_LONG: /* 0xa2 */
+/* File: mips/OP_XOR_LONG.S */
+/* File: mips/binopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a2-a3).  Useful for integer division and modulus.
+     *
+     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+     *      xor-long
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[AA]
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
+    EAS2(t1, rFP, a3)                      #  t1 <- &fp[CC]
+    LOAD64(a0, a1, a2)               #  a0/a1 <- vBB/vBB+1
+    LOAD64(a2, a3, t1)               #  a2/a3 <- vCC/vCC+1
+    .if 0
+    or        t0, a2, a3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    xor a0, a0, a2                              #  optional op
+    xor a1, a1, a3                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(a0, a1, rOBJ)      #  vAA/vAA+1 <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SHL_LONG: /* 0xa3 */
+/* File: mips/OP_SHL_LONG.S */
+    /*
+     * Long integer shift.  This is different from the generic 32/64-bit
+     * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
+     * 6 bits of the shift distance.
+     */
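+/*
+ * Illustrative only (hypothetical C, not part of the generated output):
+ * the branch-free handling of shift distances >= 32 used below; Dalvik
+ * takes the distance modulo 64.
+ *
+ *     #include <stdint.h>
+ *     uint64_t shl64(uint64_t v, uint32_t shift) {
+ *         shift &= 63;
+ *         uint32_t lo = (uint32_t)v, hi = (uint32_t)(v >> 32);
+ *         uint32_t rlo = lo << (shift & 31);                  // sll v0, a0, a2
+ *         uint32_t spill = (lo >> 1) >> (31 - (shift & 31));  // srl a0, 1; srl a0, v1
+ *         uint32_t rhi = (hi << (shift & 31)) | spill;        // sll v1, a1, a2; or v1, a0
+ *         if (shift & 0x20) { rhi = rlo; rlo = 0; }           // movn v1, v0, a2; movn v0, zero, a2
+ *         return ((uint64_t)rhi << 32) | rlo;
+ *     }
+ */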
+    /* shl-long vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(t2)                            #  t2 <- AA
+    and       a3, a0, 255                  #  a3 <- BB
+    srl       a0, a0, 8                    #  a0 <- CC
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BB]
+    GET_VREG(a2, a0)                       #  a2 <- vCC
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- vBB/vBB+1
+
+    EAS2(t2, rFP, t2)                      #  t2 <- &fp[AA]
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    sll     v0, a0, a2                     #  rlo<- alo << (shift&31)
+    not     v1, a2                         #  rhi<- 31-shift  (shift is 5b)
+    srl     a0, 1
+    srl     a0, v1                         #  alo<- alo >> (32-(shift&31))
+    sll     v1, a1, a2                     #  rhi<- ahi << (shift&31)
+    or      v1, a0                         #  rhi<- rhi | alo
+    andi    a2, 0x20                       #  shift <- shift & 0x20
+    movn    v1, v0, a2                     #  rhi<- rlo (if shift&0x20)
+    movn    v0, zero, a2                   #  rlo<- 0  (if shift&0x20)
+
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(v0, v1, t2)                    #  vAA/vAA+1 <- v0/v1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SHR_LONG: /* 0xa4 */
+/* File: mips/OP_SHR_LONG.S */
+    /*
+     * Long integer shift.  This is different from the generic 32/64-bit
+     * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
+     * 6 bits of the shift distance.
+     */
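+/*
+ * Illustrative only: the arithmetic counterpart of the shl-long sketch
+ * above; for shifts >= 32 the high word is filled with the sign
+ * (sra a3, a1, 31 / movn v1, a3, a2).
+ *
+ *     #include <stdint.h>
+ *     int64_t shr64(int64_t v, uint32_t shift) { return v >> (shift & 63); }
+ */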
+    /* shr-long vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(t3)                            #  t3 <- AA
+    and       a3, a0, 255                  #  a3 <- BB
+    srl       a0, a0, 8                    #  a0 <- CC
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BB]
+    GET_VREG(a2, a0)                       #  a2 <- vCC
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- vBB/vBB+1
+    EAS2(t3, rFP, t3)                      #  t3 <- &fp[AA]
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    sra     v1, a1, a2                     #  rhi<- ahi >> (shift&31)
+    srl     v0, a0, a2                     #  rlo<- alo >> (shift&31)
+    sra     a3, a1, 31                     #  a3<- sign(ah)
+    not     a0, a2                         #  alo<- 31-shift (shift is 5b)
+    sll     a1, 1
+    sll     a1, a0                         #  ahi<- ahi << (32-(shift&31))
+    or      v0, a1                         #  rlo<- rlo | ahi
+    andi    a2, 0x20                       #  shift & 0x20
+    movn    v0, v1, a2                     #  rlo<- rhi (if shift&0x20)
+    movn    v1, a3, a2                     #  rhi<- sign(ahi) (if shift&0x20)
+
+    STORE64(v0, v1, t3)                    #  vAA/vAA+1 <- v0/v1
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_USHR_LONG: /* 0xa5 */
+/* File: mips/OP_USHR_LONG.S */
+    /*
+     * Long integer shift.  This is different from the generic 32/64-bit
+     * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
+     * 6 bits of the shift distance.
+     */
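+/*
+ * Illustrative only: the logical variant; for shifts >= 32 the high word
+ * is zeroed instead of sign-filled (movn v1, zero, a2).
+ *
+ *     #include <stdint.h>
+ *     uint64_t ushr64(uint64_t v, uint32_t shift) { return v >> (shift & 63); }
+ */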
+    /* ushr-long vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(t0)                            #  t0 <- AA
+    and       a3, a0, 255                  #  a3 <- BB
+    srl       a0, a0, 8                    #  a0 <- CC
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BB]
+    GET_VREG(a2, a0)                       #  a2 <- vCC
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- vBB/vBB+1
+    EAS2(rOBJ, rFP, t0)                    #  rOBJ <- &fp[AA]
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    srl       v1, a1, a2                   #  rhi<- ahi >> (shift&31)
+    srl       v0, a0, a2                   #  rlo<- alo >> (shift&31)
+    not       a0, a2                       #  alo<- 31-n  (shift is 5b)
+    sll       a1, 1
+    sll       a1, a0                       #  ahi<- ahi << (32-(shift&31))
+    or        v0, a1                       #  rlo<- rlo | ahi
+    andi      a2, 0x20                     #  shift & 0x20
+    movn      v0, v1, a2                   #  rlo<- rhi (if shift&0x20)
+    movn      v1, zero, a2                 #  rhi<- 0 (if shift&0x20)
+
+    STORE64(v0, v1, rOBJ)                  #  vAA/vAA+1 <- v0/v1
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_ADD_FLOAT: /* 0xa6 */
+/* File: mips/OP_ADD_FLOAT.S */
+/* File: mips/binflop.S */
+    /*
+     * Generic 32-bit binary float operation.
+     *
+     * For: add-fp, sub-fp, mul-fp, div-fp
+     */
+
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  s5 <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+#ifdef SOFT_FLOAT
+    GET_VREG(a1, a3)                       #  a1 <- vCC
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+#else
+    GET_VREG_F(fa1, a3)                    #  a1 <- vCC
+    GET_VREG_F(fa0, a2)                    #  a0 <- vBB
+
+    .if 0
+    # is second operand zero?
+    li.s      ft0, 0
+    c.eq.s    fcc0, ft0, fa1               #  condition bit and comparison with 0
+    bc1t      fcc0, common_errDivideByZero
+    .endif
+#endif
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+                                  #  optional op
+#ifdef SOFT_FLOAT
+    JAL(__addsf3)                                 #  v0 = result
+    SET_VREG(v0, rOBJ)                     #  vAA <- v0
+#else
+    add.s fv0, fa0, fa1                               #  f0 = result
+    SET_VREG_F(fv0, rOBJ)                  #  vAA <- fv0
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 11-14 instructions */
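+/*
+ * Illustrative only (hypothetical C, not part of the generated output):
+ * both build paths above compute the sketch below; under SOFT_FLOAT the
+ * addition becomes a call into the compiler runtime (__addsf3),
+ * otherwise a single FPU instruction.
+ *
+ *     float add_float(float vBB, float vCC) {
+ *         return vBB + vCC;   // JAL(__addsf3)  /  add.s fv0, fa0, fa1
+ *     }
+ */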
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SUB_FLOAT: /* 0xa7 */
+/* File: mips/OP_SUB_FLOAT.S */
+/* File: mips/binflop.S */
+    /*
+     * Generic 32-bit binary float operation.
+     *
+     * For: add-fp, sub-fp, mul-fp, div-fp
+     */
+
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  s5 <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+#ifdef SOFT_FLOAT
+    GET_VREG(a1, a3)                       #  a1 <- vCC
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+#else
+    GET_VREG_F(fa1, a3)                    #  a1 <- vCC
+    GET_VREG_F(fa0, a2)                    #  a0 <- vBB
+
+    .if 0
+    # is second operand zero?
+    li.s      ft0, 0
+    c.eq.s    fcc0, ft0, fa1               #  condition bit and comparison with 0
+    bc1t      fcc0, common_errDivideByZero
+    .endif
+#endif
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+                                  #  optional op
+#ifdef SOFT_FLOAT
+    JAL(__subsf3)                                 #  v0 = result
+    SET_VREG(v0, rOBJ)                     #  vAA <- v0
+#else
+    sub.s fv0, fa0, fa1                               #  f0 = result
+    SET_VREG_F(fv0, rOBJ)                  #  vAA <- fv0
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_MUL_FLOAT: /* 0xa8 */
+/* File: mips/OP_MUL_FLOAT.S */
+/* File: mips/binflop.S */
+    /*
+     * Generic 32-bit binary float operation.
+     *
+     * For: add-fp, sub-fp, mul-fp, div-fp
+     */
+
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  s5 <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+#ifdef SOFT_FLOAT
+    GET_VREG(a1, a3)                       #  a1 <- vCC
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+#else
+    GET_VREG_F(fa1, a3)                    #  a1 <- vCC
+    GET_VREG_F(fa0, a2)                    #  a0 <- vBB
+
+    .if 0
+    # is second operand zero?
+    li.s      ft0, 0
+    c.eq.s    fcc0, ft0, fa1               #  condition bit and comparison with 0
+    bc1t      fcc0, common_errDivideByZero
+    .endif
+#endif
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+                                  #  optional op
+#ifdef SOFT_FLOAT
+    JAL(__mulsf3)                                 #  v0 = result
+    SET_VREG(v0, rOBJ)                     #  vAA <- v0
+#else
+    mul.s fv0, fa0, fa1                               #  f0 = result
+    SET_VREG_F(fv0, rOBJ)                  #  vAA <- fv0
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_DIV_FLOAT: /* 0xa9 */
+/* File: mips/OP_DIV_FLOAT.S */
+/* File: mips/binflop.S */
+    /*
+     * Generic 32-bit binary float operation.
+     *
+     * For: add-fp, sub-fp, mul-fp, div-fp
+     */
+
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  s5 <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+#ifdef SOFT_FLOAT
+    GET_VREG(a1, a3)                       #  a1 <- vCC
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+#else
+    GET_VREG_F(fa1, a3)                    #  a1 <- vCC
+    GET_VREG_F(fa0, a2)                    #  a0 <- vBB
+
+    .if 0
+    # is second operand zero?
+    li.s      ft0, 0
+    c.eq.s    fcc0, ft0, fa1               #  condition bit and comparison with 0
+    bc1t      fcc0, common_errDivideByZero
+    .endif
+#endif
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+                                  #  optional op
+#ifdef SOFT_FLOAT
+    JAL(__divsf3)                                 #  v0 = result
+    SET_VREG(v0, rOBJ)                     #  vAA <- v0
+#else
+    div.s fv0, fa0, fa1                               #  f0 = result
+    SET_VREG_F(fv0, rOBJ)                  #  vAA <- fv0
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_REM_FLOAT: /* 0xaa */
+/* File: mips/OP_REM_FLOAT.S */
+/* File: mips/binflop.S */
+    /*
+     * Generic 32-bit binary float operation.
+     *
+     * For: add-fp, sub-fp, mul-fp, div-fp
+     */
+
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  s5 <- AA
+    srl       a3, a0, 8                    #  a3 <- CC
+    and       a2, a0, 255                  #  a2 <- BB
+#ifdef SOFT_FLOAT
+    GET_VREG(a1, a3)                       #  a1 <- vCC
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+#else
+    GET_VREG_F(fa1, a3)                    #  a1 <- vCC
+    GET_VREG_F(fa0, a2)                    #  a0 <- vBB
+
+    .if 0
+    # is second operand zero?
+    li.s      ft0, 0
+    c.eq.s    fcc0, ft0, fa1               #  condition bit and comparison with 0
+    bc1t      fcc0, common_errDivideByZero
+    .endif
+#endif
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+                                  #  optional op
+#ifdef SOFT_FLOAT
+    JAL(fmodf)                                 #  v0 = result
+    SET_VREG(v0, rOBJ)                     #  vAA <- v0
+#else
+    JAL(fmodf)                               #  f0 = result
+    SET_VREG_F(fv0, rOBJ)                  #  vAA <- fv0
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 11-14 instructions */
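+/*
+ * Illustrative only (hypothetical C, not part of the generated output):
+ * rem-float has no FPU instruction, so unlike the handlers above both
+ * build paths call out to libm.
+ *
+ *     #include <math.h>
+ *     float rem_float(float vBB, float vCC) { return fmodf(vBB, vCC); }
+ */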
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_ADD_DOUBLE: /* 0xab */
+/* File: mips/OP_ADD_DOUBLE.S */
+/* File: mips/binflopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+     *      xor-long, add-double, sub-double, mul-double, div-double,
+     *      rem-double
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  s5 <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    EAS2(rOBJ, rFP, rOBJ)                  #  s5 <- &fp[AA]
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
+    EAS2(t1, rFP, a3)                      #  a3 <- &fp[CC]
+#ifdef SOFT_FLOAT
+    LOAD64(rARG0, rARG1, a2)               #  a0/a1 <- vBB/vBB+1
+    LOAD64(rARG2, rARG3, t1)               #  a2/a3 <- vCC/vCC+1
+    .if 0
+    or        t0, rARG2, rARG3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+#else
+    LOAD64_F(fa0, fa0f, a2)
+    LOAD64_F(fa1, fa1f, t1)
+    .if 0
+    li.d      ft0, 0
+    c.eq.d    fcc0, fa1, ft0
+    bc1t      fcc0, common_errDivideByZero
+    .endif
+#endif
+1:
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+                                  #  optional op
+#ifdef SOFT_FLOAT
+    JAL(__adddf3)                                 #  result <- op, a0-a3 changed
+    STORE64(rRESULT0, rRESULT1, rOBJ)
+#else
+    add.d fv0, fa0, fa1
+    STORE64_F(fv0, fv0f, rOBJ)
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SUB_DOUBLE: /* 0xac */
+/* File: mips/OP_SUB_DOUBLE.S */
+/* File: mips/binflopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+     *      xor-long, add-double, sub-double, mul-double, div-double,
+     *      rem-double
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  s5 <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    EAS2(rOBJ, rFP, rOBJ)                  #  s5 <- &fp[AA]
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
+    EAS2(t1, rFP, a3)                      #  a3 <- &fp[CC]
+#ifdef SOFT_FLOAT
+    LOAD64(rARG0, rARG1, a2)               #  a0/a1 <- vBB/vBB+1
+    LOAD64(rARG2, rARG3, t1)               #  a2/a3 <- vCC/vCC+1
+    .if 0
+    or        t0, rARG2, rARG3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+#else
+    LOAD64_F(fa0, fa0f, a2)
+    LOAD64_F(fa1, fa1f, t1)
+    .if 0
+    li.d      ft0, 0
+    c.eq.d    fcc0, fa1, ft0
+    bc1t      fcc0, common_errDivideByZero
+    .endif
+#endif
+1:
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+                                  #  optional op
+#ifdef SOFT_FLOAT
+    JAL(__subdf3)                                 #  result <- op, a0-a3 changed
+    STORE64(rRESULT0, rRESULT1, rOBJ)
+#else
+    sub.d fv0, fa0, fa1
+    STORE64_F(fv0, fv0f, rOBJ)
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_MUL_DOUBLE: /* 0xad */
+/* File: mips/OP_MUL_DOUBLE.S */
+/* File: mips/binflopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+     *      xor-long, add-double, sub-double, mul-double, div-double,
+     *      rem-double
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  s5 <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    EAS2(rOBJ, rFP, rOBJ)                  #  s5 <- &fp[AA]
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
+    EAS2(t1, rFP, a3)                      #  a3 <- &fp[CC]
+#ifdef SOFT_FLOAT
+    LOAD64(rARG0, rARG1, a2)               #  a0/a1 <- vBB/vBB+1
+    LOAD64(rARG2, rARG3, t1)               #  a2/a3 <- vCC/vCC+1
+    .if 0
+    or        t0, rARG2, rARG3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+#else
+    LOAD64_F(fa0, fa0f, a2)
+    LOAD64_F(fa1, fa1f, t1)
+    .if 0
+    li.d      ft0, 0
+    c.eq.d    fcc0, fa1, ft0
+    bc1t      fcc0, common_errDivideByZero
+    .endif
+#endif
+1:
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+                                  #  optional op
+#ifdef SOFT_FLOAT
+    JAL(__muldf3)                                 #  result <- op, a0-a3 changed
+    STORE64(rRESULT0, rRESULT1, rOBJ)
+#else
+    mul.d fv0, fa0, fa1
+    STORE64_F(fv0, fv0f, rOBJ)
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_DIV_DOUBLE: /* 0xae */
+/* File: mips/OP_DIV_DOUBLE.S */
+/* File: mips/binflopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+     *      xor-long, add-double, sub-double, mul-double, div-double,
+     *      rem-double
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  s5 <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    EAS2(rOBJ, rFP, rOBJ)                  #  s5 <- &fp[AA]
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
+    EAS2(t1, rFP, a3)                      #  a3 <- &fp[CC]
+#ifdef SOFT_FLOAT
+    LOAD64(rARG0, rARG1, a2)               #  a0/a1 <- vBB/vBB+1
+    LOAD64(rARG2, rARG3, t1)               #  a2/a3 <- vCC/vCC+1
+    .if 0
+    or        t0, rARG2, rARG3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+#else
+    LOAD64_F(fa0, fa0f, a2)
+    LOAD64_F(fa1, fa1f, t1)
+    .if 0
+    li.d      ft0, 0
+    c.eq.d    fcc0, fa1, ft0
+    bc1t      fcc0, common_errDivideByZero
+    .endif
+#endif
+1:
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+                                  #  optional op
+#ifdef SOFT_FLOAT
+    JAL(__divdf3)                                 #  result <- op, a0-a3 changed
+    STORE64(rRESULT0, rRESULT1, rOBJ)
+#else
+    div.d fv0, fa0, fa1
+    STORE64_F(fv0, fv0f, rOBJ)
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_REM_DOUBLE: /* 0xaf */
+/* File: mips/OP_REM_DOUBLE.S */
+/* File: mips/binflopWide.S */
+    /*
+     * Generic 64-bit binary operation.  Provide an "instr" line that
+     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+     *      xor-long, add-double, sub-double, mul-double, div-double,
+     *      rem-double
+     *
+     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+     */
+    /* binop vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(rOBJ)                          #  s5 <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    EAS2(rOBJ, rFP, rOBJ)                  #  s5 <- &fp[AA]
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
+    EAS2(t1, rFP, a3)                      #  a3 <- &fp[CC]
+#ifdef SOFT_FLOAT
+    LOAD64(rARG0, rARG1, a2)               #  a0/a1 <- vBB/vBB+1
+    LOAD64(rARG2, rARG3, t1)               #  a2/a3 <- vCC/vCC+1
+    .if 0
+    or        t0, rARG2, rARG3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+#else
+    LOAD64_F(fa0, fa0f, a2)
+    LOAD64_F(fa1, fa1f, t1)
+    .if 0
+    li.d      ft0, 0
+    c.eq.d    fcc0, fa1, ft0
+    bc1t      fcc0, common_errDivideByZero
+    .endif
+#endif
+1:
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+                                  #  optional op
+#ifdef SOFT_FLOAT
+    JAL(fmod)                                 #  result <- op, a0-a3 changed
+    STORE64(rRESULT0, rRESULT1, rOBJ)
+#else
+    JAL(fmod)
+    STORE64_F(fv0, fv0f, rOBJ)
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_ADD_INT_2ADDR: /* 0xb0 */
+/* File: mips/OP_ADD_INT_2ADDR.S */
+/* File: mips/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a0, rOBJ)                     #  a0 <- vA
+    GET_VREG(a1, a3)                       #  a1 <- vB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    addu a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-13 instructions */
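+/*
+ * Illustrative only (hypothetical C, not part of the generated output):
+ * the "/2addr" forms fold the destination into the first source
+ * register, so the handler above amounts to:
+ *
+ *     #include <stdint.h>
+ *     void add_int_2addr(int32_t *vA, int32_t vB) { *vA += vB; }
+ */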
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SUB_INT_2ADDR: /* 0xb1 */
+/* File: mips/OP_SUB_INT_2ADDR.S */
+/* File: mips/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a0, rOBJ)                     #  a0 <- vA
+    GET_VREG(a1, a3)                       #  a1 <- vB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    subu a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_MUL_INT_2ADDR: /* 0xb2 */
+/* File: mips/OP_MUL_INT_2ADDR.S */
+/* File: mips/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a0, rOBJ)                     #  a0 <- vA
+    GET_VREG(a1, a3)                       #  a1 <- vB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    mul a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_DIV_INT_2ADDR: /* 0xb3 */
+/* File: mips/OP_DIV_INT_2ADDR.S */
+/* File: mips/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a0, rOBJ)                     #  a0 <- vA
+    GET_VREG(a1, a3)                       #  a1 <- vB
+    .if 1
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    div zero, a0, a1; mflo a0                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-13 instructions */
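+/*
+ * Illustrative only (hypothetical C, not part of the generated output):
+ * MIPS "div" leaves the quotient in LO and the remainder in HI, so this
+ * handler reads mflo while rem-int/2addr (the next handler) reads mfhi
+ * from the same divide.
+ *
+ *     #include <stdint.h>
+ *     void div_int_2addr(int32_t *vA, int32_t vB) { *vA /= vB; }  // mflo
+ *     void rem_int_2addr(int32_t *vA, int32_t vB) { *vA %= vB; }  // mfhi
+ */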
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_REM_INT_2ADDR: /* 0xb4 */
+/* File: mips/OP_REM_INT_2ADDR.S */
+/* File: mips/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a0, rOBJ)                     #  a0 <- vA
+    GET_VREG(a1, a3)                       #  a1 <- vB
+    .if 1
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    div zero, a0, a1; mfhi a0                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_AND_INT_2ADDR: /* 0xb5 */
+/* File: mips/OP_AND_INT_2ADDR.S */
+/* File: mips/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a0, rOBJ)                     #  a0 <- vA
+    GET_VREG(a1, a3)                       #  a1 <- vB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    and a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_OR_INT_2ADDR: /* 0xb6 */
+/* File: mips/OP_OR_INT_2ADDR.S */
+/* File: mips/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a0, rOBJ)                     #  a0 <- vA
+    GET_VREG(a1, a3)                       #  a1 <- vB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    or a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_XOR_INT_2ADDR: /* 0xb7 */
+/* File: mips/OP_XOR_INT_2ADDR.S */
+/* File: mips/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a0, rOBJ)                     #  a0 <- vA
+    GET_VREG(a1, a3)                       #  a1 <- vB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    xor a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SHL_INT_2ADDR: /* 0xb8 */
+/* File: mips/OP_SHL_INT_2ADDR.S */
+/* File: mips/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a0, rOBJ)                     #  a0 <- vA
+    GET_VREG(a1, a3)                       #  a1 <- vB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+    and a1, a1, 31                              #  optional op
+    sll a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SHR_INT_2ADDR: /* 0xb9 */
+/* File: mips/OP_SHR_INT_2ADDR.S */
+/* File: mips/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a0, rOBJ)                     #  a0 <- vA
+    GET_VREG(a1, a3)                       #  a1 <- vB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+    and a1, a1, 31                              #  optional op
+    sra a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_USHR_INT_2ADDR: /* 0xba */
+/* File: mips/OP_USHR_INT_2ADDR.S */
+/* File: mips/binop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a0, rOBJ)                     #  a0 <- vA
+    GET_VREG(a1, a3)                       #  a1 <- vB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+    and a1, a1, 31                              #  optional op
+    srl a0, a0, a1                                  #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_ADD_LONG_2ADDR: /* 0xbb */
+/* File: mips/OP_ADD_LONG_2ADDR.S */
+/*
+ * See OP_ADD_LONG.S for details
+ */
+/* File: mips/binopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a2-a3).  Useful for integer division and modulus.
+     *
+     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+     *      and-long/2addr, or-long/2addr, xor-long/2addr
+     *      rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[A]
+    LOAD64(a2, a3, a1)               #  a2/a3 <- vBB/vBB+1
+    LOAD64(a0, a1, rOBJ)             #  a0/a1 <- vAA/vAA+1
+    .if 0
+    or        t0, a2, a3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+    addu v0, a2, a0                              #  optional op
+    addu a1, a3, a1; sltu v1, v0, a2; addu v1, v1, a1                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(v0, v1, rOBJ)      #  vAA/vAA+1 <- v0/v1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 12-15 instructions */
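+
+    /*
+     * For reference, the carry arithmetic above, written out with the
+     * 64-bit operands split into 32-bit halves:
+     *     v0 = lo(vA) + lo(vB)
+     *     carry = (v0 < lo(vB))            (unsigned wraparound test, the sltu)
+     *     v1 = hi(vA) + hi(vB) + carry
+     * the usual add-with-carry sequence on a machine with no carry flag.
+     */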
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SUB_LONG_2ADDR: /* 0xbc */
+/* File: mips/OP_SUB_LONG_2ADDR.S */
+/*
+ * See comments in OP_SUB_LONG.S
+ */
+/* File: mips/binopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a2-a3).  Useful for integer division and modulus.
+     *
+     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+     *      and-long/2addr, or-long/2addr, xor-long/2addr
+     *      rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[A]
+    LOAD64(a2, a3, a1)               #  a2/a3 <- vBB/vBB+1
+    LOAD64(a0, a1, rOBJ)             #  a0/a1 <- vAA/vAA+1
+    .if 0
+    or        t0, a2, a3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+    subu v0, a0, a2                              #  optional op
+    subu v1, a1, a3; sltu a0, a0, v0; subu v1, v1, a0                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(v0, v1, rOBJ)      #  vAA/vAA+1 <- v0/v1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 12-15 instructions */
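+
+    /*
+     * For reference, the matching borrow arithmetic:
+     *     v0 = lo(vA) - lo(vB)
+     *     borrow = (lo(vA) < v0)           (true iff the subu wrapped)
+     *     v1 = hi(vA) - hi(vB) - borrow
+     */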
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_MUL_LONG_2ADDR: /* 0xbd */
+/* File: mips/OP_MUL_LONG_2ADDR.S */
+    /*
+     * See comments in OP_MUL_LONG.S
+     */
+    /* mul-long/2addr vA, vB */
+    GET_OPA4(t0)                           #  t0 <- A+
+
+    EAS2(t0, rFP, t0)                      #  t0 <- &fp[A]
+    LOAD64(a0, a1, t0)                     #  vAA.low / high
+
+    GET_OPB(t1)                            #  t1 <- B
+    EAS2(t1, rFP, t1)                      #  t1 <- &fp[B]
+    LOAD64(a2, a3, t1)                     #  vBB.low / high
+
+    mul       v1, a3, a0                   #  v1= a3a0
+    multu     a2, a0
+    mfhi      t1
+    mflo      v0                           #  v0= a2a0
+    mul       t2, a2, a1                   #  t2= a2a1
+    addu      v1, v1, t1                   #  v1= a3a0 + hi(a2a0)
+    addu      v1, v1, t2                   #  v1= v1 + a2a1;
+
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    # vAA <- v0 (low)
+    STORE64(v0, v1, t0)                    #  vAA+1 <- v1 (high)
+    GOTO_OPCODE(t1)                        #  jump to next instruction
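+
+    /*
+     * For reference, the 32-bit decomposition used above; only the low
+     * 64 bits of the full product are kept:
+     *     lo(vA*vB) = lo32(a0*a2)
+     *     hi(vA*vB) = hi32(a0*a2) + lo32(a1*a2) + lo32(a0*a3)
+     * multu/mfhi/mflo produce the one term that needs all 64 bits; the
+     * two cross terms only need their low halves (mul).
+     */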
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_DIV_LONG_2ADDR: /* 0xbe */
+/* File: mips/OP_DIV_LONG_2ADDR.S */
+#ifdef HAVE_LITTLE_ENDIAN
+/* File: mips/binopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a2-a3).  Useful for integer division and modulus.
+     *
+     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+     *      and-long/2addr, or-long/2addr, xor-long/2addr
+     *      rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[A]
+    LOAD64(a2, a3, a1)               #  a2/a3 <- vBB/vBB+1
+    LOAD64(a0, a1, rOBJ)             #  a0/a1 <- vAA/vAA+1
+    .if 1
+    or        t0, a2, a3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    JAL(__divdi3)                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(v0, v1, rOBJ)      #  vAA/vAA+1 <- v0/v1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 12-15 instructions */
+
+
+#else
+/* File: mips/binopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a2-a3).  Useful for integer division and modulus.
+     *
+     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+     *      and-long/2addr, or-long/2addr, xor-long/2addr
+     *      rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[A]
+    LOAD64(a3, a2, a1)               #  a2/a3 <- vBB/vBB+1
+    LOAD64(a1, a0, rOBJ)             #  a0/a1 <- vAA/vAA+1
+    .if 1
+    or        t0, a3, a2             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    JAL(__divdi3)                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(v1, v0, rOBJ)      #  vAA/vAA+1 <- v1/v0
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 12-15 instructions */
+
+
+#endif
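+
+/*
+ * For reference: __divdi3 is the signed 64-bit division helper from the
+ * compiler runtime (libgcc), taking one operand in the a0/a1 pair and the
+ * other in a2/a3; the little- and big-endian variants above differ only
+ * in how LOAD64/STORE64 order the 32-bit halves within each pair.
+ */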
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_REM_LONG_2ADDR: /* 0xbf */
+/* File: mips/OP_REM_LONG_2ADDR.S */
+#ifdef HAVE_LITTLE_ENDIAN
+/* File: mips/binopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a2-a3).  Useful for integer division and modulus.
+     *
+     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+     *      and-long/2addr, or-long/2addr, xor-long/2addr
+     *      rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[A]
+    LOAD64(a2, a3, a1)               #  a2/a3 <- vBB/vBB+1
+    LOAD64(a0, a1, rOBJ)             #  a0/a1 <- vAA/vAA+1
+    .if 1
+    or        t0, a2, a3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    JAL(__moddi3)                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(v0, v1, rOBJ)      #  vAA/vAA+1 <- v0/v1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 12-15 instructions */
+
+
+#else
+/* File: mips/binopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a2-a3).  Useful for integer division and modulus.
+     *
+     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+     *      and-long/2addr, or-long/2addr, xor-long/2addr
+     *      rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[A]
+    LOAD64(a3, a2, a1)               #  a2/a3 <- vBB/vBB+1
+    LOAD64(a1, a0, rOBJ)             #  a0/a1 <- vAA/vAA+1
+    .if 1
+    or        t0, a3, a2             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    JAL(__moddi3)                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(v1, v0, rOBJ)      #  vAA/vAA+1 <- v1/v0
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 12-15 instructions */
+
+
+#endif
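+
+/*
+ * For reference: __moddi3 is the matching signed 64-bit modulus helper
+ * from the compiler runtime; the structure is otherwise identical to
+ * div-long/2addr above.
+ */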
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_AND_LONG_2ADDR: /* 0xc0 */
+/* File: mips/OP_AND_LONG_2ADDR.S */
+/* File: mips/binopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a2-a3).  Useful for integer division and modulus.
+     *
+     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+     *      and-long/2addr, or-long/2addr, xor-long/2addr
+     *      rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[A]
+    LOAD64(a2, a3, a1)               #  a2/a3 <- vBB/vBB+1
+    LOAD64(a0, a1, rOBJ)             #  a0/a1 <- vAA/vAA+1
+    .if 0
+    or        t0, a2, a3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+    and a0, a0, a2                              #  optional op
+    and a1, a1, a3                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(a0, a1, rOBJ)      #  vAA/vAA+1 <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_OR_LONG_2ADDR: /* 0xc1 */
+/* File: mips/OP_OR_LONG_2ADDR.S */
+/* File: mips/binopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a2-a3).  Useful for integer division and modulus.
+     *
+     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+     *      and-long/2addr, or-long/2addr, xor-long/2addr
+     *      rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[A]
+    LOAD64(a2, a3, a1)               #  a2/a3 <- vBB/vBB+1
+    LOAD64(a0, a1, rOBJ)             #  a0/a1 <- vAA/vAA+1
+    .if 0
+    or        t0, a2, a3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+    or a0, a0, a2                              #  optional op
+    or a1, a1, a3                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(a0, a1, rOBJ)      #  vAA/vAA+1 <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_XOR_LONG_2ADDR: /* 0xc2 */
+/* File: mips/OP_XOR_LONG_2ADDR.S */
+/* File: mips/binopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a2-a3).  Useful for integer division and modulus.
+     *
+     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+     *      and-long/2addr, or-long/2addr, xor-long/2addr
+     *      rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[A]
+    LOAD64(a2, a3, a1)               #  a2/a3 <- vBB/vBB+1
+    LOAD64(a0, a1, rOBJ)             #  a0/a1 <- vAA/vAA+1
+    .if 0
+    or        t0, a2, a3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+    xor a0, a0, a2                              #  optional op
+    xor a1, a1, a3                                 #  result <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(a0, a1, rOBJ)      #  vAA/vAA+1 <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SHL_LONG_2ADDR: /* 0xc3 */
+/* File: mips/OP_SHL_LONG_2ADDR.S */
+    /*
+     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
+     * 32-bit shift distance.
+     */
+    /* shl-long/2addr vA, vB */
+    GET_OPA4(t2)                           #  t2 <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a2, a3)                       #  a2 <- vB
+    EAS2(rOBJ, rFP, t2)                    #  rOBJ <- &fp[A]
+    LOAD64(a0, a1, rOBJ)                   #  a0/a1 <- vAA/vAA+1
+
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+    sll     v0, a0, a2                     #  rlo<- alo << (shift&31)
+    not     v1, a2                         #  rhi<- 31-shift  (shift is 5b)
+    srl     a0, 1
+    srl     a0, v1                         #  alo<- alo >> (32-(shift&31))
+    sll     v1, a1, a2                     #  rhi<- ahi << (shift&31)
+    or      v1, a0                         #  rhi<- rhi | alo
+    andi    a2, 0x20                       #  shift <- shift & 0x20
+    movn    v1, v0, a2                     #  rhi<- rlo (if shift&0x20)
+    movn    v0, zero, a2                   #  rlo<- 0  (if shift&0x20)
+
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(v0, v1, rOBJ)                  #  vAA/vAA+1 <- v0/v1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
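+
+    /*
+     * For reference, the composition above.  MIPS variable shifts take
+     * the amount mod 32, so the "not" plus the extra one-bit srl compute
+     * alo >> (32 - (shift & 31)) correctly even when shift & 31 == 0:
+     *     rlo = alo << (shift & 31)
+     *     rhi = (ahi << (shift & 31)) | (alo >> (32 - (shift & 31)))
+     *     if (shift & 0x20) { rhi = rlo; rlo = 0; }     (the movn pair)
+     */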
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SHR_LONG_2ADDR: /* 0xc4 */
+/* File: mips/OP_SHR_LONG_2ADDR.S */
+    /*
+     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
+     * 32-bit shift distance.
+     */
+    /* shr-long/2addr vA, vB */
+    GET_OPA4(t2)                           #  t2 <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a2, a3)                       #  a2 <- vB
+    EAS2(t2, rFP, t2)                      #  t2 <- &fp[A]
+    LOAD64(a0, a1, t2)                     #  a0/a1 <- vAA/vAA+1
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+    sra     v1, a1, a2                     #  rhi<- ahi >> (shift&31)
+    srl     v0, a0, a2                     #  rlo<- alo >> (shift&31)
+    sra     a3, a1, 31                     #  a3<- sign(ah)
+    not     a0, a2                         #  alo<- 31-shift (shift is 5b)
+    sll     a1, 1
+    sll     a1, a0                         #  ahi<- ahi << (32-(shift&31))
+    or      v0, a1                         #  rlo<- rlo | ahi
+    andi    a2, 0x20                       #  shift & 0x20
+    movn    v0, v1, a2                     #  rlo<- rhi (if shift&0x20)
+    movn    v1, a3, a2                     #  rhi<- sign(ahi) (if shift&0x20)
+
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(v0, v1, t2)                    #  vAA/vAA+1 <- v0/v1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
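+
+    /*
+     * For reference, the arithmetic-shift variant of the same trick
+     * (>>> below denotes a logical shift):
+     *     rhi = ahi >> (shift & 31)        (sign-propagating sra)
+     *     rlo = (alo >>> (shift & 31)) | (ahi << (32 - (shift & 31)))
+     *     if (shift & 0x20) { rlo = rhi; rhi = sign(ahi); }
+     */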
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_USHR_LONG_2ADDR: /* 0xc5 */
+/* File: mips/OP_USHR_LONG_2ADDR.S */
+    /*
+     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
+     * 32-bit shift distance.
+     */
+    /* ushr-long/2addr vA, vB */
+    GET_OPA4(t3)                           #  t3 <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a2, a3)                       #  a2 <- vB
+    EAS2(t3, rFP, t3)                      #  t3 <- &fp[A]
+    LOAD64(a0, a1, t3)                     #  a0/a1 <- vAA/vAA+1
+
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    srl       v1, a1, a2                   #  rhi<- ahi >> (shift&31)
+    srl       v0, a0, a2                   #  rlo<- alo >> (shift&31)
+    not       a0, a2                       #  alo<- 31-n  (shift is 5b)
+    sll       a1, 1
+    sll       a1, a0                       #  ahi<- ahi << (32-(shift&31))
+    or        v0, a1                       #  rlo<- rlo | ahi
+    andi      a2, 0x20                     #  shift & 0x20
+    movn      v0, v1, a2                   #  rlo<- rhi (if shift&0x20)
+    movn      v1, zero, a2                 #  rhi<- 0 (if shift&0x20)
+
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(v0, v1, t3)                    #  vAA/vAA+1 <- v0/v1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
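+
+    /*
+     * For reference: identical to shr-long/2addr above except that the
+     * high word shifts in zeroes (srl instead of sra) and a shift of 32
+     * or more fills rhi with 0 rather than the sign.
+     */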
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_ADD_FLOAT_2ADDR: /* 0xc6 */
+/* File: mips/OP_ADD_FLOAT_2ADDR.S */
+/* File: mips/binflop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" and
+     * "instr_f" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.
+     *
+     * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
+     * div-float/2addr, rem-float/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+#ifdef SOFT_FLOAT
+    GET_VREG(a0, rOBJ)                     #  a0 <- vA
+    GET_VREG(a1, a3)                       #  a1 <- vB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+#else
+    GET_VREG_F(fa0, rOBJ)
+    GET_VREG_F(fa1, a3)
+    .if 0
+    # is second operand zero?
+    li.s      ft0, 0
+    c.eq.s    fcc0, ft0, fa1
+    bc1t      fcc0, common_errDivideByZero
+    .endif
+#endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+                                  #  optional op
+#ifdef SOFT_FLOAT
+    JAL(__addsf3)                                 #  result <- op, a0-a3 changed
+    SET_VREG(v0, rOBJ)                     #  vAA <- result
+#else
+    add.s fv0, fa0, fa1
+    SET_VREG_F(fv0, rOBJ)                  #  vAA <- result
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 10-13 instructions */
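+
+    /*
+     * For reference: under SOFT_FLOAT the arithmetic is done in integer
+     * registers by the compiler runtime's single-precision helpers
+     * (__addsf3 here; __subsf3, __mulsf3 and __divsf3 in the siblings
+     * below), while the hard-float path keeps the values in FPU
+     * registers and issues the add.s directly.
+     */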
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SUB_FLOAT_2ADDR: /* 0xc7 */
+/* File: mips/OP_SUB_FLOAT_2ADDR.S */
+/* File: mips/binflop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" and
+     * "instr_f" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.
+     *
+     * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
+     * div-float/2addr, rem-float/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+#ifdef SOFT_FLOAT
+    GET_VREG(a0, rOBJ)                     #  a0 <- vA
+    GET_VREG(a1, a3)                       #  a1 <- vB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+#else
+    GET_VREG_F(fa0, rOBJ)
+    GET_VREG_F(fa1, a3)
+    .if 0
+    # is second operand zero?
+    li.s      ft0, 0
+    c.eq.s    fcc0, ft0, fa1
+    bc1t      fcc0, common_errDivideByZero
+    .endif
+#endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+                                  #  optional op
+#ifdef SOFT_FLOAT
+    JAL(__subsf3)                                 #  result <- op, a0-a3 changed
+    SET_VREG(v0, rOBJ)                     #  vAA <- result
+#else
+    sub.s fv0, fa0, fa1
+    SET_VREG_F(fv0, rOBJ)                  #  vAA <- result
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_MUL_FLOAT_2ADDR: /* 0xc8 */
+/* File: mips/OP_MUL_FLOAT_2ADDR.S */
+/* File: mips/binflop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" and
+     * "instr_f" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.
+     *
+     * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
+     * div-float/2addr, rem-float/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+#ifdef SOFT_FLOAT
+    GET_VREG(a0, rOBJ)                     #  a0 <- vA
+    GET_VREG(a1, a3)                       #  a1 <- vB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+#else
+    GET_VREG_F(fa0, rOBJ)
+    GET_VREG_F(fa1, a3)
+    .if 0
+    # is second operand zero?
+    li.s      ft0, 0
+    c.eq.s    fcc0, ft0, fa1
+    bc1t      fcc0, common_errDivideByZero
+    .endif
+#endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+                                  #  optional op
+#ifdef SOFT_FLOAT
+    JAL(__mulsf3)                                 #  result <- op, a0-a3 changed
+    SET_VREG(v0, rOBJ)                     #  vAA <- result
+#else
+    mul.s fv0, fa0, fa1
+    SET_VREG_F(fv0, rOBJ)                  #  vAA <- result
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_DIV_FLOAT_2ADDR: /* 0xc9 */
+/* File: mips/OP_DIV_FLOAT_2ADDR.S */
+/* File: mips/binflop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" and
+     * "instr_f" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.
+     *
+     * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
+     * div-float/2addr, rem-float/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+#ifdef SOFT_FLOAT
+    GET_VREG(a0, rOBJ)                     #  a0 <- vA
+    GET_VREG(a1, a3)                       #  a1 <- vB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+#else
+    GET_VREG_F(fa0, rOBJ)
+    GET_VREG_F(fa1, a3)
+    .if 0
+    # is second operand zero?
+    li.s      ft0, 0
+    c.eq.s    fcc0, ft0, fa1
+    bc1t      fcc0, common_errDivideByZero
+    .endif
+#endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+                                  #  optional op
+#ifdef SOFT_FLOAT
+    JAL(__divsf3)                                 #  result <- op, a0-a3 changed
+    SET_VREG(v0, rOBJ)                     #  vAA <- result
+#else
+    div.s fv0, fa0, fa1
+    SET_VREG_F(fv0, rOBJ)                  #  vAA <- result
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_REM_FLOAT_2ADDR: /* 0xca */
+/* File: mips/OP_REM_FLOAT_2ADDR.S */
+/* File: mips/binflop2addr.S */
+    /*
+     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" and
+     * "instr_f" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a1).  Useful for integer division and modulus.
+     *
+     * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
+     * div-float/2addr, rem-float/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+#ifdef SOFT_FLOAT
+    GET_VREG(a0, rOBJ)                     #  a0 <- vA
+    GET_VREG(a1, a3)                       #  a1 <- vB
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+#else
+    GET_VREG_F(fa0, rOBJ)
+    GET_VREG_F(fa1, a3)
+    .if 0
+    # is second operand zero?
+    li.s      ft0, 0
+    c.eq.s    fcc0, ft0, fa1
+    bc1t      fcc0, common_errDivideByZero
+    .endif
+#endif
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+                                  #  optional op
+#ifdef SOFT_FLOAT
+    JAL(fmodf)                                 #  result <- op, a0-a3 changed
+    SET_VREG(v0, rOBJ)                     #  vAA <- result
+#else
+    JAL(fmodf)
+    SET_VREG_F(fv0, rOBJ)                  #  vAA <- result
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 10-13 instructions */
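+
+    /*
+     * For reference: there is no FPU remainder instruction, so both
+     * paths call libm's fmodf(); only where the result lands (v0 vs.
+     * fv0) differs between them.
+     */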
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_ADD_DOUBLE_2ADDR: /* 0xcb */
+/* File: mips/OP_ADD_DOUBLE_2ADDR.S */
+/* File: mips/binflopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a2-a3).  Useful for integer division and modulus.
+     *
+     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+     *  div-double/2addr, rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[A]
+#ifdef SOFT_FLOAT
+    LOAD64(rARG2, rARG3, a1)               #  a2/a3 <- vBB/vBB+1
+    LOAD64(rARG0, rARG1, rOBJ)             #  a0/a1 <- vAA/vAA+1
+    .if 0
+    or        t0, rARG2, rARG3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+#else
+    LOAD64_F(fa0, fa0f, rOBJ)
+    LOAD64_F(fa1, fa1f, a1)
+    .if 0
+    li.d      ft0, 0
+    c.eq.d    fcc0, fa1, ft0
+    bc1t      fcc0, common_errDivideByZero
+    .endif
+#endif
+1:
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+                                  #  optional op
+#ifdef SOFT_FLOAT
+    JAL(__adddf3)                                 #  result <- op, a0-a3 changed
+    STORE64(rRESULT0, rRESULT1, rOBJ)
+#else
+    add.d fv0, fa0, fa1
+    STORE64_F(fv0, fv0f, rOBJ)
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 12-15 instructions */
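+
+    /*
+     * For reference: the same SOFT_FLOAT split as the float ops, using
+     * the double-precision helpers (__adddf3 and friends); LOAD64_F and
+     * STORE64_F move each double as a pair of 32-bit FPU registers
+     * (fa0/fa0f, fv0/fv0f).
+     */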
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SUB_DOUBLE_2ADDR: /* 0xcc */
+/* File: mips/OP_SUB_DOUBLE_2ADDR.S */
+/* File: mips/binflopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a2-a3).  Useful for integer division and modulus.
+     *
+     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+     *  div-double/2addr, rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[A]
+#ifdef SOFT_FLOAT
+    LOAD64(rARG2, rARG3, a1)               #  a2/a3 <- vBB/vBB+1
+    LOAD64(rARG0, rARG1, rOBJ)             #  a0/a1 <- vAA/vAA+1
+    .if 0
+    or        t0, rARG2, rARG3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+#else
+    LOAD64_F(fa0, fa0f, rOBJ)
+    LOAD64_F(fa1, fa1f, a1)
+    .if 0
+    li.d      ft0, 0
+    c.eq.d    fcc0, fa1, ft0
+    bc1t      fcc0, common_errDivideByZero
+    .endif
+#endif
+1:
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+                                  #  optional op
+#ifdef SOFT_FLOAT
+    JAL(__subdf3)                                 #  result <- op, a0-a3 changed
+    STORE64(rRESULT0, rRESULT1, rOBJ)
+#else
+    sub.d fv0, fa0, fa1
+    STORE64_F(fv0, fv0f, rOBJ)
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_MUL_DOUBLE_2ADDR: /* 0xcd */
+/* File: mips/OP_MUL_DOUBLE_2ADDR.S */
+/* File: mips/binflopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a2-a3).  Useful for integer division and modulus.
+     *
+     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+     *  div-double/2addr, rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[A]
+#ifdef SOFT_FLOAT
+    LOAD64(rARG2, rARG3, a1)               #  a2/a3 <- vBB/vBB+1
+    LOAD64(rARG0, rARG1, rOBJ)             #  a0/a1 <- vAA/vAA+1
+    .if 0
+    or        t0, rARG2, rARG3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+#else
+    LOAD64_F(fa0, fa0f, rOBJ)
+    LOAD64_F(fa1, fa1f, a1)
+    .if 0
+    li.d      ft0, 0
+    c.eq.d    fcc0, fa1, ft0
+    bc1t      fcc0, common_errDivideByZero
+    .endif
+#endif
+1:
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+                                  #  optional op
+#ifdef SOFT_FLOAT
+    JAL(__muldf3)                                 #  result <- op, a0-a3 changed
+    STORE64(rRESULT0, rRESULT1, rOBJ)
+#else
+    mul.d fv0, fa0, fa1
+    STORE64_F(fv0, fv0f, rOBJ)
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_DIV_DOUBLE_2ADDR: /* 0xce */
+/* File: mips/OP_DIV_DOUBLE_2ADDR.S */
+/* File: mips/binflopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a2-a3).  Useful for integer division and modulus.
+     *
+     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+     *  div-double/2addr, rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[A]
+#ifdef SOFT_FLOAT
+    LOAD64(rARG2, rARG3, a1)               #  a2/a3 <- vBB/vBB+1
+    LOAD64(rARG0, rARG1, rOBJ)             #  a0/a1 <- vAA/vAA+1
+    .if 0
+    or        t0, rARG2, rARG3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+#else
+    LOAD64_F(fa0, fa0f, rOBJ)
+    LOAD64_F(fa1, fa1f, a1)
+    .if 0
+    li.d      ft0, 0
+    c.eq.d    fcc0, fa1, ft0
+    bc1t      fcc0, common_errDivideByZero
+    .endif
+#endif
+1:
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+                                  #  optional op
+#ifdef SOFT_FLOAT
+    JAL(__divdf3)                                 #  result <- op, a0-a3 changed
+    STORE64(rRESULT0, rRESULT1, rOBJ)
+#else
+    div.d fv0, fa0, fa1
+    STORE64_F(fv0, fv0f, rOBJ)
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_REM_DOUBLE_2ADDR: /* 0xcf */
+/* File: mips/OP_REM_DOUBLE_2ADDR.S */
+/* File: mips/binflopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be a MIPS instruction or a function call.
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (a2-a3).  Useful for integer division and modulus.
+     *
+     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+     *  div-double/2addr, rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[A]
+#ifdef SOFT_FLOAT
+    LOAD64(rARG2, rARG3, a1)               #  a2/a3 <- vBB/vBB+1
+    LOAD64(rARG0, rARG1, rOBJ)             #  a0/a1 <- vAA/vAA+1
+    .if 0
+    or        t0, rARG2, rARG3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+#else
+    LOAD64_F(fa0, fa0f, rOBJ)
+    LOAD64_F(fa1, fa1f, a1)
+    .if 0
+    li.d      ft0, 0
+    c.eq.d    fcc0, fa1, ft0
+    bc1t      fcc0, common_errDivideByZero
+    .endif
+#endif
+1:
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+                                  #  optional op
+#ifdef SOFT_FLOAT
+    JAL(fmod)                                 #  result <- op, a0-a3 changed
+    STORE64(rRESULT0, rRESULT1, rOBJ)
+#else
+    JAL(fmod)
+    STORE64_F(fv0, fv0f, rOBJ)
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_ADD_INT_LIT16: /* 0xd0 */
+/* File: mips/OP_ADD_INT_LIT16.S */
+/* File: mips/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * the literal CCCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    # binop/lit16 vA, vB,                  /* +CCCC */
+    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
+    GET_OPB(a2)                            #  a2 <- B
+    GET_OPA(rOBJ)                          #  rOBJ <- A+
+    GET_VREG(a0, a2)                       #  a0 <- vB
+    and       rOBJ, rOBJ, 15
+    .if 0
+    # cmp a1, 0; is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    addu a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-13 instructions */
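+
+    /*
+     * For reference, the format 22s decode above: the second code unit
+     * is the literal, fetched sign-extended (ssssCCCC); GET_OPA returns
+     * the full 8-bit field, so "and rOBJ, rOBJ, 15" masks it down to
+     * the 4-bit A register index.
+     */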
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_RSUB_INT: /* 0xd1 */
+/* File: mips/OP_RSUB_INT.S */
+/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
+/* File: mips/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * the literal CCCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    # binop/lit16 vA, vB,                  /* +CCCC */
+    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
+    GET_OPB(a2)                            #  a2 <- B
+    GET_OPA(rOBJ)                          #  rOBJ <- A+
+    GET_VREG(a0, a2)                       #  a0 <- vB
+    and       rOBJ, rOBJ, 15
+    .if 0
+    # cmp a1, 0; is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    subu a0, a1, a0                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_MUL_INT_LIT16: /* 0xd2 */
+/* File: mips/OP_MUL_INT_LIT16.S */
+/* File: mips/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * the literal CCCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    # binop/lit16 vA, vB,                  /* +CCCC */
+    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
+    GET_OPB(a2)                            #  a2 <- B
+    GET_OPA(rOBJ)                          #  rOBJ <- A+
+    GET_VREG(a0, a2)                       #  a0 <- vB
+    and       rOBJ, rOBJ, 15
+    .if 0
+    # cmp a1, 0; is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    mul a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_DIV_INT_LIT16: /* 0xd3 */
+/* File: mips/OP_DIV_INT_LIT16.S */
+/* File: mips/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * the literal CCCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    # binop/lit16 vA, vB,                  /* +CCCC */
+    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
+    GET_OPB(a2)                            #  a2 <- B
+    GET_OPA(rOBJ)                          #  rOBJ <- A+
+    GET_VREG(a0, a2)                       #  a0 <- vB
+    and       rOBJ, rOBJ, 15
+    .if 1
+    # cmp a1, 0; is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    div zero, a0, a1; mflo a0                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-13 instructions */
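+
+    /*
+     * For reference: MIPS div leaves the quotient in LO and the
+     * remainder in HI, so div-int reads mflo while rem-int below reads
+     * mfhi.  Writing to $zero in "div zero, a0, a1" is the usual
+     * assembler idiom for issuing the raw divide without the
+     * assembler's own zero-divisor trap; the ".if 1" check above has
+     * already branched to common_errDivideByZero.
+     */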
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_REM_INT_LIT16: /* 0xd4 */
+/* File: mips/OP_REM_INT_LIT16.S */
+/* File: mips/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * the literal CCCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    # binop/lit16 vA, vB,                  /* +CCCC */
+    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
+    GET_OPB(a2)                            #  a2 <- B
+    GET_OPA(rOBJ)                          #  rOBJ <- A+
+    GET_VREG(a0, a2)                       #  a0 <- vB
+    and       rOBJ, rOBJ, 15
+    .if 1
+    # cmp a1, 0; is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    div zero, a0, a1; mfhi a0                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_AND_INT_LIT16: /* 0xd5 */
+/* File: mips/OP_AND_INT_LIT16.S */
+/* File: mips/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * the literal CCCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    # binop/lit16 vA, vB,                  /* +CCCC */
+    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
+    GET_OPB(a2)                            #  a2 <- B
+    GET_OPA(rOBJ)                          #  rOBJ <- A+
+    GET_VREG(a0, a2)                       #  a0 <- vB
+    and       rOBJ, rOBJ, 15
+    .if 0
+    # cmp a1, 0; is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    and a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_OR_INT_LIT16: /* 0xd6 */
+/* File: mips/OP_OR_INT_LIT16.S */
+/* File: mips/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * the literal CCCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    # binop/lit16 vA, vB,                  /* +CCCC */
+    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
+    GET_OPB(a2)                            #  a2 <- B
+    GET_OPA(rOBJ)                          #  rOBJ <- A+
+    GET_VREG(a0, a2)                       #  a0 <- vB
+    and       rOBJ, rOBJ, 15
+    .if 0
+    # cmp a1, 0; is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    or a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_XOR_INT_LIT16: /* 0xd7 */
+/* File: mips/OP_XOR_INT_LIT16.S */
+/* File: mips/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * the literal CCCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    # binop/lit16 vA, vB,                  /* +CCCC */
+    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
+    GET_OPB(a2)                            #  a2 <- B
+    GET_OPA(rOBJ)                          #  rOBJ <- A+
+    GET_VREG(a0, a2)                       #  a0 <- vB
+    and       rOBJ, rOBJ, 15
+    .if 0
+    # cmp a1, 0; is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    xor a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_ADD_INT_LIT8: /* 0xd8 */
+/* File: mips/OP_ADD_INT_LIT8.S */
+/* File: mips/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * the literal CC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    # binop/lit8 vAA, vBB,                 /* +CC */
+    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a3, 255                  #  a2 <- BB
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    addu a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-12 instructions */
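+
+    /*
+     * For reference, the format 22b decode above: one sign-extending
+     * fetch yields ssssCCBB, from which "and a2, a3, 255" isolates the
+     * vBB register index and "sra a1, a3, 8" recovers the sign-extended
+     * 8-bit literal CC in a single shift.
+     */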
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_RSUB_INT_LIT8: /* 0xd9 */
+/* File: mips/OP_RSUB_INT_LIT8.S */
+/* File: mips/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * the literal CC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    # binop/lit8 vAA, vBB,                 /* +CC */
+    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a3, 255                  #  a2 <- BB
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    subu a0, a1, a0                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_MUL_INT_LIT8: /* 0xda */
+/* File: mips/OP_MUL_INT_LIT8.S */
+/* File: mips/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    # binop/lit8 vAA, vBB,                 /* +CC */
+    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a3, 255                  #  a2 <- BB
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    mul a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_DIV_INT_LIT8: /* 0xdb */
+/* File: mips/OP_DIV_INT_LIT8.S */
+/* File: mips/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    # binop/lit8 vAA, vBB,                 /* +CC */
+    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a3, 255                  #  a2 <- BB
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
+    .if 1
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
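+    # note: "div zero, a0, a1" requests the bare divide (quotient in LO,
+    # remainder in HI) without the assembler's own zero-divisor check;
+    # the beqz above has already rejected a zero divisor.  The same idiom
+    # appears in rem-int/lit8 below, where mfhi takes the remainder.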
+    div zero, a0, a1; mflo a0                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_REM_INT_LIT8: /* 0xdc */
+/* File: mips/OP_REM_INT_LIT8.S */
+/* File: mips/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    # binop/lit8 vAA, vBB,                 /* +CC */
+    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a3, 255                  #  a2 <- BB
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
+    .if 1
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    div zero, a0, a1; mfhi a0                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_AND_INT_LIT8: /* 0xdd */
+/* File: mips/OP_AND_INT_LIT8.S */
+/* File: mips/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    # binop/lit8 vAA, vBB,                 /* +CC */
+    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a3, 255                  #  a2 <- BB
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    and a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_OR_INT_LIT8: /* 0xde */
+/* File: mips/OP_OR_INT_LIT8.S */
+/* File: mips/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    # binop/lit8 vAA, vBB,                 /* +CC */
+    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a3, 255                  #  a2 <- BB
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    or a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_XOR_INT_LIT8: /* 0xdf */
+/* File: mips/OP_XOR_INT_LIT8.S */
+/* File: mips/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    # binop/lit8 vAA, vBB,                 /* +CC */
+    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a3, 255                  #  a2 <- BB
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    xor a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SHL_INT_LIT8: /* 0xe0 */
+/* File: mips/OP_SHL_INT_LIT8.S */
+/* File: mips/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    # binop/lit8 vAA, vBB,                 /* +CC */
+    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a3, 255                  #  a2 <- BB
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    and a1, a1, 31                              #  optional op
+    sll a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SHR_INT_LIT8: /* 0xe1 */
+/* File: mips/OP_SHR_INT_LIT8.S */
+/* File: mips/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    # binop/lit8 vAA, vBB,                 /* +CC */
+    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a3, 255                  #  a2 <- BB
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    and a1, a1, 31                              #  optional op
+    sra a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_USHR_INT_LIT8: /* 0xe2 */
+/* File: mips/OP_USHR_INT_LIT8.S */
+/* File: mips/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    # binop/lit8 vAA, vBB,                 /* +CC */
+    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a3, 255                  #  a2 <- BB
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    and a1, a1, 31                              #  optional op
+    srl a0, a0, a1                                 #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
+    /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IGET_VOLATILE: /* 0xe3 */
+/* File: mips/OP_IGET_VOLATILE.S */
+/* File: mips/OP_IGET.S */
+    /*
+     * General 32-bit instance field get.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    # op vA, vB, field                     /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IGET_VOLATILE_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # test results
+    move      a0, v0
+    bnez      v0, .LOP_IGET_VOLATILE_finish
+    b         common_exceptionThrown
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IPUT_VOLATILE: /* 0xe4 */
+/* File: mips/OP_IPUT_VOLATILE.S */
+/* File: mips/OP_IPUT.S */
+    /*
+     * General 32-bit instance field put.
+     *
+     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+     */
+    # op vA, vB, field                     /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IPUT_VOLATILE_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # success?
+    move      a0, v0
+    bnez      v0, .LOP_IPUT_VOLATILE_finish       #  yes, finish up
+    b         common_exceptionThrown
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SGET_VOLATILE: /* 0xe5 */
+/* File: mips/OP_SGET_VOLATILE.S */
+/* File: mips/OP_SGET.S */
+    /*
+     * General 32-bit SGET handler.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    # op vAA, field                        /* BBBB */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref BBBB
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    # is resolved entry !null?
+    bnez      a0, .LOP_SGET_VOLATILE_finish
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  BBBB field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() could throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    # success?
+    beqz      v0, common_exceptionThrown   #  no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b        .LOP_SGET_VOLATILE_finish            # resume
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SPUT_VOLATILE: /* 0xe6 */
+/* File: mips/OP_SPUT_VOLATILE.S */
+/* File: mips/OP_SPUT.S */
+    /*
+     * General 32-bit SPUT handler.
+     *
+     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
+     */
+    # op vAA, field                        /* BBBB */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref BBBB
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    bnez      a0, .LOP_SPUT_VOLATILE_finish       #  branch if already resolved
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  BBBB field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() may throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    beqz      v0, common_exceptionThrown   #  success? no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b        .LOP_SPUT_VOLATILE_finish            # resume
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IGET_OBJECT_VOLATILE: /* 0xe7 */
+/* File: mips/OP_IGET_OBJECT_VOLATILE.S */
+/* File: mips/OP_IGET.S */
+    /*
+     * General 32-bit instance field get.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    # op vA, vB, field                     /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IGET_OBJECT_VOLATILE_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # test results
+    move      a0, v0
+    bnez      v0, .LOP_IGET_OBJECT_VOLATILE_finish
+    b         common_exceptionThrown
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IGET_WIDE_VOLATILE: /* 0xe8 */
+/* File: mips/OP_IGET_WIDE_VOLATILE.S */
+/* File: mips/OP_IGET_WIDE.S */
+    /*
+     * Wide 32-bit instance field get.
+     */
+    # iget-wide vA, vB, field              /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IGET_WIDE_VOLATILE_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # test return code
+    move      a0, v0
+    bnez      v0, .LOP_IGET_WIDE_VOLATILE_finish
+    b         common_exceptionThrown
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IPUT_WIDE_VOLATILE: /* 0xe9 */
+/* File: mips/OP_IPUT_WIDE_VOLATILE.S */
+/* File: mips/OP_IPUT_WIDE.S */
+    # iput-wide vA, vB, field              /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IPUT_WIDE_VOLATILE_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # success?
+    move      a0, v0
+    bnez      v0, .LOP_IPUT_WIDE_VOLATILE_finish       #  yes, finish up
+    b         common_exceptionThrown
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SGET_WIDE_VOLATILE: /* 0xea */
+/* File: mips/OP_SGET_WIDE_VOLATILE.S */
+/* File: mips/OP_SGET_WIDE.S */
+    /*
+     * 64-bit SGET handler.
+     */
+    # sget-wide vAA, field                 /* BBBB */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref BBBB
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_SGET_WIDE_VOLATILE_finish
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  BBBB field ref
+     *  rBIX: dvmDex->pResFields
+     *
+     * Returns StaticField pointer in v0.
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() could throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    # success?
+    beqz      v0, common_exceptionThrown   #  no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+
+    b        .LOP_SGET_WIDE_VOLATILE_finish            # resume
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SPUT_WIDE_VOLATILE: /* 0xeb */
+/* File: mips/OP_SPUT_WIDE_VOLATILE.S */
+/* File: mips/OP_SPUT_WIDE.S */
+    /*
+     * 64-bit SPUT handler.
+     */
+    # sput-wide vAA, field                 /* BBBB */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref BBBB
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    GET_OPA(t0)                            #  t0 <- AA
+    LOAD_eas2(a2, rBIX, a1)                #  a2 <- resolved StaticField ptr
+    EAS2(rOBJ, rFP, t0)                    #  rOBJ<- &fp[AA]
+    # is resolved entry null?
+    beqz      a2, .LOP_SPUT_WIDE_VOLATILE_resolve      #  yes, do resolve
+.LOP_SPUT_WIDE_VOLATILE_finish:                        #  field ptr in a2, AA in rOBJ
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    LOAD64(a0, a1, rOBJ)                   #  a0/a1 <- vAA/vAA+1
+    GET_INST_OPCODE(rBIX)                  #  extract opcode from rINST
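+    # a volatile 64-bit store must be atomic, so the .if 1 path below
+    # hands the value to dvmQuasiAtomicSwap64Sync instead of STORE64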
+    .if 1
+    addu    a2, offStaticField_value       #  a2<- pointer to data
+    JAL(dvmQuasiAtomicSwap64Sync)          #  stores a0/a1 into addr a2
+    .else
+    STORE64_off(a0, a1, a2, offStaticField_value) #  field <- vAA/vAA+1
+    .endif
+    GOTO_OPCODE(rBIX)                      #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_BREAKPOINT: /* 0xec */
+    /* (stub) */
+    SAVE_PC_TO_SELF()            # only need to export PC and FP
+    SAVE_FP_TO_SELF()
+    move        a0, rSELF        # self is first arg to function
+    JAL(dvmMterp_OP_BREAKPOINT)      # call
+    LOAD_PC_FROM_SELF()          # retrieve updated values
+    LOAD_FP_FROM_SELF()
+    FETCH_INST()                 # load next instruction from rPC
+    GET_INST_OPCODE(t0)          # ...trim down to just the opcode
+    GOTO_OPCODE(t0)              # ...and jump to the handler
+/* ------------------------------ */
+    .balign 128
+.L_OP_THROW_VERIFICATION_ERROR: /* 0xed */
+/* File: mips/OP_THROW_VERIFICATION_ERROR.S */
+    /*
+     * Handle a throw-verification-error instruction.  This throws an
+     * exception for an error discovered during verification.  The
+     * exception is indicated by AA, with some detail provided by BBBB.
+     */
+    /* op AA, ref@BBBB */
+
+    LOAD_rSELF_method(a0)                  #  a0 <- self->method
+    FETCH(a2, 1)                           #  a2 <- BBBB
+    EXPORT_PC()                            #  export the PC
+    GET_OPA(a1)                            #  a1 <- AA
+    JAL(dvmThrowVerificationError)         #  always throws
+    b         common_exceptionThrown       #  handle exception
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_EXECUTE_INLINE: /* 0xee */
+/* File: mips/OP_EXECUTE_INLINE.S */
+    /*
+     * Execute a "native inline" instruction.
+     *
+     * We need to call an InlineOp4Func:
+     *  bool (*func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult)
+     *
+     * The first four args are in a0-a3, pointer to return value storage
+     * is on the stack.  The function's return value is a flag that tells
+     * us if an exception was thrown.
+     *
+     * TUNING: could maintain two tables, pointer in Thread and
+     * swap if profiler/debugger active.
+     */
+    /* [opt] execute-inline vAA, {vC, vD, vE, vF}, inline@BBBB */
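+    /*
+     * Editorial note: BBBB indexes the VM's inline-op table (assumed
+     * to be gDvmInlineOpsTable in vm/InlineNative.cpp), letting
+     * intrinsics such as String.length() bypass a full method invoke.
+     */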
+    lhu       a2, offThread_subMode(rSELF)
+    FETCH(rBIX, 1)                         #  rBIX <- BBBB
+    EXPORT_PC()                            #  can throw
+    and       a2, kSubModeDebugProfile     #  any debug/profiler submode active?
+    bnez      a2, .LOP_EXECUTE_INLINE_debugmode    #  yes - take slow path
+.LOP_EXECUTE_INLINE_resume:
+    addu      a1, rSELF, offThread_retval  #  a1 <- &self->retval
+    GET_OPB(a0)                            #  a0 <- B
+    # Stack should have 16/20 available
+    sw        a1, STACK_OFFSET_ARG04(sp)   #  push &self->retval
+    BAL(.LOP_EXECUTE_INLINE_continue)              #  make call; will return after
+    lw        gp, STACK_OFFSET_GP(sp)      #  restore gp
+    # test boolean result of inline
+    beqz      v0, common_exceptionThrown   #  returned false, handle exception
+    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_EXECUTE_INLINE_RANGE: /* 0xef */
+/* File: mips/OP_EXECUTE_INLINE_RANGE.S */
+    /*
+     * Execute a "native inline" instruction, using "/range" semantics.
+     * Same idea as execute-inline, but we get the args differently.
+     *
+     * We need to call an InlineOp4Func:
+     *  bool (*func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult)
+     *
+     * The first four args are in a0-a3, pointer to return value storage
+     * is on the stack.  The function's return value is a flag that tells
+     * us if an exception was thrown.
+     */
+    /* [opt] execute-inline/range {vCCCC..v(CCCC+AA-1)}, inline@BBBB */
+    lhu       a2, offThread_subMode(rSELF)
+    FETCH(rBIX, 1)                       # rBIX<- BBBB
+    EXPORT_PC()                          # can throw
+    and       a2, kSubModeDebugProfile   # any debug/profiler submode active?
+    bnez      a2, .LOP_EXECUTE_INLINE_RANGE_debugmode  # yes - take slow path
+.LOP_EXECUTE_INLINE_RANGE_resume:
+    addu      a1, rSELF, offThread_retval # a1<- &self->retval
+    GET_OPA(a0)
+    sw        a1, STACK_OFFSET_ARG04(sp)  # push &self->retval
+    BAL(.LOP_EXECUTE_INLINE_RANGE_continue)             # make call; will return after
+    lw        gp, STACK_OFFSET_GP(sp)     #  restore gp
+    beqz      v0, common_exceptionThrown  # returned false, handle exception
+    FETCH_ADVANCE_INST(3)                 # advance rPC, load rINST
+    GET_INST_OPCODE(t0)                   # extract opcode from rINST
+    GOTO_OPCODE(t0)                       # jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_INVOKE_OBJECT_INIT_RANGE: /* 0xf0 */
+/* File: mips/OP_INVOKE_OBJECT_INIT_RANGE.S */
+    /*
+     * Invoke Object.<init> on an object.  In practice we know that
+     * Object's nullary constructor doesn't do anything, so we just
+     * skip it unless a debugger is active.
+     */
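+    /*
+     * Even when the call is skipped, a finalizable class must still be
+     * registered, hence the CLASS_ISFINALIZABLE test and the
+     * dvmSetFinalizable call below.
+     */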
+    FETCH(a1, 2)                  # a1<- CCCC
+    GET_VREG(a0, a1)                    # a0<- "this" ptr
+    # check for NULL
+    beqz    a0, common_errNullObject    # export PC and throw NPE
+    LOAD_base_offObject_clazz(a1, a0)   # a1<- obj->clazz
+    LOAD_base_offClassObject_accessFlags(a2, a1) # a2<- clazz->accessFlags
+    and     a2, CLASS_ISFINALIZABLE     # is this class finalizable?
+    beqz    a2, .LOP_INVOKE_OBJECT_INIT_RANGE_finish      # no, go
+
+.LOP_INVOKE_OBJECT_INIT_RANGE_setFinal:
+    EXPORT_PC()                         # can throw
+    JAL(dvmSetFinalizable)              # call dvmSetFinalizable(obj)
+    LOAD_offThread_exception(a0, rSELF)	# a0<- self->exception
+    # exception pending?
+    bnez    a0, common_exceptionThrown  # yes, handle it
+
+.LOP_INVOKE_OBJECT_INIT_RANGE_finish:
+    lhu     a1, offThread_subMode(rSELF)
+    and     a1, kSubModeDebuggerActive  # debugger active?
+    bnez    a1, .LOP_INVOKE_OBJECT_INIT_RANGE_debugger    # Yes - skip optimization
+    FETCH_ADVANCE_INST(2+1)       # advance to next instr, load rINST
+    GET_INST_OPCODE(t0)                 # t0<- opcode from rINST
+    GOTO_OPCODE(t0)                     # execute it
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_RETURN_VOID_BARRIER: /* 0xf1 */
+/* File: mips/OP_RETURN_VOID_BARRIER.S */
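+    # emitted in place of return-void for constructors that write final
+    # fields (a dexopt-time substitution); the barrier publishes those
+    # stores before the reference can be handed to another thread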
+    SMP_DMB
+    b         common_returnFromMethod
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IGET_QUICK: /* 0xf2 */
+/* File: mips/OP_IGET_QUICK.S */
+    /* For: iget-quick, iget-object-quick */
+    # op vA, vB, offset                    /* CCCC */
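+    # CCCC is a raw byte offset into the object, precomputed by dexopt,
+    # so no field resolution is needed on this path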
+    GET_OPB(a2)                            #  a2 <- B
+    GET_VREG(a3, a2)                       #  a3 <- object we're operating on
+    FETCH(a1, 1)                           #  a1 <- field byte offset
+    GET_OPA4(a2)                           #  a2 <- A(+)
+    # check object for null
+    beqz      a3, common_errNullObject     #  object was null
+    addu      t0, a3, a1                   #  t0 <- address of obj.field
+    lw        a0, 0(t0)                    #  a0 <- obj.field (always 32 bits)
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG(a0, a2)                       #  fp[A] <- a0
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IGET_WIDE_QUICK: /* 0xf3 */
+/* File: mips/OP_IGET_WIDE_QUICK.S */
+    # iget-wide-quick vA, vB, offset       /* CCCC */
+    GET_OPB(a2)                            #  a2 <- B
+    GET_VREG(a3, a2)                       #  a3 <- object we're operating on
+    FETCH(a1, 1)                           #  a1 <- field byte offset
+    GET_OPA4(a2)                           #  a2 <- A(+)
+    # check object for null
+    beqz      a3, common_errNullObject     #  object was null
+    addu      t0, a3, a1                   #  t0 <- a3 + a1
+    LOAD64(a0, a1, t0)                     #  a0 <- obj.field (64 bits, aligned)
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    EAS2(a3, rFP, a2)
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(a0, a1, a3)                    #  fp[A] <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IGET_OBJECT_QUICK: /* 0xf4 */
+/* File: mips/OP_IGET_OBJECT_QUICK.S */
+/* File: mips/OP_IGET_QUICK.S */
+    /* For: iget-quick, iget-object-quick */
+    # op vA, vB, offset                    /* CCCC */
+    GET_OPB(a2)                            #  a2 <- B
+    GET_VREG(a3, a2)                       #  a3 <- object we're operating on
+    FETCH(a1, 1)                           #  a1 <- field byte offset
+    GET_OPA4(a2)                           #  a2 <- A(+)
+    # check object for null
+    beqz      a3, common_errNullObject     #  object was null
+    addu      t0, a3, a1                   #  t0 <- address of obj.field
+    lw        a0, 0(t0)                    #  a0 <- obj.field (always 32 bits)
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG(a0, a2)                       #  fp[A] <- a0
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IPUT_QUICK: /* 0xf5 */
+/* File: mips/OP_IPUT_QUICK.S */
+    /* For: iput-quick, iput-object-quick */
+    # op vA, vB, offset                    /* CCCC */
+    GET_OPB(a2)                            #  a2 <- B
+    GET_VREG(a3, a2)                       #  a3 <- fp[B], the object pointer
+    FETCH(a1, 1)                           #  a1 <- field byte offset
+    GET_OPA4(a2)                           #  a2 <- A(+)
+    beqz      a3, common_errNullObject     #  object was null
+    GET_VREG(a0, a2)                       #  a0 <- fp[A]
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    addu      t0, a3, a1
+    sw        a0, 0(t0)                    #  obj.field (always 32 bits) <- a0
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IPUT_WIDE_QUICK: /* 0xf6 */
+/* File: mips/OP_IPUT_WIDE_QUICK.S */
+    # iput-wide-quick vA, vB, offset       /* CCCC */
+    GET_OPA4(a0)                           #  a0 <- A(+)
+    GET_OPB(a1)                            #  a1 <- B
+    GET_VREG(a2, a1)                       #  a2 <- fp[B], the object pointer
+    EAS2(a3, rFP, a0)                      #  a3 <- &fp[A]
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[A]
+    # check object for null
+    beqz      a2, common_errNullObject     #  object was null
+    FETCH(a3, 1)                           #  a3 <- field byte offset
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    addu      a2, a2, a3                   #  a2 <- address of obj.field
+    STORE64(a0, a1, a2)                    #  obj.field (64 bits, aligned) <- a0/a1
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IPUT_OBJECT_QUICK: /* 0xf7 */
+/* File: mips/OP_IPUT_OBJECT_QUICK.S */
+    /* For: iput-object-quick */
+    # op vA, vB, offset                    /* CCCC */
+    GET_OPB(a2)                            #  a2 <- B
+    GET_VREG(a3, a2)                       #  a3 <- fp[B], the object pointer
+    FETCH(a1, 1)                           #  a1 <- field byte offset
+    GET_OPA4(a2)                           #  a2 <- A(+)
+    beqz      a3, common_errNullObject     #  object was null
+    GET_VREG(a0, a2)                       #  a0 <- fp[A]
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    addu      t0, a3, a1
+    sw        a0, 0(t0)                    #  obj.field (always 32 bits) <- a0
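+    # GC write barrier: for a non-null stored reference, dirty the card
+    # covering the host object (card index = obj address >> GC_CARD_SHIFT)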
+    beqz      a0, 1f
+    lw        a2, offThread_cardTable(rSELF) #  a2 <- card table base
+    srl       t1, a3, GC_CARD_SHIFT
+    addu      t2, a2, t1
+    sb        a2, 0(t2)
+1:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_INVOKE_VIRTUAL_QUICK: /* 0xf8 */
+/* File: mips/OP_INVOKE_VIRTUAL_QUICK.S */
+    /*
+     * Handle an optimized virtual method call.
+     *
+     * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range
+     */
+    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
+    # op vAA, {vCCCC..v(CCCC+AA-1)}, meth  /* BBBB */
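+    # BBBB was rewritten by dexopt from a method reference into a raw
+    # vtable index, so dispatch is simply this->clazz->vtable[BBBB]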
+    FETCH(a3, 2)                           #  a3 <- FEDC or CCCC
+    FETCH(a1, 1)                           #  a1 <- BBBB
+    .if (!0)
+    and       a3, a3, 15                   #  a3 <- C (or stays CCCC)
+    .endif
+    GET_VREG(rOBJ, a3)                     #  rOBJ <- vC ("this" ptr)
+    # is "this" null?
+    beqz      rOBJ, common_errNullObject   #  null "this", throw exception
+    LOAD_base_offObject_clazz(a2, rOBJ)    #  a2 <- thisPtr->clazz
+    LOAD_base_offClassObject_vtable(a2, a2) #  a2 <- thisPtr->clazz->vtable
+    EXPORT_PC()                            #  invoke must export
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- vtable[BBBB]
+    b         common_invokeMethodNoRange #  (a0=method, rOBJ="this")
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_INVOKE_VIRTUAL_QUICK_RANGE: /* 0xf9 */
+/* File: mips/OP_INVOKE_VIRTUAL_QUICK_RANGE.S */
+/* File: mips/OP_INVOKE_VIRTUAL_QUICK.S */
+    /*
+     * Handle an optimized virtual method call.
+     *
+     * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range
+     */
+    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
+    # op vAA, {vCCCC..v(CCCC+AA-1)}, meth  /* BBBB */
+    FETCH(a3, 2)                           #  a3 <- FEDC or CCCC
+    FETCH(a1, 1)                           #  a1 <- BBBB
+    .if (!1)
+    and       a3, a3, 15                   #  a3 <- C (or stays CCCC)
+    .endif
+    GET_VREG(rOBJ, a3)                     #  rOBJ <- vC ("this" ptr)
+    # is "this" null?
+    beqz      rOBJ, common_errNullObject   #  null "this", throw exception
+    LOAD_base_offObject_clazz(a2, rOBJ)    #  a2 <- thisPtr->clazz
+    LOAD_base_offClassObject_vtable(a2, a2) #  a2 <- thisPtr->clazz->vtable
+    EXPORT_PC()                            #  invoke must export
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- vtable[BBBB]
+    b         common_invokeMethodRange #  (a0=method, rOBJ="this")
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_INVOKE_SUPER_QUICK: /* 0xfa */
+/* File: mips/OP_INVOKE_SUPER_QUICK.S */
+    /*
+     * Handle an optimized "super" method call.
+     *
+     * for: [opt] invoke-super-quick, invoke-super-quick/range
+     */
+    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
+    # op vAA, {vCCCC..v(CCCC+AA-1)}, meth  /* BBBB */
+    FETCH(t0, 2)                           #  t0 <- GFED or CCCC
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    .if (!0)
+    and       t0, t0, 15                   #  t0 <- D (or stays CCCC)
+    .endif
+    FETCH(a1, 1)                           #  a1 <- BBBB
+    LOAD_base_offMethod_clazz(a2, a2)      #  a2 <- method->clazz
+    EXPORT_PC()                            #  must export for invoke
+    LOAD_base_offClassObject_super(a2, a2) #  a2 <- method->clazz->super
+    GET_VREG(rOBJ, t0)                     #  rOBJ <- "this"
+    LOAD_base_offClassObject_vtable(a2, a2) #  a2 <- ...clazz->super->vtable
+    # is "this" null ?
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- super->vtable[BBBB]
+    beqz      rOBJ, common_errNullObject   #  "this" is null, throw exception
+    b         common_invokeMethodNoRange #  (a0=method, rOBJ="this")
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_INVOKE_SUPER_QUICK_RANGE: /* 0xfb */
+/* File: mips/OP_INVOKE_SUPER_QUICK_RANGE.S */
+/* File: mips/OP_INVOKE_SUPER_QUICK.S */
+    /*
+     * Handle an optimized "super" method call.
+     *
+     * for: [opt] invoke-super-quick, invoke-super-quick/range
+     */
+    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
+    # op vAA, {vCCCC..v(CCCC+AA-1)}, meth  /* BBBB */
+    FETCH(t0, 2)                           #  t0 <- GFED or CCCC
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    .if (!1)
+    and       t0, t0, 15                   #  t0 <- D (or stays CCCC)
+    .endif
+    FETCH(a1, 1)                           #  a1 <- BBBB
+    LOAD_base_offMethod_clazz(a2, a2)      #  a2 <- method->clazz
+    EXPORT_PC()                            #  must export for invoke
+    LOAD_base_offClassObject_super(a2, a2) #  a2 <- method->clazz->super
+    GET_VREG(rOBJ, t0)                     #  rOBJ <- "this"
+    LOAD_base_offClassObject_vtable(a2, a2) #  a2 <- ...clazz->super->vtable
+    # is "this" null ?
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- super->vtable[BBBB]
+    beqz      rOBJ, common_errNullObject   #  "this" is null, throw exception
+    b         common_invokeMethodRange #  (a0=method, rOBJ="this")
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IPUT_OBJECT_VOLATILE: /* 0xfc */
+/* File: mips/OP_IPUT_OBJECT_VOLATILE.S */
+/* File: mips/OP_IPUT_OBJECT.S */
+    /*
+     * 32-bit instance field put.
+     *
+     * for: iput-object, iput-object-volatile
+     */
+    # op vA, vB, field                     /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IPUT_OBJECT_VOLATILE_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # success?
+    move      a0, v0
+    bnez      v0, .LOP_IPUT_OBJECT_VOLATILE_finish       #  yes, finish up
+    b         common_exceptionThrown
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SGET_OBJECT_VOLATILE: /* 0xfd */
+/* File: mips/OP_SGET_OBJECT_VOLATILE.S */
+/* File: mips/OP_SGET.S */
+    /*
+     * General 32-bit SGET handler.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    # op vAA, field                        /* BBBB */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref BBBB
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    # is resolved entry !null?
+    bnez      a0, .LOP_SGET_OBJECT_VOLATILE_finish
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  BBBB field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() could throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    # success?
+    beqz      v0, common_exceptionThrown   #  no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b        .LOP_SGET_OBJECT_VOLATILE_finish            # resume
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SPUT_OBJECT_VOLATILE: /* 0xfe */
+/* File: mips/OP_SPUT_OBJECT_VOLATILE.S */
+/* File: mips/OP_SPUT_OBJECT.S */
+    /*
+     * General 32-bit SPUT handler.
+     *
+     * for: sput-object, sput-object-volatile
+     */
+    /* op vAA, field@BBBB */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref BBBB
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    bnez      a0, .LOP_SPUT_OBJECT_VOLATILE_finish       #  branch if already resolved
+
+    /* Continuation if the field has not yet been resolved.
+     * a1:  BBBB field ref
+     * rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() may throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    beqz      v0, common_exceptionThrown   #  success? no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b       .LOP_SPUT_OBJECT_VOLATILE_finish             # resume
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_DISPATCH_FF: /* 0xff */
+    /* (stub) */
+    SAVE_PC_TO_SELF()            # only need to export PC and FP
+    SAVE_FP_TO_SELF()
+    move        a0, rSELF        # self is first arg to function
+    JAL(dvmMterp_OP_DISPATCH_FF)      # call
+    LOAD_PC_FROM_SELF()          # retrieve updated values
+    LOAD_FP_FROM_SELF()
+    FETCH_INST()                 # load next instruction from rPC
+    GET_INST_OPCODE(t0)          # ...trim down to just the opcode
+    GOTO_OPCODE(t0)              # ...and jump to the handler
+/* ------------------------------ */
+    .balign 128
+.L_OP_CONST_CLASS_JUMBO: /* 0x100 */
+/* File: mips/OP_CONST_CLASS_JUMBO.S */
+    /* const-class/jumbo vBBBB, Class@AAAAAAAA */
+    FETCH(a0, 1)                           # a0<- aaaa (lo)
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- self->methodClassDex
+    FETCH(a1, 2)                           # a1<- AAAA (hi)
+    LOAD_base_offDvmDex_pResClasses(a2, a2) #  a2 <- dvmDex->pResClasses
+    sll       a1,a1,16
+    or        a1, a0, a1                  # a1<- AAAAaaaa
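+    # e.g. (illustrative) a class ref of 0x00012345 arrives as
+    # aaaa = 0x2345 and AAAA = 0x0001; the sll/or above rebuilds
+    # a1 = 0x00012345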
+    FETCH(rOBJ, 3)                        # rOBJ<- BBBB
+    LOAD_eas2(v0, a2, a1)                  #  v0 <- pResClasses[AAAAaaaa]
+
+    bnez      v0, .LOP_CONST_CLASS_JUMBO_resolve      #  v0!=0 => resolved-ok
+    /*
+     * Continuation if the Class has not yet been resolved.
+     *  a1: AAAAAAAA (Class ref)
+     *  rOBJ: target register
+     */
+    EXPORT_PC()
+    LOAD_rSELF_method(a0)                  #  a0 <- self->method
+    li        a2, 1                        #  a2 <- true
+    LOAD_base_offMethod_clazz(a0, a0)      #  a0 <- method->clazz
+    JAL(dvmResolveClass)                   #  v0 <- Class reference
+    # failed==0?
+    beqz      v0, common_exceptionThrown   #  yup, handle the exception
+
+.LOP_CONST_CLASS_JUMBO_resolve:
+    FETCH_ADVANCE_INST(4)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(v0, rOBJ, t0)            #  vBBBB <- v0
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_CHECK_CAST_JUMBO: /* 0x101 */
+/* File: mips/OP_CHECK_CAST_JUMBO.S */
+    /*
+     * Check to see if a cast from one class to another is allowed.
+     */
+    /* check-cast/jumbo vBBBB, class@AAAAAAAA */
+    FETCH(a0, 1)                        # a0<- aaaa (lo)
+    FETCH(a2, 2)                        # a2<- AAAA (hi)
+    FETCH(a3, 3)                        # a3<- BBBB
+    sll    a2,a2,16
+    or     a2, a0, a2                   # a2<- AAAAaaaa
+
+    GET_VREG(rOBJ, a3)                          # rOBJ<- object
+    LOAD_rSELF_methodClassDex(a0)   # a0<- pDvmDex
+    LOAD_base_offDvmDex_pResClasses(a0, a0) # a0<- pDvmDex->pResClasses
+                                                # is object null?
+    beqz     rOBJ, .LOP_CHECK_CAST_JUMBO_okay             # null obj, cast always succeeds
+    LOAD_eas2(a1, a0, a2)           # a1<- resolved class
+    LOAD_base_offObject_clazz(a0, rOBJ)   # a0<- obj->clazz
+                                                # have we resolved this before?
+    beqz    a1, .LOP_CHECK_CAST_JUMBO_resolve             # not resolved, do it now
+.LOP_CHECK_CAST_JUMBO_resolved:
+                                                # same class (trivial success)?
+    bne     a0, a1, .LOP_CHECK_CAST_JUMBO_fullcheck       # no, do full check
+    b       .LOP_CHECK_CAST_JUMBO_okay                    # yes, finish up
+
+    /*
+     * Trivial test failed, need to perform full check.  This is common.
+     *  a0 holds obj->clazz
+     *  a1 holds class resolved from BBBB
+     *  rOBJ holds object
+     */
+.LOP_CHECK_CAST_JUMBO_fullcheck:
+    move      rBIX,a1                      #  avoid ClassObject getting clobbered
+    JAL(dvmInstanceofNonTrivial)    # v0<- boolean result
+                                                # failed?
+    bnez    v0, .LOP_CHECK_CAST_JUMBO_okay                # no, success
+    b       .LOP_CHECK_CAST_JUMBO_castfailure
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_INSTANCE_OF_JUMBO: /* 0x102 */
+/* File: mips/OP_INSTANCE_OF_JUMBO.S */
+    /*
+     * Check to see if an object reference is an instance of a class.
+     *
+     * Most common situation is a non-null object, being compared against
+     * an already-resolved class.
+     *
+     * TODO: convert most of this into a common subroutine, shared with
+     *       OP_INSTANCE_OF.S.
+     */
+    /* instance-of/jumbo vBBBB, vCCCC, class@AAAAAAAA */
+    FETCH(a3, 4)                           # a3<- CCCC
+    FETCH(rOBJ, 3)                         # rOBJ<- BBBB
+    GET_VREG(a0, a3)                       #  a0 <- vCCCC (object)
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- pDvmDex
+    # is object null?
+    beqz      a0, .LOP_INSTANCE_OF_JUMBO_store        #  null obj, not an instance, store a0
+    FETCH(a1, 1)                           # a1<- aaaa (lo)
+    FETCH(a3, 2)                           # a3<- AAAA (hi)
+    LOAD_base_offDvmDex_pResClasses(a2, a2) #  a2 <- pDvmDex->pResClasses
+    sll     a3,a3,16
+    or      a3, a1, a3                     # a3<- AAAAaaaa
+
+    LOAD_eas2(a1, a2, a3)                  #  a1 <- resolved class
+    LOAD_base_offObject_clazz(a0, a0)      #  a0 <- obj->clazz
+    # have we resolved this before?
+    beqz      a1, .LOP_INSTANCE_OF_JUMBO_resolve      #  not resolved, do it now
+    b       .LOP_INSTANCE_OF_JUMBO_resolved           # resolved, continue
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_NEW_INSTANCE_JUMBO: /* 0x103 */
+/* File: mips/OP_NEW_INSTANCE_JUMBO.S */
+    /*
+     * Create a new instance of a class.
+     */
+    /* new-instance/jumbo vBBBB, class@AAAAAAAA */
+    FETCH(a0, 1)                            # a0<- aaaa (lo)
+    FETCH(a1, 2)                            # a1<- AAAA (hi)
+    LOAD_rSELF_methodClassDex(a3)           #  a3 <- pDvmDex
+    LOAD_base_offDvmDex_pResClasses(a3, a3) #  a3 <- pDvmDex->pResClasses
+    sll      a1,a1,16
+    or       a1, a0, a1                    # a1<- AAAAaaaa
+    LOAD_eas2(a0, a3, a1)                  #  a0 <- resolved class
+#if defined(WITH_JIT)
+    EAS2(rBIX, a3, a1)                     #  rBIX <- &resolved_class
+#endif
+    EXPORT_PC()                            #  req'd for init, resolve, alloc
+    # already resolved?
+    beqz      a0, .LOP_NEW_INSTANCE_JUMBO_resolve      #  no, resolve it now
+.LOP_NEW_INSTANCE_JUMBO_resolved:                   #  a0=class
+    lbu       a1, offClassObject_status(a0) #  a1 <- ClassStatus enum
+    # has class been initialized?
+    li        t0, CLASS_INITIALIZED
+    move      rOBJ, a0                     #  save a0
+    bne       a1, t0, .LOP_NEW_INSTANCE_JUMBO_needinit #  no, init class now
+
+.LOP_NEW_INSTANCE_JUMBO_initialized:                #  a0=class
+    LOAD_base_offClassObject_accessFlags(a3, a0) #  a3 <- clazz->accessFlags
+    li        a1, ALLOC_DONT_TRACK         #  flags for alloc call
+    # a0=class
+    JAL(dvmAllocObject)                    #  v0 <- new object
+    FETCH(a3, 3)                           # a3<- BBBB
+#if defined(WITH_JIT)
+    /*
+     * The JIT needs the class to be fully resolved before it can
+     * include this instruction in a trace.
+     */
+    lhu       a1, offThread_subMode(rSELF)
+    beqz      v0, common_exceptionThrown   #  yes, handle the exception
+    and       a1, kSubModeJitTraceBuild    #  under construction?
+    bnez      a1, .LOP_NEW_INSTANCE_JUMBO_jitCheck
+#else
+    # failed?
+    beqz      v0, common_exceptionThrown   #  yes, handle the exception
+#endif
+    b         .LOP_NEW_INSTANCE_JUMBO_continue
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_NEW_ARRAY_JUMBO: /* 0x104 */
+/* File: mips/OP_NEW_ARRAY_JUMBO.S */
+    /*
+     * Allocate an array of objects, specified with the array class
+     * and a count.
+     *
+     * The verifier guarantees that this is an array class, so we don't
+     * check for it here.
+     */
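+    /*
+     * In outline (illustrative, using the names from the comments below):
+     * fetch the length from vCCCC, reject a negative value via
+     * common_errNegativeArraySize, then resolve the class through the
+     * pResClasses cache before allocating in the _finish path.
+     */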
+    /* new-array/jumbo vBBBB, vCCCC, class@AAAAAAAA */
+    FETCH(a2, 1)                           # a2<- aaaa (lo)
+    FETCH(a3, 2)                           # a3<- AAAA (hi)
+    FETCH(a0, 4)                           # a0<- vCCCC
+    sll     a3,a3,16                       #
+    or      a2, a2, a3                     # a2<- AAAAaaaa
+
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- pDvmDex
+    GET_VREG(a1, a0)                       #  a1 <- vCCCC (array length)
+    LOAD_base_offDvmDex_pResClasses(a3, a3) #  a3 <- pDvmDex->pResClasses
+    LOAD_eas2(a0, a3, a2)                  #  a0 <- resolved class
+    # check length
+    bltz      a1, common_errNegativeArraySize #  negative length, bail - len in a1
+    EXPORT_PC()                            #  req'd for resolve, alloc
+    # already resolved?
+    beqz      a0, .LOP_NEW_ARRAY_JUMBO_resolve      #  not resolved,
+    b         .LOP_NEW_ARRAY_JUMBO_finish
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_FILLED_NEW_ARRAY_JUMBO: /* 0x105 */
+/* File: mips/OP_FILLED_NEW_ARRAY_JUMBO.S */
+    /*
+     * Create a new array with elements filled from registers.
+     *
+     * TODO: convert most of this into a common subroutine, shared with
+     *       OP_FILLED_NEW_ARRAY.S.
+     */
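+    /*
+     * The type is resolved lazily like the other jumbo handlers: the cached
+     * entry in pDvmDex->pResClasses is tried first, and dvmResolveClass()
+     * is called only on a miss (a2=false below is its fromUnverifiedConstant
+     * argument).
+     */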
+     /* filled-new-array/jumbo {vCCCC..v(CCCC+BBBB-1)}, type@AAAAAAAA */
+
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- pDvmDex
+    FETCH(a0, 1)                           # a0<- aaaa (lo)
+    FETCH(a1, 2)                           # a1<- AAAA (hi)
+    LOAD_base_offDvmDex_pResClasses(a3, a3) #  a3 <- pDvmDex->pResClasses
+    sll       a1,a1,16
+    or        a1, a0, a1                   # a1<- AAAAaaaa
+    LOAD_eas2(a0, a3, a1)                  #  a0 <- resolved class
+    FETCH(rOBJ, 3)                         #  rOBJ <- BBBB (arg count)
+    EXPORT_PC()                            #  need for resolve and alloc
+    # already resolved?
+    bnez      a0, .LOP_FILLED_NEW_ARRAY_JUMBO_continue     #  yes, continue on
+    LOAD_rSELF_method(a3)                  #  a3 <- self->method
+    li        a2, 0                        #  a2 <- false
+    LOAD_base_offMethod_clazz(a0, a3)      #  a0 <- method->clazz
+    JAL(dvmResolveClass)                   #  v0 <- call(clazz, ref)
+    move      a0, v0
+    # got null?
+    beqz      v0, common_exceptionThrown   #  yes, handle exception
+    b         .LOP_FILLED_NEW_ARRAY_JUMBO_continue
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IGET_JUMBO: /* 0x106 */
+/* File: mips/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
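+    /*
+     * Minimal C sketch of the resolve-or-reuse pattern shared by all the
+     * iget/jumbo variants (illustrative; argument setup mirrors the a0/a1
+     * loads below):
+     *
+     *   InstField* field = pDvmDex->pResFields[AAAAAAAA];
+     *   if (field == NULL)
+     *       field = dvmResolveInstField(curMethod->clazz, AAAAAAAA);
+     */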
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(a1, 1)                           # a1<- aaaa (lo)
+    FETCH(a2, 2)                           # a2<- AAAA (hi)
+    FETCH(a0, 4)                           # a0<- CCCC
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    sll     a2,a2,16
+    or      a1, a1, a2                     # a1<- AAAAaaaa
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[CCCC], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IGET_JUMBO_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    b       .LOP_IGET_JUMBO_resolved        # resolved, continue
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IGET_WIDE_JUMBO: /* 0x107 */
+/* File: mips/OP_IGET_WIDE_JUMBO.S */
+    /*
+     * Jumbo 64-bit instance field get.
+     */
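+    /*
+     * Resolution is identical to the 32-bit variant; only the payload
+     * differs, with the 64-bit value moved through a register pair into
+     * vBBBB/vBBBB+1 in the _finish path.
+     */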
+    /* iget-wide/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(a1, 1)                           # a1<- aaaa (lo)
+    FETCH(a2, 2)                           # a2<- AAAA (hi)
+    FETCH(a0, 4)                           # a0<- CCCC
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    sll    a2,a2,16
+    or     a1, a1, a2                      # a1<- AAAAaaaa
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[CCCC], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IGET_WIDE_JUMBO_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    b       .LOP_IGET_WIDE_JUMBO_resolved           # resolved, continue
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IGET_OBJECT_JUMBO: /* 0x108 */
+/* File: mips/OP_IGET_OBJECT_JUMBO.S */
+/* File: mips/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(a1, 1)                           # a1<- aaaa (lo)
+    FETCH(a2, 2)                           # a2<- AAAA (hi)
+    FETCH(a0, 4)                           # a0<- CCCC
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    sll     a2,a2,16
+    or      a1, a1, a2                     # a1<- AAAAaaaa
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[CCCC], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IGET_OBJECT_JUMBO_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    b       .LOP_IGET_OBJECT_JUMBO_resolved        # resolved, continue
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IGET_BOOLEAN_JUMBO: /* 0x109 */
+/* File: mips/OP_IGET_BOOLEAN_JUMBO.S */
+/* File: mips/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(a1, 1)                           # a1<- aaaa (lo)
+    FETCH(a2, 2)                           # a2<- AAAA (hi)
+    FETCH(a0, 4)                           # a0<- CCCC
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    sll     a2,a2,16
+    or      a1, a1, a2                     # a1<- AAAAaaaa
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[CCCC], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IGET_BOOLEAN_JUMBO_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    b       .LOP_IGET_BOOLEAN_JUMBO_resolved        # resolved, continue
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IGET_BYTE_JUMBO: /* 0x10a */
+/* File: mips/OP_IGET_BYTE_JUMBO.S */
+/* File: mips/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(a1, 1)                           # a1<- aaaa (lo)
+    FETCH(a2, 2)                           # a2<- AAAA (hi)
+    FETCH(a0, 4)                           # a0<- CCCC
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    sll     a2,a2,16
+    or      a1, a1, a2                     # a1<- AAAAaaaa
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[CCCC], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IGET_BYTE_JUMBO_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    b       .LOP_IGET_BYTE_JUMBO_resolved        # resolved, continue
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IGET_CHAR_JUMBO: /* 0x10b */
+/* File: mips/OP_IGET_CHAR_JUMBO.S */
+/* File: mips/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(a1, 1)                           # a1<- aaaa (lo)
+    FETCH(a2, 2)                           # a2<- AAAA (hi)
+    FETCH(a0, 4)                           # a0<- CCCC
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    sll     a2,a2,16
+    or      a1, a1, a2                     # a1<- AAAAaaaa
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[CCCC], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IGET_CHAR_JUMBO_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    b       .LOP_IGET_CHAR_JUMBO_resolved        # resolved, continue
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IGET_SHORT_JUMBO: /* 0x10c */
+/* File: mips/OP_IGET_SHORT_JUMBO.S */
+/* File: mips/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(a1, 1)                           # a1<- aaaa (lo)
+    FETCH(a2, 2)                           # a2<- AAAA (hi)
+    FETCH(a0, 4)                           # a0<- CCCC
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    sll     a2,a2,16
+    or      a1, a1, a2                     # a1<- AAAAaaaa
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[CCCC], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IGET_SHORT_JUMBO_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    b       .LOP_IGET_SHORT_JUMBO_resolved        # resolved, continue
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IPUT_JUMBO: /* 0x10d */
+/* File: mips/OP_IPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     *
+     * for: iput/jumbo, iput-boolean/jumbo, iput-byte/jumbo, iput-char/jumbo,
+     *      iput-short/jumbo
+     */
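+    /*
+     * Field resolution is the same as in the iget/jumbo family above; only
+     * the data movement in the _finish path is reversed (vBBBB is stored
+     * into the object rather than loaded from it).
+     */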
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(a1, 1)                           # a1<- aaaa (lo)
+    FETCH(a2, 2)                           # a2<- AAAA (hi)
+    FETCH(a0, 4)                           # a0<- CCCC
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    sll       a2,a2,16
+    or        a1, a1, a2                   #  a1<- AAAAaaaa
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[CCCC], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IPUT_JUMBO_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    b       .LOP_IPUT_JUMBO_resolved           # resolved, continue
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IPUT_WIDE_JUMBO: /* 0x10e */
+/* File: mips/OP_IPUT_WIDE_JUMBO.S */
+    /* iput-wide/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(a1, 1)                           # a1<- aaaa (lo)
+    FETCH(a2, 2)                           # a2<- AAAA (hi)
+    FETCH(a0, 4)                           # a0<- CCCC
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    sll       a2,a2,16
+    or        a1, a1, a2                   # a1<- AAAAaaaa
+
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[CCCC], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IPUT_WIDE_JUMBO_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    b       .LOP_IPUT_WIDE_JUMBO_resolved           # resolved, continue
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IPUT_OBJECT_JUMBO: /* 0x10f */
+/* File: mips/OP_IPUT_OBJECT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     */
+    /* iput-object/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(a1, 1)                           # a1<- aaaa (lo)
+    FETCH(a2, 2)                           # a2<- AAAA (hi)
+    FETCH(a0, 4)                           # a0<- CCCC
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    sll      a2,a2,16
+    or       a1, a1, a2                    # a1<- AAAAaaaa
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[CCCC], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IPUT_OBJECT_JUMBO_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    b         .LOP_IPUT_OBJECT_JUMBO_resolved
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IPUT_BOOLEAN_JUMBO: /* 0x110 */
+/* File: mips/OP_IPUT_BOOLEAN_JUMBO.S */
+/* File: mips/OP_IPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     *
+     * for: iput/jumbo, iput-boolean/jumbo, iput-byte/jumbo, iput-char/jumbo,
+     *      iput-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(a1, 1)                           # a1<- aaaa (lo)
+    FETCH(a2, 2)                           # a2<- AAAA (hi)
+    FETCH(a0, 4)                           # a0<- CCCC
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    sll       a2,a2,16
+    or        a1, a1, a2                   #  a1<- AAAAaaaa
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[CCCC], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IPUT_BOOLEAN_JUMBO_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    b       .LOP_IPUT_BOOLEAN_JUMBO_resolved           # resolved, continue
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IPUT_BYTE_JUMBO: /* 0x111 */
+/* File: mips/OP_IPUT_BYTE_JUMBO.S */
+/* File: mips/OP_IPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     *
+     * for: iput/jumbo, iput-boolean/jumbo, iput-byte/jumbo, iput-char/jumbo,
+     *      iput-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(a1, 1)                           # a1<- aaaa (lo)
+    FETCH(a2, 2)                           # a2<- AAAA (hi)
+    FETCH(a0, 4)                           # a0<- CCCC
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    sll       a2,a2,16
+    or        a1, a1, a2                   #  a1<- AAAAaaaa
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[CCCC], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IPUT_BYTE_JUMBO_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    b       .LOP_IPUT_BYTE_JUMBO_resolved           # resolved, continue
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IPUT_CHAR_JUMBO: /* 0x112 */
+/* File: mips/OP_IPUT_CHAR_JUMBO.S */
+/* File: mips/OP_IPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     *
+     * for: iput/jumbo, iput-boolean/jumbo, iput-byte/jumbo, iput-char/jumbo,
+     *      iput-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(a1, 1)                           # a1<- aaaa (lo)
+    FETCH(a2, 2)                           # a2<- AAAA (hi)
+    FETCH(a0, 4)                           # a0<- CCCC
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    sll       a2,a2,16
+    or        a1, a1, a2                   #  a1<- AAAAaaaa
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[CCCC], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IPUT_CHAR_JUMBO_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    b       .LOP_IPUT_CHAR_JUMBO_resolved           # resolved, continue
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IPUT_SHORT_JUMBO: /* 0x113 */
+/* File: mips/OP_IPUT_SHORT_JUMBO.S */
+/* File: mips/OP_IPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     *
+     * for: iput/jumbo, iput-boolean/jumbo, iput-byte/jumbo, iput-char/jumbo,
+     *      iput-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(a1, 1)                           # a1<- aaaa (lo)
+    FETCH(a2, 2)                           # a2<- AAAA (hi)
+    FETCH(a0, 4)                           # a0<- CCCC
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    sll       a2,a2,16
+    or        a1, a1, a2                   #  a1<- AAAAaaaa
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[CCCC], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IPUT_SHORT_JUMBO_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    b       .LOP_IPUT_SHORT_JUMBO_resolved           # resolved, continue
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SGET_JUMBO: /* 0x114 */
+/* File: mips/OP_SGET_JUMBO.S */
+     /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
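+     /*
+     * C sketch of the static-field fast path (illustrative only):
+     *
+     *   StaticField* field = pDvmDex->pResFields[AAAAAAAA];
+     *   if (field == NULL)                 // slow path below
+     *       field = dvmResolveStaticField(curMethod->clazz, AAAAAAAA);
+     */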
+     /* exop vBBBB, field@AAAAAAAA */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a0, 1)                           # a0<- aaaa (lo)
+    FETCH(a1, 2)                           # a1<- AAAA (hi)
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    sll       a1,a1,16
+    or        a1, a0, a1                   # a1<- AAAAaaaa
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    # is resolved entry !null?
+    bnez      a0, .LOP_SGET_JUMBO_finish
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  AAAAAAAA field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() could throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    # success?
+    beqz      v0, common_exceptionThrown   #  no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b        .LOP_SGET_JUMBO_finish            # resume
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SGET_WIDE_JUMBO: /* 0x115 */
+/* File: mips/OP_SGET_WIDE_JUMBO.S */
+    /*
+     * Jumbo 64-bit SGET handler.
+     */
+    /* sget-wide/jumbo vBBBB, field@AAAAAAAA */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a0, 1)                           # a0<- aaaa (lo)
+    FETCH(a1, 2)                           # a1<- AAAA (hi)
+    LOAD_base_offDvmDex_pResFields(a2, a2) #  a2 <- dvmDex->pResFields
+    sll       a1,a1,16
+    or        a1, a0, a1                 # a1<- AAAAaaaa
+    LOAD_eas2(a0, a2, a1)                #  a0 <- resolved StaticField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_SGET_WIDE_JUMBO_finish
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  AAAAAAAA field ref
+     *
+     * Returns StaticField pointer in v0.
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    # success?
+    beqz      v0, common_exceptionThrown   #  no, handle exception
+    b        .LOP_SGET_WIDE_JUMBO_finish            # resume
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SGET_OBJECT_JUMBO: /* 0x116 */
+/* File: mips/OP_SGET_OBJECT_JUMBO.S */
+/* File: mips/OP_SGET_JUMBO.S */
+     /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+     /* exop vBBBB, field@AAAAAAAA */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a0, 1)                           # a0<- aaaa (lo)
+    FETCH(a1, 2)                           # a1<- AAAA (hi)
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    sll       a1,a1,16
+    or        a1, a0, a1                   # a1<- AAAAaaaa
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    # is resolved entry !null?
+    bnez      a0, .LOP_SGET_OBJECT_JUMBO_finish
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  AAAAAAAA field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() could throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    # success?
+    beqz      v0, common_exceptionThrown   #  no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b        .LOP_SGET_OBJECT_JUMBO_finish            # resume
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SGET_BOOLEAN_JUMBO: /* 0x117 */
+/* File: mips/OP_SGET_BOOLEAN_JUMBO.S */
+/* File: mips/OP_SGET_JUMBO.S */
+     /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+     /* exop vBBBB, field@AAAAAAAA */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a0, 1)                           # a0<- aaaa (lo)
+    FETCH(a1, 2)                           # a1<- AAAA (hi)
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    sll       a1,a1,16
+    or        a1, a0, a1                   # a1<- AAAAaaaa
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    # is resolved entry !null?
+    bnez      a0, .LOP_SGET_BOOLEAN_JUMBO_finish
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  AAAAAAAA field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() could throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    # success?
+    beqz      v0, common_exceptionThrown   #  no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b        .LOP_SGET_BOOLEAN_JUMBO_finish            # resume
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SGET_BYTE_JUMBO: /* 0x118 */
+/* File: mips/OP_SGET_BYTE_JUMBO.S */
+/* File: mips/OP_SGET_JUMBO.S */
+     /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+     /* exop vBBBB, field@AAAAAAAA */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a0, 1)                           # a0<- aaaa (lo)
+    FETCH(a1, 2)                           # a1<- AAAA (hi)
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    sll       a1,a1,16
+    or        a1, a0, a1                   # a1<- AAAAaaaa
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    # is resolved entry !null?
+    bnez      a0, .LOP_SGET_BYTE_JUMBO_finish
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  AAAAAAAA field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() could throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    # success?
+    beqz      v0, common_exceptionThrown   #  no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b        .LOP_SGET_BYTE_JUMBO_finish            # resume
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SGET_CHAR_JUMBO: /* 0x119 */
+/* File: mips/OP_SGET_CHAR_JUMBO.S */
+/* File: mips/OP_SGET_JUMBO.S */
+     /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+     /* exop vBBBB, field@AAAAAAAA */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a0, 1)                           # a0<- aaaa (lo)
+    FETCH(a1, 2)                           # a1<- AAAA (hi)
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    sll       a1,a1,16
+    or        a1, a0, a1                   # a1<- AAAAaaaa
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    # is resolved entry !null?
+    bnez      a0, .LOP_SGET_CHAR_JUMBO_finish
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  AAAAAAAA field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() could throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    # success?
+    beqz      v0, common_exceptionThrown   #  no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b        .LOP_SGET_CHAR_JUMBO_finish            # resume
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SGET_SHORT_JUMBO: /* 0x11a */
+/* File: mips/OP_SGET_SHORT_JUMBO.S */
+/* File: mips/OP_SGET_JUMBO.S */
+     /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+     /* exop vBBBB, field@AAAAAAAA */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a0, 1)                           # a0<- aaaa (lo)
+    FETCH(a1, 2)                           # a1<- AAAA (hi)
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    sll       a1,a1,16
+    or        a1, a0, a1                   # a1<- AAAAaaaa
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    # is resolved entry !null?
+    bnez      a0, .LOP_SGET_SHORT_JUMBO_finish
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  AAAAAAAA field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() could throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    # success?
+    beqz      v0, common_exceptionThrown   #  no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b        .LOP_SGET_SHORT_JUMBO_finish            # resume
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SPUT_JUMBO: /* 0x11b */
+/* File: mips/OP_SPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler.
+     *
+     * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+     *      sput-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a0, 1)                           # a0<- aaaa (lo)
+    FETCH(a1, 2)                           # a1<- AAAA (hi)
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    sll       a1,a1,16
+    or        a1, a0, a1                   # a1<- AAAAaaaa
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    bnez      a0, .LOP_SPUT_JUMBO_finish       #  is resolved entry null?
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  AAAAAAAA field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() may throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    beqz      v0, common_exceptionThrown   #  success? no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b        .LOP_SPUT_JUMBO_finish            # resume
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SPUT_WIDE_JUMBO: /* 0x11c */
+/* File: mips/OP_SPUT_WIDE_JUMBO.S */
+    /*
+     * Jumbo 64-bit SPUT handler.
+     */
+    /* sput-wide/jumbo vBBBB, field@AAAAAAAA */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a1, 1)                           # a1<- aaaa (lo)
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    FETCH(a2, 2)                           # a2<- AAAA (hi)
+    sll     a2,a2,16
+    or      a1, a1, a2                     # a1<- AAAAaaaa
+
+    FETCH(rOBJ, 3)                         # rOBJ<- BBBB
+    LOAD_eas2(a2, rBIX, a1)                #  a2 <- resolved StaticField ptr
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ<- &fp[BBBB]
+    # is resolved entry null?
+    beqz      a2, .LOP_SPUT_WIDE_JUMBO_resolve      #  yes, do resolve
+.LOP_SPUT_WIDE_JUMBO_finish:                        #  field ptr in a2, &fp[BBBB] in rOBJ
+    FETCH_ADVANCE_INST(4)                  #  advance rPC, load rINST
+    LOAD64(a0, a1, rOBJ)                   #  a0/a1 <- vBBBB/vBBBB+1
+    GET_INST_OPCODE(rBIX)                  #  extract opcode from rINST
+    .if 0
+    addu    a2, offStaticField_value       #  a2<- pointer to data
+    JAL(dvmQuasiAtomicSwap64Sync)          #  stores a0/a1 into addr a2
+    .else
+    STORE64_off(a0, a1, a2, offStaticField_value) #  field <- vBBBB/vBBBB+1
+    .endif
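+    # The .if/.else above is the template's volatile switch: with 1 it would
+    # route the store through dvmQuasiAtomicSwap64Sync so the 64-bit write is
+    # atomic; with 0, as here, the plain STORE64 is used.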
+    GOTO_OPCODE(rBIX)                      #  jump to next instruction
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SPUT_OBJECT_JUMBO: /* 0x11d */
+/* File: mips/OP_SPUT_OBJECT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler for objects
+     */
+    /* sput-object/jumbo vBBBB, field@AAAAAAAA */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a0, 1)                           # a0<- aaaa (lo)
+    FETCH(a1, 2)                           # a1<- AAAA (hi)
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    sll     a1,a1,16
+    or      a1,a0,a1                       # a1<- AAAAaaaa
+
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    bnez      a0, .LOP_SPUT_OBJECT_JUMBO_finish       #  is resolved entry null?
+
+    /* Continuation if the field has not yet been resolved.
+     * a1:  AAAAAAAA field ref
+     * rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() may throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    beqz      v0, common_exceptionThrown   #  success? no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b         .LOP_SPUT_OBJECT_JUMBO_finish           #  resume
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SPUT_BOOLEAN_JUMBO: /* 0x11e */
+/* File: mips/OP_SPUT_BOOLEAN_JUMBO.S */
+/* File: mips/OP_SPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler.
+     *
+     * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+     *      sput-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a0, 1)                           # a0<- aaaa (lo)
+    FETCH(a1, 2)                           # a1<- AAAA (hi)
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    sll       a1,a1,16
+    or        a1, a0, a1                   # a1<- AAAAaaaa
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    bnez      a0, .LOP_SPUT_BOOLEAN_JUMBO_finish       #  is resolved entry null?
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  AAAAAAAA field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() may throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    beqz      v0, common_exceptionThrown   #  success? no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b        .LOP_SPUT_BOOLEAN_JUMBO_finish            # resume
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SPUT_BYTE_JUMBO: /* 0x11f */
+/* File: mips/OP_SPUT_BYTE_JUMBO.S */
+/* File: mips/OP_SPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler.
+     *
+     * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+     *      sput-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a0, 1)                           # a0<- aaaa (lo)
+    FETCH(a1, 2)                           # a1<- AAAA (hi)
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    sll       a1,a1,16
+    or        a1, a0, a1                   # a1<- AAAAaaaa
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    bnez      a0, .LOP_SPUT_BYTE_JUMBO_finish       #  is resolved entry null?
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  AAAAAAAA field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() may throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    beqz      v0, common_exceptionThrown   #  success? no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b        .LOP_SPUT_BYTE_JUMBO_finish            # resume
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SPUT_CHAR_JUMBO: /* 0x120 */
+/* File: mips/OP_SPUT_CHAR_JUMBO.S */
+/* File: mips/OP_SPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler.
+     *
+     * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+     *      sput-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a0, 1)                           # a0<- aaaa (lo)
+    FETCH(a1, 2)                           # a1<- AAAA (hi)
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    sll       a1,a1,16
+    or        a1, a0, a1                   # a1<- AAAAaaaa
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    bnez      a0, .LOP_SPUT_CHAR_JUMBO_finish       #  is resolved entry null?
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  AAAAAAAA field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() may throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    beqz      v0, common_exceptionThrown   #  success? no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b        .LOP_SPUT_CHAR_JUMBO_finish            # resume
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SPUT_SHORT_JUMBO: /* 0x121 */
+/* File: mips/OP_SPUT_SHORT_JUMBO.S */
+/* File: mips/OP_SPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler.
+     *
+     * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+     *      sput-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a0, 1)                           # a0<- aaaa (lo)
+    FETCH(a1, 2)                           # a1<- AAAA (hi)
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    sll       a1,a1,16
+    or        a1, a0, a1                   # a1<- AAAAaaaa
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    bnez      a0, .LOP_SPUT_SHORT_JUMBO_finish       #  is resolved entry null?
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  AAAAAAAA field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() may throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    beqz      v0, common_exceptionThrown   #  success? no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b        .LOP_SPUT_SHORT_JUMBO_finish            # resume
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_INVOKE_VIRTUAL_JUMBO: /* 0x122 */
+/* File: mips/OP_INVOKE_VIRTUAL_JUMBO.S */
+    /*
+     * Handle a virtual method call.
+     */
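+    /*
+     * Only the base method is resolved here; the _continue path (outside
+     * this hunk) is expected to index the receiver's vtable to select the
+     * concrete override before dispatching.
+     */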
+     /* invoke-virtual/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- pDvmDex
+    FETCH(a0, 1)                           # a0<- aaaa (lo)
+    FETCH(a1, 2)                           # a1<- AAAA (hi)
+    LOAD_base_offDvmDex_pResMethods(a3, a3) #  a3 <- pDvmDex->pResMethods
+    sll     a1,a1,16
+    or      a1, a0, a1                     # a1<- AAAAaaaa
+    LOAD_eas2(a0, a3, a1)                  #  a0 <- resolved baseMethod
+    EXPORT_PC()                            #  must export for invoke
+    # already resolved?
+    bnez      a0, .LOP_INVOKE_VIRTUAL_JUMBO_continue     #  yes, continue on
+
+    LOAD_rSELF_method(a3)                  #  a3 <- self->method
+    LOAD_base_offMethod_clazz(a0, a3)      #  a0 <- method->clazz
+    li        a2, METHOD_VIRTUAL           #  resolver method type
+    JAL(dvmResolveMethod)                  #  v0 <- call(clazz, ref, flags)
+    move      a0, v0
+    # got null?
+    bnez      v0, .LOP_INVOKE_VIRTUAL_JUMBO_continue     #  no, continue
+    b         common_exceptionThrown       #  yes, handle exception
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_INVOKE_SUPER_JUMBO: /* 0x123 */
+/* File: mips/OP_INVOKE_SUPER_JUMBO.S */
+    /*
+     * Handle a "super" method call.
+    */
+    /* invoke-super/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    FETCH(t0, 4)                           # t0<- CCCC
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- pDvmDex
+    FETCH(a0, 1)                           # a0<- aaaa (lo)
+    FETCH(a1, 2)                           # a1<- AAAA (hi)
+    LOAD_base_offDvmDex_pResMethods(a3, a3) #  a3 <- pDvmDex->pResMethods
+    sll       a1,a1,16
+    or        a1, a0, a1                   # a1<- AAAAaaaa
+    GET_VREG(rOBJ, t0)                     #  rOBJ <- "this" ptr
+    LOAD_eas2(a0, a3, a1)                  #  a0 <- resolved baseMethod
+    # null "this"?
+    LOAD_rSELF_method(t1)                  #  t1 <- current method
+    beqz      rOBJ, common_errNullObject   #  null "this", throw exception
+    # cmp a0, 0; already resolved?
+    LOAD_base_offMethod_clazz(rBIX, t1)    #  rBIX <- method->clazz
+    EXPORT_PC()                            #  must export for invoke
+    bnez      a0, .LOP_INVOKE_SUPER_JUMBO_continue     #  resolved, continue on
+
+    move      a0, rBIX                     #  a0 <- method->clazz
+    li        a2, METHOD_VIRTUAL           #  resolver method type
+    JAL(dvmResolveMethod)                  #  v0 <- call(clazz, ref, flags)
+    move      a0, v0
+    # got null?
+    beqz      v0, common_exceptionThrown   #  yes, handle exception
+    b         .LOP_INVOKE_SUPER_JUMBO_continue
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_INVOKE_DIRECT_JUMBO: /* 0x124 */
+/* File: mips/OP_INVOKE_DIRECT_JUMBO.S */
+    /*
+     * Handle a direct method call.
+     *
+     * (We could defer the "is 'this' pointer null" test to the common
+     * method invocation code, and use a flag to indicate that static
+     * calls don't count.  If we do this as part of copying the arguments
+     * out we could avoiding loading the first arg twice.)
+     *
+     */
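+    /*
+     * Rough C equivalent of the dispatch below (illustrative):
+     *
+     *   Method* m = pDvmDex->pResMethods[AAAAAAAA];
+     *   if (m == NULL)
+     *       m = dvmResolveMethod(curMethod->clazz, AAAAAAAA, METHOD_DIRECT);
+     *   if (thisPtr == NULL)
+     *       goto common_errNullObject;
+     */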
+     /* invoke-direct/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- pDvmDex
+    FETCH(a0, 1)                           # a0<- aaaa (lo)
+    FETCH(a1, 2)                           # a1<- AAAA (hi)
+    LOAD_base_offDvmDex_pResMethods(a3, a3) #  a3 <- pDvmDex->pResMethods
+    sll     a1,a1,16
+    or      a1, a0, a1                     # a1<- AAAAaaaa
+    FETCH(rBIX, 4)                         #  rBIX <- GFED or CCCC
+    LOAD_eas2(a0, a3, a1)                  #  a0 <- resolved methodToCall
+    .if (!0)
+    and       rBIX, rBIX, 15               #  rBIX <- D (or stays CCCC)
+    .endif
+    EXPORT_PC()                            #  must export for invoke
+    GET_VREG(rOBJ, rBIX)                   #  rOBJ <- "this" ptr
+    # already resolved?
+    bnez      a0, 1f                       #  resolved, call the function
+
+    lw        a3, offThread_method(rSELF)  #  a3 <- self->method
+    LOAD_base_offMethod_clazz(a0, a3)      #  a0 <- method->clazz
+    li        a2, METHOD_DIRECT            #  resolver method type
+    JAL(dvmResolveMethod)                  #  v0 <- call(clazz, ref, flags)
+    move      a0, v0
+    # got null?
+    beqz      v0, common_exceptionThrown   #  yes, handle exception
+
+1:
+    bnez      rOBJ, common_invokeMethodJumbo #  a0=method, rOBJ="this"
+    b         common_errNullObject         #  null "this", throw exception
+
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_INVOKE_STATIC_JUMBO: /* 0x125 */
+/* File: mips/OP_INVOKE_STATIC_JUMBO.S */
+    /*
+     * Handle a static method call.
+     */
+     /* invoke-static/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- pDvmDex
+    FETCH(a0, 1)                           # a0<- aaaa (lo)
+    FETCH(a1, 2)                           # a1<- AAAA (hi)
+    LOAD_base_offDvmDex_pResMethods(a3, a3) #  a3 <- pDvmDex->pResMethods
+    sll     a1,a1,16
+    or      a1, a0, a1                     # a1<- AAAAaaaa
+    li      rOBJ, 0                        #  null "this"
+    LOAD_eas2(a0, a3, a1)                  #  a0 <- resolved methodToCall
+#if defined(WITH_JIT)
+    EAS2(rBIX, a3, a1)                     #  rBIX<- &resolved_methodToCall
+#endif
+    EXPORT_PC()                            #  must export for invoke
+    # already resolved?
+    bnez      a0, common_invokeMethodJumboNoThis #  (a0 = method)
+    b         .LOP_INVOKE_STATIC_JUMBO_resolve
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_INVOKE_INTERFACE_JUMBO: /* 0x126 */
+/* File: mips/OP_INVOKE_INTERFACE_JUMBO.S */
+    /*
+     * Handle an interface method call.
+     */
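+    /*
+     * An interface method has no fixed vtable slot, so the handler always
+     * goes through dvmFindInterfaceMethodInCache() with the receiver's
+     * actual class; the cache keeps the repeated lookups cheap.
+     */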
+    /* invoke-interface/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+    FETCH(a2, 4)                           # a2<- CCCC
+    FETCH(a0, 1)                           # a0<- aaaa (lo)
+    FETCH(a1, 2)                           # a1<- AAAA (hi)
+    EXPORT_PC()                            #  must export for invoke
+    sll       a1,a1,16
+    or        a1, a0, a1                   #  a1<- AAAAaaaa
+    GET_VREG(rOBJ, a2)                     #  rOBJ <- first arg ("this")
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- methodClassDex
+    LOAD_rSELF_method(a2)                  #  a2 <- method
+    # null obj?
+    beqz      rOBJ, common_errNullObject   #  yes, fail
+    LOAD_base_offObject_clazz(a0, rOBJ)      #  a0 <- thisPtr->clazz
+    JAL(dvmFindInterfaceMethodInCache)     #  v0 <- call(class, ref, method, dex)
+    move      a0, v0
+    # failed?
+    beqz      v0, common_exceptionThrown   #  yes, handle exception
+    b         common_invokeMethodJumbo #  (a0=method, rOBJ="this")
+
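+    /*
+     * Opcodes 0x127-0x146 are unassigned jumbo opcodes: each slot below is
+     * padded to the uniform 128-byte handler size and simply aborts the VM
+     * if it is ever reached.
+     */
+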
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_27FF: /* 0x127 */
+/* File: mips/OP_UNUSED_27FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_28FF: /* 0x128 */
+/* File: mips/OP_UNUSED_28FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_29FF: /* 0x129 */
+/* File: mips/OP_UNUSED_29FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_2AFF: /* 0x12a */
+/* File: mips/OP_UNUSED_2AFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_2BFF: /* 0x12b */
+/* File: mips/OP_UNUSED_2BFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_2CFF: /* 0x12c */
+/* File: mips/OP_UNUSED_2CFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_2DFF: /* 0x12d */
+/* File: mips/OP_UNUSED_2DFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_2EFF: /* 0x12e */
+/* File: mips/OP_UNUSED_2EFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_2FFF: /* 0x12f */
+/* File: mips/OP_UNUSED_2FFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_30FF: /* 0x130 */
+/* File: mips/OP_UNUSED_30FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_31FF: /* 0x131 */
+/* File: mips/OP_UNUSED_31FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_32FF: /* 0x132 */
+/* File: mips/OP_UNUSED_32FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_33FF: /* 0x133 */
+/* File: mips/OP_UNUSED_33FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_34FF: /* 0x134 */
+/* File: mips/OP_UNUSED_34FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_35FF: /* 0x135 */
+/* File: mips/OP_UNUSED_35FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_36FF: /* 0x136 */
+/* File: mips/OP_UNUSED_36FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_37FF: /* 0x137 */
+/* File: mips/OP_UNUSED_37FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_38FF: /* 0x138 */
+/* File: mips/OP_UNUSED_38FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_39FF: /* 0x139 */
+/* File: mips/OP_UNUSED_39FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_3AFF: /* 0x13a */
+/* File: mips/OP_UNUSED_3AFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_3BFF: /* 0x13b */
+/* File: mips/OP_UNUSED_3BFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_3CFF: /* 0x13c */
+/* File: mips/OP_UNUSED_3CFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_3DFF: /* 0x13d */
+/* File: mips/OP_UNUSED_3DFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_3EFF: /* 0x13e */
+/* File: mips/OP_UNUSED_3EFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_3FFF: /* 0x13f */
+/* File: mips/OP_UNUSED_3FFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_40FF: /* 0x140 */
+/* File: mips/OP_UNUSED_40FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_41FF: /* 0x141 */
+/* File: mips/OP_UNUSED_41FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_42FF: /* 0x142 */
+/* File: mips/OP_UNUSED_42FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_43FF: /* 0x143 */
+/* File: mips/OP_UNUSED_43FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_44FF: /* 0x144 */
+/* File: mips/OP_UNUSED_44FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_45FF: /* 0x145 */
+/* File: mips/OP_UNUSED_45FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_46FF: /* 0x146 */
+/* File: mips/OP_UNUSED_46FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_47FF: /* 0x147 */
+/* File: mips/OP_UNUSED_47FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_48FF: /* 0x148 */
+/* File: mips/OP_UNUSED_48FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_49FF: /* 0x149 */
+/* File: mips/OP_UNUSED_49FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_4AFF: /* 0x14a */
+/* File: mips/OP_UNUSED_4AFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_4BFF: /* 0x14b */
+/* File: mips/OP_UNUSED_4BFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_4CFF: /* 0x14c */
+/* File: mips/OP_UNUSED_4CFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_4DFF: /* 0x14d */
+/* File: mips/OP_UNUSED_4DFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_4EFF: /* 0x14e */
+/* File: mips/OP_UNUSED_4EFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_4FFF: /* 0x14f */
+/* File: mips/OP_UNUSED_4FFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_50FF: /* 0x150 */
+/* File: mips/OP_UNUSED_50FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_51FF: /* 0x151 */
+/* File: mips/OP_UNUSED_51FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_52FF: /* 0x152 */
+/* File: mips/OP_UNUSED_52FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_53FF: /* 0x153 */
+/* File: mips/OP_UNUSED_53FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_54FF: /* 0x154 */
+/* File: mips/OP_UNUSED_54FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_55FF: /* 0x155 */
+/* File: mips/OP_UNUSED_55FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_56FF: /* 0x156 */
+/* File: mips/OP_UNUSED_56FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_57FF: /* 0x157 */
+/* File: mips/OP_UNUSED_57FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_58FF: /* 0x158 */
+/* File: mips/OP_UNUSED_58FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_59FF: /* 0x159 */
+/* File: mips/OP_UNUSED_59FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_5AFF: /* 0x15a */
+/* File: mips/OP_UNUSED_5AFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_5BFF: /* 0x15b */
+/* File: mips/OP_UNUSED_5BFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_5CFF: /* 0x15c */
+/* File: mips/OP_UNUSED_5CFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_5DFF: /* 0x15d */
+/* File: mips/OP_UNUSED_5DFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_5EFF: /* 0x15e */
+/* File: mips/OP_UNUSED_5EFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_5FFF: /* 0x15f */
+/* File: mips/OP_UNUSED_5FFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_60FF: /* 0x160 */
+/* File: mips/OP_UNUSED_60FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_61FF: /* 0x161 */
+/* File: mips/OP_UNUSED_61FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_62FF: /* 0x162 */
+/* File: mips/OP_UNUSED_62FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_63FF: /* 0x163 */
+/* File: mips/OP_UNUSED_63FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_64FF: /* 0x164 */
+/* File: mips/OP_UNUSED_64FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_65FF: /* 0x165 */
+/* File: mips/OP_UNUSED_65FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_66FF: /* 0x166 */
+/* File: mips/OP_UNUSED_66FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_67FF: /* 0x167 */
+/* File: mips/OP_UNUSED_67FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_68FF: /* 0x168 */
+/* File: mips/OP_UNUSED_68FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_69FF: /* 0x169 */
+/* File: mips/OP_UNUSED_69FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_6AFF: /* 0x16a */
+/* File: mips/OP_UNUSED_6AFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_6BFF: /* 0x16b */
+/* File: mips/OP_UNUSED_6BFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_6CFF: /* 0x16c */
+/* File: mips/OP_UNUSED_6CFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_6DFF: /* 0x16d */
+/* File: mips/OP_UNUSED_6DFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_6EFF: /* 0x16e */
+/* File: mips/OP_UNUSED_6EFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_6FFF: /* 0x16f */
+/* File: mips/OP_UNUSED_6FFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_70FF: /* 0x170 */
+/* File: mips/OP_UNUSED_70FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_71FF: /* 0x171 */
+/* File: mips/OP_UNUSED_71FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_72FF: /* 0x172 */
+/* File: mips/OP_UNUSED_72FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_73FF: /* 0x173 */
+/* File: mips/OP_UNUSED_73FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_74FF: /* 0x174 */
+/* File: mips/OP_UNUSED_74FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_75FF: /* 0x175 */
+/* File: mips/OP_UNUSED_75FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_76FF: /* 0x176 */
+/* File: mips/OP_UNUSED_76FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_77FF: /* 0x177 */
+/* File: mips/OP_UNUSED_77FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_78FF: /* 0x178 */
+/* File: mips/OP_UNUSED_78FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_79FF: /* 0x179 */
+/* File: mips/OP_UNUSED_79FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_7AFF: /* 0x17a */
+/* File: mips/OP_UNUSED_7AFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_7BFF: /* 0x17b */
+/* File: mips/OP_UNUSED_7BFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_7CFF: /* 0x17c */
+/* File: mips/OP_UNUSED_7CFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_7DFF: /* 0x17d */
+/* File: mips/OP_UNUSED_7DFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_7EFF: /* 0x17e */
+/* File: mips/OP_UNUSED_7EFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_7FFF: /* 0x17f */
+/* File: mips/OP_UNUSED_7FFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_80FF: /* 0x180 */
+/* File: mips/OP_UNUSED_80FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_81FF: /* 0x181 */
+/* File: mips/OP_UNUSED_81FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_82FF: /* 0x182 */
+/* File: mips/OP_UNUSED_82FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_83FF: /* 0x183 */
+/* File: mips/OP_UNUSED_83FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_84FF: /* 0x184 */
+/* File: mips/OP_UNUSED_84FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_85FF: /* 0x185 */
+/* File: mips/OP_UNUSED_85FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_86FF: /* 0x186 */
+/* File: mips/OP_UNUSED_86FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_87FF: /* 0x187 */
+/* File: mips/OP_UNUSED_87FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_88FF: /* 0x188 */
+/* File: mips/OP_UNUSED_88FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_89FF: /* 0x189 */
+/* File: mips/OP_UNUSED_89FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_8AFF: /* 0x18a */
+/* File: mips/OP_UNUSED_8AFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_8BFF: /* 0x18b */
+/* File: mips/OP_UNUSED_8BFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_8CFF: /* 0x18c */
+/* File: mips/OP_UNUSED_8CFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_8DFF: /* 0x18d */
+/* File: mips/OP_UNUSED_8DFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_8EFF: /* 0x18e */
+/* File: mips/OP_UNUSED_8EFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_8FFF: /* 0x18f */
+/* File: mips/OP_UNUSED_8FFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_90FF: /* 0x190 */
+/* File: mips/OP_UNUSED_90FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_91FF: /* 0x191 */
+/* File: mips/OP_UNUSED_91FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_92FF: /* 0x192 */
+/* File: mips/OP_UNUSED_92FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_93FF: /* 0x193 */
+/* File: mips/OP_UNUSED_93FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_94FF: /* 0x194 */
+/* File: mips/OP_UNUSED_94FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_95FF: /* 0x195 */
+/* File: mips/OP_UNUSED_95FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_96FF: /* 0x196 */
+/* File: mips/OP_UNUSED_96FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_97FF: /* 0x197 */
+/* File: mips/OP_UNUSED_97FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_98FF: /* 0x198 */
+/* File: mips/OP_UNUSED_98FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_99FF: /* 0x199 */
+/* File: mips/OP_UNUSED_99FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_9AFF: /* 0x19a */
+/* File: mips/OP_UNUSED_9AFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_9BFF: /* 0x19b */
+/* File: mips/OP_UNUSED_9BFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_9CFF: /* 0x19c */
+/* File: mips/OP_UNUSED_9CFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_9DFF: /* 0x19d */
+/* File: mips/OP_UNUSED_9DFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_9EFF: /* 0x19e */
+/* File: mips/OP_UNUSED_9EFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_9FFF: /* 0x19f */
+/* File: mips/OP_UNUSED_9FFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_A0FF: /* 0x1a0 */
+/* File: mips/OP_UNUSED_A0FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_A1FF: /* 0x1a1 */
+/* File: mips/OP_UNUSED_A1FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_A2FF: /* 0x1a2 */
+/* File: mips/OP_UNUSED_A2FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_A3FF: /* 0x1a3 */
+/* File: mips/OP_UNUSED_A3FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_A4FF: /* 0x1a4 */
+/* File: mips/OP_UNUSED_A4FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_A5FF: /* 0x1a5 */
+/* File: mips/OP_UNUSED_A5FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_A6FF: /* 0x1a6 */
+/* File: mips/OP_UNUSED_A6FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_A7FF: /* 0x1a7 */
+/* File: mips/OP_UNUSED_A7FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_A8FF: /* 0x1a8 */
+/* File: mips/OP_UNUSED_A8FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_A9FF: /* 0x1a9 */
+/* File: mips/OP_UNUSED_A9FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_AAFF: /* 0x1aa */
+/* File: mips/OP_UNUSED_AAFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_ABFF: /* 0x1ab */
+/* File: mips/OP_UNUSED_ABFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_ACFF: /* 0x1ac */
+/* File: mips/OP_UNUSED_ACFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_ADFF: /* 0x1ad */
+/* File: mips/OP_UNUSED_ADFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_AEFF: /* 0x1ae */
+/* File: mips/OP_UNUSED_AEFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_AFFF: /* 0x1af */
+/* File: mips/OP_UNUSED_AFFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_B0FF: /* 0x1b0 */
+/* File: mips/OP_UNUSED_B0FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_B1FF: /* 0x1b1 */
+/* File: mips/OP_UNUSED_B1FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_B2FF: /* 0x1b2 */
+/* File: mips/OP_UNUSED_B2FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_B3FF: /* 0x1b3 */
+/* File: mips/OP_UNUSED_B3FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_B4FF: /* 0x1b4 */
+/* File: mips/OP_UNUSED_B4FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_B5FF: /* 0x1b5 */
+/* File: mips/OP_UNUSED_B5FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_B6FF: /* 0x1b6 */
+/* File: mips/OP_UNUSED_B6FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_B7FF: /* 0x1b7 */
+/* File: mips/OP_UNUSED_B7FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_B8FF: /* 0x1b8 */
+/* File: mips/OP_UNUSED_B8FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_B9FF: /* 0x1b9 */
+/* File: mips/OP_UNUSED_B9FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_BAFF: /* 0x1ba */
+/* File: mips/OP_UNUSED_BAFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_BBFF: /* 0x1bb */
+/* File: mips/OP_UNUSED_BBFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_BCFF: /* 0x1bc */
+/* File: mips/OP_UNUSED_BCFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_BDFF: /* 0x1bd */
+/* File: mips/OP_UNUSED_BDFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_BEFF: /* 0x1be */
+/* File: mips/OP_UNUSED_BEFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_BFFF: /* 0x1bf */
+/* File: mips/OP_UNUSED_BFFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_C0FF: /* 0x1c0 */
+/* File: mips/OP_UNUSED_C0FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_C1FF: /* 0x1c1 */
+/* File: mips/OP_UNUSED_C1FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_C2FF: /* 0x1c2 */
+/* File: mips/OP_UNUSED_C2FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_C3FF: /* 0x1c3 */
+/* File: mips/OP_UNUSED_C3FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_C4FF: /* 0x1c4 */
+/* File: mips/OP_UNUSED_C4FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_C5FF: /* 0x1c5 */
+/* File: mips/OP_UNUSED_C5FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_C6FF: /* 0x1c6 */
+/* File: mips/OP_UNUSED_C6FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_C7FF: /* 0x1c7 */
+/* File: mips/OP_UNUSED_C7FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_C8FF: /* 0x1c8 */
+/* File: mips/OP_UNUSED_C8FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_C9FF: /* 0x1c9 */
+/* File: mips/OP_UNUSED_C9FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_CAFF: /* 0x1ca */
+/* File: mips/OP_UNUSED_CAFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_CBFF: /* 0x1cb */
+/* File: mips/OP_UNUSED_CBFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_CCFF: /* 0x1cc */
+/* File: mips/OP_UNUSED_CCFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_CDFF: /* 0x1cd */
+/* File: mips/OP_UNUSED_CDFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_CEFF: /* 0x1ce */
+/* File: mips/OP_UNUSED_CEFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_CFFF: /* 0x1cf */
+/* File: mips/OP_UNUSED_CFFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_D0FF: /* 0x1d0 */
+/* File: mips/OP_UNUSED_D0FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_D1FF: /* 0x1d1 */
+/* File: mips/OP_UNUSED_D1FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_D2FF: /* 0x1d2 */
+/* File: mips/OP_UNUSED_D2FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_D3FF: /* 0x1d3 */
+/* File: mips/OP_UNUSED_D3FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_D4FF: /* 0x1d4 */
+/* File: mips/OP_UNUSED_D4FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_D5FF: /* 0x1d5 */
+/* File: mips/OP_UNUSED_D5FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_D6FF: /* 0x1d6 */
+/* File: mips/OP_UNUSED_D6FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_D7FF: /* 0x1d7 */
+/* File: mips/OP_UNUSED_D7FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_D8FF: /* 0x1d8 */
+/* File: mips/OP_UNUSED_D8FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_D9FF: /* 0x1d9 */
+/* File: mips/OP_UNUSED_D9FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_DAFF: /* 0x1da */
+/* File: mips/OP_UNUSED_DAFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_DBFF: /* 0x1db */
+/* File: mips/OP_UNUSED_DBFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_DCFF: /* 0x1dc */
+/* File: mips/OP_UNUSED_DCFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_DDFF: /* 0x1dd */
+/* File: mips/OP_UNUSED_DDFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_DEFF: /* 0x1de */
+/* File: mips/OP_UNUSED_DEFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_DFFF: /* 0x1df */
+/* File: mips/OP_UNUSED_DFFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_E0FF: /* 0x1e0 */
+/* File: mips/OP_UNUSED_E0FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_E1FF: /* 0x1e1 */
+/* File: mips/OP_UNUSED_E1FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_E2FF: /* 0x1e2 */
+/* File: mips/OP_UNUSED_E2FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_E3FF: /* 0x1e3 */
+/* File: mips/OP_UNUSED_E3FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_E4FF: /* 0x1e4 */
+/* File: mips/OP_UNUSED_E4FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_E5FF: /* 0x1e5 */
+/* File: mips/OP_UNUSED_E5FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_E6FF: /* 0x1e6 */
+/* File: mips/OP_UNUSED_E6FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_E7FF: /* 0x1e7 */
+/* File: mips/OP_UNUSED_E7FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_E8FF: /* 0x1e8 */
+/* File: mips/OP_UNUSED_E8FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_E9FF: /* 0x1e9 */
+/* File: mips/OP_UNUSED_E9FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_EAFF: /* 0x1ea */
+/* File: mips/OP_UNUSED_EAFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_EBFF: /* 0x1eb */
+/* File: mips/OP_UNUSED_EBFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_ECFF: /* 0x1ec */
+/* File: mips/OP_UNUSED_ECFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_EDFF: /* 0x1ed */
+/* File: mips/OP_UNUSED_EDFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_EEFF: /* 0x1ee */
+/* File: mips/OP_UNUSED_EEFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_EFFF: /* 0x1ef */
+/* File: mips/OP_UNUSED_EFFF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_F0FF: /* 0x1f0 */
+/* File: mips/OP_UNUSED_F0FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_UNUSED_F1FF: /* 0x1f1 */
+/* File: mips/OP_UNUSED_F1FF.S */
+/* File: mips/unused.S */
+    BAL(common_abort)
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_INVOKE_OBJECT_INIT_JUMBO: /* 0x1f2 */
+/* File: mips/OP_INVOKE_OBJECT_INIT_JUMBO.S */
+/* File: mips/OP_INVOKE_OBJECT_INIT_RANGE.S */
+    /*
+     * Invoke Object.<init> on an object.  In practice we know that
+     * Object's nullary constructor doesn't do anything, so we just
+     * skip it unless a debugger is active.
+     */
+    FETCH(a1, 4)                  # a1<- CCCC
+    GET_VREG(a0, a1)                    # a0<- "this" ptr
+    # check for NULL
+    beqz    a0, common_errNullObject    # export PC and throw NPE
+    LOAD_base_offObject_clazz(a1, a0)   # a1<- obj->clazz
+    LOAD_base_offClassObject_accessFlags(a2, a1) # a2<- clazz->accessFlags
+    and     a2, CLASS_ISFINALIZABLE     # is this class finalizable?
+    beqz    a2, .LOP_INVOKE_OBJECT_INIT_JUMBO_finish      # no, go
+
+.LOP_INVOKE_OBJECT_INIT_JUMBO_setFinal:
+    EXPORT_PC()                         # can throw
+    JAL(dvmSetFinalizable)              # call dvmSetFinalizable(obj)
+    LOAD_offThread_exception(a0, rSELF)    # a0<- self->exception
+    # exception pending?
+    bnez    a0, common_exceptionThrown  # yes, handle it
+
+.LOP_INVOKE_OBJECT_INIT_JUMBO_finish:
+    lhu     a1, offThread_subMode(rSELF)
+    and     a1, kSubModeDebuggerActive  # debugger active?
+    bnez    a1, .LOP_INVOKE_OBJECT_INIT_JUMBO_debugger    # Yes - skip optimization
+    FETCH_ADVANCE_INST(4+1)       # advance to next instr, load rINST
+    GET_INST_OPCODE(t0)                 # t0<- opcode from rINST
+    GOTO_OPCODE(t0)                     # execute it
+
+
+
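+    /*
+     * Because the fast path above never actually calls Object.<init>, the
+     * handler evidently takes over the constructor's bookkeeping itself:
+     * an object of a finalizable class is registered via
+     * dvmSetFinalizable(obj), and an active debugger disables the shortcut
+     * so the skipped call remains visible to it.
+     */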
+/* ------------------------------ */
+    .balign 128
+.L_OP_IGET_VOLATILE_JUMBO: /* 0x1f3 */
+/* File: mips/OP_IGET_VOLATILE_JUMBO.S */
+/* File: mips/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(a1, 1)                           # a1<- aaaa (lo)
+    FETCH(a2, 2)                           # a2<- AAAA (hi)
+    FETCH(a0, 4)                           # a0<- CCCC
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    sll     a2,a2,16
+    or      a1, a1, a2                     # a1<- AAAAaaaa
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[CCCC], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IGET_VOLATILE_JUMBO_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    b       .LOP_IGET_VOLATILE_JUMBO_resolved        # resolved, continue
+
+
+
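+    /*
+     * All of the iget/iput jumbo variants below follow the resolution
+     * pattern shown above: probe the pDvmDex->pResFields cache for the
+     * InstField pointer; on a miss, EXPORT_PC() (dvmResolveInstField can
+     * throw) before calling the resolver, then branch to the _resolved
+     * continuation in the sister section with the result in v0.
+     */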
+/* ------------------------------ */
+    .balign 128
+.L_OP_IGET_WIDE_VOLATILE_JUMBO: /* 0x1f4 */
+/* File: mips/OP_IGET_WIDE_VOLATILE_JUMBO.S */
+/* File: mips/OP_IGET_WIDE_JUMBO.S */
+    /*
+     * Jumbo 64-bit instance field get.
+     */
+    /* iget-wide/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(a1, 1)                           # a1<- aaaa (lo)
+    FETCH(a2, 2)                           # a2<- AAAA (hi)
+    FETCH(a0, 4)                           # a0<- CCCC
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    sll    a2,a2,16
+    or     a1, a1, a2                      # a1<- AAAAaaaa
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[CCCC], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IGET_WIDE_VOLATILE_JUMBO_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    b       .LOP_IGET_WIDE_VOLATILE_JUMBO_resolved           # resolved, continue
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IGET_OBJECT_VOLATILE_JUMBO: /* 0x1f5 */
+/* File: mips/OP_IGET_OBJECT_VOLATILE_JUMBO.S */
+/* File: mips/OP_IGET_OBJECT_JUMBO.S */
+/* File: mips/OP_IGET_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field get.
+     *
+     * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+     *      iget-char/jumbo, iget-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(a1, 1)                           # a1<- aaaa (lo)
+    FETCH(a2, 2)                           # a2<- AAAA (hi)
+    FETCH(a0, 4)                           # a0<- CCCC
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    sll     a2,a2,16
+    or      a1, a1, a2                     # a1<- AAAAaaaa
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[CCCC], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IGET_OBJECT_VOLATILE_JUMBO_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    b       .LOP_IGET_OBJECT_VOLATILE_JUMBO_resolved        # resolved, continue
+
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IPUT_VOLATILE_JUMBO: /* 0x1f6 */
+/* File: mips/OP_IPUT_VOLATILE_JUMBO.S */
+/* File: mips/OP_IPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     *
+     * for: iput/jumbo, iput-boolean/jumbo, iput-byte/jumbo, iput-char/jumbo,
+     *      iput-short/jumbo
+     */
+    /* exop vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(a1, 1)                           # a1<- aaaa (lo)
+    FETCH(a2, 2)                           # a2<- AAAA (hi)
+    FETCH(a0, 4)                           # a0<- CCCC
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    sll       a2,a2,16
+    or        a1, a1, a2                   #  a1<- AAAAaaaa
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[CCCC], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IPUT_VOLATILE_JUMBO_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    b       .LOP_IPUT_VOLATILE_JUMBO_resolved           # resolved, continue
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IPUT_WIDE_VOLATILE_JUMBO: /* 0x1f7 */
+/* File: mips/OP_IPUT_WIDE_VOLATILE_JUMBO.S */
+/* File: mips/OP_IPUT_WIDE_JUMBO.S */
+    /* iput-wide/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(a1, 1)                           # a1<- aaaa (lo)
+    FETCH(a2, 2)                           # a2<- AAAA (hi)
+    FETCH(a0, 4)                           # a0<- CCCC
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    sll       a2,a2,16
+    or        a1, a1, a2                   # a1<- AAAAaaaa
+
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[CCCC], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IPUT_WIDE_VOLATILE_JUMBO_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    b       .LOP_IPUT_WIDE_VOLATILE_JUMBO_resolved           # resolved, continue
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IPUT_OBJECT_VOLATILE_JUMBO: /* 0x1f8 */
+/* File: mips/OP_IPUT_OBJECT_VOLATILE_JUMBO.S */
+/* File: mips/OP_IPUT_OBJECT_JUMBO.S */
+    /*
+     * Jumbo 32-bit instance field put.
+     */
+    /* iput-object/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+    FETCH(a1, 1)                           # a1<- aaaa (lo)
+    FETCH(a2, 2)                           # a2<- AAAA (hi)
+    FETCH(a0, 4)                           # a0<- CCCC
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    sll      a2,a2,16
+    or       a1, a1, a2                    # a1<- AAAAaaaa
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[CCCC], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IPUT_OBJECT_VOLATILE_JUMBO_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    b         .LOP_IPUT_OBJECT_VOLATILE_JUMBO_resolved
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SGET_VOLATILE_JUMBO: /* 0x1f9 */
+/* File: mips/OP_SGET_VOLATILE_JUMBO.S */
+/* File: mips/OP_SGET_JUMBO.S */
+     /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+     /* exop vBBBB, field@AAAAAAAA */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a0, 1)                           # a0<- aaaa (lo)
+    FETCH(a1, 2)                           # a1<- AAAA (hi)
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    sll       a1,a1,16
+    or        a1, a0, a1                   # a1<- AAAAaaaa
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    # is resolved entry non-null?
+    bnez      a0, .LOP_SGET_VOLATILE_JUMBO_finish
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  AAAAAAAA field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() could throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    # success?
+    beqz      v0, common_exceptionThrown   #  no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b        .LOP_SGET_VOLATILE_JUMBO_finish            # resume
+
+
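+    /*
+     * The static-field handlers use the same cache-then-resolve shape,
+     * with pResFields holding StaticField pointers.  Under WITH_JIT the
+     * slow path additionally calls common_verifyField, so a trace under
+     * construction never captures this instruction while the field is
+     * still unresolved.
+     */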
+/* ------------------------------ */
+    .balign 128
+.L_OP_SGET_WIDE_VOLATILE_JUMBO: /* 0x1fa */
+/* File: mips/OP_SGET_WIDE_VOLATILE_JUMBO.S */
+/* File: mips/OP_SGET_WIDE_JUMBO.S */
+    /*
+     * Jumbo 64-bit SGET handler.
+     */
+    /* sget-wide/jumbo vBBBB, field@AAAAAAAA */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a0, 1)                           # a0<- aaaa (lo)
+    FETCH(a1, 2)                           # a1<- AAAA (hi)
+    LOAD_base_offDvmDex_pResFields(a2, a2) #  a2 <- dvmDex->pResFields
+    sll       a1,a1,16
+    or        a1, a0, a1                 # a1<- AAAAaaaa
+    LOAD_eas2(a0, a2, a1)                #  a0 <- resolved StaticField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_SGET_WIDE_VOLATILE_JUMBO_finish
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  AAAAAAAA field ref
+     *
+     * Returns StaticField pointer in v0.
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    # success?
+    beqz      v0, common_exceptionThrown   #  no, handle exception
+    b        .LOP_SGET_WIDE_VOLATILE_JUMBO_finish            # resume
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SGET_OBJECT_VOLATILE_JUMBO: /* 0x1fb */
+/* File: mips/OP_SGET_OBJECT_VOLATILE_JUMBO.S */
+/* File: mips/OP_SGET_OBJECT_JUMBO.S */
+/* File: mips/OP_SGET_JUMBO.S */
+     /*
+     * Jumbo 32-bit SGET handler.
+     *
+     * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+     *      sget-char/jumbo, sget-short/jumbo
+     */
+     /* exop vBBBB, field@AAAAAAAA */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a0, 1)                           # a0<- aaaa (lo)
+    FETCH(a1, 2)                           # a1<- AAAA (hi)
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    sll       a1,a1,16
+    or        a1, a0, a1                   # a1<- AAAAaaaa
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    # is resolved entry non-null?
+    bnez      a0, .LOP_SGET_OBJECT_VOLATILE_JUMBO_finish
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  AAAAAAAA field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() could throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    # success?
+    beqz      v0, common_exceptionThrown   #  no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b        .LOP_SGET_OBJECT_VOLATILE_JUMBO_finish            # resume
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SPUT_VOLATILE_JUMBO: /* 0x1fc */
+/* File: mips/OP_SPUT_VOLATILE_JUMBO.S */
+/* File: mips/OP_SPUT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler.
+     *
+     * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+     *      sput-short/jumbo
+     */
+    /* exop vBBBB, field@AAAAAAAA */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a0, 1)                           # a0<- aaaa (lo)
+    FETCH(a1, 2)                           # a1<- AAAA (hi)
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    sll       a1,a1,16
+    or        a1, a0, a1                   # a1<- AAAAaaaa
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    bnez      a0, .LOP_SPUT_VOLATILE_JUMBO_finish       #  is resolved entry null?
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  AAAAAAAA field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() may throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    beqz      v0, common_exceptionThrown   #  success? no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b        .LOP_SPUT_VOLATILE_JUMBO_finish            # resume
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SPUT_WIDE_VOLATILE_JUMBO: /* 0x1fd */
+/* File: mips/OP_SPUT_WIDE_VOLATILE_JUMBO.S */
+/* File: mips/OP_SPUT_WIDE_JUMBO.S */
+    /*
+     * Jumbo 64-bit SPUT handler.
+     */
+    /* sput-wide/jumbo vBBBB, field@AAAAAAAA */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a0, 1)                           # a0<- aaaa (lo)
+    FETCH(a1, 2)                           # a1<- AAAA (hi)
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    sll       a1,a1,16
+    or        a1, a0, a1                   # a1<- AAAAaaaa
+    FETCH(rOBJ, 3)                         # rOBJ<- BBBB
+    LOAD_eas2(a2, rBIX, a1)                #  a2 <- resolved StaticField ptr
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ<- &fp[BBBB]
+    # is resolved entry null?
+    beqz      a2, .LOP_SPUT_WIDE_VOLATILE_JUMBO_resolve      #  yes, do resolve
+.LOP_SPUT_WIDE_VOLATILE_JUMBO_finish:                        #  field ptr in a2, BBBB in rOBJ
+    FETCH_ADVANCE_INST(4)                  #  advance rPC, load rINST
+    LOAD64(a0, a1, rOBJ)                   #  a0/a1 <- vBBBB/vBBBB+1
+    GET_INST_OPCODE(rBIX)                  #  extract opcode from rINST
+    .if 1
+    addu    a2, offStaticField_value       #  a2<- pointer to data
+    JAL(dvmQuasiAtomicSwap64Sync)          #  stores a0/a1 into addr a2
+    .else
+    STORE64_off(a0, a1, a2, offStaticField_value) #  field <- vBBBB/vBBBB+1
+    .endif
+    GOTO_OPCODE(rBIX)                      #  jump to next instruction
+
+
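+    /*
+     * A 64-bit store is not atomic on 32-bit MIPS, so the volatile wide
+     * put above (the .if 1 arm) routes the a0/a1 pair through
+     * dvmQuasiAtomicSwap64Sync rather than a plain STORE64; the Sync
+     * variant also supplies the ordering that volatile semantics require.
+     */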
+/* ------------------------------ */
+    .balign 128
+.L_OP_SPUT_OBJECT_VOLATILE_JUMBO: /* 0x1fe */
+/* File: mips/OP_SPUT_OBJECT_VOLATILE_JUMBO.S */
+/* File: mips/OP_SPUT_OBJECT_JUMBO.S */
+    /*
+     * Jumbo 32-bit SPUT handler for objects
+     */
+    /* sput-object/jumbo vBBBB, field@AAAAAAAA */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a0, 1)                           # a0<- aaaa (lo)
+    FETCH(a1, 2)                           # a1<- AAAA (hi)
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    sll     a1,a1,16
+    or      a1,a0,a1                       # a1<- AAAAaaaa
+
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    bnez      a0, .LOP_SPUT_OBJECT_VOLATILE_JUMBO_finish       #  is resolved entry null?
+
+    /* Continuation if the field has not yet been resolved.
+     * a1:  AAAAAAAA field ref
+     * rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() may throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    beqz      v0, common_exceptionThrown   #  success? no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b         .LOP_SPUT_OBJECT_VOLATILE_JUMBO_finish           #  resume
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_THROW_VERIFICATION_ERROR_JUMBO: /* 0x1ff */
+/* File: mips/OP_THROW_VERIFICATION_ERROR_JUMBO.S */
+    /*
+     * Handle a jumbo throw-verification-error instruction.  This throws an
+     * exception for an error discovered during verification.  The
+     * exception is indicated by BBBB, with some detail provided by AAAAAAAA.
+     */
+     /* exop BBBB, Class@AAAAAAAA */
+    FETCH(a1, 1)                           # a1<- aaaa (lo)
+    FETCH(a2, 2)                           # a2<- AAAA (hi)
+    LOAD_rSELF_method(a0)                  #  a0 <- self->method
+    sll    a2,a2,16
+    or     a2, a1, a2                      # a2<- AAAAaaaa
+    EXPORT_PC()                            #  export the PC
+    FETCH(a1, 3)                           # a1<- BBBB
+    JAL(dvmThrowVerificationError)         #  always throws
+    b         common_exceptionThrown       #  handle exception
+
+
+    .balign 128
+    .size   dvmAsmInstructionStart, .-dvmAsmInstructionStart
+    .global dvmAsmInstructionEnd
+dvmAsmInstructionEnd:
+
+/*
+ * ===========================================================================
+ *  Sister implementations
+ * ===========================================================================
+ */
+    .global dvmAsmSisterStart
+    .type   dvmAsmSisterStart, %function
+    .text
+    .balign 4
+dvmAsmSisterStart:
+
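+/*
+ * The code from here on is out-of-line continuation code: slow paths
+ * (resolution, class initialization, exceptions) that would not fit in a
+ * handler's fixed-size slot are emitted here and reached by branches from
+ * the main handler table above.
+ */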
+/* continuation for OP_CHECK_CAST */
+
+.LOP_CHECK_CAST_castfailure:
+    # A cast has failed. We need to throw a ClassCastException with the
+    # class of the object that failed to be cast.
+    EXPORT_PC()                            #  about to throw
+    LOAD_base_offObject_clazz(a0, rOBJ)    #  a0 <- obj->clazz
+    move      a1,rBIX                      #  a1 <- desired class
+    JAL(dvmThrowClassCastException)
+    b         common_exceptionThrown
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     *  a2   holds BBBB
+     *  rOBJ holds object
+     */
+.LOP_CHECK_CAST_resolve:
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_rSELF_method(a3)                  #  a3 <- self->method
+    move      a1, a2                       #  a1 <- BBBB
+    li        a2, 0                        #  a2 <- false
+    LOAD_base_offMethod_clazz(a0, a3)      #  a0 <- method->clazz
+    JAL(dvmResolveClass)                   #  v0 <- resolved ClassObject ptr
+    # got null?
+    beqz      v0, common_exceptionThrown   #  yes, handle exception
+    move      a1, v0                       #  a1 <- class resolved from BBBB
+    LOAD_base_offObject_clazz(a0, rOBJ)    #  a0 <- obj->clazz
+    b         .LOP_CHECK_CAST_resolved         #  pick up where we left off
+
+/* continuation for OP_INSTANCE_OF */
+
+    /*
+     * Trivial test failed, need to perform full check.  This is common.
+     *  a0   holds obj->clazz
+     *  a1   holds class resolved from BBBB
+     *  rOBJ holds A
+     */
+.LOP_INSTANCE_OF_fullcheck:
+    JAL(dvmInstanceofNonTrivial)           #  v0 <- boolean result
+    move      a0, v0                       #  fall through to OP_INSTANCE_OF_store
+    b         .LOP_INSTANCE_OF_store
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     *  a3   holds BBBB
+     *  rOBJ holds A
+     */
+.LOP_INSTANCE_OF_resolve:
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_rSELF_method(a0)                  #  a0 <- self->method
+    move      a1, a3                       #  a1 <- BBBB
+    li        a2, 1                        #  a2 <- true
+    LOAD_base_offMethod_clazz(a0, a0)      #  a0 <- method->clazz
+    JAL(dvmResolveClass)                   #  v0 <- resolved ClassObject ptr
+    # got null?
+    move      a1, v0                       #  a1 <- class resolved from BBBB
+    beqz      v0, common_exceptionThrown   #  yes, handle exception
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG(a0, a3)                       #  a0 <- vB (object)
+    LOAD_base_offObject_clazz(a0, a0)      #  a0 <- obj->clazz
+    b         .LOP_INSTANCE_OF_resolved         #  pick up where we left off
+
+
+/* continuation for OP_NEW_INSTANCE */
+
+.LOP_NEW_INSTANCE_continue:
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG(v0, a3)                       #  vAA <- v0
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+#if defined(WITH_JIT)
+    /*
+     * Check to see if we need to stop the trace building early.
+     * v0: new object
+     * a3: vAA
+     */
+.LOP_NEW_INSTANCE_jitCheck:
+    lw        a1, 0(rBIX)                  #  reload resolved class
+    # okay?
+    bnez      a1, .LOP_NEW_INSTANCE_continue     #  yes, finish
+    move      rOBJ, v0                     #  preserve new object
+    move      rBIX, a3                     #  preserve vAA
+    move      a0, rSELF
+    move      a1, rPC
+    JAL(dvmJitEndTraceSelect)              #  (self, pc)
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG(rOBJ, rBIX)                   #  vAA <- new object
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+#endif
+
+    /*
+     * Class initialization required.
+     *
+     *  a0 holds class object
+     */
+.LOP_NEW_INSTANCE_needinit:
+    JAL(dvmInitClass)                      #  initialize class
+    move      a0, rOBJ                     #  restore a0
+    # check boolean result
+    bnez      v0, .LOP_NEW_INSTANCE_initialized  #  success, continue
+    b         common_exceptionThrown       #  failed, deal with init exception
+
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     *  a1 holds BBBB
+     */
+.LOP_NEW_INSTANCE_resolve:
+    LOAD_rSELF_method(a3)                  #  a3 <- self->method
+    li        a2, 0                        #  a2 <- false
+    LOAD_base_offMethod_clazz(a0, a3)      #  a0 <- method->clazz
+    JAL(dvmResolveClass)                   #  v0 <- resolved ClassObject ptr
+    move      a0, v0
+    # got null?
+    bnez      v0, .LOP_NEW_INSTANCE_resolved     #  no, continue
+    b         common_exceptionThrown       #  yes, handle exception
+
+/* continuation for OP_NEW_ARRAY */
+
+    /*
+     * Resolve class.  (This is an uncommon case.)
+     *
+     *  a1 holds array length
+     *  a2 holds class ref CCCC
+     */
+.LOP_NEW_ARRAY_resolve:
+    LOAD_rSELF_method(a3)                  #  a3 <- self->method
+    move      rOBJ, a1                     #  rOBJ <- length (save)
+    move      a1, a2                       #  a1 <- CCCC
+    li        a2, 0                        #  a2 <- false
+    LOAD_base_offMethod_clazz(a0, a3)      #  a0 <- method->clazz
+    JAL(dvmResolveClass)                   #  v0 <- call(clazz, ref)
+    move      a1, rOBJ                     #  a1 <- length (restore)
+    # got null?
+    beqz      v0, common_exceptionThrown   #  yes, handle exception
+    move      a0, v0
+    b         .LOP_NEW_ARRAY_finish           #  continue with OP_NEW_ARRAY_finish
+
+
+
+/* continuation for OP_FILLED_NEW_ARRAY */
+
+    /*
+     * On entry:
+     *  a0 holds array class
+     *  rOBJ holds AA or BA
+     */
+.LOP_FILLED_NEW_ARRAY_continue:
+    LOAD_base_offClassObject_descriptor(a3, a0) #  a3 <- arrayClass->descriptor
+    li        a2, ALLOC_DONT_TRACK         #  a2 <- alloc flags
+    lbu       rINST, 1(a3)                 #  rINST <- descriptor[1]
+    .if 0
+    move      a1, rOBJ                     #  a1 <- AA (length)
+    .else
+    srl       a1, rOBJ, 4                  #  a1 <- B (length)
+    .endif
+    seq       t0, rINST, 'I'               #  array of ints?
+    seq       t1, rINST, 'L'               #  array of objects?
+    or        t0, t1
+    seq       t1, rINST, '['               #  array of arrays?
+    or        t0, t1
+    move      rBIX, a1                     #  save length in rBIX
+    beqz      t0, .LOP_FILLED_NEW_ARRAY_notimpl      #  no, not handled yet
+    JAL(dvmAllocArrayByClass)              #  v0 <- call(arClass, length, flags)
+    # null return?
+    beqz      v0, common_exceptionThrown   #  alloc failed, handle exception
+
+    FETCH(a1, 2)                           #  a1 <- FEDC or CCCC
+    sw        v0, offThread_retval(rSELF)  #  retval.l <- new array
+    sw        rINST, (offThread_retval+4)(rSELF) #  retval.h <- type
+    addu      a0, v0, offArrayObject_contents #  a0 <- newArray->contents
+    subu      rBIX, rBIX, 1                #  length--, check for neg
+    FETCH_ADVANCE_INST(3)                  #  advance to next instr, load rINST
+    bltz      rBIX, 2f                     #  was zero, bail
+
+    # copy values from registers into the array
+    # a0=array, a1=CCCC/FEDC, t0=length (from AA or B), rOBJ=AA/BA
+    move      t0, rBIX
+    .if 0
+    EAS2(a2, rFP, a1)                      #  a2 <- &fp[CCCC]
+1:
+    lw        a3, 0(a2)                    #  a3 <- *a2++
+    addu      a2, 4
+    subu      t0, t0, 1                    #  count--
+    sw        a3, (a0)                     #  *contents++ = vX
+    addu      a0, 4
+    bgez      t0, 1b
+
+    # continue at 2
+    .else
+    slt       t1, t0, 4                    #  length was initially 5?
+    and       a2, rOBJ, 15                 #  a2 <- A
+    bnez      t1, 1f                       #  <= 4 args, branch
+    GET_VREG(a3, a2)                       #  a3 <- vA
+    subu      t0, t0, 1                    #  count--
+    sw        a3, 16(a0)                   #  contents[4] = vA
+1:
+    and       a2, a1, 15                   #  a2 <- F/E/D/C
+    GET_VREG(a3, a2)                       #  a3 <- vF/vE/vD/vC
+    srl       a1, a1, 4                    #  a1 <- next reg in low 4
+    subu      t0, t0, 1                    #  count--
+    sw        a3, 0(a0)                    #  *contents++ = vX
+    addu      a0, a0, 4
+    bgez      t0, 1b
+    # continue at 2
+    .endif
+
+2:
+    lw        a0, offThread_retval(rSELF)  #  a0 <- object
+    lw        a1, (offThread_retval+4)(rSELF) #  a1 <- type
+    seq       t1, a1, 'I'                  #  Is int array?
+    bnez      t1, 3f
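+    # Object and array element types need a GC write barrier; int arrays
+    # (tested above) skip it.  The sb below stores the card-table base
+    # register itself: in Dalvik's biased card-table scheme the base's
+    # low byte serves as the dirty-card value.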
+    lw        a2, offThread_cardTable(rSELF) #  a2 <- card table base
+    srl       t3, a0, GC_CARD_SHIFT
+    addu      t2, a2, t3
+    sb        a2, (t2)
+3:
+    GET_INST_OPCODE(t0)                    #  ip <- opcode from rINST
+    GOTO_OPCODE(t0)                        #  execute it
+
+
+    /*
+     * Throw an exception indicating that we have not implemented this
+     * mode of filled-new-array.
+     */
+.LOP_FILLED_NEW_ARRAY_notimpl:
+    la        a0, .LstrFilledNewArrayNotImpl
+    JAL(dvmThrowInternalError)
+    b         common_exceptionThrown
+
+    /*
+     * Ideally we'd only define this once, but depending on layout we can
+     * exceed the range of the load above.
+     */
+
+/* continuation for OP_FILLED_NEW_ARRAY_RANGE */
+
+    /*
+     * On entry:
+     *  a0 holds array class
+     *  rOBJ holds AA or BA
+     */
+.LOP_FILLED_NEW_ARRAY_RANGE_continue:
+    LOAD_base_offClassObject_descriptor(a3, a0) #  a3 <- arrayClass->descriptor
+    li        a2, ALLOC_DONT_TRACK         #  a2 <- alloc flags
+    lbu       rINST, 1(a3)                 #  rINST <- descriptor[1]
+    .if 1
+    move      a1, rOBJ                     #  a1 <- AA (length)
+    .else
+    srl       a1, rOBJ, 4                  #  a1 <- B (length)
+    .endif
+    seq       t0, rINST, 'I'               #  array of ints?
+    seq       t1, rINST, 'L'               #  array of objects?
+    or        t0, t1
+    seq       t1, rINST, '['               #  array of arrays?
+    or        t0, t1
+    move      rBIX, a1                     #  save length in rBIX
+    beqz      t0, .LOP_FILLED_NEW_ARRAY_RANGE_notimpl      #  no, not handled yet
+    JAL(dvmAllocArrayByClass)              #  v0 <- call(arClass, length, flags)
+    # null return?
+    beqz      v0, common_exceptionThrown   #  alloc failed, handle exception
+
+    FETCH(a1, 2)                           #  a1 <- FEDC or CCCC
+    sw        v0, offThread_retval(rSELF)  #  retval.l <- new array
+    sw        rINST, (offThread_retval+4)(rSELF) #  retval.h <- type
+    addu      a0, v0, offArrayObject_contents #  a0 <- newArray->contents
+    subu      rBIX, rBIX, 1                #  length--, check for neg
+    FETCH_ADVANCE_INST(3)                  #  advance to next instr, load rINST
+    bltz      rBIX, 2f                     #  was zero, bail
+
+    # copy values from registers into the array
+    # a0=array, a1=CCCC/FEDC, t0=length (from AA or B), rOBJ=AA/BA
+    move      t0, rBIX
+    .if 1
+    EAS2(a2, rFP, a1)                      #  a2 <- &fp[CCCC]
+1:
+    lw        a3, 0(a2)                    #  a3 <- *a2++
+    addu      a2, 4
+    subu      t0, t0, 1                    #  count--
+    sw        a3, (a0)                     #  *contents++ = vX
+    addu      a0, 4
+    bgez      t0, 1b
+
+    # continue at 2
+    .else
+    slt       t1, t0, 4                    #  length was initially 5?
+    and       a2, rOBJ, 15                 #  a2 <- A
+    bnez      t1, 1f                       #  <= 4 args, branch
+    GET_VREG(a3, a2)                       #  a3 <- vA
+    subu      t0, t0, 1                    #  count--
+    sw        a3, 16(a0)                   #  contents[4] = vA
+1:
+    and       a2, a1, 15                   #  a2 <- F/E/D/C
+    GET_VREG(a3, a2)                       #  a3 <- vF/vE/vD/vC
+    srl       a1, a1, 4                    #  a1 <- next reg in low 4
+    subu      t0, t0, 1                    #  count--
+    sw        a3, 0(a0)                    #  *contents++ = vX
+    addu      a0, a0, 4
+    bgez      t0, 1b
+    # continue at 2
+    .endif
+
+2:
+    lw        a0, offThread_retval(rSELF)  #  a0 <- object
+    lw        a1, (offThread_retval+4)(rSELF) #  a1 <- type
+    seq       t1, a1, 'I'                  #  Is int array?
+    bnez      t1, 3f
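+    # Same card-marking scheme as the non-range variant above.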
+    lw        a2, offThread_cardTable(rSELF) #  a2 <- card table base
+    srl       t3, a0, GC_CARD_SHIFT
+    addu      t2, a2, t3
+    sb        a2, (t2)
+3:
+    GET_INST_OPCODE(t0)                    #  ip <- opcode from rINST
+    GOTO_OPCODE(t0)                        #  execute it
+
+
+    /*
+     * Throw an exception indicating that we have not implemented this
+     * mode of filled-new-array.
+     */
+.LOP_FILLED_NEW_ARRAY_RANGE_notimpl:
+    la        a0, .LstrFilledNewArrayNotImpl
+    JAL(dvmThrowInternalError)
+    b         common_exceptionThrown
+
+    /*
+     * Ideally we'd only define this once, but depending on layout we can
+     * exceed the range of the load above.
+     */
+
+/* continuation for OP_CMPL_FLOAT */
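+/*
+ * Per dex semantics, cmpl-* yields -1 when either operand is NaN while
+ * cmpg-* yields +1; that bias is the only difference between this stub
+ * and the OP_CMPG_FLOAT version below.
+ */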
+
+OP_CMPL_FLOAT_nan:
+    li rTEMP, -1
+    b         OP_CMPL_FLOAT_finish
+
+#ifdef SOFT_FLOAT
+OP_CMPL_FLOAT_continue:
+    JAL(__gtsf2)                           #  v0 <- (vBB > vCC)
+    li        rTEMP, 1                     #  rTEMP = 1 if v0 != 0
+    bgtz      v0, OP_CMPL_FLOAT_finish
+    b         OP_CMPL_FLOAT_nan
+#endif
+
+OP_CMPL_FLOAT_finish:
+    GET_OPA(t0)
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    SET_VREG(rTEMP, t0)                    #  vAA <- rTEMP
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)
+
+/* continuation for OP_CMPG_FLOAT */
+
+OP_CMPG_FLOAT_nan:
+    li rTEMP, 1
+    b         OP_CMPG_FLOAT_finish
+
+#ifdef SOFT_FLOAT
+OP_CMPG_FLOAT_continue:
+    JAL(__gtsf2)                           #  v0 <- (vBB > vCC)
+    li        rTEMP, 1                     #  rTEMP = 1 if v0 != 0
+    bgtz      v0, OP_CMPG_FLOAT_finish
+    b         OP_CMPG_FLOAT_nan
+#endif
+
+OP_CMPG_FLOAT_finish:
+    GET_OPA(t0)
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    SET_VREG(rTEMP, t0)                    #  vAA <- rTEMP
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)
+
+/* continuation for OP_CMPL_DOUBLE */
+
+OP_CMPL_DOUBLE_nan:
+    li rTEMP, -1
+    b         OP_CMPL_DOUBLE_finish
+
+#ifdef SOFT_FLOAT
+OP_CMPL_DOUBLE_continue:
+    LOAD64(rARG2, rARG3, rBIX)             #  a2/a3 <- vCC/vCC+1
+    JAL(__gtdf2)                           #  fallthru
+    li        rTEMP, 1                     #  rTEMP = 1 if v0 != 0
+    blez      v0, OP_CMPL_DOUBLE_nan            #  fall thru for finish
+#endif
+
+OP_CMPL_DOUBLE_finish:
+    GET_OPA(rOBJ)
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(rTEMP, rOBJ, t0)         #  vAA <- rTEMP
+
+/* continuation for OP_CMPG_DOUBLE */
+
+OP_CMPG_DOUBLE_nan:
+    li rTEMP, 1
+    b         OP_CMPG_DOUBLE_finish
+
+#ifdef SOFT_FLOAT
+OP_CMPG_DOUBLE_continue:
+    LOAD64(rARG2, rARG3, rBIX)             #  a2/a3 <- vCC/vCC+1
+    JAL(__gtdf2)                           #  fallthru
+    li        rTEMP, 1                     #  rTEMP = 1 if v0 != 0
+    blez      v0, OP_CMPG_DOUBLE_nan            #  fall thru for finish
+#endif
+
+OP_CMPG_DOUBLE_finish:
+    GET_OPA(rOBJ)
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(rTEMP, rOBJ, t0)         #  vAA <- rTEMP
+
+/* continuation for OP_APUT_OBJECT */
+.LOP_APUT_OBJECT_checks:
+    LOAD_base_offObject_clazz(a0, rBIX)    #  a0 <- obj->clazz
+    LOAD_base_offObject_clazz(a1, rINST)   #  a1 <- arrayObj->clazz
+    JAL(dvmCanPutArrayElement)             #  test object type vs. array type
+    beqz      v0, .LOP_APUT_OBJECT_throw        #  types incompatible? throw
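+    # The value's clazz was just loaded, so it is known non-null here;
+    # the card can be marked unconditionally (storing the card-table
+    # base register marks the card dirty in Dalvik's biased scheme).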
+    lw        a2, offThread_cardTable(rSELF)
+    srl       t1, rINST, GC_CARD_SHIFT
+    addu      t2, a2, t1
+    sb        a2, (t2)
+    b         .LOP_APUT_OBJECT_finish           #  types ok, do the store
+.LOP_APUT_OBJECT_throw:
+    LOAD_base_offObject_clazz(a0, rBIX)    #  a0 <- obj->clazz
+    LOAD_base_offObject_clazz(a1, rINST)   #  a1 <- arrayObj->clazz
+    EXPORT_PC()
+    JAL(dvmThrowArrayStoreExceptionIncompatibleElement)
+    b         common_exceptionThrown
+
+/* continuation for OP_IGET */
+
+    /*
+     * Currently:
+     *  v0 holds resolved field
+     *  rOBJ holds object (caller saved)
+     */
+.LOP_IGET_finish:
+    #BAL(common_squeak0)
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    addu      a3, a3, rOBJ                 #  form address
+    lw a0, (a3)                         #  a0 <- obj.field (8/16/32 bits)
+         # noop                               #  acquiring load
+    GET_OPA4(a2)                           #  a2 <- A+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG(a0, a2)                       #  fp[A] <- a0
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* continuation for OP_IGET_WIDE */
+
+    /*
+     * Currently:
+     *  a0   holds resolved field
+     *  rOBJ holds object
+     */
+.LOP_IGET_WIDE_finish:
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    beqz      rOBJ, common_errNullObject   #  object was null
+    GET_OPA4(a2)                           #  a2 <- A+
+    addu      rOBJ, rOBJ, a3               #  form address
+    .if 0
+    vLOAD64(a0, a1, rOBJ)                  #  a0/a1 <- obj.field (64-bit align ok)
+    .else
+    LOAD64(a0, a1, rOBJ)                   #  a0/a1 <- obj.field (64-bit align ok)
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    EAS2(a3, rFP, a2)                      #  a3 <- &fp[A]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(a0, a1, a3)                    #  fp[A] <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* continuation for OP_IGET_OBJECT */
+
+    /*
+     * Currently:
+     *  v0 holds resolved field
+     *  rOBJ holds object (caller saved)
+     */
+.LOP_IGET_OBJECT_finish:
+    #BAL(common_squeak0)
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    addu      a3, a3, rOBJ                 #  form address
+    lw a0, (a3)                         #  a0 <- obj.field (8/16/32 bits)
+         # noop                               #  acquiring load
+    GET_OPA4(a2)                           #  a2 <- A+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG(a0, a2)                       #  fp[A] <- a0
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* continuation for OP_IGET_BOOLEAN */
+
+    /*
+     * Currently:
+     *  v0 holds resolved field
+     *  rOBJ holds object (caller saved)
+     */
+.LOP_IGET_BOOLEAN_finish:
+    #BAL(common_squeak0)
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    addu      a3, a3, rOBJ                 #  form address
+    lw a0, (a3)                         #  a0 <- obj.field (8/16/32 bits)
+         # noop                               #  acquiring load
+    GET_OPA4(a2)                           #  a2 <- A+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG(a0, a2)                       #  fp[A] <- a0
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* continuation for OP_IGET_BYTE */
+
+    /*
+     * Currently:
+     *  v0 holds resolved field
+     *  rOBJ holds object (caller saved)
+     */
+.LOP_IGET_BYTE_finish:
+    #BAL(common_squeak0)
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    addu      a3, a3, rOBJ                 #  form address
+    lw a0, (a3)                         #  a0 <- obj.field (8/16/32 bits)
+         # noop                               #  acquiring load
+    GET_OPA4(a2)                           #  a2 <- A+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG(a0, a2)                       #  fp[A] <- a0
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* continuation for OP_IGET_CHAR */
+
+    /*
+     * Currently:
+     *  v0 holds resolved field
+     *  rOBJ holds object (caller saved)
+     */
+.LOP_IGET_CHAR_finish:
+    #BAL(common_squeak0)
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    addu      a3, a3, rOBJ                 #  form address
+    lw a0, (a3)                         #  a0 <- obj.field (8/16/32 bits)
+         # noop                               #  acquiring load
+    GET_OPA4(a2)                           #  a2 <- A+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG(a0, a2)                       #  fp[A] <- a0
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* continuation for OP_IGET_SHORT */
+
+    /*
+     * Currently:
+     *  v0 holds resolved field
+     *  rOBJ holds object (caller saved)
+     */
+.LOP_IGET_SHORT_finish:
+    #BAL(common_squeak0)
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    addu      a3, a3, rOBJ                 #  form address
+    lw a0, (a3)                         #  a0 <- obj.field (8/16/32 bits)
+         # noop                               #  acquiring load
+    GET_OPA4(a2)                           #  a2 <- A+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG(a0, a2)                       #  fp[A] <- a0
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* continuation for OP_IPUT */
+
+    /*
+     * Currently:
+     *  a0 holds resolved field
+     *  rOBJ holds object
+     */
+.LOP_IPUT_finish:
+    #BAL(common_squeak0)
+    GET_OPA4(a1)                           #  a1 <- A+
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    GET_VREG(a0, a1)                       #  a0 <- fp[A]
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    addu      rOBJ, rOBJ, a3               #  form address
+        #  noop                            #  releasing store
+    sw a0, (rOBJ)                      #  obj.field (8/16/32 bits) <- a0
+        #  noop
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* continuation for OP_IPUT_WIDE */
+
+    /*
+     * Currently:
+     *  a0   holds resolved field
+     *  rOBJ holds object
+     */
+.LOP_IPUT_WIDE_finish:
+    GET_OPA4(a2)                           #  a2 <- A+
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[A]
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    LOAD64(a0, a1, a2)                     #  a0/a1 <- fp[A]
+    GET_INST_OPCODE(rBIX)                  #  extract opcode from rINST
+    addu      a2, rOBJ, a3                 #  form address
+    .if 0
+    JAL(dvmQuasiAtomicSwap64Sync)          # stores a0/a1 into addr a2
+#    STORE64(a0, a1, a2)                    #  obj.field (64 bits, aligned) <- a0 a1
+    .else
+    STORE64(a0, a1, a2)                    #  obj.field (64 bits, aligned) <- a0 a1
+    .endif
+    GOTO_OPCODE(rBIX)                      #  jump to next instruction
+
+
+/* continuation for OP_IPUT_OBJECT */
+
+    /*
+     * Currently:
+     *  a0 holds resolved field
+     *  rOBJ holds object
+     */
+.LOP_IPUT_OBJECT_finish:
+    #BAL(common_squeak0)
+    GET_OPA4(a1)                           #  a1 <- A+
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    GET_VREG(a0, a1)                       #  a0 <- fp[A]
+    lw        a2, offThread_cardTable(rSELF) #  a2 <- card table base
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    addu      t2, rOBJ, a3                 #  form address
+        #  noop                            #  releasing store
+    sw a0, (t2)                        #  obj.field (32 bits) <- a0
+        #  noop
+    beqz      a0, 1f                       #  stored a null reference?
+    srl       t1, rOBJ, GC_CARD_SHIFT
+    addu      t2, a2, t1
+    sb        a2, (t2)                     #  mark card if not
+1:
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* continuation for OP_IPUT_BOOLEAN */
+
+    /*
+     * Currently:
+     *  a0 holds resolved field
+     *  rOBJ holds object
+     */
+.LOP_IPUT_BOOLEAN_finish:
+    #BAL(common_squeak0)
+    GET_OPA4(a1)                           #  a1 <- A+
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    GET_VREG(a0, a1)                       #  a0 <- fp[A]
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    addu      rOBJ, rOBJ, a3               #  form address
+        #  noop                            #  releasing store
+    sw a0, (rOBJ)                      #  obj.field (8/16/32 bits) <- a0
+        #  noop
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* continuation for OP_IPUT_BYTE */
+
+    /*
+     * Currently:
+     *  a0 holds resolved field
+     *  rOBJ holds object
+     */
+.LOP_IPUT_BYTE_finish:
+    #BAL(common_squeak0)
+    GET_OPA4(a1)                           #  a1 <- A+
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    GET_VREG(a0, a1)                       #  a0 <- fp[A]
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    addu      rOBJ, rOBJ, a3               #  form address
+        #  noop                            #  releasing store
+    sw a0, (rOBJ)                      #  obj.field (8/16/32 bits) <- a0
+        #  noop
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* continuation for OP_IPUT_CHAR */
+
+    /*
+     * Currently:
+     *  a0 holds resolved field
+     *  rOBJ holds object
+     */
+.LOP_IPUT_CHAR_finish:
+    #BAL(common_squeak0)
+    GET_OPA4(a1)                           #  a1 <- A+
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    GET_VREG(a0, a1)                       #  a0 <- fp[A]
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    addu      rOBJ, rOBJ, a3               #  form address
+        #  noop                            #  releasing store
+    sw a0, (rOBJ)                      #  obj.field (8/16/32 bits) <- a0
+        #  noop
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* continuation for OP_IPUT_SHORT */
+
+    /*
+     * Currently:
+     *  a0 holds resolved field
+     *  rOBJ holds object
+     */
+.LOP_IPUT_SHORT_finish:
+    #BAL(common_squeak0)
+    GET_OPA4(a1)                           #  a1 <- A+
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    GET_VREG(a0, a1)                       #  a0 <- fp[A]
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    addu      rOBJ, rOBJ, a3               #  form address
+        #  noop                            #  releasing store
+    sw a0, (rOBJ)                      #  obj.field (8/16/32 bits) <- a0
+        #  noop
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* continuation for OP_SGET */
+
+.LOP_SGET_finish:
+    LOAD_base_offStaticField_value(a1, a0) #  a1 <- field value
+                      #  no-op                                #  acquiring load
+    GET_OPA(a2)                            #  a2 <- AA
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a1, a2, t0)              #  fp[AA] <- a1
+
+/* continuation for OP_SGET_WIDE */
+
+.LOP_SGET_WIDE_finish:
+    GET_OPA(a1)                            #  a1 <- AA
+    .if 0
+    vLOAD64_off(a2, a3, a0, offStaticField_value) #  a2/a3 <- field value (aligned)
+    .else
+    LOAD64_off(a2, a3, a0, offStaticField_value) #  a2/a3 <- field value (aligned)
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    EAS2(a1, rFP, a1)                      #  a1 <- &fp[AA]
+    STORE64(a2, a3, a1)                    #  vAA/vAA+1 <- a2/a3
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+
+/* continuation for OP_SGET_OBJECT */
+
+.LOP_SGET_OBJECT_finish:
+    LOAD_base_offStaticField_value(a1, a0) #  a1 <- field value
+                      #  no-op                                #  acquiring load
+    GET_OPA(a2)                            #  a2 <- AA
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a1, a2, t0)              #  fp[AA] <- a1
+
+/* continuation for OP_SGET_BOOLEAN */
+
+.LOP_SGET_BOOLEAN_finish:
+    LOAD_base_offStaticField_value(a1, a0) #  a1 <- field value
+                      #  no-op                                #  acquiring load
+    GET_OPA(a2)                            #  a2 <- AA
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a1, a2, t0)              #  fp[AA] <- a1
+
+/* continuation for OP_SGET_BYTE */
+
+.LOP_SGET_BYTE_finish:
+    LOAD_base_offStaticField_value(a1, a0) #  a1 <- field value
+                      #  no-op                                #  acquiring load
+    GET_OPA(a2)                            #  a2 <- AA
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a1, a2, t0)              #  fp[AA] <- a1
+
+/* continuation for OP_SGET_CHAR */
+
+.LOP_SGET_CHAR_finish:
+    LOAD_base_offStaticField_value(a1, a0) #  a1 <- field value
+                      #  no-op                                #  acquiring load
+    GET_OPA(a2)                            #  a2 <- AA
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a1, a2, t0)              #  fp[AA] <- a1
+
+/* continuation for OP_SGET_SHORT */
+
+.LOP_SGET_SHORT_finish:
+    LOAD_base_offStaticField_value(a1, a0) #  a1 <- field value
+                      #  no-op                                #  acquiring load
+    GET_OPA(a2)                            #  a2 <- AA
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a1, a2, t0)              #  fp[AA] <- a1
+
+/* continuation for OP_SPUT */
+
+.LOP_SPUT_finish:
+    # field ptr in a0
+    GET_OPA(a2)                            #  a2 <- AA
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_VREG(a1, a2)                       #  a1 <- fp[AA]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    #  no-op                            #  releasing store
+    sw        a1, offStaticField_value(a0) #  field <- vAA
+    #  no-op
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* continuation for OP_SPUT_WIDE */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  BBBB field ref
+     *  rOBJ:  &fp[AA]
+     *  rBIX: dvmDex->pResFields
+     *
+     * Returns StaticField pointer in a2.
+     */
+.LOP_SPUT_WIDE_resolve:
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() could throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    # success ?
+    move      a0, v0
+    beqz      v0, common_exceptionThrown   #  no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    move      a2, v0
+    b         .LOP_SPUT_WIDE_finish           # resume
+
+/* continuation for OP_SPUT_OBJECT */
+.LOP_SPUT_OBJECT_finish:                        #  field ptr in a0
+    GET_OPA(a2)                            #  a2 <- AA
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_VREG(a1, a2)                       #  a1 <- fp[AA]
+    lw        a2, offThread_cardTable(rSELF) #  a2 <- card table base
+    lw        t1, offField_clazz(a0)       #  t1 <- field->clazz
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    #  no-op                            #  releasing store
+    sw        a1, offStaticField_value(a0) #  field <- vAA
+    #  no-op
+    beqz      a1, 1f
+    srl       t2, t1, GC_CARD_SHIFT
+    addu      t3, a2, t2
+    sb        a2, (t3)
+1:
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* continuation for OP_SPUT_BOOLEAN */
+
+.LOP_SPUT_BOOLEAN_finish:
+    # field ptr in a0
+    GET_OPA(a2)                            #  a2 <- AA
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_VREG(a1, a2)                       #  a1 <- fp[AA]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    #  no-op                            #  releasing store
+    sw        a1, offStaticField_value(a0) #  field <- vAA
+    #  no-op
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* continuation for OP_SPUT_BYTE */
+
+.LOP_SPUT_BYTE_finish:
+    # field ptr in a0
+    GET_OPA(a2)                            #  a2 <- AA
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_VREG(a1, a2)                       #  a1 <- fp[AA]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    #  no-op                            #  releasing store
+    sw        a1, offStaticField_value(a0) #  field <- vAA
+    #  no-op
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* continuation for OP_SPUT_CHAR */
+
+.LOP_SPUT_CHAR_finish:
+    # field ptr in a0
+    GET_OPA(a2)                            #  a2 <- AA
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_VREG(a1, a2)                       #  a1 <- fp[AA]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    #  no-op                            #  releasing store
+    sw        a1, offStaticField_value(a0) #  field <- vAA
+    #  no-op
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* continuation for OP_SPUT_SHORT */
+
+.LOP_SPUT_SHORT_finish:
+    # field ptr in a0
+    GET_OPA(a2)                            #  a2 <- AA
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_VREG(a1, a2)                       #  a1 <- fp[AA]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    #  no-op                            #  releasing store
+    sw        a1, offStaticField_value(a0) #  field <- vAA
+    #  no-op
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* continuation for OP_INVOKE_VIRTUAL */
+
+    /*
+     * At this point:
+     *  a0 = resolved base method
+     *  rBIX= C or CCCC (index of first arg, which is the "this" ptr)
+     */
+.LOP_INVOKE_VIRTUAL_continue:
+    GET_VREG(rOBJ, rBIX)                   #  rOBJ <- "this" ptr
+    LOADu2_offMethod_methodIndex(a2, a0)   #  a2 <- baseMethod->methodIndex
+    # is "this" null?
+    beqz      rOBJ, common_errNullObject   #  null "this", throw exception
+    LOAD_base_offObject_clazz(a3, rOBJ)    #  a3 <- thisPtr->clazz
+    LOAD_base_offClassObject_vtable(a3, a3) #  a3 <- thisPtr->clazz->vtable
+    LOAD_eas2(a0, a3, a2)                  #  a0 <- vtable[methodIndex]
+    b         common_invokeMethodNoRange #  (a0=method, rOBJ="this")
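+    /*
+     * Straight vtable dispatch -- no bounds check, since the verifier
+     * guarantees methodIndex is valid for any subclass of the resolved
+     * method's class.  Contrast with invoke-super below, which must
+     * compare against the superclass's vtableCount.
+     */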
+
+
+/* continuation for OP_INVOKE_SUPER */
+
+    /*
+     * At this point:
+     *  a0 = resolved base method
+     *  rBIX = method->clazz
+     */
+.LOP_INVOKE_SUPER_continue:
+    LOAD_base_offClassObject_super(a1, rBIX) #  a1 <- method->clazz->super
+    LOADu2_offMethod_methodIndex(a2, a0)   #  a2 <- baseMethod->methodIndex
+    LOAD_base_offClassObject_vtableCount(a3, a1) #  a3 <- super->vtableCount
+    EXPORT_PC()                            #  must export for invoke
+    # compare (methodIndex, vtableCount)
+    bgeu      a2, a3, .LOP_INVOKE_SUPER_nsm      #  method not present in superclass
+    LOAD_base_offClassObject_vtable(a1, a1) #  a1 <- ...clazz->super->vtable
+    LOAD_eas2(a0, a1, a2)                  #  a0 <- vtable[methodIndex]
+    b         common_invokeMethodNoRange #  continue on
+
+    /*
+     * Throw a NoSuchMethodError with the method name as the message.
+     *  a0 = resolved base method
+     */
+.LOP_INVOKE_SUPER_nsm:
+    LOAD_base_offMethod_name(a1, a0)       #  a1 <- method name
+    b         common_errNoSuchMethod
+
+
+/* continuation for OP_INVOKE_STATIC */
+
+.LOP_INVOKE_STATIC_resolve:
+    LOAD_rSELF_method(a3)                  #  a3 <- self->method
+    LOAD_base_offMethod_clazz(a0, a3)      #  a0 <- method->clazz
+    li        a2, METHOD_STATIC            #  resolver method type
+    JAL(dvmResolveMethod)                  #  v0 <- call(clazz, ref, flags)
+    move      a0, v0
+#if defined(WITH_JIT)
+    /*
+     * Check to see if we're actively building a trace.  If so,
+     * we need to keep this instruction out of it.
+     * rBIX: &resolved_methodToCall
+     */
+    lhu       a2, offThread_subMode(rSELF)
+    beqz      v0, common_exceptionThrown   #  null, handle exception
+    and       a2, kSubModeJitTraceBuild    #  trace under construction?
+    beqz      a2, common_invokeMethodNoRange #  no, (a0=method)
+    lw        a1, 0(rBIX)                  #  reload resolved method
+    # finished resolving?
+    bnez      a1, common_invokeMethodNoRange #  yes, (a0=method)
+    move      rBIX, a0                     #  preserve method
+    move      a0, rSELF
+    move      a1, rPC
+    JAL(dvmJitEndTraceSelect)              #  (self, pc)
+    move      a0, rBIX
+    b         common_invokeMethodNoRange #  whew, finally!
+#else
+    # got null?
+    bnez      v0, common_invokeMethodNoRange #  no, (a0=method)
+    b         common_exceptionThrown       #  yes, handle exception
+#endif
+
+/* continuation for OP_INVOKE_VIRTUAL_RANGE */
+
+    /*
+     * At this point:
+     *  a0 = resolved base method
+     *  rBIX= C or CCCC (index of first arg, which is the "this" ptr)
+     */
+.LOP_INVOKE_VIRTUAL_RANGE_continue:
+    GET_VREG(rOBJ, rBIX)                   #  rOBJ <- "this" ptr
+    LOADu2_offMethod_methodIndex(a2, a0)   #  a2 <- baseMethod->methodIndex
+    # is "this" null?
+    beqz      rOBJ, common_errNullObject   #  null "this", throw exception
+    LOAD_base_offObject_clazz(a3, rOBJ)    #  a3 <- thisPtr->clazz
+    LOAD_base_offClassObject_vtable(a3, a3) #  a3 <- thisPtr->clazz->vtable
+    LOAD_eas2(a0, a3, a2)                  #  a0 <- vtable[methodIndex]
+    b         common_invokeMethodRange #  (a0=method, rOBJ="this")
+
+
+/* continuation for OP_INVOKE_SUPER_RANGE */
+
+    /*
+     * At this point:
+     *  a0 = resolved base method
+     *  rBIX = method->clazz
+     */
+.LOP_INVOKE_SUPER_RANGE_continue:
+    LOAD_base_offClassObject_super(a1, rBIX) #  a1 <- method->clazz->super
+    LOADu2_offMethod_methodIndex(a2, a0)   #  a2 <- baseMethod->methodIndex
+    LOAD_base_offClassObject_vtableCount(a3, a1) #  a3 <- super->vtableCount
+    EXPORT_PC()                            #  must export for invoke
+    # compare (methodIndex, vtableCount)
+    bgeu      a2, a3, .LOP_INVOKE_SUPER_RANGE_nsm      #  method not present in superclass
+    LOAD_base_offClassObject_vtable(a1, a1) #  a1 <- ...clazz->super->vtable
+    LOAD_eas2(a0, a1, a2)                  #  a0 <- vtable[methodIndex]
+    b         common_invokeMethodRange #  continue on
+
+    /*
+     * Throw a NoSuchMethodError with the method name as the message.
+     *  a0 = resolved base method
+     */
+.LOP_INVOKE_SUPER_RANGE_nsm:
+    LOAD_base_offMethod_name(a1, a0)       #  a1 <- method name
+    b         common_errNoSuchMethod
+
+
+/* continuation for OP_INVOKE_STATIC_RANGE */
+
+.LOP_INVOKE_STATIC_RANGE_resolve:
+    LOAD_rSELF_method(a3)                  #  a3 <- self->method
+    LOAD_base_offMethod_clazz(a0, a3)      #  a0 <- method->clazz
+    li        a2, METHOD_STATIC            #  resolver method type
+    JAL(dvmResolveMethod)                  #  v0 <- call(clazz, ref, flags)
+    move      a0, v0
+#if defined(WITH_JIT)
+    /*
+     * Check to see if we're actively building a trace.  If so,
+     * we need to keep this instruction out of it.
+     * rBIX: &resolved_methodToCall
+     */
+    lhu       a2, offThread_subMode(rSELF)
+    beqz      v0, common_exceptionThrown   #  null, handle exception
+    and       a2, kSubModeJitTraceBuild    #  trace under construction?
+    beqz      a2, common_invokeMethodRange #  no, (a0=method)
+    lw        a1, 0(rBIX)                  #  reload resolved method
+    # finished resolving?
+    bnez      a1, common_invokeMethodRange #  yes, (a0=method)
+    move      rBIX, a0                     #  preserve method
+    move      a0, rSELF
+    move      a1, rPC
+    JAL(dvmJitEndTraceSelect)              #  (self, pc)
+    move      a0, rBIX
+    b         common_invokeMethodRange #  whew, finally!
+#else
+    # got null?
+    bnez      v0, common_invokeMethodRange #  no, (a0=method)
+    b         common_exceptionThrown       #  yes, handle exception
+#endif
+
+/* continuation for OP_FLOAT_TO_INT */
+
+/*
+ * Not an entry point; this helper is reached only from the
+ * OP_FLOAT_TO_INT handler.
+ */
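+/*
+ * Java float-to-int semantics: NaN converts to 0, and out-of-range values
+ * clamp to Integer.MAX_VALUE / Integer.MIN_VALUE.  Both the SOFT_FLOAT
+ * and hard-float paths below test those cases before converting.
+ */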
+f2i_doconv:
+#ifdef SOFT_FLOAT
+    li        a1, 0x4f000000               #  (float)maxint
+    move      rBIX, a0
+    JAL(__gesf2)                           #  is arg >= maxint?
+    move      t0, v0
+    li        v0, ~0x80000000              #  return maxint (7fffffff)
+    bgez      t0, .LOP_FLOAT_TO_INT_set_vreg
+
+    move      a0, rBIX                     #  recover arg
+    li        a1, 0xcf000000               #  (float)minint
+    JAL(__lesf2)
+
+    move      t0, v0
+    li        v0, 0x80000000               #  return minint (80000000)
+    blez      t0, .LOP_FLOAT_TO_INT_set_vreg
+    move      a0, rBIX                     #  recover arg
+    move      a1, rBIX                     #  compare against self
+    JAL(__nesf2)                           #  nonzero only for NaN
+
+    move      t0, v0
+    li        v0, 0                        #  return zero for NaN
+    bnez      t0, .LOP_FLOAT_TO_INT_set_vreg
+
+    move      a0, rBIX
+    JAL(__fixsfsi)
+    b         .LOP_FLOAT_TO_INT_set_vreg
+#else
+    l.s       fa1, .LFLOAT_TO_INT_max
+    c.ole.s   fcc0, fa1, fa0
+    l.s       fv0, .LFLOAT_TO_INT_ret_max
+    bc1t      .LOP_FLOAT_TO_INT_set_vreg_f
+
+    l.s       fa1, .LFLOAT_TO_INT_min
+    c.ole.s   fcc0, fa0, fa1
+    l.s       fv0, .LFLOAT_TO_INT_ret_min
+    bc1t      .LOP_FLOAT_TO_INT_set_vreg_f
+
+    mov.s     fa1, fa0
+    c.un.s    fcc0, fa0, fa1
+    li.s      fv0, 0
+    bc1t      .LOP_FLOAT_TO_INT_set_vreg_f
+
+    trunc.w.s  fv0, fa0
+    b         .LOP_FLOAT_TO_INT_set_vreg_f
+#endif
+
+.LFLOAT_TO_INT_max:
+    .word 0x4f000000                       #  maxint, as a float
+.LFLOAT_TO_INT_min:
+    .word 0xcf000000                       #  minint, as a float
+.LFLOAT_TO_INT_ret_max:
+    .word 0x7fffffff                       #  maxint
+.LFLOAT_TO_INT_ret_min:
+    .word 0x80000000                       #  minint
+
+
+/* continuation for OP_FLOAT_TO_LONG */
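+/*
+ * Same clamping rules as f2i_doconv, but against Long.MAX_VALUE and
+ * Long.MIN_VALUE: 0x5f000000 and 0xdf000000 are 2^63 and -2^63 as floats.
+ */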
+
+f2l_doconv:
+#ifdef SOFT_FLOAT
+    li        a1, 0x5f000000               #  (float)maxlong
+    move      rBIX, a0
+    JAL(__gesf2)
+
+    move      t0, v0
+    li        rRESULT0, ~0
+    li        rRESULT1, ~0x80000000
+    bgez      t0, .LOP_FLOAT_TO_LONG_set_vreg
+
+    move      a0, rBIX
+    li        a1, 0xdf000000               #  (float)minlong
+    JAL(__lesf2)
+
+    move      t0, v0
+    li        rRESULT0, 0
+    li        rRESULT1, 0x80000000
+    blez      t0, .LOP_FLOAT_TO_LONG_set_vreg
+
+    move      a0, rBIX                     #  recover arg
+    move      a1, rBIX                     #  compare against self
+    JAL(__nesf2)                           #  nonzero only for NaN
+
+    move      t0, v0
+    li        rRESULT0, 0
+    li        rRESULT1, 0
+    bnez      t0, .LOP_FLOAT_TO_LONG_set_vreg
+
+    move      a0, rBIX
+    JAL(__fixsfdi)
+
+#else
+    l.s       fa1, .LLONG_TO_max
+    c.ole.s   fcc0, fa1, fa0
+    li        rRESULT0, ~0
+    li        rRESULT1, ~0x80000000
+    bc1t      .LOP_FLOAT_TO_LONG_set_vreg
+
+    l.s       fa1, .LLONG_TO_min
+    c.ole.s   fcc0, fa0, fa1
+    li        rRESULT0, 0
+    li        rRESULT1, 0x80000000
+    bc1t      .LOP_FLOAT_TO_LONG_set_vreg
+
+    mov.s     fa1, fa0
+    c.un.s    fcc0, fa0, fa1
+    li        rRESULT0, 0
+    li        rRESULT1, 0
+    bc1t      .LOP_FLOAT_TO_LONG_set_vreg
+
+    JAL(__fixsfdi)
+#endif
+
+    b         .LOP_FLOAT_TO_LONG_set_vreg
+
+.LLONG_TO_max:
+    .word 0x5f000000                       #  maxlong, as a float
+
+.LLONG_TO_min:
+    .word 0xdf000000                       #  minlong, as a float
+
+/* continuation for OP_DOUBLE_TO_INT */
+
+
+d2i_doconv:
+#ifdef SOFT_FLOAT
+    la        t0, .LDOUBLE_TO_INT_max
+    LOAD64(rARG2, rARG3, t0)
+    move      rBIX, rARG0                  #  save a0
+    move      rTEMP, rARG1                 #  and a1
+    JAL(__gedf2)                           #  is arg >= maxint?
+
+    move      t0, v0
+    li        v0, ~0x80000000              #  return maxint (7fffffff)
+    bgez      t0, .LOP_DOUBLE_TO_INT_set_vreg     #  nonzero == yes
+
+    move      rARG0, rBIX                  #  recover arg
+    move      rARG1, rTEMP
+    la        t0, .LDOUBLE_TO_INT_min
+    LOAD64(rARG2, rARG3, t0)
+    JAL(__ledf2)                           #  is arg <= minint?
+
+    move      t0, v0
+    li        v0, 0x80000000               #  return minint (80000000)
+    blez      t0, .LOP_DOUBLE_TO_INT_set_vreg     #  nonzero == yes
+
+    move      rARG0, rBIX                  #  recover arg
+    move      rARG1, rTEMP
+    move      rARG2, rBIX                  #  compare against self
+    move      rARG3, rTEMP
+    JAL(__nedf2)                           #  is arg == self?
+
+    move      t0, v0                       #  zero == no
+    li        v0, 0
+    bnez      t0, .LOP_DOUBLE_TO_INT_set_vreg     #  return zero for NaN
+
+    move      rARG0, rBIX                  #  recover arg
+    move      rARG1, rTEMP
+    JAL(__fixdfsi)                         #  convert double to int
+    b         .LOP_DOUBLE_TO_INT_set_vreg
+#else
+    la        t0, .LDOUBLE_TO_INT_max
+    LOAD64_F(fa1, fa1f, t0)
+    c.ole.d   fcc0, fa1, fa0
+    l.s       fv0, .LDOUBLE_TO_INT_maxret
+    bc1t      .LOP_DOUBLE_TO_INT_set_vreg_f
+
+    la        t0, .LDOUBLE_TO_INT_min
+    LOAD64_F(fa1, fa1f, t0)
+    c.ole.d   fcc0, fa0, fa1
+    l.s       fv0, .LDOUBLE_TO_INT_minret
+    bc1t      .LOP_DOUBLE_TO_INT_set_vreg_f
+
+    mov.d     fa1, fa0
+    c.un.d    fcc0, fa0, fa1
+    li.s      fv0, 0
+    bc1t      .LOP_DOUBLE_TO_INT_set_vreg_f
+
+    trunc.w.d  fv0, fa0
+    b         .LOP_DOUBLE_TO_INT_set_vreg_f
+#endif
+
+
+.LDOUBLE_TO_INT_max:
+    .dword 0x41dfffffffc00000              #  maxint, as a double
+.LDOUBLE_TO_INT_min:
+    .dword 0xc1e0000000000000              #  minint, as a double
+.LDOUBLE_TO_INT_maxret:
+    .word 0x7fffffff
+.LDOUBLE_TO_INT_minret:
+    .word 0x80000000
+
+/* continuation for OP_DOUBLE_TO_LONG */
+
+d2l_doconv:
+#ifdef SOFT_FLOAT
+    la        t0, .LDOUBLE_TO_LONG_max
+    LOAD64(rARG2, rARG3, t0)
+    move      rBIX, rARG0                  #  save a0
+    move      rTEMP, rARG1                 #  and a1
+    JAL(__gedf2)
+
+    move      t1, v0
+    la        t0, .LDOUBLE_TO_LONG_ret_max
+    LOAD64(rRESULT0, rRESULT1, t0)
+    bgez      t1, .LOP_DOUBLE_TO_LONG_set_vreg
+
+    move      rARG0, rBIX
+    move      rARG1, rTEMP
+    la        t0, .LDOUBLE_TO_LONG_min
+    LOAD64(rARG2, rARG3, t0)
+    JAL(__ledf2)
+
+    move      t1, v0
+    la        t0, .LDOUBLE_TO_LONG_ret_min
+    LOAD64(rRESULT0, rRESULT1, t0)
+    blez      t1, .LOP_DOUBLE_TO_LONG_set_vreg
+
+    move      rARG0, rBIX
+    move      rARG1, rTEMP
+    move      rARG2, rBIX
+    move      rARG3, rTEMP
+    JAL(__nedf2)
+
+    move      t0, v0
+    li        rRESULT0, 0
+    li        rRESULT1, 0
+    bnez      t0, .LOP_DOUBLE_TO_LONG_set_vreg
+
+    move      rARG0, rBIX
+    move      rARG1, rTEMP
+    JAL(__fixdfdi)
+
+#else
+    la        t0, .LDOUBLE_TO_LONG_max
+    LOAD64_F(fa1, fa1f, t0)
+    c.ole.d   fcc0, fa1, fa0
+    la        t0, .LDOUBLE_TO_LONG_ret_max
+    LOAD64(rRESULT0, rRESULT1, t0)
+    bc1t      .LOP_DOUBLE_TO_LONG_set_vreg
+
+    la        t0, .LDOUBLE_TO_LONG_min
+    LOAD64_F(fa1, fa1f, t0)
+    c.ole.d   fcc0, fa0, fa1
+    la        t0, .LDOUBLE_TO_LONG_ret_min
+    LOAD64(rRESULT0, rRESULT1, t0)
+    bc1t      .LOP_DOUBLE_TO_LONG_set_vreg
+
+    mov.d     fa1, fa0
+    c.un.d    fcc0, fa0, fa1
+    li        rRESULT0, 0
+    li        rRESULT1, 0
+    bc1t      .LOP_DOUBLE_TO_LONG_set_vreg
+    JAL(__fixdfdi)
+#endif
+    b         .LOP_DOUBLE_TO_LONG_set_vreg
+
+
+.LDOUBLE_TO_LONG_max:
+    .dword 0x43e0000000000000              #  maxlong, as a double
+.LDOUBLE_TO_LONG_min:
+    .dword 0xc3e0000000000000              #  minlong, as a double
+.LDOUBLE_TO_LONG_ret_max:
+    .dword 0x7fffffffffffffff
+.LDOUBLE_TO_LONG_ret_min:
+    .dword 0x8000000000000000
+
+/* continuation for OP_MUL_LONG */
+
+.LOP_MUL_LONG_finish:
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(v0, v1, a0)                    #  vAA::vAA+1 <- v0(low) :: v1(high)
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* continuation for OP_IGET_VOLATILE */
+
+    /*
+     * Currently:
+     *  v0 holds resolved field
+     *  rOBJ holds object (caller saved)
+     */
+.LOP_IGET_VOLATILE_finish:
+    #BAL(common_squeak0)
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    addu      a3, a3, rOBJ                 #  form address
+    lw a0, (a3)                         #  a0 <- obj.field (8/16/32 bits)
+    SMP_DMB                               #  acquiring load
+    GET_OPA4(a2)                           #  a2 <- A+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG(a0, a2)                       #  fp[A] <- a0
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* continuation for OP_IPUT_VOLATILE */
+
+    /*
+     * Currently:
+     *  a0 holds resolved field
+     *  rOBJ holds object
+     */
+.LOP_IPUT_VOLATILE_finish:
+    #BAL(common_squeak0)
+    GET_OPA4(a1)                           #  a1 <- A+
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    GET_VREG(a0, a1)                       #  a0 <- fp[A]
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    addu      rOBJ, rOBJ, a3               #  form address
+    SMP_DMB_ST                            #  releasing store
+    sw a0, (rOBJ)                      #  obj.field (8/16/32 bits) <- a0
+    SMP_DMB
+    GOTO_OPCODE(t0)                        #  jump to next instruction
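+    /*
+     * The SMP_DMB / SMP_DMB_ST macros supply the acquire/release barriers
+     * that distinguish these volatile handlers from the plain field ops
+     * earlier, where only "# noop" placeholders remain; on uniprocessor
+     * builds they presumably assemble to nothing.
+     */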
+
+
+/* continuation for OP_SGET_VOLATILE */
+
+.LOP_SGET_VOLATILE_finish:
+    LOAD_base_offStaticField_value(a1, a0) #  a1 <- field value
+    SMP_DMB                               #  acquiring load
+    GET_OPA(a2)                            #  a2 <- AA
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a1, a2, t0)              #  fp[AA] <- a1
+
+/* continuation for OP_SPUT_VOLATILE */
+
+.LOP_SPUT_VOLATILE_finish:
+    # field ptr in a0
+    GET_OPA(a2)                            #  a2 <- AA
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_VREG(a1, a2)                       #  a1 <- fp[AA]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SMP_DMB_ST                            #  releasing store
+    sw        a1, offStaticField_value(a0) #  field <- vAA
+    SMP_DMB
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* continuation for OP_IGET_OBJECT_VOLATILE */
+
+    /*
+     * Currently:
+     *  v0 holds resolved field
+     *  rOBJ holds object (caller saved)
+     */
+.LOP_IGET_OBJECT_VOLATILE_finish:
+    #BAL(common_squeak0)
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    addu      a3, a3, rOBJ                 #  form address
+    lw a0, (a3)                         #  a0 <- obj.field (8/16/32 bits)
+    SMP_DMB                               #  acquiring load
+    GET_OPA4(a2)                           #  a2 <- A+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG(a0, a2)                       #  fp[A] <- a0
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* continuation for OP_IGET_WIDE_VOLATILE */
+
+    /*
+     * Currently:
+     *  a0   holds resolved field
+     *  rOBJ holds object
+     */
+.LOP_IGET_WIDE_VOLATILE_finish:
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    beqz      rOBJ, common_errNullObject   #  object was null
+    GET_OPA4(a2)                           #  a2 <- A+
+    addu      rOBJ, rOBJ, a3               #  form address
+    .if 1
+    vLOAD64(a0, a1, rOBJ)                  #  a0/a1 <- obj.field (64-bit align ok)
+    .else
+    LOAD64(a0, a1, rOBJ)                   #  a0/a1 <- obj.field (64-bit align ok)
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    EAS2(a3, rFP, a2)                      #  a3 <- &fp[A]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(a0, a1, a3)                    #  fp[A] <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
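+    /*
+     * The .if 1/.else pairs are fixed at assembly time: volatile wide
+     * variants select the atomic accessors (vLOAD64 here, and
+     * dvmQuasiAtomicSwap64Sync in the store path below), while the plain
+     * wide handlers earlier were emitted with .if 0 and use ordinary
+     * LOAD64/STORE64.
+     */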
+
+
+/* continuation for OP_IPUT_WIDE_VOLATILE */
+
+    /*
+     * Currently:
+     *  a0   holds resolved field
+     *  rOBJ holds object
+     */
+.LOP_IPUT_WIDE_VOLATILE_finish:
+    GET_OPA4(a2)                           #  a2 <- A+
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[A]
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    LOAD64(a0, a1, a2)                     #  a0/a1 <- fp[A]
+    GET_INST_OPCODE(rBIX)                  #  extract opcode from rINST
+    addu      a2, rOBJ, a3                 #  form address
+    .if 1
+    JAL(dvmQuasiAtomicSwap64Sync)          # stores a0/a1 into addr a2
+#    STORE64(a0, a1, a2)                    #  obj.field (64 bits, aligned) <- a0 a1
+    .else
+    STORE64(a0, a1, a2)                    #  obj.field (64 bits, aligned) <- a0 a1
+    .endif
+    GOTO_OPCODE(rBIX)                      #  jump to next instruction
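+
+    /*
+     * Hedged C sketch of why the store above goes through
+     * dvmQuasiAtomicSwap64Sync(): MIPS32 has no single atomic 64-bit
+     * store, so wide volatiles use a quasi-atomic swap.  A lock-based
+     * fallback (illustrative, not the real implementation) looks like:
+     *
+     *   static pthread_mutex_t gSwapLock = PTHREAD_MUTEX_INITIALIZER;
+     *   int64_t quasiAtomicSwap64(int64_t value, volatile int64_t* addr) {
+     *       pthread_mutex_lock(&gSwapLock);
+     *       int64_t oldValue = *addr;    // read old 64-bit value
+     *       *addr = value;               // publish new value atomically
+     *       pthread_mutex_unlock(&gSwapLock);
+     *       return oldValue;
+     *   }
+     */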
+
+
+/* continuation for OP_SGET_WIDE_VOLATILE */
+
+.LOP_SGET_WIDE_VOLATILE_finish:
+    GET_OPA(a1)                            #  a1 <- AA
+    .if 1
+    vLOAD64_off(a2, a3, a0, offStaticField_value) #  a2/a3 <- field value (aligned)
+    .else
+    LOAD64_off(a2, a3, a0, offStaticField_value) #  a2/a3 <- field value (aligned)
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    EAS2(a1, rFP, a1)                      #  a1 <- &fp[AA]
+    STORE64(a2, a3, a1)                    #  vAA/vAA+1 <- a2/a3
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+
+/* continuation for OP_SPUT_WIDE_VOLATILE */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  BBBB field ref
+     *  rOBJ:  &fp[AA]
+     *  rBIX: dvmDex->pResFields
+     *
+     * Returns StaticField pointer in a2.
+     */
+.LOP_SPUT_WIDE_VOLATILE_resolve:
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() could throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    # success ?
+    move      a0, v0
+    beqz      v0, common_exceptionThrown   #  no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    move      a2, v0
+    b         .LOP_SPUT_WIDE_VOLATILE_finish           # resume
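+
+    /*
+     * Hedged C sketch of the resolve-and-resume pattern above
+     * (dvmResolveStaticField is the real resolver; the control flow
+     * shown is illustrative):
+     *
+     *   StaticField* sfield =
+     *           dvmResolveStaticField(method->clazz, fieldRef);
+     *   if (sfield == NULL)
+     *       goto exceptionThrown;    // resolver threw; unwind to handler
+     *   // under WITH_JIT, also verify the field before resuming the trace
+     */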
+
+/* continuation for OP_EXECUTE_INLINE */
+
+    /*
+     * Extract args, call function.
+     *  a0 = #of args (0-4)
+     *  rBIX = call index
+     *
+     * Other ideas:
+     * - Use a jump table from the main piece to jump directly into the
+     *   AND/LW pairs.  Costs a data load, saves a branch.
+     * - Have five separate pieces that do the loading, so we can
+     *   interleave the loads a little better.  Increases code size.
+     */
+.LOP_EXECUTE_INLINE_continue:
+    FETCH(rINST, 2)                        #  rINST <- FEDC
+    beq       a0, 0, 0f
+    beq       a0, 1, 1f
+    beq       a0, 2, 2f
+    beq       a0, 3, 3f
+    beq       a0, 4, 4f
+    JAL(common_abort)                      #  too many arguments
+
+4:
+    and       t0, rINST, 0xf000            #  isolate F
+    ESRN(t1, rFP, t0, 10)
+    lw        a3, 0(t1)                    #  a3 <- vF (shift right 12, left 2)
+3:
+    and       t0, rINST, 0x0f00            #  isolate E
+    ESRN(t1, rFP, t0, 6)
+    lw        a2, 0(t1)                    #  a2 <- vE
+2:
+    and       t0, rINST, 0x00f0            #  isolate D
+    ESRN(t1, rFP, t0, 2)
+    lw        a1, 0(t1)                    #  a1 <- vD
+1:
+    and       t0, rINST, 0x000f            #  isolate C
+    EASN(t1, rFP, t0, 2)
+    lw        a0, 0(t1)                    #  a0 <- vC
+0:
+    la        rINST, gDvmInlineOpsTable    #  table of InlineOperation
+    EAS4(t1, rINST, rBIX)                  #  t1 <- rINST + rBIX<<4
+    lw        t9, 0(t1)
+    jr        t9                           #  sizeof=16, "func" is first entry
+    # (not reached)
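+
+    /*
+     * The fall-through above, as a hedged C sketch (the struct layout is
+     * an assumption matching the "sizeof=16, func is first entry" comment):
+     *
+     *   typedef bool (*InlineFunc)(u4 a0, u4 a1, u4 a2, u4 a3,
+     *                              JValue* pResult);
+     *   typedef struct {
+     *       InlineFunc  func;             // first entry, called below
+     *       const char* classDescriptor;  // remaining fields: metadata
+     *       const char* methodName;
+     *       const char* methodSignature;
+     *   } InlineOperation;                // 16 bytes on a 32-bit target
+     *
+     *   u4 a0 = 0, a1 = 0, a2 = 0, a3 = 0;
+     *   switch (argCount) {               // mirrors the beq chain to 4f..0f
+     *   case 4: a3 = fp[(inst >> 12) & 0xf];   // vF  (fall through)
+     *   case 3: a2 = fp[(inst >>  8) & 0xf];   // vE
+     *   case 2: a1 = fp[(inst >>  4) & 0xf];   // vD
+     *   case 1: a0 = fp[ inst        & 0xf];   // vC
+     *   case 0: break;
+     *   }
+     *   gDvmInlineOpsTable[opIndex].func(a0, a1, a2, a3, pResult);
+     */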
+
+    /*
+     * We're debugging or profiling.
+     * rBIX: opIndex
+     */
+.LOP_EXECUTE_INLINE_debugmode:
+    move      a0, rBIX
+    JAL(dvmResolveInlineNative)
+    beqz      v0, .LOP_EXECUTE_INLINE_resume       #  did it resolve? no, just move on
+    move      rOBJ, v0                     #  remember method
+    move      a0, v0
+    move      a1, rSELF
+    JAL(dvmFastMethodTraceEnter)           #  (method, self)
+    addu      a1, rSELF, offThread_retval  #  a1<- &self->retval
+    GET_OPB(a0)                            #  a0 <- B
+    # Stack should have 16/20 available
+    sw        a1, 16(sp)                   #  push &self->retval
+    BAL(.LOP_EXECUTE_INLINE_continue)              #  make call; will return after
+    lw        gp, STACK_OFFSET_GP(sp)      #  restore gp
+    move      rINST, v0                    #  save result of inline
+    move      a0, rOBJ                     #  a0<- method
+    move      a1, rSELF                    #  a1<- self
+    JAL(dvmFastMethodTraceExit)            #  (method, self)
+    beqz      v0, common_exceptionThrown   #  returned false, handle exception
+    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* continuation for OP_EXECUTE_INLINE_RANGE */
+
+    /*
+     * Extract args, call function.
+     *  a0 = #of args (0-4)
+     *  rBIX = call index
+     *  ra = return addr, above  [DO NOT JAL out of here w/o preserving ra]
+     */
+.LOP_EXECUTE_INLINE_RANGE_continue:
+    FETCH(rOBJ, 2)                       # rOBJ <- CCCC
+    beq       a0, 0, 0f
+    beq       a0, 1, 1f
+    beq       a0, 2, 2f
+    beq       a0, 3, 3f
+    beq       a0, 4, 4f
+    JAL(common_abort)                      #  too many arguments
+
+4:
+    add       t0, rOBJ, 3
+    GET_VREG(a3, t0)
+3:
+    add       t0, rOBJ, 2
+    GET_VREG(a2, t0)
+2:
+    add       t0, rOBJ, 1
+    GET_VREG(a1, t0)
+1:
+    GET_VREG(a0, rOBJ)
+0:
+    la        rOBJ, gDvmInlineOpsTable      # table of InlineOperation
+    EAS4(t1, rOBJ, rBIX)                    # t1 <- rOBJ + rBIX<<4
+    lw        t9, 0(t1)
+    jr        t9                            # sizeof=16, "func" is first entry
+    # not reached
+
+    /*
+     * We're debugging or profiling.
+     * rBIX: opIndex
+     */
+.LOP_EXECUTE_INLINE_RANGE_debugmode:
+    move      a0, rBIX
+    JAL(dvmResolveInlineNative)
+    beqz      v0, .LOP_EXECUTE_INLINE_RANGE_resume       #  did it resolve? no, just move on
+    move      rOBJ, v0                     #  remember method
+    move      a0, v0
+    move      a1, rSELF
+    JAL(dvmFastMethodTraceEnter)           #  (method, self)
+    addu      a1, rSELF, offThread_retval  #  a1<- &self->retval
+    GET_OPA(a0)                            #  a0 <- A
+    # Stack should have 16/20 available
+    sw        a1, 16(sp)                   #  push &self->retval
+    move      rINST, rOBJ                  #  rINST<- method
+    BAL(.LOP_EXECUTE_INLINE_RANGE_continue)              #  make call; will return after
+    lw        gp, STACK_OFFSET_GP(sp)      #  restore gp
+    move      rOBJ, v0                     #  save result of inline
+    move      a0, rINST                    #  a0<- method
+    move      a1, rSELF                    #  a1<- self
+    JAL(dvmFastNativeMethodTraceExit)      #  (method, self)
+    beqz      rOBJ, common_exceptionThrown #  returned false, handle exception
+    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* continuation for OP_INVOKE_OBJECT_INIT_RANGE */
+    /*
+     * A debugger is attached, so we need to go ahead and do
+     * this.  For simplicity, we'll just jump directly to the
+     * corresponding handler.  Note that we can't use
+     * rIBASE here because it may be in single-step mode.
+     * Load the primary table base directly.
+     */
+.LOP_INVOKE_OBJECT_INIT_RANGE_debugger:
+    lw      a1, offThread_mainHandlerTable(rSELF)
+    .if 0
+    li      t0, OP_INVOKE_DIRECT_JUMBO
+    .else
+    li      t0, OP_INVOKE_DIRECT_RANGE
+    .endif
+    GOTO_OPCODE_BASE(a1, t0)            # execute it
+
+/* continuation for OP_IPUT_OBJECT_VOLATILE */
+
+    /*
+     * Currently:
+     *  a0 holds resolved field
+     *  rOBJ holds object
+     */
+.LOP_IPUT_OBJECT_VOLATILE_finish:
+    #BAL(common_squeak0)
+    GET_OPA4(a1)                           #  a1 <- A+
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    GET_VREG(a0, a1)                       #  a0 <- fp[A]
+    lw        a2, offThread_cardTable(rSELF) #  a2 <- card table base
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    addu      t2, rOBJ, a3                 #  form address
+    SMP_DMB_ST                            #  releasing store
+    sw a0, (t2)                        #  obj.field (32 bits) <- a0
+    SMP_DMB
+    beqz      a0, 1f                       #  stored a null reference?
+    srl       t1, rOBJ, GC_CARD_SHIFT
+    addu      t2, a2, t1
+    sb        a2, (t2)                     #  mark card if not
+1:
+    GOTO_OPCODE(t0)                        #  jump to next instruction
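+
+    /*
+     * Hedged C sketch of the card-marking write barrier above.  The sb
+     * stores the low byte of the card-table base, which (by construction,
+     * an assumption here) equals the dirty value:
+     *
+     *   void markCard(u1* cardBase, const Object* obj) {
+     *       // one card covers 1 << GC_CARD_SHIFT bytes of heap
+     *       cardBase[(uintptr_t)obj >> GC_CARD_SHIFT] = (u1)(uintptr_t)cardBase;
+     *   }
+     *
+     * Null stores skip the barrier: a null cannot create a reference the
+     * concurrent GC needs to re-scan.
+     */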
+
+
+/* continuation for OP_SGET_OBJECT_VOLATILE */
+
+.LOP_SGET_OBJECT_VOLATILE_finish:
+    LOAD_base_offStaticField_value(a1, a0) #  a1 <- field value
+    SMP_DMB                               #  acquiring load
+    GET_OPA(a2)                            #  a2 <- AA
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a1, a2, t0)              #  fp[AA] <- a1
+
+/* continuation for OP_SPUT_OBJECT_VOLATILE */
+.LOP_SPUT_OBJECT_VOLATILE_finish:                        #  field ptr in a0
+    GET_OPA(a2)                            #  a2 <- AA
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_VREG(a1, a2)                       #  a1 <- fp[AA]
+    lw        a2, offThread_cardTable(rSELF) #  a2 <- card table base
+    lw        t1, offField_clazz(a0)       #  t1 <- field->clazz
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SMP_DMB_ST                            #  releasing store
+    sw        a1, offStaticField_value(a0) #  field <- vAA
+    SMP_DMB
+    beqz      a1, 1f
+    srl       t2, t1, GC_CARD_SHIFT
+    addu      t3, a2, t2
+    sb        a2, (t3)
+1:
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* continuation for OP_CHECK_CAST_JUMBO */
+
+
+.LOP_CHECK_CAST_JUMBO_castfailure:
+    # A cast has failed.  We need to throw a ClassCastException with the
+    # class of the object that failed to be cast.
+    EXPORT_PC()                                 # about to throw
+    LOAD_base_offObject_clazz(a0, rOBJ)         # a0<- obj->clazz
+    move      a1, rBIX                     #  a1 <- desired class
+    JAL(dvmThrowClassCastException)
+    b       common_exceptionThrown
+
+    /*
+     * Advance PC and get next opcode.
+     */
+.LOP_CHECK_CAST_JUMBO_okay:
+    FETCH_ADVANCE_INST(4)                       # advance rPC, load rINST
+    GET_INST_OPCODE(t0)                         # extract opcode from rINST
+    GOTO_OPCODE(t0)                             # jump to next instruction
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     *  a2   holds AAAAAAAA
+     *  rOBJ holds object
+     */
+.LOP_CHECK_CAST_JUMBO_resolve:
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_rSELF_method(a3)                  #  a3 <- self->method
+    move      a1, a2                       #  a1 <- AAAAAAAA
+    li        a2, 0                        #  a2 <- false
+    LOAD_base_offMethod_clazz(a0, a3)      #  a0 <- method->clazz
+    JAL(dvmResolveClass)                   #  v0 <- resolved ClassObject ptr
+    # got null?
+    beqz      v0, common_exceptionThrown   #  yes, handle exception
+    move      a1, v0                       #  a1 <- class resolved from AAAAAAAA
+    LOAD_base_offObject_clazz(a0, rOBJ)    #  a0 <- obj->clazz
+    b         .LOP_CHECK_CAST_JUMBO_resolved  #  pick up where we left off
+
+
+
+/* continuation for OP_INSTANCE_OF_JUMBO */
+
+    /*
+     * Class resolved, determine type of check necessary.  This is common.
+     *  a0   holds obj->clazz
+     *  a1   holds class resolved from AAAAAAAA
+     *  rOBJ holds BBBB
+     */
+
+.LOP_INSTANCE_OF_JUMBO_resolved:                   #  a0=obj->clazz, a1=resolved class
+    # same class (trivial success)?
+    beq       a0, a1, .LOP_INSTANCE_OF_JUMBO_trivial  #  yes, trivial finish
+    # fall through to OP_INSTANCE_OF_JUMBO_fullcheck
+
+    /*
+     * Trivial test failed, need to perform full check.  This is common.
+     *  a0 holds obj->clazz
+     *  a1 holds class resolved from AAAAAAAA
+     *  rOBJ holds BBBB
+     */
+.LOP_INSTANCE_OF_JUMBO_fullcheck:
+    JAL(dvmInstanceofNonTrivial)           #  v0 <- boolean result
+    move      a0, v0
+    b         .LOP_INSTANCE_OF_JUMBO_store            #  go to OP_INSTANCE_OF_JUMBO_store
+
+.LOP_INSTANCE_OF_JUMBO_trivial:
+    li        a0, 1                        #  indicate success
+    # fall thru
+    /*
+     * a0   holds boolean result
+     * rOBJ holds BBBB
+     */
+.LOP_INSTANCE_OF_JUMBO_store:
+    FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
+    SET_VREG(a0, rOBJ)                     #  vBBBB <- a0
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     *  a3   holds AAAAAAAA
+     *  rOBJ holds BBBB
+     */
+.LOP_INSTANCE_OF_JUMBO_resolve:
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_rSELF_method(a0)                  #  a0 <- self->method
+    move      a1, a3                       #  a1 <- AAAAAAAA
+    li        a2, 1                        #  a2 <- true
+    LOAD_base_offMethod_clazz(a0, a0)      #  a0 <- method->clazz
+    JAL(dvmResolveClass)                   #  v0 <- resolved ClassObject ptr
+    # got null?
+    move      a1, v0                       #  a1 <- class resolved from AAAAAAAA
+    beqz      v0, common_exceptionThrown   #  yes, handle exception
+    FETCH(a3, 4)                           #  a3 <- vCCCC
+    GET_VREG(a0, a3)                       #  a0 <- vCCCC (object)
+    LOAD_base_offObject_clazz(a0, a0)      #  a0 <- obj->clazz
+    b         .LOP_INSTANCE_OF_JUMBO_resolved         #  pick up where we left off
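+
+    /*
+     * Overall flow of instance-of, as a hedged C sketch (the wrapper is
+     * illustrative; dvmInstanceofNonTrivial is the real slow path):
+     *
+     *   int isInstanceOf(const Object* obj, const ClassObject* clazz) {
+     *       if (obj == NULL)
+     *           return 0;                  // vBBBB <- false
+     *       if (obj->clazz == clazz)
+     *           return 1;                  // trivial same-class success
+     *       return dvmInstanceofNonTrivial(obj->clazz, clazz);
+     *   }
+     */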
+
+
+/* continuation for OP_NEW_INSTANCE_JUMBO */
+
+.LOP_NEW_INSTANCE_JUMBO_continue:
+    FETCH_ADVANCE_INST(4)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG(v0, a3)                       #  vBBBB <- v0
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+#if defined(WITH_JIT)
+    /*
+     * Check to see if we need to stop the trace building early.
+     * v0: new object
+     * a3: vAA
+     */
+.LOP_NEW_INSTANCE_JUMBO_jitCheck:
+    lw        a1, 0(rBIX)                  #  reload resolved class
+    # okay?
+    bnez      a1, .LOP_NEW_INSTANCE_JUMBO_continue     #  yes, finish
+    move      rOBJ, v0                     #  preserve new object
+    move      rBIX, a3                     #  preserve vAA
+    move      a0, rSELF
+    move      a1, rPC
+    JAL(dvmJitEndTraceSelect)              #  (self, pc)
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG(rOBJ, rBIX)                   #  vAA <- new object
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+#endif
+
+    /*
+     * Class initialization required.
+     *
+     *  a0 holds class object
+     */
+.LOP_NEW_INSTANCE_JUMBO_needinit:
+    JAL(dvmInitClass)                      #  initialize class
+    move      a0, rOBJ                     #  restore a0
+    # check boolean result
+    bnez      v0, .LOP_NEW_INSTANCE_JUMBO_initialized  #  success, continue
+    b         common_exceptionThrown       #  failed, deal with init exception
+
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     *  a1 holds AAAAAAAA
+     */
+.LOP_NEW_INSTANCE_JUMBO_resolve:
+    LOAD_rSELF_method(a3)                  #  a3 <- self->method
+    li        a2, 0                        #  a2 <- false
+    LOAD_base_offMethod_clazz(a0, a3)      #  a0 <- method->clazz
+    JAL(dvmResolveClass)                   #  v0 <- resolved ClassObject ptr
+    move      a0, v0
+    # got null?
+    bnez      v0, .LOP_NEW_INSTANCE_JUMBO_resolved     #  no, continue
+    b         common_exceptionThrown       #  yes, handle exception
+
+/* continuation for OP_NEW_ARRAY_JUMBO */
+
+    /*
+     * Finish allocation.
+     *
+     *  a0 holds class
+     *  a1 holds array length
+     */
+.LOP_NEW_ARRAY_JUMBO_finish:
+    li        a2, ALLOC_DONT_TRACK         #  don't track in local refs table
+    JAL(dvmAllocArrayByClass)              #  v0 <- call(clazz, length, flags)
+    FETCH(a2, 3)                           #  a2 <- vBBBB
+    # failed?
+    beqz      v0, common_exceptionThrown   #  yes, handle the exception
+    FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG(v0, a2)                       #  vBBBB <- v0
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+
+    /*
+     * Resolve class.  (This is an uncommon case.)
+     *
+     *  a1 holds array length
+     *  a2 holds class ref AAAAAAAA
+     */
+.LOP_NEW_ARRAY_JUMBO_resolve:
+    LOAD_rSELF_method(a3)                  #  a3 <- self->method
+    move      rOBJ, a1                     #  rOBJ <- length (save)
+    move      a1, a2                       #  a1 <- AAAAAAAA
+    li        a2, 0                        #  a2 <- false
+    LOAD_base_offMethod_clazz(a0, a3)      #  a0 <- method->clazz
+    JAL(dvmResolveClass)                   #  v0 <- call(clazz, ref)
+    move      a1, rOBJ                     #  a1 <- length (restore)
+    # got null?
+    beqz      v0, common_exceptionThrown   #  yes, handle exception
+    move      a0, v0
+    b         .LOP_NEW_ARRAY_JUMBO_finish           #  continue at OP_NEW_ARRAY_JUMBO_finish
+
+
+
+/* continuation for OP_FILLED_NEW_ARRAY_JUMBO */
+
+    /*
+     * On entry:
+     *  a0 holds array class
+     *  rOBJ holds AA or BA
+     */
+.LOP_FILLED_NEW_ARRAY_JUMBO_continue:
+    LOAD_base_offClassObject_descriptor(a3, a0) #  a3 <- arrayClass->descriptor
+    li        a2, ALLOC_DONT_TRACK         #  a2 <- alloc flags
+    lbu       rINST, 1(a3)                 #  rINST <- descriptor[1]
+    FETCH(a1, 3)                           # a1<- BBBB (length)
+    seq       t0, rINST, 'I'               #  array of ints?
+    seq       t1, rINST, 'L'               #  array of objects?
+    or        t0, t1
+    seq       t1, rINST, '['               #  array of arrays?
+    or        t0, t1
+    move      rBIX, a1                     #  save length in rBIX
+    beqz      t0, .LOP_FILLED_NEW_ARRAY_JUMBO_notimpl      #  no, not handled yet
+    JAL(dvmAllocArrayByClass)              #  v0 <- call(arClass, length, flags)
+    # null return?
+    beqz      v0, common_exceptionThrown   #  alloc failed, handle exception
+
+    FETCH(a1, 4)                           #  a1 <- CCCC
+    sw        v0, offThread_retval(rSELF)  #  retval.l <- new array
+    sw        rINST, (offThread_retval+4)(rSELF) #  retval.h <- type
+    addu      a0, v0, offArrayObject_contents #  a0 <- newArray->contents
+    subu      rBIX, rBIX, 1                #  length--, check for neg
+    FETCH_ADVANCE_INST(5)                  #  advance to next instr, load rINST
+    bltz      rBIX, 2f                     #  was zero, bail
+
+    # copy values from registers into the array
+    # a0=array, a1=CCCC, t0=BBBB(length)
+    move      t0, rBIX
+    EAS2(a2, rFP, a1)                      #  a2 <- &fp[CCCC]
+1:
+    lw        a3, 0(a2)                    #  a3 <- *a2++
+    addu      a2, 4
+    subu      t0, t0, 1                    #  count--
+    sw        a3, (a0)                     #  *contents++ = vX
+    addu      a0, 4
+    bgez      t0, 1b
+
+2:
+    lw        a0, offThread_retval(rSELF)  #  a0 <- object
+    lw        a1, (offThread_retval+4)(rSELF) #  a1 <- type
+    seq       t1, a1, 'I'                  #  Is int array?
+    bnez      t1, 3f
+    lw        a2, offThread_cardTable(rSELF) #  a2 <- card table base
+    srl       t3, a0, GC_CARD_SHIFT
+    addu      t2, a2, t3
+    sb        a2, (t2)
+3:
+    GET_INST_OPCODE(t0)                    #  ip <- opcode from rINST
+    GOTO_OPCODE(t0)                        #  execute it
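+
+    /*
+     * Hedged C sketch of the copy loop and conditional card mark above
+     * (variable names are illustrative):
+     *
+     *   u4* contents = (u4*)((u1*)newArray + offArrayObject_contents);
+     *   for (u4 i = 0; i < length; i++)
+     *       contents[i] = fp[vCCCC + i];   // copy vCCCC .. vCCCC+BBBB-1
+     *   if (typeCh != 'I')                 // 'L' and '[' hold references
+     *       cardTable[(uintptr_t)newArray >> GC_CARD_SHIFT] = GC_CARD_DIRTY;
+     */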
+
+
+    /*
+     * Throw an exception indicating that we have not implemented this
+     * mode of filled-new-array.
+     */
+.LOP_FILLED_NEW_ARRAY_JUMBO_notimpl:
+    la        a0, .LstrFilledNewArrayNotImpl
+    JAL(dvmThrowInternalError)
+    b         common_exceptionThrown
+
+/* continuation for OP_IGET_JUMBO */
+
+.LOP_IGET_JUMBO_resolved:
+    # test results
+    move      a0, v0
+    beqz      a0, common_exceptionThrown
+    /*
+     * Currently:
+     *  v0 holds resolved field
+     *  rOBJ holds object (caller saved)
+     */
+.LOP_IGET_JUMBO_finish:
+    #BAL(common_squeak0)
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    addu      a3, a3, rOBJ                 #  form address
+    lw a0, (a3)                         #  a0 <- obj.field (8/16/32 bits)
+         # noop                               #  acquiring load
+    FETCH(a2, 3)                           #  a2<- BBBB
+    FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
+    SET_VREG(a0, a2)                       #  fp[BBBB]<- a0
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* continuation for OP_IGET_WIDE_JUMBO */
+
+.LOP_IGET_WIDE_JUMBO_resolved:
+    # test return code
+    move      a0, v0
+    bnez      v0, .LOP_IGET_WIDE_JUMBO_finish
+    b         common_exceptionThrown
+
+    /*
+     * Currently:
+     *  a0   holds resolved field
+     *  rOBJ holds object
+     */
+.LOP_IGET_WIDE_JUMBO_finish:
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    beqz      rOBJ, common_errNullObject   #  object was null
+    GET_OPA4(a2)                           #  a2 <- A+
+    addu      rOBJ, rOBJ, a3               #  form address
+    .if 0
+    vLOAD64(a0, a1, rOBJ)                  #  a0/a1 <- obj.field (64-bit align ok)
+    .else
+    LOAD64(a0, a1, rOBJ)                   #  a0/a1 <- obj.field (64-bit align ok)
+    .endif
+    FETCH(a2, 3)                           #  a2 <- BBBB
+    FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
+    EAS2(a3, rFP, a2)                      #  a3 <- &fp[BBBB]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(a0, a1, a3)                    #  fp[BBBB] <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* continuation for OP_IGET_OBJECT_JUMBO */
+
+.LOP_IGET_OBJECT_JUMBO_resolved:
+    # test results
+    move      a0, v0
+    beqz      a0, common_exceptionThrown
+    /*
+     * Currently:
+     *  v0 holds resolved field
+     *  rOBJ holds object (caller saved)
+     */
+.LOP_IGET_OBJECT_JUMBO_finish:
+    #BAL(common_squeak0)
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    addu      a3, a3, rOBJ                 #  form address
+    lw a0, (a3)                         #  a0 <- obj.field (8/16/32 bits)
+         # noop                               #  acquiring load
+    FETCH(a2, 3)                           #  a2<- BBBB
+    FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
+    SET_VREG(a0, a2)                       #  fp[BBBB]<- a0
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* continuation for OP_IGET_BOOLEAN_JUMBO */
+
+.LOP_IGET_BOOLEAN_JUMBO_resolved:
+    # test results
+    move      a0, v0
+    beqz      a0, common_exceptionThrown
+    /*
+     * Currently:
+     *  v0 holds resolved field
+     *  rOBJ holds object (caller saved)
+     */
+.LOP_IGET_BOOLEAN_JUMBO_finish:
+    #BAL(common_squeak0)
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    addu      a3, a3, rOBJ                 #  form address
+    lw a0, (a3)                         #  a0 <- obj.field (8/16/32 bits)
+         # noop                               #  acquiring load
+    FETCH(a2, 3)                           #  a2<- BBBB
+    FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
+    SET_VREG(a0, a2)                       #  fp[BBBB]<- a0
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* continuation for OP_IGET_BYTE_JUMBO */
+
+.LOP_IGET_BYTE_JUMBO_resolved:
+    # test results
+    move      a0, v0
+    beqz      a0, common_exceptionThrown
+    /*
+     * Currently:
+     *  v0 holds resolved field
+     *  rOBJ holds object (caller saved)
+     */
+.LOP_IGET_BYTE_JUMBO_finish:
+    #BAL(common_squeak0)
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    addu      a3, a3, rOBJ                 #  form address
+    lw a0, (a3)                         #  a0 <- obj.field (8/16/32 bits)
+         # noop                               #  acquiring load
+    FETCH(a2, 3)                           #  a2<- BBBB
+    FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
+    SET_VREG(a0, a2)                       #  fp[BBBB]<- a0
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* continuation for OP_IGET_CHAR_JUMBO */
+
+.LOP_IGET_CHAR_JUMBO_resolved:
+    # test results
+    move      a0, v0
+    beqz      a0, common_exceptionThrown
+    /*
+     * Currently:
+     *  v0 holds resolved field
+     *  rOBJ holds object (caller saved)
+     */
+.LOP_IGET_CHAR_JUMBO_finish:
+    #BAL(common_squeak0)
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    addu      a3, a3, rOBJ                 #  form address
+    lw a0, (a3)                         #  a0 <- obj.field (8/16/32 bits)
+         # noop                               #  acquiring load
+    FETCH(a2, 3)                           #  a2<- BBBB
+    FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
+    SET_VREG(a0, a2)                       #  fp[BBBB]<- a0
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* continuation for OP_IGET_SHORT_JUMBO */
+
+.LOP_IGET_SHORT_JUMBO_resolved:
+    # test results
+    move      a0, v0
+    beqz      a0, common_exceptionThrown
+    /*
+     * Currently:
+     *  v0 holds resolved field
+     *  rOBJ holds object (caller saved)
+     */
+.LOP_IGET_SHORT_JUMBO_finish:
+    #BAL(common_squeak0)
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    addu      a3, a3, rOBJ                 #  form address
+    lw a0, (a3)                         #  a0 <- obj.field (8/16/32 bits)
+         # noop                               #  acquiring load
+    FETCH(a2, 3)                           #  a2<- BBBB
+    FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
+    SET_VREG(a0, a2)                       #  fp[BBBB]<- a0
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* continuation for OP_IPUT_JUMBO */
+
+.LOP_IPUT_JUMBO_resolved:
+    move      a0, v0
+    beqz      a0, common_exceptionThrown
+    # fall through to OP_IPUT_JUMBO_finish
+
+
+    /*
+     * Currently:
+     *  a0 holds resolved field
+     *  rOBJ holds object
+     */
+.LOP_IPUT_JUMBO_finish:
+    #BAL(common_squeak0)
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    FETCH(a1, 3)                           # a1<- BBBB
+    GET_VREG(a0, a1)                       #  a0 <- fp[BBBB]
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    addu      rOBJ, rOBJ, a3               #  form address
+        #  noop                            #  releasing store
+    sw a0, (rOBJ)                      #  obj.field (8/16/32 bits) <- a0
+    # noop 
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* continuation for OP_IPUT_WIDE_JUMBO */
+
+.LOP_IPUT_WIDE_JUMBO_resolved:
+    move      a0, v0
+    beqz      a0, common_exceptionThrown
+    # fall through to OP_IPUT_WIDE_JUMBO_finish
+    /*
+     * Currently:
+     *  a0   holds resolved field
+     *  rOBJ holds object
+     */
+.LOP_IPUT_WIDE_JUMBO_finish:
+    FETCH(a2, 3)                           #  a2 <- BBBB
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BBBB]
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
+    LOAD64(a0, a1, a2)                     #  a0/a1 <- fp[BBBB]
+    GET_INST_OPCODE(rBIX)                  #  extract opcode from rINST
+    addu      a2, rOBJ, a3                 #  form address
+    .if 0
+    JAL(dvmQuasiAtomicSwap64Sync)          # stores r0/r1 into addr r2
+#    STORE64(a0, a1, a2)                    #  obj.field (64 bits, aligned) <- a0 a1
+    .else
+    STORE64(a0, a1, a2)                    #  obj.field (64 bits, aligned) <- a0 a1
+    .endif
+    GOTO_OPCODE(rBIX)                      #  jump to next instruction
+
+
+
+/* continuation for OP_IPUT_OBJECT_JUMBO */
+
+    /*
+     * Currently:
+     *  a0 holds resolved field
+     *  rOBJ holds object
+     */
+.LOP_IPUT_OBJECT_JUMBO_resolved:
+    move      a0, v0
+    beqz      a0, common_exceptionThrown
+    # fall through to OP_IPUT_OBJECT_JUMBO_finish
+
+.LOP_IPUT_OBJECT_JUMBO_finish:
+    #BAL(common_squeak0)
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    FETCH(a1, 3)                              # a1<- BBBB
+    GET_VREG(a0, a1)                       #  a0 <- fp[BBBB]
+    lw        a2, offThread_cardTable(rSELF) #  a2 <- card table base
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    addu      t2, rOBJ, a3                 #  form address
+        #  noop                            #  releasing store
+    sw a0, (t2)                        #  obj.field (32 bits) <- a0
+        #  noop
+    beqz      a0, 1f                       #  stored a null reference?
+    srl       t1, rOBJ, GC_CARD_SHIFT
+    addu      t2, a2, t1
+    sb        a2, (t2)                     #  mark card if not
+1:
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* continuation for OP_IPUT_BOOLEAN_JUMBO */
+
+.LOP_IPUT_BOOLEAN_JUMBO_resolved:
+    move      a0, v0
+    beqz      a0, common_exceptionThrown
+    # fall through to OP_IPUT_BOOLEAN_JUMBO_finish
+
+
+    /*
+     * Currently:
+     *  a0 holds resolved field
+     *  rOBJ holds object
+     */
+.LOP_IPUT_BOOLEAN_JUMBO_finish:
+    #BAL(common_squeak0)
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    FETCH(a1, 3)                           # a1<- BBBB
+    GET_VREG(a0, a1)                       #  a0 <- fp[BBBB]
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    addu      rOBJ, rOBJ, a3               #  form address
+        #  noop                            #  releasing store
+    sw a0, (rOBJ)                      #  obj.field (8/16/32 bits) <- a0
+    # noop 
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* continuation for OP_IPUT_BYTE_JUMBO */
+
+.LOP_IPUT_BYTE_JUMBO_resolved:
+    move      a0, v0
+    beqz      a0, common_exceptionThrown
+    # fall through to OP_IPUT_BYTE_JUMBO_finish
+
+
+    /*
+     * Currently:
+     *  a0 holds resolved field
+     *  rOBJ holds object
+     */
+.LOP_IPUT_BYTE_JUMBO_finish:
+    #BAL(common_squeak0)
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    FETCH(a1, 3)                           # a1<- BBBB
+    GET_VREG(a0, a1)                       #  a0 <- fp[BBBB]
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    addu      rOBJ, rOBJ, a3               #  form address
+        #  noop                            #  releasing store
+    sw a0, (rOBJ)                      #  obj.field (8/16/32 bits) <- a0
+    # noop 
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* continuation for OP_IPUT_CHAR_JUMBO */
+
+.LOP_IPUT_CHAR_JUMBO_resolved:
+    move      a0, v0
+    beqz      a0, common_exceptionThrown
+    # fall through to OP_IPUT_CHAR_JUMBO_finish
+
+
+    /*
+     * Currently:
+     *  a0 holds resolved field
+     *  rOBJ holds object
+     */
+.LOP_IPUT_CHAR_JUMBO_finish:
+    #BAL(common_squeak0)
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    FETCH(a1, 3)                           # a1<- BBBB
+    GET_VREG(a0, a1)                       #  a0 <- fp[BBBB]
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    addu      rOBJ, rOBJ, a3               #  form address
+        #  noop                            #  releasing store
+    sw a0, (rOBJ)                      #  obj.field (8/16/32 bits) <- a0
+    # noop 
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* continuation for OP_IPUT_SHORT_JUMBO */
+
+.LOP_IPUT_SHORT_JUMBO_resolved:
+    move      a0, v0
+    beqz      a0, common_exceptionThrown
+    # fall through to OP_IPUT_SHORT_JUMBO_finish
+
+
+    /*
+     * Currently:
+     *  a0 holds resolved field
+     *  rOBJ holds object
+     */
+.LOP_IPUT_SHORT_JUMBO_finish:
+    #BAL(common_squeak0)
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    FETCH(a1, 3)                           # a1<- BBBB
+    GET_VREG(a0, a1)                       #  a0 <- fp[BBBB]
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    addu      rOBJ, rOBJ, a3               #  form address
+        #  noop                            #  releasing store
+    sw a0, (rOBJ)                      #  obj.field (8/16/32 bits) <- a0
+    # noop 
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* continuation for OP_SGET_JUMBO */
+
+.LOP_SGET_JUMBO_finish:
+    LOAD_base_offStaticField_value(a1, a0) #  a1 <- field value
+                      #  no-op                                #  acquiring load
+    FETCH(a2, 3)                           #  a2 <- BBBB
+    FETCH_ADVANCE_INST(4)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a1, a2, t0)              #  fp[BBBB] <- a1
+
+/* continuation for OP_SGET_WIDE_JUMBO */
+
+.LOP_SGET_WIDE_JUMBO_finish:
+    FETCH(a1, 3)                           # a1<- BBBB
+    .if 0
+    vLOAD64_off(a2, a3, a0, offStaticField_value) #  a2/a3 <- field value (aligned)
+    .else
+    LOAD64_off(a2, a3, a0, offStaticField_value) #  a2/a3 <- field value (aligned)
+    .endif
+    FETCH_ADVANCE_INST(4)                  #  advance rPC, load rINST
+    EAS2(a1, rFP, a1)                      #  a1 <- &fp[BBBB]
+    STORE64(a2, a3, a1)                    #  vBBBB/vBBBB+1 <- a2/a3
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* continuation for OP_SGET_OBJECT_JUMBO */
+
+.LOP_SGET_OBJECT_JUMBO_finish:
+    LOAD_base_offStaticField_value(a1, a0) #  a1 <- field value
+                      #  no-op                                #  acquiring load
+    FETCH(a2, 3)                           #  a2 <- BBBB
+    FETCH_ADVANCE_INST(4)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a1, a2, t0)              #  fp[BBBB] <- a1
+
+/* continuation for OP_SGET_BOOLEAN_JUMBO */
+
+.LOP_SGET_BOOLEAN_JUMBO_finish:
+    LOAD_base_offStaticField_value(a1, a0) #  a1 <- field value
+                      #  no-op                                #  acquiring load
+    FETCH(a2, 3)                           #  a2 <- BBBB
+    FETCH_ADVANCE_INST(4)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a1, a2, t0)              #  fp[BBBB] <- a1
+
+/* continuation for OP_SGET_BYTE_JUMBO */
+
+.LOP_SGET_BYTE_JUMBO_finish:
+    LOAD_base_offStaticField_value(a1, a0) #  a1 <- field value
+                      #  no-op                                #  acquiring load
+    FETCH(a2, 3)                           #  a2 <- BBBB
+    FETCH_ADVANCE_INST(4)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a1, a2, t0)              #  fp[BBBB] <- a1
+
+/* continuation for OP_SGET_CHAR_JUMBO */
+
+.LOP_SGET_CHAR_JUMBO_finish:
+    LOAD_base_offStaticField_value(a1, a0) #  a1 <- field value
+                      #  no-op                                #  acquiring load
+    FETCH(a2, 3)                           #  a2 <- BBBB
+    FETCH_ADVANCE_INST(4)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a1, a2, t0)              #  fp[BBBB] <- a1
+
+/* continuation for OP_SGET_SHORT_JUMBO */
+
+.LOP_SGET_SHORT_JUMBO_finish:
+    LOAD_base_offStaticField_value(a1, a0) #  a1 <- field value
+                      #  no-op                                #  acquiring load
+    FETCH(a2, 3)                           #  a2 <- BBBB
+    FETCH_ADVANCE_INST(4)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a1, a2, t0)              #  fp[BBBB] <- a1
+
+/* continuation for OP_SPUT_JUMBO */
+
+.LOP_SPUT_JUMBO_finish:
+    # field ptr in a0
+    FETCH(a2, 3)                           # a2<- BBBB
+    FETCH_ADVANCE_INST(4)                  #  advance rPC, load rINST
+    GET_VREG(a1, a2)                       #  a1 <- fp[BBBB]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+          #  no-op                             #  releasing store
+    sw        a1, offStaticField_value(a0) #  field <- vBBBB
+          #  no-op 
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* continuation for OP_SPUT_WIDE_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  AAAAAAAA field ref
+     *  rOBJ:  &fp[BBBB]
+     *  rBIX: dvmDex->pResFields
+     *
+     * Returns StaticField pointer in a2.
+     */
+.LOP_SPUT_WIDE_JUMBO_resolve:
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() could throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    # success ?
+    move      a0, v0
+    beqz      v0, common_exceptionThrown   #  no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    move      a2, v0
+    b         .LOP_SPUT_WIDE_JUMBO_finish           # resume
+
+/* continuation for OP_SPUT_OBJECT_JUMBO */
+.LOP_SPUT_OBJECT_JUMBO_finish:                     #  field ptr in a0
+    FETCH(a2, 3)                           # a2<- BBBB
+    FETCH_ADVANCE_INST(4)                  #  advance rPC, load rINST
+    GET_VREG(a1, a2)                       #  a1 <- fp[BBBB]
+    lw        a2, offThread_cardTable(rSELF) #  a2 <- card table base
+    lw        t1, offField_clazz(a0)       #  t1 <- field->clazz
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+          #  no-op                             #  releasing store
+    sw        a1, offStaticField_value(a0) #  field <- vBBBB
+          #  no-op 
+    beqz      a1, 1f
+    srl       t2, t1, GC_CARD_SHIFT
+    addu      t3, a2, t2
+    sb        a2, (t3)
+1:
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* continuation for OP_SPUT_BOOLEAN_JUMBO */
+
+.LOP_SPUT_BOOLEAN_JUMBO_finish:
+    # field ptr in a0
+    FETCH(a2, 3)                           # a2<- BBBB
+    FETCH_ADVANCE_INST(4)                  #  advance rPC, load rINST
+    GET_VREG(a1, a2)                       #  a1 <- fp[BBBB]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+          #  no-op                             #  releasing store
+    sw        a1, offStaticField_value(a0) #  field <- vBBBB
+          #  no-op 
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* continuation for OP_SPUT_BYTE_JUMBO */
+
+.LOP_SPUT_BYTE_JUMBO_finish:
+    # field ptr in a0
+    FETCH(a2, 3)                           # a2<- BBBB
+    FETCH_ADVANCE_INST(4)                  #  advance rPC, load rINST
+    GET_VREG(a1, a2)                       #  a1 <- fp[BBBB]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+          #  no-op                             #  releasing store
+    sw        a1, offStaticField_value(a0) #  field <- vBBBB
+          #  no-op 
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* continuation for OP_SPUT_CHAR_JUMBO */
+
+.LOP_SPUT_CHAR_JUMBO_finish:
+    # field ptr in a0
+    FETCH(a2, 3)                           # a2<- BBBB
+    FETCH_ADVANCE_INST(4)                  #  advance rPC, load rINST
+    GET_VREG(a1, a2)                       #  a1 <- fp[BBBB]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+          #  no-op                             #  releasing store
+    sw        a1, offStaticField_value(a0) #  field <- vBBBB
+          #  no-op 
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* continuation for OP_SPUT_SHORT_JUMBO */
+
+.LOP_SPUT_SHORT_JUMBO_finish:
+    # field ptr in a0
+    FETCH(a2, 3)                           # a2<- BBBB
+    FETCH_ADVANCE_INST(4)                  #  advance rPC, load rINST
+    GET_VREG(a1, a2)                       #  a1 <- fp[BBBB]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+          #  no-op                             #  releasing store
+    sw        a1, offStaticField_value(a0) #  field <- vBBBB
+          #  no-op 
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* continuation for OP_INVOKE_VIRTUAL_JUMBO */
+
+    /*
+     * At this point:
+     *  a0 = resolved base method
+     *  rBIX= C or CCCC (index of first arg, which is the "this" ptr)
+     */
+.LOP_INVOKE_VIRTUAL_JUMBO_continue:
+    FETCH(rBIX, 4)                         #  rBIX <- CCCC
+    GET_VREG(rOBJ, rBIX)                   #  rOBJ <- "this" ptr
+    LOADu2_offMethod_methodIndex(a2, a0)   #  a2 <- baseMethod->methodIndex
+    # is "this" null?
+    beqz      rOBJ, common_errNullObject   #  null "this", throw exception
+    LOAD_base_offObject_clazz(a3, rOBJ)    #  a3 <- thisPtr->clazz
+    LOAD_base_offClassObject_vtable(a3, a3) #  a3 <- thisPtr->clazz->vtable
+    LOAD_eas2(a0, a3, a2)                  #  a0 <- vtable[methodIndex]
+    b         common_invokeMethodJumbo     #  (a0=method, rOBJ="this")
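+
+    /*
+     * Virtual dispatch above, as a hedged C sketch (the helper is
+     * illustrative; the offsets used are offObject_clazz,
+     * offClassObject_vtable and offMethod_methodIndex):
+     *
+     *   const Method* selectVirtual(const Object* thisPtr,
+     *                               const Method* baseMethod) {
+     *       // the receiver's concrete class supplies the override, if any
+     *       return thisPtr->clazz->vtable[baseMethod->methodIndex];
+     *   }
+     */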
+
+
+/* continuation for OP_INVOKE_SUPER_JUMBO */
+
+    /*
+     * At this point:
+     *  a0 = resolved base method
+     *  rBIX = method->clazz
+     */
+.LOP_INVOKE_SUPER_JUMBO_continue:
+    LOAD_base_offClassObject_super(a1, rBIX) #  a1 <- method->clazz->super
+    LOADu2_offMethod_methodIndex(a2, a0)   #  a2 <- baseMethod->methodIndex
+    LOAD_base_offClassObject_vtableCount(a3, a1) #  a3 <- super->vtableCount
+    EXPORT_PC()                            #  must export for invoke
+    # compare (methodIndex, vtableCount)
+    bgeu      a2, a3, .LOP_INVOKE_SUPER_JUMBO_nsm      #  method not present in superclass
+    LOAD_base_offClassObject_vtable(a1, a1) #  a1 <- ...clazz->super->vtable
+    LOAD_eas2(a0, a1, a2)                  #  a0 <- vtable[methodIndex]
+    b         common_invokeMethodJumbo     #  a0=method rOBJ="this"
+
+    /*
+     * Throw a NoSuchMethodError with the method name as the message.
+     *  a0 = resolved base method
+     */
+.LOP_INVOKE_SUPER_JUMBO_nsm:
+    LOAD_base_offMethod_name(a1, a0)       #  a1 <- method name
+    b         common_errNoSuchMethod
+
+
+/* continuation for OP_INVOKE_STATIC_JUMBO */
+
+.LOP_INVOKE_STATIC_JUMBO_resolve:
+    LOAD_rSELF_method(a3)                  #  a3 <- self->method
+    LOAD_base_offMethod_clazz(a0, a3)      #  a0 <- method->clazz
+    li        a2, METHOD_STATIC            #  resolver method type
+    JAL(dvmResolveMethod)                  #  v0 <- call(clazz, ref, flags)
+    move      a0, v0
+#if defined(WITH_JIT)
+    /*
+     * Check to see if we're actively building a trace.  If so,
+     * we need to keep this instruction out of it.
+     * rBIX: &resolved_methodToCall
+     */
+    lhu        a2, offThread_subMode(rSELF)
+    beqz      v0, common_exceptionThrown   #  null, handle exception
+    and       a2, kSubModeJitTraceBuild    #  trace under construction?
+    beqz      a2, common_invokeMethodJumboNoThis #  no, (a0=method, rOBJ="this")
+    lw        a1, 0(rBIX)                  #  reload resolved method
+    # finished resolving?
+    bnez      a1, common_invokeMethodJumboNoThis #  yes, (a0=method, rOBJ="this")
+    move      rBIX, a0                     #  preserve method
+    move      a0, rSELF
+    move      a1, rPC
+    JAL(dvmJitEndTraceSelect)              #  (self, pc)
+    move      a0, rBIX
+    b         common_invokeMethodJumboNoThis #  whew, finally!
+#else
+    # got null?
+    bnez      v0, common_invokeMethodJumboNoThis #  (a0=method, rOBJ="this")
+    b         common_exceptionThrown       #  yes, handle exception
+#endif
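+
+    /*
+     * Hedged C sketch of the WITH_JIT branch above: while a trace is being
+     * built, an unresolved callee must end trace selection so the trace
+     * never embeds this not-yet-resolved invoke:
+     *
+     *   if ((self->subMode & kSubModeJitTraceBuild) != 0 &&
+     *           dvmDex->pResMethods[ref] == NULL) {
+     *       dvmJitEndTraceSelect(self, pc);    // (self, pc)
+     *   }
+     *   // then proceed to common_invokeMethodJumboNoThis with a0 = method
+     */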
+
+/* continuation for OP_INVOKE_OBJECT_INIT_JUMBO */
+    /*
+     * A debugger is attached, so we need to go ahead and do
+     * this.  For simplicity, we'll just jump directly to the
+     * corresponding handler.  Note that we can't use
+     * rIBASE here because it may be in single-step mode.
+     * Load the primary table base directly.
+     */
+.LOP_INVOKE_OBJECT_INIT_JUMBO_debugger:
+    lw      a1, offThread_mainHandlerTable(rSELF)
+    .if 1
+    li      t0, OP_INVOKE_DIRECT_JUMBO
+    .else
+    li      t0, OP_INVOKE_DIRECT_RANGE
+    .endif
+    GOTO_OPCODE_BASE(a1, t0)            # execute it
+
+/* continuation for OP_IGET_VOLATILE_JUMBO */
+
+.LOP_IGET_VOLATILE_JUMBO_resolved:
+    # test results
+    move      a0, v0
+    beqz      a0, common_exceptionThrown
+    /*
+     * Currently:
+     *  v0 holds resolved field
+     *  rOBJ holds object (caller saved)
+     */
+.LOP_IGET_VOLATILE_JUMBO_finish:
+    #BAL(common_squeak0)
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    addu      a3, a3, rOBJ                 #  form address
+    lw a0, (a3)                         #  a0 <- obj.field (8/16/32 bits)
+    SMP_DMB                               #  acquiring load
+    FETCH(a2, 3)                           #  a2<- BBBB
+    FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
+    SET_VREG(a0, a2)                       #  fp[BBBB]<- a0
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* continuation for OP_IGET_WIDE_VOLATILE_JUMBO */
+
+.LOP_IGET_WIDE_VOLATILE_JUMBO_resolved:
+    # test return code
+    move      a0, v0
+    bnez      v0, .LOP_IGET_WIDE_VOLATILE_JUMBO_finish
+    b         common_exceptionThrown
+
+    /*
+     * Currently:
+     *  a0   holds resolved field
+     *  rOBJ holds object
+     */
+.LOP_IGET_WIDE_VOLATILE_JUMBO_finish:
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    beqz      rOBJ, common_errNullObject   #  object was null
+    GET_OPA4(a2)                           #  a2 <- A+
+    addu      rOBJ, rOBJ, a3               #  form address
+    .if 1
+    vLOAD64(a0, a1, rOBJ)                  #  a0/a1 <- obj.field (64-bit align ok)
+    .else
+    LOAD64(a0, a1, rOBJ)                   #  a0/a1 <- obj.field (64-bit align ok)
+    .endif
+    FETCH(a2, 3)                           #  a2 <- BBBB
+    FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
+    EAS2(a3, rFP, a2)                      #  a3 <- &fp[BBBB]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64(a0, a1, a3)                    #  fp[BBBB] <- a0/a1
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* continuation for OP_IGET_OBJECT_VOLATILE_JUMBO */
+
+.LOP_IGET_OBJECT_VOLATILE_JUMBO_resolved:
+    # test results
+    move      a0, v0
+    beqz      a0, common_exceptionThrown
+    /*
+     * Currently:
+     *  v0 holds resolved field
+     *  rOBJ holds object (caller saved)
+     */
+.LOP_IGET_OBJECT_VOLATILE_JUMBO_finish:
+    #BAL(common_squeak0)
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    addu      a3, a3, rOBJ                 #  form address
+    lw a0, (a3)                         #  a0 <- obj.field (8/16/32 bits)
+    SMP_DMB                               #  acquiring load
+    FETCH(a2, 3)                           #  a2<- BBBB
+    FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
+    SET_VREG(a0, a2)                       #  fp[BBBB]<- a0
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* continuation for OP_IPUT_VOLATILE_JUMBO */
+
+.LOP_IPUT_VOLATILE_JUMBO_resolved:
+    move      a0, v0
+    beqz      a0, common_exceptionThrown
+    # fall through to OP_IPUT_VOLATILE_JUMBO_finish
+
+
+    /*
+     * Currently:
+     *  a0 holds resolved field
+     *  rOBJ holds object
+     */
+.LOP_IPUT_VOLATILE_JUMBO_finish:
+    #BAL(common_squeak0)
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    FETCH(a1, 3)                           # a1<- BBBB
+    GET_VREG(a0, a1)                       #  a0 <- fp[BBBB]
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    addu      rOBJ, rOBJ, a3               #  form address
+    SMP_DMB_ST                            #  releasing store
+    sw a0, (rOBJ)                      #  obj.field (8/16/32 bits) <- a0
+    SMP_DMB
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* continuation for OP_IPUT_WIDE_VOLATILE_JUMBO */
+
+.LOP_IPUT_WIDE_VOLATILE_JUMBO_resolved:
+    move      a0, v0
+    beqz      a0, common_exceptionThrown
+    # fall through to OP_IPUT_WIDE_VOLATILE_JUMBO_finish
+    /*
+     * Currently:
+     *  a0   holds resolved field
+     *  rOBJ holds object
+     */
+.LOP_IPUT_WIDE_VOLATILE_JUMBO_finish:
+    FETCH(a2, 3)                           #  a2 <- BBBB
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BBBB]
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
+    LOAD64(a0, a1, a2)                     #  a0/a1 <- fp[BBBB]
+    GET_INST_OPCODE(rBIX)                  #  extract opcode from rINST
+    addu      a2, rOBJ, a3                 #  form address
+    .if 1
+    JAL(dvmQuasiAtomicSwap64Sync)          # stores r0/r1 into addr r2
+#    STORE64(a0, a1, a2)                    #  obj.field (64 bits, aligned) <- a0 a1
+    .else
+    STORE64(a0, a1, a2)                    #  obj.field (64 bits, aligned) <- a0 a1
+    .endif
+    GOTO_OPCODE(rBIX)                      #  jump to next instruction
+
+
+
+/* continuation for OP_IPUT_OBJECT_VOLATILE_JUMBO */
+
+    /*
+     * Currently:
+     *  a0 holds resolved field
+     *  rOBJ holds object
+     */
+.LOP_IPUT_OBJECT_VOLATILE_JUMBO_resolved:
+    move      a0, v0
+    beqz      a0, common_exceptionThrown
+    # fall through to OP_IPUT_OBJECT_VOLATILE_JUMBO_finish
+
+.LOP_IPUT_OBJECT_VOLATILE_JUMBO_finish:
+    #BAL(common_squeak0)
+    LOAD_base_offInstField_byteOffset(a3, a0) #  a3 <- byte offset of field
+    FETCH(a1, 3)                              # a1<- BBBB
+    GET_VREG(a0, a1)                       #  a0 <- fp[BBBB]
+    lw        a2, offThread_cardTable(rSELF) #  a2 <- card table base
+    # check object for null
+    beqz      rOBJ, common_errNullObject   #  object was null
+    FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    addu      t2, rOBJ, a3                 #  form address
+    SMP_DMB_ST                            #  releasing store
+    sw a0, (t2)                        #  obj.field (32 bits) <- a0
+    SMP_DMB
+    beqz      a0, 1f                       #  stored a null reference?
+    srl       t1, rOBJ, GC_CARD_SHIFT
+    addu      t2, a2, t1
+    sb        a2, (t2)                     #  mark card if not
+1:
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* continuation for OP_SGET_VOLATILE_JUMBO */
+
+.LOP_SGET_VOLATILE_JUMBO_finish:
+    LOAD_base_offStaticField_value(a1, a0) #  a1 <- field value
+    SMP_DMB                               #  acquiring load
+    FETCH(a2, 3)                           #  a2 <- BBBB
+    FETCH_ADVANCE_INST(4)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a1, a2, t0)              #  fp[BBBB] <- a1
+
+/* continuation for OP_SGET_WIDE_VOLATILE_JUMBO */
+
+.LOP_SGET_WIDE_VOLATILE_JUMBO_finish:
+    FETCH(a1, 3)                           # a1<- BBBB
+    .if 1
+    vLOAD64_off(a2, a3, a0, offStaticField_value) #  a2/a3 <- field value (aligned)
+    .else
+    LOAD64_off(a2, a3, a0, offStaticField_value) #  a2/a3 <- field value (aligned)
+    .endif
+    FETCH_ADVANCE_INST(4)                  #  advance rPC, load rINST
+    EAS2(a1, rFP, a1)                      #  a1 <- &fp[BBBB]
+    STORE64(a2, a3, a1)                    #  vBBBB/vBBBB+1 <- a2/a3
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* continuation for OP_SGET_OBJECT_VOLATILE_JUMBO */
+
+.LOP_SGET_OBJECT_VOLATILE_JUMBO_finish:
+    LOAD_base_offStaticField_value(a1, a0) #  a1 <- field value
+    SMP_DMB                               #  acquiring load
+    FETCH(a2, 3)                           #  a2 <- BBBB
+    FETCH_ADVANCE_INST(4)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a1, a2, t0)              #  fp[BBBB] <- a1
+
+/* continuation for OP_SPUT_VOLATILE_JUMBO */
+
+.LOP_SPUT_VOLATILE_JUMBO_finish:
+    # field ptr in a0
+    FETCH(a2, 3)                           # a2<- BBBB
+    FETCH_ADVANCE_INST(4)                  #  advance rPC, load rINST
+    GET_VREG(a1, a2)                       #  a1 <- fp[BBBB]
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SMP_DMB_ST                            #  releasing store
+    sw        a1, offStaticField_value(a0) #  field <- vBBBB
+    SMP_DMB
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/* continuation for OP_SPUT_WIDE_VOLATILE_JUMBO */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  AAAAAAAA field ref
+     *  rOBJ:  &fp[BBBB]
+     *  rBIX: dvmDex->pResFields
+     *
+     * Returns StaticField pointer in a2.
+     */
+.LOP_SPUT_WIDE_VOLATILE_JUMBO_resolve:
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() could throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    # success ?
+    move      a0, v0
+    beqz      v0, common_exceptionThrown   #  no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    move      a2, v0
+    b         .LOP_SPUT_WIDE_VOLATILE_JUMBO_finish           # resume
+
+/* continuation for OP_SPUT_OBJECT_VOLATILE_JUMBO */
+.LOP_SPUT_OBJECT_VOLATILE_JUMBO_finish:                     #  field ptr in a0
+    FETCH(a2, 3)                        # a2<- BBBB
+    FETCH_ADVANCE_INST(4)                  #  advance rPC, load rINST
+    GET_VREG(a1, a2)                       #  a1 <- fp[BBBB]
+    lw        a2, offThread_cardTable(rSELF) #  a2 <- card table base
+    lw        t1, offField_clazz(a0)       #  t1 <- field->clazz
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SMP_DMB_ST                            #  releasing store
+    sw        a1, offStaticField_value(a0) #  field <- vBBBB
+    SMP_DMB
+    beqz      a1, 1f
+    srl       t2, t1, GC_CARD_SHIFT
+    addu      t3, a2, t2
+    sb        a2, (t3)
+1:
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+    .size   dvmAsmSisterStart, .-dvmAsmSisterStart
+    .global dvmAsmSisterEnd
+dvmAsmSisterEnd:
+
+/* File: mips/footer.S */
+/*
+ * ===========================================================================
+ *  Common subroutines and data
+ * ===========================================================================
+ */
+
+    .text
+    .align 2
+
+#if defined(WITH_JIT)
+#if defined(WITH_SELF_VERIFICATION)
+
+/*
+ * "longjmp" to a translation after single-stepping.  Before returning
+ * to translation, must save state for self-verification.
+ */
+    .global dvmJitResumeTranslation             # (Thread* self, u4* dFP)
+dvmJitResumeTranslation:
+    move    rSELF, a0                           # restore self
+    move    rPC, a1                             # restore Dalvik pc
+    move    rFP, a2                             # restore Dalvik fp
+    lw      rBIX, offThread_jitResumeNPC(rSELF)
+    sw      zero, offThread_jitResumeNPC(rSELF) # reset resume address
+    lw      sp, offThread_jitResumeNSP(rSELF)   # cut back native stack
+    b       jitSVShadowRunStart                 # resume as if cache hit
+                                                # expects resume addr in rBIX
+
+    .global dvmJitToInterpPunt
+dvmJitToInterpPunt:
+    li        a2, kSVSPunt                 #  a2 <- interpreter entry point
+    sw        zero, offThread_inJitCodeCache(rSELF) #  Back to the interp land
+    b         jitSVShadowRunEnd            #  doesn't return
+
+    .global dvmJitToInterpSingleStep
+dvmJitToInterpSingleStep:
+    move      rPC, a0                      # set up dalvik pc
+    EXPORT_PC()
+    sw        ra, offThread_jitResumeNPC(rSELF)
+    sw        a1, offThread_jitResumeDPC(rSELF)
+    li        a2, kSVSSingleStep           #  a2 <- interpreter entry point
+    b         jitSVShadowRunEnd            #  doesn't return
+
+    .global dvmJitToInterpNoChainNoProfile
+dvmJitToInterpNoChainNoProfile:
+    move      a0, rPC                      #  pass our target PC
+    li        a2, kSVSNoProfile            #  a2 <- interpreter entry point
+    sw        zero, offThread_inJitCodeCache(rSELF) #  Back to the interp land
+    b         jitSVShadowRunEnd            #  doesn't return
+
+    .global dvmJitToInterpTraceSelectNoChain
+dvmJitToInterpTraceSelectNoChain:
+    move      a0, rPC                      #  pass our target PC
+    li        a2, kSVSTraceSelect          #  a2 <- interpreter entry point
+    sw        zero, offThread_inJitCodeCache(rSELF) #  Back to the interp land
+    b         jitSVShadowRunEnd            #  doesn't return
+
+    .global dvmJitToInterpTraceSelect
+dvmJitToInterpTraceSelect:
+    lw        a0, 0(ra)                   #  pass our target PC
+    li        a2, kSVSTraceSelect          #  a2 <- interpreter entry point
+    sw        zero, offThread_inJitCodeCache(rSELF) #  Back to the interp land
+    b         jitSVShadowRunEnd            #  doesn't return
+
+    .global dvmJitToInterpBackwardBranch
+dvmJitToInterpBackwardBranch:
+    lw        a0, 0(ra)                   #  pass our target PC
+    li        a2, kSVSBackwardBranch       #  a2 <- interpreter entry point
+    sw        zero, offThread_inJitCodeCache(rSELF) #  Back to the interp land
+    b         jitSVShadowRunEnd            #  doesn't return
+
+    .global dvmJitToInterpNormal
+dvmJitToInterpNormal:
+    lw        a0, 0(ra)                   #  pass our target PC
+    li        a2, kSVSNormal               #  a2 <- interpreter entry point
+    sw        zero, offThread_inJitCodeCache(rSELF) #  Back to the interp land
+    b         jitSVShadowRunEnd            #  doesn't return
+
+    .global dvmJitToInterpNoChain
+dvmJitToInterpNoChain:
+    move      a0, rPC                      #  pass our target PC
+    li        a2, kSVSNoChain              #  a2 <- interpreter entry point
+    sw        zero, offThread_inJitCodeCache(rSELF) #  Back to the interp land
+    b         jitSVShadowRunEnd            #  doesn't return
+#else                                   /*  WITH_SELF_VERIFICATION */
+
+
+/*
+ * "longjmp" to a translation after single-stepping.
+ */
+    .global dvmJitResumeTranslation             # (Thread* self, u4* dFP)
+dvmJitResumeTranslation:
+    move    rSELF, a0                           # restore self
+    move    rPC, a1                             # restore Dalvik pc
+    move    rFP, a2                             # restore Dalvik fp
+    lw      a0, offThread_jitResumeNPC(rSELF)
+    sw      zero, offThread_jitResumeNPC(rSELF) # reset resume address
+    lw      sp, offThread_jitResumeNSP(rSELF)   # cut back native stack
+    jr      a0                                  # resume translation
+
+
+/*
+ * Return from the translation cache to the interpreter when the compiler is
+ * having issues translating/executing a Dalvik instruction. We have to skip
+ * the code cache lookup; otherwise it is possible to bounce indefinitely
+ * between the interpreter and the code cache if the instruction that fails
+ * to be compiled happens to be at a trace start.
+ */
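+/* Net effect (sketch): self->inJitCodeCache = NULL; resume interpreting at rPC. */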
+    .global dvmJitToInterpPunt
+dvmJitToInterpPunt:
+    lw        gp, STACK_OFFSET_GP(sp)
+    move      rPC, a0
+#if defined(WITH_JIT_TUNING)
+    move      a0, ra
+    JAL(dvmBumpPunt)
+#endif
+    EXPORT_PC()
+    sw        zero, offThread_inJitCodeCache(rSELF) # Back to the interp land
+    lw        rIBASE, offThread_curHandlerTable(rSELF)
+    FETCH_INST()
+    GET_INST_OPCODE(t0)
+    GOTO_OPCODE(t0)
+
+/*
+ * Return to the interpreter to handle a single instruction.
+ * On entry:
+ *    rPC <= Dalvik PC of instruction to interpret
+ *    a1 <= Dalvik PC of resume instruction
+ *    ra <= resume point in translation
+ */
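+/*
+ * Effect (sketch): self->jitResumeNPC = ra; self->jitResumeNSP = sp;
+ * self->jitResumeDPC = a1; self->singleStepCount = 1;
+ * dvmEnableSubMode(self, kSubModeCountedStep); then interpret one
+ * instruction at rPC.
+ */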
+
+    .global dvmJitToInterpSingleStep
+dvmJitToInterpSingleStep:
+    lw        gp, STACK_OFFSET_GP(sp)
+    move      rPC, a0                       # set up dalvik pc
+    EXPORT_PC()
+    sw        ra, offThread_jitResumeNPC(rSELF)
+    sw        sp, offThread_jitResumeNSP(rSELF)
+    sw        a1, offThread_jitResumeDPC(rSELF)
+    li        a1, 1
+    sw        a1, offThread_singleStepCount(rSELF) # just step once
+    move      a0, rSELF
+    li        a1, kSubModeCountedStep
+    JAL(dvmEnableSubMode)                   # (self, subMode)
+    lw        rIBASE, offThread_curHandlerTable(rSELF)
+    FETCH_INST()
+    GET_INST_OPCODE(t0)
+    GOTO_OPCODE(t0)
+/*
+ * Return from the translation cache and immediately request
+ * a translation for the exit target.  Commonly used for callees.
+ */
+    .global dvmJitToInterpTraceSelectNoChain
+dvmJitToInterpTraceSelectNoChain:
+    lw        gp, STACK_OFFSET_GP(sp)
+#if defined(WITH_JIT_TUNING)
+    JAL(dvmBumpNoChain)
+#endif
+    move      a0, rPC
+    move      a1, rSELF
+    JAL(dvmJitGetTraceAddrThread)          # (pc, self)
+    move      a0, v0
+    sw        a0, offThread_inJitCodeCache(rSELF) # set the inJitCodeCache flag
+    move      a1, rPC                      # arg1 of translation may need this
+    move      ra, zero                     #  in case target is HANDLER_INTERPRET
+    beqz      a0, 2f                       # 0 means translation does not exist
+    jr        a0
+
+/*
+ * Return from the translation cache and immediately request
+ * a translation for the exit target.  Commonly used following
+ * invokes.
+ */
+    .global dvmJitToInterpTraceSelect
+dvmJitToInterpTraceSelect:
+    lw        gp, STACK_OFFSET_GP(sp)
+    lw        rPC, (ra)                    #  get our target PC
+    subu      rINST, ra, 8                 #  save start of chain branch
+    move      a0, rPC
+    move      a1, rSELF
+    JAL(dvmJitGetTraceAddrThread)          #  (pc, self)
+    sw        v0, offThread_inJitCodeCache(rSELF) # set the inJitCodeCache flag
+    beqz      v0, 2f
+    move      a0, v0
+    move      a1, rINST
+    JAL(dvmJitChain)                       #  v0 <- dvmJitChain(codeAddr, chainAddr)
+    move      a1, rPC                      #  arg1 of translation may need this
+    move      ra, zero                     #  in case target is HANDLER_INTERPRET
+    move      a0, v0
+    beqz      a0, toInterpreter            #  didn't chain - resume with interpreter
+
+    jr        a0                           #  continue native execution
+
+/* No translation, so request one if profiling isn't disabled */
+2:
+    lw        rIBASE, offThread_curHandlerTable(rSELF)
+    lw        a0, offThread_pJitProfTable(rSELF)
+    FETCH_INST()
+    li        t0, kJitTSelectRequestHot
+    movn      a2, t0, a0                   #  ask for trace selection
+    bnez      a0, common_selectTrace
+    GET_INST_OPCODE(t0)
+    GOTO_OPCODE(t0)
+
+/*
+ * Return from the translation cache to the interpreter.
+ * The return was done with a jump-and-link, and the 32-bit word
+ * following the return address (ra) contains the target rPC value.
+ *
+ * We recover the new target, then check whether a translation is
+ * available for it.  If so, we chain to the translation and go back
+ * to native execution.  Otherwise, it's back to the interpreter
+ * (after treating this entry as a potential trace start).
+ */
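+/*
+ * Chaining-cell layout assumed here (sketch): the cell ends with a
+ * jump-and-link into this routine followed by a 32-bit word holding
+ * the Dalvik target PC, so (ra) loads that PC and ra-8 marks the
+ * start of the chain branch.
+ */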
+    .global dvmJitToInterpNormal
+dvmJitToInterpNormal:
+    lw        gp, STACK_OFFSET_GP(sp)
+    lw        rPC, (ra)                    #  get our target PC
+    subu      rINST, ra, 8                 #  save start of chain branch
+#if defined(WITH_JIT_TUNING)
+    JAL(dvmBumpNormal)
+#endif
+    move      a0, rPC
+    move      a1, rSELF
+    JAL(dvmJitGetTraceAddrThread)          #  (pc, self)
+    move      a0, v0
+    sw        a0, offThread_inJitCodeCache(rSELF) #  set the inJitCodeCache flag
+    beqz      a0, toInterpreter            #  go if not, otherwise do chain
+    move      a1, rINST
+    JAL(dvmJitChain)                       #  v0 <- dvmJitChain(codeAddr, chainAddr)
+    move      a1, rPC                      #  arg1 of translation may need this
+    move      ra, zero                     #  in case target is HANDLER_INTERPRET
+    move      a0, v0
+    beqz      a0, toInterpreter            #  didn't chain - resume with interpreter
+
+    jr        a0                           #  continue native execution
+
+/*
+ * Return from the translation cache to the interpreter to do method invocation.
+ * Check if translation exists for the callee, but don't chain to it.
+ */
+    .global dvmJitToInterpNoChainNoProfile
+dvmJitToInterpNoChainNoProfile:
+#if defined(WITH_JIT_TUNING)
+    JAL(dvmBumpNoChain)
+#endif
+    move      a0, rPC
+    move      a1, rSELF
+    JAL(dvmJitGetTraceAddrThread)          # (pc, self)
+    move      a0, v0
+    sw        a0, offThread_inJitCodeCache(rSELF) #  set the inJitCodeCache flag
+    move      a1, rPC                      #  arg1 of translation may need this
+    move      ra, zero                     #  in case target is HANDLER_INTERPRET
+    beqz      a0, footer235
+
+    jr        a0                           #  continue native execution if so
+footer235:
+    EXPORT_PC()
+    lw        rIBASE, offThread_curHandlerTable(rSELF)
+    FETCH_INST()
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+/*
+ * Return from the translation cache to the interpreter to do method invocation.
+ * Check if translation exists for the callee, but don't chain to it.
+ */
+
+    .global dvmJitToInterpNoChain
+dvmJitToInterpNoChain:
+    lw        gp, STACK_OFFSET_GP(sp)
+#if defined(WITH_JIT_TUNING)
+    JAL(dvmBumpNoChain)
+#endif
+    move      a0, rPC
+    move      a1, rSELF
+    JAL(dvmJitGetTraceAddrThread)          # (pc, self)
+    move      a0, v0
+    sw        a0, offThread_inJitCodeCache(rSELF) #  set the inJitCodeCache flag
+    move      a1, rPC                      #  arg1 of translation may need this
+    move      ra, zero                     #  in case target is HANDLER_INTERPRET
+    beqz      a0, 1f
+    jr        a0                           #  continue native execution if so
+1:
+#endif                                  /*  WITH_SELF_VERIFICATION */
+
+/*
+ * No translation; restore interpreter regs and start interpreting.
+ * rSELF & rFP were preserved in the translated code, and rPC has
+ * already been restored by the time we get here.  We need to set
+ * up rIBASE & rINST, and load pJitProfTable into a0.
+ */
+
+toInterpreter:
+    EXPORT_PC()
+    lw        rIBASE, offThread_curHandlerTable(rSELF)
+    FETCH_INST()
+    lw        a0, offThread_pJitProfTable(rSELF)
+    lw        rIBASE, offThread_curHandlerTable(rSELF)
+    # NOTE: intended fallthrough
+
+/*
+ * Similar to common_updateProfile, but tests for a null pJitProfTable.
+ * a0 holds pJitProfTable, rINST is loaded, rPC is current, and
+ * rIBASE has been recently refreshed.
+ */
+
+common_testUpdateProfile:
+
+    beqz      a0, 4f
+
+/*
+ * Common code to update the potential trace start counter, and initiate
+ * a trace build if appropriate.
+ * On entry here:
+ *    a0    <= pJitProfTable (verified non-NULL)
+ *    rPC   <= Dalvik PC
+ *    rINST <= next instruction
+ */
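+/*
+ * Rough C equivalent of the counter logic below (illustrative sketch
+ * only, not part of the build):
+ *     u1 *p = &pJitProfTable[((pc >> 12) ^ pc) & (JIT_PROF_SIZE - 1)];
+ *     if (--(*p) != 0) goto next_instruction;
+ *     *p = self->jitThreshold;       // reset counter
+ *     // ...then look up or request a trace for this pc
+ */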
+common_updateProfile:
+    srl       a3, rPC, 12                  #  cheap, but fast hash function
+    xor       a3, a3, rPC
+    andi      a3, a3, JIT_PROF_SIZE-1      #  eliminate excess bits
+    addu      t1, a0, a3
+    lbu       a1, (t1)                     #  get counter
+    GET_INST_OPCODE(t0)
+    subu      a1, a1, 1                    #  decrement counter
+    sb        a1, (t1)                     #  and store it
+    beqz      a1, 1f
+    GOTO_OPCODE(t0)                        #  not at threshold; jump to next instruction
+1:
+    /* Looks good, reset the counter */
+    lw        a1, offThread_jitThreshold(rSELF)
+    sb        a1, (t1)
+    EXPORT_PC()
+    move      a0, rPC
+    move      a1, rSELF
+    JAL(dvmJitGetTraceAddrThread)          # (pc, self)
+    move      a0, v0
+    sw        v0, offThread_inJitCodeCache(rSELF) #  set the inJitCodeCache flag
+    move      a1, rPC                      #  arg1 of translation may need this
+    move      ra, zero                     #  in case target is HANDLER_INTERPRET
+
+#if !defined(WITH_SELF_VERIFICATION)
+    li        t0, kJitTSelectRequest       #  ask for trace selection
+    movz      a2, t0, a0
+    beqz      a0, common_selectTrace
+    jr        a0                           #  jump to the translation
+#else
+
+    bne       a0, zero, skip_ask_for_trace_selection
+    li        a2, kJitTSelectRequest       #  ask for trace selection
+    j         common_selectTrace
+
+skip_ask_for_trace_selection:
+    /*
+     * At this point, we have a target translation.  However, if
+     * that translation is actually the interpret-only pseudo-translation
+     * we want to treat it the same as no translation.
+     */
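+    # Sketch: if (target == dvmCompilerGetInterpretTemplate()) treat it as
+    # "no translation": clear inJitCodeCache and keep interpreting; else
+    # run the translation under self-verification (jitSVShadowRunStart).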
+    move      rBIX, a0                     #  save target
+    jal       dvmCompilerGetInterpretTemplate
+    # special case?
+    bne       v0, rBIX, jitSVShadowRunStart  #  set up self verification shadow space
+    # Need to clear the inJitCodeCache flag
+    sw        zero, offThread_inJitCodeCache(rSELF) #  back to the interp land
+    GET_INST_OPCODE(t0)
+    GOTO_OPCODE(t0)
+    /* no return */
+#endif
+
+/*
+ * On entry:
+ *  a2 is the requested jit state.
+ */
+
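+/*
+ * Sketch: if already in a trace-build/self-verification submode, just
+ * resume; otherwise self->jitState = a2; dvmJitCheckTraceRequest(self);
+ * then refresh rIBASE and resume interpreting.
+ */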
+common_selectTrace:
+    lhu        a0, offThread_subMode(rSELF)
+    andi       a0, (kSubModeJitTraceBuild | kSubModeJitSV)
+    bnez       a0, 3f                      # already doing JIT work, continue
+    sw         a2, offThread_jitState(rSELF)
+    move       a0, rSELF
+
+/*
+ * Call out to validate the trace-building request.  If successful,
+ * rIBASE will be swapped to send us into single-stepping trace
+ * building mode, so we need to refresh it before we continue.
+ */
+
+    EXPORT_PC()
+    SAVE_PC_TO_SELF()
+    SAVE_FP_TO_SELF()
+    JAL(dvmJitCheckTraceRequest)
+3:
+    FETCH_INST()
+    lw        rIBASE, offThread_curHandlerTable(rSELF)
+4:
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    GOTO_OPCODE(t0)
+    /* no return */
+#endif
+
+#if defined(WITH_SELF_VERIFICATION)
+
+/*
+ * Save PC and registers to shadow memory for self-verification mode
+ * before jumping to the native translation.
+ * On entry:
+ *    rPC, rFP, rSELF: the values that they should contain
+ *    rBIX: the address of the target translation.
+ */
+jitSVShadowRunStart:
+    move      a0, rPC                      #  a0 <- program counter
+    move      a1, rFP                      #  a1 <- frame pointer
+    move      a2, rSELF                    #  a2 <- self (Thread ptr)
+    move      a3, rBIX                     #  a3 <- target translation
+    jal       dvmSelfVerificationSaveState #  save registers to shadow space
+    lw        rFP, offShadowSpace_shadowFP(v0) #  rFP <- fp in shadow space
+    jr        rBIX                         #  jump to the translation
+
+/*
+ * Restore PC, registers, and interpState to original values
+ * before jumping back to the interpreter.
+ */
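+/*
+ * Sketch: restore pc/fp via dvmSelfVerificationRestoreState(); if the
+ * shadow-space state is anything but a punt, enable kSubModeJitSV
+ * single-stepping and request kJitSelfVerification; then re-enter the
+ * interpreter.
+ */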
+jitSVShadowRunEnd:
+    move      a1, rFP                      #  pass ending fp
+    move      a3, rSELF                    #  pass self ptr for convenience
+    jal       dvmSelfVerificationRestoreState #  restore pc and fp values
+    LOAD_PC_FP_FROM_SELF()                 #  restore pc, fp
+    lw        a1, offShadowSpace_svState(a0) #  get self verification state
+    beq       a1, zero, 1f                 #  check for punt condition
+
+    # Setup SV single-stepping
+    move      a0, rSELF
+    li        a1, kSubModeJitSV
+    JAL(dvmEnableSubMode)                  # (self, subMode)
+    li        a2, kJitSelfVerification     #  ask for self verification
+    sw        a2, offThread_jitState(rSELF)
+    # Intentional fallthrough
+
+1:
+    # exit to interpreter without check
+    EXPORT_PC()
+    lw        rIBASE, offThread_curHandlerTable(rSELF)
+    FETCH_INST()
+    GET_INST_OPCODE(t0)
+    GOTO_OPCODE(t0)
+#endif
+
+/*
+ * The equivalent of "goto bail"; this calls through the "bail handler".
+ * It will end this interpreter activation and return to the caller
+ * of dvmMterpStdRun.
+ *
+ * State registers are saved to the "thread" area before bailing, for
+ * debugging purposes.
+ */
+    .ent common_gotoBail
+common_gotoBail:
+    SAVE_PC_FP_TO_SELF()                   # export state to "thread"
+    move      a0, rSELF                    # a0 <- self ptr
+    b         dvmMterpStdBail              # call(self)
+    .end common_gotoBail
+
+/*
+ * The JIT's invoke method needs to remember the callsite class and
+ * target pair.  Save them here so that they are available to
+ * dvmCheckJit following the interpretation of this invoke.
+ */
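+/*
+ * Sketch: self->methodToCall = a0;
+ * self->callsiteClass = (rOBJ != NULL) ? rOBJ->clazz : NULL;
+ */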
+#if defined(WITH_JIT)
+save_callsiteinfo:
+    beqz    rOBJ, 1f
+    lw      rOBJ, offObject_clazz(rOBJ)
+1:
+    sw      a0, offThread_methodToCall(rSELF)
+    sw      rOBJ, offThread_callsiteClass(rSELF)
+    jr      ra
+#endif
+
+/*
+ * Common code for jumbo method invocation.
+ * NOTE: this adjusts rPC to account for the difference in instruction width.
+ * As a result, the savedPc in the stack frame will not be wholly accurate. So
+ * long as that is only used for source file line number calculations, we're
+ * okay.
+ */
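+/*
+ * (A jumbo invoke is 5 code units vs. 3 for the range form, so bumping
+ * rPC by 2 units here lets the shared range-invoke return path advance
+ * by 3 and land on the correct next instruction.)
+ */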
+common_invokeMethodJumboNoThis:
+#if defined(WITH_JIT)
+ /* On entry: a0 is "Method* methodToCall" */
+    li       rOBJ, 0                     # clear "this"
+#endif
+common_invokeMethodJumbo:
+ /* On entry: a0 is "Method* methodToCall", rOBJ is "this" */
+.LinvokeNewJumbo:
+#if defined(WITH_JIT)
+    lhu      a1, offThread_subMode(rSELF)
+    andi     a1, kSubModeJitTraceBuild
+    beqz     a1, 1f
+    JAL(save_callsiteinfo)
+#endif
+/* prepare to copy args to "outs" area of current frame */
+1:
+    add      rPC, rPC, 4          # adjust pc to make return consistent
+    FETCH(a2, 1)
+    SAVEAREA_FROM_FP(rBIX, rFP)   # rBIX <- stack save area
+    beqz     a2, .LinvokeArgsDone  # if no args, skip the rest
+    FETCH(a1, 2)                  # a1 <- CCCC
+    b         .LinvokeRangeArgs   # handle args like invoke range
+
+
+/*
+ * Common code for method invocation with range.
+ *
+ * On entry:
+ *  a0 is "Method* methodToCall", the method we're trying to call
+ */
+common_invokeMethodRange:
+.LinvokeNewRange:
+#if defined(WITH_JIT)
+    lhu      a1, offThread_subMode(rSELF)
+    andi     a1, kSubModeJitTraceBuild
+    beqz     a1, 1f
+    JAL(save_callsiteinfo)
+#endif
+    # prepare to copy args to "outs" area of current frame
+1:
+    GET_OPA(a2)
+    SAVEAREA_FROM_FP(rBIX, rFP)              #  rBIX <- stack save area
+    beqz      a2, .LinvokeArgsDone
+    FETCH(a1, 2)                           #  a1 <- CCCC
+.LinvokeRangeArgs:
+    # a0=methodToCall, a1=CCCC, a2=count, rBIX=outs
+    # (very few methods have > 10 args; could unroll for common cases)
+    EAS2(a3, rFP, a1)
+    sll       t0, a2, 2
+    subu      rBIX, rBIX, t0
+
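+    # copy loop (sketch): for (i = count; i != 0; i--) *outs++ = fp[CCCC++];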
+1:
+    lw        a1, 0(a3)
+    addu      a3, a3, 4
+    subu      a2, a2, 1
+    sw        a1, 0(rBIX)
+    addu      rBIX, 4
+    bnez      a2, 1b
+    b         .LinvokeArgsDone
+
+/*
+ * Common code for method invocation without range.
+ *
+ * On entry:
+ *  a0 is "Method* methodToCall", "rOBJ is this"
+ */
+common_invokeMethodNoRange:
+.LinvokeNewNoRange:
+#if defined(WITH_JIT)
+    lhu      a1, offThread_subMode(rSELF)
+    andi     a1, kSubModeJitTraceBuild
+    beqz     a1, 1f
+    JAL(save_callsiteinfo)
+#endif
+
+    # prepare to copy args to "outs" area of current frame
+1:
+    GET_OPB(a2)
+    SAVEAREA_FROM_FP(rBIX, rFP)
+    beqz      a2, .LinvokeArgsDone
+    FETCH(a1, 2)
+
+    # a0=methodToCall, a1=GFED, a2=count,
+.LinvokeNonRange:
+    beq       a2, 0, 0f
+    beq       a2, 1, 1f
+    beq       a2, 2, 2f
+    beq       a2, 3, 3f
+    beq       a2, 4, 4f
+    beq       a2, 5, 5f
+
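+    # Duff's-device style dispatch: jump to case <count> and fall through,
+    # copying one vreg per case from the GFED/rINST nibbles into "outs".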
+5:
+    and       t0, rINST, 0x0f00
+    ESRN(t2, rFP, t0, 6)
+    lw        a3, (t2)
+    subu      rBIX, 4
+    sw        a3, 0(rBIX)
+
+4:
+    and       t0, a1, 0xf000
+    ESRN(t2, rFP, t0, 10)
+    lw        a3, (t2)
+    subu      rBIX, 4
+    sw        a3, 0(rBIX)
+
+3:
+    and       t0, a1, 0x0f00
+    ESRN(t2, rFP, t0, 6)
+    lw        a3, (t2)
+    subu      rBIX, 4
+    sw        a3, 0(rBIX)
+
+2:
+    and       t0, a1, 0x00f0
+    ESRN(t2, rFP, t0, 2)
+    lw        a3, (t2)
+    subu      rBIX, 4
+    sw        a3, 0(rBIX)
+
+1:
+    and       t0, a1, 0x000f
+    EASN(t2, rFP, t0, 2)
+    lw        a3, (t2)
+    subu      rBIX, 4
+    sw        a3, 0(rBIX)
+
+0:
+    # fall through to .LinvokeArgsDone
+
+
+.LinvokeArgsDone:                          #  a0=methodToCall
+    lhu       rOBJ, offMethod_registersSize(a0)
+    lhu       a3, offMethod_outsSize(a0)
+    lw        a2, offMethod_insns(a0)
+    lw        rINST, offMethod_clazz(a0)
+    # find space for the new stack frame, check for overflow
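+    # Sketch: newFp = (u4*)savearea(fp) - registersSize;
+    #         bottom = (u1*)savearea(newFp) - outsSize * 4;
+    #         if (bottom < self->interpStackEnd) --> stack overflow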
+    SAVEAREA_FROM_FP(a1, rFP)              # a1 <- stack save area
+    sll       t0, rOBJ, 2                    #  a1 <- newFp (old savearea - regsSize)
+    subu      a1, a1, t0
+    SAVEAREA_FROM_FP(rBIX, a1)
+    lw        rOBJ, offThread_interpStackEnd(rSELF) #  rOBJ <- interpStackEnd
+    sll       t2, a3, 2
+    subu      t0, rBIX, t2
+    lhu       ra, offThread_subMode(rSELF)
+    lw        a3, offMethod_accessFlags(a0) #  a3 <- methodToCall->accessFlags
+    bltu      t0, rOBJ, .LstackOverflow    #  this frame would overflow the stack
+
+
+    # set up newSaveArea
+#ifdef EASY_GDB
+    SAVEAREA_FROM_FP(t0, rFP)
+    sw        t0, offStackSaveArea_prevSave(rBIX)
+#endif
+    sw        rFP, (offStackSaveArea_prevFrame)(rBIX)
+    sw        rPC, (offStackSaveArea_savedPc)(rBIX)
+#if defined(WITH_JIT)
+    sw        zero, (offStackSaveArea_returnAddr)(rBIX)
+#endif
+    sw        a0, (offStackSaveArea_method)(rBIX)
+    # Profiling?
+    bnez       ra, 2f
+1:
+    and       t2, a3, ACC_NATIVE
+    bnez      t2, .LinvokeNative
+    lhu       rOBJ, (a2)           # rOBJ <- rINST loaded from new PC
+    lw        a3, offClassObject_pDvmDex(rINST)
+    move      rPC, a2              # Publish new rPC
+    # Update state values for the new method
+    # a0=methodToCall, a1=newFp, a3=newMethodClass, rOBJ=newINST
+    sw        a0, offThread_method(rSELF)
+    sw        a3, offThread_methodClassDex(rSELF)
+    li        a2, 1
+    sw        a2, offThread_debugIsMethodEntry(rSELF)
+
+#if defined(WITH_JIT)
+    lw        a0, offThread_pJitProfTable(rSELF)
+    move      rFP, a1                    # fp = newFp
+    GET_PREFETCHED_OPCODE(t0, rOBJ)      # extract prefetched opcode from rOBJ
+    move      rINST, rOBJ                # publish new rINST
+    sw        a1, offThread_curFrame(rSELF)
+    bnez      a0, common_updateProfile
+    GOTO_OPCODE(t0)
+#else
+    move      rFP, a1
+    GET_PREFETCHED_OPCODE(t0, rOBJ)
+    move      rINST, rOBJ
+    sw        a1, offThread_curFrame(rSELF)
+    GOTO_OPCODE(t0)
+#endif
+
+2:
+    # Profiling - record method entry.  a0: methodToCall
+    STACK_STORE(a0, 0)
+    STACK_STORE(a1, 4)
+    STACK_STORE(a2, 8)
+    STACK_STORE(a3, 12)
+    sw       rPC, offThread_pc(rSELF)          # update interpSave.pc
+    move     a1, a0
+    move     a0, rSELF
+    JAL(dvmReportInvoke)
+    STACK_LOAD(a3, 12)                         # restore a0-a3
+    STACK_LOAD(a2, 8)
+    STACK_LOAD(a1, 4)
+    STACK_LOAD(a0, 0)
+    b        1b
+.LinvokeNative:
+    # Prep for the native call
+    # a0=methodToCall, a1=newFp, rBIX=newSaveArea
+    lhu       ra, offThread_subMode(rSELF)
+    lw        t3, offThread_jniLocal_topCookie(rSELF)
+    sw        a1, offThread_curFrame(rSELF)
+    sw        t3, offStackSaveArea_localRefCookie(rBIX) # newFp->localRefCookie=top
+    move      a2, a0
+    move      a0, a1
+    addu      a1, rSELF, offThread_retval
+    move      a3, rSELF
+#ifdef ASSIST_DEBUGGER
+    /* insert fake function header to help gdb find the stack frame */
+    b         .Lskip
+    .ent dalvik_mterp
+dalvik_mterp:
+    STACK_STORE_FULL()
+.Lskip:
+#endif
+    bnez      ra, 11f                          # Any special SubModes active?
+    lw        t9, offMethod_nativeFunc(a2)
+    jalr      t9
+    lw        gp, STACK_OFFSET_GP(sp)
+7:
+    # native return; rBIX=newSaveArea
+    # equivalent to dvmPopJniLocals
+    lw        a0, offStackSaveArea_localRefCookie(rBIX)
+    lw        a1, offThread_exception(rSELF)
+    sw        rFP, offThread_curFrame(rSELF)
+    sw        a0, offThread_jniLocal_topCookie(rSELF)    # new top <- old top
+    bnez      a1, common_exceptionThrown
+
+    FETCH_ADVANCE_INST(3)
+    GET_INST_OPCODE(t0)
+    GOTO_OPCODE(t0)
+11:
+    # a0=newFp, a1=&retval, a2=methodToCall, a3=self, ra=subModes
+    SCRATCH_STORE(a0, 0)
+    SCRATCH_STORE(a1, 4)
+    SCRATCH_STORE(a2, 8)
+    SCRATCH_STORE(a3, 12)
+    move      a0, a2                    # a0 <- methodToCall
+    move      a1, rSELF
+    move      a2, rFP
+    JAL(dvmReportPreNativeInvoke)       # (methodToCall, self, fp)
+    SCRATCH_LOAD(a3, 12)                         # restore a0-a3
+    SCRATCH_LOAD(a2, 8)
+    SCRATCH_LOAD(a1, 4)
+    SCRATCH_LOAD(a0, 0)
+
+    # Call the native method
+    lw       t9, offMethod_nativeFunc(a2)      # t9<-methodToCall->nativeFunc
+    jalr     t9
+    lw       gp, STACK_OFFSET_GP(sp)
+
+    # Restore the pre-call arguments
+    SCRATCH_LOAD(a3, 12)                         # restore a0-a3
+    SCRATCH_LOAD(a2, 8)
+    SCRATCH_LOAD(a1, 4)
+    SCRATCH_LOAD(a0, 0)
+
+    # Finish up any post-invoke subMode requirements
+    move      a0, a2
+    move      a1, rSELF
+    move      a2, rFP
+    JAL(dvmReportPostNativeInvoke)      # (methodToCall, self, fp)
+    b         7b
+
+
+.LstackOverflow:       # a0=methodToCall
+    move      a1, a0                    #  a1 <- methodToCall
+    move      a0, rSELF                 # a0 <- self
+    JAL(dvmHandleStackOverflow)         #  dvmHandleStackOverflow(self, methodToCall)
+    b         common_exceptionThrown
+#ifdef ASSIST_DEBUGGER
+    .end dalvik_mterp
+#endif
+
+    /*
+     * Common code for method invocation, calling through "glue code".
+     *
+     * TODO: now that we have range and non-range invoke handlers, this
+     *       needs to be split into two.  Maybe just create entry points
+     *       that set the range flag and jump here?
+     *
+     * On entry:
+     *  a0 is "Method* methodToCall", the method we're trying to call
+     *  a range flag indicates whether this is a /range variant
+     */
+
+/*
+ * Common code for handling a return instruction.
+ *
+ * This does not return.
+ */
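+/*
+ * Sketch: saveArea = SAVEAREA_FROM_FP(fp); pc = saveArea->savedPc;
+ * fp = saveArea->prevFrame; if the previous frame is a break frame,
+ * bail out; otherwise republish self->method / self->curFrame and
+ * continue at savedPc + 3 code units (just past the invoke).
+ */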
+common_returnFromMethod:
+.LreturnNew:
+    lhu       t0, offThread_subMode(rSELF)
+    SAVEAREA_FROM_FP(a0, rFP)
+    lw        rOBJ, offStackSaveArea_savedPc(a0) # rOBJ = saveArea->savedPc
+    bnez      t0, 19f
+14:
+    lw        rFP, offStackSaveArea_prevFrame(a0) # fp = saveArea->prevFrame
+    lw        a2, (offStackSaveArea_method - sizeofStackSaveArea)(rFP)
+                                               # a2<- method we're returning to
+    # is this a break frame?
+    beqz      a2, common_gotoBail              # break frame, bail out completely
+
+    lw        rBIX, offMethod_clazz(a2)        # rBIX<- method->clazz
+    lw        rIBASE, offThread_curHandlerTable(rSELF) # refresh rIBASE
+    PREFETCH_ADVANCE_INST(rINST, rOBJ, 3)      # advance rOBJ, update new rINST
+    sw        a2, offThread_method(rSELF)      # self->method = newSave->method
+    lw        a1, offClassObject_pDvmDex(rBIX) # r1<- method->clazz->pDvmDex
+    sw        rFP, offThread_curFrame(rSELF)   # curFrame = fp
+#if defined(WITH_JIT)
+    lw         rBIX, offStackSaveArea_returnAddr(a0)
+    move       rPC, rOBJ                       # publish new rPC
+    sw         a1, offThread_methodClassDex(rSELF)
+    sw         rBIX, offThread_inJitCodeCache(rSELF) # may return to JIT'ed land
+    beqz       rBIX, 15f                   # no JIT return addr? interpret
+    move       t9, rBIX
+    jalr       t9
+    lw         gp, STACK_OFFSET_GP(sp)
+15:
+    GET_INST_OPCODE(t0)                        # extract opcode from rINST
+    GOTO_OPCODE(t0)                            # jump to next instruction
+#else
+    GET_INST_OPCODE(t0)                        # extract opcode from rINST
+    move       rPC, rOBJ                       # publish new rPC
+    sw         a1, offThread_methodClassDex(rSELF)
+    GOTO_OPCODE(t0)
+#endif
+
+19:
+    # Handle special actions
+    # On entry, a0: StackSaveArea
+    lw         a1, offStackSaveArea_prevFrame(a0) # a1<- prevFP
+    sw         rPC, offThread_pc(rSELF)        # update interpSave.pc
+    sw         a1, offThread_curFrame(rSELF)   # update interpSave.curFrame
+    move       a0, rSELF
+    JAL(dvmReportReturn)
+    SAVEAREA_FROM_FP(a0, rFP)                  # restore StackSaveArea
+    b          14b
+
+    .if 0
+    /*
+     * Return handling, calls through "glue code".
+     */
+.LreturnOld:
+    SAVE_PC_FP_TO_SELF()                       # export state
+    move       a0, rSELF                       # arg to function
+    JAL(dvmMterp_returnFromMethod)
+    b          common_resumeAfterGlueCall
+    .endif
+
+/*
+ * Somebody has thrown an exception.  Handle it.
+ *
+ * If the exception processing code returns to us (instead of falling
+ * out of the interpreter), continue with whatever the next instruction
+ * now happens to be.
+ *
+ * This does not return.
+ */
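+/*
+ * Overall flow (sketch): exc = self->exception; self->exception = NULL;
+ * relPc = dvmFindCatchBlock(self, (pc - method->insns) >> 1, exc,
+ *                           false, &fp);
+ * if (relPc < 0) bail out; otherwise resume at the handler, putting the
+ * exception back into self->exception if the handler begins with
+ * move-exception.
+ */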
+    .global dvmMterpCommonExceptionThrown
+dvmMterpCommonExceptionThrown:
+common_exceptionThrown:
+.LexceptionNew:
+
+    EXPORT_PC()
+    move     a0, rSELF
+    JAL(dvmCheckSuspendPending)
+    lw       rOBJ, offThread_exception(rSELF)
+    move     a1, rSELF
+    move     a0, rOBJ
+    JAL(dvmAddTrackedAlloc)
+    lhu      a2, offThread_subMode(rSELF)
+    sw       zero, offThread_exception(rSELF)
+
+    # Special subMode?
+    bnez     a2, 7f                     # any special subMode handling needed?
+8:
+    /* set up args and a local for "&fp" */
+    sw       rFP, 20(sp)                 #  store rFP => tmp
+    addu     t0, sp, 20                  #  compute &tmp
+    sw       t0, STACK_OFFSET_ARG04(sp)  #  save it in arg4 as per ABI
+    li       a3, 0                       #  a3 <- false
+    lw       a1, offThread_method(rSELF)
+    move     a0, rSELF
+    lw       a1, offMethod_insns(a1)
+    lhu      ra, offThread_subMode(rSELF)
+    move     a2, rOBJ
+    subu     a1, rPC, a1
+    sra      a1, a1, 1
+
+    /* call; v0 gets catchRelPc (a code-unit offset) */
+    JAL(dvmFindCatchBlock)           # call(self, relPc, exc, scan?, &fp)
+    lw        rFP, 20(sp)            # retrieve the updated rFP
+
+    /* update frame pointer and check result from dvmFindCatchBlock */
+    move      a0, v0
+    bltz      v0, .LnotCaughtLocally
+
+    /* fix earlier stack overflow if necessary; preserve a0 */
+    lbu       a1, offThread_stackOverflowed(rSELF)
+    beqz      a1, 1f
+    move      rBIX, a0
+    move      a0, rSELF
+    move      a1, rOBJ
+    JAL(dvmCleanupStackOverflow)
+    move      a0, rBIX
+
+1:
+
+/* adjust locals to match self->interpSave.curFrame and updated PC */
+    SAVEAREA_FROM_FP(a1, rFP)           # a1<- new save area
+    lw        a1, offStackSaveArea_method(a1)
+    sw        a1, offThread_method(rSELF)
+    lw        a2, offMethod_clazz(a1)
+    lw        a3, offMethod_insns(a1)
+    lw        a2, offClassObject_pDvmDex(a2)
+    EAS1(rPC, a3, a0)
+    sw        a2, offThread_methodClassDex(rSELF)
+
+    /* release the tracked alloc on the exception */
+    move      a0, rOBJ
+    move      a1, rSELF
+    JAL(dvmReleaseTrackedAlloc)
+
+    /* restore the exception if the handler wants it */
+    lw        rIBASE, offThread_curHandlerTable(rSELF)
+    FETCH_INST()
+    GET_INST_OPCODE(t0)
+    bne       t0, OP_MOVE_EXCEPTION, 2f
+    sw        rOBJ, offThread_exception(rSELF)
+2:
+    GOTO_OPCODE(t0)
+
+    # Manage debugger bookkeeping
+7:
+    sw        rPC, offThread_pc(rSELF)
+    sw        rFP, offThread_curFrame(rSELF)
+    move      a0, rSELF
+    move      a1, rOBJ
+    JAL(dvmReportExceptionThrow)
+    b         8b
+
+.LnotCaughtLocally:                     #  rOBJ = exception
+    /* fix stack overflow if necessary */
+    lbu       a1, offThread_stackOverflowed(rSELF)
+    beqz      a1, 3f
+    move      a0, rSELF
+    move      a1, rOBJ
+    JAL(dvmCleanupStackOverflow)           #  dvmCleanupStackOverflow(self, exception)
+
+3:
+    # may want to show "not caught locally" debug messages here
+#if DVM_SHOW_EXCEPTION >= 2
+    /* call __android_log_print(prio, tag, format, ...) */
+    /* "Exception %s from %s:%d not caught locally" */
+    lw        a0, offThread_method(rSELF)
+    lw        a1, offMethod_insns(a0)
+    subu      a1, rPC, a1
+    sra       a1, a1, 1
+    JAL(dvmLineNumFromPC)
+    sw        v0, 20(sp)
+    # dvmGetMethodSourceFile(method)
+    lw        a0, offThread_method(rSELF)
+    JAL(dvmGetMethodSourceFile)
+    sw        v0, 16(sp)
+    # exception->clazz->descriptor
+    lw        a3, offObject_clazz(rOBJ)
+    lw        a3, offClassObject_descriptor(a3)
+    la        a2, .LstrExceptionNotCaughtLocally
+    la        a1, .LstrLogTag
+    li        a0, 3
+    JAL(__android_log_print)
+#endif
+    sw        rOBJ, offThread_exception(rSELF)
+    move      a0, rOBJ
+    move      a1, rSELF
+    JAL(dvmReleaseTrackedAlloc)
+    b         common_gotoBail
+
+    /*
+     * Exception handling, calls through "glue code".
+     */
+    .if     0
+.LexceptionOld:
+    SAVE_PC_TO_SELF()                # export state
+    SAVE_FP_TO_SELF()
+    move     a0, rSELF               # arg to function
+    JAL(dvmMterp_exceptionThrown)
+    b       common_resumeAfterGlueCall
+    .endif
+
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including the current
+     * instruction.
+     *
+     * On entry:
+     *     rBIX: &dvmDex->pResFields[field]
+     *     a0:  field pointer (must preserve)
+     */
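+/*
+ * Sketch: if ((self->subMode & kSubModeJitTraceBuild) &&
+ *             dvmDex->pResFields[field] == NULL)
+ *             dvmJitEndTraceSelect(self, pc);  // end trace before this inst
+ */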
+common_verifyField:
+     lhu     a3, offThread_subMode(rSELF)
+     andi    a3, kSubModeJitTraceBuild
+     bnez    a3, 1f                 # building a trace? go check the field
+     jr      ra                     # not building, just return
+1:
+     lw      a1, (rBIX)
+     beqz    a1, 2f                 # field not yet resolved? end the trace
+     jr      ra                     # resolved, just return
+2:
+    SCRATCH_STORE(a0, 0)
+    SCRATCH_STORE(a1, 4)
+    SCRATCH_STORE(a2, 8)
+    SCRATCH_STORE(a3, 12)
+    SCRATCH_STORE(ra, 16)
+    move    a0, rSELF
+    move    a1, rPC
+    JAL(dvmJitEndTraceSelect)        # (self, pc) end trace before this inst
+    SCRATCH_LOAD(a0, 0)
+    SCRATCH_LOAD(a1, 4)
+    SCRATCH_LOAD(a2, 8)
+    SCRATCH_LOAD(a3, 12)
+    SCRATCH_LOAD(ra, 16)
+    jr      ra                       # return
+#endif
+
+/*
+ * After returning from a "glued" function, pull out the updated
+ * values and start executing at the next instruction.
+ */
+common_resumeAfterGlueCall:
+    LOAD_PC_FP_FROM_SELF()           #  pull rPC and rFP out of thread
+    lw      rIBASE, offThread_curHandlerTable(rSELF) # refresh
+    FETCH_INST()                     #  load rINST from rPC
+    GET_INST_OPCODE(t0)              #  extract opcode from rINST
+    GOTO_OPCODE(t0)                  #  jump to next instruction
+
+/*
+ * Invalid array index. Note that our calling convention is unusual: we take
+ * a1 and a3 because those happen to be the registers all our callers are
+ * using. We move a3 into a0 before calling the C function; a1 already
+ * matches the second argument.
+ * a1: index
+ * a3: size
+ */
+common_errArrayIndex:
+    EXPORT_PC()
+    move      a0, a3
+    JAL(dvmThrowArrayIndexOutOfBoundsException)
+    b         common_exceptionThrown
+
+/*
+ * Integer divide or mod by zero.
+ */
+common_errDivideByZero:
+    EXPORT_PC()
+    la     a0, .LstrDivideByZero
+    JAL(dvmThrowArithmeticException)
+    b       common_exceptionThrown
+
+/*
+ * Attempt to allocate an array with a negative size.
+ * On entry: length in a1
+ */
+common_errNegativeArraySize:
+    EXPORT_PC()
+    move    a0, a1                                # arg0 <- len
+    JAL(dvmThrowNegativeArraySizeException)    # (len)
+    b       common_exceptionThrown
+
+/*
+ * Invocation of a non-existent method.
+ * On entry: method name in a1
+ */
+common_errNoSuchMethod:
+    EXPORT_PC()
+    move     a0, a1
+    JAL(dvmThrowNoSuchMethodError)
+    b       common_exceptionThrown
+
+/*
+ * We encountered a null object when we weren't expecting one.  We
+ * export the PC, throw a NullPointerException, and goto the exception
+ * processing code.
+ */
+common_errNullObject:
+    EXPORT_PC()
+    li      a0, 0
+    JAL(dvmThrowNullPointerException)
+    b       common_exceptionThrown
+
+/*
+ * For debugging, cause an immediate fault. The source address will be in ra. Use a jal to jump here.
+ */
+common_abort:
+    lw      zero,-4(zero)            #  generate SIGSEGV
+
+/*
+ * Spit out a "we were here", preserving all registers.
+ */
+    .macro SQUEAK num
+common_squeak\num:
+    STACK_STORE_RA();
+    la        a0, .LstrSqueak
+    LOAD_IMM(a1, \num);
+    JAL(printf);
+    STACK_LOAD_RA();
+    RETURN;
+    .endm
+
+    SQUEAK 0
+    SQUEAK 1
+    SQUEAK 2
+    SQUEAK 3
+    SQUEAK 4
+    SQUEAK 5
+
+/*
+ * Spit out the number in a0, preserving registers.
+ */
+common_printNum:
+    STACK_STORE_RA()
+    MOVE_REG(a1, a0)
+    la        a0, .LstrSqueak
+    JAL(printf)
+    STACK_LOAD_RA()
+    RETURN
+
+/*
+ * Print a newline, preserving registers.
+ */
+common_printNewline:
+    STACK_STORE_RA()
+    la        a0, .LstrNewline
+    JAL(printf)
+    STACK_LOAD_RA()
+    RETURN
+
+    /*
+     * Print the 32-bit quantity in a0 as a hex value, preserving registers.
+     */
+common_printHex:
+    STACK_STORE_RA()
+    MOVE_REG(a1, a0)
+    la        a0, .LstrPrintHex
+    JAL(printf)
+    STACK_LOAD_RA()
+    RETURN;
+
+/*
+ * Print the 64-bit quantity in a0-a1, preserving registers.
+ */
+common_printLong:
+    STACK_STORE_RA()
+    MOVE_REG(a3, a1)
+    MOVE_REG(a2, a0)
+    la        a0, .LstrPrintLong
+    JAL(printf)
+    STACK_LOAD_RA()
+    RETURN;
+
+/*
+ * Print full method info.  Pass the Method* in a0.  Preserves regs.
+ */
+common_printMethod:
+    STACK_STORE_RA()
+    JAL(dvmMterpPrintMethod)
+    STACK_LOAD_RA()
+    RETURN
+
+/*
+ * Call a C helper function that dumps regs and possibly some
+ * additional info.  Requires the C function to be compiled in.
+ */
+    .if 0
+common_dumpRegs:
+    STACK_STORE_RA()
+    JAL(dvmMterpDumpMipsRegs)
+    STACK_LOAD_RA()
+    RETURN
+    .endif
+
+/*
+ * Zero-terminated ASCII string data.
+ */
+    .data
+
+.LstrBadEntryPoint:
+    .asciiz "Bad entry point %d\n"
+.LstrDivideByZero:
+    .asciiz "divide by zero"
+.LstrFilledNewArrayNotImpl:
+    .asciiz "filled-new-array only implemented for 'int'"
+.LstrLogTag:
+    .asciiz  "mterp"
+.LstrExceptionNotCaughtLocally:
+    .asciiz  "Exception %s from %s:%d not caught locally\n"
+
+.LstrNewline:
+    .asciiz "\n"
+.LstrSqueak:
+    .asciiz "<%d>"
+.LstrPrintHex:
+    .asciiz "<0x%x>"
+.LstrPrintLong:
+    .asciiz "<%lld>"
+
+
+    .global dvmAsmAltInstructionStart
+    .type   dvmAsmAltInstructionStart, %function
+    .text
+
+dvmAsmAltInstructionStart = .L_ALT_OP_NOP
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_NOP: /* 0x00 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
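+/*
+ * Illustrative sketch of each stub's effect (not part of the build):
+ *     if (self->breakFlags != 0) dvmCheckBefore(pc, fp, self);
+ *     goto real_handler;  // dvmAsmInstructionStart + opcode * 128
+ */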
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (0 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_MOVE: /* 0x01 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (1 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_MOVE_FROM16: /* 0x02 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (2 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_MOVE_16: /* 0x03 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (3 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_MOVE_WIDE: /* 0x04 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (4 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_MOVE_WIDE_FROM16: /* 0x05 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (5 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_MOVE_WIDE_16: /* 0x06 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (6 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_MOVE_OBJECT: /* 0x07 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (7 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_MOVE_OBJECT_FROM16: /* 0x08 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (8 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_MOVE_OBJECT_16: /* 0x09 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (9 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_MOVE_RESULT: /* 0x0a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (10 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_MOVE_RESULT_WIDE: /* 0x0b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (11 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_MOVE_RESULT_OBJECT: /* 0x0c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (12 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_MOVE_EXCEPTION: /* 0x0d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (13 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_RETURN_VOID: /* 0x0e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (14 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_RETURN: /* 0x0f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (15 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_RETURN_WIDE: /* 0x10 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (16 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_RETURN_OBJECT: /* 0x11 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (17 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_CONST_4: /* 0x12 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (18 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_CONST_16: /* 0x13 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (19 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_CONST: /* 0x14 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (20 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_CONST_HIGH16: /* 0x15 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (21 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_CONST_WIDE_16: /* 0x16 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (22 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_CONST_WIDE_32: /* 0x17 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (23 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_CONST_WIDE: /* 0x18 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (24 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_CONST_WIDE_HIGH16: /* 0x19 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (25 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_CONST_STRING: /* 0x1a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (26 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_CONST_STRING_JUMBO: /* 0x1b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (27 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_CONST_CLASS: /* 0x1c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (28 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_MONITOR_ENTER: /* 0x1d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (29 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_MONITOR_EXIT: /* 0x1e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (30 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_CHECK_CAST: /* 0x1f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (31 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_INSTANCE_OF: /* 0x20 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (32 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_ARRAY_LENGTH: /* 0x21 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (33 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_NEW_INSTANCE: /* 0x22 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (34 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_NEW_ARRAY: /* 0x23 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (35 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_FILLED_NEW_ARRAY: /* 0x24 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (36 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_FILLED_NEW_ARRAY_RANGE: /* 0x25 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (37 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_FILL_ARRAY_DATA: /* 0x26 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (38 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_THROW: /* 0x27 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (39 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_GOTO: /* 0x28 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (40 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_GOTO_16: /* 0x29 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (41 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_GOTO_32: /* 0x2a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (42 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_PACKED_SWITCH: /* 0x2b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (43 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SPARSE_SWITCH: /* 0x2c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (44 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_CMPL_FLOAT: /* 0x2d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (45 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_CMPG_FLOAT: /* 0x2e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (46 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_CMPL_DOUBLE: /* 0x2f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (47 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_CMPG_DOUBLE: /* 0x30 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (48 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_CMP_LONG: /* 0x31 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (49 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IF_EQ: /* 0x32 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (50 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IF_NE: /* 0x33 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (51 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IF_LT: /* 0x34 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (52 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IF_GE: /* 0x35 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (53 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IF_GT: /* 0x36 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (54 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IF_LE: /* 0x37 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (55 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IF_EQZ: /* 0x38 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (56 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IF_NEZ: /* 0x39 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (57 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IF_LTZ: /* 0x3a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (58 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IF_GEZ: /* 0x3b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (59 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IF_GTZ: /* 0x3c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (60 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IF_LEZ: /* 0x3d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (61 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_3E: /* 0x3e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (62 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
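+/*
+ * Unused opcodes (such as 0x3e-0x43 below) still receive full
+ * 128-byte-aligned stubs: the table is addressed as
+ * dvmAsmInstructionStart + opcode * 128, so every opcode slot must
+ * exist to preserve that fixed geometry.
+ */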
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_3F: /* 0x3f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (63 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_40: /* 0x40 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (64 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_41: /* 0x41 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (65 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_42: /* 0x42 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (66 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_43: /* 0x43 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (67 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_AGET: /* 0x44 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (68 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_AGET_WIDE: /* 0x45 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (69 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_AGET_OBJECT: /* 0x46 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (70 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_AGET_BOOLEAN: /* 0x47 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (71 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_AGET_BYTE: /* 0x48 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is invoked with a plain JAL call; on return we jump to the real handler through rBIX.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (72 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_AGET_CHAR: /* 0x49 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (73 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_AGET_SHORT: /* 0x4a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (74 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_APUT: /* 0x4b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (75 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_APUT_WIDE: /* 0x4c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (76 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_APUT_OBJECT: /* 0x4d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (77 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_APUT_BOOLEAN: /* 0x4e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (78 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_APUT_BYTE: /* 0x4f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (79 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_APUT_CHAR: /* 0x50 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (80 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_APUT_SHORT: /* 0x51 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (81 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IGET: /* 0x52 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (82 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IGET_WIDE: /* 0x53 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (83 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IGET_OBJECT: /* 0x54 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (84 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IGET_BOOLEAN: /* 0x55 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (85 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IGET_BYTE: /* 0x56 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (86 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IGET_CHAR: /* 0x57 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (87 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IGET_SHORT: /* 0x58 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (88 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IPUT: /* 0x59 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (89 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IPUT_WIDE: /* 0x5a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (90 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IPUT_OBJECT: /* 0x5b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (91 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IPUT_BOOLEAN: /* 0x5c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (92 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IPUT_BYTE: /* 0x5d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (93 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IPUT_CHAR: /* 0x5e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (94 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IPUT_SHORT: /* 0x5f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (95 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SGET: /* 0x60 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (96 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SGET_WIDE: /* 0x61 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (97 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SGET_OBJECT: /* 0x62 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (98 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SGET_BOOLEAN: /* 0x63 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (99 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SGET_BYTE: /* 0x64 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (100 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SGET_CHAR: /* 0x65 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (101 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SGET_SHORT: /* 0x66 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (102 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SPUT: /* 0x67 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (103 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SPUT_WIDE: /* 0x68 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (104 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SPUT_OBJECT: /* 0x69 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (105 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SPUT_BOOLEAN: /* 0x6a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (106 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SPUT_BYTE: /* 0x6b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (107 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SPUT_CHAR: /* 0x6c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (108 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SPUT_SHORT: /* 0x6d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (109 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_INVOKE_VIRTUAL: /* 0x6e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (110 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_INVOKE_SUPER: /* 0x6f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (111 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_INVOKE_DIRECT: /* 0x70 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (112 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_INVOKE_STATIC: /* 0x71 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (113 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_INVOKE_INTERFACE: /* 0x72 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (114 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_73: /* 0x73 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (115 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_INVOKE_VIRTUAL_RANGE: /* 0x74 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (116 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_INVOKE_SUPER_RANGE: /* 0x75 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (117 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_INVOKE_DIRECT_RANGE: /* 0x76 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (118 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_INVOKE_STATIC_RANGE: /* 0x77 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (119 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_INVOKE_INTERFACE_RANGE: /* 0x78 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (120 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_79: /* 0x79 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (121 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_7A: /* 0x7a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (122 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_NEG_INT: /* 0x7b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (123 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_NOT_INT: /* 0x7c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (124 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_NEG_LONG: /* 0x7d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (125 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_NOT_LONG: /* 0x7e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (126 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_NEG_FLOAT: /* 0x7f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (127 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_NEG_DOUBLE: /* 0x80 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (128 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_INT_TO_LONG: /* 0x81 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (129 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_INT_TO_FLOAT: /* 0x82 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (130 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_INT_TO_DOUBLE: /* 0x83 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (131 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_LONG_TO_INT: /* 0x84 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called via JAL here, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (132 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_LONG_TO_FLOAT: /* 0x85 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (133 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_LONG_TO_DOUBLE: /* 0x86 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (134 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_FLOAT_TO_INT: /* 0x87 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (135 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_FLOAT_TO_LONG: /* 0x88 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (136 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_FLOAT_TO_DOUBLE: /* 0x89 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (137 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_DOUBLE_TO_INT: /* 0x8a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (138 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_DOUBLE_TO_LONG: /* 0x8b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (139 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_DOUBLE_TO_FLOAT: /* 0x8c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (140 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_INT_TO_BYTE: /* 0x8d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (141 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_INT_TO_CHAR: /* 0x8e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (142 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_INT_TO_SHORT: /* 0x8f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (143 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_ADD_INT: /* 0x90 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (144 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SUB_INT: /* 0x91 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (145 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_MUL_INT: /* 0x92 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (146 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_DIV_INT: /* 0x93 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (147 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_REM_INT: /* 0x94 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (148 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_AND_INT: /* 0x95 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (149 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_OR_INT: /* 0x96 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (150 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_XOR_INT: /* 0x97 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (151 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SHL_INT: /* 0x98 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (152 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SHR_INT: /* 0x99 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (153 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_USHR_INT: /* 0x9a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (154 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_ADD_LONG: /* 0x9b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (155 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SUB_LONG: /* 0x9c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (156 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_MUL_LONG: /* 0x9d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (157 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_DIV_LONG: /* 0x9e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (158 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_REM_LONG: /* 0x9f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (159 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_AND_LONG: /* 0xa0 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (160 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_OR_LONG: /* 0xa1 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (161 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_XOR_LONG: /* 0xa2 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (162 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SHL_LONG: /* 0xa3 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (163 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SHR_LONG: /* 0xa4 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (164 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_USHR_LONG: /* 0xa5 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (165 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_ADD_FLOAT: /* 0xa6 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (166 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SUB_FLOAT: /* 0xa7 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (167 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_MUL_FLOAT: /* 0xa8 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (168 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_DIV_FLOAT: /* 0xa9 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (169 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_REM_FLOAT: /* 0xaa */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (170 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_ADD_DOUBLE: /* 0xab */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (171 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SUB_DOUBLE: /* 0xac */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (172 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_MUL_DOUBLE: /* 0xad */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (173 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_DIV_DOUBLE: /* 0xae */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (174 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_REM_DOUBLE: /* 0xaf */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (175 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_ADD_INT_2ADDR: /* 0xb0 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (176 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SUB_INT_2ADDR: /* 0xb1 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (177 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_MUL_INT_2ADDR: /* 0xb2 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (178 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_DIV_INT_2ADDR: /* 0xb3 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (179 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_REM_INT_2ADDR: /* 0xb4 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (180 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_AND_INT_2ADDR: /* 0xb5 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (181 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_OR_INT_2ADDR: /* 0xb6 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (182 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_XOR_INT_2ADDR: /* 0xb7 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (183 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SHL_INT_2ADDR: /* 0xb8 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (184 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SHR_INT_2ADDR: /* 0xb9 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (185 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_USHR_INT_2ADDR: /* 0xba */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (186 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_ADD_LONG_2ADDR: /* 0xbb */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (187 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SUB_LONG_2ADDR: /* 0xbc */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (188 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_MUL_LONG_2ADDR: /* 0xbd */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (189 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_DIV_LONG_2ADDR: /* 0xbe */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (190 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_REM_LONG_2ADDR: /* 0xbf */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (191 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_AND_LONG_2ADDR: /* 0xc0 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL rather than as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (192 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_OR_LONG_2ADDR: /* 0xc1 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (193 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_XOR_LONG_2ADDR: /* 0xc2 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (194 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SHL_LONG_2ADDR: /* 0xc3 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (195 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SHR_LONG_2ADDR: /* 0xc4 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (196 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_USHR_LONG_2ADDR: /* 0xc5 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (197 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_ADD_FLOAT_2ADDR: /* 0xc6 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (198 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SUB_FLOAT_2ADDR: /* 0xc7 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (199 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_MUL_FLOAT_2ADDR: /* 0xc8 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (200 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_DIV_FLOAT_2ADDR: /* 0xc9 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (201 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_REM_FLOAT_2ADDR: /* 0xca */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (202 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_ADD_DOUBLE_2ADDR: /* 0xcb */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (203 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SUB_DOUBLE_2ADDR: /* 0xcc */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (204 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_MUL_DOUBLE_2ADDR: /* 0xcd */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (205 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_DIV_DOUBLE_2ADDR: /* 0xce */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (206 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_REM_DOUBLE_2ADDR: /* 0xcf */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (207 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_ADD_INT_LIT16: /* 0xd0 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (208 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_RSUB_INT: /* 0xd1 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (209 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_MUL_INT_LIT16: /* 0xd2 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (210 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_DIV_INT_LIT16: /* 0xd3 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (211 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_REM_INT_LIT16: /* 0xd4 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (212 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_AND_INT_LIT16: /* 0xd5 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (213 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_OR_INT_LIT16: /* 0xd6 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (214 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_XOR_INT_LIT16: /* 0xd7 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (215 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_ADD_INT_LIT8: /* 0xd8 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (216 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_RSUB_INT_LIT8: /* 0xd9 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (217 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_MUL_INT_LIT8: /* 0xda */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (218 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_DIV_INT_LIT8: /* 0xdb */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (219 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_REM_INT_LIT8: /* 0xdc */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (220 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_AND_INT_LIT8: /* 0xdd */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (221 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_OR_INT_LIT8: /* 0xde */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (222 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
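All of these .L_ALT_OP_* stubs form a second, parallel dispatch table with the same 128-byte stride and slot numbering as dvmAsmInstructionStart. The interpreter arms them by swapping the table pointer that rIBASE caches, which is why every stub unconditionally reloads offThread_curHandlerTable. A hedged sketch of that switch follows; the curHandlerTable, altHandlerTable, and breakFlags field names are inferred from the offThread_* offsets used in this file, and the struct layout shown is an assumption for illustration only.

    #include <stdint.h>

    /* Thread fields implied by the offThread_* offsets in the assembly;
     * names, types, and ordering here are assumed, not quoted. */
    struct Thread {
        const void *curHandlerTable;   /* reloaded into rIBASE on each refresh */
        const void *altHandlerTable;   /* base of the .L_ALT_OP_* stub table */
        uint8_t     breakFlags;        /* nonzero: stubs must call dvmCheckBefore */
        /* ...remaining fields omitted... */
    };

    /* Route subsequently dispatched opcodes through the alt stubs. */
    static void enableAltDispatch(struct Thread *self)
    {
        self->breakFlags      = 1;                      /* take the slow path in each stub */
        self->curHandlerTable = self->altHandlerTable;  /* seen at the next rIBASE refresh */
    }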
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_XOR_INT_LIT8: /* 0xdf */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (223 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SHL_INT_LIT8: /* 0xe0 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (224 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SHR_INT_LIT8: /* 0xe1 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (225 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_USHR_INT_LIT8: /* 0xe2 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (226 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IGET_VOLATILE: /* 0xe3 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (227 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IPUT_VOLATILE: /* 0xe4 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (228 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SGET_VOLATILE: /* 0xe5 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (229 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SPUT_VOLATILE: /* 0xe6 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (230 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IGET_OBJECT_VOLATILE: /* 0xe7 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (231 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IGET_WIDE_VOLATILE: /* 0xe8 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (232 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IPUT_WIDE_VOLATILE: /* 0xe9 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (233 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SGET_WIDE_VOLATILE: /* 0xea */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (234 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SPUT_WIDE_VOLATILE: /* 0xeb */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (235 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_BREAKPOINT: /* 0xec */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (236 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_THROW_VERIFICATION_ERROR: /* 0xed */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (237 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_EXECUTE_INLINE: /* 0xee */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (238 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_EXECUTE_INLINE_RANGE: /* 0xef */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (239 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_INVOKE_OBJECT_INIT_RANGE: /* 0xf0 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (240 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_RETURN_VOID_BARRIER: /* 0xf1 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (241 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IGET_QUICK: /* 0xf2 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (242 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IGET_WIDE_QUICK: /* 0xf3 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (243 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IGET_OBJECT_QUICK: /* 0xf4 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (244 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IPUT_QUICK: /* 0xf5 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (245 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IPUT_WIDE_QUICK: /* 0xf6 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (246 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IPUT_OBJECT_QUICK: /* 0xf7 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (247 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_INVOKE_VIRTUAL_QUICK: /* 0xf8 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (248 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_INVOKE_VIRTUAL_QUICK_RANGE: /* 0xf9 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (249 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_INVOKE_SUPER_QUICK: /* 0xfa */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (250 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_INVOKE_SUPER_QUICK_RANGE: /* 0xfb */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (251 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IPUT_OBJECT_VOLATILE: /* 0xfc */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (252 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SGET_OBJECT_VOLATILE: /* 0xfd */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (253 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SPUT_OBJECT_VOLATILE: /* 0xfe */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (254 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_DISPATCH_FF: /* 0xff */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (255 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_CONST_CLASS_JUMBO: /* 0x100 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (256 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_CHECK_CAST_JUMBO: /* 0x101 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (257 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_INSTANCE_OF_JUMBO: /* 0x102 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (258 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_NEW_INSTANCE_JUMBO: /* 0x103 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (259 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_NEW_ARRAY_JUMBO: /* 0x104 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (260 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_FILLED_NEW_ARRAY_JUMBO: /* 0x105 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (261 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IGET_JUMBO: /* 0x106 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (262 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IGET_WIDE_JUMBO: /* 0x107 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (263 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IGET_OBJECT_JUMBO: /* 0x108 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (264 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IGET_BOOLEAN_JUMBO: /* 0x109 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (265 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IGET_BYTE_JUMBO: /* 0x10a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (266 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IGET_CHAR_JUMBO: /* 0x10b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (267 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IGET_SHORT_JUMBO: /* 0x10c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (268 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IPUT_JUMBO: /* 0x10d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (269 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IPUT_WIDE_JUMBO: /* 0x10e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (270 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IPUT_OBJECT_JUMBO: /* 0x10f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (271 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IPUT_BOOLEAN_JUMBO: /* 0x110 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (272 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IPUT_BYTE_JUMBO: /* 0x111 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (273 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IPUT_CHAR_JUMBO: /* 0x112 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (274 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IPUT_SHORT_JUMBO: /* 0x113 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (275 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SGET_JUMBO: /* 0x114 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (276 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SGET_WIDE_JUMBO: /* 0x115 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (277 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SGET_OBJECT_JUMBO: /* 0x116 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (278 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SGET_BOOLEAN_JUMBO: /* 0x117 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (279 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SGET_BYTE_JUMBO: /* 0x118 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (280 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SGET_CHAR_JUMBO: /* 0x119 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (281 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SGET_SHORT_JUMBO: /* 0x11a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (282 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SPUT_JUMBO: /* 0x11b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (283 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SPUT_WIDE_JUMBO: /* 0x11c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (284 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SPUT_OBJECT_JUMBO: /* 0x11d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (285 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SPUT_BOOLEAN_JUMBO: /* 0x11e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (286 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SPUT_BYTE_JUMBO: /* 0x11f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (287 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SPUT_CHAR_JUMBO: /* 0x120 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (288 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SPUT_SHORT_JUMBO: /* 0x121 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (289 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_INVOKE_VIRTUAL_JUMBO: /* 0x122 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (290 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_INVOKE_SUPER_JUMBO: /* 0x123 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (291 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_INVOKE_DIRECT_JUMBO: /* 0x124 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (292 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_INVOKE_STATIC_JUMBO: /* 0x125 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (293 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_INVOKE_INTERFACE_JUMBO: /* 0x126 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (294 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_27FF: /* 0x127 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (295 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_28FF: /* 0x128 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (296 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_29FF: /* 0x129 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (297 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_2AFF: /* 0x12a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (298 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_2BFF: /* 0x12b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (299 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_2CFF: /* 0x12c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (300 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_2DFF: /* 0x12d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (301 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_2EFF: /* 0x12e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (302 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_2FFF: /* 0x12f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (303 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_30FF: /* 0x130 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (304 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_31FF: /* 0x131 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (305 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_32FF: /* 0x132 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (306 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_33FF: /* 0x133 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (307 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_34FF: /* 0x134 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (308 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_35FF: /* 0x135 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (309 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_36FF: /* 0x136 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (310 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_37FF: /* 0x137 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (311 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_38FF: /* 0x138 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (312 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_39FF: /* 0x139 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (313 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_3AFF: /* 0x13a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (314 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_3BFF: /* 0x13b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (315 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_3CFF: /* 0x13c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (316 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_3DFF: /* 0x13d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (317 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_3EFF: /* 0x13e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (318 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_3FFF: /* 0x13f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (319 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_40FF: /* 0x140 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (320 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_41FF: /* 0x141 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (321 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_42FF: /* 0x142 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (322 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_43FF: /* 0x143 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (323 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_44FF: /* 0x144 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (324 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_45FF: /* 0x145 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (325 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_46FF: /* 0x146 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (326 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_47FF: /* 0x147 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (327 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_48FF: /* 0x148 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (328 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_49FF: /* 0x149 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (329 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_4AFF: /* 0x14a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (330 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_4BFF: /* 0x14b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (331 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_4CFF: /* 0x14c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (332 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_4DFF: /* 0x14d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (333 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_4EFF: /* 0x14e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (334 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_4FFF: /* 0x14f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (335 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_50FF: /* 0x150 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (336 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_51FF: /* 0x151 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (337 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_52FF: /* 0x152 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (338 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_53FF: /* 0x153 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (339 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_54FF: /* 0x154 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (340 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_55FF: /* 0x155 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (341 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_56FF: /* 0x156 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (342 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_57FF: /* 0x157 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (343 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_58FF: /* 0x158 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (344 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_59FF: /* 0x159 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (345 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_5AFF: /* 0x15a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (346 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_5BFF: /* 0x15b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (347 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_5CFF: /* 0x15c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (348 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_5DFF: /* 0x15d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (349 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_5EFF: /* 0x15e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (350 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_5FFF: /* 0x15f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (351 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_60FF: /* 0x160 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (352 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_61FF: /* 0x161 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (353 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_62FF: /* 0x162 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (354 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_63FF: /* 0x163 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (355 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_64FF: /* 0x164 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (356 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_65FF: /* 0x165 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (357 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_66FF: /* 0x166 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (358 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_67FF: /* 0x167 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (359 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_68FF: /* 0x168 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (360 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_69FF: /* 0x169 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (361 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_6AFF: /* 0x16a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (362 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_6BFF: /* 0x16b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (363 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_6CFF: /* 0x16c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (364 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_6DFF: /* 0x16d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (365 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_6EFF: /* 0x16e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (366 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_6FFF: /* 0x16f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (367 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_70FF: /* 0x170 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (368 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_71FF: /* 0x171 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (369 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_72FF: /* 0x172 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (370 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_73FF: /* 0x173 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (371 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_74FF: /* 0x174 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (372 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_75FF: /* 0x175 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (373 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_76FF: /* 0x176 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (374 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_77FF: /* 0x177 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (375 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_78FF: /* 0x178 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (376 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_79FF: /* 0x179 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (377 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_7AFF: /* 0x17a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (378 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_7BFF: /* 0x17b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (379 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_7CFF: /* 0x17c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (380 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_7DFF: /* 0x17d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (381 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_7EFF: /* 0x17e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (382 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_7FFF: /* 0x17f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (383 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_80FF: /* 0x180 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (384 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_81FF: /* 0x181 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (385 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_82FF: /* 0x182 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (386 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_83FF: /* 0x183 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (387 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_84FF: /* 0x184 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (388 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_85FF: /* 0x185 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (389 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_86FF: /* 0x186 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (390 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_87FF: /* 0x187 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (391 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_88FF: /* 0x188 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (392 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_89FF: /* 0x189 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (393 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_8AFF: /* 0x18a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (394 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_8BFF: /* 0x18b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (395 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_8CFF: /* 0x18c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (396 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_8DFF: /* 0x18d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (397 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_8EFF: /* 0x18e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (398 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_8FFF: /* 0x18f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (399 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_90FF: /* 0x190 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (400 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_91FF: /* 0x191 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (401 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_92FF: /* 0x192 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (402 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_93FF: /* 0x193 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (403 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_94FF: /* 0x194 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (404 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_95FF: /* 0x195 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (405 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_96FF: /* 0x196 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (406 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_97FF: /* 0x197 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (407 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_98FF: /* 0x198 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (408 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_99FF: /* 0x199 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (409 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_9AFF: /* 0x19a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (410 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_9BFF: /* 0x19b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (411 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_9CFF: /* 0x19c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (412 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_9DFF: /* 0x19d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (413 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_9EFF: /* 0x19e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (414 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_9FFF: /* 0x19f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (415 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_A0FF: /* 0x1a0 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (416 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_A1FF: /* 0x1a1 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (417 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_A2FF: /* 0x1a2 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (418 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_A3FF: /* 0x1a3 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (419 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_A4FF: /* 0x1a4 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (420 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_A5FF: /* 0x1a5 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (421 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_A6FF: /* 0x1a6 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (422 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_A7FF: /* 0x1a7 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (423 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_A8FF: /* 0x1a8 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (424 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_A9FF: /* 0x1a9 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (425 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_AAFF: /* 0x1aa */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (426 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_ABFF: /* 0x1ab */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (427 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_ACFF: /* 0x1ac */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (428 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_ADFF: /* 0x1ad */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (429 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_AEFF: /* 0x1ae */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.  Note that dvmCheckBefore is called with JAL, not as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (430 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_AFFF: /* 0x1af */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (431 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_B0FF: /* 0x1b0 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (432 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_B1FF: /* 0x1b1 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (433 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_B2FF: /* 0x1b2 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (434 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_B3FF: /* 0x1b3 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (435 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_B4FF: /* 0x1b4 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (436 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_B5FF: /* 0x1b5 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (437 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_B6FF: /* 0x1b6 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (438 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_B7FF: /* 0x1b7 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (439 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_B8FF: /* 0x1b8 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (440 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_B9FF: /* 0x1b9 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (441 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_BAFF: /* 0x1ba */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (442 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_BBFF: /* 0x1bb */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (443 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_BCFF: /* 0x1bc */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (444 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_BDFF: /* 0x1bd */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (445 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_BEFF: /* 0x1be */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (446 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_BFFF: /* 0x1bf */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (447 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_C0FF: /* 0x1c0 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (448 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_C1FF: /* 0x1c1 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (449 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_C2FF: /* 0x1c2 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (450 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_C3FF: /* 0x1c3 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (451 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_C4FF: /* 0x1c4 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (452 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_C5FF: /* 0x1c5 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (453 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_C6FF: /* 0x1c6 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (454 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_C7FF: /* 0x1c7 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (455 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_C8FF: /* 0x1c8 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (456 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_C9FF: /* 0x1c9 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (457 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_CAFF: /* 0x1ca */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (458 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_CBFF: /* 0x1cb */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (459 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_CCFF: /* 0x1cc */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (460 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_CDFF: /* 0x1cd */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (461 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_CEFF: /* 0x1ce */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (462 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_CFFF: /* 0x1cf */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (463 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_D0FF: /* 0x1d0 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (464 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_D1FF: /* 0x1d1 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (465 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_D2FF: /* 0x1d2 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (466 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_D3FF: /* 0x1d3 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (467 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_D4FF: /* 0x1d4 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (468 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_D5FF: /* 0x1d5 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (469 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_D6FF: /* 0x1d6 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (470 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_D7FF: /* 0x1d7 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (471 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_D8FF: /* 0x1d8 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (472 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_D9FF: /* 0x1d9 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (473 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_DAFF: /* 0x1da */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (474 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_DBFF: /* 0x1db */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (475 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_DCFF: /* 0x1dc */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (476 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_DDFF: /* 0x1dd */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (477 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_DEFF: /* 0x1de */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (478 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_DFFF: /* 0x1df */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (479 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_E0FF: /* 0x1e0 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (480 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_E1FF: /* 0x1e1 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (481 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_E2FF: /* 0x1e2 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (482 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_E3FF: /* 0x1e3 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (483 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_E4FF: /* 0x1e4 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (484 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_E5FF: /* 0x1e5 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (485 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_E6FF: /* 0x1e6 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (486 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_E7FF: /* 0x1e7 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (487 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_E8FF: /* 0x1e8 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (488 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_E9FF: /* 0x1e9 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (489 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_EAFF: /* 0x1ea */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (490 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_EBFF: /* 0x1eb */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (491 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_ECFF: /* 0x1ec */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (492 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_EDFF: /* 0x1ed */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (493 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_EEFF: /* 0x1ee */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (494 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_EFFF: /* 0x1ef */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (495 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_F0FF: /* 0x1f0 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (496 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_UNUSED_F1FF: /* 0x1f1 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (497 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_INVOKE_OBJECT_INIT_JUMBO: /* 0x1f2 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (498 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IGET_VOLATILE_JUMBO: /* 0x1f3 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (499 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IGET_WIDE_VOLATILE_JUMBO: /* 0x1f4 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (500 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IGET_OBJECT_VOLATILE_JUMBO: /* 0x1f5 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (501 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IPUT_VOLATILE_JUMBO: /* 0x1f6 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (502 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IPUT_WIDE_VOLATILE_JUMBO: /* 0x1f7 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (503 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IPUT_OBJECT_VOLATILE_JUMBO: /* 0x1f8 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (504 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SGET_VOLATILE_JUMBO: /* 0x1f9 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (505 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SGET_WIDE_VOLATILE_JUMBO: /* 0x1fa */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (506 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SGET_OBJECT_VOLATILE_JUMBO: /* 0x1fb */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (507 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SPUT_VOLATILE_JUMBO: /* 0x1fc */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (508 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SPUT_WIDE_VOLATILE_JUMBO: /* 0x1fd */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (509 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SPUT_OBJECT_VOLATILE_JUMBO: /* 0x1fe */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (510 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_THROW_VERIFICATION_ERROR_JUMBO: /* 0x1ff */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub.  Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler.    Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0.  Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (511 * 128)
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC         # arg0
+    move   a1, rFP         # arg1
+    move   a2, rSELF       # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX
+
+    .balign 128
+    .size   dvmAsmAltInstructionStart, .-dvmAsmAltInstructionStart
+    .global dvmAsmAltInstructionEnd
+dvmAsmAltInstructionEnd:
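Each of the generated MIPS alt stubs above is the same 128-byte pattern with a different handler-table index. As a minimal C sketch of that control flow (hypothetical names throughout; checkBefore stands in for dvmCheckBefore(rPC, rFP, rSELF), and the handler array stands in for dvmAsmInstructionStart):

    /* One alt stub: refresh the cached handler-table base, then either jump
     * straight to the real handler or run the check-before hook first. */
    typedef void (*Handler)(void);

    extern Handler realHandlers[512];      /* dvmAsmInstructionStart + op*128 */
    extern unsigned char breakFlags;       /* offThread_breakFlags(rSELF) */
    void exportPC(void);                   /* EXPORT_PC() */
    void checkBefore(void);                /* dvmCheckBefore(rPC, rFP, rSELF) */

    void altStub(unsigned opcode) {
        Handler real = realHandlers[opcode];  /* la rBIX, start + opcode*128 */
        /* rIBASE is refreshed unconditionally (lw rIBASE, curHandlerTable) */
        if (breakFlags != 0) {                /* bnez a3, 1f */
            exportPC();
            checkBefore();                    /* JAL(dvmCheckBefore) */
        }
        real();                               /* jr rBIX: tail jump */
    }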
diff --git a/vm/mterp/out/InterpAsm-x86-atom.S b/vm/mterp/out/InterpAsm-x86-atom.S
index e7ca17c..f4dfbf7 100644
--- a/vm/mterp/out/InterpAsm-x86-atom.S
+++ b/vm/mterp/out/InterpAsm-x86-atom.S
@@ -27556,7 +27556,10 @@
     movl        rGLUE, rINST            # %ecx<- pMterpGlue
     movl        offGlue_pSelfSuspendCount(rINST), %edx # %ebx<- pSuspendCount (int)
     movl        offGlue_pDebuggerActive(rINST), %eax # %eax<- pDebuggerActive
+    testl       %eax, %eax
+    je          5f
     movl        (%eax), %eax            # %eax<- get debuggerActive (boolean)
+5:
     and         $7, %eax               # %eax<- mask for boolean (just how many bits does it take?)
     cmp         $0, (%edx)             # check if suspend is pending
     jne         2f                      # handle suspend
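The testl/je pair added above guards against a NULL pDebuggerActive pointer: the old code dereferenced it unconditionally. A C sketch of the equivalent guard (hypothetical function name):

    /* Only dereference the debugger-active pointer when it is non-NULL;
     * otherwise treat the flag as false. */
    #include <stddef.h>
    int debuggerActive(const unsigned char* pDebuggerActive) {
        unsigned v = 0;
        if (pDebuggerActive != NULL)   /* testl %eax, %eax ; je 5f */
            v = *pDebuggerActive;      /* movl (%eax), %eax */
        return (v & 7) != 0;           /* and $7, %eax */
    }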
diff --git a/vm/mterp/out/InterpC-allstubs.cpp b/vm/mterp/out/InterpC-allstubs.cpp
index 49a67bb..8e5196c 100644
--- a/vm/mterp/out/InterpC-allstubs.cpp
+++ b/vm/mterp/out/InterpC-allstubs.cpp
@@ -68,6 +68,14 @@
 #if defined(__ARM_EABI__)
 # define NO_UNALIGN_64__UNION
 #endif
+/*
+ * MIPS ABI requires 64-bit alignment for access to 64-bit data types.
+ *
+ * Use memcpy() to do the transfer
+ */
+#if defined(__mips__)
+/* # define NO_UNALIGN_64__UNION */
+#endif
 
 
 //#define LOG_INSTR                   /* verbose debugging */
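Leaving NO_UNALIGN_64__UNION commented out for MIPS selects the memcpy() path for 64-bit accesses into the u4 register array, the same shape as the getLongFromArray helper that appears later in this patch. A standalone sketch of that path:

    /* Copy eight bytes out of a u4-aligned register array instead of casting
     * to an s8*, which the compiler may assume is 8-byte aligned. */
    #include <string.h>
    #include <stdint.h>
    static inline int64_t readLong(const uint32_t* regs, int idx) {
        int64_t val;
        memcpy(&val, &regs[idx], sizeof(val));  /* safe on any alignment */
        return val;
    }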
@@ -452,6 +460,8 @@
     }
 #endif
 
+#define FINISH_BKPT(_opcode)       /* FIXME? */
+#define DISPATCH_EXTENDED(_opcode) /* FIXME? */
 
 /*
  * The "goto label" statements turn into function calls followed by
@@ -488,7 +498,7 @@
  * As a special case, "goto bail" turns into a longjmp.
  */
 #define GOTO_bail()                                                         \
-    dvmMterpStdBail(self, false);
+    dvmMterpStdBail(self)
 
 /*
  * Periodically check for thread suspension.
@@ -1031,7 +1041,7 @@
         }                                                                   \
         SET_REGISTER##_regsize(vdst,                                        \
             dvmGetField##_ftype(obj, ifield->byteOffset));                  \
-        ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name,                   \
+        ILOGV("+ IGET '%s'=0x%08llx", ifield->name,                         \
             (u8) GET_REGISTER##_regsize(vdst));                             \
     }                                                                       \
     FINISH(2);
@@ -1058,7 +1068,7 @@
         }                                                                   \
         SET_REGISTER##_regsize(vdst,                                        \
             dvmGetField##_ftype(obj, ifield->byteOffset));                  \
-        ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name,                   \
+        ILOGV("+ IGET '%s'=0x%08llx", ifield->name,                         \
             (u8) GET_REGISTER##_regsize(vdst));                             \
     }                                                                       \
     FINISH(5);
@@ -1102,7 +1112,7 @@
         }                                                                   \
         dvmSetField##_ftype(obj, ifield->byteOffset,                        \
             GET_REGISTER##_regsize(vdst));                                  \
-        ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name,                   \
+        ILOGV("+ IPUT '%s'=0x%08llx", ifield->name,                         \
             (u8) GET_REGISTER##_regsize(vdst));                             \
     }                                                                       \
     FINISH(2);
@@ -1129,7 +1139,7 @@
         }                                                                   \
         dvmSetField##_ftype(obj, ifield->byteOffset,                        \
             GET_REGISTER##_regsize(vdst));                                  \
-        ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name,                   \
+        ILOGV("+ IPUT '%s'=0x%08llx", ifield->name,                         \
             (u8) GET_REGISTER##_regsize(vdst));                             \
     }                                                                       \
     FINISH(5);
@@ -1179,7 +1189,7 @@
         }                                                                   \
         SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
         ILOGV("+ SGET '%s'=0x%08llx",                                       \
-            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
     }                                                                       \
     FINISH(2);
 
@@ -1202,7 +1212,7 @@
         }                                                                   \
         SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
         ILOGV("+ SGET '%s'=0x%08llx",                                       \
-            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
     }                                                                       \
     FINISH(4);
 
@@ -1225,7 +1235,7 @@
         }                                                                   \
         dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
         ILOGV("+ SPUT '%s'=0x%08llx",                                       \
-            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
     }                                                                       \
     FINISH(2);
 
@@ -1248,7 +1258,7 @@
         }                                                                   \
         dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
         ILOGV("+ SPUT '%s'=0x%08llx",                                       \
-            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
     }                                                                       \
     FINISH(4);
 
@@ -1932,7 +1942,7 @@
 
         vsrc1 = INST_AA(inst);
         offset = FETCH(1) | (((s4) FETCH(2)) << 16);
-        ILOGV("|packed-switch v%d +0x%04x", vsrc1, vsrc2);
+        ILOGV("|packed-switch v%d +0x%04x", vsrc1, offset);
         switchData = pc + offset;       // offset in 16-bit units
 #ifndef NDEBUG
         if (switchData < curMethod->insns ||
@@ -1963,7 +1973,7 @@
 
         vsrc1 = INST_AA(inst);
         offset = FETCH(1) | (((s4) FETCH(2)) << 16);
-        ILOGV("|sparse-switch v%d +0x%04x", vsrc1, vsrc2);
+        ILOGV("|sparse-switch v%d +0x%04x", vsrc1, offset);
         switchData = pc + offset;       // offset in 16-bit units
 #ifndef NDEBUG
         if (switchData < curMethod->insns ||
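Both switch hunks fix the log statement to print offset, the value actually computed, rather than the stale vsrc2. The offset itself is assembled from two 16-bit code units, low half first. A small sketch of that arithmetic (assumes two's-complement conversion, as the interpreter does):

    /* FETCH(1) supplies the low 16 bits, FETCH(2) the sign-carrying high
     * 16 bits of the branch offset, in 16-bit code units. */
    #include <stdint.h>
    int32_t switchOffset(uint16_t lo, uint16_t hi) {
        return (int32_t)((uint32_t)lo | ((uint32_t)hi << 16));
    }
    /* switchOffset(0x0004, 0x0000) == 4; switchOffset(0xfffc, 0xffff) == -4 */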
@@ -4344,7 +4354,7 @@
  * Handler function table, one entry per opcode.
  */
 #undef H
-#define H(_op) dvmMterp_##_op
+#define H(_op) (const void*) dvmMterp_##_op
 DEFINE_GOTO_TABLE(gDvmMterpHandlers)
 
 #undef H
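The added cast is needed because the table built by DEFINE_GOTO_TABLE holds const void* entries, and each function pointer must be converted explicitly. A sketch of the pattern (hypothetical names; the cast back to a function pointer happens at the call site, as in the dvmMterpStdRun loop below):

    /* Store each opcode handler as a const void*, cast back when calling. */
    typedef void (*Handler)(void);
    static void handleNop(void) { /* opcode 0x00 */ }

    static const void* gHandlers[256] = {
        (const void*) handleNop,   /* one entry per opcode */
    };

    static void dispatch(unsigned inst) {
        Handler h = (Handler) gHandlers[inst & 0xff];
        h();
    }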
@@ -4363,12 +4373,12 @@
 {
     jmp_buf jmpBuf;
 
-    self->bailPtr = &jmpBuf;
+    self->interpSave.bailPtr = &jmpBuf;
 
     /* We exit via a longjmp */
     if (setjmp(jmpBuf)) {
         LOGVV("mterp threadid=%d returning", dvmThreadSelf()->threadId);
-        return
+        return;
     }
 
     /* run until somebody longjmp()s out */
@@ -4382,8 +4392,8 @@
          * FINISH code.  For allstubs, we must do an explicit check
          * in the interpretation loop.
          */
-        if (self-interpBreak.ctl.subMode) {
-            dvmCheckBefore(pc, fp, self, curMethod);
+        if (self->interpBreak.ctl.subMode) {
+            dvmCheckBefore(pc, fp, self);
         }
         Handler handler = (Handler) gDvmMterpHandlers[inst & 0xff];
         (void) gDvmMterpHandlerNames;   /* avoid gcc "defined but not used" */
@@ -4398,7 +4408,7 @@
  */
 void dvmMterpStdBail(Thread* self)
 {
-    jmp_buf* pJmpBuf = self->bailPtr;
+    jmp_buf* pJmpBuf = (jmp_buf*) self->interpSave.bailPtr;
     longjmp(*pJmpBuf, 1);
 }
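dvmMterpStdBail now takes only the Thread pointer and longjmps through interpSave.bailPtr, which dvmMterpStdRun primed with setjmp. The pattern in isolation (hypothetical names):

    /* run() parks a jmp_buf; bail() unwinds straight back into it. */
    #include <setjmp.h>

    static jmp_buf* gBailPtr;          /* plays the role of interpSave.bailPtr */

    void bail(void) {
        longjmp(*gBailPtr, 1);
    }

    void run(void) {
        jmp_buf jmpBuf;
        gBailPtr = &jmpBuf;
        if (setjmp(jmpBuf))
            return;                    /* re-entered via bail(): we're done */
        for (;;) {
            /* ... dispatch instructions; a handler eventually bails ... */
            bail();                    /* placeholder so the sketch terminates */
        }
    }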
 
@@ -5366,7 +5376,8 @@
             self->interpSave.method = curMethod;
             methodClassDex = curMethod->clazz->pDvmDex;
             pc = methodToCall->insns;
-            self->interpSave.curFrame = fp = newFp;
+            self->interpSave.curFrame = newFp;
+            fp = newFp;
 #ifdef EASY_GDB
             debugSaveArea = SAVEAREA_FROM_FP(newFp);
 #endif
@@ -5384,7 +5395,7 @@
             DUMP_REGS(methodToCall, newFp, true);   // show input args
 
             if (self->interpBreak.ctl.subMode != 0) {
-                dvmReportPreNativeInvoke(methodToCall, self, fp);
+                dvmReportPreNativeInvoke(methodToCall, self, newSaveArea->prevFrame);
             }
 
             ILOGD("> native <-- %s.%s %s", methodToCall->clazz->descriptor,
@@ -5398,12 +5409,13 @@
             (*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self);
 
             if (self->interpBreak.ctl.subMode != 0) {
-                dvmReportPostNativeInvoke(methodToCall, self, fp);
+                dvmReportPostNativeInvoke(methodToCall, self, newSaveArea->prevFrame);
             }
 
             /* pop frame off */
             dvmPopJniLocals(self, newSaveArea);
-            self->interpSave.curFrame = fp;
+            self->interpSave.curFrame = newSaveArea->prevFrame;
+            fp = newSaveArea->prevFrame;
 
             /*
              * If the native code threw an exception, or interpreted code
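The curFrame fixes around the native call publish the callee frame before invoking nativeFunc and then restore the caller frame from the save area, rather than trusting the cached fp. A sketch of that hand-off (hypothetical types):

    /* Publish the callee frame for the duration of the native call, then
     * restore the caller frame from the save area, not from fp. */
    struct SaveArea { void* prevFrame; };

    void invokeNative(void** curFrame, void* newFp, struct SaveArea* sa,
                      void (*nativeFunc)(void*)) {
        *curFrame = newFp;             /* self->interpSave.curFrame = newFp */
        nativeFunc(newFp);             /* (*methodToCall->nativeFunc)(...) */
        *curFrame = sa->prevFrame;     /* restore caller frame after the pop */
    }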
diff --git a/vm/mterp/out/InterpC-armv5te-vfp.cpp b/vm/mterp/out/InterpC-armv5te-vfp.cpp
index 24fbfdc..f406bc5 100644
--- a/vm/mterp/out/InterpC-armv5te-vfp.cpp
+++ b/vm/mterp/out/InterpC-armv5te-vfp.cpp
@@ -68,6 +68,14 @@
 #if defined(__ARM_EABI__)
 # define NO_UNALIGN_64__UNION
 #endif
+/*
+ * MIPS ABI requires 64-bit alignment for access to 64-bit data types.
+ *
+ * Use memcpy() to do the transfer
+ */
+#if defined(__mips__)
+/* # define NO_UNALIGN_64__UNION */
+#endif
 
 
 //#define LOG_INSTR                   /* verbose debugging */
@@ -452,6 +460,8 @@
     }
 #endif
 
+#define FINISH_BKPT(_opcode)       /* FIXME? */
+#define DISPATCH_EXTENDED(_opcode) /* FIXME? */
 
 /*
  * The "goto label" statements turn into function calls followed by
@@ -488,7 +498,7 @@
  * As a special case, "goto bail" turns into a longjmp.
  */
 #define GOTO_bail()                                                         \
-    dvmMterpStdBail(self, false);
+    dvmMterpStdBail(self)
 
 /*
  * Periodically check for thread suspension.
@@ -1031,7 +1041,7 @@
         }                                                                   \
         SET_REGISTER##_regsize(vdst,                                        \
             dvmGetField##_ftype(obj, ifield->byteOffset));                  \
-        ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name,                   \
+        ILOGV("+ IGET '%s'=0x%08llx", ifield->name,                         \
             (u8) GET_REGISTER##_regsize(vdst));                             \
     }                                                                       \
     FINISH(2);
@@ -1058,7 +1068,7 @@
         }                                                                   \
         SET_REGISTER##_regsize(vdst,                                        \
             dvmGetField##_ftype(obj, ifield->byteOffset));                  \
-        ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name,                   \
+        ILOGV("+ IGET '%s'=0x%08llx", ifield->name,                         \
             (u8) GET_REGISTER##_regsize(vdst));                             \
     }                                                                       \
     FINISH(5);
@@ -1102,7 +1112,7 @@
         }                                                                   \
         dvmSetField##_ftype(obj, ifield->byteOffset,                        \
             GET_REGISTER##_regsize(vdst));                                  \
-        ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name,                   \
+        ILOGV("+ IPUT '%s'=0x%08llx", ifield->name,                         \
             (u8) GET_REGISTER##_regsize(vdst));                             \
     }                                                                       \
     FINISH(2);
@@ -1129,7 +1139,7 @@
         }                                                                   \
         dvmSetField##_ftype(obj, ifield->byteOffset,                        \
             GET_REGISTER##_regsize(vdst));                                  \
-        ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name,                   \
+        ILOGV("+ IPUT '%s'=0x%08llx", ifield->name,                         \
             (u8) GET_REGISTER##_regsize(vdst));                             \
     }                                                                       \
     FINISH(5);
@@ -1179,7 +1189,7 @@
         }                                                                   \
         SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
         ILOGV("+ SGET '%s'=0x%08llx",                                       \
-            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
     }                                                                       \
     FINISH(2);
 
@@ -1202,7 +1212,7 @@
         }                                                                   \
         SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
         ILOGV("+ SGET '%s'=0x%08llx",                                       \
-            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
     }                                                                       \
     FINISH(4);
 
@@ -1225,7 +1235,7 @@
         }                                                                   \
         dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
         ILOGV("+ SPUT '%s'=0x%08llx",                                       \
-            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
     }                                                                       \
     FINISH(2);
 
@@ -1248,7 +1258,7 @@
         }                                                                   \
         dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
         ILOGV("+ SPUT '%s'=0x%08llx",                                       \
-            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
     }                                                                       \
     FINISH(4);
 
diff --git a/vm/mterp/out/InterpC-armv5te.cpp b/vm/mterp/out/InterpC-armv5te.cpp
index b750929..106f53a 100644
--- a/vm/mterp/out/InterpC-armv5te.cpp
+++ b/vm/mterp/out/InterpC-armv5te.cpp
@@ -68,6 +68,14 @@
 #if defined(__ARM_EABI__)
 # define NO_UNALIGN_64__UNION
 #endif
+/*
+ * MIPS ABI requires 64-bit alignment for access to 64-bit data types.
+ *
+ * Use memcpy() to do the transfer
+ */
+#if defined(__mips__)
+/* # define NO_UNALIGN_64__UNION */
+#endif
 
 
 //#define LOG_INSTR                   /* verbose debugging */
@@ -452,6 +460,8 @@
     }
 #endif
 
+#define FINISH_BKPT(_opcode)       /* FIXME? */
+#define DISPATCH_EXTENDED(_opcode) /* FIXME? */
 
 /*
  * The "goto label" statements turn into function calls followed by
@@ -488,7 +498,7 @@
  * As a special case, "goto bail" turns into a longjmp.
  */
 #define GOTO_bail()                                                         \
-    dvmMterpStdBail(self, false);
+    dvmMterpStdBail(self)
 
 /*
  * Periodically check for thread suspension.
@@ -1031,7 +1041,7 @@
         }                                                                   \
         SET_REGISTER##_regsize(vdst,                                        \
             dvmGetField##_ftype(obj, ifield->byteOffset));                  \
-        ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name,                   \
+        ILOGV("+ IGET '%s'=0x%08llx", ifield->name,                         \
             (u8) GET_REGISTER##_regsize(vdst));                             \
     }                                                                       \
     FINISH(2);
@@ -1058,7 +1068,7 @@
         }                                                                   \
         SET_REGISTER##_regsize(vdst,                                        \
             dvmGetField##_ftype(obj, ifield->byteOffset));                  \
-        ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name,                   \
+        ILOGV("+ IGET '%s'=0x%08llx", ifield->name,                         \
             (u8) GET_REGISTER##_regsize(vdst));                             \
     }                                                                       \
     FINISH(5);
@@ -1102,7 +1112,7 @@
         }                                                                   \
         dvmSetField##_ftype(obj, ifield->byteOffset,                        \
             GET_REGISTER##_regsize(vdst));                                  \
-        ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name,                   \
+        ILOGV("+ IPUT '%s'=0x%08llx", ifield->name,                         \
             (u8) GET_REGISTER##_regsize(vdst));                             \
     }                                                                       \
     FINISH(2);
@@ -1129,7 +1139,7 @@
         }                                                                   \
         dvmSetField##_ftype(obj, ifield->byteOffset,                        \
             GET_REGISTER##_regsize(vdst));                                  \
-        ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name,                   \
+        ILOGV("+ IPUT '%s'=0x%08llx", ifield->name,                         \
             (u8) GET_REGISTER##_regsize(vdst));                             \
     }                                                                       \
     FINISH(5);
@@ -1179,7 +1189,7 @@
         }                                                                   \
         SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
         ILOGV("+ SGET '%s'=0x%08llx",                                       \
-            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
     }                                                                       \
     FINISH(2);
 
@@ -1202,7 +1212,7 @@
         }                                                                   \
         SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
         ILOGV("+ SGET '%s'=0x%08llx",                                       \
-            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
     }                                                                       \
     FINISH(4);
 
@@ -1225,7 +1235,7 @@
         }                                                                   \
         dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
         ILOGV("+ SPUT '%s'=0x%08llx",                                       \
-            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
     }                                                                       \
     FINISH(2);
 
@@ -1248,7 +1258,7 @@
         }                                                                   \
         dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
         ILOGV("+ SPUT '%s'=0x%08llx",                                       \
-            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
     }                                                                       \
     FINISH(4);
 
diff --git a/vm/mterp/out/InterpC-armv7-a-neon.cpp b/vm/mterp/out/InterpC-armv7-a-neon.cpp
index 69bf469..1f86f6b 100644
--- a/vm/mterp/out/InterpC-armv7-a-neon.cpp
+++ b/vm/mterp/out/InterpC-armv7-a-neon.cpp
@@ -68,6 +68,14 @@
 #if defined(__ARM_EABI__)
 # define NO_UNALIGN_64__UNION
 #endif
+/*
+ * MIPS ABI requires 64-bit alignment for access to 64-bit data types.
+ *
+ * Use memcpy() to do the transfer
+ */
+#if defined(__mips__)
+/* # define NO_UNALIGN_64__UNION */
+#endif
 
 
 //#define LOG_INSTR                   /* verbose debugging */
@@ -452,6 +460,8 @@
     }
 #endif
 
+#define FINISH_BKPT(_opcode)       /* FIXME? */
+#define DISPATCH_EXTENDED(_opcode) /* FIXME? */
 
 /*
  * The "goto label" statements turn into function calls followed by
@@ -488,7 +498,7 @@
  * As a special case, "goto bail" turns into a longjmp.
  */
 #define GOTO_bail()                                                         \
-    dvmMterpStdBail(self, false);
+    dvmMterpStdBail(self)
 
 /*
  * Periodically check for thread suspension.
@@ -1031,7 +1041,7 @@
         }                                                                   \
         SET_REGISTER##_regsize(vdst,                                        \
             dvmGetField##_ftype(obj, ifield->byteOffset));                  \
-        ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name,                   \
+        ILOGV("+ IGET '%s'=0x%08llx", ifield->name,                         \
             (u8) GET_REGISTER##_regsize(vdst));                             \
     }                                                                       \
     FINISH(2);
@@ -1058,7 +1068,7 @@
         }                                                                   \
         SET_REGISTER##_regsize(vdst,                                        \
             dvmGetField##_ftype(obj, ifield->byteOffset));                  \
-        ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name,                   \
+        ILOGV("+ IGET '%s'=0x%08llx", ifield->name,                         \
             (u8) GET_REGISTER##_regsize(vdst));                             \
     }                                                                       \
     FINISH(5);
@@ -1102,7 +1112,7 @@
         }                                                                   \
         dvmSetField##_ftype(obj, ifield->byteOffset,                        \
             GET_REGISTER##_regsize(vdst));                                  \
-        ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name,                   \
+        ILOGV("+ IPUT '%s'=0x%08llx", ifield->name,                         \
             (u8) GET_REGISTER##_regsize(vdst));                             \
     }                                                                       \
     FINISH(2);
@@ -1129,7 +1139,7 @@
         }                                                                   \
         dvmSetField##_ftype(obj, ifield->byteOffset,                        \
             GET_REGISTER##_regsize(vdst));                                  \
-        ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name,                   \
+        ILOGV("+ IPUT '%s'=0x%08llx", ifield->name,                         \
             (u8) GET_REGISTER##_regsize(vdst));                             \
     }                                                                       \
     FINISH(5);
@@ -1179,7 +1189,7 @@
         }                                                                   \
         SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
         ILOGV("+ SGET '%s'=0x%08llx",                                       \
-            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
     }                                                                       \
     FINISH(2);
 
@@ -1202,7 +1212,7 @@
         }                                                                   \
         SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
         ILOGV("+ SGET '%s'=0x%08llx",                                       \
-            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
     }                                                                       \
     FINISH(4);
 
@@ -1225,7 +1235,7 @@
         }                                                                   \
         dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
         ILOGV("+ SPUT '%s'=0x%08llx",                                       \
-            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
     }                                                                       \
     FINISH(2);
 
@@ -1248,7 +1258,7 @@
         }                                                                   \
         dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
         ILOGV("+ SPUT '%s'=0x%08llx",                                       \
-            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
     }                                                                       \
     FINISH(4);
 
diff --git a/vm/mterp/out/InterpC-armv7-a.cpp b/vm/mterp/out/InterpC-armv7-a.cpp
index 4ca2a1c..ec73724 100644
--- a/vm/mterp/out/InterpC-armv7-a.cpp
+++ b/vm/mterp/out/InterpC-armv7-a.cpp
@@ -68,6 +68,14 @@
 #if defined(__ARM_EABI__)
 # define NO_UNALIGN_64__UNION
 #endif
+/*
+ * MIPS ABI requires 64-bit alignment for access to 64-bit data types.
+ *
+ * Use memcpy() to do the transfer
+ */
+#if defined(__mips__)
+/* # define NO_UNALIGN_64__UNION */
+#endif
 
 
 //#define LOG_INSTR                   /* verbose debugging */
@@ -452,6 +460,8 @@
     }
 #endif
 
+#define FINISH_BKPT(_opcode)       /* FIXME? */
+#define DISPATCH_EXTENDED(_opcode) /* FIXME? */
 
 /*
  * The "goto label" statements turn into function calls followed by
@@ -488,7 +498,7 @@
  * As a special case, "goto bail" turns into a longjmp.
  */
 #define GOTO_bail()                                                         \
-    dvmMterpStdBail(self, false);
+    dvmMterpStdBail(self)
 
 /*
  * Periodically check for thread suspension.
@@ -1031,7 +1041,7 @@
         }                                                                   \
         SET_REGISTER##_regsize(vdst,                                        \
             dvmGetField##_ftype(obj, ifield->byteOffset));                  \
-        ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name,                   \
+        ILOGV("+ IGET '%s'=0x%08llx", ifield->name,                         \
             (u8) GET_REGISTER##_regsize(vdst));                             \
     }                                                                       \
     FINISH(2);
@@ -1058,7 +1068,7 @@
         }                                                                   \
         SET_REGISTER##_regsize(vdst,                                        \
             dvmGetField##_ftype(obj, ifield->byteOffset));                  \
-        ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name,                   \
+        ILOGV("+ IGET '%s'=0x%08llx", ifield->name,                         \
             (u8) GET_REGISTER##_regsize(vdst));                             \
     }                                                                       \
     FINISH(5);
@@ -1102,7 +1112,7 @@
         }                                                                   \
         dvmSetField##_ftype(obj, ifield->byteOffset,                        \
             GET_REGISTER##_regsize(vdst));                                  \
-        ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name,                   \
+        ILOGV("+ IPUT '%s'=0x%08llx", ifield->name,                         \
             (u8) GET_REGISTER##_regsize(vdst));                             \
     }                                                                       \
     FINISH(2);
@@ -1129,7 +1139,7 @@
         }                                                                   \
         dvmSetField##_ftype(obj, ifield->byteOffset,                        \
             GET_REGISTER##_regsize(vdst));                                  \
-        ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name,                   \
+        ILOGV("+ IPUT '%s'=0x%08llx", ifield->name,                         \
             (u8) GET_REGISTER##_regsize(vdst));                             \
     }                                                                       \
     FINISH(5);
@@ -1179,7 +1189,7 @@
         }                                                                   \
         SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
         ILOGV("+ SGET '%s'=0x%08llx",                                       \
-            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
     }                                                                       \
     FINISH(2);
 
@@ -1202,7 +1212,7 @@
         }                                                                   \
         SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
         ILOGV("+ SGET '%s'=0x%08llx",                                       \
-            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
     }                                                                       \
     FINISH(4);
 
@@ -1225,7 +1235,7 @@
         }                                                                   \
         dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
         ILOGV("+ SPUT '%s'=0x%08llx",                                       \
-            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
     }                                                                       \
     FINISH(2);
 
@@ -1248,7 +1258,7 @@
         }                                                                   \
         dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
         ILOGV("+ SPUT '%s'=0x%08llx",                                       \
-            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
     }                                                                       \
     FINISH(4);
 
diff --git a/vm/mterp/out/InterpC-mips.cpp b/vm/mterp/out/InterpC-mips.cpp
new file mode 100644
index 0000000..02f8856
--- /dev/null
+++ b/vm/mterp/out/InterpC-mips.cpp
@@ -0,0 +1,2439 @@
+/*
+ * This file was generated automatically by gen-mterp.py for 'mips'.
+ *
+ * --> DO NOT EDIT <--
+ */
+
+/* File: c/header.cpp */
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* common includes */
+#include "Dalvik.h"
+#include "interp/InterpDefs.h"
+#include "mterp/Mterp.h"
+#include <math.h>                   // needed for fmod, fmodf
+#include "mterp/common/FindInterface.h"
+
+/*
+ * Configuration defines.  These affect the C implementations, i.e. the
+ * portable interpreter(s) and C stubs.
+ *
+ * Some defines are controlled by the Makefile, e.g.:
+ *   WITH_INSTR_CHECKS
+ *   WITH_TRACKREF_CHECKS
+ *   EASY_GDB
+ *   NDEBUG
+ */
+
+#ifdef WITH_INSTR_CHECKS            /* instruction-level paranoia (slow!) */
+# define CHECK_BRANCH_OFFSETS
+# define CHECK_REGISTER_INDICES
+#endif
+
+/*
+ * Some architectures require 64-bit alignment for access to 64-bit data
+ * types.  We can't just use pointers to copy 64-bit values out of our
+ * interpreted register set, because gcc may assume the pointer target is
+ * aligned and generate invalid code.
+ *
+ * There are two common approaches:
+ *  (1) Use a union that defines a 32-bit pair and a 64-bit value.
+ *  (2) Call memcpy().
+ *
+ * Depending upon what compiler you're using and what options are specified,
+ * one may be faster than the other.  For example, the compiler might
+ * convert a memcpy() of 8 bytes into a series of instructions and omit
+ * the call.  The union version could cause some strange side-effects,
+ * e.g. for a while ARM gcc thought it needed separate storage for each
+ * inlined instance, and generated instructions to zero out ~700 bytes of
+ * stack space at the top of the interpreter.
+ *
+ * The default is to use memcpy().  The current gcc for ARM seems to do
+ * better with the union.
+ */
+#if defined(__ARM_EABI__)
+# define NO_UNALIGN_64__UNION
+#endif
+/*
+ * MIPS ABI requires 64-bit alignment for access to 64-bit data types.
+ *
+ * Use memcpy() to do the transfer
+ */
+#if defined(__mips__)
+/* # define NO_UNALIGN_64__UNION */
+#endif
+
+
+//#define LOG_INSTR                   /* verbose debugging */
+/* set and adjust ANDROID_LOG_TAGS='*:i jdwp:i dalvikvm:i dalvikvmi:i' */
+
+/*
+ * Export another copy of the PC on every instruction; this is largely
+ * redundant with EXPORT_PC and the debugger code.  This value can be
+ * compared against what we have stored on the stack with EXPORT_PC to
+ * help ensure that we aren't missing any export calls.
+ */
+#if WITH_EXTRA_GC_CHECKS > 1
+# define EXPORT_EXTRA_PC() (self->currentPc2 = pc)
+#else
+# define EXPORT_EXTRA_PC()
+#endif
+
+/*
+ * Adjust the program counter.  "_offset" is a signed int, in 16-bit units.
+ *
+ * Assumes the existence of "const u2* pc" and "const u2* curMethod->insns".
+ *
+ * We don't advance the program counter until we finish an instruction or
+ * branch, because we don't want to have to unroll the PC if there's an
+ * exception.
+ */
+#ifdef CHECK_BRANCH_OFFSETS
+# define ADJUST_PC(_offset) do {                                            \
+        int myoff = _offset;        /* deref only once */                   \
+        if (pc + myoff < curMethod->insns ||                                \
+            pc + myoff >= curMethod->insns + dvmGetMethodInsnsSize(curMethod)) \
+        {                                                                   \
+            char* desc;                                                     \
+            desc = dexProtoCopyMethodDescriptor(&curMethod->prototype);     \
+            LOGE("Invalid branch %d at 0x%04x in %s.%s %s",                 \
+                myoff, (int) (pc - curMethod->insns),                       \
+                curMethod->clazz->descriptor, curMethod->name, desc);       \
+            free(desc);                                                     \
+            dvmAbort();                                                     \
+        }                                                                   \
+        pc += myoff;                                                        \
+        EXPORT_EXTRA_PC();                                                  \
+    } while (false)
+#else
+# define ADJUST_PC(_offset) do {                                            \
+        pc += _offset;                                                      \
+        EXPORT_EXTRA_PC();                                                  \
+    } while (false)
+#endif
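+/*
+ * Worked example (illustration only): "_offset" counts 16-bit code units,
+ * not bytes.  A standard two-unit instruction finishes with ADJUST_PC(2),
+ * which advances pc by 4 bytes; a branch whose sign-extended offset is -3
+ * rewinds pc by 6 bytes:
+ *
+ *   ADJUST_PC(2);             // pc += 2 code units (4 bytes)
+ *   ADJUST_PC((s2) 0xfffd);   // pc -= 3 code units (6 bytes)
+ */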
+
+/*
+ * If enabled, log instructions as we execute them.
+ */
+#ifdef LOG_INSTR
+# define ILOGD(...) ILOG(LOG_DEBUG, __VA_ARGS__)
+# define ILOGV(...) ILOG(LOG_VERBOSE, __VA_ARGS__)
+# define ILOG(_level, ...) do {                                             \
+        char debugStrBuf[128];                                              \
+        snprintf(debugStrBuf, sizeof(debugStrBuf), __VA_ARGS__);            \
+        if (curMethod != NULL)                                              \
+            LOG(_level, LOG_TAG"i", "%-2d|%04x%s",                          \
+                self->threadId, (int)(pc - curMethod->insns), debugStrBuf); \
+        else                                                                \
+            LOG(_level, LOG_TAG"i", "%-2d|####%s",                          \
+                self->threadId, debugStrBuf);                               \
+    } while(false)
+void dvmDumpRegs(const Method* method, const u4* framePtr, bool inOnly);
+# define DUMP_REGS(_meth, _frame, _inOnly) dvmDumpRegs(_meth, _frame, _inOnly)
+static const char kSpacing[] = "            ";
+#else
+# define ILOGD(...) ((void)0)
+# define ILOGV(...) ((void)0)
+# define DUMP_REGS(_meth, _frame, _inOnly) ((void)0)
+#endif
+
+/* get a long from an array of u4 */
+static inline s8 getLongFromArray(const u4* ptr, int idx)
+{
+#if defined(NO_UNALIGN_64__UNION)
+    union { s8 ll; u4 parts[2]; } conv;
+
+    ptr += idx;
+    conv.parts[0] = ptr[0];
+    conv.parts[1] = ptr[1];
+    return conv.ll;
+#else
+    s8 val;
+    memcpy(&val, &ptr[idx], 8);
+    return val;
+#endif
+}
+
+/* store a long into an array of u4 */
+static inline void putLongToArray(u4* ptr, int idx, s8 val)
+{
+#if defined(NO_UNALIGN_64__UNION)
+    union { s8 ll; u4 parts[2]; } conv;
+
+    ptr += idx;
+    conv.ll = val;
+    ptr[0] = conv.parts[0];
+    ptr[1] = conv.parts[1];
+#else
+    memcpy(&ptr[idx], &val, 8);
+#endif
+}
+
+/* get a double from an array of u4 */
+static inline double getDoubleFromArray(const u4* ptr, int idx)
+{
+#if defined(NO_UNALIGN_64__UNION)
+    union { double d; u4 parts[2]; } conv;
+
+    ptr += idx;
+    conv.parts[0] = ptr[0];
+    conv.parts[1] = ptr[1];
+    return conv.d;
+#else
+    double dval;
+    memcpy(&dval, &ptr[idx], 8);
+    return dval;
+#endif
+}
+
+/* store a double into an array of u4 */
+static inline void putDoubleToArray(u4* ptr, int idx, double dval)
+{
+#if defined(NO_UNALIGN_64__UNION)
+    union { double d; u4 parts[2]; } conv;
+
+    ptr += idx;
+    conv.d = dval;
+    ptr[0] = conv.parts[0];
+    ptr[1] = conv.parts[1];
+#else
+    memcpy(&ptr[idx], &dval, 8);
+#endif
+}
+
+/*
+ * If enabled, validate the register number on every access.  Otherwise,
+ * just do an array access.
+ *
+ * Assumes the existence of "u4* fp".
+ *
+ * "_idx" may be referenced more than once.
+ */
+#ifdef CHECK_REGISTER_INDICES
+# define GET_REGISTER(_idx) \
+    ( (_idx) < curMethod->registersSize ? \
+        (fp[(_idx)]) : (assert(!"bad reg"),1969) )
+# define SET_REGISTER(_idx, _val) \
+    ( (_idx) < curMethod->registersSize ? \
+        (fp[(_idx)] = (u4)(_val)) : (assert(!"bad reg"),1969) )
+# define GET_REGISTER_AS_OBJECT(_idx)       ((Object *)GET_REGISTER(_idx))
+# define SET_REGISTER_AS_OBJECT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
+# define GET_REGISTER_INT(_idx) ((s4) GET_REGISTER(_idx))
+# define SET_REGISTER_INT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
+# define GET_REGISTER_WIDE(_idx) \
+    ( (_idx) < curMethod->registersSize-1 ? \
+        getLongFromArray(fp, (_idx)) : (assert(!"bad reg"),1969) )
+# define SET_REGISTER_WIDE(_idx, _val) \
+    ( (_idx) < curMethod->registersSize-1 ? \
+        (void)putLongToArray(fp, (_idx), (_val)) : assert(!"bad reg") )
+# define GET_REGISTER_FLOAT(_idx) \
+    ( (_idx) < curMethod->registersSize ? \
+        (*((float*) &fp[(_idx)])) : (assert(!"bad reg"),1969.0f) )
+# define SET_REGISTER_FLOAT(_idx, _val) \
+    ( (_idx) < curMethod->registersSize ? \
+        (*((float*) &fp[(_idx)]) = (_val)) : (assert(!"bad reg"),1969.0f) )
+# define GET_REGISTER_DOUBLE(_idx) \
+    ( (_idx) < curMethod->registersSize-1 ? \
+        getDoubleFromArray(fp, (_idx)) : (assert(!"bad reg"),1969.0) )
+# define SET_REGISTER_DOUBLE(_idx, _val) \
+    ( (_idx) < curMethod->registersSize-1 ? \
+        (void)putDoubleToArray(fp, (_idx), (_val)) : assert(!"bad reg") )
+#else
+# define GET_REGISTER(_idx)                 (fp[(_idx)])
+# define SET_REGISTER(_idx, _val)           (fp[(_idx)] = (_val))
+# define GET_REGISTER_AS_OBJECT(_idx)       ((Object*) fp[(_idx)])
+# define SET_REGISTER_AS_OBJECT(_idx, _val) (fp[(_idx)] = (u4)(_val))
+# define GET_REGISTER_INT(_idx)             ((s4)GET_REGISTER(_idx))
+# define SET_REGISTER_INT(_idx, _val)       SET_REGISTER(_idx, (s4)_val)
+# define GET_REGISTER_WIDE(_idx)            getLongFromArray(fp, (_idx))
+# define SET_REGISTER_WIDE(_idx, _val)      putLongToArray(fp, (_idx), (_val))
+# define GET_REGISTER_FLOAT(_idx)           (*((float*) &fp[(_idx)]))
+# define SET_REGISTER_FLOAT(_idx, _val)     (*((float*) &fp[(_idx)]) = (_val))
+# define GET_REGISTER_DOUBLE(_idx)          getDoubleFromArray(fp, (_idx))
+# define SET_REGISTER_DOUBLE(_idx, _val)    putDoubleToArray(fp, (_idx), (_val))
+#endif
+
+/*
+ * Get 16 bits from the specified offset of the program counter.  We always
+ * want to load 16 bits at a time from the instruction stream -- it's more
+ * efficient than 8-bit loads and won't have the alignment problems that
+ * 32-bit loads might.
+ *
+ * Assumes existence of "const u2* pc".
+ */
+#define FETCH(_offset)     (pc[(_offset)])
+
+/*
+ * Extract instruction byte from 16-bit fetch (_inst is a u2).
+ */
+#define INST_INST(_inst)    ((_inst) & 0xff)
+
+/*
+ * Replace the opcode (used when handling breakpoints).  _opcode is a u1.
+ */
+#define INST_REPLACE_OP(_inst, _opcode) (((_inst) & 0xff00) | (_opcode))
+
+/*
+ * Extract the "vA, vB" 4-bit registers from the instruction word (_inst is u2).
+ */
+#define INST_A(_inst)       (((_inst) >> 8) & 0x0f)
+#define INST_B(_inst)       ((_inst) >> 12)
+
+/*
+ * Get the 8-bit "vAA" register index from the instruction word.
+ * (_inst is u2)
+ */
+#define INST_AA(_inst)      ((_inst) >> 8)
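+/*
+ * Worked example (illustration only): for a hypothetical instruction
+ * word inst = 0x3201:
+ *
+ *   INST_INST(0x3201) == 0x01    // opcode byte
+ *   INST_A(0x3201)    == 0x2     // vA, bits 8-11
+ *   INST_B(0x3201)    == 0x3     // vB, bits 12-15
+ *   INST_AA(0x3201)   == 0x32    // vAA, entire high byte
+ */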
+
+/*
+ * The current PC must be available to Throwable constructors, e.g.
+ * those created by the various exception throw routines, so that the
+ * exception stack trace can be generated correctly.  If we don't do this,
+ * the offset within the current method won't be shown correctly.  See the
+ * notes in Exception.cpp.
+ *
+ * This is also used to determine the address for precise GC.
+ *
+ * Assumes existence of "u4* fp" and "const u2* pc".
+ */
+#define EXPORT_PC()         (SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc)
+
+/*
+ * Check to see if "obj" is NULL.  If so, throw an exception.  Assumes the
+ * pc has already been exported to the stack.
+ *
+ * Perform additional checks on debug builds.
+ *
+ * Use this to check for NULL when the instruction handler calls into
+ * something that could throw an exception (so we have already called
+ * EXPORT_PC at the top).
+ */
+static inline bool checkForNull(Object* obj)
+{
+    if (obj == NULL) {
+        dvmThrowNullPointerException(NULL);
+        return false;
+    }
+#ifdef WITH_EXTRA_OBJECT_VALIDATION
+    if (!dvmIsHeapAddress(obj)) {
+        LOGE("Invalid object %p", obj);
+        dvmAbort();
+    }
+#endif
+#ifndef NDEBUG
+    if (obj->clazz == NULL || ((u4) obj->clazz) <= 65536) {
+        /* probable heap corruption */
+        LOGE("Invalid object class %p (in %p)", obj->clazz, obj);
+        dvmAbort();
+    }
+#endif
+    return true;
+}
+
+/*
+ * Check to see if "obj" is NULL.  If so, export the PC into the stack
+ * frame and throw an exception.
+ *
+ * Perform additional checks on debug builds.
+ *
+ * Use this to check for NULL when the instruction handler doesn't do
+ * anything else that can throw an exception.
+ */
+static inline bool checkForNullExportPC(Object* obj, u4* fp, const u2* pc)
+{
+    if (obj == NULL) {
+        EXPORT_PC();
+        dvmThrowNullPointerException(NULL);
+        return false;
+    }
+#ifdef WITH_EXTRA_OBJECT_VALIDATION
+    if (!dvmIsHeapAddress(obj)) {
+        LOGE("Invalid object %p", obj);
+        dvmAbort();
+    }
+#endif
+#ifndef NDEBUG
+    if (obj->clazz == NULL || ((u4) obj->clazz) <= 65536) {
+        /* probable heap corruption */
+        LOGE("Invalid object class %p (in %p)", obj->clazz, obj);
+        dvmAbort();
+    }
+#endif
+    return true;
+}
+
+/* File: cstubs/stubdefs.cpp */
+/*
+ * In the C mterp stubs, "goto" is a function call followed immediately
+ * by a return.
+ */
+
+#define GOTO_TARGET_DECL(_target, ...)                                      \
+    extern "C" void dvmMterp_##_target(Thread* self, ## __VA_ARGS__);
+
+/* (void)xxx to quiet unused variable compiler warnings. */
+#define GOTO_TARGET(_target, ...)                                           \
+    void dvmMterp_##_target(Thread* self, ## __VA_ARGS__) {                 \
+        u2 ref, vsrc1, vsrc2, vdst;                                         \
+        u2 inst = FETCH(0);                                                 \
+        const Method* methodToCall;                                         \
+        StackSaveArea* debugSaveArea;                                       \
+        (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst;        \
+        (void)methodToCall; (void)debugSaveArea;
+
+#define GOTO_TARGET_END }
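+/*
+ * Expansion sketch (illustration only): GOTO_TARGET(returnFromMethod)
+ * opens roughly
+ *
+ *   void dvmMterp_returnFromMethod(Thread* self) {
+ *       u2 ref, vsrc1, vsrc2, vdst;
+ *       u2 inst = FETCH(0);
+ *       ...
+ *
+ * and GOTO_TARGET_END supplies the closing brace.
+ */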
+
+/*
+ * Redefine what used to be local variable accesses into Thread struct
+ * references.  (These are undefined down in "footer.cpp".)
+ */
+#define retval                  self->interpSave.retval
+#define pc                      self->interpSave.pc
+#define fp                      self->interpSave.curFrame
+#define curMethod               self->interpSave.method
+#define methodClassDex          self->interpSave.methodClassDex
+#define debugTrackedRefStart    self->interpSave.debugTrackedRefStart
+
+/* ugh */
+#define STUB_HACK(x) x
+#if defined(WITH_JIT)
+#define JIT_STUB_HACK(x) x
+#else
+#define JIT_STUB_HACK(x)
+#endif
+
+/*
+ * InterpSave's pc and fp must be valid when breaking out to a
+ * "Reportxxx" routine.  Because the portable interpreter uses local
+ * variables for these, we must flush prior.  Stubs, however, use
+ * the interpSave vars directly, so this is a nop for stubs.
+ */
+#define PC_FP_TO_SELF()
+#define PC_TO_SELF()
+
+/*
+ * Opcode handler framing macros.  Here, each opcode is a separate function
+ * that takes a "self" argument and returns void.  We can't declare
+ * these "static" because they may be called from an assembly stub.
+ * (void)xxx to quiet unused variable compiler warnings.
+ */
+#define HANDLE_OPCODE(_op)                                                  \
+    extern "C" void dvmMterp_##_op(Thread* self);                           \
+    void dvmMterp_##_op(Thread* self) {                                     \
+        u4 ref;                                                             \
+        u2 vsrc1, vsrc2, vdst;                                              \
+        u2 inst = FETCH(0);                                                 \
+        (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst;
+
+#define OP_END }
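+/*
+ * Expansion sketch (illustration only): HANDLE_OPCODE(OP_NOP) opens
+ *
+ *   extern "C" void dvmMterp_OP_NOP(Thread* self);
+ *   void dvmMterp_OP_NOP(Thread* self) {
+ *       u4 ref;
+ *       u2 vsrc1, vsrc2, vdst;
+ *       u2 inst = FETCH(0);
+ *       ...
+ *
+ * and OP_END supplies the closing brace.
+ */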
+
+/*
+ * Like the "portable" FINISH, but don't reload "inst", and return to caller
+ * when done.  Further, debugger/profiler checks are handled
+ * before handler execution in mterp, so we don't do them here either.
+ */
+#if defined(WITH_JIT)
+#define FINISH(_offset) {                                                   \
+        ADJUST_PC(_offset);                                                 \
+        if (self->interpBreak.ctl.subMode & kSubModeJitTraceBuild) {        \
+            dvmCheckJit(pc, self);                                          \
+        }                                                                   \
+        return;                                                             \
+    }
+#else
+#define FINISH(_offset) {                                                   \
+        ADJUST_PC(_offset);                                                 \
+        return;                                                             \
+    }
+#endif
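+/*
+ * Usage sketch (illustration only): the simplest handler advances one
+ * code unit and returns to the stub dispatcher:
+ *
+ *   HANDLE_OPCODE(OP_NOP)
+ *       FINISH(1);
+ *   OP_END
+ */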
+
+#define FINISH_BKPT(_opcode)       /* FIXME? */
+#define DISPATCH_EXTENDED(_opcode) /* FIXME? */
+
+/*
+ * The "goto label" statements turn into function calls followed by
+ * return statements.  Some of the functions take arguments, which in the
+ * portable interpreter are handled by assigning values to globals.
+ */
+
+#define GOTO_exceptionThrown()                                              \
+    do {                                                                    \
+        dvmMterp_exceptionThrown(self);                                     \
+        return;                                                             \
+    } while(false)
+
+#define GOTO_returnFromMethod()                                             \
+    do {                                                                    \
+        dvmMterp_returnFromMethod(self);                                    \
+        return;                                                             \
+    } while(false)
+
+#define GOTO_invoke(_target, _methodCallRange, _jumboFormat)                \
+    do {                                                                    \
+        dvmMterp_##_target(self, _methodCallRange, _jumboFormat);           \
+        return;                                                             \
+    } while(false)
+
+#define GOTO_invokeMethod(_methodCallRange, _methodToCall, _vsrc1, _vdst)   \
+    do {                                                                    \
+        dvmMterp_invokeMethod(self, _methodCallRange, _methodToCall,        \
+            _vsrc1, _vdst);                                                 \
+        return;                                                             \
+    } while(false)
+
+/*
+ * As a special case, "goto bail" turns into a longjmp.
+ */
+#define GOTO_bail()                                                         \
+    dvmMterpStdBail(self)
+
+/*
+ * Periodically check for thread suspension.
+ *
+ * While we're at it, see if a debugger has attached or the profiler has
+ * started.
+ */
+#define PERIODIC_CHECKS(_pcadj) {                                           \
+        if (dvmCheckSuspendQuick(self)) {                                   \
+            EXPORT_PC();  /* need for precise GC */                         \
+            dvmCheckSuspendPending(self);                                   \
+        }                                                                   \
+    }
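+/*
+ * Note: in this stub variant "_pcadj" is unused and only a pending
+ * suspend request is polled; debugger/profiler activity is detected
+ * through the interpBreak mechanism before handler execution instead.
+ */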
+
+/* File: c/opcommon.cpp */
+/* forward declarations of goto targets */
+GOTO_TARGET_DECL(filledNewArray, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeVirtual, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeSuper, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeInterface, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeDirect, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeStatic, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeVirtualQuick, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeSuperQuick, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeMethod, bool methodCallRange, const Method* methodToCall,
+    u2 count, u2 regs);
+GOTO_TARGET_DECL(returnFromMethod);
+GOTO_TARGET_DECL(exceptionThrown);
+
+/*
+ * ===========================================================================
+ *
+ * What follows are opcode definitions shared between multiple opcodes with
+ * minor substitutions handled by the C pre-processor.  These should probably
+ * use the mterp substitution mechanism instead, with the code here moved
+ * into common fragment files (like the asm "binop.S"), although it's hard
+ * to give up the C preprocessor in favor of the much simpler text
+ * substitution mechanism.
+ *
+ * ===========================================================================
+ */
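+/*
+ * Substitution sketch (illustration only): token pasting selects the
+ * typed register accessors.  For example, an instantiation such as
+ *
+ *   HANDLE_NUMCONV(OP_INT_TO_LONG, "int-to-long", _INT, _WIDE)
+ *
+ * turns the store/load pair into
+ *
+ *   SET_REGISTER_WIDE(vdst, GET_REGISTER_INT(vsrc1));
+ */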
+
+#define HANDLE_NUMCONV(_opcode, _opname, _fromtype, _totype)                \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1);                       \
+        SET_REGISTER##_totype(vdst,                                         \
+            GET_REGISTER##_fromtype(vsrc1));                                \
+        FINISH(1);
+
+#define HANDLE_FLOAT_TO_INT(_opcode, _opname, _fromvtype, _fromrtype,       \
+        _tovtype, _tortype)                                                 \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+    {                                                                       \
+        /* spec defines specific handling for +/- inf and NaN values */     \
+        _fromvtype val;                                                     \
+        _tovtype intMin, intMax, result;                                    \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1);                       \
+        val = GET_REGISTER##_fromrtype(vsrc1);                              \
+        intMin = (_tovtype) 1 << (sizeof(_tovtype) * 8 - 1);               \
+        intMax = ~intMin;                                                   \
+        if (val >= intMax)          /* +inf */                              \
+            result = intMax;                                                \
+        else if (val <= intMin)     /* -inf */                              \
+            result = intMin;                                                \
+        else if (val != val)        /* NaN */                               \
+            result = 0;                                                     \
+        else                                                                \
+            result = (_tovtype) val;                                        \
+        SET_REGISTER##_tortype(vdst, result);                               \
+    }                                                                       \
+    FINISH(1);
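+/*
+ * Worked example (illustration only): converting to s4, the bounds are
+ *
+ *   intMin = (s4) 1 << 31;    // 0x80000000 == -2147483648
+ *   intMax = ~intMin;         // 0x7fffffff ==  2147483647
+ *
+ * so +inf clamps to intMax, -inf clamps to intMin, and NaN becomes 0,
+ * as the Dalvik spec requires.
+ */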
+
+#define HANDLE_INT_TO_SMALL(_opcode, _opname, _type)                        \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|int-to-%s v%d,v%d", (_opname), vdst, vsrc1);                \
+        SET_REGISTER(vdst, (_type) GET_REGISTER(vsrc1));                    \
+        FINISH(1);
+
+/* NOTE: the comparison result is always a signed 4-byte integer */
+#define HANDLE_OP_CMPX(_opcode, _opname, _varType, _type, _nanVal)          \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        int result;                                                         \
+        u2 regs;                                                            \
+        _varType val1, val2;                                                \
+        vdst = INST_AA(inst);                                               \
+        regs = FETCH(1);                                                    \
+        vsrc1 = regs & 0xff;                                                \
+        vsrc2 = regs >> 8;                                                  \
+        ILOGV("|cmp%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2);         \
+        val1 = GET_REGISTER##_type(vsrc1);                                  \
+        val2 = GET_REGISTER##_type(vsrc2);                                  \
+        if (val1 == val2)                                                   \
+            result = 0;                                                     \
+        else if (val1 < val2)                                               \
+            result = -1;                                                    \
+        else if (val1 > val2)                                               \
+            result = 1;                                                     \
+        else                                                                \
+            result = (_nanVal);                                             \
+        ILOGV("+ result=%d", result);                                       \
+        SET_REGISTER(vdst, result);                                         \
+    }                                                                       \
+    FINISH(2);
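+/*
+ * Example (illustration only): with NaN in either operand all three
+ * ordered tests fail, so the final "else" supplies _nanVal as the bias:
+ *
+ *   cmpl-float: _nanVal == -1    // NaN sorts as "less than"
+ *   cmpg-float: _nanVal ==  1    // NaN sorts as "greater than"
+ */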
+
+#define HANDLE_OP_IF_XX(_opcode, _opname, _cmp)                             \
+    HANDLE_OPCODE(_opcode /*vA, vB, +CCCC*/)                                \
+        vsrc1 = INST_A(inst);                                               \
+        vsrc2 = INST_B(inst);                                               \
+        if ((s4) GET_REGISTER(vsrc1) _cmp (s4) GET_REGISTER(vsrc2)) {       \
+            int branchOffset = (s2)FETCH(1);    /* sign-extended */         \
+            ILOGV("|if-%s v%d,v%d,+0x%04x", (_opname), vsrc1, vsrc2,        \
+                branchOffset);                                              \
+            ILOGV("> branch taken");                                        \
+            if (branchOffset < 0)                                           \
+                PERIODIC_CHECKS(branchOffset);                              \
+            FINISH(branchOffset);                                           \
+        } else {                                                            \
+            ILOGV("|if-%s v%d,v%d,-", (_opname), vsrc1, vsrc2);             \
+            FINISH(2);                                                      \
+        }
+
+#define HANDLE_OP_IF_XXZ(_opcode, _opname, _cmp)                            \
+    HANDLE_OPCODE(_opcode /*vAA, +BBBB*/)                                   \
+        vsrc1 = INST_AA(inst);                                              \
+        if ((s4) GET_REGISTER(vsrc1) _cmp 0) {                              \
+            int branchOffset = (s2)FETCH(1);    /* sign-extended */         \
+            ILOGV("|if-%s v%d,+0x%04x", (_opname), vsrc1, branchOffset);    \
+            ILOGV("> branch taken");                                        \
+            if (branchOffset < 0)                                           \
+                PERIODIC_CHECKS(branchOffset);                              \
+            FINISH(branchOffset);                                           \
+        } else {                                                            \
+            ILOGV("|if-%s v%d,-", (_opname), vsrc1);                        \
+            FINISH(2);                                                      \
+        }
+
+#define HANDLE_UNOP(_opcode, _opname, _pfx, _sfx, _type)                    \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1);                       \
+        SET_REGISTER##_type(vdst, _pfx GET_REGISTER##_type(vsrc1) _sfx);    \
+        FINISH(1);
+
+#define HANDLE_OP_X_INT(_opcode, _opname, _op, _chkdiv)                     \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        u2 srcRegs;                                                         \
+        vdst = INST_AA(inst);                                               \
+        srcRegs = FETCH(1);                                                 \
+        vsrc1 = srcRegs & 0xff;                                             \
+        vsrc2 = srcRegs >> 8;                                               \
+        ILOGV("|%s-int v%d,v%d", (_opname), vdst, vsrc1);                   \
+        if (_chkdiv != 0) {                                                 \
+            s4 firstVal, secondVal, result;                                 \
+            firstVal = GET_REGISTER(vsrc1);                                 \
+            secondVal = GET_REGISTER(vsrc2);                                \
+            if (secondVal == 0) {                                           \
+                EXPORT_PC();                                                \
+                dvmThrowArithmeticException("divide by zero");              \
+                GOTO_exceptionThrown();                                     \
+            }                                                               \
+            if ((u4)firstVal == 0x80000000 && secondVal == -1) {            \
+                if (_chkdiv == 1)                                           \
+                    result = firstVal;  /* division */                      \
+                else                                                        \
+                    result = 0;         /* remainder */                     \
+            } else {                                                        \
+                result = firstVal _op secondVal;                            \
+            }                                                               \
+            SET_REGISTER(vdst, result);                                     \
+        } else {                                                            \
+            /* non-div/rem case */                                          \
+            SET_REGISTER(vdst,                                              \
+                (s4) GET_REGISTER(vsrc1) _op (s4) GET_REGISTER(vsrc2));     \
+        }                                                                   \
+    }                                                                       \
+    FINISH(2);
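+/*
+ * Worked example (illustration only): the 0x80000000/-1 special case
+ * sidesteps the one s4 division that overflows:
+ *
+ *   div-int: -2147483648 / -1  -->  -2147483648   (result = firstVal)
+ *   rem-int: -2147483648 % -1  -->  0             (result = 0)
+ *
+ * Evaluating the raw _op there would be undefined behavior in C (and
+ * traps on some hardware).
+ */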
+
+#define HANDLE_OP_SHX_INT(_opcode, _opname, _cast, _op)                     \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        u2 srcRegs;                                                         \
+        vdst = INST_AA(inst);                                               \
+        srcRegs = FETCH(1);                                                 \
+        vsrc1 = srcRegs & 0xff;                                             \
+        vsrc2 = srcRegs >> 8;                                               \
+        ILOGV("|%s-int v%d,v%d", (_opname), vdst, vsrc1);                   \
+        SET_REGISTER(vdst,                                                  \
+            _cast GET_REGISTER(vsrc1) _op (GET_REGISTER(vsrc2) & 0x1f));    \
+    }                                                                       \
+    FINISH(2);
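+/*
+ * Example (illustration only): the "& 0x1f" mask means only the low five
+ * bits of the shift count are significant for 32-bit shifts, so
+ *
+ *   shl-int v0, v1, v2  with v2 == 33  ==>  v0 = v1 << 1
+ */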
+
+#define HANDLE_OP_X_INT_LIT16(_opcode, _opname, _op, _chkdiv)               \
+    HANDLE_OPCODE(_opcode /*vA, vB, #+CCCC*/)                               \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        vsrc2 = FETCH(1);                                                   \
+        ILOGV("|%s-int/lit16 v%d,v%d,#+0x%04x",                             \
+            (_opname), vdst, vsrc1, vsrc2);                                 \
+        if (_chkdiv != 0) {                                                 \
+            s4 firstVal, result;                                            \
+            firstVal = GET_REGISTER(vsrc1);                                 \
+            if ((s2) vsrc2 == 0) {                                          \
+                EXPORT_PC();                                                \
+                dvmThrowArithmeticException("divide by zero");              \
+                GOTO_exceptionThrown();                                     \
+            }                                                               \
+            if ((u4)firstVal == 0x80000000 && ((s2) vsrc2) == -1) {         \
+                /* won't generate /lit16 instr for this; check anyway */    \
+                if (_chkdiv == 1)                                           \
+                    result = firstVal;  /* division */                      \
+                else                                                        \
+                    result = 0;         /* remainder */                     \
+            } else {                                                        \
+                result = firstVal _op (s2) vsrc2;                           \
+            }                                                               \
+            SET_REGISTER(vdst, result);                                     \
+        } else {                                                            \
+            /* non-div/rem case */                                          \
+            SET_REGISTER(vdst, GET_REGISTER(vsrc1) _op (s2) vsrc2);         \
+        }                                                                   \
+        FINISH(2);
+
+#define HANDLE_OP_X_INT_LIT8(_opcode, _opname, _op, _chkdiv)                \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/)                               \
+    {                                                                       \
+        u2 litInfo;                                                         \
+        vdst = INST_AA(inst);                                               \
+        litInfo = FETCH(1);                                                 \
+        vsrc1 = litInfo & 0xff;                                             \
+        vsrc2 = litInfo >> 8;       /* constant */                          \
+        ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x",                              \
+            (_opname), vdst, vsrc1, vsrc2);                                 \
+        if (_chkdiv != 0) {                                                 \
+            s4 firstVal, result;                                            \
+            firstVal = GET_REGISTER(vsrc1);                                 \
+            if ((s1) vsrc2 == 0) {                                          \
+                EXPORT_PC();                                                \
+                dvmThrowArithmeticException("divide by zero");              \
+                GOTO_exceptionThrown();                                     \
+            }                                                               \
+            if ((u4)firstVal == 0x80000000 && ((s1) vsrc2) == -1) {         \
+                if (_chkdiv == 1)                                           \
+                    result = firstVal;  /* division */                      \
+                else                                                        \
+                    result = 0;         /* remainder */                     \
+            } else {                                                        \
+                result = firstVal _op ((s1) vsrc2);                         \
+            }                                                               \
+            SET_REGISTER(vdst, result);                                     \
+        } else {                                                            \
+            SET_REGISTER(vdst,                                              \
+                (s4) GET_REGISTER(vsrc1) _op (s1) vsrc2);                   \
+        }                                                                   \
+    }                                                                       \
+    FINISH(2);
+
+#define HANDLE_OP_SHX_INT_LIT8(_opcode, _opname, _cast, _op)                \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/)                               \
+    {                                                                       \
+        u2 litInfo;                                                         \
+        vdst = INST_AA(inst);                                               \
+        litInfo = FETCH(1);                                                 \
+        vsrc1 = litInfo & 0xff;                                             \
+        vsrc2 = litInfo >> 8;       /* constant */                          \
+        ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x",                              \
+            (_opname), vdst, vsrc1, vsrc2);                                 \
+        SET_REGISTER(vdst,                                                  \
+            _cast GET_REGISTER(vsrc1) _op (vsrc2 & 0x1f));                  \
+    }                                                                       \
+    FINISH(2);
+
+#define HANDLE_OP_X_INT_2ADDR(_opcode, _opname, _op, _chkdiv)               \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1);             \
+        if (_chkdiv != 0) {                                                 \
+            s4 firstVal, secondVal, result;                                 \
+            firstVal = GET_REGISTER(vdst);                                  \
+            secondVal = GET_REGISTER(vsrc1);                                \
+            if (secondVal == 0) {                                           \
+                EXPORT_PC();                                                \
+                dvmThrowArithmeticException("divide by zero");              \
+                GOTO_exceptionThrown();                                     \
+            }                                                               \
+            if ((u4)firstVal == 0x80000000 && secondVal == -1) {            \
+                if (_chkdiv == 1)                                           \
+                    result = firstVal;  /* division */                      \
+                else                                                        \
+                    result = 0;         /* remainder */                     \
+            } else {                                                        \
+                result = firstVal _op secondVal;                            \
+            }                                                               \
+            SET_REGISTER(vdst, result);                                     \
+        } else {                                                            \
+            SET_REGISTER(vdst,                                              \
+                (s4) GET_REGISTER(vdst) _op (s4) GET_REGISTER(vsrc1));      \
+        }                                                                   \
+        FINISH(1);
+
+#define HANDLE_OP_SHX_INT_2ADDR(_opcode, _opname, _cast, _op)               \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1);             \
+        SET_REGISTER(vdst,                                                  \
+            _cast GET_REGISTER(vdst) _op (GET_REGISTER(vsrc1) & 0x1f));     \
+        FINISH(1);
+
+#define HANDLE_OP_X_LONG(_opcode, _opname, _op, _chkdiv)                    \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        u2 srcRegs;                                                         \
+        vdst = INST_AA(inst);                                               \
+        srcRegs = FETCH(1);                                                 \
+        vsrc1 = srcRegs & 0xff;                                             \
+        vsrc2 = srcRegs >> 8;                                               \
+        ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2);       \
+        if (_chkdiv != 0) {                                                 \
+            s8 firstVal, secondVal, result;                                 \
+            firstVal = GET_REGISTER_WIDE(vsrc1);                            \
+            secondVal = GET_REGISTER_WIDE(vsrc2);                           \
+            if (secondVal == 0LL) {                                         \
+                EXPORT_PC();                                                \
+                dvmThrowArithmeticException("divide by zero");              \
+                GOTO_exceptionThrown();                                     \
+            }                                                               \
+            if ((u8)firstVal == 0x8000000000000000ULL &&                    \
+                secondVal == -1LL)                                          \
+            {                                                               \
+                if (_chkdiv == 1)                                           \
+                    result = firstVal;  /* division */                      \
+                else                                                        \
+                    result = 0;         /* remainder */                     \
+            } else {                                                        \
+                result = firstVal _op secondVal;                            \
+            }                                                               \
+            SET_REGISTER_WIDE(vdst, result);                                \
+        } else {                                                            \
+            SET_REGISTER_WIDE(vdst,                                         \
+                (s8) GET_REGISTER_WIDE(vsrc1) _op (s8) GET_REGISTER_WIDE(vsrc2)); \
+        }                                                                   \
+    }                                                                       \
+    FINISH(2);
+
+#define HANDLE_OP_SHX_LONG(_opcode, _opname, _cast, _op)                    \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        u2 srcRegs;                                                         \
+        vdst = INST_AA(inst);                                               \
+        srcRegs = FETCH(1);                                                 \
+        vsrc1 = srcRegs & 0xff;                                             \
+        vsrc2 = srcRegs >> 8;                                               \
+        ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2);       \
+        SET_REGISTER_WIDE(vdst,                                             \
+            _cast GET_REGISTER_WIDE(vsrc1) _op (GET_REGISTER(vsrc2) & 0x3f)); \
+    }                                                                       \
+    FINISH(2);
+
+#define HANDLE_OP_X_LONG_2ADDR(_opcode, _opname, _op, _chkdiv)              \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s-long-2addr v%d,v%d", (_opname), vdst, vsrc1);            \
+        if (_chkdiv != 0) {                                                 \
+            s8 firstVal, secondVal, result;                                 \
+            firstVal = GET_REGISTER_WIDE(vdst);                             \
+            secondVal = GET_REGISTER_WIDE(vsrc1);                           \
+            if (secondVal == 0LL) {                                         \
+                EXPORT_PC();                                                \
+                dvmThrowArithmeticException("divide by zero");              \
+                GOTO_exceptionThrown();                                     \
+            }                                                               \
+            if ((u8)firstVal == 0x8000000000000000ULL &&                    \
+                secondVal == -1LL)                                          \
+            {                                                               \
+                if (_chkdiv == 1)                                           \
+                    result = firstVal;  /* division */                      \
+                else                                                        \
+                    result = 0;         /* remainder */                     \
+            } else {                                                        \
+                result = firstVal _op secondVal;                            \
+            }                                                               \
+            SET_REGISTER_WIDE(vdst, result);                                \
+        } else {                                                            \
+            SET_REGISTER_WIDE(vdst,                                         \
+                (s8) GET_REGISTER_WIDE(vdst) _op (s8)GET_REGISTER_WIDE(vsrc1));\
+        }                                                                   \
+        FINISH(1);
+
+#define HANDLE_OP_SHX_LONG_2ADDR(_opcode, _opname, _cast, _op)              \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s-long-2addr v%d,v%d", (_opname), vdst, vsrc1);            \
+        SET_REGISTER_WIDE(vdst,                                             \
+            _cast GET_REGISTER_WIDE(vdst) _op (GET_REGISTER(vsrc1) & 0x3f)); \
+        FINISH(1);
+
+#define HANDLE_OP_X_FLOAT(_opcode, _opname, _op)                            \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        u2 srcRegs;                                                         \
+        vdst = INST_AA(inst);                                               \
+        srcRegs = FETCH(1);                                                 \
+        vsrc1 = srcRegs & 0xff;                                             \
+        vsrc2 = srcRegs >> 8;                                               \
+        ILOGV("|%s-float v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2);      \
+        SET_REGISTER_FLOAT(vdst,                                            \
+            GET_REGISTER_FLOAT(vsrc1) _op GET_REGISTER_FLOAT(vsrc2));       \
+    }                                                                       \
+    FINISH(2);
+
+#define HANDLE_OP_X_DOUBLE(_opcode, _opname, _op)                           \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        u2 srcRegs;                                                         \
+        vdst = INST_AA(inst);                                               \
+        srcRegs = FETCH(1);                                                 \
+        vsrc1 = srcRegs & 0xff;                                             \
+        vsrc2 = srcRegs >> 8;                                               \
+        ILOGV("|%s-double v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2);     \
+        SET_REGISTER_DOUBLE(vdst,                                           \
+            GET_REGISTER_DOUBLE(vsrc1) _op GET_REGISTER_DOUBLE(vsrc2));     \
+    }                                                                       \
+    FINISH(2);
+
+#define HANDLE_OP_X_FLOAT_2ADDR(_opcode, _opname, _op)                      \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s-float-2addr v%d,v%d", (_opname), vdst, vsrc1);           \
+        SET_REGISTER_FLOAT(vdst,                                            \
+            GET_REGISTER_FLOAT(vdst) _op GET_REGISTER_FLOAT(vsrc1));        \
+        FINISH(1);
+
+#define HANDLE_OP_X_DOUBLE_2ADDR(_opcode, _opname, _op)                     \
+    HANDLE_OPCODE(_opcode /*vA, vB*/)                                       \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);                                               \
+        ILOGV("|%s-double-2addr v%d,v%d", (_opname), vdst, vsrc1);          \
+        SET_REGISTER_DOUBLE(vdst,                                           \
+            GET_REGISTER_DOUBLE(vdst) _op GET_REGISTER_DOUBLE(vsrc1));      \
+        FINISH(1);
+
+#define HANDLE_OP_AGET(_opcode, _opname, _type, _regsize)                   \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        ArrayObject* arrayObj;                                              \
+        u2 arrayInfo;                                                       \
+        EXPORT_PC();                                                        \
+        vdst = INST_AA(inst);                                               \
+        arrayInfo = FETCH(1);                                               \
+        vsrc1 = arrayInfo & 0xff;    /* array ptr */                        \
+        vsrc2 = arrayInfo >> 8;      /* index */                            \
+        ILOGV("|aget%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2);        \
+        arrayObj = (ArrayObject*) GET_REGISTER(vsrc1);                      \
+        if (!checkForNull((Object*) arrayObj))                              \
+            GOTO_exceptionThrown();                                         \
+        if (GET_REGISTER(vsrc2) >= arrayObj->length) {                      \
+            dvmThrowArrayIndexOutOfBoundsException(                         \
+                arrayObj->length, GET_REGISTER(vsrc2));                     \
+            GOTO_exceptionThrown();                                         \
+        }                                                                   \
+        SET_REGISTER##_regsize(vdst,                                        \
+            ((_type*)(void*)arrayObj->contents)[GET_REGISTER(vsrc2)]);      \
+        ILOGV("+ AGET[%d]=%#x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));   \
+    }                                                                       \
+    FINISH(2);
+
+#define HANDLE_OP_APUT(_opcode, _opname, _type, _regsize)                   \
+    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/)                                \
+    {                                                                       \
+        ArrayObject* arrayObj;                                              \
+        u2 arrayInfo;                                                       \
+        EXPORT_PC();                                                        \
+        vdst = INST_AA(inst);       /* AA: source value */                  \
+        arrayInfo = FETCH(1);                                               \
+        vsrc1 = arrayInfo & 0xff;   /* BB: array ptr */                     \
+        vsrc2 = arrayInfo >> 8;     /* CC: index */                         \
+        ILOGV("|aput%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2);        \
+        arrayObj = (ArrayObject*) GET_REGISTER(vsrc1);                      \
+        if (!checkForNull((Object*) arrayObj))                              \
+            GOTO_exceptionThrown();                                         \
+        if (GET_REGISTER(vsrc2) >= arrayObj->length) {                      \
+            dvmThrowArrayIndexOutOfBoundsException(                         \
+                arrayObj->length, GET_REGISTER(vsrc2));                     \
+            GOTO_exceptionThrown();                                         \
+        }                                                                   \
+        ILOGV("+ APUT[%d]=0x%08x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));\
+        ((_type*)(void*)arrayObj->contents)[GET_REGISTER(vsrc2)] =          \
+            GET_REGISTER##_regsize(vdst);                                   \
+    }                                                                       \
+    FINISH(2);
+
+/*
+ * It's possible to get a bad value out of a field with sub-32-bit stores
+ * because the -quick versions always operate on 32 bits.  Consider:
+ *   short foo = -1  (sets a 32-bit register to 0xffffffff)
+ *   iput-quick foo  (writes all 32 bits to the field)
+ *   short bar = 1   (sets a 32-bit register to 0x00000001)
+ *   iput-short      (writes the low 16 bits to the field)
+ *   iget-quick foo  (reads all 32 bits from the field, yielding 0xffff0001)
+ * This can only happen when optimized and non-optimized code has interleaved
+ * access to the same field.  This is unlikely but possible.
+ *
+ * The easiest way to fix this is to always read/write 32 bits at a time.  On
+ * a device with a 16-bit data bus this is sub-optimal.  (The alternative
+ * approach is to have sub-int versions of iget-quick, but now we're wasting
+ * Dalvik instruction space and making it less likely that handler code will
+ * already be in the CPU i-cache.)
+ */
+#define HANDLE_IGET_X(_opcode, _opname, _ftype, _regsize)                   \
+    HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
+    {                                                                       \
+        InstField* ifield;                                                  \
+        Object* obj;                                                        \
+        EXPORT_PC();                                                        \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);   /* object ptr */                            \
+        ref = FETCH(1);         /* field ref */                             \
+        ILOGV("|iget%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNull(obj))                                             \
+            GOTO_exceptionThrown();                                         \
+        ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref);  \
+        if (ifield == NULL) {                                               \
+            ifield = dvmResolveInstField(curMethod->clazz, ref);            \
+            if (ifield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+        }                                                                   \
+        SET_REGISTER##_regsize(vdst,                                        \
+            dvmGetField##_ftype(obj, ifield->byteOffset));                  \
+        ILOGV("+ IGET '%s'=0x%08llx", ifield->name,                         \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+    }                                                                       \
+    FINISH(2);
+
+#define HANDLE_IGET_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, vCCCC, field@AAAAAAAA*/)                 \
+    {                                                                       \
+        InstField* ifield;                                                  \
+        Object* obj;                                                        \
+        EXPORT_PC();                                                        \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        vsrc1 = FETCH(4);                      /* object ptr */             \
+        ILOGV("|iget%s/jumbo v%d,v%d,field@0x%08x",                         \
+            (_opname), vdst, vsrc1, ref);                                   \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNull(obj))                                             \
+            GOTO_exceptionThrown();                                         \
+        ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref);  \
+        if (ifield == NULL) {                                               \
+            ifield = dvmResolveInstField(curMethod->clazz, ref);            \
+            if (ifield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+        }                                                                   \
+        SET_REGISTER##_regsize(vdst,                                        \
+            dvmGetField##_ftype(obj, ifield->byteOffset));                  \
+        ILOGV("+ IGET '%s'=0x%08llx", ifield->name,                         \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+    }                                                                       \
+    FINISH(5);
+
+#define HANDLE_IGET_X_QUICK(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
+    {                                                                       \
+        Object* obj;                                                        \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);   /* object ptr */                            \
+        ref = FETCH(1);         /* field offset */                          \
+        ILOGV("|iget%s-quick v%d,v%d,field@+%u",                            \
+            (_opname), vdst, vsrc1, ref);                                   \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNullExportPC(obj, fp, pc))                             \
+            GOTO_exceptionThrown();                                         \
+        SET_REGISTER##_regsize(vdst, dvmGetField##_ftype(obj, ref));        \
+        ILOGV("+ IGETQ %d=0x%08llx", ref,                                   \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+    }                                                                       \
+    FINISH(2);
+
+#define HANDLE_IPUT_X(_opcode, _opname, _ftype, _regsize)                   \
+    HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
+    {                                                                       \
+        InstField* ifield;                                                  \
+        Object* obj;                                                        \
+        EXPORT_PC();                                                        \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);   /* object ptr */                            \
+        ref = FETCH(1);         /* field ref */                             \
+        ILOGV("|iput%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNull(obj))                                             \
+            GOTO_exceptionThrown();                                         \
+        ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref);  \
+        if (ifield == NULL) {                                               \
+            ifield = dvmResolveInstField(curMethod->clazz, ref);            \
+            if (ifield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+        }                                                                   \
+        dvmSetField##_ftype(obj, ifield->byteOffset,                        \
+            GET_REGISTER##_regsize(vdst));                                  \
+        ILOGV("+ IPUT '%s'=0x%08llx", ifield->name,                         \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+    }                                                                       \
+    FINISH(2);
+
+#define HANDLE_IPUT_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, vCCCC, field@AAAAAAAA*/)                 \
+    {                                                                       \
+        InstField* ifield;                                                  \
+        Object* obj;                                                        \
+        EXPORT_PC();                                                        \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        vsrc1 = FETCH(4);                      /* object ptr */             \
+        ILOGV("|iput%s/jumbo v%d,v%d,field@0x%08x",                         \
+            (_opname), vdst, vsrc1, ref);                                   \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNull(obj))                                             \
+            GOTO_exceptionThrown();                                         \
+        ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref);  \
+        if (ifield == NULL) {                                               \
+            ifield = dvmResolveInstField(curMethod->clazz, ref);            \
+            if (ifield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+        }                                                                   \
+        dvmSetField##_ftype(obj, ifield->byteOffset,                        \
+            GET_REGISTER##_regsize(vdst));                                  \
+        ILOGV("+ IPUT '%s'=0x%08llx", ifield->name,                         \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+    }                                                                       \
+    FINISH(5);
+
+#define HANDLE_IPUT_X_QUICK(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/)                           \
+    {                                                                       \
+        Object* obj;                                                        \
+        vdst = INST_A(inst);                                                \
+        vsrc1 = INST_B(inst);   /* object ptr */                            \
+        ref = FETCH(1);         /* field offset */                          \
+        ILOGV("|iput%s-quick v%d,v%d,field@0x%04x",                         \
+            (_opname), vdst, vsrc1, ref);                                   \
+        obj = (Object*) GET_REGISTER(vsrc1);                                \
+        if (!checkForNullExportPC(obj, fp, pc))                             \
+            GOTO_exceptionThrown();                                         \
+        dvmSetField##_ftype(obj, ref, GET_REGISTER##_regsize(vdst));        \
+        ILOGV("+ IPUTQ %d=0x%08llx", ref,                                   \
+            (u8) GET_REGISTER##_regsize(vdst));                             \
+    }                                                                       \
+    FINISH(2);
+
+/*
+ * The JIT needs dvmDexGetResolvedField() to return non-null.
+ * Because the portable interpreter is not involved with the JIT
+ * and trace building, we only need the extra check here when this
+ * code is massaged into a stub called from an assembly interpreter.
+ * This is controlled by the JIT_STUB_HACK macro.
+ */
+
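+/*
+ * For orientation only -- an assumed sketch, not part of this change:
+ * the macro is expected to expand to its argument in the stub build and
+ * to nothing in the portable build, roughly
+ *
+ *   #define JIT_STUB_HACK(x)  (x)      // stub build: perform the check
+ *   #define JIT_STUB_HACK(x)           // portable build: compiled out
+ */
+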
+#define HANDLE_SGET_X(_opcode, _opname, _ftype, _regsize)                   \
+    HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/)                              \
+    {                                                                       \
+        StaticField* sfield;                                                \
+        vdst = INST_AA(inst);                                               \
+        ref = FETCH(1);         /* field ref */                             \
+        ILOGV("|sget%s v%d,sfield@0x%04x", (_opname), vdst, ref);           \
+        sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+        if (sfield == NULL) {                                               \
+            EXPORT_PC();                                                    \
+            sfield = dvmResolveStaticField(curMethod->clazz, ref);          \
+            if (sfield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+            if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
+                JIT_STUB_HACK(dvmJitEndTraceSelect(self,pc));               \
+            }                                                               \
+        }                                                                   \
+        SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
+        ILOGV("+ SGET '%s'=0x%08llx",                                       \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
+    }                                                                       \
+    FINISH(2);
+
+#define HANDLE_SGET_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, field@AAAAAAAA*/)                        \
+    {                                                                       \
+        StaticField* sfield;                                                \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        ILOGV("|sget%s/jumbo v%d,sfield@0x%08x", (_opname), vdst, ref);     \
+        sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+        if (sfield == NULL) {                                               \
+            EXPORT_PC();                                                    \
+            sfield = dvmResolveStaticField(curMethod->clazz, ref);          \
+            if (sfield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+            if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
+                JIT_STUB_HACK(dvmJitEndTraceSelect(self,pc));               \
+            }                                                               \
+        }                                                                   \
+        SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
+        ILOGV("+ SGET '%s'=0x%08llx",                                       \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
+    }                                                                       \
+    FINISH(4);
+
+#define HANDLE_SPUT_X(_opcode, _opname, _ftype, _regsize)                   \
+    HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/)                              \
+    {                                                                       \
+        StaticField* sfield;                                                \
+        vdst = INST_AA(inst);                                               \
+        ref = FETCH(1);         /* field ref */                             \
+        ILOGV("|sput%s v%d,sfield@0x%04x", (_opname), vdst, ref);           \
+        sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+        if (sfield == NULL) {                                               \
+            EXPORT_PC();                                                    \
+            sfield = dvmResolveStaticField(curMethod->clazz, ref);          \
+            if (sfield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+            if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
+                JIT_STUB_HACK(dvmJitEndTraceSelect(self,pc));               \
+            }                                                               \
+        }                                                                   \
+        dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
+        ILOGV("+ SPUT '%s'=0x%08llx",                                       \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
+    }                                                                       \
+    FINISH(2);
+
+#define HANDLE_SPUT_X_JUMBO(_opcode, _opname, _ftype, _regsize)             \
+    HANDLE_OPCODE(_opcode /*vBBBB, field@AAAAAAAA*/)                        \
+    {                                                                       \
+        StaticField* sfield;                                                \
+        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */              \
+        vdst = FETCH(3);                                                    \
+        ILOGV("|sput%s/jumbo v%d,sfield@0x%08x", (_opname), vdst, ref);     \
+        sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+        if (sfield == NULL) {                                               \
+            EXPORT_PC();                                                    \
+            sfield = dvmResolveStaticField(curMethod->clazz, ref);          \
+            if (sfield == NULL)                                             \
+                GOTO_exceptionThrown();                                     \
+            if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) {      \
+                JIT_STUB_HACK(dvmJitEndTraceSelect(self,pc));               \
+            }                                                               \
+        }                                                                   \
+        dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
+        ILOGV("+ SPUT '%s'=0x%08llx",                                       \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
+    }                                                                       \
+    FINISH(4);
+
+/* File: c/OP_BREAKPOINT.cpp */
+HANDLE_OPCODE(OP_BREAKPOINT)
+    {
+        /*
+         * Restart this instruction with the original opcode.  We do
+         * this by simply jumping to the handler.
+         *
+         * It's probably not necessary to update "inst", but we do it
+         * for the sake of anything that needs to do disambiguation in a
+         * common handler with INST_INST.
+         *
+         * The breakpoint itself is handled over in updateDebugger(),
+         * because we need to detect other events (method entry, single
+         * step) and report them in the same event packet, and we're not
+         * yet handling those through breakpoint instructions.  By the
+         * time we get here, the breakpoint has already been handled and
+         * the thread resumed.
+         */
+        u1 originalOpcode = dvmGetOriginalOpcode(pc);
+        LOGV("+++ break 0x%02x (0x%04x -> 0x%04x)", originalOpcode, inst,
+            INST_REPLACE_OP(inst, originalOpcode));
+        inst = INST_REPLACE_OP(inst, originalOpcode);
+        FINISH_BKPT(originalOpcode);
+    }
+OP_END
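+
+/*
+ * Sketch (an assumption; the real define lives in the interpreter
+ * header): INST_REPLACE_OP keeps the high operand byte of "inst" and
+ * swaps in the original opcode, roughly
+ *
+ *   #define INST_REPLACE_OP(_inst, _opcode)  (((_inst) & 0xff00) | (_opcode))
+ */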
+
+/* File: c/OP_DISPATCH_FF.cpp */
+HANDLE_OPCODE(OP_DISPATCH_FF)
+    /*
+     * Indicates extended opcode.  Use next 8 bits to choose where to branch.
+     */
+    DISPATCH_EXTENDED(INST_AA(inst));
+OP_END
+
+/* File: c/gotoTargets.cpp */
+/*
+ * C footer.  This has some common code shared by the various targets.
+ */
+
+/*
+ * Everything from here on is a "goto target".  In the basic interpreter
+ * we jump into these targets and then jump directly to the handler for
+ * next instruction.  Here, these are subroutines that return to the caller.
+ */
+
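+/*
+ * Rough sketch of the two GOTO_TARGET expansions -- assumed for
+ * illustration; the real definitions live in the portable and cstubs
+ * stub-definition files:
+ *
+ *   // portable build: a plain label, reached with "goto"
+ *   #define GOTO_TARGET(_target, ...)   _target:
+ *   #define GOTO_TARGET_END
+ *
+ *   // stub build: a callable function, one per target
+ *   #define GOTO_TARGET(_target, ...)   \
+ *       void dvmMterp_##_target(Thread* self, ## __VA_ARGS__) {
+ *   #define GOTO_TARGET_END }
+ */
+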
+GOTO_TARGET(filledNewArray, bool methodCallRange, bool jumboFormat)
+    {
+        ClassObject* arrayClass;
+        ArrayObject* newArray;
+        u4* contents;
+        char typeCh;
+        int i;
+        u4 arg5;
+
+        EXPORT_PC();
+
+        if (jumboFormat) {
+            ref = FETCH(1) | (u4)FETCH(2) << 16;  /* class ref */
+            vsrc1 = FETCH(3);                     /* #of elements */
+            vdst = FETCH(4);                      /* range base */
+            arg5 = -1;                            /* silence compiler warning */
+            ILOGV("|filled-new-array/jumbo args=%d @0x%08x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+        } else {
+            ref = FETCH(1);             /* class ref */
+            vdst = FETCH(2);            /* first 4 regs -or- range base */
+
+            if (methodCallRange) {
+                vsrc1 = INST_AA(inst);  /* #of elements */
+                arg5 = -1;              /* silence compiler warning */
+                ILOGV("|filled-new-array-range args=%d @0x%04x {regs=v%d-v%d}",
+                    vsrc1, ref, vdst, vdst+vsrc1-1);
+            } else {
+                arg5 = INST_A(inst);
+                vsrc1 = INST_B(inst);   /* #of elements */
+                ILOGV("|filled-new-array args=%d @0x%04x {regs=0x%04x %x}",
+                   vsrc1, ref, vdst, arg5);
+            }
+        }
+
+        /*
+         * Resolve the array class.
+         */
+        arrayClass = dvmDexGetResolvedClass(methodClassDex, ref);
+        if (arrayClass == NULL) {
+            arrayClass = dvmResolveClass(curMethod->clazz, ref, false);
+            if (arrayClass == NULL)
+                GOTO_exceptionThrown();
+        }
+        /*
+        if (!dvmIsArrayClass(arrayClass)) {
+            dvmThrowRuntimeException(
+                "filled-new-array needs array class");
+            GOTO_exceptionThrown();
+        }
+        */
+        /* verifier guarantees this is an array class */
+        assert(dvmIsArrayClass(arrayClass));
+        assert(dvmIsClassInitialized(arrayClass));
+
+        /*
+         * Create an array of the specified type.
+         */
+        LOGVV("+++ filled-new-array type is '%s'", arrayClass->descriptor);
+        typeCh = arrayClass->descriptor[1];
+        if (typeCh == 'D' || typeCh == 'J') {
+            /* category 2 primitives not allowed */
+            dvmThrowRuntimeException("bad filled array req");
+            GOTO_exceptionThrown();
+        } else if (typeCh != 'L' && typeCh != '[' && typeCh != 'I') {
+            /* TODO: requires multiple "fill in" loops with different widths */
+            LOGE("non-int primitives not implemented");
+            dvmThrowInternalError(
+                "filled-new-array not implemented for anything but 'int'");
+            GOTO_exceptionThrown();
+        }
+
+        newArray = dvmAllocArrayByClass(arrayClass, vsrc1, ALLOC_DONT_TRACK);
+        if (newArray == NULL)
+            GOTO_exceptionThrown();
+
+        /*
+         * Fill in the elements.  It's legal for vsrc1 to be zero.
+         */
+        contents = (u4*)(void*)newArray->contents;
+        if (methodCallRange) {
+            for (i = 0; i < vsrc1; i++)
+                contents[i] = GET_REGISTER(vdst+i);
+        } else {
+            assert(vsrc1 <= 5);
+            if (vsrc1 == 5) {
+                contents[4] = GET_REGISTER(arg5);
+                vsrc1--;
+            }
+            for (i = 0; i < vsrc1; i++) {
+                contents[i] = GET_REGISTER(vdst & 0x0f);
+                vdst >>= 4;
+            }
+        }
+        if (typeCh == 'L' || typeCh == '[') {
+            dvmWriteBarrierArray(newArray, 0, newArray->length);
+        }
+
+        retval.l = (Object*)newArray;
+    }
+    if (jumboFormat) {
+        FINISH(5);
+    } else {
+        FINISH(3);
+    }
+GOTO_TARGET_END
+
+
+GOTO_TARGET(invokeVirtual, bool methodCallRange, bool jumboFormat)
+    {
+        Method* baseMethod;
+        Object* thisPtr;
+
+        EXPORT_PC();
+
+        if (jumboFormat) {
+            ref = FETCH(1) | (u4)FETCH(2) << 16;  /* method ref */
+            vsrc1 = FETCH(3);                     /* count */
+            vdst = FETCH(4);                      /* first reg */
+            ADJUST_PC(2);     /* advance pc partially to make returns easier */
+            ILOGV("|invoke-virtual/jumbo args=%d @0x%08x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+            thisPtr = (Object*) GET_REGISTER(vdst);
+        } else {
+            vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+            ref = FETCH(1);             /* method ref */
+            vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+            /*
+             * The object against which we are executing a method is always
+             * in the first argument.
+             */
+            if (methodCallRange) {
+                assert(vsrc1 > 0);
+                ILOGV("|invoke-virtual-range args=%d @0x%04x {regs=v%d-v%d}",
+                    vsrc1, ref, vdst, vdst+vsrc1-1);
+                thisPtr = (Object*) GET_REGISTER(vdst);
+            } else {
+                assert((vsrc1>>4) > 0);
+                ILOGV("|invoke-virtual args=%d @0x%04x {regs=0x%04x %x}",
+                    vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+                thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+            }
+        }
+
+        if (!checkForNull(thisPtr))
+            GOTO_exceptionThrown();
+
+        /*
+         * Resolve the method.  This is the correct method for the static
+         * type of the object.  We also verify access permissions here.
+         */
+        baseMethod = dvmDexGetResolvedMethod(methodClassDex, ref);
+        if (baseMethod == NULL) {
+            baseMethod = dvmResolveMethod(curMethod->clazz, ref,METHOD_VIRTUAL);
+            if (baseMethod == NULL) {
+                ILOGV("+ unknown method or access denied");
+                GOTO_exceptionThrown();
+            }
+        }
+
+        /*
+         * Combine the object we found with the vtable offset in the
+         * method.
+         */
+        assert(baseMethod->methodIndex < thisPtr->clazz->vtableCount);
+        methodToCall = thisPtr->clazz->vtable[baseMethod->methodIndex];
+
+#if defined(WITH_JIT) && defined(MTERP_STUB)
+        self->methodToCall = methodToCall;
+        self->callsiteClass = thisPtr->clazz;
+#endif
+
+#if 0
+        if (dvmIsAbstractMethod(methodToCall)) {
+            /*
+             * This can happen if you create two classes, Base and Sub, where
+             * Sub is a sub-class of Base.  Declare a protected abstract
+             * method foo() in Base, and invoke foo() from a method in Base.
+             * Base is an "abstract base class" and is never instantiated
+             * directly.  Now, override foo() in Sub, and use Sub.  This
+             * works fine unless Sub stops providing an implementation of
+             * the method.
+             */
+            dvmThrowAbstractMethodError("abstract method not implemented");
+            GOTO_exceptionThrown();
+        }
+#else
+        assert(!dvmIsAbstractMethod(methodToCall) ||
+            methodToCall->nativeFunc != NULL);
+#endif
+
+        LOGVV("+++ base=%s.%s virtual[%d]=%s.%s",
+            baseMethod->clazz->descriptor, baseMethod->name,
+            (u4) baseMethod->methodIndex,
+            methodToCall->clazz->descriptor, methodToCall->name);
+        assert(methodToCall != NULL);
+
+#if 0
+        if (vsrc1 != methodToCall->insSize) {
+            LOGW("WRONG METHOD: base=%s.%s virtual[%d]=%s.%s",
+                baseMethod->clazz->descriptor, baseMethod->name,
+                (u4) baseMethod->methodIndex,
+                methodToCall->clazz->descriptor, methodToCall->name);
+            //dvmDumpClass(baseMethod->clazz);
+            //dvmDumpClass(methodToCall->clazz);
+            dvmDumpAllClasses(0);
+        }
+#endif
+
+        GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+    }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeSuper, bool methodCallRange, bool jumboFormat)
+    {
+        Method* baseMethod;
+        u2 thisReg;
+
+        EXPORT_PC();
+
+        if (jumboFormat) {
+            ref = FETCH(1) | (u4)FETCH(2) << 16;  /* method ref */
+            vsrc1 = FETCH(3);                     /* count */
+            vdst = FETCH(4);                      /* first reg */
+            ADJUST_PC(2);     /* advance pc partially to make returns easier */
+            ILOGV("|invoke-super/jumbo args=%d @0x%08x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+            thisReg = vdst;
+        } else {
+            vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+            ref = FETCH(1);             /* method ref */
+            vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+            if (methodCallRange) {
+                ILOGV("|invoke-super-range args=%d @0x%04x {regs=v%d-v%d}",
+                    vsrc1, ref, vdst, vdst+vsrc1-1);
+                thisReg = vdst;
+            } else {
+                ILOGV("|invoke-super args=%d @0x%04x {regs=0x%04x %x}",
+                    vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+                thisReg = vdst & 0x0f;
+            }
+        }
+
+        /* impossible in well-formed code, but we must check nevertheless */
+        if (!checkForNull((Object*) GET_REGISTER(thisReg)))
+            GOTO_exceptionThrown();
+
+        /*
+         * Resolve the method.  This is the correct method for the static
+         * type of the object.  We also verify access permissions here.
+         * The first arg to dvmResolveMethod() is just the referring class
+         * (used for class loaders and such), so we don't want to pass
+         * the superclass into the resolution call.
+         */
+        baseMethod = dvmDexGetResolvedMethod(methodClassDex, ref);
+        if (baseMethod == NULL) {
+            baseMethod = dvmResolveMethod(curMethod->clazz, ref,METHOD_VIRTUAL);
+            if (baseMethod == NULL) {
+                ILOGV("+ unknown method or access denied");
+                GOTO_exceptionThrown();
+            }
+        }
+
+        /*
+         * Combine the object we found with the vtable offset in the
+         * method's class.
+         *
+         * We're using the current method's class' superclass, not the
+         * superclass of "this".  This is because we might be executing
+         * in a method inherited from a superclass, and we want to run
+         * in that class' superclass.
+         */
+        if (baseMethod->methodIndex >= curMethod->clazz->super->vtableCount) {
+            /*
+             * Method does not exist in the superclass.  Could happen if
+             * superclass gets updated.
+             */
+            dvmThrowNoSuchMethodError(baseMethod->name);
+            GOTO_exceptionThrown();
+        }
+        methodToCall = curMethod->clazz->super->vtable[baseMethod->methodIndex];
+
+#if 0
+        if (dvmIsAbstractMethod(methodToCall)) {
+            dvmThrowAbstractMethodError("abstract method not implemented");
+            GOTO_exceptionThrown();
+        }
+#else
+        assert(!dvmIsAbstractMethod(methodToCall) ||
+            methodToCall->nativeFunc != NULL);
+#endif
+        LOGVV("+++ base=%s.%s super-virtual=%s.%s",
+            baseMethod->clazz->descriptor, baseMethod->name,
+            methodToCall->clazz->descriptor, methodToCall->name);
+        assert(methodToCall != NULL);
+
+        GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+    }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeInterface, bool methodCallRange, bool jumboFormat)
+    {
+        Object* thisPtr;
+        ClassObject* thisClass;
+
+        EXPORT_PC();
+
+        if (jumboFormat) {
+            ref = FETCH(1) | (u4)FETCH(2) << 16;  /* method ref */
+            vsrc1 = FETCH(3);                     /* count */
+            vdst = FETCH(4);                      /* first reg */
+            ADJUST_PC(2);     /* advance pc partially to make returns easier */
+            ILOGV("|invoke-interface/jumbo args=%d @0x%08x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+            thisPtr = (Object*) GET_REGISTER(vdst);
+        } else {
+            vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+            ref = FETCH(1);             /* method ref */
+            vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+            /*
+             * The object against which we are executing a method is always
+             * in the first argument.
+             */
+            if (methodCallRange) {
+                assert(vsrc1 > 0);
+                ILOGV("|invoke-interface-range args=%d @0x%04x {regs=v%d-v%d}",
+                    vsrc1, ref, vdst, vdst+vsrc1-1);
+                thisPtr = (Object*) GET_REGISTER(vdst);
+            } else {
+                assert((vsrc1>>4) > 0);
+                ILOGV("|invoke-interface args=%d @0x%04x {regs=0x%04x %x}",
+                    vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+                thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+            }
+        }
+
+        if (!checkForNull(thisPtr))
+            GOTO_exceptionThrown();
+
+        thisClass = thisPtr->clazz;
+
+        /*
+         * Given a class and a method index, find the Method* with the
+         * actual code we want to execute.
+         */
+        methodToCall = dvmFindInterfaceMethodInCache(thisClass, ref, curMethod,
+                        methodClassDex);
+#if defined(WITH_JIT) && defined(MTERP_STUB)
+        self->callsiteClass = thisClass;
+        self->methodToCall = methodToCall;
+#endif
+        if (methodToCall == NULL) {
+            assert(dvmCheckException(self));
+            GOTO_exceptionThrown();
+        }
+
+        GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+    }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeDirect, bool methodCallRange, bool jumboFormat)
+    {
+        u2 thisReg;
+
+        EXPORT_PC();
+
+        if (jumboFormat) {
+            ref = FETCH(1) | (u4)FETCH(2) << 16;  /* method ref */
+            vsrc1 = FETCH(3);                     /* count */
+            vdst = FETCH(4);                      /* first reg */
+            ADJUST_PC(2);     /* advance pc partially to make returns easier */
+            ILOGV("|invoke-direct/jumbo args=%d @0x%08x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+            thisReg = vdst;
+        } else {
+            vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+            ref = FETCH(1);             /* method ref */
+            vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+            if (methodCallRange) {
+                ILOGV("|invoke-direct-range args=%d @0x%04x {regs=v%d-v%d}",
+                    vsrc1, ref, vdst, vdst+vsrc1-1);
+                thisReg = vdst;
+            } else {
+                ILOGV("|invoke-direct args=%d @0x%04x {regs=0x%04x %x}",
+                    vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+                thisReg = vdst & 0x0f;
+            }
+        }
+
+        if (!checkForNull((Object*) GET_REGISTER(thisReg)))
+            GOTO_exceptionThrown();
+
+        methodToCall = dvmDexGetResolvedMethod(methodClassDex, ref);
+        if (methodToCall == NULL) {
+            methodToCall = dvmResolveMethod(curMethod->clazz, ref,
+                            METHOD_DIRECT);
+            if (methodToCall == NULL) {
+                ILOGV("+ unknown direct method");     // should be impossible
+                GOTO_exceptionThrown();
+            }
+        }
+        GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+    }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeStatic, bool methodCallRange, bool jumboFormat)
+    EXPORT_PC();
+
+    if (jumboFormat) {
+        ref = FETCH(1) | (u4)FETCH(2) << 16;  /* method ref */
+        vsrc1 = FETCH(3);                     /* count */
+        vdst = FETCH(4);                      /* first reg */
+        ADJUST_PC(2);     /* advance pc partially to make returns easier */
+        ILOGV("|invoke-static/jumbo args=%d @0x%08x {regs=v%d-v%d}",
+            vsrc1, ref, vdst, vdst+vsrc1-1);
+    } else {
+        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+        ref = FETCH(1);             /* method ref */
+        vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+        if (methodCallRange)
+            ILOGV("|invoke-static-range args=%d @0x%04x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+        else
+            ILOGV("|invoke-static args=%d @0x%04x {regs=0x%04x %x}",
+                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+    }
+
+    methodToCall = dvmDexGetResolvedMethod(methodClassDex, ref);
+    if (methodToCall == NULL) {
+        methodToCall = dvmResolveMethod(curMethod->clazz, ref, METHOD_STATIC);
+        if (methodToCall == NULL) {
+            ILOGV("+ unknown method");
+            GOTO_exceptionThrown();
+        }
+
+#if defined(WITH_JIT) && defined(MTERP_STUB)
+        /*
+         * The JIT needs dvmDexGetResolvedMethod() to return non-null.
+         * Include the check if this code is being used as a stub
+         * called from the assembly interpreter.
+         */
+        if ((self->interpBreak.ctl.subMode & kSubModeJitTraceBuild) &&
+            (dvmDexGetResolvedMethod(methodClassDex, ref) == NULL)) {
+            /* Class initialization is still ongoing */
+            dvmJitEndTraceSelect(self,pc);
+        }
+#endif
+    }
+    GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeVirtualQuick, bool methodCallRange, bool jumboFormat)
+    {
+        Object* thisPtr;
+
+        EXPORT_PC();
+
+        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+        ref = FETCH(1);             /* vtable index */
+        vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+        /*
+         * The object against which we are executing a method is always
+         * in the first argument.
+         */
+        if (methodCallRange) {
+            assert(vsrc1 > 0);
+            ILOGV("|invoke-virtual-quick-range args=%d @0x%04x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+            thisPtr = (Object*) GET_REGISTER(vdst);
+        } else {
+            assert((vsrc1>>4) > 0);
+            ILOGV("|invoke-virtual-quick args=%d @0x%04x {regs=0x%04x %x}",
+                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+            thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+        }
+
+        if (!checkForNull(thisPtr))
+            GOTO_exceptionThrown();
+
+        /*
+         * Combine the object we found with the vtable offset in the
+         * method.
+         */
+        assert(ref < (unsigned int) thisPtr->clazz->vtableCount);
+        methodToCall = thisPtr->clazz->vtable[ref];
+#if defined(WITH_JIT) && defined(MTERP_STUB)
+        self->callsiteClass = thisPtr->clazz;
+        self->methodToCall = methodToCall;
+#endif
+
+#if 0
+        if (dvmIsAbstractMethod(methodToCall)) {
+            dvmThrowAbstractMethodError("abstract method not implemented");
+            GOTO_exceptionThrown();
+        }
+#else
+        assert(!dvmIsAbstractMethod(methodToCall) ||
+            methodToCall->nativeFunc != NULL);
+#endif
+
+        LOGVV("+++ virtual[%d]=%s.%s",
+            ref, methodToCall->clazz->descriptor, methodToCall->name);
+        assert(methodToCall != NULL);
+
+        GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+    }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeSuperQuick, bool methodCallRange, bool jumboFormat)
+    {
+        u2 thisReg;
+
+        EXPORT_PC();
+
+        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+        ref = FETCH(1);             /* vtable index */
+        vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+        if (methodCallRange) {
+            ILOGV("|invoke-super-quick-range args=%d @0x%04x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+            thisReg = vdst;
+        } else {
+            ILOGV("|invoke-super-quick args=%d @0x%04x {regs=0x%04x %x}",
+                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+            thisReg = vdst & 0x0f;
+        }
+        /* impossible in well-formed code, but we must check nevertheless */
+        if (!checkForNull((Object*) GET_REGISTER(thisReg)))
+            GOTO_exceptionThrown();
+
+#if 0   /* impossible in optimized + verified code */
+        if (ref >= curMethod->clazz->super->vtableCount) {
+            dvmThrowNoSuchMethodError(NULL);
+            GOTO_exceptionThrown();
+        }
+#else
+        assert(ref < (unsigned int) curMethod->clazz->super->vtableCount);
+#endif
+
+        /*
+         * Combine the object we found with the vtable offset in the
+         * method's class.
+         *
+         * We're using the current method's class' superclass, not the
+         * superclass of "this".  This is because we might be executing
+         * in a method inherited from a superclass, and we want to run
+         * in the method's class' superclass.
+         */
+        methodToCall = curMethod->clazz->super->vtable[ref];
+
+#if 0
+        if (dvmIsAbstractMethod(methodToCall)) {
+            dvmThrowAbstractMethodError("abstract method not implemented");
+            GOTO_exceptionThrown();
+        }
+#else
+        assert(!dvmIsAbstractMethod(methodToCall) ||
+            methodToCall->nativeFunc != NULL);
+#endif
+        LOGVV("+++ super-virtual[%d]=%s.%s",
+            ref, methodToCall->clazz->descriptor, methodToCall->name);
+        assert(methodToCall != NULL);
+        GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+    }
+GOTO_TARGET_END
+
+
+    /*
+     * General handling for return-void, return, and return-wide.  Put the
+     * return value in "retval" before jumping here.
+     */
+GOTO_TARGET(returnFromMethod)
+    {
+        StackSaveArea* saveArea;
+
+        /*
+         * We must do this BEFORE we pop the previous stack frame off, so
+         * that the GC can see the return value (if any) in the local vars.
+         *
+         * Since this is now an interpreter switch point, we must do it before
+         * we do anything at all.
+         */
+        PERIODIC_CHECKS(0);
+
+        ILOGV("> retval=0x%llx (leaving %s.%s %s)",
+            retval.j, curMethod->clazz->descriptor, curMethod->name,
+            curMethod->shorty);
+        //DUMP_REGS(curMethod, fp);
+
+        saveArea = SAVEAREA_FROM_FP(fp);
+
+#ifdef EASY_GDB
+        debugSaveArea = saveArea;
+#endif
+
+        /* back up to previous frame and see if we hit a break */
+        fp = (u4*)saveArea->prevFrame;
+        assert(fp != NULL);
+
+        /* Handle any special subMode requirements */
+        if (self->interpBreak.ctl.subMode != 0) {
+            PC_FP_TO_SELF();
+            dvmReportReturn(self);
+        }
+
+        if (dvmIsBreakFrame(fp)) {
+            /* bail without popping the method frame from stack */
+            LOGVV("+++ returned into break frame");
+            GOTO_bail();
+        }
+
+        /* update thread FP, and reset local variables */
+        self->interpSave.curFrame = fp;
+        curMethod = SAVEAREA_FROM_FP(fp)->method;
+        self->interpSave.method = curMethod;
+        //methodClass = curMethod->clazz;
+        methodClassDex = curMethod->clazz->pDvmDex;
+        pc = saveArea->savedPc;
+        ILOGD("> (return to %s.%s %s)", curMethod->clazz->descriptor,
+            curMethod->name, curMethod->shorty);
+
+        /* use FINISH on the caller's invoke instruction */
+        //u2 invokeInstr = INST_INST(FETCH(0));
+        if (true /*invokeInstr >= OP_INVOKE_VIRTUAL &&
+            invokeInstr <= OP_INVOKE_INTERFACE*/)
+        {
+            FINISH(3);
+        } else {
+            //LOGE("Unknown invoke instr %02x at %d",
+            //    invokeInstr, (int) (pc - curMethod->insns));
+            assert(false);
+        }
+    }
+GOTO_TARGET_END
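+
+    /*
+     * Caller-side sketch (assumed pattern, for illustration): a
+     * return-wide handler stores the value first, then jumps here:
+     *
+     *   retval.j = GET_REGISTER_WIDE(vsrc1);
+     *   GOTO_returnFromMethod();
+     */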
+
+
+    /*
+     * Jump here when the code throws an exception.
+     *
+     * By the time we get here, the Throwable has been created and the stack
+     * trace has been saved off.
+     */
+GOTO_TARGET(exceptionThrown)
+    {
+        Object* exception;
+        int catchRelPc;
+
+        PERIODIC_CHECKS(0);
+
+        /*
+         * We save off the exception and clear the exception status.  While
+         * processing the exception we might need to load some Throwable
+         * classes, and we don't want class loader exceptions to get
+         * confused with this one.
+         */
+        assert(dvmCheckException(self));
+        exception = dvmGetException(self);
+        dvmAddTrackedAlloc(exception, self);
+        dvmClearException(self);
+
+        LOGV("Handling exception %s at %s:%d",
+            exception->clazz->descriptor, curMethod->name,
+            dvmLineNumFromPC(curMethod, pc - curMethod->insns));
+
+        /*
+         * Report the exception throw to any "subMode" watchers.
+         *
+         * TODO: if the exception was thrown by interpreted code, control
+         * fell through native, and then back to us, we will report the
+         * exception at the point of the throw and again here.  We can avoid
+         * this by not reporting exceptions when we jump here directly from
+         * the native call code above, but then we won't report exceptions
+         * that were thrown *from* the JNI code (as opposed to *through* it).
+         *
+         * The correct solution is probably to ignore from-native exceptions
+         * here, and have the JNI exception code do the reporting to the
+         * debugger.
+         */
+        if (self->interpBreak.ctl.subMode != 0) {
+            PC_FP_TO_SELF();
+            dvmReportExceptionThrow(self, exception);
+        }
+
+        /*
+         * We need to unroll to the catch block or the nearest "break"
+         * frame.
+         *
+         * A break frame could indicate that we have reached an intermediate
+         * native call, or have gone off the top of the stack and the thread
+         * needs to exit.  Either way, we return from here, leaving the
+         * exception raised.
+         *
+         * If we do find a catch block, we want to transfer execution to
+         * that point.
+         *
+         * Note this can cause an exception while resolving classes in
+         * the "catch" blocks.
+         */
+        catchRelPc = dvmFindCatchBlock(self, pc - curMethod->insns,
+                    exception, false, (void**)(void*)&fp);
+
+        /*
+         * Restore the stack bounds after an overflow.  This isn't going to
+         * be correct in all circumstances, e.g. if JNI code devours the
+         * exception this won't happen until some other exception gets
+         * thrown.  If the code keeps pushing the stack bounds we'll end
+         * up aborting the VM.
+         *
+         * Note we want to do this *after* the call to dvmFindCatchBlock,
+         * because that may need extra stack space to resolve exception
+         * classes (e.g. through a class loader).
+         *
+         * It's possible for the stack overflow handling to cause an
+         * exception (specifically, class resolution in a "catch" block
+         * during the call above), so we could see the thread's overflow
+         * flag raised but actually be running in a "nested" interpreter
+         * frame.  We don't allow doubled-up StackOverflowErrors, so
+         * we can check for this by just looking at the exception type
+         * in the cleanup function.  Also, we won't unroll past the SOE
+         * point because the more-recent exception will hit a break frame
+         * as it unrolls to here.
+         */
+        if (self->stackOverflowed)
+            dvmCleanupStackOverflow(self, exception);
+
+        if (catchRelPc < 0) {
+            /* falling through to JNI code or off the bottom of the stack */
+#if DVM_SHOW_EXCEPTION >= 2
+            LOGD("Exception %s from %s:%d not caught locally",
+                exception->clazz->descriptor, dvmGetMethodSourceFile(curMethod),
+                dvmLineNumFromPC(curMethod, pc - curMethod->insns));
+#endif
+            dvmSetException(self, exception);
+            dvmReleaseTrackedAlloc(exception, self);
+            GOTO_bail();
+        }
+
+#if DVM_SHOW_EXCEPTION >= 3
+        {
+            const Method* catchMethod = SAVEAREA_FROM_FP(fp)->method;
+            LOGD("Exception %s thrown from %s:%d to %s:%d",
+                exception->clazz->descriptor, dvmGetMethodSourceFile(curMethod),
+                dvmLineNumFromPC(curMethod, pc - curMethod->insns),
+                dvmGetMethodSourceFile(catchMethod),
+                dvmLineNumFromPC(catchMethod, catchRelPc));
+        }
+#endif
+
+        /*
+         * Adjust local variables to match self->interpSave.curFrame and the
+         * updated PC.
+         */
+        //fp = (u4*) self->interpSave.curFrame;
+        curMethod = SAVEAREA_FROM_FP(fp)->method;
+        self->interpSave.method = curMethod;
+        //methodClass = curMethod->clazz;
+        methodClassDex = curMethod->clazz->pDvmDex;
+        pc = curMethod->insns + catchRelPc;
+        ILOGV("> pc <-- %s.%s %s", curMethod->clazz->descriptor,
+            curMethod->name, curMethod->shorty);
+        DUMP_REGS(curMethod, fp, false);            // show all regs
+
+        /*
+         * Restore the exception if the handler wants it.
+         *
+         * The Dalvik spec mandates that, if an exception handler wants to
+         * do something with the exception, the first instruction executed
+         * must be "move-exception".  We can pass the exception along
+         * through the thread struct, and let the move-exception instruction
+         * clear it for us.
+         *
+         * If the handler doesn't call move-exception, we don't want to
+         * finish here with an exception still pending.
+         */
+        if (INST_INST(FETCH(0)) == OP_MOVE_EXCEPTION)
+            dvmSetException(self, exception);
+
+        dvmReleaseTrackedAlloc(exception, self);
+        FINISH(0);
+    }
+GOTO_TARGET_END
+
+
+
+    /*
+     * General handling for invoke-{virtual,super,direct,static,interface},
+     * including "quick" variants.
+     *
+     * Set "methodToCall" to the Method we're calling, and "methodCallRange"
+     * depending on whether this is a "/range" instruction.
+     *
+     * For a range call:
+     *  "vsrc1" holds the argument count (8 bits)
+     *  "vdst" holds the first argument in the range
+     * For a non-range call:
+     *  "vsrc1" holds the argument count (4 bits) and the 5th argument index
+     *  "vdst" holds four 4-bit register indices
+     *
+     * The caller must EXPORT_PC before jumping here, because any method
+     * call can throw a stack overflow exception.
+     */
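+    /*
+     * Illustrative example (not from the original comment): a non-range
+     * "invoke-virtual {v1, v2, v3}" arrives with vsrc1 = 0x30 (count 3
+     * in the high nibble) and vdst = 0x0321 (one 4-bit register index
+     * per nibble, lowest nibble first), so the argument copy below
+     * produces outs[0]=v1, outs[1]=v2, outs[2]=v3.
+     */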
+GOTO_TARGET(invokeMethod, bool methodCallRange, const Method* _methodToCall,
+    u2 count, u2 regs)
+    {
+        STUB_HACK(vsrc1 = count; vdst = regs; methodToCall = _methodToCall;);
+
+        //printf("range=%d call=%p count=%d regs=0x%04x\n",
+        //    methodCallRange, methodToCall, count, regs);
+        //printf(" --> %s.%s %s\n", methodToCall->clazz->descriptor,
+        //    methodToCall->name, methodToCall->shorty);
+
+        u4* outs;
+        int i;
+
+        /*
+         * Copy args.  This may corrupt vsrc1/vdst.
+         */
+        if (methodCallRange) {
+            // could use memcpy or a "Duff's device"; most functions have
+            // so few args it won't matter much
+            assert(vsrc1 <= curMethod->outsSize);
+            assert(vsrc1 == methodToCall->insSize);
+            outs = OUTS_FROM_FP(fp, vsrc1);
+            for (i = 0; i < vsrc1; i++)
+                outs[i] = GET_REGISTER(vdst+i);
+        } else {
+            u4 count = vsrc1 >> 4;
+
+            assert(count <= curMethod->outsSize);
+            assert(count == methodToCall->insSize);
+            assert(count <= 5);
+
+            outs = OUTS_FROM_FP(fp, count);
+#if 0
+            if (count == 5) {
+                outs[4] = GET_REGISTER(vsrc1 & 0x0f);
+                count--;
+            }
+            for (i = 0; i < (int) count; i++) {
+                outs[i] = GET_REGISTER(vdst & 0x0f);
+                vdst >>= 4;
+            }
+#else
+            // This version executes fewer instructions but is larger
+            // overall.  Seems to be a teensy bit faster.
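+            // Note: the cases below fall through deliberately, copying
+            // the highest-numbered argument first.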
+            assert((vdst >> 16) == 0);  // 16 bits -or- high 16 bits clear
+            switch (count) {
+            case 5:
+                outs[4] = GET_REGISTER(vsrc1 & 0x0f);
+            case 4:
+                outs[3] = GET_REGISTER(vdst >> 12);
+            case 3:
+                outs[2] = GET_REGISTER((vdst & 0x0f00) >> 8);
+            case 2:
+                outs[1] = GET_REGISTER((vdst & 0x00f0) >> 4);
+            case 1:
+                outs[0] = GET_REGISTER(vdst & 0x0f);
+            default:
+                ;
+            }
+#endif
+        }
+    }
+
+    /*
+     * (This was originally a "goto" target; I've kept it separate from the
+     * stuff above in case we want to refactor things again.)
+     *
+     * At this point, we have the arguments stored in the "outs" area of
+     * the current method's stack frame, and the method to call in
+     * "methodToCall".  Push a new stack frame.
+     */
+    {
+        StackSaveArea* newSaveArea;
+        u4* newFp;
+
+        ILOGV("> %s%s.%s %s",
+            dvmIsNativeMethod(methodToCall) ? "(NATIVE) " : "",
+            methodToCall->clazz->descriptor, methodToCall->name,
+            methodToCall->shorty);
+
+        newFp = (u4*) SAVEAREA_FROM_FP(fp) - methodToCall->registersSize;
+        newSaveArea = SAVEAREA_FROM_FP(newFp);
+
+        /* verify that we have enough space */
+        if (true) {
+            u1* bottom;
+            bottom = (u1*) newSaveArea - methodToCall->outsSize * sizeof(u4);
+            if (bottom < self->interpStackEnd) {
+                /* stack overflow */
+                LOGV("Stack overflow on method call (start=%p end=%p newBot=%p(%d) size=%d '%s')",
+                    self->interpStackStart, self->interpStackEnd, bottom,
+                    (u1*) fp - bottom, self->interpStackSize,
+                    methodToCall->name);
+                dvmHandleStackOverflow(self, methodToCall);
+                assert(dvmCheckException(self));
+                GOTO_exceptionThrown();
+            }
+            //LOGD("+++ fp=%p newFp=%p newSave=%p bottom=%p",
+            //    fp, newFp, newSaveArea, bottom);
+        }
+
+#ifdef LOG_INSTR
+        if (methodToCall->registersSize > methodToCall->insSize) {
+            /*
+             * This makes valgrind quiet when we print registers that
+             * haven't been initialized.  Turn it off when the debug
+             * messages are disabled -- we want valgrind to report any
+             * used-before-initialized issues.
+             */
+            memset(newFp, 0xcc,
+                (methodToCall->registersSize - methodToCall->insSize) * 4);
+        }
+#endif
+
+#ifdef EASY_GDB
+        newSaveArea->prevSave = SAVEAREA_FROM_FP(fp);
+#endif
+        newSaveArea->prevFrame = fp;
+        newSaveArea->savedPc = pc;
+#if defined(WITH_JIT) && defined(MTERP_STUB)
+        newSaveArea->returnAddr = 0;
+#endif
+        newSaveArea->method = methodToCall;
+
+        if (self->interpBreak.ctl.subMode != 0) {
+            /*
+             * We mark ENTER here for both native and non-native
+             * calls.  For native calls, we'll mark EXIT on return.
+             * For non-native calls, EXIT is marked in the RETURN op.
+             */
+            PC_TO_SELF();
+            dvmReportInvoke(self, methodToCall);
+        }
+
+        if (!dvmIsNativeMethod(methodToCall)) {
+            /*
+             * "Call" interpreted code.  Reposition the PC, update the
+             * frame pointer and other local state, and continue.
+             */
+            curMethod = methodToCall;
+            self->interpSave.method = curMethod;
+            methodClassDex = curMethod->clazz->pDvmDex;
+            pc = methodToCall->insns;
+            self->interpSave.curFrame = newFp;
+            fp = newFp;
+#ifdef EASY_GDB
+            debugSaveArea = SAVEAREA_FROM_FP(newFp);
+#endif
+            self->debugIsMethodEntry = true;        // profiling, debugging
+            ILOGD("> pc <-- %s.%s %s", curMethod->clazz->descriptor,
+                curMethod->name, curMethod->shorty);
+            DUMP_REGS(curMethod, fp, true);         // show input args
+            FINISH(0);                              // jump to method start
+        } else {
+            /* set this up for JNI locals, even if not a JNI native */
+            newSaveArea->xtra.localRefCookie = self->jniLocalRefTable.segmentState.all;
+
+            self->interpSave.curFrame = newFp;
+
+            DUMP_REGS(methodToCall, newFp, true);   // show input args
+
+            if (self->interpBreak.ctl.subMode != 0) {
+                dvmReportPreNativeInvoke(methodToCall, self, newSaveArea->prevFrame);
+            }
+
+            ILOGD("> native <-- %s.%s %s", methodToCall->clazz->descriptor,
+                  methodToCall->name, methodToCall->shorty);
+
+            /*
+             * Jump through native call bridge.  Because we leave no
+             * space for locals on native calls, "newFp" points directly
+             * to the method arguments.
+             */
+            (*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self);
+
+            if (self->interpBreak.ctl.subMode != 0) {
+                dvmReportPostNativeInvoke(methodToCall, self, newSaveArea->prevFrame);
+            }
+
+            /* pop frame off */
+            dvmPopJniLocals(self, newSaveArea);
+            self->interpSave.curFrame = newSaveArea->prevFrame;
+            fp = newSaveArea->prevFrame;
+
+            /*
+             * If the native code threw an exception, or interpreted code
+             * invoked by the native call threw one and nobody has cleared
+             * it, jump to our local exception handling.
+             */
+            if (dvmCheckException(self)) {
+                LOGV("Exception thrown by/below native code");
+                GOTO_exceptionThrown();
+            }
+
+            ILOGD("> retval=0x%llx (leaving native)", retval.j);
+            ILOGD("> (return from native %s.%s to %s.%s %s)",
+                methodToCall->clazz->descriptor, methodToCall->name,
+                curMethod->clazz->descriptor, curMethod->name,
+                curMethod->shorty);
+
+            //u2 invokeInstr = INST_INST(FETCH(0));
+            if (true /*invokeInstr >= OP_INVOKE_VIRTUAL &&
+                invokeInstr <= OP_INVOKE_INTERFACE*/)
+            {
+                FINISH(3);
+            } else {
+                //LOGE("Unknown invoke instr %02x at %d",
+                //    invokeInstr, (int) (pc - curMethod->insns));
+                assert(false);
+            }
+        }
+    }
+    assert(false);      // should not get here
+GOTO_TARGET_END
+
+/* File: cstubs/enddefs.cpp */
+
+/* undefine "magic" name remapping */
+#undef retval
+#undef pc
+#undef fp
+#undef curMethod
+#undef methodClassDex
+#undef self
+#undef debugTrackedRefStart
+
+/* File: mips/debug.cpp */
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <inttypes.h>
+
+/*
+ * Dump the fixed-purpose MIPS registers, along with some other info.
+ *
+ */
+void dvmMterpDumpMipsRegs(uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3)
+{
+    register uint32_t rPC       asm("s0");
+    register uint32_t rFP       asm("s1");
+    register uint32_t rSELF     asm("s2");
+    register uint32_t rIBASE    asm("s3");
+    register uint32_t rINST     asm("s4");
+    register uint32_t rOBJ      asm("s5");
+    register uint32_t rBIX      asm("s6");
+    register uint32_t rTEMP     asm("s7");
+
+    //extern char dvmAsmInstructionStart[];
+
+    printf("REGS: a0=%08x a1=%08x a2=%08x a3=%08x\n", a0, a1, a2, a3);
+    printf("    : rPC=%08x rFP=%08x rSELF=%08x rIBASE=%08x\n",
+        rPC, rFP, rSELF, rIBASE);
+    printf("    : rINST=%08x rOBJ=%08x rBIX=%08x rTEMP=%08x \n", rINST, rOBJ, rBIX, rTEMP);
+
+    //Thread* self = (Thread*) rSELF;
+    //const Method* method = self->method;
+    printf("    + self is %p\n", dvmThreadSelf());
+    //printf("    + currently in %s.%s %s\n",
+    //    method->clazz->descriptor, method->name, method->signature);
+    //printf("    + dvmAsmInstructionStart = %p\n", dvmAsmInstructionStart);
+    //printf("    + next handler for 0x%02x = %p\n",
+    //    rINST & 0xff, dvmAsmInstructionStart + (rINST & 0xff) * 64);
+}
+
+/*
+ * Dump the StackSaveArea for the specified frame pointer.
+ */
+void dvmDumpFp(void* fp, StackSaveArea* otherSaveArea)
+{
+    StackSaveArea* saveArea = SAVEAREA_FROM_FP(fp);
+    printf("StackSaveArea for fp %p [%p/%p]:\n", fp, saveArea, otherSaveArea);
+#ifdef EASY_GDB
+    printf("  prevSave=%p, prevFrame=%p savedPc=%p meth=%p curPc=%p\n",
+        saveArea->prevSave, saveArea->prevFrame, saveArea->savedPc,
+        saveArea->method, saveArea->xtra.currentPc);
+#else
+    printf("  prevFrame=%p savedPc=%p meth=%p curPc=%p fp[0]=0x%08x\n",
+        saveArea->prevFrame, saveArea->savedPc,
+        saveArea->method, saveArea->xtra.currentPc,
+        *(u4*)fp);
+#endif
+}
+
+/*
+ * Does the bulk of the work for common_printMethod().
+ */
+void dvmMterpPrintMethod(Method* method)
+{
+    /*
+     * It is a direct (non-virtual) method if it is static, private,
+     * or a constructor.
+     */
+    bool isDirect =
+        ((method->accessFlags & (ACC_STATIC|ACC_PRIVATE)) != 0) ||
+        (method->name[0] == '<');
+
+    char* desc = dexProtoCopyMethodDescriptor(&method->prototype);
+
+    printf("<%c:%s.%s %s> ",
+            isDirect ? 'D' : 'V',
+            method->clazz->descriptor,
+            method->name,
+            desc);
+
+    free(desc);
+}
+
diff --git a/vm/mterp/out/InterpC-portable.cpp b/vm/mterp/out/InterpC-portable.cpp
index d6a23c0..0c80873 100644
--- a/vm/mterp/out/InterpC-portable.cpp
+++ b/vm/mterp/out/InterpC-portable.cpp
@@ -68,6 +68,14 @@
 #if defined(__ARM_EABI__)
 # define NO_UNALIGN_64__UNION
 #endif
+/*
+ * MIPS ABI requires 64-bit alignment for access to 64-bit data types.
+ *
+ * Use memcpy() to do the transfer
+ */
+#if defined(__mips__)
+/* # define NO_UNALIGN_64__UNION */
+#endif
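+/*
+ * Sketch of the memcpy-based accessor this refers to -- an assumption
+ * for illustration; the real helper is defined elsewhere in this file:
+ *
+ *   static inline s8 getLongFromArray(const u4* ptr, int idx) {
+ *       s8 val;
+ *       memcpy(&val, &ptr[idx], 8);   // alignment-safe on MIPS
+ *       return val;
+ *   }
+ */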
 
 
 //#define LOG_INSTR                   /* verbose debugging */
@@ -983,7 +991,7 @@
         }                                                                   \
         SET_REGISTER##_regsize(vdst,                                        \
             dvmGetField##_ftype(obj, ifield->byteOffset));                  \
-        ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name,                   \
+        ILOGV("+ IGET '%s'=0x%08llx", ifield->name,                         \
             (u8) GET_REGISTER##_regsize(vdst));                             \
     }                                                                       \
     FINISH(2);
@@ -1010,7 +1018,7 @@
         }                                                                   \
         SET_REGISTER##_regsize(vdst,                                        \
             dvmGetField##_ftype(obj, ifield->byteOffset));                  \
-        ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name,                   \
+        ILOGV("+ IGET '%s'=0x%08llx", ifield->name,                         \
             (u8) GET_REGISTER##_regsize(vdst));                             \
     }                                                                       \
     FINISH(5);
@@ -1054,7 +1062,7 @@
         }                                                                   \
         dvmSetField##_ftype(obj, ifield->byteOffset,                        \
             GET_REGISTER##_regsize(vdst));                                  \
-        ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name,                   \
+        ILOGV("+ IPUT '%s'=0x%08llx", ifield->name,                         \
             (u8) GET_REGISTER##_regsize(vdst));                             \
     }                                                                       \
     FINISH(2);
@@ -1081,7 +1089,7 @@
         }                                                                   \
         dvmSetField##_ftype(obj, ifield->byteOffset,                        \
             GET_REGISTER##_regsize(vdst));                                  \
-        ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name,                   \
+        ILOGV("+ IPUT '%s'=0x%08llx", ifield->name,                         \
             (u8) GET_REGISTER##_regsize(vdst));                             \
     }                                                                       \
     FINISH(5);
@@ -1131,7 +1139,7 @@
         }                                                                   \
         SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
         ILOGV("+ SGET '%s'=0x%08llx",                                       \
-            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
     }                                                                       \
     FINISH(2);
 
@@ -1154,7 +1162,7 @@
         }                                                                   \
         SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
         ILOGV("+ SGET '%s'=0x%08llx",                                       \
-            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
     }                                                                       \
     FINISH(4);
 
@@ -1177,7 +1185,7 @@
         }                                                                   \
         dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
         ILOGV("+ SPUT '%s'=0x%08llx",                                       \
-            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
     }                                                                       \
     FINISH(2);
 
@@ -1200,7 +1208,7 @@
         }                                                                   \
         dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
         ILOGV("+ SPUT '%s'=0x%08llx",                                       \
-            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
     }                                                                       \
     FINISH(4);
 
@@ -1951,7 +1959,7 @@
 
         vsrc1 = INST_AA(inst);
         offset = FETCH(1) | (((s4) FETCH(2)) << 16);
-        ILOGV("|packed-switch v%d +0x%04x", vsrc1, vsrc2);
+        ILOGV("|packed-switch v%d +0x%04x", vsrc1, offset);
         switchData = pc + offset;       // offset in 16-bit units
 #ifndef NDEBUG
         if (switchData < curMethod->insns ||
@@ -1982,7 +1990,7 @@
 
         vsrc1 = INST_AA(inst);
         offset = FETCH(1) | (((s4) FETCH(2)) << 16);
-        ILOGV("|sparse-switch v%d +0x%04x", vsrc1, vsrc2);
+        ILOGV("|sparse-switch v%d +0x%04x", vsrc1, offset);
         switchData = pc + offset;       // offset in 16-bit units
 #ifndef NDEBUG
         if (switchData < curMethod->insns ||
@@ -5322,7 +5330,8 @@
             self->interpSave.method = curMethod;
             methodClassDex = curMethod->clazz->pDvmDex;
             pc = methodToCall->insns;
-            self->interpSave.curFrame = fp = newFp;
+            self->interpSave.curFrame = newFp;
+            fp = newFp;
 #ifdef EASY_GDB
             debugSaveArea = SAVEAREA_FROM_FP(newFp);
 #endif
@@ -5340,7 +5349,7 @@
             DUMP_REGS(methodToCall, newFp, true);   // show input args
 
             if (self->interpBreak.ctl.subMode != 0) {
-                dvmReportPreNativeInvoke(methodToCall, self, fp);
+                dvmReportPreNativeInvoke(methodToCall, self, newSaveArea->prevFrame);
             }
 
             ILOGD("> native <-- %s.%s %s", methodToCall->clazz->descriptor,
@@ -5354,12 +5363,13 @@
             (*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self);
 
             if (self->interpBreak.ctl.subMode != 0) {
-                dvmReportPostNativeInvoke(methodToCall, self, fp);
+                dvmReportPostNativeInvoke(methodToCall, self, newSaveArea->prevFrame);
             }
 
             /* pop frame off */
             dvmPopJniLocals(self, newSaveArea);
-            self->interpSave.curFrame = fp;
+            self->interpSave.curFrame = newSaveArea->prevFrame;
+            fp = newSaveArea->prevFrame;
 
             /*
              * If the native code threw an exception, or interpreted code
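The MIPS alignment comment added in this file boils down to: never dereference a possibly unaligned pointer as a 64-bit type; copy through memcpy and let the compiler lower it to whatever the ABI allows. The same idiom recurs in the Reflect.cpp hunk later in this patch (the ItoD/JtoD/FtoD cases under ARCH_HAVE_ALIGNED_DOUBLES). A minimal sketch of both directions, with illustrative function names:

    #include <cstdint>
    #include <cstring>

    // Read a 64-bit value through a pointer that may only be 32-bit aligned.
    // On strict-alignment targets such as MIPS the compiler emits safe code;
    // on x86/ARM this typically collapses to a single load or store.
    static uint64_t load64Unaligned(const void* p) {
        uint64_t v;
        std::memcpy(&v, p, sizeof(v));
        return v;
    }

    static void store64Unaligned(void* p, uint64_t v) {
        std::memcpy(p, &v, sizeof(v));
    }
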
diff --git a/vm/mterp/out/InterpC-x86-atom.cpp b/vm/mterp/out/InterpC-x86-atom.cpp
index 5648aff..399a918 100644
--- a/vm/mterp/out/InterpC-x86-atom.cpp
+++ b/vm/mterp/out/InterpC-x86-atom.cpp
@@ -68,6 +68,14 @@
 #if defined(__ARM_EABI__)
 # define NO_UNALIGN_64__UNION
 #endif
+/*
+ * MIPS ABI requires 64-bit alignment for access to 64-bit data types.
+ *
+ * Use memcpy() to do the transfer
+ */
+#if defined(__mips__)
+/* # define NO_UNALIGN_64__UNION */
+#endif
 
 
 //#define LOG_INSTR                   /* verbose debugging */
@@ -452,6 +460,8 @@
     }
 #endif
 
+#define FINISH_BKPT(_opcode)       /* FIXME? */
+#define DISPATCH_EXTENDED(_opcode) /* FIXME? */
 
 /*
  * The "goto label" statements turn into function calls followed by
@@ -488,7 +498,7 @@
  * As a special case, "goto bail" turns into a longjmp.
  */
 #define GOTO_bail()                                                         \
-    dvmMterpStdBail(self, false);
+    dvmMterpStdBail(self)
 
 /*
  * Periodically check for thread suspension.
@@ -1031,7 +1041,7 @@
         }                                                                   \
         SET_REGISTER##_regsize(vdst,                                        \
             dvmGetField##_ftype(obj, ifield->byteOffset));                  \
-        ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name,                   \
+        ILOGV("+ IGET '%s'=0x%08llx", ifield->name,                         \
             (u8) GET_REGISTER##_regsize(vdst));                             \
     }                                                                       \
     FINISH(2);
@@ -1058,7 +1068,7 @@
         }                                                                   \
         SET_REGISTER##_regsize(vdst,                                        \
             dvmGetField##_ftype(obj, ifield->byteOffset));                  \
-        ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name,                   \
+        ILOGV("+ IGET '%s'=0x%08llx", ifield->name,                         \
             (u8) GET_REGISTER##_regsize(vdst));                             \
     }                                                                       \
     FINISH(5);
@@ -1102,7 +1112,7 @@
         }                                                                   \
         dvmSetField##_ftype(obj, ifield->byteOffset,                        \
             GET_REGISTER##_regsize(vdst));                                  \
-        ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name,                   \
+        ILOGV("+ IPUT '%s'=0x%08llx", ifield->name,                         \
             (u8) GET_REGISTER##_regsize(vdst));                             \
     }                                                                       \
     FINISH(2);
@@ -1129,7 +1139,7 @@
         }                                                                   \
         dvmSetField##_ftype(obj, ifield->byteOffset,                        \
             GET_REGISTER##_regsize(vdst));                                  \
-        ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name,                   \
+        ILOGV("+ IPUT '%s'=0x%08llx", ifield->name,                         \
             (u8) GET_REGISTER##_regsize(vdst));                             \
     }                                                                       \
     FINISH(5);
@@ -1179,7 +1189,7 @@
         }                                                                   \
         SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
         ILOGV("+ SGET '%s'=0x%08llx",                                       \
-            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
     }                                                                       \
     FINISH(2);
 
@@ -1202,7 +1212,7 @@
         }                                                                   \
         SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
         ILOGV("+ SGET '%s'=0x%08llx",                                       \
-            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
     }                                                                       \
     FINISH(4);
 
@@ -1225,7 +1235,7 @@
         }                                                                   \
         dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
         ILOGV("+ SPUT '%s'=0x%08llx",                                       \
-            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
     }                                                                       \
     FINISH(2);
 
@@ -1248,7 +1258,7 @@
         }                                                                   \
         dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
         ILOGV("+ SPUT '%s'=0x%08llx",                                       \
-            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
     }                                                                       \
     FINISH(4);
 
@@ -2455,7 +2465,8 @@
             self->interpSave.method = curMethod;
             methodClassDex = curMethod->clazz->pDvmDex;
             pc = methodToCall->insns;
-            self->interpSave.curFrame = fp = newFp;
+            self->interpSave.curFrame = newFp;
+            fp = newFp;
 #ifdef EASY_GDB
             debugSaveArea = SAVEAREA_FROM_FP(newFp);
 #endif
@@ -2473,7 +2484,7 @@
             DUMP_REGS(methodToCall, newFp, true);   // show input args
 
             if (self->interpBreak.ctl.subMode != 0) {
-                dvmReportPreNativeInvoke(methodToCall, self, fp);
+                dvmReportPreNativeInvoke(methodToCall, self, newSaveArea->prevFrame);
             }
 
             ILOGD("> native <-- %s.%s %s", methodToCall->clazz->descriptor,
@@ -2487,12 +2498,13 @@
             (*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self);
 
             if (self->interpBreak.ctl.subMode != 0) {
-                dvmReportPostNativeInvoke(methodToCall, self, fp);
+                dvmReportPostNativeInvoke(methodToCall, self, newSaveArea->prevFrame);
             }
 
             /* pop frame off */
             dvmPopJniLocals(self, newSaveArea);
-            self->interpSave.curFrame = fp;
+            self->interpSave.curFrame = newSaveArea->prevFrame;
+            fp = newSaveArea->prevFrame;
 
             /*
              * If the native code threw an exception, or interpreted code
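The frame-pointer change repeated in the three InterpC hunks has one idea behind it: the caller's frame is always recoverable from the callee's StackSaveArea (prevFrame), so the pre/post-native hooks report that frame explicitly, and the pop after the native call restores from it instead of a cached fp. A toy model of that invariant (all names here are illustrative stand-ins, not the VM's real types):

    // Minimal sketch: push the callee frame, then pop by consulting the
    // save area rather than trusting a previously cached frame pointer.
    struct SaveArea { void* prevFrame; };
    struct Interp   { void* curFrame; };

    static void invokeNativeToy(Interp* self, void* newFp, SaveArea* save) {
        self->curFrame = newFp;             // callee frame becomes current
        /* ... native function runs here ... */
        self->curFrame = save->prevFrame;   // pop: restore the caller's frame
    }
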
diff --git a/vm/mterp/out/InterpC-x86.cpp b/vm/mterp/out/InterpC-x86.cpp
index 1623720..529bcb1 100644
--- a/vm/mterp/out/InterpC-x86.cpp
+++ b/vm/mterp/out/InterpC-x86.cpp
@@ -68,6 +68,14 @@
 #if defined(__ARM_EABI__)
 # define NO_UNALIGN_64__UNION
 #endif
+/*
+ * MIPS ABI requires 64-bit alignment for access to 64-bit data types.
+ *
+ * Use memcpy() to do the transfer
+ */
+#if defined(__mips__)
+/* # define NO_UNALIGN_64__UNION */
+#endif
 
 
 //#define LOG_INSTR                   /* verbose debugging */
@@ -452,6 +460,8 @@
     }
 #endif
 
+#define FINISH_BKPT(_opcode)       /* FIXME? */
+#define DISPATCH_EXTENDED(_opcode) /* FIXME? */
 
 /*
  * The "goto label" statements turn into function calls followed by
@@ -488,7 +498,7 @@
  * As a special case, "goto bail" turns into a longjmp.
  */
 #define GOTO_bail()                                                         \
-    dvmMterpStdBail(self, false);
+    dvmMterpStdBail(self)
 
 /*
  * Periodically check for thread suspension.
@@ -1031,7 +1041,7 @@
         }                                                                   \
         SET_REGISTER##_regsize(vdst,                                        \
             dvmGetField##_ftype(obj, ifield->byteOffset));                  \
-        ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name,                   \
+        ILOGV("+ IGET '%s'=0x%08llx", ifield->name,                         \
             (u8) GET_REGISTER##_regsize(vdst));                             \
     }                                                                       \
     FINISH(2);
@@ -1058,7 +1068,7 @@
         }                                                                   \
         SET_REGISTER##_regsize(vdst,                                        \
             dvmGetField##_ftype(obj, ifield->byteOffset));                  \
-        ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name,                   \
+        ILOGV("+ IGET '%s'=0x%08llx", ifield->name,                         \
             (u8) GET_REGISTER##_regsize(vdst));                             \
     }                                                                       \
     FINISH(5);
@@ -1102,7 +1112,7 @@
         }                                                                   \
         dvmSetField##_ftype(obj, ifield->byteOffset,                        \
             GET_REGISTER##_regsize(vdst));                                  \
-        ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name,                   \
+        ILOGV("+ IPUT '%s'=0x%08llx", ifield->name,                         \
             (u8) GET_REGISTER##_regsize(vdst));                             \
     }                                                                       \
     FINISH(2);
@@ -1129,7 +1139,7 @@
         }                                                                   \
         dvmSetField##_ftype(obj, ifield->byteOffset,                        \
             GET_REGISTER##_regsize(vdst));                                  \
-        ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name,                   \
+        ILOGV("+ IPUT '%s'=0x%08llx", ifield->name,                         \
             (u8) GET_REGISTER##_regsize(vdst));                             \
     }                                                                       \
     FINISH(5);
@@ -1179,7 +1189,7 @@
         }                                                                   \
         SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
         ILOGV("+ SGET '%s'=0x%08llx",                                       \
-            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
     }                                                                       \
     FINISH(2);
 
@@ -1202,7 +1212,7 @@
         }                                                                   \
         SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield));    \
         ILOGV("+ SGET '%s'=0x%08llx",                                       \
-            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
     }                                                                       \
     FINISH(4);
 
@@ -1225,7 +1235,7 @@
         }                                                                   \
         dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
         ILOGV("+ SPUT '%s'=0x%08llx",                                       \
-            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
     }                                                                       \
     FINISH(2);
 
@@ -1248,7 +1258,7 @@
         }                                                                   \
         dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst));    \
         ILOGV("+ SPUT '%s'=0x%08llx",                                       \
-            sfield->field.name, (u8)GET_REGISTER##_regsize(vdst));          \
+            sfield->name, (u8)GET_REGISTER##_regsize(vdst));                \
     }                                                                       \
     FINISH(4);
 
@@ -2397,7 +2407,8 @@
             self->interpSave.method = curMethod;
             methodClassDex = curMethod->clazz->pDvmDex;
             pc = methodToCall->insns;
-            self->interpSave.curFrame = fp = newFp;
+            self->interpSave.curFrame = newFp;
+            fp = newFp;
 #ifdef EASY_GDB
             debugSaveArea = SAVEAREA_FROM_FP(newFp);
 #endif
@@ -2415,7 +2426,7 @@
             DUMP_REGS(methodToCall, newFp, true);   // show input args
 
             if (self->interpBreak.ctl.subMode != 0) {
-                dvmReportPreNativeInvoke(methodToCall, self, fp);
+                dvmReportPreNativeInvoke(methodToCall, self, newSaveArea->prevFrame);
             }
 
             ILOGD("> native <-- %s.%s %s", methodToCall->clazz->descriptor,
@@ -2429,12 +2440,13 @@
             (*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self);
 
             if (self->interpBreak.ctl.subMode != 0) {
-                dvmReportPostNativeInvoke(methodToCall, self, fp);
+                dvmReportPostNativeInvoke(methodToCall, self, newSaveArea->prevFrame);
             }
 
             /* pop frame off */
             dvmPopJniLocals(self, newSaveArea);
-            self->interpSave.curFrame = fp;
+            self->interpSave.curFrame = newSaveArea->prevFrame;
+            fp = newSaveArea->prevFrame;
 
             /*
              * If the native code threw an exception, or interpreted code
diff --git a/vm/mterp/rebuild.sh b/vm/mterp/rebuild.sh
index 2014324..e550a5d 100755
--- a/vm/mterp/rebuild.sh
+++ b/vm/mterp/rebuild.sh
@@ -20,7 +20,7 @@
 #
 set -e
 
-for arch in portable allstubs armv5te armv5te-vfp armv7-a armv7-a-neon x86 x86-atom; do TARGET_ARCH_EXT=$arch make -f Makefile-mterp; done
+for arch in portable allstubs armv5te armv5te-vfp armv7-a armv7-a-neon mips x86 x86-atom; do TARGET_ARCH_EXT=$arch make -f Makefile-mterp; done
 
 # These aren't actually used, so just go ahead and remove them.  The correct
 # approach is to prevent them from being generated in the first place, but
diff --git a/vm/mterp/x86-atom/footer.S b/vm/mterp/x86-atom/footer.S
index 7b5ed9c..ab4e63d 100644
--- a/vm/mterp/x86-atom/footer.S
+++ b/vm/mterp/x86-atom/footer.S
@@ -353,7 +353,10 @@
     movl        rGLUE, rINST            # %ecx<- pMterpGlue
     movl        offGlue_pSelfSuspendCount(rINST), %edx # %ebx<- pSuspendCount (int)
     movl        offGlue_pDebuggerActive(rINST), %eax # %eax<- pDebuggerActive
+    testl       %eax, %eax
+    je          5f
     movl        (%eax), %eax            # %eax<- get debuggerActive (boolean)
+5:
     and         $$7, %eax               # %eax<- mask for boolean (just how many bits does it take?)
     cmp         $$0, (%edx)             # check if suspend is pending
     jne         2f                      # handle suspend
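The footer.S change above guards a pointer that can legitimately be NULL before dereferencing it. In C terms, the added testl/je pair is roughly equivalent to the following (field names mirror the hunk; the Glue type here is a stand-in):

    struct Glue { const int* pDebuggerActive; };

    // Read the debugger flag only if the pointer to it is non-NULL;
    // the mask matches the "and $7" the assembly applies afterwards.
    static int debuggerActiveFlag(const Glue* glue) {
        int active = 0;
        if (glue->pDebuggerActive != nullptr)
            active = *glue->pDebuggerActive;
        return active & 7;
    }
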
diff --git a/vm/native/dalvik_system_Zygote.cpp b/vm/native/dalvik_system_Zygote.cpp
index e93c133..e5ba597 100644
--- a/vm/native/dalvik_system_Zygote.cpp
+++ b/vm/native/dalvik_system_Zygote.cpp
@@ -30,6 +30,7 @@
 #include <grp.h>
 #include <errno.h>
 #include <paths.h>
+#include <sys/personality.h>
 
 #if defined(HAVE_PRCTL)
 # include <sys/prctl.h>
@@ -489,6 +490,12 @@
             dvmAbort();
         }
 
+        int current = personality(0xffffFFFF);
+        int success = personality((ADDR_NO_RANDOMIZE | current));
+        if (success == -1) {
+          LOGW("Personality switch failed. current=%d error=%d\n", current, errno);
+        }
+
         err = setCapabilities(permittedCapabilities, effectiveCapabilities);
         if (err != 0) {
             LOGE("cannot set capabilities (%llx,%llx): %s",
diff --git a/vm/reflect/Reflect.cpp b/vm/reflect/Reflect.cpp
index 8caddfc..c3d59e7 100644
--- a/vm/reflect/Reflect.cpp
+++ b/vm/reflect/Reflect.cpp
@@ -901,6 +901,9 @@
     };
 
     enum Conversion conv;
+#ifdef ARCH_HAVE_ALIGNED_DOUBLES
+    double ret;
+#endif
 
     assert((srcType != PRIM_VOID) && (srcType != PRIM_NOT));
     assert((dstType != PRIM_VOID) && (dstType != PRIM_NOT));
@@ -978,9 +981,15 @@
         case OK4:  *dstPtr = *srcPtr;                                   return 1;
         case OK8:  *(s8*) dstPtr = *(s8*)srcPtr;                        return 2;
         case ItoJ: *(s8*) dstPtr = (s8) (*(s4*) srcPtr);                return 2;
+#ifndef ARCH_HAVE_ALIGNED_DOUBLES
         case ItoD: *(double*) dstPtr = (double) (*(s4*) srcPtr);        return 2;
         case JtoD: *(double*) dstPtr = (double) (*(long long*) srcPtr); return 2;
         case FtoD: *(double*) dstPtr = (double) (*(float*) srcPtr);     return 2;
+#else
+        case ItoD: ret = (double) (*(s4*) srcPtr); memcpy(dstPtr, &ret, 8); return 2;
+        case JtoD: ret = (double) (*(long long*) srcPtr); memcpy(dstPtr, &ret, 8); return 2;
+        case FtoD: ret = (double) (*(float*) srcPtr); memcpy(dstPtr, &ret, 8); return 2;
+#endif
         case ItoF: *(float*) dstPtr = (float) (*(int*) srcPtr);         return 1;
         case JtoF: *(float*) dstPtr = (float) (*(long long*) srcPtr);   return 1;
         case bad: {