Remove MIPS mterp.

Delete the MIPS mterp assembly sources and drop the
libart_mterp.mips and libart_mterp.mips64 genrules from the
runtime build.

Test: aosp_taimen-userdebug boots
Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Bug: 147346243
Change-Id: Ie2509fd87f302bb370d759db0ee205a6dd3ac54f
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 63c5d13..6e8aac7 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -361,7 +361,6 @@
             srcs: [
                 "interpreter/mterp/mterp.cc",
                 "interpreter/mterp/nterp_stub.cc",
-                ":libart_mterp.mips",
                 "arch/mips/context_mips.cc",
                 "arch/mips/entrypoints_init_mips.cc",
                 "arch/mips/jni_entrypoints_mips.S",
@@ -375,7 +374,6 @@
             srcs: [
                 "interpreter/mterp/mterp.cc",
                 "interpreter/mterp/nterp_stub.cc",
-                ":libart_mterp.mips64",
                 "arch/mips64/context_mips64.cc",
                 "arch/mips64/entrypoints_init_mips64.cc",
                 "arch/mips64/jni_entrypoints_mips64.S",
@@ -762,28 +760,6 @@
 }
 
 genrule {
-    name: "libart_mterp.mips",
-    out: ["mterp_mips.S"],
-    srcs: ["interpreter/mterp/mips/*.S"],
-    tool_files: [
-        "interpreter/mterp/gen_mterp.py",
-        "interpreter/mterp/common/gen_setup.py",
-    ],
-    cmd: "$(location interpreter/mterp/gen_mterp.py) $(out) $(in)",
-}
-
-genrule {
-    name: "libart_mterp.mips64",
-    out: ["mterp_mips64.S"],
-    srcs: ["interpreter/mterp/mips64/*.S"],
-    tool_files: [
-        "interpreter/mterp/gen_mterp.py",
-        "interpreter/mterp/common/gen_setup.py",
-    ],
-    cmd: "$(location interpreter/mterp/gen_mterp.py) $(out) $(in)",
-}
-
-genrule {
     name: "libart_mterp.x86",
     out: ["mterp_x86.S"],
     srcs: ["interpreter/mterp/x86/*.S"],
diff --git a/runtime/interpreter/mterp/mips/arithmetic.S b/runtime/interpreter/mterp/mips/arithmetic.S
deleted file mode 100644
index 9ae10f2..0000000
--- a/runtime/interpreter/mterp/mips/arithmetic.S
+++ /dev/null
@@ -1,803 +0,0 @@
-%def binop(preinstr="", result="a0", chkzero="0", instr=""):
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    srl       a3, a0, 8                    #  a3 <- CC
-    and       a2, a0, 255                  #  a2 <- BB
-    GET_VREG(a1, a3)                       #  a1 <- vCC
-    GET_VREG(a0, a2)                       #  a0 <- vBB
-    .if $chkzero
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    $preinstr                              #  optional op
-    $instr                                 #  $result <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO($result, rOBJ, t0)       #  vAA <- $result
-
-%def binop2addr(preinstr="", result="a0", chkzero="0", instr=""):
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG(a0, rOBJ)                     #  a0 <- vA
-    GET_VREG(a1, a3)                       #  a1 <- vB
-    .if $chkzero
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-    $preinstr                              #  optional op
-    $instr                                 #  $result <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO($result, rOBJ, t0)       #  vA <- $result
-
-%def binopLit16(preinstr="", result="a0", chkzero="0", instr=""):
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, +CCCC */
-    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
-    GET_OPB(a2)                            #  a2 <- B
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_VREG(a0, a2)                       #  a0 <- vB
-    .if $chkzero
-    # cmp a1, 0; is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-
-    $preinstr                              #  optional op
-    $instr                                 #  $result <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO($result, rOBJ, t0)       #  vA <- $result
-
-%def binopLit8(preinstr="", result="a0", chkzero="0", instr=""):
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, +CC */
-    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a3, 255                  #  a2 <- BB
-    GET_VREG(a0, a2)                       #  a0 <- vBB
-    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
-    .if $chkzero
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-
-    $preinstr                              #  optional op
-    $instr                                 #  $result <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO($result, rOBJ, t0)       #  vAA <- $result
-
-%def binopWide(preinstr="", result0="a0", result1="a1", chkzero="0", arg0="a0", arg1="a1", arg2="a2", arg3="a3", instr=""):
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0-a1 op a2-a3".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register pair other than a0-a1, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a2-a3).  Useful for integer division and modulus.
-     *
-     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
-     *      xor-long
-     *
-     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a0, 255                  #  a2 <- BB
-    srl       a3, a0, 8                    #  a3 <- CC
-    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
-    EAS2(t1, rFP, a3)                      #  a3 <- &fp[CC]
-    LOAD64($arg0, $arg1, a2)               #  a0/a1 <- vBB/vBB+1
-    LOAD64($arg2, $arg3, t1)               #  a2/a3 <- vCC/vCC+1
-    .if $chkzero
-    or        t0, $arg2, $arg3             #  second arg (a2-a3) is zero?
-    beqz      t0, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-
-    $preinstr                              #  optional op
-    $instr                                 #  result <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO($result0, $result1, rOBJ, t0)   #  vAA/vAA+1 <- $result0/$result1
-
-%def binopWide2addr(preinstr="", result0="a0", result1="a1", chkzero="0", arg0="a0", arg1="a1", arg2="a2", arg3="a3", instr=""):
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register pair other than a0-a1, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (a2-a3).  Useful for integer division and modulus.
-     *
-     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
-     *      and-long/2addr, or-long/2addr, xor-long/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a1)                            #  a1 <- B
-    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
-    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
-    LOAD64($arg2, $arg3, a1)               #  a2/a3 <- vB/vB+1
-    LOAD64($arg0, $arg1, t0)               #  a0/a1 <- vA/vA+1
-    .if $chkzero
-    or        t0, $arg2, $arg3             #  second arg (a2-a3) is zero?
-    beqz      t0, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-    $preinstr                              #  optional op
-    $instr                                 #  result <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO($result0, $result1, rOBJ, t0)   #  vA/vA+1 <- $result0/$result1
-
-%def unop(preinstr="", result0="a0", instr=""):
-    /*
-     * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result0 = op a0".
-     * This could be a MIPS instruction or a function call.
-     *
-     * for: int-to-byte, int-to-char, int-to-short,
-     *      neg-int, not-int, neg-float
-     */
-    /* unop vA, vB */
-    GET_OPB(a3)                            #  a3 <- B
-    GET_OPA4(t0)                           #  t0 <- A+
-    GET_VREG(a0, a3)                       #  a0 <- vB
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    $preinstr                              #  optional op
-    $instr                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    SET_VREG_GOTO($result0, t0, t1)        #  vA <- result0
-
-%def unopNarrower(load="LOAD64_F(fa0, fa0f, a3)", instr=""):
-    /*
-     * Generic 64bit-to-32bit floating-point unary operation.  Provide an "instr"
-     * line that specifies an instruction that performs "fv0 = op fa0".
-     *
-     * For: double-to-float
-     */
-    /* unop vA, vB */
-    GET_OPB(a3)                            #  a3 <- B
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
-    $load
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    $instr
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vA <- fv0
-
-%def unopWide(preinstr="", result0="a0", result1="a1", instr=""):
-    /*
-     * Generic 64-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result0/result1 = op a0/a1".
-     * This could be MIPS instruction or a function call.
-     *
-     * For: neg-long, not-long, neg-double,
-     */
-    /* unop vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- vA
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    $preinstr                              #  optional op
-    $instr                                 #  a0/a1 <- op, a2-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO($result0, $result1, rOBJ, t0)   #  vA/vA+1 <- a0/a1
-
-%def unopWider(preinstr="", result0="a0", result1="a1", instr=""):
-    /*
-     * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result0/result1 = op a0".
-     *
-     * For: int-to-long
-     */
-    /* unop vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG(a0, a3)                       #  a0 <- vB
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    $preinstr                              #  optional op
-    $instr                                 #  result <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO($result0, $result1, rOBJ, t0)   #  vA/vA+1 <- a0/a1
-
-%def op_add_int():
-%  binop(instr="addu a0, a0, a1")
-
-%def op_add_int_2addr():
-%  binop2addr(instr="addu a0, a0, a1")
-
-%def op_add_int_lit16():
-%  binopLit16(instr="addu a0, a0, a1")
-
-%def op_add_int_lit8():
-%  binopLit8(instr="addu a0, a0, a1")
-
-%def op_add_long():
-/*
- *  The compiler generates the following sequence for
- *  [v1 v0] =  [a1 a0] + [a3 a2];
- *    addu v0,a2,a0
- *    addu a1,a3,a1
- *    sltu v1,v0,a2
- *    addu v1,v1,a1
- */
-%  binopWide(result0="v0", result1="v1", preinstr="addu v0, a2, a0", instr="addu a1, a3, a1; sltu v1, v0, a2; addu v1, v1, a1")
-
-%def op_add_long_2addr():
-/*
- * See op_add_long.S for details
- */
-%  binopWide2addr(result0="v0", result1="v1", preinstr="addu v0, a2, a0", instr="addu a1, a3, a1; sltu v1, v0, a2; addu v1, v1, a1")
-
-%def op_and_int():
-%  binop(instr="and a0, a0, a1")
-
-%def op_and_int_2addr():
-%  binop2addr(instr="and a0, a0, a1")
-
-%def op_and_int_lit16():
-%  binopLit16(instr="and a0, a0, a1")
-
-%def op_and_int_lit8():
-%  binopLit8(instr="and a0, a0, a1")
-
-%def op_and_long():
-%  binopWide(preinstr="and a0, a0, a2", instr="and a1, a1, a3")
-
-%def op_and_long_2addr():
-%  binopWide2addr(preinstr="and a0, a0, a2", instr="and a1, a1, a3")
-
-%def op_cmp_long():
-    /*
-     * Compare two 64-bit values
-     *    x = y     return  0
-     *    x < y     return -1
-     *    x > y     return  1
-     *
-     * I think I can improve on the ARM code by the following observation
-     *    slt   t0,  x.hi, y.hi;        # (x.hi < y.hi) ? 1:0
-     *    sgt   t1,  x.hi, y.hi;        # (y.hi > x.hi) ? 1:0
-     *    subu  v0, t0, t1              # v0= -1:1:0 for [ < > = ]
-     */
-    /* cmp-long vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a0, 255                  #  a2 <- BB
-    srl       a3, a0, 8                    #  a3 <- CC
-    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[CC]
-    LOAD64(a0, a1, a2)                     #  a0/a1 <- vBB/vBB+1
-    LOAD64(a2, a3, a3)                     #  a2/a3 <- vCC/vCC+1
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    slt       t0, a1, a3                   #  compare hi
-    sgt       t1, a1, a3
-    subu      v0, t1, t0                   #  v0 <- (-1, 1, 0)
-    bnez      v0, .L${opcode}_finish
-    # at this point x.hi==y.hi
-    sltu      t0, a0, a2                   #  compare lo
-    sgtu      t1, a0, a2
-    subu      v0, t1, t0                   #  v0 <- (-1, 1, 0) for [< > =]
-
-.L${opcode}_finish:
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(v0, rOBJ, t0)            #  vAA <- v0
-
-%def op_div_int():
-#ifdef MIPS32REVGE6
-%  binop(instr="div a0, a0, a1", chkzero="1")
-#else
-%  binop(preinstr="div zero, a0, a1", instr="mflo a0", chkzero="1")
-#endif
-
-%def op_div_int_2addr():
-#ifdef MIPS32REVGE6
-%  binop2addr(instr="div a0, a0, a1", chkzero="1")
-#else
-%  binop2addr(preinstr="div zero, a0, a1", instr="mflo a0", chkzero="1")
-#endif
-
-%def op_div_int_lit16():
-#ifdef MIPS32REVGE6
-%  binopLit16(instr="div a0, a0, a1", chkzero="1")
-#else
-%  binopLit16(preinstr="div zero, a0, a1", instr="mflo a0", chkzero="1")
-#endif
-
-%def op_div_int_lit8():
-#ifdef MIPS32REVGE6
-%  binopLit8(instr="div a0, a0, a1", chkzero="1")
-#else
-%  binopLit8(preinstr="div zero, a0, a1", instr="mflo a0", chkzero="1")
-#endif
-
-%def op_div_long():
-%  binopWide(result0="v0", result1="v1", instr="JAL(__divdi3)", chkzero="1")
-
-%def op_div_long_2addr():
-%  binopWide2addr(result0="v0", result1="v1", instr="JAL(__divdi3)", chkzero="1")
-
-%def op_int_to_byte():
-%  unop(instr="SEB(a0, a0)")
-
-%def op_int_to_char():
-%  unop(preinstr="", instr="and a0, 0xffff")
-
-%def op_int_to_long():
-%  unopWider(instr="sra a1, a0, 31")
-
-%def op_int_to_short():
-%  unop(instr="SEH(a0, a0)")
-
-%def op_long_to_int():
-/* we ignore the high word, making this equivalent to a 32-bit reg move */
-%  op_move()
-
-%def op_mul_int():
-%  binop(instr="mul a0, a0, a1")
-
-%def op_mul_int_2addr():
-%  binop2addr(instr="mul a0, a0, a1")
-
-%def op_mul_int_lit16():
-%  binopLit16(instr="mul a0, a0, a1")
-
-%def op_mul_int_lit8():
-%  binopLit8(instr="mul a0, a0, a1")
-
-%def op_mul_long():
-    /*
-     * Signed 64-bit integer multiply.
-     *         a1   a0
-     *   x     a3   a2
-     *   -------------
-     *       a2a1 a2a0
-     *       a3a0
-     *  a3a1 (<= unused)
-     *  ---------------
-     *         v1   v0
-     */
-    /* mul-long vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    and       t0, a0, 255                  #  a2 <- BB
-    srl       t1, a0, 8                    #  a3 <- CC
-    EAS2(t0, rFP, t0)                      #  t0 <- &fp[BB]
-    LOAD64(a0, a1, t0)                     #  a0/a1 <- vBB/vBB+1
-
-    EAS2(t1, rFP, t1)                      #  t0 <- &fp[CC]
-    LOAD64(a2, a3, t1)                     #  a2/a3 <- vCC/vCC+1
-
-    mul       v1, a3, a0                   #  v1= a3a0
-#ifdef MIPS32REVGE6
-    mulu      v0, a2, a0                   #  v0= a2a0
-    muhu      t1, a2, a0
-#else
-    multu     a2, a0
-    mfhi      t1
-    mflo      v0                           #  v0= a2a0
-#endif
-    mul       t0, a2, a1                   #  t0= a2a1
-    addu      v1, v1, t1                   #  v1+= hi(a2a0)
-    addu      v1, v1, t0                   #  v1= a3a0 + a2a1;
-
-    GET_OPA(a0)                            #  a0 <- AA
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    b         .L${opcode}_finish
-%def op_mul_long_helper_code():
-
-.Lop_mul_long_finish:
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(v0, v1, a0, t0)        #  vAA/vAA+1 <- v0(low)/v1(high)
-
-%def op_mul_long_2addr():
-    /*
-     * See op_mul_long.S for more details
-     */
-    /* mul-long/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-
-    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
-    LOAD64(a0, a1, t0)                     #  vAA.low / high
-
-    GET_OPB(t1)                            #  t1 <- B
-    EAS2(t1, rFP, t1)                      #  t1 <- &fp[B]
-    LOAD64(a2, a3, t1)                     #  vBB.low / high
-
-    mul       v1, a3, a0                   #  v1= a3a0
-#ifdef MIPS32REVGE6
-    mulu      v0, a2, a0                   #  v0= a2a0
-    muhu      t1, a2, a0
-#else
-    multu     a2, a0
-    mfhi      t1
-    mflo      v0                           #  v0= a2a0
- #endif
-    mul       t2, a2, a1                   #  t2= a2a1
-    addu      v1, v1, t1                   #  v1= a3a0 + hi(a2a0)
-    addu      v1, v1, t2                   #  v1= v1 + a2a1;
-
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(v0, v1, rOBJ, t1)      #  vA/vA+1 <- v0(low)/v1(high)
-
-%def op_neg_int():
-%  unop(instr="negu a0, a0")
-
-%def op_neg_long():
-%  unopWide(result0="v0", result1="v1", preinstr="negu v0, a0", instr="negu v1, a1; sltu a0, zero, v0; subu v1, v1, a0")
-
-%def op_not_int():
-%  unop(instr="not a0, a0")
-
-%def op_not_long():
-%  unopWide(preinstr="not a0, a0", instr="not a1, a1")
-
-%def op_or_int():
-%  binop(instr="or a0, a0, a1")
-
-%def op_or_int_2addr():
-%  binop2addr(instr="or a0, a0, a1")
-
-%def op_or_int_lit16():
-%  binopLit16(instr="or a0, a0, a1")
-
-%def op_or_int_lit8():
-%  binopLit8(instr="or a0, a0, a1")
-
-%def op_or_long():
-%  binopWide(preinstr="or a0, a0, a2", instr="or a1, a1, a3")
-
-%def op_or_long_2addr():
-%  binopWide2addr(preinstr="or a0, a0, a2", instr="or a1, a1, a3")
-
-%def op_rem_int():
-#ifdef MIPS32REVGE6
-%  binop(instr="mod a0, a0, a1", chkzero="1")
-#else
-%  binop(preinstr="div zero, a0, a1", instr="mfhi a0", chkzero="1")
-#endif
-
-%def op_rem_int_2addr():
-#ifdef MIPS32REVGE6
-%  binop2addr(instr="mod a0, a0, a1", chkzero="1")
-#else
-%  binop2addr(preinstr="div zero, a0, a1", instr="mfhi a0", chkzero="1")
-#endif
-
-%def op_rem_int_lit16():
-#ifdef MIPS32REVGE6
-%  binopLit16(instr="mod a0, a0, a1", chkzero="1")
-#else
-%  binopLit16(preinstr="div zero, a0, a1", instr="mfhi a0", chkzero="1")
-#endif
-
-%def op_rem_int_lit8():
-#ifdef MIPS32REVGE6
-%  binopLit8(instr="mod a0, a0, a1", chkzero="1")
-#else
-%  binopLit8(preinstr="div zero, a0, a1", instr="mfhi a0", chkzero="1")
-#endif
-
-%def op_rem_long():
-%  binopWide(result0="v0", result1="v1", instr="JAL(__moddi3)", chkzero="1")
-
-%def op_rem_long_2addr():
-%  binopWide2addr(result0="v0", result1="v1", instr="JAL(__moddi3)", chkzero="1")
-
-%def op_rsub_int():
-/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
-%  binopLit16(instr="subu a0, a1, a0")
-
-%def op_rsub_int_lit8():
-%  binopLit8(instr="subu a0, a1, a0")
-
-%def op_shl_int():
-%  binop(instr="sll a0, a0, a1")
-
-%def op_shl_int_2addr():
-%  binop2addr(instr="sll a0, a0, a1")
-
-%def op_shl_int_lit8():
-%  binopLit8(instr="sll a0, a0, a1")
-
-%def op_shl_long():
-    /*
-     * Long integer shift.  This is different from the generic 32/64-bit
-     * binary operations because vAA/vBB are 64-bit but vCC (the shift
-     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
-     * 6 bits of the shift distance.
-     */
-    /* shl-long vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(t2)                            #  t2 <- AA
-    and       a3, a0, 255                  #  a3 <- BB
-    srl       a0, a0, 8                    #  a0 <- CC
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BB]
-    GET_VREG(a2, a0)                       #  a2 <- vCC
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- vBB/vBB+1
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-
-    andi    v1, a2, 0x20                   #  shift< shift & 0x20
-    sll     v0, a0, a2                     #  rlo<- alo << (shift&31)
-    bnez    v1, .L${opcode}_finish
-    not     v1, a2                         #  rhi<- 31-shift  (shift is 5b)
-    srl     a0, 1
-    srl     a0, v1                         #  alo<- alo >> (32-(shift&31))
-    sll     v1, a1, a2                     #  rhi<- ahi << (shift&31)
-    or      v1, a0                         #  rhi<- rhi | alo
-    SET_VREG64_GOTO(v0, v1, t2, t0)        #  vAA/vAA+1 <- v0/v1
-%def op_shl_long_helper_code():
-
-.Lop_shl_long_finish:
-    SET_VREG64_GOTO(zero, v0, t2, t0)      #  vAA/vAA+1 <- rlo/rhi
-
-%def op_shl_long_2addr():
-    /*
-     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
-     * 32-bit shift distance.
-     */
-    /* shl-long/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG(a2, a3)                       #  a2 <- vB
-    EAS2(t2, rFP, rOBJ)                    #  t2 <- &fp[A]
-    LOAD64(a0, a1, t2)                     #  a0/a1 <- vA/vA+1
-
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-
-    andi    v1, a2, 0x20                   #  shift< shift & 0x20
-    sll     v0, a0, a2                     #  rlo<- alo << (shift&31)
-    bnez    v1, .L${opcode}_finish
-    not     v1, a2                         #  rhi<- 31-shift  (shift is 5b)
-    srl     a0, 1
-    srl     a0, v1                         #  alo<- alo >> (32-(shift&31))
-    sll     v1, a1, a2                     #  rhi<- ahi << (shift&31)
-    or      v1, a0                         #  rhi<- rhi | alo
-    SET_VREG64_GOTO(v0, v1, rOBJ, t0)      #  vA/vA+1 <- v0/v1
-%def op_shl_long_2addr_helper_code():
-
-.Lop_shl_long_2addr_finish:
-    SET_VREG64_GOTO(zero, v0, rOBJ, t0)    #  vA/vA+1 <- rlo/rhi
-
-%def op_shr_int():
-%  binop(instr="sra a0, a0, a1")
-
-%def op_shr_int_2addr():
-%  binop2addr(instr="sra a0, a0, a1")
-
-%def op_shr_int_lit8():
-%  binopLit8(instr="sra a0, a0, a1")
-
-%def op_shr_long():
-    /*
-     * Long integer shift.  This is different from the generic 32/64-bit
-     * binary operations because vAA/vBB are 64-bit but vCC (the shift
-     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
-     * 6 bits of the shift distance.
-     */
-    /* shr-long vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(t3)                            #  t3 <- AA
-    and       a3, a0, 255                  #  a3 <- BB
-    srl       a0, a0, 8                    #  a0 <- CC
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BB]
-    GET_VREG(a2, a0)                       #  a2 <- vCC
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- vBB/vBB+1
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-
-    andi    v0, a2, 0x20                   #  shift & 0x20
-    sra     v1, a1, a2                     #  rhi<- ahi >> (shift&31)
-    bnez    v0, .L${opcode}_finish
-    srl     v0, a0, a2                     #  rlo<- alo >> (shift&31)
-    not     a0, a2                         #  alo<- 31-shift (shift is 5b)
-    sll     a1, 1
-    sll     a1, a0                         #  ahi<- ahi << (32-(shift&31))
-    or      v0, a1                         #  rlo<- rlo | ahi
-    SET_VREG64_GOTO(v0, v1, t3, t0)        #  vAA/VAA+1 <- v0/v1
-%def op_shr_long_helper_code():
-
-.Lop_shr_long_finish:
-    sra     a3, a1, 31                     #  a3<- sign(ah)
-    SET_VREG64_GOTO(v1, a3, t3, t0)        #  vAA/VAA+1 <- rlo/rhi
-
-%def op_shr_long_2addr():
-    /*
-     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
-     * 32-bit shift distance.
-     */
-    /* shr-long/2addr vA, vB */
-    GET_OPA4(t2)                           #  t2 <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG(a2, a3)                       #  a2 <- vB
-    EAS2(t0, rFP, t2)                      #  t0 <- &fp[A]
-    LOAD64(a0, a1, t0)                     #  a0/a1 <- vA/vA+1
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-
-    andi    v0, a2, 0x20                   #  shift & 0x20
-    sra     v1, a1, a2                     #  rhi<- ahi >> (shift&31)
-    bnez    v0, .L${opcode}_finish
-    srl     v0, a0, a2                     #  rlo<- alo >> (shift&31)
-    not     a0, a2                         #  alo<- 31-shift (shift is 5b)
-    sll     a1, 1
-    sll     a1, a0                         #  ahi<- ahi << (32-(shift&31))
-    or      v0, a1                         #  rlo<- rlo | ahi
-    SET_VREG64_GOTO(v0, v1, t2, t0)        #  vA/vA+1 <- v0/v1
-%def op_shr_long_2addr_helper_code():
-
-.Lop_shr_long_2addr_finish:
-    sra     a3, a1, 31                     #  a3<- sign(ah)
-    SET_VREG64_GOTO(v1, a3, t2, t0)        #  vA/vA+1 <- rlo/rhi
-
-%def op_sub_int():
-%  binop(instr="subu a0, a0, a1")
-
-%def op_sub_int_2addr():
-%  binop2addr(instr="subu a0, a0, a1")
-
-%def op_sub_long():
-/*
- * For little endian the code sequence looks as follows:
- *    subu    v0,a0,a2
- *    subu    v1,a1,a3
- *    sltu    a0,a0,v0
- *    subu    v1,v1,a0
- */
-%  binopWide(result0="v0", result1="v1", preinstr="subu v0, a0, a2", instr="subu v1, a1, a3; sltu a0, a0, v0; subu v1, v1, a0")
-
-%def op_sub_long_2addr():
-/*
- * See op_sub_long.S for more details
- */
-%  binopWide2addr(result0="v0", result1="v1", preinstr="subu v0, a0, a2", instr="subu v1, a1, a3; sltu a0, a0, v0; subu v1, v1, a0")
-
-%def op_ushr_int():
-%  binop(instr="srl a0, a0, a1")
-
-%def op_ushr_int_2addr():
-%  binop2addr(instr="srl a0, a0, a1 ")
-
-%def op_ushr_int_lit8():
-%  binopLit8(instr="srl a0, a0, a1")
-
-%def op_ushr_long():
-    /*
-     * Long integer shift.  This is different from the generic 32/64-bit
-     * binary operations because vAA/vBB are 64-bit but vCC (the shift
-     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
-     * 6 bits of the shift distance.
-     */
-    /* ushr-long vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a3, a0, 255                  #  a3 <- BB
-    srl       a0, a0, 8                    #  a0 <- CC
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BB]
-    GET_VREG(a2, a0)                       #  a2 <- vCC
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- vBB/vBB+1
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-
-    andi      v0, a2, 0x20                 #  shift & 0x20
-    srl       v1, a1, a2                   #  rhi<- ahi >> (shift&31)
-    bnez      v0, .L${opcode}_finish
-    srl       v0, a0, a2                   #  rlo<- alo >> (shift&31)
-    not       a0, a2                       #  alo<- 31-n  (shift is 5b)
-    sll       a1, 1
-    sll       a1, a0                       #  ahi<- ahi << (32-(shift&31))
-    or        v0, a1                       #  rlo<- rlo | ahi
-    SET_VREG64_GOTO(v0, v1, rOBJ, t0)      #  vAA/vAA+1 <- v0/v1
-%def op_ushr_long_helper_code():
-
-.Lop_ushr_long_finish:
-    SET_VREG64_GOTO(v1, zero, rOBJ, t0)    #  vAA/vAA+1 <- rlo/rhi
-
-%def op_ushr_long_2addr():
-    /*
-     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
-     * 32-bit shift distance.
-     */
-    /* ushr-long/2addr vA, vB */
-    GET_OPA4(t3)                           #  t3 <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG(a2, a3)                       #  a2 <- vB
-    EAS2(t0, rFP, t3)                      #  t0 <- &fp[A]
-    LOAD64(a0, a1, t0)                     #  a0/a1 <- vA/vA+1
-
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-
-    andi      v0, a2, 0x20                 #  shift & 0x20
-    srl       v1, a1, a2                   #  rhi<- ahi >> (shift&31)
-    bnez      v0, .L${opcode}_finish
-    srl       v0, a0, a2                   #  rlo<- alo >> (shift&31)
-    not       a0, a2                       #  alo<- 31-n  (shift is 5b)
-    sll       a1, 1
-    sll       a1, a0                       #  ahi<- ahi << (32-(shift&31))
-    or        v0, a1                       #  rlo<- rlo | ahi
-    SET_VREG64_GOTO(v0, v1, t3, t0)        #  vA/vA+1 <- v0/v1
-%def op_ushr_long_2addr_helper_code():
-
-.Lop_ushr_long_2addr_finish:
-    SET_VREG64_GOTO(v1, zero, t3, t0)      #  vA/vA+1 <- rlo/rhi
-
-%def op_xor_int():
-%  binop(instr="xor a0, a0, a1")
-
-%def op_xor_int_2addr():
-%  binop2addr(instr="xor a0, a0, a1")
-
-%def op_xor_int_lit16():
-%  binopLit16(instr="xor a0, a0, a1")
-
-%def op_xor_int_lit8():
-%  binopLit8(instr="xor a0, a0, a1")
-
-%def op_xor_long():
-%  binopWide(preinstr="xor a0, a0, a2", instr="xor a1, a1, a3")
-
-%def op_xor_long_2addr():
-%  binopWide2addr(preinstr="xor a0, a0, a2", instr="xor a1, a1, a3")
diff --git a/runtime/interpreter/mterp/mips/array.S b/runtime/interpreter/mterp/mips/array.S
deleted file mode 100644
index 57ab147..0000000
--- a/runtime/interpreter/mterp/mips/array.S
+++ /dev/null
@@ -1,239 +0,0 @@
-%def op_aget(load="lw", shift="2", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET"):
-    /*
-     * Array get, 32 bits or less.  vAA <- vBB[vCC].
-     *
-     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
-     * instructions.  We use a pair of FETCH_Bs instead.
-     *
-     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
-     *
-     * NOTE: assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B(a2, 1, 0)                      #  a2 <- BB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    FETCH_B(a3, 1, 1)                      #  a3 <- CC
-    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
-    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
-    # null array object?
-    beqz      a0, common_errNullObject     #  yes, bail
-    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
-    EASN(a0, a0, a1, $shift)               #  a0 <- arrayObj + index*width
-    # a1 >= a3; compare unsigned index
-    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    $load a2, $data_offset(a0)             #  a2 <- vBB[vCC]
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a2, rOBJ, t0)            #  vAA <- a2
-
-%def op_aget_boolean():
-%  op_aget(load="lbu", shift="0", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
-
-%def op_aget_byte():
-%  op_aget(load="lb", shift="0", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
-
-%def op_aget_char():
-%  op_aget(load="lhu", shift="1", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
-
-%def op_aget_object():
-    /*
-     * Array object get.  vAA <- vBB[vCC].
-     *
-     * for: aget-object
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B(a2, 1, 0)                      #  a2 <- BB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    FETCH_B(a3, 1, 1)                      #  a3 <- CC
-    EXPORT_PC()
-    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
-    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
-    JAL(artAGetObjectFromMterp)            #  v0 <- GetObj(array, index)
-    lw   a1, THREAD_EXCEPTION_OFFSET(rSELF)
-    PREFETCH_INST(2)                       #  load rINST
-    bnez a1, MterpException
-    ADVANCE(2)                             #  advance rPC
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_OBJECT_GOTO(v0, rOBJ, t0)     #  vAA <- v0
-
-%def op_aget_short():
-%  op_aget(load="lh", shift="1", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
-
-%def op_aget_wide():
-    /*
-     * Array get, 64 bits.  vAA <- vBB[vCC].
-     *
-     * Arrays of long/double are 64-bit aligned.
-     */
-    /* aget-wide vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a0, 255                  #  a2 <- BB
-    srl       a3, a0, 8                    #  a3 <- CC
-    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
-    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
-    # null array object?
-    beqz      a0, common_errNullObject     #  yes, bail
-    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
-    EAS3(a0, a0, a1)                       #  a0 <- arrayObj + index*width
-    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    LOAD64_off(a2, a3, a0, MIRROR_WIDE_ARRAY_DATA_OFFSET)
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a2, a3, rOBJ, t0)      #  vAA/vAA+1 <- a2/a3
-
-%def op_aput(store="sw", shift="2", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET"):
-
-    /*
-     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
-     *
-     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
-     *
-     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B(a2, 1, 0)                      #  a2 <- BB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    FETCH_B(a3, 1, 1)                      #  a3 <- CC
-    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
-    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
-    # null array object?
-    beqz      a0, common_errNullObject     #  yes, bail
-    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
-    EASN(a0, a0, a1, $shift)               #  a0 <- arrayObj + index*width
-    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_VREG(a2, rOBJ)                     #  a2 <- vAA
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GET_OPCODE_TARGET(t0)
-    $store a2, $data_offset(a0)            #  vBB[vCC] <- a2
-    JR(t0)                                 #  jump to next instruction
-
-%def op_aput_boolean():
-%  op_aput(store="sb", shift="0", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
-
-%def op_aput_byte():
-%  op_aput(store="sb", shift="0", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
-
-%def op_aput_char():
-%  op_aput(store="sh", shift="1", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
-
-%def op_aput_object():
-    /*
-     * Store an object into an array.  vBB[vCC] <- vAA.
-     *
-     */
-    /* op vAA, vBB, vCC */
-    EXPORT_PC()
-    addu   a0, rFP, OFF_FP_SHADOWFRAME
-    move   a1, rPC
-    move   a2, rINST
-    JAL(MterpAputObject)
-    beqz   v0, MterpPossibleException
-    FETCH_ADVANCE_INST(2)               # advance rPC, load rINST
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
-
-%def op_aput_short():
-%  op_aput(store="sh", shift="1", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
-
-%def op_aput_wide():
-    /*
-     * Array put, 64 bits.  vBB[vCC] <- vAA.
-     */
-    /* aput-wide vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(t0)                            #  t0 <- AA
-    and       a2, a0, 255                  #  a2 <- BB
-    srl       a3, a0, 8                    #  a3 <- CC
-    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
-    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
-    # null array object?
-    beqz      a0, common_errNullObject     #  yes, bail
-    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
-    EAS3(a0, a0, a1)                       #  a0 <- arrayObj + index*width
-    EAS2(rOBJ, rFP, t0)                    #  rOBJ <- &fp[AA]
-    # compare unsigned index, length
-    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    LOAD64(a2, a3, rOBJ)                   #  a2/a3 <- vAA/vAA+1
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GET_OPCODE_TARGET(t0)
-    STORE64_off(a2, a3, a0, MIRROR_WIDE_ARRAY_DATA_OFFSET) #  a2/a3 <- vBB[vCC]
-    JR(t0)                                 #  jump to next instruction
-
-%def op_array_length():
-    /*
-     * Return the length of an array.
-     */
-    /* array-length vA, vB */
-    GET_OPB(a1)                            #  a1 <- B
-    GET_OPA4(a2)                           #  a2 <- A+
-    GET_VREG(a0, a1)                       #  a0 <- vB (object ref)
-    # is object null?
-    beqz      a0, common_errNullObject     #  yup, fail
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- array length
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a3, a2, t0)              #  vA <- length
-
-%def op_fill_array_data():
-    /* fill-array-data vAA, +BBBBBBBB */
-    EXPORT_PC()
-    FETCH(a1, 1)                           #  a1 <- bbbb (lo)
-    FETCH(a0, 2)                           #  a0 <- BBBB (hi)
-    GET_OPA(a3)                            #  a3 <- AA
-    INSERT_HIGH_HALF(a1, a0)               #  a1 <- BBBBbbbb
-    GET_VREG(a0, a3)                       #  a0 <- vAA (array object)
-    EAS1(a1, rPC, a1)                      #  a1 <- PC + BBBBbbbb*2 (array data off.)
-    JAL(MterpFillArrayData)                #  v0 <- Mterp(obj, payload)
-    beqz      v0,  MterpPossibleException  #  has exception
-    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-
-%def op_filled_new_array(helper="MterpFilledNewArray"):
-    /*
-     * Create a new array with elements filled from registers.
-     *
-     * for: filled-new-array, filled-new-array/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
-    .extern $helper
-    EXPORT_PC()
-    addu   a0, rFP, OFF_FP_SHADOWFRAME     # a0 <- shadow frame
-    move   a1, rPC
-    move   a2, rSELF
-    JAL($helper)                           #  v0 <- helper(shadow_frame, pc, self)
-    beqz      v0,  MterpPossibleException  #  has exception
-    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-
-%def op_filled_new_array_range():
-%  op_filled_new_array(helper="MterpFilledNewArrayRange")
-
-%def op_new_array():
-    /*
-     * Allocate an array of objects, specified with the array class
-     * and a count.
-     *
-     * The verifier guarantees that this is an array class, so we don't
-     * check for it here.
-     */
-    /* new-array vA, vB, class@CCCC */
-    EXPORT_PC()
-    addu   a0, rFP, OFF_FP_SHADOWFRAME
-    move   a1, rPC
-    move   a2, rINST
-    move   a3, rSELF
-    JAL(MterpNewArray)
-    beqz   v0, MterpPossibleException
-    FETCH_ADVANCE_INST(2)               # advance rPC, load rINST
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/control_flow.S b/runtime/interpreter/mterp/mips/control_flow.S
deleted file mode 100644
index 88e1f0e..0000000
--- a/runtime/interpreter/mterp/mips/control_flow.S
+++ /dev/null
@@ -1,214 +0,0 @@
-%def bincmp(condition=""):
-    /*
-     * Generic two-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
-     */
-    /* if-cmp vA, vB, +CCCC */
-    GET_OPA4(a0)                           #  a0 <- A+
-    GET_OPB(a1)                            #  a1 <- B
-    GET_VREG(a3, a1)                       #  a3 <- vB
-    GET_VREG(a0, a0)                       #  a0 <- vA
-    FETCH_S(rINST, 1)                      #  rINST<- branch offset, in code units
-    b${condition} a0, a3, MterpCommonTakenBranchNoFlags  #  compare (vA, vB)
-    li        t0, JIT_CHECK_OSR
-    beq       rPROFILE, t0, .L_check_not_taken_osr
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-
-%def zcmp(condition=""):
-    /*
-     * Generic one-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
-     */
-    /* if-cmp vAA, +BBBB */
-    GET_OPA(a0)                            #  a0 <- AA
-    GET_VREG(a0, a0)                       #  a0 <- vAA
-    FETCH_S(rINST, 1)                      #  rINST <- branch offset, in code units
-    b${condition} a0, zero, MterpCommonTakenBranchNoFlags
-    li        t0, JIT_CHECK_OSR            # possible OSR re-entry?
-    beq       rPROFILE, t0, .L_check_not_taken_osr
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-
-%def op_goto():
-    /*
-     * Unconditional branch, 8-bit offset.
-     *
-     * The branch distance is a signed code-unit offset, which we need to
-     * double to get a byte offset.
-     */
-    /* goto +AA */
-    sll       a0, rINST, 16                #  a0 <- AAxx0000
-    sra       rINST, a0, 24                #  rINST <- ssssssAA (sign-extended)
-    b       MterpCommonTakenBranchNoFlags
-
-%def op_goto_16():
-    /*
-     * Unconditional branch, 16-bit offset.
-     *
-     * The branch distance is a signed code-unit offset, which we need to
-     * double to get a byte offset.
-     */
-    /* goto/16 +AAAA */
-    FETCH_S(rINST, 1)                      #  rINST <- ssssAAAA (sign-extended)
-    b       MterpCommonTakenBranchNoFlags
-
-%def op_goto_32():
-    /*
-     * Unconditional branch, 32-bit offset.
-     *
-     * The branch distance is a signed code-unit offset, which we need to
-     * double to get a byte offset.
-     *
-     * Unlike most opcodes, this one is allowed to branch to itself, so
-     * our "backward branch" test must be "<=0" instead of "<0".
-     */
-    /* goto/32 +AAAAAAAA */
-    FETCH(rINST, 1)                        #  rINST <- aaaa (lo)
-    FETCH(a1, 2)                           #  a1 <- AAAA (hi)
-    INSERT_HIGH_HALF(rINST, a1)            #  rINST <- AAAAaaaa
-    b         MterpCommonTakenBranchNoFlags
-
-%def op_if_eq():
-%  bincmp(condition="eq")
-
-%def op_if_eqz():
-%  zcmp(condition="eq")
-
-%def op_if_ge():
-%  bincmp(condition="ge")
-
-%def op_if_gez():
-%  zcmp(condition="ge")
-
-%def op_if_gt():
-%  bincmp(condition="gt")
-
-%def op_if_gtz():
-%  zcmp(condition="gt")
-
-%def op_if_le():
-%  bincmp(condition="le")
-
-%def op_if_lez():
-%  zcmp(condition="le")
-
-%def op_if_lt():
-%  bincmp(condition="lt")
-
-%def op_if_ltz():
-%  zcmp(condition="lt")
-
-%def op_if_ne():
-%  bincmp(condition="ne")
-
-%def op_if_nez():
-%  zcmp(condition="ne")
-
-%def op_packed_switch(func="MterpDoPackedSwitch"):
-    /*
-     * Handle a packed-switch or sparse-switch instruction.  In both cases
-     * we decode it and hand it off to a helper function.
-     *
-     * We don't really expect backward branches in a switch statement, but
-     * they're perfectly legal, so we check for them here.
-     *
-     * for: packed-switch, sparse-switch
-     */
-    /* op vAA, +BBBB */
-    FETCH(a0, 1)                           #  a0 <- bbbb (lo)
-    FETCH(a1, 2)                           #  a1 <- BBBB (hi)
-    GET_OPA(a3)                            #  a3 <- AA
-    INSERT_HIGH_HALF(a0, a1)               #  a0 <- BBBBbbbb
-    GET_VREG(a1, a3)                       #  a1 <- vAA
-    EAS1(a0, rPC, a0)                      #  a0 <- PC + BBBBbbbb*2
-    JAL($func)                             #  a0 <- code-unit branch offset
-    move      rINST, v0
-    b         MterpCommonTakenBranchNoFlags
-
-%def op_return():
-    /*
-     * Return a 32-bit value.
-     *
-     * for: return, return-object
-     */
-    /* op vAA */
-    .extern MterpThreadFenceForConstructor
-    JAL(MterpThreadFenceForConstructor)
-    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
-    move      a0, rSELF
-    and       ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    beqz      ra, 1f
-    JAL(MterpSuspendCheck)                 # (self)
-1:
-    GET_OPA(a2)                            #  a2 <- AA
-    GET_VREG(v0, a2)                       #  v0 <- vAA
-    move      v1, zero
-    b         MterpReturn
-
-%def op_return_object():
-%  op_return()
-
-%def op_return_void():
-    .extern MterpThreadFenceForConstructor
-    JAL(MterpThreadFenceForConstructor)
-    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
-    move      a0, rSELF
-    and       ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    beqz      ra, 1f
-    JAL(MterpSuspendCheck)                 # (self)
-1:
-    move      v0, zero
-    move      v1, zero
-    b         MterpReturn
-
-%def op_return_void_no_barrier():
-    lw     ra, THREAD_FLAGS_OFFSET(rSELF)
-    move   a0, rSELF
-    and    ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    beqz   ra, 1f
-    JAL(MterpSuspendCheck)                 # (self)
-1:
-    move   v0, zero
-    move   v1, zero
-    b      MterpReturn
-
-%def op_return_wide():
-    /*
-     * Return a 64-bit value.
-     */
-    /* return-wide vAA */
-    .extern MterpThreadFenceForConstructor
-    JAL(MterpThreadFenceForConstructor)
-    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
-    move      a0, rSELF
-    and       ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    beqz      ra, 1f
-    JAL(MterpSuspendCheck)                 # (self)
-1:
-    GET_OPA(a2)                            #  a2 <- AA
-    EAS2(a2, rFP, a2)                      #  a2 <- &fp[AA]
-    LOAD64(v0, v1, a2)                     #  v0/v1 <- vAA/vAA+1
-    b         MterpReturn
-
-%def op_sparse_switch():
-%  op_packed_switch(func="MterpDoSparseSwitch")
-
-%def op_throw():
-    /*
-     * Throw an exception object in the current thread.
-     */
-    /* throw vAA */
-    EXPORT_PC()                              #  exception handler can throw
-    GET_OPA(a2)                              #  a2 <- AA
-    GET_VREG(a1, a2)                         #  a1 <- vAA (exception object)
-    # null object?
-    beqz  a1, common_errNullObject           #  yes, throw an NPE instead
-    sw    a1, THREAD_EXCEPTION_OFFSET(rSELF) #  thread->exception <- obj
-    b         MterpException
diff --git a/runtime/interpreter/mterp/mips/floating_point.S b/runtime/interpreter/mterp/mips/floating_point.S
deleted file mode 100644
index 20df51e..0000000
--- a/runtime/interpreter/mterp/mips/floating_point.S
+++ /dev/null
@@ -1,518 +0,0 @@
-%def fbinop(instr=""):
-    /*
-     * Generic 32-bit binary float operation.
-     *
-     * For: add-fp, sub-fp, mul-fp, div-fp, rem-fp
-     */
-
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    srl       a3, a0, 8                    #  a3 <- CC
-    and       a2, a0, 255                  #  a2 <- BB
-    GET_VREG_F(fa1, a3)                    #  a1 <- vCC
-    GET_VREG_F(fa0, a2)                    #  a0 <- vBB
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    $instr                                 #  f0 = result
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vAA <- fv0
-
-%def fbinop2addr(instr=""):
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr"
-     * that specifies an instruction that performs "fv0 = fa0 op fa1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
-     *      div-float/2addr, rem-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG_F(fa0, rOBJ)
-    GET_VREG_F(fa1, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-    $instr
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vA <- result
-
-%def fbinopWide(instr=""):
-    /*
-     * Generic 64-bit floating-point binary operation.  Provide an "instr"
-     * line that specifies an instruction that performs "fv0 = fa0 op fa1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * for: add-double, sub-double, mul-double, div-double,
-     *      rem-double
-     *
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a0, 255                  #  a2 <- BB
-    srl       a3, a0, 8                    #  a3 <- CC
-    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
-    EAS2(t1, rFP, a3)                      #  a3 <- &fp[CC]
-    LOAD64_F(fa0, fa0f, a2)
-    LOAD64_F(fa1, fa1f, t1)
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    $instr
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0)  #  vAA/vAA+1 <- fv0
-
-%def fbinopWide2addr(instr=""):
-    /*
-     * Generic 64-bit floating-point "/2addr" binary operation.
-     * Provide an "instr" line that specifies an instruction that
-     * performs "fv0 = fa0 op fa1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
-     *      div-double/2addr, rem-double/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a1)                            #  a1 <- B
-    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
-    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
-    LOAD64_F(fa0, fa0f, t0)
-    LOAD64_F(fa1, fa1f, a1)
-
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    $instr
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0)  #  vA/vA+1 <- fv0
-
-%def funop(instr=""):
-    /*
-     * Generic 32-bit floating-point unary operation.  Provide an "instr"
-     * line that specifies an instruction that performs "fv0 = op fa0".
-     * This could be a MIPS instruction or a function call.
-     *
-     * for: int-to-float
-     */
-    /* unop vA, vB */
-    GET_OPB(a3)                            #  a3 <- B
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_VREG_F(fa0, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    $instr
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    SET_VREG_F_GOTO(fv0, rOBJ, t1)         #  vA <- fv0
-
-%def funopWider(instr=""):
-    /*
-     * Generic 32bit-to-64bit floating-point unary operation.  Provide an "instr"
-     * line that specifies an instruction that performs "fv0 = op fa0".
-     *
-     * For: int-to-double, float-to-double
-     */
-    /* unop vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG_F(fa0, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    $instr
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) #  vA/vA+1 <- fv0
-
-%def op_add_double():
-%  fbinopWide(instr="add.d fv0, fa0, fa1")
-
-%def op_add_double_2addr():
-%  fbinopWide2addr(instr="add.d fv0, fa0, fa1")
-
-%def op_add_float():
-%  fbinop(instr="add.s fv0, fa0, fa1")
-
-%def op_add_float_2addr():
-%  fbinop2addr(instr="add.s fv0, fa0, fa1")
-
-%def op_cmpg_double():
-%  op_cmpl_double(gt_bias="1")
-
-%def op_cmpg_float():
-%  op_cmpl_float(gt_bias="1")
-
-%def op_cmpl_double(gt_bias="0"):
-    /*
-     * Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
-     * into the destination register based on the comparison results.
-     *
-     * For: cmpl-double, cmpg-double
-     */
-    /* op vAA, vBB, vCC */
-
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    and       rOBJ, a0, 255                #  rOBJ <- BB
-    srl       t0, a0, 8                    #  t0 <- CC
-    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[BB]
-    EAS2(t0, rFP, t0)                      #  t0 <- &fp[CC]
-    LOAD64_F(ft0, ft0f, rOBJ)
-    LOAD64_F(ft1, ft1f, t0)
-#ifdef MIPS32REVGE6
-    cmp.eq.d  ft2, ft0, ft1
-    li        rTEMP, 0
-    bc1nez    ft2, 1f                      # done if vBB == vCC (ordered)
-    .if $gt_bias
-    cmp.lt.d  ft2, ft0, ft1
-    li        rTEMP, -1
-    bc1nez    ft2, 1f                      # done if vBB < vCC (ordered)
-    li        rTEMP, 1                     # vBB > vCC or unordered
-    .else
-    cmp.lt.d  ft2, ft1, ft0
-    li        rTEMP, 1
-    bc1nez    ft2, 1f                      # done if vBB > vCC (ordered)
-    li        rTEMP, -1                    # vBB < vCC or unordered
-    .endif
-#else
-    c.eq.d    fcc0, ft0, ft1
-    li        rTEMP, 0
-    bc1t      fcc0, 1f                     # done if vBB == vCC (ordered)
-    .if $gt_bias
-    c.olt.d   fcc0, ft0, ft1
-    li        rTEMP, -1
-    bc1t      fcc0, 1f                     # done if vBB < vCC (ordered)
-    li        rTEMP, 1                     # vBB > vCC or unordered
-    .else
-    c.olt.d   fcc0, ft1, ft0
-    li        rTEMP, 1
-    bc1t      fcc0, 1f                     # done if vBB > vCC (ordered)
-    li        rTEMP, -1                    # vBB < vCC or unordered
-    .endif
-#endif
-1:
-    GET_OPA(rOBJ)
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(rTEMP, rOBJ, t0)         #  vAA <- rTEMP
-
-%def op_cmpl_float(gt_bias="0"):
-    /*
-     * Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
-     * into the destination register based on the comparison results.
-     *
-     * for: cmpl-float, cmpg-float
-     */
-    /* op vAA, vBB, vCC */
-
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    and       a2, a0, 255                  #  a2 <- BB
-    srl       a3, a0, 8
-    GET_VREG_F(ft0, a2)
-    GET_VREG_F(ft1, a3)
-#ifdef MIPS32REVGE6
-    cmp.eq.s  ft2, ft0, ft1
-    li        rTEMP, 0
-    bc1nez    ft2, 1f                      # done if vBB == vCC (ordered)
-    .if $gt_bias
-    cmp.lt.s  ft2, ft0, ft1
-    li        rTEMP, -1
-    bc1nez    ft2, 1f                      # done if vBB < vCC (ordered)
-    li        rTEMP, 1                     # vBB > vCC or unordered
-    .else
-    cmp.lt.s  ft2, ft1, ft0
-    li        rTEMP, 1
-    bc1nez    ft2, 1f                      # done if vBB > vCC (ordered)
-    li        rTEMP, -1                    # vBB < vCC or unordered
-    .endif
-#else
-    c.eq.s    fcc0, ft0, ft1
-    li        rTEMP, 0
-    bc1t      fcc0, 1f                     # done if vBB == vCC (ordered)
-    .if $gt_bias
-    c.olt.s   fcc0, ft0, ft1
-    li        rTEMP, -1
-    bc1t      fcc0, 1f                     # done if vBB < vCC (ordered)
-    li        rTEMP, 1                     # vBB > vCC or unordered
-    .else
-    c.olt.s   fcc0, ft1, ft0
-    li        rTEMP, 1
-    bc1t      fcc0, 1f                     # done if vBB > vCC (ordered)
-    li        rTEMP, -1                    # vBB < vCC or unordered
-    .endif
-#endif
-1:
-    GET_OPA(rOBJ)
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(rTEMP, rOBJ, t0)         #  vAA <- rTEMP
-
-%def op_div_double():
-%  fbinopWide(instr="div.d fv0, fa0, fa1")
-
-%def op_div_double_2addr():
-%  fbinopWide2addr(instr="div.d fv0, fa0, fa1")
-
-%def op_div_float():
-%  fbinop(instr="div.s fv0, fa0, fa1")
-
-%def op_div_float_2addr():
-%  fbinop2addr(instr="div.s fv0, fa0, fa1")
-
-%def op_double_to_float():
-%  unopNarrower(instr="cvt.s.d fv0, fa0")
-
-%def op_double_to_int():
-    /*
-     * double-to-int
-     *
-     * We have to clip values to int min/max per the specification.  The
-     * expected common case is a "reasonable" value that converts directly
-     * to a modest integer.  The EABI convert function isn't doing this for us
-     * for pre-R6.
-     */
-    /* unop vA, vB */
-    GET_OPB(a3)                            #  a3 <- B
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
-    LOAD64_F(fa0, fa0f, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-#ifndef MIPS32REVGE6
-    li        t0, INT_MIN_AS_DOUBLE_HIGH
-    mtc1      zero, fa1
-    MOVE_TO_FPU_HIGH(t0, fa1, fa1f)
-    c.ole.d   fcc0, fa1, fa0
-#endif
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-#ifndef MIPS32REVGE6
-    bc1t      fcc0, 1f                     #  if INT_MIN <= vB, proceed to truncation
-    c.eq.d    fcc0, fa0, fa0
-    mtc1      zero, fa0
-    MOVE_TO_FPU_HIGH(zero, fa0, fa0f)
-    movt.d    fa0, fa1, fcc0               #  fa0 = ordered(vB) ? INT_MIN_AS_DOUBLE : 0
-1:
-#endif
-    trunc.w.d fa0, fa0
-    SET_VREG_F_GOTO(fa0, rOBJ, t1)         #  vA <- result
-
-%def op_double_to_long():
-    /*
-     * double-to-long
-     *
-     * We have to clip values to long min/max per the specification.  The
-     * expected common case is a "reasonable" value that converts directly
-     * to a modest integer.  The EABI convert function isn't doing this for us
-     * for pre-R6.
-     */
-    /* unop vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
-    LOAD64_F(fa0, fa0f, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-#ifdef MIPS32REVGE6
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    trunc.l.d fa0, fa0
-    SET_VREG64_F_GOTO(fa0, fa0f, rOBJ, t1) #  vA <- result
-#else
-    c.eq.d    fcc0, fa0, fa0
-    li        rRESULT0, 0
-    li        rRESULT1, 0
-    bc1f      fcc0, .L${opcode}_get_opcode
-
-    li        t0, LONG_MIN_AS_DOUBLE_HIGH
-    mtc1      zero, fa1
-    MOVE_TO_FPU_HIGH(t0, fa1, fa1f)
-    c.ole.d   fcc0, fa0, fa1
-    li        rRESULT1, LONG_MIN_HIGH
-    bc1t      fcc0, .L${opcode}_get_opcode
-
-    neg.d     fa1, fa1
-    c.ole.d   fcc0, fa1, fa0
-    nor       rRESULT0, rRESULT0, zero
-    nor       rRESULT1, rRESULT1, zero
-    bc1t      fcc0, .L${opcode}_get_opcode
-
-    JAL(__fixdfdi)
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    b         .L${opcode}_set_vreg
-#endif
-%def op_double_to_long_helper_code():
-
-#ifndef MIPS32REVGE6
-.Lop_double_to_long_get_opcode:
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-.Lop_double_to_long_set_vreg:
-    SET_VREG64_GOTO(rRESULT0, rRESULT1, rOBJ, t1)   #  vA/vA+1 <- v0/v1
-#endif
-
-%def op_float_to_double():
-%  funopWider(instr="cvt.d.s fv0, fa0")
-
-%def op_float_to_int():
-    /*
-     * float-to-int
-     *
-     * We have to clip values to int min/max per the specification.  The
-     * expected common case is a "reasonable" value that converts directly
-     * to a modest integer.  The EABI convert function isn't doing this for us
-     * for pre-R6.
-     */
-    /* unop vA, vB */
-    GET_OPB(a3)                            #  a3 <- B
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_VREG_F(fa0, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-#ifndef MIPS32REVGE6
-    li        t0, INT_MIN_AS_FLOAT
-    mtc1      t0, fa1
-    c.ole.s   fcc0, fa1, fa0
-#endif
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-#ifndef MIPS32REVGE6
-    bc1t      fcc0, 1f                     #  if INT_MIN <= vB, proceed to truncation
-    c.eq.s    fcc0, fa0, fa0
-    mtc1      zero, fa0
-    movt.s    fa0, fa1, fcc0               #  fa0 = ordered(vB) ? INT_MIN_AS_FLOAT : 0
-1:
-#endif
-    trunc.w.s fa0, fa0
-    SET_VREG_F_GOTO(fa0, rOBJ, t1)         #  vA <- result
-
-%def op_float_to_long():
-    /*
-     * float-to-long
-     *
-     * We have to clip values to long min/max per the specification.  The
-     * expected common case is a "reasonable" value that converts directly
-     * to a modest integer.  The EABI convert function isn't doing this for us
-     * for pre-R6.
-     */
-    /* unop vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG_F(fa0, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-#ifdef MIPS32REVGE6
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    trunc.l.s fa0, fa0
-    SET_VREG64_F_GOTO(fa0, fa0f, rOBJ, t1) #  vA <- result
-#else
-    c.eq.s    fcc0, fa0, fa0
-    li        rRESULT0, 0
-    li        rRESULT1, 0
-    bc1f      fcc0, .L${opcode}_get_opcode
-
-    li        t0, LONG_MIN_AS_FLOAT
-    mtc1      t0, fa1
-    c.ole.s   fcc0, fa0, fa1
-    li        rRESULT1, LONG_MIN_HIGH
-    bc1t      fcc0, .L${opcode}_get_opcode
-
-    neg.s     fa1, fa1
-    c.ole.s   fcc0, fa1, fa0
-    nor       rRESULT0, rRESULT0, zero
-    nor       rRESULT1, rRESULT1, zero
-    bc1t      fcc0, .L${opcode}_get_opcode
-
-    JAL(__fixsfdi)
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    b         .L${opcode}_set_vreg
-#endif
-%def op_float_to_long_helper_code():
-
-#ifndef MIPS32REVGE6
-.Lop_float_to_long_get_opcode:
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-.Lop_float_to_long_set_vreg:
-    SET_VREG64_GOTO(rRESULT0, rRESULT1, rOBJ, t1)   #  vA/vA+1 <- v0/v1
-#endif
-
-%def op_int_to_double():
-%  funopWider(instr="cvt.d.w fv0, fa0")
-
-%def op_int_to_float():
-%  funop(instr="cvt.s.w fv0, fa0")
-
-%def op_long_to_double():
-    /*
-     * long-to-double
-     */
-    /* unop vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
-
-#ifdef MIPS32REVGE6
-    LOAD64_F(fv0, fv0f, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    cvt.d.l   fv0, fv0
-#else
-    LOAD64(rARG0, rARG1, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    JAL(__floatdidf)                       #  fv0 <- (double)(a0/a1)
-#endif
-
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) #  vA/vA+1 <- result
-
-%def op_long_to_float():
-    /*
-     * long-to-float
-     */
-    /* unop vA, vB */
-    GET_OPB(a3)                            #  a3 <- B
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
-
-#ifdef MIPS32REVGE6
-    LOAD64_F(fv0, fv0f, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    cvt.s.l   fv0, fv0
-#else
-    LOAD64(rARG0, rARG1, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    JAL(__floatdisf)
-#endif
-
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vA <- fv0
-
-%def op_mul_double():
-%  fbinopWide(instr="mul.d fv0, fa0, fa1")
-
-%def op_mul_double_2addr():
-%  fbinopWide2addr(instr="mul.d fv0, fa0, fa1")
-
-%def op_mul_float():
-%  fbinop(instr="mul.s fv0, fa0, fa1")
-
-%def op_mul_float_2addr():
-%  fbinop2addr(instr="mul.s fv0, fa0, fa1")
-
-%def op_neg_double():
-%  unopWide(instr="addu a1, a1, 0x80000000")
-
-%def op_neg_float():
-%  unop(instr="addu a0, a0, 0x80000000")
-
-%def op_rem_double():
-%  fbinopWide(instr="JAL(fmod)")
-
-%def op_rem_double_2addr():
-%  fbinopWide2addr(instr="JAL(fmod)")
-
-%def op_rem_float():
-%  fbinop(instr="JAL(fmodf)")
-
-%def op_rem_float_2addr():
-%  fbinop2addr(instr="JAL(fmodf)")
-
-%def op_sub_double():
-%  fbinopWide(instr="sub.d fv0, fa0, fa1")
-
-%def op_sub_double_2addr():
-%  fbinopWide2addr(instr="sub.d fv0, fa0, fa1")
-
-%def op_sub_float():
-%  fbinop(instr="sub.s fv0, fa0, fa1")
-
-%def op_sub_float_2addr():
-%  fbinop2addr(instr="sub.s fv0, fa0, fa1")
diff --git a/runtime/interpreter/mterp/mips/invoke.S b/runtime/interpreter/mterp/mips/invoke.S
deleted file mode 100644
index c77d12b..0000000
--- a/runtime/interpreter/mterp/mips/invoke.S
+++ /dev/null
@@ -1,87 +0,0 @@
-%def invoke(helper="UndefinedInvokeHandler"):
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern $helper
-    EXPORT_PC()
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    JAL($helper)
-    beqz    v0, MterpException
-    FETCH_ADVANCE_INST(3)
-    JAL(MterpShouldSwitchInterpreters)
-    bnez    v0, MterpFallback
-    GET_INST_OPCODE(t0)
-    GOTO_OPCODE(t0)
-
-%def invoke_polymorphic(helper="UndefinedInvokeHandler"):
-    /*
-     * invoke-polymorphic handler wrapper.
-     */
-    /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
-    .extern $helper
-    EXPORT_PC()
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    JAL($helper)
-    beqz    v0, MterpException
-    FETCH_ADVANCE_INST(4)
-    JAL(MterpShouldSwitchInterpreters)
-    bnez    v0, MterpFallback
-    GET_INST_OPCODE(t0)
-    GOTO_OPCODE(t0)
-
-%def op_invoke_custom():
-%  invoke(helper="MterpInvokeCustom")
-
-%def op_invoke_custom_range():
-%  invoke(helper="MterpInvokeCustomRange")
-
-%def op_invoke_direct():
-%  invoke(helper="MterpInvokeDirect")
-
-%def op_invoke_direct_range():
-%  invoke(helper="MterpInvokeDirectRange")
-
-%def op_invoke_interface():
-%  invoke(helper="MterpInvokeInterface")
-
-%def op_invoke_interface_range():
-%  invoke(helper="MterpInvokeInterfaceRange")
-
-%def op_invoke_polymorphic():
-%  invoke_polymorphic(helper="MterpInvokePolymorphic")
-
-%def op_invoke_polymorphic_range():
-%  invoke_polymorphic(helper="MterpInvokePolymorphicRange")
-
-%def op_invoke_static():
-%  invoke(helper="MterpInvokeStatic")
-
-%def op_invoke_static_range():
-%  invoke(helper="MterpInvokeStaticRange")
-
-%def op_invoke_super():
-%  invoke(helper="MterpInvokeSuper")
-
-%def op_invoke_super_range():
-%  invoke(helper="MterpInvokeSuperRange")
-
-%def op_invoke_virtual():
-%  invoke(helper="MterpInvokeVirtual")
-
-%def op_invoke_virtual_quick():
-%  invoke(helper="MterpInvokeVirtualQuick")
-
-%def op_invoke_virtual_range():
-%  invoke(helper="MterpInvokeVirtualRange")
-
-%def op_invoke_virtual_range_quick():
-%  invoke(helper="MterpInvokeVirtualQuickRange")
diff --git a/runtime/interpreter/mterp/mips/main.S b/runtime/interpreter/mterp/mips/main.S
deleted file mode 100644
index 88180cf..0000000
--- a/runtime/interpreter/mterp/mips/main.S
+++ /dev/null
@@ -1,1144 +0,0 @@
-%def header():
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
-  Art assembly interpreter notes:
-
-  First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
-  handle invoke, allows higher-level code to create frame & shadow frame).
-
-  Once that's working, support direct entry code & eliminate shadow frame (and
-  excess locals allocation).
-
-  Some (hopefully) temporary ugliness.  We'll treat rFP as pointing to the
-  base of the vreg array within the shadow frame.  Access the other fields,
-  dex_pc_, method_ and number_of_vregs_ via negative offsets.  For now, we'll continue
-  the shadow frame mechanism of double-storing object references - via rFP &
-  number_of_vregs_.
-
- */
-
-#include "asm_support.h"
-#include "interpreter/cfi_asm_support.h"
-
-#if (__mips==32) && (__mips_isa_rev>=2)
-#define MIPS32REVGE2    /* mips32r2 and greater */
-#if (__mips==32) && (__mips_isa_rev>=5)
-#define FPU64           /* 64 bit FPU */
-#if (__mips==32) && (__mips_isa_rev>=6)
-#define MIPS32REVGE6    /* mips32r6 and greater */
-#endif
-#endif
-#endif
-
-/* MIPS definitions and declarations
-
-   reg  nick      purpose
-   s0   rPC       interpreted program counter, used for fetching instructions
-   s1   rFP       interpreted frame pointer, used for accessing locals and args
-   s2   rSELF     self (Thread) pointer
-   s3   rIBASE    interpreted instruction base pointer, used for computed goto
-   s4   rINST     first 16-bit code unit of current instruction
-   s5   rOBJ      object pointer
-   s6   rREFS     base of object references in shadow frame (ideally, we'll get rid of this later).
-   s7   rTEMP     used as temp storage that can survive a function call
-   s8   rPROFILE  branch profiling countdown
-
-*/
-
-/* single-purpose registers, given names for clarity */
-#define rPC s0
-#define CFI_DEX 16  // DWARF register number of the register holding dex-pc (s0).
-#define CFI_TMP 4   // DWARF register number of the first argument register (a0).
-#define rFP s1
-#define rSELF s2
-#define rIBASE s3
-#define rINST s4
-#define rOBJ s5
-#define rREFS s6
-#define rTEMP s7
-#define rPROFILE s8
-
-#define rARG0 a0
-#define rARG1 a1
-#define rARG2 a2
-#define rARG3 a3
-#define rRESULT0 v0
-#define rRESULT1 v1
-
-/* GP register definitions */
-#define zero    $$0      /* always zero */
-#define AT      $$at     /* assembler temp */
-#define v0      $$2      /* return value */
-#define v1      $$3
-#define a0      $$4      /* argument registers */
-#define a1      $$5
-#define a2      $$6
-#define a3      $$7
-#define t0      $$8      /* temp registers (not saved across subroutine calls) */
-#define t1      $$9
-#define t2      $$10
-#define t3      $$11
-#define t4      $$12
-#define t5      $$13
-#define t6      $$14
-#define t7      $$15
-#define ta0     $$12     /* alias */
-#define ta1     $$13
-#define ta2     $$14
-#define ta3     $$15
-#define s0      $$16     /* saved across subroutine calls (callee saved) */
-#define s1      $$17
-#define s2      $$18
-#define s3      $$19
-#define s4      $$20
-#define s5      $$21
-#define s6      $$22
-#define s7      $$23
-#define t8      $$24     /* two more temp registers */
-#define t9      $$25
-#define k0      $$26     /* kernel temporary */
-#define k1      $$27
-#define gp      $$28     /* global pointer */
-#define sp      $$29     /* stack pointer */
-#define s8      $$30     /* one more callee saved */
-#define ra      $$31     /* return address */
-
-/* FP register definitions */
-#define fv0    $$f0
-#define fv0f   $$f1
-#define fv1    $$f2
-#define fv1f   $$f3
-#define fa0    $$f12
-#define fa0f   $$f13
-#define fa1    $$f14
-#define fa1f   $$f15
-#define ft0    $$f4
-#define ft0f   $$f5
-#define ft1    $$f6
-#define ft1f   $$f7
-#define ft2    $$f8
-#define ft2f   $$f9
-#define ft3    $$f10
-#define ft3f   $$f11
-#define ft4    $$f16
-#define ft4f   $$f17
-#define ft5    $$f18
-#define ft5f   $$f19
-#define fs0    $$f20
-#define fs0f   $$f21
-#define fs1    $$f22
-#define fs1f   $$f23
-#define fs2    $$f24
-#define fs2f   $$f25
-#define fs3    $$f26
-#define fs3f   $$f27
-#define fs4    $$f28
-#define fs4f   $$f29
-#define fs5    $$f30
-#define fs5f   $$f31
-
-#ifndef MIPS32REVGE6
-#define fcc0   $$fcc0
-#define fcc1   $$fcc1
-#endif
-
-#ifdef MIPS32REVGE2
-#define SEB(rd, rt) \
-    seb       rd, rt
-#define SEH(rd, rt) \
-    seh       rd, rt
-#define INSERT_HIGH_HALF(rd_lo, rt_hi) \
-    ins       rd_lo, rt_hi, 16, 16
-#else
-#define SEB(rd, rt) \
-    sll       rd, rt, 24; \
-    sra       rd, rd, 24
-#define SEH(rd, rt) \
-    sll       rd, rt, 16; \
-    sra       rd, rd, 16
-/* Clobbers rt_hi on pre-R2. */
-#define INSERT_HIGH_HALF(rd_lo, rt_hi) \
-    sll       rt_hi, rt_hi, 16; \
-    or        rd_lo, rt_hi
-#endif
-
-#ifdef FPU64
-#define MOVE_TO_FPU_HIGH(r, flo, fhi) \
-    mthc1     r, flo
-#else
-#define MOVE_TO_FPU_HIGH(r, flo, fhi) \
-    mtc1      r, fhi
-#endif
-
-#ifdef MIPS32REVGE6
-#define JR(rt) \
-    jic       rt, 0
-#define LSA(rd, rs, rt, sa) \
-    .if sa; \
-    lsa       rd, rs, rt, sa; \
-    .else; \
-    addu      rd, rs, rt; \
-    .endif
-#else
-#define JR(rt) \
-    jalr      zero, rt
-#define LSA(rd, rs, rt, sa) \
-    .if sa; \
-    .set      push; \
-    .set      noat; \
-    sll       AT, rs, sa; \
-    addu      rd, AT, rt; \
-    .set      pop; \
-    .else; \
-    addu      rd, rs, rt; \
-    .endif
-#endif
-
-/*
- * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs.  So,
- * to access other shadow frame fields, we need to use a backwards offset.  Define those here.
- */
-#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
-#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
-#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
-#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
-#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
-#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
-#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
-#define OFF_FP_SHADOWFRAME OFF_FP(0)
-
-#define MTERP_PROFILE_BRANCHES 1
-#define MTERP_LOGGING 0
-
-/*
- * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects.  Must
- * be done *before* something throws.
- *
- * It's okay to do this more than once.
- *
- * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
- * dex byte codes.  However, the rest of the runtime expects dex pc to be an instruction
- * offset into the code_items_[] array.  For efficiency, we will "export" the
- * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
- * to convert to a dex pc when needed.
- */
-#define EXPORT_PC() \
-    sw        rPC, OFF_FP_DEX_PC_PTR(rFP)
-
-#define EXPORT_DEX_PC(tmp) \
-    lw        tmp, OFF_FP_DEX_INSTRUCTIONS(rFP); \
-    sw        rPC, OFF_FP_DEX_PC_PTR(rFP); \
-    subu      tmp, rPC, tmp; \
-    sra       tmp, tmp, 1; \
-    sw        tmp, OFF_FP_DEX_PC(rFP)
-
-/*
- * Fetch the next instruction from rPC into rINST.  Does not advance rPC.
- */
-#define FETCH_INST() lhu rINST, (rPC)
-
-/*
- * Fetch the next instruction from the specified offset.  Advances rPC
- * to point to the next instruction.  "_count" is in 16-bit code units.
- *
- * This must come AFTER anything that can throw an exception, or the
- * exception catch may miss.  (This also implies that it must come after
- * EXPORT_PC().)
- */
-#define FETCH_ADVANCE_INST(_count) \
-    lhu       rINST, ((_count)*2)(rPC); \
-    addu      rPC, rPC, ((_count) * 2)
-
-/*
- * Similar to FETCH_ADVANCE_INST, but does not update rPC.  Used to load
- * rINST ahead of possible exception point.  Be sure to manually advance rPC
- * later.
- */
-#define PREFETCH_INST(_count) lhu rINST, ((_count)*2)(rPC)
-
-/* Advance rPC by some number of code units. */
-#define ADVANCE(_count) addu rPC, rPC, ((_count) * 2)
-
-/*
- * Fetch the next instruction from an offset specified by rd.  Updates
- * rPC to point to the next instruction.  "rd" must specify the distance
- * in bytes, *not* 16-bit code units, and may be a signed value.
- */
-#define FETCH_ADVANCE_INST_RB(rd) \
-    addu      rPC, rPC, rd; \
-    lhu       rINST, (rPC)
-
-/*
- * Fetch a half-word code unit from an offset past the current PC.  The
- * "_count" value is in 16-bit code units.  Does not advance rPC.
- *
- * The "_S" variant works the same but treats the value as signed.
- */
-#define FETCH(rd, _count) lhu rd, ((_count) * 2)(rPC)
-#define FETCH_S(rd, _count) lh rd, ((_count) * 2)(rPC)
-
-/*
- * Fetch one byte from an offset past the current PC.  Pass in the same
- * "_count" as you would for FETCH, and an additional 0/1 indicating which
- * byte of the halfword you want (lo/hi).
- */
-#define FETCH_B(rd, _count, _byte) lbu rd, ((_count) * 2 + _byte)(rPC)
-
-/*
- * Put the instruction's opcode field into the specified register.
- */
-#define GET_INST_OPCODE(rd) and rd, rINST, 0xFF
-
-/*
- * Transform opcode into branch target address.
- */
-#define GET_OPCODE_TARGET(rd) \
-    sll       rd, rd, ${handler_size_bits}; \
-    addu      rd, rIBASE, rd
-
-/*
- * Begin executing the opcode in rd.
- */
-#define GOTO_OPCODE(rd) \
-    GET_OPCODE_TARGET(rd); \
-    JR(rd)
-
-/*
- * Get/set the 32-bit value from a Dalvik register.
- */
-#define GET_VREG(rd, rix) LOAD_eas2(rd, rFP, rix)
-
-#define GET_VREG_F(rd, rix) \
-    .set noat; \
-    EAS2(AT, rFP, rix); \
-    l.s       rd, (AT); \
-    .set at
-
-#ifdef MIPS32REVGE6
-#define SET_VREG(rd, rix) \
-    lsa       t8, rix, rFP, 2; \
-    sw        rd, 0(t8); \
-    lsa       t8, rix, rREFS, 2; \
-    sw        zero, 0(t8)
-#else
-#define SET_VREG(rd, rix) \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    sw        rd, 0(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    sw        zero, 0(t8)
-#endif
-
-#ifdef MIPS32REVGE6
-#define SET_VREG_OBJECT(rd, rix) \
-    lsa       t8, rix, rFP, 2; \
-    sw        rd, 0(t8); \
-    lsa       t8, rix, rREFS, 2; \
-    sw        rd, 0(t8)
-#else
-#define SET_VREG_OBJECT(rd, rix) \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    sw        rd, 0(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    sw        rd, 0(t8)
-#endif
-
-#ifdef MIPS32REVGE6
-#define SET_VREG64(rlo, rhi, rix) \
-    lsa       t8, rix, rFP, 2; \
-    sw        rlo, 0(t8); \
-    sw        rhi, 4(t8); \
-    lsa       t8, rix, rREFS, 2; \
-    sw        zero, 0(t8); \
-    sw        zero, 4(t8)
-#else
-#define SET_VREG64(rlo, rhi, rix) \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    sw        rlo, 0(t8); \
-    sw        rhi, 4(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    sw        zero, 0(t8); \
-    sw        zero, 4(t8)
-#endif
-
-#ifdef MIPS32REVGE6
-#define SET_VREG_F(rd, rix) \
-    lsa       t8, rix, rFP, 2; \
-    s.s       rd, 0(t8); \
-    lsa       t8, rix, rREFS, 2; \
-    sw        zero, 0(t8)
-#else
-#define SET_VREG_F(rd, rix) \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    s.s       rd, 0(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    sw        zero, 0(t8)
-#endif
-
-#ifdef MIPS32REVGE6
-#define SET_VREG64_F(rlo, rhi, rix) \
-    lsa       t8, rix, rFP, 2; \
-    .set noat; \
-    mfhc1     AT, rlo; \
-    s.s       rlo, 0(t8); \
-    sw        AT, 4(t8); \
-    .set at; \
-    lsa       t8, rix, rREFS, 2; \
-    sw        zero, 0(t8); \
-    sw        zero, 4(t8)
-#elif defined(FPU64)
-#define SET_VREG64_F(rlo, rhi, rix) \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rREFS, AT; \
-    sw        zero, 0(t8); \
-    sw        zero, 4(t8); \
-    addu      t8, rFP, AT; \
-    mfhc1     AT, rlo; \
-    sw        AT, 4(t8); \
-    .set at; \
-    s.s       rlo, 0(t8)
-#else
-#define SET_VREG64_F(rlo, rhi, rix) \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    s.s       rlo, 0(t8); \
-    s.s       rhi, 4(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    sw        zero, 0(t8); \
-    sw        zero, 4(t8)
-#endif
-
-/* Combination of the SET_VREG and GOTO_OPCODE functions to save 1 instruction */
-#ifdef MIPS32REVGE6
-#define SET_VREG_GOTO(rd, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    lsa       t8, rix, rFP, 2; \
-    sw        rd, 0(t8); \
-    lsa       t8, rix, rREFS, 2; \
-    jalr      zero, dst; \
-    sw        zero, 0(t8); \
-    .set reorder
-#else
-#define SET_VREG_GOTO(rd, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    sw        rd, 0(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    jalr      zero, dst; \
-    sw        zero, 0(t8); \
-    .set reorder
-#endif
-
-/* Combination of the SET_VREG_OBJECT and GOTO_OPCODE functions to save 1 instruction */
-#ifdef MIPS32REVGE6
-#define SET_VREG_OBJECT_GOTO(rd, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    lsa       t8, rix, rFP, 2; \
-    sw        rd, 0(t8); \
-    lsa       t8, rix, rREFS, 2; \
-    jalr      zero, dst; \
-    sw        rd, 0(t8); \
-    .set reorder
-#else
-#define SET_VREG_OBJECT_GOTO(rd, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    sw        rd, 0(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    jalr      zero, dst; \
-    sw        rd, 0(t8); \
-    .set reorder
-#endif
-
-/* Combination of the SET_VREG64 and GOTO_OPCODE functions to save 1 instruction */
-#ifdef MIPS32REVGE6
-#define SET_VREG64_GOTO(rlo, rhi, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    lsa       t8, rix, rFP, 2; \
-    sw        rlo, 0(t8); \
-    sw        rhi, 4(t8); \
-    lsa       t8, rix, rREFS, 2; \
-    sw        zero, 0(t8); \
-    jalr      zero, dst; \
-    sw        zero, 4(t8); \
-    .set reorder
-#else
-#define SET_VREG64_GOTO(rlo, rhi, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    sw        rlo, 0(t8); \
-    sw        rhi, 4(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    sw        zero, 0(t8); \
-    jalr      zero, dst; \
-    sw        zero, 4(t8); \
-    .set reorder
-#endif
-
-/* Combination of the SET_VREG_F and GOTO_OPCODE functions to save 1 instruction */
-#ifdef MIPS32REVGE6
-#define SET_VREG_F_GOTO(rd, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    lsa       t8, rix, rFP, 2; \
-    s.s       rd, 0(t8); \
-    lsa       t8, rix, rREFS, 2; \
-    jalr      zero, dst; \
-    sw        zero, 0(t8); \
-    .set reorder
-#else
-#define SET_VREG_F_GOTO(rd, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    s.s       rd, 0(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    jalr      zero, dst; \
-    sw        zero, 0(t8); \
-    .set reorder
-#endif
-
-/* Combination of the SET_VREG64_F and GOTO_OPCODE functions to save 1 instruction */
-#ifdef MIPS32REVGE6
-#define SET_VREG64_F_GOTO(rlo, rhi, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    lsa       t8, rix, rFP, 2; \
-    .set noat; \
-    mfhc1     AT, rlo; \
-    s.s       rlo, 0(t8); \
-    sw        AT, 4(t8); \
-    .set at; \
-    lsa       t8, rix, rREFS, 2; \
-    sw        zero, 0(t8); \
-    jalr      zero, dst; \
-    sw        zero, 4(t8); \
-    .set reorder
-#elif defined(FPU64)
-#define SET_VREG64_F_GOTO(rlo, rhi, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rREFS, AT; \
-    sw        zero, 0(t8); \
-    sw        zero, 4(t8); \
-    addu      t8, rFP, AT; \
-    mfhc1     AT, rlo; \
-    sw        AT, 4(t8); \
-    .set at; \
-    jalr      zero, dst; \
-    s.s       rlo, 0(t8); \
-    .set reorder
-#else
-#define SET_VREG64_F_GOTO(rlo, rhi, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    s.s       rlo, 0(t8); \
-    s.s       rhi, 4(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    sw        zero, 0(t8); \
-    jalr      zero, dst; \
-    sw        zero, 4(t8); \
-    .set reorder
-#endif
-
-#define GET_OPA(rd) srl rd, rINST, 8
-#ifdef MIPS32REVGE2
-#define GET_OPA4(rd) ext rd, rINST, 8, 4
-#else
-#define GET_OPA4(rd) GET_OPA(rd); and rd, 0xf
-#endif
-#define GET_OPB(rd) srl rd, rINST, 12
-
-/*
- * Form an Effective Address rd = rbase + roff<<shift;
- * Uses reg AT on pre-R6.
- */
-#define EASN(rd, rbase, roff, shift) LSA(rd, roff, rbase, shift)
-
-#define EAS1(rd, rbase, roff) EASN(rd, rbase, roff, 1)
-#define EAS2(rd, rbase, roff) EASN(rd, rbase, roff, 2)
-#define EAS3(rd, rbase, roff) EASN(rd, rbase, roff, 3)
-#define EAS4(rd, rbase, roff) EASN(rd, rbase, roff, 4)
-
-#define LOAD_eas2(rd, rbase, roff) \
-    .set noat; \
-    EAS2(AT, rbase, roff); \
-    lw        rd, 0(AT); \
-    .set at
-
-#define STORE_eas2(rd, rbase, roff) \
-    .set noat; \
-    EAS2(AT, rbase, roff); \
-    sw        rd, 0(AT); \
-    .set at
-
-#define LOAD_RB_OFF(rd, rbase, off) lw rd, off(rbase)
-#define STORE_RB_OFF(rd, rbase, off) sw rd, off(rbase)
-
-#define STORE64_off(rlo, rhi, rbase, off) \
-    sw        rlo, off(rbase); \
-    sw        rhi, (off+4)(rbase)
-#define LOAD64_off(rlo, rhi, rbase, off) \
-    lw        rlo, off(rbase); \
-    lw        rhi, (off+4)(rbase)
-
-#define STORE64(rlo, rhi, rbase) STORE64_off(rlo, rhi, rbase, 0)
-#define LOAD64(rlo, rhi, rbase) LOAD64_off(rlo, rhi, rbase, 0)
-
-#ifdef FPU64
-#define STORE64_off_F(rlo, rhi, rbase, off) \
-    s.s       rlo, off(rbase); \
-    .set noat; \
-    mfhc1     AT, rlo; \
-    sw        AT, (off+4)(rbase); \
-    .set at
-#define LOAD64_off_F(rlo, rhi, rbase, off) \
-    l.s       rlo, off(rbase); \
-    .set noat; \
-    lw        AT, (off+4)(rbase); \
-    mthc1     AT, rlo; \
-    .set at
-#else
-#define STORE64_off_F(rlo, rhi, rbase, off) \
-    s.s       rlo, off(rbase); \
-    s.s       rhi, (off+4)(rbase)
-#define LOAD64_off_F(rlo, rhi, rbase, off) \
-    l.s       rlo, off(rbase); \
-    l.s       rhi, (off+4)(rbase)
-#endif
-
-#define STORE64_F(rlo, rhi, rbase) STORE64_off_F(rlo, rhi, rbase, 0)
-#define LOAD64_F(rlo, rhi, rbase) LOAD64_off_F(rlo, rhi, rbase, 0)
-
-#define LOAD_base_offMirrorArray_length(rd, rbase) LOAD_RB_OFF(rd, rbase, MIRROR_ARRAY_LENGTH_OFFSET)
-
-#define STACK_STORE(rd, off) sw rd, off(sp)
-#define STACK_LOAD(rd, off) lw rd, off(sp)
-#define CREATE_STACK(n) subu sp, sp, n
-#define DELETE_STACK(n) addu sp, sp, n
-
-#define LOAD_ADDR(dest, addr) la dest, addr
-#define LOAD_IMM(dest, imm) li dest, imm
-#define MOVE_REG(dest, src) move dest, src
-#define STACK_SIZE 128
-
-#define STACK_OFFSET_ARG04 16
-#define STACK_OFFSET_ARG05 20
-#define STACK_OFFSET_ARG06 24
-#define STACK_OFFSET_ARG07 28
-#define STACK_OFFSET_GP    84
-
-#define JAL(n) jal n
-#define BAL(n) bal n
-
-/*
- * FP register usage restrictions:
- * 1) We don't use the callee save FP registers so we don't have to save them.
- * 2) We don't use the odd FP registers so we can share code with mips32r6.
- */
-#define STACK_STORE_FULL() CREATE_STACK(STACK_SIZE); \
-    STACK_STORE(ra, 124); \
-    STACK_STORE(s8, 120); \
-    STACK_STORE(s0, 116); \
-    STACK_STORE(s1, 112); \
-    STACK_STORE(s2, 108); \
-    STACK_STORE(s3, 104); \
-    STACK_STORE(s4, 100); \
-    STACK_STORE(s5, 96); \
-    STACK_STORE(s6, 92); \
-    STACK_STORE(s7, 88);
-
-#define STACK_LOAD_FULL() STACK_LOAD(gp, STACK_OFFSET_GP); \
-    STACK_LOAD(s7, 88); \
-    STACK_LOAD(s6, 92); \
-    STACK_LOAD(s5, 96); \
-    STACK_LOAD(s4, 100); \
-    STACK_LOAD(s3, 104); \
-    STACK_LOAD(s2, 108); \
-    STACK_LOAD(s1, 112); \
-    STACK_LOAD(s0, 116); \
-    STACK_LOAD(s8, 120); \
-    STACK_LOAD(ra, 124); \
-    DELETE_STACK(STACK_SIZE)
-
-#define REFRESH_IBASE() \
-    lw        rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
-
-/* Constants for float/double_to_int/long conversions */
-#define INT_MIN                 0x80000000
-#define INT_MIN_AS_FLOAT        0xCF000000
-#define INT_MIN_AS_DOUBLE_HIGH  0xC1E00000
-#define LONG_MIN_HIGH           0x80000000
-#define LONG_MIN_AS_FLOAT       0xDF000000
-#define LONG_MIN_AS_DOUBLE_HIGH 0xC3E00000
-
-%def entry():
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/*
- * Interpreter entry point.
- */
-
-    .text
-    .align 2
-    .global ExecuteMterpImpl
-    .ent    ExecuteMterpImpl
-    .frame sp, STACK_SIZE, ra
-/*
- * On entry:
- *  a0  Thread* self
- *  a1  dex_instructions
- *  a2  ShadowFrame
- *  a3  JValue* result_register
- *
- */
-
-ExecuteMterpImpl:
-    .cfi_startproc
-    .set noreorder
-    .cpload t9
-    .set reorder
-/* Save to the stack. Frame size = STACK_SIZE */
-    STACK_STORE_FULL()
-/* This directive will make sure all subsequent jal restore gp at a known offset */
-    .cprestore STACK_OFFSET_GP
-
-    /* Remember the return register */
-    sw      a3, SHADOWFRAME_RESULT_REGISTER_OFFSET(a2)
-
-    /* Remember the dex instruction pointer */
-    sw      a1, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(a2)
-
-    /* set up "named" registers */
-    move    rSELF, a0
-    lw      a0, SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(a2)
-    addu    rFP, a2, SHADOWFRAME_VREGS_OFFSET     # point to vregs.
-    EAS2(rREFS, rFP, a0)                          # point to reference array in shadow frame
-    lw      a0, SHADOWFRAME_DEX_PC_OFFSET(a2)     # Get starting dex_pc
-    EAS1(rPC, a1, a0)                             # Create direct pointer to 1st dex opcode
-    CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
-
-    EXPORT_PC()
-
-    /* Starting ibase */
-    lw      rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
-
-    /* Set up for backwards branches & osr profiling */
-    lw      a0, OFF_FP_METHOD(rFP)
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rSELF
-    JAL(MterpSetUpHotnessCountdown)        # (method, shadow_frame, self)
-    move    rPROFILE, v0                   # Starting hotness countdown to rPROFILE
-
-    /* start executing the instruction at rPC */
-    FETCH_INST()                           # load rINST from rPC
-    GET_INST_OPCODE(t0)                    # extract opcode from rINST
-    GOTO_OPCODE(t0)                        # jump to next instruction
-    /* NOTE: no fallthrough */
-
-%def dchecks_before_helper():
-    // Call C++ to do debug checks and return to the handler using tail call.
-    .extern MterpCheckBefore
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-%def opcode_pre():
-%  add_helper(dchecks_before_helper, "mterp_dchecks_before_helper")
-    #if !defined(NDEBUG)
-    jal    SYMBOL(mterp_dchecks_before_helper)
-    #endif
-
-%def fallback():
-/* Transfer stub to alternate interpreter */
-    b    MterpFallback
-
-%def helpers():
-%  op_float_to_long_helper_code()
-%  op_double_to_long_helper_code()
-%  op_mul_long_helper_code()
-%  op_shl_long_helper_code()
-%  op_shr_long_helper_code()
-%  op_ushr_long_helper_code()
-%  op_shl_long_2addr_helper_code()
-%  op_shr_long_2addr_helper_code()
-%  op_ushr_long_2addr_helper_code()
-
-%def footer():
-/*
- * ===========================================================================
- *  Common subroutines and data
- * ===========================================================================
- */
-
-    .text
-    .align 2
-
-/*
- * We've detected a condition that will result in an exception, but the exception
- * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
- * TUNING: for consistency, we may want to just go ahead and handle these here.
- */
-common_errDivideByZero:
-    EXPORT_PC()
-#if MTERP_LOGGING
-    move  a0, rSELF
-    addu  a1, rFP, OFF_FP_SHADOWFRAME
-    JAL(MterpLogDivideByZeroException)
-#endif
-    b MterpCommonFallback
-
-common_errArrayIndex:
-    EXPORT_PC()
-#if MTERP_LOGGING
-    move  a0, rSELF
-    addu  a1, rFP, OFF_FP_SHADOWFRAME
-    JAL(MterpLogArrayIndexException)
-#endif
-    b MterpCommonFallback
-
-common_errNegativeArraySize:
-    EXPORT_PC()
-#if MTERP_LOGGING
-    move  a0, rSELF
-    addu  a1, rFP, OFF_FP_SHADOWFRAME
-    JAL(MterpLogNegativeArraySizeException)
-#endif
-    b MterpCommonFallback
-
-common_errNoSuchMethod:
-    EXPORT_PC()
-#if MTERP_LOGGING
-    move  a0, rSELF
-    addu  a1, rFP, OFF_FP_SHADOWFRAME
-    JAL(MterpLogNoSuchMethodException)
-#endif
-    b MterpCommonFallback
-
-common_errNullObject:
-    EXPORT_PC()
-#if MTERP_LOGGING
-    move  a0, rSELF
-    addu  a1, rFP, OFF_FP_SHADOWFRAME
-    JAL(MterpLogNullObjectException)
-#endif
-    b MterpCommonFallback
-
-common_exceptionThrown:
-    EXPORT_PC()
-#if MTERP_LOGGING
-    move  a0, rSELF
-    addu  a1, rFP, OFF_FP_SHADOWFRAME
-    JAL(MterpLogExceptionThrownException)
-#endif
-    b MterpCommonFallback
-
-MterpSuspendFallback:
-    EXPORT_PC()
-#if MTERP_LOGGING
-    move  a0, rSELF
-    addu  a1, rFP, OFF_FP_SHADOWFRAME
-    lw    a2, THREAD_FLAGS_OFFSET(rSELF)
-    JAL(MterpLogSuspendFallback)
-#endif
-    b MterpCommonFallback
-
-/*
- * If we're here, something is out of the ordinary.  If there is a pending
- * exception, handle it.  Otherwise, roll back and retry with the reference
- * interpreter.
- */
-MterpPossibleException:
-    lw      a0, THREAD_EXCEPTION_OFFSET(rSELF)
-    beqz    a0, MterpFallback          # If no pending exception, fall back to reference interpreter.
-    /* intentional fallthrough - handle pending exception. */
-/*
- * On return from a runtime helper routine, we've found a pending exception.
- * Can we handle it here, or do we need to bail out to the caller?
- *
- */
-MterpException:
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    JAL(MterpHandleException)                    # (self, shadow_frame)
-    beqz    v0, MterpExceptionReturn             # no local catch, back to caller.
-    lw      a0, OFF_FP_DEX_INSTRUCTIONS(rFP)
-    lw      a1, OFF_FP_DEX_PC(rFP)
-    lw      rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
-    EAS1(rPC, a0, a1)                            # generate new dex_pc_ptr
-    /* Do we need to switch interpreters? */
-    JAL(MterpShouldSwitchInterpreters)
-    bnez    v0, MterpFallback
-    /* resume execution at catch block */
-    EXPORT_PC()
-    FETCH_INST()
-    GET_INST_OPCODE(t0)
-    GOTO_OPCODE(t0)
-    /* NOTE: no fallthrough */
-
-/*
- * Common handling for branches with support for Jit profiling.
- * On entry:
- *    rINST          <= signed offset
- *    rPROFILE       <= signed hotness countdown (expanded to 32 bits)
- *
- * We have quite a few different cases for branch profiling, OSR detection and
- * suspend check support here.
- *
- * Taken backward branches:
- *    If profiling active, do hotness countdown and report if we hit zero.
- *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *    Is there a pending suspend request?  If so, suspend.
- *
- * Taken forward branches and not-taken backward branches:
- *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *
- * Our most common case is expected to be a taken backward branch with active jit profiling,
- * but no full OSR check and no pending suspend request.
- * Next most common case is not-taken branch with no full OSR check.
- */
-MterpCommonTakenBranchNoFlags:
-    bgtz    rINST, .L_forward_branch    # don't add forward branches to hotness
-/*
- * We need to subtract 1 from positive values and we should not see 0 here,
- * so we may use the result of the comparison with -1.
- */
-#if JIT_CHECK_OSR != -1
-#  error "JIT_CHECK_OSR must be -1."
-#endif
-    li      t0, JIT_CHECK_OSR
-    beq     rPROFILE, t0, .L_osr_check
-    blt     rPROFILE, t0, .L_resume_backward_branch
-    subu    rPROFILE, 1
-    beqz    rPROFILE, .L_add_batch      # counted down to zero - report
-.L_resume_backward_branch:
-    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
-    REFRESH_IBASE()
-    addu    a2, rINST, rINST            # a2<- byte offset
-    FETCH_ADVANCE_INST_RB(a2)           # update rPC, load rINST
-    and     ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    bnez    ra, .L_suspend_request_pending
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
-
-.L_suspend_request_pending:
-    EXPORT_PC()
-    move    a0, rSELF
-    JAL(MterpSuspendCheck)              # (self)
-    bnez    v0, MterpFallback
-    REFRESH_IBASE()                     # might have changed during suspend
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
-
-.L_no_count_backwards:
-    li      t0, JIT_CHECK_OSR           # check for possible OSR re-entry
-    bne     rPROFILE, t0, .L_resume_backward_branch
-.L_osr_check:
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rINST
-    EXPORT_PC()
-    JAL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
-    bnez    v0, MterpOnStackReplacement
-    b       .L_resume_backward_branch
-
-.L_forward_branch:
-    li      t0, JIT_CHECK_OSR           # check for possible OSR re-entry
-    beq     rPROFILE, t0, .L_check_osr_forward
-.L_resume_forward_branch:
-    add     a2, rINST, rINST            # a2<- byte offset
-    FETCH_ADVANCE_INST_RB(a2)           # update rPC, load rINST
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
-
-.L_check_osr_forward:
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rINST
-    EXPORT_PC()
-    JAL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
-    bnez    v0, MterpOnStackReplacement
-    b       .L_resume_forward_branch
-
-.L_add_batch:
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    sh      rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
-    lw      a0, OFF_FP_METHOD(rFP)
-    move    a2, rSELF
-    JAL(MterpAddHotnessBatch)           # (method, shadow_frame, self)
-    move    rPROFILE, v0                # restore new hotness countdown to rPROFILE
-    b       .L_no_count_backwards
-
-/*
- * Entered from the conditional branch handlers when OSR check request active on
- * not-taken path.  All Dalvik not-taken conditional branch offsets are 2.
- */
-.L_check_not_taken_osr:
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    li      a2, 2
-    EXPORT_PC()
-    JAL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
-    bnez    v0, MterpOnStackReplacement
-    FETCH_ADVANCE_INST(2)
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
-
-/*
- * On-stack replacement has happened, and now we've returned from the compiled method.
- */
-MterpOnStackReplacement:
-#if MTERP_LOGGING
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rINST
-    JAL(MterpLogOSR)
-#endif
-    li      v0, 1                       # Signal normal return
-    b       MterpDone
-
-/*
- * Bail out to reference interpreter.
- */
-MterpFallback:
-    EXPORT_PC()
-#if MTERP_LOGGING
-    move  a0, rSELF
-    addu  a1, rFP, OFF_FP_SHADOWFRAME
-    JAL(MterpLogFallback)
-#endif
-MterpCommonFallback:
-    move    v0, zero                    # signal retry with reference interpreter.
-    b       MterpDone
-/*
- * We pushed some registers on the stack in ExecuteMterpImpl, then saved
- * SP and RA.  Here we restore SP, restore the registers, and then return
- * via RA.
- *
- * On entry:
- *  uint32_t* rFP  (should still be live, pointer to base of vregs)
- */
-MterpExceptionReturn:
-    li      v0, 1                       # signal return to caller.
-    b       MterpDone
-MterpReturn:
-    lw      a2, OFF_FP_RESULT_REGISTER(rFP)
-    sw      v0, 0(a2)
-    sw      v1, 4(a2)
-    li      v0, 1                       # signal return to caller.
-MterpDone:
-/*
- * At this point, we expect rPROFILE to be non-zero.  If negative, hotness is disabled or we're
- * checking for OSR.  If greater than zero, we might have unreported hotness to register
- * (the difference between the ending rPROFILE and the cached hotness counter).  rPROFILE
- * should only reach zero immediately after a hotness decrement, and is then reset to either
- * a negative special state or the new non-zero countdown value.
- */
-    blez    rPROFILE, .L_pop_and_return # if > 0, we may have some counts to report.
-
-MterpProfileActive:
-    move    rINST, v0                   # stash return value
-    /* Report cached hotness counts */
-    lw      a0, OFF_FP_METHOD(rFP)
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rSELF
-    sh      rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
-    JAL(MterpAddHotnessBatch)           # (method, shadow_frame, self)
-    move    v0, rINST                   # restore return value
-
-.L_pop_and_return:
-/* Restore from the stack and return. Frame size = STACK_SIZE */
-    STACK_LOAD_FULL()
-    jalr    zero, ra
-
-    .cfi_endproc
-    .end ExecuteMterpImpl
-
-%def instruction_end():
-
-    .global artMterpAsmInstructionEnd
-artMterpAsmInstructionEnd:
-
-%def instruction_start():
-
-    .global artMterpAsmInstructionStart
-artMterpAsmInstructionStart = .L_op_nop
-    .text
-
-%def opcode_start():
-%  pass
-%def opcode_end():
-%  pass
-%def helper_start(name):
-    ENTRY ${name}
-%def helper_end(name):
-    END ${name}
diff --git a/runtime/interpreter/mterp/mips/object.S b/runtime/interpreter/mterp/mips/object.S
deleted file mode 100644
index a987789..0000000
--- a/runtime/interpreter/mterp/mips/object.S
+++ /dev/null
@@ -1,257 +0,0 @@
-%def field(helper=""):
-TODO
-
-%def op_check_cast():
-    /*
-     * Check to see if a cast from one class to another is allowed.
-     */
-    /* check-cast vAA, class@BBBB */
-    EXPORT_PC()
-    FETCH(a0, 1)                           #  a0 <- BBBB
-    GET_OPA(a1)                            #  a1 <- AA
-    EAS2(a1, rFP, a1)                      #  a1 <- &object
-    lw     a2, OFF_FP_METHOD(rFP)          #  a2 <- method
-    move   a3, rSELF                       #  a3 <- self
-    JAL(MterpCheckCast)                    #  v0 <- CheckCast(index, &obj, method, self)
-    PREFETCH_INST(2)
-    bnez   v0, MterpPossibleException
-    ADVANCE(2)
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-
-%def op_iget(is_object="0", helper="MterpIGetU32"):
-%  field(helper=helper)
-
-%def op_iget_boolean():
-%  op_iget(helper="MterpIGetU8")
-
-%def op_iget_boolean_quick():
-%  op_iget_quick(load="lbu")
-
-%def op_iget_byte():
-%  op_iget(helper="MterpIGetI8")
-
-%def op_iget_byte_quick():
-%  op_iget_quick(load="lb")
-
-%def op_iget_char():
-%  op_iget(helper="MterpIGetU16")
-
-%def op_iget_char_quick():
-%  op_iget_quick(load="lhu")
-
-%def op_iget_object():
-%  op_iget(is_object="1", helper="MterpIGetObj")
-
-%def op_iget_object_quick():
-    /* For: iget-object-quick */
-    /* op vA, vB, offset@CCCC */
-    GET_OPB(a2)                            #  a2 <- B
-    FETCH(a1, 1)                           #  a1 <- field byte offset
-    EXPORT_PC()
-    GET_VREG(a0, a2)                       #  a0 <- object we're operating on
-    JAL(artIGetObjectFromMterp)            #  v0 <- GetObj(obj, offset)
-    lw   a3, THREAD_EXCEPTION_OFFSET(rSELF)
-    GET_OPA4(a2)                           #  a2<- A+
-    PREFETCH_INST(2)                       #  load rINST
-    bnez a3, MterpPossibleException        #  bail out
-    ADVANCE(2)                             #  advance rPC
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_OBJECT_GOTO(v0, a2, t0)       #  fp[A] <- v0
-
-%def op_iget_quick(load="lw"):
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    /* op vA, vB, offset@CCCC */
-    GET_OPB(a2)                            #  a2 <- B
-    GET_VREG(a3, a2)                       #  a3 <- object we're operating on
-    FETCH(a1, 1)                           #  a1 <- field byte offset
-    GET_OPA4(a2)                           #  a2 <- A(+)
-    # check object for null
-    beqz      a3, common_errNullObject     #  object was null
-    addu      t0, a3, a1
-    $load     a0, 0(t0)                    #  a0 <- obj.field (8/16/32 bits)
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, a2, t0)              #  fp[A] <- a0
-
-%def op_iget_short():
-%  op_iget(helper="MterpIGetI16")
-
-%def op_iget_short_quick():
-%  op_iget_quick(load="lh")
-
-%def op_iget_wide():
-%  op_iget(helper="MterpIGetU64")
-
-%def op_iget_wide_quick():
-    /* iget-wide-quick vA, vB, offset@CCCC */
-    GET_OPB(a2)                            #  a2 <- B
-    GET_VREG(a3, a2)                       #  a3 <- object we're operating on
-    FETCH(a1, 1)                           #  a1 <- field byte offset
-    GET_OPA4(a2)                           #  a2 <- A(+)
-    # check object for null
-    beqz      a3, common_errNullObject     #  object was null
-    addu      t0, a3, a1                   #  t0 <- a3 + a1
-    LOAD64(a0, a1, t0)                     #  a0 <- obj.field (64 bits, aligned)
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, a2, t0)        #  fp[A] <- a0/a1
-
-%def op_instance_of():
-    /*
-     * Check to see if an object reference is an instance of a class.
-     *
-     * Most common situation is a non-null object, being compared against
-     * an already-resolved class.
-     */
-    /* instance-of vA, vB, class@CCCC */
-    EXPORT_PC()
-    FETCH(a0, 1)                           # a0 <- CCCC
-    GET_OPB(a1)                            # a1 <- B
-    EAS2(a1, rFP, a1)                      # a1 <- &object
-    lw    a2, OFF_FP_METHOD(rFP)           # a2 <- method
-    move  a3, rSELF                        # a3 <- self
-    GET_OPA4(rOBJ)                         # rOBJ <- A+
-    JAL(MterpInstanceOf)                   # v0 <- Mterp(index, &obj, method, self)
-    lw   a1, THREAD_EXCEPTION_OFFSET(rSELF)
-    PREFETCH_INST(2)                       # load rINST
-    bnez a1, MterpException
-    ADVANCE(2)                             # advance rPC
-    GET_INST_OPCODE(t0)                    # extract opcode from rINST
-    SET_VREG_GOTO(v0, rOBJ, t0)            # vA <- v0
-
-%def op_iput(is_object="0", helper="MterpIPutU32"):
-%  field(helper=helper)
-
-%def op_iput_boolean():
-%  op_iput(helper="MterpIPutU8")
-
-%def op_iput_boolean_quick():
-%  op_iput_quick(store="sb")
-
-%def op_iput_byte():
-%  op_iput(helper="MterpIPutI8")
-
-%def op_iput_byte_quick():
-%  op_iput_quick(store="sb")
-
-%def op_iput_char():
-%  op_iput(helper="MterpIPutU16")
-
-%def op_iput_char_quick():
-%  op_iput_quick(store="sh")
-
-%def op_iput_object():
-%  op_iput(is_object="1", helper="MterpIPutObj")
-
-%def op_iput_object_quick():
-    /* For: iput-object-quick */
-    /* op vA, vB, offset@CCCC */
-    EXPORT_PC()
-    addu   a0, rFP, OFF_FP_SHADOWFRAME
-    move   a1, rPC
-    move   a2, rINST
-    JAL(MterpIputObjectQuick)
-    beqz   v0, MterpException
-    FETCH_ADVANCE_INST(2)               # advance rPC, load rINST
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
-
-%def op_iput_quick(store="sw"):
-    /* For: iput-quick, iput-object-quick */
-    /* op vA, vB, offset@CCCC */
-    GET_OPB(a2)                            #  a2 <- B
-    GET_VREG(a3, a2)                       #  a3 <- fp[B], the object pointer
-    FETCH(a1, 1)                           #  a1 <- field byte offset
-    GET_OPA4(a2)                           #  a2 <- A(+)
-    beqz      a3, common_errNullObject     #  object was null
-    GET_VREG(a0, a2)                       #  a0 <- fp[A]
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    addu      t0, a3, a1
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    GET_OPCODE_TARGET(t1)
-    $store    a0, 0(t0)                    #  obj.field (8/16/32 bits) <- a0
-    JR(t1)                                 #  jump to next instruction
-
-%def op_iput_short():
-%  op_iput(helper="MterpIPutI16")
-
-%def op_iput_short_quick():
-%  op_iput_quick(store="sh")
-
-%def op_iput_wide():
-%  op_iput(helper="MterpIPutU64")
-
-%def op_iput_wide_quick():
-    /* iput-wide-quick vA, vB, offset@CCCC */
-    GET_OPA4(a0)                           #  a0 <- A(+)
-    GET_OPB(a1)                            #  a1 <- B
-    GET_VREG(a2, a1)                       #  a2 <- fp[B], the object pointer
-    # check object for null
-    beqz      a2, common_errNullObject     #  object was null
-    EAS2(a3, rFP, a0)                      #  a3 <- &fp[A]
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[A]
-    FETCH(a3, 1)                           #  a3 <- field byte offset
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    addu      a2, a2, a3                   #  a2 <- obj + field byte offset
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GET_OPCODE_TARGET(t0)
-    STORE64(a0, a1, a2)                    #  obj.field (64 bits, aligned) <- a0/a1
-    JR(t0)                                 #  jump to next instruction
-
-%def op_new_instance():
-    /*
-     * Create a new instance of a class.
-     */
-    /* new-instance vAA, class@BBBB */
-    EXPORT_PC()
-    addu   a0, rFP, OFF_FP_SHADOWFRAME
-    move   a1, rSELF
-    move   a2, rINST
-    JAL(MterpNewInstance)
-    beqz   v0, MterpPossibleException
-    FETCH_ADVANCE_INST(2)               # advance rPC, load rINST
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
-
-%def op_sget(is_object="0", helper="MterpSGetU32"):
-%  field(helper=helper)
-
-%def op_sget_boolean():
-%  op_sget(helper="MterpSGetU8")
-
-%def op_sget_byte():
-%  op_sget(helper="MterpSGetI8")
-
-%def op_sget_char():
-%  op_sget(helper="MterpSGetU16")
-
-%def op_sget_object():
-%  op_sget(is_object="1", helper="MterpSGetObj")
-
-%def op_sget_short():
-%  op_sget(helper="MterpSGetI16")
-
-%def op_sget_wide():
-%  op_sget(helper="MterpSGetU64")
-
-%def op_sput(is_object="0", helper="MterpSPutU32"):
-%  field(helper=helper)
-
-%def op_sput_boolean():
-%  op_sput(helper="MterpSPutU8")
-
-%def op_sput_byte():
-%  op_sput(helper="MterpSPutI8")
-
-%def op_sput_char():
-%  op_sput(helper="MterpSPutU16")
-
-%def op_sput_object():
-%  op_sput(is_object="1", helper="MterpSPutObj")
-
-%def op_sput_short():
-%  op_sput(helper="MterpSPutI16")
-
-%def op_sput_wide():
-%  op_sput(helper="MterpSPutU64")
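
The primitive iget-quick/iput-quick handlers removed above (the object variants call out to the runtime instead) all reduce to the same pattern: null-check the receiver in vB, then load or store the value at the byte offset embedded in the instruction, writing the result into vA. A minimal C sketch of that semantics, with illustrative names rather than ART types:

    #include <stdint.h>
    #include <string.h>

    /* Illustrative only: vregs[] stands in for the shadow-frame registers. */
    static int iget_quick_32(uint32_t *vregs, int vA, int vB, uint16_t offset) {
        uint8_t *obj = (uint8_t *)(uintptr_t)vregs[vB];   /* receiver from vB */
        if (obj == NULL) {
            return -1;                                    /* common_errNullObject path */
        }
        uint32_t value;
        memcpy(&value, obj + offset, sizeof(value));      /* load obj.field at byte offset */
        vregs[vA] = value;                                /* vA <- field */
        return 0;
    }
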
diff --git a/runtime/interpreter/mterp/mips/other.S b/runtime/interpreter/mterp/mips/other.S
deleted file mode 100644
index 5002329..0000000
--- a/runtime/interpreter/mterp/mips/other.S
+++ /dev/null
@@ -1,345 +0,0 @@
-%def const(helper="UndefinedConstHandler"):
-    /* const/class vAA, type@BBBB */
-    /* const/method-handle vAA, method_handle@BBBB */
-    /* const/method-type vAA, proto@BBBB */
-    /* const/string vAA, string@BBBB */
-    .extern $helper
-    EXPORT_PC()
-    FETCH(a0, 1)                        # a0 <- BBBB
-    GET_OPA(a1)                         # a1 <- AA
-    addu   a2, rFP, OFF_FP_SHADOWFRAME  # a2 <- shadow frame
-    move   a3, rSELF
-    JAL($helper)                        # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
-    PREFETCH_INST(2)                    # load rINST
-    bnez   v0, MterpPossibleException
-    ADVANCE(2)                          # advance rPC
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
-
-%def unused():
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-%def op_const():
-    /* const vAA, +BBBBbbbb */
-    GET_OPA(a3)                            #  a3 <- AA
-    FETCH(a0, 1)                           #  a0 <- bbbb (low)
-    FETCH(a1, 2)                           #  a1 <- BBBB (high)
-    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
-    INSERT_HIGH_HALF(a0, a1)               #  a0 <- BBBBbbbb
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, a3, t0)              #  vAA <- a0
-
-%def op_const_16():
-    /* const/16 vAA, +BBBB */
-    FETCH_S(a0, 1)                         #  a0 <- ssssBBBB (sign-extended)
-    GET_OPA(a3)                            #  a3 <- AA
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, a3, t0)              #  vAA <- a0
-
-%def op_const_4():
-    /* const/4 vA, +B */
-    sll       a1, rINST, 16                #  a1 <- Bxxx0000
-    GET_OPA(a0)                            #  a0 <- A+
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    sra       a1, a1, 28                   #  a1 <- sssssssB (sign-extended)
-    and       a0, a0, 15
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a1, a0, t0)              #  fp[A] <- a1
-
-%def op_const_class():
-%  const(helper="MterpConstClass")
-
-%def op_const_high16():
-    /* const/high16 vAA, +BBBB0000 */
-    FETCH(a0, 1)                           #  a0 <- 0000BBBB (zero-extended)
-    GET_OPA(a3)                            #  a3 <- AA
-    sll       a0, a0, 16                   #  a0 <- BBBB0000
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, a3, t0)              #  vAA <- a0
-
-%def op_const_method_handle():
-%  const(helper="MterpConstMethodHandle")
-
-%def op_const_method_type():
-%  const(helper="MterpConstMethodType")
-
-%def op_const_string():
-%  const(helper="MterpConstString")
-
-%def op_const_string_jumbo():
-    /* const/string vAA, string@BBBBBBBB */
-    EXPORT_PC()
-    FETCH(a0, 1)                        # a0 <- bbbb (low)
-    FETCH(a2, 2)                        # a2 <- BBBB (high)
-    GET_OPA(a1)                         # a1 <- AA
-    INSERT_HIGH_HALF(a0, a2)            # a0 <- BBBBbbbb
-    addu   a2, rFP, OFF_FP_SHADOWFRAME  # a2 <- shadow frame
-    move   a3, rSELF
-    JAL(MterpConstString)               # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
-    PREFETCH_INST(3)                    # load rINST
-    bnez   v0, MterpPossibleException
-    ADVANCE(3)                          # advance rPC
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
-
-%def op_const_wide():
-    /* const-wide vAA, +HHHHhhhhBBBBbbbb */
-    FETCH(a0, 1)                           #  a0 <- bbbb (low)
-    FETCH(a1, 2)                           #  a1 <- BBBB (low middle)
-    FETCH(a2, 3)                           #  a2 <- hhhh (high middle)
-    INSERT_HIGH_HALF(a0, a1)               #  a0 <- BBBBbbbb (low word)
-    FETCH(a3, 4)                           #  a3 <- HHHH (high)
-    GET_OPA(t1)                            #  t1 <- AA
-    INSERT_HIGH_HALF(a2, a3)               #  a2 <- HHHHhhhh (high word)
-    FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a2, t1, t0)        #  vAA/vAA+1 <- a0/a2
-
-%def op_const_wide_16():
-    /* const-wide/16 vAA, +BBBB */
-    FETCH_S(a0, 1)                         #  a0 <- ssssBBBB (sign-extended)
-    GET_OPA(a3)                            #  a3 <- AA
-    sra       a1, a0, 31                   #  a1 <- ssssssss
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, a3, t0)        #  vAA/vAA+1 <- a0/a1
-
-%def op_const_wide_32():
-    /* const-wide/32 vAA, +BBBBbbbb */
-    FETCH(a0, 1)                           #  a0 <- 0000bbbb (low)
-    GET_OPA(a3)                            #  a3 <- AA
-    FETCH_S(a2, 2)                         #  a2 <- ssssBBBB (high)
-    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
-    INSERT_HIGH_HALF(a0, a2)               #  a0 <- BBBBbbbb
-    sra       a1, a0, 31                   #  a1 <- ssssssss
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, a3, t0)        #  vAA/vAA+1 <- a0/a1
-
-%def op_const_wide_high16():
-    /* const-wide/high16 vAA, +BBBB000000000000 */
-    FETCH(a1, 1)                           #  a1 <- 0000BBBB (zero-extended)
-    GET_OPA(a3)                            #  a3 <- AA
-    li        a0, 0                        #  a0 <- 00000000
-    sll       a1, 16                       #  a1 <- BBBB0000
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, a3, t0)        #  vAA/vAA+1 <- a0/a1
-
-%def op_monitor_enter():
-    /*
-     * Synchronize on an object.
-     */
-    /* monitor-enter vAA */
-    EXPORT_PC()
-    GET_OPA(a2)                            # a2 <- AA
-    GET_VREG(a0, a2)                       # a0 <- vAA (object)
-    move   a1, rSELF                       # a1 <- self
-    JAL(artLockObjectFromCode)             # v0 <- artLockObject(obj, self)
-    bnez v0, MterpException
-    FETCH_ADVANCE_INST(1)                  # advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    # extract opcode from rINST
-    GOTO_OPCODE(t0)                        # jump to next instruction
-
-%def op_monitor_exit():
-    /*
-     * Unlock an object.
-     *
-     * Exceptions that occur when unlocking a monitor need to appear as
-     * if they happened at the following instruction.  See the Dalvik
-     * instruction spec.
-     */
-    /* monitor-exit vAA */
-    EXPORT_PC()
-    GET_OPA(a2)                            # a2 <- AA
-    GET_VREG(a0, a2)                       # a0 <- vAA (object)
-    move   a1, rSELF                       # a1 <- self
-    JAL(artUnlockObjectFromCode)           # v0 <- artUnlockObject(obj, self)
-    bnez v0, MterpException
-    FETCH_ADVANCE_INST(1)                  # advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    # extract opcode from rINST
-    GOTO_OPCODE(t0)                        # jump to next instruction
-
-%def op_move(is_object="0"):
-    /* for move, move-object, long-to-int */
-    /* op vA, vB */
-    GET_OPB(a1)                            #  a1 <- B from 15:12
-    GET_OPA4(a0)                           #  a0 <- A from 11:8
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    GET_VREG(a2, a1)                       #  a2 <- fp[B]
-    GET_INST_OPCODE(t0)                    #  t0 <- opcode from rINST
-    .if $is_object
-    SET_VREG_OBJECT_GOTO(a2, a0, t0)       #  fp[A] <- a2
-    .else
-    SET_VREG_GOTO(a2, a0, t0)              #  fp[A] <- a2
-    .endif
-
-%def op_move_16(is_object="0"):
-    /* for: move/16, move-object/16 */
-    /* op vAAAA, vBBBB */
-    FETCH(a1, 2)                           #  a1 <- BBBB
-    FETCH(a0, 1)                           #  a0 <- AAAA
-    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
-    GET_VREG(a2, a1)                       #  a2 <- fp[BBBB]
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    .if $is_object
-    SET_VREG_OBJECT_GOTO(a2, a0, t0)       #  fp[AAAA] <- a2
-    .else
-    SET_VREG_GOTO(a2, a0, t0)              #  fp[AAAA] <- a2
-    .endif
-
-%def op_move_exception():
-    /* move-exception vAA */
-    GET_OPA(a2)                                 #  a2 <- AA
-    lw    a3, THREAD_EXCEPTION_OFFSET(rSELF)    #  get exception obj
-    FETCH_ADVANCE_INST(1)                       #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
-    GET_OPCODE_TARGET(t0)
-    SET_VREG_OBJECT(a3, a2)                     #  fp[AA] <- exception obj
-    sw    zero, THREAD_EXCEPTION_OFFSET(rSELF)  #  clear exception
-    JR(t0)                                      #  jump to next instruction
-
-%def op_move_from16(is_object="0"):
-    /* for: move/from16, move-object/from16 */
-    /* op vAA, vBBBB */
-    FETCH(a1, 1)                           #  a1 <- BBBB
-    GET_OPA(a0)                            #  a0 <- AA
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_VREG(a2, a1)                       #  a2 <- fp[BBBB]
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    .if $is_object
-    SET_VREG_OBJECT_GOTO(a2, a0, t0)       #  fp[AA] <- a2
-    .else
-    SET_VREG_GOTO(a2, a0, t0)              #  fp[AA] <- a2
-    .endif
-
-%def op_move_object():
-%  op_move(is_object="1")
-
-%def op_move_object_16():
-%  op_move_16(is_object="1")
-
-%def op_move_object_from16():
-%  op_move_from16(is_object="1")
-
-%def op_move_result(is_object="0"):
-    /* for: move-result, move-result-object */
-    /* op vAA */
-    GET_OPA(a2)                            #  a2 <- AA
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    lw    a0, OFF_FP_RESULT_REGISTER(rFP)  #  get pointer to result JType
-    lw    a0, 0(a0)                        #  a0 <- result.i
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    .if $is_object
-    SET_VREG_OBJECT_GOTO(a0, a2, t0)       #  fp[AA] <- a0
-    .else
-    SET_VREG_GOTO(a0, a2, t0)              #  fp[AA] <- a0
-    .endif
-
-%def op_move_result_object():
-%  op_move_result(is_object="1")
-
-%def op_move_result_wide():
-    /* move-result-wide vAA */
-    GET_OPA(a2)                            #  a2 <- AA
-    lw    a3, OFF_FP_RESULT_REGISTER(rFP)  #  get pointer to result JType
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- retval.j
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, a2, t0)        #  fp[AA] <- a0/a1
-
-%def op_move_wide():
-    /* move-wide vA, vB */
-    /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
-    GET_OPA4(a2)                           #  a2 <- A(+)
-    GET_OPB(a3)                            #  a3 <- B
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[B]
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, a2, t0)        #  fp[A] <- a0/a1
-
-%def op_move_wide_16():
-    /* move-wide/16 vAAAA, vBBBB */
-    /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
-    FETCH(a3, 2)                           #  a3 <- BBBB
-    FETCH(a2, 1)                           #  a2 <- AAAA
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BBBB]
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[BBBB]
-    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, a2, t0)        #  fp[AAAA] <- a0/a1
-
-%def op_move_wide_from16():
-    /* move-wide/from16 vAA, vBBBB */
-    /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
-    FETCH(a3, 1)                           #  a3 <- BBBB
-    GET_OPA(a2)                            #  a2 <- AA
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BBBB]
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[BBBB]
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, a2, t0)        #  fp[AA] <- a0/a1
-
-%def op_nop():
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-
-%def op_unused_3e():
-%  unused()
-
-%def op_unused_3f():
-%  unused()
-
-%def op_unused_40():
-%  unused()
-
-%def op_unused_41():
-%  unused()
-
-%def op_unused_42():
-%  unused()
-
-%def op_unused_43():
-%  unused()
-
-%def op_unused_73():
-%  unused()
-
-%def op_unused_79():
-%  unused()
-
-%def op_unused_7a():
-%  unused()
-
-%def op_unused_f3():
-%  unused()
-
-%def op_unused_f4():
-%  unused()
-
-%def op_unused_f5():
-%  unused()
-
-%def op_unused_f6():
-%  unused()
-
-%def op_unused_f7():
-%  unused()
-
-%def op_unused_f8():
-%  unused()
-
-%def op_unused_f9():
-%  unused()
-
-%def op_unused_fc():
-%  unused()
-
-%def op_unused_fd():
-%  unused()
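
Of the handlers removed above, the plain const variants (const, const/16, const-wide, ...) assemble their immediates from consecutive 16-bit code units, while const/class, const/string and the method-handle/type variants call runtime helpers through $helper. As one example, const-wide builds its 64-bit literal from the four halfwords fetched at code units 1..4; a small C sketch of that assembly step (hypothetical helper name):

    #include <stdint.h>

    /* code_units points at the const-wide instruction; units 1..4 hold
       bbbb, BBBB, hhhh, HHHH from low to high, as in the handler above. */
    static int64_t decode_const_wide(const uint16_t *code_units) {
        uint64_t lo = (uint64_t)code_units[1] | ((uint64_t)code_units[2] << 16);
        uint64_t hi = (uint64_t)code_units[3] | ((uint64_t)code_units[4] << 16);
        return (int64_t)(lo | (hi << 32));    /* vAA/vAA+1 <- HHHHhhhhBBBBbbbb */
    }
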
diff --git a/runtime/interpreter/mterp/mips64/arithmetic.S b/runtime/interpreter/mterp/mips64/arithmetic.S
deleted file mode 100644
index 0b03e02..0000000
--- a/runtime/interpreter/mterp/mips64/arithmetic.S
+++ /dev/null
@@ -1,458 +0,0 @@
-%def binop(preinstr="", result="a0", chkzero="0", instr=""):
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG a0, a2                     # a0 <- vBB
-    GET_VREG a1, a3                     # a1 <- vCC
-    .if $chkzero
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    $preinstr                           # optional op
-    $instr                              # $result <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG $result, a4                # vAA <- $result
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def binop2addr(preinstr="", result="a0", chkzero="0", instr=""):
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG a0, a2                     # a0 <- vA
-    GET_VREG a1, a3                     # a1 <- vB
-    .if $chkzero
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    $preinstr                           # optional op
-    $instr                              # $result <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG $result, a2                # vA <- $result
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def binopLit16(preinstr="", result="a0", chkzero="0", instr=""):
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * CCCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, #+CCCC */
-    lh      a1, 2(rPC)                  # a1 <- sign-extended CCCC
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG a0, a3                     # a0 <- vB
-    .if $chkzero
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    $preinstr                           # optional op
-    $instr                              # $result <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG $result, a2                # vA <- $result
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-%def binopLit8(preinstr="", result="a0", chkzero="0", instr=""):
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * CC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    lbu     a3, 2(rPC)                  # a3 <- BB
-    lb      a1, 3(rPC)                  # a1 <- sign-extended CC
-    srl     a2, rINST, 8                # a2 <- AA
-    GET_VREG a0, a3                     # a0 <- vBB
-    .if $chkzero
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    $preinstr                           # optional op
-    $instr                              # $result <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG $result, a2                # vAA <- $result
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-%def binopWide(preinstr="", result="a0", chkzero="0", instr=""):
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
-     *      xor-long, shl-long, shr-long, ushr-long
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_WIDE a0, a2                # a0 <- vBB
-    GET_VREG_WIDE a1, a3                # a1 <- vCC
-    .if $chkzero
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    $preinstr                           # optional op
-    $instr                              # $result <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE $result, a4           # vAA <- $result
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def binopWide2addr(preinstr="", result="a0", chkzero="0", instr=""):
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
-     *      rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
-     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_WIDE a0, a2                # a0 <- vA
-    GET_VREG_WIDE a1, a3                # a1 <- vB
-    .if $chkzero
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    $preinstr                           # optional op
-    $instr                              # $result <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE $result, a2           # vA <- $result
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def unop(preinstr="", instr=""):
-    /*
-     * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "a0 = op a0".
-     *
-     * for: int-to-byte, int-to-char, int-to-short,
-     *      not-int, neg-int
-     */
-    /* unop vA, vB */
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG a0, a3                     # a0 <- vB
-    ext     a2, rINST, 8, 4             # a2 <- A
-    $preinstr                           # optional op
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    $instr                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                     # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def unopWide(preinstr="", instr=""):
-    /*
-     * Generic 64-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "a0 = op a0".
-     *
-     * For: not-long, neg-long
-     */
-    /* unop vA, vB */
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_WIDE a0, a3                # a0 <- vB
-    ext     a2, rINST, 8, 4             # a2 <- A
-    $preinstr                           # optional op
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    $instr                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2                # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_add_int():
-%  binop(instr="addu a0, a0, a1")
-
-%def op_add_int_2addr():
-%  binop2addr(instr="addu a0, a0, a1")
-
-%def op_add_int_lit16():
-%  binopLit16(instr="addu a0, a0, a1")
-
-%def op_add_int_lit8():
-%  binopLit8(instr="addu a0, a0, a1")
-
-%def op_add_long():
-%  binopWide(instr="daddu a0, a0, a1")
-
-%def op_add_long_2addr():
-%  binopWide2addr(instr="daddu a0, a0, a1")
-
-%def op_and_int():
-%  binop(instr="and a0, a0, a1")
-
-%def op_and_int_2addr():
-%  binop2addr(instr="and a0, a0, a1")
-
-%def op_and_int_lit16():
-%  binopLit16(instr="and a0, a0, a1")
-
-%def op_and_int_lit8():
-%  binopLit8(instr="and a0, a0, a1")
-
-%def op_and_long():
-%  binopWide(instr="and a0, a0, a1")
-
-%def op_and_long_2addr():
-%  binopWide2addr(instr="and a0, a0, a1")
-
-%def op_cmp_long():
-    /* cmp-long vAA, vBB, vCC */
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    srl     a4, rINST, 8                # a4 <- AA
-    GET_VREG_WIDE a0, a2                # a0 <- vBB
-    GET_VREG_WIDE a1, a3                # a1 <- vCC
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    slt     a2, a0, a1
-    slt     a0, a1, a0
-    subu    a0, a0, a2
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a4                     # vAA <- result
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_div_int():
-%  binop(instr="div a0, a0, a1", chkzero="1")
-
-%def op_div_int_2addr():
-%  binop2addr(instr="div a0, a0, a1", chkzero="1")
-
-%def op_div_int_lit16():
-%  binopLit16(instr="div a0, a0, a1", chkzero="1")
-
-%def op_div_int_lit8():
-%  binopLit8(instr="div a0, a0, a1", chkzero="1")
-
-%def op_div_long():
-%  binopWide(instr="ddiv a0, a0, a1", chkzero="1")
-
-%def op_div_long_2addr():
-%  binopWide2addr(instr="ddiv a0, a0, a1", chkzero="1")
-
-%def op_int_to_byte():
-%  unop(instr="seb     a0, a0")
-
-%def op_int_to_char():
-%  unop(instr="and     a0, a0, 0xffff")
-
-%def op_int_to_long():
-    /* int-to-long vA, vB */
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG a0, a3                     # a0 <- vB (sign-extended to 64 bits)
-    ext     a2, rINST, 8, 4             # a2 <- A
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2                # vA <- vB
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_int_to_short():
-%  unop(instr="seh     a0, a0")
-
-%def op_long_to_int():
-/* we ignore the high word, making this equivalent to a 32-bit reg move */
-%  op_move()
-
-%def op_mul_int():
-%  binop(instr="mul a0, a0, a1")
-
-%def op_mul_int_2addr():
-%  binop2addr(instr="mul a0, a0, a1")
-
-%def op_mul_int_lit16():
-%  binopLit16(instr="mul a0, a0, a1")
-
-%def op_mul_int_lit8():
-%  binopLit8(instr="mul a0, a0, a1")
-
-%def op_mul_long():
-%  binopWide(instr="dmul a0, a0, a1")
-
-%def op_mul_long_2addr():
-%  binopWide2addr(instr="dmul a0, a0, a1")
-
-%def op_neg_int():
-%  unop(instr="subu    a0, zero, a0")
-
-%def op_neg_long():
-%  unopWide(instr="dsubu   a0, zero, a0")
-
-%def op_not_int():
-%  unop(instr="nor     a0, zero, a0")
-
-%def op_not_long():
-%  unopWide(instr="nor     a0, zero, a0")
-
-%def op_or_int():
-%  binop(instr="or a0, a0, a1")
-
-%def op_or_int_2addr():
-%  binop2addr(instr="or a0, a0, a1")
-
-%def op_or_int_lit16():
-%  binopLit16(instr="or a0, a0, a1")
-
-%def op_or_int_lit8():
-%  binopLit8(instr="or a0, a0, a1")
-
-%def op_or_long():
-%  binopWide(instr="or a0, a0, a1")
-
-%def op_or_long_2addr():
-%  binopWide2addr(instr="or a0, a0, a1")
-
-%def op_rem_int():
-%  binop(instr="mod a0, a0, a1", chkzero="1")
-
-%def op_rem_int_2addr():
-%  binop2addr(instr="mod a0, a0, a1", chkzero="1")
-
-%def op_rem_int_lit16():
-%  binopLit16(instr="mod a0, a0, a1", chkzero="1")
-
-%def op_rem_int_lit8():
-%  binopLit8(instr="mod a0, a0, a1", chkzero="1")
-
-%def op_rem_long():
-%  binopWide(instr="dmod a0, a0, a1", chkzero="1")
-
-%def op_rem_long_2addr():
-%  binopWide2addr(instr="dmod a0, a0, a1", chkzero="1")
-
-%def op_rsub_int():
-%  binopLit16(instr="subu a0, a1, a0")
-
-%def op_rsub_int_lit8():
-%  binopLit8(instr="subu a0, a1, a0")
-
-%def op_shl_int():
-%  binop(instr="sll a0, a0, a1")
-
-%def op_shl_int_2addr():
-%  binop2addr(instr="sll a0, a0, a1")
-
-%def op_shl_int_lit8():
-%  binopLit8(instr="sll a0, a0, a1")
-
-%def op_shl_long():
-%  binopWide(instr="dsll a0, a0, a1")
-
-%def op_shl_long_2addr():
-%  binopWide2addr(instr="dsll a0, a0, a1")
-
-%def op_shr_int():
-%  binop(instr="sra a0, a0, a1")
-
-%def op_shr_int_2addr():
-%  binop2addr(instr="sra a0, a0, a1")
-
-%def op_shr_int_lit8():
-%  binopLit8(instr="sra a0, a0, a1")
-
-%def op_shr_long():
-%  binopWide(instr="dsra a0, a0, a1")
-
-%def op_shr_long_2addr():
-%  binopWide2addr(instr="dsra a0, a0, a1")
-
-%def op_sub_int():
-%  binop(instr="subu a0, a0, a1")
-
-%def op_sub_int_2addr():
-%  binop2addr(instr="subu a0, a0, a1")
-
-%def op_sub_long():
-%  binopWide(instr="dsubu a0, a0, a1")
-
-%def op_sub_long_2addr():
-%  binopWide2addr(instr="dsubu a0, a0, a1")
-
-%def op_ushr_int():
-%  binop(instr="srl a0, a0, a1")
-
-%def op_ushr_int_2addr():
-%  binop2addr(instr="srl a0, a0, a1")
-
-%def op_ushr_int_lit8():
-%  binopLit8(instr="srl a0, a0, a1")
-
-%def op_ushr_long():
-%  binopWide(instr="dsrl a0, a0, a1")
-
-%def op_ushr_long_2addr():
-%  binopWide2addr(instr="dsrl a0, a0, a1")
-
-%def op_xor_int():
-%  binop(instr="xor a0, a0, a1")
-
-%def op_xor_int_2addr():
-%  binop2addr(instr="xor a0, a0, a1")
-
-%def op_xor_int_lit16():
-%  binopLit16(instr="xor a0, a0, a1")
-
-%def op_xor_int_lit8():
-%  binopLit8(instr="xor a0, a0, a1")
-
-%def op_xor_long():
-%  binopWide(instr="xor a0, a0, a1")
-
-%def op_xor_long_2addr():
-%  binopWide2addr(instr="xor a0, a0, a1")
diff --git a/runtime/interpreter/mterp/mips64/array.S b/runtime/interpreter/mterp/mips64/array.S
deleted file mode 100644
index 9d97f0a..0000000
--- a/runtime/interpreter/mterp/mips64/array.S
+++ /dev/null
@@ -1,241 +0,0 @@
-%def op_aget(load="lw", shift="2", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET"):
-    /*
-     * Array get, 32 bits or less.  vAA <- vBB[vCC].
-     *
-     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
-     *
-     * NOTE: assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    srl     a4, rINST, 8                # a4 <- AA
-    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
-    GET_VREG a1, a3                     # a1 <- vCC (requested index)
-    beqz    a0, common_errNullObject    # bail if null array object
-    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
-    .if $shift
-    # [d]lsa does not support shift count of 0.
-    dlsa    a0, a1, a0, $shift          # a0 <- arrayObj + index*width
-    .else
-    daddu   a0, a1, a0                  # a0 <- arrayObj + index*width
-    .endif
-    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    $load   a2, $data_offset(a0)        # a2 <- vBB[vCC]
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a2, a4                     # vAA <- a2
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_aget_boolean():
-%  op_aget(load="lbu", shift="0", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
-
-%def op_aget_byte():
-%  op_aget(load="lb", shift="0", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
-
-%def op_aget_char():
-%  op_aget(load="lhu", shift="1", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
-
-%def op_aget_object():
-    /*
-     * Array object get.  vAA <- vBB[vCC].
-     *
-     * for: aget-object
-     */
-    /* op vAA, vBB, vCC */
-    .extern artAGetObjectFromMterp
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    EXPORT_PC
-    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
-    GET_VREG a1, a3                     # a1 <- vCC (requested index)
-    jal     artAGetObjectFromMterp      # (array, index)
-    ld      a1, THREAD_EXCEPTION_OFFSET(rSELF)
-    srl     a4, rINST, 8                # a4 <- AA
-    PREFETCH_INST 2
-    bnez    a1, MterpException
-    SET_VREG_OBJECT v0, a4              # vAA <- v0
-    ADVANCE 2
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_aget_short():
-%  op_aget(load="lh", shift="1", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
-
-%def op_aget_wide():
-    /*
-     * Array get, 64 bits.  vAA <- vBB[vCC].
-     *
-     */
-    /* aget-wide vAA, vBB, vCC */
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    srl     a4, rINST, 8                # a4 <- AA
-    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
-    GET_VREG a1, a3                     # a1 <- vCC (requested index)
-    beqz    a0, common_errNullObject    # bail if null array object
-    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
-    dlsa    a0, a1, a0, 3               # a0 <- arrayObj + index*width
-    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    lw      a2, MIRROR_WIDE_ARRAY_DATA_OFFSET(a0)
-    lw      a3, (MIRROR_WIDE_ARRAY_DATA_OFFSET+4)(a0)
-    dinsu   a2, a3, 32, 32              # a2 <- vBB[vCC]
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a2, a4                # vAA <- a2
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_aput(store="sw", shift="2", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET"):
-    /*
-     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
-     *
-     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
-     *
-     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    srl     a4, rINST, 8                # a4 <- AA
-    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
-    GET_VREG a1, a3                     # a1 <- vCC (requested index)
-    beqz    a0, common_errNullObject    # bail if null array object
-    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
-    .if $shift
-    # [d]lsa does not support shift count of 0.
-    dlsa    a0, a1, a0, $shift          # a0 <- arrayObj + index*width
-    .else
-    daddu   a0, a1, a0                  # a0 <- arrayObj + index*width
-    .endif
-    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_VREG a2, a4                     # a2 <- vAA
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    $store  a2, $data_offset(a0)        # vBB[vCC] <- a2
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_aput_boolean():
-%  op_aput(store="sb", shift="0", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
-
-%def op_aput_byte():
-%  op_aput(store="sb", shift="0", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
-
-%def op_aput_char():
-%  op_aput(store="sh", shift="1", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
-
-%def op_aput_object():
-    /*
-     * Store an object into an array.  vBB[vCC] <- vAA.
-     */
-    /* op vAA, vBB, vCC */
-    .extern MterpAputObject
-    EXPORT_PC
-    daddu   a0, rFP, OFF_FP_SHADOWFRAME
-    move    a1, rPC
-    move    a2, rINST
-    jal     MterpAputObject
-    beqzc   v0, MterpPossibleException
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_aput_short():
-%  op_aput(store="sh", shift="1", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
-
-%def op_aput_wide():
-    /*
-     * Array put, 64 bits.  vBB[vCC] <- vAA.
-     *
-     */
-    /* aput-wide vAA, vBB, vCC */
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    srl     a4, rINST, 8                # a4 <- AA
-    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
-    GET_VREG a1, a3                     # a1 <- vCC (requested index)
-    beqz    a0, common_errNullObject    # bail if null array object
-    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
-    dlsa    a0, a1, a0, 3               # a0 <- arrayObj + index*width
-    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
-    GET_VREG_WIDE a2, a4                # a2 <- vAA
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    sw      a2, MIRROR_WIDE_ARRAY_DATA_OFFSET(a0)
-    dsrl32  a2, a2, 0
-    sw      a2, (MIRROR_WIDE_ARRAY_DATA_OFFSET+4)(a0)  # vBB[vCC] <- a2
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_array_length():
-    /*
-     * Return the length of an array.
-     */
-    srl     a1, rINST, 12               # a1 <- B
-    GET_VREG_U a0, a1                   # a0 <- vB (object ref)
-    ext     a2, rINST, 8, 4             # a2 <- A
-    beqz    a0, common_errNullObject    # yup, fail
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- array length
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a3, a2                     # vA <- length
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_fill_array_data():
-    /* fill-array-data vAA, +BBBBBBBB */
-    .extern MterpFillArrayData
-    EXPORT_PC
-    lh      a1, 2(rPC)                  # a1 <- bbbb (lo)
-    lh      a0, 4(rPC)                  # a0 <- BBBB (hi)
-    srl     a3, rINST, 8                # a3 <- AA
-    ins     a1, a0, 16, 16              # a1 <- BBBBbbbb
-    GET_VREG_U a0, a3                   # a0 <- vAA (array object)
-    dlsa    a1, a1, rPC, 1              # a1 <- PC + BBBBbbbb*2 (array data off.)
-    jal     MterpFillArrayData          # (obj, payload)
-    beqzc   v0, MterpPossibleException  # exception?
-    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_filled_new_array(helper="MterpFilledNewArray"):
-    /*
-     * Create a new array with elements filled from registers.
-     *
-     * for: filled-new-array, filled-new-array/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class//CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, type//BBBB */
-    .extern $helper
-    EXPORT_PC
-    daddu   a0, rFP, OFF_FP_SHADOWFRAME
-    move    a1, rPC
-    move    a2, rSELF
-    jal     $helper
-    beqzc   v0, MterpPossibleException
-    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_filled_new_array_range():
-%  op_filled_new_array(helper="MterpFilledNewArrayRange")
-
-%def op_new_array():
-    /*
-     * Allocate an array of objects, specified with the array class
-     * and a count.
-     *
-     * The verifier guarantees that this is an array class, so we don't
-     * check for it here.
-     */
-    /* new-array vA, vB, class//CCCC */
-    .extern MterpNewArray
-    EXPORT_PC
-    daddu   a0, rFP, OFF_FP_SHADOWFRAME
-    move    a1, rPC
-    move    a2, rINST
-    move    a3, rSELF
-    jal     MterpNewArray
-    beqzc   v0, MterpPossibleException
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
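
The aget/aput handlers removed above share one guard sequence: bail to common_errNullObject on a null array reference, bail to common_errArrayIndex when the unsigned index is >= the array length, and only then touch element data at data_offset + index * width. A C sketch of that guard and element access, using a placeholder layout rather than the real MIRROR_* offsets:

    #include <stdint.h>
    #include <stddef.h>

    struct int_array {              /* placeholder for an int[] object layout */
        int32_t length;
        int32_t data[];
    };

    static int aget_int(const struct int_array *arr, uint32_t index, int32_t *out) {
        if (arr == NULL) {
            return -1;                          /* common_errNullObject */
        }
        if (index >= (uint32_t)arr->length) {   /* unsigned compare, as bgeu above */
            return -2;                          /* common_errArrayIndex */
        }
        *out = arr->data[index];                /* vAA <- vBB[vCC] */
        return 0;
    }
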
diff --git a/runtime/interpreter/mterp/mips64/control_flow.S b/runtime/interpreter/mterp/mips64/control_flow.S
deleted file mode 100644
index 457b938..0000000
--- a/runtime/interpreter/mterp/mips64/control_flow.S
+++ /dev/null
@@ -1,217 +0,0 @@
-%def bincmp(condition=""):
-    /*
-     * Generic two-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform, e.g. for
-     * "if-le" you would use "le".
-     *
-     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
-     */
-    /* if-cmp vA, vB, +CCCC */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    lh      rINST, 2(rPC)               # rINST <- offset (sign-extended CCCC)
-    GET_VREG a0, a2                     # a0 <- vA
-    GET_VREG a1, a3                     # a1 <- vB
-    b${condition}c a0, a1, MterpCommonTakenBranchNoFlags
-    li      v0, JIT_CHECK_OSR           # possible OSR re-entry?
-    beqc    rPROFILE, v0, .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def zcmp(condition=""):
-    /*
-     * Generic one-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform, e.g. for
-     * "if-lez" you would use "le".
-     *
-     * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
-     */
-    /* if-cmp vAA, +BBBB */
-    srl     a2, rINST, 8                # a2 <- AA
-    lh      rINST, 2(rPC)               # rINST <- offset (sign-extended BBBB)
-    GET_VREG a0, a2                     # a0 <- vAA
-    b${condition}zc a0, MterpCommonTakenBranchNoFlags
-    li      v0, JIT_CHECK_OSR           # possible OSR re-entry?
-    beqc    rPROFILE, v0, .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_goto():
-    /*
-     * Unconditional branch, 8-bit offset.
-     *
-     * The branch distance is a signed code-unit offset, which we need to
-     * double to get a byte offset.
-     */
-    /* goto +AA */
-    srl     rINST, rINST, 8
-    seb     rINST, rINST                # rINST <- offset (sign-extended AA)
-    b       MterpCommonTakenBranchNoFlags
-
-%def op_goto_16():
-    /*
-     * Unconditional branch, 16-bit offset.
-     *
-     * The branch distance is a signed code-unit offset, which we need to
-     * double to get a byte offset.
-     */
-    /* goto/16 +AAAA */
-    lh      rINST, 2(rPC)               # rINST <- offset (sign-extended AAAA)
-    b       MterpCommonTakenBranchNoFlags
-
-%def op_goto_32():
-    /*
-     * Unconditional branch, 32-bit offset.
-     *
-     * The branch distance is a signed code-unit offset, which we need to
-     * double to get a byte offset.
-     *
-     * Unlike most opcodes, this one is allowed to branch to itself, so
-     * our "backward branch" test must be "<=0" instead of "<0".
-     */
-    /* goto/32 +AAAAAAAA */
-    lh      rINST, 2(rPC)               # rINST <- aaaa (low)
-    lh      a1, 4(rPC)                  # a1 <- AAAA (high)
-    ins     rINST, a1, 16, 16           # rINST <- offset (sign-extended AAAAaaaa)
-    b       MterpCommonTakenBranchNoFlags
-
-%def op_if_eq():
-%  bincmp(condition="eq")
-
-%def op_if_eqz():
-%  zcmp(condition="eq")
-
-%def op_if_ge():
-%  bincmp(condition="ge")
-
-%def op_if_gez():
-%  zcmp(condition="ge")
-
-%def op_if_gt():
-%  bincmp(condition="gt")
-
-%def op_if_gtz():
-%  zcmp(condition="gt")
-
-%def op_if_le():
-%  bincmp(condition="le")
-
-%def op_if_lez():
-%  zcmp(condition="le")
-
-%def op_if_lt():
-%  bincmp(condition="lt")
-
-%def op_if_ltz():
-%  zcmp(condition="lt")
-
-%def op_if_ne():
-%  bincmp(condition="ne")
-
-%def op_if_nez():
-%  zcmp(condition="ne")
-
-%def op_packed_switch(func="MterpDoPackedSwitch"):
-    /*
-     * Handle a packed-switch or sparse-switch instruction.  In both cases
-     * we decode it and hand it off to a helper function.
-     *
-     * We don't really expect backward branches in a switch statement, but
-     * they're perfectly legal, so we check for them here.
-     *
-     * for: packed-switch, sparse-switch
-     */
-    /* op vAA, +BBBBBBBB */
-    .extern $func
-    lh      a0, 2(rPC)                  # a0 <- bbbb (lo)
-    lh      a1, 4(rPC)                  # a1 <- BBBB (hi)
-    srl     a3, rINST, 8                # a3 <- AA
-    ins     a0, a1, 16, 16              # a0 <- BBBBbbbb
-    GET_VREG a1, a3                     # a1 <- vAA
-    dlsa    a0, a0, rPC, 1              # a0 <- PC + BBBBbbbb*2
-    jal     $func                       # v0 <- code-unit branch offset
-    move    rINST, v0
-    b       MterpCommonTakenBranchNoFlags
-
-%def op_return(instr="GET_VREG"):
-    /*
-     * Return a 32-bit value.
-     *
-     * for: return (sign-extend), return-object (zero-extend)
-     */
-    /* op vAA */
-    .extern MterpThreadFenceForConstructor
-    .extern MterpSuspendCheck
-    jal     MterpThreadFenceForConstructor
-    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
-    move    a0, rSELF
-    and     ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    beqzc   ra, 1f
-    jal     MterpSuspendCheck           # (self)
-1:
-    srl     a2, rINST, 8                # a2 <- AA
-    $instr  a0, a2                      # a0 <- vAA
-    b       MterpReturn
-
-%def op_return_object():
-%  op_return(instr="GET_VREG_U")
-
-%def op_return_void():
-    .extern MterpThreadFenceForConstructor
-    .extern MterpSuspendCheck
-    jal     MterpThreadFenceForConstructor
-    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
-    move    a0, rSELF
-    and     ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    beqzc   ra, 1f
-    jal     MterpSuspendCheck           # (self)
-1:
-    li      a0, 0
-    b       MterpReturn
-
-%def op_return_void_no_barrier():
-    .extern MterpSuspendCheck
-    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
-    move    a0, rSELF
-    and     ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    beqzc   ra, 1f
-    jal     MterpSuspendCheck           # (self)
-1:
-    li      a0, 0
-    b       MterpReturn
-
-%def op_return_wide():
-    /*
-     * Return a 64-bit value.
-     */
-    /* return-wide vAA */
-    /* op vAA */
-    .extern MterpThreadFenceForConstructor
-    .extern MterpSuspendCheck
-    jal     MterpThreadFenceForConstructor
-    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
-    move    a0, rSELF
-    and     ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    beqzc   ra, 1f
-    jal     MterpSuspendCheck           # (self)
-1:
-    srl     a2, rINST, 8                # a2 <- AA
-    GET_VREG_WIDE a0, a2                # a0 <- vAA
-    b       MterpReturn
-
-%def op_sparse_switch():
-%  op_packed_switch(func="MterpDoSparseSwitch")
-
-%def op_throw():
-    /*
-     * Throw an exception object in the current thread.
-     */
-    /* throw vAA */
-    EXPORT_PC
-    srl     a2, rINST, 8                # a2 <- AA
-    GET_VREG_U a0, a2                   # a0 <- vAA (exception object)
-    beqzc   a0, common_errNullObject
-    sd      a0, THREAD_EXCEPTION_OFFSET(rSELF)  # thread->exception <- obj
-    b       MterpException
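
Every taken branch in the control-flow handlers removed above funnels into MterpCommonTakenBranchNoFlags with rINST holding a signed offset measured in 16-bit code units, so the byte displacement is offset * 2 relative to the branching instruction. A C sketch of the if-eq decision and PC update (illustrative, not the mterp dispatch itself):

    #include <stdint.h>

    static const uint16_t *branch_if_eq(const uint16_t *pc, int32_t vA, int32_t vB) {
        int16_t offset_units = (int16_t)pc[1];   /* sign-extended CCCC, in code units */
        if (vA == vB) {
            return pc + offset_units;            /* taken: move by offset code units */
        }
        return pc + 2;                           /* not taken: skip the 2-unit instruction */
    }
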
diff --git a/runtime/interpreter/mterp/mips64/floating_point.S b/runtime/interpreter/mterp/mips64/floating_point.S
deleted file mode 100644
index 1132a09..0000000
--- a/runtime/interpreter/mterp/mips64/floating_point.S
+++ /dev/null
@@ -1,382 +0,0 @@
-%def fbinop(instr=""):
-    /*:
-     * Generic 32-bit floating-point operation.
-     *
-     * For: add-float, sub-float, mul-float, div-float.
-     * form: <op> f0, f0, f1
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_FLOAT f0, a2               # f0 <- vBB
-    GET_VREG_FLOAT f1, a3               # f1 <- vCC
-    $instr                              # f0 <- f0 op f1
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_FLOAT f0, a4               # vAA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def fbinop2addr(instr=""):
-    /*:
-     * Generic 32-bit "/2addr" floating-point operation.
-     *
-     * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr.
-     * form: <op> f0, f0, f1
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_FLOAT f0, a2               # f0 <- vA
-    GET_VREG_FLOAT f1, a3               # f1 <- vB
-    $instr                              # f0 <- f0 op f1
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_FLOAT f0, a2               # vA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def fbinopWide(instr=""):
-    /*:
-     * Generic 64-bit floating-point operation.
-     *
-     * For: add-double, sub-double, mul-double, div-double.
-     * form: <op> f0, f0, f1
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_DOUBLE f0, a2              # f0 <- vBB
-    GET_VREG_DOUBLE f1, a3              # f1 <- vCC
-    $instr                              # f0 <- f0 op f1
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_DOUBLE f0, a4              # vAA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def fbinopWide2addr(instr=""):
-    /*:
-     * Generic 64-bit "/2addr" floating-point operation.
-     *
-     * For: add-double/2addr, sub-double/2addr, mul-double/2addr, div-double/2addr.
-     * form: <op> f0, f0, f1
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_DOUBLE f0, a2              # f0 <- vA
-    GET_VREG_DOUBLE f1, a3              # f1 <- vB
-    $instr                              # f0 <- f0 op f1
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_DOUBLE f0, a2              # vA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def fcmp(gt_bias=""):
-    /*
-     * Compare two floating-point values.  Puts 0, 1, or -1 into the
-     * destination register based on the results of the comparison.
-     *
-     * For: cmpl-float, cmpg-float
-     */
-    /* op vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_FLOAT f0, a2               # f0 <- vBB
-    GET_VREG_FLOAT f1, a3               # f1 <- vCC
-    cmp.eq.s f2, f0, f1
-    li      a0, 0
-    bc1nez  f2, 1f                      # done if vBB == vCC (ordered)
-    .if $gt_bias
-    cmp.lt.s f2, f0, f1
-    li      a0, -1
-    bc1nez  f2, 1f                      # done if vBB < vCC (ordered)
-    li      a0, 1                       # vBB > vCC or unordered
-    .else
-    cmp.lt.s f2, f1, f0
-    li      a0, 1
-    bc1nez  f2, 1f                      # done if vBB > vCC (ordered)
-    li      a0, -1                      # vBB < vCC or unordered
-    .endif
-1:
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a4                     # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def fcmpWide(gt_bias=""):
-    /*
-     * Compare two floating-point values.  Puts 0, 1, or -1 into the
-     * destination register based on the results of the comparison.
-     *
-     * For: cmpl-double, cmpg-double
-     */
-    /* op vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_DOUBLE f0, a2              # f0 <- vBB
-    GET_VREG_DOUBLE f1, a3              # f1 <- vCC
-    cmp.eq.d f2, f0, f1
-    li      a0, 0
-    bc1nez  f2, 1f                      # done if vBB == vCC (ordered)
-    .if $gt_bias
-    cmp.lt.d f2, f0, f1
-    li      a0, -1
-    bc1nez  f2, 1f                      # done if vBB < vCC (ordered)
-    li      a0, 1                       # vBB > vCC or unordered
-    .else
-    cmp.lt.d f2, f1, f0
-    li      a0, 1
-    bc1nez  f2, 1f                      # done if vBB > vCC (ordered)
-    li      a0, -1                      # vBB < vCC or unordered
-    .endif
-1:
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a4                     # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
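The only difference between the gt_bias and non-gt_bias paths in fcmp/fcmpWide above is the value stored when the operands are unordered (at least one is NaN): cmpg-* writes 1, cmpl-* writes -1. A minimal C sketch of that selection, using plain C rather than ART code:

    /* Returns 0 if equal, -1 if less, 1 if greater; gt_bias decides the NaN case. */
    static int compare_float(float vbb, float vcc, int gt_bias) {
        if (vbb == vcc) return 0;       /* ordered, equal */
        if (vbb <  vcc) return -1;      /* ordered, less */
        if (vbb >  vcc) return 1;       /* ordered, greater */
        return gt_bias ? 1 : -1;        /* unordered: at least one operand is NaN */
    }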
-%def fcvtFooter(suffix="", valreg=""):
-    /*
-     * Stores a specified register containing the result of conversion
-     * from or to a floating-point type and jumps to the next instruction.
-     *
-     * Expects a1 to contain the destination Dalvik register number.
-     * a1 is set up by fcvtHeader.S.
-     *
-     * For: int-to-float, int-to-double, long-to-float, long-to-double,
-     *      float-to-int, float-to-long, float-to-double, double-to-int,
-     *      double-to-long, double-to-float, neg-float, neg-double.
-     *
-     * Note that this code can't be included after a break in other handlers,
-     * so in those handlers its contents appear as an inline copy.
-     * See: float-to-int, float-to-long, double-to-int, double-to-long.
-     */
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG$suffix $valreg, a1
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def fcvtHeader(suffix="", valreg=""):
-    /*
-     * Loads a specified register from vB. Used primarily for conversions
-     * from or to a floating-point type.
-     *
-     * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
-     * store the result in vA and jump to the next instruction.
-     *
-     * For: int-to-float, int-to-double, long-to-float, long-to-double,
-     *      float-to-int, float-to-long, float-to-double, double-to-int,
-     *      double-to-long, double-to-float, neg-float, neg-double.
-     */
-    ext     a1, rINST, 8, 4             # a1 <- A
-    srl     a2, rINST, 12               # a2 <- B
-    GET_VREG$suffix $valreg, a2
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-
-%def op_add_double():
-%  fbinopWide(instr="add.d f0, f0, f1")
-
-%def op_add_double_2addr():
-%  fbinopWide2addr(instr="add.d f0, f0, f1")
-
-%def op_add_float():
-%  fbinop(instr="add.s f0, f0, f1")
-
-%def op_add_float_2addr():
-%  fbinop2addr(instr="add.s f0, f0, f1")
-
-%def op_cmpg_double():
-%  fcmpWide(gt_bias="1")
-
-%def op_cmpg_float():
-%  fcmp(gt_bias="1")
-
-%def op_cmpl_double():
-%  fcmpWide(gt_bias="0")
-
-%def op_cmpl_float():
-%  fcmp(gt_bias="0")
-
-%def op_div_double():
-%  fbinopWide(instr="div.d f0, f0, f1")
-
-%def op_div_double_2addr():
-%  fbinopWide2addr(instr="div.d f0, f0, f1")
-
-%def op_div_float():
-%  fbinop(instr="div.s f0, f0, f1")
-
-%def op_div_float_2addr():
-%  fbinop2addr(instr="div.s f0, f0, f1")
-
-%def op_double_to_float():
-    /*
-     * Conversion from or to floating-point happens in a floating-point register.
-     * Therefore we load the input into, and store the output from, a
-     * floating-point register irrespective of the type.
-     */
-%  fcvtHeader(suffix="_DOUBLE", valreg="f0")
-    cvt.s.d f0, f0
-%  fcvtFooter(suffix="_FLOAT", valreg="f0")
-
-%def op_double_to_int():
-%  fcvtHeader(suffix="_DOUBLE", valreg="f0")
-    trunc.w.d f0, f0
-%  fcvtFooter(suffix="_FLOAT", valreg="f0")
-
-%def op_double_to_long():
-%  fcvtHeader(suffix="_DOUBLE", valreg="f0")
-    trunc.l.d f0, f0
-%  fcvtFooter(suffix="_DOUBLE", valreg="f0")
-
-%def op_float_to_double():
-    /*
-     * Conversion from or to floating-point happens in a floating-point register.
-     * Therefore we load the input into, and store the output from, a
-     * floating-point register irrespective of the type.
-     */
-%  fcvtHeader(suffix="_FLOAT", valreg="f0")
-    cvt.d.s f0, f0
-%  fcvtFooter(suffix="_DOUBLE", valreg="f0")
-
-%def op_float_to_int():
-%  fcvtHeader(suffix="_FLOAT", valreg="f0")
-    trunc.w.s f0, f0
-%  fcvtFooter(suffix="_FLOAT", valreg="f0")
-
-%def op_float_to_long():
-%  fcvtHeader(suffix="_FLOAT", valreg="f0")
-    trunc.l.s f0, f0
-%  fcvtFooter(suffix="_DOUBLE", valreg="f0")
-
-%def op_int_to_double():
-    /*
-     * Conversion from or to floating-point happens in a floating-point register.
-     * Therefore we load the input into, and store the output from, a
-     * floating-point register irrespective of the type.
-     */
-%  fcvtHeader(suffix="_FLOAT", valreg="f0")
-    cvt.d.w f0, f0
-%  fcvtFooter(suffix="_DOUBLE", valreg="f0")
-
-%def op_int_to_float():
-    /*
-     * Conversion from or to floating-point happens in a floating-point register.
-     * Therefore we load the input into, and store the output from, a
-     * floating-point register irrespective of the type.
-     */
-%  fcvtHeader(suffix="_FLOAT", valreg="f0")
-    cvt.s.w f0, f0
-%  fcvtFooter(suffix="_FLOAT", valreg="f0")
-
-%def op_long_to_double():
-    /*
-     * Conversion from or to floating-point happens in a floating-point register.
-     * Therefore we load the input into, and store the output from, a
-     * floating-point register irrespective of the type.
-     */
-%  fcvtHeader(suffix="_DOUBLE", valreg="f0")
-    cvt.d.l f0, f0
-%  fcvtFooter(suffix="_DOUBLE", valreg="f0")
-
-%def op_long_to_float():
-    /*
-     * Conversion from or to floating-point happens in a floating-point register.
-     * Therefore we load the input into, and store the output from, a
-     * floating-point register irrespective of the type.
-     */
-%  fcvtHeader(suffix="_DOUBLE", valreg="f0")
-    cvt.s.l f0, f0
-%  fcvtFooter(suffix="_FLOAT", valreg="f0")
-
-%def op_mul_double():
-%  fbinopWide(instr="mul.d f0, f0, f1")
-
-%def op_mul_double_2addr():
-%  fbinopWide2addr(instr="mul.d f0, f0, f1")
-
-%def op_mul_float():
-%  fbinop(instr="mul.s f0, f0, f1")
-
-%def op_mul_float_2addr():
-%  fbinop2addr(instr="mul.s f0, f0, f1")
-
-%def op_neg_double():
-%  fcvtHeader(suffix="_DOUBLE", valreg="f0")
-    neg.d   f0, f0
-%  fcvtFooter(suffix="_DOUBLE", valreg="f0")
-
-%def op_neg_float():
-%  fcvtHeader(suffix="_FLOAT", valreg="f0")
-    neg.s   f0, f0
-%  fcvtFooter(suffix="_FLOAT", valreg="f0")
-
-%def op_rem_double():
-    /* rem-double vAA, vBB, vCC */
-    .extern fmod
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_DOUBLE f12, a2             # f12 <- vBB
-    GET_VREG_DOUBLE f13, a3             # f13 <- vCC
-    jal     fmod                        # f0 <- f12 op f13
-    srl     a4, rINST, 8                # a4 <- AA
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_DOUBLE f0, a4              # vAA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_rem_double_2addr():
-    /* rem-double/2addr vA, vB */
-    .extern fmod
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_DOUBLE f12, a2             # f12 <- vA
-    GET_VREG_DOUBLE f13, a3             # f13 <- vB
-    jal     fmod                        # f0 <- f12 op f13
-    ext     a2, rINST, 8, 4             # a2 <- A
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_DOUBLE f0, a2              # vA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_rem_float():
-    /* rem-float vAA, vBB, vCC */
-    .extern fmodf
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_FLOAT f12, a2              # f12 <- vBB
-    GET_VREG_FLOAT f13, a3              # f13 <- vCC
-    jal     fmodf                       # f0 <- f12 op f13
-    srl     a4, rINST, 8                # a4 <- AA
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_FLOAT f0, a4               # vAA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_rem_float_2addr():
-    /* rem-float/2addr vA, vB */
-    .extern fmodf
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_FLOAT f12, a2              # f12 <- vA
-    GET_VREG_FLOAT f13, a3              # f13 <- vB
-    jal     fmodf                       # f0 <- f12 op f13
-    ext     a2, rINST, 8, 4             # a2 <- A
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_FLOAT f0, a2               # vA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
-
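As the rem handlers above show, floating-point remainder is not a single MIPS instruction; the interpreter calls into libc, and the Dalvik semantics match C's fmodf/fmod (the result takes the sign of the dividend). A short C sketch, assuming only the standard library:

    #include <math.h>

    /* rem-float vAA, vBB, vCC: vAA <- fmodf(vBB, vCC); rem-double uses fmod(). */
    static float dalvik_rem_float(float vbb, float vcc) {
        return fmodf(vbb, vcc);
    }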
-%def op_sub_double():
-%  fbinopWide(instr="sub.d f0, f0, f1")
-
-%def op_sub_double_2addr():
-%  fbinopWide2addr(instr="sub.d f0, f0, f1")
-
-%def op_sub_float():
-%  fbinop(instr="sub.s f0, f0, f1")
-
-%def op_sub_float_2addr():
-%  fbinop2addr(instr="sub.s f0, f0, f1")
diff --git a/runtime/interpreter/mterp/mips64/invoke.S b/runtime/interpreter/mterp/mips64/invoke.S
deleted file mode 100644
index c2967cf..0000000
--- a/runtime/interpreter/mterp/mips64/invoke.S
+++ /dev/null
@@ -1,110 +0,0 @@
-%def invoke(helper="UndefinedInvokeHandler"):
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern $helper
-    .extern MterpShouldSwitchInterpreters
-    EXPORT_PC
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    jal     $helper
-    beqzc   v0, MterpException
-    FETCH_ADVANCE_INST 3
-    jal     MterpShouldSwitchInterpreters
-    bnezc   v0, MterpFallback
-    GET_INST_OPCODE v0
-    GOTO_OPCODE v0
-
-%def invoke_polymorphic(helper="UndefinedInvokeHandler"):
-    /*
-     * invoke-polymorphic handler wrapper.
-     */
-    /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
-    .extern $helper
-    .extern MterpShouldSwitchInterpreters
-    EXPORT_PC
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    jal     $helper
-    beqzc   v0, MterpException
-    FETCH_ADVANCE_INST 4
-    jal     MterpShouldSwitchInterpreters
-    bnezc   v0, MterpFallback
-    GET_INST_OPCODE v0
-    GOTO_OPCODE v0
-
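Both wrappers above share one contract with the C++ side: pass (self, shadow_frame, dex_pc_ptr, inst_data) to the helper, treat a zero return as a pending exception, advance past the invoke instruction (3 code units, or 4 for invoke-polymorphic), and optionally fall back to the switch interpreter. A C sketch of that control flow; the signatures below are simplified assumptions, the real declarations live in interpreter/mterp/mterp.cc:

    #include <stdint.h>

    /* Assumed, simplified signatures for the sketch only. */
    extern int MterpInvokeVirtual(void* self, void* shadow_frame,
                                  uint16_t* dex_pc_ptr, uint16_t inst_data);
    extern int MterpShouldSwitchInterpreters(void);

    static int do_invoke(void* self, void* frame, uint16_t** rpc, uint16_t inst) {
        if (!MterpInvokeVirtual(self, frame, *rpc, inst)) {
            return 0;                   /* pending exception -> MterpException */
        }
        *rpc += 3;                      /* FETCH_ADVANCE_INST 3 */
        if (MterpShouldSwitchInterpreters()) {
            return -1;                  /* -> MterpFallback */
        }
        return 1;                       /* dispatch the next opcode */
    }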
-%def op_invoke_custom():
-%  invoke(helper="MterpInvokeCustom")
-
-%def op_invoke_custom_range():
-%  invoke(helper="MterpInvokeCustomRange")
-
-%def op_invoke_direct():
-%  invoke(helper="MterpInvokeDirect")
-
-%def op_invoke_direct_range():
-%  invoke(helper="MterpInvokeDirectRange")
-
-%def op_invoke_interface():
-%  invoke(helper="MterpInvokeInterface")
-    /*
-     * Handle an interface method call.
-     *
-     * for: invoke-interface, invoke-interface/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-%def op_invoke_interface_range():
-%  invoke(helper="MterpInvokeInterfaceRange")
-
-%def op_invoke_polymorphic():
-%  invoke_polymorphic(helper="MterpInvokePolymorphic")
-
-%def op_invoke_polymorphic_range():
-%  invoke_polymorphic(helper="MterpInvokePolymorphicRange")
-
-%def op_invoke_static():
-%  invoke(helper="MterpInvokeStatic")
-
-%def op_invoke_static_range():
-%  invoke(helper="MterpInvokeStaticRange")
-
-%def op_invoke_super():
-%  invoke(helper="MterpInvokeSuper")
-    /*
-     * Handle a "super" method call.
-     *
-     * for: invoke-super, invoke-super/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-%def op_invoke_super_range():
-%  invoke(helper="MterpInvokeSuperRange")
-
-%def op_invoke_virtual():
-%  invoke(helper="MterpInvokeVirtual")
-    /*
-     * Handle a virtual method call.
-     *
-     * for: invoke-virtual, invoke-virtual/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-%def op_invoke_virtual_quick():
-%  invoke(helper="MterpInvokeVirtualQuick")
-
-%def op_invoke_virtual_range():
-%  invoke(helper="MterpInvokeVirtualRange")
-
-%def op_invoke_virtual_range_quick():
-%  invoke(helper="MterpInvokeVirtualQuickRange")
diff --git a/runtime/interpreter/mterp/mips64/main.S b/runtime/interpreter/mterp/mips64/main.S
deleted file mode 100644
index ac3a4a3..0000000
--- a/runtime/interpreter/mterp/mips64/main.S
+++ /dev/null
@@ -1,745 +0,0 @@
-%def header():
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define zero $$0  /* always zero */
-#define AT   $$at /* assembler temp */
-#define v0   $$2  /* return value */
-#define v1   $$3
-#define a0   $$4  /* argument registers */
-#define a1   $$5
-#define a2   $$6
-#define a3   $$7
-#define a4   $$8  /* expanded register arguments */
-#define a5   $$9
-#define a6   $$10
-#define a7   $$11
-#define ta0  $$8  /* alias */
-#define ta1  $$9
-#define ta2  $$10
-#define ta3  $$11
-#define t0   $$12 /* temp registers (not saved across subroutine calls) */
-#define t1   $$13
-#define t2   $$14
-#define t3   $$15
-
-#define s0   $$16 /* saved across subroutine calls (callee saved) */
-#define s1   $$17
-#define s2   $$18
-#define s3   $$19
-#define s4   $$20
-#define s5   $$21
-#define s6   $$22
-#define s7   $$23
-#define t8   $$24 /* two more temp registers */
-#define t9   $$25
-#define k0   $$26 /* kernel temporary */
-#define k1   $$27
-#define gp   $$28 /* global pointer */
-#define sp   $$29 /* stack pointer */
-#define s8   $$30 /* one more callee saved */
-#define ra   $$31 /* return address */
-
-#define f0   $$f0
-#define f1   $$f1
-#define f2   $$f2
-#define f3   $$f3
-#define f12  $$f12
-#define f13  $$f13
-
-/*
- * It looks like the GNU assembler currently does not support the blec and bgtc
- * idioms, which should translate into bgec and bltc respectively with swapped
- * left and right register operands.
- * TODO: remove these macros when the assembler is fixed.
- */
-.macro blec lreg, rreg, target
-    bgec    \rreg, \lreg, \target
-.endm
-.macro bgtc lreg, rreg, target
-    bltc    \rreg, \lreg, \target
-.endm
-
-/*
-Mterp and MIPS64 notes:
-
-The following registers have fixed assignments:
-
-  reg nick      purpose
-  s0  rPC       interpreted program counter, used for fetching instructions
-  s1  rFP       interpreted frame pointer, used for accessing locals and args
-  s2  rSELF     self (Thread) pointer
-  s3  rINST     first 16-bit code unit of current instruction
-  s4  rIBASE    interpreted instruction base pointer, used for computed goto
-  s5  rREFS     base of object references in shadow frame  (ideally, we'll get rid of this later).
-  s6  rPROFILE  jit profile hotness countdown
-*/
-
-/* During bringup, we'll use the shadow frame model instead of rFP */
-/* single-purpose registers, given names for clarity */
-#define rPC      s0
-#define CFI_DEX  16  // DWARF register number of the register holding dex-pc (s0).
-#define CFI_TMP  4   // DWARF register number of the first argument register (a0).
-#define rFP      s1
-#define rSELF    s2
-#define rINST    s3
-#define rIBASE   s4
-#define rREFS    s5
-#define rPROFILE s6
-
-/*
- * This is a #include, not a %include, because we want the C pre-processor
- * to expand the macros into assembler assignment statements.
- */
-#include "asm_support.h"
-#include "interpreter/cfi_asm_support.h"
-
-/*
- * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs.  So,
- * to access other shadow frame fields, we need to use a backwards offset.  Define those here.
- */
-#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
-#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
-#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
-#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
-#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
-#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
-#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
-#define OFF_FP_SHADOWFRAME OFF_FP(0)
-
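Since rFP points at the vregs array rather than at the ShadowFrame itself, every other field is reached with a negative adjustment, which is all OFF_FP() does. A small C illustration; the two offsets are placeholders, the real values come from asm_support.h:

    #include <stdint.h>

    #define SHADOWFRAME_VREGS_OFFSET   48   /* placeholder, see asm_support.h */
    #define SHADOWFRAME_DEX_PC_OFFSET  32   /* placeholder, see asm_support.h */
    #define OFF_FP(a) ((a) - SHADOWFRAME_VREGS_OFFSET)

    /* Load the shadow frame's dex pc field given only rFP (a pointer to the vregs). */
    static uint32_t load_dex_pc(const char* rfp) {
        return *(const uint32_t*)(rfp + OFF_FP(SHADOWFRAME_DEX_PC_OFFSET));
    }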
-#define MTERP_PROFILE_BRANCHES 1
-#define MTERP_LOGGING 0
-
-/*
- * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects.  Must
- * be done *before* something throws.
- *
- * It's okay to do this more than once.
- *
- * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
- * dex byte codes.  However, the rest of the runtime expects dex pc to be an instruction
- * offset into the code_items_[] array.  For efficiency, we will "export" the
- * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
- * to convert to a dex pc when needed.
- */
-.macro EXPORT_PC
-    sd      rPC, OFF_FP_DEX_PC_PTR(rFP)
-.endm
-
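What EXPORT_PC stores is the raw rPC pointer; turning it into a dex pc offset is deferred until someone asks (the GetDexPC path mentioned above). The conversion is just a pointer difference counted in 16-bit code units, sketched in C:

    #include <stdint.h>

    /* dex pc offset = exported instruction pointer minus the start of the
     * method's code item, counted in 16-bit code units. */
    static uint32_t dex_pc_from_ptr(const uint16_t* dex_pc_ptr,
                                    const uint16_t* dex_instructions) {
        return (uint32_t)(dex_pc_ptr - dex_instructions);
    }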
-/*
- * Refresh handler table.
- */
-.macro REFRESH_IBASE
-    ld      rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
-.endm
-
-/*
- * Fetch the next instruction from rPC into rINST.  Does not advance rPC.
- */
-.macro FETCH_INST
-    lhu     rINST, 0(rPC)
-.endm
-
-/* Advance rPC by some number of code units. */
-.macro ADVANCE count
-    daddu   rPC, rPC, (\count) * 2
-.endm
-
-/*
- * Fetch the next instruction from an offset specified by _reg and advance rPC
- * to point to the next instruction.  "_reg" must specify the distance
- * in bytes, *not* 16-bit code units, and may be a signed value.  Must not set flags.
- *
- */
-.macro FETCH_ADVANCE_INST_RB reg
-    daddu   rPC, rPC, \reg
-    FETCH_INST
-.endm
-
-/*
- * Fetch the next instruction from the specified offset.  Advances rPC
- * to point to the next instruction.
- *
- * This must come AFTER anything that can throw an exception, or the
- * exception catch may miss.  (This also implies that it must come after
- * EXPORT_PC.)
- */
-.macro FETCH_ADVANCE_INST count
-    ADVANCE \count
-    FETCH_INST
-.endm
-
-/*
- * Similar to FETCH_ADVANCE_INST, but does not update rPC.  Used to load
- * rINST ahead of possible exception point.  Be sure to manually advance rPC
- * later.
- */
-.macro PREFETCH_INST count
-    lhu     rINST, ((\count) * 2)(rPC)
-.endm
-
-/*
- * Put the instruction's opcode field into the specified register.
- */
-.macro GET_INST_OPCODE reg
-    and     \reg, rINST, 255
-.endm
-
-/*
- * Begin executing the opcode in _reg.
- */
-.macro GOTO_OPCODE reg
-    .set noat
-    sll     AT, \reg, 7
-    daddu   AT, rIBASE, AT
-    jic     AT, 0
-    .set at
-.endm
-
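Dispatch works because every opcode handler is padded to a fixed 128-byte slot, so the target is simply rIBASE + (opcode << 7); no dispatch table is loaded. The same computation in C:

    #include <stdint.h>

    /* Each handler occupies 128 bytes starting at ibase, so the shift-by-7
     * replaces a table lookup. */
    static const void* handler_address(uintptr_t ibase, uint32_t opcode) {
        return (const void*)(ibase + ((uintptr_t)opcode << 7));
    }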
-/*
- * Get/set the 32-bit value from a Dalvik register.
- * Note, GET_VREG does sign extension to 64 bits while
- * GET_VREG_U does zero extension to 64 bits.
- * One is useful for arithmetic while the other is
- * useful for storing the result value as 64-bit.
- */
-.macro GET_VREG reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rFP, 2
-    lw      \reg, 0(AT)
-    .set at
-.endm
-.macro GET_VREG_U reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rFP, 2
-    lwu     \reg, 0(AT)
-    .set at
-.endm
-.macro GET_VREG_FLOAT reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rFP, 2
-    lwc1    \reg, 0(AT)
-    .set at
-.endm
-.macro SET_VREG reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rFP, 2
-    sw      \reg, 0(AT)
-    dlsa    AT, \vreg, rREFS, 2
-    sw      zero, 0(AT)
-    .set at
-.endm
-.macro SET_VREG_OBJECT reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rFP, 2
-    sw      \reg, 0(AT)
-    dlsa    AT, \vreg, rREFS, 2
-    sw      \reg, 0(AT)
-    .set at
-.endm
-.macro SET_VREG_FLOAT reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rFP, 2
-    swc1    \reg, 0(AT)
-    dlsa    AT, \vreg, rREFS, 2
-    sw      zero, 0(AT)
-    .set at
-.endm
-
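The sign-versus-zero extension distinction above matters because a 32-bit vreg always ends up widened in a 64-bit register: lw sign-extends for arithmetic, lwu zero-extends when the raw 32-bit pattern is wanted. In C terms:

    #include <stdint.h>

    /* GET_VREG: sign-extend the 32-bit vreg to 64 bits (lw). */
    static int64_t get_vreg(const uint32_t* fp, uint32_t vreg) {
        return (int64_t)(int32_t)fp[vreg];
    }

    /* GET_VREG_U: zero-extend the 32-bit vreg to 64 bits (lwu). */
    static uint64_t get_vreg_u(const uint32_t* fp, uint32_t vreg) {
        return (uint64_t)fp[vreg];
    }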
-/*
- * Get/set the 64-bit value from a Dalvik register.
- * Avoid unaligned memory accesses.
- * Note, SET_VREG_WIDE clobbers the register containing the value being stored.
- * Note, SET_VREG_DOUBLE clobbers the register containing the Dalvik register number.
- */
-.macro GET_VREG_WIDE reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rFP, 2
-    lw      \reg, 0(AT)
-    lw      AT, 4(AT)
-    dinsu   \reg, AT, 32, 32
-    .set at
-.endm
-.macro GET_VREG_DOUBLE reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rFP, 2
-    lwc1    \reg, 0(AT)
-    lw      AT, 4(AT)
-    mthc1   AT, \reg
-    .set at
-.endm
-.macro SET_VREG_WIDE reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rFP, 2
-    sw      \reg, 0(AT)
-    drotr32 \reg, \reg, 0
-    sw      \reg, 4(AT)
-    dlsa    AT, \vreg, rREFS, 2
-    sw      zero, 0(AT)
-    sw      zero, 4(AT)
-    .set at
-.endm
-.macro SET_VREG_DOUBLE reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rREFS, 2
-    sw      zero, 0(AT)
-    sw      zero, 4(AT)
-    dlsa    AT, \vreg, rFP, 2
-    swc1    \reg, 0(AT)
-    mfhc1   \vreg, \reg
-    sw      \vreg, 4(AT)
-    .set at
-.endm
-
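A wide vreg pair is only guaranteed 4-byte alignment, so the macros above never issue a single 64-bit load or store; they move two 32-bit halves and recombine them, and the setters also clear the matching reference slots. Equivalent C, assuming the little-endian layout used on mips64el:

    #include <stdint.h>

    static uint64_t get_vreg_wide(const uint32_t* fp, uint32_t vreg) {
        uint64_t lo = fp[vreg];         /* low half at the lower address */
        uint64_t hi = fp[vreg + 1];     /* high half at the higher address */
        return (hi << 32) | lo;
    }

    static void set_vreg_wide(uint32_t* fp, uint32_t* refs, uint32_t vreg,
                              uint64_t value) {
        fp[vreg]       = (uint32_t)value;
        fp[vreg + 1]   = (uint32_t)(value >> 32);
        refs[vreg]     = 0;             /* wide values are never references */
        refs[vreg + 1] = 0;
    }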
-/*
- * On-stack offsets for spilling/unspilling callee-saved registers
- * and the frame size.
- */
-#define STACK_OFFSET_RA 0
-#define STACK_OFFSET_GP 8
-#define STACK_OFFSET_S0 16
-#define STACK_OFFSET_S1 24
-#define STACK_OFFSET_S2 32
-#define STACK_OFFSET_S3 40
-#define STACK_OFFSET_S4 48
-#define STACK_OFFSET_S5 56
-#define STACK_OFFSET_S6 64
-#define STACK_SIZE      80    /* needs 16 byte alignment */
-
-/* Constants for float/double_to_int/long conversions */
-#define INT_MIN             0x80000000
-#define INT_MIN_AS_FLOAT    0xCF000000
-#define INT_MIN_AS_DOUBLE   0xC1E0000000000000
-#define LONG_MIN            0x8000000000000000
-#define LONG_MIN_AS_FLOAT   0xDF000000
-#define LONG_MIN_AS_DOUBLE  0xC3E0000000000000
-
-%def entry():
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- * Interpreter entry point.
- */
-
-    .set    reorder
-
-    .text
-    .global ExecuteMterpImpl
-    .type   ExecuteMterpImpl, %function
-    .balign 16
-/*
- * On entry:
- *  a0  Thread* self
- *  a1  dex_instructions
- *  a2  ShadowFrame
- *  a3  JValue* result_register
- *
- */
-ExecuteMterpImpl:
-    .cfi_startproc
-    .cpsetup t9, t8, ExecuteMterpImpl
-
-    .cfi_def_cfa sp, 0
-    daddu   sp, sp, -STACK_SIZE
-    .cfi_adjust_cfa_offset STACK_SIZE
-
-    sd      t8, STACK_OFFSET_GP(sp)
-    .cfi_rel_offset 28, STACK_OFFSET_GP
-    sd      ra, STACK_OFFSET_RA(sp)
-    .cfi_rel_offset 31, STACK_OFFSET_RA
-
-    sd      s0, STACK_OFFSET_S0(sp)
-    .cfi_rel_offset 16, STACK_OFFSET_S0
-    sd      s1, STACK_OFFSET_S1(sp)
-    .cfi_rel_offset 17, STACK_OFFSET_S1
-    sd      s2, STACK_OFFSET_S2(sp)
-    .cfi_rel_offset 18, STACK_OFFSET_S2
-    sd      s3, STACK_OFFSET_S3(sp)
-    .cfi_rel_offset 19, STACK_OFFSET_S3
-    sd      s4, STACK_OFFSET_S4(sp)
-    .cfi_rel_offset 20, STACK_OFFSET_S4
-    sd      s5, STACK_OFFSET_S5(sp)
-    .cfi_rel_offset 21, STACK_OFFSET_S5
-    sd      s6, STACK_OFFSET_S6(sp)
-    .cfi_rel_offset 22, STACK_OFFSET_S6
-
-    /* Remember the return register */
-    sd      a3, SHADOWFRAME_RESULT_REGISTER_OFFSET(a2)
-
-    /* Remember the dex instruction pointer */
-    sd      a1, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(a2)
-
-    /* set up "named" registers */
-    move    rSELF, a0
-    daddu   rFP, a2, SHADOWFRAME_VREGS_OFFSET
-    lw      v0, SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(a2)
-    dlsa    rREFS, v0, rFP, 2
-    lw      v0, SHADOWFRAME_DEX_PC_OFFSET(a2)
-    dlsa    rPC, v0, a1, 1
-    CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
-    EXPORT_PC
-
-    /* Starting ibase */
-    REFRESH_IBASE
-
-    /* Set up for backwards branches & osr profiling */
-    ld      a0, OFF_FP_METHOD(rFP)
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rSELF
-    jal     MterpSetUpHotnessCountdown
-    move    rPROFILE, v0                # Starting hotness countdown to rPROFILE
-
-    /* start executing the instruction at rPC */
-    FETCH_INST
-    GET_INST_OPCODE v0
-    GOTO_OPCODE v0
-
-    /* NOTE: no fallthrough */
-
-%def dchecks_before_helper():
-    // Call C++ to do debug checks and return to the handler using tail call.
-    .extern MterpCheckBefore
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-%def opcode_pre():
-%  add_helper(dchecks_before_helper, "mterp_dchecks_before_helper")
-    #if !defined(NDEBUG)
-    jal    SYMBOL(mterp_dchecks_before_helper)
-    #endif
-
-%def fallback():
-/* Transfer stub to alternate interpreter */
-    b       MterpFallback
-
-%def helpers():
-%  pass
-
-%def footer():
-/*
- * We've detected a condition that will result in an exception, but the exception
- * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
- * TUNING: for consistency, we may want to just go ahead and handle these here.
- */
-
-    .extern MterpLogDivideByZeroException
-common_errDivideByZero:
-    EXPORT_PC
-#if MTERP_LOGGING
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    jal     MterpLogDivideByZeroException
-#endif
-    b       MterpCommonFallback
-
-    .extern MterpLogArrayIndexException
-common_errArrayIndex:
-    EXPORT_PC
-#if MTERP_LOGGING
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    jal     MterpLogArrayIndexException
-#endif
-    b       MterpCommonFallback
-
-    .extern MterpLogNullObjectException
-common_errNullObject:
-    EXPORT_PC
-#if MTERP_LOGGING
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    jal     MterpLogNullObjectException
-#endif
-    b       MterpCommonFallback
-
-/*
- * If we're here, something is out of the ordinary.  If there is a pending
- * exception, handle it.  Otherwise, roll back and retry with the reference
- * interpreter.
- */
-MterpPossibleException:
-    ld      a0, THREAD_EXCEPTION_OFFSET(rSELF)
-    beqzc   a0, MterpFallback                       # If not, fall back to reference interpreter.
-    /* intentional fallthrough - handle pending exception. */
-/*
- * On return from a runtime helper routine, we've found a pending exception.
- * Can we handle it here, or do we need to bail out to the caller?
- *
- */
-    .extern MterpHandleException
-    .extern MterpShouldSwitchInterpreters
-MterpException:
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    jal     MterpHandleException                    # (self, shadow_frame)
-    beqzc   v0, MterpExceptionReturn                # no local catch, back to caller.
-    ld      a0, OFF_FP_DEX_INSTRUCTIONS(rFP)
-    lwu     a1, OFF_FP_DEX_PC(rFP)
-    REFRESH_IBASE
-    dlsa    rPC, a1, a0, 1                          # generate new dex_pc_ptr
-    /* Do we need to switch interpreters? */
-    jal     MterpShouldSwitchInterpreters
-    bnezc   v0, MterpFallback
-    /* resume execution at catch block */
-    EXPORT_PC
-    FETCH_INST
-    GET_INST_OPCODE v0
-    GOTO_OPCODE v0
-    /* NOTE: no fallthrough */
-
-/*
- * Common handling for branches with support for Jit profiling.
- * On entry:
- *    rINST          <= signed offset
- *    rPROFILE       <= signed hotness countdown (expanded to 64 bits)
- *
- * We have quite a few different cases for branch profiling, OSR detection and
- * suspend check support here.
- *
- * Taken backward branches:
- *    If profiling active, do hotness countdown and report if we hit zero.
- *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *    Is there a pending suspend request?  If so, suspend.
- *
- * Taken forward branches and not-taken backward branches:
- *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *
- * Our most common case is expected to be a taken backward branch with active jit profiling,
- * but no full OSR check and no pending suspend request.
- * Next most common case is not-taken branch with no full OSR check.
- *
- */
-MterpCommonTakenBranchNoFlags:
-    bgtzc   rINST, .L_forward_branch    # don't add forward branches to hotness
-/*
- * We need to subtract 1 from positive values and we should not see 0 here,
- * so we may use the result of the comparison with -1.
- */
-    li      v0, JIT_CHECK_OSR
-    beqc    rPROFILE, v0, .L_osr_check
-    bltc    rPROFILE, v0, .L_resume_backward_branch
-    dsubu   rPROFILE, 1
-    beqzc   rPROFILE, .L_add_batch      # counted down to zero - report
-.L_resume_backward_branch:
-    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
-    REFRESH_IBASE
-    daddu   a2, rINST, rINST            # a2<- byte offset
-    FETCH_ADVANCE_INST_RB a2            # update rPC, load rINST
-    and     ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    bnezc   ra, .L_suspend_request_pending
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-.L_suspend_request_pending:
-    EXPORT_PC
-    move    a0, rSELF
-    jal     MterpSuspendCheck           # (self)
-    bnezc   v0, MterpFallback
-    REFRESH_IBASE                       # might have changed during suspend
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-.L_no_count_backwards:
-    li      v0, JIT_CHECK_OSR           # check for possible OSR re-entry
-    bnec    rPROFILE, v0, .L_resume_backward_branch
-.L_osr_check:
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rINST
-    EXPORT_PC
-    jal MterpMaybeDoOnStackReplacement  # (self, shadow_frame, offset)
-    bnezc   v0, MterpOnStackReplacement
-    b       .L_resume_backward_branch
-
-.L_forward_branch:
-    li      v0, JIT_CHECK_OSR           # check for possible OSR re-entry
-    beqc    rPROFILE, v0, .L_check_osr_forward
-.L_resume_forward_branch:
-    daddu   a2, rINST, rINST            # a2<- byte offset
-    FETCH_ADVANCE_INST_RB a2            # update rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-.L_check_osr_forward:
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rINST
-    EXPORT_PC
-    jal     MterpMaybeDoOnStackReplacement # (self, shadow_frame, offset)
-    bnezc   v0, MterpOnStackReplacement
-    b       .L_resume_forward_branch
-
-.L_add_batch:
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    sh      rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
-    ld      a0, OFF_FP_METHOD(rFP)
-    move    a2, rSELF
-    jal     MterpAddHotnessBatch        # (method, shadow_frame, self)
-    move    rPROFILE, v0                # restore new hotness countdown to rPROFILE
-    b       .L_no_count_backwards
-
-/*
- * Entered from the conditional branch handlers when an OSR check request is
- * active on the not-taken path.  All Dalvik not-taken conditional branch offsets are 2.
- */
-.L_check_not_taken_osr:
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    li      a2, 2
-    EXPORT_PC
-    jal     MterpMaybeDoOnStackReplacement # (self, shadow_frame, offset)
-    bnezc   v0, MterpOnStackReplacement
-    FETCH_ADVANCE_INST 2
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
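The taken-backward-branch path above reduces to a small decision on rPROFILE: the JIT_CHECK_OSR sentinel forces an OSR check, anything below it means profiling is off, and otherwise the countdown is decremented with a batch report when it reaches zero. A hedged C sketch of just that decision; the sentinel's real value comes from asm_support.h, -1 is assumed here only for illustration:

    #define JIT_CHECK_OSR (-1)          /* assumed value, see asm_support.h */

    enum branch_action { RESUME, OSR_CHECK, REPORT_BATCH };

    static enum branch_action on_taken_backward_branch(long* rprofile) {
        if (*rprofile == JIT_CHECK_OSR) return OSR_CHECK;     /* OSR check mode */
        if (*rprofile <  JIT_CHECK_OSR) return RESUME;        /* profiling disabled */
        if (--*rprofile == 0)           return REPORT_BATCH;  /* countdown hit zero */
        return RESUME;                                        /* keep counting */
    }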
-/*
- * On-stack replacement has happened, and now we've returned from the compiled method.
- */
-MterpOnStackReplacement:
-#if MTERP_LOGGING
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rINST                               # rINST contains offset
-    jal     MterpLogOSR
-#endif
-    li      v0, 1                                   # Signal normal return
-    b       MterpDone
-
-/*
- * Bail out to reference interpreter.
- */
-    .extern MterpLogFallback
-MterpFallback:
-    EXPORT_PC
-#if MTERP_LOGGING
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    jal     MterpLogFallback
-#endif
-MterpCommonFallback:
-    li      v0, 0                                   # signal retry with reference interpreter.
-    b       MterpDone
-
-/*
- * We pushed some registers on the stack in ExecuteMterpImpl, then saved
- * SP and RA.  Here we restore SP, restore the registers, and then restore
- * RA to PC.
- *
- * On entry:
- *  uint32_t* rFP  (should still be live, pointer to base of vregs)
- */
-MterpExceptionReturn:
-    li      v0, 1                                   # signal return to caller.
-    b       MterpDone
-/*
- * Returned value is expected in a0 and if it's not 64-bit, the 32 most
- * significant bits of a0 must be zero-extended or sign-extended
- * depending on the return type.
- */
-MterpReturn:
-    ld      a2, OFF_FP_RESULT_REGISTER(rFP)
-    sd      a0, 0(a2)
-    li      v0, 1                                   # signal return to caller.
-MterpDone:
-/*
- * At this point, we expect rPROFILE to be non-zero.  If negative, hotness is disabled or we're
- * checking for OSR.  If greater than zero, we might have unreported hotness to register
- * (the difference between the ending rPROFILE and the cached hotness counter).  rPROFILE
- * should only reach zero immediately after a hotness decrement, and is then reset to either
- * a negative special state or the new non-zero countdown value.
- */
-    blez    rPROFILE, .L_pop_and_return # if > 0, we may have some counts to report.
-
-MterpProfileActive:
-    move    rINST, v0                   # stash return value
-    /* Report cached hotness counts */
-    ld      a0, OFF_FP_METHOD(rFP)
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rSELF
-    sh      rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
-    jal     MterpAddHotnessBatch        # (method, shadow_frame, self)
-    move    v0, rINST                   # restore return value
-
-.L_pop_and_return:
-    ld      s6, STACK_OFFSET_S6(sp)
-    .cfi_restore 22
-    ld      s5, STACK_OFFSET_S5(sp)
-    .cfi_restore 21
-    ld      s4, STACK_OFFSET_S4(sp)
-    .cfi_restore 20
-    ld      s3, STACK_OFFSET_S3(sp)
-    .cfi_restore 19
-    ld      s2, STACK_OFFSET_S2(sp)
-    .cfi_restore 18
-    ld      s1, STACK_OFFSET_S1(sp)
-    .cfi_restore 17
-    ld      s0, STACK_OFFSET_S0(sp)
-    .cfi_restore 16
-
-    ld      ra, STACK_OFFSET_RA(sp)
-    .cfi_restore 31
-
-    ld      t8, STACK_OFFSET_GP(sp)
-    .cpreturn
-    .cfi_restore 28
-
-    .set    noreorder
-    jr      ra
-    daddu   sp, sp, STACK_SIZE
-    .cfi_adjust_cfa_offset -STACK_SIZE
-
-    .cfi_endproc
-    .set    reorder
-    .size ExecuteMterpImpl, .-ExecuteMterpImpl
-
-%def instruction_end():
-
-    .global artMterpAsmInstructionEnd
-artMterpAsmInstructionEnd:
-
-%def instruction_start():
-
-    .global artMterpAsmInstructionStart
-artMterpAsmInstructionStart = .L_op_nop
-    .text
-
-%def opcode_start():
-%  pass
-%def opcode_end():
-%  pass
-%def helper_start(name):
-    ENTRY ${name}
-%def helper_end(name):
-    END ${name}
diff --git a/runtime/interpreter/mterp/mips64/object.S b/runtime/interpreter/mterp/mips64/object.S
deleted file mode 100644
index a5a2b3d..0000000
--- a/runtime/interpreter/mterp/mips64/object.S
+++ /dev/null
@@ -1,262 +0,0 @@
-%def field(helper=""):
-TODO
-
-%def op_check_cast():
-    /*
-     * Check to see if a cast from one class to another is allowed.
-     */
-    /* check-cast vAA, class//BBBB */
-    .extern MterpCheckCast
-    EXPORT_PC
-    lhu     a0, 2(rPC)                  # a0 <- BBBB
-    srl     a1, rINST, 8                # a1 <- AA
-    dlsa    a1, a1, rFP, 2              # a1 <- &object
-    ld      a2, OFF_FP_METHOD(rFP)      # a2 <- method
-    move    a3, rSELF                   # a3 <- self
-    jal     MterpCheckCast              # (index, &obj, method, self)
-    PREFETCH_INST 2
-    bnez    v0, MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_iget(is_object="0", helper="MterpIGetU32"):
-%  field(helper=helper)
-
-%def op_iget_boolean():
-%  op_iget(helper="MterpIGetU8")
-
-%def op_iget_boolean_quick():
-%  op_iget_quick(load="lbu")
-
-%def op_iget_byte():
-%  op_iget(helper="MterpIGetI8")
-
-%def op_iget_byte_quick():
-%  op_iget_quick(load="lb")
-
-%def op_iget_char():
-%  op_iget(helper="MterpIGetU16")
-
-%def op_iget_char_quick():
-%  op_iget_quick(load="lhu")
-
-%def op_iget_object():
-%  op_iget(is_object="1", helper="MterpIGetObj")
-
-%def op_iget_object_quick():
-    /* For: iget-object-quick */
-    /* op vA, vB, offset//CCCC */
-    .extern artIGetObjectFromMterp
-    srl     a2, rINST, 12               # a2 <- B
-    lhu     a1, 2(rPC)                  # a1 <- field byte offset
-    EXPORT_PC
-    GET_VREG_U a0, a2                   # a0 <- object we're operating on
-    jal     artIGetObjectFromMterp      # (obj, offset)
-    ld      a3, THREAD_EXCEPTION_OFFSET(rSELF)
-    ext     a2, rINST, 8, 4             # a2 <- A
-    PREFETCH_INST 2
-    bnez    a3, MterpPossibleException  # bail out
-    SET_VREG_OBJECT v0, a2              # fp[A] <- v0
-    ADVANCE 2                           # advance rPC
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_iget_quick(load="lw"):
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    /* op vA, vB, offset//CCCC */
-    srl     a2, rINST, 12               # a2 <- B
-    lhu     a1, 2(rPC)                  # a1 <- field byte offset
-    GET_VREG_U a3, a2                   # a3 <- object we're operating on
-    ext     a4, rINST, 8, 4             # a4 <- A
-    daddu   a1, a1, a3
-    beqz    a3, common_errNullObject    # object was null
-    $load   a0, 0(a1)                   # a0 <- obj.field
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    SET_VREG a0, a4                     # fp[A] <- a0
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
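The *-quick handlers above skip field resolution: the CCCC operand already holds the field's byte offset, so the handler only null-checks the object and loads from object + offset. A simplified C rendering of iget-quick (object layout details omitted):

    #include <stdint.h>

    /* Returns 0 for a null object (common_errNullObject), 1 on success. */
    static int iget_quick(const char* obj, uint32_t byte_offset, int32_t* out) {
        if (obj == 0) {
            return 0;
        }
        *out = *(const int32_t*)(obj + byte_offset);
        return 1;
    }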
-%def op_iget_short():
-%  op_iget(helper="MterpIGetI16")
-
-%def op_iget_short_quick():
-%  op_iget_quick(load="lh")
-
-%def op_iget_wide():
-%  op_iget(helper="MterpIGetU64")
-
-%def op_iget_wide_quick():
-    /* iget-wide-quick vA, vB, offset//CCCC */
-    srl     a2, rINST, 12               # a2 <- B
-    lhu     a4, 2(rPC)                  # a4 <- field byte offset
-    GET_VREG_U a3, a2                   # a3 <- object we're operating on
-    ext     a2, rINST, 8, 4             # a2 <- A
-    beqz    a3, common_errNullObject    # object was null
-    daddu   a4, a3, a4                  # create direct pointer
-    lw      a0, 0(a4)
-    lw      a1, 4(a4)
-    dinsu   a0, a1, 32, 32
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    SET_VREG_WIDE a0, a2
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_instance_of():
-    /*
-     * Check to see if an object reference is an instance of a class.
-     *
-     * Most common situation is a non-null object, being compared against
-     * an already-resolved class.
-     */
-    /* instance-of vA, vB, class//CCCC */
-    .extern MterpInstanceOf
-    EXPORT_PC
-    lhu     a0, 2(rPC)                  # a0 <- CCCC
-    srl     a1, rINST, 12               # a1 <- B
-    dlsa    a1, a1, rFP, 2              # a1 <- &object
-    ld      a2, OFF_FP_METHOD(rFP)      # a2 <- method
-    move    a3, rSELF                   # a3 <- self
-    jal     MterpInstanceOf             # (index, &obj, method, self)
-    ld      a1, THREAD_EXCEPTION_OFFSET(rSELF)
-    ext     a2, rINST, 8, 4             # a2 <- A
-    PREFETCH_INST 2
-    bnez    a1, MterpException
-    ADVANCE 2                           # advance rPC
-    SET_VREG v0, a2                     # vA <- v0
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_iput(is_object="0", helper="MterpIPutU32"):
-%  field(helper=helper)
-
-%def op_iput_boolean():
-%  op_iput(helper="MterpIPutU8")
-
-%def op_iput_boolean_quick():
-%  op_iput_quick(store="sb")
-
-%def op_iput_byte():
-%  op_iput(helper="MterpIPutI8")
-
-%def op_iput_byte_quick():
-%  op_iput_quick(store="sb")
-
-%def op_iput_char():
-%  op_iput(helper="MterpIPutU16")
-
-%def op_iput_char_quick():
-%  op_iput_quick(store="sh")
-
-%def op_iput_object():
-%  op_iput(is_object="1", helper="MterpIPutObj")
-
-%def op_iput_object_quick():
-    .extern MterpIputObjectQuick
-    EXPORT_PC
-    daddu   a0, rFP, OFF_FP_SHADOWFRAME
-    move    a1, rPC
-    move    a2, rINST
-    jal     MterpIputObjectQuick
-    beqzc   v0, MterpException
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_iput_quick(store="sw"):
-    /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */
-    /* op vA, vB, offset//CCCC */
-    srl     a2, rINST, 12               # a2 <- B
-    lhu     a1, 2(rPC)                  # a1 <- field byte offset
-    GET_VREG_U a3, a2                   # a3 <- fp[B], the object pointer
-    ext     a2, rINST, 8, 4             # a2 <- A
-    beqz    a3, common_errNullObject    # object was null
-    GET_VREG a0, a2                     # a0 <- fp[A]
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    daddu   a1, a1, a3
-    $store  a0, 0(a1)                   # obj.field <- a0
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_iput_short():
-%  op_iput(helper="MterpIPutI16")
-
-%def op_iput_short_quick():
-%  op_iput_quick(store="sh")
-
-%def op_iput_wide():
-%  op_iput(helper="MterpIPutU64")
-
-%def op_iput_wide_quick():
-    /* iput-wide-quick vA, vB, offset//CCCC */
-    srl     a2, rINST, 12               # a2 <- B
-    lhu     a3, 2(rPC)                  # a3 <- field byte offset
-    GET_VREG_U a2, a2                   # a2 <- fp[B], the object pointer
-    ext     a0, rINST, 8, 4             # a0 <- A
-    beqz    a2, common_errNullObject    # object was null
-    GET_VREG_WIDE a0, a0                # a0 <- fp[A]
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    daddu   a1, a2, a3                  # create a direct pointer
-    sw      a0, 0(a1)
-    dsrl32  a0, a0, 0
-    sw      a0, 4(a1)
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_new_instance():
-    /*
-     * Create a new instance of a class.
-     */
-    /* new-instance vAA, class//BBBB */
-    .extern MterpNewInstance
-    EXPORT_PC
-    daddu   a0, rFP, OFF_FP_SHADOWFRAME
-    move    a1, rSELF
-    move    a2, rINST
-    jal     MterpNewInstance            # (shadow_frame, self, inst_data)
-    beqzc   v0, MterpPossibleException
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_sget(is_object="0", helper="MterpSGetU32"):
-%  field(helper=helper)
-
-%def op_sget_boolean():
-%  op_sget(helper="MterpSGetU8")
-
-%def op_sget_byte():
-%  op_sget(helper="MterpSGetI8")
-
-%def op_sget_char():
-%  op_sget(helper="MterpSGetU16")
-
-%def op_sget_object():
-%  op_sget(is_object="1", helper="MterpSGetObj")
-
-%def op_sget_short():
-%  op_sget(helper="MterpSGetI16")
-
-%def op_sget_wide():
-%  op_sget(helper="MterpSGetU64")
-
-%def op_sput(is_object="0", helper="MterpSPutU32"):
-%  field(helper=helper)
-
-%def op_sput_boolean():
-%  op_sput(helper="MterpSPutU8")
-
-%def op_sput_byte():
-%  op_sput(helper="MterpSPutI8")
-
-%def op_sput_char():
-%  op_sput(helper="MterpSPutU16")
-
-%def op_sput_object():
-%  op_sput(is_object="1", helper="MterpSPutObj")
-
-%def op_sput_short():
-%  op_sput(helper="MterpSPutI16")
-
-%def op_sput_wide():
-%  op_sput(helper="MterpSPutU64")
diff --git a/runtime/interpreter/mterp/mips64/other.S b/runtime/interpreter/mterp/mips64/other.S
deleted file mode 100644
index 789efee..0000000
--- a/runtime/interpreter/mterp/mips64/other.S
+++ /dev/null
@@ -1,355 +0,0 @@
-%def const(helper="UndefinedConstHandler"):
-    /* const/class vAA, type@BBBB */
-    /* const/method-handle vAA, method_handle@BBBB */
-    /* const/method-type vAA, proto@BBBB */
-    /* const/string vAA, string@@BBBB */
-    .extern $helper
-    EXPORT_PC
-    lhu     a0, 2(rPC)                  # a0 <- BBBB
-    srl     a1, rINST, 8                # a1 <- AA
-    daddu   a2, rFP, OFF_FP_SHADOWFRAME
-    move    a3, rSELF
-    jal     $helper                     # (index, tgt_reg, shadow_frame, self)
-    PREFETCH_INST 2                     # load rINST
-    bnez    v0, MterpPossibleException  # let reference interpreter deal with it.
-    ADVANCE 2                           # advance rPC
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def unused():
-/*
- * Bail to reference interpreter to throw.
- */
-    b       MterpFallback
-
-%def op_const():
-    /* const vAA, #+BBBBbbbb */
-    srl     a2, rINST, 8                # a2 <- AA
-    lh      a0, 2(rPC)                  # a0 <- bbbb (low)
-    lh      a1, 4(rPC)                  # a1 <- BBBB (high)
-    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
-    ins     a0, a1, 16, 16              # a0 = BBBBbbbb
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                     # vAA <- +BBBBbbbb
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_const_16():
-    /* const/16 vAA, #+BBBB */
-    srl     a2, rINST, 8                # a2 <- AA
-    lh      a0, 2(rPC)                  # a0 <- sign-extended BBBB
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                     # vAA <- +BBBB
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_const_4():
-    /* const/4 vA, #+B */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    seh     a0, rINST                   # sign extend B in rINST
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    sra     a0, a0, 12                  # shift B into its final position
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                     # vA <- +B
-    GOTO_OPCODE v0                      # jump to next instruction
-
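op_const_4 above extracts the signed 4-bit literal B from bits 12..15 of the instruction word by sign-extending the low halfword (seh) and then arithmetic-shifting right by 12, giving a value in -8..7. The same extraction in C:

    #include <stdint.h>

    /* const/4 vA, #+B: the literal lives in bits 12..15 and is sign-extended. */
    static int32_t const4_literal(uint16_t inst) {
        return (int32_t)(int16_t)inst >> 12;   /* seh + sra 12 */
    }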
-%def op_const_class():
-%  const(helper="MterpConstClass")
-
-%def op_const_high16():
-    /* const/high16 vAA, #+BBBB0000 */
-    srl     a2, rINST, 8                # a2 <- AA
-    lh      a0, 2(rPC)                  # a0 <- BBBB
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    sll     a0, a0, 16                  # a0 <- BBBB0000
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                     # vAA <- +BBBB0000
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_const_method_handle():
-%  const(helper="MterpConstMethodHandle")
-
-%def op_const_method_type():
-%  const(helper="MterpConstMethodType")
-
-%def op_const_string():
-%  const(helper="MterpConstString")
-
-%def op_const_string_jumbo():
-    /* const/string vAA, String//BBBBBBBB */
-    .extern MterpConstString
-    EXPORT_PC
-    lh      a0, 2(rPC)                  # a0 <- bbbb (low)
-    lh      a4, 4(rPC)                  # a4 <- BBBB (high)
-    srl     a1, rINST, 8                # a1 <- AA
-    ins     a0, a4, 16, 16              # a0 <- BBBBbbbb
-    daddu   a2, rFP, OFF_FP_SHADOWFRAME
-    move    a3, rSELF
-    jal     MterpConstString            # (index, tgt_reg, shadow_frame, self)
-    PREFETCH_INST 3                     # load rINST
-    bnez    v0, MterpPossibleException  # let reference interpreter deal with it.
-    ADVANCE 3                           # advance rPC
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_const_wide():
-    /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
-    srl     a4, rINST, 8                # a4 <- AA
-    lh      a0, 2(rPC)                  # a0 <- bbbb (low)
-    lh      a1, 4(rPC)                  # a1 <- BBBB (low middle)
-    lh      a2, 6(rPC)                  # a2 <- hhhh (high middle)
-    lh      a3, 8(rPC)                  # a3 <- HHHH (high)
-    FETCH_ADVANCE_INST 5                # advance rPC, load rINST
-    ins     a0, a1, 16, 16              # a0 = BBBBbbbb
-    ins     a2, a3, 16, 16              # a2 = HHHHhhhh
-    dinsu   a0, a2, 32, 32              # a0 = HHHHhhhhBBBBbbbb
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a4                # vAA <- +HHHHhhhhBBBBbbbb
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_const_wide_16():
-    /* const-wide/16 vAA, #+BBBB */
-    srl     a2, rINST, 8                # a2 <- AA
-    lh      a0, 2(rPC)                  # a0 <- sign-extended BBBB
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2                # vAA <- +BBBB
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_const_wide_32():
-    /* const-wide/32 vAA, #+BBBBbbbb */
-    srl     a2, rINST, 8                # a2 <- AA
-    lh      a0, 2(rPC)                  # a0 <- bbbb (low)
-    lh      a1, 4(rPC)                  # a1 <- BBBB (high)
-    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
-    ins     a0, a1, 16, 16              # a0 = BBBBbbbb
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2                # vAA <- +BBBBbbbb
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_const_wide_high16():
-    /* const-wide/high16 vAA, #+BBBB000000000000 */
-    srl     a2, rINST, 8                # a2 <- AA
-    lh      a0, 2(rPC)                  # a0 <- BBBB
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    dsll32  a0, a0, 16                  # a0 <- BBBB000000000000
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2                # vAA <- +BBBB000000000000
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_monitor_enter():
-    /*
-     * Synchronize on an object.
-     */
-    /* monitor-enter vAA */
-    .extern artLockObjectFromCode
-    EXPORT_PC
-    srl     a2, rINST, 8                # a2 <- AA
-    GET_VREG_U a0, a2                   # a0 <- vAA (object)
-    move    a1, rSELF                   # a1 <- self
-    jal     artLockObjectFromCode
-    bnezc   v0, MterpException
-    FETCH_ADVANCE_INST 1
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_monitor_exit():
-    /*
-     * Unlock an object.
-     *
-     * Exceptions that occur when unlocking a monitor need to appear as
-     * if they happened at the following instruction.  See the Dalvik
-     * instruction spec.
-     */
-    /* monitor-exit vAA */
-    .extern artUnlockObjectFromCode
-    EXPORT_PC
-    srl     a2, rINST, 8                # a2 <- AA
-    GET_VREG_U a0, a2                   # a0 <- vAA (object)
-    move    a1, rSELF                   # a1 <- self
-    jal     artUnlockObjectFromCode     # v0 <- success for unlock(self, obj)
-    bnezc   v0, MterpException
-    FETCH_ADVANCE_INST 1                # before throw: advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_move(is_object="0"):
-    /* for move, move-object, long-to-int */
-    /* op vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_VREG a0, a3                     # a0 <- vB
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    .if $is_object
-    SET_VREG_OBJECT a0, a2              # vA <- vB
-    .else
-    SET_VREG a0, a2                     # vA <- vB
-    .endif
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_move_16(is_object="0"):
-    /* for: move/16, move-object/16 */
-    /* op vAAAA, vBBBB */
-    lhu     a3, 4(rPC)                  # a3 <- BBBB
-    lhu     a2, 2(rPC)                  # a2 <- AAAA
-    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
-    GET_VREG a0, a3                     # a0 <- vBBBB
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    .if $is_object
-    SET_VREG_OBJECT a0, a2              # vAAAA <- vBBBB
-    .else
-    SET_VREG a0, a2                     # vAAAA <- vBBBB
-    .endif
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_move_exception():
-    /* move-exception vAA */
-    srl     a2, rINST, 8                # a2 <- AA
-    ld      a0, THREAD_EXCEPTION_OFFSET(rSELF)  # load exception obj
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    SET_VREG_OBJECT a0, a2              # vAA <- exception obj
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    sd      zero, THREAD_EXCEPTION_OFFSET(rSELF)  # clear exception
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_move_from16(is_object="0"):
-    /* for: move/from16, move-object/from16 */
-    /* op vAA, vBBBB */
-    lhu     a3, 2(rPC)                  # a3 <- BBBB
-    srl     a2, rINST, 8                # a2 <- AA
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_VREG a0, a3                     # a0 <- vBBBB
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    .if $is_object
-    SET_VREG_OBJECT a0, a2              # vAA <- vBBBB
-    .else
-    SET_VREG a0, a2                     # vAA <- vBBBB
-    .endif
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_move_object():
-%  op_move(is_object="1")
-
-%def op_move_object_16():
-%  op_move_16(is_object="1")
-
-%def op_move_object_from16():
-%  op_move_from16(is_object="1")
-
-%def op_move_result(is_object="0"):
-    /* for: move-result, move-result-object */
-    /* op vAA */
-    srl     a2, rINST, 8                # a2 <- AA
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    ld      a0, OFF_FP_RESULT_REGISTER(rFP)  # get pointer to result JType
-    lw      a0, 0(a0)                   # a0 <- result.i
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    .if $is_object
-    SET_VREG_OBJECT a0, a2              # vAA <- result
-    .else
-    SET_VREG a0, a2                     # vAA <- result
-    .endif
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_move_result_object():
-%  op_move_result(is_object="1")
-
-%def op_move_result_wide():
-    /* for: move-result-wide */
-    /* op vAA */
-    srl     a2, rINST, 8                # a2 <- AA
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    ld      a0, OFF_FP_RESULT_REGISTER(rFP)  # get pointer to result JType
-    ld      a0, 0(a0)                   # a0 <- result.j
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2                # vAA <- result
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_move_wide():
-    /* move-wide vA, vB */
-    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
-    ext     a3, rINST, 12, 4            # a3 <- B
-    ext     a2, rINST, 8, 4             # a2 <- A
-    GET_VREG_WIDE a0, a3                # a0 <- vB
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2                # vA <- vB
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_move_wide_16():
-    /* move-wide/16 vAAAA, vBBBB */
-    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
-    lhu     a3, 4(rPC)                  # a3 <- BBBB
-    lhu     a2, 2(rPC)                  # a2 <- AAAA
-    GET_VREG_WIDE a0, a3                # a0 <- vBBBB
-    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2                # vAAAA <- vBBBB
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_move_wide_from16():
-    /* move-wide/from16 vAA, vBBBB */
-    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
-    lhu     a3, 2(rPC)                  # a3 <- BBBB
-    srl     a2, rINST, 8                # a2 <- AA
-    GET_VREG_WIDE a0, a3                # a0 <- vBBBB
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2                # vAA <- vBBBB
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_nop():
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_unused_3e():
-%  unused()
-
-%def op_unused_3f():
-%  unused()
-
-%def op_unused_40():
-%  unused()
-
-%def op_unused_41():
-%  unused()
-
-%def op_unused_42():
-%  unused()
-
-%def op_unused_43():
-%  unused()
-
-%def op_unused_79():
-%  unused()
-
-%def op_unused_7a():
-%  unused()
-
-%def op_unused_f3():
-%  unused()
-
-%def op_unused_f4():
-%  unused()
-
-%def op_unused_f5():
-%  unused()
-
-%def op_unused_f6():
-%  unused()
-
-%def op_unused_f7():
-%  unused()
-
-%def op_unused_f8():
-%  unused()
-
-%def op_unused_f9():
-%  unused()
-
-%def op_unused_fc():
-%  unused()
-
-%def op_unused_fd():
-%  unused()