Remove MIPS support from runtime/.
Test: aosp_taimen-userdebug boots.
Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Bug: 147346243
Change-Id: If1fc8be94caa69f734438d7a1f4c715addfd8876
diff --git a/build/apex/art_apex_test.py b/build/apex/art_apex_test.py
index 71c5883..186a994 100755
--- a/build/apex/art_apex_test.py
+++ b/build/apex/art_apex_test.py
@@ -890,8 +890,6 @@
self._checker.check_art_test_executable('indirect_reference_table_test')
self._checker.check_art_test_executable('instruction_set_features_arm64_test')
self._checker.check_art_test_executable('instruction_set_features_arm_test')
- self._checker.check_art_test_executable('instruction_set_features_mips64_test')
- self._checker.check_art_test_executable('instruction_set_features_mips_test')
self._checker.check_art_test_executable('instruction_set_features_test')
self._checker.check_art_test_executable('instruction_set_features_x86_64_test')
self._checker.check_art_test_executable('instruction_set_features_x86_test')
diff --git a/build/art.go b/build/art.go
index e354f61..353a682 100644
--- a/build/art.go
+++ b/build/art.go
@@ -108,11 +108,6 @@
asflags = append(asflags, "-DART_ENABLE_ADDRESS_SANITIZER=1")
}
- if ctx.Config().IsEnvTrue("ART_MIPS32_CHECK_ALIGNMENT") {
- // Enable the use of MIPS32 CHECK_ALIGNMENT macro for debugging purposes
- asflags = append(asflags, "-DART_MIPS32_CHECK_ALIGNMENT")
- }
-
if !ctx.Config().IsEnvFalse("USE_D8_DESUGAR") {
cflags = append(cflags, "-DUSE_D8_DESUGAR=1")
}
diff --git a/libartbase/base/mem_map_test.cc b/libartbase/base/mem_map_test.cc
index 938cff7..64fd6c0 100644
--- a/libartbase/base/mem_map_test.cc
+++ b/libartbase/base/mem_map_test.cc
@@ -19,8 +19,8 @@
#include <memory>
#include <random>
+#include "bit_utils.h"
#include "common_art_test.h"
-#include "common_runtime_test.h" // For TEST_DISABLED_FOR_MIPS
#include "logging.h"
#include "memory_tool.h"
#include "mman.h"
@@ -503,10 +503,6 @@
}
TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
- // Some MIPS32 hardware (namely the Creator Ci20 development board)
- // cannot allocate in the 2GB-4GB region.
- TEST_DISABLED_FOR_MIPS();
-
// This test does not work under AddressSanitizer.
// Historical note: This test did not work under Valgrind either.
TEST_DISABLED_FOR_MEMORY_TOOL();
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 6e8aac7..22931dd 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -255,10 +255,6 @@
"arch/arm/registers_arm.cc",
"arch/arm64/instruction_set_features_arm64.cc",
"arch/arm64/registers_arm64.cc",
- "arch/mips/instruction_set_features_mips.cc",
- "arch/mips/registers_mips.cc",
- "arch/mips64/instruction_set_features_mips64.cc",
- "arch/mips64/registers_mips64.cc",
"arch/x86/instruction_set_features_x86.cc",
"arch/x86/registers_x86.cc",
"arch/x86_64/registers_x86_64.cc",
@@ -357,33 +353,6 @@
asflags: ["-DMTERP_USE_AVX"],
},
},
- mips: {
- srcs: [
- "interpreter/mterp/mterp.cc",
- "interpreter/mterp/nterp_stub.cc",
- "arch/mips/context_mips.cc",
- "arch/mips/entrypoints_init_mips.cc",
- "arch/mips/jni_entrypoints_mips.S",
- "arch/mips/memcmp16_mips.S",
- "arch/mips/quick_entrypoints_mips.S",
- "arch/mips/thread_mips.cc",
- "arch/mips/fault_handler_mips.cc",
- ],
- },
- mips64: {
- srcs: [
- "interpreter/mterp/mterp.cc",
- "interpreter/mterp/nterp_stub.cc",
- "arch/mips64/context_mips64.cc",
- "arch/mips64/entrypoints_init_mips64.cc",
- "arch/mips64/jni_entrypoints_mips64.S",
- "arch/mips64/memcmp16_mips64.S",
- "arch/mips64/quick_entrypoints_mips64.S",
- "arch/mips64/thread_mips64.cc",
- "monitor_pool.cc",
- "arch/mips64/fault_handler_mips64.cc",
- ],
- },
},
target: {
android: {
@@ -637,8 +606,6 @@
"arch/stub_test.cc",
"arch/arm/instruction_set_features_arm_test.cc",
"arch/arm64/instruction_set_features_arm64_test.cc",
- "arch/mips/instruction_set_features_mips_test.cc",
- "arch/mips64/instruction_set_features_mips64_test.cc",
"arch/x86/instruction_set_features_x86_test.cc",
"arch/x86_64/instruction_set_features_x86_64_test.cc",
"barrier_test.cc",
diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc
index 12ad84b..23213d9 100644
--- a/runtime/arch/arch_test.cc
+++ b/runtime/arch/arch_test.cc
@@ -89,48 +89,6 @@
#undef BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_OFFSET
} // namespace arm64
-namespace mips {
-#include "arch/mips/asm_support_mips.h"
-static constexpr size_t kFrameSizeSaveAllCalleeSaves = FRAME_SIZE_SAVE_ALL_CALLEE_SAVES;
-#undef FRAME_SIZE_SAVE_ALL_CALLEE_SAVES
-static constexpr size_t kFrameSizeSaveRefsOnly = FRAME_SIZE_SAVE_REFS_ONLY;
-#undef FRAME_SIZE_SAVE_REFS_ONLY
-static constexpr size_t kFrameSizeSaveRefsAndArgs = FRAME_SIZE_SAVE_REFS_AND_ARGS;
-#undef FRAME_SIZE_SAVE_REFS_AND_ARGS
-static constexpr size_t kFrameSizeSaveEverythingForClinit = FRAME_SIZE_SAVE_EVERYTHING_FOR_CLINIT;
-#undef FRAME_SIZE_SAVE_EVERYTHING_FOR_CLINIT
-static constexpr size_t kFrameSizeSaveEverythingForSuspendCheck =
- FRAME_SIZE_SAVE_EVERYTHING_FOR_SUSPEND_CHECK;
-#undef FRAME_SIZE_SAVE_EVERYTHING_FOR_SUSPEND_CHECK
-static constexpr size_t kFrameSizeSaveEverything = FRAME_SIZE_SAVE_EVERYTHING;
-#undef FRAME_SIZE_SAVE_EVERYTHING
-#undef BAKER_MARK_INTROSPECTION_REGISTER_COUNT
-#undef BAKER_MARK_INTROSPECTION_FIELD_ARRAY_ENTRY_SIZE
-#undef BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRIES_OFFSET
-#undef BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRY_SIZE
-} // namespace mips
-
-namespace mips64 {
-#include "arch/mips64/asm_support_mips64.h"
-static constexpr size_t kFrameSizeSaveAllCalleeSaves = FRAME_SIZE_SAVE_ALL_CALLEE_SAVES;
-#undef FRAME_SIZE_SAVE_ALL_CALLEE_SAVES
-static constexpr size_t kFrameSizeSaveRefsOnly = FRAME_SIZE_SAVE_REFS_ONLY;
-#undef FRAME_SIZE_SAVE_REFS_ONLY
-static constexpr size_t kFrameSizeSaveRefsAndArgs = FRAME_SIZE_SAVE_REFS_AND_ARGS;
-#undef FRAME_SIZE_SAVE_REFS_AND_ARGS
-static constexpr size_t kFrameSizeSaveEverythingForClinit = FRAME_SIZE_SAVE_EVERYTHING_FOR_CLINIT;
-#undef FRAME_SIZE_SAVE_EVERYTHING_FOR_CLINIT
-static constexpr size_t kFrameSizeSaveEverythingForSuspendCheck =
- FRAME_SIZE_SAVE_EVERYTHING_FOR_SUSPEND_CHECK;
-#undef FRAME_SIZE_SAVE_EVERYTHING_FOR_SUSPEND_CHECK
-static constexpr size_t kFrameSizeSaveEverything = FRAME_SIZE_SAVE_EVERYTHING;
-#undef FRAME_SIZE_SAVE_EVERYTHING
-#undef BAKER_MARK_INTROSPECTION_REGISTER_COUNT
-#undef BAKER_MARK_INTROSPECTION_FIELD_ARRAY_ENTRY_SIZE
-#undef BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRIES_OFFSET
-#undef BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRY_SIZE
-} // namespace mips64
-
namespace x86 {
#include "arch/x86/asm_support_x86.h"
static constexpr size_t kFrameSizeSaveAllCalleeSaves = FRAME_SIZE_SAVE_ALL_CALLEE_SAVES;
@@ -183,8 +141,6 @@
}
TEST_ARCH(Arm, arm)
TEST_ARCH(Arm64, arm64)
-TEST_ARCH(Mips, mips)
-TEST_ARCH(Mips64, mips64)
TEST_ARCH(X86, x86)
TEST_ARCH(X86_64, x86_64)
diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc
index 22f0c28..8ff2aad 100644
--- a/runtime/arch/arm64/entrypoints_init_arm64.cc
+++ b/runtime/arch/arm64/entrypoints_init_arm64.cc
@@ -83,9 +83,9 @@
// arguments, only define ReadBarrierMarkRegX entrypoints for the
// first 30 registers. This limitation is not a problem on other
// supported architectures (ARM, x86 and x86-64) either, as they
- // have less core registers (resp. 16, 8 and 16). (We may have to
- // revise that design choice if read barrier support is added for
- // MIPS and/or MIPS64.)
+ // have fewer core registers (resp. 16, 8 and 16).
+ // TODO: ARM/ARM64 now use introspection entrypoints. Consider
+ // reducing the number of entrypoints to those needed by x86-64.
qpoints->pReadBarrierMarkReg00 = is_active ? art_quick_read_barrier_mark_reg00 : nullptr;
qpoints->pReadBarrierMarkReg01 = is_active ? art_quick_read_barrier_mark_reg01 : nullptr;
qpoints->pReadBarrierMarkReg02 = is_active ? art_quick_read_barrier_mark_reg02 : nullptr;
diff --git a/runtime/arch/context-inl.h b/runtime/arch/context-inl.h
index ddcbbb1..cac7c43 100644
--- a/runtime/arch/context-inl.h
+++ b/runtime/arch/context-inl.h
@@ -28,12 +28,6 @@
#elif defined(__aarch64__)
#include "arm64/context_arm64.h"
#define RUNTIME_CONTEXT_TYPE arm64::Arm64Context
-#elif defined(__mips__) && !defined(__LP64__)
-#include "mips/context_mips.h"
-#define RUNTIME_CONTEXT_TYPE mips::MipsContext
-#elif defined(__mips__) && defined(__LP64__)
-#include "mips64/context_mips64.h"
-#define RUNTIME_CONTEXT_TYPE mips64::Mips64Context
#elif defined(__i386__)
#include "x86/context_x86.h"
#define RUNTIME_CONTEXT_TYPE x86::X86Context
diff --git a/runtime/arch/instruction_set_features.cc b/runtime/arch/instruction_set_features.cc
index c5c2d31..2581f6e 100644
--- a/runtime/arch/instruction_set_features.cc
+++ b/runtime/arch/instruction_set_features.cc
@@ -28,8 +28,6 @@
#include "arm/instruction_set_features_arm.h"
#include "arm64/instruction_set_features_arm64.h"
-#include "mips/instruction_set_features_mips.h"
-#include "mips64/instruction_set_features_mips64.h"
#include "x86/instruction_set_features_x86.h"
#include "x86_64/instruction_set_features_x86_64.h"
@@ -43,16 +41,12 @@
return ArmInstructionSetFeatures::FromVariant(variant, error_msg);
case InstructionSet::kArm64:
return Arm64InstructionSetFeatures::FromVariant(variant, error_msg);
- case InstructionSet::kMips:
- return MipsInstructionSetFeatures::FromVariant(variant, error_msg);
- case InstructionSet::kMips64:
- return Mips64InstructionSetFeatures::FromVariant(variant, error_msg);
case InstructionSet::kX86:
return X86InstructionSetFeatures::FromVariant(variant, error_msg);
case InstructionSet::kX86_64:
return X86_64InstructionSetFeatures::FromVariant(variant, error_msg);
- case InstructionSet::kNone:
+ default:
break;
}
UNIMPLEMENTED(FATAL) << isa;
@@ -70,12 +64,6 @@
case InstructionSet::kArm64:
result = Arm64InstructionSetFeatures::FromBitmap(bitmap);
break;
- case InstructionSet::kMips:
- result = MipsInstructionSetFeatures::FromBitmap(bitmap);
- break;
- case InstructionSet::kMips64:
- result = Mips64InstructionSetFeatures::FromBitmap(bitmap);
- break;
case InstructionSet::kX86:
result = X86InstructionSetFeatures::FromBitmap(bitmap);
break;
@@ -83,7 +71,6 @@
result = X86_64InstructionSetFeatures::FromBitmap(bitmap);
break;
- case InstructionSet::kNone:
default:
UNIMPLEMENTED(FATAL) << isa;
UNREACHABLE();
@@ -99,16 +86,12 @@
return ArmInstructionSetFeatures::FromCppDefines();
case InstructionSet::kArm64:
return Arm64InstructionSetFeatures::FromCppDefines();
- case InstructionSet::kMips:
- return MipsInstructionSetFeatures::FromCppDefines();
- case InstructionSet::kMips64:
- return Mips64InstructionSetFeatures::FromCppDefines();
case InstructionSet::kX86:
return X86InstructionSetFeatures::FromCppDefines();
case InstructionSet::kX86_64:
return X86_64InstructionSetFeatures::FromCppDefines();
- case InstructionSet::kNone:
+ default:
break;
}
UNIMPLEMENTED(FATAL) << kRuntimeISA;
@@ -133,16 +116,12 @@
return ArmInstructionSetFeatures::FromCpuInfo();
case InstructionSet::kArm64:
return Arm64InstructionSetFeatures::FromCpuInfo();
- case InstructionSet::kMips:
- return MipsInstructionSetFeatures::FromCpuInfo();
- case InstructionSet::kMips64:
- return Mips64InstructionSetFeatures::FromCpuInfo();
case InstructionSet::kX86:
return X86InstructionSetFeatures::FromCpuInfo();
case InstructionSet::kX86_64:
return X86_64InstructionSetFeatures::FromCpuInfo();
- case InstructionSet::kNone:
+ default:
break;
}
UNIMPLEMENTED(FATAL) << kRuntimeISA;
@@ -156,16 +135,12 @@
return ArmInstructionSetFeatures::FromHwcap();
case InstructionSet::kArm64:
return Arm64InstructionSetFeatures::FromHwcap();
- case InstructionSet::kMips:
- return MipsInstructionSetFeatures::FromHwcap();
- case InstructionSet::kMips64:
- return Mips64InstructionSetFeatures::FromHwcap();
case InstructionSet::kX86:
return X86InstructionSetFeatures::FromHwcap();
case InstructionSet::kX86_64:
return X86_64InstructionSetFeatures::FromHwcap();
- case InstructionSet::kNone:
+ default:
break;
}
UNIMPLEMENTED(FATAL) << kRuntimeISA;
@@ -179,16 +154,12 @@
return ArmInstructionSetFeatures::FromAssembly();
case InstructionSet::kArm64:
return Arm64InstructionSetFeatures::FromAssembly();
- case InstructionSet::kMips:
- return MipsInstructionSetFeatures::FromAssembly();
- case InstructionSet::kMips64:
- return Mips64InstructionSetFeatures::FromAssembly();
case InstructionSet::kX86:
return X86InstructionSetFeatures::FromAssembly();
case InstructionSet::kX86_64:
return X86_64InstructionSetFeatures::FromAssembly();
- case InstructionSet::kNone:
+ default:
break;
}
UNIMPLEMENTED(FATAL) << kRuntimeISA;
@@ -259,16 +230,6 @@
return down_cast<const Arm64InstructionSetFeatures*>(this);
}
-const MipsInstructionSetFeatures* InstructionSetFeatures::AsMipsInstructionSetFeatures() const {
- DCHECK_EQ(InstructionSet::kMips, GetInstructionSet());
- return down_cast<const MipsInstructionSetFeatures*>(this);
-}
-
-const Mips64InstructionSetFeatures* InstructionSetFeatures::AsMips64InstructionSetFeatures() const {
- DCHECK_EQ(InstructionSet::kMips64, GetInstructionSet());
- return down_cast<const Mips64InstructionSetFeatures*>(this);
-}
-
const X86InstructionSetFeatures* InstructionSetFeatures::AsX86InstructionSetFeatures() const {
DCHECK(InstructionSet::kX86 == GetInstructionSet() ||
InstructionSet::kX86_64 == GetInstructionSet());
diff --git a/runtime/arch/instruction_set_features.h b/runtime/arch/instruction_set_features.h
index 9222a7b..78ce580 100644
--- a/runtime/arch/instruction_set_features.h
+++ b/runtime/arch/instruction_set_features.h
@@ -28,8 +28,6 @@
class ArmInstructionSetFeatures;
class Arm64InstructionSetFeatures;
-class MipsInstructionSetFeatures;
-class Mips64InstructionSetFeatures;
class X86InstructionSetFeatures;
class X86_64InstructionSetFeatures;
@@ -114,12 +112,6 @@
// Down cast this Arm64InstructionFeatures.
const Arm64InstructionSetFeatures* AsArm64InstructionSetFeatures() const;
- // Down cast this MipsInstructionFeatures.
- const MipsInstructionSetFeatures* AsMipsInstructionSetFeatures() const;
-
- // Down cast this Mips64InstructionFeatures.
- const Mips64InstructionSetFeatures* AsMips64InstructionSetFeatures() const;
-
// Down cast this X86InstructionFeatures.
const X86InstructionSetFeatures* AsX86InstructionSetFeatures() const;
diff --git a/runtime/arch/memcmp16.h b/runtime/arch/memcmp16.h
index b051a1c..0226c4e 100644
--- a/runtime/arch/memcmp16.h
+++ b/runtime/arch/memcmp16.h
@@ -30,7 +30,7 @@
//
// In both cases, MemCmp16 is declared.
-#if defined(__aarch64__) || defined(__arm__) || defined(__mips__) || defined(__i386__) || defined(__x86_64__)
+#if defined(__aarch64__) || defined(__arm__) || defined(__i386__) || defined(__x86_64__)
extern "C" uint32_t __memcmp16(const uint16_t* s0, const uint16_t* s1, size_t count);
#define MemCmp16 __memcmp16
diff --git a/runtime/arch/mips/asm_support_mips.S b/runtime/arch/mips/asm_support_mips.S
deleted file mode 100644
index fa51059..0000000
--- a/runtime/arch/mips/asm_support_mips.S
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_S_
-#define ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_S_
-
-#include "asm_support_mips.h"
-
-// Define special registers.
-
-// Register holding suspend check count down.
-#define rSUSPEND $s0
-// Register holding Thread::Current().
-#define rSELF $s1
-
- // Declare a function called name, doesn't set up $gp.
-.macro ENTRY_NO_GP_CUSTOM_CFA name, cfa_offset
- .type \name, %function
- .global \name
- // Cache alignment for function entry.
- .balign 16
-\name:
- .cfi_startproc
- // Ensure we get a sane starting CFA.
- .cfi_def_cfa $sp, \cfa_offset
-.endm
-
- // Declare a function called name, doesn't set up $gp.
-.macro ENTRY_NO_GP name
- ENTRY_NO_GP_CUSTOM_CFA \name, 0
-.endm
-
- // Declare a function called name, sets up $gp.
-.macro ENTRY name
- ENTRY_NO_GP \name
- // Load $gp. We expect that ".set noreorder" is in effect.
- .cpload $t9
- // Declare a local convenience label to be branched to when $gp is already set up.
-.L\name\()_gp_set:
-.endm
-
-.macro END name
- .cfi_endproc
- .size \name, .-\name
-.endm
-
-.macro UNIMPLEMENTED name
- ENTRY \name
- break
- break
- END \name
-.endm
-
-#if defined(__mips_isa_rev) && __mips_isa_rev > 2
- /* mips32r5 & mips32r6 have mthc1 op, and have 64-bit fp regs,
- and in FPXX abi we avoid referring to odd-numbered fp regs */
-
-/* LDu: Load 64-bit floating-point value to float reg feven,
- from unaligned (mod-4-aligned) mem location disp(base) */
-.macro LDu feven,fodd,disp,base,temp
- l.s \feven, \disp(\base)
- lw \temp, \disp+4(\base)
- mthc1 \temp, \feven
-.endm
-
-/* SDu: Store 64-bit floating-point value from float reg feven,
- to unaligned (mod-4-aligned) mem location disp(base) */
-.macro SDu feven,fodd,disp,base,temp
- mfhc1 \temp, \feven
- s.s \feven, \disp(\base)
- sw \temp, \disp+4(\base)
-.endm
-
-/* MTD: Move double, from general regpair (reven,rodd)
- to float regpair (feven,fodd) */
-.macro MTD reven,rodd,feven,fodd
- mtc1 \reven, \feven
- mthc1 \rodd, \feven
-.endm
-
-#else
- /* mips32r1 has no mthc1 op;
- mips32r1 and mips32r2 use 32-bit floating point register mode (FR=0),
- and always hold doubles as (feven, fodd) fp reg pair */
-
-.macro LDu feven,fodd,disp,base,temp
- l.s \feven, \disp(\base)
- l.s \fodd, \disp+4(\base)
-.endm
-
-.macro SDu feven,fodd,disp,base,temp
- s.s \feven, \disp(\base)
- s.s \fodd, \disp+4(\base)
-.endm
-
-.macro MTD reven,rodd,feven,fodd
- mtc1 \reven, \feven
- mtc1 \rodd, \fodd
-.endm
-
-#endif /* mips_isa_rev */
-
-// Macros to poison (negate) the reference for heap poisoning.
-.macro POISON_HEAP_REF rRef
-#ifdef USE_HEAP_POISONING
- subu \rRef, $zero, \rRef
-#endif // USE_HEAP_POISONING
-.endm
-
-// Macros to unpoison (negate) the reference for heap poisoning.
-.macro UNPOISON_HEAP_REF rRef
-#ifdef USE_HEAP_POISONING
- subu \rRef, $zero, \rRef
-#endif // USE_HEAP_POISONING
-.endm
-
-// Byte size of the instructions (un)poisoning heap references.
-#ifdef USE_HEAP_POISONING
-#define HEAP_POISON_INSTR_SIZE 4
-#else
-#define HEAP_POISON_INSTR_SIZE 0
-#endif // USE_HEAP_POISONING
-
-// Based on contents of creg select the minimum integer
-// At the end of the macro the original value of creg is lost
-.macro MINint dreg,rreg,sreg,creg
- .set push
- .set noat
-#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6)
- .ifc \dreg, \rreg
- selnez \dreg, \rreg, \creg
- seleqz \creg, \sreg, \creg
- .else
- seleqz \dreg, \sreg, \creg
- selnez \creg, \rreg, \creg
- .endif
- or \dreg, \dreg, \creg
-#else
- movn \dreg, \rreg, \creg
- movz \dreg, \sreg, \creg
-#endif
- .set pop
-.endm
-
-// Find minimum of two signed registers
-.macro MINs dreg,rreg,sreg
- .set push
- .set noat
- slt $at, \rreg, \sreg
- MINint \dreg, \rreg, \sreg, $at
- .set pop
-.endm
-
-// Find minimum of two unsigned registers
-.macro MINu dreg,rreg,sreg
- .set push
- .set noat
- sltu $at, \rreg, \sreg
- MINint \dreg, \rreg, \sreg, $at
- .set pop
-.endm
-
-// This utility macro is used to check whether the address contained in
-// a register is suitably aligned. Default usage is confirm that the
-// address stored in $sp is a multiple of 16. It can be used for other
-// alignments, and for other base address registers, if needed.
-//
-// Enable this macro by running the shell command:
-//
-// export ART_MIPS32_CHECK_ALIGNMENT=true
-//
-// NOTE: The value of alignment must be a power of 2, and must fit in an
-// unsigned 15-bit integer. The macro won't behave as expected if these
-// conditions aren't met.
-//
-.macro CHECK_ALIGNMENT ba=$sp, tmp=$at, alignment=16
-#ifdef ART_MIPS32_CHECK_ALIGNMENT
- .set push
- .set noat
- .set noreorder
- andi \tmp, \ba, \alignment-1
- beqz \tmp, .+12 # Skip break instruction if base address register (ba) is aligned
- nop
- break
- .set pop
-#endif
-.endm
-
-#endif // ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_S_
diff --git a/runtime/arch/mips/asm_support_mips.h b/runtime/arch/mips/asm_support_mips.h
deleted file mode 100644
index bec5238..0000000
--- a/runtime/arch/mips/asm_support_mips.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_H_
-#define ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_H_
-
-#include "asm_support.h"
-
-#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVES 112
-#define FRAME_SIZE_SAVE_REFS_ONLY 48
-#define FRAME_SIZE_SAVE_REFS_AND_ARGS 112
-#define FRAME_SIZE_SAVE_EVERYTHING 256
-#define FRAME_SIZE_SAVE_EVERYTHING_FOR_CLINIT FRAME_SIZE_SAVE_EVERYTHING
-#define FRAME_SIZE_SAVE_EVERYTHING_FOR_SUSPEND_CHECK FRAME_SIZE_SAVE_EVERYTHING
-
-// &art_quick_read_barrier_mark_introspection is the first of many entry points:
-// 21 entry points for long field offsets, large array indices and variable array indices
-// (see macro BRB_FIELD_LONG_OFFSET_ENTRY)
-// 21 entry points for short field offsets and small array indices
-// (see macro BRB_FIELD_SHORT_OFFSET_ENTRY)
-// 21 entry points for GC roots
-// (see macro BRB_GC_ROOT_ENTRY)
-
-// There are as many entry points of each kind as there are registers that
-// can hold a reference: V0-V1, A0-A3, T0-T7, S2-S8.
-#define BAKER_MARK_INTROSPECTION_REGISTER_COUNT 21
-
-#define BAKER_MARK_INTROSPECTION_FIELD_ARRAY_ENTRY_SIZE (8 * 4) // 8 instructions in
- // BRB_FIELD_*_OFFSET_ENTRY.
-
-#define BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRIES_OFFSET \
- (2 * BAKER_MARK_INTROSPECTION_REGISTER_COUNT * BAKER_MARK_INTROSPECTION_FIELD_ARRAY_ENTRY_SIZE)
-
-#define BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRY_SIZE (4 * 4) // 4 instructions in BRB_GC_ROOT_ENTRY.
-
-#endif // ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_H_
diff --git a/runtime/arch/mips/callee_save_frame_mips.h b/runtime/arch/mips/callee_save_frame_mips.h
deleted file mode 100644
index 84ce209..0000000
--- a/runtime/arch/mips/callee_save_frame_mips.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ARCH_MIPS_CALLEE_SAVE_FRAME_MIPS_H_
-#define ART_RUNTIME_ARCH_MIPS_CALLEE_SAVE_FRAME_MIPS_H_
-
-#include "arch/instruction_set.h"
-#include "base/bit_utils.h"
-#include "base/callee_save_type.h"
-#include "base/enums.h"
-#include "quick/quick_method_frame_info.h"
-#include "registers_mips.h"
-#include "runtime_globals.h"
-
-namespace art {
-namespace mips {
-
-static constexpr uint32_t kMipsCalleeSaveAlwaysSpills =
- (1u << art::mips::RA);
-static constexpr uint32_t kMipsCalleeSaveRefSpills =
- (1 << art::mips::S2) | (1 << art::mips::S3) | (1 << art::mips::S4) | (1 << art::mips::S5) |
- (1 << art::mips::S6) | (1 << art::mips::S7) | (1 << art::mips::GP) | (1 << art::mips::FP);
-static constexpr uint32_t kMipsCalleeSaveArgSpills =
- (1 << art::mips::A1) | (1 << art::mips::A2) | (1 << art::mips::A3) | (1 << art::mips::T0) |
- (1 << art::mips::T1);
-// We want to save all floating point register pairs at addresses
-// which are multiples of 8 so that we can eliminate use of the
-// SDu/LDu macros by using sdc1/ldc1 to store/load floating
-// register values using a single instruction. Because integer
-// registers are stored at the top of the frame, to achieve having
-// the floating point register pairs aligned on multiples of 8 the
-// number of integer registers saved must be even. Previously, the
-// only case in which we saved floating point registers beneath an
-// odd number of integer registers was when "type" is
-// CalleeSaveType::kSaveAllCalleeSaves. (There are other cases in
-// which an odd number of integer registers are saved but those
-// cases don't save any floating point registers. If no floating
-// point registers are saved we don't care if the number of integer
-// registers saved is odd or even). To save an even number of
-// integer registers in this particular case we add the ZERO
-// register to the list of registers which get saved.
-static constexpr uint32_t kMipsCalleeSaveAllSpills =
- (1 << art::mips::ZERO) | (1 << art::mips::S0) | (1 << art::mips::S1);
-static constexpr uint32_t kMipsCalleeSaveEverythingSpills =
- (1 << art::mips::AT) | (1 << art::mips::V0) | (1 << art::mips::V1) |
- (1 << art::mips::A0) | (1 << art::mips::A1) | (1 << art::mips::A2) | (1 << art::mips::A3) |
- (1 << art::mips::T0) | (1 << art::mips::T1) | (1 << art::mips::T2) | (1 << art::mips::T3) |
- (1 << art::mips::T4) | (1 << art::mips::T5) | (1 << art::mips::T6) | (1 << art::mips::T7) |
- (1 << art::mips::S0) | (1 << art::mips::S1) | (1 << art::mips::T8) | (1 << art::mips::T9);
-
-static constexpr uint32_t kMipsCalleeSaveFpAlwaysSpills = 0;
-static constexpr uint32_t kMipsCalleeSaveFpRefSpills = 0;
-static constexpr uint32_t kMipsCalleeSaveFpArgSpills =
- (1 << art::mips::F8) | (1 << art::mips::F9) | (1 << art::mips::F10) | (1 << art::mips::F11) |
- (1 << art::mips::F12) | (1 << art::mips::F13) | (1 << art::mips::F14) | (1 << art::mips::F15) |
- (1 << art::mips::F16) | (1 << art::mips::F17) | (1 << art::mips::F18) | (1 << art::mips::F19);
-static constexpr uint32_t kMipsCalleeSaveAllFPSpills =
- (1 << art::mips::F20) | (1 << art::mips::F21) | (1 << art::mips::F22) | (1 << art::mips::F23) |
- (1 << art::mips::F24) | (1 << art::mips::F25) | (1 << art::mips::F26) | (1 << art::mips::F27) |
- (1 << art::mips::F28) | (1 << art::mips::F29) | (1 << art::mips::F30) | (1u << art::mips::F31);
-static constexpr uint32_t kMipsCalleeSaveFpEverythingSpills =
- (1 << art::mips::F0) | (1 << art::mips::F1) | (1 << art::mips::F2) | (1 << art::mips::F3) |
- (1 << art::mips::F4) | (1 << art::mips::F5) | (1 << art::mips::F6) | (1 << art::mips::F7) |
- (1 << art::mips::F8) | (1 << art::mips::F9) | (1 << art::mips::F10) | (1 << art::mips::F11) |
- (1 << art::mips::F12) | (1 << art::mips::F13) | (1 << art::mips::F14) | (1 << art::mips::F15) |
- (1 << art::mips::F16) | (1 << art::mips::F17) | (1 << art::mips::F18) | (1 << art::mips::F19) |
- (1 << art::mips::F20) | (1 << art::mips::F21) | (1 << art::mips::F22) | (1 << art::mips::F23) |
- (1 << art::mips::F24) | (1 << art::mips::F25) | (1 << art::mips::F26) | (1 << art::mips::F27) |
- (1 << art::mips::F28) | (1 << art::mips::F29) | (1 << art::mips::F30) | (1u << art::mips::F31);
-
-class MipsCalleeSaveFrame {
- public:
- static constexpr uint32_t GetCoreSpills(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return kMipsCalleeSaveAlwaysSpills | kMipsCalleeSaveRefSpills |
- (type == CalleeSaveType::kSaveRefsAndArgs ? kMipsCalleeSaveArgSpills : 0) |
- (type == CalleeSaveType::kSaveAllCalleeSaves ? kMipsCalleeSaveAllSpills : 0) |
- (type == CalleeSaveType::kSaveEverything ? kMipsCalleeSaveEverythingSpills : 0);
- }
-
- static constexpr uint32_t GetFpSpills(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return kMipsCalleeSaveFpAlwaysSpills | kMipsCalleeSaveFpRefSpills |
- (type == CalleeSaveType::kSaveRefsAndArgs ? kMipsCalleeSaveFpArgSpills : 0) |
- (type == CalleeSaveType::kSaveAllCalleeSaves ? kMipsCalleeSaveAllFPSpills : 0) |
- (type == CalleeSaveType::kSaveEverything ? kMipsCalleeSaveFpEverythingSpills : 0);
- }
-
- static constexpr uint32_t GetFrameSize(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return RoundUp((POPCOUNT(GetCoreSpills(type)) /* gprs */ +
- POPCOUNT(GetFpSpills(type)) /* fprs */ +
- 1 /* Method* */) * static_cast<size_t>(kMipsPointerSize), kStackAlignment);
- }
-
- static constexpr QuickMethodFrameInfo GetMethodFrameInfo(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return QuickMethodFrameInfo(GetFrameSize(type), GetCoreSpills(type), GetFpSpills(type));
- }
-
- static constexpr size_t GetFpr1Offset(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return GetFrameSize(type) -
- (POPCOUNT(GetCoreSpills(type)) +
- POPCOUNT(GetFpSpills(type))) * static_cast<size_t>(kMipsPointerSize);
- }
-
- static constexpr size_t GetGpr1Offset(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return GetFrameSize(type) -
- POPCOUNT(GetCoreSpills(type)) * static_cast<size_t>(kMipsPointerSize);
- }
-
- static constexpr size_t GetReturnPcOffset(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return GetFrameSize(type) - static_cast<size_t>(kMipsPointerSize);
- }
-};
-
-} // namespace mips
-} // namespace art
-
-#endif // ART_RUNTIME_ARCH_MIPS_CALLEE_SAVE_FRAME_MIPS_H_
diff --git a/runtime/arch/mips/context_mips.cc b/runtime/arch/mips/context_mips.cc
deleted file mode 100644
index 3f362de..0000000
--- a/runtime/arch/mips/context_mips.cc
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "context_mips.h"
-
-#include "base/bit_utils.h"
-#include "base/bit_utils_iterator.h"
-#include "quick/quick_method_frame_info.h"
-
-namespace art {
-namespace mips {
-
-static constexpr uint32_t gZero = 0;
-
-void MipsContext::Reset() {
- std::fill_n(gprs_, arraysize(gprs_), nullptr);
- std::fill_n(fprs_, arraysize(fprs_), nullptr);
- gprs_[SP] = &sp_;
- gprs_[T9] = &t9_;
- gprs_[A0] = &arg0_;
- // Initialize registers with easy to spot debug values.
- sp_ = MipsContext::kBadGprBase + SP;
- t9_ = MipsContext::kBadGprBase + T9;
- arg0_ = 0;
-}
-
-void MipsContext::FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& frame_info) {
- int spill_pos = 0;
-
- // Core registers come first, from the highest down to the lowest.
- for (uint32_t core_reg : HighToLowBits(frame_info.CoreSpillMask())) {
- // If the $ZERO register shows up in the list of registers to
- // be saved this was only done to properly align the floating
- // point register save locations to addresses which are
- // multiples of 8. We only store the address of a register in
- // gprs_ if the register is not the $ZERO register. The $ZERO
- // register is read-only so there's never a reason to save it
- // on the stack.
- if (core_reg != 0u) {
- gprs_[core_reg] = CalleeSaveAddress(frame, spill_pos, frame_info.FrameSizeInBytes());
- }
- ++spill_pos;
- }
- DCHECK_EQ(spill_pos, POPCOUNT(frame_info.CoreSpillMask()));
-
- // FP registers come second, from the highest down to the lowest.
- for (uint32_t fp_reg : HighToLowBits(frame_info.FpSpillMask())) {
- fprs_[fp_reg] = CalleeSaveAddress(frame, spill_pos, frame_info.FrameSizeInBytes());
- ++spill_pos;
- }
- DCHECK_EQ(spill_pos, POPCOUNT(frame_info.CoreSpillMask()) + POPCOUNT(frame_info.FpSpillMask()));
-}
-
-void MipsContext::SetGPR(uint32_t reg, uintptr_t value) {
- CHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
- DCHECK(IsAccessibleGPR(reg));
- CHECK_NE(gprs_[reg], &gZero); // Can't overwrite this static value since they are never reset.
- *gprs_[reg] = value;
-}
-
-void MipsContext::SetFPR(uint32_t reg, uintptr_t value) {
- CHECK_LT(reg, static_cast<uint32_t>(kNumberOfFRegisters));
- DCHECK(IsAccessibleFPR(reg));
- CHECK_NE(fprs_[reg], &gZero); // Can't overwrite this static value since they are never reset.
- *fprs_[reg] = value;
-}
-
-void MipsContext::SmashCallerSaves() {
- // This needs to be 0 because we want a null/zero return value.
- gprs_[V0] = const_cast<uint32_t*>(&gZero);
- gprs_[V1] = const_cast<uint32_t*>(&gZero);
- gprs_[A1] = nullptr;
- gprs_[A2] = nullptr;
- gprs_[A3] = nullptr;
- gprs_[T0] = nullptr;
- gprs_[T1] = nullptr;
-
- fprs_[F8] = nullptr;
- fprs_[F9] = nullptr;
- fprs_[F10] = nullptr;
- fprs_[F11] = nullptr;
- fprs_[F12] = nullptr;
- fprs_[F13] = nullptr;
- fprs_[F14] = nullptr;
- fprs_[F15] = nullptr;
- fprs_[F16] = nullptr;
- fprs_[F17] = nullptr;
- fprs_[F18] = nullptr;
- fprs_[F19] = nullptr;
-}
-
-extern "C" NO_RETURN void art_quick_do_long_jump(uint32_t*, uint32_t*);
-
-void MipsContext::DoLongJump() {
- uintptr_t gprs[kNumberOfCoreRegisters];
- // Align fprs[] so that art_quick_do_long_jump() can load FPU
- // registers from it using the ldc1 instruction.
- uint32_t fprs[kNumberOfFRegisters] __attribute__((aligned(8)));
- for (size_t i = 0; i < kNumberOfCoreRegisters; ++i) {
- gprs[i] = gprs_[i] != nullptr ? *gprs_[i] : MipsContext::kBadGprBase + i;
- }
- for (size_t i = 0; i < kNumberOfFRegisters; ++i) {
- fprs[i] = fprs_[i] != nullptr ? *fprs_[i] : MipsContext::kBadFprBase + i;
- }
- art_quick_do_long_jump(gprs, fprs);
-}
-
-} // namespace mips
-} // namespace art
diff --git a/runtime/arch/mips/context_mips.h b/runtime/arch/mips/context_mips.h
deleted file mode 100644
index 960aea1..0000000
--- a/runtime/arch/mips/context_mips.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ARCH_MIPS_CONTEXT_MIPS_H_
-#define ART_RUNTIME_ARCH_MIPS_CONTEXT_MIPS_H_
-
-#include <android-base/logging.h>
-
-#include "arch/context.h"
-#include "base/macros.h"
-#include "registers_mips.h"
-
-namespace art {
-namespace mips {
-
-class MipsContext : public Context {
- public:
- MipsContext() {
- Reset();
- }
- virtual ~MipsContext() {}
-
- void Reset() override;
-
- void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) override;
-
- void SetSP(uintptr_t new_sp) override {
- SetGPR(SP, new_sp);
- }
-
- void SetPC(uintptr_t new_pc) override {
- SetGPR(T9, new_pc);
- }
-
- bool IsAccessibleGPR(uint32_t reg) override {
- CHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
- return gprs_[reg] != nullptr;
- }
-
- uintptr_t* GetGPRAddress(uint32_t reg) override {
- DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
- return gprs_[reg];
- }
-
- uintptr_t GetGPR(uint32_t reg) override {
- CHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
- DCHECK(IsAccessibleGPR(reg));
- return *gprs_[reg];
- }
-
- void SetGPR(uint32_t reg, uintptr_t value) override;
-
- bool IsAccessibleFPR(uint32_t reg) override {
- CHECK_LT(reg, static_cast<uint32_t>(kNumberOfFRegisters));
- return fprs_[reg] != nullptr;
- }
-
- uintptr_t GetFPR(uint32_t reg) override {
- CHECK_LT(reg, static_cast<uint32_t>(kNumberOfFRegisters));
- DCHECK(IsAccessibleFPR(reg));
- return *fprs_[reg];
- }
-
- void SetFPR(uint32_t reg, uintptr_t value) override;
-
- void SmashCallerSaves() override;
- NO_RETURN void DoLongJump() override;
-
- void SetArg0(uintptr_t new_arg0_value) override {
- SetGPR(A0, new_arg0_value);
- }
-
- private:
- // Pointers to registers in the stack, initialized to null except for the special cases below.
- uintptr_t* gprs_[kNumberOfCoreRegisters];
- uint32_t* fprs_[kNumberOfFRegisters];
- // Hold values for sp and t9 if they are not located within a stack frame. We use t9 for the
- // PC (as ra is required to be valid for single-frame deopt and must not be clobbered). We
- // also need the first argument for single-frame deopt.
- uintptr_t sp_, t9_, arg0_;
-};
-} // namespace mips
-} // namespace art
-
-#endif // ART_RUNTIME_ARCH_MIPS_CONTEXT_MIPS_H_
diff --git a/runtime/arch/mips/entrypoints_direct_mips.h b/runtime/arch/mips/entrypoints_direct_mips.h
deleted file mode 100644
index 3a6625f..0000000
--- a/runtime/arch/mips/entrypoints_direct_mips.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ARCH_MIPS_ENTRYPOINTS_DIRECT_MIPS_H_
-#define ART_RUNTIME_ARCH_MIPS_ENTRYPOINTS_DIRECT_MIPS_H_
-
-#include "entrypoints/quick/quick_entrypoints_enum.h"
-
-namespace art {
-
-/* Returns true if entrypoint contains direct reference to
- native implementation. The list is required as direct
- entrypoints need additional handling during invocation.*/
-static constexpr bool IsDirectEntrypoint(QuickEntrypointEnum entrypoint) {
- return
- entrypoint == kQuickInstanceofNonTrivial ||
- entrypoint == kQuickA64Load ||
- entrypoint == kQuickA64Store ||
- entrypoint == kQuickFmod ||
- entrypoint == kQuickFmodf ||
- entrypoint == kQuickMemcpy ||
- entrypoint == kQuickL2d ||
- entrypoint == kQuickL2f ||
- entrypoint == kQuickD2iz ||
- entrypoint == kQuickF2iz ||
- entrypoint == kQuickD2l ||
- entrypoint == kQuickF2l ||
- entrypoint == kQuickLdiv ||
- entrypoint == kQuickLmod ||
- entrypoint == kQuickLmul ||
- entrypoint == kQuickCmpgDouble ||
- entrypoint == kQuickCmpgFloat ||
- entrypoint == kQuickCmplDouble ||
- entrypoint == kQuickCmplFloat ||
- entrypoint == kQuickReadBarrierJni ||
- entrypoint == kQuickReadBarrierSlow ||
- entrypoint == kQuickReadBarrierForRootSlow ||
- entrypoint == kQuickCos ||
- entrypoint == kQuickSin ||
- entrypoint == kQuickAcos ||
- entrypoint == kQuickAsin ||
- entrypoint == kQuickAtan ||
- entrypoint == kQuickAtan2 ||
- entrypoint == kQuickPow ||
- entrypoint == kQuickCbrt ||
- entrypoint == kQuickCosh ||
- entrypoint == kQuickExp ||
- entrypoint == kQuickExpm1 ||
- entrypoint == kQuickHypot ||
- entrypoint == kQuickLog ||
- entrypoint == kQuickLog10 ||
- entrypoint == kQuickNextAfter ||
- entrypoint == kQuickSinh ||
- entrypoint == kQuickTan ||
- entrypoint == kQuickTanh;
-}
-
-} // namespace art
-
-#endif // ART_RUNTIME_ARCH_MIPS_ENTRYPOINTS_DIRECT_MIPS_H_
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
deleted file mode 100644
index cbf5681..0000000
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ /dev/null
@@ -1,483 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <string.h>
-
-#include "arch/mips/asm_support_mips.h"
-#include "base/atomic.h"
-#include "base/logging.h"
-#include "base/quasi_atomic.h"
-#include "entrypoints/entrypoint_utils.h"
-#include "entrypoints/jni/jni_entrypoints.h"
-#include "entrypoints/math_entrypoints.h"
-#include "entrypoints/quick/quick_alloc_entrypoints.h"
-#include "entrypoints/quick/quick_default_externs.h"
-#include "entrypoints/quick/quick_entrypoints.h"
-#include "entrypoints/runtime_asm_entrypoints.h"
-#include "entrypoints_direct_mips.h"
-#include "interpreter/interpreter.h"
-
-namespace art {
-
-// Cast entrypoints.
-extern "C" size_t artInstanceOfFromCode(mirror::Object* obj, mirror::Class* ref_class);
-
-// Read barrier entrypoints.
-// art_quick_read_barrier_mark_regXX uses a non-standard calling
-// convention: it expects its input in register XX+1 and returns its
-// result in that same register, and saves and restores all
-// caller-save registers.
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg01(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg02(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg03(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg04(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg05(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg06(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg07(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg08(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg09(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg10(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg11(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg12(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg13(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg14(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg17(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg18(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg19(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg20(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg21(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg22(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg29(mirror::Object*);
-
-extern "C" mirror::Object* art_quick_read_barrier_mark_introspection(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_introspection_gc_roots(mirror::Object*);
-extern "C" void art_quick_read_barrier_mark_introspection_end_of_entries(void);
-
-// Math entrypoints.
-extern int32_t CmpgDouble(double a, double b);
-extern int32_t CmplDouble(double a, double b);
-extern int32_t CmpgFloat(float a, float b);
-extern int32_t CmplFloat(float a, float b);
-extern "C" int64_t artLmul(int64_t a, int64_t b);
-extern "C" int64_t artLdiv(int64_t a, int64_t b);
-extern "C" int64_t artLmod(int64_t a, int64_t b);
-
-// Math conversions.
-extern "C" int32_t __fixsfsi(float op1); // FLOAT_TO_INT
-extern "C" int32_t __fixdfsi(double op1); // DOUBLE_TO_INT
-extern "C" float __floatdisf(int64_t op1); // LONG_TO_FLOAT
-extern "C" double __floatdidf(int64_t op1); // LONG_TO_DOUBLE
-extern "C" int64_t __fixsfdi(float op1); // FLOAT_TO_LONG
-extern "C" int64_t __fixdfdi(double op1); // DOUBLE_TO_LONG
-
-// Single-precision FP arithmetics.
-extern "C" float fmodf(float a, float b); // REM_FLOAT[_2ADDR]
-
-// Double-precision FP arithmetics.
-extern "C" double fmod(double a, double b); // REM_DOUBLE[_2ADDR]
-
-// Long long arithmetics - REM_LONG[_2ADDR] and DIV_LONG[_2ADDR]
-extern "C" int64_t __divdi3(int64_t, int64_t);
-extern "C" int64_t __moddi3(int64_t, int64_t);
-
-void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints, bool is_active) {
- intptr_t introspection_field_array_entries_size =
- reinterpret_cast<intptr_t>(&art_quick_read_barrier_mark_introspection_gc_roots) -
- reinterpret_cast<intptr_t>(&art_quick_read_barrier_mark_introspection);
- static_assert(
- BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRIES_OFFSET == 2 *
- BAKER_MARK_INTROSPECTION_REGISTER_COUNT * BAKER_MARK_INTROSPECTION_FIELD_ARRAY_ENTRY_SIZE,
- "Expecting equal");
- DCHECK_EQ(introspection_field_array_entries_size,
- BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRIES_OFFSET);
- intptr_t introspection_gc_root_entries_size =
- reinterpret_cast<intptr_t>(&art_quick_read_barrier_mark_introspection_end_of_entries) -
- reinterpret_cast<intptr_t>(&art_quick_read_barrier_mark_introspection_gc_roots);
- DCHECK_EQ(introspection_gc_root_entries_size,
- BAKER_MARK_INTROSPECTION_REGISTER_COUNT * BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRY_SIZE);
- qpoints->pReadBarrierMarkReg00 = is_active ? art_quick_read_barrier_mark_introspection : nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg00),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg01 = is_active ? art_quick_read_barrier_mark_reg01 : nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg01),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg02 = is_active ? art_quick_read_barrier_mark_reg02 : nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg02),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg03 = is_active ? art_quick_read_barrier_mark_reg03 : nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg03),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg04 = is_active ? art_quick_read_barrier_mark_reg04 : nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg04),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg05 = is_active ? art_quick_read_barrier_mark_reg05 : nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg05),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg06 = is_active ? art_quick_read_barrier_mark_reg06 : nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg06),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg07 = is_active ? art_quick_read_barrier_mark_reg07 : nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg07),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg08 = is_active ? art_quick_read_barrier_mark_reg08 : nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg08),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg09 = is_active ? art_quick_read_barrier_mark_reg09 : nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg09),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg10 = is_active ? art_quick_read_barrier_mark_reg10 : nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg10),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg11 = is_active ? art_quick_read_barrier_mark_reg11 : nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg11),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg12 = is_active ? art_quick_read_barrier_mark_reg12 : nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg12),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg13 = is_active ? art_quick_read_barrier_mark_reg13 : nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg13),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg14 = is_active ? art_quick_read_barrier_mark_reg14 : nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg14),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg17 = is_active ? art_quick_read_barrier_mark_reg17 : nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg17),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg18 = is_active ? art_quick_read_barrier_mark_reg18 : nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg18),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg19 = is_active ? art_quick_read_barrier_mark_reg19 : nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg19),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg20 = is_active ? art_quick_read_barrier_mark_reg20 : nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg20),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg21 = is_active ? art_quick_read_barrier_mark_reg21 : nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg21),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg22 = is_active ? art_quick_read_barrier_mark_reg22 : nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg22),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg29 = is_active ? art_quick_read_barrier_mark_reg29 : nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg29),
- "Non-direct C stub marked direct.");
-}
-
-void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
- // Note: MIPS has asserts checking for the type of entrypoint. Don't move it
- // to InitDefaultEntryPoints().
-
- // JNI
- jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
-
- // Alloc
- ResetQuickAllocEntryPoints(qpoints, /*is_active=*/ false);
-
- // Cast
- qpoints->pInstanceofNonTrivial = artInstanceOfFromCode;
- static_assert(IsDirectEntrypoint(kQuickInstanceofNonTrivial), "Direct C stub not marked direct.");
- qpoints->pCheckInstanceOf = art_quick_check_instance_of;
- static_assert(!IsDirectEntrypoint(kQuickCheckInstanceOf), "Non-direct C stub marked direct.");
-
- // Resolution and initialization
- qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage;
- static_assert(!IsDirectEntrypoint(kQuickInitializeStaticStorage),
- "Non-direct C stub marked direct.");
- qpoints->pResolveTypeAndVerifyAccess = art_quick_resolve_type_and_verify_access;
- static_assert(!IsDirectEntrypoint(kQuickResolveTypeAndVerifyAccess),
- "Non-direct C stub marked direct.");
- qpoints->pResolveType = art_quick_resolve_type;
- static_assert(!IsDirectEntrypoint(kQuickResolveType), "Non-direct C stub marked direct.");
- qpoints->pResolveString = art_quick_resolve_string;
- static_assert(!IsDirectEntrypoint(kQuickResolveString), "Non-direct C stub marked direct.");
- qpoints->pResolveMethodHandle = art_quick_resolve_method_handle;
- static_assert(!IsDirectEntrypoint(kQuickResolveMethodHandle), "Non-direct C stub marked direct.");
- qpoints->pResolveMethodType = art_quick_resolve_method_type;
- static_assert(!IsDirectEntrypoint(kQuickResolveMethodType), "Non-direct C stub marked direct.");
-
- // Field
- qpoints->pSet8Instance = art_quick_set8_instance;
- static_assert(!IsDirectEntrypoint(kQuickSet8Instance), "Non-direct C stub marked direct.");
- qpoints->pSet8Static = art_quick_set8_static;
- static_assert(!IsDirectEntrypoint(kQuickSet8Static), "Non-direct C stub marked direct.");
- qpoints->pSet16Instance = art_quick_set16_instance;
- static_assert(!IsDirectEntrypoint(kQuickSet16Instance), "Non-direct C stub marked direct.");
- qpoints->pSet16Static = art_quick_set16_static;
- static_assert(!IsDirectEntrypoint(kQuickSet16Static), "Non-direct C stub marked direct.");
- qpoints->pSet32Instance = art_quick_set32_instance;
- static_assert(!IsDirectEntrypoint(kQuickSet32Instance), "Non-direct C stub marked direct.");
- qpoints->pSet32Static = art_quick_set32_static;
- static_assert(!IsDirectEntrypoint(kQuickSet32Static), "Non-direct C stub marked direct.");
- qpoints->pSet64Instance = art_quick_set64_instance;
- static_assert(!IsDirectEntrypoint(kQuickSet64Instance), "Non-direct C stub marked direct.");
- qpoints->pSet64Static = art_quick_set64_static;
- static_assert(!IsDirectEntrypoint(kQuickSet64Static), "Non-direct C stub marked direct.");
- qpoints->pSetObjInstance = art_quick_set_obj_instance;
- static_assert(!IsDirectEntrypoint(kQuickSetObjInstance), "Non-direct C stub marked direct.");
- qpoints->pSetObjStatic = art_quick_set_obj_static;
- static_assert(!IsDirectEntrypoint(kQuickSetObjStatic), "Non-direct C stub marked direct.");
- qpoints->pGetBooleanInstance = art_quick_get_boolean_instance;
- static_assert(!IsDirectEntrypoint(kQuickGetBooleanInstance), "Non-direct C stub marked direct.");
- qpoints->pGetByteInstance = art_quick_get_byte_instance;
- static_assert(!IsDirectEntrypoint(kQuickGetByteInstance), "Non-direct C stub marked direct.");
- qpoints->pGetCharInstance = art_quick_get_char_instance;
- static_assert(!IsDirectEntrypoint(kQuickGetCharInstance), "Non-direct C stub marked direct.");
- qpoints->pGetShortInstance = art_quick_get_short_instance;
- static_assert(!IsDirectEntrypoint(kQuickGetShortInstance), "Non-direct C stub marked direct.");
- qpoints->pGet32Instance = art_quick_get32_instance;
- static_assert(!IsDirectEntrypoint(kQuickGet32Instance), "Non-direct C stub marked direct.");
- qpoints->pGet64Instance = art_quick_get64_instance;
- static_assert(!IsDirectEntrypoint(kQuickGet64Instance), "Non-direct C stub marked direct.");
- qpoints->pGetObjInstance = art_quick_get_obj_instance;
- static_assert(!IsDirectEntrypoint(kQuickGetObjInstance), "Non-direct C stub marked direct.");
- qpoints->pGetBooleanStatic = art_quick_get_boolean_static;
- static_assert(!IsDirectEntrypoint(kQuickGetBooleanStatic), "Non-direct C stub marked direct.");
- qpoints->pGetByteStatic = art_quick_get_byte_static;
- static_assert(!IsDirectEntrypoint(kQuickGetByteStatic), "Non-direct C stub marked direct.");
- qpoints->pGetCharStatic = art_quick_get_char_static;
- static_assert(!IsDirectEntrypoint(kQuickGetCharStatic), "Non-direct C stub marked direct.");
- qpoints->pGetShortStatic = art_quick_get_short_static;
- static_assert(!IsDirectEntrypoint(kQuickGetShortStatic), "Non-direct C stub marked direct.");
- qpoints->pGet32Static = art_quick_get32_static;
- static_assert(!IsDirectEntrypoint(kQuickGet32Static), "Non-direct C stub marked direct.");
- qpoints->pGet64Static = art_quick_get64_static;
- static_assert(!IsDirectEntrypoint(kQuickGet64Static), "Non-direct C stub marked direct.");
- qpoints->pGetObjStatic = art_quick_get_obj_static;
- static_assert(!IsDirectEntrypoint(kQuickGetObjStatic), "Non-direct C stub marked direct.");
-
- // Array
- qpoints->pAputObject = art_quick_aput_obj;
- static_assert(!IsDirectEntrypoint(kQuickAputObject), "Non-direct C stub marked direct.");
-
- // JNI
- qpoints->pJniMethodStart = JniMethodStart;
- static_assert(!IsDirectEntrypoint(kQuickJniMethodStart), "Non-direct C stub marked direct.");
- qpoints->pJniMethodFastStart = JniMethodFastStart;
- static_assert(!IsDirectEntrypoint(kQuickJniMethodFastStart), "Non-direct C stub marked direct.");
- qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized;
- static_assert(!IsDirectEntrypoint(kQuickJniMethodStartSynchronized),
- "Non-direct C stub marked direct.");
- qpoints->pJniMethodEnd = JniMethodEnd;
- static_assert(!IsDirectEntrypoint(kQuickJniMethodEnd), "Non-direct C stub marked direct.");
- qpoints->pJniMethodFastEnd = JniMethodFastEnd;
- static_assert(!IsDirectEntrypoint(kQuickJniMethodFastEnd), "Non-direct C stub marked direct.");
- qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized;
- static_assert(!IsDirectEntrypoint(kQuickJniMethodEndSynchronized),
- "Non-direct C stub marked direct.");
- qpoints->pJniMethodEndWithReference = JniMethodEndWithReference;
- static_assert(!IsDirectEntrypoint(kQuickJniMethodEndWithReference),
- "Non-direct C stub marked direct.");
- qpoints->pJniMethodFastEndWithReference = JniMethodFastEndWithReference;
- static_assert(!IsDirectEntrypoint(kQuickJniMethodFastEndWithReference),
- "Non-direct C stub marked direct.");
- qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
- static_assert(!IsDirectEntrypoint(kQuickJniMethodEndWithReferenceSynchronized),
- "Non-direct C stub marked direct.");
- qpoints->pQuickGenericJniTrampoline = art_quick_generic_jni_trampoline;
- static_assert(!IsDirectEntrypoint(kQuickQuickGenericJniTrampoline),
- "Non-direct C stub marked direct.");
-
- // Locks
- if (UNLIKELY(VLOG_IS_ON(systrace_lock_logging))) {
- qpoints->pLockObject = art_quick_lock_object_no_inline;
- qpoints->pUnlockObject = art_quick_unlock_object_no_inline;
- } else {
- qpoints->pLockObject = art_quick_lock_object;
- qpoints->pUnlockObject = art_quick_unlock_object;
- }
- static_assert(!IsDirectEntrypoint(kQuickLockObject), "Non-direct C stub marked direct.");
- static_assert(!IsDirectEntrypoint(kQuickUnlockObject), "Non-direct C stub marked direct.");
-
- // Math
- qpoints->pCmpgDouble = CmpgDouble;
- static_assert(IsDirectEntrypoint(kQuickCmpgDouble), "Direct C stub not marked direct.");
- qpoints->pCmpgFloat = CmpgFloat;
- static_assert(IsDirectEntrypoint(kQuickCmpgFloat), "Direct C stub not marked direct.");
- qpoints->pCmplDouble = CmplDouble;
- static_assert(IsDirectEntrypoint(kQuickCmplDouble), "Direct C stub not marked direct.");
- qpoints->pCmplFloat = CmplFloat;
- static_assert(IsDirectEntrypoint(kQuickCmplFloat), "Direct C stub not marked direct.");
- qpoints->pFmod = fmod;
- static_assert(IsDirectEntrypoint(kQuickFmod), "Direct C stub not marked direct.");
- qpoints->pL2d = art_l2d;
- static_assert(IsDirectEntrypoint(kQuickL2d), "Direct C stub not marked direct.");
- qpoints->pFmodf = fmodf;
- static_assert(IsDirectEntrypoint(kQuickFmodf), "Direct C stub not marked direct.");
- qpoints->pL2f = art_l2f;
- static_assert(IsDirectEntrypoint(kQuickL2f), "Direct C stub not marked direct.");
- qpoints->pD2iz = art_d2i;
- static_assert(IsDirectEntrypoint(kQuickD2iz), "Direct C stub not marked direct.");
- qpoints->pF2iz = art_f2i;
- static_assert(IsDirectEntrypoint(kQuickF2iz), "Direct C stub not marked direct.");
- qpoints->pIdivmod = nullptr;
- qpoints->pD2l = art_d2l;
- static_assert(IsDirectEntrypoint(kQuickD2l), "Direct C stub not marked direct.");
- qpoints->pF2l = art_f2l;
- static_assert(IsDirectEntrypoint(kQuickF2l), "Direct C stub not marked direct.");
- qpoints->pLdiv = artLdiv;
- static_assert(IsDirectEntrypoint(kQuickLdiv), "Direct C stub not marked direct.");
- qpoints->pLmod = artLmod;
- static_assert(IsDirectEntrypoint(kQuickLmod), "Direct C stub not marked direct.");
- qpoints->pLmul = artLmul;
- static_assert(IsDirectEntrypoint(kQuickLmul), "Direct C stub not marked direct.");
- qpoints->pShlLong = art_quick_shl_long;
- static_assert(!IsDirectEntrypoint(kQuickShlLong), "Non-direct C stub marked direct.");
- qpoints->pShrLong = art_quick_shr_long;
- static_assert(!IsDirectEntrypoint(kQuickShrLong), "Non-direct C stub marked direct.");
- qpoints->pUshrLong = art_quick_ushr_long;
- static_assert(!IsDirectEntrypoint(kQuickUshrLong), "Non-direct C stub marked direct.");
-
- // More math.
- qpoints->pCos = cos;
- static_assert(IsDirectEntrypoint(kQuickCos), "Direct C stub marked non-direct.");
- qpoints->pSin = sin;
- static_assert(IsDirectEntrypoint(kQuickSin), "Direct C stub marked non-direct.");
- qpoints->pAcos = acos;
- static_assert(IsDirectEntrypoint(kQuickAcos), "Direct C stub marked non-direct.");
- qpoints->pAsin = asin;
- static_assert(IsDirectEntrypoint(kQuickAsin), "Direct C stub marked non-direct.");
- qpoints->pAtan = atan;
- static_assert(IsDirectEntrypoint(kQuickAtan), "Direct C stub marked non-direct.");
- qpoints->pAtan2 = atan2;
- static_assert(IsDirectEntrypoint(kQuickAtan2), "Direct C stub marked non-direct.");
- qpoints->pPow = pow;
- static_assert(IsDirectEntrypoint(kQuickPow), "Direct C stub marked non-direct.");
- qpoints->pCbrt = cbrt;
- static_assert(IsDirectEntrypoint(kQuickCbrt), "Direct C stub marked non-direct.");
- qpoints->pCosh = cosh;
- static_assert(IsDirectEntrypoint(kQuickCosh), "Direct C stub marked non-direct.");
- qpoints->pExp = exp;
- static_assert(IsDirectEntrypoint(kQuickExp), "Direct C stub marked non-direct.");
- qpoints->pExpm1 = expm1;
- static_assert(IsDirectEntrypoint(kQuickExpm1), "Direct C stub marked non-direct.");
- qpoints->pHypot = hypot;
- static_assert(IsDirectEntrypoint(kQuickHypot), "Direct C stub marked non-direct.");
- qpoints->pLog = log;
- static_assert(IsDirectEntrypoint(kQuickLog), "Direct C stub marked non-direct.");
- qpoints->pLog10 = log10;
- static_assert(IsDirectEntrypoint(kQuickLog10), "Direct C stub marked non-direct.");
- qpoints->pNextAfter = nextafter;
- static_assert(IsDirectEntrypoint(kQuickNextAfter), "Direct C stub marked non-direct.");
- qpoints->pSinh = sinh;
- static_assert(IsDirectEntrypoint(kQuickSinh), "Direct C stub marked non-direct.");
- qpoints->pTan = tan;
- static_assert(IsDirectEntrypoint(kQuickTan), "Direct C stub marked non-direct.");
- qpoints->pTanh = tanh;
- static_assert(IsDirectEntrypoint(kQuickTanh), "Direct C stub marked non-direct.");
-
- // Intrinsics
- qpoints->pIndexOf = art_quick_indexof;
- static_assert(!IsDirectEntrypoint(kQuickIndexOf), "Non-direct C stub marked direct.");
- qpoints->pStringCompareTo = art_quick_string_compareto;
- static_assert(!IsDirectEntrypoint(kQuickStringCompareTo), "Non-direct C stub marked direct.");
- qpoints->pMemcpy = memcpy;
-
- // Invocation
- qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline;
- qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
- qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
- qpoints->pInvokeDirectTrampolineWithAccessCheck =
- art_quick_invoke_direct_trampoline_with_access_check;
- static_assert(!IsDirectEntrypoint(kQuickInvokeDirectTrampolineWithAccessCheck),
- "Non-direct C stub marked direct.");
- qpoints->pInvokeInterfaceTrampolineWithAccessCheck =
- art_quick_invoke_interface_trampoline_with_access_check;
- static_assert(!IsDirectEntrypoint(kQuickInvokeInterfaceTrampolineWithAccessCheck),
- "Non-direct C stub marked direct.");
- qpoints->pInvokeStaticTrampolineWithAccessCheck =
- art_quick_invoke_static_trampoline_with_access_check;
- static_assert(!IsDirectEntrypoint(kQuickInvokeStaticTrampolineWithAccessCheck),
- "Non-direct C stub marked direct.");
- qpoints->pInvokeSuperTrampolineWithAccessCheck =
- art_quick_invoke_super_trampoline_with_access_check;
- static_assert(!IsDirectEntrypoint(kQuickInvokeSuperTrampolineWithAccessCheck),
- "Non-direct C stub marked direct.");
- qpoints->pInvokeVirtualTrampolineWithAccessCheck =
- art_quick_invoke_virtual_trampoline_with_access_check;
- static_assert(!IsDirectEntrypoint(kQuickInvokeVirtualTrampolineWithAccessCheck),
- "Non-direct C stub marked direct.");
- qpoints->pInvokePolymorphic = art_quick_invoke_polymorphic;
- static_assert(!IsDirectEntrypoint(kQuickInvokePolymorphic), "Non-direct C stub marked direct.");
- qpoints->pInvokeCustom = art_quick_invoke_custom;
- static_assert(!IsDirectEntrypoint(kQuickInvokeCustom), "Non-direct C stub marked direct.");
-
- // Thread
- qpoints->pTestSuspend = art_quick_test_suspend;
- static_assert(!IsDirectEntrypoint(kQuickTestSuspend), "Non-direct C stub marked direct.");
-
- // Throws
- qpoints->pDeliverException = art_quick_deliver_exception;
- static_assert(!IsDirectEntrypoint(kQuickDeliverException), "Non-direct C stub marked direct.");
- qpoints->pThrowArrayBounds = art_quick_throw_array_bounds;
- static_assert(!IsDirectEntrypoint(kQuickThrowArrayBounds), "Non-direct C stub marked direct.");
- qpoints->pThrowDivZero = art_quick_throw_div_zero;
- static_assert(!IsDirectEntrypoint(kQuickThrowDivZero), "Non-direct C stub marked direct.");
- qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception;
- static_assert(!IsDirectEntrypoint(kQuickThrowNullPointer), "Non-direct C stub marked direct.");
- qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow;
- static_assert(!IsDirectEntrypoint(kQuickThrowStackOverflow), "Non-direct C stub marked direct.");
- qpoints->pThrowStringBounds = art_quick_throw_string_bounds;
- static_assert(!IsDirectEntrypoint(kQuickThrowStringBounds), "Non-direct C stub marked direct.");
-
- // Deoptimization from compiled code.
- qpoints->pDeoptimize = art_quick_deoptimize_from_compiled_code;
- static_assert(!IsDirectEntrypoint(kQuickDeoptimize), "Non-direct C stub marked direct.");
-
- // Atomic 64-bit load/store
- qpoints->pA64Load = QuasiAtomic::Read64;
- static_assert(IsDirectEntrypoint(kQuickA64Load), "Non-direct C stub marked direct.");
- qpoints->pA64Store = QuasiAtomic::Write64;
- static_assert(IsDirectEntrypoint(kQuickA64Store), "Non-direct C stub marked direct.");
-
- // Read barrier.
- qpoints->pReadBarrierJni = ReadBarrierJni;
- static_assert(IsDirectEntrypoint(kQuickReadBarrierJni), "Direct C stub not marked direct.");
- UpdateReadBarrierEntrypoints(qpoints, /*is_active=*/ false);
- // Cannot use the following registers to pass arguments:
- // 0(ZERO), 1(AT), 16(S0), 17(S1), 24(T8), 25(T9), 26(K0), 27(K1), 28(GP), 29(SP), 31(RA).
- // Note that there are 30 entry points only: 00 for register 1(AT), ..., 29 for register 30(S8).
- qpoints->pReadBarrierMarkReg15 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg15),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg16 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg16),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg23 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg23),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg24 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg24),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg25 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg25),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg26 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg26),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg27 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg27),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg28 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg28),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierSlow = artReadBarrierSlow;
- static_assert(IsDirectEntrypoint(kQuickReadBarrierSlow), "Direct C stub not marked direct.");
- qpoints->pReadBarrierForRootSlow = artReadBarrierForRootSlow;
- static_assert(IsDirectEntrypoint(kQuickReadBarrierForRootSlow),
- "Direct C stub not marked direct.");
-}
-
-} // namespace art
diff --git a/runtime/arch/mips/fault_handler_mips.cc b/runtime/arch/mips/fault_handler_mips.cc
deleted file mode 100644
index f55df92..0000000
--- a/runtime/arch/mips/fault_handler_mips.cc
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <sys/ucontext.h>
-#include "fault_handler.h"
-
-#include "arch/instruction_set.h"
-#include "arch/mips/callee_save_frame_mips.h"
-#include "art_method.h"
-#include "base/callee_save_type.h"
-#include "base/hex_dump.h"
-#include "base/logging.h" // For VLOG.
-#include "base/macros.h"
-#include "registers_mips.h"
-#include "runtime_globals.h"
-#include "thread-current-inl.h"
-
-extern "C" void art_quick_throw_stack_overflow();
-extern "C" void art_quick_throw_null_pointer_exception_from_signal();
-
-//
-// Mips specific fault handler functions.
-//
-
-namespace art {
-
-void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo,
- void* context,
- ArtMethod** out_method,
- uintptr_t* out_return_pc,
- uintptr_t* out_sp,
- bool* out_is_stack_overflow) {
- struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
- struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
- *out_sp = static_cast<uintptr_t>(sc->sc_regs[mips::SP]);
- VLOG(signals) << "sp: " << *out_sp;
- if (*out_sp == 0) {
- return;
- }
-
- // In the case of a stack overflow, the stack is not valid and we can't
- // get the method from the top of the stack. However it's in r0.
- uintptr_t* fault_addr = reinterpret_cast<uintptr_t*>(siginfo->si_addr); // BVA addr
- uintptr_t* overflow_addr = reinterpret_cast<uintptr_t*>(
- reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(InstructionSet::kMips));
- if (overflow_addr == fault_addr) {
- *out_method = reinterpret_cast<ArtMethod*>(sc->sc_regs[mips::A0]);
- *is_stack_overflow = true;
- } else {
- // The method is at the top of the stack.
- *out_method = *reinterpret_cast<ArtMethod**>(*out_sp);
- *is_stack_overflow = false;
- }
-
- // Work out the return PC. This will be the address of the instruction
- // following the faulting ldr/str instruction.
-
- VLOG(signals) << "pc: " << std::hex
- << static_cast<void*>(reinterpret_cast<uint8_t*>(sc->sc_pc));
-
- *out_return_pc = sc->sc_pc + 4;
-}
-
-bool NullPointerHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info, void* context) {
- if (!IsValidImplicitCheck(info)) {
- return false;
- }
- // The code that looks for the catch location needs to know the value of the
- // PC at the point of call. For Null checks we insert a GC map that is immediately after
- // the load/store instruction that might cause the fault.
-
- struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
- struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
-
- // Decrement $sp by the frame size of the kSaveEverything method and store
- // the fault address in the padding right after the ArtMethod*.
- sc->sc_regs[mips::SP] -= mips::MipsCalleeSaveFrameSize(CalleeSaveType::kSaveEverything);
- uintptr_t* padding = reinterpret_cast<uintptr_t*>(sc->sc_regs[mips::SP]) + /* ArtMethod* */ 1;
- *padding = reinterpret_cast<uintptr_t>(info->si_addr);
-
- sc->sc_regs[mips::RA] = sc->sc_pc + 4; // RA needs to point to gc map location
- sc->sc_pc = reinterpret_cast<uintptr_t>(art_quick_throw_null_pointer_exception_from_signal);
- // Note: This entrypoint does not rely on T9 pointing to it, so we may as well preserve T9.
- VLOG(signals) << "Generating null pointer exception";
- return true;
-}
-
-bool SuspensionHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
- void* context ATTRIBUTE_UNUSED) {
- return false;
-}
-
-// Stack overflow fault handler.
-//
-// This checks that the fault address is equal to the current stack pointer
-// minus the overflow region size (16K typically). The instruction that
-// generates this signal is:
-//
-// lw zero, -16384(sp)
-//
-// It will fault if sp is inside the protected region on the stack.
-//
-// If we determine this is a stack overflow we need to move the stack pointer
-// to the overflow region below the protected region.
-
-bool StackOverflowHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info, void* context) {
- struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
- struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
- VLOG(signals) << "stack overflow handler with sp at " << std::hex << &uc;
- VLOG(signals) << "sigcontext: " << std::hex << sc;
-
- uintptr_t sp = sc->sc_regs[mips::SP];
- VLOG(signals) << "sp: " << std::hex << sp;
-
- uintptr_t fault_addr = reinterpret_cast<uintptr_t>(info->si_addr); // BVA addr
- VLOG(signals) << "fault_addr: " << std::hex << fault_addr;
- VLOG(signals) << "checking for stack overflow, sp: " << std::hex << sp <<
- ", fault_addr: " << fault_addr;
-
- uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(InstructionSet::kMips);
-
- // Check that the fault address is the value expected for a stack overflow.
- if (fault_addr != overflow_addr) {
- VLOG(signals) << "Not a stack overflow";
- return false;
- }
-
- VLOG(signals) << "Stack overflow found";
-
- // Now arrange for the signal handler to return to art_quick_throw_stack_overflow_from.
- // The value of RA must be the same as it was when we entered the code that
- // caused this fault. This will be inserted into a callee save frame by
- // the function to which this handler returns (art_quick_throw_stack_overflow).
- sc->sc_pc = reinterpret_cast<uintptr_t>(art_quick_throw_stack_overflow);
- sc->sc_regs[mips::T9] = sc->sc_pc; // make sure T9 points to the function
-
- // The kernel will now return to the address in sc->arm_pc.
- return true;
-}
-} // namespace art
diff --git a/runtime/arch/mips/instruction_set_features_mips.cc b/runtime/arch/mips/instruction_set_features_mips.cc
deleted file mode 100644
index 99ce536..0000000
--- a/runtime/arch/mips/instruction_set_features_mips.cc
+++ /dev/null
@@ -1,245 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "instruction_set_features_mips.h"
-
-#include <fstream>
-#include <sstream>
-
-#include <android-base/stringprintf.h>
-#include <android-base/strings.h>
-
-#include "base/stl_util.h"
-
-namespace art {
-
-using android::base::StringPrintf;
-
-// An enum for the Mips revision.
-enum class MipsLevel {
- kBase,
- kR2,
- kR5,
- kR6
-};
-
-#if defined(_MIPS_ARCH_MIPS32R6)
-static constexpr MipsLevel kRuntimeMipsLevel = MipsLevel::kR6;
-#elif defined(_MIPS_ARCH_MIPS32R5)
-static constexpr MipsLevel kRuntimeMipsLevel = MipsLevel::kR5;
-#elif defined(_MIPS_ARCH_MIPS32R2)
-static constexpr MipsLevel kRuntimeMipsLevel = MipsLevel::kR2;
-#else
-static constexpr MipsLevel kRuntimeMipsLevel = MipsLevel::kBase;
-#endif
-
-static void GetFlagsFromCppDefined(bool* mips_isa_gte2, bool* r6, bool* fpu_32bit, bool* msa) {
- // Override defaults based on compiler flags.
- if (kRuntimeMipsLevel >= MipsLevel::kR2) {
- *mips_isa_gte2 = true;
- } else {
- *mips_isa_gte2 = false;
- }
-
- if (kRuntimeMipsLevel >= MipsLevel::kR5) {
- *fpu_32bit = false;
- *msa = true;
- } else {
- *fpu_32bit = true;
- *msa = false;
- }
-
- if (kRuntimeMipsLevel >= MipsLevel::kR6) {
- *r6 = true;
- } else {
- *r6 = false;
- }
-}
-
-MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromVariant(
- const std::string& variant, std::string* error_msg ATTRIBUTE_UNUSED) {
-
- // Override defaults based on compiler flags.
- // This is needed when running ART test where the variant is not defined.
- bool fpu_32bit;
- bool mips_isa_gte2;
- bool r6;
- bool msa;
- GetFlagsFromCppDefined(&mips_isa_gte2, &r6, &fpu_32bit, &msa);
-
- // Override defaults based on variant string.
- // Only care if it is R1, R2, R5 or R6 and we assume all CPUs will have a FP unit.
- constexpr const char* kMips32Prefix = "mips32r";
- const size_t kPrefixLength = strlen(kMips32Prefix);
- if (variant.compare(0, kPrefixLength, kMips32Prefix, kPrefixLength) == 0 &&
- variant.size() > kPrefixLength) {
- r6 = (variant[kPrefixLength] >= '6');
- fpu_32bit = (variant[kPrefixLength] < '5');
- mips_isa_gte2 = (variant[kPrefixLength] >= '2');
- msa = (variant[kPrefixLength] >= '5');
- } else if (variant == "default") {
- // Default variant has FPU, is gte2. This is the traditional setting.
- //
- // Note, we get FPU bitness and R6-ness from the build (using cpp defines, see above)
- // and don't override them because many things depend on the "default" variant being
- // sufficient for most purposes. That is, "default" should work for both R2 and R6.
- // Use "mips32r#" to get a specific configuration, possibly not matching the runtime
- // ISA (e.g. for ISA-specific testing of dex2oat internals).
- mips_isa_gte2 = true;
- } else {
- LOG(WARNING) << "Unexpected CPU variant for Mips32 using defaults: " << variant;
- }
-
- return MipsFeaturesUniquePtr(new MipsInstructionSetFeatures(fpu_32bit, mips_isa_gte2, r6, msa));
-}
-
-MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromBitmap(uint32_t bitmap) {
- bool fpu_32bit = (bitmap & kFpu32Bitfield) != 0;
- bool mips_isa_gte2 = (bitmap & kIsaRevGte2Bitfield) != 0;
- bool r6 = (bitmap & kR6) != 0;
- bool msa = (bitmap & kMsaBitfield) != 0;
- return MipsFeaturesUniquePtr(new MipsInstructionSetFeatures(fpu_32bit, mips_isa_gte2, r6, msa));
-}
-
-MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromCppDefines() {
- bool fpu_32bit;
- bool mips_isa_gte2;
- bool r6;
- bool msa;
- GetFlagsFromCppDefined(&mips_isa_gte2, &r6, &fpu_32bit, &msa);
-
- return MipsFeaturesUniquePtr(new MipsInstructionSetFeatures(fpu_32bit, mips_isa_gte2, r6, msa));
-}
-
-MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromCpuInfo() {
- bool fpu_32bit;
- bool mips_isa_gte2;
- bool r6;
- bool msa;
- GetFlagsFromCppDefined(&mips_isa_gte2, &r6, &fpu_32bit, &msa);
-
- msa = false;
-
- std::ifstream in("/proc/cpuinfo");
- if (!in.fail()) {
- while (!in.eof()) {
- std::string line;
- std::getline(in, line);
- if (!in.eof()) {
- LOG(INFO) << "cpuinfo line: " << line;
- if (line.find("ASEs") != std::string::npos) {
- LOG(INFO) << "found Application Specific Extensions";
- if (line.find("msa") != std::string::npos) {
- msa = true;
- }
- }
- }
- }
- in.close();
- } else {
- LOG(ERROR) << "Failed to open /proc/cpuinfo";
- }
-
- return MipsFeaturesUniquePtr(new MipsInstructionSetFeatures(fpu_32bit, mips_isa_gte2, r6, msa));
-}
-
-MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromHwcap() {
- UNIMPLEMENTED(WARNING);
- return FromCppDefines();
-}
-
-MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromAssembly() {
- UNIMPLEMENTED(WARNING);
- return FromCppDefines();
-}
-
-bool MipsInstructionSetFeatures::Equals(const InstructionSetFeatures* other) const {
- if (InstructionSet::kMips != other->GetInstructionSet()) {
- return false;
- }
- const MipsInstructionSetFeatures* other_as_mips = other->AsMipsInstructionSetFeatures();
- return (fpu_32bit_ == other_as_mips->fpu_32bit_) &&
- (mips_isa_gte2_ == other_as_mips->mips_isa_gte2_) &&
- (r6_ == other_as_mips->r6_) &&
- (msa_ == other_as_mips->msa_);
-}
-
-uint32_t MipsInstructionSetFeatures::AsBitmap() const {
- return (fpu_32bit_ ? kFpu32Bitfield : 0) |
- (mips_isa_gte2_ ? kIsaRevGte2Bitfield : 0) |
- (r6_ ? kR6 : 0) |
- (msa_ ? kMsaBitfield : 0);
-}
-
-std::string MipsInstructionSetFeatures::GetFeatureString() const {
- std::string result;
- if (fpu_32bit_) {
- result += "fpu32";
- } else {
- result += "-fpu32";
- }
- if (mips_isa_gte2_) {
- result += ",mips2";
- } else {
- result += ",-mips2";
- }
- if (r6_) {
- result += ",r6";
- } // Suppress non-r6.
- if (msa_) {
- result += ",msa";
- } else {
- result += ",-msa";
- }
- return result;
-}
-
-std::unique_ptr<const InstructionSetFeatures>
-MipsInstructionSetFeatures::AddFeaturesFromSplitString(
- const std::vector<std::string>& features, std::string* error_msg) const {
- bool fpu_32bit = fpu_32bit_;
- bool mips_isa_gte2 = mips_isa_gte2_;
- bool r6 = r6_;
- bool msa = msa_;
- for (const std::string& feature : features) {
- DCHECK_EQ(android::base::Trim(feature), feature)
- << "Feature name is not trimmed: '" << feature << "'";
- if (feature == "fpu32") {
- fpu_32bit = true;
- } else if (feature == "-fpu32") {
- fpu_32bit = false;
- } else if (feature == "mips2") {
- mips_isa_gte2 = true;
- } else if (feature == "-mips2") {
- mips_isa_gte2 = false;
- } else if (feature == "r6") {
- r6 = true;
- } else if (feature == "-r6") {
- r6 = false;
- } else if (feature == "msa") {
- msa = true;
- } else if (feature == "-msa") {
- msa = false;
- } else {
- *error_msg = StringPrintf("Unknown instruction set feature: '%s'", feature.c_str());
- return nullptr;
- }
- }
- return std::unique_ptr<const InstructionSetFeatures>(
- new MipsInstructionSetFeatures(fpu_32bit, mips_isa_gte2, r6, msa));
-}
-
-} // namespace art
diff --git a/runtime/arch/mips/instruction_set_features_mips.h b/runtime/arch/mips/instruction_set_features_mips.h
deleted file mode 100644
index ab5bb3c..0000000
--- a/runtime/arch/mips/instruction_set_features_mips.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ARCH_MIPS_INSTRUCTION_SET_FEATURES_MIPS_H_
-#define ART_RUNTIME_ARCH_MIPS_INSTRUCTION_SET_FEATURES_MIPS_H_
-
-#include <android-base/logging.h>
-
-#include "arch/instruction_set_features.h"
-#include "base/macros.h"
-
-namespace art {
-
-class MipsInstructionSetFeatures;
-using MipsFeaturesUniquePtr = std::unique_ptr<const MipsInstructionSetFeatures>;
-
-// Instruction set features relevant to the MIPS architecture.
-class MipsInstructionSetFeatures final : public InstructionSetFeatures {
- public:
- // Process a CPU variant string like "r4000" and create InstructionSetFeatures.
- static MipsFeaturesUniquePtr FromVariant(const std::string& variant, std::string* error_msg);
-
- // Parse a bitmap and create an InstructionSetFeatures.
- static MipsFeaturesUniquePtr FromBitmap(uint32_t bitmap);
-
- // Turn C pre-processor #defines into the equivalent instruction set features.
- static MipsFeaturesUniquePtr FromCppDefines();
-
- // Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
- static MipsFeaturesUniquePtr FromCpuInfo();
-
- // Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
- // InstructionSetFeatures.
- static MipsFeaturesUniquePtr FromHwcap();
-
- // Use assembly tests of the current runtime (ie kRuntimeISA) to determine the
- // InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
- static MipsFeaturesUniquePtr FromAssembly();
-
- bool Equals(const InstructionSetFeatures* other) const override;
-
- InstructionSet GetInstructionSet() const override {
- return InstructionSet::kMips;
- }
-
- uint32_t AsBitmap() const override;
-
- std::string GetFeatureString() const override;
-
- // Is this an ISA revision greater than 2 opening up new opcodes.
- bool IsMipsIsaRevGreaterThanEqual2() const {
- return mips_isa_gte2_;
- }
-
- // Floating point double registers are encoded differently based on whether the Status.FR bit is
- // set. When the FR bit is 0 then the FPU is 32-bit, 1 its 64-bit. Return true if the code should
- // be generated assuming Status.FR is 0.
- bool Is32BitFloatingPoint() const {
- return fpu_32bit_;
- }
-
- bool IsR6() const {
- return r6_;
- }
-
- // Does it have MSA (MIPS SIMD Architecture) support.
- bool HasMsa() const {
- return msa_;
- }
-
- virtual ~MipsInstructionSetFeatures() {}
-
- protected:
- // Parse a vector of the form "fpu32", "mips2" adding these to a new MipsInstructionSetFeatures.
- std::unique_ptr<const InstructionSetFeatures>
- AddFeaturesFromSplitString(const std::vector<std::string>& features,
- std::string* error_msg) const override;
-
- private:
- MipsInstructionSetFeatures(bool fpu_32bit, bool mips_isa_gte2, bool r6, bool msa)
- : InstructionSetFeatures(),
- fpu_32bit_(fpu_32bit),
- mips_isa_gte2_(mips_isa_gte2),
- r6_(r6),
- msa_(msa) {
- // Sanity checks.
- if (r6) {
- CHECK(mips_isa_gte2);
- CHECK(!fpu_32bit);
- }
- if (!mips_isa_gte2) {
- CHECK(fpu_32bit);
- }
- }
-
- // Bitmap positions for encoding features as a bitmap.
- enum {
- kFpu32Bitfield = 1 << 0,
- kIsaRevGte2Bitfield = 1 << 1,
- kR6 = 1 << 2,
- kMsaBitfield = 1 << 3,
- };
-
- const bool fpu_32bit_;
- const bool mips_isa_gte2_;
- const bool r6_;
- const bool msa_;
-
- DISALLOW_COPY_AND_ASSIGN(MipsInstructionSetFeatures);
-};
-
-} // namespace art
-
-#endif // ART_RUNTIME_ARCH_MIPS_INSTRUCTION_SET_FEATURES_MIPS_H_
diff --git a/runtime/arch/mips/instruction_set_features_mips_test.cc b/runtime/arch/mips/instruction_set_features_mips_test.cc
deleted file mode 100644
index b7de952..0000000
--- a/runtime/arch/mips/instruction_set_features_mips_test.cc
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "instruction_set_features_mips.h"
-
-#include <gtest/gtest.h>
-
-namespace art {
-
-TEST(MipsInstructionSetFeaturesTest, MipsFeaturesFromDefaultVariant) {
- std::string error_msg;
- std::unique_ptr<const InstructionSetFeatures> mips_features(
- InstructionSetFeatures::FromVariant(InstructionSet::kMips, "default", &error_msg));
- ASSERT_TRUE(mips_features.get() != nullptr) << error_msg;
- EXPECT_EQ(mips_features->GetInstructionSet(), InstructionSet::kMips);
- EXPECT_TRUE(mips_features->Equals(mips_features.get()));
- EXPECT_STREQ("fpu32,mips2,-msa", mips_features->GetFeatureString().c_str());
- EXPECT_EQ(mips_features->AsBitmap(), 3U);
-}
-
-TEST(MipsInstructionSetFeaturesTest, MipsFeaturesFromR1Variant) {
- std::string error_msg;
- std::unique_ptr<const InstructionSetFeatures> mips32r1_features(
- InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r1", &error_msg));
- ASSERT_TRUE(mips32r1_features.get() != nullptr) << error_msg;
- EXPECT_EQ(mips32r1_features->GetInstructionSet(), InstructionSet::kMips);
- EXPECT_TRUE(mips32r1_features->Equals(mips32r1_features.get()));
- EXPECT_STREQ("fpu32,-mips2,-msa", mips32r1_features->GetFeatureString().c_str());
- EXPECT_EQ(mips32r1_features->AsBitmap(), 1U);
-
- std::unique_ptr<const InstructionSetFeatures> mips_default_features(
- InstructionSetFeatures::FromVariant(InstructionSet::kMips, "default", &error_msg));
- ASSERT_TRUE(mips_default_features.get() != nullptr) << error_msg;
- EXPECT_FALSE(mips32r1_features->Equals(mips_default_features.get()));
-}
-
-TEST(MipsInstructionSetFeaturesTest, MipsFeaturesFromR2Variant) {
- std::string error_msg;
- std::unique_ptr<const InstructionSetFeatures> mips32r2_features(
- InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r2", &error_msg));
- ASSERT_TRUE(mips32r2_features.get() != nullptr) << error_msg;
- EXPECT_EQ(mips32r2_features->GetInstructionSet(), InstructionSet::kMips);
- EXPECT_TRUE(mips32r2_features->Equals(mips32r2_features.get()));
- EXPECT_STREQ("fpu32,mips2,-msa", mips32r2_features->GetFeatureString().c_str());
- EXPECT_EQ(mips32r2_features->AsBitmap(), 3U);
-
- std::unique_ptr<const InstructionSetFeatures> mips_default_features(
- InstructionSetFeatures::FromVariant(InstructionSet::kMips, "default", &error_msg));
- ASSERT_TRUE(mips_default_features.get() != nullptr) << error_msg;
- EXPECT_TRUE(mips32r2_features->Equals(mips_default_features.get()));
-
- std::unique_ptr<const InstructionSetFeatures> mips32r1_features(
- InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r1", &error_msg));
- ASSERT_TRUE(mips32r1_features.get() != nullptr) << error_msg;
- EXPECT_FALSE(mips32r2_features->Equals(mips32r1_features.get()));
-}
-
-TEST(MipsInstructionSetFeaturesTest, MipsFeaturesFromR5Variant) {
- std::string error_msg;
- std::unique_ptr<const InstructionSetFeatures> mips32r5_features(
- InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r5", &error_msg));
- ASSERT_TRUE(mips32r5_features.get() != nullptr) << error_msg;
- EXPECT_EQ(mips32r5_features->GetInstructionSet(), InstructionSet::kMips);
- EXPECT_TRUE(mips32r5_features->Equals(mips32r5_features.get()));
- EXPECT_STREQ("-fpu32,mips2,msa", mips32r5_features->GetFeatureString().c_str());
- EXPECT_EQ(mips32r5_features->AsBitmap(), 10U);
-
- std::unique_ptr<const InstructionSetFeatures> mips_default_features(
- InstructionSetFeatures::FromVariant(InstructionSet::kMips, "default", &error_msg));
- ASSERT_TRUE(mips_default_features.get() != nullptr) << error_msg;
- EXPECT_FALSE(mips32r5_features->Equals(mips_default_features.get()));
-
- std::unique_ptr<const InstructionSetFeatures> mips32r1_features(
- InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r1", &error_msg));
- ASSERT_TRUE(mips32r1_features.get() != nullptr) << error_msg;
- EXPECT_FALSE(mips32r5_features->Equals(mips32r1_features.get()));
-
- std::unique_ptr<const InstructionSetFeatures> mips32r2_features(
- InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r2", &error_msg));
- ASSERT_TRUE(mips32r2_features.get() != nullptr) << error_msg;
- EXPECT_FALSE(mips32r5_features->Equals(mips32r2_features.get()));
-}
-
-TEST(MipsInstructionSetFeaturesTest, MipsFeaturesFromR6Variant) {
- std::string error_msg;
- std::unique_ptr<const InstructionSetFeatures> mips32r6_features(
- InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r6", &error_msg));
- ASSERT_TRUE(mips32r6_features.get() != nullptr) << error_msg;
- EXPECT_EQ(mips32r6_features->GetInstructionSet(), InstructionSet::kMips);
- EXPECT_TRUE(mips32r6_features->Equals(mips32r6_features.get()));
- EXPECT_STREQ("-fpu32,mips2,r6,msa", mips32r6_features->GetFeatureString().c_str());
- EXPECT_EQ(mips32r6_features->AsBitmap(), 14U);
-
- std::unique_ptr<const InstructionSetFeatures> mips_default_features(
- InstructionSetFeatures::FromVariant(InstructionSet::kMips, "default", &error_msg));
- ASSERT_TRUE(mips_default_features.get() != nullptr) << error_msg;
- EXPECT_FALSE(mips32r6_features->Equals(mips_default_features.get()));
-
- std::unique_ptr<const InstructionSetFeatures> mips32r1_features(
- InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r1", &error_msg));
- ASSERT_TRUE(mips32r1_features.get() != nullptr) << error_msg;
- EXPECT_FALSE(mips32r6_features->Equals(mips32r1_features.get()));
-
- std::unique_ptr<const InstructionSetFeatures> mips32r2_features(
- InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r2", &error_msg));
- ASSERT_TRUE(mips32r2_features.get() != nullptr) << error_msg;
- EXPECT_FALSE(mips32r6_features->Equals(mips32r2_features.get()));
-
- std::unique_ptr<const InstructionSetFeatures> mips32r5_features(
- InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r5", &error_msg));
- ASSERT_TRUE(mips32r5_features.get() != nullptr) << error_msg;
- EXPECT_FALSE(mips32r6_features->Equals(mips32r5_features.get()));
-}
-
-} // namespace art
diff --git a/runtime/arch/mips/jni_entrypoints_mips.S b/runtime/arch/mips/jni_entrypoints_mips.S
deleted file mode 100644
index 2c0e750..0000000
--- a/runtime/arch/mips/jni_entrypoints_mips.S
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "asm_support_mips.S"
-
- .set noreorder
- .balign 4
-
- /*
- * Jni dlsym lookup stub.
- */
- .extern artFindNativeMethod
-ENTRY art_jni_dlsym_lookup_stub
- addiu $sp, $sp, -48 # leave room for $f12, $f13, $f14, $f15, $a0, $a1, $a2, $a3, and $ra
- .cfi_adjust_cfa_offset 48
- sw $ra, 32($sp)
- .cfi_rel_offset 31, 32
- CHECK_ALIGNMENT $sp, $t0
- sdc1 $f14, 24($sp)
- sdc1 $f12, 16($sp)
- sw $a3, 12($sp)
- .cfi_rel_offset 7, 12
- sw $a2, 8($sp)
- .cfi_rel_offset 6, 8
- sw $a1, 4($sp)
- .cfi_rel_offset 5, 4
- sw $a0, 0($sp)
- .cfi_rel_offset 4, 0
- la $t9, artFindNativeMethod
- jalr $t9 # (Thread*)
- move $a0, $s1 # pass Thread::Current()
- lw $a0, 0($sp) # restore registers from stack
- lw $a1, 4($sp)
- lw $a2, 8($sp)
- lw $a3, 12($sp)
- CHECK_ALIGNMENT $sp, $t0
- ldc1 $f12, 16($sp)
- ldc1 $f14, 24($sp)
- lw $ra, 32($sp)
- beq $v0, $zero, .Lno_native_code_found
- addiu $sp, $sp, 48 # restore the stack
- .cfi_adjust_cfa_offset -48
- move $t9, $v0 # put method code result in $t9
- jalr $zero, $t9 # leaf call to method's code
- nop
-.Lno_native_code_found:
- jalr $zero, $ra
- nop
-END art_jni_dlsym_lookup_stub
diff --git a/runtime/arch/mips/memcmp16_mips.S b/runtime/arch/mips/memcmp16_mips.S
deleted file mode 100644
index c8eac9b..0000000
--- a/runtime/arch/mips/memcmp16_mips.S
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ARCH_MIPS_MEMCMP16_MIPS_S_
-#define ART_RUNTIME_ARCH_MIPS_MEMCMP16_MIPS_S_
-
-#include "asm_support_mips.S"
-
-// u4 __memcmp16(const u2*, const u2*, size_t);
-ENTRY_NO_GP __memcmp16
- li $t0,0
- li $t1,0
- beqz $a2,done /* 0 length string */
- beq $a0,$a1,done /* strings are identical */
-
- /* Unoptimized... */
-1: lhu $t0,0($a0)
- lhu $t1,0($a1)
- addu $a1,2
- bne $t0,$t1,done
- addu $a0,2
- subu $a2,1
- bnez $a2,1b
-
-done:
- subu $v0,$t0,$t1
- j $ra
-END __memcmp16
-
-#endif // ART_RUNTIME_ARCH_MIPS_MEMCMP16_MIPS_S_
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
deleted file mode 100644
index 5697185..0000000
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ /dev/null
@@ -1,3305 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "asm_support_mips.S"
-
-#include "arch/quick_alloc_entrypoints.S"
-
- .set noreorder
- .balign 4
-
- /* Deliver the given exception */
- .extern artDeliverExceptionFromCode
- /* Deliver an exception pending on a thread */
- .extern artDeliverPendingExceptionFromCode
-
-#define ARG_SLOT_SIZE 32 // space for a0-a3 plus 4 more words
-
- /*
- * Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kSaveAllCalleeSaves)
- * Callee-save: $s0-$s8 + $gp + $ra, 11 total + 1 word for Method*
- * Clobbers $t0 and $sp
- * Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
- * Reserves FRAME_SIZE_SAVE_ALL_CALLEE_SAVES + ARG_SLOT_SIZE bytes on the stack
- */
-.macro SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
- addiu $sp, $sp, -112
- .cfi_adjust_cfa_offset 112
-
- // Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVES != 112)
-#error "FRAME_SIZE_SAVE_ALL_CALLEE_SAVES(MIPS) size not as expected."
-#endif
-
- sw $ra, 108($sp)
- .cfi_rel_offset 31, 108
- sw $s8, 104($sp)
- .cfi_rel_offset 30, 104
- sw $gp, 100($sp)
- .cfi_rel_offset 28, 100
- sw $s7, 96($sp)
- .cfi_rel_offset 23, 96
- sw $s6, 92($sp)
- .cfi_rel_offset 22, 92
- sw $s5, 88($sp)
- .cfi_rel_offset 21, 88
- sw $s4, 84($sp)
- .cfi_rel_offset 20, 84
- sw $s3, 80($sp)
- .cfi_rel_offset 19, 80
- sw $s2, 76($sp)
- .cfi_rel_offset 18, 76
- sw $s1, 72($sp)
- .cfi_rel_offset 17, 72
- sw $s0, 68($sp)
- .cfi_rel_offset 16, 68
- // 4-byte placeholder for register $zero, serving for alignment
- // of the following double precision floating point registers.
-
- CHECK_ALIGNMENT $sp, $t1
- sdc1 $f30, 56($sp)
- sdc1 $f28, 48($sp)
- sdc1 $f26, 40($sp)
- sdc1 $f24, 32($sp)
- sdc1 $f22, 24($sp)
- sdc1 $f20, 16($sp)
-
- # 1 word for holding Method* plus 12 bytes padding to keep contents of SP
- # a multiple of 16.
-
- lw $t0, %got(_ZN3art7Runtime9instance_E)($gp)
- lw $t0, 0($t0)
- lw $t0, RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET($t0)
- sw $t0, 0($sp) # Place Method* at bottom of stack.
- sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
- addiu $sp, $sp, -ARG_SLOT_SIZE # reserve argument slots on the stack
- .cfi_adjust_cfa_offset ARG_SLOT_SIZE
-.endm
-
- /*
- * Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kSaveRefsOnly). Restoration assumes non-moving GC.
- * Does not include rSUSPEND or rSELF
- * callee-save: $s2-$s8 + $gp + $ra, 9 total + 2 words padding + 1 word to hold Method*
- * Clobbers $t0 and $sp
- * Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
- * Reserves FRAME_SIZE_SAVE_REFS_ONLY + ARG_SLOT_SIZE bytes on the stack
- */
-.macro SETUP_SAVE_REFS_ONLY_FRAME
- addiu $sp, $sp, -48
- .cfi_adjust_cfa_offset 48
-
- // Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_SAVE_REFS_ONLY != 48)
-#error "FRAME_SIZE_SAVE_REFS_ONLY(MIPS) size not as expected."
-#endif
-
- sw $ra, 44($sp)
- .cfi_rel_offset 31, 44
- sw $s8, 40($sp)
- .cfi_rel_offset 30, 40
- sw $gp, 36($sp)
- .cfi_rel_offset 28, 36
- sw $s7, 32($sp)
- .cfi_rel_offset 23, 32
- sw $s6, 28($sp)
- .cfi_rel_offset 22, 28
- sw $s5, 24($sp)
- .cfi_rel_offset 21, 24
- sw $s4, 20($sp)
- .cfi_rel_offset 20, 20
- sw $s3, 16($sp)
- .cfi_rel_offset 19, 16
- sw $s2, 12($sp)
- .cfi_rel_offset 18, 12
- # 2 words for alignment and bottom word will hold Method*
-
- lw $t0, %got(_ZN3art7Runtime9instance_E)($gp)
- lw $t0, 0($t0)
- lw $t0, RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET($t0)
- sw $t0, 0($sp) # Place Method* at bottom of stack.
- sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
- addiu $sp, $sp, -ARG_SLOT_SIZE # reserve argument slots on the stack
- .cfi_adjust_cfa_offset ARG_SLOT_SIZE
-.endm
-
-.macro RESTORE_SAVE_REFS_ONLY_FRAME
- addiu $sp, $sp, ARG_SLOT_SIZE # remove argument slots on the stack
- .cfi_adjust_cfa_offset -ARG_SLOT_SIZE
- lw $ra, 44($sp)
- .cfi_restore 31
- lw $s8, 40($sp)
- .cfi_restore 30
- lw $gp, 36($sp)
- .cfi_restore 28
- lw $s7, 32($sp)
- .cfi_restore 23
- lw $s6, 28($sp)
- .cfi_restore 22
- lw $s5, 24($sp)
- .cfi_restore 21
- lw $s4, 20($sp)
- .cfi_restore 20
- lw $s3, 16($sp)
- .cfi_restore 19
- lw $s2, 12($sp)
- .cfi_restore 18
- addiu $sp, $sp, 48
- .cfi_adjust_cfa_offset -48
-.endm
-
-.macro RESTORE_SAVE_REFS_ONLY_FRAME_AND_RETURN
- RESTORE_SAVE_REFS_ONLY_FRAME
- jalr $zero, $ra
- nop
-.endm
-
- /*
- * Individually usable part of macro SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY.
- */
-.macro SETUP_SAVE_REFS_AND_ARGS_FRAME_S4_THRU_S8
- sw $s8, 104($sp)
- .cfi_rel_offset 30, 104
- sw $s7, 96($sp)
- .cfi_rel_offset 23, 96
- sw $s6, 92($sp)
- .cfi_rel_offset 22, 92
- sw $s5, 88($sp)
- .cfi_rel_offset 21, 88
- sw $s4, 84($sp)
- .cfi_rel_offset 20, 84
-.endm
-
- /*
- * Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kSaveRefsAndArgs).
- * callee-save: $a1-$a3, $t0-$t1, $s2-$s8, $gp, $ra, $f8-$f19
- * (26 total + 1 word padding + method*)
- */
-.macro SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY save_s4_thru_s8=1
- addiu $sp, $sp, -112
- .cfi_adjust_cfa_offset 112
-
- // Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_SAVE_REFS_AND_ARGS != 112)
-#error "FRAME_SIZE_SAVE_REFS_AND_ARGS(MIPS) size not as expected."
-#endif
-
- sw $ra, 108($sp)
- .cfi_rel_offset 31, 108
- sw $gp, 100($sp)
- .cfi_rel_offset 28, 100
- .if \save_s4_thru_s8
- SETUP_SAVE_REFS_AND_ARGS_FRAME_S4_THRU_S8
- .endif
- sw $s3, 80($sp)
- .cfi_rel_offset 19, 80
- sw $s2, 76($sp)
- .cfi_rel_offset 18, 76
- sw $t1, 72($sp)
- .cfi_rel_offset 9, 72
- sw $t0, 68($sp)
- .cfi_rel_offset 8, 68
- sw $a3, 64($sp)
- .cfi_rel_offset 7, 64
- sw $a2, 60($sp)
- .cfi_rel_offset 6, 60
- sw $a1, 56($sp)
- .cfi_rel_offset 5, 56
- CHECK_ALIGNMENT $sp, $t8
- sdc1 $f18, 48($sp)
- sdc1 $f16, 40($sp)
- sdc1 $f14, 32($sp)
- sdc1 $f12, 24($sp)
- sdc1 $f10, 16($sp)
- sdc1 $f8, 8($sp)
- # bottom will hold Method*
-.endm
-
- /*
- * Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kSaveRefsAndArgs). Restoration assumes non-moving GC.
- * callee-save: $a1-$a3, $t0-$t1, $s2-$s8, $gp, $ra, $f8-$f19
- * (26 total + 1 word padding + method*)
- * Clobbers $t0 and $sp
- * Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
- * Reserves FRAME_SIZE_SAVE_REFS_AND_ARGS + ARG_SLOT_SIZE bytes on the stack
- */
-.macro SETUP_SAVE_REFS_AND_ARGS_FRAME save_s4_thru_s8_only=0
- .if \save_s4_thru_s8_only
- // It is expected that `SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY /* save_s4_thru_s8 */ 0`
- // has been done prior to `SETUP_SAVE_REFS_AND_ARGS_FRAME /* save_s4_thru_s8_only */ 1`.
- SETUP_SAVE_REFS_AND_ARGS_FRAME_S4_THRU_S8
- .else
- SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY
- .endif
- lw $t0, %got(_ZN3art7Runtime9instance_E)($gp)
- lw $t0, 0($t0)
- lw $t0, RUNTIME_SAVE_REFS_AND_ARGS_METHOD_OFFSET($t0)
- sw $t0, 0($sp) # Place Method* at bottom of stack.
- sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
- addiu $sp, $sp, -ARG_SLOT_SIZE # reserve argument slots on the stack
- .cfi_adjust_cfa_offset ARG_SLOT_SIZE
-.endm
-
- /*
- * Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kSaveRefsAndArgs). Restoration assumes non-moving GC.
- * callee-save: $a1-$a3, $t0-$t1, $s2-$s8, $gp, $ra, $f8-$f19
- * (26 total + 1 word padding + method*)
- * Clobbers $sp
- * Use $a0 as the Method* and loads it into bottom of stack.
- * Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
- * Reserves FRAME_SIZE_SAVE_REFS_AND_ARGS + ARG_SLOT_SIZE bytes on the stack
- */
-.macro SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_A0
- SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY
- sw $a0, 0($sp) # Place Method* at bottom of stack.
- sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
- addiu $sp, $sp, -ARG_SLOT_SIZE # reserve argument slots on the stack
- .cfi_adjust_cfa_offset ARG_SLOT_SIZE
-.endm
-
- /*
- * Individually usable part of macro RESTORE_SAVE_REFS_AND_ARGS_FRAME.
- */
-.macro RESTORE_SAVE_REFS_AND_ARGS_FRAME_GP
- lw $gp, 100($sp)
- .cfi_restore 28
-.endm
-
- /*
- * Individually usable part of macro RESTORE_SAVE_REFS_AND_ARGS_FRAME.
- */
-.macro RESTORE_SAVE_REFS_AND_ARGS_FRAME_A1
- lw $a1, 56($sp)
- .cfi_restore 5
-.endm
-
-.macro RESTORE_SAVE_REFS_AND_ARGS_FRAME restore_s4_thru_s8=1, remove_arg_slots=1
- .if \remove_arg_slots
- addiu $sp, $sp, ARG_SLOT_SIZE # Remove argument slots from the stack.
- .cfi_adjust_cfa_offset -ARG_SLOT_SIZE
- .endif
- lw $ra, 108($sp)
- .cfi_restore 31
- .if \restore_s4_thru_s8
- lw $s8, 104($sp)
- .cfi_restore 30
- .endif
- RESTORE_SAVE_REFS_AND_ARGS_FRAME_GP
- .if \restore_s4_thru_s8
- lw $s7, 96($sp)
- .cfi_restore 23
- lw $s6, 92($sp)
- .cfi_restore 22
- lw $s5, 88($sp)
- .cfi_restore 21
- lw $s4, 84($sp)
- .cfi_restore 20
- .endif
- lw $s3, 80($sp)
- .cfi_restore 19
- lw $s2, 76($sp)
- .cfi_restore 18
- lw $t1, 72($sp)
- .cfi_restore 9
- lw $t0, 68($sp)
- .cfi_restore 8
- lw $a3, 64($sp)
- .cfi_restore 7
- lw $a2, 60($sp)
- .cfi_restore 6
- RESTORE_SAVE_REFS_AND_ARGS_FRAME_A1
- CHECK_ALIGNMENT $sp, $t8
- ldc1 $f18, 48($sp)
- ldc1 $f16, 40($sp)
- ldc1 $f14, 32($sp)
- ldc1 $f12, 24($sp)
- ldc1 $f10, 16($sp)
- ldc1 $f8, 8($sp)
- addiu $sp, $sp, 112 # Pop frame.
- .cfi_adjust_cfa_offset -112
-.endm
-
- /*
- * Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kSaveEverything).
- * when the $sp has already been decremented by FRAME_SIZE_SAVE_EVERYTHING.
- * Callee-save: $at, $v0-$v1, $a0-$a3, $t0-$t7, $s0-$s7, $t8-$t9, $gp, $fp $ra, $f0-$f31;
- * 28(GPR)+ 32(FPR) + 3 words for padding and 1 word for Method*
- * Clobbers $t0 and $t1.
- * Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
- * Reserves FRAME_SIZE_SAVE_EVERYTHING + ARG_SLOT_SIZE bytes on the stack.
- * This macro sets up $gp; entrypoints using it should start with ENTRY_NO_GP.
- */
-.macro SETUP_SAVE_EVERYTHING_FRAME_DECREMENTED_SP runtime_method_offset = RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET
- // Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_SAVE_EVERYTHING != 256)
-#error "FRAME_SIZE_SAVE_EVERYTHING(MIPS) size not as expected."
-#endif
-
- sw $ra, 252($sp)
- .cfi_rel_offset 31, 252
- sw $fp, 248($sp)
- .cfi_rel_offset 30, 248
- sw $gp, 244($sp)
- .cfi_rel_offset 28, 244
- sw $t9, 240($sp)
- .cfi_rel_offset 25, 240
- sw $t8, 236($sp)
- .cfi_rel_offset 24, 236
- sw $s7, 232($sp)
- .cfi_rel_offset 23, 232
- sw $s6, 228($sp)
- .cfi_rel_offset 22, 228
- sw $s5, 224($sp)
- .cfi_rel_offset 21, 224
- sw $s4, 220($sp)
- .cfi_rel_offset 20, 220
- sw $s3, 216($sp)
- .cfi_rel_offset 19, 216
- sw $s2, 212($sp)
- .cfi_rel_offset 18, 212
- sw $s1, 208($sp)
- .cfi_rel_offset 17, 208
- sw $s0, 204($sp)
- .cfi_rel_offset 16, 204
- sw $t7, 200($sp)
- .cfi_rel_offset 15, 200
- sw $t6, 196($sp)
- .cfi_rel_offset 14, 196
- sw $t5, 192($sp)
- .cfi_rel_offset 13, 192
- sw $t4, 188($sp)
- .cfi_rel_offset 12, 188
- sw $t3, 184($sp)
- .cfi_rel_offset 11, 184
- sw $t2, 180($sp)
- .cfi_rel_offset 10, 180
- sw $t1, 176($sp)
- .cfi_rel_offset 9, 176
- sw $t0, 172($sp)
- .cfi_rel_offset 8, 172
- sw $a3, 168($sp)
- .cfi_rel_offset 7, 168
- sw $a2, 164($sp)
- .cfi_rel_offset 6, 164
- sw $a1, 160($sp)
- .cfi_rel_offset 5, 160
- sw $a0, 156($sp)
- .cfi_rel_offset 4, 156
- sw $v1, 152($sp)
- .cfi_rel_offset 3, 152
- sw $v0, 148($sp)
- .cfi_rel_offset 2, 148
-
- // Set up $gp, clobbering $ra and using the branch delay slot for a useful instruction.
- bal 1f
- .set push
- .set noat
- sw $at, 144($sp)
- .cfi_rel_offset 1, 144
- .set pop
-1:
- .cpload $ra
-
- CHECK_ALIGNMENT $sp, $t1
- sdc1 $f30, 136($sp)
- sdc1 $f28, 128($sp)
- sdc1 $f26, 120($sp)
- sdc1 $f24, 112($sp)
- sdc1 $f22, 104($sp)
- sdc1 $f20, 96($sp)
- sdc1 $f18, 88($sp)
- sdc1 $f16, 80($sp)
- sdc1 $f14, 72($sp)
- sdc1 $f12, 64($sp)
- sdc1 $f10, 56($sp)
- sdc1 $f8, 48($sp)
- sdc1 $f6, 40($sp)
- sdc1 $f4, 32($sp)
- sdc1 $f2, 24($sp)
- sdc1 $f0, 16($sp)
-
- # 3 words padding and 1 word for holding Method*
-
- lw $t0, %got(_ZN3art7Runtime9instance_E)($gp)
- lw $t0, 0($t0)
- lw $t0, \runtime_method_offset($t0)
- sw $t0, 0($sp) # Place Method* at bottom of stack.
- sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
- addiu $sp, $sp, -ARG_SLOT_SIZE # reserve argument slots on the stack
- .cfi_adjust_cfa_offset ARG_SLOT_SIZE
-.endm
-
- /*
- * Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kSaveEverything).
- * Callee-save: $at, $v0-$v1, $a0-$a3, $t0-$t7, $s0-$s7, $t8-$t9, $gp, $fp $ra, $f0-$f31;
- * 28(GPR)+ 32(FPR) + 3 words for padding and 1 word for Method*
- * Clobbers $t0 and $t1.
- * Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
- * Reserves FRAME_SIZE_SAVE_EVERYTHING + ARG_SLOT_SIZE bytes on the stack.
- * This macro sets up $gp; entrypoints using it should start with ENTRY_NO_GP.
- */
-.macro SETUP_SAVE_EVERYTHING_FRAME runtime_method_offset = RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET
- addiu $sp, $sp, -(FRAME_SIZE_SAVE_EVERYTHING)
- .cfi_adjust_cfa_offset (FRAME_SIZE_SAVE_EVERYTHING)
- SETUP_SAVE_EVERYTHING_FRAME_DECREMENTED_SP \runtime_method_offset
-.endm
-
-.macro RESTORE_SAVE_EVERYTHING_FRAME restore_a0=1
- addiu $sp, $sp, ARG_SLOT_SIZE # remove argument slots on the stack
- .cfi_adjust_cfa_offset -ARG_SLOT_SIZE
-
- CHECK_ALIGNMENT $sp, $t1
- ldc1 $f30, 136($sp)
- ldc1 $f28, 128($sp)
- ldc1 $f26, 120($sp)
- ldc1 $f24, 112($sp)
- ldc1 $f22, 104($sp)
- ldc1 $f20, 96($sp)
- ldc1 $f18, 88($sp)
- ldc1 $f16, 80($sp)
- ldc1 $f14, 72($sp)
- ldc1 $f12, 64($sp)
- ldc1 $f10, 56($sp)
- ldc1 $f8, 48($sp)
- ldc1 $f6, 40($sp)
- ldc1 $f4, 32($sp)
- ldc1 $f2, 24($sp)
- ldc1 $f0, 16($sp)
-
- lw $ra, 252($sp)
- .cfi_restore 31
- lw $fp, 248($sp)
- .cfi_restore 30
- lw $gp, 244($sp)
- .cfi_restore 28
- lw $t9, 240($sp)
- .cfi_restore 25
- lw $t8, 236($sp)
- .cfi_restore 24
- lw $s7, 232($sp)
- .cfi_restore 23
- lw $s6, 228($sp)
- .cfi_restore 22
- lw $s5, 224($sp)
- .cfi_restore 21
- lw $s4, 220($sp)
- .cfi_restore 20
- lw $s3, 216($sp)
- .cfi_restore 19
- lw $s2, 212($sp)
- .cfi_restore 18
- lw $s1, 208($sp)
- .cfi_restore 17
- lw $s0, 204($sp)
- .cfi_restore 16
- lw $t7, 200($sp)
- .cfi_restore 15
- lw $t6, 196($sp)
- .cfi_restore 14
- lw $t5, 192($sp)
- .cfi_restore 13
- lw $t4, 188($sp)
- .cfi_restore 12
- lw $t3, 184($sp)
- .cfi_restore 11
- lw $t2, 180($sp)
- .cfi_restore 10
- lw $t1, 176($sp)
- .cfi_restore 9
- lw $t0, 172($sp)
- .cfi_restore 8
- lw $a3, 168($sp)
- .cfi_restore 7
- lw $a2, 164($sp)
- .cfi_restore 6
- lw $a1, 160($sp)
- .cfi_restore 5
- .if \restore_a0
- lw $a0, 156($sp)
- .cfi_restore 4
- .endif
- lw $v1, 152($sp)
- .cfi_restore 3
- lw $v0, 148($sp)
- .cfi_restore 2
- .set push
- .set noat
- lw $at, 144($sp)
- .cfi_restore 1
- .set pop
-
- addiu $sp, $sp, 256 # pop frame
- .cfi_adjust_cfa_offset -256
-.endm
-
- /*
- * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
- * exception is Thread::Current()->exception_ when the runtime method frame is ready.
- * Requires $gp properly set up.
- */
-.macro DELIVER_PENDING_EXCEPTION_FRAME_READY
- la $t9, artDeliverPendingExceptionFromCode
- jalr $zero, $t9 # artDeliverPendingExceptionFromCode(Thread*)
- move $a0, rSELF # pass Thread::Current
-.endm
-
- /*
- * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
- * exception is Thread::Current()->exception_.
- * Requires $gp properly set up.
- */
-.macro DELIVER_PENDING_EXCEPTION
- SETUP_SAVE_ALL_CALLEE_SAVES_FRAME # save callee saves for throw
- DELIVER_PENDING_EXCEPTION_FRAME_READY
-.endm
-
-.macro RETURN_IF_NO_EXCEPTION
- lw $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
- RESTORE_SAVE_REFS_ONLY_FRAME
- bnez $t0, 1f # success if no exception is pending
- nop
- jalr $zero, $ra
- nop
-1:
- DELIVER_PENDING_EXCEPTION
-.endm
-
-.macro RETURN_IF_ZERO
- RESTORE_SAVE_REFS_ONLY_FRAME
- bnez $v0, 1f # success?
- nop
- jalr $zero, $ra # return on success
- nop
-1:
- DELIVER_PENDING_EXCEPTION
-.endm
-
-.macro RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
- RESTORE_SAVE_REFS_ONLY_FRAME
- beqz $v0, 1f # success?
- nop
- jalr $zero, $ra # return on success
- nop
-1:
- DELIVER_PENDING_EXCEPTION
-.endm
-
- /*
- * On stack replacement stub.
- * On entry:
- * a0 = stack to copy
- * a1 = size of stack
- * a2 = pc to call
- * a3 = JValue* result
- * [sp + 16] = shorty
- * [sp + 20] = thread
- */
-ENTRY art_quick_osr_stub
- // Save callee general purpose registers, RA and GP.
- addiu $sp, $sp, -48
- .cfi_adjust_cfa_offset 48
- sw $ra, 44($sp)
- .cfi_rel_offset 31, 44
- sw $s8, 40($sp)
- .cfi_rel_offset 30, 40
- sw $gp, 36($sp)
- .cfi_rel_offset 28, 36
- sw $s7, 32($sp)
- .cfi_rel_offset 23, 32
- sw $s6, 28($sp)
- .cfi_rel_offset 22, 28
- sw $s5, 24($sp)
- .cfi_rel_offset 21, 24
- sw $s4, 20($sp)
- .cfi_rel_offset 20, 20
- sw $s3, 16($sp)
- .cfi_rel_offset 19, 16
- sw $s2, 12($sp)
- .cfi_rel_offset 18, 12
- sw $s1, 8($sp)
- .cfi_rel_offset 17, 8
- sw $s0, 4($sp)
- .cfi_rel_offset 16, 4
-
- move $s8, $sp # Save the stack pointer
- move $s7, $a1 # Save size of stack
- move $s6, $a2 # Save the pc to call
- lw rSELF, 48+20($sp) # Save managed thread pointer into rSELF
- addiu $t0, $sp, -12 # Reserve space for stack pointer,
- # JValue* result, and ArtMethod* slot.
- srl $t0, $t0, 4 # Align stack pointer to 16 bytes
- sll $sp, $t0, 4 # Update stack pointer
- sw $s8, 4($sp) # Save old stack pointer
- sw $a3, 8($sp) # Save JValue* result
- sw $zero, 0($sp) # Store null for ArtMethod* at bottom of frame
- subu $sp, $a1 # Reserve space for callee stack
- move $a2, $a1
- move $a1, $a0
- move $a0, $sp
- la $t9, memcpy
- jalr $t9 # memcpy (dest a0, src a1, bytes a2)
- addiu $sp, $sp, -16 # make space for argument slots for memcpy
- bal .Losr_entry # Call the method
- addiu $sp, $sp, 16 # restore stack after memcpy
- lw $a2, 8($sp) # Restore JValue* result
- lw $sp, 4($sp) # Restore saved stack pointer
- lw $a0, 48+16($sp) # load shorty
- lbu $a0, 0($a0) # load return type
- li $a1, 'D' # put char 'D' into a1
- beq $a0, $a1, .Losr_fp_result # Test if result type char == 'D'
- li $a1, 'F' # put char 'F' into a1
- beq $a0, $a1, .Losr_fp_result # Test if result type char == 'F'
- nop
- sw $v0, 0($a2)
- b .Losr_exit
- sw $v1, 4($a2) # store v0/v1 into result
-.Losr_fp_result:
- CHECK_ALIGNMENT $a2, $t0, 8
- sdc1 $f0, 0($a2) # store f0/f1 into result
-.Losr_exit:
- lw $ra, 44($sp)
- .cfi_restore 31
- lw $s8, 40($sp)
- .cfi_restore 30
- lw $gp, 36($sp)
- .cfi_restore 28
- lw $s7, 32($sp)
- .cfi_restore 23
- lw $s6, 28($sp)
- .cfi_restore 22
- lw $s5, 24($sp)
- .cfi_restore 21
- lw $s4, 20($sp)
- .cfi_restore 20
- lw $s3, 16($sp)
- .cfi_restore 19
- lw $s2, 12($sp)
- .cfi_restore 18
- lw $s1, 8($sp)
- .cfi_restore 17
- lw $s0, 4($sp)
- .cfi_restore 16
- jalr $zero, $ra
- addiu $sp, $sp, 48
- .cfi_adjust_cfa_offset -48
-.Losr_entry:
- addiu $s7, $s7, -4
- addu $t0, $s7, $sp
- move $t9, $s6
- jalr $zero, $t9
- sw $ra, 0($t0) # Store RA per the compiler ABI
-END art_quick_osr_stub
-
- /*
- * On entry $a0 is uint32_t* gprs_ and $a1 is uint32_t* fprs_.
- * Note that fprs_ is expected to be an address that is a multiple of 8.
- * FIXME: just guessing about the shape of the jmpbuf. Where will pc be?
- */
-ENTRY art_quick_do_long_jump
- CHECK_ALIGNMENT $a1, $t1, 8
- ldc1 $f0, 0*8($a1)
- ldc1 $f2, 1*8($a1)
- ldc1 $f4, 2*8($a1)
- ldc1 $f6, 3*8($a1)
- ldc1 $f8, 4*8($a1)
- ldc1 $f10, 5*8($a1)
- ldc1 $f12, 6*8($a1)
- ldc1 $f14, 7*8($a1)
- ldc1 $f16, 8*8($a1)
- ldc1 $f18, 9*8($a1)
- ldc1 $f20, 10*8($a1)
- ldc1 $f22, 11*8($a1)
- ldc1 $f24, 12*8($a1)
- ldc1 $f26, 13*8($a1)
- ldc1 $f28, 14*8($a1)
- ldc1 $f30, 15*8($a1)
-
- .set push
- .set nomacro
- .set noat
- lw $at, 4($a0)
- .set pop
- lw $v0, 8($a0)
- lw $v1, 12($a0)
- lw $a1, 20($a0)
- lw $a2, 24($a0)
- lw $a3, 28($a0)
- lw $t0, 32($a0)
- lw $t1, 36($a0)
- lw $t2, 40($a0)
- lw $t3, 44($a0)
- lw $t4, 48($a0)
- lw $t5, 52($a0)
- lw $t6, 56($a0)
- lw $t7, 60($a0)
- lw $s0, 64($a0)
- lw $s1, 68($a0)
- lw $s2, 72($a0)
- lw $s3, 76($a0)
- lw $s4, 80($a0)
- lw $s5, 84($a0)
- lw $s6, 88($a0)
- lw $s7, 92($a0)
- lw $t8, 96($a0)
- lw $t9, 100($a0)
- lw $gp, 112($a0)
- lw $sp, 116($a0)
- lw $fp, 120($a0)
- lw $ra, 124($a0)
- lw $a0, 16($a0)
- move $v0, $zero # clear result registers v0 and v1 (in branch delay slot)
- jalr $zero, $t9 # do long jump
- move $v1, $zero
-END art_quick_do_long_jump
-
- /*
- * Called by managed code, saves most registers (forms basis of long jump context) and passes
- * the bottom of the stack. artDeliverExceptionFromCode will place the callee save Method* at
- * the bottom of the thread. On entry a0 holds Throwable*
- */
-ENTRY art_quick_deliver_exception
- SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
- la $t9, artDeliverExceptionFromCode
- jalr $zero, $t9 # artDeliverExceptionFromCode(Throwable*, Thread*)
- move $a1, rSELF # pass Thread::Current
-END art_quick_deliver_exception
-
- /*
- * Called by managed code to create and deliver a NullPointerException
- */
- .extern artThrowNullPointerExceptionFromCode
-ENTRY_NO_GP art_quick_throw_null_pointer_exception
- // Note that setting up $gp does not rely on $t9 here, so branching here directly is OK,
- // even after clobbering any registers we don't need to preserve, such as $gp or $t0.
- SETUP_SAVE_EVERYTHING_FRAME
- la $t9, artThrowNullPointerExceptionFromCode
- jalr $zero, $t9 # artThrowNullPointerExceptionFromCode(Thread*)
- move $a0, rSELF # pass Thread::Current
-END art_quick_throw_null_pointer_exception
-
-
- /*
- * Call installed by a signal handler to create and deliver a NullPointerException.
- */
- .extern artThrowNullPointerExceptionFromSignal
-ENTRY_NO_GP_CUSTOM_CFA art_quick_throw_null_pointer_exception_from_signal, FRAME_SIZE_SAVE_EVERYTHING
- SETUP_SAVE_EVERYTHING_FRAME_DECREMENTED_SP
- # Retrieve the fault address from the padding where the signal handler stores it.
- lw $a0, (ARG_SLOT_SIZE + __SIZEOF_POINTER__)($sp)
- la $t9, artThrowNullPointerExceptionFromSignal
- jalr $zero, $t9 # artThrowNullPointerExceptionFromSignal(uintptr_t, Thread*)
- move $a1, rSELF # pass Thread::Current
-END art_quick_throw_null_pointer_exception_from_signal
-
- /*
- * Called by managed code to create and deliver an ArithmeticException
- */
- .extern artThrowDivZeroFromCode
-ENTRY_NO_GP art_quick_throw_div_zero
- SETUP_SAVE_EVERYTHING_FRAME
- la $t9, artThrowDivZeroFromCode
- jalr $zero, $t9 # artThrowDivZeroFromCode(Thread*)
- move $a0, rSELF # pass Thread::Current
-END art_quick_throw_div_zero
-
- /*
- * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException
- */
- .extern artThrowArrayBoundsFromCode
-ENTRY_NO_GP art_quick_throw_array_bounds
- // Note that setting up $gp does not rely on $t9 here, so branching here directly is OK,
- // even after clobbering any registers we don't need to preserve, such as $gp or $t0.
- SETUP_SAVE_EVERYTHING_FRAME
- la $t9, artThrowArrayBoundsFromCode
- jalr $zero, $t9 # artThrowArrayBoundsFromCode(index, limit, Thread*)
- move $a2, rSELF # pass Thread::Current
-END art_quick_throw_array_bounds
-
- /*
- * Called by managed code to create and deliver a StringIndexOutOfBoundsException
- * as if thrown from a call to String.charAt().
- */
- .extern artThrowStringBoundsFromCode
-ENTRY_NO_GP art_quick_throw_string_bounds
- SETUP_SAVE_EVERYTHING_FRAME
- la $t9, artThrowStringBoundsFromCode
- jalr $zero, $t9 # artThrowStringBoundsFromCode(index, limit, Thread*)
- move $a2, rSELF # pass Thread::Current
-END art_quick_throw_string_bounds
-
- /*
- * Called by managed code to create and deliver a StackOverflowError.
- */
- .extern artThrowStackOverflowFromCode
-ENTRY art_quick_throw_stack_overflow
- SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
- la $t9, artThrowStackOverflowFromCode
- jalr $zero, $t9 # artThrowStackOverflowFromCode(Thread*)
- move $a0, rSELF # pass Thread::Current
-END art_quick_throw_stack_overflow
-
- /*
- * All generated callsites for interface invokes and invocation slow paths will load arguments
- * as usual - except instead of loading arg0/$a0 with the target Method*, arg0/$a0 will contain
- * the method_idx. This wrapper will save arg1-arg3, and call the appropriate C helper.
- * NOTE: "this" is first visable argument of the target, and so can be found in arg1/$a1.
- *
- * The helper will attempt to locate the target and return a 64-bit result in $v0/$v1 consisting
- * of the target Method* in $v0 and method->code_ in $v1.
- *
- * If unsuccessful, the helper will return null/null. There will be a pending exception in the
- * thread and we branch to another stub to deliver it.
- *
- * On success this wrapper will restore arguments and *jump* to the target, leaving the lr
- * pointing back to the original caller.
- */
-.macro INVOKE_TRAMPOLINE_BODY cxx_name, save_s4_thru_s8_only=0
- .extern \cxx_name
- SETUP_SAVE_REFS_AND_ARGS_FRAME \save_s4_thru_s8_only # save callee saves in case
- # allocation triggers GC
- move $a2, rSELF # pass Thread::Current
- la $t9, \cxx_name
- jalr $t9 # (method_idx, this, Thread*, $sp)
- addiu $a3, $sp, ARG_SLOT_SIZE # pass $sp (remove arg slots)
- move $a0, $v0 # save target Method*
- RESTORE_SAVE_REFS_AND_ARGS_FRAME
- beqz $v0, 1f
- move $t9, $v1 # save $v0->code_
- jalr $zero, $t9
- nop
-1:
- DELIVER_PENDING_EXCEPTION
-.endm
-.macro INVOKE_TRAMPOLINE c_name, cxx_name
-ENTRY \c_name
- INVOKE_TRAMPOLINE_BODY \cxx_name
-END \c_name
-.endm
-
-INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck
-
-INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck
-INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvokeDirectTrampolineWithAccessCheck
-INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck
-INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck
-
-// Each of the following macros expands into four instructions or 16 bytes.
-// They are used to build indexable "tables" of code.
-
-.macro LOAD_WORD_TO_REG reg, next_arg, index_reg, label
- lw $\reg, -4($\next_arg) # next_arg points to argument after the current one (offset is 4)
- b \label
- addiu $\index_reg, 16
- .balign 16
-.endm
-
-.macro LOAD_LONG_TO_REG reg1, reg2, next_arg, index_reg, next_index, label
- lw $\reg1, -8($\next_arg) # next_arg points to argument after the current one (offset is 8)
- lw $\reg2, -4($\next_arg)
- b \label
- li $\index_reg, \next_index
- .balign 16
-.endm
-
-.macro LOAD_FLOAT_TO_REG reg, next_arg, index_reg, label
- lwc1 $\reg, -4($\next_arg) # next_arg points to argument after the current one (offset is 4)
- b \label
- addiu $\index_reg, 16
- .balign 16
-.endm
-
-#if defined(__mips_isa_rev) && __mips_isa_rev > 2
-// LDu expands into 3 instructions for 64-bit FPU, so index_reg cannot be updated here.
-.macro LOAD_DOUBLE_TO_REG reg1, reg2, next_arg, index_reg, tmp, label
- .set reorder # force use of the branch delay slot
- LDu $\reg1, $\reg2, -8, $\next_arg, $\tmp # next_arg points to argument after the current one
- # (offset is 8)
- b \label
- .set noreorder
- .balign 16
-.endm
-#else
-// LDu expands into 2 instructions for 32-bit FPU, so index_reg is updated here.
-.macro LOAD_DOUBLE_TO_REG reg1, reg2, next_arg, index_reg, tmp, label
- LDu $\reg1, $\reg2, -8, $\next_arg, $\tmp # next_arg points to argument after the current one
- # (offset is 8)
- b \label
- addiu $\index_reg, 16
- .balign 16
-.endm
-#endif
-
-.macro LOAD_END index_reg, next_index, label
- b \label
- li $\index_reg, \next_index
- .balign 16
-.endm
-
-#define SPILL_SIZE 32
-
- /*
- * Invocation stub for quick code.
- * On entry:
- * a0 = method pointer
- * a1 = argument array or null for no argument methods
- * a2 = size of argument array in bytes
- * a3 = (managed) thread pointer
- * [sp + 16] = JValue* result
- * [sp + 20] = shorty
- */
-ENTRY art_quick_invoke_stub
- sw $a0, 0($sp) # save out a0
- addiu $sp, $sp, -SPILL_SIZE # spill s0, s1, fp, ra and gp
- .cfi_adjust_cfa_offset SPILL_SIZE
- sw $gp, 16($sp)
- sw $ra, 12($sp)
- .cfi_rel_offset 31, 12
- sw $fp, 8($sp)
- .cfi_rel_offset 30, 8
- sw $s1, 4($sp)
- .cfi_rel_offset 17, 4
- sw $s0, 0($sp)
- .cfi_rel_offset 16, 0
- move $fp, $sp # save sp in fp
- .cfi_def_cfa_register 30
- move $s1, $a3 # move managed thread pointer into s1
- addiu $t0, $a2, 4 # create space for ArtMethod* in frame.
- subu $t0, $sp, $t0 # reserve & align *stack* to 16 bytes:
- srl $t0, $t0, 4 # native calling convention only aligns to 8B,
- sll $sp, $t0, 4 # so we have to ensure ART 16B alignment ourselves.
- addiu $a0, $sp, 4 # pass stack pointer + ArtMethod* as dest for memcpy
- la $t9, memcpy
- jalr $t9 # (dest, src, bytes)
- addiu $sp, $sp, -16 # make space for argument slots for memcpy
- addiu $sp, $sp, 16 # restore stack after memcpy
- lw $gp, 16($fp) # restore $gp
- lw $a0, SPILL_SIZE($fp) # restore ArtMethod*
- lw $a1, 4($sp) # a1 = this*
- addiu $t8, $sp, 8 # t8 = pointer to the current argument (skip ArtMethod* and this*)
- li $t6, 0 # t6 = gpr_index = 0 (corresponds to A2; A0 and A1 are skipped)
- li $t7, 0 # t7 = fp_index = 0
- lw $t9, 20 + SPILL_SIZE($fp) # get shorty (20 is offset from the $sp on entry + SPILL_SIZE
- # as the $fp is SPILL_SIZE bytes below the $sp on entry)
- addiu $t9, 1 # t9 = shorty + 1 (skip 1 for return type)
-
- // Load the base addresses of tabInt ... tabDouble.
- // We will use the register indices (gpr_index, fp_index) to branch.
- // Note that the indices are scaled by 16, so they can be added to the bases directly.
-#if defined(__mips_isa_rev) && __mips_isa_rev >= 6
- lapc $t2, tabInt
- lapc $t3, tabLong
- lapc $t4, tabSingle
- lapc $t5, tabDouble
-#else
- bltzal $zero, tabBase # nal
- addiu $t2, $ra, %lo(tabInt - tabBase)
-tabBase:
- addiu $t3, $ra, %lo(tabLong - tabBase)
- addiu $t4, $ra, %lo(tabSingle - tabBase)
- addiu $t5, $ra, %lo(tabDouble - tabBase)
-#endif
-
-loop:
- lbu $ra, 0($t9) # ra = shorty[i]
- beqz $ra, loopEnd # finish getting args when shorty[i] == '\0'
- addiu $t9, 1
-
- addiu $ra, -'J'
- beqz $ra, isLong # branch if result type char == 'J'
- addiu $ra, 'J' - 'D'
- beqz $ra, isDouble # branch if result type char == 'D'
- addiu $ra, 'D' - 'F'
- beqz $ra, isSingle # branch if result type char == 'F'
-
- addu $ra, $t2, $t6
- jalr $zero, $ra
- addiu $t8, 4 # next_arg = curr_arg + 4
-
-isLong:
- addu $ra, $t3, $t6
- jalr $zero, $ra
- addiu $t8, 8 # next_arg = curr_arg + 8
-
-isSingle:
- addu $ra, $t4, $t7
- jalr $zero, $ra
- addiu $t8, 4 # next_arg = curr_arg + 4
-
-isDouble:
- addu $ra, $t5, $t7
-#if defined(__mips_isa_rev) && __mips_isa_rev > 2
- addiu $t7, 16 # fp_index += 16 didn't fit into LOAD_DOUBLE_TO_REG
-#endif
- jalr $zero, $ra
- addiu $t8, 8 # next_arg = curr_arg + 8
-
-loopEnd:
- lw $t9, ART_METHOD_QUICK_CODE_OFFSET_32($a0) # get pointer to the code
- jalr $t9 # call the method
- sw $zero, 0($sp) # store null for ArtMethod* at bottom of frame
- move $sp, $fp # restore the stack
- lw $s0, 0($sp)
- .cfi_restore 16
- lw $s1, 4($sp)
- .cfi_restore 17
- lw $fp, 8($sp)
- .cfi_restore 30
- lw $ra, 12($sp)
- .cfi_restore 31
- addiu $sp, $sp, SPILL_SIZE
- .cfi_adjust_cfa_offset -SPILL_SIZE
- lw $t0, 16($sp) # get result pointer
- lw $t1, 20($sp) # get shorty
- lb $t1, 0($t1) # get result type char
- li $t2, 'D' # put char 'D' into t2
- beq $t1, $t2, 5f # branch if result type char == 'D'
- li $t3, 'F' # put char 'F' into t3
- beq $t1, $t3, 5f # branch if result type char == 'F'
- sw $v0, 0($t0) # store the result
- jalr $zero, $ra
- sw $v1, 4($t0) # store the other half of the result
-5:
- CHECK_ALIGNMENT $t0, $t1, 8
- sdc1 $f0, 0($t0) # store floating point result
- jalr $zero, $ra
- nop
-
- // Note that gpr_index is kept within the range of tabInt and tabLong
- // and fp_index is kept within the range of tabSingle and tabDouble.
- .balign 16
-tabInt:
- LOAD_WORD_TO_REG a2, t8, t6, loop # a2 = current argument, gpr_index += 16
- LOAD_WORD_TO_REG a3, t8, t6, loop # a3 = current argument, gpr_index += 16
- LOAD_WORD_TO_REG t0, t8, t6, loop # t0 = current argument, gpr_index += 16
- LOAD_WORD_TO_REG t1, t8, t6, loop # t1 = current argument, gpr_index += 16
- LOAD_END t6, 4*16, loop # no more GPR args, gpr_index = 4*16
-tabLong:
- LOAD_LONG_TO_REG a2, a3, t8, t6, 2*16, loop # a2_a3 = curr_arg, gpr_index = 2*16
- LOAD_LONG_TO_REG t0, t1, t8, t6, 4*16, loop # t0_t1 = curr_arg, gpr_index = 4*16
- LOAD_LONG_TO_REG t0, t1, t8, t6, 4*16, loop # t0_t1 = curr_arg, gpr_index = 4*16
- LOAD_END t6, 4*16, loop # no more GPR args, gpr_index = 4*16
- LOAD_END t6, 4*16, loop # no more GPR args, gpr_index = 4*16
-tabSingle:
- LOAD_FLOAT_TO_REG f8, t8, t7, loop # f8 = curr_arg, fp_index += 16
- LOAD_FLOAT_TO_REG f10, t8, t7, loop # f10 = curr_arg, fp_index += 16
- LOAD_FLOAT_TO_REG f12, t8, t7, loop # f12 = curr_arg, fp_index += 16
- LOAD_FLOAT_TO_REG f14, t8, t7, loop # f14 = curr_arg, fp_index += 16
- LOAD_FLOAT_TO_REG f16, t8, t7, loop # f16 = curr_arg, fp_index += 16
- LOAD_FLOAT_TO_REG f18, t8, t7, loop # f18 = curr_arg, fp_index += 16
- LOAD_END t7, 6*16, loop # no more FPR args, fp_index = 6*16
-tabDouble:
- LOAD_DOUBLE_TO_REG f8, f9, t8, t7, ra, loop # f8_f9 = curr_arg; if FPU32, fp_index += 16
- LOAD_DOUBLE_TO_REG f10, f11, t8, t7, ra, loop # f10_f11 = curr_arg; if FPU32, fp_index += 16
- LOAD_DOUBLE_TO_REG f12, f13, t8, t7, ra, loop # f12_f13 = curr_arg; if FPU32, fp_index += 16
- LOAD_DOUBLE_TO_REG f14, f15, t8, t7, ra, loop # f14_f15 = curr_arg; if FPU32, fp_index += 16
- LOAD_DOUBLE_TO_REG f16, f17, t8, t7, ra, loop # f16_f17 = curr_arg; if FPU32, fp_index += 16
- LOAD_DOUBLE_TO_REG f18, f19, t8, t7, ra, loop # f18_f19 = curr_arg; if FPU32, fp_index += 16
- LOAD_END t7, 6*16, loop # no more FPR args, fp_index = 6*16
-END art_quick_invoke_stub
-
- /*
- * Invocation static stub for quick code.
- * On entry:
- * a0 = method pointer
- * a1 = argument array or null for no argument methods
- * a2 = size of argument array in bytes
- * a3 = (managed) thread pointer
- * [sp + 16] = JValue* result
- * [sp + 20] = shorty
- */
-ENTRY art_quick_invoke_static_stub
- sw $a0, 0($sp) # save out a0
- addiu $sp, $sp, -SPILL_SIZE # spill s0, s1, fp, ra and gp
- .cfi_adjust_cfa_offset SPILL_SIZE
- sw $gp, 16($sp)
- sw $ra, 12($sp)
- .cfi_rel_offset 31, 12
- sw $fp, 8($sp)
- .cfi_rel_offset 30, 8
- sw $s1, 4($sp)
- .cfi_rel_offset 17, 4
- sw $s0, 0($sp)
- .cfi_rel_offset 16, 0
- move $fp, $sp # save sp in fp
- .cfi_def_cfa_register 30
- move $s1, $a3 # move managed thread pointer into s1
- addiu $t0, $a2, 4 # create space for ArtMethod* in frame.
- subu $t0, $sp, $t0 # reserve & align *stack* to 16 bytes:
- srl $t0, $t0, 4 # native calling convention only aligns to 8B,
- sll $sp, $t0, 4 # so we have to ensure ART 16B alignment ourselves.
- addiu $a0, $sp, 4 # pass stack pointer + ArtMethod* as dest for memcpy
- la $t9, memcpy
- jalr $t9 # (dest, src, bytes)
- addiu $sp, $sp, -16 # make space for argument slots for memcpy
- addiu $sp, $sp, 16 # restore stack after memcpy
- lw $gp, 16($fp) # restore $gp
- lw $a0, SPILL_SIZE($fp) # restore ArtMethod*
- addiu $t8, $sp, 4 # t8 = pointer to the current argument (skip ArtMethod*)
- li $t6, 0 # t6 = gpr_index = 0 (corresponds to A1; A0 is skipped)
- li $t7, 0 # t7 = fp_index = 0
- lw $t9, 20 + SPILL_SIZE($fp) # get shorty (20 is offset from the $sp on entry + SPILL_SIZE
- # as the $fp is SPILL_SIZE bytes below the $sp on entry)
- addiu $t9, 1 # t9 = shorty + 1 (skip 1 for return type)
-
- // Load the base addresses of tabIntS ... tabDoubleS.
- // We will use the register indices (gpr_index, fp_index) to branch.
- // Note that the indices are scaled by 16, so they can be added to the bases directly.
-#if defined(__mips_isa_rev) && __mips_isa_rev >= 6
- lapc $t2, tabIntS
- lapc $t3, tabLongS
- lapc $t4, tabSingleS
- lapc $t5, tabDoubleS
-#else
- bltzal $zero, tabBaseS # nal
- addiu $t2, $ra, %lo(tabIntS - tabBaseS)
-tabBaseS:
- addiu $t3, $ra, %lo(tabLongS - tabBaseS)
- addiu $t4, $ra, %lo(tabSingleS - tabBaseS)
- addiu $t5, $ra, %lo(tabDoubleS - tabBaseS)
-#endif
-
-loopS:
- lbu $ra, 0($t9) # ra = shorty[i]
- beqz $ra, loopEndS # finish getting args when shorty[i] == '\0'
- addiu $t9, 1
-
- addiu $ra, -'J'
- beqz $ra, isLongS # branch if result type char == 'J'
- addiu $ra, 'J' - 'D'
- beqz $ra, isDoubleS # branch if result type char == 'D'
- addiu $ra, 'D' - 'F'
- beqz $ra, isSingleS # branch if result type char == 'F'
-
- addu $ra, $t2, $t6
- jalr $zero, $ra
- addiu $t8, 4 # next_arg = curr_arg + 4
-
-isLongS:
- addu $ra, $t3, $t6
- jalr $zero, $ra
- addiu $t8, 8 # next_arg = curr_arg + 8
-
-isSingleS:
- addu $ra, $t4, $t7
- jalr $zero, $ra
- addiu $t8, 4 # next_arg = curr_arg + 4
-
-isDoubleS:
- addu $ra, $t5, $t7
-#if defined(__mips_isa_rev) && __mips_isa_rev > 2
- addiu $t7, 16 # fp_index += 16 didn't fit into LOAD_DOUBLE_TO_REG
-#endif
- jalr $zero, $ra
- addiu $t8, 8 # next_arg = curr_arg + 8
-
-loopEndS:
- lw $t9, ART_METHOD_QUICK_CODE_OFFSET_32($a0) # get pointer to the code
- jalr $t9 # call the method
- sw $zero, 0($sp) # store null for ArtMethod* at bottom of frame
- move $sp, $fp # restore the stack
- lw $s0, 0($sp)
- .cfi_restore 16
- lw $s1, 4($sp)
- .cfi_restore 17
- lw $fp, 8($sp)
- .cfi_restore 30
- lw $ra, 12($sp)
- .cfi_restore 31
- addiu $sp, $sp, SPILL_SIZE
- .cfi_adjust_cfa_offset -SPILL_SIZE
- lw $t0, 16($sp) # get result pointer
- lw $t1, 20($sp) # get shorty
- lb $t1, 0($t1) # get result type char
- li $t2, 'D' # put char 'D' into t2
- beq $t1, $t2, 6f # branch if result type char == 'D'
- li $t3, 'F' # put char 'F' into t3
- beq $t1, $t3, 6f # branch if result type char == 'F'
- sw $v0, 0($t0) # store the result
- jalr $zero, $ra
- sw $v1, 4($t0) # store the other half of the result
-6:
- CHECK_ALIGNMENT $t0, $t1, 8
- sdc1 $f0, 0($t0) # store floating point result
- jalr $zero, $ra
- nop
-
- // Note that gpr_index is kept within the range of tabIntS and tabLongS
- // and fp_index is kept within the range of tabSingleS and tabDoubleS.
- .balign 16
-tabIntS:
- LOAD_WORD_TO_REG a1, t8, t6, loopS # a1 = current argument, gpr_index += 16
- LOAD_WORD_TO_REG a2, t8, t6, loopS # a2 = current argument, gpr_index += 16
- LOAD_WORD_TO_REG a3, t8, t6, loopS # a3 = current argument, gpr_index += 16
- LOAD_WORD_TO_REG t0, t8, t6, loopS # t0 = current argument, gpr_index += 16
- LOAD_WORD_TO_REG t1, t8, t6, loopS # t1 = current argument, gpr_index += 16
- LOAD_END t6, 5*16, loopS # no more GPR args, gpr_index = 5*16
-tabLongS:
- LOAD_LONG_TO_REG a2, a3, t8, t6, 3*16, loopS # a2_a3 = curr_arg, gpr_index = 3*16
- LOAD_LONG_TO_REG a2, a3, t8, t6, 3*16, loopS # a2_a3 = curr_arg, gpr_index = 3*16
- LOAD_LONG_TO_REG t0, t1, t8, t6, 5*16, loopS # t0_t1 = curr_arg, gpr_index = 5*16
- LOAD_LONG_TO_REG t0, t1, t8, t6, 5*16, loopS # t0_t1 = curr_arg, gpr_index = 5*16
- LOAD_END t6, 5*16, loopS # no more GPR args, gpr_index = 5*16
- LOAD_END t6, 5*16, loopS # no more GPR args, gpr_index = 5*16
-tabSingleS:
- LOAD_FLOAT_TO_REG f8, t8, t7, loopS # f8 = curr_arg, fp_index += 16
- LOAD_FLOAT_TO_REG f10, t8, t7, loopS # f10 = curr_arg, fp_index += 16
- LOAD_FLOAT_TO_REG f12, t8, t7, loopS # f12 = curr_arg, fp_index += 16
- LOAD_FLOAT_TO_REG f14, t8, t7, loopS # f14 = curr_arg, fp_index += 16
- LOAD_FLOAT_TO_REG f16, t8, t7, loopS # f16 = curr_arg, fp_index += 16
- LOAD_FLOAT_TO_REG f18, t8, t7, loopS # f18 = curr_arg, fp_index += 16
- LOAD_END t7, 6*16, loopS # no more FPR args, fp_index = 6*16
-tabDoubleS:
- LOAD_DOUBLE_TO_REG f8, f9, t8, t7, ra, loopS # f8_f9 = curr_arg; if FPU32, fp_index += 16
- LOAD_DOUBLE_TO_REG f10, f11, t8, t7, ra, loopS # f10_f11 = curr_arg; if FPU32, fp_index += 16
- LOAD_DOUBLE_TO_REG f12, f13, t8, t7, ra, loopS # f12_f13 = curr_arg; if FPU32, fp_index += 16
- LOAD_DOUBLE_TO_REG f14, f15, t8, t7, ra, loopS # f14_f15 = curr_arg; if FPU32, fp_index += 16
- LOAD_DOUBLE_TO_REG f16, f17, t8, t7, ra, loopS # f16_f17 = curr_arg; if FPU32, fp_index += 16
- LOAD_DOUBLE_TO_REG f18, f19, t8, t7, ra, loopS # f18_f19 = curr_arg; if FPU32, fp_index += 16
- LOAD_END t7, 6*16, loopS # no more FPR args, fp_index = 6*16
-END art_quick_invoke_static_stub
-
-#undef SPILL_SIZE
-
- /*
- * Entry from managed code that calls artHandleFillArrayDataFromCode and delivers exception on
- * failure.
- */
- .extern artHandleFillArrayDataFromCode
-ENTRY art_quick_handle_fill_data
- lw $a2, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case exception allocation triggers GC
- la $t9, artHandleFillArrayDataFromCode
- jalr $t9 # (payload offset, Array*, method, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_handle_fill_data
-
- /*
- * Entry from managed code that calls artLockObjectFromCode, may block for GC.
- */
- .extern artLockObjectFromCode
-ENTRY art_quick_lock_object
- beqz $a0, art_quick_throw_null_pointer_exception
- li $t8, LOCK_WORD_THIN_LOCK_COUNT_ONE
- li $t3, LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED
-.Lretry_lock:
- lw $t0, THREAD_ID_OFFSET(rSELF) # TODO: Can the thread ID really change during the loop?
- ll $t1, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
- and $t2, $t1, $t3 # zero the gc bits
- bnez $t2, .Lnot_unlocked # already thin locked
- # Unlocked case - $t1: original lock word that's zero except for the read barrier bits.
- or $t2, $t1, $t0 # $t2 holds thread id with count of 0 with preserved read barrier bits
- sc $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
- beqz $t2, .Lretry_lock # store failed, retry
- nop
- jalr $zero, $ra
- sync # full (LoadLoad|LoadStore) memory barrier
-.Lnot_unlocked:
- # $t1: original lock word, $t0: thread_id with count of 0 and zero read barrier bits
- srl $t2, $t1, LOCK_WORD_STATE_SHIFT
- bnez $t2, .Lslow_lock # if either of the top two bits are set, go slow path
- xor $t2, $t1, $t0 # lock_word.ThreadId() ^ self->ThreadId()
- andi $t2, $t2, 0xFFFF # zero top 16 bits
- bnez $t2, .Lslow_lock # lock word and self thread id's match -> recursive lock
- # otherwise contention, go to slow path
- and $t2, $t1, $t3 # zero the gc bits
- addu $t2, $t2, $t8 # increment count in lock word
- srl $t2, $t2, LOCK_WORD_STATE_SHIFT # if the first gc state bit is set, we overflowed.
- bnez $t2, .Lslow_lock # if we overflow the count go slow path
- addu $t2, $t1, $t8 # increment count for real
- sc $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
- beqz $t2, .Lretry_lock # store failed, retry
- nop
- jalr $zero, $ra
- nop
-.Lslow_lock:
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case we block
- la $t9, artLockObjectFromCode
- jalr $t9 # (Object* obj, Thread*)
- move $a1, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_lock_object
-
-ENTRY art_quick_lock_object_no_inline
- beqz $a0, art_quick_throw_null_pointer_exception
- nop
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case we block
- la $t9, artLockObjectFromCode
- jalr $t9 # (Object* obj, Thread*)
- move $a1, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_lock_object_no_inline
-
- /*
- * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure.
- */
- .extern artUnlockObjectFromCode
-ENTRY art_quick_unlock_object
- beqz $a0, art_quick_throw_null_pointer_exception
- li $t8, LOCK_WORD_THIN_LOCK_COUNT_ONE
- li $t3, LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED
-.Lretry_unlock:
-#ifndef USE_READ_BARRIER
- lw $t1, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
-#else
- ll $t1, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0) # Need to use atomic read-modify-write for read barrier
-#endif
- srl $t2, $t1, LOCK_WORD_STATE_SHIFT
- bnez $t2, .Lslow_unlock # if either of the top two bits are set, go slow path
- lw $t0, THREAD_ID_OFFSET(rSELF)
- and $t2, $t1, $t3 # zero the gc bits
- xor $t2, $t2, $t0 # lock_word.ThreadId() ^ self->ThreadId()
- andi $t2, $t2, 0xFFFF # zero top 16 bits
- bnez $t2, .Lslow_unlock # do lock word and self thread id's match?
- and $t2, $t1, $t3 # zero the gc bits
- bgeu $t2, $t8, .Lrecursive_thin_unlock
- # transition to unlocked
- nor $t2, $zero, $t3 # $t2 = LOCK_WORD_GC_STATE_MASK_SHIFTED
- and $t2, $t1, $t2 # $t2: zero except for the preserved gc bits
- sync # full (LoadStore|StoreStore) memory barrier
-#ifndef USE_READ_BARRIER
- jalr $zero, $ra
- sw $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
-#else
- sc $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
- beqz $t2, .Lretry_unlock # store failed, retry
- nop
- jalr $zero, $ra
- nop
-#endif
-.Lrecursive_thin_unlock:
- # t1: original lock word
- subu $t2, $t1, $t8 # decrement count
-#ifndef USE_READ_BARRIER
- jalr $zero, $ra
- sw $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
-#else
- sc $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
- beqz $t2, .Lretry_unlock # store failed, retry
- nop
- jalr $zero, $ra
- nop
-#endif
-.Lslow_unlock:
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case exception allocation triggers GC
- la $t9, artUnlockObjectFromCode
- jalr $t9 # (Object* obj, Thread*)
- move $a1, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_unlock_object
-
-ENTRY art_quick_unlock_object_no_inline
- beqz $a0, art_quick_throw_null_pointer_exception
- nop
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case exception allocation triggers GC
- la $t9, artUnlockObjectFromCode
- jalr $t9 # (Object* obj, Thread*)
- move $a1, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_unlock_object_no_inline
-
- /*
- * Entry from managed code that calls artInstanceOfFromCode and delivers exception on failure.
- */
- .extern artInstanceOfFromCode
- .extern artThrowClassCastExceptionForObject
-ENTRY art_quick_check_instance_of
- // Type check using the bit string passes null as the target class. In that case just throw.
- beqz $a1, .Lthrow_class_cast_exception_for_bitstring_check
- nop
-
- addiu $sp, $sp, -32
- .cfi_adjust_cfa_offset 32
- sw $gp, 16($sp)
- sw $ra, 12($sp)
- .cfi_rel_offset 31, 12
- sw $t9, 8($sp)
- sw $a1, 4($sp)
- sw $a0, 0($sp)
- la $t9, artInstanceOfFromCode
- jalr $t9
- addiu $sp, $sp, -16 # reserve argument slots on the stack
- addiu $sp, $sp, 16
- lw $gp, 16($sp)
- beqz $v0, .Lthrow_class_cast_exception
- lw $ra, 12($sp)
- jalr $zero, $ra
- addiu $sp, $sp, 32
- .cfi_adjust_cfa_offset -32
-
-.Lthrow_class_cast_exception:
- lw $t9, 8($sp)
- lw $a1, 4($sp)
- lw $a0, 0($sp)
- addiu $sp, $sp, 32
- .cfi_adjust_cfa_offset -32
-
-.Lthrow_class_cast_exception_for_bitstring_check:
- SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
- la $t9, artThrowClassCastExceptionForObject
- jalr $zero, $t9 # artThrowClassCastException (Object*, Class*, Thread*)
- move $a2, rSELF # pass Thread::Current
-END art_quick_check_instance_of
-
- /*
- * Restore rReg's value from offset($sp) if rReg is not the same as rExclude.
- * nReg is the register number for rReg.
- */
-.macro POP_REG_NE rReg, nReg, offset, rExclude
- .ifnc \rReg, \rExclude
- lw \rReg, \offset($sp) # restore rReg
- .cfi_restore \nReg
- .endif
-.endm
-
- /*
- * Macro to insert read barrier, only used in art_quick_aput_obj.
- * rObj and rDest are registers, offset is a defined literal such as MIRROR_OBJECT_CLASS_OFFSET.
- * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path.
- */
-.macro READ_BARRIER rDest, rObj, offset
-#ifdef USE_READ_BARRIER
- # saved registers used in art_quick_aput_obj: a0-a2, t0-t1, t9, ra. 8 words for 16B alignment.
- addiu $sp, $sp, -32
- .cfi_adjust_cfa_offset 32
- sw $ra, 28($sp)
- .cfi_rel_offset 31, 28
- sw $t9, 24($sp)
- .cfi_rel_offset 25, 24
- sw $t1, 20($sp)
- .cfi_rel_offset 9, 20
- sw $t0, 16($sp)
- .cfi_rel_offset 8, 16
- sw $a2, 8($sp) # padding slot at offset 12 (padding can be any slot in the 32B)
- .cfi_rel_offset 6, 8
- sw $a1, 4($sp)
- .cfi_rel_offset 5, 4
- sw $a0, 0($sp)
- .cfi_rel_offset 4, 0
-
- # move $a0, \rRef # pass ref in a0 (no-op for now since parameter ref is unused)
- .ifnc \rObj, $a1
- move $a1, \rObj # pass rObj
- .endif
- addiu $a2, $zero, \offset # pass offset
- la $t9, artReadBarrierSlow
- jalr $t9 # artReadBarrierSlow(ref, rObj, offset)
- addiu $sp, $sp, -16 # Use branch delay slot to reserve argument slots on the stack
- # before the call to artReadBarrierSlow.
- addiu $sp, $sp, 16 # restore stack after call to artReadBarrierSlow
- # No need to unpoison return value in v0, artReadBarrierSlow() would do the unpoisoning.
- move \rDest, $v0 # save return value in rDest
- # (rDest cannot be v0 in art_quick_aput_obj)
-
- lw $a0, 0($sp) # restore registers except rDest
- # (rDest can only be t0 or t1 in art_quick_aput_obj)
- .cfi_restore 4
- lw $a1, 4($sp)
- .cfi_restore 5
- lw $a2, 8($sp)
- .cfi_restore 6
- POP_REG_NE $t0, 8, 16, \rDest
- POP_REG_NE $t1, 9, 20, \rDest
- lw $t9, 24($sp)
- .cfi_restore 25
- lw $ra, 28($sp) # restore $ra
- .cfi_restore 31
- addiu $sp, $sp, 32
- .cfi_adjust_cfa_offset -32
-#else
- lw \rDest, \offset(\rObj)
- UNPOISON_HEAP_REF \rDest
-#endif // USE_READ_BARRIER
-.endm
-
-#ifdef USE_READ_BARRIER
- .extern artReadBarrierSlow
-#endif
-ENTRY art_quick_aput_obj
- beqz $a2, .Ldo_aput_null
- nop
- READ_BARRIER $t0, $a0, MIRROR_OBJECT_CLASS_OFFSET
- READ_BARRIER $t1, $a2, MIRROR_OBJECT_CLASS_OFFSET
- READ_BARRIER $t0, $t0, MIRROR_CLASS_COMPONENT_TYPE_OFFSET
- bne $t1, $t0, .Lcheck_assignability # value's type == array's component type - trivial assignability
- nop
-.Ldo_aput:
- sll $a1, $a1, 2
- add $t0, $a0, $a1
- POISON_HEAP_REF $a2
- sw $a2, MIRROR_OBJECT_ARRAY_DATA_OFFSET($t0)
- lw $t0, THREAD_CARD_TABLE_OFFSET(rSELF)
- srl $t1, $a0, CARD_TABLE_CARD_SHIFT
- add $t1, $t1, $t0
- sb $t0, ($t1)
- jalr $zero, $ra
- nop
-.Ldo_aput_null:
- sll $a1, $a1, 2
- add $t0, $a0, $a1
- sw $a2, MIRROR_OBJECT_ARRAY_DATA_OFFSET($t0)
- jalr $zero, $ra
- nop
-.Lcheck_assignability:
- addiu $sp, $sp, -32
- .cfi_adjust_cfa_offset 32
- sw $ra, 28($sp)
- .cfi_rel_offset 31, 28
- sw $gp, 16($sp)
- sw $t9, 12($sp)
- sw $a2, 8($sp)
- sw $a1, 4($sp)
- sw $a0, 0($sp)
- move $a1, $t1
- move $a0, $t0
- la $t9, artIsAssignableFromCode
- jalr $t9 # (Class*, Class*)
- addiu $sp, $sp, -16 # reserve argument slots on the stack
- addiu $sp, $sp, 16
- lw $ra, 28($sp)
- lw $gp, 16($sp)
- lw $t9, 12($sp)
- lw $a2, 8($sp)
- lw $a1, 4($sp)
- lw $a0, 0($sp)
- addiu $sp, 32
- .cfi_adjust_cfa_offset -32
- bnez $v0, .Ldo_aput
- nop
- SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
- move $a1, $a2
- la $t9, artThrowArrayStoreException
- jalr $zero, $t9 # artThrowArrayStoreException(Class*, Class*, Thread*)
- move $a2, rSELF # pass Thread::Current
-END art_quick_aput_obj
-
-// Macros taking opportunity of code similarities for downcalls.
-.macro ONE_ARG_REF_DOWNCALL name, entrypoint, return
- .extern \entrypoint
-ENTRY \name
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, \entrypoint
- jalr $t9 # (field_idx, Thread*)
- move $a1, rSELF # pass Thread::Current
- \return # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
-END \name
-.endm
-
-.macro TWO_ARG_REF_DOWNCALL name, entrypoint, return
- .extern \entrypoint
-ENTRY \name
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, \entrypoint
- jalr $t9 # (field_idx, Object*, Thread*) or
- # (field_idx, new_val, Thread*)
- move $a2, rSELF # pass Thread::Current
- \return # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
-END \name
-.endm
-
-.macro THREE_ARG_REF_DOWNCALL name, entrypoint, return
- .extern \entrypoint
-ENTRY \name
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, \entrypoint
- jalr $t9 # (field_idx, Object*, new_val, Thread*)
- move $a3, rSELF # pass Thread::Current
- \return # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
-END \name
-.endm
-
-.macro FOUR_ARG_REF_DOWNCALL name, entrypoint, return
- .extern \entrypoint
-ENTRY \name
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, \entrypoint
- jalr $t9 # (field_idx, Object*, 64-bit new_val, Thread*) or
- # (field_idx, 64-bit new_val, Thread*)
- # Note that a 64-bit new_val needs to be aligned with
- # an even-numbered register, hence A1 may be skipped
- # for new_val to reside in A2-A3.
- sw rSELF, 16($sp) # pass Thread::Current
- \return # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
-END \name
-.endm
-
- /*
- * Called by managed code to resolve a static/instance field and load/store a value.
- *
- * Note: Functions `art{Get,Set}<Kind>{Static,Instance}FromCompiledCode` are
- * defined with a macro in runtime/entrypoints/quick/quick_field_entrypoints.cc.
- */
-ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCompiledCode, RETURN_IF_ZERO
-TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCompiledCode, RETURN_IF_ZERO
-TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCompiledCode, RETURN_IF_ZERO
-TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCompiledCode, RETURN_IF_ZERO
-FOUR_ARG_REF_DOWNCALL art_quick_set64_static, artSet64StaticFromCompiledCode, RETURN_IF_ZERO
-THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCompiledCode, RETURN_IF_ZERO
-THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCompiledCode, RETURN_IF_ZERO
-THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCompiledCode, RETURN_IF_ZERO
-THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCompiledCode, RETURN_IF_ZERO
-FOUR_ARG_REF_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCompiledCode, RETURN_IF_ZERO
-
-// Macro to facilitate adding new allocation entrypoints.
-.macro ONE_ARG_DOWNCALL name, entrypoint, return
- .extern \entrypoint
-ENTRY \name
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, \entrypoint
- jalr $t9
- move $a1, rSELF # pass Thread::Current
- \return
-END \name
-.endm
-
-.macro TWO_ARG_DOWNCALL name, entrypoint, return
- .extern \entrypoint
-ENTRY \name
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, \entrypoint
- jalr $t9
- move $a2, rSELF # pass Thread::Current
- \return
-END \name
-.endm
-
-.macro THREE_ARG_DOWNCALL name, entrypoint, return
- .extern \entrypoint
-ENTRY \name
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, \entrypoint
- jalr $t9
- move $a3, rSELF # pass Thread::Current
- \return
-END \name
-.endm
-
-.macro FOUR_ARG_DOWNCALL name, entrypoint, return
- .extern \entrypoint
-ENTRY \name
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, \entrypoint
- jalr $t9
- sw rSELF, 16($sp) # pass Thread::Current
- \return
-END \name
-.endm
-
-// Generate the allocation entrypoints for each allocator.
-GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_TLAB_ALLOCATORS
-// Comment out allocators that have mips specific asm.
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_OBJECT(_region_tlab, RegionTLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_tlab, RegionTLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_region_tlab, RegionTLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_region_tlab, RegionTLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_region_tlab, RegionTLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
-
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_OBJECT(_tlab, TLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab, TLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_tlab, TLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_tlab, TLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_tlab, TLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab, TLAB)
-
-// A hand-written override for:
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc, RosAlloc)
-.macro ART_QUICK_ALLOC_OBJECT_ROSALLOC c_name, cxx_name, isInitialized
-ENTRY_NO_GP \c_name
- # Fast path rosalloc allocation
- # a0: type
- # s1: Thread::Current
- # -----------------------------
- # t1: object size
- # t2: rosalloc run
- # t3: thread stack top offset
- # t4: thread stack bottom offset
- # v0: free list head
- #
- # t5, t6 : temps
- lw $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1) # Check if thread local allocation
- lw $t4, THREAD_LOCAL_ALLOC_STACK_END_OFFSET($s1) # stack has any room left.
- bgeu $t3, $t4, .Lslow_path_\c_name
-
- lw $t1, MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET($a0) # Load object size (t1).
- li $t5, ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE # Check if size is for a thread local
- # allocation. Also does the
- # initialized and finalizable checks.
- # When isInitialized == 0, then the class is potentially not yet initialized.
- # If the class is not yet initialized, the object size will be very large to force the branch
- # below to be taken.
- #
- # See InitializeClassVisitors in class-inl.h for more details.
- bgtu $t1, $t5, .Lslow_path_\c_name
-
- # Compute the rosalloc bracket index from the size. Since the size is already aligned we can
- # combine the two shifts together.
- srl $t1, $t1, (ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT - POINTER_SIZE_SHIFT)
-
- addu $t2, $t1, $s1
- lw $t2, (THREAD_ROSALLOC_RUNS_OFFSET - __SIZEOF_POINTER__)($t2) # Load rosalloc run (t2).
-
- # Load the free list head (v0).
- # NOTE: this will be the return val.
- lw $v0, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)
- beqz $v0, .Lslow_path_\c_name
- nop
-
- # Load the next pointer of the head and update the list head with the next pointer.
- lw $t5, ROSALLOC_SLOT_NEXT_OFFSET($v0)
- sw $t5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)
-
- # Store the class pointer in the header. This also overwrites the first pointer. The offsets are
- # asserted to match.
-
-#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
-#error "Class pointer needs to overwrite next pointer."
-#endif
-
- POISON_HEAP_REF $a0
- sw $a0, MIRROR_OBJECT_CLASS_OFFSET($v0)
-
- # Push the new object onto the thread local allocation stack and increment the thread local
- # allocation stack top.
- sw $v0, 0($t3)
- addiu $t3, $t3, COMPRESSED_REFERENCE_SIZE
- sw $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1)
-
- # Decrement the size of the free list.
- lw $t5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)
- addiu $t5, $t5, -1
- sw $t5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)
-
-.if \isInitialized == 0
- # This barrier is only necessary when the allocation also requires a class initialization check.
- #
- # If the class is already observably initialized, then new-instance allocations are protected
- # from publishing by the compiler which inserts its own StoreStore barrier.
- sync # Fence.
-.endif
- jalr $zero, $ra
- nop
-
- .Lslow_path_\c_name:
- addiu $t9, $t9, (.Lslow_path_\c_name - \c_name) + 4
- .cpload $t9
- SETUP_SAVE_REFS_ONLY_FRAME
- la $t9, \cxx_name
- jalr $t9
- move $a1, $s1 # Pass self as argument.
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END \c_name
-.endm
-
-ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_resolved_rosalloc, artAllocObjectFromCodeResolvedRosAlloc, /* isInitialized */ 0
-ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, artAllocObjectFromCodeInitializedRosAlloc, /* isInitialized */ 1
-
-// The common fast path code for art_quick_alloc_object_resolved/initialized_tlab
-// and art_quick_alloc_object_resolved/initialized_region_tlab.
-//
-// a0: type, s1(rSELF): Thread::Current.
-// Need to preserve a0 to the slow path.
-//
-// If isInitialized=1 then the compiler assumes the object's class has already been initialized.
-// If isInitialized=0 the compiler can only assume it's been at least resolved.
-.macro ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH slowPathLabel isInitialized
- lw $v0, THREAD_LOCAL_POS_OFFSET(rSELF) # Load thread_local_pos.
- lw $a2, THREAD_LOCAL_END_OFFSET(rSELF) # Load thread_local_end.
- subu $a3, $a2, $v0 # Compute the remaining buffer size.
- lw $t0, MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET($a0) # Load the object size.
-
- # When isInitialized == 0, then the class is potentially not yet initialized.
- # If the class is not yet initialized, the object size will be very large to force the branch
- # below to be taken.
- #
- # See InitializeClassVisitors in class-inl.h for more details.
- bgtu $t0, $a3, \slowPathLabel # Check if it fits.
- addu $t1, $v0, $t0 # Add object size to tlab pos (in branch
- # delay slot).
- # "Point of no slow path". Won't go to the slow path from here on.
- sw $t1, THREAD_LOCAL_POS_OFFSET(rSELF) # Store new thread_local_pos.
- lw $a2, THREAD_LOCAL_OBJECTS_OFFSET(rSELF) # Increment thread_local_objects.
- addiu $a2, $a2, 1
- sw $a2, THREAD_LOCAL_OBJECTS_OFFSET(rSELF)
- POISON_HEAP_REF $a0
- sw $a0, MIRROR_OBJECT_CLASS_OFFSET($v0) # Store the class pointer.
-
-.if \isInitialized == 0
- # This barrier is only necessary when the allocation also requires a class initialization check.
- #
- # If the class is already observably initialized, then new-instance allocations are protected
- # from publishing by the compiler which inserts its own StoreStore barrier.
- sync # Fence.
-.endif
- jalr $zero, $ra
- nop
-.endm
-
-// The common code for art_quick_alloc_object_resolved/initialized_tlab
-// and art_quick_alloc_object_resolved/initialized_region_tlab.
-.macro GENERATE_ALLOC_OBJECT_TLAB name, entrypoint, isInitialized
-ENTRY_NO_GP \name
- # Fast path tlab allocation.
- # a0: type, s1(rSELF): Thread::Current.
- ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lslow_path_\name, \isInitialized
-.Lslow_path_\name:
- addiu $t9, $t9, (.Lslow_path_\name - \name) + 4
- .cpload $t9
- SETUP_SAVE_REFS_ONLY_FRAME # Save callee saves in case of GC.
- la $t9, \entrypoint
- jalr $t9 # (mirror::Class*, Thread*)
- move $a1, rSELF # Pass Thread::Current.
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END \name
-.endm
-
-GENERATE_ALLOC_OBJECT_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB, /* isInitialized */ 0
-GENERATE_ALLOC_OBJECT_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB, /* isInitialized */ 1
-GENERATE_ALLOC_OBJECT_TLAB art_quick_alloc_object_resolved_tlab, artAllocObjectFromCodeResolvedTLAB, /* isInitialized */ 0
-GENERATE_ALLOC_OBJECT_TLAB art_quick_alloc_object_initialized_tlab, artAllocObjectFromCodeInitializedTLAB, /* isInitialized */ 1
-
-// The common fast path code for art_quick_alloc_array_resolved/initialized_tlab
-// and art_quick_alloc_array_resolved/initialized_region_tlab.
-//
-// a0: type, a1: component_count, a2: total_size, s1(rSELF): Thread::Current.
-// Need to preserve a0 and a1 to the slow path.
-.macro ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE slowPathLabel
- li $a3, OBJECT_ALIGNMENT_MASK_TOGGLED # Apply alignemnt mask
- and $a2, $a2, $a3 # (addr + 7) & ~7.
-
- lw $v0, THREAD_LOCAL_POS_OFFSET(rSELF) # Load thread_local_pos.
- lw $t1, THREAD_LOCAL_END_OFFSET(rSELF) # Load thread_local_end.
- subu $t2, $t1, $v0 # Compute the remaining buffer size.
- bgtu $a2, $t2, \slowPathLabel # Check if it fits.
- addu $a2, $v0, $a2 # Add object size to tlab pos (in branch
- # delay slot).
-
- # "Point of no slow path". Won't go to the slow path from here on.
- sw $a2, THREAD_LOCAL_POS_OFFSET(rSELF) # Store new thread_local_pos.
- lw $a2, THREAD_LOCAL_OBJECTS_OFFSET(rSELF) # Increment thread_local_objects.
- addiu $a2, $a2, 1
- sw $a2, THREAD_LOCAL_OBJECTS_OFFSET(rSELF)
- POISON_HEAP_REF $a0
- sw $a0, MIRROR_OBJECT_CLASS_OFFSET($v0) # Store the class pointer.
- jalr $zero, $ra
- sw $a1, MIRROR_ARRAY_LENGTH_OFFSET($v0) # Store the array length.
-.endm
-
-.macro GENERATE_ALLOC_ARRAY_TLAB name, entrypoint, size_setup
-ENTRY_NO_GP \name
- # Fast path array allocation for region tlab allocation.
- # a0: mirror::Class* type
- # a1: int32_t component_count
- # s1(rSELF): Thread::Current
- \size_setup .Lslow_path_\name
- ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE .Lslow_path_\name
-.Lslow_path_\name:
- # a0: mirror::Class* type
- # a1: int32_t component_count
- # a2: Thread* self
- addiu $t9, $t9, (.Lslow_path_\name - \name) + 4
- .cpload $t9
- SETUP_SAVE_REFS_ONLY_FRAME # Save callee saves in case of GC.
- la $t9, \entrypoint
- jalr $t9
- move $a2, rSELF # Pass Thread::Current.
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END \name
-.endm
-
-.macro COMPUTE_ARRAY_SIZE_UNKNOWN slow_path
- break # We should never enter here.
- # Code below is for reference.
- # Possibly a large object, go slow.
- # Also does negative array size check.
- li $a2, ((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_WIDE_ARRAY_DATA_OFFSET) / 8)
- bgtu $a1, $a2, \slow_path
- # Array classes are never finalizable
- # or uninitialized, no need to check.
- lw $a3, MIRROR_CLASS_COMPONENT_TYPE_OFFSET($a0) # Load component type.
- UNPOISON_HEAP_REF $a3
- lw $a3, MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET($a3)
- srl $a3, $a3, PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT # Component size shift is in high 16 bits.
- sllv $a2, $a1, $a3 # Calculate data size.
- # Add array data offset and alignment.
- addiu $a2, $a2, (MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
-#if MIRROR_WIDE_ARRAY_DATA_OFFSET != MIRROR_INT_ARRAY_DATA_OFFSET + 4
-#error Long array data offset must be 4 greater than int array data offset.
-#endif
-
- addiu $a3, $a3, 1 # Add 4 to the length only if the component
- andi $a3, $a3, 4 # size shift is 3 (for 64 bit alignment).
- addu $a2, $a2, $a3
-.endm
-
-.macro COMPUTE_ARRAY_SIZE_8 slow_path
- # Possibly a large object, go slow.
- # Also does negative array size check.
- li $a2, (MIN_LARGE_OBJECT_THRESHOLD - MIRROR_INT_ARRAY_DATA_OFFSET)
- bgtu $a1, $a2, \slow_path
- # Add array data offset and alignment (in branch delay slot).
- addiu $a2, $a1, (MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
-.endm
-
-.macro COMPUTE_ARRAY_SIZE_16 slow_path
- # Possibly a large object, go slow.
- # Also does negative array size check.
- li $a2, ((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_INT_ARRAY_DATA_OFFSET) / 2)
- bgtu $a1, $a2, \slow_path
- sll $a2, $a1, 1
- # Add array data offset and alignment.
- addiu $a2, $a2, (MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
-.endm
-
-.macro COMPUTE_ARRAY_SIZE_32 slow_path
- # Possibly a large object, go slow.
- # Also does negative array size check.
- li $a2, ((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_INT_ARRAY_DATA_OFFSET) / 4)
- bgtu $a1, $a2, \slow_path
- sll $a2, $a1, 2
- # Add array data offset and alignment.
- addiu $a2, $a2, (MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
-.endm
-
-.macro COMPUTE_ARRAY_SIZE_64 slow_path
- # Possibly a large object, go slow.
- # Also does negative array size check.
- li $a2, ((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_LONG_ARRAY_DATA_OFFSET) / 8)
- bgtu $a1, $a2, \slow_path
- sll $a2, $a1, 3
- # Add array data offset and alignment.
- addiu $a2, $a2, (MIRROR_WIDE_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
-.endm
-
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_UNKNOWN
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved8_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_8
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved16_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_16
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved32_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_32
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved64_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_64
-
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_UNKNOWN
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved8_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_8
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved16_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_16
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved32_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_32
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved64_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_64
-
- /*
- * Macro for resolution and initialization of indexed DEX file
- * constants such as classes and strings. $a0 is both input and
- * output.
- */
-.macro ONE_ARG_SAVE_EVERYTHING_DOWNCALL name, entrypoint, runtime_method_offset = RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET
- .extern \entrypoint
-ENTRY_NO_GP \name
- SETUP_SAVE_EVERYTHING_FRAME \runtime_method_offset # Save everything in case of GC.
- move $s2, $gp # Preserve $gp across the call for exception delivery.
- la $t9, \entrypoint
- jalr $t9 # (uint32_t index, Thread*)
- move $a1, rSELF # Pass Thread::Current (in delay slot).
- beqz $v0, 1f # Success?
- move $a0, $v0 # Move result to $a0 (in delay slot).
- RESTORE_SAVE_EVERYTHING_FRAME 0 # Restore everything except $a0.
- jalr $zero, $ra # Return on success.
- nop
-1:
- move $gp, $s2
- DELIVER_PENDING_EXCEPTION_FRAME_READY
-END \name
-.endm
-
-.macro ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT name, entrypoint
- ONE_ARG_SAVE_EVERYTHING_DOWNCALL \name, \entrypoint, RUNTIME_SAVE_EVERYTHING_FOR_CLINIT_METHOD_OFFSET
-.endm
-
- /*
- * Entry from managed code to resolve a method handle. On entry, A0 holds the method handle
- * index. On success the MethodHandle is returned, otherwise an exception is raised.
- */
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_handle, artResolveMethodHandleFromCode
-
- /*
- * Entry from managed code to resolve a method type. On entry, A0 holds the method type index.
- * On success the MethodType is returned, otherwise an exception is raised.
- */
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_type, artResolveMethodTypeFromCode
-
- /*
- * Entry from managed code to resolve a string, this stub will allocate a String and deliver an
- * exception on error. On success the String is returned. A0 holds the string index. The fast
- * path check for hit in strings cache has already been performed.
- */
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_string, artResolveStringFromCode
-
- /*
- * Entry from managed code when uninitialized static storage, this stub will run the class
- * initializer and deliver the exception on error. On success the static storage base is
- * returned.
- */
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_initialize_static_storage, artInitializeStaticStorageFromCode
-
- /*
- * Entry from managed code when dex cache misses for a type_idx.
- */
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_resolve_type, artResolveTypeFromCode
-
- /*
- * Entry from managed code when type_idx needs to be checked for access and dex cache may also
- * miss.
- */
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_type_and_verify_access, artResolveTypeAndVerifyAccessFromCode
-
- /*
- * Called by managed code when the value in rSUSPEND has been decremented to 0.
- */
- .extern artTestSuspendFromCode
-ENTRY_NO_GP art_quick_test_suspend
- SETUP_SAVE_EVERYTHING_FRAME RUNTIME_SAVE_EVERYTHING_FOR_SUSPEND_CHECK_METHOD_OFFSET
- # save everything for stack crawl
- la $t9, artTestSuspendFromCode
- jalr $t9 # (Thread*)
- move $a0, rSELF
- RESTORE_SAVE_EVERYTHING_FRAME
- jalr $zero, $ra
- nop
-END art_quick_test_suspend
-
- /*
- * Called by managed code that is attempting to call a method on a proxy class. On entry
- * a0 holds the proxy method; a1, a2 and a3 may contain arguments.
- */
- .extern artQuickProxyInvokeHandler
-ENTRY art_quick_proxy_invoke_handler
- SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_A0
- move $a2, rSELF # pass Thread::Current
- la $t9, artQuickProxyInvokeHandler
- jalr $t9 # (Method* proxy method, receiver, Thread*, SP)
- addiu $a3, $sp, ARG_SLOT_SIZE # pass $sp (remove arg slots)
- lw $t7, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
- RESTORE_SAVE_REFS_AND_ARGS_FRAME
- bnez $t7, 1f
- # don't care if $v0 and/or $v1 are modified, when exception branch taken
- MTD $v0, $v1, $f0, $f1 # move float value to return value
- jalr $zero, $ra
- nop
-1:
- DELIVER_PENDING_EXCEPTION
-END art_quick_proxy_invoke_handler
-
- /*
- * Called to resolve an imt conflict.
- * a0 is the conflict ArtMethod.
- * t7 is a hidden argument that holds the target interface method's dex method index.
- *
- * Note that this stub writes to v0-v1, a0, t2-t9, f0-f7.
- */
- .extern artLookupResolvedMethod
- .extern __atomic_load_8 # For int64_t std::atomic::load(std::memory_order).
-ENTRY art_quick_imt_conflict_trampoline
- SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY /* save_s4_thru_s8 */ 0
-
- lw $t8, FRAME_SIZE_SAVE_REFS_AND_ARGS($sp) # $t8 = referrer.
- // If the method is obsolete, just go through the dex cache miss slow path.
- // The obsolete flag is set with suspended threads, so we do not need an acquire operation here.
- lw $t9, ART_METHOD_ACCESS_FLAGS_OFFSET($t8) # $t9 = access flags.
- sll $t9, $t9, 31 - ACC_OBSOLETE_METHOD_SHIFT # Move obsolete method bit to sign bit.
- bltz $t9, .Limt_conflict_trampoline_dex_cache_miss
- lw $t8, ART_METHOD_DECLARING_CLASS_OFFSET($t8) # $t8 = declaring class (no read barrier).
- lw $t8, MIRROR_CLASS_DEX_CACHE_OFFSET($t8) # $t8 = dex cache (without read barrier).
- UNPOISON_HEAP_REF $t8
- la $t9, __atomic_load_8
- addiu $sp, $sp, -ARG_SLOT_SIZE # Reserve argument slots on the stack.
- .cfi_adjust_cfa_offset ARG_SLOT_SIZE
- lw $t8, MIRROR_DEX_CACHE_RESOLVED_METHODS_OFFSET($t8) # $t8 = dex cache methods array.
-
- move $s2, $t7 # $s2 = method index (callee-saved).
- lw $s3, ART_METHOD_JNI_OFFSET_32($a0) # $s3 = ImtConflictTable (callee-saved).
-
- sll $t7, $t7, 32 - METHOD_DEX_CACHE_HASH_BITS # $t7 = slot index in top bits, zeroes below.
- srl $t7, $t7, 32 - METHOD_DEX_CACHE_HASH_BITS - (POINTER_SIZE_SHIFT + 1)
- # $t7 = slot offset.
-
- li $a1, STD_MEMORY_ORDER_RELAXED # $a1 = std::memory_order_relaxed.
- jalr $t9 # [$v0, $v1] = __atomic_load_8($a0, $a1).
- addu $a0, $t8, $t7 # $a0 = DexCache method slot address.
-
- bne $v1, $s2, .Limt_conflict_trampoline_dex_cache_miss # Branch if method index miss.
- addiu $sp, $sp, ARG_SLOT_SIZE # Remove argument slots from the stack.
- .cfi_adjust_cfa_offset -ARG_SLOT_SIZE
-
-.Limt_table_iterate:
- lw $t8, 0($s3) # Load next entry in ImtConflictTable.
- # Branch if found.
- beq $t8, $v0, .Limt_table_found
- nop
- # If the entry is null, the interface method is not in the ImtConflictTable.
- beqz $t8, .Lconflict_trampoline
- nop
- # Iterate over the entries of the ImtConflictTable.
- b .Limt_table_iterate
- addiu $s3, $s3, 2 * __SIZEOF_POINTER__ # Iterate to the next entry.
-
-.Limt_table_found:
- # We successfully hit an entry in the table. Load the target method and jump to it.
- .cfi_remember_state
- lw $a0, __SIZEOF_POINTER__($s3)
- lw $t9, ART_METHOD_QUICK_CODE_OFFSET_32($a0)
- RESTORE_SAVE_REFS_AND_ARGS_FRAME /* restore_s4_thru_s8 */ 0, /* remove_arg_slots */ 0
- jalr $zero, $t9
- nop
- .cfi_restore_state
-
-.Lconflict_trampoline:
- # Call the runtime stub to populate the ImtConflictTable and jump to the resolved method.
- .cfi_remember_state
- RESTORE_SAVE_REFS_AND_ARGS_FRAME_GP # Restore clobbered $gp.
- RESTORE_SAVE_REFS_AND_ARGS_FRAME_A1 # Restore this.
- move $a0, $v0 # Load interface method.
- INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline, /* save_s4_thru_s8_only */ 1
- .cfi_restore_state
-
-.Limt_conflict_trampoline_dex_cache_miss:
- # We're not creating a proper runtime method frame here,
- # artLookupResolvedMethod() is not allowed to walk the stack.
- RESTORE_SAVE_REFS_AND_ARGS_FRAME_GP # Restore clobbered $gp.
- lw $a1, FRAME_SIZE_SAVE_REFS_AND_ARGS($sp) # $a1 = referrer.
- la $t9, artLookupResolvedMethod
- addiu $sp, $sp, -ARG_SLOT_SIZE # Reserve argument slots on the stack.
- .cfi_adjust_cfa_offset ARG_SLOT_SIZE
- jalr $t9 # (uint32_t method_index, ArtMethod* referrer).
- move $a0, $s2 # $a0 = method index.
-
- # If the method wasn't resolved, skip the lookup and go to artInvokeInterfaceTrampoline().
- beqz $v0, .Lconflict_trampoline
- addiu $sp, $sp, ARG_SLOT_SIZE # Remove argument slots from the stack.
- .cfi_adjust_cfa_offset -ARG_SLOT_SIZE
-
- b .Limt_table_iterate
- nop
-END art_quick_imt_conflict_trampoline
-
- .extern artQuickResolutionTrampoline
-ENTRY art_quick_resolution_trampoline
- SETUP_SAVE_REFS_AND_ARGS_FRAME
- move $a2, rSELF # pass Thread::Current
- la $t9, artQuickResolutionTrampoline
- jalr $t9 # (Method* called, receiver, Thread*, SP)
- addiu $a3, $sp, ARG_SLOT_SIZE # pass $sp (remove arg slots)
- beqz $v0, 1f
- lw $a0, ARG_SLOT_SIZE($sp) # load resolved method to $a0
- RESTORE_SAVE_REFS_AND_ARGS_FRAME
- move $t9, $v0 # code pointer must be in $t9 to generate the global pointer
- jalr $zero, $t9 # tail call to method
- nop
-1:
- RESTORE_SAVE_REFS_AND_ARGS_FRAME
- DELIVER_PENDING_EXCEPTION
-END art_quick_resolution_trampoline
-
- .extern artQuickGenericJniTrampoline
- .extern artQuickGenericJniEndTrampoline
-ENTRY art_quick_generic_jni_trampoline
- SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_A0
- move $s8, $sp # save $sp to $s8
- move $s3, $gp # save $gp to $s3
-
- # prepare for call to artQuickGenericJniTrampoline(Thread*, SP)
- move $a0, rSELF # pass Thread::Current
- addiu $a1, $sp, ARG_SLOT_SIZE # save $sp (remove arg slots)
- la $t9, artQuickGenericJniTrampoline
- jalr $t9 # (Thread*, SP)
- addiu $sp, $sp, -5120 # reserve space on the stack
-
- # The C call will have registered the complete save-frame on success.
- # The result of the call is:
- # v0: ptr to native code, 0 on error.
- # v1: ptr to the bottom of the used area of the alloca, can restore stack till here.
- beq $v0, $zero, 2f # check entry error
- move $t9, $v0 # save the code ptr
- move $sp, $v1 # release part of the alloca
-
- # Load parameters from stack into registers
- lw $a0, 0($sp)
- lw $a1, 4($sp)
- lw $a2, 8($sp)
- lw $a3, 12($sp)
-
- # artQuickGenericJniTrampoline sets bit 0 of the native code address to 1
- # when the first two arguments are both single precision floats. This lets
- # us extract them properly from the stack and load into floating point
- # registers.
- MTD $a0, $a1, $f12, $f13
- andi $t0, $t9, 1
- xor $t9, $t9, $t0
- bnez $t0, 1f
- mtc1 $a1, $f14
- MTD $a2, $a3, $f14, $f15
-
-1:
- jalr $t9 # native call
- nop
- addiu $sp, $sp, 16 # remove arg slots
-
- move $gp, $s3 # restore $gp from $s3
-
- # result sign extension is handled in C code
- # prepare for call to artQuickGenericJniEndTrampoline(Thread*, result, result_f)
- move $a0, rSELF # pass Thread::Current
- move $a2, $v0 # pass result
- move $a3, $v1
- addiu $sp, $sp, -32 # reserve arg slots
- la $t9, artQuickGenericJniEndTrampoline
- jalr $t9
- s.d $f0, 16($sp) # pass result_f
-
- lw $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
- bne $t0, $zero, 2f # check for pending exceptions
-
- move $sp, $s8 # tear down the alloca
-
- # tear down the callee-save frame
- RESTORE_SAVE_REFS_AND_ARGS_FRAME
-
- MTD $v0, $v1, $f0, $f1 # move float value to return value
- jalr $zero, $ra
- nop
-
-2:
- lw $t0, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)
- addiu $sp, $t0, -1 // Remove the GenericJNI tag.
- move $gp, $s3 # restore $gp from $s3
- # This will create a new save-all frame, required by the runtime.
- DELIVER_PENDING_EXCEPTION
-END art_quick_generic_jni_trampoline
-
- .extern artQuickToInterpreterBridge
-ENTRY art_quick_to_interpreter_bridge
- SETUP_SAVE_REFS_AND_ARGS_FRAME
- move $a1, rSELF # pass Thread::Current
- la $t9, artQuickToInterpreterBridge
- jalr $t9 # (Method* method, Thread*, SP)
- addiu $a2, $sp, ARG_SLOT_SIZE # pass $sp (remove arg slots)
- lw $t7, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
- RESTORE_SAVE_REFS_AND_ARGS_FRAME
- bnez $t7, 1f
- # don't care if $v0 and/or $v1 are modified, when exception branch taken
- MTD $v0, $v1, $f0, $f1 # move float value to return value
- jalr $zero, $ra
- nop
-1:
- DELIVER_PENDING_EXCEPTION
-END art_quick_to_interpreter_bridge
-
- .extern artInvokeObsoleteMethod
-ENTRY art_invoke_obsolete_method_stub
- SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
- la $t9, artInvokeObsoleteMethod
- jalr $t9 # (Method* method, Thread* self)
- move $a1, rSELF # pass Thread::Current
-END art_invoke_obsolete_method_stub
-
- /*
- * Routine that intercepts method calls and returns.
- */
- .extern artInstrumentationMethodEntryFromCode
- .extern artInstrumentationMethodExitFromCode
-ENTRY art_quick_instrumentation_entry
- SETUP_SAVE_REFS_AND_ARGS_FRAME
- sw $a0, 28($sp) # save arg0 in free arg slot
- addiu $a3, $sp, ARG_SLOT_SIZE # Pass $sp.
- la $t9, artInstrumentationMethodEntryFromCode
- jalr $t9 # (Method*, Object*, Thread*, SP)
- move $a2, rSELF # pass Thread::Current
- beqz $v0, .Ldeliver_instrumentation_entry_exception
- move $t9, $v0 # $t9 holds reference to code
- lw $a0, 28($sp) # restore arg0 from free arg slot
- RESTORE_SAVE_REFS_AND_ARGS_FRAME
- la $ra, art_quick_instrumentation_exit
- jalr $zero, $t9 # call method, returning to art_quick_instrumentation_exit
- nop
-.Ldeliver_instrumentation_entry_exception:
- RESTORE_SAVE_REFS_AND_ARGS_FRAME
- DELIVER_PENDING_EXCEPTION
-END art_quick_instrumentation_entry
-
-ENTRY_NO_GP art_quick_instrumentation_exit
- move $ra, $zero # RA points here, so clobber with 0 for later checks.
- SETUP_SAVE_EVERYTHING_FRAME # Allocates ARG_SLOT_SIZE bytes at the bottom of the stack.
- move $s2, $gp # Preserve $gp across the call for exception delivery.
-
- addiu $a3, $sp, ARG_SLOT_SIZE+16 # Pass fpr_res pointer ($f0 in SAVE_EVERYTHING_FRAME).
- addiu $a2, $sp, ARG_SLOT_SIZE+148 # Pass gpr_res pointer ($v0 in SAVE_EVERYTHING_FRAME).
- addiu $a1, $sp, ARG_SLOT_SIZE # Pass $sp.
- la $t9, artInstrumentationMethodExitFromCode
- jalr $t9 # (Thread*, SP, gpr_res*, fpr_res*)
- move $a0, rSELF # Pass Thread::Current.
-
- beqz $v0, .Ldo_deliver_instrumentation_exception
- move $gp, $s2 # Deliver exception if we got nullptr as function.
- bnez $v1, .Ldeoptimize
-
- # Normal return.
- sw $v0, (ARG_SLOT_SIZE+FRAME_SIZE_SAVE_EVERYTHING-4)($sp) # Set return pc.
- RESTORE_SAVE_EVERYTHING_FRAME
- jalr $zero, $ra
- nop
-.Ldo_deliver_instrumentation_exception:
- DELIVER_PENDING_EXCEPTION_FRAME_READY
-.Ldeoptimize:
- b art_quick_deoptimize
- sw $v1, (ARG_SLOT_SIZE+FRAME_SIZE_SAVE_EVERYTHING-4)($sp)
- # Fake a call from instrumentation return pc.
-END art_quick_instrumentation_exit
-
- /*
- * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization
- * will long jump to the upcall with a special exception of -1.
- */
- .extern artDeoptimize
-ENTRY_NO_GP_CUSTOM_CFA art_quick_deoptimize, ARG_SLOT_SIZE+FRAME_SIZE_SAVE_EVERYTHING
- # SETUP_SAVE_EVERYTHING_FRAME has been done by art_quick_instrumentation_exit.
- .cfi_rel_offset 31, ARG_SLOT_SIZE+252
- .cfi_rel_offset 30, ARG_SLOT_SIZE+248
- .cfi_rel_offset 28, ARG_SLOT_SIZE+244
- .cfi_rel_offset 25, ARG_SLOT_SIZE+240
- .cfi_rel_offset 24, ARG_SLOT_SIZE+236
- .cfi_rel_offset 23, ARG_SLOT_SIZE+232
- .cfi_rel_offset 22, ARG_SLOT_SIZE+228
- .cfi_rel_offset 21, ARG_SLOT_SIZE+224
- .cfi_rel_offset 20, ARG_SLOT_SIZE+220
- .cfi_rel_offset 19, ARG_SLOT_SIZE+216
- .cfi_rel_offset 18, ARG_SLOT_SIZE+212
- .cfi_rel_offset 17, ARG_SLOT_SIZE+208
- .cfi_rel_offset 16, ARG_SLOT_SIZE+204
- .cfi_rel_offset 15, ARG_SLOT_SIZE+200
- .cfi_rel_offset 14, ARG_SLOT_SIZE+196
- .cfi_rel_offset 13, ARG_SLOT_SIZE+192
- .cfi_rel_offset 12, ARG_SLOT_SIZE+188
- .cfi_rel_offset 11, ARG_SLOT_SIZE+184
- .cfi_rel_offset 10, ARG_SLOT_SIZE+180
- .cfi_rel_offset 9, ARG_SLOT_SIZE+176
- .cfi_rel_offset 8, ARG_SLOT_SIZE+172
- .cfi_rel_offset 7, ARG_SLOT_SIZE+168
- .cfi_rel_offset 6, ARG_SLOT_SIZE+164
- .cfi_rel_offset 5, ARG_SLOT_SIZE+160
- .cfi_rel_offset 4, ARG_SLOT_SIZE+156
- .cfi_rel_offset 3, ARG_SLOT_SIZE+152
- .cfi_rel_offset 2, ARG_SLOT_SIZE+148
- .cfi_rel_offset 1, ARG_SLOT_SIZE+144
-
- la $t9, artDeoptimize
- jalr $t9 # (Thread*)
- move $a0, rSELF # pass Thread::current
- break
-END art_quick_deoptimize
-
- /*
- * Compiled code has requested that we deoptimize into the interpreter. The deoptimization
- * will long jump to the upcall with a special exception of -1.
- */
- .extern artDeoptimizeFromCompiledCode
-ENTRY_NO_GP art_quick_deoptimize_from_compiled_code
- SETUP_SAVE_EVERYTHING_FRAME
- la $t9, artDeoptimizeFromCompiledCode
- jalr $t9 # (DeoptimizationKind, Thread*)
- move $a1, rSELF # pass Thread::current
-END art_quick_deoptimize_from_compiled_code
-
- /*
- * Long integer shift. This is different from the generic 32/64-bit
- * binary operations because vAA/vBB are 64-bit but vCC (the shift
- * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low
- * 6 bits.
- * On entry:
- * $a0: low word
- * $a1: high word
- * $a2: shift count
- */
-ENTRY_NO_GP art_quick_shl_long
- /* shl-long vAA, vBB, vCC */
- sll $v0, $a0, $a2 # rlo<- alo << (shift&31)
- not $v1, $a2 # rhi<- 31-shift (shift is 5b)
- srl $a0, 1
- srl $a0, $v1 # alo<- alo >> (32-(shift&31))
- sll $v1, $a1, $a2 # rhi<- ahi << (shift&31)
- andi $a2, 0x20 # shift< shift & 0x20
- beqz $a2, 1f
- or $v1, $a0 # rhi<- rhi | alo
-
- move $v1, $v0 # rhi<- rlo (if shift&0x20)
- move $v0, $zero # rlo<- 0 (if shift&0x20)
-
-1: jalr $zero, $ra
- nop
-END art_quick_shl_long
-
- /*
- * Long integer shift. This is different from the generic 32/64-bit
- * binary operations because vAA/vBB are 64-bit but vCC (the shift
- * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low
- * 6 bits.
- * On entry:
- * $a0: low word
- * $a1: high word
- * $a2: shift count
- */
-ENTRY_NO_GP art_quick_shr_long
- sra $v1, $a1, $a2 # rhi<- ahi >> (shift&31)
- srl $v0, $a0, $a2 # rlo<- alo >> (shift&31)
- sra $a3, $a1, 31 # $a3<- sign(ah)
- not $a0, $a2 # alo<- 31-shift (shift is 5b)
- sll $a1, 1
- sll $a1, $a0 # ahi<- ahi << (32-(shift&31))
- andi $a2, 0x20 # shift & 0x20
- beqz $a2, 1f
- or $v0, $a1 # rlo<- rlo | ahi
-
- move $v0, $v1 # rlo<- rhi (if shift&0x20)
- move $v1, $a3 # rhi<- sign(ahi) (if shift&0x20)
-
-1: jalr $zero, $ra
- nop
-END art_quick_shr_long
-
- /*
- * Long integer shift. This is different from the generic 32/64-bit
- * binary operations because vAA/vBB are 64-bit but vCC (the shift
- * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low
- * 6 bits.
- * On entry:
- * $a0: low word
- * $a1: high word
- * $a2: shift count
- */
- /* ushr-long vAA, vBB, vCC */
-ENTRY_NO_GP art_quick_ushr_long
- srl $v1, $a1, $a2 # rhi<- ahi >> (shift&31)
- srl $v0, $a0, $a2 # rlo<- alo >> (shift&31)
- not $a0, $a2 # alo<- 31-shift (shift is 5b)
- sll $a1, 1
- sll $a1, $a0 # ahi<- ahi << (32-(shift&31))
- andi $a2, 0x20 # shift & 0x20
- beqz $a2, 1f
- or $v0, $a1 # rlo<- rlo | ahi
-
- move $v0, $v1 # rlo<- rhi (if shift&0x20)
- move $v1, $zero # rhi<- 0 (if shift&0x20)
-
-1: jalr $zero, $ra
- nop
-END art_quick_ushr_long
-
-/* java.lang.String.indexOf(int ch, int fromIndex=0) */
-ENTRY_NO_GP art_quick_indexof
-/* $a0 holds address of "this" */
-/* $a1 holds "ch" */
-/* $a2 holds "fromIndex" */
-#if (STRING_COMPRESSION_FEATURE)
- lw $a3, MIRROR_STRING_COUNT_OFFSET($a0) # 'count' field of this
-#else
- lw $t0, MIRROR_STRING_COUNT_OFFSET($a0) # this.length()
-#endif
- slt $t1, $a2, $zero # if fromIndex < 0
-#if defined(_MIPS_ARCH_MIPS32R6)
- seleqz $a2, $a2, $t1 # fromIndex = 0;
-#else
- movn $a2, $zero, $t1 # fromIndex = 0;
-#endif
-
-#if (STRING_COMPRESSION_FEATURE)
- srl $t0, $a3, 1 # $a3 holds count (with flag) and $t0 holds actual length
-#endif
- subu $t0, $t0, $a2 # this.length() - fromIndex
- blez $t0, 6f # if this.length()-fromIndex <= 0
- li $v0, -1 # return -1;
-
-#if (STRING_COMPRESSION_FEATURE)
- sll $a3, $a3, 31 # Extract compression flag.
- beqz $a3, .Lstring_indexof_compressed
- move $t2, $a0 # Save a copy in $t2 to later compute result (in branch delay slot).
-#endif
- sll $v0, $a2, 1 # $a0 += $a2 * 2
- addu $a0, $a0, $v0 # " ditto "
- move $v0, $a2 # Set i to fromIndex.
-
-1:
- lhu $t3, MIRROR_STRING_VALUE_OFFSET($a0) # if this.charAt(i) == ch
- beq $t3, $a1, 6f # return i;
- addu $a0, $a0, 2 # i++
- subu $t0, $t0, 1 # this.length() - i
- bnez $t0, 1b # while this.length() - i > 0
- addu $v0, $v0, 1 # i++
-
- li $v0, -1 # if this.length() - i <= 0
- # return -1;
-
-6:
- j $ra
- nop
-
-#if (STRING_COMPRESSION_FEATURE)
-.Lstring_indexof_compressed:
- addu $a0, $a0, $a2 # $a0 += $a2
-
-.Lstring_indexof_compressed_loop:
- lbu $t3, MIRROR_STRING_VALUE_OFFSET($a0)
- beq $t3, $a1, .Lstring_indexof_compressed_matched
- subu $t0, $t0, 1
- bgtz $t0, .Lstring_indexof_compressed_loop
- addu $a0, $a0, 1
-
-.Lstring_indexof_nomatch:
- jalr $zero, $ra
- li $v0, -1 # return -1;
-
-.Lstring_indexof_compressed_matched:
- jalr $zero, $ra
- subu $v0, $a0, $t2 # return (current - start);
-#endif
-END art_quick_indexof
-
-/* java.lang.String.compareTo(String anotherString) */
-ENTRY_NO_GP art_quick_string_compareto
-/* $a0 holds address of "this" */
-/* $a1 holds address of "anotherString" */
- beq $a0, $a1, .Lstring_compareto_length_diff # this and anotherString are the same object
- move $a3, $a2 # trick to return 0 (it returns a2 - a3)
-
-#if (STRING_COMPRESSION_FEATURE)
- lw $t0, MIRROR_STRING_COUNT_OFFSET($a0) # 'count' field of this
- lw $t1, MIRROR_STRING_COUNT_OFFSET($a1) # 'count' field of anotherString
- sra $a2, $t0, 1 # this.length()
- sra $a3, $t1, 1 # anotherString.length()
-#else
- lw $a2, MIRROR_STRING_COUNT_OFFSET($a0) # this.length()
- lw $a3, MIRROR_STRING_COUNT_OFFSET($a1) # anotherString.length()
-#endif
-
- MINu $t2, $a2, $a3
- # $t2 now holds min(this.length(),anotherString.length())
-
- # while min(this.length(),anotherString.length())-i != 0
- beqz $t2, .Lstring_compareto_length_diff # if $t2==0
- nop # return (this.length() - anotherString.length())
-
-#if (STRING_COMPRESSION_FEATURE)
- # Differ cases:
- sll $t3, $t0, 31
- beqz $t3, .Lstring_compareto_this_is_compressed
- sll $t3, $t1, 31 # In branch delay slot.
- beqz $t3, .Lstring_compareto_that_is_compressed
- nop
- b .Lstring_compareto_both_not_compressed
- nop
-
-.Lstring_compareto_this_is_compressed:
- beqz $t3, .Lstring_compareto_both_compressed
- nop
- /* If (this->IsCompressed() && that->IsCompressed() == false) */
-.Lstring_compareto_loop_comparison_this_compressed:
- lbu $t0, MIRROR_STRING_VALUE_OFFSET($a0)
- lhu $t1, MIRROR_STRING_VALUE_OFFSET($a1)
- bne $t0, $t1, .Lstring_compareto_char_diff
- addiu $a0, $a0, 1 # point at this.charAt(i++) - compressed
- subu $t2, $t2, 1 # new value of min(this.length(),anotherString.length())-i
- bnez $t2, .Lstring_compareto_loop_comparison_this_compressed
- addiu $a1, $a1, 2 # point at anotherString.charAt(i++) - uncompressed
- jalr $zero, $ra
- subu $v0, $a2, $a3 # return (this.length() - anotherString.length())
-
-.Lstring_compareto_that_is_compressed:
- lhu $t0, MIRROR_STRING_VALUE_OFFSET($a0)
- lbu $t1, MIRROR_STRING_VALUE_OFFSET($a1)
- bne $t0, $t1, .Lstring_compareto_char_diff
- addiu $a0, $a0, 2 # point at this.charAt(i++) - uncompressed
- subu $t2, $t2, 1 # new value of min(this.length(),anotherString.length())-i
- bnez $t2, .Lstring_compareto_that_is_compressed
- addiu $a1, $a1, 1 # point at anotherString.charAt(i++) - compressed
- jalr $zero, $ra
- subu $v0, $a2, $a3 # return (this.length() - anotherString.length())
-
-.Lstring_compareto_both_compressed:
- lbu $t0, MIRROR_STRING_VALUE_OFFSET($a0)
- lbu $t1, MIRROR_STRING_VALUE_OFFSET($a1)
- bne $t0, $t1, .Lstring_compareto_char_diff
- addiu $a0, $a0, 1 # point at this.charAt(i++) - compressed
- subu $t2, $t2, 1 # new value of min(this.length(),anotherString.length())-i
- bnez $t2, .Lstring_compareto_both_compressed
- addiu $a1, $a1, 1 # point at anotherString.charAt(i++) - compressed
- jalr $zero, $ra
- subu $v0, $a2, $a3 # return (this.length() - anotherString.length())
-#endif
-
-.Lstring_compareto_both_not_compressed:
- lhu $t0, MIRROR_STRING_VALUE_OFFSET($a0) # while this.charAt(i) == anotherString.charAt(i)
- lhu $t1, MIRROR_STRING_VALUE_OFFSET($a1)
- bne $t0, $t1, .Lstring_compareto_char_diff # if this.charAt(i) != anotherString.charAt(i)
- # return (this.charAt(i) - anotherString.charAt(i))
- addiu $a0, $a0, 2 # point at this.charAt(i++)
- subu $t2, $t2, 1 # new value of min(this.length(),anotherString.length())-i
- bnez $t2, .Lstring_compareto_both_not_compressed
- addiu $a1, $a1, 2 # point at anotherString.charAt(i++)
-
-.Lstring_compareto_length_diff:
- jalr $zero, $ra
- subu $v0, $a2, $a3 # return (this.length() - anotherString.length())
-
-.Lstring_compareto_char_diff:
- jalr $zero, $ra
- subu $v0, $t0, $t1 # return (this.charAt(i) - anotherString.charAt(i))
-END art_quick_string_compareto
-
- .extern artStringBuilderAppend
-ENTRY art_quick_string_builder_append
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artStringBuilderAppend
- addiu $a1, $sp, ARG_SLOT_SIZE + FRAME_SIZE_SAVE_REFS_ONLY + __SIZEOF_POINTER__ # pass args
- jalr $t9 # (uint32_t, const unit32_t*, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END art_quick_string_builder_append
-
- /*
- * Create a function `name` calling the ReadBarrier::Mark routine,
- * getting its argument and returning its result through register
- * `reg`, saving and restoring all caller-save registers.
- */
-.macro READ_BARRIER_MARK_REG name, reg
-ENTRY \name
- // Null check so that we can load the lock word.
- bnez \reg, .Lnot_null_\name
- nop
-.Lret_rb_\name:
- jalr $zero, $ra
- nop
-.Lnot_null_\name:
- // Check lock word for mark bit, if marked return.
- lw $t9, MIRROR_OBJECT_LOCK_WORD_OFFSET(\reg)
- .set push
- .set noat
- sll $at, $t9, 31 - LOCK_WORD_MARK_BIT_SHIFT # Move mark bit to sign bit.
- bltz $at, .Lret_rb_\name
-#if (LOCK_WORD_STATE_SHIFT != 30) || (LOCK_WORD_STATE_FORWARDING_ADDRESS != 3)
- // The below code depends on the lock word state being in the highest bits
- // and the "forwarding address" state having all bits set.
-#error "Unexpected lock word state shift or forwarding address state value."
-#endif
- // Test that both the forwarding state bits are 1.
- sll $at, $t9, 1
- and $at, $at, $t9 # Sign bit = 1 IFF both bits are 1.
- bltz $at, .Lret_forwarding_address\name
- nop
- .set pop
-
- addiu $sp, $sp, -160 # Includes 16 bytes of space for argument registers a0-a3.
- .cfi_adjust_cfa_offset 160
-
- sw $ra, 156($sp)
- .cfi_rel_offset 31, 156
- sw $t8, 152($sp)
- .cfi_rel_offset 24, 152
- sw $t7, 148($sp)
- .cfi_rel_offset 15, 148
- sw $t6, 144($sp)
- .cfi_rel_offset 14, 144
- sw $t5, 140($sp)
- .cfi_rel_offset 13, 140
- sw $t4, 136($sp)
- .cfi_rel_offset 12, 136
- sw $t3, 132($sp)
- .cfi_rel_offset 11, 132
- sw $t2, 128($sp)
- .cfi_rel_offset 10, 128
- sw $t1, 124($sp)
- .cfi_rel_offset 9, 124
- sw $t0, 120($sp)
- .cfi_rel_offset 8, 120
- sw $a3, 116($sp)
- .cfi_rel_offset 7, 116
- sw $a2, 112($sp)
- .cfi_rel_offset 6, 112
- sw $a1, 108($sp)
- .cfi_rel_offset 5, 108
- sw $a0, 104($sp)
- .cfi_rel_offset 4, 104
- sw $v1, 100($sp)
- .cfi_rel_offset 3, 100
- sw $v0, 96($sp)
- .cfi_rel_offset 2, 96
-
- la $t9, artReadBarrierMark
-
- sdc1 $f18, 88($sp)
- sdc1 $f16, 80($sp)
- sdc1 $f14, 72($sp)
- sdc1 $f12, 64($sp)
- sdc1 $f10, 56($sp)
- sdc1 $f8, 48($sp)
- sdc1 $f6, 40($sp)
- sdc1 $f4, 32($sp)
- sdc1 $f2, 24($sp)
-
- .ifnc \reg, $a0
- move $a0, \reg # pass obj from `reg` in a0
- .endif
- jalr $t9 # v0 <- artReadBarrierMark(obj)
- sdc1 $f0, 16($sp) # in delay slot
-
- lw $ra, 156($sp)
- .cfi_restore 31
- lw $t8, 152($sp)
- .cfi_restore 24
- lw $t7, 148($sp)
- .cfi_restore 15
- lw $t6, 144($sp)
- .cfi_restore 14
- lw $t5, 140($sp)
- .cfi_restore 13
- lw $t4, 136($sp)
- .cfi_restore 12
- lw $t3, 132($sp)
- .cfi_restore 11
- lw $t2, 128($sp)
- .cfi_restore 10
- lw $t1, 124($sp)
- .cfi_restore 9
- lw $t0, 120($sp)
- .cfi_restore 8
- lw $a3, 116($sp)
- .cfi_restore 7
- lw $a2, 112($sp)
- .cfi_restore 6
- lw $a1, 108($sp)
- .cfi_restore 5
- lw $a0, 104($sp)
- .cfi_restore 4
- lw $v1, 100($sp)
- .cfi_restore 3
-
- .ifnc \reg, $v0
- move \reg, $v0 # `reg` <- v0
- lw $v0, 96($sp)
- .cfi_restore 2
- .endif
-
- ldc1 $f18, 88($sp)
- ldc1 $f16, 80($sp)
- ldc1 $f14, 72($sp)
- ldc1 $f12, 64($sp)
- ldc1 $f10, 56($sp)
- ldc1 $f8, 48($sp)
- ldc1 $f6, 40($sp)
- ldc1 $f4, 32($sp)
- ldc1 $f2, 24($sp)
- ldc1 $f0, 16($sp)
-
- jalr $zero, $ra
- addiu $sp, $sp, 160
- .cfi_adjust_cfa_offset -160
-
-.Lret_forwarding_address\name:
- jalr $zero, $ra
- // Shift left by the forwarding address shift. This clears out the state bits since they are
- // in the top 2 bits of the lock word.
- sll \reg, $t9, LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT
-END \name
-.endm
-
-// Note that art_quick_read_barrier_mark_regXX corresponds to register XX+1.
-// ZERO (register 0) is reserved.
-// AT (register 1) is reserved as a temporary/scratch register.
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg01, $v0
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg02, $v1
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg03, $a0
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg04, $a1
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg05, $a2
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg06, $a3
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg07, $t0
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg08, $t1
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg09, $t2
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg10, $t3
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg11, $t4
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg12, $t5
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg13, $t6
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg14, $t7
-// S0 and S1 (registers 16 and 17) are reserved as suspended and thread registers.
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg17, $s2
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg18, $s3
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg19, $s4
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg20, $s5
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg21, $s6
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg22, $s7
-// T8 and T9 (registers 24 and 25) are reserved as temporary/scratch registers.
-// K0, K1, GP, SP (registers 26 - 29) are reserved.
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg29, $s8
-// RA (register 31) is reserved.
-
-// Caller code:
-// Short constant offset/index:
-// R2: | R6:
-// lw $t9, pReadBarrierMarkReg00
-// beqz $t9, skip_call | beqzc $t9, skip_call
-// addiu $t9, $t9, thunk_disp | nop
-// jalr $t9 | jialc $t9, thunk_disp
-// nop |
-// skip_call: | skip_call:
-// lw `out`, ofs(`obj`) | lw `out`, ofs(`obj`)
-// [subu `out`, $zero, `out`] | [subu `out`, $zero, `out`] # Unpoison reference.
-.macro BRB_FIELD_SHORT_OFFSET_ENTRY obj
-1:
- # Explicit null check. May be redundant (for array elements or when the field
- # offset is larger than the page size, 4KB).
- # $ra will be adjusted to point to lw's stack map when throwing NPE.
- beqz \obj, .Lintrospection_throw_npe
-#if defined(_MIPS_ARCH_MIPS32R6)
- lapc $gp, .Lintrospection_exits # $gp = address of .Lintrospection_exits.
-#else
- addiu $gp, $t9, (.Lintrospection_exits - 1b) # $gp = address of .Lintrospection_exits.
-#endif
- .set push
- .set noat
- lw $at, MIRROR_OBJECT_LOCK_WORD_OFFSET(\obj)
- sll $at, $at, 31 - LOCK_WORD_READ_BARRIER_STATE_SHIFT # Move barrier state bit
- # to sign bit.
- bltz $at, .Lintrospection_field_array # If gray, load reference, mark.
- move $t8, \obj # Move `obj` to $t8 for common code.
- .set pop
- jalr $zero, $ra # Otherwise, load-load barrier and return.
- sync
-.endm
-
-// Caller code (R2):
-// Long constant offset/index: | Variable index:
-// lw $t9, pReadBarrierMarkReg00
-// lui $t8, ofs_hi | sll $t8, `index`, 2
-// beqz $t9, skip_call | beqz $t9, skip_call
-// addiu $t9, $t9, thunk_disp | addiu $t9, $t9, thunk_disp
-// jalr $t9 | jalr $t9
-// skip_call: | skip_call:
-// addu $t8, $t8, `obj` | addu $t8, $t8, `obj`
-// lw `out`, ofs_lo($t8) | lw `out`, ofs($t8)
-// [subu `out`, $zero, `out`] | [subu `out`, $zero, `out`] # Unpoison reference.
-//
-// Caller code (R6):
-// Long constant offset/index: | Variable index:
-// lw $t9, pReadBarrierMarkReg00
-// beqz $t9, skip_call | beqz $t9, skip_call
-// aui $t8, `obj`, ofs_hi | lsa $t8, `index`, `obj`, 2
-// jialc $t9, thunk_disp | jialc $t9, thunk_disp
-// skip_call: | skip_call:
-// lw `out`, ofs_lo($t8) | lw `out`, ofs($t8)
-// [subu `out`, $zero, `out`] | [subu `out`, $zero, `out`] # Unpoison reference.
-.macro BRB_FIELD_LONG_OFFSET_ENTRY obj
-1:
- # No explicit null check for variable indices or large constant indices/offsets
- # as it must have been done earlier.
-#if defined(_MIPS_ARCH_MIPS32R6)
- lapc $gp, .Lintrospection_exits # $gp = address of .Lintrospection_exits.
-#else
- addiu $gp, $t9, (.Lintrospection_exits - 1b) # $gp = address of .Lintrospection_exits.
-#endif
- .set push
- .set noat
- lw $at, MIRROR_OBJECT_LOCK_WORD_OFFSET(\obj)
- sll $at, $at, 31 - LOCK_WORD_READ_BARRIER_STATE_SHIFT # Move barrier state bit
- # to sign bit.
- bltz $at, .Lintrospection_field_array # If gray, load reference, mark.
- nop
- .set pop
- jalr $zero, $ra # Otherwise, load-load barrier and return.
- sync
- break # Padding to 8 instructions.
-.endm
-
-.macro BRB_GC_ROOT_ENTRY root
-1:
-#if defined(_MIPS_ARCH_MIPS32R6)
- lapc $gp, .Lintrospection_exit_\root # $gp = exit point address.
-#else
- addiu $gp, $t9, (.Lintrospection_exit_\root - 1b) # $gp = exit point address.
-#endif
- bnez \root, .Lintrospection_common
- move $t8, \root # Move reference to $t8 for common code.
- jalr $zero, $ra # Return if null.
- # The next instruction (from the following BRB_GC_ROOT_ENTRY) fills the delay slot.
- # This instruction has no effect (actual NOP for the last entry; otherwise changes $gp,
- # which is unused after that anyway).
-.endm
-
-.macro BRB_FIELD_EXIT out
-.Lintrospection_exit_\out:
- jalr $zero, $ra
- move \out, $t8 # Return reference in expected register.
-.endm
-
-.macro BRB_FIELD_EXIT_BREAK
- break
- break
-.endm
-
-ENTRY_NO_GP art_quick_read_barrier_mark_introspection
- # Entry points for offsets/indices not fitting into int16_t and for variable indices.
- BRB_FIELD_LONG_OFFSET_ENTRY $v0
- BRB_FIELD_LONG_OFFSET_ENTRY $v1
- BRB_FIELD_LONG_OFFSET_ENTRY $a0
- BRB_FIELD_LONG_OFFSET_ENTRY $a1
- BRB_FIELD_LONG_OFFSET_ENTRY $a2
- BRB_FIELD_LONG_OFFSET_ENTRY $a3
- BRB_FIELD_LONG_OFFSET_ENTRY $t0
- BRB_FIELD_LONG_OFFSET_ENTRY $t1
- BRB_FIELD_LONG_OFFSET_ENTRY $t2
- BRB_FIELD_LONG_OFFSET_ENTRY $t3
- BRB_FIELD_LONG_OFFSET_ENTRY $t4
- BRB_FIELD_LONG_OFFSET_ENTRY $t5
- BRB_FIELD_LONG_OFFSET_ENTRY $t6
- BRB_FIELD_LONG_OFFSET_ENTRY $t7
- BRB_FIELD_LONG_OFFSET_ENTRY $s2
- BRB_FIELD_LONG_OFFSET_ENTRY $s3
- BRB_FIELD_LONG_OFFSET_ENTRY $s4
- BRB_FIELD_LONG_OFFSET_ENTRY $s5
- BRB_FIELD_LONG_OFFSET_ENTRY $s6
- BRB_FIELD_LONG_OFFSET_ENTRY $s7
- BRB_FIELD_LONG_OFFSET_ENTRY $s8
-
- # Entry points for offsets/indices fitting into int16_t.
- BRB_FIELD_SHORT_OFFSET_ENTRY $v0
- BRB_FIELD_SHORT_OFFSET_ENTRY $v1
- BRB_FIELD_SHORT_OFFSET_ENTRY $a0
- BRB_FIELD_SHORT_OFFSET_ENTRY $a1
- BRB_FIELD_SHORT_OFFSET_ENTRY $a2
- BRB_FIELD_SHORT_OFFSET_ENTRY $a3
- BRB_FIELD_SHORT_OFFSET_ENTRY $t0
- BRB_FIELD_SHORT_OFFSET_ENTRY $t1
- BRB_FIELD_SHORT_OFFSET_ENTRY $t2
- BRB_FIELD_SHORT_OFFSET_ENTRY $t3
- BRB_FIELD_SHORT_OFFSET_ENTRY $t4
- BRB_FIELD_SHORT_OFFSET_ENTRY $t5
- BRB_FIELD_SHORT_OFFSET_ENTRY $t6
- BRB_FIELD_SHORT_OFFSET_ENTRY $t7
- BRB_FIELD_SHORT_OFFSET_ENTRY $s2
- BRB_FIELD_SHORT_OFFSET_ENTRY $s3
- BRB_FIELD_SHORT_OFFSET_ENTRY $s4
- BRB_FIELD_SHORT_OFFSET_ENTRY $s5
- BRB_FIELD_SHORT_OFFSET_ENTRY $s6
- BRB_FIELD_SHORT_OFFSET_ENTRY $s7
- BRB_FIELD_SHORT_OFFSET_ENTRY $s8
-
- .global art_quick_read_barrier_mark_introspection_gc_roots
-art_quick_read_barrier_mark_introspection_gc_roots:
- # Entry points for GC roots.
- BRB_GC_ROOT_ENTRY $v0
- BRB_GC_ROOT_ENTRY $v1
- BRB_GC_ROOT_ENTRY $a0
- BRB_GC_ROOT_ENTRY $a1
- BRB_GC_ROOT_ENTRY $a2
- BRB_GC_ROOT_ENTRY $a3
- BRB_GC_ROOT_ENTRY $t0
- BRB_GC_ROOT_ENTRY $t1
- BRB_GC_ROOT_ENTRY $t2
- BRB_GC_ROOT_ENTRY $t3
- BRB_GC_ROOT_ENTRY $t4
- BRB_GC_ROOT_ENTRY $t5
- BRB_GC_ROOT_ENTRY $t6
- BRB_GC_ROOT_ENTRY $t7
- BRB_GC_ROOT_ENTRY $s2
- BRB_GC_ROOT_ENTRY $s3
- BRB_GC_ROOT_ENTRY $s4
- BRB_GC_ROOT_ENTRY $s5
- BRB_GC_ROOT_ENTRY $s6
- BRB_GC_ROOT_ENTRY $s7
- BRB_GC_ROOT_ENTRY $s8
- .global art_quick_read_barrier_mark_introspection_end_of_entries
-art_quick_read_barrier_mark_introspection_end_of_entries:
- nop # Fill the delay slot of the last BRB_GC_ROOT_ENTRY.
-
-.Lintrospection_throw_npe:
- b art_quick_throw_null_pointer_exception
- addiu $ra, $ra, 4 # Skip lw, make $ra point to lw's stack map.
-
- .set push
- .set noat
-
- // Fields and array elements.
-
-.Lintrospection_field_array:
- // Get the field/element address using $t8 and the offset from the lw instruction.
- lh $at, 0($ra) # $ra points to lw: $at = field/element offset.
- addiu $ra, $ra, 4 + HEAP_POISON_INSTR_SIZE # Skip lw(+subu).
- addu $t8, $t8, $at # $t8 = field/element address.
-
- // Calculate the address of the exit point, store it in $gp and load the reference into $t8.
- lb $at, (-HEAP_POISON_INSTR_SIZE - 2)($ra) # $ra-HEAP_POISON_INSTR_SIZE-4 points to
- # "lw `out`, ...".
- andi $at, $at, 31 # Extract `out` from lw.
- sll $at, $at, 3 # Multiply `out` by the exit point size (BRB_FIELD_EXIT* macros).
-
- lw $t8, 0($t8) # $t8 = reference.
- UNPOISON_HEAP_REF $t8
-
- // Return if null reference.
- bnez $t8, .Lintrospection_common
- addu $gp, $gp, $at # $gp = address of the exit point.
-
- // Early return through the exit point.
-.Lintrospection_return_early:
- jalr $zero, $gp # Move $t8 to `out` and return.
- nop
-
- // Code common for GC roots, fields and array elements.
-
-.Lintrospection_common:
- // Check lock word for mark bit, if marked return.
- lw $t9, MIRROR_OBJECT_LOCK_WORD_OFFSET($t8)
- sll $at, $t9, 31 - LOCK_WORD_MARK_BIT_SHIFT # Move mark bit to sign bit.
- bltz $at, .Lintrospection_return_early
-#if (LOCK_WORD_STATE_SHIFT != 30) || (LOCK_WORD_STATE_FORWARDING_ADDRESS != 3)
- // The below code depends on the lock word state being in the highest bits
- // and the "forwarding address" state having all bits set.
-#error "Unexpected lock word state shift or forwarding address state value."
-#endif
- // Test that both the forwarding state bits are 1.
- sll $at, $t9, 1
- and $at, $at, $t9 # Sign bit = 1 IFF both bits are 1.
- bgez $at, .Lintrospection_mark
- nop
-
- .set pop
-
- // Shift left by the forwarding address shift. This clears out the state bits since they are
- // in the top 2 bits of the lock word.
- jalr $zero, $gp # Move $t8 to `out` and return.
- sll $t8, $t9, LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT
-
-.Lintrospection_mark:
- // Partially set up the stack frame preserving only $ra.
- addiu $sp, $sp, -160 # Includes 16 bytes of space for argument registers $a0-$a3.
- .cfi_adjust_cfa_offset 160
- sw $ra, 156($sp)
- .cfi_rel_offset 31, 156
-
- // Set up $gp, clobbering $ra and using the branch delay slot for a useful instruction.
- bal 1f
- sw $gp, 152($sp) # Preserve the exit point address.
-1:
- .cpload $ra
-
- // Finalize the stack frame and call.
- sw $t7, 148($sp)
- .cfi_rel_offset 15, 148
- sw $t6, 144($sp)
- .cfi_rel_offset 14, 144
- sw $t5, 140($sp)
- .cfi_rel_offset 13, 140
- sw $t4, 136($sp)
- .cfi_rel_offset 12, 136
- sw $t3, 132($sp)
- .cfi_rel_offset 11, 132
- sw $t2, 128($sp)
- .cfi_rel_offset 10, 128
- sw $t1, 124($sp)
- .cfi_rel_offset 9, 124
- sw $t0, 120($sp)
- .cfi_rel_offset 8, 120
- sw $a3, 116($sp)
- .cfi_rel_offset 7, 116
- sw $a2, 112($sp)
- .cfi_rel_offset 6, 112
- sw $a1, 108($sp)
- .cfi_rel_offset 5, 108
- sw $a0, 104($sp)
- .cfi_rel_offset 4, 104
- sw $v1, 100($sp)
- .cfi_rel_offset 3, 100
- sw $v0, 96($sp)
- .cfi_rel_offset 2, 96
-
- la $t9, artReadBarrierMark
-
- sdc1 $f18, 88($sp)
- sdc1 $f16, 80($sp)
- sdc1 $f14, 72($sp)
- sdc1 $f12, 64($sp)
- sdc1 $f10, 56($sp)
- sdc1 $f8, 48($sp)
- sdc1 $f6, 40($sp)
- sdc1 $f4, 32($sp)
- sdc1 $f2, 24($sp)
- sdc1 $f0, 16($sp)
-
- jalr $t9 # $v0 <- artReadBarrierMark(reference)
- move $a0, $t8 # Pass reference in $a0.
- move $t8, $v0
-
- lw $ra, 156($sp)
- .cfi_restore 31
- lw $gp, 152($sp) # $gp = address of the exit point.
- lw $t7, 148($sp)
- .cfi_restore 15
- lw $t6, 144($sp)
- .cfi_restore 14
- lw $t5, 140($sp)
- .cfi_restore 13
- lw $t4, 136($sp)
- .cfi_restore 12
- lw $t3, 132($sp)
- .cfi_restore 11
- lw $t2, 128($sp)
- .cfi_restore 10
- lw $t1, 124($sp)
- .cfi_restore 9
- lw $t0, 120($sp)
- .cfi_restore 8
- lw $a3, 116($sp)
- .cfi_restore 7
- lw $a2, 112($sp)
- .cfi_restore 6
- lw $a1, 108($sp)
- .cfi_restore 5
- lw $a0, 104($sp)
- .cfi_restore 4
- lw $v1, 100($sp)
- .cfi_restore 3
- lw $v0, 96($sp)
- .cfi_restore 2
-
- ldc1 $f18, 88($sp)
- ldc1 $f16, 80($sp)
- ldc1 $f14, 72($sp)
- ldc1 $f12, 64($sp)
- ldc1 $f10, 56($sp)
- ldc1 $f8, 48($sp)
- ldc1 $f6, 40($sp)
- ldc1 $f4, 32($sp)
- ldc1 $f2, 24($sp)
- ldc1 $f0, 16($sp)
-
- // Return through the exit point.
- jalr $zero, $gp # Move $t8 to `out` and return.
- addiu $sp, $sp, 160
- .cfi_adjust_cfa_offset -160
-
-.Lintrospection_exits:
- BRB_FIELD_EXIT_BREAK
- BRB_FIELD_EXIT_BREAK
- BRB_FIELD_EXIT $v0
- BRB_FIELD_EXIT $v1
- BRB_FIELD_EXIT $a0
- BRB_FIELD_EXIT $a1
- BRB_FIELD_EXIT $a2
- BRB_FIELD_EXIT $a3
- BRB_FIELD_EXIT $t0
- BRB_FIELD_EXIT $t1
- BRB_FIELD_EXIT $t2
- BRB_FIELD_EXIT $t3
- BRB_FIELD_EXIT $t4
- BRB_FIELD_EXIT $t5
- BRB_FIELD_EXIT $t6
- BRB_FIELD_EXIT $t7
- BRB_FIELD_EXIT_BREAK
- BRB_FIELD_EXIT_BREAK
- BRB_FIELD_EXIT $s2
- BRB_FIELD_EXIT $s3
- BRB_FIELD_EXIT $s4
- BRB_FIELD_EXIT $s5
- BRB_FIELD_EXIT $s6
- BRB_FIELD_EXIT $s7
- BRB_FIELD_EXIT_BREAK
- BRB_FIELD_EXIT_BREAK
- BRB_FIELD_EXIT_BREAK
- BRB_FIELD_EXIT_BREAK
- BRB_FIELD_EXIT_BREAK
- BRB_FIELD_EXIT_BREAK
- BRB_FIELD_EXIT $s8
- BRB_FIELD_EXIT_BREAK
-END art_quick_read_barrier_mark_introspection
-
- /*
- * Polymorphic method invocation.
- * On entry:
- * a0 = unused
- * a1 = receiver
- */
-.extern artInvokePolymorphic
-ENTRY art_quick_invoke_polymorphic
- SETUP_SAVE_REFS_AND_ARGS_FRAME
- move $a0, $a1 # Make $a0 the receiver.
- move $a1, rSELF # Make $a1 an alias for the current Thread.
- la $t9, artInvokePolymorphic # Invoke artInvokePolymorphic
- jalr $t9 # with args (receiver, Thread*, context).
- addiu $a2, $sp, ARG_SLOT_SIZE # Make $a2 a pointer to the saved frame context.
- lw $t7, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
- RESTORE_SAVE_REFS_AND_ARGS_FRAME
- bnez $t7, 1f
- # don't care if $v0 and/or $v1 are modified, when exception branch taken
- MTD $v0, $v1, $f0, $f1 # move float value to return value
- jalr $zero, $ra
- nop
-1:
- DELIVER_PENDING_EXCEPTION
-END art_quick_invoke_polymorphic
-
- /*
- * InvokeCustom invocation.
- * On entry:
- * a0 = call_site_idx
- */
-.extern artInvokeCustom
-ENTRY art_quick_invoke_custom
- SETUP_SAVE_REFS_AND_ARGS_FRAME
- move $a1, rSELF # Make $a1 an alias for the current Thread.
- la $t9, artInvokeCustom # Invoke artInvokeCustom
- jalr $t9 # with args (call_site_idx, Thread*, context).
- addiu $a2, $sp, ARG_SLOT_SIZE # Make $a2 a pointer to the saved frame context.
- lw $t7, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
- RESTORE_SAVE_REFS_AND_ARGS_FRAME
- bnez $t7, 1f
- # don't care if $v0 and/or $v1 are modified, when exception branch taken
- MTD $v0, $v1, $f0, $f1 # move float value to return value
- jalr $zero, $ra
- nop
-END art_quick_invoke_custom
diff --git a/runtime/arch/mips/registers_mips.cc b/runtime/arch/mips/registers_mips.cc
deleted file mode 100644
index 92c2746..0000000
--- a/runtime/arch/mips/registers_mips.cc
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "registers_mips.h"
-
-#include <ostream>
-
-namespace art {
-namespace mips {
-
-static const char* kRegisterNames[] = {
- "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3",
- "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7",
- "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
- "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra",
-};
-std::ostream& operator<<(std::ostream& os, const Register& rhs) {
- if (rhs >= ZERO && rhs <= RA) {
- os << kRegisterNames[rhs];
- } else {
- os << "Register[" << static_cast<int>(rhs) << "]";
- }
- return os;
-}
-
-std::ostream& operator<<(std::ostream& os, const FRegister& rhs) {
- if (rhs >= F0 && rhs < kNumberOfFRegisters) {
- os << "f" << static_cast<int>(rhs);
- } else {
- os << "FRegister[" << static_cast<int>(rhs) << "]";
- }
- return os;
-}
-
-std::ostream& operator<<(std::ostream& os, const VectorRegister& rhs) {
- if (rhs >= W0 && rhs < kNumberOfVectorRegisters) {
- os << "w" << static_cast<int>(rhs);
- } else {
- os << "VectorRegister[" << static_cast<int>(rhs) << "]";
- }
- return os;
-}
-
-} // namespace mips
-} // namespace art
diff --git a/runtime/arch/mips/registers_mips.h b/runtime/arch/mips/registers_mips.h
deleted file mode 100644
index 4900e41..0000000
--- a/runtime/arch/mips/registers_mips.h
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ARCH_MIPS_REGISTERS_MIPS_H_
-#define ART_RUNTIME_ARCH_MIPS_REGISTERS_MIPS_H_
-
-#include <iosfwd>
-
-#include "base/macros.h"
-
-namespace art {
-namespace mips {
-
-enum Register {
- ZERO = 0,
- AT = 1, // Assembler temporary.
- V0 = 2, // Values.
- V1 = 3,
- A0 = 4, // Arguments.
- A1 = 5,
- A2 = 6,
- A3 = 7,
- T0 = 8, // Two extra arguments / temporaries.
- T1 = 9,
- T2 = 10, // Temporaries.
- T3 = 11,
- T4 = 12,
- T5 = 13,
- T6 = 14,
- T7 = 15,
- S0 = 16, // Saved values.
- S1 = 17,
- S2 = 18,
- S3 = 19,
- S4 = 20,
- S5 = 21,
- S6 = 22,
- S7 = 23,
- T8 = 24, // More temporaries.
- T9 = 25,
- K0 = 26, // Reserved for trap handler.
- K1 = 27,
- GP = 28, // Global pointer.
- SP = 29, // Stack pointer.
- FP = 30, // Saved value/frame pointer.
- RA = 31, // Return address.
- TR = S1, // ART Thread Register
- TMP = T8, // scratch register (in addition to AT)
- kNumberOfCoreRegisters = 32,
- kNoRegister = -1 // Signals an illegal register.
-};
-std::ostream& operator<<(std::ostream& os, const Register& rhs);
-
-// Values for single-precision floating point registers.
-enum FRegister {
- F0 = 0,
- F1 = 1,
- F2 = 2,
- F3 = 3,
- F4 = 4,
- F5 = 5,
- F6 = 6,
- F7 = 7,
- F8 = 8,
- F9 = 9,
- F10 = 10,
- F11 = 11,
- F12 = 12,
- F13 = 13,
- F14 = 14,
- F15 = 15,
- F16 = 16,
- F17 = 17,
- F18 = 18,
- F19 = 19,
- F20 = 20,
- F21 = 21,
- F22 = 22,
- F23 = 23,
- F24 = 24,
- F25 = 25,
- F26 = 26,
- F27 = 27,
- F28 = 28,
- F29 = 29,
- F30 = 30,
- F31 = 31,
- FTMP = F6, // scratch register
- FTMP2 = F7, // scratch register (in addition to FTMP, reserved for MSA instructions)
- kNumberOfFRegisters = 32,
- kNoFRegister = -1,
-};
-std::ostream& operator<<(std::ostream& os, const FRegister& rhs);
-
-// Values for vector registers.
-enum VectorRegister {
- W0 = 0,
- W1 = 1,
- W2 = 2,
- W3 = 3,
- W4 = 4,
- W5 = 5,
- W6 = 6,
- W7 = 7,
- W8 = 8,
- W9 = 9,
- W10 = 10,
- W11 = 11,
- W12 = 12,
- W13 = 13,
- W14 = 14,
- W15 = 15,
- W16 = 16,
- W17 = 17,
- W18 = 18,
- W19 = 19,
- W20 = 20,
- W21 = 21,
- W22 = 22,
- W23 = 23,
- W24 = 24,
- W25 = 25,
- W26 = 26,
- W27 = 27,
- W28 = 28,
- W29 = 29,
- W30 = 30,
- W31 = 31,
- kNumberOfVectorRegisters = 32,
- kNoVectorRegister = -1,
-};
-std::ostream& operator<<(std::ostream& os, const VectorRegister& rhs);
-
-} // namespace mips
-} // namespace art
-
-#endif // ART_RUNTIME_ARCH_MIPS_REGISTERS_MIPS_H_
diff --git a/runtime/arch/mips/thread_mips.cc b/runtime/arch/mips/thread_mips.cc
deleted file mode 100644
index 0be7a7f..0000000
--- a/runtime/arch/mips/thread_mips.cc
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "thread.h"
-
-#include <android-base/logging.h>
-
-#include "asm_support_mips.h"
-#include "base/enums.h"
-
-namespace art {
-
-void Thread::InitCpu() {
- CHECK_EQ(THREAD_FLAGS_OFFSET, ThreadFlagsOffset<PointerSize::k32>().Int32Value());
- CHECK_EQ(THREAD_CARD_TABLE_OFFSET, CardTableOffset<PointerSize::k32>().Int32Value());
- CHECK_EQ(THREAD_EXCEPTION_OFFSET, ExceptionOffset<PointerSize::k32>().Int32Value());
-}
-
-void Thread::CleanupCpu() {
- // Do nothing.
-}
-
-} // namespace art
diff --git a/runtime/arch/mips64/asm_support_mips64.S b/runtime/arch/mips64/asm_support_mips64.S
deleted file mode 100644
index a6b249a..0000000
--- a/runtime/arch/mips64/asm_support_mips64.S
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ARCH_MIPS64_ASM_SUPPORT_MIPS64_S_
-#define ART_RUNTIME_ARCH_MIPS64_ASM_SUPPORT_MIPS64_S_
-
-#include "asm_support_mips64.h"
-
-// Define special registers.
-
-// Register holding suspend check count down.
-#define rSUSPEND $s0
-// Register holding Thread::Current().
-#define rSELF $s1
-
-
- // Declare a function called name, doesn't set up $gp.
-.macro ENTRY_NO_GP_CUSTOM_CFA name, cfa_offset
- .type \name, %function
- .global \name
- // Cache alignment for function entry.
- .balign 16
-\name:
- .cfi_startproc
- // Ensure we get a sane starting CFA.
- .cfi_def_cfa $sp, \cfa_offset
-.endm
-
- // Declare a function called name, doesn't set up $gp.
-.macro ENTRY_NO_GP name
- ENTRY_NO_GP_CUSTOM_CFA \name, 0
-.endm
-
- // Declare a function called name, sets up $gp.
- // This macro modifies t8.
-.macro ENTRY name
- ENTRY_NO_GP \name
- // Set up $gp and store the previous $gp value to $t8. It will be pushed to the
- // stack after the frame has been constructed.
- .cpsetup $t9, $t8, \name
- // Declare a local convenience label to be branched to when $gp is already set up.
-.L\name\()_gp_set:
-.endm
-
-.macro END name
- .cfi_endproc
- .size \name, .-\name
-.endm
-
-.macro UNIMPLEMENTED name
- ENTRY \name
- break
- break
- END \name
-.endm
-
-// Macros to poison (negate) the reference for heap poisoning.
-.macro POISON_HEAP_REF rRef
-#ifdef USE_HEAP_POISONING
- dsubu \rRef, $zero, \rRef
- dext \rRef, \rRef, 0, 32
-#endif // USE_HEAP_POISONING
-.endm
-
-// Macros to unpoison (negate) the reference for heap poisoning.
-.macro UNPOISON_HEAP_REF rRef
-#ifdef USE_HEAP_POISONING
- dsubu \rRef, $zero, \rRef
- dext \rRef, \rRef, 0, 32
-#endif // USE_HEAP_POISONING
-.endm
-
-// Byte size of the instructions (un)poisoning heap references.
-#ifdef USE_HEAP_POISONING
-#define HEAP_POISON_INSTR_SIZE 8
-#else
-#define HEAP_POISON_INSTR_SIZE 0
-#endif // USE_HEAP_POISONING
-
-// Based on contents of creg select the minimum integer
-// At the end of the macro the original value of creg is lost
-.macro MINint dreg,rreg,sreg,creg
- .set push
- .set noat
- .ifc \dreg, \rreg
- selnez \dreg, \rreg, \creg
- seleqz \creg, \sreg, \creg
- .else
- seleqz \dreg, \sreg, \creg
- selnez \creg, \rreg, \creg
- .endif
- or \dreg, \dreg, \creg
- .set pop
-.endm
-
-// Find minimum of two signed registers
-.macro MINs dreg,rreg,sreg
- .set push
- .set noat
- slt $at, \rreg, \sreg
- MINint \dreg, \rreg, \sreg, $at
- .set pop
-.endm
-
-// Find minimum of two unsigned registers
-.macro MINu dreg,rreg,sreg
- .set push
- .set noat
- sltu $at, \rreg, \sreg
- MINint \dreg, \rreg, \sreg, $at
- .set pop
-.endm
-
-#endif // ART_RUNTIME_ARCH_MIPS64_ASM_SUPPORT_MIPS64_S_
diff --git a/runtime/arch/mips64/asm_support_mips64.h b/runtime/arch/mips64/asm_support_mips64.h
deleted file mode 100644
index a8e907e..0000000
--- a/runtime/arch/mips64/asm_support_mips64.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ARCH_MIPS64_ASM_SUPPORT_MIPS64_H_
-#define ART_RUNTIME_ARCH_MIPS64_ASM_SUPPORT_MIPS64_H_
-
-#include "asm_support.h"
-
-// 64 ($f24-$f31) + 64 ($s0-$s7) + 8 ($gp) + 8 ($s8) + 8 ($ra) + 1x8 bytes padding
-#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVES 160
-// 48 ($s2-$s7) + 8 ($gp) + 8 ($s8) + 8 ($ra) + 1x8 bytes padding
-#define FRAME_SIZE_SAVE_REFS_ONLY 80
-// $f12-$f19, $a1-$a7, $s2-$s7 + $gp + $s8 + $ra, 16 total + 1x8 bytes padding + method*
-#define FRAME_SIZE_SAVE_REFS_AND_ARGS 208
-// $f0-$f31, $at, $v0-$v1, $a0-$a7, $t0-$t3, $s0-$s7, $t8-$t9, $gp, $s8, $ra + padding + method*
-#define FRAME_SIZE_SAVE_EVERYTHING 496
-#define FRAME_SIZE_SAVE_EVERYTHING_FOR_CLINIT FRAME_SIZE_SAVE_EVERYTHING
-#define FRAME_SIZE_SAVE_EVERYTHING_FOR_SUSPEND_CHECK FRAME_SIZE_SAVE_EVERYTHING
-
-// &art_quick_read_barrier_mark_introspection is the first of many entry points:
-// 20 entry points for long field offsets, large array indices and variable array indices
-// (see macro BRB_FIELD_LONG_OFFSET_ENTRY)
-// 20 entry points for short field offsets and small array indices
-// (see macro BRB_FIELD_SHORT_OFFSET_ENTRY)
-// 20 entry points for GC roots
-// (see macro BRB_GC_ROOT_ENTRY)
-
-// There are as many entry points of each kind as there are registers that
-// can hold a reference: V0-V1, A0-A7, T0-T2, S2-S8.
-#define BAKER_MARK_INTROSPECTION_REGISTER_COUNT 20
-
-#define BAKER_MARK_INTROSPECTION_FIELD_ARRAY_ENTRY_SIZE (8 * 4) // 8 instructions in
- // BRB_FIELD_*_OFFSET_ENTRY.
-
-#define BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRIES_OFFSET \
- (2 * BAKER_MARK_INTROSPECTION_REGISTER_COUNT * BAKER_MARK_INTROSPECTION_FIELD_ARRAY_ENTRY_SIZE)
-
-#define BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRY_SIZE (4 * 4) // 4 instructions in BRB_GC_ROOT_ENTRY.
-
-#endif // ART_RUNTIME_ARCH_MIPS64_ASM_SUPPORT_MIPS64_H_
diff --git a/runtime/arch/mips64/callee_save_frame_mips64.h b/runtime/arch/mips64/callee_save_frame_mips64.h
deleted file mode 100644
index 64d6bec..0000000
--- a/runtime/arch/mips64/callee_save_frame_mips64.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ARCH_MIPS64_CALLEE_SAVE_FRAME_MIPS64_H_
-#define ART_RUNTIME_ARCH_MIPS64_CALLEE_SAVE_FRAME_MIPS64_H_
-
-#include "arch/instruction_set.h"
-#include "base/bit_utils.h"
-#include "base/callee_save_type.h"
-#include "base/enums.h"
-#include "quick/quick_method_frame_info.h"
-#include "registers_mips64.h"
-#include "runtime_globals.h"
-
-namespace art {
-namespace mips64 {
-
-static constexpr uint32_t kMips64CalleeSaveAlwaysSpills =
- (1 << art::mips64::RA);
-static constexpr uint32_t kMips64CalleeSaveRefSpills =
- (1 << art::mips64::S2) | (1 << art::mips64::S3) | (1 << art::mips64::S4) |
- (1 << art::mips64::S5) | (1 << art::mips64::S6) | (1 << art::mips64::S7) |
- (1 << art::mips64::GP) | (1 << art::mips64::S8);
-static constexpr uint32_t kMips64CalleeSaveArgSpills =
- (1 << art::mips64::A1) | (1 << art::mips64::A2) | (1 << art::mips64::A3) |
- (1 << art::mips64::A4) | (1 << art::mips64::A5) | (1 << art::mips64::A6) |
- (1 << art::mips64::A7);
-static constexpr uint32_t kMips64CalleeSaveAllSpills =
- (1 << art::mips64::S0) | (1 << art::mips64::S1);
-static constexpr uint32_t kMips64CalleeSaveEverythingSpills =
- (1 << art::mips64::AT) | (1 << art::mips64::V0) | (1 << art::mips64::V1) |
- (1 << art::mips64::A0) | (1 << art::mips64::A1) | (1 << art::mips64::A2) |
- (1 << art::mips64::A3) | (1 << art::mips64::A4) | (1 << art::mips64::A5) |
- (1 << art::mips64::A6) | (1 << art::mips64::A7) | (1 << art::mips64::T0) |
- (1 << art::mips64::T1) | (1 << art::mips64::T2) | (1 << art::mips64::T3) |
- (1 << art::mips64::S0) | (1 << art::mips64::S1) | (1 << art::mips64::T8) |
- (1 << art::mips64::T9);
-
-static constexpr uint32_t kMips64CalleeSaveFpRefSpills = 0;
-static constexpr uint32_t kMips64CalleeSaveFpArgSpills =
- (1 << art::mips64::F12) | (1 << art::mips64::F13) | (1 << art::mips64::F14) |
- (1 << art::mips64::F15) | (1 << art::mips64::F16) | (1 << art::mips64::F17) |
- (1 << art::mips64::F18) | (1 << art::mips64::F19);
-// F12 should not be necessary to spill, as A0 is always in use.
-static constexpr uint32_t kMips64CalleeSaveFpAllSpills =
- (1 << art::mips64::F24) | (1 << art::mips64::F25) | (1 << art::mips64::F26) |
- (1 << art::mips64::F27) | (1 << art::mips64::F28) | (1 << art::mips64::F29) |
- (1 << art::mips64::F30) | (1 << art::mips64::F31);
-static constexpr uint32_t kMips64CalleeSaveFpEverythingSpills =
- (1 << art::mips64::F0) | (1 << art::mips64::F1) | (1 << art::mips64::F2) |
- (1 << art::mips64::F3) | (1 << art::mips64::F4) | (1 << art::mips64::F5) |
- (1 << art::mips64::F6) | (1 << art::mips64::F7) | (1 << art::mips64::F8) |
- (1 << art::mips64::F9) | (1 << art::mips64::F10) | (1 << art::mips64::F11) |
- (1 << art::mips64::F12) | (1 << art::mips64::F13) | (1 << art::mips64::F14) |
- (1 << art::mips64::F15) | (1 << art::mips64::F16) | (1 << art::mips64::F17) |
- (1 << art::mips64::F18) | (1 << art::mips64::F19) | (1 << art::mips64::F20) |
- (1 << art::mips64::F21) | (1 << art::mips64::F22) | (1 << art::mips64::F23) |
- (1 << art::mips64::F24) | (1 << art::mips64::F25) | (1 << art::mips64::F26) |
- (1 << art::mips64::F27) | (1 << art::mips64::F28) | (1 << art::mips64::F29) |
- (1 << art::mips64::F30) | (1 << art::mips64::F31);
-
-class Mips64CalleeSaveFrame {
- public:
- static constexpr uint32_t GetCoreSpills(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return kMips64CalleeSaveAlwaysSpills | kMips64CalleeSaveRefSpills |
- (type == CalleeSaveType::kSaveRefsAndArgs ? kMips64CalleeSaveArgSpills : 0) |
- (type == CalleeSaveType::kSaveAllCalleeSaves ? kMips64CalleeSaveAllSpills : 0) |
- (type == CalleeSaveType::kSaveEverything ? kMips64CalleeSaveEverythingSpills : 0);
- }
-
- static constexpr uint32_t GetFpSpills(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return kMips64CalleeSaveFpRefSpills |
- (type == CalleeSaveType::kSaveRefsAndArgs ? kMips64CalleeSaveFpArgSpills : 0) |
- (type == CalleeSaveType::kSaveAllCalleeSaves ? kMips64CalleeSaveFpAllSpills : 0) |
- (type == CalleeSaveType::kSaveEverything ? kMips64CalleeSaveFpEverythingSpills : 0);
- }
-
- static constexpr uint32_t GetFrameSize(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return RoundUp((POPCOUNT(GetCoreSpills(type)) /* gprs */ +
- POPCOUNT(GetFpSpills(type)) /* fprs */ +
- + 1 /* Method* */) * static_cast<size_t>(kMips64PointerSize), kStackAlignment);
- }
-
- static constexpr QuickMethodFrameInfo GetMethodFrameInfo(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return QuickMethodFrameInfo(GetFrameSize(type), GetCoreSpills(type), GetFpSpills(type));
- }
-
- static constexpr size_t GetFpr1Offset(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return GetFrameSize(type) -
- (POPCOUNT(GetCoreSpills(type)) +
- POPCOUNT(GetFpSpills(type))) * static_cast<size_t>(kMips64PointerSize);
- }
-
- static constexpr size_t GetGpr1Offset(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return GetFrameSize(type) -
- POPCOUNT(GetCoreSpills(type)) * static_cast<size_t>(kMips64PointerSize);
- }
-
- static constexpr size_t GetReturnPcOffset(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return GetFrameSize(type) - static_cast<size_t>(kMips64PointerSize);
- }
-};
-
-} // namespace mips64
-} // namespace art
-
-#endif // ART_RUNTIME_ARCH_MIPS64_CALLEE_SAVE_FRAME_MIPS64_H_
diff --git a/runtime/arch/mips64/context_mips64.cc b/runtime/arch/mips64/context_mips64.cc
deleted file mode 100644
index b14908f..0000000
--- a/runtime/arch/mips64/context_mips64.cc
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "context_mips64.h"
-
-#include "base/bit_utils.h"
-#include "base/bit_utils_iterator.h"
-#include "quick/quick_method_frame_info.h"
-
-namespace art {
-namespace mips64 {
-
-static constexpr uintptr_t gZero = 0;
-
-void Mips64Context::Reset() {
- std::fill_n(gprs_, arraysize(gprs_), nullptr);
- std::fill_n(fprs_, arraysize(fprs_), nullptr);
- gprs_[SP] = &sp_;
- gprs_[T9] = &t9_;
- gprs_[A0] = &arg0_;
- // Initialize registers with easy to spot debug values.
- sp_ = Mips64Context::kBadGprBase + SP;
- t9_ = Mips64Context::kBadGprBase + T9;
- arg0_ = 0;
-}
-
-void Mips64Context::FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& frame_info) {
- int spill_pos = 0;
-
- // Core registers come first, from the highest down to the lowest.
- for (uint32_t core_reg : HighToLowBits(frame_info.CoreSpillMask())) {
- gprs_[core_reg] = CalleeSaveAddress(frame, spill_pos, frame_info.FrameSizeInBytes());
- ++spill_pos;
- }
- DCHECK_EQ(spill_pos, POPCOUNT(frame_info.CoreSpillMask()));
-
- // FP registers come second, from the highest down to the lowest.
- for (uint32_t fp_reg : HighToLowBits(frame_info.FpSpillMask())) {
- fprs_[fp_reg] = CalleeSaveAddress(frame, spill_pos, frame_info.FrameSizeInBytes());
- ++spill_pos;
- }
- DCHECK_EQ(spill_pos, POPCOUNT(frame_info.CoreSpillMask()) + POPCOUNT(frame_info.FpSpillMask()));
-}
-
-void Mips64Context::SetGPR(uint32_t reg, uintptr_t value) {
- CHECK_LT(reg, static_cast<uint32_t>(kNumberOfGpuRegisters));
- DCHECK(IsAccessibleGPR(reg));
- CHECK_NE(gprs_[reg], &gZero); // Can't overwrite this static value since they are never reset.
- *gprs_[reg] = value;
-}
-
-void Mips64Context::SetFPR(uint32_t reg, uintptr_t value) {
- CHECK_LT(reg, static_cast<uint32_t>(kNumberOfFpuRegisters));
- DCHECK(IsAccessibleFPR(reg));
- CHECK_NE(fprs_[reg], &gZero); // Can't overwrite this static value since they are never reset.
- *fprs_[reg] = value;
-}
-
-void Mips64Context::SmashCallerSaves() {
- // This needs to be 0 because we want a null/zero return value.
- gprs_[V0] = const_cast<uintptr_t*>(&gZero);
- gprs_[V1] = const_cast<uintptr_t*>(&gZero);
- gprs_[A1] = nullptr;
- gprs_[A0] = nullptr;
- gprs_[A2] = nullptr;
- gprs_[A3] = nullptr;
- gprs_[A4] = nullptr;
- gprs_[A5] = nullptr;
- gprs_[A6] = nullptr;
- gprs_[A7] = nullptr;
-
- // f0-f23 are caller-saved; f24-f31 are callee-saved.
- fprs_[F0] = nullptr;
- fprs_[F1] = nullptr;
- fprs_[F2] = nullptr;
- fprs_[F3] = nullptr;
- fprs_[F4] = nullptr;
- fprs_[F5] = nullptr;
- fprs_[F6] = nullptr;
- fprs_[F7] = nullptr;
- fprs_[F8] = nullptr;
- fprs_[F9] = nullptr;
- fprs_[F10] = nullptr;
- fprs_[F11] = nullptr;
- fprs_[F12] = nullptr;
- fprs_[F13] = nullptr;
- fprs_[F14] = nullptr;
- fprs_[F15] = nullptr;
- fprs_[F16] = nullptr;
- fprs_[F17] = nullptr;
- fprs_[F18] = nullptr;
- fprs_[F19] = nullptr;
- fprs_[F20] = nullptr;
- fprs_[F21] = nullptr;
- fprs_[F22] = nullptr;
- fprs_[F23] = nullptr;
-}
-
-extern "C" NO_RETURN void art_quick_do_long_jump(uintptr_t*, uintptr_t*);
-
-void Mips64Context::DoLongJump() {
- uintptr_t gprs[kNumberOfGpuRegisters];
- uintptr_t fprs[kNumberOfFpuRegisters];
- for (size_t i = 0; i < kNumberOfGpuRegisters; ++i) {
- gprs[i] = gprs_[i] != nullptr ? *gprs_[i] : Mips64Context::kBadGprBase + i;
- }
- for (size_t i = 0; i < kNumberOfFpuRegisters; ++i) {
- fprs[i] = fprs_[i] != nullptr ? *fprs_[i] : Mips64Context::kBadFprBase + i;
- }
- art_quick_do_long_jump(gprs, fprs);
-}
-
-} // namespace mips64
-} // namespace art
diff --git a/runtime/arch/mips64/context_mips64.h b/runtime/arch/mips64/context_mips64.h
deleted file mode 100644
index 857abfd..0000000
--- a/runtime/arch/mips64/context_mips64.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ARCH_MIPS64_CONTEXT_MIPS64_H_
-#define ART_RUNTIME_ARCH_MIPS64_CONTEXT_MIPS64_H_
-
-#include <android-base/logging.h>
-
-#include "arch/context.h"
-#include "base/macros.h"
-#include "registers_mips64.h"
-
-namespace art {
-namespace mips64 {
-
-class Mips64Context : public Context {
- public:
- Mips64Context() {
- Reset();
- }
- virtual ~Mips64Context() {}
-
- void Reset() override;
-
- void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) override;
-
- void SetSP(uintptr_t new_sp) override {
- SetGPR(SP, new_sp);
- }
-
- void SetPC(uintptr_t new_pc) override {
- SetGPR(T9, new_pc);
- }
-
- bool IsAccessibleGPR(uint32_t reg) override {
- DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfGpuRegisters));
- return gprs_[reg] != nullptr;
- }
-
- uintptr_t* GetGPRAddress(uint32_t reg) override {
- DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfGpuRegisters));
- return gprs_[reg];
- }
-
- uintptr_t GetGPR(uint32_t reg) override {
- CHECK_LT(reg, static_cast<uint32_t>(kNumberOfGpuRegisters));
- DCHECK(IsAccessibleGPR(reg));
- return *gprs_[reg];
- }
-
- void SetGPR(uint32_t reg, uintptr_t value) override;
-
- bool IsAccessibleFPR(uint32_t reg) override {
- CHECK_LT(reg, static_cast<uint32_t>(kNumberOfFpuRegisters));
- return fprs_[reg] != nullptr;
- }
-
- uintptr_t GetFPR(uint32_t reg) override {
- CHECK_LT(reg, static_cast<uint32_t>(kNumberOfFpuRegisters));
- DCHECK(IsAccessibleFPR(reg));
- return *fprs_[reg];
- }
-
- void SetFPR(uint32_t reg, uintptr_t value) override;
-
- void SmashCallerSaves() override;
- NO_RETURN void DoLongJump() override;
-
- void SetArg0(uintptr_t new_arg0_value) override {
- SetGPR(A0, new_arg0_value);
- }
-
- private:
- // Pointers to registers in the stack, initialized to null except for the special cases below.
- uintptr_t* gprs_[kNumberOfGpuRegisters];
- uint64_t* fprs_[kNumberOfFpuRegisters];
- // Hold values for sp and t9 if they are not located within a stack frame. We use t9 for the
- // PC (as ra is required to be valid for single-frame deopt and must not be clobbered). We
- // also need the first argument for single-frame deopt.
- uintptr_t sp_, t9_, arg0_;
-};
-
-} // namespace mips64
-} // namespace art
-
-#endif // ART_RUNTIME_ARCH_MIPS64_CONTEXT_MIPS64_H_
diff --git a/runtime/arch/mips64/entrypoints_init_mips64.cc b/runtime/arch/mips64/entrypoints_init_mips64.cc
deleted file mode 100644
index 741d41a..0000000
--- a/runtime/arch/mips64/entrypoints_init_mips64.cc
+++ /dev/null
@@ -1,211 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <math.h>
-#include <string.h>
-
-#include "arch/mips64/asm_support_mips64.h"
-#include "base/atomic.h"
-#include "base/quasi_atomic.h"
-#include "entrypoints/entrypoint_utils.h"
-#include "entrypoints/jni/jni_entrypoints.h"
-#include "entrypoints/math_entrypoints.h"
-#include "entrypoints/quick/quick_alloc_entrypoints.h"
-#include "entrypoints/quick/quick_default_externs.h"
-#include "entrypoints/quick/quick_default_init_entrypoints.h"
-#include "entrypoints/quick/quick_entrypoints.h"
-#include "entrypoints/runtime_asm_entrypoints.h"
-#include "interpreter/interpreter.h"
-
-namespace art {
-
-// Cast entrypoints.
-extern "C" size_t artInstanceOfFromCode(mirror::Object* obj, mirror::Class* ref_class);
-
-// Read barrier entrypoints.
-// art_quick_read_barrier_mark_regXX uses a non-standard calling
-// convention: it expects its input in register XX+1 and returns its
-// result in that same register, and saves and restores all
-// caller-save registers.
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg01(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg02(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg03(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg04(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg05(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg06(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg07(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg08(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg09(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg10(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg11(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg12(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg13(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg17(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg18(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg19(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg20(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg21(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg22(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg29(mirror::Object*);
-
-extern "C" mirror::Object* art_quick_read_barrier_mark_introspection(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_introspection_gc_roots(mirror::Object*);
-extern "C" void art_quick_read_barrier_mark_introspection_end_of_entries(void);
-
-// Math entrypoints.
-extern int32_t CmpgDouble(double a, double b);
-extern int32_t CmplDouble(double a, double b);
-extern int32_t CmpgFloat(float a, float b);
-extern int32_t CmplFloat(float a, float b);
-extern "C" int64_t artLmul(int64_t a, int64_t b);
-extern "C" int64_t artLdiv(int64_t a, int64_t b);
-extern "C" int64_t artLmod(int64_t a, int64_t b);
-
-// Math conversions.
-extern "C" int32_t __fixsfsi(float op1); // FLOAT_TO_INT
-extern "C" int32_t __fixdfsi(double op1); // DOUBLE_TO_INT
-extern "C" float __floatdisf(int64_t op1); // LONG_TO_FLOAT
-extern "C" double __floatdidf(int64_t op1); // LONG_TO_DOUBLE
-extern "C" int64_t __fixsfdi(float op1); // FLOAT_TO_LONG
-extern "C" int64_t __fixdfdi(double op1); // DOUBLE_TO_LONG
-
-// Single-precision FP arithmetics.
-extern "C" float fmodf(float a, float b); // REM_FLOAT[_2ADDR]
-
-// Double-precision FP arithmetics.
-extern "C" double fmod(double a, double b); // REM_DOUBLE[_2ADDR]
-
-// Long long arithmetics - REM_LONG[_2ADDR] and DIV_LONG[_2ADDR]
-extern "C" int64_t __divdi3(int64_t, int64_t);
-extern "C" int64_t __moddi3(int64_t, int64_t);
-
-// No read barrier entrypoints for marking registers.
-void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints, bool is_active) {
- intptr_t introspection_field_array_entries_size =
- reinterpret_cast<intptr_t>(&art_quick_read_barrier_mark_introspection_gc_roots) -
- reinterpret_cast<intptr_t>(&art_quick_read_barrier_mark_introspection);
- static_assert(
- BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRIES_OFFSET == 2 *
- BAKER_MARK_INTROSPECTION_REGISTER_COUNT * BAKER_MARK_INTROSPECTION_FIELD_ARRAY_ENTRY_SIZE,
- "Expecting equal");
- DCHECK_EQ(introspection_field_array_entries_size,
- BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRIES_OFFSET);
- intptr_t introspection_gc_root_entries_size =
- reinterpret_cast<intptr_t>(&art_quick_read_barrier_mark_introspection_end_of_entries) -
- reinterpret_cast<intptr_t>(&art_quick_read_barrier_mark_introspection_gc_roots);
- DCHECK_EQ(introspection_gc_root_entries_size,
- BAKER_MARK_INTROSPECTION_REGISTER_COUNT * BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRY_SIZE);
- qpoints->pReadBarrierMarkReg00 = is_active ? art_quick_read_barrier_mark_introspection : nullptr;
- qpoints->pReadBarrierMarkReg01 = is_active ? art_quick_read_barrier_mark_reg01 : nullptr;
- qpoints->pReadBarrierMarkReg02 = is_active ? art_quick_read_barrier_mark_reg02 : nullptr;
- qpoints->pReadBarrierMarkReg03 = is_active ? art_quick_read_barrier_mark_reg03 : nullptr;
- qpoints->pReadBarrierMarkReg04 = is_active ? art_quick_read_barrier_mark_reg04 : nullptr;
- qpoints->pReadBarrierMarkReg05 = is_active ? art_quick_read_barrier_mark_reg05 : nullptr;
- qpoints->pReadBarrierMarkReg06 = is_active ? art_quick_read_barrier_mark_reg06 : nullptr;
- qpoints->pReadBarrierMarkReg07 = is_active ? art_quick_read_barrier_mark_reg07 : nullptr;
- qpoints->pReadBarrierMarkReg08 = is_active ? art_quick_read_barrier_mark_reg08 : nullptr;
- qpoints->pReadBarrierMarkReg09 = is_active ? art_quick_read_barrier_mark_reg09 : nullptr;
- qpoints->pReadBarrierMarkReg10 = is_active ? art_quick_read_barrier_mark_reg10 : nullptr;
- qpoints->pReadBarrierMarkReg11 = is_active ? art_quick_read_barrier_mark_reg11 : nullptr;
- qpoints->pReadBarrierMarkReg12 = is_active ? art_quick_read_barrier_mark_reg12 : nullptr;
- qpoints->pReadBarrierMarkReg13 = is_active ? art_quick_read_barrier_mark_reg13 : nullptr;
- qpoints->pReadBarrierMarkReg17 = is_active ? art_quick_read_barrier_mark_reg17 : nullptr;
- qpoints->pReadBarrierMarkReg18 = is_active ? art_quick_read_barrier_mark_reg18 : nullptr;
- qpoints->pReadBarrierMarkReg19 = is_active ? art_quick_read_barrier_mark_reg19 : nullptr;
- qpoints->pReadBarrierMarkReg20 = is_active ? art_quick_read_barrier_mark_reg20 : nullptr;
- qpoints->pReadBarrierMarkReg21 = is_active ? art_quick_read_barrier_mark_reg21 : nullptr;
- qpoints->pReadBarrierMarkReg22 = is_active ? art_quick_read_barrier_mark_reg22 : nullptr;
- qpoints->pReadBarrierMarkReg29 = is_active ? art_quick_read_barrier_mark_reg29 : nullptr;
-}
-
-void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
- DefaultInitEntryPoints(jpoints, qpoints);
-
- // Cast
- qpoints->pInstanceofNonTrivial = artInstanceOfFromCode;
- qpoints->pCheckInstanceOf = art_quick_check_instance_of;
-
- // Math
- qpoints->pCmpgDouble = CmpgDouble;
- qpoints->pCmpgFloat = CmpgFloat;
- qpoints->pCmplDouble = CmplDouble;
- qpoints->pCmplFloat = CmplFloat;
- qpoints->pFmod = fmod;
- qpoints->pL2d = art_l2d;
- qpoints->pFmodf = fmodf;
- qpoints->pL2f = art_l2f;
- qpoints->pD2iz = art_d2i;
- qpoints->pF2iz = art_f2i;
- qpoints->pIdivmod = nullptr;
- qpoints->pD2l = art_d2l;
- qpoints->pF2l = art_f2l;
- qpoints->pLdiv = artLdiv;
- qpoints->pLmod = artLmod;
- qpoints->pLmul = artLmul;
- qpoints->pShlLong = nullptr;
- qpoints->pShrLong = nullptr;
- qpoints->pUshrLong = nullptr;
-
- // More math.
- qpoints->pCos = cos;
- qpoints->pSin = sin;
- qpoints->pAcos = acos;
- qpoints->pAsin = asin;
- qpoints->pAtan = atan;
- qpoints->pAtan2 = atan2;
- qpoints->pPow = pow;
- qpoints->pCbrt = cbrt;
- qpoints->pCosh = cosh;
- qpoints->pExp = exp;
- qpoints->pExpm1 = expm1;
- qpoints->pHypot = hypot;
- qpoints->pLog = log;
- qpoints->pLog10 = log10;
- qpoints->pNextAfter = nextafter;
- qpoints->pSinh = sinh;
- qpoints->pTan = tan;
- qpoints->pTanh = tanh;
-
- // Intrinsics
- qpoints->pIndexOf = art_quick_indexof;
- qpoints->pStringCompareTo = art_quick_string_compareto;
- qpoints->pMemcpy = memcpy;
-
- // TODO - use lld/scd instructions for Mips64
- // Atomic 64-bit load/store
- qpoints->pA64Load = QuasiAtomic::Read64;
- qpoints->pA64Store = QuasiAtomic::Write64;
-
- // Read barrier.
- qpoints->pReadBarrierJni = ReadBarrierJni;
- UpdateReadBarrierEntrypoints(qpoints, /*is_active=*/ false);
- // Cannot use the following registers to pass arguments:
- // 0(ZERO), 1(AT), 15(T3), 16(S0), 17(S1), 24(T8), 25(T9), 26(K0), 27(K1), 28(GP), 29(SP), 31(RA).
- // Note that there are 30 entry points only: 00 for register 1(AT), ..., 29 for register 30(S8).
- qpoints->pReadBarrierMarkReg14 = nullptr;
- qpoints->pReadBarrierMarkReg15 = nullptr;
- qpoints->pReadBarrierMarkReg16 = nullptr;
- qpoints->pReadBarrierMarkReg23 = nullptr;
- qpoints->pReadBarrierMarkReg24 = nullptr;
- qpoints->pReadBarrierMarkReg25 = nullptr;
- qpoints->pReadBarrierMarkReg26 = nullptr;
- qpoints->pReadBarrierMarkReg27 = nullptr;
- qpoints->pReadBarrierMarkReg28 = nullptr;
- qpoints->pReadBarrierSlow = artReadBarrierSlow;
- qpoints->pReadBarrierForRootSlow = artReadBarrierForRootSlow;
-}
-
-} // namespace art
diff --git a/runtime/arch/mips64/fault_handler_mips64.cc b/runtime/arch/mips64/fault_handler_mips64.cc
deleted file mode 100644
index ff53fa6..0000000
--- a/runtime/arch/mips64/fault_handler_mips64.cc
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "fault_handler.h"
-
-#include <sys/ucontext.h>
-
-#include "arch/instruction_set.h"
-#include "arch/mips64/callee_save_frame_mips64.h"
-#include "art_method.h"
-#include "base/callee_save_type.h"
-#include "base/hex_dump.h"
-#include "base/logging.h" // For VLOG.
-#include "base/macros.h"
-#include "registers_mips64.h"
-#include "runtime_globals.h"
-#include "thread-current-inl.h"
-
-extern "C" void art_quick_throw_stack_overflow();
-extern "C" void art_quick_throw_null_pointer_exception_from_signal();
-
-//
-// Mips64 specific fault handler functions.
-//
-
-namespace art {
-
-void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo,
- void* context,
- ArtMethod** out_method,
- uintptr_t* out_return_pc,
- uintptr_t* out_sp,
- bool* out_is_stack_overflow) {
- struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
- struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
- *out_sp = static_cast<uintptr_t>(sc->sc_regs[mips64::SP]);
- VLOG(signals) << "sp: " << *out_sp;
- if (*out_sp == 0) {
- return;
- }
-
- // In the case of a stack overflow, the stack is not valid and we can't
- // get the method from the top of the stack. However it's in r0.
- uintptr_t* fault_addr = reinterpret_cast<uintptr_t*>(siginfo->si_addr); // BVA addr
- uintptr_t* overflow_addr = reinterpret_cast<uintptr_t*>(
- reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(InstructionSet::kMips64));
- if (overflow_addr == fault_addr) {
- *out_method = reinterpret_cast<ArtMethod*>(sc->sc_regs[mips64::A0]);
- *out_is_stack_overflow = true;
- } else {
- // The method is at the top of the stack.
- *out_method = *reinterpret_cast<ArtMethod**>(*out_sp);
- *out_is_stack_overflow = false;
- }
-
- // Work out the return PC. This will be the address of the instruction
- // following the faulting ldr/str instruction.
-
- VLOG(signals) << "pc: " << std::hex
- << static_cast<void*>(reinterpret_cast<uint8_t*>(sc->sc_pc));
-
- *out_return_pc = sc->sc_pc + 4;
-}
-
-bool NullPointerHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info, void* context) {
- if (!IsValidImplicitCheck(info)) {
- return false;
- }
-
- // The code that looks for the catch location needs to know the value of the
- // PC at the point of call. For Null checks we insert a GC map that is immediately after
- // the load/store instruction that might cause the fault.
-
- struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
- struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
-
- // Decrement $sp by the frame size of the kSaveEverything method and store
- // the fault address in the padding right after the ArtMethod*.
- sc->sc_regs[mips64::SP] -= mips64::Mips64CalleeSaveFrameSize(CalleeSaveType::kSaveEverything);
- uintptr_t* padding = reinterpret_cast<uintptr_t*>(sc->sc_regs[mips64::SP]) + /* ArtMethod* */ 1;
- *padding = reinterpret_cast<uintptr_t>(info->si_addr);
-
- sc->sc_regs[mips64::RA] = sc->sc_pc + 4; // RA needs to point to gc map location
- sc->sc_pc = reinterpret_cast<uintptr_t>(art_quick_throw_null_pointer_exception_from_signal);
- // Note: This entrypoint does not rely on T9 pointing to it, so we may as well preserve T9.
- VLOG(signals) << "Generating null pointer exception";
- return true;
-}
-
-bool SuspensionHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
- void* context ATTRIBUTE_UNUSED) {
- return false;
-}
-
-// Stack overflow fault handler.
-//
-// This checks that the fault address is equal to the current stack pointer
-// minus the overflow region size (16K typically). The instruction that
-// generates this signal is:
-//
-// lw zero, -16384(sp)
-//
-// It will fault if sp is inside the protected region on the stack.
-//
-// If we determine this is a stack overflow we need to move the stack pointer
-// to the overflow region below the protected region.
-
-bool StackOverflowHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info, void* context) {
- struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
- struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
- VLOG(signals) << "stack overflow handler with sp at " << std::hex << &uc;
- VLOG(signals) << "sigcontext: " << std::hex << sc;
-
- uintptr_t sp = sc->sc_regs[mips64::SP];
- VLOG(signals) << "sp: " << std::hex << sp;
-
- uintptr_t fault_addr = reinterpret_cast<uintptr_t>(info->si_addr); // BVA addr
- VLOG(signals) << "fault_addr: " << std::hex << fault_addr;
- VLOG(signals) << "checking for stack overflow, sp: " << std::hex << sp <<
- ", fault_addr: " << fault_addr;
-
- uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(InstructionSet::kMips64);
-
- // Check that the fault address is the value expected for a stack overflow.
- if (fault_addr != overflow_addr) {
- VLOG(signals) << "Not a stack overflow";
- return false;
- }
-
- VLOG(signals) << "Stack overflow found";
-
- // Now arrange for the signal handler to return to art_quick_throw_stack_overflow_from.
- // The value of RA must be the same as it was when we entered the code that
- // caused this fault. This will be inserted into a callee save frame by
- // the function to which this handler returns (art_quick_throw_stack_overflow).
- sc->sc_pc = reinterpret_cast<uintptr_t>(art_quick_throw_stack_overflow);
- sc->sc_regs[mips64::T9] = sc->sc_pc; // make sure T9 points to the function
-
- // The kernel will now return to the address in sc->arm_pc.
- return true;
-}
-} // namespace art
diff --git a/runtime/arch/mips64/instruction_set_features_mips64.cc b/runtime/arch/mips64/instruction_set_features_mips64.cc
deleted file mode 100644
index 2031433..0000000
--- a/runtime/arch/mips64/instruction_set_features_mips64.cc
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "instruction_set_features_mips64.h"
-
-#include <fstream>
-#include <sstream>
-
-#include "android-base/stringprintf.h"
-#include "android-base/strings.h"
-
-#include "base/logging.h"
-
-namespace art {
-
-using android::base::StringPrintf;
-
-Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromVariant(
- const std::string& variant, std::string* error_msg ATTRIBUTE_UNUSED) {
- bool msa = true;
- if (variant != "default" && variant != "mips64r6") {
- LOG(WARNING) << "Unexpected CPU variant for Mips64 using defaults: " << variant;
- }
- return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures(msa));
-}
-
-Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromBitmap(uint32_t bitmap) {
- bool msa = (bitmap & kMsaBitfield) != 0;
- return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures(msa));
-}
-
-Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromCppDefines() {
-#if defined(_MIPS_ARCH_MIPS64R6)
- const bool msa = true;
-#else
- const bool msa = false;
-#endif
- return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures(msa));
-}
-
-Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromCpuInfo() {
- // Look in /proc/cpuinfo for features we need. Only use this when we can guarantee that
- // the kernel puts the appropriate feature flags in here. Sometimes it doesn't.
- bool msa = false;
-
- std::ifstream in("/proc/cpuinfo");
- if (!in.fail()) {
- while (!in.eof()) {
- std::string line;
- std::getline(in, line);
- if (!in.eof()) {
- LOG(INFO) << "cpuinfo line: " << line;
- if (line.find("ASEs") != std::string::npos) {
- LOG(INFO) << "found Application Specific Extensions";
- if (line.find("msa") != std::string::npos) {
- msa = true;
- }
- }
- }
- }
- in.close();
- } else {
- LOG(ERROR) << "Failed to open /proc/cpuinfo";
- }
- return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures(msa));
-}
-
-Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromHwcap() {
- UNIMPLEMENTED(WARNING);
- return FromCppDefines();
-}
-
-Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromAssembly() {
- UNIMPLEMENTED(WARNING);
- return FromCppDefines();
-}
-
-bool Mips64InstructionSetFeatures::Equals(const InstructionSetFeatures* other) const {
- if (InstructionSet::kMips64 != other->GetInstructionSet()) {
- return false;
- }
- const Mips64InstructionSetFeatures* other_as_mips64 = other->AsMips64InstructionSetFeatures();
- return msa_ == other_as_mips64->msa_;
-}
-
-uint32_t Mips64InstructionSetFeatures::AsBitmap() const {
- return (msa_ ? kMsaBitfield : 0);
-}
-
-std::string Mips64InstructionSetFeatures::GetFeatureString() const {
- std::string result;
- if (msa_) {
- result += "msa";
- } else {
- result += "-msa";
- }
- return result;
-}
-
-std::unique_ptr<const InstructionSetFeatures>
-Mips64InstructionSetFeatures::AddFeaturesFromSplitString(
- const std::vector<std::string>& features, std::string* error_msg) const {
- bool msa = msa_;
- for (const std::string& feature : features) {
- DCHECK_EQ(android::base::Trim(feature), feature)
- << "Feature name is not trimmed: '" << feature << "'";
- if (feature == "msa") {
- msa = true;
- } else if (feature == "-msa") {
- msa = false;
- } else {
- *error_msg = StringPrintf("Unknown instruction set feature: '%s'", feature.c_str());
- return nullptr;
- }
- }
- return std::unique_ptr<const InstructionSetFeatures>(new Mips64InstructionSetFeatures(msa));
-}
-
-} // namespace art
diff --git a/runtime/arch/mips64/instruction_set_features_mips64.h b/runtime/arch/mips64/instruction_set_features_mips64.h
deleted file mode 100644
index e204d9d..0000000
--- a/runtime/arch/mips64/instruction_set_features_mips64.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ARCH_MIPS64_INSTRUCTION_SET_FEATURES_MIPS64_H_
-#define ART_RUNTIME_ARCH_MIPS64_INSTRUCTION_SET_FEATURES_MIPS64_H_
-
-#include "arch/instruction_set_features.h"
-
-namespace art {
-
-class Mips64InstructionSetFeatures;
-using Mips64FeaturesUniquePtr = std::unique_ptr<const Mips64InstructionSetFeatures>;
-
-// Instruction set features relevant to the MIPS64 architecture.
-class Mips64InstructionSetFeatures final : public InstructionSetFeatures {
- public:
- // Process a CPU variant string like "r4000" and create InstructionSetFeatures.
- static Mips64FeaturesUniquePtr FromVariant(const std::string& variant,
- std::string* error_msg);
-
- // Parse a bitmap and create an InstructionSetFeatures.
- static Mips64FeaturesUniquePtr FromBitmap(uint32_t bitmap);
-
- // Turn C pre-processor #defines into the equivalent instruction set features.
- static Mips64FeaturesUniquePtr FromCppDefines();
-
- // Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
- static Mips64FeaturesUniquePtr FromCpuInfo();
-
- // Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
- // InstructionSetFeatures.
- static Mips64FeaturesUniquePtr FromHwcap();
-
- // Use assembly tests of the current runtime (ie kRuntimeISA) to determine the
- // InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
- static Mips64FeaturesUniquePtr FromAssembly();
-
- bool Equals(const InstructionSetFeatures* other) const override;
-
- InstructionSet GetInstructionSet() const override {
- return InstructionSet::kMips64;
- }
-
- uint32_t AsBitmap() const override;
-
- std::string GetFeatureString() const override;
-
- // Does it have MSA (MIPS SIMD Architecture) support.
- bool HasMsa() const {
- return msa_;
- }
-
- virtual ~Mips64InstructionSetFeatures() {}
-
- protected:
- // Parse a vector of the form "fpu32", "mips2" adding these to a new Mips64InstructionSetFeatures.
- std::unique_ptr<const InstructionSetFeatures>
- AddFeaturesFromSplitString(const std::vector<std::string>& features,
- std::string* error_msg) const override;
-
- private:
- explicit Mips64InstructionSetFeatures(bool msa) : InstructionSetFeatures(), msa_(msa) {
- }
-
- // Bitmap positions for encoding features as a bitmap.
- enum {
- kMsaBitfield = 1,
- };
-
- const bool msa_;
-
- DISALLOW_COPY_AND_ASSIGN(Mips64InstructionSetFeatures);
-};
-
-} // namespace art
-
-#endif // ART_RUNTIME_ARCH_MIPS64_INSTRUCTION_SET_FEATURES_MIPS64_H_
diff --git a/runtime/arch/mips64/instruction_set_features_mips64_test.cc b/runtime/arch/mips64/instruction_set_features_mips64_test.cc
deleted file mode 100644
index 933dc66..0000000
--- a/runtime/arch/mips64/instruction_set_features_mips64_test.cc
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "instruction_set_features_mips64.h"
-
-#include <gtest/gtest.h>
-
-namespace art {
-
-TEST(Mips64InstructionSetFeaturesTest, Mips64FeaturesFromDefaultVariant) {
- std::string error_msg;
- std::unique_ptr<const InstructionSetFeatures> mips64_features(
- InstructionSetFeatures::FromVariant(InstructionSet::kMips64, "default", &error_msg));
- ASSERT_TRUE(mips64_features.get() != nullptr) << error_msg;
- EXPECT_EQ(mips64_features->GetInstructionSet(), InstructionSet::kMips64);
- EXPECT_TRUE(mips64_features->Equals(mips64_features.get()));
- EXPECT_STREQ("msa", mips64_features->GetFeatureString().c_str());
- EXPECT_EQ(mips64_features->AsBitmap(), 1U);
-}
-
-TEST(Mips64InstructionSetFeaturesTest, Mips64FeaturesFromR6Variant) {
- std::string error_msg;
- std::unique_ptr<const InstructionSetFeatures> mips64r6_features(
- InstructionSetFeatures::FromVariant(InstructionSet::kMips64, "mips64r6", &error_msg));
- ASSERT_TRUE(mips64r6_features.get() != nullptr) << error_msg;
- EXPECT_EQ(mips64r6_features->GetInstructionSet(), InstructionSet::kMips64);
- EXPECT_TRUE(mips64r6_features->Equals(mips64r6_features.get()));
- EXPECT_STREQ("msa", mips64r6_features->GetFeatureString().c_str());
- EXPECT_EQ(mips64r6_features->AsBitmap(), 1U);
-
- std::unique_ptr<const InstructionSetFeatures> mips64_default_features(
- InstructionSetFeatures::FromVariant(InstructionSet::kMips64, "default", &error_msg));
- ASSERT_TRUE(mips64_default_features.get() != nullptr) << error_msg;
- EXPECT_TRUE(mips64r6_features->Equals(mips64_default_features.get()));
-}
-
-} // namespace art
diff --git a/runtime/arch/mips64/jni_entrypoints_mips64.S b/runtime/arch/mips64/jni_entrypoints_mips64.S
deleted file mode 100644
index 70d7d97..0000000
--- a/runtime/arch/mips64/jni_entrypoints_mips64.S
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "asm_support_mips64.S"
-
- .set noreorder
- .balign 16
-
- /*
- * Jni dlsym lookup stub.
- */
- .extern artFindNativeMethod
-ENTRY art_jni_dlsym_lookup_stub
- daddiu $sp, $sp, -80 # save a0-a7 and $ra
- .cfi_adjust_cfa_offset 80
- sd $ra, 64($sp)
- .cfi_rel_offset 31, 64
- sd $a7, 56($sp)
- .cfi_rel_offset 11, 56
- sd $a6, 48($sp)
- .cfi_rel_offset 10, 48
- sd $a5, 40($sp)
- .cfi_rel_offset 9, 40
- sd $a4, 32($sp)
- .cfi_rel_offset 8, 32
- sd $a3, 24($sp)
- .cfi_rel_offset 7, 24
- sd $a2, 16($sp)
- .cfi_rel_offset 6, 16
- sd $a1, 8($sp)
- .cfi_rel_offset 5, 8
- sd $a0, 0($sp)
- .cfi_rel_offset 4, 0
- move $a0, $s1 # pass Thread::Current()
- jal artFindNativeMethod # (Thread*)
- .cpreturn # Restore gp from t8 in branch delay slot. gp is not used
- # anymore, and t8 may be clobbered in artFindNativeMethod.
-
- ld $a0, 0($sp) # restore registers from stack
- .cfi_restore 4
- ld $a1, 8($sp)
- .cfi_restore 5
- ld $a2, 16($sp)
- .cfi_restore 6
- ld $a3, 24($sp)
- .cfi_restore 7
- ld $a4, 32($sp)
- .cfi_restore 8
- ld $a5, 40($sp)
- .cfi_restore 9
- ld $a6, 48($sp)
- .cfi_restore 10
- ld $a7, 56($sp)
- .cfi_restore 11
- ld $ra, 64($sp)
- .cfi_restore 31
- beq $v0, $zero, .Lno_native_code_found
- daddiu $sp, $sp, 80 # restore the stack
- .cfi_adjust_cfa_offset -80
- move $t9, $v0 # put method code result in $t9
- jalr $zero, $t9 # leaf call to method's code
- nop
-.Lno_native_code_found:
- jalr $zero, $ra
- nop
-END art_jni_dlsym_lookup_stub
diff --git a/runtime/arch/mips64/memcmp16_mips64.S b/runtime/arch/mips64/memcmp16_mips64.S
deleted file mode 100644
index 962977e..0000000
--- a/runtime/arch/mips64/memcmp16_mips64.S
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ARCH_MIPS64_MEMCMP16_MIPS64_S_
-#define ART_RUNTIME_ARCH_MIPS64_MEMCMP16_MIPS64_S_
-
-#include "asm_support_mips64.S"
-
-.set noreorder
-
-// u4 __memcmp16(const u2*, const u2*, size_t);
-ENTRY_NO_GP __memcmp16
- move $t0, $zero
- move $t1, $zero
- beqz $a2, done /* 0 length string */
- nop
- beq $a0, $a1, done /* addresses are identical */
- nop
-
-1:
- lhu $t0, 0($a0)
- lhu $t1, 0($a1)
- bne $t0, $t1, done
- nop
- daddu $a0, 2
- daddu $a1, 2
- dsubu $a2, 1
- bnez $a2, 1b
- nop
-
-done:
- dsubu $v0, $t0, $t1
- j $ra
- nop
-END __memcmp16
-
-#endif // ART_RUNTIME_ARCH_MIPS64_MEMCMP16_MIPS64_S_
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
deleted file mode 100644
index c54e7bb..0000000
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ /dev/null
@@ -1,3106 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "asm_support_mips64.S"
-
-#include "arch/quick_alloc_entrypoints.S"
-
- .set noreorder
- .balign 16
-
- /* Deliver the given exception */
- .extern artDeliverExceptionFromCode
- /* Deliver an exception pending on a thread */
- .extern artDeliverPendingExceptionFromCode
-
- /*
- * Macro that sets up $gp and stores the previous $gp value to $t8.
- * This macro modifies v1 and t8.
- */
-.macro SETUP_GP
- move $v1, $ra
- bal 1f
- nop
-1:
- .cpsetup $ra, $t8, 1b
- move $ra, $v1
-.endm
-
- /*
- * Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kSaveAllCalleeSaves)
- * callee-save: padding + $f24-$f31 + $s0-$s7 + $gp + $ra + $s8 = 19 total + 1x8 bytes padding
- */
-.macro SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
- daddiu $sp, $sp, -160
- .cfi_adjust_cfa_offset 160
-
- // Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVES != 160)
-#error "FRAME_SIZE_SAVE_ALL_CALLEE_SAVES(MIPS64) size not as expected."
-#endif
-
- sd $ra, 152($sp)
- .cfi_rel_offset 31, 152
- sd $s8, 144($sp)
- .cfi_rel_offset 30, 144
- sd $t8, 136($sp) # t8 holds caller's gp, now save it to the stack.
- .cfi_rel_offset 28, 136 # Value from gp is pushed, so set the cfi offset accordingly.
- sd $s7, 128($sp)
- .cfi_rel_offset 23, 128
- sd $s6, 120($sp)
- .cfi_rel_offset 22, 120
- sd $s5, 112($sp)
- .cfi_rel_offset 21, 112
- sd $s4, 104($sp)
- .cfi_rel_offset 20, 104
- sd $s3, 96($sp)
- .cfi_rel_offset 19, 96
- sd $s2, 88($sp)
- .cfi_rel_offset 18, 88
- sd $s1, 80($sp)
- .cfi_rel_offset 17, 80
- sd $s0, 72($sp)
- .cfi_rel_offset 16, 72
-
- // FP callee-saves
- s.d $f31, 64($sp)
- s.d $f30, 56($sp)
- s.d $f29, 48($sp)
- s.d $f28, 40($sp)
- s.d $f27, 32($sp)
- s.d $f26, 24($sp)
- s.d $f25, 16($sp)
- s.d $f24, 8($sp)
-
- # load appropriate callee-save-method
- ld $t1, %got(_ZN3art7Runtime9instance_E)($gp)
- ld $t1, 0($t1)
- ld $t1, RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET($t1)
- sd $t1, 0($sp) # Place ArtMethod* at bottom of stack.
- sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
-.endm
-
- /*
- * Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kSaveRefsOnly). Restoration assumes
- * non-moving GC.
- * Does not include rSUSPEND or rSELF
- * callee-save: padding + $s2-$s7 + $gp + $ra + $s8 = 9 total + 1x8 bytes padding
- */
-.macro SETUP_SAVE_REFS_ONLY_FRAME
- daddiu $sp, $sp, -80
- .cfi_adjust_cfa_offset 80
-
- // Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_SAVE_REFS_ONLY != 80)
-#error "FRAME_SIZE_SAVE_REFS_ONLY(MIPS64) size not as expected."
-#endif
-
- sd $ra, 72($sp)
- .cfi_rel_offset 31, 72
- sd $s8, 64($sp)
- .cfi_rel_offset 30, 64
- sd $t8, 56($sp) # t8 holds caller's gp, now save it to the stack.
- .cfi_rel_offset 28, 56 # Value from gp is pushed, so set the cfi offset accordingly.
- sd $s7, 48($sp)
- .cfi_rel_offset 23, 48
- sd $s6, 40($sp)
- .cfi_rel_offset 22, 40
- sd $s5, 32($sp)
- .cfi_rel_offset 21, 32
- sd $s4, 24($sp)
- .cfi_rel_offset 20, 24
- sd $s3, 16($sp)
- .cfi_rel_offset 19, 16
- sd $s2, 8($sp)
- .cfi_rel_offset 18, 8
- # load appropriate callee-save-method
- ld $t1, %got(_ZN3art7Runtime9instance_E)($gp)
- ld $t1, 0($t1)
- ld $t1, RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET($t1)
- sd $t1, 0($sp) # Place Method* at bottom of stack.
- sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
-.endm
-
-.macro RESTORE_SAVE_REFS_ONLY_FRAME
- ld $ra, 72($sp)
- .cfi_restore 31
- ld $s8, 64($sp)
- .cfi_restore 30
- ld $t8, 56($sp) # Restore gp back to it's temp storage.
- .cfi_restore 28
- ld $s7, 48($sp)
- .cfi_restore 23
- ld $s6, 40($sp)
- .cfi_restore 22
- ld $s5, 32($sp)
- .cfi_restore 21
- ld $s4, 24($sp)
- .cfi_restore 20
- ld $s3, 16($sp)
- .cfi_restore 19
- ld $s2, 8($sp)
- .cfi_restore 18
- daddiu $sp, $sp, 80
- .cfi_adjust_cfa_offset -80
- .cpreturn
-.endm
-
-.macro RESTORE_SAVE_REFS_ONLY_FRAME_AND_RETURN
- ld $ra, 72($sp)
- .cfi_restore 31
- ld $s8, 64($sp)
- .cfi_restore 30
- ld $t8, 56($sp) # Restore gp back to it's temp storage.
- .cfi_restore 28
- ld $s7, 48($sp)
- .cfi_restore 23
- ld $s6, 40($sp)
- .cfi_restore 22
- ld $s5, 32($sp)
- .cfi_restore 21
- ld $s4, 24($sp)
- .cfi_restore 20
- ld $s3, 16($sp)
- .cfi_restore 19
- ld $s2, 8($sp)
- .cfi_restore 18
- .cpreturn
- jalr $zero, $ra
- daddiu $sp, $sp, 80
- .cfi_adjust_cfa_offset -80
-.endm
-
-// This assumes the top part of these stack frame types are identical.
-#define REFS_AND_ARGS_MINUS_REFS_SIZE (FRAME_SIZE_SAVE_REFS_AND_ARGS - FRAME_SIZE_SAVE_REFS_ONLY)
-
- /*
- * Individually usable part of macro SETUP_SAVE_REFS_AND_ARGS_FRAME_INTERNAL.
- */
-.macro SETUP_SAVE_REFS_AND_ARGS_FRAME_S4_THRU_S8
- sd $s8, 192($sp)
- .cfi_rel_offset 30, 192
- sd $s7, 176($sp)
- .cfi_rel_offset 23, 176
- sd $s6, 168($sp)
- .cfi_rel_offset 22, 168
- sd $s5, 160($sp)
- .cfi_rel_offset 21, 160
- sd $s4, 152($sp)
- .cfi_rel_offset 20, 152
-.endm
-
-.macro SETUP_SAVE_REFS_AND_ARGS_FRAME_INTERNAL save_s4_thru_s8=1
- daddiu $sp, $sp, -208
- .cfi_adjust_cfa_offset 208
-
- // Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_SAVE_REFS_AND_ARGS != 208)
-#error "FRAME_SIZE_SAVE_REFS_AND_ARGS(MIPS64) size not as expected."
-#endif
-
- sd $ra, 200($sp) # = kQuickCalleeSaveFrame_RefAndArgs_LrOffset
- .cfi_rel_offset 31, 200
- sd $t8, 184($sp) # t8 holds caller's gp, now save it to the stack.
- .cfi_rel_offset 28, 184 # Value from gp is pushed, so set the cfi offset accordingly.
- .if \save_s4_thru_s8
- SETUP_SAVE_REFS_AND_ARGS_FRAME_S4_THRU_S8
- .endif
- sd $s3, 144($sp)
- .cfi_rel_offset 19, 144
- sd $s2, 136($sp)
- .cfi_rel_offset 18, 136
- sd $a7, 128($sp)
- .cfi_rel_offset 11, 128
- sd $a6, 120($sp)
- .cfi_rel_offset 10, 120
- sd $a5, 112($sp)
- .cfi_rel_offset 9, 112
- sd $a4, 104($sp)
- .cfi_rel_offset 8, 104
- sd $a3, 96($sp)
- .cfi_rel_offset 7, 96
- sd $a2, 88($sp)
- .cfi_rel_offset 6, 88
- sd $a1, 80($sp) # = kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset
- .cfi_rel_offset 5, 80
-
- s.d $f19, 72($sp)
- s.d $f18, 64($sp)
- s.d $f17, 56($sp)
- s.d $f16, 48($sp)
- s.d $f15, 40($sp)
- s.d $f14, 32($sp)
- s.d $f13, 24($sp) # = kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset
- s.d $f12, 16($sp) # This isn't necessary to store.
- # 1x8 bytes padding + Method*
-.endm
-
- /*
- * Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kSaveRefsAndArgs). Restoration assumes
- * non-moving GC.
- * callee-save: padding + $f12-$f19 + $a1-$a7 + $s2-$s7 + $gp + $ra + $s8 = 24 total + 1 words padding + Method*
- */
-.macro SETUP_SAVE_REFS_AND_ARGS_FRAME save_s4_thru_s8_only=0
- .if \save_s4_thru_s8_only
- // It is expected that `SETUP_SAVE_REFS_AND_ARGS_FRAME_INTERNAL /* save_s4_thru_s8 */ 0`
- // has been done prior to `SETUP_SAVE_REFS_AND_ARGS_FRAME /* save_s4_thru_s8_only */ 1`.
- SETUP_SAVE_REFS_AND_ARGS_FRAME_S4_THRU_S8
- .else
- SETUP_SAVE_REFS_AND_ARGS_FRAME_INTERNAL
- .endif
- # load appropriate callee-save-method
- ld $t1, %got(_ZN3art7Runtime9instance_E)($gp)
- ld $t1, 0($t1)
- ld $t1, RUNTIME_SAVE_REFS_AND_ARGS_METHOD_OFFSET($t1)
- sd $t1, 0($sp) # Place Method* at bottom of stack.
- sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
-.endm
-
-.macro SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_A0
- SETUP_SAVE_REFS_AND_ARGS_FRAME_INTERNAL
- sd $a0, 0($sp) # Place Method* at bottom of stack.
- sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
-.endm
-
- /*
- * Individually usable part of macro RESTORE_SAVE_REFS_AND_ARGS_FRAME.
- */
-.macro RESTORE_SAVE_REFS_AND_ARGS_FRAME_A1
- ld $a1, 80($sp)
- .cfi_restore 5
-.endm
-
-.macro RESTORE_SAVE_REFS_AND_ARGS_FRAME restore_s4_thru_s8=1
- ld $ra, 200($sp)
- .cfi_restore 31
- .if \restore_s4_thru_s8
- ld $s8, 192($sp)
- .cfi_restore 30
- .endif
- ld $t8, 184($sp) # Restore gp back to it's temp storage.
- .cfi_restore 28
- .if \restore_s4_thru_s8
- ld $s7, 176($sp)
- .cfi_restore 23
- ld $s6, 168($sp)
- .cfi_restore 22
- ld $s5, 160($sp)
- .cfi_restore 21
- ld $s4, 152($sp)
- .cfi_restore 20
- .endif
- ld $s3, 144($sp)
- .cfi_restore 19
- ld $s2, 136($sp)
- .cfi_restore 18
- ld $a7, 128($sp)
- .cfi_restore 11
- ld $a6, 120($sp)
- .cfi_restore 10
- ld $a5, 112($sp)
- .cfi_restore 9
- ld $a4, 104($sp)
- .cfi_restore 8
- ld $a3, 96($sp)
- .cfi_restore 7
- ld $a2, 88($sp)
- .cfi_restore 6
- RESTORE_SAVE_REFS_AND_ARGS_FRAME_A1
-
- l.d $f19, 72($sp)
- l.d $f18, 64($sp)
- l.d $f17, 56($sp)
- l.d $f16, 48($sp)
- l.d $f15, 40($sp)
- l.d $f14, 32($sp)
- l.d $f13, 24($sp)
- l.d $f12, 16($sp)
-
- .cpreturn
- daddiu $sp, $sp, 208
- .cfi_adjust_cfa_offset -208
-.endm
-
- /*
- * Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kSaveEverything).
- * when the $sp has already been decremented by FRAME_SIZE_SAVE_EVERYTHING.
- * callee-save: $at + $v0-$v1 + $a0-$a7 + $t0-$t3 + $s0-$s7 + $t8-$t9 + $gp + $s8 + $ra + $s8,
- * $f0-$f31; 28(GPR)+ 32(FPR) + 1x8 bytes padding + method*
- * This macro sets up $gp; entrypoints using it should start with ENTRY_NO_GP.
- */
-.macro SETUP_SAVE_EVERYTHING_FRAME_DECREMENTED_SP runtime_method_offset = RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET
- // Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_SAVE_EVERYTHING != 496)
-#error "FRAME_SIZE_SAVE_EVERYTHING(MIPS64) size not as expected."
-#endif
-
- // Save core registers.
- sd $ra, 488($sp)
- .cfi_rel_offset 31, 488
- sd $s8, 480($sp)
- .cfi_rel_offset 30, 480
- sd $t9, 464($sp)
- .cfi_rel_offset 25, 464
- sd $t8, 456($sp)
- .cfi_rel_offset 24, 456
- sd $s7, 448($sp)
- .cfi_rel_offset 23, 448
- sd $s6, 440($sp)
- .cfi_rel_offset 22, 440
- sd $s5, 432($sp)
- .cfi_rel_offset 21, 432
- sd $s4, 424($sp)
- .cfi_rel_offset 20, 424
- sd $s3, 416($sp)
- .cfi_rel_offset 19, 416
- sd $s2, 408($sp)
- .cfi_rel_offset 18, 408
- sd $s1, 400($sp)
- .cfi_rel_offset 17, 400
- sd $s0, 392($sp)
- .cfi_rel_offset 16, 392
- sd $t3, 384($sp)
- .cfi_rel_offset 15, 384
- sd $t2, 376($sp)
- .cfi_rel_offset 14, 376
- sd $t1, 368($sp)
- .cfi_rel_offset 13, 368
- sd $t0, 360($sp)
- .cfi_rel_offset 12, 360
- sd $a7, 352($sp)
- .cfi_rel_offset 11, 352
- sd $a6, 344($sp)
- .cfi_rel_offset 10, 344
- sd $a5, 336($sp)
- .cfi_rel_offset 9, 336
- sd $a4, 328($sp)
- .cfi_rel_offset 8, 328
- sd $a3, 320($sp)
- .cfi_rel_offset 7, 320
- sd $a2, 312($sp)
- .cfi_rel_offset 6, 312
- sd $a1, 304($sp)
- .cfi_rel_offset 5, 304
- sd $a0, 296($sp)
- .cfi_rel_offset 4, 296
- sd $v1, 288($sp)
- .cfi_rel_offset 3, 288
- sd $v0, 280($sp)
- .cfi_rel_offset 2, 280
-
- // Set up $gp, clobbering $ra and using the branch delay slot for a useful instruction.
- bal 1f
- .set push
- .set noat
- sd $at, 272($sp)
- .cfi_rel_offset 1, 272
- .set pop
-1:
- .cpsetup $ra, 472, 1b
-
- // Save FP registers.
- s.d $f31, 264($sp)
- s.d $f30, 256($sp)
- s.d $f29, 248($sp)
- s.d $f28, 240($sp)
- s.d $f27, 232($sp)
- s.d $f26, 224($sp)
- s.d $f25, 216($sp)
- s.d $f24, 208($sp)
- s.d $f23, 200($sp)
- s.d $f22, 192($sp)
- s.d $f21, 184($sp)
- s.d $f20, 176($sp)
- s.d $f19, 168($sp)
- s.d $f18, 160($sp)
- s.d $f17, 152($sp)
- s.d $f16, 144($sp)
- s.d $f15, 136($sp)
- s.d $f14, 128($sp)
- s.d $f13, 120($sp)
- s.d $f12, 112($sp)
- s.d $f11, 104($sp)
- s.d $f10, 96($sp)
- s.d $f9, 88($sp)
- s.d $f8, 80($sp)
- s.d $f7, 72($sp)
- s.d $f6, 64($sp)
- s.d $f5, 56($sp)
- s.d $f4, 48($sp)
- s.d $f3, 40($sp)
- s.d $f2, 32($sp)
- s.d $f1, 24($sp)
- s.d $f0, 16($sp)
-
- # load appropriate callee-save-method
- ld $t1, %got(_ZN3art7Runtime9instance_E)($gp)
- ld $t1, 0($t1)
- ld $t1, \runtime_method_offset($t1)
- sd $t1, 0($sp) # Place ArtMethod* at bottom of stack.
- # Place sp in Thread::Current()->top_quick_frame.
- sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)
-.endm
-
- /*
- * Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kSaveEverything).
- * callee-save: $at + $v0-$v1 + $a0-$a7 + $t0-$t3 + $s0-$s7 + $t8-$t9 + $gp + $s8 + $ra + $s8,
- * $f0-$f31; 28(GPR)+ 32(FPR) + 1x8 bytes padding + method*
- * This macro sets up $gp; entrypoints using it should start with ENTRY_NO_GP.
- */
-.macro SETUP_SAVE_EVERYTHING_FRAME runtime_method_offset = RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET
- daddiu $sp, $sp, -(FRAME_SIZE_SAVE_EVERYTHING)
- .cfi_adjust_cfa_offset (FRAME_SIZE_SAVE_EVERYTHING)
- SETUP_SAVE_EVERYTHING_FRAME_DECREMENTED_SP \runtime_method_offset
-.endm
-
-.macro RESTORE_SAVE_EVERYTHING_FRAME restore_a0=1
- // Restore FP registers.
- l.d $f31, 264($sp)
- l.d $f30, 256($sp)
- l.d $f29, 248($sp)
- l.d $f28, 240($sp)
- l.d $f27, 232($sp)
- l.d $f26, 224($sp)
- l.d $f25, 216($sp)
- l.d $f24, 208($sp)
- l.d $f23, 200($sp)
- l.d $f22, 192($sp)
- l.d $f21, 184($sp)
- l.d $f20, 176($sp)
- l.d $f19, 168($sp)
- l.d $f18, 160($sp)
- l.d $f17, 152($sp)
- l.d $f16, 144($sp)
- l.d $f15, 136($sp)
- l.d $f14, 128($sp)
- l.d $f13, 120($sp)
- l.d $f12, 112($sp)
- l.d $f11, 104($sp)
- l.d $f10, 96($sp)
- l.d $f9, 88($sp)
- l.d $f8, 80($sp)
- l.d $f7, 72($sp)
- l.d $f6, 64($sp)
- l.d $f5, 56($sp)
- l.d $f4, 48($sp)
- l.d $f3, 40($sp)
- l.d $f2, 32($sp)
- l.d $f1, 24($sp)
- l.d $f0, 16($sp)
-
- // Restore core registers.
- .cpreturn
- ld $ra, 488($sp)
- .cfi_restore 31
- ld $s8, 480($sp)
- .cfi_restore 30
- ld $t9, 464($sp)
- .cfi_restore 25
- ld $t8, 456($sp)
- .cfi_restore 24
- ld $s7, 448($sp)
- .cfi_restore 23
- ld $s6, 440($sp)
- .cfi_restore 22
- ld $s5, 432($sp)
- .cfi_restore 21
- ld $s4, 424($sp)
- .cfi_restore 20
- ld $s3, 416($sp)
- .cfi_restore 19
- ld $s2, 408($sp)
- .cfi_restore 18
- ld $s1, 400($sp)
- .cfi_restore 17
- ld $s0, 392($sp)
- .cfi_restore 16
- ld $t3, 384($sp)
- .cfi_restore 15
- ld $t2, 376($sp)
- .cfi_restore 14
- ld $t1, 368($sp)
- .cfi_restore 13
- ld $t0, 360($sp)
- .cfi_restore 12
- ld $a7, 352($sp)
- .cfi_restore 11
- ld $a6, 344($sp)
- .cfi_restore 10
- ld $a5, 336($sp)
- .cfi_restore 9
- ld $a4, 328($sp)
- .cfi_restore 8
- ld $a3, 320($sp)
- .cfi_restore 7
- ld $a2, 312($sp)
- .cfi_restore 6
- ld $a1, 304($sp)
- .cfi_restore 5
- .if \restore_a0
- ld $a0, 296($sp)
- .cfi_restore 4
- .endif
- ld $v1, 288($sp)
- .cfi_restore 3
- ld $v0, 280($sp)
- .cfi_restore 2
- .set push
- .set noat
- ld $at, 272($sp)
- .cfi_restore 1
- .set pop
-
- daddiu $sp, $sp, 496
- .cfi_adjust_cfa_offset -496
-.endm
-
- /*
- * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
- * exception is Thread::Current()->exception_ when the runtime method frame is ready.
- * Requires $gp properly set up.
- */
-.macro DELIVER_PENDING_EXCEPTION_FRAME_READY
- dla $t9, artDeliverPendingExceptionFromCode
- jalr $zero, $t9 # artDeliverPendingExceptionFromCode(Thread*)
- move $a0, rSELF # pass Thread::Current
-.endm
-
- /*
- * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
- * exception is Thread::Current()->exception_.
- */
-.macro DELIVER_PENDING_EXCEPTION
- SETUP_GP
- SETUP_SAVE_ALL_CALLEE_SAVES_FRAME # save callee saves for throw
- DELIVER_PENDING_EXCEPTION_FRAME_READY
-.endm
-
-.macro RETURN_IF_NO_EXCEPTION
- ld $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
- RESTORE_SAVE_REFS_ONLY_FRAME
- bne $t0, $zero, 1f # success if no exception is pending
- nop
- jalr $zero, $ra
- nop
-1:
- DELIVER_PENDING_EXCEPTION
-.endm
-
-.macro RETURN_IF_ZERO
- RESTORE_SAVE_REFS_ONLY_FRAME
- bne $v0, $zero, 1f # success?
- nop
- jalr $zero, $ra # return on success
- nop
-1:
- DELIVER_PENDING_EXCEPTION
-.endm
-
-.macro RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
- RESTORE_SAVE_REFS_ONLY_FRAME
- beq $v0, $zero, 1f # success?
- nop
- jalr $zero, $ra # return on success
- nop
-1:
- DELIVER_PENDING_EXCEPTION
-.endm
-
- /*
- * On stack replacement stub.
- * On entry:
- * a0 = stack to copy
- * a1 = size of stack
- * a2 = pc to call
- * a3 = JValue* result
- * a4 = shorty
- * a5 = thread
- */
-ENTRY art_quick_osr_stub
- move $t0, $sp # save stack pointer
- daddiu $t1, $sp, -112 # reserve stack space
- dsrl $t1, $t1, 4 # enforce 16 byte stack alignment
- dsll $sp, $t1, 4 # update stack pointer
-
- // Save callee general purpose registers, SP, T8(GP), RA, A3, and A4 (8x14 bytes)
- sd $ra, 104($sp)
- .cfi_rel_offset 31, 104
- sd $s8, 96($sp)
- .cfi_rel_offset 30, 96
- sd $t0, 88($sp) # save original stack pointer stored in t0
- .cfi_rel_offset 29, 88
- sd $t8, 80($sp) # t8 holds caller's gp, now save it to the stack.
- .cfi_rel_offset 28, 80 # Value from gp is pushed, so set the cfi offset accordingly.
- sd $s7, 72($sp)
- .cfi_rel_offset 23, 72
- sd $s6, 64($sp)
- .cfi_rel_offset 22, 64
- sd $s5, 56($sp)
- .cfi_rel_offset 21, 56
- sd $s4, 48($sp)
- .cfi_rel_offset 20, 48
- sd $s3, 40($sp)
- .cfi_rel_offset 19, 40
- sd $s2, 32($sp)
- .cfi_rel_offset 18, 32
- sd $s1, 24($sp)
- .cfi_rel_offset 17, 24
- sd $s0, 16($sp)
- .cfi_rel_offset 16, 16
- sd $a4, 8($sp)
- .cfi_rel_offset 8, 8
- sd $a3, 0($sp)
- .cfi_rel_offset 7, 0
- move rSELF, $a5 # Save managed thread pointer into rSELF
-
- daddiu $sp, $sp, -16
- jal .Losr_entry
- sd $zero, 0($sp) # Store null for ArtMethod* at bottom of frame
- daddiu $sp, $sp, 16
-
- // Restore return value address and shorty address
- ld $a4, 8($sp) # shorty address
- .cfi_restore 8
- ld $a3, 0($sp) # result value address
- .cfi_restore 7
-
- lbu $t1, 0($a4) # load return type
- li $t2, 'D' # put char 'D' into t2
- beq $t1, $t2, .Losr_fp_result # branch if result type char == 'D'
- li $t2, 'F' # put char 'F' into t2
- beq $t1, $t2, .Losr_fp_result # branch if result type char == 'F'
- nop
- b .Losr_exit
- dsrl $v1, $v0, 32 # put high half of result in v1
-.Losr_fp_result:
- mfc1 $v0, $f0
- mfhc1 $v1, $f0 # put high half of FP result in v1
-.Losr_exit:
- sw $v0, 0($a3) # store low half of result
- sw $v1, 4($a3) # store high half of result
-
- // Restore callee registers
- ld $ra, 104($sp)
- .cfi_restore 31
- ld $s8, 96($sp)
- .cfi_restore 30
- ld $t0, 88($sp) # save SP into t0 for now
- .cfi_restore 29
- ld $t8, 80($sp) # Restore gp back to it's temp storage.
- .cfi_restore 28
- ld $s7, 72($sp)
- .cfi_restore 23
- ld $s6, 64($sp)
- .cfi_restore 22
- ld $s5, 56($sp)
- .cfi_restore 21
- ld $s4, 48($sp)
- .cfi_restore 20
- ld $s3, 40($sp)
- .cfi_restore 19
- ld $s2, 32($sp)
- .cfi_restore 18
- ld $s1, 24($sp)
- .cfi_restore 17
- ld $s0, 16($sp)
- .cfi_restore 16
- jalr $zero, $ra
- move $sp, $t0
-
-.Losr_entry:
- dsubu $sp, $sp, $a1 # Reserve space for callee stack
- daddiu $a1, $a1, -8
- daddu $t0, $a1, $sp
- sw $ra, 0($t0) # Store low half of RA per compiler ABI
- dsrl $t1, $ra, 32
- sw $t1, 4($t0) # Store high half of RA per compiler ABI
-
- // Copy arguments into callee stack
- // Use simple copy routine for now.
- // 4 bytes per slot.
- // a0 = source address
- // a1 = args length in bytes (does not include 8 bytes for RA)
- // sp = destination address
- beqz $a1, .Losr_loop_exit
- daddiu $a1, $a1, -4
- daddu $t1, $a0, $a1
- daddu $t2, $sp, $a1
-.Losr_loop_entry:
- lw $t0, 0($t1)
- daddiu $t1, $t1, -4
- sw $t0, 0($t2)
- bne $sp, $t2, .Losr_loop_entry
- daddiu $t2, $t2, -4
-
-.Losr_loop_exit:
- move $t9, $a2
- jalr $zero, $t9 # Jump to the OSR entry point.
- nop
-END art_quick_osr_stub
-
- /*
- * On entry $a0 is uint32_t* gprs_ and $a1 is uint32_t* fprs_
- * FIXME: just guessing about the shape of the jmpbuf. Where will pc be?
- */
-ENTRY_NO_GP art_quick_do_long_jump
- l.d $f0, 0($a1)
- l.d $f1, 8($a1)
- l.d $f2, 16($a1)
- l.d $f3, 24($a1)
- l.d $f4, 32($a1)
- l.d $f5, 40($a1)
- l.d $f6, 48($a1)
- l.d $f7, 56($a1)
- l.d $f8, 64($a1)
- l.d $f9, 72($a1)
- l.d $f10, 80($a1)
- l.d $f11, 88($a1)
- l.d $f12, 96($a1)
- l.d $f13, 104($a1)
- l.d $f14, 112($a1)
- l.d $f15, 120($a1)
- l.d $f16, 128($a1)
- l.d $f17, 136($a1)
- l.d $f18, 144($a1)
- l.d $f19, 152($a1)
- l.d $f20, 160($a1)
- l.d $f21, 168($a1)
- l.d $f22, 176($a1)
- l.d $f23, 184($a1)
- l.d $f24, 192($a1)
- l.d $f25, 200($a1)
- l.d $f26, 208($a1)
- l.d $f27, 216($a1)
- l.d $f28, 224($a1)
- l.d $f29, 232($a1)
- l.d $f30, 240($a1)
- l.d $f31, 248($a1)
- .set push
- .set nomacro
- .set noat
-# no need to load zero
- ld $at, 8($a0)
- .set pop
- ld $v0, 16($a0)
- ld $v1, 24($a0)
-# a0 has to be loaded last
- ld $a1, 40($a0)
- ld $a2, 48($a0)
- ld $a3, 56($a0)
- ld $a4, 64($a0)
- ld $a5, 72($a0)
- ld $a6, 80($a0)
- ld $a7, 88($a0)
- ld $t0, 96($a0)
- ld $t1, 104($a0)
- ld $t2, 112($a0)
- ld $t3, 120($a0)
- ld $s0, 128($a0)
- ld $s1, 136($a0)
- ld $s2, 144($a0)
- ld $s3, 152($a0)
- ld $s4, 160($a0)
- ld $s5, 168($a0)
- ld $s6, 176($a0)
- ld $s7, 184($a0)
- ld $t8, 192($a0)
- ld $t9, 200($a0)
-# no need to load k0, k1
- ld $gp, 224($a0)
- ld $sp, 232($a0)
- ld $s8, 240($a0)
- ld $ra, 248($a0)
- ld $a0, 32($a0)
- move $v0, $zero # clear result registers v0 and v1
- jalr $zero, $t9 # do long jump (do not use ra, it must not be clobbered)
- move $v1, $zero
-END art_quick_do_long_jump
-
- /*
- * Called by managed code, saves most registers (forms basis of long jump
- * context) and passes the bottom of the stack.
- * artDeliverExceptionFromCode will place the callee save Method* at
- * the bottom of the thread. On entry a0 holds Throwable*
- */
-ENTRY art_quick_deliver_exception
- SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
- dla $t9, artDeliverExceptionFromCode
- jalr $zero, $t9 # artDeliverExceptionFromCode(Throwable*, Thread*)
- move $a1, rSELF # pass Thread::Current
-END art_quick_deliver_exception
-
- /*
- * Called by managed code to create and deliver a NullPointerException
- */
- .extern artThrowNullPointerExceptionFromCode
-ENTRY_NO_GP art_quick_throw_null_pointer_exception
- // Note that setting up $gp does not rely on $t9 here, so branching here directly is OK,
- // even after clobbering any registers we don't need to preserve, such as $gp or $t0.
- SETUP_SAVE_EVERYTHING_FRAME
- dla $t9, artThrowNullPointerExceptionFromCode
- jalr $zero, $t9 # artThrowNullPointerExceptionFromCode(Thread*)
- move $a0, rSELF # pass Thread::Current
-END art_quick_throw_null_pointer_exception
-
- /*
- * Call installed by a signal handler to create and deliver a NullPointerException
- */
- .extern artThrowNullPointerExceptionFromSignal
-ENTRY_NO_GP_CUSTOM_CFA art_quick_throw_null_pointer_exception_from_signal, FRAME_SIZE_SAVE_EVERYTHING
- SETUP_SAVE_EVERYTHING_FRAME_DECREMENTED_SP
- # Retrieve the fault address from the padding where the signal handler stores it.
- ld $a0, (__SIZEOF_POINTER__)($sp)
- dla $t9, artThrowNullPointerExceptionFromSignal
- jalr $zero, $t9 # artThrowNullPointerExceptionFromSignal(uinptr_t, Thread*)
- move $a1, rSELF # pass Thread::Current
-END art_quick_throw_null_pointer_exception_from_signal
-
- /*
- * Called by managed code to create and deliver an ArithmeticException
- */
- .extern artThrowDivZeroFromCode
-ENTRY_NO_GP art_quick_throw_div_zero
- SETUP_SAVE_EVERYTHING_FRAME
- dla $t9, artThrowDivZeroFromCode
- jalr $zero, $t9 # artThrowDivZeroFromCode(Thread*)
- move $a0, rSELF # pass Thread::Current
-END art_quick_throw_div_zero
-
- /*
- * Called by managed code to create and deliver an
- * ArrayIndexOutOfBoundsException
- */
- .extern artThrowArrayBoundsFromCode
-ENTRY_NO_GP art_quick_throw_array_bounds
- // Note that setting up $gp does not rely on $t9 here, so branching here directly is OK,
- // even after clobbering any registers we don't need to preserve, such as $gp or $t0.
- SETUP_SAVE_EVERYTHING_FRAME
- dla $t9, artThrowArrayBoundsFromCode
- jalr $zero, $t9 # artThrowArrayBoundsFromCode(index, limit, Thread*)
- move $a2, rSELF # pass Thread::Current
-END art_quick_throw_array_bounds
-
- /*
- * Called by managed code to create and deliver a StringIndexOutOfBoundsException
- * as if thrown from a call to String.charAt().
- */
- .extern artThrowStringBoundsFromCode
-ENTRY_NO_GP art_quick_throw_string_bounds
- SETUP_SAVE_EVERYTHING_FRAME
- dla $t9, artThrowStringBoundsFromCode
- jalr $zero, $t9 # artThrowStringBoundsFromCode(index, limit, Thread*)
- move $a2, rSELF # pass Thread::Current
-END art_quick_throw_string_bounds
-
- /*
- * Called by managed code to create and deliver a StackOverflowError.
- */
- .extern artThrowStackOverflowFromCode
-ENTRY art_quick_throw_stack_overflow
- SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
- dla $t9, artThrowStackOverflowFromCode
- jalr $zero, $t9 # artThrowStackOverflowFromCode(Thread*)
- move $a0, rSELF # pass Thread::Current
-END art_quick_throw_stack_overflow
-
- /*
- * All generated callsites for interface invokes and invocation slow paths will load arguments
- * as usual - except instead of loading arg0/$a0 with the target Method*, arg0/$a0 will contain
- * the method_idx. This wrapper will save arg1-arg3, load the caller's Method*, align the
- * stack and call the appropriate C helper.
- * NOTE: "this" is first visable argument of the target, and so can be found in arg1/$a1.
- *
- * The helper will attempt to locate the target and return a 128-bit result in $v0/$v1 consisting
- * of the target Method* in $v0 and method->code_ in $v1.
- *
- * If unsuccessful, the helper will return null/null. There will be a pending exception in the
- * thread and we branch to another stub to deliver it.
- *
- * On success this wrapper will restore arguments and *jump* to the target, leaving the ra
- * pointing back to the original caller.
- */
-.macro INVOKE_TRAMPOLINE_BODY cxx_name, save_s4_thru_s8_only=0
- .extern \cxx_name
- SETUP_SAVE_REFS_AND_ARGS_FRAME \save_s4_thru_s8_only # save callee saves in case
- # allocation triggers GC
- move $a2, rSELF # pass Thread::Current
- jal \cxx_name # (method_idx, this, Thread*, $sp)
- move $a3, $sp # pass $sp
- move $a0, $v0 # save target Method*
- move $t9, $v1 # save $v0->code_
- RESTORE_SAVE_REFS_AND_ARGS_FRAME
- beq $v0, $zero, 1f
- nop
- jalr $zero, $t9
- nop
-1:
- DELIVER_PENDING_EXCEPTION
-.endm
-.macro INVOKE_TRAMPOLINE c_name, cxx_name
-ENTRY \c_name
- INVOKE_TRAMPOLINE_BODY \cxx_name
-END \c_name
-.endm
-
-INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck
-
-INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck
-INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvokeDirectTrampolineWithAccessCheck
-INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck
-INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck
-
- # On entry:
- # t0 = shorty
- # t1 = ptr to arg_array
- # t2 = number of argument bytes remain
- # v0 = ptr to stack frame where to copy arg_array
- # This macro modifies t3, t9 and v0
-.macro LOOP_OVER_SHORTY_LOADING_REG gpu, fpu, label
- lbu $t3, 0($t0) # get argument type from shorty
- beqz $t3, \label
- daddiu $t0, 1
- li $t9, 68 # put char 'D' into t9
- beq $t9, $t3, 1f # branch if result type char == 'D'
- li $t9, 70 # put char 'F' into t9
- beq $t9, $t3, 2f # branch if result type char == 'F'
- li $t9, 74 # put char 'J' into t9
- beq $t9, $t3, 3f # branch if result type char == 'J'
- nop
- lw $\gpu, 0($t1)
- sw $\gpu, 0($v0)
- daddiu $v0, 4
- daddiu $t1, 4
- b 4f
- daddiu $t2, -4 # delay slot
-
-1: # found double
- lwu $t3, 0($t1)
- mtc1 $t3, $\fpu
- sw $t3, 0($v0)
- lwu $t3, 4($t1)
- mthc1 $t3, $\fpu
- sw $t3, 4($v0)
- daddiu $v0, 8
- daddiu $t1, 8
- b 4f
- daddiu $t2, -8 # delay slot
-
-2: # found float
- lwu $t3, 0($t1)
- mtc1 $t3, $\fpu
- sw $t3, 0($v0)
- daddiu $v0, 4
- daddiu $t1, 4
- b 4f
- daddiu $t2, -4 # delay slot
-
-3: # found long (8 bytes)
- lwu $t3, 0($t1)
- sw $t3, 0($v0)
- lwu $t9, 4($t1)
- sw $t9, 4($v0)
- dsll $t9, $t9, 32
- or $\gpu, $t9, $t3
- daddiu $v0, 8
- daddiu $t1, 8
- daddiu $t2, -8
-4:
-.endm
-
- /*
- * Invocation stub for quick code.
- * On entry:
- * a0 = method pointer
- * a1 = argument array that must at least contain the this ptr.
- * a2 = size of argument array in bytes
- * a3 = (managed) thread pointer
- * a4 = JValue* result
- * a5 = shorty
- */
-ENTRY_NO_GP art_quick_invoke_stub
- # push a4, a5, s0(rSUSPEND), s1(rSELF), s8, ra onto the stack
- daddiu $sp, $sp, -48
- .cfi_adjust_cfa_offset 48
- sd $ra, 40($sp)
- .cfi_rel_offset 31, 40
- sd $s8, 32($sp)
- .cfi_rel_offset 30, 32
- sd $s1, 24($sp)
- .cfi_rel_offset 17, 24
- sd $s0, 16($sp)
- .cfi_rel_offset 16, 16
- sd $a5, 8($sp)
- .cfi_rel_offset 9, 8
- sd $a4, 0($sp)
- .cfi_rel_offset 8, 0
-
- move $s1, $a3 # move managed thread pointer into s1 (rSELF)
- move $s8, $sp # save sp in s8 (fp)
-
- daddiu $t3, $a2, 24 # add 8 for ArtMethod* and 16 for stack alignment
- dsrl $t3, $t3, 4 # shift the frame size right 4
- dsll $t3, $t3, 4 # shift the frame size left 4 to align to 16 bytes
- dsubu $sp, $sp, $t3 # reserve stack space for argument array
-
- daddiu $t0, $a5, 1 # t0 = shorty[1] (skip 1 for return type)
- daddiu $t1, $a1, 4 # t1 = ptr to arg_array[4] (skip this ptr)
- daddiu $t2, $a2, -4 # t2 = number of argument bytes remain (skip this ptr)
- daddiu $v0, $sp, 12 # v0 points to where to copy arg_array
- LOOP_OVER_SHORTY_LOADING_REG a2, f14, call_fn
- LOOP_OVER_SHORTY_LOADING_REG a3, f15, call_fn
- LOOP_OVER_SHORTY_LOADING_REG a4, f16, call_fn
- LOOP_OVER_SHORTY_LOADING_REG a5, f17, call_fn
- LOOP_OVER_SHORTY_LOADING_REG a6, f18, call_fn
- LOOP_OVER_SHORTY_LOADING_REG a7, f19, call_fn
-
- # copy arguments onto stack (t2 should be multiples of 4)
- ble $t2, $zero, call_fn # t2 = number of argument bytes remain
-1:
- lw $t3, 0($t1) # load from argument array
- daddiu $t1, $t1, 4
- sw $t3, 0($v0) # save to stack
- daddiu $t2, -4
- bgt $t2, $zero, 1b # t2 = number of argument bytes remain
- daddiu $v0, $v0, 4
-
-call_fn:
- # call method (a0 and a1 have been untouched)
- lwu $a1, 0($a1) # make a1 = this ptr
- sw $a1, 8($sp) # copy this ptr (skip 8 bytes for ArtMethod*)
- sd $zero, 0($sp) # store null for ArtMethod* at bottom of frame
- ld $t9, ART_METHOD_QUICK_CODE_OFFSET_64($a0) # get pointer to the code
- jalr $t9 # call the method
- nop
- move $sp, $s8 # restore sp
-
- # pop a4, a5, s1(rSELF), s8, ra off of the stack
- ld $a4, 0($sp)
- .cfi_restore 8
- ld $a5, 8($sp)
- .cfi_restore 9
- ld $s0, 16($sp)
- .cfi_restore 16
- ld $s1, 24($sp)
- .cfi_restore 17
- ld $s8, 32($sp)
- .cfi_restore 30
- ld $ra, 40($sp)
- .cfi_restore 31
- daddiu $sp, $sp, 48
- .cfi_adjust_cfa_offset -48
-
- # a4 = JValue* result
- # a5 = shorty string
- lbu $t1, 0($a5) # get result type from shorty
- li $t2, 68 # put char 'D' into t2
- beq $t1, $t2, 1f # branch if result type char == 'D'
- li $t3, 70 # put char 'F' into t3
- beq $t1, $t3, 1f # branch if result type char == 'F'
- sw $v0, 0($a4) # store the result
- dsrl $v1, $v0, 32
- jalr $zero, $ra
- sw $v1, 4($a4) # store the other half of the result
-1:
- mfc1 $v0, $f0
- mfhc1 $v1, $f0
- sw $v0, 0($a4) # store the result
- jalr $zero, $ra
- sw $v1, 4($a4) # store the other half of the result
-END art_quick_invoke_stub
-
- /*
- * Invocation static stub for quick code.
- * On entry:
- * a0 = method pointer
- * a1 = argument array that must at least contain the this ptr.
- * a2 = size of argument array in bytes
- * a3 = (managed) thread pointer
- * a4 = JValue* result
- * a5 = shorty
- */
-ENTRY_NO_GP art_quick_invoke_static_stub
-
- # push a4, a5, s0(rSUSPEND), s1(rSELF), s8, ra, onto the stack
- daddiu $sp, $sp, -48
- .cfi_adjust_cfa_offset 48
- sd $ra, 40($sp)
- .cfi_rel_offset 31, 40
- sd $s8, 32($sp)
- .cfi_rel_offset 30, 32
- sd $s1, 24($sp)
- .cfi_rel_offset 17, 24
- sd $s0, 16($sp)
- .cfi_rel_offset 16, 16
- sd $a5, 8($sp)
- .cfi_rel_offset 9, 8
- sd $a4, 0($sp)
- .cfi_rel_offset 8, 0
-
- move $s1, $a3 # move managed thread pointer into s1 (rSELF)
- move $s8, $sp # save sp in s8 (fp)
-
- daddiu $t3, $a2, 24 # add 8 for ArtMethod* and 16 for stack alignment
- dsrl $t3, $t3, 4 # shift the frame size right 4
- dsll $t3, $t3, 4 # shift the frame size left 4 to align to 16 bytes
- dsubu $sp, $sp, $t3 # reserve stack space for argument array
-
- daddiu $t0, $a5, 1 # t0 = shorty[1] (skip 1 for return type)
- move $t1, $a1 # t1 = arg_array
- move $t2, $a2 # t2 = number of argument bytes remain
- daddiu $v0, $sp, 8 # v0 points to where to copy arg_array
- LOOP_OVER_SHORTY_LOADING_REG a1, f13, call_sfn
- LOOP_OVER_SHORTY_LOADING_REG a2, f14, call_sfn
- LOOP_OVER_SHORTY_LOADING_REG a3, f15, call_sfn
- LOOP_OVER_SHORTY_LOADING_REG a4, f16, call_sfn
- LOOP_OVER_SHORTY_LOADING_REG a5, f17, call_sfn
- LOOP_OVER_SHORTY_LOADING_REG a6, f18, call_sfn
- LOOP_OVER_SHORTY_LOADING_REG a7, f19, call_sfn
-
- # copy arguments onto stack (t2 should be multiples of 4)
- ble $t2, $zero, call_sfn # t2 = number of argument bytes remain
-1:
- lw $t3, 0($t1) # load from argument array
- daddiu $t1, $t1, 4
- sw $t3, 0($v0) # save to stack
- daddiu $t2, -4
- bgt $t2, $zero, 1b # t2 = number of argument bytes remain
- daddiu $v0, $v0, 4
-
-call_sfn:
- # call method (a0 has been untouched)
- sd $zero, 0($sp) # store null for ArtMethod* at bottom of frame
- ld $t9, ART_METHOD_QUICK_CODE_OFFSET_64($a0) # get pointer to the code
- jalr $t9 # call the method
- nop
- move $sp, $s8 # restore sp
-
- # pop a4, a5, s0(rSUSPEND), s1(rSELF), s8, ra off of the stack
- ld $a4, 0($sp)
- .cfi_restore 8
- ld $a5, 8($sp)
- .cfi_restore 9
- ld $s0, 16($sp)
- .cfi_restore 16
- ld $s1, 24($sp)
- .cfi_restore 17
- ld $s8, 32($sp)
- .cfi_restore 30
- ld $ra, 40($sp)
- .cfi_restore 31
- daddiu $sp, $sp, 48
- .cfi_adjust_cfa_offset -48
-
- # a4 = JValue* result
- # a5 = shorty string
- lbu $t1, 0($a5) # get result type from shorty
- li $t2, 68 # put char 'D' into t2
- beq $t1, $t2, 1f # branch if result type char == 'D'
- li $t3, 70 # put char 'F' into t3
- beq $t1, $t3, 1f # branch if result type char == 'F'
- sw $v0, 0($a4) # store the result
- dsrl $v1, $v0, 32
- jalr $zero, $ra
- sw $v1, 4($a4) # store the other half of the result
-1:
- mfc1 $v0, $f0
- mfhc1 $v1, $f0
- sw $v0, 0($a4) # store the result
- jalr $zero, $ra
- sw $v1, 4($a4) # store the other half of the result
-END art_quick_invoke_static_stub
-
- /*
- * Entry from managed code that calls artHandleFillArrayDataFromCode and
- * delivers exception on failure.
- */
- .extern artHandleFillArrayDataFromCode
-ENTRY art_quick_handle_fill_data
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case exception allocation triggers GC
- ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artHandleFillArrayDataFromCode # (payload offset, Array*, method, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_handle_fill_data
-
- /*
- * Entry from managed code that calls artLockObjectFromCode, may block for GC.
- */
- .extern artLockObjectFromCode
-ENTRY_NO_GP art_quick_lock_object
- beqzc $a0, art_quick_throw_null_pointer_exception
- li $t8, LOCK_WORD_THIN_LOCK_COUNT_ONE
- li $t3, LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED
-.Lretry_lock:
- lw $t0, THREAD_ID_OFFSET(rSELF) # TODO: Can the thread ID really change during the loop?
- ll $t1, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
- and $t2, $t1, $t3 # zero the gc bits
- bnezc $t2, .Lnot_unlocked # already thin locked
- # Unlocked case - $t1: original lock word that's zero except for the read barrier bits.
- or $t2, $t1, $t0 # $t2 holds thread id with count of 0 with preserved read barrier bits
- sc $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
- beqzc $t2, .Lretry_lock # store failed, retry
- sync # full (LoadLoad|LoadStore) memory barrier
- jic $ra, 0
-.Lnot_unlocked:
- # $t1: original lock word, $t0: thread_id with count of 0 and zero read barrier bits
- srl $t2, $t1, LOCK_WORD_STATE_SHIFT
- bnezc $t2, .Lslow_lock # if either of the top two bits are set, go slow path
- xor $t2, $t1, $t0 # lock_word.ThreadId() ^ self->ThreadId()
- andi $t2, $t2, 0xFFFF # zero top 16 bits
- bnezc $t2, .Lslow_lock # lock word and self thread id's match -> recursive lock
- # otherwise contention, go to slow path
- and $t2, $t1, $t3 # zero the gc bits
- addu $t2, $t2, $t8 # increment count in lock word
- srl $t2, $t2, LOCK_WORD_STATE_SHIFT # if the first gc state bit is set, we overflowed.
- bnezc $t2, .Lslow_lock # if we overflow the count go slow path
- addu $t2, $t1, $t8 # increment count for real
- sc $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
- beqzc $t2, .Lretry_lock # store failed, retry
- nop
- jic $ra, 0
-.Lslow_lock:
- .cpsetup $t9, $t8, art_quick_lock_object
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case we block
- jal artLockObjectFromCode # (Object* obj, Thread*)
- move $a1, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_lock_object
-
-ENTRY_NO_GP art_quick_lock_object_no_inline
- beq $a0, $zero, art_quick_throw_null_pointer_exception
- nop
- .cpsetup $t9, $t8, art_quick_lock_object_no_inline
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case we block
- jal artLockObjectFromCode # (Object* obj, Thread*)
- move $a1, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_lock_object_no_inline
-
- /*
- * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure.
- */
- .extern artUnlockObjectFromCode
-ENTRY_NO_GP art_quick_unlock_object
- beqzc $a0, art_quick_throw_null_pointer_exception
- li $t8, LOCK_WORD_THIN_LOCK_COUNT_ONE
- li $t3, LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED
-.Lretry_unlock:
-#ifndef USE_READ_BARRIER
- lw $t1, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
-#else
- ll $t1, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0) # Need to use atomic read-modify-write for read barrier
-#endif
- srl $t2, $t1, LOCK_WORD_STATE_SHIFT
- bnezc $t2, .Lslow_unlock # if either of the top two bits are set, go slow path
- lw $t0, THREAD_ID_OFFSET(rSELF)
- and $t2, $t1, $t3 # zero the gc bits
- xor $t2, $t2, $t0 # lock_word.ThreadId() ^ self->ThreadId()
- andi $t2, $t2, 0xFFFF # zero top 16 bits
- bnezc $t2, .Lslow_unlock # do lock word and self thread id's match?
- and $t2, $t1, $t3 # zero the gc bits
- bgeuc $t2, $t8, .Lrecursive_thin_unlock
- # transition to unlocked
- nor $t2, $zero, $t3 # $t2 = LOCK_WORD_GC_STATE_MASK_SHIFTED
- and $t2, $t1, $t2 # $t2: zero except for the preserved gc bits
- sync # full (LoadStore|StoreStore) memory barrier
-#ifndef USE_READ_BARRIER
- sw $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
-#else
- sc $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
- beqzc $t2, .Lretry_unlock # store failed, retry
- nop
-#endif
- jic $ra, 0
-.Lrecursive_thin_unlock:
- # t1: original lock word
- subu $t2, $t1, $t8 # decrement count
-#ifndef USE_READ_BARRIER
- sw $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
-#else
- sc $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
- beqzc $t2, .Lretry_unlock # store failed, retry
- nop
-#endif
- jic $ra, 0
-.Lslow_unlock:
- .cpsetup $t9, $t8, art_quick_unlock_object
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case exception allocation triggers GC
- jal artUnlockObjectFromCode # (Object* obj, Thread*)
- move $a1, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_unlock_object
-
-ENTRY_NO_GP art_quick_unlock_object_no_inline
- beq $a0, $zero, art_quick_throw_null_pointer_exception
- nop
- .cpsetup $t9, $t8, art_quick_unlock_object_no_inline
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case exception allocation triggers GC
- jal artUnlockObjectFromCode # (Object* obj, Thread*)
- move $a1, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_unlock_object_no_inline
-
- /*
- * Entry from managed code that calls artInstanceOfFromCode and delivers exception on failure.
- */
- .extern artInstanceOfFromCode
- .extern artThrowClassCastExceptionForObject
-ENTRY art_quick_check_instance_of
- // Type check using the bit string passes null as the target class. In that case just throw.
- beqzc $a1, .Lthrow_class_cast_exception_for_bitstring_check
-
- daddiu $sp, $sp, -32
- .cfi_adjust_cfa_offset 32
- sd $ra, 24($sp)
- .cfi_rel_offset 31, 24
- sd $t9, 16($sp)
- sd $a1, 8($sp)
- sd $a0, 0($sp)
- jal artInstanceOfFromCode
- .cpreturn # Restore gp from t8 in branch delay slot.
- # t8 may be clobbered in artIsAssignableFromCode.
- beq $v0, $zero, .Lthrow_class_cast_exception
- ld $ra, 24($sp)
- jalr $zero, $ra
- daddiu $sp, $sp, 32
- .cfi_adjust_cfa_offset -32
-
-.Lthrow_class_cast_exception:
- ld $t9, 16($sp)
- ld $a1, 8($sp)
- ld $a0, 0($sp)
- daddiu $sp, $sp, 32
- .cfi_adjust_cfa_offset -32
-
-.Lthrow_class_cast_exception_for_bitstring_check:
- SETUP_GP
- SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
- dla $t9, artThrowClassCastExceptionForObject
- jalr $zero, $t9 # artThrowClassCastException (Object*, Class*, Thread*)
- move $a2, rSELF # pass Thread::Current
-END art_quick_check_instance_of
-
-
- /*
- * Restore rReg's value from offset($sp) if rReg is not the same as rExclude.
- * nReg is the register number for rReg.
- */
-.macro POP_REG_NE rReg, nReg, offset, rExclude
- .ifnc \rReg, \rExclude
- ld \rReg, \offset($sp) # restore rReg
- .cfi_restore \nReg
- .endif
-.endm
-
- /*
- * Macro to insert read barrier, only used in art_quick_aput_obj.
- * rObj and rDest are registers, offset is a defined literal such as MIRROR_OBJECT_CLASS_OFFSET.
- * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path.
- */
-.macro READ_BARRIER rDest, rObj, offset
-#ifdef USE_READ_BARRIER
- # saved registers used in art_quick_aput_obj: a0-a2, t0-t1, t9, ra. 16B-aligned.
- daddiu $sp, $sp, -64
- .cfi_adjust_cfa_offset 64
- sd $ra, 56($sp)
- .cfi_rel_offset 31, 56
- sd $t9, 48($sp)
- .cfi_rel_offset 25, 48
- sd $t1, 40($sp)
- .cfi_rel_offset 13, 40
- sd $t0, 32($sp)
- .cfi_rel_offset 12, 32
- sd $a2, 16($sp) # padding slot at offset 24 (padding can be any slot in the 64B)
- .cfi_rel_offset 6, 16
- sd $a1, 8($sp)
- .cfi_rel_offset 5, 8
- sd $a0, 0($sp)
- .cfi_rel_offset 4, 0
-
- # move $a0, \rRef # pass ref in a0 (no-op for now since parameter ref is unused)
- .ifnc \rObj, $a1
- move $a1, \rObj # pass rObj
- .endif
- daddiu $a2, $zero, \offset # pass offset
- jal artReadBarrierSlow # artReadBarrierSlow(ref, rObj, offset)
- .cpreturn # Restore gp from t8 in branch delay slot.
- # t8 may be clobbered in artReadBarrierSlow.
- # No need to unpoison return value in v0, artReadBarrierSlow() would do the unpoisoning.
- move \rDest, $v0 # save return value in rDest
- # (rDest cannot be v0 in art_quick_aput_obj)
-
- ld $a0, 0($sp) # restore registers except rDest
- # (rDest can only be t0 or t1 in art_quick_aput_obj)
- .cfi_restore 4
- ld $a1, 8($sp)
- .cfi_restore 5
- ld $a2, 16($sp)
- .cfi_restore 6
- POP_REG_NE $t0, 12, 32, \rDest
- POP_REG_NE $t1, 13, 40, \rDest
- ld $t9, 48($sp)
- .cfi_restore 25
- ld $ra, 56($sp) # restore $ra
- .cfi_restore 31
- daddiu $sp, $sp, 64
- .cfi_adjust_cfa_offset -64
- SETUP_GP # set up gp because we are not returning
-#else
- lwu \rDest, \offset(\rObj)
- UNPOISON_HEAP_REF \rDest
-#endif // USE_READ_BARRIER
-.endm
-
-ENTRY art_quick_aput_obj
- beq $a2, $zero, .Ldo_aput_null
- nop
- READ_BARRIER $t0, $a0, MIRROR_OBJECT_CLASS_OFFSET
- READ_BARRIER $t1, $a2, MIRROR_OBJECT_CLASS_OFFSET
- READ_BARRIER $t0, $t0, MIRROR_CLASS_COMPONENT_TYPE_OFFSET
- bne $t1, $t0, .Lcheck_assignability # value's type == array's component type - trivial assignability
- nop
-.Ldo_aput:
- dsll $a1, $a1, 2
- daddu $t0, $a0, $a1
- POISON_HEAP_REF $a2
- sw $a2, MIRROR_OBJECT_ARRAY_DATA_OFFSET($t0)
- ld $t0, THREAD_CARD_TABLE_OFFSET(rSELF)
- dsrl $t1, $a0, CARD_TABLE_CARD_SHIFT
- daddu $t1, $t1, $t0
- sb $t0, ($t1)
- jalr $zero, $ra
- .cpreturn # Restore gp from t8 in branch delay slot.
-.Ldo_aput_null:
- dsll $a1, $a1, 2
- daddu $t0, $a0, $a1
- sw $a2, MIRROR_OBJECT_ARRAY_DATA_OFFSET($t0)
- jalr $zero, $ra
- .cpreturn # Restore gp from t8 in branch delay slot.
-.Lcheck_assignability:
- daddiu $sp, $sp, -64
- .cfi_adjust_cfa_offset 64
- sd $ra, 56($sp)
- .cfi_rel_offset 31, 56
- sd $t9, 24($sp)
- sd $a2, 16($sp)
- sd $a1, 8($sp)
- sd $a0, 0($sp)
- move $a1, $t1
- move $a0, $t0
- jal artIsAssignableFromCode # (Class*, Class*)
- .cpreturn # Restore gp from t8 in branch delay slot.
- # t8 may be clobbered in artIsAssignableFromCode.
- ld $ra, 56($sp)
- ld $t9, 24($sp)
- ld $a2, 16($sp)
- ld $a1, 8($sp)
- ld $a0, 0($sp)
- daddiu $sp, $sp, 64
- .cfi_adjust_cfa_offset -64
- SETUP_GP
- bne $v0, $zero, .Ldo_aput
- nop
- SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
- move $a1, $a2
- dla $t9, artThrowArrayStoreException
- jalr $zero, $t9 # artThrowArrayStoreException(Class*, Class*, Thread*)
- move $a2, rSELF # pass Thread::Current
-END art_quick_aput_obj
-
-// Macros taking opportunity of code similarities for downcalls.
-.macro ONE_ARG_REF_DOWNCALL name, entrypoint, return, extend=0
- .extern \entrypoint
-ENTRY \name
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- dla $t9, \entrypoint
- jalr $t9 # (field_idx, Thread*)
- move $a1, rSELF # pass Thread::Current
- .if \extend
- sll $v0, $v0, 0 # sign-extend 32-bit result
- .endif
- \return # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
-END \name
-.endm
-
-.macro TWO_ARG_REF_DOWNCALL name, entrypoint, return, extend=0
- .extern \entrypoint
-ENTRY \name
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- dla $t9, \entrypoint
- jalr $t9 # (field_idx, Object*, Thread*) or
- # (field_idx, new_val, Thread*)
- move $a2, rSELF # pass Thread::Current
- .if \extend
- sll $v0, $v0, 0 # sign-extend 32-bit result
- .endif
- \return # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
-END \name
-.endm
-
-.macro THREE_ARG_REF_DOWNCALL name, entrypoint, return, extend=0
- .extern \entrypoint
-ENTRY \name
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- dla $t9, \entrypoint
- jalr $t9 # (field_idx, Object*, new_val, Thread*)
- move $a3, rSELF # pass Thread::Current
- .if \extend
- sll $v0, $v0, 0 # sign-extend 32-bit result
- .endif
- \return # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
-END \name
-.endm
-
- /*
- * Called by managed code to resolve a static/instance field and load/store a value.
- *
- * Note: Functions `art{Get,Set}<Kind>{Static,Instance}FromCompiledCode` are
- * defined with a macro in runtime/entrypoints/quick/quick_field_entrypoints.cc.
- */
-ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCompiledCode, RETURN_IF_NO_EXCEPTION, 1
-ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION, 1
-TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCompiledCode, RETURN_IF_ZERO
-TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCompiledCode, RETURN_IF_ZERO
-TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCompiledCode, RETURN_IF_ZERO
-TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCompiledCode, RETURN_IF_ZERO
-TWO_ARG_REF_DOWNCALL art_quick_set64_static, artSet64StaticFromCompiledCode, RETURN_IF_ZERO
-THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCompiledCode, RETURN_IF_ZERO
-THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCompiledCode, RETURN_IF_ZERO
-THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCompiledCode, RETURN_IF_ZERO
-THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCompiledCode, RETURN_IF_ZERO
-THREE_ARG_REF_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCompiledCode, RETURN_IF_ZERO
-
-// Macro to facilitate adding new allocation entrypoints.
-.macro ONE_ARG_DOWNCALL name, entrypoint, return
- .extern \entrypoint
-ENTRY \name
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- jal \entrypoint
- move $a1, rSELF # pass Thread::Current
- \return
-END \name
-.endm
-
-// Macro to facilitate adding new allocation entrypoints.
-.macro TWO_ARG_DOWNCALL name, entrypoint, return
- .extern \entrypoint
-ENTRY \name
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- jal \entrypoint
- move $a2, rSELF # pass Thread::Current
- \return
-END \name
-.endm
-
-.macro THREE_ARG_DOWNCALL name, entrypoint, return
- .extern \entrypoint
-ENTRY \name
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- jal \entrypoint
- move $a3, rSELF # pass Thread::Current
- \return
-END \name
-.endm
-
-.macro FOUR_ARG_DOWNCALL name, entrypoint, return
- .extern \entrypoint
-ENTRY \name
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- jal \entrypoint
- move $a4, rSELF # pass Thread::Current
- \return
-END \name
-.endm
-
-// Generate the allocation entrypoints for each allocator.
-GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_TLAB_ALLOCATORS
-// Comment out allocators that have mips64 specific asm.
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_OBJECT(_region_tlab, RegionTLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_tlab, RegionTLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_region_tlab, RegionTLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_region_tlab, RegionTLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_region_tlab, RegionTLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
-
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_OBJECT(_tlab, TLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab, TLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_tlab, TLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_tlab, TLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_tlab, TLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab, TLAB)
-
-// A hand-written override for:
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc, RosAlloc)
-.macro ART_QUICK_ALLOC_OBJECT_ROSALLOC c_name, cxx_name, isInitialized
-ENTRY_NO_GP \c_name
- # Fast path rosalloc allocation
- # a0: type
- # s1: Thread::Current
- # -----------------------------
- # t1: object size
- # t2: rosalloc run
- # t3: thread stack top offset
- # a4: thread stack bottom offset
- # v0: free list head
- #
- # a5, a6 : temps
- ld $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1) # Check if thread local allocation stack
- ld $a4, THREAD_LOCAL_ALLOC_STACK_END_OFFSET($s1) # has any room left.
- bgeuc $t3, $a4, .Lslow_path_\c_name
-
- lwu $t1, MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET($a0) # Load object size (t1).
- li $a5, ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE # Check if size is for a thread local
- # allocation. Also does the initialized
- # and finalizable checks.
- # When isInitialized == 0, then the class is potentially not yet initialized.
- # If the class is not yet initialized, the object size will be very large to force the branch
- # below to be taken.
- #
- # See InitializeClassVisitors in class-inl.h for more details.
- bltuc $a5, $t1, .Lslow_path_\c_name
-
- # Compute the rosalloc bracket index from the size. Since the size is already aligned we can
- # combine the two shifts together.
- dsrl $t1, $t1, (ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT - POINTER_SIZE_SHIFT)
-
- daddu $t2, $t1, $s1
- ld $t2, (THREAD_ROSALLOC_RUNS_OFFSET - __SIZEOF_POINTER__)($t2) # Load rosalloc run (t2).
-
- # Load the free list head (v0).
- # NOTE: this will be the return val.
- ld $v0, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)
- beqzc $v0, .Lslow_path_\c_name
-
- # Load the next pointer of the head and update the list head with the next pointer.
- ld $a5, ROSALLOC_SLOT_NEXT_OFFSET($v0)
- sd $a5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)
-
- # Store the class pointer in the header. This also overwrites the first pointer. The offsets are
- # asserted to match.
-
-#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
-#error "Class pointer needs to overwrite next pointer."
-#endif
-
- POISON_HEAP_REF $a0
- sw $a0, MIRROR_OBJECT_CLASS_OFFSET($v0)
-
- # Push the new object onto the thread local allocation stack and increment the thread local
- # allocation stack top.
- sw $v0, 0($t3)
- daddiu $t3, $t3, COMPRESSED_REFERENCE_SIZE
- sd $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1)
-
- # Decrement the size of the free list.
- lw $a5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)
- addiu $a5, $a5, -1
- sw $a5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)
-
-.if \isInitialized == 0
- # This barrier is only necessary when the allocation also requires a class initialization check.
- #
- # If the class is already observably initialized, then new-instance allocations are protected
- # from publishing by the compiler which inserts its own StoreStore barrier.
- sync # Fence.
-.endif
- jic $ra, 0
-
-.Lslow_path_\c_name:
- SETUP_GP
- SETUP_SAVE_REFS_ONLY_FRAME
- jal \cxx_name
- move $a1 ,$s1 # Pass self as argument.
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END \c_name
-.endm
-
-ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_resolved_rosalloc, artAllocObjectFromCodeResolvedRosAlloc, /* isInitialized */ 0
-ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, artAllocObjectFromCodeInitializedRosAlloc, /* isInitialized */ 1
-
-// The common fast path code for art_quick_alloc_object_resolved/initialized_tlab
-// and art_quick_alloc_object_resolved/initialized_region_tlab.
-//
-// a0: type, s1(rSELF): Thread::Current
-// Need to preserve a0 to the slow path.
-//
-// If isInitialized=1 then the compiler assumes the object's class has already been initialized.
-// If isInitialized=0 the compiler can only assume it's been at least resolved.
-.macro ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH slowPathLabel isInitialized
- ld $v0, THREAD_LOCAL_POS_OFFSET(rSELF) # Load thread_local_pos.
- ld $a2, THREAD_LOCAL_END_OFFSET(rSELF) # Load thread_local_end.
- lwu $t0, MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET($a0) # Load the object size.
- daddu $a3, $v0, $t0 # Add object size to tlab pos.
-
- # When isInitialized == 0, then the class is potentially not yet initialized.
- # If the class is not yet initialized, the object size will be very large to force the branch
- # below to be taken.
- #
- # See InitializeClassVisitors in class-inl.h for more details.
- bltuc $a2, $a3, \slowPathLabel # Check if it fits, overflow works since the
- # tlab pos and end are 32 bit values.
- # "Point of no slow path". Won't go to the slow path from here on.
- sd $a3, THREAD_LOCAL_POS_OFFSET(rSELF) # Store new thread_local_pos.
- ld $a2, THREAD_LOCAL_OBJECTS_OFFSET(rSELF) # Increment thread_local_objects.
- daddiu $a2, $a2, 1
- sd $a2, THREAD_LOCAL_OBJECTS_OFFSET(rSELF)
- POISON_HEAP_REF $a0
- sw $a0, MIRROR_OBJECT_CLASS_OFFSET($v0) # Store the class pointer.
-
-.if \isInitialized == 0
- # This barrier is only necessary when the allocation also requires a class initialization check.
- #
- # If the class is already observably initialized, then new-instance allocations are protected
- # from publishing by the compiler which inserts its own StoreStore barrier.
- sync # Fence.
-.endif
- jic $ra, 0
-.endm
-
-// The common code for art_quick_alloc_object_resolved/initialized_tlab
-// and art_quick_alloc_object_resolved/initialized_region_tlab.
-.macro GENERATE_ALLOC_OBJECT_TLAB name, entrypoint, isInitialized
-ENTRY_NO_GP \name
- # Fast path tlab allocation.
- # a0: type, s1(rSELF): Thread::Current.
- ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lslow_path_\name, \isInitialized
-.Lslow_path_\name:
- SETUP_GP
- SETUP_SAVE_REFS_ONLY_FRAME # Save callee saves in case of GC.
- jal \entrypoint # (mirror::Class*, Thread*)
- move $a1, rSELF # Pass Thread::Current.
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END \name
-.endm
-
-GENERATE_ALLOC_OBJECT_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB, /* isInitialized */ 0
-GENERATE_ALLOC_OBJECT_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB, /* isInitialized */ 1
-GENERATE_ALLOC_OBJECT_TLAB art_quick_alloc_object_resolved_tlab, artAllocObjectFromCodeResolvedTLAB, /* isInitialized */ 0
-GENERATE_ALLOC_OBJECT_TLAB art_quick_alloc_object_initialized_tlab, artAllocObjectFromCodeInitializedTLAB, /* isInitialized */ 1
-
-// The common fast path code for art_quick_alloc_array_resolved/initialized_tlab
-// and art_quick_alloc_array_resolved/initialized_region_tlab.
-//
-// a0: type, a1: component_count, a2: total_size, s1(rSELF): Thread::Current.
-// Need to preserve a0 and a1 to the slow path.
-.macro ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE slowPathLabel
- dli $a3, OBJECT_ALIGNMENT_MASK_TOGGLED64 # Apply alignemnt mask (addr + 7) & ~7.
- and $a2, $a2, $a3 # The mask must be 64 bits to keep high
- # bits in case of overflow.
- # Negative sized arrays are handled here since a1 holds a zero extended 32 bit value.
- # Negative ints become large 64 bit unsigned ints which will always be larger than max signed
- # 32 bit int. Since the max shift for arrays is 3, it can not become a negative 64 bit int.
- dli $a3, MIN_LARGE_OBJECT_THRESHOLD
- bgeuc $a2, $a3, \slowPathLabel # Possibly a large object, go slow path.
-
- ld $v0, THREAD_LOCAL_POS_OFFSET(rSELF) # Load thread_local_pos.
- ld $t1, THREAD_LOCAL_END_OFFSET(rSELF) # Load thread_local_end.
- dsubu $t2, $t1, $v0 # Compute the remaining buffer size.
- bltuc $t2, $a2, \slowPathLabel # Check tlab for space, note that we use
- # (end - begin) to handle negative size
- # arrays. It is assumed that a negative size
- # will always be greater unsigned than region
- # size.
-
- # "Point of no slow path". Won't go to the slow path from here on.
- daddu $a2, $v0, $a2 # Add object size to tlab pos.
- sd $a2, THREAD_LOCAL_POS_OFFSET(rSELF) # Store new thread_local_pos.
- ld $a2, THREAD_LOCAL_OBJECTS_OFFSET(rSELF) # Increment thread_local_objects.
- daddiu $a2, $a2, 1
- sd $a2, THREAD_LOCAL_OBJECTS_OFFSET(rSELF)
- POISON_HEAP_REF $a0
- sw $a0, MIRROR_OBJECT_CLASS_OFFSET($v0) # Store the class pointer.
- sw $a1, MIRROR_ARRAY_LENGTH_OFFSET($v0) # Store the array length.
-
- jic $ra, 0
-.endm
-
-.macro GENERATE_ALLOC_ARRAY_TLAB name, entrypoint, size_setup
-ENTRY_NO_GP \name
- # Fast path array allocation for region tlab allocation.
- # a0: mirror::Class* type
- # a1: int32_t component_count
- # s1(rSELF): Thread::Current
- dext $a4, $a1, 0, 32 # Create zero-extended component_count. Value
- # in a1 is preserved in a case of slow path.
- \size_setup .Lslow_path_\name
- ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE .Lslow_path_\name
-.Lslow_path_\name:
- # a0: mirror::Class* type
- # a1: int32_t component_count
- # a2: Thread* self
- SETUP_GP
- SETUP_SAVE_REFS_ONLY_FRAME # Save callee saves in case of GC.
- jal \entrypoint
- move $a2, rSELF # Pass Thread::Current.
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END \name
-.endm
-
-.macro COMPUTE_ARRAY_SIZE_UNKNOWN slow_path
- # Array classes are never finalizable or uninitialized, no need to check.
- lwu $a3, MIRROR_CLASS_COMPONENT_TYPE_OFFSET($a0) # Load component type.
- UNPOISON_HEAP_REF $a3
- lw $a3, MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET($a3)
- dsrl $a3, $a3, PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT # Component size shift is in high 16 bits.
- dsllv $a2, $a4, $a3 # Calculate data size.
- # Add array data offset and alignment.
- daddiu $a2, $a2, (MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
-#if MIRROR_WIDE_ARRAY_DATA_OFFSET != MIRROR_INT_ARRAY_DATA_OFFSET + 4
-#error Long array data offset must be 4 greater than int array data offset.
-#endif
-
- daddiu $a3, $a3, 1 # Add 4 to the length only if the component
- andi $a3, $a3, 4 # size shift is 3 (for 64 bit alignment).
- daddu $a2, $a2, $a3
-.endm
-
-.macro COMPUTE_ARRAY_SIZE_8 slow_path
- # Add array data offset and alignment.
- daddiu $a2, $a4, (MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
-.endm
-
-.macro COMPUTE_ARRAY_SIZE_16 slow_path
- dsll $a2, $a4, 1
- # Add array data offset and alignment.
- daddiu $a2, $a2, (MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
-.endm
-
-.macro COMPUTE_ARRAY_SIZE_32 slow_path
- dsll $a2, $a4, 2
- # Add array data offset and alignment.
- daddiu $a2, $a2, (MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
-.endm
-
-.macro COMPUTE_ARRAY_SIZE_64 slow_path
- dsll $a2, $a4, 3
- # Add array data offset and alignment.
- daddiu $a2, $a2, (MIRROR_WIDE_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
-.endm
-
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_UNKNOWN
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved8_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_8
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved16_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_16
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved32_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_32
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved64_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_64
-
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_UNKNOWN
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved8_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_8
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved16_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_16
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved32_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_32
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved64_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_64
-
- /*
- * Macro for resolution and initialization of indexed DEX file
- * constants such as classes and strings. $a0 is both input and
- * output.
- */
-.macro ONE_ARG_SAVE_EVERYTHING_DOWNCALL name, entrypoint, runtime_method_offset = RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET
- .extern \entrypoint
-ENTRY_NO_GP \name
- SETUP_SAVE_EVERYTHING_FRAME \runtime_method_offset # Save everything in case of GC.
- dla $t9, \entrypoint
- jalr $t9 # (uint32_t index, Thread*)
- move $a1, rSELF # Pass Thread::Current (in delay slot).
- beqz $v0, 1f # Success?
- move $a0, $v0 # Move result to $a0 (in delay slot).
- RESTORE_SAVE_EVERYTHING_FRAME 0 # Restore everything except $a0.
- jic $ra, 0 # Return on success.
-1:
- DELIVER_PENDING_EXCEPTION_FRAME_READY
-END \name
-.endm
-
-.macro ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT name, entrypoint
- ONE_ARG_SAVE_EVERYTHING_DOWNCALL \name, \entrypoint, RUNTIME_SAVE_EVERYTHING_FOR_CLINIT_METHOD_OFFSET
-.endm
-
- /*
- * Entry from managed code to resolve a method handle. On entry, A0 holds the method handle
- * index. On success the MethodHandle is returned, otherwise an exception is raised.
- */
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_handle, artResolveMethodHandleFromCode
-
- /*
- * Entry from managed code to resolve a method type. On entry, A0 holds the method type index.
- * On success the MethodType is returned, otherwise an exception is raised.
- */
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_type, artResolveMethodTypeFromCode
-
- /*
- * Entry from managed code to resolve a string, this stub will allocate a String and deliver an
- * exception on error. On success the String is returned. A0 holds the string index. The fast
- * path check for hit in strings cache has already been performed.
- */
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_string, artResolveStringFromCode
-
- /*
- * Entry from managed code when uninitialized static storage, this stub will run the class
- * initializer and deliver the exception on error. On success the static storage base is
- * returned.
- */
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_initialize_static_storage, artInitializeStaticStorageFromCode
-
- /*
- * Entry from managed code when dex cache misses for a type_idx.
- */
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_resolve_type, artResolveTypeFromCode
-
- /*
- * Entry from managed code when type_idx needs to be checked for access and dex cache may also
- * miss.
- */
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_type_and_verify_access, artResolveTypeAndVerifyAccessFromCode
-
- /*
- * Called by managed code when the value in rSUSPEND has been decremented to 0.
- */
- .extern artTestSuspendFromCode
-ENTRY_NO_GP art_quick_test_suspend
- SETUP_SAVE_EVERYTHING_FRAME RUNTIME_SAVE_EVERYTHING_FOR_SUSPEND_CHECK_METHOD_OFFSET
- # save everything for stack crawl
- jal artTestSuspendFromCode # (Thread*)
- move $a0, rSELF
- RESTORE_SAVE_EVERYTHING_FRAME
- jalr $zero, $ra
- nop
-END art_quick_test_suspend
-
- /*
- * Called by managed code that is attempting to call a method on a proxy class. On entry
- * r0 holds the proxy method; r1, r2 and r3 may contain arguments.
- */
- .extern artQuickProxyInvokeHandler
-ENTRY art_quick_proxy_invoke_handler
- SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_A0
- move $a2, rSELF # pass Thread::Current
- jal artQuickProxyInvokeHandler # (Method* proxy method, receiver, Thread*, SP)
- move $a3, $sp # pass $sp
- ld $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
- daddiu $sp, $sp, REFS_AND_ARGS_MINUS_REFS_SIZE # skip a0-a7 and f12-f19
- RESTORE_SAVE_REFS_ONLY_FRAME
- bne $t0, $zero, 1f
- dmtc1 $v0, $f0 # place return value to FP return value
- jalr $zero, $ra
- dmtc1 $v1, $f1 # place return value to FP return value
-1:
- DELIVER_PENDING_EXCEPTION
-END art_quick_proxy_invoke_handler
-
- /*
- * Called to resolve an imt conflict.
- * a0 is the conflict ArtMethod.
- * t0 is a hidden argument that holds the target interface method's dex method index.
- *
- * Mote that this stub writes to v0-v1, a0, t0-t3, t8-t9, f0-f11, f20-f23.
- */
- .extern artLookupResolvedMethod
- .extern __atomic_load_16 # For __int128_t std::atomic::load(std::memory_order).
-ENTRY art_quick_imt_conflict_trampoline
- SETUP_SAVE_REFS_AND_ARGS_FRAME_INTERNAL /* save_s4_thru_s8 */ 0
-
- ld $t1, FRAME_SIZE_SAVE_REFS_AND_ARGS($sp) # $t1 = referrer.
- // If the method is obsolete, just go through the dex cache miss slow path.
- // The obsolete flag is set with suspended threads, so we do not need an acquire operation here.
- lw $t9, ART_METHOD_ACCESS_FLAGS_OFFSET($t1) # $t9 = access flags.
- sll $t9, $t9, 31 - ACC_OBSOLETE_METHOD_SHIFT # Move obsolete method bit to sign bit.
- bltzc $t9, .Limt_conflict_trampoline_dex_cache_miss
- lwu $t1, ART_METHOD_DECLARING_CLASS_OFFSET($t1) # $t1 = declaring class (no read barrier).
- lwu $t1, MIRROR_CLASS_DEX_CACHE_OFFSET($t1) # $t1 = dex cache (without read barrier).
- UNPOISON_HEAP_REF $t1
- dla $t9, __atomic_load_16
- ld $t1, MIRROR_DEX_CACHE_RESOLVED_METHODS_OFFSET($t1) # $t1 = dex cache methods array.
-
- dext $s2, $t0, 0, 32 # $s2 = zero-extended method index
- # (callee-saved).
- ld $s3, ART_METHOD_JNI_OFFSET_64($a0) # $s3 = ImtConflictTable (callee-saved).
-
- dext $t0, $t0, 0, METHOD_DEX_CACHE_HASH_BITS # $t0 = slot index.
-
- li $a1, STD_MEMORY_ORDER_RELAXED # $a1 = std::memory_order_relaxed.
- jalr $t9 # [$v0, $v1] = __atomic_load_16($a0, $a1).
- dlsa $a0, $t0, $t1, POINTER_SIZE_SHIFT + 1 # $a0 = DexCache method slot address.
-
- bnec $v1, $s2, .Limt_conflict_trampoline_dex_cache_miss # Branch if method index miss.
-
-.Limt_table_iterate:
- ld $t1, 0($s3) # Load next entry in ImtConflictTable.
- # Branch if found.
- beq $t1, $v0, .Limt_table_found
- nop
- # If the entry is null, the interface method is not in the ImtConflictTable.
- beqzc $t1, .Lconflict_trampoline
- # Iterate over the entries of the ImtConflictTable.
- daddiu $s3, $s3, 2 * __SIZEOF_POINTER__ # Iterate to the next entry.
- bc .Limt_table_iterate
-
-.Limt_table_found:
- # We successfully hit an entry in the table. Load the target method and jump to it.
- .cfi_remember_state
- ld $a0, __SIZEOF_POINTER__($s3)
- ld $t9, ART_METHOD_QUICK_CODE_OFFSET_64($a0)
- RESTORE_SAVE_REFS_AND_ARGS_FRAME /* restore_s4_thru_s8 */ 0
- jic $t9, 0
- .cfi_restore_state
-
-.Lconflict_trampoline:
- # Call the runtime stub to populate the ImtConflictTable and jump to the resolved method.
- .cfi_remember_state
- RESTORE_SAVE_REFS_AND_ARGS_FRAME_A1 # Restore this.
- move $a0, $v0 # Load interface method.
- INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline, /* save_s4_thru_s8_only */ 1
- .cfi_restore_state
-
-.Limt_conflict_trampoline_dex_cache_miss:
- # We're not creating a proper runtime method frame here,
- # artLookupResolvedMethod() is not allowed to walk the stack.
- dla $t9, artLookupResolvedMethod
- ld $a1, FRAME_SIZE_SAVE_REFS_AND_ARGS($sp) # $a1 = referrer.
- jalr $t9 # (uint32_t method_index, ArtMethod* referrer).
- sll $a0, $s2, 0 # $a0 = sign-extended method index.
-
- # If the method wasn't resolved, skip the lookup and go to artInvokeInterfaceTrampoline().
- beqzc $v0, .Lconflict_trampoline
- nop
- bc .Limt_table_iterate
-END art_quick_imt_conflict_trampoline
-
- .extern artQuickResolutionTrampoline
-ENTRY art_quick_resolution_trampoline
- SETUP_SAVE_REFS_AND_ARGS_FRAME
- move $a2, rSELF # pass Thread::Current
- jal artQuickResolutionTrampoline # (Method* called, receiver, Thread*, SP)
- move $a3, $sp # pass $sp
- beq $v0, $zero, 1f
- ld $a0, 0($sp) # load resolved method in $a0
- # artQuickResolutionTrampoline puts resolved method in *SP
- RESTORE_SAVE_REFS_AND_ARGS_FRAME
- move $t9, $v0 # code pointer must be in $t9 to generate the global pointer
- jalr $zero, $t9 # tail call to method
- nop
-1:
- RESTORE_SAVE_REFS_AND_ARGS_FRAME
- DELIVER_PENDING_EXCEPTION
-END art_quick_resolution_trampoline
-
- .extern artQuickGenericJniTrampoline
- .extern artQuickGenericJniEndTrampoline
-ENTRY art_quick_generic_jni_trampoline
- SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_A0
- move $s8, $sp # save $sp
-
- # prepare for call to artQuickGenericJniTrampoline(Thread*, SP)
- move $a0, rSELF # pass Thread::Current
- move $a1, $sp # pass $sp
- jal artQuickGenericJniTrampoline # (Thread*, SP)
- daddiu $sp, $sp, -5120 # reserve space on the stack
-
- # The C call will have registered the complete save-frame on success.
- # The result of the call is:
- # v0: ptr to native code, 0 on error.
- # v1: ptr to the bottom of the used area of the alloca, can restore stack till here.
- beq $v0, $zero, 1f # check entry error
- move $t9, $v0 # save the code ptr
- move $sp, $v1 # release part of the alloca
-
- # Load parameters from stack into registers
- ld $a0, 0($sp)
- ld $a1, 8($sp)
- ld $a2, 16($sp)
- ld $a3, 24($sp)
- ld $a4, 32($sp)
- ld $a5, 40($sp)
- ld $a6, 48($sp)
- ld $a7, 56($sp)
- # Load FPRs the same as GPRs. Look at BuildNativeCallFrameStateMachine.
- l.d $f12, 0($sp)
- l.d $f13, 8($sp)
- l.d $f14, 16($sp)
- l.d $f15, 24($sp)
- l.d $f16, 32($sp)
- l.d $f17, 40($sp)
- l.d $f18, 48($sp)
- l.d $f19, 56($sp)
- jalr $t9 # native call
- daddiu $sp, $sp, 64
-
- # result sign extension is handled in C code
- # prepare for call to artQuickGenericJniEndTrampoline(Thread*, result, result_f)
- move $a0, rSELF # pass Thread::Current
- move $a1, $v0
- jal artQuickGenericJniEndTrampoline
- dmfc1 $a2, $f0
-
- ld $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
- bne $t0, $zero, 1f # check for pending exceptions
- move $sp, $s8 # tear down the alloca
-
- # tear dpown the callee-save frame
- RESTORE_SAVE_REFS_AND_ARGS_FRAME
-
- jalr $zero, $ra
- dmtc1 $v0, $f0 # place return value to FP return value
-
-1:
- ld $t0, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)
- daddiu $sp, $t0, -1 // Remove the GenericJNI tag.
- # This will create a new save-all frame, required by the runtime.
- DELIVER_PENDING_EXCEPTION
-END art_quick_generic_jni_trampoline
-
- .extern artQuickToInterpreterBridge
-ENTRY art_quick_to_interpreter_bridge
- SETUP_SAVE_REFS_AND_ARGS_FRAME
- move $a1, rSELF # pass Thread::Current
- jal artQuickToInterpreterBridge # (Method* method, Thread*, SP)
- move $a2, $sp # pass $sp
- ld $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
- daddiu $sp, $sp, REFS_AND_ARGS_MINUS_REFS_SIZE # skip a0-a7 and f12-f19
- RESTORE_SAVE_REFS_ONLY_FRAME
- bne $t0, $zero, 1f
- dmtc1 $v0, $f0 # place return value to FP return value
- jalr $zero, $ra
- dmtc1 $v1, $f1 # place return value to FP return value
-1:
- DELIVER_PENDING_EXCEPTION
-END art_quick_to_interpreter_bridge
-
- .extern artInvokeObsoleteMethod
-ENTRY art_invoke_obsolete_method_stub
- SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
- jal artInvokeObsoleteMethod # (Method* method, Thread* self)
- move $a1, rSELF # pass Thread::Current
-END art_invoke_obsolete_method_stub
-
- /*
- * Routine that intercepts method calls and returns.
- */
- .extern artInstrumentationMethodEntryFromCode
- .extern artInstrumentationMethodExitFromCode
-ENTRY art_quick_instrumentation_entry
- SETUP_SAVE_REFS_AND_ARGS_FRAME
- # Preserve $a0 knowing there is a spare slot in kSaveRefsAndArgs.
- sd $a0, 8($sp) # Save arg0.
- move $a3, $sp # Pass $sp.
- jal artInstrumentationMethodEntryFromCode # (Method*, Object*, Thread*, SP)
- move $a2, rSELF # pass Thread::Current
- beqzc $v0, .Ldeliver_instrumentation_entry_exception
- # Deliver exception if we got nullptr as function.
- move $t9, $v0 # $t9 holds reference to code
- ld $a0, 8($sp) # Restore arg0.
- dla $v0, art_quick_instrumentation_exit
- RESTORE_SAVE_REFS_AND_ARGS_FRAME
- move $ra, $v0
- jic $t9, 0 # call method, returning to art_quick_instrumentation_exit
-.Ldeliver_instrumentation_entry_exception:
- RESTORE_SAVE_REFS_AND_ARGS_FRAME
- DELIVER_PENDING_EXCEPTION
-END art_quick_instrumentation_entry
-
-ENTRY_NO_GP art_quick_instrumentation_exit
- move $ra, $zero # RA points here, so clobber with 0 for later checks.
- SETUP_SAVE_EVERYTHING_FRAME
-
- daddiu $a3, $sp, 16 # Pass fpr_res pointer ($f0 in SAVE_EVERYTHING_FRAME).
- daddiu $a2, $sp, 280 # Pass gpr_res pointer ($v0 in SAVE_EVERYTHING_FRAME).
- move $a1, $sp # Pass $sp.
- jal artInstrumentationMethodExitFromCode # (Thread*, SP, gpr_res*, fpr_res*)
- move $a0, rSELF # pass Thread::Current
-
- beqzc $v0, .Ldo_deliver_instrumentation_exception
- # Deliver exception if we got nullptr as function.
- nop
- bnez $v1, .Ldeoptimize
-
- # Normal return.
- sd $v0, (FRAME_SIZE_SAVE_EVERYTHING-8)($sp) # Set return pc.
- RESTORE_SAVE_EVERYTHING_FRAME
- jic $ra, 0
-.Ldo_deliver_instrumentation_exception:
- DELIVER_PENDING_EXCEPTION_FRAME_READY
-.Ldeoptimize:
- b art_quick_deoptimize
- sd $v1, (FRAME_SIZE_SAVE_EVERYTHING-8)($sp)
- # Fake a call from instrumentation return pc.
-END art_quick_instrumentation_exit
-
- /*
- * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization
- * will long jump to the upcall with a special exception of -1.
- */
- .extern artDeoptimize
-ENTRY_NO_GP_CUSTOM_CFA art_quick_deoptimize, FRAME_SIZE_SAVE_EVERYTHING
- # SETUP_SAVE_EVERYTHING_FRAME has been done by art_quick_instrumentation_exit.
- .cfi_rel_offset 31, 488
- .cfi_rel_offset 30, 480
- .cfi_rel_offset 28, 472
- .cfi_rel_offset 25, 464
- .cfi_rel_offset 24, 456
- .cfi_rel_offset 23, 448
- .cfi_rel_offset 22, 440
- .cfi_rel_offset 21, 432
- .cfi_rel_offset 20, 424
- .cfi_rel_offset 19, 416
- .cfi_rel_offset 18, 408
- .cfi_rel_offset 17, 400
- .cfi_rel_offset 16, 392
- .cfi_rel_offset 15, 384
- .cfi_rel_offset 14, 376
- .cfi_rel_offset 13, 368
- .cfi_rel_offset 12, 360
- .cfi_rel_offset 11, 352
- .cfi_rel_offset 10, 344
- .cfi_rel_offset 9, 336
- .cfi_rel_offset 8, 328
- .cfi_rel_offset 7, 320
- .cfi_rel_offset 6, 312
- .cfi_rel_offset 5, 304
- .cfi_rel_offset 4, 296
- .cfi_rel_offset 3, 288
- .cfi_rel_offset 2, 280
- .cfi_rel_offset 1, 272
-
- jal artDeoptimize # artDeoptimize(Thread*)
- move $a0, rSELF # pass Thread::current
- break
-END art_quick_deoptimize
-
- /*
- * Compiled code has requested that we deoptimize into the interpreter. The deoptimization
- * will long jump to the upcall with a special exception of -1.
- */
- .extern artDeoptimizeFromCompiledCode
-ENTRY_NO_GP art_quick_deoptimize_from_compiled_code
- SETUP_SAVE_EVERYTHING_FRAME
- jal artDeoptimizeFromCompiledCode # (DeoptimizationKind, Thread*)
- move $a1, rSELF # pass Thread::current
-END art_quick_deoptimize_from_compiled_code
-
- .set push
- .set noat
-/* java.lang.String.compareTo(String anotherString) */
-ENTRY_NO_GP art_quick_string_compareto
-/* $a0 holds address of "this" */
-/* $a1 holds address of "anotherString" */
- move $a2, $zero
- beq $a0, $a1, .Lstring_compareto_length_diff # this and anotherString are the same object
- move $a3, $zero # return 0 (it returns a2 - a3)
-
-#if (STRING_COMPRESSION_FEATURE)
- lw $a4, MIRROR_STRING_COUNT_OFFSET($a0) # 'count' field of this
- lw $a5, MIRROR_STRING_COUNT_OFFSET($a1) # 'count' field of anotherString
- sra $a2, $a4, 1 # this.length()
- sra $a3, $a5, 1 # anotherString.length()
-#else
- lw $a2, MIRROR_STRING_COUNT_OFFSET($a0) # this.length()
- lw $a3, MIRROR_STRING_COUNT_OFFSET($a1) # anotherString.length()
-#endif
-
- MINu $t2, $a2, $a3
- # $t2 now holds min(this.length(),anotherString.length())
-
- # while min(this.length(),anotherString.length())-i != 0
- beqzc $t2, .Lstring_compareto_length_diff # if $t2==0
- # return (this.length() - anotherString.length())
-
-#if (STRING_COMPRESSION_FEATURE)
- # Differ cases:
- dext $a6, $a4, 0, 1
- beqz $a6, .Lstring_compareto_this_is_compressed
- dext $a6, $a5, 0, 1 # In branch delay slot.
- beqz $a6, .Lstring_compareto_that_is_compressed
- nop
- b .Lstring_compareto_both_not_compressed
- nop
-
-.Lstring_compareto_this_is_compressed:
- beqzc $a6, .Lstring_compareto_both_compressed
- /* If (this->IsCompressed() && that->IsCompressed() == false) */
-.Lstring_compareto_loop_comparison_this_compressed:
- lbu $t0, MIRROR_STRING_VALUE_OFFSET($a0)
- lhu $t1, MIRROR_STRING_VALUE_OFFSET($a1)
- bnec $t0, $t1, .Lstring_compareto_char_diff
- daddiu $a0, $a0, 1 # point at this.charAt(i++) - compressed
- subu $t2, $t2, 1 # new value of min(this.length(),anotherString.length())-i
- bnez $t2, .Lstring_compareto_loop_comparison_this_compressed
- daddiu $a1, $a1, 2 # point at anotherString.charAt(i++) - uncompressed
- jalr $zero, $ra
- subu $v0, $a2, $a3 # return (this.length() - anotherString.length())
-
-.Lstring_compareto_that_is_compressed:
- lhu $t0, MIRROR_STRING_VALUE_OFFSET($a0)
- lbu $t1, MIRROR_STRING_VALUE_OFFSET($a1)
- bnec $t0, $t1, .Lstring_compareto_char_diff
- daddiu $a0, $a0, 2 # point at this.charAt(i++) - uncompressed
- subu $t2, $t2, 1 # new value of min(this.length(),anotherString.length())-i
- bnez $t2, .Lstring_compareto_that_is_compressed
- daddiu $a1, $a1, 1 # point at anotherString.charAt(i++) - compressed
- jalr $zero, $ra
- subu $v0, $a2, $a3 # return (this.length() - anotherString.length())
-
-.Lstring_compareto_both_compressed:
- lbu $t0, MIRROR_STRING_VALUE_OFFSET($a0)
- lbu $t1, MIRROR_STRING_VALUE_OFFSET($a1)
- bnec $t0, $t1, .Lstring_compareto_char_diff
- daddiu $a0, $a0, 1 # point at this.charAt(i++) - compressed
- subu $t2, $t2, 1 # new value of min(this.length(),anotherString.length())-i
- bnez $t2, .Lstring_compareto_both_compressed
- daddiu $a1, $a1, 1 # point at anotherString.charAt(i++) - compressed
- jalr $zero, $ra
- subu $v0, $a2, $a3 # return (this.length() - anotherString.length())
-#endif
-
-.Lstring_compareto_both_not_compressed:
- lhu $t0, MIRROR_STRING_VALUE_OFFSET($a0) # while this.charAt(i) == anotherString.charAt(i)
- lhu $t1, MIRROR_STRING_VALUE_OFFSET($a1)
- bnec $t0, $t1, .Lstring_compareto_char_diff # if this.charAt(i) != anotherString.charAt(i)
- # return (this.charAt(i) - anotherString.charAt(i))
- daddiu $a0, $a0, 2 # point at this.charAt(i++)
- subu $t2, $t2, 1 # new value of min(this.length(),anotherString.length())-i
- bnez $t2, .Lstring_compareto_both_not_compressed
- daddiu $a1, $a1, 2 # point at anotherString.charAt(i++)
-
-.Lstring_compareto_length_diff:
- jalr $zero, $ra
- subu $v0, $a2, $a3 # return (this.length() - anotherString.length())
-
-.Lstring_compareto_char_diff:
- jalr $zero, $ra
- subu $v0, $t0, $t1 # return (this.charAt(i) - anotherString.charAt(i))
-END art_quick_string_compareto
-
-/* java.lang.String.indexOf(int ch, int fromIndex=0) */
-ENTRY_NO_GP art_quick_indexof
-/* $a0 holds address of "this" */
-/* $a1 holds "ch" */
-/* $a2 holds "fromIndex" */
-#if (STRING_COMPRESSION_FEATURE)
- lw $a3, MIRROR_STRING_COUNT_OFFSET($a0) # 'count' field of this
-#else
- lw $t0, MIRROR_STRING_COUNT_OFFSET($a0) # this.length()
-#endif
- slt $at, $a2, $zero # if fromIndex < 0
- seleqz $a2, $a2, $at # fromIndex = 0;
-#if (STRING_COMPRESSION_FEATURE)
- srl $t0, $a3, 1 # $a3 holds count (with flag) and $t0 holds actual length
-#endif
- subu $t0, $t0, $a2 # this.length() - fromIndex
- blez $t0, 6f # if this.length()-fromIndex <= 0
- li $v0, -1 # return -1;
-
-#if (STRING_COMPRESSION_FEATURE)
- dext $a3, $a3, 0, 1 # Extract compression flag.
- beqzc $a3, .Lstring_indexof_compressed
-#endif
-
- sll $v0, $a2, 1 # $a0 += $a2 * 2
- daddu $a0, $a0, $v0 # " ditto "
- move $v0, $a2 # Set i to fromIndex.
-
-1:
- lhu $t3, MIRROR_STRING_VALUE_OFFSET($a0) # if this.charAt(i) == ch
- beq $t3, $a1, 6f # return i;
- daddu $a0, $a0, 2 # i++
- subu $t0, $t0, 1 # this.length() - i
- bnez $t0, 1b # while this.length() - i > 0
- addu $v0, $v0, 1 # i++
-
- li $v0, -1 # if this.length() - i <= 0
- # return -1;
-
-6:
- j $ra
- nop
-
-#if (STRING_COMPRESSION_FEATURE)
-.Lstring_indexof_compressed:
- move $a4, $a0 # Save a copy in $a4 to later compute result.
- daddu $a0, $a0, $a2 # $a0 += $a2
-
-.Lstring_indexof_compressed_loop:
- lbu $t3, MIRROR_STRING_VALUE_OFFSET($a0)
- beq $t3, $a1, .Lstring_indexof_compressed_matched
- subu $t0, $t0, 1
- bgtz $t0, .Lstring_indexof_compressed_loop
- daddu $a0, $a0, 1
-
-.Lstring_indexof_nomatch:
- jalr $zero, $ra
- li $v0, -1 # return -1;
-
-.Lstring_indexof_compressed_matched:
- jalr $zero, $ra
- dsubu $v0, $a0, $a4 # return (current - start);
-#endif
-END art_quick_indexof
-
- .extern artStringBuilderAppend
-ENTRY art_quick_string_builder_append
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- dla $t9, artStringBuilderAppend
- daddiu $a1, $sp, FRAME_SIZE_SAVE_REFS_ONLY + __SIZEOF_POINTER__ # pass args
- jalr $t9 # (uint32_t, const unit32_t*, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END art_quick_string_builder_append
-
- /*
- * Create a function `name` calling the ReadBarrier::Mark routine,
- * getting its argument and returning its result through register
- * `reg`, saving and restoring all caller-save registers.
- */
-.macro READ_BARRIER_MARK_REG name, reg
-ENTRY \name
- // Null check so that we can load the lock word.
- bnezc \reg, .Lnot_null_\name
- nop
-.Lret_rb_\name:
- jic $ra, 0
-.Lnot_null_\name:
- // Check lock word for mark bit, if marked return.
- lw $t9, MIRROR_OBJECT_LOCK_WORD_OFFSET(\reg)
- .set push
- .set noat
- sll $at, $t9, 31 - LOCK_WORD_MARK_BIT_SHIFT # Move mark bit to sign bit.
- bltzc $at, .Lret_rb_\name
-#if (LOCK_WORD_STATE_SHIFT != 30) || (LOCK_WORD_STATE_FORWARDING_ADDRESS != 3)
- // The below code depends on the lock word state being in the highest bits
- // and the "forwarding address" state having all bits set.
-#error "Unexpected lock word state shift or forwarding address state value."
-#endif
- // Test that both the forwarding state bits are 1.
- sll $at, $t9, 1
- and $at, $at, $t9 # Sign bit = 1 IFF both bits are 1.
- bltzc $at, .Lret_forwarding_address\name
- .set pop
-
- daddiu $sp, $sp, -320
- .cfi_adjust_cfa_offset 320
-
- sd $ra, 312($sp)
- .cfi_rel_offset 31, 312
- sd $t8, 304($sp) # save t8 holding caller's gp
- .cfi_rel_offset 24, 304
- sd $t3, 296($sp)
- .cfi_rel_offset 15, 296
- sd $t2, 288($sp)
- .cfi_rel_offset 14, 288
- sd $t1, 280($sp)
- .cfi_rel_offset 13, 280
- sd $t0, 272($sp)
- .cfi_rel_offset 12, 272
- sd $a7, 264($sp)
- .cfi_rel_offset 11, 264
- sd $a6, 256($sp)
- .cfi_rel_offset 10, 256
- sd $a5, 248($sp)
- .cfi_rel_offset 9, 248
- sd $a4, 240($sp)
- .cfi_rel_offset 8, 240
- sd $a3, 232($sp)
- .cfi_rel_offset 7, 232
- sd $a2, 224($sp)
- .cfi_rel_offset 6, 224
- sd $a1, 216($sp)
- .cfi_rel_offset 5, 216
- sd $a0, 208($sp)
- .cfi_rel_offset 4, 208
- sd $v1, 200($sp)
- .cfi_rel_offset 3, 200
- sd $v0, 192($sp)
- .cfi_rel_offset 2, 192
-
- dla $t9, artReadBarrierMark
-
- sdc1 $f23, 184($sp)
- sdc1 $f22, 176($sp)
- sdc1 $f21, 168($sp)
- sdc1 $f20, 160($sp)
- sdc1 $f19, 152($sp)
- sdc1 $f18, 144($sp)
- sdc1 $f17, 136($sp)
- sdc1 $f16, 128($sp)
- sdc1 $f15, 120($sp)
- sdc1 $f14, 112($sp)
- sdc1 $f13, 104($sp)
- sdc1 $f12, 96($sp)
- sdc1 $f11, 88($sp)
- sdc1 $f10, 80($sp)
- sdc1 $f9, 72($sp)
- sdc1 $f8, 64($sp)
- sdc1 $f7, 56($sp)
- sdc1 $f6, 48($sp)
- sdc1 $f5, 40($sp)
- sdc1 $f4, 32($sp)
- sdc1 $f3, 24($sp)
- sdc1 $f2, 16($sp)
- sdc1 $f1, 8($sp)
-
- .ifnc \reg, $a0
- move $a0, \reg # pass obj from `reg` in a0
- .endif
- jalr $t9 # v0 <- artReadBarrierMark(obj)
- sdc1 $f0, 0($sp) # in delay slot
-
- ld $ra, 312($sp)
- .cfi_restore 31
- ld $t8, 304($sp) # restore t8 holding caller's gp
- .cfi_restore 24
- ld $t3, 296($sp)
- .cfi_restore 15
- ld $t2, 288($sp)
- .cfi_restore 14
- ld $t1, 280($sp)
- .cfi_restore 13
- ld $t0, 272($sp)
- .cfi_restore 12
- ld $a7, 264($sp)
- .cfi_restore 11
- ld $a6, 256($sp)
- .cfi_restore 10
- ld $a5, 248($sp)
- .cfi_restore 9
- ld $a4, 240($sp)
- .cfi_restore 8
- ld $a3, 232($sp)
- .cfi_restore 7
- ld $a2, 224($sp)
- .cfi_restore 6
- ld $a1, 216($sp)
- .cfi_restore 5
- ld $a0, 208($sp)
- .cfi_restore 4
- ld $v1, 200($sp)
- .cfi_restore 3
-
- .ifnc \reg, $v0
- move \reg, $v0 # `reg` <- v0
- ld $v0, 192($sp)
- .cfi_restore 2
- .endif
-
- ldc1 $f23, 184($sp)
- ldc1 $f22, 176($sp)
- ldc1 $f21, 168($sp)
- ldc1 $f20, 160($sp)
- ldc1 $f19, 152($sp)
- ldc1 $f18, 144($sp)
- ldc1 $f17, 136($sp)
- ldc1 $f16, 128($sp)
- ldc1 $f15, 120($sp)
- ldc1 $f14, 112($sp)
- ldc1 $f13, 104($sp)
- ldc1 $f12, 96($sp)
- ldc1 $f11, 88($sp)
- ldc1 $f10, 80($sp)
- ldc1 $f9, 72($sp)
- ldc1 $f8, 64($sp)
- ldc1 $f7, 56($sp)
- ldc1 $f6, 48($sp)
- ldc1 $f5, 40($sp)
- ldc1 $f4, 32($sp)
- ldc1 $f3, 24($sp)
- ldc1 $f2, 16($sp)
- ldc1 $f1, 8($sp)
- ldc1 $f0, 0($sp)
-
- .cpreturn # restore caller's gp from t8
- jalr $zero, $ra
- daddiu $sp, $sp, 320
- .cfi_adjust_cfa_offset -320
-
-.Lret_forwarding_address\name:
- // Shift left by the forwarding address shift. This clears out the state bits since they are
- // in the top 2 bits of the lock word.
- sll \reg, $t9, LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT
- jalr $zero, $ra
- dext \reg, \reg, 0, 32 # Make sure the address is zero-extended.
-END \name
-.endm
-
-// Note that art_quick_read_barrier_mark_regXX corresponds to register XX+1.
-// ZERO (register 0) is reserved.
-// AT (register 1) is reserved as a temporary/scratch register.
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg01, $v0
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg02, $v1
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg03, $a0
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg04, $a1
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg05, $a2
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg06, $a3
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg07, $a4
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg08, $a5
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg09, $a6
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg10, $a7
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg11, $t0
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg12, $t1
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg13, $t2
-// T3 (register 15) is reserved as a temporary/scratch register.
-// S0 and S1 (registers 16 and 17) are reserved as suspended and thread registers.
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg17, $s2
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg18, $s3
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg19, $s4
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg20, $s5
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg21, $s6
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg22, $s7
-// T8 and T9 (registers 24 and 25) are reserved as temporary/scratch registers.
-// K0, K1, GP, SP (registers 26 - 29) are reserved.
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg29, $s8
-// RA (register 31) is reserved.
-
-// Caller code:
-// Short constant offset/index:
-// ld $t9, pReadBarrierMarkReg00
-// beqzc $t9, skip_call
-// nop
-// jialc $t9, thunk_disp
-// skip_call:
-// lwu `out`, ofs(`obj`)
-// [dsubu `out`, $zero, `out`
-// dext `out`, `out`, 0, 32] # Unpoison reference.
-.macro BRB_FIELD_SHORT_OFFSET_ENTRY obj
- # Explicit null check. May be redundant (for array elements or when the field
- # offset is larger than the page size, 4KB).
- # $ra will be adjusted to point to lwu's stack map when throwing NPE.
- beqzc \obj, .Lintrospection_throw_npe
- lapc $t3, .Lintrospection_exits # $t3 = address of .Lintrospection_exits.
- .set push
- .set noat
- lw $at, MIRROR_OBJECT_LOCK_WORD_OFFSET(\obj)
- sll $at, $at, 31 - LOCK_WORD_READ_BARRIER_STATE_SHIFT # Move barrier state bit
- # to sign bit.
- bltz $at, .Lintrospection_field_array # If gray, load reference, mark.
- move $t8, \obj # Move `obj` to $t8 for common code.
- .set pop
- jalr $zero, $ra # Otherwise, load-load barrier and return.
- sync
-.endm
-
-// Caller code:
-// Long constant offset/index: | Variable index:
-// ld $t9, pReadBarrierMarkReg00
-// beqz $t9, skip_call | beqz $t9, skip_call
-// daui $t8, `obj`, ofs_hi | dlsa $t8, `index`, `obj`, 2
-// jialc $t9, thunk_disp | jialc $t9, thunk_disp
-// skip_call: | skip_call:
-// lwu `out`, ofs_lo($t8) | lwu `out`, ofs($t8)
-// [dsubu `out`, $zero, `out` | [dsubu `out`, $zero, `out`
-// dext `out`, `out`, 0, 32] | dext `out`, `out`, 0, 32] # Unpoison reference.
-.macro BRB_FIELD_LONG_OFFSET_ENTRY obj
- # No explicit null check for variable indices or large constant indices/offsets
- # as it must have been done earlier.
- lapc $t3, .Lintrospection_exits # $t3 = address of .Lintrospection_exits.
- .set push
- .set noat
- lw $at, MIRROR_OBJECT_LOCK_WORD_OFFSET(\obj)
- sll $at, $at, 31 - LOCK_WORD_READ_BARRIER_STATE_SHIFT # Move barrier state bit
- # to sign bit.
- bltzc $at, .Lintrospection_field_array # If gray, load reference, mark.
- .set pop
- sync # Otherwise, load-load barrier and return.
- jic $ra, 0
- break # Padding to 8 instructions.
- break
-.endm
-
-.macro BRB_GC_ROOT_ENTRY root
- lapc $t3, .Lintrospection_exit_\root # $t3 = exit point address.
- bnez \root, .Lintrospection_common
- move $t8, \root # Move reference to $t8 for common code.
- jic $ra, 0 # Return if null.
-.endm
-
-.macro BRB_FIELD_EXIT out
-.Lintrospection_exit_\out:
- jalr $zero, $ra
- move \out, $t8 # Return reference in expected register.
-.endm
-
-.macro BRB_FIELD_EXIT_BREAK
- break
- break
-.endm
-
-ENTRY_NO_GP art_quick_read_barrier_mark_introspection
- # Entry points for offsets/indices not fitting into int16_t and for variable indices.
- BRB_FIELD_LONG_OFFSET_ENTRY $v0
- BRB_FIELD_LONG_OFFSET_ENTRY $v1
- BRB_FIELD_LONG_OFFSET_ENTRY $a0
- BRB_FIELD_LONG_OFFSET_ENTRY $a1
- BRB_FIELD_LONG_OFFSET_ENTRY $a2
- BRB_FIELD_LONG_OFFSET_ENTRY $a3
- BRB_FIELD_LONG_OFFSET_ENTRY $a4
- BRB_FIELD_LONG_OFFSET_ENTRY $a5
- BRB_FIELD_LONG_OFFSET_ENTRY $a6
- BRB_FIELD_LONG_OFFSET_ENTRY $a7
- BRB_FIELD_LONG_OFFSET_ENTRY $t0
- BRB_FIELD_LONG_OFFSET_ENTRY $t1
- BRB_FIELD_LONG_OFFSET_ENTRY $t2
- BRB_FIELD_LONG_OFFSET_ENTRY $s2
- BRB_FIELD_LONG_OFFSET_ENTRY $s3
- BRB_FIELD_LONG_OFFSET_ENTRY $s4
- BRB_FIELD_LONG_OFFSET_ENTRY $s5
- BRB_FIELD_LONG_OFFSET_ENTRY $s6
- BRB_FIELD_LONG_OFFSET_ENTRY $s7
- BRB_FIELD_LONG_OFFSET_ENTRY $s8
-
- # Entry points for offsets/indices fitting into int16_t.
- BRB_FIELD_SHORT_OFFSET_ENTRY $v0
- BRB_FIELD_SHORT_OFFSET_ENTRY $v1
- BRB_FIELD_SHORT_OFFSET_ENTRY $a0
- BRB_FIELD_SHORT_OFFSET_ENTRY $a1
- BRB_FIELD_SHORT_OFFSET_ENTRY $a2
- BRB_FIELD_SHORT_OFFSET_ENTRY $a3
- BRB_FIELD_SHORT_OFFSET_ENTRY $a4
- BRB_FIELD_SHORT_OFFSET_ENTRY $a5
- BRB_FIELD_SHORT_OFFSET_ENTRY $a6
- BRB_FIELD_SHORT_OFFSET_ENTRY $a7
- BRB_FIELD_SHORT_OFFSET_ENTRY $t0
- BRB_FIELD_SHORT_OFFSET_ENTRY $t1
- BRB_FIELD_SHORT_OFFSET_ENTRY $t2
- BRB_FIELD_SHORT_OFFSET_ENTRY $s2
- BRB_FIELD_SHORT_OFFSET_ENTRY $s3
- BRB_FIELD_SHORT_OFFSET_ENTRY $s4
- BRB_FIELD_SHORT_OFFSET_ENTRY $s5
- BRB_FIELD_SHORT_OFFSET_ENTRY $s6
- BRB_FIELD_SHORT_OFFSET_ENTRY $s7
- BRB_FIELD_SHORT_OFFSET_ENTRY $s8
-
- .global art_quick_read_barrier_mark_introspection_gc_roots
-art_quick_read_barrier_mark_introspection_gc_roots:
- # Entry points for GC roots.
- BRB_GC_ROOT_ENTRY $v0
- BRB_GC_ROOT_ENTRY $v1
- BRB_GC_ROOT_ENTRY $a0
- BRB_GC_ROOT_ENTRY $a1
- BRB_GC_ROOT_ENTRY $a2
- BRB_GC_ROOT_ENTRY $a3
- BRB_GC_ROOT_ENTRY $a4
- BRB_GC_ROOT_ENTRY $a5
- BRB_GC_ROOT_ENTRY $a6
- BRB_GC_ROOT_ENTRY $a7
- BRB_GC_ROOT_ENTRY $t0
- BRB_GC_ROOT_ENTRY $t1
- BRB_GC_ROOT_ENTRY $t2
- BRB_GC_ROOT_ENTRY $s2
- BRB_GC_ROOT_ENTRY $s3
- BRB_GC_ROOT_ENTRY $s4
- BRB_GC_ROOT_ENTRY $s5
- BRB_GC_ROOT_ENTRY $s6
- BRB_GC_ROOT_ENTRY $s7
- BRB_GC_ROOT_ENTRY $s8
- .global art_quick_read_barrier_mark_introspection_end_of_entries
-art_quick_read_barrier_mark_introspection_end_of_entries:
-
-.Lintrospection_throw_npe:
- b art_quick_throw_null_pointer_exception
- daddiu $ra, $ra, 4 # Skip lwu, make $ra point to lwu's stack map.
-
- .set push
- .set noat
-
- // Fields and array elements.
-
-.Lintrospection_field_array:
- // Get the field/element address using $t8 and the offset from the lwu instruction.
- lh $at, 0($ra) # $ra points to lwu: $at = low 16 bits of field/element offset.
- daddiu $ra, $ra, 4 + HEAP_POISON_INSTR_SIZE # Skip lwu(+dsubu+dext).
- daddu $t8, $t8, $at # $t8 = field/element address.
-
- // Calculate the address of the exit point, store it in $t3 and load the reference into $t8.
- lb $at, (-HEAP_POISON_INSTR_SIZE - 2)($ra) # $ra-HEAP_POISON_INSTR_SIZE-4 points to
- # "lwu `out`, ...".
- andi $at, $at, 31 # Extract `out` from lwu.
-
- lwu $t8, 0($t8) # $t8 = reference.
- UNPOISON_HEAP_REF $t8
-
- // Return if null reference.
- bnez $t8, .Lintrospection_common
- dlsa $t3, $at, $t3, 3 # $t3 = address of the exit point
- # (BRB_FIELD_EXIT* macro is 8 bytes).
-
- // Early return through the exit point.
-.Lintrospection_return_early:
- jic $t3, 0 # Move $t8 to `out` and return.
-
- // Code common for GC roots, fields and array elements.
-
-.Lintrospection_common:
- // Check lock word for mark bit, if marked return.
- lw $t9, MIRROR_OBJECT_LOCK_WORD_OFFSET($t8)
- sll $at, $t9, 31 - LOCK_WORD_MARK_BIT_SHIFT # Move mark bit to sign bit.
- bltzc $at, .Lintrospection_return_early
-#if (LOCK_WORD_STATE_SHIFT != 30) || (LOCK_WORD_STATE_FORWARDING_ADDRESS != 3)
- // The below code depends on the lock word state being in the highest bits
- // and the "forwarding address" state having all bits set.
-#error "Unexpected lock word state shift or forwarding address state value."
-#endif
- // Test that both the forwarding state bits are 1.
- sll $at, $t9, 1
- and $at, $at, $t9 # Sign bit = 1 IFF both bits are 1.
- bgezc $at, .Lintrospection_mark
-
- .set pop
-
- // Shift left by the forwarding address shift. This clears out the state bits since they are
- // in the top 2 bits of the lock word.
- sll $t8, $t9, LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT
- jalr $zero, $t3 # Move $t8 to `out` and return.
- dext $t8, $t8, 0, 32 # Make sure the address is zero-extended.
-
-.Lintrospection_mark:
- // Partially set up the stack frame preserving only $ra.
- daddiu $sp, $sp, -320
- .cfi_adjust_cfa_offset 320
- sd $ra, 312($sp)
- .cfi_rel_offset 31, 312
-
- // Set up $gp, clobbering $ra.
- lapc $ra, 1f
-1:
- .cpsetup $ra, 304, 1b # Save old $gp in 304($sp).
-
- // Finalize the stack frame and call.
- sd $t3, 296($sp) # Preserve the exit point address.
- sd $t2, 288($sp)
- .cfi_rel_offset 14, 288
- sd $t1, 280($sp)
- .cfi_rel_offset 13, 280
- sd $t0, 272($sp)
- .cfi_rel_offset 12, 272
- sd $a7, 264($sp)
- .cfi_rel_offset 11, 264
- sd $a6, 256($sp)
- .cfi_rel_offset 10, 256
- sd $a5, 248($sp)
- .cfi_rel_offset 9, 248
- sd $a4, 240($sp)
- .cfi_rel_offset 8, 240
- sd $a3, 232($sp)
- .cfi_rel_offset 7, 232
- sd $a2, 224($sp)
- .cfi_rel_offset 6, 224
- sd $a1, 216($sp)
- .cfi_rel_offset 5, 216
- sd $a0, 208($sp)
- .cfi_rel_offset 4, 208
- sd $v1, 200($sp)
- .cfi_rel_offset 3, 200
- sd $v0, 192($sp)
- .cfi_rel_offset 2, 192
-
- dla $t9, artReadBarrierMark
-
- sdc1 $f23, 184($sp)
- sdc1 $f22, 176($sp)
- sdc1 $f21, 168($sp)
- sdc1 $f20, 160($sp)
- sdc1 $f19, 152($sp)
- sdc1 $f18, 144($sp)
- sdc1 $f17, 136($sp)
- sdc1 $f16, 128($sp)
- sdc1 $f15, 120($sp)
- sdc1 $f14, 112($sp)
- sdc1 $f13, 104($sp)
- sdc1 $f12, 96($sp)
- sdc1 $f11, 88($sp)
- sdc1 $f10, 80($sp)
- sdc1 $f9, 72($sp)
- sdc1 $f8, 64($sp)
- sdc1 $f7, 56($sp)
- sdc1 $f6, 48($sp)
- sdc1 $f5, 40($sp)
- sdc1 $f4, 32($sp)
- sdc1 $f3, 24($sp)
- sdc1 $f2, 16($sp)
- sdc1 $f1, 8($sp)
- sdc1 $f0, 0($sp)
-
- jalr $t9 # $v0 <- artReadBarrierMark(reference)
- move $a0, $t8 # Pass reference in $a0.
- move $t8, $v0
-
- ld $ra, 312($sp)
- .cfi_restore 31
- .cpreturn # Restore old $gp from 304($sp).
- ld $t3, 296($sp) # $t3 = address of the exit point.
- ld $t2, 288($sp)
- .cfi_restore 14
- ld $t1, 280($sp)
- .cfi_restore 13
- ld $t0, 272($sp)
- .cfi_restore 12
- ld $a7, 264($sp)
- .cfi_restore 11
- ld $a6, 256($sp)
- .cfi_restore 10
- ld $a5, 248($sp)
- .cfi_restore 9
- ld $a4, 240($sp)
- .cfi_restore 8
- ld $a3, 232($sp)
- .cfi_restore 7
- ld $a2, 224($sp)
- .cfi_restore 6
- ld $a1, 216($sp)
- .cfi_restore 5
- ld $a0, 208($sp)
- .cfi_restore 4
- ld $v1, 200($sp)
- .cfi_restore 3
- ld $v0, 192($sp)
- .cfi_restore 2
-
- ldc1 $f23, 184($sp)
- ldc1 $f22, 176($sp)
- ldc1 $f21, 168($sp)
- ldc1 $f20, 160($sp)
- ldc1 $f19, 152($sp)
- ldc1 $f18, 144($sp)
- ldc1 $f17, 136($sp)
- ldc1 $f16, 128($sp)
- ldc1 $f15, 120($sp)
- ldc1 $f14, 112($sp)
- ldc1 $f13, 104($sp)
- ldc1 $f12, 96($sp)
- ldc1 $f11, 88($sp)
- ldc1 $f10, 80($sp)
- ldc1 $f9, 72($sp)
- ldc1 $f8, 64($sp)
- ldc1 $f7, 56($sp)
- ldc1 $f6, 48($sp)
- ldc1 $f5, 40($sp)
- ldc1 $f4, 32($sp)
- ldc1 $f3, 24($sp)
- ldc1 $f2, 16($sp)
- ldc1 $f1, 8($sp)
- ldc1 $f0, 0($sp)
-
- // Return through the exit point.
- jalr $zero, $t3 # Move $t8 to `out` and return.
- daddiu $sp, $sp, 320
- .cfi_adjust_cfa_offset -320
-
-.Lintrospection_exits:
- BRB_FIELD_EXIT_BREAK
- BRB_FIELD_EXIT_BREAK
- BRB_FIELD_EXIT $v0
- BRB_FIELD_EXIT $v1
- BRB_FIELD_EXIT $a0
- BRB_FIELD_EXIT $a1
- BRB_FIELD_EXIT $a2
- BRB_FIELD_EXIT $a3
- BRB_FIELD_EXIT $a4
- BRB_FIELD_EXIT $a5
- BRB_FIELD_EXIT $a6
- BRB_FIELD_EXIT $a7
- BRB_FIELD_EXIT $t0
- BRB_FIELD_EXIT $t1
- BRB_FIELD_EXIT $t2
- BRB_FIELD_EXIT_BREAK
- BRB_FIELD_EXIT_BREAK
- BRB_FIELD_EXIT_BREAK
- BRB_FIELD_EXIT $s2
- BRB_FIELD_EXIT $s3
- BRB_FIELD_EXIT $s4
- BRB_FIELD_EXIT $s5
- BRB_FIELD_EXIT $s6
- BRB_FIELD_EXIT $s7
- BRB_FIELD_EXIT_BREAK
- BRB_FIELD_EXIT_BREAK
- BRB_FIELD_EXIT_BREAK
- BRB_FIELD_EXIT_BREAK
- BRB_FIELD_EXIT_BREAK
- BRB_FIELD_EXIT_BREAK
- BRB_FIELD_EXIT $s8
- BRB_FIELD_EXIT_BREAK
-END art_quick_read_barrier_mark_introspection
-
- /*
- * Polymorphic method invocation.
- * On entry:
- * a0 = unused
- * a1 = receiver
- */
-.extern artInvokePolymorphic
-ENTRY art_quick_invoke_polymorphic
- SETUP_SAVE_REFS_AND_ARGS_FRAME
- move $a0, $a1 # Make $a0 the receiver
- move $a1, rSELF # Make $a1 an alias for the current Thread.
- jal artInvokePolymorphic # artInvokePolymorphic(receiver, Thread*, context)
- move $a2, $sp # Make $a3 a pointer to the saved frame context.
- ld $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
- daddiu $sp, $sp, REFS_AND_ARGS_MINUS_REFS_SIZE # skip a0-a7 and f12-f19
- RESTORE_SAVE_REFS_ONLY_FRAME
- bne $t0, $zero, 1f
- dmtc1 $v0, $f0 # place return value to FP return value
- jalr $zero, $ra
- dmtc1 $v1, $f1 # place return value to FP return value
-1:
- DELIVER_PENDING_EXCEPTION
-END art_quick_invoke_polymorphic
-
- /*
- * InvokeCustom invocation.
- * On entry:
- * a0 = call_site_idx
- */
-.extern artInvokeCustom
-ENTRY art_quick_invoke_custom
- SETUP_SAVE_REFS_AND_ARGS_FRAME
- move $a1, rSELF # Make $a1 an alias for the current Thread.
- jal artInvokeCustom # Call artInvokeCustom(call_site_idx, Thread*, context).
- move $a2, $sp # Make $a1 a pointer to the saved frame context.
- ld $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
- daddiu $sp, $sp, REFS_AND_ARGS_MINUS_REFS_SIZE # skip a0-a7 and f12-f19
- RESTORE_SAVE_REFS_ONLY_FRAME
- bne $t0, $zero, 1f
- dmtc1 $v0, $f0 # place return value to FP return value
- jalr $zero, $ra
- dmtc1 $v1, $f1 # place return value to FP return value
-1:
- DELIVER_PENDING_EXCEPTION
-END art_quick_invoke_polymorphic
- .set pop
diff --git a/runtime/arch/mips64/registers_mips64.cc b/runtime/arch/mips64/registers_mips64.cc
deleted file mode 100644
index 1ee2cdd..0000000
--- a/runtime/arch/mips64/registers_mips64.cc
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "registers_mips64.h"
-
-#include <ostream>
-
-namespace art {
-namespace mips64 {
-
-static const char* kRegisterNames[] = {
- "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3",
- "a4", "a5", "a6", "a7", "t0", "t1", "t2", "t3",
- "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
- "t8", "t9", "k0", "k1", "gp", "sp", "s8", "ra",
-};
-
-std::ostream& operator<<(std::ostream& os, const GpuRegister& rhs) {
- if (rhs >= ZERO && rhs < kNumberOfGpuRegisters) {
- os << kRegisterNames[rhs];
- } else {
- os << "GpuRegister[" << static_cast<int>(rhs) << "]";
- }
- return os;
-}
-
-std::ostream& operator<<(std::ostream& os, const FpuRegister& rhs) {
- if (rhs >= F0 && rhs < kNumberOfFpuRegisters) {
- os << "f" << static_cast<int>(rhs);
- } else {
- os << "FpuRegister[" << static_cast<int>(rhs) << "]";
- }
- return os;
-}
-
-std::ostream& operator<<(std::ostream& os, const VectorRegister& rhs) {
- if (rhs >= W0 && rhs < kNumberOfVectorRegisters) {
- os << "w" << static_cast<int>(rhs);
- } else {
- os << "VectorRegister[" << static_cast<int>(rhs) << "]";
- }
- return os;
-}
-
-} // namespace mips64
-} // namespace art
diff --git a/runtime/arch/mips64/registers_mips64.h b/runtime/arch/mips64/registers_mips64.h
deleted file mode 100644
index 1c22c07..0000000
--- a/runtime/arch/mips64/registers_mips64.h
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ARCH_MIPS64_REGISTERS_MIPS64_H_
-#define ART_RUNTIME_ARCH_MIPS64_REGISTERS_MIPS64_H_
-
-#include <iosfwd>
-
-#include "base/macros.h"
-
-namespace art {
-namespace mips64 {
-
-enum GpuRegister {
- ZERO = 0,
- AT = 1, // Assembler temporary.
- V0 = 2, // Values.
- V1 = 3,
- A0 = 4, // Arguments.
- A1 = 5,
- A2 = 6,
- A3 = 7,
- A4 = 8,
- A5 = 9,
- A6 = 10,
- A7 = 11,
- T0 = 12, // Temporaries.
- T1 = 13,
- T2 = 14,
- T3 = 15,
- S0 = 16, // Saved values.
- S1 = 17,
- S2 = 18,
- S3 = 19,
- S4 = 20,
- S5 = 21,
- S6 = 22,
- S7 = 23,
- T8 = 24, // More temporaries.
- T9 = 25,
- K0 = 26, // Reserved for trap handler.
- K1 = 27,
- GP = 28, // Global pointer.
- SP = 29, // Stack pointer.
- S8 = 30, // Saved value/frame pointer.
- RA = 31, // Return address.
- TR = S1, // ART Thread Register
- TMP = T8, // scratch register (in addition to AT)
- TMP2 = T3, // scratch register (in addition to AT, reserved for assembler)
- kNumberOfGpuRegisters = 32,
- kNoGpuRegister = -1 // Signals an illegal register.
-};
-std::ostream& operator<<(std::ostream& os, const GpuRegister& rhs);
-
-// Values for floating point registers.
-enum FpuRegister {
- F0 = 0,
- F1 = 1,
- F2 = 2,
- F3 = 3,
- F4 = 4,
- F5 = 5,
- F6 = 6,
- F7 = 7,
- F8 = 8,
- F9 = 9,
- F10 = 10,
- F11 = 11,
- F12 = 12,
- F13 = 13,
- F14 = 14,
- F15 = 15,
- F16 = 16,
- F17 = 17,
- F18 = 18,
- F19 = 19,
- F20 = 20,
- F21 = 21,
- F22 = 22,
- F23 = 23,
- F24 = 24,
- F25 = 25,
- F26 = 26,
- F27 = 27,
- F28 = 28,
- F29 = 29,
- F30 = 30,
- F31 = 31,
- FTMP = F8, // scratch register
- FTMP2 = F9, // scratch register (in addition to FTMP, reserved for MSA instructions)
- kNumberOfFpuRegisters = 32,
- kNoFpuRegister = -1,
-};
-std::ostream& operator<<(std::ostream& os, const FpuRegister& rhs);
-
-// Values for vector registers.
-enum VectorRegister {
- W0 = 0,
- W1 = 1,
- W2 = 2,
- W3 = 3,
- W4 = 4,
- W5 = 5,
- W6 = 6,
- W7 = 7,
- W8 = 8,
- W9 = 9,
- W10 = 10,
- W11 = 11,
- W12 = 12,
- W13 = 13,
- W14 = 14,
- W15 = 15,
- W16 = 16,
- W17 = 17,
- W18 = 18,
- W19 = 19,
- W20 = 20,
- W21 = 21,
- W22 = 22,
- W23 = 23,
- W24 = 24,
- W25 = 25,
- W26 = 26,
- W27 = 27,
- W28 = 28,
- W29 = 29,
- W30 = 30,
- W31 = 31,
- kNumberOfVectorRegisters = 32,
- kNoVectorRegister = -1,
-};
-std::ostream& operator<<(std::ostream& os, const VectorRegister& rhs);
-
-} // namespace mips64
-} // namespace art
-
-#endif // ART_RUNTIME_ARCH_MIPS64_REGISTERS_MIPS64_H_
diff --git a/runtime/arch/mips64/thread_mips64.cc b/runtime/arch/mips64/thread_mips64.cc
deleted file mode 100644
index c1c390b..0000000
--- a/runtime/arch/mips64/thread_mips64.cc
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "thread.h"
-
-#include <android-base/logging.h>
-
-#include "asm_support_mips64.h"
-#include "base/enums.h"
-
-namespace art {
-
-void Thread::InitCpu() {
- CHECK_EQ(THREAD_FLAGS_OFFSET, ThreadFlagsOffset<PointerSize::k64>().Int32Value());
- CHECK_EQ(THREAD_CARD_TABLE_OFFSET, CardTableOffset<PointerSize::k64>().Int32Value());
- CHECK_EQ(THREAD_EXCEPTION_OFFSET, ExceptionOffset<PointerSize::k64>().Int32Value());
-}
-
-void Thread::CleanupCpu() {
- // Do nothing.
-}
-
-} // namespace art
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index c82b445..2b47cef 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -322,138 +322,6 @@
"d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
"d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
"memory");
-#elif defined(__mips__) && !defined(__LP64__)
- __asm__ __volatile__ (
- // Spill a0-a3 and t0-t7 which we say we don't clobber. May contain args.
- "addiu $sp, $sp, -64\n\t"
- "sw $a0, 0($sp)\n\t"
- "sw $a1, 4($sp)\n\t"
- "sw $a2, 8($sp)\n\t"
- "sw $a3, 12($sp)\n\t"
- "sw $t0, 16($sp)\n\t"
- "sw $t1, 20($sp)\n\t"
- "sw $t2, 24($sp)\n\t"
- "sw $t3, 28($sp)\n\t"
- "sw $t4, 32($sp)\n\t"
- "sw $t5, 36($sp)\n\t"
- "sw $t6, 40($sp)\n\t"
- "sw $t7, 44($sp)\n\t"
- // Spill gp register since it is caller save.
- "sw $gp, 52($sp)\n\t"
-
- "addiu $sp, $sp, -16\n\t" // Reserve stack space, 16B aligned.
- "sw %[referrer], 0($sp)\n\t"
-
- // Push everything on the stack, so we don't rely on the order.
- "addiu $sp, $sp, -24\n\t"
- "sw %[arg0], 0($sp)\n\t"
- "sw %[arg1], 4($sp)\n\t"
- "sw %[arg2], 8($sp)\n\t"
- "sw %[code], 12($sp)\n\t"
- "sw %[self], 16($sp)\n\t"
- "sw %[hidden], 20($sp)\n\t"
-
- // Load call params into the right registers.
- "lw $a0, 0($sp)\n\t"
- "lw $a1, 4($sp)\n\t"
- "lw $a2, 8($sp)\n\t"
- "lw $t9, 12($sp)\n\t"
- "lw $s1, 16($sp)\n\t"
- "lw $t7, 20($sp)\n\t"
- "addiu $sp, $sp, 24\n\t"
-
- "jalr $t9\n\t" // Call the stub.
- "nop\n\t"
- "addiu $sp, $sp, 16\n\t" // Drop the quick "frame".
-
- // Restore stuff not named clobbered.
- "lw $a0, 0($sp)\n\t"
- "lw $a1, 4($sp)\n\t"
- "lw $a2, 8($sp)\n\t"
- "lw $a3, 12($sp)\n\t"
- "lw $t0, 16($sp)\n\t"
- "lw $t1, 20($sp)\n\t"
- "lw $t2, 24($sp)\n\t"
- "lw $t3, 28($sp)\n\t"
- "lw $t4, 32($sp)\n\t"
- "lw $t5, 36($sp)\n\t"
- "lw $t6, 40($sp)\n\t"
- "lw $t7, 44($sp)\n\t"
- // Restore gp.
- "lw $gp, 52($sp)\n\t"
- "addiu $sp, $sp, 64\n\t" // Free stack space, now sp as on entry.
-
- "move %[result], $v0\n\t" // Store the call result.
- : [result] "=r" (result)
- : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
- [referrer] "r"(referrer), [hidden] "r"(hidden)
- : "at", "v0", "v1", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "t8", "t9", "k0", "k1",
- "fp", "ra",
- "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", "$f8", "$f9", "$f10", "$f11",
- "$f12", "$f13", "$f14", "$f15", "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22",
- "$f23", "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31",
- "memory"); // clobber.
-#elif defined(__mips__) && defined(__LP64__)
- __asm__ __volatile__ (
- // Spill a0-a7 which we say we don't clobber. May contain args.
- "daddiu $sp, $sp, -64\n\t"
- "sd $a0, 0($sp)\n\t"
- "sd $a1, 8($sp)\n\t"
- "sd $a2, 16($sp)\n\t"
- "sd $a3, 24($sp)\n\t"
- "sd $a4, 32($sp)\n\t"
- "sd $a5, 40($sp)\n\t"
- "sd $a6, 48($sp)\n\t"
- "sd $a7, 56($sp)\n\t"
-
- "daddiu $sp, $sp, -16\n\t" // Reserve stack space, 16B aligned.
- "sd %[referrer], 0($sp)\n\t"
-
- // Push everything on the stack, so we don't rely on the order.
- "daddiu $sp, $sp, -48\n\t"
- "sd %[arg0], 0($sp)\n\t"
- "sd %[arg1], 8($sp)\n\t"
- "sd %[arg2], 16($sp)\n\t"
- "sd %[code], 24($sp)\n\t"
- "sd %[self], 32($sp)\n\t"
- "sd %[hidden], 40($sp)\n\t"
-
- // Load call params into the right registers.
- "ld $a0, 0($sp)\n\t"
- "ld $a1, 8($sp)\n\t"
- "ld $a2, 16($sp)\n\t"
- "ld $t9, 24($sp)\n\t"
- "ld $s1, 32($sp)\n\t"
- "ld $t0, 40($sp)\n\t"
- "daddiu $sp, $sp, 48\n\t"
-
- "jalr $t9\n\t" // Call the stub.
- "nop\n\t"
- "daddiu $sp, $sp, 16\n\t" // Drop the quick "frame".
-
- // Restore stuff not named clobbered.
- "ld $a0, 0($sp)\n\t"
- "ld $a1, 8($sp)\n\t"
- "ld $a2, 16($sp)\n\t"
- "ld $a3, 24($sp)\n\t"
- "ld $a4, 32($sp)\n\t"
- "ld $a5, 40($sp)\n\t"
- "ld $a6, 48($sp)\n\t"
- "ld $a7, 56($sp)\n\t"
- "daddiu $sp, $sp, 64\n\t"
-
- "move %[result], $v0\n\t" // Store the call result.
- : [result] "=r" (result)
- : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
- [referrer] "r"(referrer), [hidden] "r"(hidden)
- // Instead aliases t0-t3, register names $12-$15 has been used in the clobber list because
- // t0-t3 are ambiguous.
- : "at", "v0", "v1", "$12", "$13", "$14", "$15", "s0", "s1", "s2", "s3", "s4", "s5", "s6",
- "s7", "t8", "t9", "k0", "k1", "fp", "ra",
- "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", "$f8", "$f9", "$f10", "$f11",
- "$f12", "$f13", "$f14", "$f15", "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22",
- "$f23", "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31",
- "memory"); // clobber.
#elif defined(__x86_64__) && !defined(__APPLE__)
#define PUSH(reg) "pushq " # reg "\n\t .cfi_adjust_cfa_offset 8\n\t"
#define POP(reg) "popq " # reg "\n\t .cfi_adjust_cfa_offset -8\n\t"
@@ -546,7 +414,7 @@
TEST_F(StubTest, Memcpy) {
-#if defined(__i386__) || (defined(__x86_64__) && !defined(__APPLE__)) || defined(__mips__)
+#if defined(__i386__) || (defined(__x86_64__) && !defined(__APPLE__))
Thread* self = Thread::Current();
uint32_t orig[20];
@@ -583,7 +451,7 @@
}
TEST_F(StubTest, LockObject) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
(defined(__x86_64__) && !defined(__APPLE__))
static constexpr size_t kThinLockLoops = 100;
@@ -657,7 +525,7 @@
// NO_THREAD_SAFETY_ANALYSIS as we do not want to grab exclusive mutator lock for MonitorInfo.
static void TestUnlockObject(StubTest* test) NO_THREAD_SAFETY_ANALYSIS {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
(defined(__x86_64__) && !defined(__APPLE__))
static constexpr size_t kThinLockLoops = 100;
@@ -808,13 +676,13 @@
TestUnlockObject(this);
}
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
(defined(__x86_64__) && !defined(__APPLE__))
extern "C" void art_quick_check_instance_of(void);
#endif
TEST_F(StubTest, CheckCast) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
(defined(__x86_64__) && !defined(__APPLE__))
Thread* self = Thread::Current();
@@ -913,7 +781,7 @@
}
TEST_F(StubTest, AllocObject) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
(defined(__x86_64__) && !defined(__APPLE__))
// This will lead to OOM error messages in the log.
ScopedLogSeverity sls(LogSeverity::FATAL);
@@ -1030,7 +898,7 @@
}
TEST_F(StubTest, AllocObjectArray) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
(defined(__x86_64__) && !defined(__APPLE__))
// TODO: Check the "Unresolved" allocation stubs
@@ -1095,8 +963,7 @@
TEST_F(StubTest, StringCompareTo) {
TEST_DISABLED_FOR_STRING_COMPRESSION();
// There is no StringCompareTo runtime entrypoint for __arm__ or __aarch64__.
-#if defined(__i386__) || defined(__mips__) || \
- (defined(__x86_64__) && !defined(__APPLE__))
+#if defined(__i386__) || (defined(__x86_64__) && !defined(__APPLE__))
// TODO: Check the "Unresolved" allocation stubs
Thread* self = Thread::Current();
@@ -1178,7 +1045,7 @@
static void GetSetBooleanStatic(ArtField* f, Thread* self,
ArtMethod* referrer, StubTest* test)
REQUIRES_SHARED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
(defined(__x86_64__) && !defined(__APPLE__))
constexpr size_t num_values = 5;
uint8_t values[num_values] = { 0, 1, 2, 128, 0xFF };
@@ -1209,7 +1076,7 @@
static void GetSetByteStatic(ArtField* f, Thread* self, ArtMethod* referrer,
StubTest* test)
REQUIRES_SHARED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
(defined(__x86_64__) && !defined(__APPLE__))
int8_t values[] = { -128, -64, 0, 64, 127 };
@@ -1240,7 +1107,7 @@
static void GetSetBooleanInstance(Handle<mirror::Object>* obj, ArtField* f, Thread* self,
ArtMethod* referrer, StubTest* test)
REQUIRES_SHARED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
(defined(__x86_64__) && !defined(__APPLE__))
uint8_t values[] = { 0, true, 2, 128, 0xFF };
@@ -1275,7 +1142,7 @@
static void GetSetByteInstance(Handle<mirror::Object>* obj, ArtField* f,
Thread* self, ArtMethod* referrer, StubTest* test)
REQUIRES_SHARED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
(defined(__x86_64__) && !defined(__APPLE__))
int8_t values[] = { -128, -64, 0, 64, 127 };
@@ -1310,7 +1177,7 @@
static void GetSetCharStatic(ArtField* f, Thread* self, ArtMethod* referrer,
StubTest* test)
REQUIRES_SHARED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
(defined(__x86_64__) && !defined(__APPLE__))
uint16_t values[] = { 0, 1, 2, 255, 32768, 0xFFFF };
@@ -1340,7 +1207,7 @@
static void GetSetShortStatic(ArtField* f, Thread* self,
ArtMethod* referrer, StubTest* test)
REQUIRES_SHARED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
(defined(__x86_64__) && !defined(__APPLE__))
int16_t values[] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE };
@@ -1371,7 +1238,7 @@
static void GetSetCharInstance(Handle<mirror::Object>* obj, ArtField* f,
Thread* self, ArtMethod* referrer, StubTest* test)
REQUIRES_SHARED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
(defined(__x86_64__) && !defined(__APPLE__))
uint16_t values[] = { 0, 1, 2, 255, 32768, 0xFFFF };
@@ -1405,7 +1272,7 @@
static void GetSetShortInstance(Handle<mirror::Object>* obj, ArtField* f,
Thread* self, ArtMethod* referrer, StubTest* test)
REQUIRES_SHARED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
(defined(__x86_64__) && !defined(__APPLE__))
int16_t values[] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE };
@@ -1440,7 +1307,7 @@
static void GetSet32Static(ArtField* f, Thread* self, ArtMethod* referrer,
StubTest* test)
REQUIRES_SHARED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
(defined(__x86_64__) && !defined(__APPLE__))
uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };
@@ -1458,11 +1325,7 @@
self,
referrer);
-#if defined(__mips__) && defined(__LP64__)
- EXPECT_EQ(static_cast<uint32_t>(res), values[i]) << "Iteration " << i;
-#else
EXPECT_EQ(res, values[i]) << "Iteration " << i;
-#endif
}
#else
UNUSED(f, self, referrer, test);
@@ -1476,7 +1339,7 @@
static void GetSet32Instance(Handle<mirror::Object>* obj, ArtField* f,
Thread* self, ArtMethod* referrer, StubTest* test)
REQUIRES_SHARED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
(defined(__x86_64__) && !defined(__APPLE__))
uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };
@@ -1511,7 +1374,7 @@
}
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
(defined(__x86_64__) && !defined(__APPLE__))
static void set_and_check_static(uint32_t f_idx,
@@ -1543,7 +1406,7 @@
static void GetSetObjStatic(ArtField* f, Thread* self, ArtMethod* referrer,
StubTest* test)
REQUIRES_SHARED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
(defined(__x86_64__) && !defined(__APPLE__))
set_and_check_static(f->GetDexFieldIndex(), nullptr, self, referrer, test);
@@ -1561,7 +1424,7 @@
}
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
(defined(__x86_64__) && !defined(__APPLE__))
static void set_and_check_instance(ArtField* f,
ObjPtr<mirror::Object> trg,
@@ -1596,7 +1459,7 @@
static void GetSetObjInstance(Handle<mirror::Object>* obj, ArtField* f,
Thread* self, ArtMethod* referrer, StubTest* test)
REQUIRES_SHARED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
(defined(__x86_64__) && !defined(__APPLE__))
set_and_check_instance(f, obj->Get(), nullptr, self, referrer, test);
@@ -1619,8 +1482,7 @@
static void GetSet64Static(ArtField* f, Thread* self, ArtMethod* referrer,
StubTest* test)
REQUIRES_SHARED(Locks::mutator_lock_) {
-#if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) \
- || defined(__aarch64__)
+#if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };
for (size_t i = 0; i < arraysize(values); ++i) {
@@ -1652,8 +1514,7 @@
static void GetSet64Instance(Handle<mirror::Object>* obj, ArtField* f,
Thread* self, ArtMethod* referrer, StubTest* test)
REQUIRES_SHARED(Locks::mutator_lock_) {
-#if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) || \
- defined(__aarch64__)
+#if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };
for (size_t i = 0; i < arraysize(values); ++i) {
@@ -1842,7 +1703,7 @@
// and gets a bogus OatQuickMethodHeader* pointing into our assembly code just before
// the bridge and uses that to check for inlined frames, crashing in the process.
TEST_F(StubTest, DISABLED_IMT) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
(defined(__x86_64__) && !defined(__APPLE__))
Thread* self = Thread::Current();
@@ -1981,7 +1842,7 @@
}
TEST_F(StubTest, StringIndexOf) {
-#if defined(__arm__) || defined(__aarch64__) || defined(__mips__)
+#if defined(__arm__) || defined(__aarch64__)
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
// garbage is created during ClassLinker::Init
@@ -2058,7 +1919,7 @@
TEST_F(StubTest, ReadBarrier) {
#if defined(ART_USE_READ_BARRIER) && (defined(__i386__) || defined(__arm__) || \
- defined(__aarch64__) || defined(__mips__) || (defined(__x86_64__) && !defined(__APPLE__)))
+ defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__)))
Thread* self = Thread::Current();
const uintptr_t readBarrierSlow = StubTest::GetEntrypoint(self, kQuickReadBarrierSlow);
@@ -2094,7 +1955,7 @@
TEST_F(StubTest, ReadBarrierForRoot) {
#if defined(ART_USE_READ_BARRIER) && (defined(__i386__) || defined(__arm__) || \
- defined(__aarch64__) || defined(__mips__) || (defined(__x86_64__) && !defined(__APPLE__)))
+ defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__)))
Thread* self = Thread::Current();
const uintptr_t readBarrierForRootSlow =
diff --git a/runtime/base/quasi_atomic.h b/runtime/base/quasi_atomic.h
index 0012f64..5aa4dde 100644
--- a/runtime/base/quasi_atomic.h
+++ b/runtime/base/quasi_atomic.h
@@ -46,9 +46,9 @@
// quasiatomic operations that are performed on partially-overlapping
// memory.
class QuasiAtomic {
- static constexpr bool NeedSwapMutexes(InstructionSet isa) {
- // TODO - mips64 still need this for Cas64 ???
- return (isa == InstructionSet::kMips) || (isa == InstructionSet::kMips64);
+ static constexpr bool NeedSwapMutexes(InstructionSet isa ATTRIBUTE_UNUSED) {
+ // TODO: Remove this function now that MIPS support has been removed.
+ return false;
}
public:
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index 40f35b3..ed8a024 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -93,12 +93,7 @@
return GetAndroidToolsDir("prebuilts/gcc/linux-x86/x86",
"x86_64-linux-android",
"x86_64-linux-android");
- case InstructionSet::kMips:
- case InstructionSet::kMips64:
- return GetAndroidToolsDir("prebuilts/gcc/linux-x86/mips",
- "mips64el-linux-android",
- "mips64el-linux-android");
- case InstructionSet::kNone:
+ default:
break;
}
ADD_FAILURE() << "Invalid isa " << isa;
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 7500515..2dc8744 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -260,18 +260,6 @@
return; \
}
-#define TEST_DISABLED_FOR_MIPS() \
- if (kRuntimeISA == InstructionSet::kMips) { \
- printf("WARNING: TEST DISABLED FOR MIPS\n"); \
- return; \
- }
-
-#define TEST_DISABLED_FOR_MIPS64() \
- if (kRuntimeISA == InstructionSet::kMips64) { \
- printf("WARNING: TEST DISABLED FOR MIPS64\n"); \
- return; \
- }
-
#define TEST_DISABLED_FOR_X86() \
if (kRuntimeISA == InstructionSet::kX86) { \
printf("WARNING: TEST DISABLED FOR X86\n"); \
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 12c33de..6bd1c8f 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -1076,7 +1076,8 @@
return true;
}
-static InstructionSet GetInstructionSetFromELF(uint16_t e_machine, uint32_t e_flags) {
+static InstructionSet GetInstructionSetFromELF(uint16_t e_machine,
+ uint32_t e_flags ATTRIBUTE_UNUSED) {
switch (e_machine) {
case EM_ARM:
return InstructionSet::kArm;
@@ -1086,15 +1087,6 @@
return InstructionSet::kX86;
case EM_X86_64:
return InstructionSet::kX86_64;
- case EM_MIPS: {
- if ((e_flags & EF_MIPS_ARCH) == EF_MIPS_ARCH_32R2 ||
- (e_flags & EF_MIPS_ARCH) == EF_MIPS_ARCH_32R6) {
- return InstructionSet::kMips;
- } else if ((e_flags & EF_MIPS_ARCH) == EF_MIPS_ARCH_64R6) {
- return InstructionSet::kMips64;
- }
- break;
- }
}
return InstructionSet::kNone;
}
diff --git a/runtime/entrypoints/quick/callee_save_frame.h b/runtime/entrypoints/quick/callee_save_frame.h
index e555d68..1baccee 100644
--- a/runtime/entrypoints/quick/callee_save_frame.h
+++ b/runtime/entrypoints/quick/callee_save_frame.h
@@ -28,8 +28,6 @@
// specialize the code.
#include "arch/arm/callee_save_frame_arm.h"
#include "arch/arm64/callee_save_frame_arm64.h"
-#include "arch/mips/callee_save_frame_mips.h"
-#include "arch/mips64/callee_save_frame_mips64.h"
#include "arch/x86/callee_save_frame_x86.h"
#include "arch/x86_64/callee_save_frame_x86_64.h"
@@ -79,10 +77,6 @@
template <>
struct CSFSelector<InstructionSet::kArm64> { using type = arm64::Arm64CalleeSaveFrame; };
template <>
-struct CSFSelector<InstructionSet::kMips> { using type = mips::MipsCalleeSaveFrame; };
-template <>
-struct CSFSelector<InstructionSet::kMips64> { using type = mips64::Mips64CalleeSaveFrame; };
-template <>
struct CSFSelector<InstructionSet::kX86> { using type = x86::X86CalleeSaveFrame; };
template <>
struct CSFSelector<InstructionSet::kX86_64> { using type = x86_64::X86_64CalleeSaveFrame; };
diff --git a/runtime/entrypoints/quick/quick_entrypoints.h b/runtime/entrypoints/quick/quick_entrypoints.h
index 954450f..d75893d 100644
--- a/runtime/entrypoints/quick/quick_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_entrypoints.h
@@ -91,7 +91,7 @@
// Read barrier entrypoints.
//
-// Compilers for ARM, ARM64, MIPS, MIPS64 can insert a call to these
+// Compilers for ARM and ARM64 can insert a call to these
// functions directly. For x86 and x86-64, compilers need a wrapper
// assembly function, to handle mismatch in ABI.
diff --git a/runtime/entrypoints/quick/quick_entrypoints_enum.cc b/runtime/entrypoints/quick/quick_entrypoints_enum.cc
index 81f152b..5387e44 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_enum.cc
+++ b/runtime/entrypoints/quick/quick_entrypoints_enum.cc
@@ -61,6 +61,7 @@
case kQuickUshrLong:
return false;
+ // TODO: Remove these entrypoints now that MIPS support was removed.
/* Used by mips for 64bit volatile load/stores. */
case kQuickA64Load:
case kQuickA64Store:
@@ -112,6 +113,7 @@
case kQuickUshrLong:
return false;
+ // TODO: Remove these entrypoints now that MIPS support was removed.
/* Used by mips for 64bit volatile load/stores. */
case kQuickA64Load:
case kQuickA64Store:
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index aab5ff5..95c9ffc 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -139,90 +139,6 @@
static size_t GprIndexToGprOffset(uint32_t gpr_index) {
return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
}
-#elif defined(__mips__) && !defined(__LP64__)
- // The callee save frame is pointed to by SP.
- // | argN | |
- // | ... | |
- // | arg4 | |
- // | arg3 spill | | Caller's frame
- // | arg2 spill | |
- // | arg1 spill | |
- // | Method* | ---
- // | RA |
- // | ... | callee saves
- // | T1 | arg5
- // | T0 | arg4
- // | A3 | arg3
- // | A2 | arg2
- // | A1 | arg1
- // | F19 |
- // | F18 | f_arg5
- // | F17 |
- // | F16 | f_arg4
- // | F15 |
- // | F14 | f_arg3
- // | F13 |
- // | F12 | f_arg2
- // | F11 |
- // | F10 | f_arg1
- // | F9 |
- // | F8 | f_arg0
- // | | padding
- // | A0/Method* | <- sp
- static constexpr bool kSplitPairAcrossRegisterAndStack = false;
- static constexpr bool kAlignPairRegister = true;
- static constexpr bool kQuickSoftFloatAbi = false;
- static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
- static constexpr bool kQuickSkipOddFpRegisters = true;
- static constexpr size_t kNumQuickGprArgs = 5; // 5 arguments passed in GPRs.
- static constexpr size_t kNumQuickFprArgs = 12; // 6 arguments passed in FPRs. Floats can be
- // passed only in even numbered registers and each
- // double occupies two registers.
- static constexpr bool kGprFprLockstep = false;
- static size_t GprIndexToGprOffset(uint32_t gpr_index) {
- return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
- }
-#elif defined(__mips__) && defined(__LP64__)
- // The callee save frame is pointed to by SP.
- // | argN | |
- // | ... | |
- // | arg4 | |
- // | arg3 spill | | Caller's frame
- // | arg2 spill | |
- // | arg1 spill | |
- // | Method* | ---
- // | RA |
- // | ... | callee saves
- // | A7 | arg7
- // | A6 | arg6
- // | A5 | arg5
- // | A4 | arg4
- // | A3 | arg3
- // | A2 | arg2
- // | A1 | arg1
- // | F19 | f_arg7
- // | F18 | f_arg6
- // | F17 | f_arg5
- // | F16 | f_arg4
- // | F15 | f_arg3
- // | F14 | f_arg2
- // | F13 | f_arg1
- // | F12 | f_arg0
- // | | padding
- // | A0/Method* | <- sp
- // NOTE: for Mip64, when A0 is skipped, F12 is also skipped.
- static constexpr bool kSplitPairAcrossRegisterAndStack = false;
- static constexpr bool kAlignPairRegister = false;
- static constexpr bool kQuickSoftFloatAbi = false;
- static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
- static constexpr bool kQuickSkipOddFpRegisters = false;
- static constexpr size_t kNumQuickGprArgs = 7; // 7 arguments passed in GPRs.
- static constexpr size_t kNumQuickFprArgs = 7; // 7 arguments passed in FPRs.
- static constexpr bool kGprFprLockstep = true;
-
- static size_t GprIndexToGprOffset(uint32_t gpr_index) {
- return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
- }
#elif defined(__i386__)
// The callee save frame is pointed to by SP.
// | argN | |
@@ -520,15 +436,10 @@
case Primitive::kPrimLong:
if (kQuickSoftFloatAbi || (cur_type_ == Primitive::kPrimLong)) {
if (cur_type_ == Primitive::kPrimLong &&
-#if defined(__mips__) && !defined(__LP64__)
- (gpr_index_ == 0 || gpr_index_ == 2) &&
-#else
gpr_index_ == 0 &&
-#endif
kAlignPairRegister) {
- // Currently, this is only for ARM and MIPS, where we align long parameters with
- // even-numbered registers by skipping R1 (on ARM) or A1(A3) (on MIPS) and using
- // R2 (on ARM) or A2(T0) (on MIPS) instead.
+ // Currently, this is only for ARM, where we align long parameters with
+ // even-numbered registers by skipping R1 and using R2 instead.
IncGprIndex();
}
is_split_long_or_double_ = (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) &&
@@ -1570,31 +1481,6 @@
static constexpr bool kMultiGPRegistersWidened = false;
static constexpr bool kAlignLongOnStack = false;
static constexpr bool kAlignDoubleOnStack = false;
-#elif defined(__mips__) && !defined(__LP64__)
- static constexpr bool kNativeSoftFloatAbi = true; // This is a hard float ABI.
- static constexpr size_t kNumNativeGprArgs = 4; // 4 arguments passed in GPRs.
- static constexpr size_t kNumNativeFprArgs = 0; // 0 arguments passed in FPRs.
-
- static constexpr size_t kRegistersNeededForLong = 2;
- static constexpr size_t kRegistersNeededForDouble = 2;
- static constexpr bool kMultiRegistersAligned = true;
- static constexpr bool kMultiFPRegistersWidened = true;
- static constexpr bool kMultiGPRegistersWidened = false;
- static constexpr bool kAlignLongOnStack = true;
- static constexpr bool kAlignDoubleOnStack = true;
-#elif defined(__mips__) && defined(__LP64__)
- // Let the code prepare GPRs only and we will load the FPRs with same data.
- static constexpr bool kNativeSoftFloatAbi = true;
- static constexpr size_t kNumNativeGprArgs = 8;
- static constexpr size_t kNumNativeFprArgs = 0;
-
- static constexpr size_t kRegistersNeededForLong = 1;
- static constexpr size_t kRegistersNeededForDouble = 1;
- static constexpr bool kMultiRegistersAligned = false;
- static constexpr bool kMultiFPRegistersWidened = false;
- static constexpr bool kMultiGPRegistersWidened = true;
- static constexpr bool kAlignLongOnStack = false;
- static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__i386__)
// TODO: Check these!
static constexpr bool kNativeSoftFloatAbi = false; // Not using int registers for fp
@@ -2354,41 +2240,6 @@
// does not handle that case. Calls from compiled stubs are also broken.
void const* nativeCode = called->GetEntryPointFromJni();
-#if defined(__mips__) && !defined(__LP64__)
- // On MIPS32 if the first two arguments are floating-point, we need to know their types
- // so that art_quick_generic_jni_trampoline can correctly extract them from the stack
- // and load into floating-point registers.
- // Possible arrangements of first two floating-point arguments on the stack (32-bit FPU
- // view):
- // (1)
- // | DOUBLE | DOUBLE | other args, if any
- // | F12 | F13 | F14 | F15 |
- // | SP+0 | SP+4 | SP+8 | SP+12 | SP+16
- // (2)
- // | DOUBLE | FLOAT | (PAD) | other args, if any
- // | F12 | F13 | F14 | |
- // | SP+0 | SP+4 | SP+8 | SP+12 | SP+16
- // (3)
- // | FLOAT | (PAD) | DOUBLE | other args, if any
- // | F12 | | F14 | F15 |
- // | SP+0 | SP+4 | SP+8 | SP+12 | SP+16
- // (4)
- // | FLOAT | FLOAT | other args, if any
- // | F12 | F14 |
- // | SP+0 | SP+4 | SP+8
- // As you can see, only the last case (4) is special. In all others we can just
- // load F12/F13 and F14/F15 in the same manner.
- // Set bit 0 of the native code address to 1 in this case (valid code addresses
- // are always a multiple of 4 on MIPS32, so we have 2 spare bits available).
- if (nativeCode != nullptr &&
- shorty != nullptr &&
- shorty_len >= 3 &&
- shorty[1] == 'F' &&
- shorty[2] == 'F') {
- nativeCode = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(nativeCode) | 1);
- }
-#endif
-
VLOG(third_party_jni) << "GenericJNI: "
<< called->PrettyMethod()
<< " -> "
diff --git a/runtime/mirror/dex_cache.cc b/runtime/mirror/dex_cache.cc
index dba2860..0c49b90 100644
--- a/runtime/mirror/dex_cache.cc
+++ b/runtime/mirror/dex_cache.cc
@@ -279,7 +279,7 @@
SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(DexCache, location_), location);
}
-#if !defined(__aarch64__) && !defined(__x86_64__) && !defined(__mips__)
+#if !defined(__aarch64__) && !defined(__x86_64__)
static pthread_mutex_t dex_cache_slow_atomic_mutex = PTHREAD_MUTEX_INITIALIZER;
DexCache::ConversionPair64 DexCache::AtomicLoadRelaxed16B(std::atomic<ConversionPair64>* target) {
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 292db14..9dc029b 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -518,8 +518,8 @@
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
// Due to lack of 16-byte atomics support, we use hand-crafted routines.
-#if defined(__aarch64__) || defined(__mips__)
- // 16-byte atomics are supported on aarch64, mips and mips64.
+#if defined(__aarch64__)
+ // 16-byte atomics are supported on aarch64.
ALWAYS_INLINE static ConversionPair64 AtomicLoadRelaxed16B(
std::atomic<ConversionPair64>* target) {
return target->load(std::memory_order_relaxed);
diff --git a/runtime/mirror/object-readbarrier-inl.h b/runtime/mirror/object-readbarrier-inl.h
index ee84997..8b5703e 100644
--- a/runtime/mirror/object-readbarrier-inl.h
+++ b/runtime/mirror/object-readbarrier-inl.h
@@ -116,12 +116,9 @@
uint32_t rb_state = lw.ReadBarrierState();
return rb_state;
#else
- // MIPS32/MIPS64: use a memory barrier to prevent load-load reordering.
- LockWord lw = GetLockWord(false);
- *fake_address_dependency = 0;
- std::atomic_thread_fence(std::memory_order_acquire);
- uint32_t rb_state = lw.ReadBarrierState();
- return rb_state;
+ UNUSED(fake_address_dependency);
+ LOG(FATAL) << "Unsupported architecture.";
+ UNREACHABLE();
#endif
}
diff --git a/runtime/parsed_options_test.cc b/runtime/parsed_options_test.cc
index dd9ca23..8873eb9 100644
--- a/runtime/parsed_options_test.cc
+++ b/runtime/parsed_options_test.cc
@@ -163,13 +163,11 @@
EXPECT_EQ(kRuntimeISA, isa);
}
- const char* isa_strings[] = { "arm", "arm64", "x86", "x86_64", "mips", "mips64" };
+ const char* isa_strings[] = { "arm", "arm64", "x86", "x86_64" };
InstructionSet ISAs[] = { InstructionSet::kArm,
InstructionSet::kArm64,
InstructionSet::kX86,
- InstructionSet::kX86_64,
- InstructionSet::kMips,
- InstructionSet::kMips64 };
+ InstructionSet::kX86_64 };
static_assert(arraysize(isa_strings) == arraysize(ISAs), "Need same amount.");
for (size_t i = 0; i < arraysize(isa_strings); ++i) {
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index a40049f..1906b53 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -45,8 +45,6 @@
#include "arch/arm64/registers_arm64.h"
#include "arch/context.h"
#include "arch/instruction_set_features.h"
-#include "arch/mips/registers_mips.h"
-#include "arch/mips64/registers_mips64.h"
#include "arch/x86/registers_x86.h"
#include "arch/x86_64/registers_x86_64.h"
#include "art_field-inl.h"
@@ -1487,8 +1485,6 @@
case InstructionSet::kX86:
case InstructionSet::kArm64:
case InstructionSet::kX86_64:
- case InstructionSet::kMips:
- case InstructionSet::kMips64:
implicit_null_checks_ = true;
// Historical note: Installing stack protection was not playing well with Valgrind.
implicit_so_checks_ = true;
@@ -2374,8 +2370,6 @@
break;
case InstructionSet::kArm:
case InstructionSet::kArm64:
- case InstructionSet::kMips:
- case InstructionSet::kMips64:
case InstructionSet::kX86:
case InstructionSet::kX86_64:
break;
diff --git a/runtime/runtime_common.cc b/runtime/runtime_common.cc
index e5a61c2..c4a695f 100644
--- a/runtime/runtime_common.cc
+++ b/runtime/runtime_common.cc
@@ -281,7 +281,6 @@
DumpArmStatusRegister(os, context.pstate);
os << '\n';
#else
- // TODO: Add support for MIPS32 and MIPS64.
os << "Unknown architecture/word size/OS in ucontext dump";
#endif
}
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 0459091..8916618 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -370,14 +370,6 @@
reg = (kind == kDoubleHiVReg) ? (2 * reg + 1) : (2 * reg);
}
- // MIPS32 float registers are used as 64-bit (for MIPS32r2 it is pair
- // F(2n)-F(2n+1), and for MIPS32r6 it is 64-bit register F(2n)). When
- // accessing upper 32-bits from double, reg + 1 should be used.
- if ((kRuntimeISA == InstructionSet::kMips) && (kind == kDoubleHiVReg)) {
- DCHECK_ALIGNED(reg, 2);
- reg++;
- }
-
if (!IsAccessibleRegister(reg, is_float)) {
return false;
}
diff --git a/test/Android.bp b/test/Android.bp
index 4a805bd..400fc3a 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -26,12 +26,6 @@
android_arm64: {
relative_install_path: "art/arm64",
},
- android_mips: {
- relative_install_path: "art/mips",
- },
- android_mips64: {
- relative_install_path: "art/mips64",
- },
android_x86: {
relative_install_path: "art/x86",
},