Merge "Make verify-profile not look at the profile."
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index df2dbc7..c1b3a40 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -128,7 +128,9 @@
}
Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
return Primitive::Is64BitType(type)
- ? Location::RegisterPairLocation(R2, R3)
+ ? (is_instance
+ ? Location::RegisterPairLocation(R2, R3)
+ : Location::RegisterPairLocation(R1, R2))
: (is_instance
? Location::RegisterLocation(R2)
: Location::RegisterLocation(R1));
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 7d3c655..f6cb90a 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -210,12 +210,11 @@
Location GetReturnLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
return helpers::LocationFrom(vixl::aarch64::x0);
}
- Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
- return Primitive::Is64BitType(type)
+ Location GetSetValueLocation(Primitive::Type type ATTRIBUTE_UNUSED,
+ bool is_instance) const OVERRIDE {
+ return is_instance
? helpers::LocationFrom(vixl::aarch64::x2)
- : (is_instance
- ? helpers::LocationFrom(vixl::aarch64::x2)
- : helpers::LocationFrom(vixl::aarch64::x1));
+ : helpers::LocationFrom(vixl::aarch64::x1);
}
Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
return helpers::LocationFrom(vixl::aarch64::d0);
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index ffaf18f..893e465 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -3998,8 +3998,8 @@
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
locations->SetOut(LocationFrom(r0));
- locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(1)));
- locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(2)));
+ locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
}
void InstructionCodeGeneratorARMVIXL::VisitNewArray(HNewArray* instruction) {
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 8ae3b7d..9105118 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -199,7 +199,9 @@
}
Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
return Primitive::Is64BitType(type)
- ? helpers::LocationFrom(vixl::aarch32::r2, vixl::aarch32::r3)
+ ? (is_instance
+ ? helpers::LocationFrom(vixl::aarch32::r2, vixl::aarch32::r3)
+ : helpers::LocationFrom(vixl::aarch32::r1, vixl::aarch32::r2))
: (is_instance
? helpers::LocationFrom(vixl::aarch32::r2)
: helpers::LocationFrom(vixl::aarch32::r1));
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 7350fcc..5360dc9 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -110,7 +110,9 @@
}
Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
return Primitive::Is64BitType(type)
- ? Location::RegisterPairLocation(EDX, EBX)
+ ? (is_instance
+ ? Location::RegisterPairLocation(EDX, EBX)
+ : Location::RegisterPairLocation(ECX, EDX))
: (is_instance
? Location::RegisterLocation(EDX)
: Location::RegisterLocation(ECX));
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 3438b81..3a83731 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -92,12 +92,11 @@
Location GetReturnLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
return Location::RegisterLocation(RAX);
}
- Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
- return Primitive::Is64BitType(type)
+ Location GetSetValueLocation(Primitive::Type type ATTRIBUTE_UNUSED, bool is_instance)
+ const OVERRIDE {
+ return is_instance
? Location::RegisterLocation(RDX)
- : (is_instance
- ? Location::RegisterLocation(RDX)
- : Location::RegisterLocation(RSI));
+ : Location::RegisterLocation(RSI);
}
Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
return Location::FpuRegisterLocation(XMM0);
diff --git a/runtime/arch/arm/instruction_set_features_arm.cc b/runtime/arch/arm/instruction_set_features_arm.cc
index 6c2c815..8384460 100644
--- a/runtime/arch/arm/instruction_set_features_arm.cc
+++ b/runtime/arch/arm/instruction_set_features_arm.cc
@@ -31,6 +31,7 @@
#if defined(__arm__)
extern "C" bool artCheckForArmSdivInstruction();
+extern "C" bool artCheckForArmv8AInstructions();
#endif
namespace art {
@@ -39,22 +40,34 @@
ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromVariant(
const std::string& variant, std::string* error_msg) {
+ static const char* arm_variants_with_armv8a[] = {
+ "cortex-a32",
+ "cortex-a35",
+ "cortex-a53",
+ "cortex-a53.a57",
+ "cortex-a53.a72",
+ "cortex-a57",
+ "cortex-a72",
+ "cortex-a73",
+ "exynos-m1",
+ "denver",
+ "kryo"
+ };
+ bool has_armv8a = FindVariantInArray(arm_variants_with_armv8a,
+ arraysize(arm_variants_with_armv8a),
+ variant);
+
// Look for variants that have divide support.
static const char* arm_variants_with_div[] = {
"cortex-a7",
"cortex-a12",
"cortex-a15",
"cortex-a17",
- "cortex-a53",
- "cortex-a53.a57",
- "cortex-a57",
- "denver",
"krait",
};
-
- bool has_div = FindVariantInArray(arm_variants_with_div,
- arraysize(arm_variants_with_div),
- variant);
+ bool has_div = has_armv8a || FindVariantInArray(arm_variants_with_div,
+ arraysize(arm_variants_with_div),
+ variant);
// Look for variants that have LPAE support.
static const char* arm_variants_with_lpae[] = {
@@ -62,17 +75,13 @@
"cortex-a12",
"cortex-a15",
"cortex-a17",
- "cortex-a53",
- "cortex-a53.a57",
- "cortex-a57",
- "denver",
"krait",
};
- bool has_lpae = FindVariantInArray(arm_variants_with_lpae,
- arraysize(arm_variants_with_lpae),
- variant);
+ bool has_atomic_ldrd_strd = has_armv8a || FindVariantInArray(arm_variants_with_lpae,
+ arraysize(arm_variants_with_lpae),
+ variant);
- if (has_div == false && has_lpae == false) {
+ if (has_armv8a == false && has_div == false && has_atomic_ldrd_strd == false) {
static const char* arm_variants_with_default_features[] = {
"cortex-a5",
"cortex-a8",
@@ -92,34 +101,48 @@
<< ") using conservative defaults";
}
}
- return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(has_div, has_lpae));
+ return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(has_div,
+ has_atomic_ldrd_strd,
+ has_armv8a));
}
ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromBitmap(uint32_t bitmap) {
bool has_div = (bitmap & kDivBitfield) != 0;
bool has_atomic_ldrd_strd = (bitmap & kAtomicLdrdStrdBitfield) != 0;
- return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(has_div, has_atomic_ldrd_strd));
+ bool has_armv8a = (bitmap & kARMv8A) != 0;
+ return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(has_div,
+ has_atomic_ldrd_strd,
+ has_armv8a));
}
ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromCppDefines() {
-#if defined(__ARM_ARCH_EXT_IDIV__)
+// Note: This will not work for now since we still build the 32-bit as __ARCH_ARM_7A__.
+#if defined(__ARM_ARCH_8A__)
+ const bool has_armv8a = true;
+#else
+ const bool has_armv8a = false;
+#endif
+#if defined (__ARM_ARCH_8A__) || defined(__ARM_ARCH_EXT_IDIV__)
const bool has_div = true;
#else
const bool has_div = false;
#endif
-#if defined(__ARM_FEATURE_LPAE)
- const bool has_lpae = true;
+#if defined (__ARM_ARCH_8A__) || defined(__ARM_FEATURE_LPAE)
+ const bool has_atomic_ldrd_strd = true;
#else
- const bool has_lpae = false;
+ const bool has_atomic_ldrd_strd = false;
#endif
- return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(has_div, has_lpae));
+ return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(has_div,
+ has_atomic_ldrd_strd,
+ has_armv8a));
}
ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromCpuInfo() {
// Look in /proc/cpuinfo for features we need. Only use this when we can guarantee that
// the kernel puts the appropriate feature flags in here. Sometimes it doesn't.
- bool has_lpae = false;
+ bool has_atomic_ldrd_strd = false;
bool has_div = false;
+ bool has_armv8a = false;
std::ifstream in("/proc/cpuinfo");
if (!in.fail()) {
@@ -137,21 +160,33 @@
has_div = true;
}
if (line.find("lpae") != std::string::npos) {
- has_lpae = true;
+ has_atomic_ldrd_strd = true;
}
}
+ if (line.find("architecture") != std::string::npos
+ && line.find(": 8") != std::string::npos) {
+ LOG(INFO) << "found architecture ARMv8";
+ // Android is only run on A cores, so ARMv8 implies ARMv8-A.
+ has_armv8a = true;
+ // ARMv8 CPUs have LPAE and div support.
+ has_div = true;
+ has_atomic_ldrd_strd = true;
+ }
}
}
in.close();
} else {
LOG(ERROR) << "Failed to open /proc/cpuinfo";
}
- return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(has_div, has_lpae));
+ return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(has_div,
+ has_atomic_ldrd_strd,
+ has_armv8a));
}
ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromHwcap() {
bool has_div = false;
- bool has_lpae = false;
+ bool has_atomic_ldrd_strd = false;
+ bool has_armv8a = false;
#if defined(ART_TARGET_ANDROID) && defined(__arm__)
uint64_t hwcaps = getauxval(AT_HWCAP);
@@ -163,18 +198,27 @@
has_div = true;
}
if ((hwcaps & HWCAP_LPAE) != 0) {
- has_lpae = true;
+ has_atomic_ldrd_strd = true;
+ }
+ // TODO: Fix this once FPMISC makes it upstream.
+ // For now we detect if we run on an ARMv8 CPU by looking for CRC32 and SHA1
+ // (only available on ARMv8 CPUs).
+ if ((hwcaps & HWCAP2_CRC32) != 0 && (hwcaps & HWCAP2_SHA1) != 0) {
+ has_armv8a = true;
}
#endif
- return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(has_div, has_lpae));
+ return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(has_div,
+ has_atomic_ldrd_strd,
+ has_armv8a));
}
// A signal handler called by a fault for an illegal instruction. We record the fact in r0
// and then increment the PC in the signal context to return to the next instruction. We know the
-// instruction is an sdiv (4 bytes long).
-static void bad_divide_inst_handle(int signo ATTRIBUTE_UNUSED, siginfo_t* si ATTRIBUTE_UNUSED,
- void* data) {
+// instruction is 4 bytes long.
+static void bad_instr_handle(int signo ATTRIBUTE_UNUSED,
+ siginfo_t* si ATTRIBUTE_UNUSED,
+ void* data) {
#if defined(__arm__)
struct ucontext *uc = (struct ucontext *)data;
struct sigcontext *sc = &uc->uc_mcontext;
@@ -190,15 +234,19 @@
// instruction. If we get a SIGILL then it's not supported.
struct sigaction sa, osa;
sa.sa_flags = SA_ONSTACK | SA_RESTART | SA_SIGINFO;
- sa.sa_sigaction = bad_divide_inst_handle;
+ sa.sa_sigaction = bad_instr_handle;
sigemptyset(&sa.sa_mask);
sigaction(SIGILL, &sa, &osa);
bool has_div = false;
+ bool has_armv8a = false;
#if defined(__arm__)
if (artCheckForArmSdivInstruction()) {
has_div = true;
}
+ if (artCheckForArmv8AInstructions()) {
+ has_armv8a = true;
+ }
#endif
// Restore the signal handler.
@@ -207,11 +255,13 @@
// Use compile time features to "detect" LPAE support.
// TODO: write an assembly LPAE support test.
#if defined(__ARM_FEATURE_LPAE)
- const bool has_lpae = true;
+ const bool has_atomic_ldrd_strd = true;
#else
- const bool has_lpae = false;
+ const bool has_atomic_ldrd_strd = false;
#endif
- return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(has_div, has_lpae));
+ return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(has_div,
+ has_atomic_ldrd_strd,
+ has_armv8a));
}
bool ArmInstructionSetFeatures::Equals(const InstructionSetFeatures* other) const {
@@ -219,13 +269,26 @@
return false;
}
const ArmInstructionSetFeatures* other_as_arm = other->AsArmInstructionSetFeatures();
- return has_div_ == other_as_arm->has_div_ &&
- has_atomic_ldrd_strd_ == other_as_arm->has_atomic_ldrd_strd_;
+ return has_div_ == other_as_arm->has_div_
+ && has_atomic_ldrd_strd_ == other_as_arm->has_atomic_ldrd_strd_
+ && has_armv8a_ == other_as_arm->has_armv8a_;
+}
+
+bool ArmInstructionSetFeatures::HasAtLeast(const InstructionSetFeatures* other) const {
+ if (kArm != other->GetInstructionSet()) {
+ return false;
+ }
+ const ArmInstructionSetFeatures* other_as_arm = other->AsArmInstructionSetFeatures();
+
+ return (has_div_ || (has_div_ == other_as_arm->has_div_))
+ && (has_atomic_ldrd_strd_ || (has_atomic_ldrd_strd_ == other_as_arm->has_atomic_ldrd_strd_))
+ && (has_armv8a_ || (has_armv8a_ == other_as_arm->has_armv8a_));
}
uint32_t ArmInstructionSetFeatures::AsBitmap() const {
- return (has_div_ ? kDivBitfield : 0) |
- (has_atomic_ldrd_strd_ ? kAtomicLdrdStrdBitfield : 0);
+ return (has_div_ ? kDivBitfield : 0)
+ | (has_atomic_ldrd_strd_ ? kAtomicLdrdStrdBitfield : 0)
+ | (has_armv8a_ ? kARMv8A : 0);
}
std::string ArmInstructionSetFeatures::GetFeatureString() const {
@@ -240,6 +303,11 @@
} else {
result += ",-atomic_ldrd_strd";
}
+ if (has_armv8a_) {
+ result += ",armv8a";
+ } else {
+ result += ",-armv8a";
+ }
return result;
}
@@ -248,6 +316,7 @@
const std::vector<std::string>& features, std::string* error_msg) const {
bool has_atomic_ldrd_strd = has_atomic_ldrd_strd_;
bool has_div = has_div_;
+ bool has_armv8a = has_armv8a_;
for (auto i = features.begin(); i != features.end(); i++) {
std::string feature = android::base::Trim(*i);
if (feature == "div") {
@@ -258,13 +327,17 @@
has_atomic_ldrd_strd = true;
} else if (feature == "-atomic_ldrd_strd") {
has_atomic_ldrd_strd = false;
+ } else if (feature == "armv8a") {
+ has_armv8a = true;
+ } else if (feature == "-armv8a") {
+ has_armv8a = false;
} else {
*error_msg = StringPrintf("Unknown instruction set feature: '%s'", feature.c_str());
return nullptr;
}
}
return std::unique_ptr<const InstructionSetFeatures>(
- new ArmInstructionSetFeatures(has_div, has_atomic_ldrd_strd));
+ new ArmInstructionSetFeatures(has_div, has_atomic_ldrd_strd, has_armv8a));
}
} // namespace art
diff --git a/runtime/arch/arm/instruction_set_features_arm.h b/runtime/arch/arm/instruction_set_features_arm.h
index 11f8bf0..f438a76 100644
--- a/runtime/arch/arm/instruction_set_features_arm.h
+++ b/runtime/arch/arm/instruction_set_features_arm.h
@@ -49,6 +49,8 @@
bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
+ bool HasAtLeast(const InstructionSetFeatures* other) const OVERRIDE;
+
InstructionSet GetInstructionSet() const OVERRIDE {
return kArm;
}
@@ -69,6 +71,11 @@
return has_atomic_ldrd_strd_;
}
+ // Are ARMv8-A instructions available?
+ bool HasARMv8AInstructions() const {
+ return has_armv8a_;
+ }
+
virtual ~ArmInstructionSetFeatures() {}
protected:
@@ -78,19 +85,24 @@
std::string* error_msg) const OVERRIDE;
private:
- ArmInstructionSetFeatures(bool has_div, bool has_atomic_ldrd_strd)
+ ArmInstructionSetFeatures(bool has_div,
+ bool has_atomic_ldrd_strd,
+ bool has_armv8a)
: InstructionSetFeatures(),
- has_div_(has_div), has_atomic_ldrd_strd_(has_atomic_ldrd_strd) {
- }
+ has_div_(has_div),
+ has_atomic_ldrd_strd_(has_atomic_ldrd_strd),
+ has_armv8a_(has_armv8a) {}
// Bitmap positions for encoding features as a bitmap.
enum {
kDivBitfield = 1 << 0,
kAtomicLdrdStrdBitfield = 1 << 1,
+ kARMv8A = 1 << 2,
};
const bool has_div_;
const bool has_atomic_ldrd_strd_;
+ const bool has_armv8a_;
DISALLOW_COPY_AND_ASSIGN(ArmInstructionSetFeatures);
};
diff --git a/runtime/arch/arm/instruction_set_features_arm_test.cc b/runtime/arch/arm/instruction_set_features_arm_test.cc
index 697ca90..6d5dd6d 100644
--- a/runtime/arch/arm/instruction_set_features_arm_test.cc
+++ b/runtime/arch/arm/instruction_set_features_arm_test.cc
@@ -31,7 +31,7 @@
EXPECT_TRUE(krait_features->Equals(krait_features.get()));
EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
- EXPECT_STREQ("div,atomic_ldrd_strd", krait_features->GetFeatureString().c_str());
+ EXPECT_STREQ("div,atomic_ldrd_strd,-armv8a", krait_features->GetFeatureString().c_str());
EXPECT_EQ(krait_features->AsBitmap(), 3U);
// Build features for a 32-bit ARM denver processor.
@@ -40,12 +40,13 @@
ASSERT_TRUE(denver_features.get() != nullptr) << error_msg;
EXPECT_TRUE(denver_features->Equals(denver_features.get()));
- EXPECT_TRUE(denver_features->Equals(krait_features.get()));
- EXPECT_TRUE(krait_features->Equals(denver_features.get()));
+ EXPECT_TRUE(denver_features->HasAtLeast(krait_features.get()));
+ EXPECT_FALSE(krait_features->Equals(denver_features.get()));
+ EXPECT_FALSE(krait_features->HasAtLeast(denver_features.get()));
EXPECT_TRUE(denver_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
EXPECT_TRUE(denver_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
- EXPECT_STREQ("div,atomic_ldrd_strd", denver_features->GetFeatureString().c_str());
- EXPECT_EQ(denver_features->AsBitmap(), 3U);
+ EXPECT_STREQ("div,atomic_ldrd_strd,armv8a", denver_features->GetFeatureString().c_str());
+ EXPECT_EQ(denver_features->AsBitmap(), 7U);
// Build features for a 32-bit ARMv7 processor.
std::unique_ptr<const InstructionSetFeatures> generic_features(
@@ -57,7 +58,7 @@
EXPECT_FALSE(krait_features->Equals(generic_features.get()));
EXPECT_FALSE(generic_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
EXPECT_FALSE(generic_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
- EXPECT_STREQ("-div,-atomic_ldrd_strd", generic_features->GetFeatureString().c_str());
+ EXPECT_STREQ("-div,-atomic_ldrd_strd,-armv8a", generic_features->GetFeatureString().c_str());
EXPECT_EQ(generic_features->AsBitmap(), 0U);
// ARM6 is not a supported architecture variant.
@@ -82,21 +83,22 @@
EXPECT_TRUE(krait_features->Equals(krait_features.get()));
EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
- EXPECT_STREQ("div,atomic_ldrd_strd", krait_features->GetFeatureString().c_str());
+ EXPECT_STREQ("div,atomic_ldrd_strd,-armv8a", krait_features->GetFeatureString().c_str());
EXPECT_EQ(krait_features->AsBitmap(), 3U);
// Build features for a 32-bit ARM processor with LPAE and div flipped.
std::unique_ptr<const InstructionSetFeatures> denver_features(
- base_features->AddFeaturesFromString("div,atomic_ldrd_strd", &error_msg));
+ base_features->AddFeaturesFromString("div,atomic_ldrd_strd,armv8a", &error_msg));
ASSERT_TRUE(denver_features.get() != nullptr) << error_msg;
EXPECT_TRUE(denver_features->Equals(denver_features.get()));
- EXPECT_TRUE(denver_features->Equals(krait_features.get()));
- EXPECT_TRUE(krait_features->Equals(denver_features.get()));
+ EXPECT_FALSE(denver_features->Equals(krait_features.get()));
+ EXPECT_TRUE(denver_features->HasAtLeast(krait_features.get()));
+ EXPECT_FALSE(krait_features->Equals(denver_features.get()));
EXPECT_TRUE(denver_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
EXPECT_TRUE(denver_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
- EXPECT_STREQ("div,atomic_ldrd_strd", denver_features->GetFeatureString().c_str());
- EXPECT_EQ(denver_features->AsBitmap(), 3U);
+ EXPECT_STREQ("div,atomic_ldrd_strd,armv8a", denver_features->GetFeatureString().c_str());
+ EXPECT_EQ(denver_features->AsBitmap(), 7U);
// Build features for a 32-bit default ARM processor.
std::unique_ptr<const InstructionSetFeatures> generic_features(
@@ -108,7 +110,7 @@
EXPECT_FALSE(krait_features->Equals(generic_features.get()));
EXPECT_FALSE(generic_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
EXPECT_FALSE(generic_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
- EXPECT_STREQ("-div,-atomic_ldrd_strd", generic_features->GetFeatureString().c_str());
+ EXPECT_STREQ("-div,-atomic_ldrd_strd,-armv8a", generic_features->GetFeatureString().c_str());
EXPECT_EQ(generic_features->AsBitmap(), 0U);
}
diff --git a/runtime/arch/arm/instruction_set_features_assembly_tests.S b/runtime/arch/arm/instruction_set_features_assembly_tests.S
index c1086df..5c7f202 100644
--- a/runtime/arch/arm/instruction_set_features_assembly_tests.S
+++ b/runtime/arch/arm/instruction_set_features_assembly_tests.S
@@ -17,22 +17,49 @@
#include "asm_support_arm.S"
.section .text
-// This function is used to check for the CPU's support for the sdiv
-// instruction at runtime. It will either return the value 1 or
-// will cause an invalid instruction trap (SIGILL signal). The
-// caller must arrange for the signal handler to set the r0
-// register to 0 and move the pc forward by 4 bytes (to skip
-// the invalid instruction).
+// These functions are used to check for the CPU's support for the sdiv and
+// ARMv8-A instructions at runtime. They will either return the value 1 or will
+// cause an invalid instruction trap (SIGILL signal), for which the signal handler
+// (bad_instr_handle(), in instruction_set_features_arm.cc) must arrange to set
+// the r0 register to 0 and move the pc forward by 4 bytes (to skip the invalid
+// instruction).
+// Note: For ARM T32, instructions can be either 16b or 32b, but bad_instr_handle()
+// deals only with 32b instructions for now.
+
ENTRY artCheckForArmSdivInstruction
mov r1,#1
- // depending on the architecture, the assembler will not allow an
+ // Depending on the architecture, the assembler will not allow an
// sdiv instruction, so we will have to output the bytes directly.
- // sdiv r0,r1,r1 is two words: 0xfb91 0xf1f0. We need little endian.
- .byte 0x91,0xfb,0xf1,0xf0
+ // The T32 encoding for sdiv r0,r1,r1 is two 16bit words: 0xfb91 0xf0f1, with little endianness.
+ .byte 0x91,0xfb
+ .byte 0xf1,0xf0
- // if the divide worked, r0 will have the value #1 (result of sdiv).
+ // If the divide worked, r0 will have the value #1 (result of sdiv).
// It will have 0 otherwise (set by the signal handler)
// the value is just returned from this function.
bx lr
END artCheckForArmSdivInstruction
+
+ENTRY artCheckForArmv8AInstructions
+ // Depending on the architecture, the assembler will not allow a
+ // `vrint` instruction, so we will have to output the bytes directly.
+
+ // Move `true` into the result register. The signal handler will set it to 0
+ // if execution of the instruction below fails
+ mov r0,#1
+
+ // Store S0 in the caller saved R1. If the instruction below succeeds, S0 will
+ // be clobbered but it will not be caller saved (ARM still uses soft FP).
+ vmov r1, s0
+
+ // The T32 encoding for vrinta.f32.f32 s0,s0 is two 16bit words: 0xfeb8,0x0a40, with little
+ // endianness.
+ .byte 0xb8,0xfe
+ .byte 0x40,0x0a
+
+ // Restore S0 (see above comment).
+ vmov s0, r1
+
+ bx lr
+END artCheckForArmv8AInstructions
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index db1cad6..d1225b3 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -351,14 +351,13 @@
DELIVER_PENDING_EXCEPTION
.endm
-// Macros taking opportunity of code similarities for downcalls with referrer for non-wide fields.
+// Macros taking opportunity of code similarities for downcalls.
.macro ONE_ARG_REF_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
SETUP_SAVE_REFS_ONLY_FRAME r1 @ save callee saves in case of GC
- ldr r1, [sp, #FRAME_SIZE_SAVE_REFS_ONLY] @ pass referrer
- mov r2, r9 @ pass Thread::Current
- bl \entrypoint @ (uint32_t field_idx, const Method* referrer, Thread*)
+ mov r1, r9 @ pass Thread::Current
+ bl \entrypoint @ (uint32_t field_idx, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
\return
END \name
@@ -368,9 +367,8 @@
.extern \entrypoint
ENTRY \name
SETUP_SAVE_REFS_ONLY_FRAME r2 @ save callee saves in case of GC
- ldr r2, [sp, #FRAME_SIZE_SAVE_REFS_ONLY] @ pass referrer
- mov r3, r9 @ pass Thread::Current
- bl \entrypoint @ (field_idx, Object*, referrer, Thread*)
+ mov r2, r9 @ pass Thread::Current
+ bl \entrypoint @ (field_idx, Object*, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
\return
END \name
@@ -380,12 +378,8 @@
.extern \entrypoint
ENTRY \name
SETUP_SAVE_REFS_ONLY_FRAME r3 @ save callee saves in case of GC
- ldr r3, [sp, #FRAME_SIZE_SAVE_REFS_ONLY] @ pass referrer
- str r9, [sp, #-16]! @ expand the frame and pass Thread::Current
- .cfi_adjust_cfa_offset 16
- bl \entrypoint @ (field_idx, Object*, new_val, referrer, Thread*)
- add sp, #16 @ release out args
- .cfi_adjust_cfa_offset -16
+ mov r3, r9 @ pass Thread::Current
+ bl \entrypoint @ (field_idx, Object*, new_val, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME @ TODO: we can clearly save an add here
\return
END \name
@@ -978,21 +972,20 @@
/*
* Called by managed code to resolve a static field and load a non-wide value.
*/
-ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
+ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
+ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
+ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
+ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
+ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
+ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
/*
* Called by managed code to resolve a static field and load a 64-bit primitive value.
*/
- .extern artGet64StaticFromCode
+ .extern artGet64StaticFromCompiledCode
ENTRY art_quick_get64_static
SETUP_SAVE_REFS_ONLY_FRAME r2 @ save callee saves in case of GC
- ldr r1, [sp, #FRAME_SIZE_SAVE_REFS_ONLY] @ pass referrer
- mov r2, r9 @ pass Thread::Current
- bl artGet64StaticFromCode @ (uint32_t field_idx, const Method* referrer, Thread*)
+ mov r1, r9 @ pass Thread::Current
+ bl artGet64StaticFromCompiledCode @ (uint32_t field_idx, Thread*)
ldr r2, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
RESTORE_SAVE_REFS_ONLY_FRAME
cbnz r2, 1f @ success if no exception pending
@@ -1004,21 +997,20 @@
/*
* Called by managed code to resolve an instance field and load a non-wide value.
*/
-TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
+TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
+TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
+TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
+TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
+TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
+TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
/*
* Called by managed code to resolve an instance field and load a 64-bit primitive value.
*/
- .extern artGet64InstanceFromCode
+ .extern artGet64InstanceFromCompiledCode
ENTRY art_quick_get64_instance
SETUP_SAVE_REFS_ONLY_FRAME r2 @ save callee saves in case of GC
- ldr r2, [sp, #FRAME_SIZE_SAVE_REFS_ONLY] @ pass referrer
- mov r3, r9 @ pass Thread::Current
- bl artGet64InstanceFromCode @ (field_idx, Object*, referrer, Thread*)
+ mov r2, r9 @ pass Thread::Current
+ bl artGet64InstanceFromCompiledCode @ (field_idx, Object*, Thread*)
ldr r2, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
RESTORE_SAVE_REFS_ONLY_FRAME
cbnz r2, 1f @ success if no exception pending
@@ -1028,51 +1020,32 @@
END art_quick_get64_instance
/*
- * Called by managed code to resolve a static field and store a non-wide value.
+ * Called by managed code to resolve a static field and store a value.
*/
-TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
-TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
-TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
-TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
- /*
- * Called by managed code to resolve a static field and store a 64-bit primitive value.
- * On entry r0 holds field index, r2:r3 hold new_val
- */
- .extern artSet64StaticFromCode
-ENTRY art_quick_set64_static
- SETUP_SAVE_REFS_ONLY_FRAME r1 @ save callee saves in case of GC
- @ r2:r3 contain the wide argument
- ldr r1, [sp, #FRAME_SIZE_SAVE_REFS_ONLY] @ pass referrer
- str r9, [sp, #-16]! @ expand the frame and pass Thread::Current
- .cfi_adjust_cfa_offset 16
- bl artSet64StaticFromCode @ (field_idx, referrer, new_val, Thread*)
- add sp, #16 @ release out args
- .cfi_adjust_cfa_offset -16
- RESTORE_SAVE_REFS_ONLY_FRAME @ TODO: we can clearly save an add here
- RETURN_IF_RESULT_IS_ZERO
- DELIVER_PENDING_EXCEPTION
-END art_quick_set64_static
+TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCompiledCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
+TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCompiledCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
+TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCompiledCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
+TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCompiledCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
+THREE_ARG_REF_DOWNCALL art_quick_set64_static, artSet64StaticFromCompiledCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
/*
* Called by managed code to resolve an instance field and store a non-wide value.
*/
-THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
-THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
-THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
-THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
+THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCompiledCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
+THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCompiledCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
+THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCompiledCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
+THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCompiledCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
+
/*
- * Called by managed code to resolve an instance field and store a 64-bit primitive value.
+ * Called by managed code to resolve an instance field and store a wide value.
*/
- .extern artSet64InstanceFromCode
+ .extern artSet64InstanceFromCompiledCode
ENTRY art_quick_set64_instance
SETUP_SAVE_REFS_ONLY_FRAME r12 @ save callee saves in case of GC
@ r2:r3 contain the wide argument
- ldr r12, [sp, #FRAME_SIZE_SAVE_REFS_ONLY] @ pass referrer
- str r9, [sp, #-12]! @ expand the frame and pass Thread::Current
- .cfi_adjust_cfa_offset 12
- str r12, [sp, #-4]! @ expand the frame and pass the referrer
- .cfi_adjust_cfa_offset 4
- bl artSet64InstanceFromCode @ (field_idx, Object*, new_val, Method* referrer, Thread*)
+ str r9, [sp, #-16]! @ expand the frame and pass Thread::Current
+ .cfi_adjust_cfa_offset 16
+ bl artSet64InstanceFromCompiledCode @ (field_idx, Object*, new_val, Thread*)
add sp, #16 @ release out args
.cfi_adjust_cfa_offset -16
RESTORE_SAVE_REFS_ONLY_FRAME @ TODO: we can clearly save an add here
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 00518e1..a84e553 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1519,14 +1519,13 @@
END \name
.endm
-// Macros taking opportunity of code similarities for downcalls with referrer.
+// Macros taking opportunity of code similarities for downcalls.
.macro ONE_ARG_REF_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC
- ldr x1, [sp, #FRAME_SIZE_SAVE_REFS_ONLY] // Load referrer
- mov x2, xSELF // pass Thread::Current
- bl \entrypoint // (uint32_t type_idx, Method* method, Thread*, SP)
+ mov x1, xSELF // pass Thread::Current
+ bl \entrypoint // (uint32_t type_idx, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
\return
END \name
@@ -1536,8 +1535,7 @@
.extern \entrypoint
ENTRY \name
SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC
- ldr x2, [sp, #FRAME_SIZE_SAVE_REFS_ONLY] // Load referrer
- mov x3, xSELF // pass Thread::Current
+ mov x2, xSELF // pass Thread::Current
bl \entrypoint
RESTORE_SAVE_REFS_ONLY_FRAME
\return
@@ -1548,8 +1546,7 @@
.extern \entrypoint
ENTRY \name
SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC
- ldr x3, [sp, #FRAME_SIZE_SAVE_REFS_ONLY] // Load referrer
- mov x4, xSELF // pass Thread::Current
+ mov x3, xSELF // pass Thread::Current
bl \entrypoint
RESTORE_SAVE_REFS_ONLY_FRAME
\return
@@ -1579,44 +1576,33 @@
ONE_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
ONE_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
+ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
+ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
+ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
+ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
+ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
+ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
+ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
+TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
+TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
+TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
+TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
+TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
+TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
+TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
-TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
-TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
-TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
+TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCompiledCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
+TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCompiledCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
+TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCompiledCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
+TWO_ARG_REF_DOWNCALL art_quick_set64_static, artSet64StaticFromCompiledCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
+TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCompiledCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
-THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
-THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
-THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
-THREE_ARG_REF_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
-THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
-
-// This is separated out as the argument order is different.
- .extern artSet64StaticFromCode
-ENTRY art_quick_set64_static
- SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC
- ldr x1, [sp, #FRAME_SIZE_SAVE_REFS_ONLY] // Load referrer
- // x2 contains the parameter
- mov x3, xSELF // pass Thread::Current
- bl artSet64StaticFromCode
- RESTORE_SAVE_REFS_ONLY_FRAME
- RETURN_IF_W0_IS_ZERO_OR_DELIVER
-END art_quick_set64_static
+THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCompiledCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
+THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCompiledCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
+THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCompiledCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
+THREE_ARG_REF_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCompiledCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
+THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCompiledCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
/*
* Entry from managed code to resolve a string, this stub will
diff --git a/runtime/arch/instruction_set_features.h b/runtime/arch/instruction_set_features.h
index b6c5c71..5f1a507 100644
--- a/runtime/arch/instruction_set_features.h
+++ b/runtime/arch/instruction_set_features.h
@@ -67,6 +67,24 @@
// Are these features the same as the other given features?
virtual bool Equals(const InstructionSetFeatures* other) const = 0;
+ // For testing purposes we want to make sure that the system we run on has at
+ // least the options we claim it has. In this case Equals() does not
+ // suffice and will cause the test to fail, since the runtime cpu feature
+ // detection claims more capabilities than statically specified from the
+ // build system.
+ //
+ // A good example of this is the armv8 ART test target that declares
+ // "CPU_VARIANT=generic". If the generic target is specified and the code
+ // is run on a platform with enhanced capabilities, the
+ // instruction_set_features test will fail if we resort to using Equals()
+ // between statically defined cpu features and runtime cpu features.
+ //
+ // For now we default this to Equals() in case the architecture does not
+ // provide it.
+ virtual bool HasAtLeast(const InstructionSetFeatures* other) const {
+ return Equals(other);
+ }
+
// Return the ISA these features relate to.
virtual InstructionSet GetInstructionSet() const = 0;
diff --git a/runtime/arch/instruction_set_features_test.cc b/runtime/arch/instruction_set_features_test.cc
index d489392..67e2f35 100644
--- a/runtime/arch/instruction_set_features_test.cc
+++ b/runtime/arch/instruction_set_features_test.cc
@@ -52,7 +52,7 @@
InstructionSetFeatures::FromVariant(kRuntimeISA, dex2oat_isa_variant, &error_msg));
ASSERT_TRUE(property_features.get() != nullptr) << error_msg;
- EXPECT_TRUE(property_features->Equals(instruction_set_features.get()))
+ EXPECT_TRUE(property_features->HasAtLeast(instruction_set_features.get()))
<< "System property features: " << *property_features.get()
<< "\nFeatures from build: " << *instruction_set_features.get();
}
@@ -89,7 +89,7 @@
base_features->AddFeaturesFromString(dex2oat_isa_features, &error_msg));
ASSERT_TRUE(property_features.get() != nullptr) << error_msg;
- EXPECT_TRUE(property_features->Equals(instruction_set_features.get()))
+ EXPECT_TRUE(property_features->HasAtLeast(instruction_set_features.get()))
<< "System property features: " << *property_features.get()
<< "\nFeatures from build: " << *instruction_set_features.get();
}
@@ -109,7 +109,7 @@
// Check we get the same instruction set features using /proc/cpuinfo.
std::unique_ptr<const InstructionSetFeatures> cpuinfo_features(
InstructionSetFeatures::FromCpuInfo());
- EXPECT_TRUE(cpuinfo_features->Equals(instruction_set_features.get()))
+ EXPECT_TRUE(cpuinfo_features->HasAtLeast(instruction_set_features.get()))
<< "CPU Info features: " << *cpuinfo_features.get()
<< "\nFeatures from build: " << *instruction_set_features.get();
}
@@ -124,7 +124,7 @@
std::unique_ptr<const InstructionSetFeatures> cpp_features(
InstructionSetFeatures::FromCppDefines());
- EXPECT_TRUE(default_features->Equals(cpp_features.get()))
+ EXPECT_TRUE(cpp_features->HasAtLeast(default_features.get()))
<< "Default variant features: " << *default_features.get()
<< "\nFeatures from build: " << *cpp_features.get();
}
@@ -143,7 +143,7 @@
// Check we get the same instruction set features using AT_HWCAP.
std::unique_ptr<const InstructionSetFeatures> hwcap_features(
InstructionSetFeatures::FromHwcap());
- EXPECT_TRUE(hwcap_features->Equals(instruction_set_features.get()))
+ EXPECT_TRUE(hwcap_features->HasAtLeast(instruction_set_features.get()))
<< "Hwcap features: " << *hwcap_features.get()
<< "\nFeatures from build: " << *instruction_set_features.get();
}
@@ -156,7 +156,7 @@
// Check we get the same instruction set features using assembly tests.
std::unique_ptr<const InstructionSetFeatures> assembly_features(
InstructionSetFeatures::FromAssembly());
- EXPECT_TRUE(assembly_features->Equals(instruction_set_features.get()))
+ EXPECT_TRUE(assembly_features->HasAtLeast(instruction_set_features.get()))
<< "Assembly features: " << *assembly_features.get()
<< "\nFeatures from build: " << *instruction_set_features.get();
}
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 547b57e..9e75cba 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -1610,8 +1610,8 @@
for (size_t i = 0; i < arraysize(values); ++i) {
// 64 bit FieldSet stores the set value in the second register.
test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
- 0U,
values[i],
+ 0U,
StubTest::GetEntrypoint(self, kQuickSet64Static),
self,
referrer);
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index c420259..47dc34a 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -875,13 +875,12 @@
DEFINE_FUNCTION VAR(c_name)
SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
- mov FRAME_SIZE_SAVE_REFS_ONLY(%esp), %ecx // get referrer
- PUSH eax // push padding
+ subl MACRO_LITERAL(8), %esp // alignment padding
+ CFI_ADJUST_CFA_OFFSET(8)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- PUSH ecx // pass referrer
PUSH eax // pass arg1
- call CALLVAR(cxx_name) // cxx_name(arg1, referrer, Thread*)
+ call CALLVAR(cxx_name) // cxx_name(arg1, Thread*)
addl MACRO_LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
@@ -893,10 +892,9 @@
DEFINE_FUNCTION VAR(c_name)
SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
- mov FRAME_SIZE_SAVE_REFS_ONLY(%esp), %edx // get referrer
+ PUSH eax // alignment padding
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- PUSH edx // pass referrer
PUSH ecx // pass arg2
PUSH eax // pass arg1
- call CALLVAR(cxx_name) // cxx_name(arg1, arg2, referrer, Thread*)
+ call CALLVAR(cxx_name) // cxx_name(arg1, arg2, Thread*)
@@ -911,18 +909,13 @@
DEFINE_FUNCTION VAR(c_name)
SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
- mov FRAME_SIZE_SAVE_REFS_ONLY(%esp), %ebx // get referrer
- subl MACRO_LITERAL(12), %esp // alignment padding
- CFI_ADJUST_CFA_OFFSET(12)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- PUSH ebx // pass referrer
PUSH edx // pass arg3
PUSH ecx // pass arg2
PUSH eax // pass arg1
- call CALLVAR(cxx_name) // cxx_name(arg1, arg2, arg3, referrer,
- // Thread*)
- addl LITERAL(32), %esp // pop arguments
+ call CALLVAR(cxx_name) // cxx_name(arg1, arg2, arg3, Thread*)
+ addl LITERAL(16), %esp // pop arguments
- CFI_ADJUST_CFA_OFFSET(-32)
+ CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
@@ -1556,77 +1549,53 @@
ret
END_FUNCTION art_quick_lushr
-ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCode, RETURN_IF_EAX_ZERO
-TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCode, RETURN_IF_EAX_ZERO
-TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCode, RETURN_IF_EAX_ZERO
-TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCode, RETURN_IF_EAX_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCompiledCode, RETURN_IF_EAX_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCompiledCode, RETURN_IF_EAX_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCompiledCode, RETURN_IF_EAX_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCompiledCode, RETURN_IF_EAX_ZERO
-THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCode, RETURN_IF_EAX_ZERO
-THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCode, RETURN_IF_EAX_ZERO
-THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCode, RETURN_IF_EAX_ZERO
-THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RETURN_IF_EAX_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set64_static, artSet64StaticFromCompiledCode, RETURN_IF_EAX_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCompiledCode, RETURN_IF_EAX_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCompiledCode, RETURN_IF_EAX_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCompiledCode, RETURN_IF_EAX_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCompiledCode, RETURN_IF_EAX_ZERO
-// Call artSet64InstanceFromCode with 4 word size arguments and the referrer.
+// Call artSet64InstanceFromCompiledCode with 4 word size arguments.
DEFINE_FUNCTION art_quick_set64_instance
movd %ebx, %xmm0
SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx // save ref containing registers for GC
movd %xmm0, %ebx
// Outgoing argument set up
- subl LITERAL(8), %esp // alignment padding
- CFI_ADJUST_CFA_OFFSET(8)
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- CFI_ADJUST_CFA_OFFSET(4)
- pushl (FRAME_SIZE_SAVE_REFS_ONLY+12)(%esp) // pass referrer
- CFI_ADJUST_CFA_OFFSET(4)
- PUSH ebx // pass high half of new_val
- PUSH edx // pass low half of new_val
- PUSH ecx // pass object
- PUSH eax // pass field_idx
- call SYMBOL(artSet64InstanceFromCode) // (field_idx, Object*, new_val, referrer, Thread*)
- addl LITERAL(32), %esp // pop arguments
- CFI_ADJUST_CFA_OFFSET(-32)
- RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
- RETURN_IF_EAX_ZERO // return or deliver exception
-END_FUNCTION art_quick_set64_instance
-
-// Call artSet64StaticFromCode with 3 word size arguments plus with the referrer in the 2nd position
-// so that new_val is aligned on even registers were we passing arguments in registers.
-DEFINE_FUNCTION art_quick_set64_static
- // TODO: Implement SETUP_GOT_NOSAVE for got_reg = ecx to avoid moving around the registers.
- movd %ebx, %xmm0
- SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx // save ref containing registers for GC
- movd %xmm0, %ebx
- mov FRAME_SIZE_SAVE_REFS_ONLY(%esp), %ecx // get referrer
- subl LITERAL(12), %esp // alignment padding
+ subl LITERAL(12), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(12)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH ebx // pass high half of new_val
PUSH edx // pass low half of new_val
- PUSH ecx // pass referrer
+ PUSH ecx // pass object
PUSH eax // pass field_idx
- call SYMBOL(artSet64StaticFromCode) // (field_idx, referrer, new_val, Thread*)
+ call SYMBOL(artSet64InstanceFromCompiledCode) // (field_idx, Object*, new_val, Thread*)
addl LITERAL(32), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-32)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO // return or deliver exception
-END_FUNCTION art_quick_set64_static
+END_FUNCTION art_quick_set64_instance
DEFINE_FUNCTION art_quick_proxy_invoke_handler
SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_EAX
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 46bee39..544e3ea 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -919,11 +919,10 @@
MACRO3(ONE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
- movq 8(%rsp), %rsi // pass referrer
SETUP_SAVE_REFS_ONLY_FRAME
// arg0 is in rdi
- movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
- call CALLVAR(cxx_name) // cxx_name(arg0, referrer, Thread*)
+ movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
+ call CALLVAR(cxx_name) // cxx_name(arg0, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
CALL_MACRO(return_macro)
END_FUNCTION VAR(c_name)
@@ -931,11 +930,10 @@
MACRO3(TWO_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
- movq 8(%rsp), %rdx // pass referrer
SETUP_SAVE_REFS_ONLY_FRAME
// arg0 and arg1 are in rdi/rsi
- movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread::Current()
- call CALLVAR(cxx_name) // (arg0, arg1, referrer, Thread*)
+ movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
+ call CALLVAR(cxx_name) // (arg0, arg1, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
CALL_MACRO(return_macro)
END_FUNCTION VAR(c_name)
@@ -943,11 +941,10 @@
MACRO3(THREE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
- movq 8(%rsp), %rcx // pass referrer
SETUP_SAVE_REFS_ONLY_FRAME
// arg0, arg1, and arg2 are in rdi/rsi/rdx
- movq %gs:THREAD_SELF_OFFSET, %r8 // pass Thread::Current()
- call CALLVAR(cxx_name) // cxx_name(arg0, arg1, arg2, referrer, Thread*)
+ movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread::Current()
+ call CALLVAR(cxx_name) // cxx_name(arg0, arg1, arg2, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
END_FUNCTION VAR(c_name)
@@ -1239,7 +1236,7 @@
// Outgoing argument set up
movl %eax, %edi // pass string index
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
- call SYMBOL(artResolveStringFromCode) // artResolveStringFromCode(arg0, referrer, Thread*)
+ call SYMBOL(artResolveStringFromCode) // artResolveStringFromCode(arg0, Thread*)
testl %eax, %eax // If result is null, deliver the OOME.
jz 1f
@@ -1551,45 +1548,33 @@
UNIMPLEMENTED art_quick_lshr
UNIMPLEMENTED art_quick_lushr
-THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCode, RETURN_IF_EAX_ZERO
-THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCode, RETURN_IF_EAX_ZERO
-THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCode, RETURN_IF_EAX_ZERO
-THREE_ARG_REF_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCode, RETURN_IF_EAX_ZERO
-THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RETURN_IF_EAX_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCompiledCode, RETURN_IF_EAX_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCompiledCode, RETURN_IF_EAX_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCompiledCode, RETURN_IF_EAX_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCompiledCode, RETURN_IF_EAX_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCompiledCode, RETURN_IF_EAX_ZERO
-TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCode, RETURN_IF_EAX_ZERO
-TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCode, RETURN_IF_EAX_ZERO
-TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCode, RETURN_IF_EAX_ZERO
-TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCode, RETURN_IF_EAX_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCompiledCode, RETURN_IF_EAX_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCompiledCode, RETURN_IF_EAX_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCompiledCode, RETURN_IF_EAX_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set64_static, artSet64StaticFromCompiledCode, RETURN_IF_EAX_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCompiledCode, RETURN_IF_EAX_ZERO
-ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-
-// This is singled out as the argument order is different.
-DEFINE_FUNCTION art_quick_set64_static
- // new_val is already in %rdx
- movq 8(%rsp), %rsi // pass referrer
- SETUP_SAVE_REFS_ONLY_FRAME
- // field_idx is in rdi
- movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread::Current()
- call SYMBOL(artSet64StaticFromCode) // (field_idx, referrer, new_val, Thread*)
- RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
- RETURN_IF_EAX_ZERO // return or deliver exception
-END_FUNCTION art_quick_set64_static
-
+ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
DEFINE_FUNCTION art_quick_proxy_invoke_handler
SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_RDI
diff --git a/runtime/art_field-inl.h b/runtime/art_field-inl.h
index 917db3e..80af8e7 100644
--- a/runtime/art_field-inl.h
+++ b/runtime/art_field-inl.h
@@ -132,7 +132,6 @@
return (object)->GetField ## type(GetOffset());
#define FIELD_SET(object, type, value) \
- DCHECK_EQ(Primitive::kPrim ## type, GetTypeAsPrimitiveType()) << PrettyField(); \
DCHECK((object) != nullptr) << PrettyField(); \
DCHECK(!IsStatic() || ((object) == GetDeclaringClass()) || !Runtime::Current()->IsStarted()); \
if (UNLIKELY(IsVolatile())) { \
@@ -147,6 +146,12 @@
template<bool kTransactionActive>
inline void ArtField::SetBoolean(ObjPtr<mirror::Object> object, uint8_t z) {
+ if (kIsDebugBuild) {
+ // For simplicity, this method is being called by the compiler entrypoint for
+ // both boolean and byte fields.
+ Primitive::Type type = GetTypeAsPrimitiveType();
+ DCHECK(type == Primitive::kPrimBoolean || type == Primitive::kPrimByte) << PrettyField();
+ }
FIELD_SET(object, Boolean, z);
}
@@ -156,6 +161,7 @@
template<bool kTransactionActive>
inline void ArtField::SetByte(ObjPtr<mirror::Object> object, int8_t b) {
+ DCHECK_EQ(Primitive::kPrimByte, GetTypeAsPrimitiveType()) << PrettyField();
FIELD_SET(object, Byte, b);
}
@@ -165,6 +171,12 @@
template<bool kTransactionActive>
inline void ArtField::SetChar(ObjPtr<mirror::Object> object, uint16_t c) {
+ if (kIsDebugBuild) {
+ // For simplicity, this method is being called by the compiler entrypoint for
+ // both char and short fields.
+ Primitive::Type type = GetTypeAsPrimitiveType();
+ DCHECK(type == Primitive::kPrimChar || type == Primitive::kPrimShort) << PrettyField();
+ }
FIELD_SET(object, Char, c);
}
@@ -174,6 +186,7 @@
template<bool kTransactionActive>
inline void ArtField::SetShort(ObjPtr<mirror::Object> object, int16_t s) {
+ DCHECK_EQ(Primitive::kPrimShort, GetTypeAsPrimitiveType()) << PrettyField();
FIELD_SET(object, Short, s);
}
@@ -182,6 +195,8 @@
inline int32_t ArtField::GetInt(ObjPtr<mirror::Object> object) {
if (kIsDebugBuild) {
+ // For simplicity, this method is being called by the compiler entrypoint for
+ // both int and float fields.
Primitive::Type type = GetTypeAsPrimitiveType();
CHECK(type == Primitive::kPrimInt || type == Primitive::kPrimFloat) << PrettyField();
}
@@ -191,6 +206,8 @@
template<bool kTransactionActive>
inline void ArtField::SetInt(ObjPtr<mirror::Object> object, int32_t i) {
if (kIsDebugBuild) {
+ // For simplicity, this method is being called by the compiler entrypoint for
+ // both int and float fields.
Primitive::Type type = GetTypeAsPrimitiveType();
CHECK(type == Primitive::kPrimInt || type == Primitive::kPrimFloat) << PrettyField();
}
@@ -199,6 +216,8 @@
inline int64_t ArtField::GetLong(ObjPtr<mirror::Object> object) {
if (kIsDebugBuild) {
+ // For simplicity, this method is being called by the compiler entrypoint for
+ // both long and double fields.
Primitive::Type type = GetTypeAsPrimitiveType();
CHECK(type == Primitive::kPrimLong || type == Primitive::kPrimDouble) << PrettyField();
}
@@ -208,6 +227,8 @@
template<bool kTransactionActive>
inline void ArtField::SetLong(ObjPtr<mirror::Object> object, int64_t j) {
if (kIsDebugBuild) {
+ // For simplicity, this method is being called by the compiler entrypoint for
+ // both long and double fields.
Primitive::Type type = GetTypeAsPrimitiveType();
CHECK(type == Primitive::kPrimLong || type == Primitive::kPrimDouble) << PrettyField();
}
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index d7d39af..a3d9ba6 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -719,21 +719,7 @@
}
std::string ArtMethod::JniShortName() {
- std::string class_name(GetDeclaringClassDescriptor());
- // Remove the leading 'L' and trailing ';'...
- CHECK_EQ(class_name[0], 'L') << class_name;
- CHECK_EQ(class_name[class_name.size() - 1], ';') << class_name;
- class_name.erase(0, 1);
- class_name.erase(class_name.size() - 1, 1);
-
- std::string method_name(GetName());
-
- std::string short_name;
- short_name += "Java_";
- short_name += MangleForJni(class_name);
- short_name += "_";
- short_name += MangleForJni(method_name);
- return short_name;
+ return GetJniShortName(GetDeclaringClassDescriptor(), GetName());
}
std::string ArtMethod::JniLongName() {
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 25fd727..06c11f5 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -254,5 +254,10 @@
return result;
}
+ArtMethod* GetCalleeSaveOuterMethod(Thread* self, Runtime::CalleeSaveType type) {
+ ScopedAssertNoThreadSuspension ants(__FUNCTION__);
+ ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
+ return DoGetCalleeSaveMethodOuterCallerAndPc(sp, type).first;
+}
} // namespace art
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 6a04f20..69ee3eb 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -191,6 +191,9 @@
Runtime::CalleeSaveType type)
REQUIRES_SHARED(Locks::mutator_lock_);
+ArtMethod* GetCalleeSaveOuterMethod(Thread* self, Runtime::CalleeSaveType type)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
} // namespace art
#endif // ART_RUNTIME_ENTRYPOINTS_ENTRYPOINT_UTILS_H_
diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc
index 6d17000..4544aef 100644
--- a/runtime/entrypoints/quick/quick_field_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc
@@ -55,261 +55,207 @@
return field;
}
-extern "C" ssize_t artGetByteStaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int8_t));
- if (LIKELY(field != nullptr)) {
- return field->GetByte(field->GetDeclaringClass());
+static ArtMethod* GetReferrer(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (kIsDebugBuild) {
+    // stub_test doesn't call this code with a proper frame, so get the outer
+    // method, and if it does not have compiled code, return it.
+ ArtMethod* outer = GetCalleeSaveOuterMethod(self, Runtime::kSaveRefsOnly);
+ if (outer->GetEntryPointFromQuickCompiledCode() == nullptr) {
+ return outer;
+ }
}
- field = FindFieldFromCode<StaticPrimitiveRead, true>(field_idx, referrer, self, sizeof(int8_t));
- if (LIKELY(field != nullptr)) {
- return field->GetByte(field->GetDeclaringClass());
- }
- return 0; // Will throw exception by checking with Thread::Current.
+ return GetCalleeSaveMethodCallerAndOuterMethod(self, Runtime::kSaveRefsOnly).caller;
}
-extern "C" size_t artGetBooleanStaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self)
+#define ART_GET_FIELD_FROM_CODE(Kind, PrimitiveType, RetType, SetType, \
+ PrimitiveOrObject, IsObject, Ptr) \
+ extern "C" RetType artGet ## Kind ## StaticFromCode(uint32_t field_idx, \
+ ArtMethod* referrer, \
+ Thread* self) \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
+ ScopedQuickEntrypointChecks sqec(self); \
+ ArtField* field = FindFieldFast( \
+ field_idx, referrer, Static ## PrimitiveOrObject ## Read, \
+ sizeof(PrimitiveType)); \
+ if (LIKELY(field != nullptr)) { \
+ return field->Get ## Kind (field->GetDeclaringClass())Ptr; \
+ } \
+ field = FindFieldFromCode<Static ## PrimitiveOrObject ## Read, true>( \
+ field_idx, referrer, self, sizeof(PrimitiveType)); \
+ if (LIKELY(field != nullptr)) { \
+ return field->Get ## Kind (field->GetDeclaringClass())Ptr; \
+ } \
+ /* Will throw exception by checking with Thread::Current. */ \
+ return 0; \
+ } \
+ \
+ extern "C" RetType artGet ## Kind ## InstanceFromCode(uint32_t field_idx, \
+ mirror::Object* obj, \
+ ArtMethod* referrer, \
+ Thread* self) \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
+ ScopedQuickEntrypointChecks sqec(self); \
+ ArtField* field = FindFieldFast( \
+ field_idx, referrer, Instance ## PrimitiveOrObject ## Read, \
+ sizeof(PrimitiveType)); \
+ if (LIKELY(field != nullptr) && obj != nullptr) { \
+ return field->Get ## Kind (obj)Ptr; \
+ } \
+ field = FindInstanceField<Instance ## PrimitiveOrObject ## Read, true>( \
+ field_idx, referrer, self, sizeof(PrimitiveType), &obj); \
+ if (LIKELY(field != nullptr)) { \
+ return field->Get ## Kind (obj)Ptr; \
+ } \
+ /* Will throw exception by checking with Thread::Current. */ \
+ return 0; \
+ } \
+ \
+ extern "C" int artSet ## Kind ## StaticFromCode(uint32_t field_idx, \
+ SetType new_value, \
+ ArtMethod* referrer, \
+ Thread* self) \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
+ ScopedQuickEntrypointChecks sqec(self); \
+ ArtField* field = FindFieldFast( \
+ field_idx, referrer, Static ## PrimitiveOrObject ## Write, \
+ sizeof(PrimitiveType)); \
+ if (LIKELY(field != nullptr)) { \
+ field->Set ## Kind <false>(field->GetDeclaringClass(), new_value); \
+ return 0; \
+ } \
+ if (IsObject) { \
+ StackHandleScope<1> hs(self); \
+ HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper( \
+ reinterpret_cast<mirror::Object**>(&new_value))); \
+ field = FindFieldFromCode<Static ## PrimitiveOrObject ## Write, true>( \
+ field_idx, referrer, self, sizeof(PrimitiveType)); \
+ } else { \
+ field = FindFieldFromCode<Static ## PrimitiveOrObject ## Write, true>( \
+ field_idx, referrer, self, sizeof(PrimitiveType)); \
+ } \
+ if (LIKELY(field != nullptr)) { \
+ field->Set ## Kind <false>(field->GetDeclaringClass(), new_value); \
+ return 0; \
+ } \
+ return -1; \
+ } \
+ \
+ extern "C" int artSet ## Kind ## InstanceFromCode(uint32_t field_idx, \
+ mirror::Object* obj, \
+ SetType new_value, \
+ ArtMethod* referrer, \
+ Thread* self) \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
+ ScopedQuickEntrypointChecks sqec(self); \
+ ArtField* field = FindFieldFast( \
+ field_idx, referrer, Instance ## PrimitiveOrObject ## Write, \
+ sizeof(PrimitiveType)); \
+ if (LIKELY(field != nullptr && obj != nullptr)) { \
+ field->Set ## Kind <false>(obj, new_value); \
+ return 0; \
+ } \
+ if (IsObject) { \
+ StackHandleScope<1> hs(self); \
+ HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper( \
+ reinterpret_cast<mirror::Object**>(&new_value))); \
+ field = FindInstanceField<Instance ## PrimitiveOrObject ## Write, true>( \
+ field_idx, \
+ referrer, \
+ self, \
+ sizeof(PrimitiveType), \
+ &obj); \
+ } else { \
+ field = FindInstanceField<Instance ## PrimitiveOrObject ## Write, true>( \
+ field_idx, \
+ referrer, \
+ self, \
+ sizeof(PrimitiveType), \
+ &obj); \
+ } \
+ if (LIKELY(field != nullptr)) { \
+ field->Set ## Kind<false>(obj, new_value); \
+ return 0; \
+ } \
+ return -1; \
+ } \
+ \
+ extern "C" RetType artGet ## Kind ## StaticFromCompiledCode( \
+ uint32_t field_idx, \
+ Thread* self) \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
+ return artGet ## Kind ## StaticFromCode( \
+ field_idx, GetReferrer(self), self); \
+ } \
+ \
+ extern "C" RetType artGet ## Kind ## InstanceFromCompiledCode( \
+ uint32_t field_idx, \
+ mirror::Object* obj, \
+ Thread* self) \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
+ return artGet ## Kind ## InstanceFromCode( \
+ field_idx, obj, GetReferrer(self), self); \
+ } \
+ \
+ extern "C" int artSet ## Kind ## StaticFromCompiledCode( \
+ uint32_t field_idx, \
+ SetType new_value, \
+ Thread* self) \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
+ return artSet ## Kind ## StaticFromCode( \
+ field_idx, new_value, GetReferrer(self), self); \
+ } \
+ \
+ extern "C" int artSet ## Kind ## InstanceFromCompiledCode( \
+ uint32_t field_idx, \
+ mirror::Object* obj, \
+ SetType new_value, \
+ Thread* self) \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
+ return artSet ## Kind ## InstanceFromCode( \
+ field_idx, obj, new_value, GetReferrer(self), self); \
+ }
+
+ART_GET_FIELD_FROM_CODE(Byte, int8_t, ssize_t, uint32_t, Primitive, false, )
+ART_GET_FIELD_FROM_CODE(Boolean, int8_t, size_t, uint32_t, Primitive, false, )
+ART_GET_FIELD_FROM_CODE(Short, int16_t, ssize_t, uint16_t, Primitive, false, )
+ART_GET_FIELD_FROM_CODE(Char, int16_t, size_t, uint16_t, Primitive, false, )
+ART_GET_FIELD_FROM_CODE(32, int32_t, size_t, uint32_t, Primitive, false, )
+ART_GET_FIELD_FROM_CODE(64, int64_t, uint64_t, uint64_t, Primitive, false, )
+ART_GET_FIELD_FROM_CODE(Obj, mirror::HeapReference<mirror::Object>, mirror::Object*,
+ mirror::Object*, Object, true, .Ptr())
+
+
+// To cut down on the number of entrypoints, we have shared entries for
+// byte/boolean and char/short for setting an instance or static field. We just
+// forward those to the unsigned variant.
+extern "C" int artSet8StaticFromCompiledCode(uint32_t field_idx,
+ uint32_t new_value,
+ Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int8_t));
- if (LIKELY(field != nullptr)) {
- return field->GetBoolean(field->GetDeclaringClass());
- }
- field = FindFieldFromCode<StaticPrimitiveRead, true>(field_idx, referrer, self, sizeof(int8_t));
- if (LIKELY(field != nullptr)) {
- return field->GetBoolean(field->GetDeclaringClass());
- }
- return 0; // Will throw exception by checking with Thread::Current.
+ return artSetBooleanStaticFromCode(field_idx, new_value, GetReferrer(self), self);
}
-extern "C" ssize_t artGetShortStaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int16_t));
- if (LIKELY(field != nullptr)) {
- return field->GetShort(field->GetDeclaringClass());
- }
- field = FindFieldFromCode<StaticPrimitiveRead, true>(field_idx, referrer, self, sizeof(int16_t));
- if (LIKELY(field != nullptr)) {
- return field->GetShort(field->GetDeclaringClass());
- }
- return 0; // Will throw exception by checking with Thread::Current.
-}
-
-extern "C" size_t artGetCharStaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int16_t));
- if (LIKELY(field != nullptr)) {
- return field->GetChar(field->GetDeclaringClass());
- }
- field = FindFieldFromCode<StaticPrimitiveRead, true>(field_idx, referrer, self, sizeof(int16_t));
- if (LIKELY(field != nullptr)) {
- return field->GetChar(field->GetDeclaringClass());
- }
- return 0; // Will throw exception by checking with Thread::Current.
-}
-
-extern "C" size_t artGet32StaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int32_t));
- if (LIKELY(field != nullptr)) {
- return field->Get32(field->GetDeclaringClass());
- }
- field = FindFieldFromCode<StaticPrimitiveRead, true>(field_idx, referrer, self, sizeof(int32_t));
- if (LIKELY(field != nullptr)) {
- return field->Get32(field->GetDeclaringClass());
- }
- return 0; // Will throw exception by checking with Thread::Current.
-}
-
-extern "C" uint64_t artGet64StaticFromCode(uint32_t field_idx,
- ArtMethod* referrer,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int64_t));
- if (LIKELY(field != nullptr)) {
- return field->Get64(field->GetDeclaringClass());
- }
- field = FindFieldFromCode<StaticPrimitiveRead, true>(field_idx, referrer, self, sizeof(int64_t));
- if (LIKELY(field != nullptr)) {
- return field->Get64(field->GetDeclaringClass());
- }
- return 0; // Will throw exception by checking with Thread::Current.
-}
-
-extern "C" mirror::Object* artGetObjStaticFromCode(uint32_t field_idx,
- ArtMethod* referrer,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx,
- referrer,
- StaticObjectRead,
- sizeof(mirror::HeapReference<mirror::Object>));
- if (LIKELY(field != nullptr)) {
- return field->GetObj(field->GetDeclaringClass()).Ptr();
- }
- field = FindFieldFromCode<StaticObjectRead, true>(field_idx,
- referrer,
- self,
- sizeof(mirror::HeapReference<mirror::Object>));
- if (LIKELY(field != nullptr)) {
- return field->GetObj(field->GetDeclaringClass()).Ptr();
- }
- return nullptr; // Will throw exception by checking with Thread::Current.
-}
-
-extern "C" ssize_t artGetByteInstanceFromCode(uint32_t field_idx,
- mirror::Object* obj,
- ArtMethod* referrer,
+extern "C" int artSet16StaticFromCompiledCode(uint32_t field_idx,
+ uint16_t new_value,
Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int8_t));
- if (LIKELY(field != nullptr && obj != nullptr)) {
- return field->GetByte(obj);
- }
- field = FindInstanceField<InstancePrimitiveRead, true>(field_idx,
- referrer,
- self,
- sizeof(int8_t),
- &obj);
- if (LIKELY(field != nullptr)) {
- return field->GetByte(obj);
- }
- return 0; // Will throw exception by checking with Thread::Current.
+ return artSetCharStaticFromCode(field_idx, new_value, GetReferrer(self), self);
}
-extern "C" size_t artGetBooleanInstanceFromCode(uint32_t field_idx,
- mirror::Object* obj,
- ArtMethod* referrer,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int8_t));
- if (LIKELY(field != nullptr && obj != nullptr)) {
- return field->GetBoolean(obj);
- }
- field = FindInstanceField<InstancePrimitiveRead, true>(field_idx,
- referrer,
- self,
- sizeof(int8_t),
- &obj);
- if (LIKELY(field != nullptr)) {
- return field->GetBoolean(obj);
- }
- return 0; // Will throw exception by checking with Thread::Current.
-}
-extern "C" ssize_t artGetShortInstanceFromCode(uint32_t field_idx,
+extern "C" int artSet8InstanceFromCompiledCode(uint32_t field_idx,
mirror::Object* obj,
- ArtMethod* referrer,
+ uint8_t new_value,
Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int16_t));
- if (LIKELY(field != nullptr && obj != nullptr)) {
- return field->GetShort(obj);
- }
- field = FindInstanceField<InstancePrimitiveRead, true>(field_idx,
- referrer,
- self,
- sizeof(int16_t),
- &obj);
- if (LIKELY(field != nullptr)) {
- return field->GetShort(obj);
- }
- return 0; // Will throw exception by checking with Thread::Current.
+ return artSetBooleanInstanceFromCode(field_idx, obj, new_value, GetReferrer(self), self);
}
-extern "C" size_t artGetCharInstanceFromCode(uint32_t field_idx,
- mirror::Object* obj,
- ArtMethod* referrer,
- Thread* self)
+extern "C" int artSet16InstanceFromCompiledCode(uint32_t field_idx,
+ mirror::Object* obj,
+ uint16_t new_value,
+ Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int16_t));
- if (LIKELY(field != nullptr && obj != nullptr)) {
- return field->GetChar(obj);
- }
- field = FindInstanceField<InstancePrimitiveRead, true>(field_idx,
- referrer,
- self,
- sizeof(int16_t),
- &obj);
- if (LIKELY(field != nullptr)) {
- return field->GetChar(obj);
- }
- return 0; // Will throw exception by checking with Thread::Current.
-}
-
-extern "C" size_t artGet32InstanceFromCode(uint32_t field_idx,
- mirror::Object* obj,
- ArtMethod* referrer,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int32_t));
- if (LIKELY(field != nullptr && obj != nullptr)) {
- return field->Get32(obj);
- }
- field = FindInstanceField<InstancePrimitiveRead, true>(field_idx,
- referrer,
- self,
- sizeof(int32_t),
- &obj);
- if (LIKELY(field != nullptr)) {
- return field->Get32(obj);
- }
- return 0; // Will throw exception by checking with Thread::Current.
-}
-
-extern "C" uint64_t artGet64InstanceFromCode(uint32_t field_idx,
- mirror::Object* obj,
- ArtMethod* referrer,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int64_t));
- if (LIKELY(field != nullptr && obj != nullptr)) {
- return field->Get64(obj);
- }
- field = FindInstanceField<InstancePrimitiveRead, true>(field_idx,
- referrer,
- self,
- sizeof(int64_t),
- &obj);
- if (LIKELY(field != nullptr)) {
- return field->Get64(obj);
- }
- return 0; // Will throw exception by checking with Thread::Current.
-}
-
-extern "C" mirror::Object* artGetObjInstanceFromCode(uint32_t field_idx,
- mirror::Object* obj,
- ArtMethod* referrer,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx,
- referrer,
- InstanceObjectRead,
- sizeof(mirror::HeapReference<mirror::Object>));
- if (LIKELY(field != nullptr && obj != nullptr)) {
- return field->GetObj(obj).Ptr();
- }
- field = FindInstanceField<InstanceObjectRead, true>(field_idx,
- referrer,
- self,
- sizeof(mirror::HeapReference<mirror::Object>),
- &obj);
- if (LIKELY(field != nullptr)) {
- return field->GetObj(obj).Ptr();
- }
- return nullptr; // Will throw exception by checking with Thread::Current.
+ return artSetCharInstanceFromCode(field_idx, obj, new_value, GetReferrer(self), self);
}
extern "C" int artSet8StaticFromCode(uint32_t field_idx,
@@ -317,32 +263,7 @@
ArtMethod* referrer,
Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int8_t));
- if (LIKELY(field != nullptr)) {
- Primitive::Type type = field->GetTypeAsPrimitiveType();
- // Compiled code can't use transactional mode.
- if (type == Primitive::kPrimBoolean) {
- field->SetBoolean<false>(field->GetDeclaringClass(), new_value);
- } else {
- DCHECK_EQ(Primitive::kPrimByte, type);
- field->SetByte<false>(field->GetDeclaringClass(), new_value);
- }
- return 0; // success
- }
- field = FindFieldFromCode<StaticPrimitiveWrite, true>(field_idx, referrer, self, sizeof(int8_t));
- if (LIKELY(field != nullptr)) {
- Primitive::Type type = field->GetTypeAsPrimitiveType();
- // Compiled code can't use transactional mode.
- if (type == Primitive::kPrimBoolean) {
- field->SetBoolean<false>(field->GetDeclaringClass(), new_value);
- } else {
- DCHECK_EQ(Primitive::kPrimByte, type);
- field->SetByte<false>(field->GetDeclaringClass(), new_value);
- }
- return 0; // success
- }
- return -1; // failure
+ return artSetBooleanStaticFromCode(field_idx, new_value, referrer, self);
}
extern "C" int artSet16StaticFromCode(uint32_t field_idx,
@@ -350,108 +271,7 @@
ArtMethod* referrer,
Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int16_t));
- if (LIKELY(field != nullptr)) {
- Primitive::Type type = field->GetTypeAsPrimitiveType();
- // Compiled code can't use transactional mode.
- if (type == Primitive::kPrimChar) {
- field->SetChar<false>(field->GetDeclaringClass(), new_value);
- } else {
- DCHECK_EQ(Primitive::kPrimShort, type);
- field->SetShort<false>(field->GetDeclaringClass(), new_value);
- }
- return 0; // success
- }
- field = FindFieldFromCode<StaticPrimitiveWrite, true>(field_idx, referrer, self, sizeof(int16_t));
- if (LIKELY(field != nullptr)) {
- Primitive::Type type = field->GetTypeAsPrimitiveType();
- // Compiled code can't use transactional mode.
- if (type == Primitive::kPrimChar) {
- field->SetChar<false>(field->GetDeclaringClass(), new_value);
- } else {
- DCHECK_EQ(Primitive::kPrimShort, type);
- field->SetShort<false>(field->GetDeclaringClass(), new_value);
- }
- return 0; // success
- }
- return -1; // failure
-}
-
-extern "C" int artSet32StaticFromCode(uint32_t field_idx,
- uint32_t new_value,
- ArtMethod* referrer,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int32_t));
- if (LIKELY(field != nullptr)) {
- // Compiled code can't use transactional mode.
- field->Set32<false>(field->GetDeclaringClass(), new_value);
- return 0; // success
- }
- field = FindFieldFromCode<StaticPrimitiveWrite, true>(field_idx, referrer, self, sizeof(int32_t));
- if (LIKELY(field != nullptr)) {
- // Compiled code can't use transactional mode.
- field->Set32<false>(field->GetDeclaringClass(), new_value);
- return 0; // success
- }
- return -1; // failure
-}
-
-extern "C" int artSet64StaticFromCode(uint32_t field_idx,
- ArtMethod* referrer,
- uint64_t new_value,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int64_t));
- if (LIKELY(field != nullptr)) {
- // Compiled code can't use transactional mode.
- field->Set64<false>(field->GetDeclaringClass(), new_value);
- return 0; // success
- }
- field = FindFieldFromCode<StaticPrimitiveWrite, true>(field_idx, referrer, self, sizeof(int64_t));
- if (LIKELY(field != nullptr)) {
- // Compiled code can't use transactional mode.
- field->Set64<false>(field->GetDeclaringClass(), new_value);
- return 0; // success
- }
- return -1; // failure
-}
-
-extern "C" int artSetObjStaticFromCode(uint32_t field_idx,
- mirror::Object* new_value,
- ArtMethod* referrer,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx,
- referrer,
- StaticObjectWrite,
- sizeof(mirror::HeapReference<mirror::Object>));
- if (LIKELY(field != nullptr)) {
- if (LIKELY(!field->IsPrimitiveType())) {
- // Compiled code can't use transactional mode.
- field->SetObj<false>(field->GetDeclaringClass(), new_value);
- return 0; // success
- }
- }
- {
- StackHandleScope<1> hs(self);
- HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&new_value));
- field = FindFieldFromCode<StaticObjectWrite, true>(
- field_idx,
- referrer,
- self,
- sizeof(mirror::HeapReference<mirror::Object>));
- }
- if (LIKELY(field != nullptr)) {
- // Compiled code can't use transactional mode.
- field->SetObj<false>(field->GetDeclaringClass(), new_value);
- return 0; // success
- }
- return -1; // failure
+ return artSetCharStaticFromCode(field_idx, new_value, referrer, self);
}
extern "C" int artSet8InstanceFromCode(uint32_t field_idx,
@@ -460,35 +280,7 @@
ArtMethod* referrer,
Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int8_t));
- if (LIKELY(field != nullptr && obj != nullptr)) {
- Primitive::Type type = field->GetTypeAsPrimitiveType();
- // Compiled code can't use transactional mode.
- if (type == Primitive::kPrimBoolean) {
- field->SetBoolean<false>(obj, new_value);
- } else {
- DCHECK_EQ(Primitive::kPrimByte, type);
- field->SetByte<false>(obj, new_value);
- }
- return 0; // success
- }
- field = FindInstanceField<InstancePrimitiveWrite, true>(field_idx,
- referrer,
- self,
- sizeof(int8_t),
- &obj);
- if (LIKELY(field != nullptr)) {
- Primitive::Type type = field->GetTypeAsPrimitiveType();
- // Compiled code can't use transactional mode.
- if (type == Primitive::kPrimBoolean) {
- field->SetBoolean<false>(obj, new_value);
- } else {
- field->SetByte<false>(obj, new_value);
- }
- return 0; // success
- }
- return -1; // failure
+ return artSetBooleanInstanceFromCode(field_idx, obj, new_value, referrer, self);
}
extern "C" int artSet16InstanceFromCode(uint32_t field_idx,
@@ -497,126 +289,7 @@
ArtMethod* referrer,
Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int16_t));
- if (LIKELY(field != nullptr && obj != nullptr)) {
- Primitive::Type type = field->GetTypeAsPrimitiveType();
- // Compiled code can't use transactional mode.
- if (type == Primitive::kPrimChar) {
- field->SetChar<false>(obj, new_value);
- } else {
- DCHECK_EQ(Primitive::kPrimShort, type);
- field->SetShort<false>(obj, new_value);
- }
- return 0; // success
- }
- field = FindInstanceField<InstancePrimitiveWrite, true>(field_idx,
- referrer,
- self,
- sizeof(int16_t),
- &obj);
- if (LIKELY(field != nullptr)) {
- Primitive::Type type = field->GetTypeAsPrimitiveType();
- // Compiled code can't use transactional mode.
- if (type == Primitive::kPrimChar) {
- field->SetChar<false>(obj, new_value);
- } else {
- DCHECK_EQ(Primitive::kPrimShort, type);
- field->SetShort<false>(obj, new_value);
- }
- return 0; // success
- }
- return -1; // failure
-}
-
-extern "C" int artSet32InstanceFromCode(uint32_t field_idx,
- mirror::Object* obj,
- uint32_t new_value,
- ArtMethod* referrer,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int32_t));
- if (LIKELY(field != nullptr && obj != nullptr)) {
- // Compiled code can't use transactional mode.
- field->Set32<false>(obj, new_value);
- return 0; // success
- }
- field = FindInstanceField<InstancePrimitiveWrite, true>(field_idx,
- referrer,
- self,
- sizeof(int32_t),
- &obj);
- if (LIKELY(field != nullptr)) {
- // Compiled code can't use transactional mode.
- field->Set32<false>(obj, new_value);
- return 0; // success
- }
- return -1; // failure
-}
-
-extern "C" int artSet64InstanceFromCode(uint32_t field_idx,
- mirror::Object* obj,
- uint64_t new_value,
- ArtMethod* referrer,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int64_t));
- if (LIKELY(field != nullptr && obj != nullptr)) {
- // Compiled code can't use transactional mode.
- field->Set64<false>(obj, new_value);
- return 0; // success
- }
- field = FindInstanceField<InstancePrimitiveWrite, true>(field_idx,
- referrer,
- self,
- sizeof(int64_t),
- &obj);
- if (LIKELY(field != nullptr)) {
- // Compiled code can't use transactional mode.
- field->Set64<false>(obj, new_value);
- return 0;
- }
- return -1; // failure
-}
-
-extern "C" int artSetObjInstanceFromCode(uint32_t field_idx,
- mirror::Object* obj,
- mirror::Object* new_value,
- ArtMethod* referrer,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx,
- referrer,
- InstanceObjectWrite,
- sizeof(mirror::HeapReference<mirror::Object>));
- if (LIKELY(field != nullptr && obj != nullptr)) {
- // Compiled code can't use transactional mode.
- field->SetObj<false>(obj, new_value);
- return 0; // success
- }
- {
- StackHandleScope<2> hs(self);
- HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&obj));
- HandleWrapper<mirror::Object> h_new_value(hs.NewHandleWrapper(&new_value));
- field = FindFieldFromCode<InstanceObjectWrite, true>(
- field_idx,
- referrer,
- self,
- sizeof(mirror::HeapReference<mirror::Object>));
- }
- if (LIKELY(field != nullptr)) {
- if (UNLIKELY(obj == nullptr)) {
- ThrowNullPointerExceptionForFieldAccess(field, false);
- } else {
- // Compiled code can't use transactional mode.
- field->SetObj<false>(obj, new_value);
- return 0; // success
- }
- }
- return -1; // failure
+ return artSetCharInstanceFromCode(field_idx, obj, new_value, referrer, self);
}
extern "C" mirror::Object* artReadBarrierMark(mirror::Object* obj) {
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 071b0e2..0ceb23a 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -303,6 +303,7 @@
ArtMethod* owners_method,
uint32_t owners_dex_pc,
size_t num_waiters) {
+ Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
const char* owners_filename;
int32_t owners_line_number = 0;
if (owners_method != nullptr) {
@@ -359,7 +360,7 @@
self->SetMonitorEnterObject(GetObject());
{
uint32_t original_owner_thread_id = 0u;
- ScopedThreadStateChange tsc(self, kBlocked); // Change to blocked and give up mutator_lock_.
+ ScopedThreadSuspension tsc(self, kBlocked); // Change to blocked and give up mutator_lock_.
{
// Reacquire monitor_lock_ without mutator_lock_ for Wait.
MutexLock mu2(self, monitor_lock_);
@@ -367,22 +368,26 @@
original_owner_thread_id = owner_->GetThreadId();
if (ATRACE_ENABLED()) {
std::ostringstream oss;
- std::string name;
- owner_->GetThreadName(name);
- oss << PrettyContentionInfo(name,
- owner_->GetTid(),
- owners_method,
- owners_dex_pc,
- num_waiters);
- // Add info for contending thread.
- uint32_t pc;
- ArtMethod* m = self->GetCurrentMethod(&pc);
- const char* filename;
- int32_t line_number;
- TranslateLocation(m, pc, &filename, &line_number);
- oss << " blocking from "
- << ArtMethod::PrettyMethod(m) << "(" << (filename != nullptr ? filename : "null")
- << ":" << line_number << ")";
+ {
+ // Reacquire mutator_lock_ for getting the location info.
+ ScopedObjectAccess soa(self);
+ std::string name;
+ owner_->GetThreadName(name);
+ oss << PrettyContentionInfo(name,
+ owner_->GetTid(),
+ owners_method,
+ owners_dex_pc,
+ num_waiters);
+ // Add info for contending thread.
+ uint32_t pc;
+ ArtMethod* m = self->GetCurrentMethod(&pc);
+ const char* filename;
+ int32_t line_number;
+ TranslateLocation(m, pc, &filename, &line_number);
+ oss << " blocking from "
+ << ArtMethod::PrettyMethod(m) << "(" << (filename != nullptr ? filename : "null")
+ << ":" << line_number << ")";
+ }
ATRACE_BEGIN(oss.str().c_str());
}
monitor_contenders_.Wait(self); // Still contended so wait.
@@ -414,6 +419,8 @@
sample_percent = 100 * wait_ms / lock_profiling_threshold_;
}
if (sample_percent != 0 && (static_cast<uint32_t>(rand() % 100) < sample_percent)) {
+ // Reacquire mutator_lock_ for logging.
+ ScopedObjectAccess soa(self);
if (wait_ms > kLongWaitMs && owners_method != nullptr) {
uint32_t pc;
ArtMethod* m = self->GetCurrentMethod(&pc);
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 67b2e1c..0d24587 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -90,7 +90,8 @@
static void VMDebug_startMethodTracingFd(JNIEnv* env, jclass, jstring javaTraceFilename,
jobject javaFd, jint bufferSize, jint flags,
- jboolean samplingEnabled, jint intervalUs) {
+ jboolean samplingEnabled, jint intervalUs,
+ jboolean streamingOutput) {
int originalFd = jniGetFDFromFileDescriptor(env, javaFd);
if (originalFd < 0) {
return;
@@ -108,7 +109,10 @@
if (traceFilename.c_str() == nullptr) {
return;
}
- Trace::Start(traceFilename.c_str(), fd, bufferSize, flags, Trace::TraceOutputMode::kFile,
+ Trace::TraceOutputMode outputMode = streamingOutput
+ ? Trace::TraceOutputMode::kStreaming
+ : Trace::TraceOutputMode::kFile;
+ Trace::Start(traceFilename.c_str(), fd, bufferSize, flags, outputMode,
samplingEnabled ? Trace::TraceMode::kSampling : Trace::TraceMode::kMethodTracing,
intervalUs);
}
@@ -547,7 +551,7 @@
NATIVE_METHOD(VMDebug, startEmulatorTracing, "()V"),
NATIVE_METHOD(VMDebug, startInstructionCounting, "()V"),
NATIVE_METHOD(VMDebug, startMethodTracingDdmsImpl, "(IIZI)V"),
- NATIVE_METHOD(VMDebug, startMethodTracingFd, "(Ljava/lang/String;Ljava/io/FileDescriptor;IIZI)V"),
+ NATIVE_METHOD(VMDebug, startMethodTracingFd, "(Ljava/lang/String;Ljava/io/FileDescriptor;IIZIZ)V"),
NATIVE_METHOD(VMDebug, startMethodTracingFilename, "(Ljava/lang/String;IIZI)V"),
NATIVE_METHOD(VMDebug, stopAllocCounting, "()V"),
NATIVE_METHOD(VMDebug, stopEmulatorTracing, "()V"),
diff --git a/runtime/openjdkjvmti/OpenjdkJvmTi.cc b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
index 417d104..c0c301fa 100644
--- a/runtime/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
@@ -138,6 +138,7 @@
}
static jvmtiError SuspendThread(jvmtiEnv* env, jthread thread) {
+ ENSURE_HAS_CAP(env, can_suspend);
return ERR(NOT_IMPLEMENTED);
}
@@ -145,10 +146,12 @@
jint request_count,
const jthread* request_list,
jvmtiError* results) {
+ ENSURE_HAS_CAP(env, can_suspend);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError ResumeThread(jvmtiEnv* env, jthread thread) {
+ ENSURE_HAS_CAP(env, can_suspend);
return ERR(NOT_IMPLEMENTED);
}
@@ -156,14 +159,17 @@
jint request_count,
const jthread* request_list,
jvmtiError* results) {
+ ENSURE_HAS_CAP(env, can_suspend);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError StopThread(jvmtiEnv* env, jthread thread, jobject exception) {
+ ENSURE_HAS_CAP(env, can_signal_thread);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError InterruptThread(jvmtiEnv* env, jthread thread) {
+ ENSURE_HAS_CAP(env, can_signal_thread);
return ERR(NOT_IMPLEMENTED);
}
@@ -175,6 +181,7 @@
jthread thread,
jint* owned_monitor_count_ptr,
jobject** owned_monitors_ptr) {
+ ENSURE_HAS_CAP(env, can_get_owned_monitor_info);
return ERR(NOT_IMPLEMENTED);
}
@@ -182,12 +189,14 @@
jthread thread,
jint* monitor_info_count_ptr,
jvmtiMonitorStackDepthInfo** monitor_info_ptr) {
+ ENSURE_HAS_CAP(env, can_get_owned_monitor_stack_depth_info);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError GetCurrentContendedMonitor(jvmtiEnv* env,
jthread thread,
jobject* monitor_ptr) {
+ ENSURE_HAS_CAP(env, can_get_current_contended_monitor);
return ERR(NOT_IMPLEMENTED);
}
@@ -271,6 +280,7 @@
}
static jvmtiError PopFrame(jvmtiEnv* env, jthread thread) {
+ ENSURE_HAS_CAP(env, can_pop_frame);
return ERR(NOT_IMPLEMENTED);
}
@@ -283,30 +293,37 @@
}
static jvmtiError NotifyFramePop(jvmtiEnv* env, jthread thread, jint depth) {
+ ENSURE_HAS_CAP(env, can_generate_frame_pop_events);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError ForceEarlyReturnObject(jvmtiEnv* env, jthread thread, jobject value) {
+ ENSURE_HAS_CAP(env, can_force_early_return);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError ForceEarlyReturnInt(jvmtiEnv* env, jthread thread, jint value) {
+ ENSURE_HAS_CAP(env, can_force_early_return);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError ForceEarlyReturnLong(jvmtiEnv* env, jthread thread, jlong value) {
+ ENSURE_HAS_CAP(env, can_force_early_return);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError ForceEarlyReturnFloat(jvmtiEnv* env, jthread thread, jfloat value) {
+ ENSURE_HAS_CAP(env, can_force_early_return);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError ForceEarlyReturnDouble(jvmtiEnv* env, jthread thread, jdouble value) {
+ ENSURE_HAS_CAP(env, can_force_early_return);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError ForceEarlyReturnVoid(jvmtiEnv* env, jthread thread) {
+ ENSURE_HAS_CAP(env, can_force_early_return);
return ERR(NOT_IMPLEMENTED);
}
@@ -316,6 +333,7 @@
jobject initial_object,
const jvmtiHeapCallbacks* callbacks,
const void* user_data) {
+ ENSURE_HAS_CAP(env, can_tag_objects);
HeapUtil heap_util(&gObjectTagTable);
return heap_util.FollowReferences(env,
heap_filter,
@@ -402,6 +420,7 @@
jobject object,
jvmtiObjectReferenceCallback object_reference_callback,
const void* user_data) {
+ ENSURE_HAS_CAP(env, can_tag_objects);
return ERR(NOT_IMPLEMENTED);
}
@@ -410,6 +429,7 @@
jvmtiStackReferenceCallback stack_ref_callback,
jvmtiObjectReferenceCallback object_ref_callback,
const void* user_data) {
+ ENSURE_HAS_CAP(env, can_tag_objects);
return ERR(NOT_IMPLEMENTED);
}
@@ -417,6 +437,7 @@
jvmtiHeapObjectFilter object_filter,
jvmtiHeapObjectCallback heap_object_callback,
const void* user_data) {
+ ENSURE_HAS_CAP(env, can_tag_objects);
return ERR(NOT_IMPLEMENTED);
}
@@ -425,6 +446,7 @@
jvmtiHeapObjectFilter object_filter,
jvmtiHeapObjectCallback heap_object_callback,
const void* user_data) {
+ ENSURE_HAS_CAP(env, can_tag_objects);
return ERR(NOT_IMPLEMENTED);
}
@@ -433,6 +455,7 @@
jint depth,
jint slot,
jobject* value_ptr) {
+ ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -440,6 +463,7 @@
jthread thread,
jint depth,
jobject* value_ptr) {
+ ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -448,6 +472,7 @@
jint depth,
jint slot,
jint* value_ptr) {
+ ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -456,6 +481,7 @@
jint depth,
jint slot,
jlong* value_ptr) {
+ ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -464,6 +490,7 @@
jint depth,
jint slot,
jfloat* value_ptr) {
+ ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -472,6 +499,7 @@
jint depth,
jint slot,
jdouble* value_ptr) {
+ ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -480,6 +508,7 @@
jint depth,
jint slot,
jobject value) {
+ ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -488,6 +517,7 @@
jint depth,
jint slot,
jint value) {
+ ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -496,6 +526,7 @@
jint depth,
jint slot,
jlong value) {
+ ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -504,6 +535,7 @@
jint depth,
jint slot,
jfloat value) {
+ ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -512,30 +544,37 @@
jint depth,
jint slot,
jdouble value) {
+ ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError SetBreakpoint(jvmtiEnv* env, jmethodID method, jlocation location) {
+ ENSURE_HAS_CAP(env, can_generate_breakpoint_events);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError ClearBreakpoint(jvmtiEnv* env, jmethodID method, jlocation location) {
+ ENSURE_HAS_CAP(env, can_generate_breakpoint_events);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError SetFieldAccessWatch(jvmtiEnv* env, jclass klass, jfieldID field) {
+ ENSURE_HAS_CAP(env, can_generate_field_access_events);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError ClearFieldAccessWatch(jvmtiEnv* env, jclass klass, jfieldID field) {
+ ENSURE_HAS_CAP(env, can_generate_field_access_events);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError SetFieldModificationWatch(jvmtiEnv* env, jclass klass, jfieldID field) {
+ ENSURE_HAS_CAP(env, can_generate_field_modification_events);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError ClearFieldModificationWatch(jvmtiEnv* env, jclass klass, jfieldID field) {
+ ENSURE_HAS_CAP(env, can_generate_field_modification_events);
return ERR(NOT_IMPLEMENTED);
}
@@ -563,6 +602,7 @@
}
static jvmtiError GetSourceFileName(jvmtiEnv* env, jclass klass, char** source_name_ptr) {
+ ENSURE_HAS_CAP(env, can_get_source_file_name);
return ERR(NOT_IMPLEMENTED);
}
@@ -603,6 +643,7 @@
jint* constant_pool_count_ptr,
jint* constant_pool_byte_count_ptr,
unsigned char** constant_pool_bytes_ptr) {
+ ENSURE_HAS_CAP(env, can_get_constant_pool);
return ERR(NOT_IMPLEMENTED);
}
@@ -629,10 +670,12 @@
static jvmtiError GetSourceDebugExtension(jvmtiEnv* env,
jclass klass,
char** source_debug_extension_ptr) {
+ ENSURE_HAS_CAP(env, can_get_source_debug_extension);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError RetransformClasses(jvmtiEnv* env, jint class_count, const jclass* classes) {
+ ENSURE_HAS_CAP(env, can_retransform_classes);
std::string error_msg;
jvmtiError res = Transformer::RetransformClasses(ArtJvmTiEnv::AsArtJvmTiEnv(env),
art::Runtime::Current(),
@@ -649,6 +692,7 @@
static jvmtiError RedefineClasses(jvmtiEnv* env,
jint class_count,
const jvmtiClassDefinition* class_definitions) {
+ ENSURE_HAS_CAP(env, can_redefine_classes);
std::string error_msg;
jvmtiError res = Redefiner::RedefineClasses(ArtJvmTiEnv::AsArtJvmTiEnv(env),
art::Runtime::Current(),
@@ -673,6 +717,7 @@
static jvmtiError GetObjectMonitorUsage(jvmtiEnv* env,
jobject object,
jvmtiMonitorUsage* info_ptr) {
+ ENSURE_HAS_CAP(env, can_get_monitor_info);
return ERR(NOT_IMPLEMENTED);
}
@@ -703,6 +748,7 @@
jclass klass,
jfieldID field,
jboolean* is_synthetic_ptr) {
+ ENSURE_HAS_CAP(env, can_get_synthetic_attribute);
return FieldUtil::IsFieldSynthetic(env, klass, field, is_synthetic_ptr);
}
@@ -742,6 +788,7 @@
jmethodID method,
jint* entry_count_ptr,
jvmtiLineNumberEntry** table_ptr) {
+ ENSURE_HAS_CAP(env, can_get_line_numbers);
return MethodUtil::GetLineNumberTable(env, method, entry_count_ptr, table_ptr);
}
@@ -756,6 +803,7 @@
jmethodID method,
jint* entry_count_ptr,
jvmtiLocalVariableEntry** table_ptr) {
+ ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -763,6 +811,7 @@
jmethodID method,
jint* bytecode_count_ptr,
unsigned char** bytecodes_ptr) {
+ ENSURE_HAS_CAP(env, can_get_bytecodes);
return ERR(NOT_IMPLEMENTED);
}
@@ -771,6 +820,7 @@
}
static jvmtiError IsMethodSynthetic(jvmtiEnv* env, jmethodID method, jboolean* is_synthetic_ptr) {
+ ENSURE_HAS_CAP(env, can_get_synthetic_attribute);
return MethodUtil::IsMethodSynthetic(env, method, is_synthetic_ptr);
}
@@ -779,10 +829,12 @@
}
static jvmtiError SetNativeMethodPrefix(jvmtiEnv* env, const char* prefix) {
+ ENSURE_HAS_CAP(env, can_set_native_method_prefix);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError SetNativeMethodPrefixes(jvmtiEnv* env, jint prefix_count, char** prefixes) {
+ ENSURE_HAS_CAP(env, can_set_native_method_prefix);
return ERR(NOT_IMPLEMENTED);
}
@@ -855,7 +907,6 @@
jthread event_thread,
...) {
ENSURE_VALID_ENV(env);
- // TODO: Check for capabilities.
art::Thread* art_thread = nullptr;
if (event_thread != nullptr) {
// TODO: Need non-aborting call here, to return JVMTI_ERROR_INVALID_THREAD.
@@ -1053,18 +1104,22 @@
}
static jvmtiError GetCurrentThreadCpuTimerInfo(jvmtiEnv* env, jvmtiTimerInfo* info_ptr) {
+ ENSURE_HAS_CAP(env, can_get_current_thread_cpu_time);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError GetCurrentThreadCpuTime(jvmtiEnv* env, jlong* nanos_ptr) {
+ ENSURE_HAS_CAP(env, can_get_current_thread_cpu_time);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError GetThreadCpuTimerInfo(jvmtiEnv* env, jvmtiTimerInfo* info_ptr) {
+ ENSURE_HAS_CAP(env, can_get_thread_cpu_time);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError GetThreadCpuTime(jvmtiEnv* env, jthread thread, jlong* nanos_ptr) {
+ ENSURE_HAS_CAP(env, can_get_thread_cpu_time);
return ERR(NOT_IMPLEMENTED);
}
diff --git a/runtime/openjdkjvmti/art_jvmti.h b/runtime/openjdkjvmti/art_jvmti.h
index 91353e2..106165c 100644
--- a/runtime/openjdkjvmti/art_jvmti.h
+++ b/runtime/openjdkjvmti/art_jvmti.h
@@ -146,7 +146,7 @@
.can_generate_field_modification_events = 0,
.can_generate_field_access_events = 0,
.can_get_bytecodes = 0,
- .can_get_synthetic_attribute = 0,
+ .can_get_synthetic_attribute = 1,
.can_get_owned_monitor_info = 0,
.can_get_current_contended_monitor = 0,
.can_get_monitor_info = 0,
@@ -154,7 +154,7 @@
.can_redefine_classes = 1,
.can_signal_thread = 0,
.can_get_source_file_name = 0,
- .can_get_line_numbers = 0,
+ .can_get_line_numbers = 1,
.can_get_source_debug_extension = 0,
.can_access_local_variables = 0,
.can_maintain_original_method_order = 0,
@@ -171,10 +171,10 @@
.can_generate_all_class_hook_events = 0,
.can_generate_compiled_method_load_events = 0,
.can_generate_monitor_events = 0,
- .can_generate_vm_object_alloc_events = 0,
+ .can_generate_vm_object_alloc_events = 1,
.can_generate_native_method_bind_events = 0,
- .can_generate_garbage_collection_events = 0,
- .can_generate_object_free_events = 0,
+ .can_generate_garbage_collection_events = 1,
+ .can_generate_object_free_events = 1,
.can_force_early_return = 0,
.can_get_owned_monitor_stack_depth_info = 0,
.can_get_constant_pool = 0,
diff --git a/runtime/openjdkjvmti/events.cc b/runtime/openjdkjvmti/events.cc
index 1da08a0..34492a9 100644
--- a/runtime/openjdkjvmti/events.cc
+++ b/runtime/openjdkjvmti/events.cc
@@ -302,6 +302,64 @@
}
}
+// Checks to see if the env has the capabilities associated with the given event.
+static bool HasAssociatedCapability(ArtJvmTiEnv* env,
+ ArtJvmtiEvent event) {
+ jvmtiCapabilities caps = env->capabilities;
+ switch (event) {
+ case ArtJvmtiEvent::kBreakpoint:
+ return caps.can_generate_breakpoint_events == 1;
+
+ case ArtJvmtiEvent::kCompiledMethodLoad:
+ case ArtJvmtiEvent::kCompiledMethodUnload:
+ return caps.can_generate_compiled_method_load_events == 1;
+
+ case ArtJvmtiEvent::kException:
+ case ArtJvmtiEvent::kExceptionCatch:
+ return caps.can_generate_exception_events == 1;
+
+ case ArtJvmtiEvent::kFieldAccess:
+ return caps.can_generate_field_access_events == 1;
+
+ case ArtJvmtiEvent::kFieldModification:
+ return caps.can_generate_field_modification_events == 1;
+
+ case ArtJvmtiEvent::kFramePop:
+ return caps.can_generate_frame_pop_events == 1;
+
+ case ArtJvmtiEvent::kGarbageCollectionStart:
+ case ArtJvmtiEvent::kGarbageCollectionFinish:
+ return caps.can_generate_garbage_collection_events == 1;
+
+ case ArtJvmtiEvent::kMethodEntry:
+ return caps.can_generate_method_entry_events == 1;
+
+ case ArtJvmtiEvent::kMethodExit:
+ return caps.can_generate_method_exit_events == 1;
+
+ case ArtJvmtiEvent::kMonitorContendedEnter:
+ case ArtJvmtiEvent::kMonitorContendedEntered:
+ case ArtJvmtiEvent::kMonitorWait:
+ case ArtJvmtiEvent::kMonitorWaited:
+ return caps.can_generate_monitor_events == 1;
+
+ case ArtJvmtiEvent::kNativeMethodBind:
+ return caps.can_generate_native_method_bind_events == 1;
+
+ case ArtJvmtiEvent::kObjectFree:
+ return caps.can_generate_object_free_events == 1;
+
+ case ArtJvmtiEvent::kSingleStep:
+ return caps.can_generate_single_step_events == 1;
+
+ case ArtJvmtiEvent::kVmObjectAlloc:
+ return caps.can_generate_vm_object_alloc_events == 1;
+
+ default:
+ return true;
+ }
+}
+
jvmtiError EventHandler::SetEvent(ArtJvmTiEnv* env,
art::Thread* thread,
ArtJvmtiEvent event,
@@ -318,8 +376,6 @@
}
}
- // TODO: Capability check.
-
if (mode != JVMTI_ENABLE && mode != JVMTI_DISABLE) {
return ERR(ILLEGAL_ARGUMENT);
}
@@ -328,6 +384,10 @@
return ERR(INVALID_EVENT_TYPE);
}
+ if (!HasAssociatedCapability(env, event)) {
+ return ERR(MUST_POSSESS_CAPABILITY);
+ }
+
bool old_state = global_mask.Test(event);
if (mode == JVMTI_ENABLE) {
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 410416e..80a427b 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -415,6 +415,22 @@
return result;
}
+std::string GetJniShortName(const std::string& class_descriptor, const std::string& method) {
+ // Remove the leading 'L' and trailing ';'...
+ std::string class_name(class_descriptor);
+ CHECK_EQ(class_name[0], 'L') << class_name;
+ CHECK_EQ(class_name[class_name.size() - 1], ';') << class_name;
+ class_name.erase(0, 1);
+ class_name.erase(class_name.size() - 1, 1);
+
+ std::string short_name;
+ short_name += "Java_";
+ short_name += MangleForJni(class_name);
+ short_name += "_";
+ short_name += MangleForJni(method);
+ return short_name;
+}
+
// See http://java.sun.com/j2se/1.5.0/docs/guide/jni/spec/design.html#wp615 for the full rules.
std::string MangleForJni(const std::string& s) {
std::string result;
diff --git a/runtime/utils.h b/runtime/utils.h
index 9e663b3..5f53608 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -101,6 +101,8 @@
// of the JNI spec.
std::string MangleForJni(const std::string& s);
+std::string GetJniShortName(const std::string& class_name, const std::string& method_name);
+
// Turn "java.lang.String" into "Ljava/lang/String;".
std::string DotToDescriptor(const char* class_name);
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index b4c6b45..afd1998 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -341,11 +341,14 @@
# Note 117-nopatchoat is not broken per-se it just doesn't work (and isn't meant to) without
# --prebuild --relocate
+# 934 & 935 are broken due to dex2dex issues and app-images
TEST_ART_BROKEN_NO_RELOCATE_TESTS := \
117-nopatchoat \
118-noimage-dex2oat \
119-noimage-patchoat \
- 554-jit-profile-file
+ 554-jit-profile-file \
+ 934-load-transform \
+ 935-non-retransformable \
ifneq (,$(filter no-relocate,$(RELOCATE_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
@@ -357,9 +360,12 @@
# Temporarily disable some broken tests when forcing access checks in interpreter b/22414682
# 629 requires compilation.
+# 934 & 935 are broken due to dex2dex issues and app-images
TEST_ART_BROKEN_INTERPRETER_ACCESS_CHECK_TESTS := \
137-cfi \
- 629-vdex-speed
+ 629-vdex-speed \
+ 934-load-transform \
+ 935-non-retransformable \
ifneq (,$(filter interp-ac,$(COMPILER_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
@@ -425,6 +431,7 @@
# 147-stripped-dex-fallback is disabled because it requires --prebuild.
# 554-jit-profile-file is disabled because it needs a primary oat file to know what it should save.
# 629-vdex-speed requires compiled code.
+# 934 & 935 are broken due to dex2dex issues and app-images
TEST_ART_BROKEN_FALLBACK_RUN_TESTS := \
116-nodex2oat \
117-nopatchoat \
@@ -434,7 +441,9 @@
138-duplicate-classes-check2 \
147-stripped-dex-fallback \
554-jit-profile-file \
- 629-vdex-speed
+ 629-vdex-speed \
+ 934-load-transform \
+ 935-non-retransformable \
# This test fails without an image.
# 018, 961, 964 often time out. b/34369284
diff --git a/test/ti-agent/common_helper.cc b/test/ti-agent/common_helper.cc
index 330f7e1..ed82bb0 100644
--- a/test/ti-agent/common_helper.cc
+++ b/test/ti-agent/common_helper.cc
@@ -301,11 +301,36 @@
}
}
-// TODO Write something useful.
extern "C" JNIEXPORT void JNICALL Java_Main_doCommonClassRetransformation(JNIEnv* env,
jclass,
jobjectArray targets) {
- DoClassRetransformation(jvmti_env, env, targets);
+ jvmtiCapabilities caps;
+ jvmtiError caps_err = jvmti_env->GetCapabilities(&caps);
+ if (caps_err != JVMTI_ERROR_NONE) {
+ env->ThrowNew(env->FindClass("java/lang/Exception"),
+ "Unable to get current jvmtiEnv capabilities");
+ return;
+ }
+
+ // Allocate a new environment if we don't have the can_retransform_classes capability needed to
+ // call the RetransformClasses function.
+ jvmtiEnv* real_env = nullptr;
+ if (caps.can_retransform_classes != 1) {
+ JavaVM* vm = nullptr;
+ if (env->GetJavaVM(&vm) != 0 ||
+ vm->GetEnv(reinterpret_cast<void**>(&real_env), JVMTI_VERSION_1_0) != 0) {
+ env->ThrowNew(env->FindClass("java/lang/Exception"),
+ "Unable to create temporary jvmtiEnv for RetransformClasses call.");
+ return;
+ }
+ SetAllCapabilities(real_env);
+ } else {
+ real_env = jvmti_env;
+ }
+ DoClassRetransformation(real_env, env, targets);
+ if (caps.can_retransform_classes != 1) {
+ real_env->DisposeEnvironment();
+ }
}
// Get all capabilities except those related to retransformation.
@@ -372,15 +397,29 @@
LOG(FATAL) << "Could not get methods";
}
- ArtMethod* m = jni::DecodeArtMethod(method);
-
std::string names[2];
- {
+ if (IsJVM()) {
+ // TODO Get the JNI long name
+ char* klass_name;
+ jvmtiError klass_result = jenv->GetClassSignature(klass, &klass_name, nullptr);
+ if (klass_result == JVMTI_ERROR_NONE) {
+ std::string name_str(name);
+ std::string klass_str(klass_name);
+ names[0] = GetJniShortName(klass_str, name_str);
+ jenv->Deallocate(reinterpret_cast<unsigned char*>(klass_name));
+ } else {
+ LOG(FATAL) << "Could not get class name!";
+ }
+ } else {
ScopedObjectAccess soa(Thread::Current());
+ ArtMethod* m = jni::DecodeArtMethod(method);
names[0] = m->JniShortName();
names[1] = m->JniLongName();
}
for (const std::string& mangled_name : names) {
+ if (mangled_name == "") {
+ continue;
+ }
void* sym = dlsym(RTLD_DEFAULT, mangled_name.c_str());
if (sym == nullptr) {
continue;
diff --git a/tools/dexfuzz/README b/tools/dexfuzz/README
index c1cdf1e..78f73f5 100644
--- a/tools/dexfuzz/README
+++ b/tools/dexfuzz/README
@@ -98,7 +98,7 @@
Timed Out - mutated files that timed out for one or more backends.
Current timeouts are:
Optimizing - 5 seconds
- Intepreter - 30 seconds
+ Interpreter - 30 seconds
(use --short-timeouts to set all backends to 2 seconds.)
Successful - mutated files that executed and all backends agreed on the resulting
output. NB: if all backends crashed with the same output, this would
diff --git a/tools/jfuzz/run_dex_fuzz_test.py b/tools/jfuzz/run_dex_fuzz_test.py
index 50c4f20..34a92f6 100755
--- a/tools/jfuzz/run_dex_fuzz_test.py
+++ b/tools/jfuzz/run_dex_fuzz_test.py
@@ -19,7 +19,7 @@
import shutil
import sys
-from subprocess import check_call
+from subprocess import call
from tempfile import mkdtemp
sys.path.append(os.path.dirname(os.path.dirname(
@@ -75,6 +75,9 @@
top = GetEnvVariableOrError('ANDROID_BUILD_TOP')
self._dexfuzz_env['PATH'] = (top + '/art/tools/bisection_search:' +
self._dexfuzz_env['PATH'])
+ android_root = GetEnvVariableOrError('ANDROID_HOST_OUT')
+ self._dexfuzz_env['ANDROID_ROOT'] = android_root
+ self._dexfuzz_env['LD_LIBRARY_PATH'] = android_root + '/lib'
os.chdir(self._dexfuzz_dir)
os.mkdir('divergent_programs')
os.mkdir('bisection_outputs')
@@ -119,24 +122,30 @@
def RunDexFuzz(self):
"""Starts the DexFuzz testing."""
os.chdir(self._dexfuzz_dir)
- dexfuzz_args = ['--inputs=' + self._inputs_dir, '--execute',
- '--execute-class=Test', '--repeat=' + str(self._num_tests),
- '--dump-output', '--interpreter', '--optimizing',
+ dexfuzz_args = ['--inputs=' + self._inputs_dir,
+ '--execute',
+ '--execute-class=Test',
+ '--repeat=' + str(self._num_tests),
+ '--dump-output', '--dump-verify',
+ '--interpreter', '--optimizing',
'--bisection-search']
if self._device is not None:
dexfuzz_args += ['--device=' + self._device, '--allarm']
else:
dexfuzz_args += ['--host'] # Assume host otherwise.
- check_call(['dexfuzz'] + dexfuzz_args, env=self._dexfuzz_env)
- # TODO: summarize findings.
+ cmd = ['dexfuzz'] + dexfuzz_args
+ print('**** Running ****\n\n', cmd, '\n')
+ call(cmd, env=self._dexfuzz_env)
+ print('\n**** Results (report.log) ****\n')
+ call(['tail', '-n 24', 'report.log'])
def main():
# Handle arguments.
parser = argparse.ArgumentParser()
- parser.add_argument('--num_tests', default=10000,
+ parser.add_argument('--num_tests', default=1000,
type=int, help='number of tests to run')
- parser.add_argument('--num_inputs', default=50,
+ parser.add_argument('--num_inputs', default=10,
type=int, help='number of JFuzz program to generate')
parser.add_argument('--device', help='target device serial number')
args = parser.parse_args()
diff --git a/tools/jfuzz/run_jfuzz_test_nightly.py b/tools/jfuzz/run_jfuzz_test_nightly.py
index 29595f2..a9f8365 100755
--- a/tools/jfuzz/run_jfuzz_test_nightly.py
+++ b/tools/jfuzz/run_jfuzz_test_nightly.py
@@ -26,9 +26,6 @@
from tempfile import mkdtemp
from tempfile import TemporaryFile
-# Default arguments for run_jfuzz_test.py.
-DEFAULT_ARGS = ['--num_tests=20000']
-
# run_jfuzz_test.py success string.
SUCCESS_STRING = 'success (no divergences)'
@@ -36,17 +33,22 @@
NOT_FOUND = -1
def main(argv):
+ # Set up.
cwd = os.path.dirname(os.path.realpath(__file__))
- cmd = [cwd + '/run_jfuzz_test.py'] + DEFAULT_ARGS
+ cmd = [cwd + '/run_jfuzz_test.py']
parser = argparse.ArgumentParser()
parser.add_argument('--num_proc', default=8,
type=int, help='number of processes to run')
# Unknown arguments are passed to run_jfuzz_test.py.
(args, unknown_args) = parser.parse_known_args()
+ # Run processes.
+ cmd = cmd + unknown_args
+ print('\n**** Running ****\n\n', cmd, '\n')
output_files = [TemporaryFile('wb+') for _ in range(args.num_proc)]
processes = []
- for output_file in output_files:
- processes.append(subprocess.Popen(cmd + unknown_args, stdout=output_file,
+ for i, output_file in enumerate(output_files):
+ print('Tester', i)
+ processes.append(subprocess.Popen(cmd, stdout=output_file,
stderr=subprocess.STDOUT))
try:
# Wait for processes to terminate.
@@ -56,6 +58,7 @@
for proc in processes:
proc.kill()
# Output results.
+ print('\n**** Results ****\n')
output_dirs = []
for i, output_file in enumerate(output_files):
output_file.seek(0)
@@ -65,20 +68,24 @@
directory_match = re.search(r'Directory[^:]*: ([^\n]+)\n', output_str)
if directory_match:
output_dirs.append(directory_match.group(1))
- print('Tester', i)
if output_str.find(SUCCESS_STRING) == NOT_FOUND:
- print(output_str)
+ print('Tester', i, output_str)
else:
- print(SUCCESS_STRING)
+ print('Tester', i, SUCCESS_STRING)
# Gather divergences.
global_out_dir = mkdtemp('jfuzz_nightly')
- divergence_nr = 1
+ divergence_nr = 0
for out_dir in output_dirs:
for divergence_dir in glob(out_dir + '/divergence*/'):
+ divergence_nr += 1
shutil.copytree(divergence_dir,
global_out_dir + '/divergence' + str(divergence_nr))
- divergence_nr += 1
- print('Global output directory:', global_out_dir)
+ if divergence_nr > 0:
+ print('\n!!!! Divergences !!!!', divergence_nr)
+ else:
+ print ('\nSuccess')
+ print('\nGlobal output directory:', global_out_dir)
+ print()
if __name__ == '__main__':
main(sys.argv)