Merge changes I14c7cddd,I4a39d0e8,I3f24c8f4,I9699f138
* changes:
Replace StringPiece with std::string_view in art/runtime/.
Replace StringPiece with std::string_view in HashSet.
Replace StringPiece with std::string_view in Signature.
Replace StringPiece with std::string_view in class.h.
diff --git a/Android.mk b/Android.mk
index 526cd59..1a7ed43 100644
--- a/Android.mk
+++ b/Android.mk
@@ -352,13 +352,18 @@
# Module with both release and debug variants, as well as
# additional tools.
TARGET_RUNTIME_APEX := com.android.runtime.debug
+ APEX_TEST_MODULE := art-check-debug-apex-gen-fakebin
else
# Release module (without debug variants nor tools).
TARGET_RUNTIME_APEX := com.android.runtime.release
+ APEX_TEST_MODULE := art-check-release-apex-gen-fakebin
endif
LOCAL_MODULE := com.android.runtime
LOCAL_REQUIRED_MODULES := $(TARGET_RUNTIME_APEX)
+ifneq ($(HOST_OS),darwin)
+ LOCAL_REQUIRED_MODULES += $(APEX_TEST_MODULE)
+endif
# Clear locally used variable.
art_target_include_debug_build :=
diff --git a/build/apex/Android.bp b/build/apex/Android.bp
index 4a6637b..e3e5b6e 100644
--- a/build/apex/Android.bp
+++ b/build/apex/Android.bp
@@ -230,3 +230,72 @@
},
},
}
+
+python_binary_host {
+ name: "art-apex-tester",
+ srcs: ["art_apex_test.py"],
+ main: "art_apex_test.py",
+ version: {
+ py2: {
+ enabled: false,
+ },
+ py3: {
+ enabled: true,
+ },
+ },
+}
+
+// Genrules so we can run the checker, and an empty Java library so that it gets executed.
+
+genrule {
+ name: "art-check-release-apex-gen",
+ srcs: [":com.android.runtime.release"],
+ tools: [
+ "art-apex-tester",
+ "debugfs",
+ ],
+ cmd: "$(location art-apex-tester)"
+ + " --debugfs $(location debugfs)"
+ + " --tmpdir $(genDir)"
+ + " $(in)"
+ + " && touch $(out)",
+ out: ["art-check-release-apex-gen.dummy"],
+}
+cc_prebuilt_binary {
+ name: "art-check-release-apex-gen-fakebin",
+ srcs: [":art-check-release-apex-gen"],
+ host_supported: true,
+ device_supported: false,
+ target: {
+ darwin: {
+ enabled: false, // No python3.
+ },
+ },
+}
+
+genrule {
+ name: "art-check-debug-apex-gen",
+ srcs: [":com.android.runtime.debug"],
+ tools: [
+ "art-apex-tester",
+ "debugfs",
+ ],
+ cmd: "$(location art-apex-tester)"
+ + " --debugfs $(location debugfs)"
+ + " --tmpdir $(genDir)"
+ + " --debug"
+ + " $(in)"
+ + " && touch $(out)",
+ out: ["art-check-debug-apex-gen.dummy"],
+}
+cc_prebuilt_binary {
+ name: "art-check-debug-apex-gen-fakebin",
+ srcs: [":art-check-debug-apex-gen"],
+ host_supported: true,
+ device_supported: false,
+ target: {
+ darwin: {
+ enabled: false, // No python3.
+ },
+ },
+}
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 0d35fec..4d7ae9b 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -99,7 +99,10 @@
}
}
}
+
if (instruction_set_features == nullptr) {
+ // '--instruction-set-features/--instruction-set-variant' were not used.
+ // Use build-time defined features.
instruction_set_features = InstructionSetFeatures::FromCppDefines();
}
compiler_options_->instruction_set_features_ = std::move(instruction_set_features);
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 2721cb5..2de0f0c 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -20,6 +20,7 @@
#include "art_method-inl.h"
#include "base/utils.h"
#include "class_linker.h"
+#include "class_root.h"
#include "dex/invoke_type.h"
#include "driver/compiler_options.h"
#include "gc/space/image_space.h"
@@ -362,4 +363,13 @@
return info;
}
+void IntrinsicVisitor::AssertNonMovableStringClass() {
+ if (kIsDebugBuild) {
+ Thread* const self = Thread::Current();
+ ReaderMutexLock mu(self, *Locks::mutator_lock_);
+ ObjPtr<mirror::Class> string_class = GetClassRoot<art::mirror::String>();
+ CHECK(!art::Runtime::Current()->GetHeap()->IsMovableObject(string_class));
+ }
+}
+
} // namespace art
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 50b13c8..ab68cce 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -142,6 +142,8 @@
protected:
IntrinsicVisitor() {}
+ static void AssertNonMovableStringClass();
+
private:
DISALLOW_COPY_AND_ASSIGN(IntrinsicVisitor);
};
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index ca790f6..ec5d17a 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -1464,8 +1464,16 @@
// All string objects must have the same type since String cannot be subclassed.
// Receiver must be a string object, so its class field is equal to all strings' class fields.
// If the argument is a string object, its class field must be equal to receiver's class field.
+ //
+ // As the String class is expected to be non-movable, we can read the class
+ // field from String.equals' arguments without read barriers.
+ AssertNonMovableStringClass();
+ // /* HeapReference<Class> */ temp = str->klass_
__ Ldr(temp, MemOperand(str.X(), class_offset));
+ // /* HeapReference<Class> */ temp1 = arg->klass_
__ Ldr(temp1, MemOperand(arg.X(), class_offset));
+ // Also, because we use the previously loaded class references only in the
+ // following comparison, we don't need to unpoison them.
__ Cmp(temp, temp1);
__ B(&return_false, ne);
}
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 396ff62..f0aa92e 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -1529,8 +1529,16 @@
// All string objects must have the same type since String cannot be subclassed.
// Receiver must be a string object, so its class field is equal to all strings' class fields.
// If the argument is a string object, its class field must be equal to receiver's class field.
+ //
+ // As the String class is expected to be non-movable, we can read the class
+ // field from String.equals' arguments without read barriers.
+ AssertNonMovableStringClass();
+ // /* HeapReference<Class> */ temp = str->klass_
__ Ldr(temp, MemOperand(str, class_offset));
+ // /* HeapReference<Class> */ out = arg->klass_
__ Ldr(out, MemOperand(arg, class_offset));
+ // Also, because we use the previously loaded class references only in the
+ // following comparison, we don't need to unpoison them.
__ Cmp(temp, out);
__ B(ne, &return_false, /* is_far_target= */ false);
}
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 5b35974..3da0e57 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -1575,8 +1575,16 @@
// All string objects must have the same type since String cannot be subclassed.
// Receiver must be a string object, so its class field is equal to all strings' class fields.
// If the argument is a string object, its class field must be equal to receiver's class field.
+ //
+ // As the String class is expected to be non-movable, we can read the class
+ // field from String.equals' arguments without read barriers.
+ AssertNonMovableStringClass();
+ // /* HeapReference<Class> */ temp1 = str->klass_
__ Lw(temp1, str, class_offset);
+ // /* HeapReference<Class> */ temp2 = arg->klass_
__ Lw(temp2, arg, class_offset);
+ // Also, because we use the previously loaded class references only in the
+ // following comparison, we don't need to unpoison them.
__ Bne(temp1, temp2, &return_false);
}
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index afaa4ca..3e68765 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -1429,8 +1429,16 @@
// All string objects must have the same type since String cannot be subclassed.
// Receiver must be a string object, so its class field is equal to all strings' class fields.
// If the argument is a string object, its class field must be equal to receiver's class field.
+ //
+ // As the String class is expected to be non-movable, we can read the class
+ // field from String.equals' arguments without read barriers.
+ AssertNonMovableStringClass();
+ // /* HeapReference<Class> */ temp1 = str->klass_
__ Lw(temp1, str, class_offset);
+ // /* HeapReference<Class> */ temp2 = arg->klass_
__ Lw(temp2, arg, class_offset);
+ // Also, because we use the previously loaded class references only in the
+ // following comparison, we don't need to unpoison them.
__ Bnec(temp1, temp2, &return_false);
}
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 8747f06..de697f0 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -1072,7 +1072,15 @@
// All string objects must have the same type since String cannot be subclassed.
// Receiver must be a string object, so its class field is equal to all strings' class fields.
// If the argument is a string object, its class field must be equal to receiver's class field.
+ //
+ // As the String class is expected to be non-movable, we can read the class
+ // field from String.equals' arguments without read barriers.
+ AssertNonMovableStringClass();
+ // Also, because we use the loaded class references only to compare them, we
+ // don't need to unpoison them.
+ // /* HeapReference<Class> */ ecx = str->klass_
__ movl(ecx, Address(str, class_offset));
+ // if (ecx != /* HeapReference<Class> */ arg->klass_) return false
__ cmpl(ecx, Address(arg, class_offset));
__ j(kNotEqual, &return_false);
}
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 167c1d8..e79c0c9 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -1275,7 +1275,15 @@
// All string objects must have the same type since String cannot be subclassed.
// Receiver must be a string object, so its class field is equal to all strings' class fields.
// If the argument is a string object, its class field must be equal to receiver's class field.
+ //
+ // As the String class is expected to be non-movable, we can read the class
+ // field from String.equals' arguments without read barriers.
+ AssertNonMovableStringClass();
+ // Also, because we use the loaded class references only to compare them, we
+ // don't need to unpoison them.
+ // /* HeapReference<Class> */ rcx = str->klass_
__ movl(rcx, Address(str, class_offset));
+ // if (rcx != /* HeapReference<Class> */ arg->klass_) return false
__ cmpl(rcx, Address(arg, class_offset));
__ j(kNotEqual, &return_false);
}
diff --git a/compiler/utils/swap_space.cc b/compiler/utils/swap_space.cc
index dee83d1..841ff1c 100644
--- a/compiler/utils/swap_space.cc
+++ b/compiler/utils/swap_space.cc
@@ -115,12 +115,11 @@
? free_by_size_.end()
: free_by_size_.lower_bound(FreeBySizeEntry { size, free_by_start_.begin() });
if (it != free_by_size_.end()) {
- auto entry = it->free_by_start_entry;
- SpaceChunk old_chunk = *entry;
+ SpaceChunk old_chunk = *it->free_by_start_entry;
if (old_chunk.size == size) {
RemoveChunk(it);
} else {
- // Try to avoid deallocating and allocating the std::set<> nodes.
+ // Avoid deallocating and allocating the std::set<> nodes.
// This would be much simpler if we could use replace() from Boost.Bimap.
// The free_by_start_ map contains disjoint intervals ordered by the `ptr`.
@@ -128,24 +127,9 @@
it->free_by_start_entry->ptr += size;
it->free_by_start_entry->size -= size;
- // The free_by_size_ map is ordered by the `size` and then `free_by_start_entry->ptr`.
- // Adjusting the `ptr` above does not change that ordering but decreasing `size` can
- // push the node before the previous node(s).
- if (it == free_by_size_.begin()) {
- it->size -= size;
- } else {
- auto prev = it;
- --prev;
- FreeBySizeEntry new_value(old_chunk.size - size, entry);
- if (free_by_size_.key_comp()(*prev, new_value)) {
- it->size -= size;
- } else {
- // Changing in place would break the std::set<> ordering, we need to remove and insert.
- // TODO: When C++17 becomes available, use std::map<>::extract(), modify, insert.
- free_by_size_.erase(it);
- free_by_size_.insert(new_value);
- }
- }
+ auto node = free_by_size_.extract(it);
+ node.value().size -= size;
+ free_by_size_.insert(std::move(node));
}
return old_chunk.ptr;
} else {
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 808ad6c..ad1dda4 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -296,6 +296,10 @@
UsageError(" Default: arm");
UsageError("");
UsageError(" --instruction-set-features=...,: Specify instruction set features");
+ UsageError(" On target the value 'runtime' can be used to detect features at run time.");
+ UsageError(" If target does not support run-time detection the value 'runtime'");
+ UsageError(" has the same effect as the value 'default'.");
+ UsageError(" Note: the value 'runtime' has no effect if it is used on host.");
UsageError(" Example: --instruction-set-features=div");
UsageError(" Default: default");
UsageError("");
@@ -875,9 +879,9 @@
oat_unstripped_ = std::move(parser_options->oat_symbols);
}
- // If no instruction set feature was given, use the default one for the target
- // instruction set.
- if (compiler_options_->instruction_set_features_.get() == nullptr) {
+ if (compiler_options_->instruction_set_features_ == nullptr) {
+ // '--instruction-set-features/--instruction-set-variant' were not used.
+ // Use features for the 'default' variant.
compiler_options_->instruction_set_features_ = InstructionSetFeatures::FromVariant(
compiler_options_->instruction_set_, "default", &parser_options->error_msg);
if (compiler_options_->instruction_set_features_ == nullptr) {
@@ -890,9 +894,9 @@
std::unique_ptr<const InstructionSetFeatures> runtime_features(
InstructionSetFeatures::FromCppDefines());
if (!compiler_options_->GetInstructionSetFeatures()->Equals(runtime_features.get())) {
- LOG(WARNING) << "Mismatch between dex2oat instruction set features ("
+ LOG(WARNING) << "Mismatch between dex2oat instruction set features to use ("
<< *compiler_options_->GetInstructionSetFeatures()
- << ") and those of dex2oat executable (" << *runtime_features
+ << ") and those from CPP defines (" << *runtime_features
<< ") for the command line:\n" << CommandLine();
}
}
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index 524bce0..d3bfb57 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include <algorithm>
#include <regex>
#include <sstream>
#include <string>
@@ -28,6 +29,7 @@
#include "common_runtime_test.h"
+#include "arch/instruction_set_features.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/utils.h"
@@ -2315,4 +2317,38 @@
}));
}
+class Dex2oatISAFeaturesRuntimeDetectionTest : public Dex2oatTest {
+ protected:
+ void RunTest(const std::vector<std::string>& extra_args = {}) {
+ std::string dex_location = GetScratchDir() + "/Dex2OatSwapTest.jar";
+ std::string odex_location = GetOdexDir() + "/Dex2OatSwapTest.odex";
+
+ Copy(GetTestDexFileName(), dex_location);
+
+ ASSERT_TRUE(GenerateOdexForTest(dex_location,
+ odex_location,
+ CompilerFilter::kSpeed,
+ extra_args));
+ }
+
+ std::string GetTestDexFileName() {
+ return GetDexSrc1();
+ }
+};
+
+TEST_F(Dex2oatISAFeaturesRuntimeDetectionTest, TestCurrentRuntimeFeaturesAsDex2OatArguments) {
+ std::vector<std::string> argv;
+ Runtime::Current()->AddCurrentRuntimeFeaturesAsDex2OatArguments(&argv);
+ auto option_pos =
+ std::find(std::begin(argv), std::end(argv), "--instruction-set-features=runtime");
+ if (InstructionSetFeatures::IsRuntimeDetectionSupported()) {
+ EXPECT_TRUE(kIsTargetBuild);
+ EXPECT_NE(option_pos, std::end(argv));
+ } else {
+ EXPECT_EQ(option_pos, std::end(argv));
+ }
+
+ RunTest();
+}
+
} // namespace art
diff --git a/libartbase/base/mem_map.cc b/libartbase/base/mem_map.cc
index 2833750..ba2a7c6 100644
--- a/libartbase/base/mem_map.cc
+++ b/libartbase/base/mem_map.cc
@@ -796,13 +796,13 @@
// Shrink the reservation MemMap and update its `gMaps` entry.
std::lock_guard<std::mutex> mu(*mem_maps_lock_);
auto it = GetGMapsEntry(*this);
- // TODO: When C++17 becomes available, use std::map<>::extract(), modify, insert.
- gMaps->erase(it);
+ auto node = gMaps->extract(it);
begin_ += byte_count;
size_ -= byte_count;
base_begin_ = begin_;
base_size_ = size_;
- gMaps->emplace(base_begin_, this);
+ node.key() = base_begin_;
+ gMaps->insert(std::move(node));
}
}
@@ -1266,9 +1266,9 @@
std::lock_guard<std::mutex> mu(*mem_maps_lock_);
if (base_begin < aligned_base_begin) {
auto it = GetGMapsEntry(*this);
- // TODO: When C++17 becomes available, use std::map<>::extract(), modify, insert.
- gMaps->erase(it);
- gMaps->insert(std::make_pair(aligned_base_begin, this));
+ auto node = gMaps->extract(it);
+ node.key() = aligned_base_begin;
+ gMaps->insert(std::move(node));
}
base_begin_ = aligned_base_begin;
base_size_ = aligned_base_size;
diff --git a/runtime/arch/arm/instruction_set_features_arm.cc b/runtime/arch/arm/instruction_set_features_arm.cc
index fcf3c75..fdf4dbd 100644
--- a/runtime/arch/arm/instruction_set_features_arm.cc
+++ b/runtime/arch/arm/instruction_set_features_arm.cc
@@ -319,8 +319,9 @@
bool has_atomic_ldrd_strd = has_atomic_ldrd_strd_;
bool has_div = has_div_;
bool has_armv8a = has_armv8a_;
- for (auto i = features.begin(); i != features.end(); i++) {
- std::string feature = android::base::Trim(*i);
+ for (const std::string& feature : features) {
+ DCHECK_EQ(android::base::Trim(feature), feature)
+ << "Feature name is not trimmed: '" << feature << "'";
if (feature == "div") {
has_div = true;
} else if (feature == "-div") {
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.cc b/runtime/arch/arm64/instruction_set_features_arm64.cc
index 4a2b9d5..196f358 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.cc
+++ b/runtime/arch/arm64/instruction_set_features_arm64.cc
@@ -315,8 +315,9 @@
bool has_lse = has_lse_;
bool has_fp16 = has_fp16_;
bool has_dotprod = has_dotprod_;
- for (auto i = features.begin(); i != features.end(); i++) {
- std::string feature = android::base::Trim(*i);
+ for (const std::string& feature : features) {
+ DCHECK_EQ(android::base::Trim(feature), feature)
+ << "Feature name is not trimmed: '" << feature << "'";
if (feature == "a53") {
is_a53 = true;
} else if (feature == "-a53") {
@@ -367,4 +368,17 @@
has_dotprod));
}
+std::unique_ptr<const InstructionSetFeatures>
+Arm64InstructionSetFeatures::AddRuntimeDetectedFeatures(
+ const InstructionSetFeatures *features) const {
+ const Arm64InstructionSetFeatures *arm64_features = features->AsArm64InstructionSetFeatures();
+ return std::unique_ptr<const InstructionSetFeatures>(
+ new Arm64InstructionSetFeatures(fix_cortex_a53_835769_,
+ fix_cortex_a53_843419_,
+ arm64_features->has_crc_,
+ arm64_features->has_lse_,
+ arm64_features->has_fp16_,
+ arm64_features->has_dotprod_));
+}
+
} // namespace art
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.h b/runtime/arch/arm64/instruction_set_features_arm64.h
index 4ec8fa2..432b9ef 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.h
+++ b/runtime/arch/arm64/instruction_set_features_arm64.h
@@ -98,6 +98,9 @@
AddFeaturesFromSplitString(const std::vector<std::string>& features,
std::string* error_msg) const override;
+ std::unique_ptr<const InstructionSetFeatures>
+ AddRuntimeDetectedFeatures(const InstructionSetFeatures *features) const override;
+
private:
Arm64InstructionSetFeatures(bool needs_a53_835769_fix,
bool needs_a53_843419_fix,
diff --git a/runtime/arch/arm64/instruction_set_features_arm64_test.cc b/runtime/arch/arm64/instruction_set_features_arm64_test.cc
index 99d6b0d..eef8f08 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64_test.cc
+++ b/runtime/arch/arm64/instruction_set_features_arm64_test.cc
@@ -170,4 +170,54 @@
EXPECT_EQ(armv8_2a_cpu_features->AsBitmap(), 14U);
}
+TEST(Arm64InstructionSetFeaturesTest, IsRuntimeDetectionSupported) {
+ if (kRuntimeISA == InstructionSet::kArm64) {
+ EXPECT_TRUE(InstructionSetFeatures::IsRuntimeDetectionSupported());
+ }
+}
+
+TEST(Arm64InstructionSetFeaturesTest, FeaturesFromRuntimeDetection) {
+ if (kRuntimeISA != InstructionSet::kArm64) {
+ return;
+ }
+
+ std::unique_ptr<const InstructionSetFeatures> hwcap_features(
+ InstructionSetFeatures::FromHwcap());
+ std::unique_ptr<const InstructionSetFeatures> runtime_detected_features(
+ InstructionSetFeatures::FromRuntimeDetection());
+ std::unique_ptr<const InstructionSetFeatures> cpp_defined_features(
+ InstructionSetFeatures::FromCppDefines());
+ EXPECT_NE(runtime_detected_features, nullptr);
+ EXPECT_TRUE(InstructionSetFeatures::IsRuntimeDetectionSupported());
+ EXPECT_TRUE(runtime_detected_features->Equals(hwcap_features.get()));
+ EXPECT_TRUE(runtime_detected_features->HasAtLeast(cpp_defined_features.get()));
+}
+
+TEST(Arm64InstructionSetFeaturesTest, AddFeaturesFromStringRuntime) {
+ std::unique_ptr<const InstructionSetFeatures> features(
+ InstructionSetFeatures::FromBitmap(InstructionSet::kArm64, 0x0));
+ std::unique_ptr<const InstructionSetFeatures> hwcap_features(
+ InstructionSetFeatures::FromHwcap());
+
+ std::string error_msg;
+ features = features->AddFeaturesFromString("runtime", &error_msg);
+
+ EXPECT_NE(features, nullptr);
+ EXPECT_TRUE(error_msg.empty());
+
+ if (kRuntimeISA == InstructionSet::kArm64) {
+ EXPECT_TRUE(features->Equals(hwcap_features.get()));
+ EXPECT_EQ(features->GetFeatureString(), hwcap_features->GetFeatureString());
+ }
+
+ std::unique_ptr<const InstructionSetFeatures> a53_features(
+ Arm64InstructionSetFeatures::FromVariant("cortex-a53", &error_msg));
+ features = a53_features->AddFeaturesFromString("runtime", &error_msg);
+ EXPECT_NE(features, nullptr);
+ EXPECT_TRUE(error_msg.empty()) << error_msg;
+ const Arm64InstructionSetFeatures *arm64_features = features->AsArm64InstructionSetFeatures();
+ EXPECT_TRUE(arm64_features->NeedFixCortexA53_835769());
+ EXPECT_TRUE(arm64_features->NeedFixCortexA53_843419());
+}
+
} // namespace art
diff --git a/runtime/arch/instruction_set_features.cc b/runtime/arch/instruction_set_features.cc
index 886b40a..c5c2d31 100644
--- a/runtime/arch/instruction_set_features.cc
+++ b/runtime/arch/instruction_set_features.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include <algorithm>
+
#include "instruction_set_features.h"
#include <algorithm>
@@ -113,6 +115,16 @@
UNREACHABLE();
}
+std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromRuntimeDetection() {
+ switch (kRuntimeISA) {
+#ifdef ART_TARGET_ANDROID
+ case InstructionSet::kArm64:
+ return Arm64InstructionSetFeatures::FromHwcap();
+#endif
+ default:
+ return nullptr;
+ }
+}
std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromCpuInfo() {
switch (kRuntimeISA) {
@@ -184,44 +196,57 @@
}
std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::AddFeaturesFromString(
- const std::string& feature_list, std::string* error_msg) const {
- if (feature_list.empty()) {
- *error_msg = "No instruction set features specified";
- return std::unique_ptr<const InstructionSetFeatures>();
- }
+ const std::string& feature_list, /* out */ std::string* error_msg) const {
std::vector<std::string> features;
Split(feature_list, ',', &features);
- bool use_default = false; // Have we seen the 'default' feature?
- bool first = false; // Is this first feature?
- for (auto it = features.begin(); it != features.end();) {
- if (use_default) {
- *error_msg = "Unexpected instruction set features after 'default'";
- return std::unique_ptr<const InstructionSetFeatures>();
- }
- std::string feature = android::base::Trim(*it);
- bool erase = false;
- if (feature == "default") {
- if (!first) {
- use_default = true;
- erase = true;
- } else {
- *error_msg = "Unexpected instruction set features before 'default'";
- return std::unique_ptr<const InstructionSetFeatures>();
- }
- }
- if (!erase) {
- ++it;
- } else {
- it = features.erase(it);
- }
- first = true;
+ std::transform(std::begin(features), std::end(features), std::begin(features),
+ [](const std::string &s) { return android::base::Trim(s); });
+ auto empty_strings_begin = std::copy_if(std::begin(features), std::end(features),
+ std::begin(features),
+ [](const std::string& s) { return !s.empty(); });
+ features.erase(empty_strings_begin, std::end(features));
+ if (features.empty()) {
+ *error_msg = "No instruction set features specified";
+ return nullptr;
}
- // Expectation: "default" is standalone, no other flags. But an empty features vector after
- // processing can also come along if the handled flags are the only ones in the list. So
- // logically, we check "default -> features.empty."
- DCHECK(!use_default || features.empty());
- return AddFeaturesFromSplitString(features, error_msg);
+ bool use_default = false;
+ bool use_runtime_detection = false;
+ for (const std::string& feature : features) {
+ if (feature == "default") {
+ if (features.size() > 1) {
+ *error_msg = "Specific instruction set feature(s) cannot be used when 'default' is used.";
+ return nullptr;
+ }
+ use_default = true;
+ features.pop_back();
+ break;
+ } else if (feature == "runtime") {
+ if (features.size() > 1) {
+ *error_msg = "Specific instruction set feature(s) cannot be used when 'runtime' is used.";
+ return nullptr;
+ }
+ use_runtime_detection = true;
+ features.pop_back();
+ break;
+ }
+ }
+ // Expectation: "default" and "runtime" are standalone, no other feature names.
+ // But an empty features vector after processing can also come along if the
+ // handled feature names are the only ones in the list. So
+ // logically, we check "default or runtime => features.empty."
+ DCHECK((!use_default && !use_runtime_detection) || features.empty());
+
+ std::unique_ptr<const InstructionSetFeatures> runtime_detected_features;
+ if (use_runtime_detection) {
+ runtime_detected_features = FromRuntimeDetection();
+ }
+
+ if (runtime_detected_features != nullptr) {
+ return AddRuntimeDetectedFeatures(runtime_detected_features.get());
+ } else {
+ return AddFeaturesFromSplitString(features, error_msg);
+ }
}
const ArmInstructionSetFeatures* InstructionSetFeatures::AsArmInstructionSetFeatures() const {
@@ -262,6 +287,12 @@
return std::find(begin, end, variant) != end;
}
+std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::AddRuntimeDetectedFeatures(
+ const InstructionSetFeatures *features ATTRIBUTE_UNUSED) const {
+ UNIMPLEMENTED(FATAL) << kRuntimeISA;
+ UNREACHABLE();
+}
+
std::ostream& operator<<(std::ostream& os, const InstructionSetFeatures& rhs) {
os << "ISA: " << rhs.GetInstructionSet() << " Feature string: " << rhs.GetFeatureString();
return os;
diff --git a/runtime/arch/instruction_set_features.h b/runtime/arch/instruction_set_features.h
index f910a41..9222a7b 100644
--- a/runtime/arch/instruction_set_features.h
+++ b/runtime/arch/instruction_set_features.h
@@ -48,6 +48,20 @@
// Turn C pre-processor #defines into the equivalent instruction set features for kRuntimeISA.
static std::unique_ptr<const InstructionSetFeatures> FromCppDefines();
+ // Check if run-time detection of instruction set features is supported.
+ //
+ // Return: true - if run-time detection is supported on a target device.
+ // false - otherwise
+ static bool IsRuntimeDetectionSupported() {
+ return FromRuntimeDetection() != nullptr;
+ }
+
+ // Use run-time detection to get instruction set features.
+ //
+ // Return: a set of detected features or nullptr if runtime detection is not
+ // supported on a target.
+ static std::unique_ptr<const InstructionSetFeatures> FromRuntimeDetection();
+
// Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
static std::unique_ptr<const InstructionSetFeatures> FromCpuInfo();
@@ -126,6 +140,10 @@
AddFeaturesFromSplitString(const std::vector<std::string>& features,
std::string* error_msg) const = 0;
+ // Add run-time detected architecture specific features in sub-classes.
+ virtual std::unique_ptr<const InstructionSetFeatures>
+ AddRuntimeDetectedFeatures(const InstructionSetFeatures *features ATTRIBUTE_UNUSED) const;
+
private:
DISALLOW_COPY_AND_ASSIGN(InstructionSetFeatures);
};
diff --git a/runtime/arch/instruction_set_features_test.cc b/runtime/arch/instruction_set_features_test.cc
index 3a39a2a..d9b2e3f 100644
--- a/runtime/arch/instruction_set_features_test.cc
+++ b/runtime/arch/instruction_set_features_test.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include <array>
+
#include "instruction_set_features.h"
#include <gtest/gtest.h>
@@ -161,4 +163,145 @@
<< "\nFeatures from build: " << *instruction_set_features.get();
}
+TEST(InstructionSetFeaturesTest, FeaturesFromRuntimeDetection) {
+ if (!InstructionSetFeatures::IsRuntimeDetectionSupported()) {
+ EXPECT_EQ(InstructionSetFeatures::FromRuntimeDetection(), nullptr);
+ }
+}
+
+// The instruction set feature string must not contain 'default' together with
+// other feature names.
+//
+// Test that InstructionSetFeatures::AddFeaturesFromString returns nullptr and
+// an error is reported when the value 'default' is specified together
+// with other feature names in an instruction set feature string.
+TEST(InstructionSetFeaturesTest, AddFeaturesFromStringWithDefaultAndOtherNames) {
+ std::unique_ptr<const InstructionSetFeatures> cpp_defined_features(
+ InstructionSetFeatures::FromCppDefines());
+ std::vector<std::string> invalid_feature_strings = {
+ "a,default",
+ "default,a",
+ "a,default,b",
+ "a,b,default",
+ "default,a,b,c",
+ "a,b,default,c,d",
+ "a, default ",
+ " default , a",
+ "a, default , b",
+ "default,runtime"
+ };
+
+ for (const std::string& invalid_feature_string : invalid_feature_strings) {
+ std::string error_msg;
+ EXPECT_EQ(cpp_defined_features->AddFeaturesFromString(invalid_feature_string, &error_msg),
+ nullptr) << " Invalid feature string: '" << invalid_feature_string << "'";
+ EXPECT_EQ(error_msg,
+ "Specific instruction set feature(s) cannot be used when 'default' is used.");
+ }
+}
+
+// The instruction set feature string must not contain 'runtime' together with
+// other feature names.
+//
+// Test that InstructionSetFeatures::AddFeaturesFromString returns nullptr and
+// an error is reported when the value 'runtime' is specified together
+// with other feature names in an instruction set feature string.
+TEST(InstructionSetFeaturesTest, AddFeaturesFromStringWithRuntimeAndOtherNames) {
+ std::unique_ptr<const InstructionSetFeatures> cpp_defined_features(
+ InstructionSetFeatures::FromCppDefines());
+ std::vector<std::string> invalid_feature_strings = {
+ "a,runtime",
+ "runtime,a",
+ "a,runtime,b",
+ "a,b,runtime",
+ "runtime,a,b,c",
+ "a,b,runtime,c,d",
+ "a, runtime ",
+ " runtime , a",
+ "a, runtime , b",
+ "runtime,default"
+ };
+
+ for (const std::string& invalid_feature_string : invalid_feature_strings) {
+ std::string error_msg;
+ EXPECT_EQ(cpp_defined_features->AddFeaturesFromString(invalid_feature_string, &error_msg),
+ nullptr) << " Invalid feature string: '" << invalid_feature_string << "'";
+ EXPECT_EQ(error_msg,
+ "Specific instruction set feature(s) cannot be used when 'runtime' is used.");
+ }
+}
+
+// Spaces and multiple commas are ignored in an instruction set feature string.
+//
+// Test that a use of spaces and multiple commas with 'default' and 'runtime'
+// does not cause errors.
+TEST(InstructionSetFeaturesTest, AddFeaturesFromValidStringContainingDefaultOrRuntime) {
+ std::unique_ptr<const InstructionSetFeatures> cpp_defined_features(
+ InstructionSetFeatures::FromCppDefines());
+ std::vector<std::string> valid_feature_strings = {
+ "default",
+ ",,,default",
+ "default,,,,",
+ ",,,default,,,,",
+ "default, , , ",
+ " , , ,default",
+ " , , ,default, , , ",
+ " default , , , ",
+ ",,,runtime",
+ "runtime,,,,",
+ ",,,runtime,,,,",
+ "runtime, , , ",
+ " , , ,runtime",
+ " , , ,runtime, , , ",
+ " runtime , , , "
+ };
+ for (const std::string& valid_feature_string : valid_feature_strings) {
+ std::string error_msg;
+ EXPECT_NE(cpp_defined_features->AddFeaturesFromString(valid_feature_string, &error_msg),
+ nullptr) << " Valid feature string: '" << valid_feature_string << "'";
+ EXPECT_TRUE(error_msg.empty()) << error_msg;
+ }
+}
+
+// Spaces and multiple commas are ignored in an instruction set feature string.
+//
+// Test that a use of spaces and multiple commas without any feature names
+// causes errors.
+TEST(InstructionSetFeaturesTest, AddFeaturesFromInvalidStringWithoutFeatureNames) {
+ std::unique_ptr<const InstructionSetFeatures> cpp_defined_features(
+ InstructionSetFeatures::FromCppDefines());
+ std::vector<std::string> invalid_feature_strings = {
+ " ",
+ " ",
+ ",",
+ ",,",
+ " , , ,,,,,,",
+ "\t",
+ " \t ",
+ ",",
+ ",,",
+ " , , ,,,,,,"
+ };
+ for (const std::string& invalid_feature_string : invalid_feature_strings) {
+ std::string error_msg;
+ EXPECT_EQ(cpp_defined_features->AddFeaturesFromString(invalid_feature_string, &error_msg),
+ nullptr) << " Invalid feature string: '" << invalid_feature_string << "'";
+ EXPECT_EQ(error_msg, "No instruction set features specified");
+ }
+}
+
+TEST(InstructionSetFeaturesTest, AddFeaturesFromStringRuntime) {
+ std::unique_ptr<const InstructionSetFeatures> cpp_defined_features(
+ InstructionSetFeatures::FromCppDefines());
+ std::string error_msg;
+
+ const std::unique_ptr<const InstructionSetFeatures> features =
+ cpp_defined_features->AddFeaturesFromString("runtime", &error_msg);
+ EXPECT_NE(features, nullptr);
+ EXPECT_TRUE(error_msg.empty()) << error_msg;
+ if (!InstructionSetFeatures::IsRuntimeDetectionSupported()) {
+ EXPECT_TRUE(features->Equals(cpp_defined_features.get()));
+ }
+}
+
} // namespace art
diff --git a/runtime/arch/mips/instruction_set_features_mips.cc b/runtime/arch/mips/instruction_set_features_mips.cc
index 952ed25..99ce536 100644
--- a/runtime/arch/mips/instruction_set_features_mips.cc
+++ b/runtime/arch/mips/instruction_set_features_mips.cc
@@ -214,8 +214,9 @@
bool mips_isa_gte2 = mips_isa_gte2_;
bool r6 = r6_;
bool msa = msa_;
- for (auto i = features.begin(); i != features.end(); i++) {
- std::string feature = android::base::Trim(*i);
+ for (const std::string& feature : features) {
+ DCHECK_EQ(android::base::Trim(feature), feature)
+ << "Feature name is not trimmed: '" << feature << "'";
if (feature == "fpu32") {
fpu_32bit = true;
} else if (feature == "-fpu32") {
diff --git a/runtime/arch/mips64/instruction_set_features_mips64.cc b/runtime/arch/mips64/instruction_set_features_mips64.cc
index ea9f84b..2031433 100644
--- a/runtime/arch/mips64/instruction_set_features_mips64.cc
+++ b/runtime/arch/mips64/instruction_set_features_mips64.cc
@@ -114,8 +114,9 @@
Mips64InstructionSetFeatures::AddFeaturesFromSplitString(
const std::vector<std::string>& features, std::string* error_msg) const {
bool msa = msa_;
- for (auto i = features.begin(); i != features.end(); i++) {
- std::string feature = android::base::Trim(*i);
+ for (const std::string& feature : features) {
+ DCHECK_EQ(android::base::Trim(feature), feature)
+ << "Feature name is not trimmed: '" << feature << "'";
if (feature == "msa") {
msa = true;
} else if (feature == "-msa") {
diff --git a/runtime/arch/x86/instruction_set_features_x86.cc b/runtime/arch/x86/instruction_set_features_x86.cc
index e9e983c..0c3d26e 100644
--- a/runtime/arch/x86/instruction_set_features_x86.cc
+++ b/runtime/arch/x86/instruction_set_features_x86.cc
@@ -311,8 +311,9 @@
bool has_AVX = has_AVX_;
bool has_AVX2 = has_AVX2_;
bool has_POPCNT = has_POPCNT_;
- for (auto i = features.begin(); i != features.end(); i++) {
- std::string feature = android::base::Trim(*i);
+ for (const std::string& feature : features) {
+ DCHECK_EQ(android::base::Trim(feature), feature)
+ << "Feature name is not trimmed: '" << feature << "'";
if (feature == "ssse3") {
has_SSSE3 = true;
} else if (feature == "-ssse3") {
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index d29a6b7..2e1f364 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -25,6 +25,7 @@
#include <memory>
#include <queue>
#include <string>
+#include <string_view>
#include <tuple>
#include <unordered_map>
#include <utility>
@@ -3727,32 +3728,7 @@
}
}
if (initialize_oat_file_data) {
- // Initialize the .data.bimg.rel.ro section.
- if (!oat_file->GetBootImageRelocations().empty()) {
- uint8_t* reloc_begin = const_cast<uint8_t*>(oat_file->DataBimgRelRoBegin());
- CheckedCall(mprotect,
- "un-protect boot image relocations",
- reloc_begin,
- oat_file->DataBimgRelRoSize(),
- PROT_READ | PROT_WRITE);
- uint32_t boot_image_begin = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(
- Runtime::Current()->GetHeap()->GetBootImageSpaces().front()->Begin()));
- for (const uint32_t& relocation : oat_file->GetBootImageRelocations()) {
- const_cast<uint32_t&>(relocation) += boot_image_begin;
- }
- CheckedCall(mprotect,
- "protect boot image relocations",
- reloc_begin,
- oat_file->DataBimgRelRoSize(),
- PROT_READ);
- }
-
- // Initialize the .bss section.
- // TODO: Pre-initialize from boot/app image?
- ArtMethod* resolution_method = Runtime::Current()->GetResolutionMethod();
- for (ArtMethod*& entry : oat_file->GetBssMethods()) {
- entry = resolution_method;
- }
+ oat_file->InitializeRelocations();
}
jweak dex_cache_jweak = vm->AddWeakGlobalRef(self, dex_cache);
dex_cache->SetDexFile(&dex_file);
@@ -7040,11 +7016,13 @@
return FindSameNameAndSignature(cmp, rest...);
}
+namespace {
+
// Check that all vtable entries are present in this class's virtuals or are the same as a
// superclasses vtable entry.
-static void CheckClassOwnsVTableEntries(Thread* self,
- Handle<mirror::Class> klass,
- PointerSize pointer_size)
+void CheckClassOwnsVTableEntries(Thread* self,
+ Handle<mirror::Class> klass,
+ PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_) {
StackHandleScope<2> hs(self);
Handle<mirror::PointerArray> check_vtable(hs.NewHandle(klass->GetVTableDuringLinking()));
@@ -7074,47 +7052,185 @@
// Check to make sure the vtable does not have duplicates. Duplicates could cause problems when a
// method is overridden in a subclass.
-static void CheckVTableHasNoDuplicates(Thread* self,
- Handle<mirror::Class> klass,
- PointerSize pointer_size)
+template <PointerSize kPointerSize>
+void CheckVTableHasNoDuplicates(Thread* self, Handle<mirror::Class> klass)
REQUIRES_SHARED(Locks::mutator_lock_) {
StackHandleScope<1> hs(self);
Handle<mirror::PointerArray> vtable(hs.NewHandle(klass->GetVTableDuringLinking()));
int32_t num_entries = vtable->GetLength();
- for (int32_t i = 0; i < num_entries; i++) {
- ArtMethod* vtable_entry = vtable->GetElementPtrSize<ArtMethod*>(i, pointer_size);
- // Don't bother if we cannot 'see' the vtable entry (i.e. it is a package-private member maybe).
+
+ // Observations:
+ // * The older implementation was O(n^2) and got too expensive for apps with larger classes.
+ // * Many classes do not override Object functions (e.g., equals/hashCode/toString). Thus,
+ // for many classes outside of libcore a cross-dexfile check has to be run anyways.
+ // * In the cross-dexfile case, with the O(n^2), in the best case O(n) cross checks would have
+ // to be done. It is thus OK in a single-pass algorithm to read all data, anyways.
+ // * The single-pass algorithm will trade memory for speed, but that is OK.
+
+ CHECK_GT(num_entries, 0);
+
+ auto log_fn = [&vtable, &klass](int32_t i, int32_t j) REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod* m1 = vtable->GetElementPtrSize<ArtMethod*, kPointerSize>(i);
+ ArtMethod* m2 = vtable->GetElementPtrSize<ArtMethod*, kPointerSize>(j);
+ LOG(WARNING) << "vtable entries " << i << " and " << j << " are identical for "
+ << klass->PrettyClass() << " in method " << m1->PrettyMethod()
+ << " (0x" << std::hex << reinterpret_cast<uintptr_t>(m1) << ") and "
+ << m2->PrettyMethod() << " (0x" << std::hex
+ << reinterpret_cast<uintptr_t>(m2) << ")";
+ };
+ struct BaseHashType {
+ static size_t HashCombine(size_t seed, size_t val) {
+ return seed ^ (val + 0x9e3779b9 + (seed << 6) + (seed >> 2));
+ }
+ };
+
+ // Check assuming all entries come from the same dex file.
+ {
+ // Find the first interesting method and its dex file.
+ int32_t start = 0;
+ for (; start < num_entries; ++start) {
+ ArtMethod* vtable_entry = vtable->GetElementPtrSize<ArtMethod*, kPointerSize>(start);
+ // Don't bother if we cannot 'see' the vtable entry (i.e. it is a package-private member
+ // maybe).
+ if (!klass->CanAccessMember(vtable_entry->GetDeclaringClass(),
+ vtable_entry->GetAccessFlags())) {
+ continue;
+ }
+ break;
+ }
+ if (start == num_entries) {
+ return;
+ }
+ const DexFile* dex_file =
+ vtable->GetElementPtrSize<ArtMethod*, kPointerSize>(start)->
+ GetInterfaceMethodIfProxy(kPointerSize)->GetDexFile();
+
+ // Helper function to avoid logging if we have to run the cross-file checks.
+ auto check_fn = [&](bool log_warn) REQUIRES_SHARED(Locks::mutator_lock_) {
+ // Use a map to store seen entries, as the storage space is too large for a bitvector.
+ using PairType = std::pair<uint32_t, uint16_t>;
+ struct PairHash : BaseHashType {
+ size_t operator()(const PairType& key) const {
+ return BaseHashType::HashCombine(BaseHashType::HashCombine(0, key.first), key.second);
+ }
+ };
+ std::unordered_map<PairType, int32_t, PairHash> seen;
+ seen.reserve(2 * num_entries);
+ bool need_slow_path = false;
+ bool found_dup = false;
+ for (int i = start; i < num_entries; ++i) {
+ // Can use Unchecked here as the start loop already ensured that the arrays are correct
+ // wrt/ kPointerSize.
+ ArtMethod* vtable_entry = vtable->GetElementPtrSizeUnchecked<ArtMethod*, kPointerSize>(i);
+ if (!klass->CanAccessMember(vtable_entry->GetDeclaringClass(),
+ vtable_entry->GetAccessFlags())) {
+ continue;
+ }
+ ArtMethod* m = vtable_entry->GetInterfaceMethodIfProxy(kPointerSize);
+ if (dex_file != m->GetDexFile()) {
+ need_slow_path = true;
+ break;
+ }
+ const dex::MethodId* m_mid = &dex_file->GetMethodId(m->GetDexMethodIndex());
+ PairType pair = std::make_pair(m_mid->name_idx_.index_, m_mid->proto_idx_.index_);
+ auto it = seen.find(pair);
+ if (it != seen.end()) {
+ found_dup = true;
+ if (log_warn) {
+ log_fn(it->second, i);
+ }
+ } else {
+ seen.emplace(pair, i);
+ }
+ }
+ return std::make_pair(need_slow_path, found_dup);
+ };
+ std::pair<bool, bool> result = check_fn(/* log_warn= */ false);
+ if (!result.first) {
+ if (result.second) {
+ check_fn(/* log_warn= */ true);
+ }
+ return;
+ }
+ }
+
+ // Need to check across dex files.
+ struct Entry {
+ size_t cached_hash = 0;
+ const char* name = nullptr;
+ Signature signature = Signature::NoSignature();
+ uint32_t name_len = 0;
+
+ Entry(const DexFile* dex_file, const dex::MethodId& mid)
+ : name(dex_file->StringDataAndUtf16LengthByIdx(mid.name_idx_, &name_len)),
+ signature(dex_file->GetMethodSignature(mid)) {
+ }
+
+ bool operator==(const Entry& other) const {
+ if (name_len != other.name_len || strcmp(name, other.name) != 0) {
+ return false;
+ }
+ return signature == other.signature;
+ }
+ };
+ struct EntryHash {
+ size_t operator()(const Entry& key) const {
+ return key.cached_hash;
+ }
+ };
+ std::unordered_map<Entry, int32_t, EntryHash> map;
+ for (int32_t i = 0; i < num_entries; ++i) {
+ // Can use Unchecked here as the first loop already ensured that the arrays are correct
+ // wrt/ kPointerSize.
+ ArtMethod* vtable_entry = vtable->GetElementPtrSizeUnchecked<ArtMethod*, kPointerSize>(i);
+ // Don't bother if we cannot 'see' the vtable entry (i.e. it is a package-private member
+ // maybe).
if (!klass->CanAccessMember(vtable_entry->GetDeclaringClass(),
vtable_entry->GetAccessFlags())) {
continue;
}
- MethodNameAndSignatureComparator name_comparator(
- vtable_entry->GetInterfaceMethodIfProxy(pointer_size));
- for (int32_t j = i + 1; j < num_entries; j++) {
- ArtMethod* other_entry = vtable->GetElementPtrSize<ArtMethod*>(j, pointer_size);
- if (!klass->CanAccessMember(other_entry->GetDeclaringClass(),
- other_entry->GetAccessFlags())) {
- continue;
- }
- if (vtable_entry == other_entry ||
- name_comparator.HasSameNameAndSignature(
- other_entry->GetInterfaceMethodIfProxy(pointer_size))) {
- LOG(WARNING) << "vtable entries " << i << " and " << j << " are identical for "
- << klass->PrettyClass() << " in method " << vtable_entry->PrettyMethod()
- << " (0x" << std::hex << reinterpret_cast<uintptr_t>(vtable_entry) << ") and "
- << other_entry->PrettyMethod() << " (0x" << std::hex
- << reinterpret_cast<uintptr_t>(other_entry) << ")";
- }
+ ArtMethod* m = vtable_entry->GetInterfaceMethodIfProxy(kPointerSize);
+ const DexFile* dex_file = m->GetDexFile();
+ const dex::MethodId& mid = dex_file->GetMethodId(m->GetDexMethodIndex());
+
+ Entry e(dex_file, mid);
+
+ size_t string_hash = std::hash<std::string_view>()(std::string_view(e.name, e.name_len));
+ size_t sig_hash = std::hash<std::string>()(e.signature.ToString());
+ e.cached_hash = BaseHashType::HashCombine(BaseHashType::HashCombine(0u, string_hash),
+ sig_hash);
+
+ auto it = map.find(e);
+ if (it != map.end()) {
+ log_fn(it->second, i);
+ } else {
+ map.emplace(e, i);
}
}
}
+void CheckVTableHasNoDuplicates(Thread* self,
+ Handle<mirror::Class> klass,
+ PointerSize pointer_size)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ switch (pointer_size) {
+ case PointerSize::k64:
+ CheckVTableHasNoDuplicates<PointerSize::k64>(self, klass);
+ break;
+ case PointerSize::k32:
+ CheckVTableHasNoDuplicates<PointerSize::k32>(self, klass);
+ break;
+ }
+}
+
static void SanityCheckVTable(Thread* self, Handle<mirror::Class> klass, PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_) {
CheckClassOwnsVTableEntries(self, klass, pointer_size);
CheckVTableHasNoDuplicates(self, klass, pointer_size);
}
+} // namespace
+
void ClassLinker::FillImtFromSuperClass(Handle<mirror::Class> klass,
ArtMethod* unimplemented_method,
ArtMethod* imt_conflict_method,
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index a958fa6..5473b52 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -4016,8 +4016,7 @@
+ old_native_bytes / kOldNativeDiscountFactor;
size_t add_bytes_allowed = static_cast<size_t>(
NativeAllocationGcWatermark() * HeapGrowthMultiplier());
- size_t adj_start_bytes = UnsignedSum(concurrent_start_bytes_,
- add_bytes_allowed / kNewNativeDiscountFactor);
+ size_t adj_start_bytes = concurrent_start_bytes_ + add_bytes_allowed / kNewNativeDiscountFactor;
return static_cast<float>(GetBytesAllocated() + weighted_native_bytes)
/ static_cast<float>(adj_start_bytes);
}
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 1aefad5..6bdba12 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -917,13 +917,9 @@
return main_space_backup_ != nullptr;
}
- // Size_t saturating arithmetic
static ALWAYS_INLINE size_t UnsignedDifference(size_t x, size_t y) {
return x > y ? x - y : 0;
}
- static ALWAYS_INLINE size_t UnsignedSum(size_t x, size_t y) {
- return x + y > x ? x + y : std::numeric_limits<size_t>::max();
- }
static ALWAYS_INLINE bool AllocatorHasAllocationStack(AllocatorType allocator_type) {
return
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index a6a5ba2..d9d88e1 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -224,15 +224,31 @@
}
}
-template<typename T, VerifyObjectFlags kVerifyFlags>
-inline T PointerArray::GetElementPtrSize(uint32_t idx, PointerSize ptr_size) {
+template<typename T, PointerSize kPointerSize, VerifyObjectFlags kVerifyFlags>
+inline T PointerArray::GetElementPtrSize(uint32_t idx) {
// C style casts here since we sometimes have T be a pointer, or sometimes an integer
// (for stack traces).
- if (ptr_size == PointerSize::k64) {
+ if (kPointerSize == PointerSize::k64) {
return (T)static_cast<uintptr_t>(AsLongArray<kVerifyFlags>()->GetWithoutChecks(idx));
}
return (T)static_cast<uintptr_t>(AsIntArray<kVerifyFlags>()->GetWithoutChecks(idx));
}
+template<typename T, PointerSize kPointerSize, VerifyObjectFlags kVerifyFlags>
+inline T PointerArray::GetElementPtrSizeUnchecked(uint32_t idx) {
+ // C style casts here since we sometimes have T be a pointer, or sometimes an integer
+ // (for stack traces).
+ if (kPointerSize == PointerSize::k64) {
+ return (T)static_cast<uintptr_t>(AsLongArrayUnchecked<kVerifyFlags>()->GetWithoutChecks(idx));
+ }
+ return (T)static_cast<uintptr_t>(AsIntArrayUnchecked<kVerifyFlags>()->GetWithoutChecks(idx));
+}
+template<typename T, VerifyObjectFlags kVerifyFlags>
+inline T PointerArray::GetElementPtrSize(uint32_t idx, PointerSize ptr_size) {
+ if (ptr_size == PointerSize::k64) {
+ return GetElementPtrSize<T, PointerSize::k64, kVerifyFlags>(idx);
+ }
+ return GetElementPtrSize<T, PointerSize::k32, kVerifyFlags>(idx);
+}
template<bool kTransactionActive, bool kUnchecked>
inline void PointerArray::SetElementPtrSize(uint32_t idx, uint64_t element, PointerSize ptr_size) {
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index 8816c61..2e894d5 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -32,6 +32,8 @@
class MANAGED Array : public Object {
public:
+ static constexpr size_t kFirstElementOffset = 12u;
+
// The size of a java.lang.Class representing an array.
static uint32_t ClassSize(PointerSize pointer_size);
@@ -79,6 +81,17 @@
<< "Array data offset isn't aligned with component size";
return MemberOffset(data_offset);
}
+ template <size_t kComponentSize>
+ static constexpr MemberOffset DataOffset() {
+ static_assert(IsPowerOfTwo(kComponentSize), "Invalid component size");
+ constexpr size_t data_offset = RoundUp(kFirstElementOffset, kComponentSize);
+ static_assert(RoundUp(data_offset, kComponentSize) == data_offset, "RoundUp fail");
+ return MemberOffset(data_offset);
+ }
+
+ static constexpr size_t FirstElementOffset() {
+ return OFFSETOF_MEMBER(Array, first_element_);
+ }
void* GetRawData(size_t component_size, int32_t index)
REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -86,12 +99,24 @@
+ (index * component_size);
return reinterpret_cast<void*>(data);
}
+ template <size_t kComponentSize>
+ void* GetRawData(int32_t index) REQUIRES_SHARED(Locks::mutator_lock_) {
+ intptr_t data = reinterpret_cast<intptr_t>(this) + DataOffset<kComponentSize>().Int32Value() +
+ + (index * kComponentSize);
+ return reinterpret_cast<void*>(data);
+ }
const void* GetRawData(size_t component_size, int32_t index) const {
intptr_t data = reinterpret_cast<intptr_t>(this) + DataOffset(component_size).Int32Value() +
+ (index * component_size);
return reinterpret_cast<void*>(data);
}
+ template <size_t kComponentSize>
+ const void* GetRawData(int32_t index) const {
+ intptr_t data = reinterpret_cast<intptr_t>(this) + DataOffset<kComponentSize>().Int32Value() +
+ + (index * kComponentSize);
+ return reinterpret_cast<void*>(data);
+ }
// Returns true if the index is valid. If not, throws an ArrayIndexOutOfBoundsException and
// returns false.
@@ -132,11 +157,11 @@
const T* GetData() const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
- return reinterpret_cast<const T*>(GetRawData(sizeof(T), 0));
+ return reinterpret_cast<const T*>(GetRawData<sizeof(T)>(0));
}
T* GetData() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
- return reinterpret_cast<T*>(GetRawData(sizeof(T), 0));
+ return reinterpret_cast<T*>(GetRawData<sizeof(T)>(0));
}
T Get(int32_t i) ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_);
@@ -196,6 +221,15 @@
template<typename T, VerifyObjectFlags kVerifyFlags = kVerifyNone>
T GetElementPtrSize(uint32_t idx, PointerSize ptr_size)
REQUIRES_SHARED(Locks::mutator_lock_);
+ template<typename T, PointerSize kPtrSize, VerifyObjectFlags kVerifyFlags = kVerifyNone>
+ T GetElementPtrSize(uint32_t idx)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ // Same as GetElementPtrSize, but uses unchecked version of array conversion. It is thus not
+ // checked whether kPtrSize matches the underlying array. Only use after at least one invocation
+ // of GetElementPtrSize!
+ template<typename T, PointerSize kPtrSize, VerifyObjectFlags kVerifyFlags = kVerifyNone>
+ T GetElementPtrSizeUnchecked(uint32_t idx)
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kVerifyNone>
void** ElementAddress(size_t index, PointerSize ptr_size) REQUIRES_SHARED(Locks::mutator_lock_) {
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 2c2ad9b..005e272 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -253,9 +253,13 @@
}
template<VerifyObjectFlags kVerifyFlags>
+inline IntArray* Object::AsIntArrayUnchecked() {
+ return down_cast<IntArray*>(this);
+}
+template<VerifyObjectFlags kVerifyFlags>
inline IntArray* Object::AsIntArray() {
DCHECK((IsIntArray<kVerifyFlags>()));
- return down_cast<IntArray*>(this);
+ return AsIntArrayUnchecked<kVerifyFlags>();
}
template<VerifyObjectFlags kVerifyFlags>
@@ -264,9 +268,13 @@
}
template<VerifyObjectFlags kVerifyFlags>
+inline LongArray* Object::AsLongArrayUnchecked() {
+ return down_cast<LongArray*>(this);
+}
+template<VerifyObjectFlags kVerifyFlags>
inline LongArray* Object::AsLongArray() {
DCHECK((IsLongArray<kVerifyFlags>()));
- return down_cast<LongArray*>(this);
+ return AsLongArrayUnchecked<kVerifyFlags>();
}
template<VerifyObjectFlags kVerifyFlags>
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index ba222f6..ca8867d8 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -223,11 +223,15 @@
bool IsIntArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
IntArray* AsIntArray() REQUIRES_SHARED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ IntArray* AsIntArrayUnchecked() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsLongArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
LongArray* AsLongArray() REQUIRES_SHARED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ LongArray* AsLongArrayUnchecked() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsFloatArray() REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 8a0a1e7..67803b6 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -58,13 +58,14 @@
#include "elf_file.h"
#include "elf_utils.h"
#include "gc_root.h"
+#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "mirror/class.h"
#include "mirror/object-inl.h"
#include "oat.h"
#include "oat_file-inl.h"
#include "oat_file_manager.h"
-#include "runtime.h"
+#include "runtime-inl.h"
#include "vdex_file.h"
namespace art {
@@ -447,34 +448,6 @@
return true;
}
-static void DCheckIndexToBssMapping(OatFile* oat_file,
- uint32_t number_of_indexes,
- size_t slot_size,
- const IndexBssMapping* index_bss_mapping) {
- if (kIsDebugBuild && index_bss_mapping != nullptr) {
- size_t index_bits = IndexBssMappingEntry::IndexBits(number_of_indexes);
- const IndexBssMappingEntry* prev_entry = nullptr;
- for (const IndexBssMappingEntry& entry : *index_bss_mapping) {
- CHECK_ALIGNED_PARAM(entry.bss_offset, slot_size);
- // When loading a non-executable ElfOatFile, .bss symbols are not even
- // looked up, so we cannot verify the offset against BssSize().
- if (oat_file->IsExecutable()) {
- CHECK_LT(entry.bss_offset, oat_file->BssSize());
- }
- uint32_t mask = entry.GetMask(index_bits);
- CHECK_LE(POPCOUNT(mask) * slot_size, entry.bss_offset);
- size_t index_mask_span = (mask != 0u) ? 32u - index_bits - CTZ(mask) : 0u;
- CHECK_LE(index_mask_span, entry.GetIndex(index_bits));
- if (prev_entry != nullptr) {
- CHECK_LT(prev_entry->GetIndex(index_bits), entry.GetIndex(index_bits) - index_mask_span);
- }
- prev_entry = &entry;
- }
- CHECK(prev_entry != nullptr);
- CHECK_LT(prev_entry->GetIndex(index_bits), number_of_indexes);
- }
-}
-
bool OatFileBase::Setup(int zip_fd, const char* abs_dex_location, std::string* error_msg) {
if (!GetOatHeader().IsValid()) {
std::string cause = GetOatHeader().GetValidationErrorMessage();
@@ -813,12 +786,6 @@
this, &oat, i, dex_file_location, "string", &string_bss_mapping, error_msg)) {
return false;
}
- DCheckIndexToBssMapping(
- this, header->method_ids_size_, static_cast<size_t>(pointer_size), method_bss_mapping);
- DCheckIndexToBssMapping(
- this, header->type_ids_size_, sizeof(GcRoot<mirror::Class>), type_bss_mapping);
- DCheckIndexToBssMapping(
- this, header->string_ids_size_, sizeof(GcRoot<mirror::String>), string_bss_mapping);
std::string canonical_location =
DexFileLoader::GetDexCanonicalLocation(dex_file_name.c_str());
@@ -2019,6 +1986,82 @@
return oat_dex_file->GetOatClass(class_def_idx);
}
+static void DCheckIndexToBssMapping(const OatFile* oat_file,
+ uint32_t number_of_indexes,
+ size_t slot_size,
+ const IndexBssMapping* index_bss_mapping) {
+ if (kIsDebugBuild && index_bss_mapping != nullptr) {
+ size_t index_bits = IndexBssMappingEntry::IndexBits(number_of_indexes);
+ const IndexBssMappingEntry* prev_entry = nullptr;
+ for (const IndexBssMappingEntry& entry : *index_bss_mapping) {
+ CHECK_ALIGNED_PARAM(entry.bss_offset, slot_size);
+ CHECK_LT(entry.bss_offset, oat_file->BssSize());
+ uint32_t mask = entry.GetMask(index_bits);
+ CHECK_LE(POPCOUNT(mask) * slot_size, entry.bss_offset);
+ size_t index_mask_span = (mask != 0u) ? 32u - index_bits - CTZ(mask) : 0u;
+ CHECK_LE(index_mask_span, entry.GetIndex(index_bits));
+ if (prev_entry != nullptr) {
+ CHECK_LT(prev_entry->GetIndex(index_bits), entry.GetIndex(index_bits) - index_mask_span);
+ }
+ prev_entry = &entry;
+ }
+ CHECK(prev_entry != nullptr);
+ CHECK_LT(prev_entry->GetIndex(index_bits), number_of_indexes);
+ }
+}
+
+void OatFile::InitializeRelocations() const {
+ DCHECK(IsExecutable());
+
+ // Initialize the .data.bimg.rel.ro section.
+ if (!GetBootImageRelocations().empty()) {
+ uint8_t* reloc_begin = const_cast<uint8_t*>(DataBimgRelRoBegin());
+ CheckedCall(mprotect,
+ "un-protect boot image relocations",
+ reloc_begin,
+ DataBimgRelRoSize(),
+ PROT_READ | PROT_WRITE);
+ uint32_t boot_image_begin = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(
+ Runtime::Current()->GetHeap()->GetBootImageSpaces().front()->Begin()));
+ for (const uint32_t& relocation : GetBootImageRelocations()) {
+ const_cast<uint32_t&>(relocation) += boot_image_begin;
+ }
+ CheckedCall(mprotect,
+ "protect boot image relocations",
+ reloc_begin,
+ DataBimgRelRoSize(),
+ PROT_READ);
+ }
+
+ // Before initializing .bss, check the .bss mappings in debug mode.
+ if (kIsDebugBuild) {
+ PointerSize pointer_size = GetInstructionSetPointerSize(GetOatHeader().GetInstructionSet());
+ for (const OatDexFile* odf : GetOatDexFiles()) {
+ const DexFile::Header* header =
+ reinterpret_cast<const DexFile::Header*>(odf->GetDexFilePointer());
+ DCheckIndexToBssMapping(this,
+ header->method_ids_size_,
+ static_cast<size_t>(pointer_size),
+ odf->GetMethodBssMapping());
+ DCheckIndexToBssMapping(this,
+ header->type_ids_size_,
+ sizeof(GcRoot<mirror::Class>),
+ odf->GetTypeBssMapping());
+ DCheckIndexToBssMapping(this,
+ header->string_ids_size_,
+ sizeof(GcRoot<mirror::String>),
+ odf->GetStringBssMapping());
+ }
+ }
+
+ // Initialize the .bss section.
+ // TODO: Pre-initialize from boot/app image?
+ ArtMethod* resolution_method = Runtime::Current()->GetResolutionMethod();
+ for (ArtMethod*& entry : GetBssMethods()) {
+ entry = resolution_method;
+ }
+}
+
void OatDexFile::AssertAotCompiler() {
CHECK(Runtime::Current()->IsAotCompiler());
}
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 37dbe6a..04b666c 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -325,6 +325,9 @@
ArrayRef<ArtMethod*> GetBssMethods() const;
ArrayRef<GcRoot<mirror::Object>> GetBssGcRoots() const;
+ // Initialize relocation sections (.data.bimg.rel.ro and .bss).
+ void InitializeRelocations() const;
+
// Returns the absolute dex location for the encoded relative dex location.
//
// If not null, abs_dex_location is used to resolve the absolute dex
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 97dccb0..1465b14 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -199,6 +199,7 @@
};
namespace {
+
#ifdef __APPLE__
inline char** GetEnviron() {
// When Google Test is built as a framework on MacOS X, the environ variable
@@ -212,6 +213,11 @@
extern "C" char** environ;
inline char** GetEnviron() { return environ; }
#endif
+
+void CheckConstants() {
+ CHECK_EQ(mirror::Array::kFirstElementOffset, mirror::Array::FirstElementOffset());
+}
+
} // namespace
Runtime::Runtime()
@@ -284,6 +290,7 @@
verifier_logging_threshold_ms_(100) {
static_assert(Runtime::kCalleeSaveSize ==
static_cast<uint32_t>(CalleeSaveType::kLastCalleeSaveType), "Unexpected size");
+ CheckConstants();
std::fill(callee_save_methods_, callee_save_methods_ + arraysize(callee_save_methods_), 0u);
interpreter::CheckInterpreterAsmConstants();
@@ -2475,10 +2482,15 @@
instruction_set += GetInstructionSetString(kRuntimeISA);
argv->push_back(instruction_set);
- std::unique_ptr<const InstructionSetFeatures> features(InstructionSetFeatures::FromCppDefines());
- std::string feature_string("--instruction-set-features=");
- feature_string += features->GetFeatureString();
- argv->push_back(feature_string);
+ if (InstructionSetFeatures::IsRuntimeDetectionSupported()) {
+ argv->push_back("--instruction-set-features=runtime");
+ } else {
+ std::unique_ptr<const InstructionSetFeatures> features(
+ InstructionSetFeatures::FromCppDefines());
+ std::string feature_string("--instruction-set-features=");
+ feature_string += features->GetFeatureString();
+ argv->push_back(feature_string);
+ }
}
void Runtime::CreateJitCodeCache(bool rwx_memory_allowed) {