Directory restructuring of object.h
Break object.h into constituent files.
Reduce the number of #includes in other GC header files.
Introduce -inl.h files so that mirror headers do not need to #include each other (sketched below).
Check invariants of verifier RegTypes for all constructors.
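
The -inl.h split follows the usual C++ pattern: the plain header declares a class and only forward-declares the mirror types it refers to, while the matching -inl.h header holds the inline bodies and is the only place that #includes the peer headers those bodies need. A minimal sketch of that pattern, using hypothetical mirror/gadget.h and mirror/widget.h names rather than files from this change:

  // mirror/gadget.h (stub for the sketch)
  #ifndef ART_SRC_MIRROR_GADGET_H_
  #define ART_SRC_MIRROR_GADGET_H_
  namespace art {
  namespace mirror {
  class Gadget {
   public:
    int Size() const { return size_; }
   private:
    int size_;
  };
  }  // namespace mirror
  }  // namespace art
  #endif  // ART_SRC_MIRROR_GADGET_H_

  // mirror/widget.h - declaration only; forward-declares Gadget instead of including gadget.h.
  #ifndef ART_SRC_MIRROR_WIDGET_H_
  #define ART_SRC_MIRROR_WIDGET_H_
  namespace art {
  namespace mirror {
  class Gadget;  // forward declaration keeps widget.h independent of gadget.h
  class Widget {
   public:
    int GadgetSize() const;  // inline body lives in widget-inl.h
   private:
    Gadget* gadget_;
  };
  }  // namespace mirror
  }  // namespace art
  #endif  // ART_SRC_MIRROR_WIDGET_H_

  // mirror/widget-inl.h - inline bodies; the only place that pulls in gadget.h.
  #ifndef ART_SRC_MIRROR_WIDGET_INL_H_
  #define ART_SRC_MIRROR_WIDGET_INL_H_
  #include "mirror/widget.h"
  #include "mirror/gadget.h"
  namespace art {
  namespace mirror {
  inline int Widget::GadgetSize() const {
    return gadget_->Size();  // needs the complete Gadget type, hence the #include above
  }
  }  // namespace mirror
  }  // namespace art
  #endif  // ART_SRC_MIRROR_WIDGET_INL_H_

Only .cc files (and other -inl.h files, such as the new class_linker-inl.h below) include widget-inl.h; other headers include only widget.h, so the mirror include graph stays acyclic.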
Change-Id: Iecf1171c02910ac152d52947330ef456df4043bc
diff --git a/build/Android.common.mk b/build/Android.common.mk
index 1646264..21e829c 100644
--- a/build/Android.common.mk
+++ b/build/Android.common.mk
@@ -154,7 +154,6 @@
src/compiled_method.cc \
src/compiler.cc \
src/debugger.cc \
- src/dex_cache.cc \
src/dex_file.cc \
src/dex_file_verifier.cc \
src/dex_instruction.cc \
@@ -193,6 +192,15 @@
src/locks.cc \
src/mem_map.cc \
src/memory_region.cc \
+ src/mirror/abstract_method.cc \
+ src/mirror/array.cc \
+ src/mirror/class.cc \
+ src/mirror/dex_cache.cc \
+ src/mirror/field.cc \
+ src/mirror/object.cc \
+ src/mirror/stack_trace_element.cc \
+ src/mirror/string.cc \
+ src/mirror/throwable.cc \
src/monitor.cc \
src/native/dalvik_system_DexFile.cc \
src/native/dalvik_system_VMDebug.cc \
@@ -229,7 +237,6 @@
src/oat/utils/x86/managed_register_x86.cc \
src/oat_file.cc \
src/oat_writer.cc \
- src/object.cc \
src/offsets.cc \
src/os_linux.cc \
src/primitive.cc \
@@ -242,6 +249,7 @@
src/thread.cc \
src/thread_list.cc \
src/thread_pool.cc \
+ src/timing_logger.cc \
src/trace.cc \
src/utf.cc \
src/utils.cc \
@@ -354,6 +362,7 @@
src/compiler/compiler_enums.h \
src/dex_file.h \
src/dex_instruction.h \
+ src/gc/gc_type.h \
src/gc/space.h \
src/heap.h \
src/indirect_reference_table.h \
@@ -362,8 +371,9 @@
src/jdwp/jdwp.h \
src/jdwp/jdwp_constants.h \
src/locks.h \
- src/object.h \
+ src/mirror/class.h \
src/thread.h \
+ src/thread_state.h \
src/verifier/method_verifier.h
LIBARTTEST_COMMON_SRC_FILES := \
@@ -380,7 +390,6 @@
src/base/unix_file/string_file_test.cc \
src/class_linker_test.cc \
src/compiler_test.cc \
- src/dex_cache_test.cc \
src/dex_file_test.cc \
src/dex_instruction_visitor_test.cc \
src/elf_writer_test.cc \
@@ -395,10 +404,11 @@
src/intern_table_test.cc \
src/jni_compiler_test.cc \
src/jni_internal_test.cc \
+ src/mirror/dex_cache_test.cc \
+ src/mirror/object_test.cc \
src/oat/utils/arm/managed_register_arm_test.cc \
src/oat/utils/x86/managed_register_x86_test.cc \
src/oat_test.cc \
- src/object_test.cc \
src/output_stream_test.cc \
src/reference_table_test.cc \
src/runtime_support_test.cc \
diff --git a/src/barrier_test.cc b/src/barrier_test.cc
index 284be57..bb7bcb3 100644
--- a/src/barrier_test.cc
+++ b/src/barrier_test.cc
@@ -20,6 +20,7 @@
#include "atomic_integer.h"
#include "common_test.h"
+#include "mirror/object_array-inl.h"
#include "thread_pool.h"
#include "UniquePtr.h"
diff --git a/src/base/mutex.cc b/src/base/mutex.cc
index e2ab51f..d09a6a2 100644
--- a/src/base/mutex.cc
+++ b/src/base/mutex.cc
@@ -21,7 +21,6 @@
#include "base/logging.h"
#include "cutils/atomic.h"
-#include "cutils/atomic-inline.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "thread.h"
diff --git a/src/check_jni.cc b/src/check_jni.cc
index 8f4e921..e53e1c4 100644
--- a/src/check_jni.cc
+++ b/src/check_jni.cc
@@ -21,9 +21,16 @@
#include "base/logging.h"
#include "class_linker.h"
+#include "class_linker-inl.h"
+#include "gc/space.h"
+#include "mirror/class-inl.h"
+#include "mirror/field-inl.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
+#include "mirror/throwable.h"
#include "object_utils.h"
#include "scoped_thread_state_change.h"
-#include "gc/space.h"
#include "thread.h"
#include "runtime.h"
@@ -36,7 +43,7 @@
static void JniAbort(const char* jni_function_name, const char* msg) {
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
- AbstractMethod* current_method = self->GetCurrentMethod();
+ mirror::AbstractMethod* current_method = self->GetCurrentMethod();
std::ostringstream os;
os << "JNI DETECTED ERROR IN APPLICATION: " << msg;
@@ -123,7 +130,7 @@
NULL
};
-static bool ShouldTrace(JavaVMExt* vm, const AbstractMethod* method)
+static bool ShouldTrace(JavaVMExt* vm, const mirror::AbstractMethod* method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// If both "-Xcheck:jni" and "-Xjnitrace:" are enabled, we print trace messages
// when a native method that matches the -Xjnitrace argument calls a JNI function
@@ -196,14 +203,14 @@
*/
void CheckFieldType(jobject java_object, jfieldID fid, char prim, bool isStatic)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Field* f = CheckFieldID(fid);
+ mirror::Field* f = CheckFieldID(fid);
if (f == NULL) {
return;
}
- Class* field_type = FieldHelper(f).GetType();
+ mirror::Class* field_type = FieldHelper(f).GetType();
if (!field_type->IsPrimitive()) {
if (java_object != NULL) {
- Object* obj = soa_.Decode<Object*>(java_object);
+ mirror::Object* obj = soa_.Decode<mirror::Object*>(java_object);
// If java_object is a weak global ref whose referent has been cleared,
// obj will be NULL. Otherwise, obj should always be non-NULL
// and valid.
@@ -243,7 +250,7 @@
*/
void CheckInstanceFieldID(jobject java_object, jfieldID fid)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Object* o = soa_.Decode<Object*>(java_object);
+ mirror::Object* o = soa_.Decode<mirror::Object*>(java_object);
if (o == NULL || !Runtime::Current()->GetHeap()->IsHeapAddress(o)) {
Runtime::Current()->GetHeap()->DumpSpaces();
JniAbortF(function_name_, "field operation on invalid %s: %p",
@@ -251,11 +258,11 @@
return;
}
- Field* f = CheckFieldID(fid);
+ mirror::Field* f = CheckFieldID(fid);
if (f == NULL) {
return;
}
- Class* c = o->GetClass();
+ mirror::Class* c = o->GetClass();
FieldHelper fh(f);
if (c->FindInstanceField(fh.GetName(), fh.GetTypeDescriptor()) == NULL) {
JniAbortF(function_name_, "jfieldID %s not valid for an object of class %s",
@@ -278,7 +285,7 @@
*/
void CheckSig(jmethodID mid, const char* expectedType, bool isStatic)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- AbstractMethod* m = CheckMethodID(mid);
+ mirror::AbstractMethod* m = CheckMethodID(mid);
if (m == NULL) {
return;
}
@@ -304,8 +311,8 @@
*/
void CheckStaticFieldID(jclass java_class, jfieldID fid)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Class* c = soa_.Decode<Class*>(java_class);
- const Field* f = CheckFieldID(fid);
+ mirror::Class* c = soa_.Decode<mirror::Class*>(java_class);
+ const mirror::Field* f = CheckFieldID(fid);
if (f == NULL) {
return;
}
@@ -326,11 +333,11 @@
*/
void CheckStaticMethod(jclass java_class, jmethodID mid)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- const AbstractMethod* m = CheckMethodID(mid);
+ const mirror::AbstractMethod* m = CheckMethodID(mid);
if (m == NULL) {
return;
}
- Class* c = soa_.Decode<Class*>(java_class);
+ mirror::Class* c = soa_.Decode<mirror::Class*>(java_class);
if (!c->IsAssignableFrom(m->GetDeclaringClass())) {
JniAbortF(function_name_, "can't call static %s on class %s",
PrettyMethod(m).c_str(), PrettyClass(c).c_str());
@@ -346,11 +353,11 @@
*/
void CheckVirtualMethod(jobject java_object, jmethodID mid)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- const AbstractMethod* m = CheckMethodID(mid);
+ const mirror::AbstractMethod* m = CheckMethodID(mid);
if (m == NULL) {
return;
}
- Object* o = soa_.Decode<Object*>(java_object);
+ mirror::Object* o = soa_.Decode<mirror::Object*>(java_object);
if (!o->InstanceOf(m->GetDeclaringClass())) {
JniAbortF(function_name_, "can't call %s on instance of %s",
PrettyMethod(m).c_str(), PrettyTypeOf(o).c_str());
@@ -397,7 +404,7 @@
SHARED_LOCKS_REQUIRED (Locks::mutator_lock_) {
va_list ap;
- const AbstractMethod* traceMethod = NULL;
+ const mirror::AbstractMethod* traceMethod = NULL;
if ((!soa_.Vm()->trace.empty() || VLOG_IS_ON(third_party_jni)) && has_method_) {
// We need to guard some of the invocation interface's calls: a bad caller might
// use DetachCurrentThread or GetEnv on a thread that's not yet attached.
@@ -455,7 +462,7 @@
msg += (b ? "JNI_TRUE" : "JNI_FALSE");
} else if (ch == 'c') { // jclass
jclass jc = va_arg(ap, jclass);
- Class* c = reinterpret_cast<Class*>(Thread::Current()->DecodeJObject(jc));
+ mirror::Class* c = reinterpret_cast<mirror::Class*>(Thread::Current()->DecodeJObject(jc));
if (c == NULL) {
msg += "NULL";
} else if (c == kInvalidIndirectRefObject || !Runtime::Current()->GetHeap()->IsHeapAddress(c)) {
@@ -470,7 +477,7 @@
}
} else if (ch == 'f') { // jfieldID
jfieldID fid = va_arg(ap, jfieldID);
- Field* f = reinterpret_cast<Field*>(fid);
+ mirror::Field* f = reinterpret_cast<mirror::Field*>(fid);
msg += PrettyField(f);
if (!entry) {
StringAppendF(&msg, " (%p)", fid);
@@ -483,7 +490,7 @@
StringAppendF(&msg, "%d", i);
} else if (ch == 'm') { // jmethodID
jmethodID mid = va_arg(ap, jmethodID);
- AbstractMethod* m = reinterpret_cast<AbstractMethod*>(mid);
+ mirror::AbstractMethod* m = reinterpret_cast<mirror::AbstractMethod*>(mid);
msg += PrettyMethod(m);
if (!entry) {
StringAppendF(&msg, " (%p)", mid);
@@ -623,7 +630,7 @@
return false;
}
- Object* obj = soa_.Decode<Object*>(java_object);
+ mirror::Object* obj = soa_.Decode<mirror::Object*>(java_object);
if (!Runtime::Current()->GetHeap()->IsHeapAddress(obj)) {
Runtime::Current()->GetHeap()->DumpSpaces();
JniAbortF(function_name_, "%s is an invalid %s: %p (%p)",
@@ -677,7 +684,7 @@
return;
}
- Array* a = soa_.Decode<Array*>(java_array);
+ mirror::Array* a = soa_.Decode<mirror::Array*>(java_array);
if (!Runtime::Current()->GetHeap()->IsHeapAddress(a)) {
Runtime::Current()->GetHeap()->DumpSpaces();
JniAbortF(function_name_, "jarray is an invalid %s: %p (%p)",
@@ -693,12 +700,12 @@
}
}
- Field* CheckFieldID(jfieldID fid) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Field* CheckFieldID(jfieldID fid) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (fid == NULL) {
JniAbortF(function_name_, "jfieldID was NULL");
return NULL;
}
- Field* f = soa_.DecodeField(fid);
+ mirror::Field* f = soa_.DecodeField(fid);
if (!Runtime::Current()->GetHeap()->IsHeapAddress(f) || !f->IsField()) {
Runtime::Current()->GetHeap()->DumpSpaces();
JniAbortF(function_name_, "invalid jfieldID: %p", fid);
@@ -707,12 +714,12 @@
return f;
}
- AbstractMethod* CheckMethodID(jmethodID mid) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::AbstractMethod* CheckMethodID(jmethodID mid) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (mid == NULL) {
JniAbortF(function_name_, "jmethodID was NULL");
return NULL;
}
- AbstractMethod* m = soa_.DecodeMethod(mid);
+ mirror::AbstractMethod* m = soa_.DecodeMethod(mid);
if (!Runtime::Current()->GetHeap()->IsHeapAddress(m) || !m->IsMethod()) {
Runtime::Current()->GetHeap()->DumpSpaces();
JniAbortF(function_name_, "invalid jmethodID: %p", mid);
@@ -733,7 +740,7 @@
return;
}
- Object* o = soa_.Decode<Object*>(java_object);
+ mirror::Object* o = soa_.Decode<mirror::Object*>(java_object);
if (!Runtime::Current()->GetHeap()->IsHeapAddress(o)) {
Runtime::Current()->GetHeap()->DumpSpaces();
// TODO: when we remove work_around_app_jni_bugs, this should be impossible.
@@ -1084,7 +1091,7 @@
static void* CreateGuardedPACopy(JNIEnv* env, const jarray java_array, jboolean* isCopy) {
ScopedObjectAccess soa(env);
- Array* a = soa.Decode<Array*>(java_array);
+ mirror::Array* a = soa.Decode<mirror::Array*>(java_array);
size_t component_size = a->GetClass()->GetComponentSize();
size_t byte_count = a->GetLength() * component_size;
void* result = GuardedCopy::Create(a->GetRawData(component_size), byte_count, true);
@@ -1104,7 +1111,7 @@
}
ScopedObjectAccess soa(env);
- Array* a = soa.Decode<Array*>(java_array);
+ mirror::Array* a = soa.Decode<mirror::Array*>(java_array);
GuardedCopy::Check(__FUNCTION__, dataBuf, true);
@@ -1467,7 +1474,7 @@
#define NON_VOID_RETURN(_retsig, _ctype) return CHECK_JNI_EXIT(_retsig, (_ctype) result)
#define VOID_RETURN CHECK_JNI_EXIT_VOID()
-CALL(jobject, Object, Object* result, result = reinterpret_cast<Object*>, NON_VOID_RETURN("L", jobject), "L");
+CALL(jobject, Object, mirror::Object* result, result = reinterpret_cast<mirror::Object*>, NON_VOID_RETURN("L", jobject), "L");
CALL(jboolean, Boolean, jboolean result, result =, NON_VOID_RETURN("Z", jboolean), "Z");
CALL(jbyte, Byte, jbyte result, result =, NON_VOID_RETURN("B", jbyte), "B");
CALL(jchar, Char, jchar result, result =, NON_VOID_RETURN("C", jchar), "C");
@@ -1492,7 +1499,7 @@
CHECK_JNI_ENTRY(kFlag_CritOkay, "Esp", env, java_string, isCopy);
const jchar* result = baseEnv(env)->GetStringChars(env, java_string, isCopy);
if (sc.ForceCopy() && result != NULL) {
- String* s = sc.soa().Decode<String*>(java_string);
+ mirror::String* s = sc.soa().Decode<mirror::String*>(java_string);
int byteCount = s->GetLength() * 2;
result = (const jchar*) GuardedCopy::Create(result, byteCount, false);
if (isCopy != NULL) {
@@ -1719,7 +1726,7 @@
CHECK_JNI_ENTRY(kFlag_CritGet, "Esp", env, java_string, isCopy);
const jchar* result = baseEnv(env)->GetStringCritical(env, java_string, isCopy);
if (sc.ForceCopy() && result != NULL) {
- String* s = sc.soa().Decode<String*>(java_string);
+ mirror::String* s = sc.soa().Decode<mirror::String*>(java_string);
int byteCount = s->GetLength() * 2;
result = (const jchar*) GuardedCopy::Create(result, byteCount, false);
if (isCopy != NULL) {
diff --git a/src/class_linker-inl.h b/src/class_linker-inl.h
new file mode 100644
index 0000000..6cf4991
--- /dev/null
+++ b/src/class_linker-inl.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_CLASS_LINKER_INL_H_
+#define ART_SRC_CLASS_LINKER_INL_H_
+
+#include "class_linker.h"
+
+#include "mirror/dex_cache.h"
+#include "mirror/field.h"
+#include "mirror/iftable.h"
+#include "mirror/object_array.h"
+
+namespace art {
+
+inline mirror::String* ClassLinker::ResolveString(uint32_t string_idx,
+ const mirror::AbstractMethod* referrer) {
+ mirror::String* resolved_string = referrer->GetDexCacheStrings()->Get(string_idx);
+ if (UNLIKELY(resolved_string == NULL)) {
+ mirror::Class* declaring_class = referrer->GetDeclaringClass();
+ mirror::DexCache* dex_cache = declaring_class->GetDexCache();
+ const DexFile& dex_file = *dex_cache->GetDexFile();
+ resolved_string = ResolveString(dex_file, string_idx, dex_cache);
+ }
+ return resolved_string;
+}
+
+inline mirror::Class* ClassLinker::ResolveType(uint16_t type_idx,
+ const mirror::AbstractMethod* referrer) {
+ mirror::Class* resolved_type = referrer->GetDexCacheResolvedTypes()->Get(type_idx);
+ if (UNLIKELY(resolved_type == NULL)) {
+ mirror::Class* declaring_class = referrer->GetDeclaringClass();
+ mirror::DexCache* dex_cache = declaring_class->GetDexCache();
+ mirror::ClassLoader* class_loader = declaring_class->GetClassLoader();
+ const DexFile& dex_file = *dex_cache->GetDexFile();
+ resolved_type = ResolveType(dex_file, type_idx, dex_cache, class_loader);
+ }
+ return resolved_type;
+}
+
+inline mirror::Class* ClassLinker::ResolveType(uint16_t type_idx, const mirror::Field* referrer) {
+ mirror::Class* declaring_class = referrer->GetDeclaringClass();
+ mirror::DexCache* dex_cache = declaring_class->GetDexCache();
+ mirror::Class* resolved_type = dex_cache->GetResolvedType(type_idx);
+ if (UNLIKELY(resolved_type == NULL)) {
+ mirror::ClassLoader* class_loader = declaring_class->GetClassLoader();
+ const DexFile& dex_file = *dex_cache->GetDexFile();
+ resolved_type = ResolveType(dex_file, type_idx, dex_cache, class_loader);
+ }
+ return resolved_type;
+}
+
+inline mirror::AbstractMethod* ClassLinker::ResolveMethod(uint32_t method_idx,
+ const mirror::AbstractMethod* referrer,
+ InvokeType type) {
+ mirror::AbstractMethod* resolved_method =
+ referrer->GetDexCacheResolvedMethods()->Get(method_idx);
+ if (UNLIKELY(resolved_method == NULL || resolved_method->IsRuntimeMethod())) {
+ mirror::Class* declaring_class = referrer->GetDeclaringClass();
+ mirror::DexCache* dex_cache = declaring_class->GetDexCache();
+ mirror::ClassLoader* class_loader = declaring_class->GetClassLoader();
+ const DexFile& dex_file = *dex_cache->GetDexFile();
+ resolved_method = ResolveMethod(dex_file, method_idx, dex_cache, class_loader, referrer, type);
+ }
+ return resolved_method;
+}
+
+inline mirror::Field* ClassLinker::ResolveField(uint32_t field_idx,
+ const mirror::AbstractMethod* referrer,
+ bool is_static) {
+ mirror::Field* resolved_field =
+ referrer->GetDeclaringClass()->GetDexCache()->GetResolvedField(field_idx);
+ if (UNLIKELY(resolved_field == NULL)) {
+ mirror::Class* declaring_class = referrer->GetDeclaringClass();
+ mirror::DexCache* dex_cache = declaring_class->GetDexCache();
+ mirror::ClassLoader* class_loader = declaring_class->GetClassLoader();
+ const DexFile& dex_file = *dex_cache->GetDexFile();
+ resolved_field = ResolveField(dex_file, field_idx, dex_cache, class_loader, is_static);
+ }
+ return resolved_field;
+}
+
+template <class T>
+inline mirror::ObjectArray<T>* ClassLinker::AllocObjectArray(Thread* self, size_t length) {
+ return mirror::ObjectArray<T>::Alloc(self, GetClassRoot(kObjectArrayClass), length);
+}
+
+inline mirror::ObjectArray<mirror::Class>* ClassLinker::AllocClassArray(Thread* self,
+ size_t length) {
+ return mirror::ObjectArray<mirror::Class>::Alloc(self, GetClassRoot(kClassArrayClass), length);
+}
+
+inline mirror::ObjectArray<mirror::String>* ClassLinker::AllocStringArray(Thread* self,
+ size_t length) {
+ return mirror::ObjectArray<mirror::String>::Alloc(self, GetClassRoot(kJavaLangStringArrayClass),
+ length);
+}
+
+inline mirror::ObjectArray<mirror::AbstractMethod>* ClassLinker::AllocAbstractMethodArray(Thread* self,
+ size_t length) {
+ return mirror::ObjectArray<mirror::AbstractMethod>::Alloc(self,
+ GetClassRoot(kJavaLangReflectAbstractMethodArrayClass), length);
+}
+
+inline mirror::ObjectArray<mirror::AbstractMethod>* ClassLinker::AllocMethodArray(Thread* self,
+ size_t length) {
+ return mirror::ObjectArray<mirror::AbstractMethod>::Alloc(self,
+ GetClassRoot(kJavaLangReflectMethodArrayClass), length);
+}
+
+inline mirror::IfTable* ClassLinker::AllocIfTable(Thread* self, size_t ifcount) {
+ return down_cast<mirror::IfTable*>(
+ mirror::IfTable::Alloc(self, GetClassRoot(kObjectArrayClass), ifcount * mirror::IfTable::kMax));
+}
+
+inline mirror::ObjectArray<mirror::Field>* ClassLinker::AllocFieldArray(Thread* self,
+ size_t length) {
+ return mirror::ObjectArray<mirror::Field>::Alloc(self,
+ GetClassRoot(kJavaLangReflectFieldArrayClass),
+ length);
+}
+
+inline mirror::Class* ClassLinker::GetClassRoot(ClassRoot class_root)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK(class_roots_ != NULL);
+ mirror::Class* klass = class_roots_->Get(class_root);
+ DCHECK(klass != NULL);
+ return klass;
+}
+
+} // namespace art
+
+#endif // ART_SRC_CLASS_LINKER_INL_H_
diff --git a/src/class_linker.cc b/src/class_linker.cc
index c43c397..9aa4dda 100644
--- a/src/class_linker.cc
+++ b/src/class_linker.cc
@@ -31,17 +31,28 @@
#include "base/logging.h"
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
-#include "class_loader.h"
+#include "class_linker-inl.h"
#include "debugger.h"
-#include "dex_cache.h"
#include "dex_file.h"
+#include "gc/card_table-inl.h"
#include "heap.h"
#include "intern_table.h"
#include "interpreter/interpreter.h"
#include "leb128.h"
#include "oat.h"
#include "oat_file.h"
-#include "object.h"
+#include "mirror/class.h"
+#include "mirror/class-inl.h"
+#include "mirror/class_loader.h"
+#include "mirror/dex_cache.h"
+#include "mirror/field-inl.h"
+#include "mirror/iftable-inl.h"
+#include "mirror/abstract_method.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
+#include "mirror/proxy.h"
+#include "mirror/stack_trace_element.h"
#include "object_utils.h"
#include "os.h"
#include "runtime.h"
@@ -58,6 +69,7 @@
#include "thread.h"
#include "UniquePtr.h"
#include "utils.h"
+#include "verifier/method_verifier.h"
#include "well_known_classes.h"
namespace art {
@@ -92,7 +104,7 @@
va_end(args);
}
-static void ThrowNoSuchFieldError(const StringPiece& scope, Class* c, const StringPiece& type,
+static void ThrowNoSuchFieldError(const StringPiece& scope, mirror::Class* c, const StringPiece& type,
const StringPiece& name)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ClassHelper kh(c);
@@ -116,7 +128,7 @@
va_end(args);
}
-static void ThrowEarlierClassFailure(Class* c)
+static void ThrowEarlierClassFailure(mirror::Class* c)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// The class failed to initialize on a previous attempt, so we want to throw
// a NoClassDefFoundError (v2 2.17.5). The exception to this rule is if we
@@ -239,45 +251,47 @@
// java_lang_Class comes first, it's needed for AllocClass
Thread* self = Thread::Current();
Heap* heap = Runtime::Current()->GetHeap();
- SirtRef<Class>
- java_lang_Class(self, down_cast<Class*>(heap->AllocObject(self, NULL, sizeof(ClassClass))));
+ SirtRef<mirror::Class>
+ java_lang_Class(self,
+ down_cast<mirror::Class*>(heap->AllocObject(self, NULL,
+ sizeof(mirror::ClassClass))));
CHECK(java_lang_Class.get() != NULL);
- Class::SetClassClass(java_lang_Class.get());
+ mirror::Class::SetClassClass(java_lang_Class.get());
java_lang_Class->SetClass(java_lang_Class.get());
- java_lang_Class->SetClassSize(sizeof(ClassClass));
- // AllocClass(Class*) can now be used
+ java_lang_Class->SetClassSize(sizeof(mirror::ClassClass));
+ // AllocClass(mirror::Class*) can now be used
// Class[] is used for reflection support.
- SirtRef<Class> class_array_class(self, AllocClass(self, java_lang_Class.get(), sizeof(Class)));
+ SirtRef<mirror::Class> class_array_class(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::Class)));
class_array_class->SetComponentType(java_lang_Class.get());
// java_lang_Object comes next so that object_array_class can be created.
- SirtRef<Class> java_lang_Object(self, AllocClass(self, java_lang_Class.get(), sizeof(Class)));
+ SirtRef<mirror::Class> java_lang_Object(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::Class)));
CHECK(java_lang_Object.get() != NULL);
// backfill Object as the super class of Class.
java_lang_Class->SetSuperClass(java_lang_Object.get());
- java_lang_Object->SetStatus(Class::kStatusLoaded);
+ java_lang_Object->SetStatus(mirror::Class::kStatusLoaded);
// Object[] next to hold class roots.
- SirtRef<Class> object_array_class(self, AllocClass(self, java_lang_Class.get(), sizeof(Class)));
+ SirtRef<mirror::Class> object_array_class(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::Class)));
object_array_class->SetComponentType(java_lang_Object.get());
// Setup the char class to be used for char[].
- SirtRef<Class> char_class(self, AllocClass(self, java_lang_Class.get(), sizeof(Class)));
+ SirtRef<mirror::Class> char_class(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::Class)));
// Setup the char[] class to be used for String.
- SirtRef<Class> char_array_class(self, AllocClass(self, java_lang_Class.get(), sizeof(Class)));
+ SirtRef<mirror::Class> char_array_class(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::Class)));
char_array_class->SetComponentType(char_class.get());
- CharArray::SetArrayClass(char_array_class.get());
+ mirror::CharArray::SetArrayClass(char_array_class.get());
// Setup String.
- SirtRef<Class> java_lang_String(self, AllocClass(self, java_lang_Class.get(), sizeof(StringClass)));
- String::SetClass(java_lang_String.get());
- java_lang_String->SetObjectSize(sizeof(String));
- java_lang_String->SetStatus(Class::kStatusResolved);
+ SirtRef<mirror::Class> java_lang_String(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::StringClass)));
+ mirror::String::SetClass(java_lang_String.get());
+ java_lang_String->SetObjectSize(sizeof(mirror::String));
+ java_lang_String->SetStatus(mirror::Class::kStatusResolved);
// Create storage for root classes, save away our work so far (requires descriptors).
- class_roots_ = ObjectArray<Class>::Alloc(self, object_array_class.get(), kClassRootsMax);
+ class_roots_ = mirror::ObjectArray<mirror::Class>::Alloc(self, object_array_class.get(), kClassRootsMax);
CHECK(class_roots_ != NULL);
SetClassRoot(kJavaLangClass, java_lang_Class.get());
SetClassRoot(kJavaLangObject, java_lang_Object.get());
@@ -300,68 +314,69 @@
array_iftable_ = AllocIfTable(self, 2);
// Create int array type for AllocDexCache (done in AppendToBootClassPath).
- SirtRef<Class> int_array_class(self, AllocClass(self, java_lang_Class.get(), sizeof(Class)));
+ SirtRef<mirror::Class> int_array_class(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::Class)));
int_array_class->SetComponentType(GetClassRoot(kPrimitiveInt));
- IntArray::SetArrayClass(int_array_class.get());
+ mirror::IntArray::SetArrayClass(int_array_class.get());
SetClassRoot(kIntArrayClass, int_array_class.get());
// now that these are registered, we can use AllocClass() and AllocObjectArray
// Set up DexCache. This cannot be done later since AppendToBootClassPath calls AllocDexCache.
- SirtRef<Class>
- java_lang_DexCache(self, AllocClass(self, java_lang_Class.get(), sizeof(DexCacheClass)));
+ SirtRef<mirror::Class>
+ java_lang_DexCache(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::DexCacheClass)));
SetClassRoot(kJavaLangDexCache, java_lang_DexCache.get());
- java_lang_DexCache->SetObjectSize(sizeof(DexCacheClass));
- java_lang_DexCache->SetStatus(Class::kStatusResolved);
+ java_lang_DexCache->SetObjectSize(sizeof(mirror::DexCacheClass));
+ java_lang_DexCache->SetStatus(mirror::Class::kStatusResolved);
// Constructor, Field, Method, and AbstractMethod are necessary so that FindClass can link members.
- SirtRef<Class> java_lang_reflect_Field(self, AllocClass(self, java_lang_Class.get(),
- sizeof(FieldClass)));
+ SirtRef<mirror::Class> java_lang_reflect_Field(self, AllocClass(self, java_lang_Class.get(),
+ sizeof(mirror::FieldClass)));
CHECK(java_lang_reflect_Field.get() != NULL);
- java_lang_reflect_Field->SetObjectSize(sizeof(Field));
+ java_lang_reflect_Field->SetObjectSize(sizeof(mirror::Field));
SetClassRoot(kJavaLangReflectField, java_lang_reflect_Field.get());
- java_lang_reflect_Field->SetStatus(Class::kStatusResolved);
- Field::SetClass(java_lang_reflect_Field.get());
+ java_lang_reflect_Field->SetStatus(mirror::Class::kStatusResolved);
+ mirror::Field::SetClass(java_lang_reflect_Field.get());
- SirtRef<Class> java_lang_reflect_AbstractMethod(self, AllocClass(self, java_lang_Class.get(),
- sizeof(MethodClass)));
+ SirtRef<mirror::Class> java_lang_reflect_AbstractMethod(self, AllocClass(self, java_lang_Class.get(),
+ sizeof(mirror::AbstractMethodClass)));
CHECK(java_lang_reflect_AbstractMethod.get() != NULL);
- java_lang_reflect_AbstractMethod->SetObjectSize(sizeof(AbstractMethod));
+ java_lang_reflect_AbstractMethod->SetObjectSize(sizeof(mirror::AbstractMethod));
SetClassRoot(kJavaLangReflectAbstractMethod, java_lang_reflect_AbstractMethod.get());
- java_lang_reflect_AbstractMethod->SetStatus(Class::kStatusResolved);
+ java_lang_reflect_AbstractMethod->SetStatus(mirror::Class::kStatusResolved);
- SirtRef<Class> java_lang_reflect_Constructor(self, AllocClass(self, java_lang_Class.get(),
- sizeof(MethodClass)));
+ SirtRef<mirror::Class> java_lang_reflect_Constructor(self, AllocClass(self, java_lang_Class.get(),
+ sizeof(mirror::AbstractMethodClass)));
CHECK(java_lang_reflect_Constructor.get() != NULL);
- java_lang_reflect_Constructor->SetObjectSize(sizeof(Constructor));
+ java_lang_reflect_Constructor->SetObjectSize(sizeof(mirror::Constructor));
java_lang_reflect_Constructor->SetSuperClass(java_lang_reflect_AbstractMethod.get());
SetClassRoot(kJavaLangReflectConstructor, java_lang_reflect_Constructor.get());
- java_lang_reflect_Constructor->SetStatus(Class::kStatusResolved);
+ java_lang_reflect_Constructor->SetStatus(mirror::Class::kStatusResolved);
- SirtRef<Class> java_lang_reflect_Method(self, AllocClass(self, java_lang_Class.get(),
- sizeof(MethodClass)));
+ SirtRef<mirror::Class> java_lang_reflect_Method(self, AllocClass(self, java_lang_Class.get(),
+ sizeof(mirror::AbstractMethodClass)));
CHECK(java_lang_reflect_Method.get() != NULL);
- java_lang_reflect_Method->SetObjectSize(sizeof(Method));
+ java_lang_reflect_Method->SetObjectSize(sizeof(mirror::Method));
java_lang_reflect_Method->SetSuperClass(java_lang_reflect_AbstractMethod.get());
SetClassRoot(kJavaLangReflectMethod, java_lang_reflect_Method.get());
- java_lang_reflect_Method->SetStatus(Class::kStatusResolved);
+ java_lang_reflect_Method->SetStatus(mirror::Class::kStatusResolved);
- AbstractMethod::SetClasses(java_lang_reflect_Constructor.get(), java_lang_reflect_Method.get());
+ mirror::AbstractMethod::SetClasses(java_lang_reflect_Constructor.get(),
+ java_lang_reflect_Method.get());
// Set up array classes for string, field, method
- SirtRef<Class> object_array_string(self, AllocClass(self, java_lang_Class.get(), sizeof(Class)));
+ SirtRef<mirror::Class> object_array_string(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::Class)));
object_array_string->SetComponentType(java_lang_String.get());
SetClassRoot(kJavaLangStringArrayClass, object_array_string.get());
- SirtRef<Class> object_array_abstract_method(self, AllocClass(self, java_lang_Class.get(), sizeof(Class)));
+ SirtRef<mirror::Class> object_array_abstract_method(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::Class)));
object_array_abstract_method->SetComponentType(java_lang_reflect_AbstractMethod.get());
SetClassRoot(kJavaLangReflectAbstractMethodArrayClass, object_array_abstract_method.get());
- SirtRef<Class> object_array_field(self, AllocClass(self, java_lang_Class.get(), sizeof(Class)));
+ SirtRef<mirror::Class> object_array_field(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::Class)));
object_array_field->SetComponentType(java_lang_reflect_Field.get());
SetClassRoot(kJavaLangReflectFieldArrayClass, object_array_field.get());
- SirtRef<Class> object_array_method(self, AllocClass(self, java_lang_Class.get(), sizeof(Class)));
+ SirtRef<mirror::Class> object_array_method(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::Class)));
object_array_method->SetComponentType(java_lang_reflect_Method.get());
SetClassRoot(kJavaLangReflectMethodArrayClass, object_array_method.get());
@@ -382,55 +397,55 @@
SetClassRoot(kPrimitiveChar, char_class.get()); // needs descriptor
// Object, String and DexCache need to be rerun through FindSystemClass to finish init
- java_lang_Object->SetStatus(Class::kStatusNotReady);
- Class* Object_class = FindSystemClass("Ljava/lang/Object;");
+ java_lang_Object->SetStatus(mirror::Class::kStatusNotReady);
+ mirror::Class* Object_class = FindSystemClass("Ljava/lang/Object;");
CHECK_EQ(java_lang_Object.get(), Object_class);
- CHECK_EQ(java_lang_Object->GetObjectSize(), sizeof(Object));
- java_lang_String->SetStatus(Class::kStatusNotReady);
- Class* String_class = FindSystemClass("Ljava/lang/String;");
+ CHECK_EQ(java_lang_Object->GetObjectSize(), sizeof(mirror::Object));
+ java_lang_String->SetStatus(mirror::Class::kStatusNotReady);
+ mirror::Class* String_class = FindSystemClass("Ljava/lang/String;");
CHECK_EQ(java_lang_String.get(), String_class);
- CHECK_EQ(java_lang_String->GetObjectSize(), sizeof(String));
- java_lang_DexCache->SetStatus(Class::kStatusNotReady);
- Class* DexCache_class = FindSystemClass("Ljava/lang/DexCache;");
+ CHECK_EQ(java_lang_String->GetObjectSize(), sizeof(mirror::String));
+ java_lang_DexCache->SetStatus(mirror::Class::kStatusNotReady);
+ mirror::Class* DexCache_class = FindSystemClass("Ljava/lang/DexCache;");
CHECK_EQ(java_lang_String.get(), String_class);
CHECK_EQ(java_lang_DexCache.get(), DexCache_class);
- CHECK_EQ(java_lang_DexCache->GetObjectSize(), sizeof(DexCache));
+ CHECK_EQ(java_lang_DexCache->GetObjectSize(), sizeof(mirror::DexCache));
// Setup the primitive array type classes - can't be done until Object has a vtable.
SetClassRoot(kBooleanArrayClass, FindSystemClass("[Z"));
- BooleanArray::SetArrayClass(GetClassRoot(kBooleanArrayClass));
+ mirror::BooleanArray::SetArrayClass(GetClassRoot(kBooleanArrayClass));
SetClassRoot(kByteArrayClass, FindSystemClass("[B"));
- ByteArray::SetArrayClass(GetClassRoot(kByteArrayClass));
+ mirror::ByteArray::SetArrayClass(GetClassRoot(kByteArrayClass));
- Class* found_char_array_class = FindSystemClass("[C");
+ mirror::Class* found_char_array_class = FindSystemClass("[C");
CHECK_EQ(char_array_class.get(), found_char_array_class);
SetClassRoot(kShortArrayClass, FindSystemClass("[S"));
- ShortArray::SetArrayClass(GetClassRoot(kShortArrayClass));
+ mirror::ShortArray::SetArrayClass(GetClassRoot(kShortArrayClass));
- Class* found_int_array_class = FindSystemClass("[I");
+ mirror::Class* found_int_array_class = FindSystemClass("[I");
CHECK_EQ(int_array_class.get(), found_int_array_class);
SetClassRoot(kLongArrayClass, FindSystemClass("[J"));
- LongArray::SetArrayClass(GetClassRoot(kLongArrayClass));
+ mirror::LongArray::SetArrayClass(GetClassRoot(kLongArrayClass));
SetClassRoot(kFloatArrayClass, FindSystemClass("[F"));
- FloatArray::SetArrayClass(GetClassRoot(kFloatArrayClass));
+ mirror::FloatArray::SetArrayClass(GetClassRoot(kFloatArrayClass));
SetClassRoot(kDoubleArrayClass, FindSystemClass("[D"));
- DoubleArray::SetArrayClass(GetClassRoot(kDoubleArrayClass));
+ mirror::DoubleArray::SetArrayClass(GetClassRoot(kDoubleArrayClass));
- Class* found_class_array_class = FindSystemClass("[Ljava/lang/Class;");
+ mirror::Class* found_class_array_class = FindSystemClass("[Ljava/lang/Class;");
CHECK_EQ(class_array_class.get(), found_class_array_class);
- Class* found_object_array_class = FindSystemClass("[Ljava/lang/Object;");
+ mirror::Class* found_object_array_class = FindSystemClass("[Ljava/lang/Object;");
CHECK_EQ(object_array_class.get(), found_object_array_class);
// Setup the single, global copy of "iftable".
- Class* java_lang_Cloneable = FindSystemClass("Ljava/lang/Cloneable;");
+ mirror::Class* java_lang_Cloneable = FindSystemClass("Ljava/lang/Cloneable;");
CHECK(java_lang_Cloneable != NULL);
- Class* java_io_Serializable = FindSystemClass("Ljava/io/Serializable;");
+ mirror::Class* java_io_Serializable = FindSystemClass("Ljava/io/Serializable;");
CHECK(java_io_Serializable != NULL);
// We assume that Cloneable/Serializable don't have superinterfaces -- normally we'd have to
// crawl up and explicitly list all of the supers as well.
@@ -446,79 +461,79 @@
CHECK_EQ(java_io_Serializable, kh.GetDirectInterface(1));
// Run Class, Constructor, Field, and Method through FindSystemClass. This initializes their
// dex_cache_ fields and register them in classes_.
- Class* Class_class = FindSystemClass("Ljava/lang/Class;");
+ mirror::Class* Class_class = FindSystemClass("Ljava/lang/Class;");
CHECK_EQ(java_lang_Class.get(), Class_class);
- java_lang_reflect_AbstractMethod->SetStatus(Class::kStatusNotReady);
- Class* Abstract_method_class = FindSystemClass("Ljava/lang/reflect/AbstractMethod;");
+ java_lang_reflect_AbstractMethod->SetStatus(mirror::Class::kStatusNotReady);
+ mirror::Class* Abstract_method_class = FindSystemClass("Ljava/lang/reflect/AbstractMethod;");
CHECK_EQ(java_lang_reflect_AbstractMethod.get(), Abstract_method_class);
// Method extends AbstractMethod so must reset after.
- java_lang_reflect_Method->SetStatus(Class::kStatusNotReady);
- Class* Method_class = FindSystemClass("Ljava/lang/reflect/Method;");
+ java_lang_reflect_Method->SetStatus(mirror::Class::kStatusNotReady);
+ mirror::Class* Method_class = FindSystemClass("Ljava/lang/reflect/Method;");
CHECK_EQ(java_lang_reflect_Method.get(), Method_class);
// Constructor extends AbstractMethod so must reset after.
- java_lang_reflect_Constructor->SetStatus(Class::kStatusNotReady);
- Class* Constructor_class = FindSystemClass("Ljava/lang/reflect/Constructor;");
+ java_lang_reflect_Constructor->SetStatus(mirror::Class::kStatusNotReady);
+ mirror::Class* Constructor_class = FindSystemClass("Ljava/lang/reflect/Constructor;");
CHECK_EQ(java_lang_reflect_Constructor.get(), Constructor_class);
- java_lang_reflect_Field->SetStatus(Class::kStatusNotReady);
- Class* Field_class = FindSystemClass("Ljava/lang/reflect/Field;");
+ java_lang_reflect_Field->SetStatus(mirror::Class::kStatusNotReady);
+ mirror::Class* Field_class = FindSystemClass("Ljava/lang/reflect/Field;");
CHECK_EQ(java_lang_reflect_Field.get(), Field_class);
- Class* String_array_class = FindSystemClass(class_roots_descriptors_[kJavaLangStringArrayClass]);
+ mirror::Class* String_array_class = FindSystemClass(class_roots_descriptors_[kJavaLangStringArrayClass]);
CHECK_EQ(object_array_string.get(), String_array_class);
- Class* Abstract_method_array_class =
+ mirror::Class* Abstract_method_array_class =
FindSystemClass(class_roots_descriptors_[kJavaLangReflectAbstractMethodArrayClass]);
CHECK_EQ(object_array_abstract_method.get(), Abstract_method_array_class);
- Class* Field_array_class = FindSystemClass(class_roots_descriptors_[kJavaLangReflectFieldArrayClass]);
+ mirror::Class* Field_array_class = FindSystemClass(class_roots_descriptors_[kJavaLangReflectFieldArrayClass]);
CHECK_EQ(object_array_field.get(), Field_array_class);
- Class* Method_array_class =
+ mirror::Class* Method_array_class =
FindSystemClass(class_roots_descriptors_[kJavaLangReflectMethodArrayClass]);
CHECK_EQ(object_array_method.get(), Method_array_class);
// End of special init trickery, subsequent classes may be loaded via FindSystemClass.
// Create java.lang.reflect.Proxy root.
- Class* java_lang_reflect_Proxy = FindSystemClass("Ljava/lang/reflect/Proxy;");
+ mirror::Class* java_lang_reflect_Proxy = FindSystemClass("Ljava/lang/reflect/Proxy;");
SetClassRoot(kJavaLangReflectProxy, java_lang_reflect_Proxy);
// java.lang.ref classes need to be specially flagged, but otherwise are normal classes
- Class* java_lang_ref_Reference = FindSystemClass("Ljava/lang/ref/Reference;");
+ mirror::Class* java_lang_ref_Reference = FindSystemClass("Ljava/lang/ref/Reference;");
SetClassRoot(kJavaLangRefReference, java_lang_ref_Reference);
- Class* java_lang_ref_FinalizerReference = FindSystemClass("Ljava/lang/ref/FinalizerReference;");
+ mirror::Class* java_lang_ref_FinalizerReference = FindSystemClass("Ljava/lang/ref/FinalizerReference;");
java_lang_ref_FinalizerReference->SetAccessFlags(
java_lang_ref_FinalizerReference->GetAccessFlags() |
kAccClassIsReference | kAccClassIsFinalizerReference);
- Class* java_lang_ref_PhantomReference = FindSystemClass("Ljava/lang/ref/PhantomReference;");
+ mirror::Class* java_lang_ref_PhantomReference = FindSystemClass("Ljava/lang/ref/PhantomReference;");
java_lang_ref_PhantomReference->SetAccessFlags(
java_lang_ref_PhantomReference->GetAccessFlags() |
kAccClassIsReference | kAccClassIsPhantomReference);
- Class* java_lang_ref_SoftReference = FindSystemClass("Ljava/lang/ref/SoftReference;");
+ mirror::Class* java_lang_ref_SoftReference = FindSystemClass("Ljava/lang/ref/SoftReference;");
java_lang_ref_SoftReference->SetAccessFlags(
java_lang_ref_SoftReference->GetAccessFlags() | kAccClassIsReference);
- Class* java_lang_ref_WeakReference = FindSystemClass("Ljava/lang/ref/WeakReference;");
+ mirror::Class* java_lang_ref_WeakReference = FindSystemClass("Ljava/lang/ref/WeakReference;");
java_lang_ref_WeakReference->SetAccessFlags(
java_lang_ref_WeakReference->GetAccessFlags() |
kAccClassIsReference | kAccClassIsWeakReference);
// Setup the ClassLoader, verifying the object_size_.
- Class* java_lang_ClassLoader = FindSystemClass("Ljava/lang/ClassLoader;");
- CHECK_EQ(java_lang_ClassLoader->GetObjectSize(), sizeof(ClassLoader));
+ mirror::Class* java_lang_ClassLoader = FindSystemClass("Ljava/lang/ClassLoader;");
+ CHECK_EQ(java_lang_ClassLoader->GetObjectSize(), sizeof(mirror::ClassLoader));
SetClassRoot(kJavaLangClassLoader, java_lang_ClassLoader);
// Set up java.lang.Throwable, java.lang.ClassNotFoundException, and
// java.lang.StackTraceElement as a convenience.
SetClassRoot(kJavaLangThrowable, FindSystemClass("Ljava/lang/Throwable;"));
- Throwable::SetClass(GetClassRoot(kJavaLangThrowable));
+ mirror::Throwable::SetClass(GetClassRoot(kJavaLangThrowable));
SetClassRoot(kJavaLangClassNotFoundException, FindSystemClass("Ljava/lang/ClassNotFoundException;"));
SetClassRoot(kJavaLangStackTraceElement, FindSystemClass("Ljava/lang/StackTraceElement;"));
SetClassRoot(kJavaLangStackTraceElementArrayClass, FindSystemClass("[Ljava/lang/StackTraceElement;"));
- StackTraceElement::SetClass(GetClassRoot(kJavaLangStackTraceElement));
+ mirror::StackTraceElement::SetClass(GetClassRoot(kJavaLangStackTraceElement));
FinishInit();
@@ -532,37 +547,37 @@
// Note: we hard code the field indexes here rather than using FindInstanceField
// as the types of the field can't be resolved prior to the runtime being
// fully initialized
- Class* java_lang_ref_Reference = GetClassRoot(kJavaLangRefReference);
- Class* java_lang_ref_ReferenceQueue = FindSystemClass("Ljava/lang/ref/ReferenceQueue;");
- Class* java_lang_ref_FinalizerReference = FindSystemClass("Ljava/lang/ref/FinalizerReference;");
+ mirror::Class* java_lang_ref_Reference = GetClassRoot(kJavaLangRefReference);
+ mirror::Class* java_lang_ref_ReferenceQueue = FindSystemClass("Ljava/lang/ref/ReferenceQueue;");
+ mirror::Class* java_lang_ref_FinalizerReference = FindSystemClass("Ljava/lang/ref/FinalizerReference;");
const DexFile& java_lang_dex = *java_lang_ref_Reference->GetDexCache()->GetDexFile();
- Field* pendingNext = java_lang_ref_Reference->GetInstanceField(0);
+ mirror::Field* pendingNext = java_lang_ref_Reference->GetInstanceField(0);
FieldHelper fh(pendingNext, this);
CHECK_STREQ(fh.GetName(), "pendingNext");
CHECK_EQ(java_lang_dex.GetFieldId(pendingNext->GetDexFieldIndex()).type_idx_,
java_lang_ref_Reference->GetDexTypeIndex());
- Field* queue = java_lang_ref_Reference->GetInstanceField(1);
+ mirror::Field* queue = java_lang_ref_Reference->GetInstanceField(1);
fh.ChangeField(queue);
CHECK_STREQ(fh.GetName(), "queue");
CHECK_EQ(java_lang_dex.GetFieldId(queue->GetDexFieldIndex()).type_idx_,
java_lang_ref_ReferenceQueue->GetDexTypeIndex());
- Field* queueNext = java_lang_ref_Reference->GetInstanceField(2);
+ mirror::Field* queueNext = java_lang_ref_Reference->GetInstanceField(2);
fh.ChangeField(queueNext);
CHECK_STREQ(fh.GetName(), "queueNext");
CHECK_EQ(java_lang_dex.GetFieldId(queueNext->GetDexFieldIndex()).type_idx_,
java_lang_ref_Reference->GetDexTypeIndex());
- Field* referent = java_lang_ref_Reference->GetInstanceField(3);
+ mirror::Field* referent = java_lang_ref_Reference->GetInstanceField(3);
fh.ChangeField(referent);
CHECK_STREQ(fh.GetName(), "referent");
CHECK_EQ(java_lang_dex.GetFieldId(referent->GetDexFieldIndex()).type_idx_,
GetClassRoot(kJavaLangObject)->GetDexTypeIndex());
- Field* zombie = java_lang_ref_FinalizerReference->GetInstanceField(2);
+ mirror::Field* zombie = java_lang_ref_FinalizerReference->GetInstanceField(2);
fh.ChangeField(zombie);
CHECK_STREQ(fh.GetName(), "zombie");
CHECK_EQ(java_lang_dex.GetFieldId(zombie->GetDexFieldIndex()).type_idx_,
@@ -578,7 +593,7 @@
// ensure all class_roots_ are initialized
for (size_t i = 0; i < kClassRootsMax; i++) {
ClassRoot class_root = static_cast<ClassRoot>(i);
- Class* klass = GetClassRoot(class_root);
+ mirror::Class* klass = GetClassRoot(class_root);
CHECK(klass != NULL);
DCHECK(klass->IsArrayClass() || klass->IsPrimitive() || klass->GetDexCache() != NULL);
// note SetClassRoot does additional validation.
@@ -597,7 +612,7 @@
void ClassLinker::RunRootClinits() {
Thread* self = Thread::Current();
for (size_t i = 0; i < ClassLinker::kClassRootsMax; ++i) {
- Class* c = GetClassRoot(ClassRoot(i));
+ mirror::Class* c = GetClassRoot(ClassRoot(i));
if (!c->IsArrayClass() && !c->IsPrimitive()) {
EnsureInitialized(GetClassRoot(ClassRoot(i)), true, true);
self->AssertNoPendingException();
@@ -699,7 +714,8 @@
const ImageHeader& image_header = space->GetImageHeader();
// Grab location but don't use Object::AsString as we haven't yet initialized the roots to
// check the down cast
- String* oat_location = down_cast<String*>(image_header.GetImageRoot(ImageHeader::kOatLocation));
+ mirror::String* oat_location =
+ down_cast<mirror::String*>(image_header.GetImageRoot(ImageHeader::kOatLocation));
std::string oat_filename;
oat_filename += runtime->GetHostPrefix();
oat_filename += oat_location->ToModifiedUtf8();
@@ -971,21 +987,22 @@
CHECK_EQ(oat_file->GetOatHeader().GetImageFileLocationOatChecksum(), 0U);
CHECK_EQ(oat_file->GetOatHeader().GetImageFileLocationOatDataBegin(), 0U);
CHECK(oat_file->GetOatHeader().GetImageFileLocation().empty());
- Object* dex_caches_object = space->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
- ObjectArray<DexCache>* dex_caches = dex_caches_object->AsObjectArray<DexCache>();
+ mirror::Object* dex_caches_object = space->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
+ mirror::ObjectArray<mirror::DexCache>* dex_caches =
+ dex_caches_object->AsObjectArray<mirror::DexCache>();
- ObjectArray<Class>* class_roots =
- space->GetImageHeader().GetImageRoot(ImageHeader::kClassRoots)->AsObjectArray<Class>();
+ mirror::ObjectArray<mirror::Class>* class_roots =
+ space->GetImageHeader().GetImageRoot(ImageHeader::kClassRoots)->AsObjectArray<mirror::Class>();
// Special case of setting up the String class early so that we can test arbitrary objects
// as being Strings or not
- String::SetClass(class_roots->Get(kJavaLangString));
+ mirror::String::SetClass(class_roots->Get(kJavaLangString));
CHECK_EQ(oat_file->GetOatHeader().GetDexFileCount(),
static_cast<uint32_t>(dex_caches->GetLength()));
Thread* self = Thread::Current();
for (int i = 0; i < dex_caches->GetLength(); i++) {
- SirtRef<DexCache> dex_cache(self, dex_caches->Get(i));
+ SirtRef<mirror::DexCache> dex_cache(self, dex_caches->Get(i));
const std::string& dex_file_location(dex_cache->GetLocation()->ToModifiedUtf8());
const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_file_location);
CHECK(oat_dex_file != NULL) << oat_file->GetLocation() << " " << dex_file_location;
@@ -1008,33 +1025,33 @@
}
// reinit class_roots_
- Class::SetClassClass(class_roots->Get(kJavaLangClass));
+ mirror::Class::SetClassClass(class_roots->Get(kJavaLangClass));
class_roots_ = class_roots;
// reinit array_iftable_ from any array class instance, they should be ==
array_iftable_ = GetClassRoot(kObjectArrayClass)->GetIfTable();
DCHECK(array_iftable_ == GetClassRoot(kBooleanArrayClass)->GetIfTable());
// String class root was set above
- Field::SetClass(GetClassRoot(kJavaLangReflectField));
- AbstractMethod::SetClasses(GetClassRoot(kJavaLangReflectConstructor),
+ mirror::Field::SetClass(GetClassRoot(kJavaLangReflectField));
+ mirror::AbstractMethod::SetClasses(GetClassRoot(kJavaLangReflectConstructor),
GetClassRoot(kJavaLangReflectMethod));
- BooleanArray::SetArrayClass(GetClassRoot(kBooleanArrayClass));
- ByteArray::SetArrayClass(GetClassRoot(kByteArrayClass));
- CharArray::SetArrayClass(GetClassRoot(kCharArrayClass));
- DoubleArray::SetArrayClass(GetClassRoot(kDoubleArrayClass));
- FloatArray::SetArrayClass(GetClassRoot(kFloatArrayClass));
- IntArray::SetArrayClass(GetClassRoot(kIntArrayClass));
- LongArray::SetArrayClass(GetClassRoot(kLongArrayClass));
- ShortArray::SetArrayClass(GetClassRoot(kShortArrayClass));
- Throwable::SetClass(GetClassRoot(kJavaLangThrowable));
- StackTraceElement::SetClass(GetClassRoot(kJavaLangStackTraceElement));
+ mirror::BooleanArray::SetArrayClass(GetClassRoot(kBooleanArrayClass));
+ mirror::ByteArray::SetArrayClass(GetClassRoot(kByteArrayClass));
+ mirror::CharArray::SetArrayClass(GetClassRoot(kCharArrayClass));
+ mirror::DoubleArray::SetArrayClass(GetClassRoot(kDoubleArrayClass));
+ mirror::FloatArray::SetArrayClass(GetClassRoot(kFloatArrayClass));
+ mirror::IntArray::SetArrayClass(GetClassRoot(kIntArrayClass));
+ mirror::LongArray::SetArrayClass(GetClassRoot(kLongArrayClass));
+ mirror::ShortArray::SetArrayClass(GetClassRoot(kShortArrayClass));
+ mirror::Throwable::SetClass(GetClassRoot(kJavaLangThrowable));
+ mirror::StackTraceElement::SetClass(GetClassRoot(kJavaLangStackTraceElement));
FinishInit();
VLOG(startup) << "ClassLinker::InitFromImage exiting";
}
-void ClassLinker::InitFromImageCallback(Object* obj, void* arg) {
+void ClassLinker::InitFromImageCallback(mirror::Object* obj, void* arg) {
DCHECK(obj != NULL);
DCHECK(arg != NULL);
ClassLinker* class_linker = reinterpret_cast<ClassLinker*>(arg);
@@ -1045,9 +1062,9 @@
}
if (obj->IsClass()) {
// restore class to ClassLinker::classes_ table
- Class* klass = obj->AsClass();
+ mirror::Class* klass = obj->AsClass();
ClassHelper kh(klass, class_linker);
- Class* existing = class_linker->InsertClass(kh.GetDescriptor(), klass, true);
+ mirror::Class* existing = class_linker->InsertClass(kh.GetDescriptor(), klass, true);
DCHECK(existing == NULL) << kh.GetDescriptor();
return;
}
@@ -1056,7 +1073,7 @@
// Keep in sync with InitCallback. Anything we visit, we need to
// reinit references to when reinitializing a ClassLinker from a
// mapped image.
-void ClassLinker::VisitRoots(Heap::RootVisitor* visitor, void* arg) {
+void ClassLinker::VisitRoots(RootVisitor* visitor, void* arg) {
visitor(class_roots_, arg);
Thread* self = Thread::Current();
{
@@ -1096,16 +1113,16 @@
}
}
-static bool GetClassesVisitor(Class* c, void* arg) {
- std::set<Class*>* classes = reinterpret_cast<std::set<Class*>*>(arg);
+static bool GetClassesVisitor(mirror::Class* c, void* arg) {
+ std::set<mirror::Class*>* classes = reinterpret_cast<std::set<mirror::Class*>*>(arg);
classes->insert(c);
return true;
}
void ClassLinker::VisitClassesWithoutClassesLock(ClassVisitor* visitor, void* arg) const {
- std::set<Class*> classes;
+ std::set<mirror::Class*> classes;
VisitClasses(GetClassesVisitor, &classes);
- typedef std::set<Class*>::const_iterator It; // TODO: C++0x auto
+ typedef std::set<mirror::Class*>::const_iterator It; // TODO: C++0x auto
for (It it = classes.begin(), end = classes.end(); it != end; ++it) {
if (!visitor(*it, arg)) {
return;
@@ -1115,57 +1132,61 @@
ClassLinker::~ClassLinker() {
- Class::ResetClass();
- String::ResetClass();
- Field::ResetClass();
- AbstractMethod::ResetClasses();
- BooleanArray::ResetArrayClass();
- ByteArray::ResetArrayClass();
- CharArray::ResetArrayClass();
- DoubleArray::ResetArrayClass();
- FloatArray::ResetArrayClass();
- IntArray::ResetArrayClass();
- LongArray::ResetArrayClass();
- ShortArray::ResetArrayClass();
- Throwable::ResetClass();
- StackTraceElement::ResetClass();
+ mirror::Class::ResetClass();
+ mirror::String::ResetClass();
+ mirror::Field::ResetClass();
+ mirror::AbstractMethod::ResetClasses();
+ mirror::BooleanArray::ResetArrayClass();
+ mirror::ByteArray::ResetArrayClass();
+ mirror::CharArray::ResetArrayClass();
+ mirror::DoubleArray::ResetArrayClass();
+ mirror::FloatArray::ResetArrayClass();
+ mirror::IntArray::ResetArrayClass();
+ mirror::LongArray::ResetArrayClass();
+ mirror::ShortArray::ResetArrayClass();
+ mirror::Throwable::ResetClass();
+ mirror::StackTraceElement::ResetClass();
STLDeleteElements(&boot_class_path_);
STLDeleteElements(&oat_files_);
}
-DexCache* ClassLinker::AllocDexCache(Thread* self, const DexFile& dex_file) {
+mirror::DexCache* ClassLinker::AllocDexCache(Thread* self, const DexFile& dex_file) {
Heap* heap = Runtime::Current()->GetHeap();
- Class* dex_cache_class = GetClassRoot(kJavaLangDexCache);
- SirtRef<DexCache> dex_cache(self,
- down_cast<DexCache*>(heap->AllocObject(self, dex_cache_class,
- dex_cache_class->GetObjectSize())));
+ mirror::Class* dex_cache_class = GetClassRoot(kJavaLangDexCache);
+ SirtRef<mirror::DexCache> dex_cache(self,
+ down_cast<mirror::DexCache*>(heap->AllocObject(self, dex_cache_class,
+ dex_cache_class->GetObjectSize())));
if (dex_cache.get() == NULL) {
return NULL;
}
- SirtRef<String> location(self, intern_table_->InternStrong(dex_file.GetLocation().c_str()));
+ SirtRef<mirror::String>
+ location(self, intern_table_->InternStrong(dex_file.GetLocation().c_str()));
if (location.get() == NULL) {
return NULL;
}
- SirtRef<ObjectArray<String> > strings(self, AllocStringArray(self, dex_file.NumStringIds()));
+ SirtRef<mirror::ObjectArray<mirror::String> >
+ strings(self, AllocStringArray(self, dex_file.NumStringIds()));
if (strings.get() == NULL) {
return NULL;
}
- SirtRef<ObjectArray<Class> > types(self, AllocClassArray(self, dex_file.NumTypeIds()));
+ SirtRef<mirror::ObjectArray<mirror::Class> >
+ types(self, AllocClassArray(self, dex_file.NumTypeIds()));
if (types.get() == NULL) {
return NULL;
}
- SirtRef<ObjectArray<AbstractMethod> >
+ SirtRef<mirror::ObjectArray<mirror::AbstractMethod> >
methods(self, AllocAbstractMethodArray(self, dex_file.NumMethodIds()));
if (methods.get() == NULL) {
return NULL;
}
- SirtRef<ObjectArray<Field> > fields(self, AllocFieldArray(self, dex_file.NumFieldIds()));
+ SirtRef<mirror::ObjectArray<mirror::Field> >
+ fields(self, AllocFieldArray(self, dex_file.NumFieldIds()));
if (fields.get() == NULL) {
return NULL;
}
- SirtRef<ObjectArray<StaticStorageBase> >
+ SirtRef<mirror::ObjectArray<mirror::StaticStorageBase> >
initialized_static_storage(self,
- AllocObjectArray<StaticStorageBase>(self, dex_file.NumTypeIds()));
+ AllocObjectArray<mirror::StaticStorageBase>(self, dex_file.NumTypeIds()));
if (initialized_static_storage.get() == NULL) {
return NULL;
}
@@ -1180,40 +1201,41 @@
return dex_cache.get();
}
-Class* ClassLinker::AllocClass(Thread* self, Class* java_lang_Class, size_t class_size) {
- DCHECK_GE(class_size, sizeof(Class));
+mirror::Class* ClassLinker::AllocClass(Thread* self, mirror::Class* java_lang_Class,
+ size_t class_size) {
+ DCHECK_GE(class_size, sizeof(mirror::Class));
Heap* heap = Runtime::Current()->GetHeap();
- SirtRef<Class> klass(self,
+ SirtRef<mirror::Class> klass(self,
heap->AllocObject(self, java_lang_Class, class_size)->AsClass());
klass->SetPrimitiveType(Primitive::kPrimNot); // default to not being primitive
klass->SetClassSize(class_size);
return klass.get();
}
-Class* ClassLinker::AllocClass(Thread* self, size_t class_size) {
+mirror::Class* ClassLinker::AllocClass(Thread* self, size_t class_size) {
return AllocClass(self, GetClassRoot(kJavaLangClass), class_size);
}
-Field* ClassLinker::AllocField(Thread* self) {
- return down_cast<Field*>(GetClassRoot(kJavaLangReflectField)->AllocObject(self));
+mirror::Field* ClassLinker::AllocField(Thread* self) {
+ return down_cast<mirror::Field*>(GetClassRoot(kJavaLangReflectField)->AllocObject(self));
}
-Method* ClassLinker::AllocMethod(Thread* self) {
- return down_cast<Method*>(GetClassRoot(kJavaLangReflectMethod)->AllocObject(self));
+mirror::Method* ClassLinker::AllocMethod(Thread* self) {
+ return down_cast<mirror::Method*>(GetClassRoot(kJavaLangReflectMethod)->AllocObject(self));
}
-Constructor* ClassLinker::AllocConstructor(Thread* self) {
- return down_cast<Constructor*>(GetClassRoot(kJavaLangReflectConstructor)->AllocObject(self));
+mirror::Constructor* ClassLinker::AllocConstructor(Thread* self) {
+ return down_cast<mirror::Constructor*>(GetClassRoot(kJavaLangReflectConstructor)->AllocObject(self));
}
-ObjectArray<StackTraceElement>* ClassLinker::AllocStackTraceElementArray(Thread* self,
- size_t length) {
- return ObjectArray<StackTraceElement>::Alloc(self,
- GetClassRoot(kJavaLangStackTraceElementArrayClass),
- length);
+mirror::ObjectArray<mirror::StackTraceElement>* ClassLinker::AllocStackTraceElementArray(Thread* self,
+ size_t length) {
+ return mirror::ObjectArray<mirror::StackTraceElement>::Alloc(self,
+ GetClassRoot(kJavaLangStackTraceElementArrayClass),
+ length);
}
-static Class* EnsureResolved(Thread* self, Class* klass)
+static mirror::Class* EnsureResolved(Thread* self, mirror::Class* klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(klass != NULL);
// Wait for the class if it has not already been linked.
@@ -1223,7 +1245,7 @@
if (!klass->IsResolved() && klass->GetClinitThreadId() == self->GetTid()) {
self->ThrowNewException("Ljava/lang/ClassCircularityError;",
PrettyDescriptor(klass).c_str());
- klass->SetStatus(Class::kStatusError);
+ klass->SetStatus(mirror::Class::kStatusError);
return NULL;
}
// Wait for the pending initialization to complete.
@@ -1243,11 +1265,11 @@
return klass;
}
-Class* ClassLinker::FindSystemClass(const char* descriptor) {
+mirror::Class* ClassLinker::FindSystemClass(const char* descriptor) {
return FindClass(descriptor, NULL);
}
-Class* ClassLinker::FindClass(const char* descriptor, ClassLoader* class_loader) {
+mirror::Class* ClassLinker::FindClass(const char* descriptor, mirror::ClassLoader* class_loader) {
DCHECK_NE(*descriptor, '\0') << "descriptor is empty string";
Thread* self = Thread::Current();
DCHECK(self != NULL);
@@ -1258,7 +1280,7 @@
return FindPrimitiveClass(descriptor[0]);
}
// Find the class in the loaded classes table.
- Class* klass = LookupClass(descriptor, class_loader);
+ mirror::Class* klass = LookupClass(descriptor, class_loader);
if (klass != NULL) {
return EnsureResolved(self, klass);
}
@@ -1274,7 +1296,7 @@
} else if (Runtime::Current()->UseCompileTimeClassPath()) {
// first try the boot class path
- Class* system_class = FindSystemClass(descriptor);
+ mirror::Class* system_class = FindSystemClass(descriptor);
if (system_class != NULL) {
return system_class;
}
@@ -1321,8 +1343,8 @@
class_name_string.c_str());
return NULL;
} else {
- // success, return Class*
- return soa.Decode<Class*>(result.get());
+ // success, return mirror::Class*
+ return soa.Decode<mirror::Class*>(result.get());
}
}
@@ -1330,12 +1352,12 @@
return NULL;
}
-Class* ClassLinker::DefineClass(const StringPiece& descriptor,
- ClassLoader* class_loader,
- const DexFile& dex_file,
- const DexFile::ClassDef& dex_class_def) {
+mirror::Class* ClassLinker::DefineClass(const StringPiece& descriptor,
+ mirror::ClassLoader* class_loader,
+ const DexFile& dex_file,
+ const DexFile::ClassDef& dex_class_def) {
Thread* self = Thread::Current();
- SirtRef<Class> klass(self, NULL);
+ SirtRef<mirror::Class> klass(self, NULL);
// Load the class from the dex file.
if (!init_done_) {
// finish up init of hand crafted class_roots_
@@ -1365,13 +1387,13 @@
LoadClass(dex_file, dex_class_def, klass, class_loader);
// Check for a pending exception during load
if (self->IsExceptionPending()) {
- klass->SetStatus(Class::kStatusError);
+ klass->SetStatus(mirror::Class::kStatusError);
return NULL;
}
ObjectLock lock(self, klass.get());
klass->SetClinitThreadId(self->GetTid());
// Add the newly loaded class to the loaded classes table.
- SirtRef<Class> existing(self, InsertClass(descriptor, klass.get(), false));
+ SirtRef<mirror::Class> existing(self, InsertClass(descriptor, klass.get(), false));
if (existing.get() != NULL) {
// We failed to insert because we raced with another thread.
return EnsureResolved(self, existing.get());
@@ -1380,7 +1402,7 @@
CHECK(!klass->IsLoaded());
if (!LoadSuperAndInterfaces(klass, dex_file)) {
// Loading failed.
- klass->SetStatus(Class::kStatusError);
+ klass->SetStatus(mirror::Class::kStatusError);
lock.NotifyAll();
return NULL;
}
@@ -1389,7 +1411,7 @@
CHECK(!klass->IsResolved());
if (!LinkClass(klass, NULL)) {
// Linking failed.
- klass->SetStatus(Class::kStatusError);
+ klass->SetStatus(mirror::Class::kStatusError);
lock.NotifyAll();
return NULL;
}
@@ -1433,7 +1455,7 @@
}
}
// start with generic class data
- size_t size = sizeof(Class);
+ size_t size = sizeof(mirror::Class);
// follow with reference fields which must be contiguous at start
size += (num_ref * sizeof(uint32_t));
// if there are 64-bit fields to add, make sure they are aligned
@@ -1502,10 +1524,10 @@
return 0;
}
-const OatFile::OatMethod ClassLinker::GetOatMethodFor(const AbstractMethod* method) {
+const OatFile::OatMethod ClassLinker::GetOatMethodFor(const mirror::AbstractMethod* method) {
// Although we overwrite the trampoline of non-static methods, we may get here via the resolution
// method for direct methods (or virtual methods made direct).
- Class* declaring_class = method->GetDeclaringClass();
+ mirror::Class* declaring_class = method->GetDeclaringClass();
size_t oat_method_index;
if (method->IsStatic() || method->IsDirect()) {
// Simple case where the oat method index was stashed at load time.
@@ -1536,7 +1558,7 @@
}
// Special case to get oat code without overwriting a trampoline.
-const void* ClassLinker::GetOatCodeFor(const AbstractMethod* method) {
+const void* ClassLinker::GetOatCodeFor(const mirror::AbstractMethod* method) {
CHECK(Runtime::Current()->IsCompiler() || method->GetDeclaringClass()->IsInitializing());
return GetOatMethodFor(method).GetCode();
}
@@ -1550,7 +1572,7 @@
return oat_class->GetOatMethod(oat_method_idx).GetCode();
}
-void ClassLinker::FixupStaticTrampolines(Class* klass) {
+void ClassLinker::FixupStaticTrampolines(mirror::Class* klass) {
ClassHelper kh(klass);
const DexFile::ClassDef* dex_class_def = kh.GetClassDef();
CHECK(dex_class_def != NULL);
@@ -1577,7 +1599,7 @@
// Link the code of methods skipped by LinkCode
const void* trampoline = Runtime::Current()->GetResolutionStubArray(Runtime::kStaticMethod)->GetData();
for (size_t i = 0; it.HasNextDirectMethod(); i++, it.Next()) {
- AbstractMethod* method = klass->GetDirectMethod(i);
+ mirror::AbstractMethod* method = klass->GetDirectMethod(i);
if (Runtime::Current()->IsMethodTracingActive()) {
Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (instrumentation->GetSavedCodeFromMap(method) == trampoline) {
@@ -1596,7 +1618,7 @@
}
}
-static void LinkCode(SirtRef<AbstractMethod>& method, const OatFile::OatClass* oat_class,
+static void LinkCode(SirtRef<mirror::AbstractMethod>& method, const OatFile::OatClass* oat_class,
uint32_t method_index)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Every kind of method should at least get an invoke stub from the oat_method.
@@ -1627,11 +1649,11 @@
void ClassLinker::LoadClass(const DexFile& dex_file,
const DexFile::ClassDef& dex_class_def,
- SirtRef<Class>& klass,
- ClassLoader* class_loader) {
+ SirtRef<mirror::Class>& klass,
+ mirror::ClassLoader* class_loader) {
CHECK(klass.get() != NULL);
CHECK(klass->GetDexCache() != NULL);
- CHECK_EQ(Class::kStatusNotReady, klass->GetStatus());
+ CHECK_EQ(mirror::Class::kStatusNotReady, klass->GetStatus());
const char* descriptor = dex_file.GetClassDescriptor(dex_class_def);
CHECK(descriptor != NULL);
@@ -1643,7 +1665,7 @@
klass->SetAccessFlags(access_flags);
klass->SetClassLoader(class_loader);
DCHECK_EQ(klass->GetPrimitiveType(), Primitive::kPrimNot);
- klass->SetStatus(Class::kStatusIdx);
+ klass->SetStatus(mirror::Class::kStatusIdx);
klass->SetDexTypeIndex(dex_class_def.class_idx_);
@@ -1661,12 +1683,12 @@
klass->SetIFields(AllocFieldArray(self, it.NumInstanceFields()));
}
for (size_t i = 0; it.HasNextStaticField(); i++, it.Next()) {
- SirtRef<Field> sfield(self, AllocField(self));
+ SirtRef<mirror::Field> sfield(self, AllocField(self));
klass->SetStaticField(i, sfield.get());
LoadField(dex_file, it, klass, sfield);
}
for (size_t i = 0; it.HasNextInstanceField(); i++, it.Next()) {
- SirtRef<Field> ifield(self, AllocField(self));
+ SirtRef<mirror::Field> ifield(self, AllocField(self));
klass->SetInstanceField(i, ifield.get());
LoadField(dex_file, it, klass, ifield);
}
@@ -1687,7 +1709,7 @@
}
size_t class_def_method_index = 0;
for (size_t i = 0; it.HasNextDirectMethod(); i++, it.Next()) {
- SirtRef<AbstractMethod> method(self, LoadMethod(self, dex_file, it, klass));
+ SirtRef<mirror::AbstractMethod> method(self, LoadMethod(self, dex_file, it, klass));
klass->SetDirectMethod(i, method.get());
if (oat_class.get() != NULL) {
LinkCode(method, oat_class.get(), class_def_method_index);
@@ -1696,7 +1718,7 @@
class_def_method_index++;
}
for (size_t i = 0; it.HasNextVirtualMethod(); i++, it.Next()) {
- SirtRef<AbstractMethod> method(self, LoadMethod(self, dex_file, it, klass));
+ SirtRef<mirror::AbstractMethod> method(self, LoadMethod(self, dex_file, it, klass));
klass->SetVirtualMethod(i, method.get());
DCHECK_EQ(class_def_method_index, it.NumDirectMethods() + i);
if (oat_class.get() != NULL) {
@@ -1708,21 +1730,21 @@
}
void ClassLinker::LoadField(const DexFile& /*dex_file*/, const ClassDataItemIterator& it,
- SirtRef<Class>& klass, SirtRef<Field>& dst) {
+ SirtRef<mirror::Class>& klass, SirtRef<mirror::Field>& dst) {
uint32_t field_idx = it.GetMemberIndex();
dst->SetDexFieldIndex(field_idx);
dst->SetDeclaringClass(klass.get());
dst->SetAccessFlags(it.GetMemberAccessFlags());
}
-AbstractMethod* ClassLinker::LoadMethod(Thread* self, const DexFile& dex_file,
- const ClassDataItemIterator& it,
- SirtRef<Class>& klass) {
+mirror::AbstractMethod* ClassLinker::LoadMethod(Thread* self, const DexFile& dex_file,
+ const ClassDataItemIterator& it,
+ SirtRef<mirror::Class>& klass) {
uint32_t dex_method_idx = it.GetMemberIndex();
const DexFile::MethodId& method_id = dex_file.GetMethodId(dex_method_idx);
StringPiece method_name(dex_file.GetMethodName(method_id));
- AbstractMethod* dst = NULL;
+ mirror::AbstractMethod* dst = NULL;
if (method_name == "<init>") {
dst = AllocConstructor(self);
} else {
@@ -1780,11 +1802,11 @@
void ClassLinker::AppendToBootClassPath(const DexFile& dex_file) {
Thread* self = Thread::Current();
- SirtRef<DexCache> dex_cache(self, AllocDexCache(self, dex_file));
+ SirtRef<mirror::DexCache> dex_cache(self, AllocDexCache(self, dex_file));
AppendToBootClassPath(dex_file, dex_cache);
}
-void ClassLinker::AppendToBootClassPath(const DexFile& dex_file, SirtRef<DexCache>& dex_cache) {
+void ClassLinker::AppendToBootClassPath(const DexFile& dex_file, SirtRef<mirror::DexCache>& dex_cache) {
CHECK(dex_cache.get() != NULL) << dex_file.GetLocation();
boot_class_path_.push_back(&dex_file);
RegisterDexFile(dex_file, dex_cache);
@@ -1805,7 +1827,7 @@
return IsDexFileRegisteredLocked(dex_file);
}
-void ClassLinker::RegisterDexFileLocked(const DexFile& dex_file, SirtRef<DexCache>& dex_cache) {
+void ClassLinker::RegisterDexFileLocked(const DexFile& dex_file, SirtRef<mirror::DexCache>& dex_cache) {
dex_lock_.AssertHeld(Thread::Current());
CHECK(dex_cache.get() != NULL) << dex_file.GetLocation();
CHECK(dex_cache->GetLocation()->Equals(dex_file.GetLocation()));
@@ -1825,7 +1847,7 @@
// Don't alloc while holding the lock, since allocation may need to
// suspend all threads and another thread may need the dex_lock_ to
// get to a suspend point.
- SirtRef<DexCache> dex_cache(self, AllocDexCache(self, dex_file));
+ SirtRef<mirror::DexCache> dex_cache(self, AllocDexCache(self, dex_file));
{
MutexLock mu(self, dex_lock_);
if (IsDexFileRegisteredLocked(dex_file)) {
@@ -1835,16 +1857,16 @@
}
}
-void ClassLinker::RegisterDexFile(const DexFile& dex_file, SirtRef<DexCache>& dex_cache) {
+void ClassLinker::RegisterDexFile(const DexFile& dex_file, SirtRef<mirror::DexCache>& dex_cache) {
MutexLock mu(Thread::Current(), dex_lock_);
RegisterDexFileLocked(dex_file, dex_cache);
}
-DexCache* ClassLinker::FindDexCache(const DexFile& dex_file) const {
+mirror::DexCache* ClassLinker::FindDexCache(const DexFile& dex_file) const {
MutexLock mu(Thread::Current(), dex_lock_);
// Search assuming unique-ness of dex file.
for (size_t i = 0; i != dex_caches_.size(); ++i) {
- DexCache* dex_cache = dex_caches_[i];
+ mirror::DexCache* dex_cache = dex_caches_[i];
if (dex_cache->GetDexFile() == &dex_file) {
return dex_cache;
}
@@ -1852,35 +1874,39 @@
// Search matching by location name.
std::string location(dex_file.GetLocation());
for (size_t i = 0; i != dex_caches_.size(); ++i) {
- DexCache* dex_cache = dex_caches_[i];
+ mirror::DexCache* dex_cache = dex_caches_[i];
if (dex_cache->GetDexFile()->GetLocation() == location) {
return dex_cache;
}
}
// Failure, dump diagnostic and abort.
for (size_t i = 0; i != dex_caches_.size(); ++i) {
- DexCache* dex_cache = dex_caches_[i];
+ mirror::DexCache* dex_cache = dex_caches_[i];
LOG(ERROR) << "Registered dex file " << i << " = " << dex_cache->GetDexFile()->GetLocation();
}
LOG(FATAL) << "Failed to find DexCache for DexFile " << location;
return NULL;
}
-void ClassLinker::FixupDexCaches(AbstractMethod* resolution_method) const {
+void ClassLinker::FixupDexCaches(mirror::AbstractMethod* resolution_method) const {
MutexLock mu(Thread::Current(), dex_lock_);
for (size_t i = 0; i != dex_caches_.size(); ++i) {
dex_caches_[i]->Fixup(resolution_method);
}
}
-Class* ClassLinker::InitializePrimitiveClass(Class* primitive_class, Primitive::Type type) {
+mirror::Class* ClassLinker::CreatePrimitiveClass(Thread* self, Primitive::Type type) {
+ return InitializePrimitiveClass(AllocClass(self, sizeof(mirror::Class)), type);
+}
+
+mirror::Class* ClassLinker::InitializePrimitiveClass(mirror::Class* primitive_class, Primitive::Type type) {
CHECK(primitive_class != NULL);
// Must hold lock on object when initializing.
ObjectLock lock(Thread::Current(), primitive_class);
primitive_class->SetAccessFlags(kAccPublic | kAccFinal | kAccAbstract);
primitive_class->SetPrimitiveType(type);
- primitive_class->SetStatus(Class::kStatusInitialized);
- Class* existing = InsertClass(Primitive::Descriptor(type), primitive_class, false);
+ primitive_class->SetStatus(mirror::Class::kStatusInitialized);
+ mirror::Class* existing = InsertClass(Primitive::Descriptor(type), primitive_class, false);
CHECK(existing == NULL) << "InitPrimitiveClass(" << type << ") failed";
return primitive_class;
}
@@ -1898,11 +1924,12 @@
// array class; that always comes from the base element class.
//
// Returns NULL with an exception raised on failure.
-Class* ClassLinker::CreateArrayClass(const std::string& descriptor, ClassLoader* class_loader) {
+mirror::Class* ClassLinker::CreateArrayClass(const std::string& descriptor,
+ mirror::ClassLoader* class_loader) {
CHECK_EQ('[', descriptor[0]);
// Identify the underlying component type
- Class* component_type = FindClass(descriptor.substr(1).c_str(), class_loader);
+ mirror::Class* component_type = FindClass(descriptor.substr(1).c_str(), class_loader);
if (component_type == NULL) {
DCHECK(Thread::Current()->IsExceptionPending());
return NULL;
@@ -1926,7 +1953,7 @@
// class to the hash table --- necessary because of possible races with
// other threads.)
if (class_loader != component_type->GetClassLoader()) {
- Class* new_class = LookupClass(descriptor.c_str(), component_type->GetClassLoader());
+ mirror::Class* new_class = LookupClass(descriptor.c_str(), component_type->GetClassLoader());
if (new_class != NULL) {
return new_class;
}
@@ -1941,7 +1968,7 @@
// Array classes are simple enough that we don't need to do a full
// link step.
Thread* self = Thread::Current();
- SirtRef<Class> new_class(self, NULL);
+ SirtRef<mirror::Class> new_class(self, NULL);
if (!init_done_) {
// Classes that were hand created, i.e. not by FindSystemClass
if (descriptor == "[Ljava/lang/Class;") {
@@ -1963,7 +1990,7 @@
}
}
if (new_class.get() == NULL) {
- new_class.reset(AllocClass(self, sizeof(Class)));
+ new_class.reset(AllocClass(self, sizeof(mirror::Class)));
if (new_class.get() == NULL) {
return NULL;
}
@@ -1971,12 +1998,12 @@
}
ObjectLock lock(self, new_class.get()); // Must hold lock on object when initializing.
DCHECK(new_class->GetComponentType() != NULL);
- Class* java_lang_Object = GetClassRoot(kJavaLangObject);
+ mirror::Class* java_lang_Object = GetClassRoot(kJavaLangObject);
new_class->SetSuperClass(java_lang_Object);
new_class->SetVTable(java_lang_Object->GetVTable());
new_class->SetPrimitiveType(Primitive::kPrimNot);
new_class->SetClassLoader(component_type->GetClassLoader());
- new_class->SetStatus(Class::kStatusInitialized);
+ new_class->SetStatus(mirror::Class::kStatusInitialized);
// don't need to set new_class->SetObjectSize(..)
// because Object::SizeOf delegates to Array::SizeOf
@@ -2006,7 +2033,7 @@
new_class->SetAccessFlags(((new_class->GetComponentType()->GetAccessFlags() &
~kAccInterface) | kAccFinal) & kAccJavaFlagsMask);
- Class* existing = InsertClass(descriptor, new_class.get(), false);
+ mirror::Class* existing = InsertClass(descriptor, new_class.get(), false);
if (existing == NULL) {
return new_class.get();
}
@@ -2019,7 +2046,7 @@
return existing;
}
-Class* ClassLinker::FindPrimitiveClass(char type) {
+mirror::Class* ClassLinker::FindPrimitiveClass(char type) {
switch (Primitive::GetType(type)) {
case Primitive::kPrimByte:
return GetClassRoot(kPrimitiveByte);
@@ -2047,9 +2074,9 @@
return NULL;
}
-Class* ClassLinker::InsertClass(const StringPiece& descriptor, Class* klass, bool image_class) {
+mirror::Class* ClassLinker::InsertClass(const StringPiece& descriptor, mirror::Class* klass, bool image_class) {
if (VLOG_IS_ON(class_linker)) {
- DexCache* dex_cache = klass->GetDexCache();
+ mirror::DexCache* dex_cache = klass->GetDexCache();
std::string source;
if (dex_cache != NULL) {
source += " from ";
@@ -2060,7 +2087,7 @@
size_t hash = StringPieceHash()(descriptor);
MutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
Table& classes = image_class ? image_classes_ : classes_;
- Class* existing = LookupClassLocked(descriptor.data(), klass->GetClassLoader(), hash, classes);
+ mirror::Class* existing = LookupClassLocked(descriptor.data(), klass->GetClassLoader(), hash, classes);
#ifndef NDEBUG
// Check we don't have the class in the other table in error
Table& other_classes = image_class ? classes_ : image_classes_;
@@ -2074,22 +2101,24 @@
return NULL;
}
-bool ClassLinker::RemoveClass(const char* descriptor, const ClassLoader* class_loader) {
+bool ClassLinker::RemoveClass(const char* descriptor, const mirror::ClassLoader* class_loader) {
size_t hash = Hash(descriptor);
MutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
typedef Table::iterator It; // TODO: C++0x auto
// TODO: determine if it's better to search classes_ or image_classes_ first
ClassHelper kh;
- for (It it = classes_.lower_bound(hash), end = classes_.end(); it != end && it->first == hash; ++it) {
- Class* klass = it->second;
+ for (It it = classes_.lower_bound(hash), end = classes_.end(); it != end && it->first == hash;
+ ++it) {
+ mirror::Class* klass = it->second;
kh.ChangeClass(klass);
if (strcmp(kh.GetDescriptor(), descriptor) == 0 && klass->GetClassLoader() == class_loader) {
classes_.erase(it);
return true;
}
}
- for (It it = image_classes_.lower_bound(hash), end = classes_.end(); it != end && it->first == hash; ++it) {
- Class* klass = it->second;
+ for (It it = image_classes_.lower_bound(hash), end = classes_.end();
+ it != end && it->first == hash; ++it) {
+ mirror::Class* klass = it->second;
kh.ChangeClass(klass);
if (strcmp(kh.GetDescriptor(), descriptor) == 0 && klass->GetClassLoader() == class_loader) {
image_classes_.erase(it);
@@ -2099,28 +2128,30 @@
return false;
}
-Class* ClassLinker::LookupClass(const char* descriptor, const ClassLoader* class_loader) {
+mirror::Class* ClassLinker::LookupClass(const char* descriptor,
+ const mirror::ClassLoader* class_loader) {
size_t hash = Hash(descriptor);
MutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
// TODO: determine if it's better to search classes_ or image_classes_ first
- Class* klass = LookupClassLocked(descriptor, class_loader, hash, classes_);
+ mirror::Class* klass = LookupClassLocked(descriptor, class_loader, hash, classes_);
if (klass != NULL) {
return klass;
}
return LookupClassLocked(descriptor, class_loader, hash, image_classes_);
}
-Class* ClassLinker::LookupClassLocked(const char* descriptor, const ClassLoader* class_loader,
- size_t hash, const Table& classes) {
+mirror::Class* ClassLinker::LookupClassLocked(const char* descriptor,
+ const mirror::ClassLoader* class_loader,
+ size_t hash, const Table& classes) {
ClassHelper kh(NULL, this);
typedef Table::const_iterator It; // TODO: C++0x auto
for (It it = classes.lower_bound(hash), end = classes_.end(); it != end && it->first == hash; ++it) {
- Class* klass = it->second;
+ mirror::Class* klass = it->second;
kh.ChangeClass(klass);
if (strcmp(descriptor, kh.GetDescriptor()) == 0 && klass->GetClassLoader() == class_loader) {
#ifndef NDEBUG
for (++it; it != end && it->first == hash; ++it) {
- Class* klass2 = it->second;
+ mirror::Class* klass2 = it->second;
kh.ChangeClass(klass2);
CHECK(!(strcmp(descriptor, kh.GetDescriptor()) == 0 && klass2->GetClassLoader() == class_loader))
<< PrettyClass(klass) << " " << klass << " " << klass->GetClassLoader() << " "
@@ -2133,7 +2164,7 @@
return NULL;
}
-void ClassLinker::LookupClasses(const char* descriptor, std::vector<Class*>& classes) {
+void ClassLinker::LookupClasses(const char* descriptor, std::vector<mirror::Class*>& classes) {
classes.clear();
size_t hash = Hash(descriptor);
MutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
@@ -2141,14 +2172,14 @@
// TODO: determine if it's better to search classes_ or image_classes_ first
ClassHelper kh(NULL, this);
for (It it = classes_.lower_bound(hash), end = classes_.end(); it != end && it->first == hash; ++it) {
- Class* klass = it->second;
+ mirror::Class* klass = it->second;
kh.ChangeClass(klass);
if (strcmp(descriptor, kh.GetDescriptor()) == 0) {
classes.push_back(klass);
}
}
for (It it = image_classes_.lower_bound(hash), end = classes_.end(); it != end && it->first == hash; ++it) {
- Class* klass = it->second;
+ mirror::Class* klass = it->second;
kh.ChangeClass(klass);
if (strcmp(descriptor, kh.GetDescriptor()) == 0) {
classes.push_back(klass);
@@ -2156,7 +2187,7 @@
}
}
-void ClassLinker::VerifyClass(Class* klass) {
+void ClassLinker::VerifyClass(mirror::Class* klass) {
// TODO: assert that the monitor on the Class is held
Thread* self = Thread::Current();
ObjectLock lock(self, klass);
@@ -2174,16 +2205,17 @@
return;
}
- if (klass->GetStatus() == Class::kStatusResolved) {
- klass->SetStatus(Class::kStatusVerifying);
+ if (klass->GetStatus() == mirror::Class::kStatusResolved) {
+ klass->SetStatus(mirror::Class::kStatusVerifying);
} else {
- CHECK_EQ(klass->GetStatus(), Class::kStatusRetryVerificationAtRuntime) << PrettyClass(klass);
+ CHECK_EQ(klass->GetStatus(), mirror::Class::kStatusRetryVerificationAtRuntime)
+ << PrettyClass(klass);
CHECK(!Runtime::Current()->IsCompiler());
- klass->SetStatus(Class::kStatusVerifyingAtRuntime);
+ klass->SetStatus(mirror::Class::kStatusVerifyingAtRuntime);
}
// Verify super class.
- Class* super = klass->GetSuperClass();
+ mirror::Class* super = klass->GetSuperClass();
std::string error_msg;
if (super != NULL) {
// Acquire lock to prevent races on verifying the super class.
@@ -2198,7 +2230,7 @@
error_msg += " that attempts to sub-class erroneous class ";
error_msg += PrettyDescriptor(super);
LOG(ERROR) << error_msg << " in " << klass->GetDexCache()->GetLocation()->ToModifiedUtf8();
- SirtRef<Throwable> cause(self, self->GetException());
+ SirtRef<mirror::Throwable> cause(self, self->GetException());
if (cause.get() != NULL) {
self->ClearException();
}
@@ -2206,24 +2238,24 @@
if (cause.get() != NULL) {
self->GetException()->SetCause(cause.get());
}
- klass->SetStatus(Class::kStatusError);
+ klass->SetStatus(mirror::Class::kStatusError);
return;
}
}
// Try to use verification information from the oat file, otherwise do runtime verification.
const DexFile& dex_file = *klass->GetDexCache()->GetDexFile();
- Class::Status oat_file_class_status(Class::kStatusNotReady);
+ mirror::Class::Status oat_file_class_status(mirror::Class::kStatusNotReady);
bool preverified = VerifyClassUsingOatFile(dex_file, klass, oat_file_class_status);
verifier::MethodVerifier::FailureKind verifier_failure = verifier::MethodVerifier::kNoFailure;
- if (oat_file_class_status == Class::kStatusError) {
+ if (oat_file_class_status == mirror::Class::kStatusError) {
LOG(WARNING) << "Skipping runtime verification of erroneous class " << PrettyDescriptor(klass)
<< " in " << klass->GetDexCache()->GetLocation()->ToModifiedUtf8();
error_msg = "Rejecting class ";
error_msg += PrettyDescriptor(klass);
error_msg += " because it failed compile-time verification";
Thread::Current()->ThrowNewException("Ljava/lang/VerifyError;", error_msg.c_str());
- klass->SetStatus(Class::kStatusError);
+ klass->SetStatus(mirror::Class::kStatusError);
return;
}
if (!preverified) {
@@ -2239,16 +2271,16 @@
// Make sure all classes referenced by catch blocks are resolved.
ResolveClassExceptionHandlerTypes(dex_file, klass);
if (verifier_failure == verifier::MethodVerifier::kNoFailure) {
- klass->SetStatus(Class::kStatusVerified);
+ klass->SetStatus(mirror::Class::kStatusVerified);
} else {
CHECK_EQ(verifier_failure, verifier::MethodVerifier::kSoftFailure);
// Soft failures at compile time should be retried at runtime. Soft
// failures at runtime will be handled by slow paths in the generated
// code. Set status accordingly.
if (Runtime::Current()->IsCompiler()) {
- klass->SetStatus(Class::kStatusRetryVerificationAtRuntime);
+ klass->SetStatus(mirror::Class::kStatusRetryVerificationAtRuntime);
} else {
- klass->SetStatus(Class::kStatusVerified);
+ klass->SetStatus(mirror::Class::kStatusVerified);
}
}
} else {
@@ -2257,12 +2289,12 @@
<< " because: " << error_msg;
self->AssertNoPendingException();
self->ThrowNewException("Ljava/lang/VerifyError;", error_msg.c_str());
- klass->SetStatus(Class::kStatusError);
+ klass->SetStatus(mirror::Class::kStatusError);
}
}
-bool ClassLinker::VerifyClassUsingOatFile(const DexFile& dex_file, Class* klass,
- Class::Status& oat_file_class_status) {
+bool ClassLinker::VerifyClassUsingOatFile(const DexFile& dex_file, mirror::Class* klass,
+ mirror::Class::Status& oat_file_class_status) {
if (!Runtime::Current()->IsStarted()) {
return false;
}
@@ -2281,11 +2313,11 @@
CHECK(oat_class.get() != NULL)
<< dex_file.GetLocation() << " " << PrettyClass(klass) << " " << descriptor;
oat_file_class_status = oat_class->GetStatus();
- if (oat_file_class_status == Class::kStatusVerified ||
- oat_file_class_status == Class::kStatusInitialized) {
+ if (oat_file_class_status == mirror::Class::kStatusVerified ||
+ oat_file_class_status == mirror::Class::kStatusInitialized) {
return true;
}
- if (oat_file_class_status == Class::kStatusRetryVerificationAtRuntime) {
+ if (oat_file_class_status == mirror::Class::kStatusRetryVerificationAtRuntime) {
// Compile time verification failed with a soft error. Compile time verification can fail
// because we have incomplete type information. Consider the following:
// class ... {
@@ -2305,12 +2337,12 @@
// at compile time).
return false;
}
- if (oat_file_class_status == Class::kStatusError) {
+ if (oat_file_class_status == mirror::Class::kStatusError) {
// Compile time verification failed with a hard error. This is caused by invalid instructions
// in the class. These errors are unrecoverable.
return false;
}
- if (oat_file_class_status == Class::kStatusNotReady) {
+ if (oat_file_class_status == mirror::Class::kStatusNotReady) {
// Status is uninitialized if we couldn't determine the status at compile time, for example,
// not loading the class.
// TODO: when the verifier doesn't rely on Class-es failing to resolve/load the type hierarchy
@@ -2323,7 +2355,7 @@
return false;
}
-void ClassLinker::ResolveClassExceptionHandlerTypes(const DexFile& dex_file, Class* klass) {
+void ClassLinker::ResolveClassExceptionHandlerTypes(const DexFile& dex_file, mirror::Class* klass) {
for (size_t i = 0; i < klass->NumDirectMethods(); i++) {
ResolveMethodExceptionHandlerTypes(dex_file, klass->GetDirectMethod(i));
}
@@ -2332,7 +2364,8 @@
}
}
-void ClassLinker::ResolveMethodExceptionHandlerTypes(const DexFile& dex_file, AbstractMethod* method) {
+void ClassLinker::ResolveMethodExceptionHandlerTypes(const DexFile& dex_file,
+ mirror::AbstractMethod* method) {
// similar to DexVerifier::ScanTryCatchBlocks and dex2oat's ResolveExceptionsForMethod.
const DexFile::CodeItem* code_item = dex_file.GetCodeItem(method->GetCodeItemOffset());
if (code_item == NULL) {
@@ -2350,7 +2383,7 @@
// Ensure exception types are resolved so that they don't need resolution to be delivered,
// unresolved exception types will be ignored by exception delivery
if (iterator.GetHandlerTypeIndex() != DexFile::kDexNoIndex16) {
- Class* exception_type = linker->ResolveType(iterator.GetHandlerTypeIndex(), method);
+ mirror::Class* exception_type = linker->ResolveType(iterator.GetHandlerTypeIndex(), method);
if (exception_type == NULL) {
DCHECK(Thread::Current()->IsExceptionPending());
Thread::Current()->ClearException();
@@ -2361,26 +2394,29 @@
}
}
-static void CheckProxyConstructor(AbstractMethod* constructor);
-static void CheckProxyMethod(AbstractMethod* method, SirtRef<AbstractMethod>& prototype);
+static void CheckProxyConstructor(mirror::AbstractMethod* constructor);
+static void CheckProxyMethod(mirror::AbstractMethod* method,
+ SirtRef<mirror::AbstractMethod>& prototype);
-Class* ClassLinker::CreateProxyClass(String* name, ObjectArray<Class>* interfaces,
- ClassLoader* loader, ObjectArray<AbstractMethod>* methods,
- ObjectArray<ObjectArray<Class> >* throws) {
+mirror::Class* ClassLinker::CreateProxyClass(mirror::String* name,
+ mirror::ObjectArray<mirror::Class>* interfaces,
+ mirror::ClassLoader* loader,
+ mirror::ObjectArray<mirror::AbstractMethod>* methods,
+ mirror::ObjectArray<mirror::ObjectArray<mirror::Class> >* throws) {
Thread* self = Thread::Current();
- SirtRef<Class> klass(self, AllocClass(self, GetClassRoot(kJavaLangClass),
- sizeof(SynthesizedProxyClass)));
+ SirtRef<mirror::Class> klass(self, AllocClass(self, GetClassRoot(kJavaLangClass),
+ sizeof(mirror::SynthesizedProxyClass)));
CHECK(klass.get() != NULL);
DCHECK(klass->GetClass() != NULL);
- klass->SetObjectSize(sizeof(Proxy));
+ klass->SetObjectSize(sizeof(mirror::Proxy));
klass->SetAccessFlags(kAccClassIsProxy | kAccPublic | kAccFinal);
klass->SetClassLoader(loader);
DCHECK_EQ(klass->GetPrimitiveType(), Primitive::kPrimNot);
klass->SetName(name);
- Class* proxy_class = GetClassRoot(kJavaLangReflectProxy);
+ mirror::Class* proxy_class = GetClassRoot(kJavaLangReflectProxy);
klass->SetDexCache(proxy_class->GetDexCache());
- klass->SetStatus(Class::kStatusIdx);
+ klass->SetStatus(mirror::Class::kStatusIdx);
klass->SetDexTypeIndex(DexFile::kDexNoIndex16);
@@ -2388,13 +2424,13 @@
klass->SetSFields(AllocFieldArray(self, 2));
// 1. Create a static field 'interfaces' that holds the _declared_ interfaces implemented by
// our proxy, so Class.getInterfaces doesn't return the flattened set.
- SirtRef<Field> interfaces_sfield(self, AllocField(self));
+ SirtRef<mirror::Field> interfaces_sfield(self, AllocField(self));
klass->SetStaticField(0, interfaces_sfield.get());
interfaces_sfield->SetDexFieldIndex(0);
interfaces_sfield->SetDeclaringClass(klass.get());
interfaces_sfield->SetAccessFlags(kAccStatic | kAccPublic | kAccFinal);
// 2. Create a static field 'throws' that holds exceptions thrown by our methods.
- SirtRef<Field> throws_sfield(self, AllocField(self));
+ SirtRef<mirror::Field> throws_sfield(self, AllocField(self));
klass->SetStaticField(1, throws_sfield.get());
throws_sfield->SetDexFieldIndex(1);
throws_sfield->SetDeclaringClass(klass.get());
@@ -2408,24 +2444,24 @@
size_t num_virtual_methods = methods->GetLength();
klass->SetVirtualMethods(AllocMethodArray(self, num_virtual_methods));
for (size_t i = 0; i < num_virtual_methods; ++i) {
- SirtRef<AbstractMethod> prototype(self, methods->Get(i));
+ SirtRef<mirror::AbstractMethod> prototype(self, methods->Get(i));
klass->SetVirtualMethod(i, CreateProxyMethod(self, klass, prototype));
}
klass->SetSuperClass(proxy_class); // The super class is java.lang.reflect.Proxy
- klass->SetStatus(Class::kStatusLoaded); // Class is now effectively in the loaded state
+ klass->SetStatus(mirror::Class::kStatusLoaded); // Class is now effectively in the loaded state
DCHECK(!Thread::Current()->IsExceptionPending());
// Link the fields and virtual methods, creating vtable and iftables
if (!LinkClass(klass, interfaces)) {
- klass->SetStatus(Class::kStatusError);
+ klass->SetStatus(mirror::Class::kStatusError);
return NULL;
}
{
ObjectLock lock(self, klass.get()); // Must hold lock on object when initializing.
interfaces_sfield->SetObject(klass.get(), interfaces);
throws_sfield->SetObject(klass.get(), throws);
- klass->SetStatus(Class::kStatusInitialized);
+ klass->SetStatus(mirror::Class::kStatusInitialized);
}
// sanity checks
@@ -2433,7 +2469,7 @@
CHECK(klass->GetIFields() == NULL);
CheckProxyConstructor(klass->GetDirectMethod(0));
for (size_t i = 0; i < num_virtual_methods; ++i) {
- SirtRef<AbstractMethod> prototype(self, methods->Get(i));
+ SirtRef<mirror::AbstractMethod> prototype(self, methods->Get(i));
CheckProxyMethod(klass->GetVirtualMethod(i), prototype);
}
@@ -2445,27 +2481,29 @@
name->ToModifiedUtf8().c_str()));
CHECK_EQ(PrettyField(klass->GetStaticField(1)), throws_field_name);
- SynthesizedProxyClass* synth_proxy_class = down_cast<SynthesizedProxyClass*>(klass.get());
+ mirror::SynthesizedProxyClass* synth_proxy_class =
+ down_cast<mirror::SynthesizedProxyClass*>(klass.get());
CHECK_EQ(synth_proxy_class->GetInterfaces(), interfaces);
CHECK_EQ(synth_proxy_class->GetThrows(), throws);
}
return klass.get();
}
-std::string ClassLinker::GetDescriptorForProxy(const Class* proxy_class) {
+std::string ClassLinker::GetDescriptorForProxy(const mirror::Class* proxy_class) {
DCHECK(proxy_class->IsProxyClass());
- String* name = proxy_class->GetName();
+ mirror::String* name = proxy_class->GetName();
DCHECK(name != NULL);
return DotToDescriptor(name->ToModifiedUtf8().c_str());
}
-AbstractMethod* ClassLinker::FindMethodForProxy(const Class* proxy_class, const AbstractMethod* proxy_method) {
+mirror::AbstractMethod* ClassLinker::FindMethodForProxy(const mirror::Class* proxy_class,
+ const mirror::AbstractMethod* proxy_method) {
DCHECK(proxy_class->IsProxyClass());
DCHECK(proxy_method->IsProxyMethod());
// Locate the dex cache of the original interface/Object
- DexCache* dex_cache = NULL;
+ mirror::DexCache* dex_cache = NULL;
{
- ObjectArray<Class>* resolved_types = proxy_method->GetDexCacheResolvedTypes();
+ mirror::ObjectArray<mirror::Class>* resolved_types = proxy_method->GetDexCacheResolvedTypes();
MutexLock mu(Thread::Current(), dex_lock_);
for (size_t i = 0; i != dex_caches_.size(); ++i) {
if (dex_caches_[i]->GetResolvedTypes() == resolved_types) {
@@ -2476,27 +2514,31 @@
}
CHECK(dex_cache != NULL);
uint32_t method_idx = proxy_method->GetDexMethodIndex();
- AbstractMethod* resolved_method = dex_cache->GetResolvedMethod(method_idx);
+ mirror::AbstractMethod* resolved_method = dex_cache->GetResolvedMethod(method_idx);
CHECK(resolved_method != NULL);
return resolved_method;
}
-AbstractMethod* ClassLinker::CreateProxyConstructor(Thread* self, SirtRef<Class>& klass, Class* proxy_class) {
+mirror::AbstractMethod* ClassLinker::CreateProxyConstructor(Thread* self,
+ SirtRef<mirror::Class>& klass,
+ mirror::Class* proxy_class) {
// Create constructor for Proxy that must initialize h
- ObjectArray<AbstractMethod>* proxy_direct_methods = proxy_class->GetDirectMethods();
+ mirror::ObjectArray<mirror::AbstractMethod>* proxy_direct_methods =
+ proxy_class->GetDirectMethods();
CHECK_EQ(proxy_direct_methods->GetLength(), 15);
- AbstractMethod* proxy_constructor = proxy_direct_methods->Get(2);
+ mirror::AbstractMethod* proxy_constructor = proxy_direct_methods->Get(2);
// Clone the existing constructor of Proxy (our constructor would just invoke it so steal its
// code_ too)
- AbstractMethod* constructor = down_cast<AbstractMethod*>(proxy_constructor->Clone(self));
+ mirror::AbstractMethod* constructor =
+ down_cast<mirror::AbstractMethod*>(proxy_constructor->Clone(self));
// Make this constructor public and fix the class to be our Proxy version
constructor->SetAccessFlags((constructor->GetAccessFlags() & ~kAccProtected) | kAccPublic);
constructor->SetDeclaringClass(klass.get());
return constructor;
}
-static void CheckProxyConstructor(AbstractMethod* constructor)
+static void CheckProxyConstructor(mirror::AbstractMethod* constructor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
CHECK(constructor->IsConstructor());
MethodHelper mh(constructor);
@@ -2505,15 +2547,15 @@
DCHECK(constructor->IsPublic());
}
-AbstractMethod* ClassLinker::CreateProxyMethod(Thread* self, SirtRef<Class>& klass,
- SirtRef<AbstractMethod>& prototype) {
+mirror::AbstractMethod* ClassLinker::CreateProxyMethod(Thread* self, SirtRef<mirror::Class>& klass,
+ SirtRef<mirror::AbstractMethod>& prototype) {
// Ensure prototype is in dex cache so that we can use the dex cache to look up the overridden
// prototype method
prototype->GetDeclaringClass()->GetDexCache()->SetResolvedMethod(prototype->GetDexMethodIndex(),
prototype.get());
// We steal everything from the prototype (such as DexCache, invoke stub, etc.) then specialize
// as necessary
- AbstractMethod* method = down_cast<AbstractMethod*>(prototype->Clone(self));
+ mirror::AbstractMethod* method = down_cast<mirror::AbstractMethod*>(prototype->Clone(self));
// Set class to be the concrete proxy class and clear the abstract flag, modify exceptions to
// the intersection of throw exceptions as defined in Proxy
@@ -2522,7 +2564,8 @@
// At runtime the method looks like a reference and argument saving method, clone the code
// related parameters from this method.
- AbstractMethod* refs_and_args = Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
+ mirror::AbstractMethod* refs_and_args =
+ Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
method->SetCoreSpillMask(refs_and_args->GetCoreSpillMask());
method->SetFpSpillMask(refs_and_args->GetFpSpillMask());
method->SetFrameSizeInBytes(refs_and_args->GetFrameSizeInBytes());
@@ -2536,7 +2579,8 @@
return method;
}
-static void CheckProxyMethod(AbstractMethod* method, SirtRef<AbstractMethod>& prototype)
+static void CheckProxyMethod(mirror::AbstractMethod* method,
+ SirtRef<mirror::AbstractMethod>& prototype)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Basic sanity
CHECK(!prototype->IsFinal());
@@ -2560,18 +2604,18 @@
CHECK_EQ(mh.GetReturnType(), mh2.GetReturnType());
}
-bool ClassLinker::InitializeClass(Class* klass, bool can_run_clinit, bool can_init_statics) {
+bool ClassLinker::InitializeClass(mirror::Class* klass, bool can_run_clinit, bool can_init_statics) {
CHECK(klass->IsResolved() || klass->IsErroneous())
<< PrettyClass(klass) << ": state=" << klass->GetStatus();
Thread* self = Thread::Current();
- AbstractMethod* clinit = NULL;
+ mirror::AbstractMethod* clinit = NULL;
{
// see JLS 3rd edition, 12.4.2 "Detailed Initialization Procedure" for the locking protocol
ObjectLock lock(self, klass);
- if (klass->GetStatus() == Class::kStatusInitialized) {
+ if (klass->GetStatus() == mirror::Class::kStatusInitialized) {
return true;
}
@@ -2580,11 +2624,11 @@
return false;
}
- if (klass->GetStatus() == Class::kStatusResolved ||
- klass->GetStatus() == Class::kStatusRetryVerificationAtRuntime) {
+ if (klass->GetStatus() == mirror::Class::kStatusResolved ||
+ klass->GetStatus() == mirror::Class::kStatusRetryVerificationAtRuntime) {
VerifyClass(klass);
- if (klass->GetStatus() != Class::kStatusVerified) {
- if (klass->GetStatus() == Class::kStatusError) {
+ if (klass->GetStatus() != mirror::Class::kStatusVerified) {
+ if (klass->GetStatus() == mirror::Class::kStatusError) {
CHECK(self->IsExceptionPending());
}
return false;
@@ -2606,7 +2650,7 @@
// to initializing and we need to wait. Either way, this
// invocation of InitializeClass will not be responsible for
// running <clinit> and will return.
- if (klass->GetStatus() == Class::kStatusInitializing) {
+ if (klass->GetStatus() == mirror::Class::kStatusInitializing) {
// We caught somebody else in the act; was it us?
if (klass->GetClinitThreadId() == self->GetTid()) {
// Yes. That's fine. Return so we can continue initializing.
@@ -2617,15 +2661,15 @@
}
if (!ValidateSuperClassDescriptors(klass)) {
- klass->SetStatus(Class::kStatusError);
+ klass->SetStatus(mirror::Class::kStatusError);
lock.NotifyAll();
return false;
}
- DCHECK_EQ(klass->GetStatus(), Class::kStatusVerified) << PrettyClass(klass);
+ DCHECK_EQ(klass->GetStatus(), mirror::Class::kStatusVerified) << PrettyClass(klass);
klass->SetClinitThreadId(self->GetTid());
- klass->SetStatus(Class::kStatusInitializing);
+ klass->SetStatus(mirror::Class::kStatusInitializing);
}
uint64_t t0 = NanoTime();
@@ -2665,7 +2709,7 @@
if (self->IsExceptionPending()) {
WrapExceptionInInitializer();
- klass->SetStatus(Class::kStatusError);
+ klass->SetStatus(mirror::Class::kStatusError);
success = false;
} else {
RuntimeStats* global_stats = Runtime::Current()->GetStats();
@@ -2677,10 +2721,10 @@
// Set the class as initialized except if we can't initialize static fields and static field
// initialization is necessary.
if (!can_init_statics && has_static_field_initializers) {
- klass->SetStatus(Class::kStatusVerified); // Don't leave class in initializing state.
+ klass->SetStatus(mirror::Class::kStatusVerified); // Don't leave class in initializing state.
success = false;
} else {
- klass->SetStatus(Class::kStatusInitialized);
+ klass->SetStatus(mirror::Class::kStatusInitialized);
}
if (VLOG_IS_ON(class_linker)) {
ClassHelper kh(klass);
@@ -2692,7 +2736,7 @@
return success;
}
-bool ClassLinker::WaitForInitializeClass(Class* klass, Thread* self, ObjectLock& lock)
+bool ClassLinker::WaitForInitializeClass(mirror::Class* klass, Thread* self, ObjectLock& lock)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
while (true) {
self->AssertNoPendingException();
@@ -2703,14 +2747,14 @@
// "interruptShouldThrow" was set), bail out.
if (self->IsExceptionPending()) {
WrapExceptionInInitializer();
- klass->SetStatus(Class::kStatusError);
+ klass->SetStatus(mirror::Class::kStatusError);
return false;
}
// Spurious wakeup? Go back to waiting.
- if (klass->GetStatus() == Class::kStatusInitializing) {
+ if (klass->GetStatus() == mirror::Class::kStatusInitializing) {
continue;
}
- if (klass->GetStatus() == Class::kStatusVerified && Runtime::Current()->IsCompiler()) {
+ if (klass->GetStatus() == mirror::Class::kStatusVerified && Runtime::Current()->IsCompiler()) {
// Compile time initialization failed.
return false;
}
@@ -2729,16 +2773,16 @@
LOG(FATAL) << "Not Reached" << PrettyClass(klass);
}
-bool ClassLinker::ValidateSuperClassDescriptors(const Class* klass) {
+bool ClassLinker::ValidateSuperClassDescriptors(const mirror::Class* klass) {
if (klass->IsInterface()) {
return true;
}
// begin with the methods local to the superclass
if (klass->HasSuperClass() &&
klass->GetClassLoader() != klass->GetSuperClass()->GetClassLoader()) {
- const Class* super = klass->GetSuperClass();
+ const mirror::Class* super = klass->GetSuperClass();
for (int i = super->GetVTable()->GetLength() - 1; i >= 0; --i) {
- const AbstractMethod* method = klass->GetVTable()->Get(i);
+ const mirror::AbstractMethod* method = klass->GetVTable()->Get(i);
if (method != super->GetVTable()->Get(i) &&
!IsSameMethodSignatureInDifferentClassContexts(method, super, klass)) {
ThrowLinkageError("Class %s method %s resolves differently in superclass %s",
@@ -2748,12 +2792,12 @@
}
}
}
- IfTable* iftable = klass->GetIfTable();
+ mirror::IfTable* iftable = klass->GetIfTable();
for (int32_t i = 0; i < klass->GetIfTableCount(); ++i) {
- Class* interface = iftable->GetInterface(i);
+ mirror::Class* interface = iftable->GetInterface(i);
if (klass->GetClassLoader() != interface->GetClassLoader()) {
for (size_t j = 0; j < interface->NumVirtualMethods(); ++j) {
- const AbstractMethod* method = iftable->GetMethodArray(i)->Get(j);
+ const mirror::AbstractMethod* method = iftable->GetMethodArray(i)->Get(j);
if (!IsSameMethodSignatureInDifferentClassContexts(method, interface,
method->GetDeclaringClass())) {
ThrowLinkageError("Class %s method %s resolves differently in interface %s",
@@ -2770,9 +2814,9 @@
// Returns true if classes referenced by the signature of the method are the
// same classes in klass1 as they are in klass2.
-bool ClassLinker::IsSameMethodSignatureInDifferentClassContexts(const AbstractMethod* method,
- const Class* klass1,
- const Class* klass2) {
+bool ClassLinker::IsSameMethodSignatureInDifferentClassContexts(const mirror::AbstractMethod* method,
+ const mirror::Class* klass1,
+ const mirror::Class* klass2) {
if (klass1 == klass2) {
return true;
}
@@ -2803,29 +2847,29 @@
// Returns true if the descriptor resolves to the same class in the context of klass1 and klass2.
bool ClassLinker::IsSameDescriptorInDifferentClassContexts(const char* descriptor,
- const Class* klass1,
- const Class* klass2) {
+ const mirror::Class* klass1,
+ const mirror::Class* klass2) {
CHECK(descriptor != NULL);
CHECK(klass1 != NULL);
CHECK(klass2 != NULL);
if (klass1 == klass2) {
return true;
}
- Class* found1 = FindClass(descriptor, klass1->GetClassLoader());
+ mirror::Class* found1 = FindClass(descriptor, klass1->GetClassLoader());
if (found1 == NULL) {
Thread::Current()->ClearException();
}
- Class* found2 = FindClass(descriptor, klass2->GetClassLoader());
+ mirror::Class* found2 = FindClass(descriptor, klass2->GetClassLoader());
if (found2 == NULL) {
Thread::Current()->ClearException();
}
return found1 == found2;
}
-bool ClassLinker::InitializeSuperClass(Class* klass, bool can_run_clinit, bool can_init_fields) {
+bool ClassLinker::InitializeSuperClass(mirror::Class* klass, bool can_run_clinit, bool can_init_fields) {
CHECK(klass != NULL);
if (!klass->IsInterface() && klass->HasSuperClass()) {
- Class* super_class = klass->GetSuperClass();
+ mirror::Class* super_class = klass->GetSuperClass();
if (!super_class->IsInitialized()) {
CHECK(!super_class->IsInterface());
// Must hold lock on object when initializing and setting status.
@@ -2835,11 +2879,11 @@
if (!super_initialized) {
if (!can_run_clinit) {
// Don't set status to error when we can't run <clinit>.
- CHECK_EQ(klass->GetStatus(), Class::kStatusInitializing) << PrettyClass(klass);
- klass->SetStatus(Class::kStatusVerified);
+ CHECK_EQ(klass->GetStatus(), mirror::Class::kStatusInitializing) << PrettyClass(klass);
+ klass->SetStatus(mirror::Class::kStatusVerified);
return false;
}
- klass->SetStatus(Class::kStatusError);
+ klass->SetStatus(mirror::Class::kStatusError);
klass->NotifyAll();
return false;
}
@@ -2848,7 +2892,7 @@
return true;
}
-bool ClassLinker::EnsureInitialized(Class* c, bool can_run_clinit, bool can_init_fields) {
+bool ClassLinker::EnsureInitialized(mirror::Class* c, bool can_run_clinit, bool can_init_fields) {
DCHECK(c != NULL);
if (c->IsInitialized()) {
return true;
@@ -2864,8 +2908,8 @@
}
void ClassLinker::ConstructFieldMap(const DexFile& dex_file, const DexFile::ClassDef& dex_class_def,
- Class* c, SafeMap<uint32_t, Field*>& field_map) {
- ClassLoader* cl = c->GetClassLoader();
+ mirror::Class* c, SafeMap<uint32_t, mirror::Field*>& field_map) {
+ mirror::ClassLoader* cl = c->GetClassLoader();
const byte* class_data = dex_file.GetClassData(dex_class_def);
ClassDataItemIterator it(dex_file, class_data);
for (size_t i = 0; it.HasNextStaticField(); i++, it.Next()) {
@@ -2873,12 +2917,12 @@
}
}
-bool ClassLinker::InitializeStaticFields(Class* klass) {
+bool ClassLinker::InitializeStaticFields(mirror::Class* klass) {
size_t num_static_fields = klass->NumStaticFields();
if (num_static_fields == 0) {
return false;
}
- DexCache* dex_cache = klass->GetDexCache();
+ mirror::DexCache* dex_cache = klass->GetDexCache();
// TODO: this seems like the wrong check. do we really want !IsPrimitive && !IsArray?
if (dex_cache == NULL) {
return false;
@@ -2892,7 +2936,7 @@
if (it.HasNext()) {
// We reordered the fields, so we need to be able to map the field indexes to the right fields.
- SafeMap<uint32_t, Field*> field_map;
+ SafeMap<uint32_t, mirror::Field*> field_map;
ConstructFieldMap(dex_file, *dex_class_def, klass, field_map);
for (size_t i = 0; it.HasNext(); i++, it.Next()) {
it.ReadValueToField(field_map.Get(i));
@@ -2902,8 +2946,9 @@
return false;
}
-bool ClassLinker::LinkClass(SirtRef<Class>& klass, ObjectArray<Class>* interfaces) {
- CHECK_EQ(Class::kStatusLoaded, klass->GetStatus());
+bool ClassLinker::LinkClass(SirtRef<mirror::Class>& klass,
+ mirror::ObjectArray<mirror::Class>* interfaces) {
+ CHECK_EQ(mirror::Class::kStatusLoaded, klass->GetStatus());
if (!LinkSuperClass(klass)) {
return false;
}
@@ -2918,19 +2963,19 @@
}
CreateReferenceInstanceOffsets(klass);
CreateReferenceStaticOffsets(klass);
- CHECK_EQ(Class::kStatusLoaded, klass->GetStatus());
- klass->SetStatus(Class::kStatusResolved);
+ CHECK_EQ(mirror::Class::kStatusLoaded, klass->GetStatus());
+ klass->SetStatus(mirror::Class::kStatusResolved);
return true;
}
-bool ClassLinker::LoadSuperAndInterfaces(SirtRef<Class>& klass, const DexFile& dex_file) {
- CHECK_EQ(Class::kStatusIdx, klass->GetStatus());
+bool ClassLinker::LoadSuperAndInterfaces(SirtRef<mirror::Class>& klass, const DexFile& dex_file) {
+ CHECK_EQ(mirror::Class::kStatusIdx, klass->GetStatus());
StringPiece descriptor(dex_file.StringByTypeIdx(klass->GetDexTypeIndex()));
const DexFile::ClassDef* class_def = dex_file.FindClassDef(descriptor);
CHECK(class_def != NULL);
uint16_t super_class_idx = class_def->superclass_idx_;
if (super_class_idx != DexFile::kDexNoIndex16) {
- Class* super_class = ResolveType(dex_file, super_class_idx, klass.get());
+ mirror::Class* super_class = ResolveType(dex_file, super_class_idx, klass.get());
if (super_class == NULL) {
DCHECK(Thread::Current()->IsExceptionPending());
return false;
@@ -2949,7 +2994,7 @@
if (interfaces != NULL) {
for (size_t i = 0; i < interfaces->Size(); i++) {
uint16_t idx = interfaces->GetTypeItem(i).type_idx_;
- Class* interface = ResolveType(dex_file, idx, klass.get());
+ mirror::Class* interface = ResolveType(dex_file, idx, klass.get());
if (interface == NULL) {
DCHECK(Thread::Current()->IsExceptionPending());
return false;
@@ -2966,13 +3011,13 @@
}
}
// Mark the class as loaded.
- klass->SetStatus(Class::kStatusLoaded);
+ klass->SetStatus(mirror::Class::kStatusLoaded);
return true;
}
-bool ClassLinker::LinkSuperClass(SirtRef<Class>& klass) {
+bool ClassLinker::LinkSuperClass(SirtRef<mirror::Class>& klass) {
CHECK(!klass->IsPrimitive());
- Class* super = klass->GetSuperClass();
+ mirror::Class* super = klass->GetSuperClass();
if (klass.get() == GetClassRoot(kJavaLangObject)) {
if (super != NULL) {
Thread::Current()->ThrowNewExceptionF("Ljava/lang/ClassFormatError;",
@@ -3031,7 +3076,8 @@
}
// Populate the class vtable and itable. Compute return type indices.
-bool ClassLinker::LinkMethods(SirtRef<Class>& klass, ObjectArray<Class>* interfaces) {
+bool ClassLinker::LinkMethods(SirtRef<mirror::Class>& klass,
+ mirror::ObjectArray<mirror::Class>* interfaces) {
if (klass->IsInterface()) {
// No vtable.
size_t count = klass->NumVirtualMethods();
@@ -3051,24 +3097,24 @@
return true;
}
-bool ClassLinker::LinkVirtualMethods(SirtRef<Class>& klass) {
+bool ClassLinker::LinkVirtualMethods(SirtRef<mirror::Class>& klass) {
Thread* self = Thread::Current();
if (klass->HasSuperClass()) {
uint32_t max_count = klass->NumVirtualMethods() + klass->GetSuperClass()->GetVTable()->GetLength();
size_t actual_count = klass->GetSuperClass()->GetVTable()->GetLength();
CHECK_LE(actual_count, max_count);
// TODO: do not assign to the vtable field until it is fully constructed.
- SirtRef<ObjectArray<AbstractMethod> >
+ SirtRef<mirror::ObjectArray<mirror::AbstractMethod> >
vtable(self, klass->GetSuperClass()->GetVTable()->CopyOf(self, max_count));
// See if any of our virtual methods override the superclass.
MethodHelper local_mh(NULL, this);
MethodHelper super_mh(NULL, this);
for (size_t i = 0; i < klass->NumVirtualMethods(); ++i) {
- AbstractMethod* local_method = klass->GetVirtualMethodDuringLinking(i);
+ mirror::AbstractMethod* local_method = klass->GetVirtualMethodDuringLinking(i);
local_mh.ChangeMethod(local_method);
size_t j = 0;
for (; j < actual_count; ++j) {
- AbstractMethod* super_method = vtable->Get(j);
+ mirror::AbstractMethod* super_method = vtable->Get(j);
super_mh.ChangeMethod(super_method);
if (local_mh.HasSameNameAndSignature(&super_mh)) {
if (klass->CanAccessMember(super_method->GetDeclaringClass(), super_method->GetAccessFlags())) {
@@ -3112,10 +3158,10 @@
ThrowClassFormatError("Too many methods: %d", num_virtual_methods);
return false;
}
- SirtRef<ObjectArray<AbstractMethod> >
+ SirtRef<mirror::ObjectArray<mirror::AbstractMethod> >
vtable(self, AllocMethodArray(self, num_virtual_methods));
for (size_t i = 0; i < num_virtual_methods; ++i) {
- AbstractMethod* virtual_method = klass->GetVirtualMethodDuringLinking(i);
+ mirror::AbstractMethod* virtual_method = klass->GetVirtualMethodDuringLinking(i);
vtable->Set(i, virtual_method);
virtual_method->SetMethodIndex(i & 0xFFFF);
}
@@ -3124,7 +3170,8 @@
return true;
}
-bool ClassLinker::LinkInterfaceMethods(SirtRef<Class>& klass, ObjectArray<Class>* interfaces) {
+bool ClassLinker::LinkInterfaceMethods(SirtRef<mirror::Class>& klass,
+ mirror::ObjectArray<mirror::Class>* interfaces) {
size_t super_ifcount;
if (klass->HasSuperClass()) {
super_ifcount = klass->GetSuperClass()->GetIfTableCount();
@@ -3136,7 +3183,7 @@
uint32_t num_interfaces = interfaces == NULL ? kh.NumDirectInterfaces() : interfaces->GetLength();
ifcount += num_interfaces;
for (size_t i = 0; i < num_interfaces; i++) {
- Class* interface = interfaces == NULL ? kh.GetDirectInterface(i) : interfaces->Get(i);
+ mirror::Class* interface = interfaces == NULL ? kh.GetDirectInterface(i) : interfaces->Get(i);
ifcount += interface->GetIfTableCount();
}
if (ifcount == 0) {
@@ -3148,7 +3195,7 @@
if (ifcount == super_ifcount) {
// Class implements same interfaces as parent, are any of these not marker interfaces?
bool has_non_marker_interface = false;
- IfTable* super_iftable = klass->GetSuperClass()->GetIfTable();
+ mirror::IfTable* super_iftable = klass->GetSuperClass()->GetIfTable();
for (size_t i = 0; i < ifcount; ++i) {
if (super_iftable->GetMethodArrayCount(i) > 0) {
has_non_marker_interface = true;
@@ -3162,18 +3209,18 @@
}
}
Thread* self = Thread::Current();
- SirtRef<IfTable> iftable(self, AllocIfTable(self, ifcount));
+ SirtRef<mirror::IfTable> iftable(self, AllocIfTable(self, ifcount));
if (super_ifcount != 0) {
- IfTable* super_iftable = klass->GetSuperClass()->GetIfTable();
+ mirror::IfTable* super_iftable = klass->GetSuperClass()->GetIfTable();
for (size_t i = 0; i < super_ifcount; i++) {
- Class* super_interface = super_iftable->GetInterface(i);
+ mirror::Class* super_interface = super_iftable->GetInterface(i);
iftable->SetInterface(i, super_interface);
}
}
// Flatten the interface inheritance hierarchy.
size_t idx = super_ifcount;
for (size_t i = 0; i < num_interfaces; i++) {
- Class* interface = interfaces == NULL ? kh.GetDirectInterface(i) : interfaces->Get(i);
+ mirror::Class* interface = interfaces == NULL ? kh.GetDirectInterface(i) : interfaces->Get(i);
DCHECK(interface != NULL);
if (!interface->IsInterface()) {
ClassHelper ih(interface);
@@ -3186,7 +3233,7 @@
// Check if interface is already in iftable
bool duplicate = false;
for (size_t j = 0; j < idx; j++) {
- Class* existing_interface = iftable->GetInterface(j);
+ mirror::Class* existing_interface = iftable->GetInterface(j);
if (existing_interface == interface) {
duplicate = true;
break;
@@ -3197,10 +3244,10 @@
iftable->SetInterface(idx++, interface);
// Add this interface's non-duplicate super-interfaces.
for (int32_t j = 0; j < interface->GetIfTableCount(); j++) {
- Class* super_interface = interface->GetIfTable()->GetInterface(j);
+ mirror::Class* super_interface = interface->GetIfTable()->GetInterface(j);
bool super_duplicate = false;
for (size_t k = 0; k < idx; k++) {
- Class* existing_interface = iftable->GetInterface(k);
+ mirror::Class* existing_interface = iftable->GetInterface(k);
if (existing_interface == super_interface) {
super_duplicate = true;
break;
@@ -3214,7 +3261,7 @@
}
// Shrink iftable in case duplicates were found
if (idx < ifcount) {
- iftable.reset(down_cast<IfTable*>(iftable->CopyOf(self, idx * IfTable::kMax)));
+ iftable.reset(down_cast<mirror::IfTable*>(iftable->CopyOf(self, idx * mirror::IfTable::kMax)));
ifcount = idx;
} else {
CHECK_EQ(idx, ifcount);
@@ -3225,18 +3272,19 @@
if (klass->IsInterface()) {
return true;
}
- std::vector<AbstractMethod*> miranda_list;
+ std::vector<mirror::AbstractMethod*> miranda_list;
MethodHelper vtable_mh(NULL, this);
MethodHelper interface_mh(NULL, this);
for (size_t i = 0; i < ifcount; ++i) {
- Class* interface = iftable->GetInterface(i);
+ mirror::Class* interface = iftable->GetInterface(i);
size_t num_methods = interface->NumVirtualMethods();
if (num_methods > 0) {
- ObjectArray<AbstractMethod>* method_array = AllocMethodArray(self, num_methods);
+ mirror::ObjectArray<mirror::AbstractMethod>* method_array =
+ AllocMethodArray(self, num_methods);
iftable->SetMethodArray(i, method_array);
- ObjectArray<AbstractMethod>* vtable = klass->GetVTableDuringLinking();
+ mirror::ObjectArray<mirror::AbstractMethod>* vtable = klass->GetVTableDuringLinking();
for (size_t j = 0; j < interface->NumVirtualMethods(); ++j) {
- AbstractMethod* interface_method = interface->GetVirtualMethod(j);
+ mirror::AbstractMethod* interface_method = interface->GetVirtualMethod(j);
interface_mh.ChangeMethod(interface_method);
int32_t k;
// For each method listed in the interface's method list, find the
@@ -3248,7 +3296,7 @@
// those don't end up in the virtual method table, so it shouldn't
// matter which direction we go. We walk it backward anyway.)
for (k = vtable->GetLength() - 1; k >= 0; --k) {
- AbstractMethod* vtable_method = vtable->Get(k);
+ mirror::AbstractMethod* vtable_method = vtable->Get(k);
vtable_mh.ChangeMethod(vtable_method);
if (interface_mh.HasSameNameAndSignature(&vtable_mh)) {
if (!vtable_method->IsPublic()) {
@@ -3262,9 +3310,9 @@
}
}
if (k < 0) {
- SirtRef<AbstractMethod> miranda_method(self, NULL);
+ SirtRef<mirror::AbstractMethod> miranda_method(self, NULL);
for (size_t mir = 0; mir < miranda_list.size(); mir++) {
- AbstractMethod* mir_method = miranda_list[mir];
+ mirror::AbstractMethod* mir_method = miranda_list[mir];
vtable_mh.ChangeMethod(mir_method);
if (interface_mh.HasSameNameAndSignature(&vtable_mh)) {
miranda_method.reset(miranda_list[mir]);
@@ -3273,7 +3321,7 @@
}
if (miranda_method.get() == NULL) {
// point the interface table at a phantom slot
- miranda_method.reset(down_cast<AbstractMethod*>(interface_method->Clone(self)));
+ miranda_method.reset(down_cast<mirror::AbstractMethod*>(interface_method->Clone(self)));
miranda_list.push_back(miranda_method.get());
}
method_array->Set(j, miranda_method.get());
@@ -3288,13 +3336,14 @@
? AllocMethodArray(self, new_method_count)
: klass->GetVirtualMethods()->CopyOf(self, new_method_count));
- SirtRef<ObjectArray<AbstractMethod> > vtable(self, klass->GetVTableDuringLinking());
+ SirtRef<mirror::ObjectArray<mirror::AbstractMethod> >
+ vtable(self, klass->GetVTableDuringLinking());
CHECK(vtable.get() != NULL);
int old_vtable_count = vtable->GetLength();
int new_vtable_count = old_vtable_count + miranda_list.size();
vtable.reset(vtable->CopyOf(self, new_vtable_count));
for (size_t i = 0; i < miranda_list.size(); ++i) {
- AbstractMethod* method = miranda_list[i];
+ mirror::AbstractMethod* method = miranda_list[i];
// Leave the declaring class alone as type indices are relative to it
method->SetAccessFlags(method->GetAccessFlags() | kAccMiranda);
method->SetMethodIndex(0xFFFF & (old_vtable_count + i));
@@ -3305,7 +3354,7 @@
klass->SetVTable(vtable.get());
}
- ObjectArray<AbstractMethod>* vtable = klass->GetVTableDuringLinking();
+ mirror::ObjectArray<mirror::AbstractMethod>* vtable = klass->GetVTableDuringLinking();
for (int i = 0; i < vtable->GetLength(); ++i) {
CHECK(vtable->Get(i) != NULL);
}
@@ -3315,12 +3364,12 @@
return true;
}
-bool ClassLinker::LinkInstanceFields(SirtRef<Class>& klass) {
+bool ClassLinker::LinkInstanceFields(SirtRef<mirror::Class>& klass) {
CHECK(klass.get() != NULL);
return LinkFields(klass, false);
}
-bool ClassLinker::LinkStaticFields(SirtRef<Class>& klass) {
+bool ClassLinker::LinkStaticFields(SirtRef<mirror::Class>& klass) {
CHECK(klass.get() != NULL);
size_t allocated_class_size = klass->GetClassSize();
bool success = LinkFields(klass, true);
@@ -3333,7 +3382,8 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
: fh_(fh) {}
// No thread safety analysis as will be called from STL. Checked lock held in constructor.
- bool operator()(const Field* field1, const Field* field2) NO_THREAD_SAFETY_ANALYSIS {
+ bool operator()(const mirror::Field* field1, const mirror::Field* field2)
+ NO_THREAD_SAFETY_ANALYSIS {
// First come reference fields, then 64-bit, and finally 32-bit
fh_->ChangeField(field1);
Primitive::Type type1 = fh_->GetTypeAsPrimitiveType();
@@ -3360,11 +3410,11 @@
FieldHelper* fh_;
};
-bool ClassLinker::LinkFields(SirtRef<Class>& klass, bool is_static) {
+bool ClassLinker::LinkFields(SirtRef<mirror::Class>& klass, bool is_static) {
size_t num_fields =
is_static ? klass->NumStaticFields() : klass->NumInstanceFields();
- ObjectArray<Field>* fields =
+ mirror::ObjectArray<mirror::Field>* fields =
is_static ? klass->GetSFields() : klass->GetIFields();
// Initialize size and field_offset
@@ -3372,9 +3422,9 @@
MemberOffset field_offset(0);
if (is_static) {
size = klass->GetClassSize();
- field_offset = Class::FieldsOffset();
+ field_offset = mirror::Class::FieldsOffset();
} else {
- Class* super_class = klass->GetSuperClass();
+ mirror::Class* super_class = klass->GetSuperClass();
if (super_class != NULL) {
CHECK(super_class->IsResolved());
field_offset = MemberOffset(super_class->GetObjectSize());
@@ -3386,7 +3436,7 @@
// we want a relatively stable order so that adding new fields
// minimizes disruption of C++ versions such as Class and Method.
- std::deque<Field*> grouped_and_sorted_fields;
+ std::deque<mirror::Field*> grouped_and_sorted_fields;
for (size_t i = 0; i < num_fields; i++) {
grouped_and_sorted_fields.push_back(fields->Get(i));
}
@@ -3399,7 +3449,7 @@
size_t current_field = 0;
size_t num_reference_fields = 0;
for (; current_field < num_fields; current_field++) {
- Field* field = grouped_and_sorted_fields.front();
+ mirror::Field* field = grouped_and_sorted_fields.front();
fh.ChangeField(field);
Primitive::Type type = fh.GetTypeAsPrimitiveType();
bool isPrimitive = type != Primitive::kPrimNot;
@@ -3418,7 +3468,7 @@
// into place. If we can't find one, we'll have to pad it.
if (current_field != num_fields && !IsAligned<8>(field_offset.Uint32Value())) {
for (size_t i = 0; i < grouped_and_sorted_fields.size(); i++) {
- Field* field = grouped_and_sorted_fields[i];
+ mirror::Field* field = grouped_and_sorted_fields[i];
fh.ChangeField(field);
Primitive::Type type = fh.GetTypeAsPrimitiveType();
CHECK(type != Primitive::kPrimNot); // should only be working on primitive types
@@ -3439,7 +3489,7 @@
// finish assigning field offsets to all fields.
DCHECK(current_field == num_fields || IsAligned<8>(field_offset.Uint32Value()));
while (!grouped_and_sorted_fields.empty()) {
- Field* field = grouped_and_sorted_fields.front();
+ mirror::Field* field = grouped_and_sorted_fields.front();
grouped_and_sorted_fields.pop_front();
fh.ChangeField(field);
Primitive::Type type = fh.GetTypeAsPrimitiveType();
@@ -3469,12 +3519,13 @@
// non-reference fields, and all double-wide fields are aligned.
bool seen_non_ref = false;
for (size_t i = 0; i < num_fields; i++) {
- Field* field = fields->Get(i);
+ mirror::Field* field = fields->Get(i);
if (false) { // enable to debug field layout
LOG(INFO) << "LinkFields: " << (is_static ? "static" : "instance")
<< " class=" << PrettyClass(klass.get())
<< " field=" << PrettyField(field)
- << " offset=" << field->GetField32(MemberOffset(Field::OffsetOffset()), false);
+ << " offset=" << field->GetField32(MemberOffset(mirror::Field::OffsetOffset()),
+ false);
}
fh.ChangeField(field);
Primitive::Type type = fh.GetTypeAsPrimitiveType();
@@ -3512,9 +3563,9 @@
// Set the bitmap of reference offsets, refOffsets, from the ifields
// list.
-void ClassLinker::CreateReferenceInstanceOffsets(SirtRef<Class>& klass) {
+void ClassLinker::CreateReferenceInstanceOffsets(SirtRef<mirror::Class>& klass) {
uint32_t reference_offsets = 0;
- Class* super_class = klass->GetSuperClass();
+ mirror::Class* super_class = klass->GetSuperClass();
if (super_class != NULL) {
reference_offsets = super_class->GetReferenceInstanceOffsets();
// If our superclass overflowed, we don't stand a chance.
@@ -3526,23 +3577,23 @@
CreateReferenceOffsets(klass, false, reference_offsets);
}
-void ClassLinker::CreateReferenceStaticOffsets(SirtRef<Class>& klass) {
+void ClassLinker::CreateReferenceStaticOffsets(SirtRef<mirror::Class>& klass) {
CreateReferenceOffsets(klass, true, 0);
}
-void ClassLinker::CreateReferenceOffsets(SirtRef<Class>& klass, bool is_static,
+void ClassLinker::CreateReferenceOffsets(SirtRef<mirror::Class>& klass, bool is_static,
uint32_t reference_offsets) {
size_t num_reference_fields =
is_static ? klass->NumReferenceStaticFieldsDuringLinking()
: klass->NumReferenceInstanceFieldsDuringLinking();
- const ObjectArray<Field>* fields =
+ const mirror::ObjectArray<mirror::Field>* fields =
is_static ? klass->GetSFields() : klass->GetIFields();
// All of the fields that contain object references are guaranteed
// to be at the beginning of the fields list.
for (size_t i = 0; i < num_reference_fields; ++i) {
// Note that byte_offset is the offset from the beginning of
// object, not the offset into instance data
- const Field* field = fields->Get(i);
+ const mirror::Field* field = fields->Get(i);
MemberOffset byte_offset = field->GetOffsetDuringLinking();
CHECK_EQ(byte_offset.Uint32Value() & (CLASS_OFFSET_ALIGNMENT - 1), 0U);
if (CLASS_CAN_ENCODE_OFFSET(byte_offset.Uint32Value())) {
@@ -3562,27 +3613,27 @@
}
}
-String* ClassLinker::ResolveString(const DexFile& dex_file,
- uint32_t string_idx, DexCache* dex_cache) {
+mirror::String* ClassLinker::ResolveString(const DexFile& dex_file,
+ uint32_t string_idx, mirror::DexCache* dex_cache) {
DCHECK(dex_cache != NULL);
- String* resolved = dex_cache->GetResolvedString(string_idx);
+ mirror::String* resolved = dex_cache->GetResolvedString(string_idx);
if (resolved != NULL) {
return resolved;
}
const DexFile::StringId& string_id = dex_file.GetStringId(string_idx);
int32_t utf16_length = dex_file.GetStringLength(string_id);
const char* utf8_data = dex_file.GetStringData(string_id);
- String* string = intern_table_->InternStrong(utf16_length, utf8_data);
+ mirror::String* string = intern_table_->InternStrong(utf16_length, utf8_data);
dex_cache->SetResolvedString(string_idx, string);
return string;
}
-Class* ClassLinker::ResolveType(const DexFile& dex_file,
- uint16_t type_idx,
- DexCache* dex_cache,
- ClassLoader* class_loader) {
+mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file,
+ uint16_t type_idx,
+ mirror::DexCache* dex_cache,
+ mirror::ClassLoader* class_loader) {
DCHECK(dex_cache != NULL);
- Class* resolved = dex_cache->GetResolvedType(type_idx);
+ mirror::Class* resolved = dex_cache->GetResolvedType(type_idx);
if (resolved == NULL) {
const char* descriptor = dex_file.StringByTypeIdx(type_idx);
resolved = FindClass(descriptor, class_loader);
@@ -3604,21 +3655,21 @@
return resolved;
}
-AbstractMethod* ClassLinker::ResolveMethod(const DexFile& dex_file,
- uint32_t method_idx,
- DexCache* dex_cache,
- ClassLoader* class_loader,
- const AbstractMethod* referrer,
- InvokeType type) {
+mirror::AbstractMethod* ClassLinker::ResolveMethod(const DexFile& dex_file,
+ uint32_t method_idx,
+ mirror::DexCache* dex_cache,
+ mirror::ClassLoader* class_loader,
+ const mirror::AbstractMethod* referrer,
+ InvokeType type) {
DCHECK(dex_cache != NULL);
// Check for hit in the dex cache.
- AbstractMethod* resolved = dex_cache->GetResolvedMethod(method_idx);
+ mirror::AbstractMethod* resolved = dex_cache->GetResolvedMethod(method_idx);
if (resolved != NULL) {
return resolved;
}
// Fail, get the declaring class.
const DexFile::MethodId& method_id = dex_file.GetMethodId(method_idx);
- Class* klass = ResolveType(dex_file, method_id.class_idx_, dex_cache, class_loader);
+ mirror::Class* klass = ResolveType(dex_file, method_id.class_idx_, dex_cache, class_loader);
if (klass == NULL) {
DCHECK(Thread::Current()->IsExceptionPending());
return NULL;
@@ -3689,8 +3740,8 @@
// If we found something, check that it can be accessed by the referrer.
if (resolved != NULL && referrer != NULL) {
- Class* methods_class = resolved->GetDeclaringClass();
- Class* referring_class = referrer->GetDeclaringClass();
+ mirror::Class* methods_class = resolved->GetDeclaringClass();
+ mirror::Class* referring_class = referrer->GetDeclaringClass();
if (!referring_class->CanAccess(methods_class)) {
ThrowIllegalAccessErrorClassForMethodDispatch(referring_class, methods_class,
referrer, resolved, type);
@@ -3751,18 +3802,18 @@
}
}
-Field* ClassLinker::ResolveField(const DexFile& dex_file,
- uint32_t field_idx,
- DexCache* dex_cache,
- ClassLoader* class_loader,
- bool is_static) {
+mirror::Field* ClassLinker::ResolveField(const DexFile& dex_file,
+ uint32_t field_idx,
+ mirror::DexCache* dex_cache,
+ mirror::ClassLoader* class_loader,
+ bool is_static) {
DCHECK(dex_cache != NULL);
- Field* resolved = dex_cache->GetResolvedField(field_idx);
+ mirror::Field* resolved = dex_cache->GetResolvedField(field_idx);
if (resolved != NULL) {
return resolved;
}
const DexFile::FieldId& field_id = dex_file.GetFieldId(field_idx);
- Class* klass = ResolveType(dex_file, field_id.class_idx_, dex_cache, class_loader);
+ mirror::Class* klass = ResolveType(dex_file, field_id.class_idx_, dex_cache, class_loader);
if (klass == NULL) {
DCHECK(Thread::Current()->IsExceptionPending());
return NULL;
@@ -3791,17 +3842,17 @@
return resolved;
}
-Field* ClassLinker::ResolveFieldJLS(const DexFile& dex_file,
- uint32_t field_idx,
- DexCache* dex_cache,
- ClassLoader* class_loader) {
+mirror::Field* ClassLinker::ResolveFieldJLS(const DexFile& dex_file,
+ uint32_t field_idx,
+ mirror::DexCache* dex_cache,
+ mirror::ClassLoader* class_loader) {
DCHECK(dex_cache != NULL);
- Field* resolved = dex_cache->GetResolvedField(field_idx);
+ mirror::Field* resolved = dex_cache->GetResolvedField(field_idx);
if (resolved != NULL) {
return resolved;
}
const DexFile::FieldId& field_id = dex_file.GetFieldId(field_idx);
- Class* klass = ResolveType(dex_file, field_id.class_idx_, dex_cache, class_loader);
+ mirror::Class* klass = ResolveType(dex_file, field_id.class_idx_, dex_cache, class_loader);
if (klass == NULL) {
DCHECK(Thread::Current()->IsExceptionPending());
return NULL;
@@ -3818,9 +3869,10 @@
return resolved;
}
-const char* ClassLinker::MethodShorty(uint32_t method_idx, AbstractMethod* referrer, uint32_t* length) {
- Class* declaring_class = referrer->GetDeclaringClass();
- DexCache* dex_cache = declaring_class->GetDexCache();
+const char* ClassLinker::MethodShorty(uint32_t method_idx, mirror::AbstractMethod* referrer,
+ uint32_t* length) {
+ mirror::Class* declaring_class = referrer->GetDeclaringClass();
+ mirror::DexCache* dex_cache = declaring_class->GetDexCache();
const DexFile& dex_file = *dex_cache->GetDexFile();
const DexFile::MethodId& method_id = dex_file.GetMethodId(method_idx);
return dex_file.GetMethodShorty(method_id, length);
@@ -3829,7 +3881,7 @@
void ClassLinker::DumpAllClasses(int flags) const {
// TODO: at the time this was written, it wasn't safe to call PrettyField with the ClassLinker
// lock held, because it might need to resolve a field's type, which would try to take the lock.
- std::vector<Class*> all_classes;
+ std::vector<mirror::Class*> all_classes;
{
MutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
typedef Table::const_iterator It; // TODO: C++0x auto
@@ -3865,7 +3917,7 @@
return dex_lock_.GetExclusiveOwnerTid();
}
-void ClassLinker::SetClassRoot(ClassRoot class_root, Class* klass) {
+void ClassLinker::SetClassRoot(ClassRoot class_root, mirror::Class* klass) {
DCHECK(!init_done_);
DCHECK(klass != NULL);
diff --git a/src/class_linker.h b/src/class_linker.h
index 09a43c5..3039d55 100644
--- a/src/class_linker.h
+++ b/src/class_linker.h
@@ -23,23 +23,26 @@
#include "base/macros.h"
#include "base/mutex.h"
-#include "dex_cache.h"
#include "dex_file.h"
#include "gtest/gtest.h"
-#include "heap.h"
+#include "root_visitor.h"
#include "oat_file.h"
-#include "object.h"
-#include "safe_map.h"
namespace art {
-
+namespace mirror {
class ClassLoader;
+class DexCache;
+class DexCacheTest_Open_Test;
+class IfTable;
+template<class T> class ObjectArray;
+class StackTraceElement;
+}
class ImageSpace;
class InternTable;
class ObjectLock;
template<class T> class SirtRef;
-typedef bool (ClassVisitor)(Class* c, void* arg);
+typedef bool (ClassVisitor)(mirror::Class* c, void* arg);
class ClassLinker {
public:
@@ -56,33 +59,33 @@
// Finds a class by its descriptor, loading it if necessary.
// If class_loader is null, searches boot_class_path_.
- Class* FindClass(const char* descriptor, ClassLoader* class_loader)
+ mirror::Class* FindClass(const char* descriptor, mirror::ClassLoader* class_loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- Class* FindSystemClass(const char* descriptor)
+ mirror::Class* FindSystemClass(const char* descriptor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Define a new class based on a ClassDef from a DexFile
- Class* DefineClass(const StringPiece& descriptor, ClassLoader* class_loader,
- const DexFile& dex_file, const DexFile::ClassDef& dex_class_def)
+ mirror::Class* DefineClass(const StringPiece& descriptor, mirror::ClassLoader* class_loader,
+ const DexFile& dex_file, const DexFile::ClassDef& dex_class_def)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Finds a class by its descriptor, returning NULL if it wasn't loaded
// by the given 'class_loader'.
- Class* LookupClass(const char* descriptor, const ClassLoader* class_loader)
+ mirror::Class* LookupClass(const char* descriptor, const mirror::ClassLoader* class_loader)
LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Finds all the classes with the given descriptor, regardless of ClassLoader.
- void LookupClasses(const char* descriptor, std::vector<Class*>& classes)
+ void LookupClasses(const char* descriptor, std::vector<mirror::Class*>& classes)
LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- Class* FindPrimitiveClass(char type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Class* FindPrimitiveClass(char type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// General class unloading is not supported; this is used to prune
// unwanted classes during image writing.
- bool RemoveClass(const char* descriptor, const ClassLoader* class_loader)
+ bool RemoveClass(const char* descriptor, const mirror::ClassLoader* class_loader)
LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -98,27 +101,20 @@
// Resolve a String with the given index from the DexFile, storing the
// result in the DexCache. The referrer is used to identify the
// target DexCache and ClassLoader to use for resolution.
- String* ResolveString(uint32_t string_idx, const AbstractMethod* referrer)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- String* resolved_string = referrer->GetDexCacheStrings()->Get(string_idx);
- if (UNLIKELY(resolved_string == NULL)) {
- Class* declaring_class = referrer->GetDeclaringClass();
- DexCache* dex_cache = declaring_class->GetDexCache();
- const DexFile& dex_file = *dex_cache->GetDexFile();
- resolved_string = ResolveString(dex_file, string_idx, dex_cache);
- }
- return resolved_string;
- }
+ mirror::String* ResolveString(uint32_t string_idx, const mirror::AbstractMethod* referrer)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Resolve a String with the given index from the DexFile, storing the
// result in the DexCache.
- String* ResolveString(const DexFile& dex_file, uint32_t string_idx, DexCache* dex_cache)
+ mirror::String* ResolveString(const DexFile& dex_file, uint32_t string_idx,
+ mirror::DexCache* dex_cache)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Resolve a Type with the given index from the DexFile, storing the
// result in the DexCache. The referrer is used to identify the
// target DexCache and ClassLoader to use for resolution.
- Class* ResolveType(const DexFile& dex_file, uint16_t type_idx, const Class* referrer)
+ mirror::Class* ResolveType(const DexFile& dex_file, uint16_t type_idx,
+ const mirror::Class* referrer)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return ResolveType(dex_file,
type_idx,
@@ -129,40 +125,20 @@
// Resolve a Type with the given index from the DexFile, storing the
// result in the DexCache. The referrer is used to identify the
// target DexCache and ClassLoader to use for resolution.
- Class* ResolveType(uint16_t type_idx, const AbstractMethod* referrer)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Class* resolved_type = referrer->GetDexCacheResolvedTypes()->Get(type_idx);
- if (UNLIKELY(resolved_type == NULL)) {
- Class* declaring_class = referrer->GetDeclaringClass();
- DexCache* dex_cache = declaring_class->GetDexCache();
- ClassLoader* class_loader = declaring_class->GetClassLoader();
- const DexFile& dex_file = *dex_cache->GetDexFile();
- resolved_type = ResolveType(dex_file, type_idx, dex_cache, class_loader);
- }
- return resolved_type;
- }
+ mirror::Class* ResolveType(uint16_t type_idx, const mirror::AbstractMethod* referrer)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- Class* ResolveType(uint16_t type_idx, const Field* referrer)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Class* declaring_class = referrer->GetDeclaringClass();
- DexCache* dex_cache = declaring_class->GetDexCache();
- Class* resolved_type = dex_cache->GetResolvedType(type_idx);
- if (UNLIKELY(resolved_type == NULL)) {
- ClassLoader* class_loader = declaring_class->GetClassLoader();
- const DexFile& dex_file = *dex_cache->GetDexFile();
- resolved_type = ResolveType(dex_file, type_idx, dex_cache, class_loader);
- }
- return resolved_type;
- }
+ mirror::Class* ResolveType(uint16_t type_idx, const mirror::Field* referrer)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Resolve a type with the given ID from the DexFile, storing the
// result in DexCache. The ClassLoader is used to search for the
// type, since it may be referenced from but not contained within
// the given DexFile.
- Class* ResolveType(const DexFile& dex_file,
- uint16_t type_idx,
- DexCache* dex_cache,
- ClassLoader* class_loader)
+ mirror::Class* ResolveType(const DexFile& dex_file,
+ uint16_t type_idx,
+ mirror::DexCache* dex_cache,
+ mirror::ClassLoader* class_loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Resolve a method with a given ID from the DexFile, storing the
@@ -170,50 +146,31 @@
// in ResolveType. What is unique is the method type argument which
// is used to determine if this method is a direct, static, or
// virtual method.
- AbstractMethod* ResolveMethod(const DexFile& dex_file,
- uint32_t method_idx,
- DexCache* dex_cache,
- ClassLoader* class_loader,
- const AbstractMethod* referrer,
- InvokeType type)
+ mirror::AbstractMethod* ResolveMethod(const DexFile& dex_file,
+ uint32_t method_idx,
+ mirror::DexCache* dex_cache,
+ mirror::ClassLoader* class_loader,
+ const mirror::AbstractMethod* referrer,
+ InvokeType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- AbstractMethod* ResolveMethod(uint32_t method_idx, const AbstractMethod* referrer, InvokeType type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- AbstractMethod* resolved_method = referrer->GetDexCacheResolvedMethods()->Get(method_idx);
- if (UNLIKELY(resolved_method == NULL || resolved_method->IsRuntimeMethod())) {
- Class* declaring_class = referrer->GetDeclaringClass();
- DexCache* dex_cache = declaring_class->GetDexCache();
- ClassLoader* class_loader = declaring_class->GetClassLoader();
- const DexFile& dex_file = *dex_cache->GetDexFile();
- resolved_method = ResolveMethod(dex_file, method_idx, dex_cache, class_loader, referrer, type);
- }
- return resolved_method;
- }
+ mirror::AbstractMethod* ResolveMethod(uint32_t method_idx, const mirror::AbstractMethod* referrer,
+ InvokeType type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- Field* ResolveField(uint32_t field_idx, const AbstractMethod* referrer, bool is_static)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Field* resolved_field =
- referrer->GetDeclaringClass()->GetDexCache()->GetResolvedField(field_idx);
- if (UNLIKELY(resolved_field == NULL)) {
- Class* declaring_class = referrer->GetDeclaringClass();
- DexCache* dex_cache = declaring_class->GetDexCache();
- ClassLoader* class_loader = declaring_class->GetClassLoader();
- const DexFile& dex_file = *dex_cache->GetDexFile();
- resolved_field = ResolveField(dex_file, field_idx, dex_cache, class_loader, is_static);
- }
- return resolved_field;
- }
+ mirror::Field* ResolveField(uint32_t field_idx, const mirror::AbstractMethod* referrer,
+ bool is_static)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Resolve a field with a given ID from the DexFile, storing the
// result in DexCache. The ClassLinker and ClassLoader are used as
// in ResolveType. What is unique is the is_static argument which is
// used to determine if we are resolving a static or non-static
// field.
- Field* ResolveField(const DexFile& dex_file,
+ mirror::Field* ResolveField(const DexFile& dex_file,
uint32_t field_idx,
- DexCache* dex_cache,
- ClassLoader* class_loader,
+ mirror::DexCache* dex_cache,
+ mirror::ClassLoader* class_loader,
bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -221,20 +178,20 @@
// result in DexCache. The ClassLinker and ClassLoader are used as
// in ResolveType. No is_static argument is provided so that Java
// field resolution semantics are followed.
- Field* ResolveFieldJLS(const DexFile& dex_file,
- uint32_t field_idx,
- DexCache* dex_cache,
- ClassLoader* class_loader)
+ mirror::Field* ResolveFieldJLS(const DexFile& dex_file,
+ uint32_t field_idx,
+ mirror::DexCache* dex_cache,
+ mirror::ClassLoader* class_loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get shorty from method index without resolution. Used to do handlerization.
- const char* MethodShorty(uint32_t method_idx, AbstractMethod* referrer, uint32_t* length)
+ const char* MethodShorty(uint32_t method_idx, mirror::AbstractMethod* referrer, uint32_t* length)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Returns true on success, false if there's an exception pending.
// can_run_clinit=false allows the compiler to attempt to init a class,
// given the restriction that no <clinit> execution is possible.
- bool EnsureInitialized(Class* c, bool can_run_clinit, bool can_init_fields)
+ bool EnsureInitialized(mirror::Class* c, bool can_run_clinit, bool can_init_fields)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Initializes classes that have instances in the image but that have
@@ -244,7 +201,7 @@
void RegisterDexFile(const DexFile& dex_file)
LOCKS_EXCLUDED(dex_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void RegisterDexFile(const DexFile& dex_file, SirtRef<DexCache>& dex_cache)
+ void RegisterDexFile(const DexFile& dex_file, SirtRef<mirror::DexCache>& dex_cache)
LOCKS_EXCLUDED(dex_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -262,15 +219,15 @@
void VisitClassesWithoutClassesLock(ClassVisitor* visitor, void* arg) const
LOCKS_EXCLUDED(Locks::classlinker_classes_lock_);
- void VisitRoots(Heap::RootVisitor* visitor, void* arg)
+ void VisitRoots(RootVisitor* visitor, void* arg)
LOCKS_EXCLUDED(Locks::classlinker_classes_lock_, dex_lock_);
- DexCache* FindDexCache(const DexFile& dex_file) const
+ mirror::DexCache* FindDexCache(const DexFile& dex_file) const
LOCKS_EXCLUDED(dex_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsDexFileRegistered(const DexFile& dex_file) const
LOCKS_EXCLUDED(dex_lock_);
- void FixupDexCaches(AbstractMethod* resolution_method) const
+ void FixupDexCaches(mirror::AbstractMethod* resolution_method) const
LOCKS_EXCLUDED(dex_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -310,66 +267,54 @@
// TODO: replace this with multiple methods that allocate the correct managed type.
template <class T>
- ObjectArray<T>* AllocObjectArray(Thread* self, size_t length)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return ObjectArray<T>::Alloc(self, GetClassRoot(kObjectArrayClass), length);
- }
-
- ObjectArray<Class>* AllocClassArray(Thread* self, size_t length)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return ObjectArray<Class>::Alloc(self, GetClassRoot(kClassArrayClass), length);
- }
-
- ObjectArray<String>* AllocStringArray(Thread* self, size_t length)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return ObjectArray<String>::Alloc(self, GetClassRoot(kJavaLangStringArrayClass), length);
- }
-
- ObjectArray<AbstractMethod>* AllocAbstractMethodArray(Thread* self, size_t length)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return ObjectArray<AbstractMethod>::Alloc(self,
- GetClassRoot(kJavaLangReflectAbstractMethodArrayClass), length);
- }
-
- ObjectArray<AbstractMethod>* AllocMethodArray(Thread* self, size_t length)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return ObjectArray<AbstractMethod>::Alloc(self,
- GetClassRoot(kJavaLangReflectMethodArrayClass), length);
- }
-
- IfTable* AllocIfTable(Thread* self, size_t ifcount) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return down_cast<IfTable*>(
- IfTable::Alloc(self, GetClassRoot(kObjectArrayClass), ifcount * IfTable::kMax));
- }
-
- ObjectArray<Field>* AllocFieldArray(Thread* self, size_t length)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return ObjectArray<Field>::Alloc(self, GetClassRoot(kJavaLangReflectFieldArrayClass), length);
- }
-
- ObjectArray<StackTraceElement>* AllocStackTraceElementArray(Thread* self, size_t length)
+ mirror::ObjectArray<T>* AllocObjectArray(Thread* self, size_t length)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void VerifyClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool VerifyClassUsingOatFile(const DexFile& dex_file, Class* klass,
- Class::Status& oat_file_class_status)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void ResolveClassExceptionHandlerTypes(const DexFile& dex_file, Class* klass)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void ResolveMethodExceptionHandlerTypes(const DexFile& dex_file, AbstractMethod* klass)
+ mirror::ObjectArray<mirror::Class>* AllocClassArray(Thread* self, size_t length)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- Class* CreateProxyClass(String* name, ObjectArray<Class>* interfaces, ClassLoader* loader,
- ObjectArray<AbstractMethod>* methods, ObjectArray<ObjectArray<Class> >* throws)
+ mirror::ObjectArray<mirror::String>* AllocStringArray(Thread* self, size_t length)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- std::string GetDescriptorForProxy(const Class* proxy_class)
+
+ mirror::ObjectArray<mirror::AbstractMethod>* AllocAbstractMethodArray(Thread* self, size_t length)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- AbstractMethod* FindMethodForProxy(const Class* proxy_class, const AbstractMethod* proxy_method)
+
+ mirror::ObjectArray<mirror::AbstractMethod>* AllocMethodArray(Thread* self, size_t length)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ mirror::IfTable* AllocIfTable(Thread* self, size_t ifcount)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ mirror::ObjectArray<mirror::Field>* AllocFieldArray(Thread* self, size_t length)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ mirror::ObjectArray<mirror::StackTraceElement>* AllocStackTraceElementArray(Thread* self,
+ size_t length)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void VerifyClass(mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool VerifyClassUsingOatFile(const DexFile& dex_file, mirror::Class* klass,
+ mirror::Class::Status& oat_file_class_status)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void ResolveClassExceptionHandlerTypes(const DexFile& dex_file, mirror::Class* klass)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void ResolveMethodExceptionHandlerTypes(const DexFile& dex_file, mirror::AbstractMethod* klass)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ mirror::Class* CreateProxyClass(mirror::String* name, mirror::ObjectArray<mirror::Class>* interfaces,
+ mirror::ClassLoader* loader,
+ mirror::ObjectArray<mirror::AbstractMethod>* methods,
+ mirror::ObjectArray<mirror::ObjectArray<mirror::Class> >* throws)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string GetDescriptorForProxy(const mirror::Class* proxy_class)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::AbstractMethod* FindMethodForProxy(const mirror::Class* proxy_class,
+ const mirror::AbstractMethod* proxy_method)
LOCKS_EXCLUDED(dex_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get the oat code for a method when its class isn't yet initialized
- const void* GetOatCodeFor(const AbstractMethod* method)
+ const void* GetOatCodeFor(const mirror::AbstractMethod* method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get the oat code for a method from a method index.
@@ -389,7 +334,7 @@
private:
explicit ClassLinker(InternTable*);
- const OatFile::OatMethod GetOatMethodFor(const AbstractMethod* method)
+ const OatFile::OatMethod GetOatMethodFor(const mirror::AbstractMethod* method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Initialize class linker by bootstrapping from dex files
@@ -401,43 +346,41 @@
OatFile* OpenOat(const ImageSpace* space)
LOCKS_EXCLUDED(dex_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static void InitFromImageCallback(Object* obj, void* arg)
+ static void InitFromImageCallback(mirror::Object* obj, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void FinishInit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// For early bootstrapping by Init
- Class* AllocClass(Thread* self, Class* java_lang_Class, size_t class_size)
+ mirror::Class* AllocClass(Thread* self, mirror::Class* java_lang_Class, size_t class_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Alloc* convenience functions to avoid needing to pass in Class*
+ // Alloc* convenience functions to avoid needing to pass in mirror::Class*
// values that are known to the ClassLinker such as
// kObjectArrayClass and kJavaLangString etc.
- Class* AllocClass(Thread* self, size_t class_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- DexCache* AllocDexCache(Thread* self, const DexFile& dex_file)
+ mirror::Class* AllocClass(Thread* self, size_t class_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::DexCache* AllocDexCache(Thread* self, const DexFile& dex_file)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- Field* AllocField(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- Method* AllocMethod(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- Constructor* AllocConstructor(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Field* AllocField(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Method* AllocMethod(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Constructor* AllocConstructor(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- Class* CreatePrimitiveClass(Thread* self, Primitive::Type type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return InitializePrimitiveClass(AllocClass(self, sizeof(Class)), type);
- }
- Class* InitializePrimitiveClass(Class* primitive_class, Primitive::Type type)
+ mirror::Class* CreatePrimitiveClass(Thread* self, Primitive::Type type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Class* InitializePrimitiveClass(mirror::Class* primitive_class, Primitive::Type type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- Class* CreateArrayClass(const std::string& descriptor, ClassLoader* class_loader)
+ mirror::Class* CreateArrayClass(const std::string& descriptor, mirror::ClassLoader* class_loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void AppendToBootClassPath(const DexFile& dex_file)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void AppendToBootClassPath(const DexFile& dex_file, SirtRef<DexCache>& dex_cache)
+ void AppendToBootClassPath(const DexFile& dex_file, SirtRef<mirror::DexCache>& dex_cache)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void ConstructFieldMap(const DexFile& dex_file, const DexFile::ClassDef& dex_class_def,
- Class* c, SafeMap<uint32_t, Field*>& field_map)
+ mirror::Class* c, SafeMap<uint32_t, mirror::Field*>& field_map)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
size_t SizeOfClass(const DexFile& dex_file,
@@ -445,18 +388,20 @@
void LoadClass(const DexFile& dex_file,
const DexFile::ClassDef& dex_class_def,
- SirtRef<Class>& klass,
- ClassLoader* class_loader)
+ SirtRef<mirror::Class>& klass,
+ mirror::ClassLoader* class_loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void LoadField(const DexFile& dex_file, const ClassDataItemIterator& it, SirtRef<Class>& klass,
- SirtRef<Field>& dst) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void LoadField(const DexFile& dex_file, const ClassDataItemIterator& it,
+ SirtRef<mirror::Class>& klass, SirtRef<mirror::Field>& dst)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- AbstractMethod* LoadMethod(Thread* self, const DexFile& dex_file,
- const ClassDataItemIterator& dex_method,
- SirtRef<Class>& klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::AbstractMethod* LoadMethod(Thread* self, const DexFile& dex_file,
+ const ClassDataItemIterator& dex_method,
+ SirtRef<mirror::Class>& klass)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void FixupStaticTrampolines(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void FixupStaticTrampolines(mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Finds the associated oat class for a dex_file and descriptor
const OatFile::OatClass* GetOatClass(const DexFile& dex_file, const char* descriptor);
@@ -464,74 +409,75 @@
// Attempts to insert a class into a class table. Returns NULL if
// the class was inserted, otherwise returns an existing class with
// the same descriptor and ClassLoader.
- Class* InsertClass(const StringPiece& descriptor, Class* klass, bool image_class)
+ mirror::Class* InsertClass(const StringPiece& descriptor, mirror::Class* klass, bool image_class)
LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void RegisterDexFileLocked(const DexFile& dex_file, SirtRef<DexCache>& dex_cache)
+ void RegisterDexFileLocked(const DexFile& dex_file, SirtRef<mirror::DexCache>& dex_cache)
EXCLUSIVE_LOCKS_REQUIRED(dex_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsDexFileRegisteredLocked(const DexFile& dex_file) const EXCLUSIVE_LOCKS_REQUIRED(dex_lock_);
void RegisterOatFileLocked(const OatFile& oat_file) EXCLUSIVE_LOCKS_REQUIRED(dex_lock_)
EXCLUSIVE_LOCKS_REQUIRED(dex_lock_);
- bool InitializeClass(Class* klass, bool can_run_clinit, bool can_init_statics)
+ bool InitializeClass(mirror::Class* klass, bool can_run_clinit, bool can_init_statics)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool WaitForInitializeClass(Class* klass, Thread* self, ObjectLock& lock);
- bool ValidateSuperClassDescriptors(const Class* klass)
+ bool WaitForInitializeClass(mirror::Class* klass, Thread* self, ObjectLock& lock);
+ bool ValidateSuperClassDescriptors(const mirror::Class* klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool InitializeSuperClass(Class* klass, bool can_run_clinit, bool can_init_fields)
+ bool InitializeSuperClass(mirror::Class* klass, bool can_run_clinit, bool can_init_fields)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Initialize static fields, returns true if fields were initialized.
- bool InitializeStaticFields(Class* klass)
+ bool InitializeStaticFields(mirror::Class* klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsSameDescriptorInDifferentClassContexts(const char* descriptor,
- const Class* klass1,
- const Class* klass2)
+ const mirror::Class* klass1,
+ const mirror::Class* klass2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsSameMethodSignatureInDifferentClassContexts(const AbstractMethod* descriptor,
- const Class* klass1,
- const Class* klass2)
+ bool IsSameMethodSignatureInDifferentClassContexts(const mirror::AbstractMethod* method,
+ const mirror::Class* klass1,
+ const mirror::Class* klass2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool LinkClass(SirtRef<Class>& klass, ObjectArray<Class>* interfaces)
+ bool LinkClass(SirtRef<mirror::Class>& klass, mirror::ObjectArray<mirror::Class>* interfaces)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool LinkSuperClass(SirtRef<Class>& klass)
+ bool LinkSuperClass(SirtRef<mirror::Class>& klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool LoadSuperAndInterfaces(SirtRef<Class>& klass, const DexFile& dex_file)
+ bool LoadSuperAndInterfaces(SirtRef<mirror::Class>& klass, const DexFile& dex_file)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool LinkMethods(SirtRef<Class>& klass, ObjectArray<Class>* interfaces)
+ bool LinkMethods(SirtRef<mirror::Class>& klass, mirror::ObjectArray<mirror::Class>* interfaces)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool LinkVirtualMethods(SirtRef<Class>& klass)
+ bool LinkVirtualMethods(SirtRef<mirror::Class>& klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool LinkInterfaceMethods(SirtRef<Class>& klass, ObjectArray<Class>* interfaces)
+ bool LinkInterfaceMethods(SirtRef<mirror::Class>& klass,
+ mirror::ObjectArray<mirror::Class>* interfaces)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool LinkStaticFields(SirtRef<Class>& klass)
+ bool LinkStaticFields(SirtRef<mirror::Class>& klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool LinkInstanceFields(SirtRef<Class>& klass)
+ bool LinkInstanceFields(SirtRef<mirror::Class>& klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool LinkFields(SirtRef<Class>& klass, bool is_static)
+ bool LinkFields(SirtRef<mirror::Class>& klass, bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void CreateReferenceInstanceOffsets(SirtRef<Class>& klass)
+ void CreateReferenceInstanceOffsets(SirtRef<mirror::Class>& klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void CreateReferenceStaticOffsets(SirtRef<Class>& klass)
+ void CreateReferenceStaticOffsets(SirtRef<mirror::Class>& klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void CreateReferenceOffsets(SirtRef<Class>& klass, bool is_static,
+ void CreateReferenceOffsets(SirtRef<mirror::Class>& klass, bool is_static,
uint32_t reference_offsets)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// For use by ImageWriter to find DexCaches for its roots
- const std::vector<DexCache*>& GetDexCaches() {
+ const std::vector<mirror::DexCache*>& GetDexCaches() {
return dex_caches_;
}
@@ -547,28 +493,29 @@
EXCLUSIVE_LOCKS_REQUIRED(dex_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- AbstractMethod* CreateProxyConstructor(Thread* self, SirtRef<Class>& klass, Class* proxy_class)
+ mirror::AbstractMethod* CreateProxyConstructor(Thread* self, SirtRef<mirror::Class>& klass,
+ mirror::Class* proxy_class)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- AbstractMethod* CreateProxyMethod(Thread* self, SirtRef<Class>& klass,
- SirtRef<AbstractMethod>& prototype)
+ mirror::AbstractMethod* CreateProxyMethod(Thread* self, SirtRef<mirror::Class>& klass,
+ SirtRef<mirror::AbstractMethod>& prototype)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
std::vector<const DexFile*> boot_class_path_;
mutable Mutex dex_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
- std::vector<DexCache*> dex_caches_ GUARDED_BY(dex_lock_);
+ std::vector<mirror::DexCache*> dex_caches_ GUARDED_BY(dex_lock_);
std::vector<const OatFile*> oat_files_ GUARDED_BY(dex_lock_);
// multimap from a string hash code of a class descriptor to
- // Class* instances. Results should be compared for a matching
+ // mirror::Class* instances. Results should be compared for a matching
// Class::descriptor_ and Class::class_loader_.
- typedef std::multimap<size_t, Class*> Table;
- Table image_classes_ GUARDED_BY(Locks::classlinker_classes_lock_);
+ typedef std::multimap<size_t, mirror::Class*> Table;
+ Table image_classes_ GUARDED_BY(Locks::classlinker_classes_lock_);
Table classes_ GUARDED_BY(Locks::classlinker_classes_lock_);
- Class* LookupClassLocked(const char* descriptor, const ClassLoader* class_loader,
- size_t hash, const Table& classes)
+ mirror::Class* LookupClassLocked(const char* descriptor, const mirror::ClassLoader* class_loader,
+ size_t hash, const Table& classes)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::classlinker_classes_lock_);
@@ -615,20 +562,14 @@
kJavaLangStackTraceElementArrayClass,
kClassRootsMax,
};
- ObjectArray<Class>* class_roots_;
+ mirror::ObjectArray<mirror::Class>* class_roots_;
- Class* GetClassRoot(ClassRoot class_root)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(class_roots_ != NULL);
- Class* klass = class_roots_->Get(class_root);
- DCHECK(klass != NULL);
- return klass;
- }
+ mirror::Class* GetClassRoot(ClassRoot class_root) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetClassRoot(ClassRoot class_root, Class* klass)
+ void SetClassRoot(ClassRoot class_root, mirror::Class* klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ObjectArray<Class>* GetClassRoots() {
+ mirror::ObjectArray<mirror::Class>* GetClassRoots() {
DCHECK(class_roots_ != NULL);
return class_roots_;
}
@@ -641,7 +582,7 @@
return descriptor;
}
- IfTable* array_iftable_;
+ mirror::IfTable* array_iftable_;
bool init_done_;
bool is_dirty_;
@@ -652,7 +593,7 @@
friend class ImageWriter; // for GetClassRoots
friend class ObjectTest;
FRIEND_TEST(ClassLinkerTest, ClassRootDescriptors);
- FRIEND_TEST(DexCacheTest, Open);
+ FRIEND_TEST(mirror::DexCacheTest, Open);
FRIEND_TEST(ExceptionTest, FindExceptionHandler);
FRIEND_TEST(ObjectTest, AllocObjectArray);
DISALLOW_COPY_AND_ASSIGN(ClassLinker);
diff --git a/src/class_linker_test.cc b/src/class_linker_test.cc
index d32e91e..893e7a4 100644
--- a/src/class_linker_test.cc
+++ b/src/class_linker_test.cc
@@ -19,13 +19,24 @@
#include <string>
#include "UniquePtr.h"
+#include "class_linker-inl.h"
#include "common_test.h"
-#include "dex_cache.h"
#include "dex_file.h"
#include "heap.h"
+#include "mirror/class-inl.h"
+#include "mirror/dex_cache.h"
+#include "mirror/field-inl.h"
+#include "mirror/abstract_method.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
+#include "mirror/proxy.h"
+#include "mirror/stack_trace_element.h"
#include "runtime_support.h"
#include "sirt_ref.h"
+using namespace art::mirror;
+
namespace art {
class ClassLinkerTest : public CommonTest {
@@ -600,10 +611,10 @@
};
};
-struct MethodClassOffsets : public CheckOffsets<MethodClass> {
- MethodClassOffsets() : CheckOffsets<MethodClass>(true, "Ljava/lang/reflect/Method;") {
+struct MethodClassOffsets : public CheckOffsets<AbstractMethodClass> {
+ MethodClassOffsets() : CheckOffsets<AbstractMethodClass>(true, "Ljava/lang/reflect/Method;") {
// alphabetical references
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(MethodClass, ORDER_BY_SIGNATURE_), "ORDER_BY_SIGNATURE"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(AbstractMethodClass, ORDER_BY_SIGNATURE_), "ORDER_BY_SIGNATURE"));
};
};
diff --git a/src/common_test.h b/src/common_test.h
index 5a5479a..46a8309 100644
--- a/src/common_test.h
+++ b/src/common_test.h
@@ -26,12 +26,12 @@
#include "base/stringprintf.h"
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
-#include "class_loader.h"
#include "compiler.h"
#include "dex_file.h"
#include "gtest/gtest.h"
#include "heap.h"
#include "instruction_set.h"
+#include "mirror/class_loader.h"
#include "oat_file.h"
#include "object_utils.h"
#include "os.h"
@@ -171,7 +171,7 @@
class CommonTest : public testing::Test {
public:
- static void MakeExecutable(const ByteArray* code_array) {
+ static void MakeExecutable(const mirror::ByteArray* code_array) {
CHECK(code_array != NULL);
MakeExecutable(code_array->GetData(), code_array->GetLength());
}
@@ -189,7 +189,7 @@
const uint32_t* mapping_table,
const uint16_t* vmap_table,
const uint8_t* gc_map,
- const AbstractMethod::InvokeStub* invoke_stub) {
+ const mirror::AbstractMethod::InvokeStub* invoke_stub) {
return OatFile::OatMethod(NULL,
reinterpret_cast<uint32_t>(code),
frame_size_in_bytes,
@@ -205,7 +205,7 @@
);
}
- void MakeExecutable(AbstractMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void MakeExecutable(mirror::AbstractMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
CHECK(method != NULL);
MethodHelper mh(method);
@@ -215,8 +215,8 @@
const std::vector<uint8_t>& invoke_stub = compiled_invoke_stub->GetCode();
MakeExecutable(invoke_stub);
- const AbstractMethod::InvokeStub* method_invoke_stub =
- reinterpret_cast<const AbstractMethod::InvokeStub*>(
+ const mirror::AbstractMethod::InvokeStub* method_invoke_stub =
+ reinterpret_cast<const mirror::AbstractMethod::InvokeStub*>(
CompiledCode::CodePointer(&invoke_stub[0],
compiled_invoke_stub->GetInstructionSet()));
@@ -224,7 +224,7 @@
<< " invoke_stub=" << reinterpret_cast<void*>(method_invoke_stub);
if (!method->IsAbstract()) {
- const DexCache* dex_cache = method->GetDeclaringClass()->GetDexCache();
+ const mirror::DexCache* dex_cache = method->GetDeclaringClass()->GetDexCache();
const DexFile& dex_file = *dex_cache->GetDexFile();
const CompiledMethod* compiled_method =
compiler_->GetCompiledMethod(Compiler::MethodReference(&dex_file,
@@ -473,14 +473,14 @@
ScopedLocalRef<jobject> class_loader_local(soa.Env(),
soa.Env()->AllocObject(WellKnownClasses::dalvik_system_PathClassLoader));
jobject class_loader = soa.Env()->NewGlobalRef(class_loader_local.get());
- soa.Self()->SetClassLoaderOverride(soa.Decode<ClassLoader*>(class_loader_local.get()));
+ soa.Self()->SetClassLoaderOverride(soa.Decode<mirror::ClassLoader*>(class_loader_local.get()));
Runtime::Current()->SetCompileTimeClassPath(class_loader, class_path);
return class_loader;
}
- void CompileClass(ClassLoader* class_loader, const char* class_name) {
+ void CompileClass(mirror::ClassLoader* class_loader, const char* class_name) {
std::string class_descriptor(DotToDescriptor(class_name));
- Class* klass = class_linker_->FindClass(class_descriptor.c_str(), class_loader);
+ mirror::Class* klass = class_linker_->FindClass(class_descriptor.c_str(), class_loader);
CHECK(klass != NULL) << "Class not found " << class_name;
for (size_t i = 0; i < klass->NumDirectMethods(); i++) {
CompileMethod(klass->GetDirectMethod(i));
@@ -490,7 +490,7 @@
}
}
- void CompileMethod(AbstractMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void CompileMethod(mirror::AbstractMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
CHECK(method != NULL);
compiler_->CompileOne(method);
MakeExecutable(method);
@@ -498,29 +498,29 @@
MakeExecutable(runtime_->GetJniDlsymLookupStub());
}
- void CompileDirectMethod(ClassLoader* class_loader,
+ void CompileDirectMethod(mirror::ClassLoader* class_loader,
const char* class_name,
const char* method_name,
const char* signature)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::string class_descriptor(DotToDescriptor(class_name));
- Class* klass = class_linker_->FindClass(class_descriptor.c_str(), class_loader);
+ mirror::Class* klass = class_linker_->FindClass(class_descriptor.c_str(), class_loader);
CHECK(klass != NULL) << "Class not found " << class_name;
- AbstractMethod* method = klass->FindDirectMethod(method_name, signature);
+ mirror::AbstractMethod* method = klass->FindDirectMethod(method_name, signature);
CHECK(method != NULL) << "Direct method not found: "
<< class_name << "." << method_name << signature;
CompileMethod(method);
}
- void CompileVirtualMethod(ClassLoader* class_loader,
+ void CompileVirtualMethod(mirror::ClassLoader* class_loader,
const char* class_name,
const char* method_name,
const char* signature)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::string class_descriptor(DotToDescriptor(class_name));
- Class* klass = class_linker_->FindClass(class_descriptor.c_str(), class_loader);
+ mirror::Class* klass = class_linker_->FindClass(class_descriptor.c_str(), class_loader);
CHECK(klass != NULL) << "Class not found " << class_name;
- AbstractMethod* method = klass->FindVirtualMethod(method_name, signature);
+ mirror::AbstractMethod* method = klass->FindVirtualMethod(method_name, signature);
CHECK(method != NULL) << "Virtual method not found: "
<< class_name << "." << method_name << signature;
CompileMethod(method);
diff --git a/src/common_throws.cc b/src/common_throws.cc
index cefc4ab..734d544 100644
--- a/src/common_throws.cc
+++ b/src/common_throws.cc
@@ -17,8 +17,12 @@
#include "common_throws.h"
#include "base/logging.h"
+#include "class_linker-inl.h"
#include "dex_instruction.h"
#include "invoke_type.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
#include "object_utils.h"
#include "thread.h"
@@ -26,7 +30,7 @@
namespace art {
-static void AddReferrerLocation(std::ostream& os, const AbstractMethod* referrer)
+static void AddReferrerLocation(std::ostream& os, const mirror::AbstractMethod* referrer)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (referrer != NULL) {
ClassHelper kh(referrer->GetDeclaringClass());
@@ -37,7 +41,7 @@
}
}
-static void AddReferrerLocationFromClass(std::ostream& os, Class* referrer)
+static void AddReferrerLocationFromClass(std::ostream& os, mirror::Class* referrer)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (referrer != NULL) {
ClassHelper kh(referrer);
@@ -51,16 +55,16 @@
// NullPointerException
-void ThrowNullPointerExceptionForFieldAccess(Field* field, bool is_read) {
+void ThrowNullPointerExceptionForFieldAccess(mirror::Field* field, bool is_read) {
std::ostringstream msg;
msg << "Attempt to " << (is_read ? "read from" : "write to")
<< " field '" << PrettyField(field, true) << "' on a null object reference";
Thread::Current()->ThrowNewException("Ljava/lang/NullPointerException;", msg.str().c_str());
}
-void ThrowNullPointerExceptionForMethodAccess(AbstractMethod* caller, uint32_t method_idx,
+void ThrowNullPointerExceptionForMethodAccess(mirror::AbstractMethod* caller, uint32_t method_idx,
InvokeType type) {
- DexCache* dex_cache = caller->GetDeclaringClass()->GetDexCache();
+ mirror::DexCache* dex_cache = caller->GetDeclaringClass()->GetDexCache();
const DexFile& dex_file = *dex_cache->GetDexFile();
std::ostringstream msg;
msg << "Attempt to invoke " << type << " method '"
@@ -68,7 +72,7 @@
Thread::Current()->ThrowNewException("Ljava/lang/NullPointerException;", msg.str().c_str());
}
-void ThrowNullPointerExceptionFromDexPC(AbstractMethod* throw_method, uint32_t dex_pc) {
+void ThrowNullPointerExceptionFromDexPC(mirror::AbstractMethod* throw_method, uint32_t dex_pc) {
const DexFile::CodeItem* code = MethodHelper(throw_method).GetCodeItem();
CHECK_LT(dex_pc, code->insns_size_in_code_units_);
const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
@@ -93,7 +97,7 @@
case Instruction::IGET_BYTE:
case Instruction::IGET_CHAR:
case Instruction::IGET_SHORT: {
- Field* field =
+ mirror::Field* field =
Runtime::Current()->GetClassLinker()->ResolveField(dec_insn.vC, throw_method, false);
ThrowNullPointerExceptionForFieldAccess(field, true /* read */);
break;
@@ -105,7 +109,7 @@
case Instruction::IPUT_BYTE:
case Instruction::IPUT_CHAR:
case Instruction::IPUT_SHORT: {
- Field* field =
+ mirror::Field* field =
Runtime::Current()->GetClassLinker()->ResolveField(dec_insn.vC, throw_method, false);
ThrowNullPointerExceptionForFieldAccess(field, false /* write */);
break;
@@ -149,7 +153,7 @@
// IllegalAccessError
-void ThrowIllegalAccessErrorClass(Class* referrer, Class* accessed) {
+void ThrowIllegalAccessErrorClass(mirror::Class* referrer, mirror::Class* accessed) {
std::ostringstream msg;
msg << "Illegal class access: '" << PrettyDescriptor(referrer) << "' attempting to access '"
<< PrettyDescriptor(accessed) << "'";
@@ -157,9 +161,9 @@
Thread::Current()->ThrowNewException("Ljava/lang/IllegalAccessError;", msg.str().c_str());
}
-void ThrowIllegalAccessErrorClassForMethodDispatch(Class* referrer, Class* accessed,
- const AbstractMethod* caller,
- const AbstractMethod* called,
+void ThrowIllegalAccessErrorClassForMethodDispatch(mirror::Class* referrer, mirror::Class* accessed,
+ const mirror::AbstractMethod* caller,
+ const mirror::AbstractMethod* called,
InvokeType type) {
std::ostringstream msg;
msg << "Illegal class access ('" << PrettyDescriptor(referrer) << "' attempting to access '"
@@ -169,7 +173,7 @@
Thread::Current()->ThrowNewException("Ljava/lang/IllegalAccessError;", msg.str().c_str());
}
-void ThrowIllegalAccessErrorMethod(Class* referrer, AbstractMethod* accessed) {
+void ThrowIllegalAccessErrorMethod(mirror::Class* referrer, mirror::AbstractMethod* accessed) {
std::ostringstream msg;
msg << "Method '" << PrettyMethod(accessed) << "' is inaccessible to class '"
<< PrettyDescriptor(referrer) << "'";
@@ -177,7 +181,7 @@
Thread::Current()->ThrowNewException("Ljava/lang/IllegalAccessError;", msg.str().c_str());
}
-void ThrowIllegalAccessErrorField(Class* referrer, Field* accessed) {
+void ThrowIllegalAccessErrorField(mirror::Class* referrer, mirror::Field* accessed) {
std::ostringstream msg;
msg << "Field '" << PrettyField(accessed, false) << "' is inaccessible to class '"
<< PrettyDescriptor(referrer) << "'";
@@ -185,7 +189,8 @@
Thread::Current()->ThrowNewException("Ljava/lang/IllegalAccessError;", msg.str().c_str());
}
-void ThrowIllegalAccessErrorFinalField(const AbstractMethod* referrer, Field* accessed) {
+void ThrowIllegalAccessErrorFinalField(const mirror::AbstractMethod* referrer,
+ mirror::Field* accessed) {
std::ostringstream msg;
msg << "Final field '" << PrettyField(accessed, false) << "' cannot be written to by method '"
<< PrettyMethod(referrer) << "'";
@@ -196,7 +201,8 @@
// IncompatibleClassChangeError
void ThrowIncompatibleClassChangeError(InvokeType expected_type, InvokeType found_type,
- AbstractMethod* method, const AbstractMethod* referrer) {
+ mirror::AbstractMethod* method,
+ const mirror::AbstractMethod* referrer) {
std::ostringstream msg;
msg << "The method '" << PrettyMethod(method) << "' was expected to be of type "
<< expected_type << " but instead was found to be of type " << found_type;
@@ -205,9 +211,9 @@
msg.str().c_str());
}
-void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(const AbstractMethod* interface_method,
- Object* this_object,
- const AbstractMethod* referrer) {
+void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(const mirror::AbstractMethod* interface_method,
+ mirror::Object* this_object,
+ const mirror::AbstractMethod* referrer) {
// Referrer is calling interface_method on this_object, however, the interface_method isn't
// implemented by this_object.
CHECK(this_object != NULL);
@@ -221,8 +227,8 @@
msg.str().c_str());
}
-void ThrowIncompatibleClassChangeErrorField(const Field* resolved_field, bool is_static,
- const AbstractMethod* referrer) {
+void ThrowIncompatibleClassChangeErrorField(const mirror::Field* resolved_field, bool is_static,
+ const mirror::AbstractMethod* referrer) {
std::ostringstream msg;
msg << "Expected '" << PrettyField(resolved_field) << "' to be a "
<< (is_static ? "static" : "instance") << " field" << " rather than a "
@@ -234,8 +240,8 @@
// NoSuchMethodError
-void ThrowNoSuchMethodError(InvokeType type, Class* c, const StringPiece& name,
- const StringPiece& signature, const AbstractMethod* referrer) {
+void ThrowNoSuchMethodError(InvokeType type, mirror::Class* c, const StringPiece& name,
+ const StringPiece& signature, const mirror::AbstractMethod* referrer) {
std::ostringstream msg;
ClassHelper kh(c);
msg << "No " << type << " method " << name << signature
@@ -244,8 +250,8 @@
Thread::Current()->ThrowNewException("Ljava/lang/NoSuchMethodError;", msg.str().c_str());
}
-void ThrowNoSuchMethodError(uint32_t method_idx, const AbstractMethod* referrer) {
- DexCache* dex_cache = referrer->GetDeclaringClass()->GetDexCache();
+void ThrowNoSuchMethodError(uint32_t method_idx, const mirror::AbstractMethod* referrer) {
+ mirror::DexCache* dex_cache = referrer->GetDeclaringClass()->GetDexCache();
const DexFile& dex_file = *dex_cache->GetDexFile();
std::ostringstream msg;
msg << "No method '" << PrettyMethod(method_idx, dex_file, true) << "'";
diff --git a/src/common_throws.h b/src/common_throws.h
index 33769c4..9e28bd7 100644
--- a/src/common_throws.h
+++ b/src/common_throws.h
@@ -18,62 +18,73 @@
#define ART_SRC_COMMON_THROWS_H_
#include "base/mutex.h"
-#include "object.h"
+#include "invoke_type.h"
namespace art {
+namespace mirror {
+class AbstractMethod;
+class Class;
+class Field;
+class Object;
+} // namespace mirror
+class StringPiece;
// NullPointerException
-void ThrowNullPointerExceptionForFieldAccess(Field* field, bool is_read)
+void ThrowNullPointerExceptionForFieldAccess(mirror::Field* field, bool is_read)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-void ThrowNullPointerExceptionForMethodAccess(AbstractMethod* caller, uint32_t method_idx, InvokeType type)
+void ThrowNullPointerExceptionForMethodAccess(mirror::AbstractMethod* caller, uint32_t method_idx,
+ InvokeType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-void ThrowNullPointerExceptionFromDexPC(AbstractMethod* throw_method, uint32_t dex_pc)
+void ThrowNullPointerExceptionFromDexPC(mirror::AbstractMethod* throw_method, uint32_t dex_pc)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// IllegalAccessError
-void ThrowIllegalAccessErrorClass(Class* referrer, Class* accessed)
+void ThrowIllegalAccessErrorClass(mirror::Class* referrer, mirror::Class* accessed)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-void ThrowIllegalAccessErrorClassForMethodDispatch(Class* referrer, Class* accessed,
- const AbstractMethod* caller, const AbstractMethod* called,
+void ThrowIllegalAccessErrorClassForMethodDispatch(mirror::Class* referrer, mirror::Class* accessed,
+ const mirror::AbstractMethod* caller,
+ const mirror::AbstractMethod* called,
InvokeType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-void ThrowIllegalAccessErrorMethod(Class* referrer, AbstractMethod* accessed)
+void ThrowIllegalAccessErrorMethod(mirror::Class* referrer, mirror::AbstractMethod* accessed)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-void ThrowIllegalAccessErrorField(Class* referrer, Field* accessed)
+void ThrowIllegalAccessErrorField(mirror::Class* referrer, mirror::Field* accessed)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-void ThrowIllegalAccessErrorFinalField(const AbstractMethod* referrer, Field* accessed)
+void ThrowIllegalAccessErrorFinalField(const mirror::AbstractMethod* referrer,
+ mirror::Field* accessed)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// IncompatibleClassChangeError
void ThrowIncompatibleClassChangeError(InvokeType expected_type, InvokeType found_type,
- AbstractMethod* method, const AbstractMethod* referrer)
+ mirror::AbstractMethod* method,
+ const mirror::AbstractMethod* referrer)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(const AbstractMethod* interface_method,
- Object* this_object,
- const AbstractMethod* referrer)
+void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(const mirror::AbstractMethod* interface_method,
+ mirror::Object* this_object,
+ const mirror::AbstractMethod* referrer)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-void ThrowIncompatibleClassChangeErrorField(const Field* resolved_field, bool is_static,
- const AbstractMethod* referrer)
+void ThrowIncompatibleClassChangeErrorField(const mirror::Field* resolved_field, bool is_static,
+ const mirror::AbstractMethod* referrer)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// NoSuchMethodError
-void ThrowNoSuchMethodError(InvokeType type, Class* c, const StringPiece& name,
- const StringPiece& signature, const AbstractMethod* referrer)
+void ThrowNoSuchMethodError(InvokeType type, mirror::Class* c, const StringPiece& name,
+ const StringPiece& signature, const mirror::AbstractMethod* referrer)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-void ThrowNoSuchMethodError(uint32_t method_idx, const AbstractMethod* referrer)
+void ThrowNoSuchMethodError(uint32_t method_idx, const mirror::AbstractMethod* referrer)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
} // namespace art
diff --git a/src/compiled_class.h b/src/compiled_class.h
index 97dd8ee..f050ee6 100644
--- a/src/compiled_class.h
+++ b/src/compiled_class.h
@@ -17,19 +17,19 @@
#ifndef ART_SRC_COMPILED_CLASS_H_
#define ART_SRC_COMPILED_CLASS_H_
-#include "object.h"
+#include "mirror/class.h"
namespace art {
class CompiledClass {
public:
- explicit CompiledClass(Class::Status status) : status_(status) {}
+ explicit CompiledClass(mirror::Class::Status status) : status_(status) {}
~CompiledClass() {}
- Class::Status GetStatus() const {
+ mirror::Class::Status GetStatus() const {
return status_;
}
private:
- const Class::Status status_;
+ const mirror::Class::Status status_;
};
} // namespace art
diff --git a/src/compiler.cc b/src/compiler.cc
index 18460ce..903b70a 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -23,15 +23,22 @@
#include "base/stl_util.h"
#include "class_linker.h"
-#include "class_loader.h"
-#include "dex_cache.h"
#include "jni_internal.h"
#include "oat_compilation_unit.h"
#include "oat_file.h"
#include "oat/runtime/stub.h"
#include "object_utils.h"
#include "runtime.h"
+#include "gc/card_table-inl.h"
#include "gc/space.h"
+#include "mirror/class_loader.h"
+#include "mirror/class-inl.h"
+#include "mirror/dex_cache.h"
+#include "mirror/field-inl.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
+#include "mirror/throwable.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
#include "thread.h"
@@ -431,7 +438,7 @@
return res;
}
-ByteArray* Compiler::CreateResolutionStub(InstructionSet instruction_set,
+mirror::ByteArray* Compiler::CreateResolutionStub(InstructionSet instruction_set,
Runtime::TrampolineType type) {
switch (instruction_set) {
case kArm:
@@ -447,7 +454,7 @@
}
}
-ByteArray* Compiler::CreateJniDlsymLookupStub(InstructionSet instruction_set) {
+mirror::ByteArray* Compiler::CreateJniDlsymLookupStub(InstructionSet instruction_set) {
switch (instruction_set) {
case kArm:
case kThumb2:
@@ -462,7 +469,7 @@
}
}
-ByteArray* Compiler::CreateAbstractMethodErrorStub(InstructionSet instruction_set) {
+mirror::ByteArray* Compiler::CreateAbstractMethodErrorStub(InstructionSet instruction_set) {
switch (instruction_set) {
case kArm:
case kThumb2:
@@ -497,7 +504,7 @@
}
}
-void Compiler::CompileOne(const AbstractMethod* method) {
+void Compiler::CompileOne(const mirror::AbstractMethod* method) {
DCHECK(!Runtime::Current()->IsStarted());
Thread* self = Thread::Current();
jobject class_loader;
@@ -566,12 +573,12 @@
bool Compiler::CanAssumeTypeIsPresentInDexCache(const DexFile& dex_file,
uint32_t type_idx) {
ScopedObjectAccess soa(Thread::Current());
- DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file);
+ mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file);
if (!IsImage()) {
stats_->TypeNotInDexCache();
return false;
}
- Class* resolved_class = dex_cache->GetResolvedType(type_idx);
+ mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
if (resolved_class == NULL) {
stats_->TypeNotInDexCache();
return false;
@@ -593,7 +600,7 @@
if (IsImage()) {
// We resolve all const-string strings when building for the image.
ScopedObjectAccess soa(Thread::Current());
- DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file);
+ mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file);
Runtime::Current()->GetClassLinker()->ResolveString(dex_file, string_idx, dex_cache);
result = true;
}
@@ -608,15 +615,15 @@
bool Compiler::CanAccessTypeWithoutChecks(uint32_t referrer_idx, const DexFile& dex_file,
uint32_t type_idx) {
ScopedObjectAccess soa(Thread::Current());
- DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file);
+ mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file);
// Get type from dex cache assuming it was populated by the verifier
- Class* resolved_class = dex_cache->GetResolvedType(type_idx);
+ mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
if (resolved_class == NULL) {
stats_->TypeNeedsAccessCheck();
return false; // Unknown class needs access checks.
}
const DexFile::MethodId& method_id = dex_file.GetMethodId(referrer_idx);
- Class* referrer_class = dex_cache->GetResolvedType(method_id.class_idx_);
+ mirror::Class* referrer_class = dex_cache->GetResolvedType(method_id.class_idx_);
if (referrer_class == NULL) {
stats_->TypeNeedsAccessCheck();
return false; // Incomplete referrer knowledge needs access check.
@@ -636,15 +643,15 @@
const DexFile& dex_file,
uint32_t type_idx) {
ScopedObjectAccess soa(Thread::Current());
- DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file);
+ mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file);
// Get type from dex cache assuming it was populated by the verifier.
- Class* resolved_class = dex_cache->GetResolvedType(type_idx);
+ mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
if (resolved_class == NULL) {
stats_->TypeNeedsAccessCheck();
return false; // Unknown class needs access checks.
}
const DexFile::MethodId& method_id = dex_file.GetMethodId(referrer_idx);
- Class* referrer_class = dex_cache->GetResolvedType(method_id.class_idx_);
+ mirror::Class* referrer_class = dex_cache->GetResolvedType(method_id.class_idx_);
if (referrer_class == NULL) {
stats_->TypeNeedsAccessCheck();
return false; // Incomplete referrer knowledge needs access check.
@@ -660,33 +667,33 @@
return result;
}
-static Class* ComputeCompilingMethodsClass(ScopedObjectAccess& soa,
- OatCompilationUnit* mUnit)
+static mirror::Class* ComputeCompilingMethodsClass(ScopedObjectAccess& soa,
+ OatCompilationUnit* mUnit)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DexCache* dex_cache = mUnit->class_linker_->FindDexCache(*mUnit->dex_file_);
- ClassLoader* class_loader = soa.Decode<ClassLoader*>(mUnit->class_loader_);
+ mirror::DexCache* dex_cache = mUnit->class_linker_->FindDexCache(*mUnit->dex_file_);
+ mirror::ClassLoader* class_loader = soa.Decode<mirror::ClassLoader*>(mUnit->class_loader_);
const DexFile::MethodId& referrer_method_id = mUnit->dex_file_->GetMethodId(mUnit->method_idx_);
return mUnit->class_linker_->ResolveType(*mUnit->dex_file_, referrer_method_id.class_idx_,
dex_cache, class_loader);
}
-static Field* ComputeFieldReferencedFromCompilingMethod(ScopedObjectAccess& soa,
- OatCompilationUnit* mUnit,
- uint32_t field_idx)
+static mirror::Field* ComputeFieldReferencedFromCompilingMethod(ScopedObjectAccess& soa,
+ OatCompilationUnit* mUnit,
+ uint32_t field_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DexCache* dex_cache = mUnit->class_linker_->FindDexCache(*mUnit->dex_file_);
- ClassLoader* class_loader = soa.Decode<ClassLoader*>(mUnit->class_loader_);
+ mirror::DexCache* dex_cache = mUnit->class_linker_->FindDexCache(*mUnit->dex_file_);
+ mirror::ClassLoader* class_loader = soa.Decode<mirror::ClassLoader*>(mUnit->class_loader_);
return mUnit->class_linker_->ResolveField(*mUnit->dex_file_, field_idx, dex_cache,
class_loader, false);
}
-static AbstractMethod* ComputeMethodReferencedFromCompilingMethod(ScopedObjectAccess& soa,
- OatCompilationUnit* mUnit,
- uint32_t method_idx,
- InvokeType type)
+static mirror::AbstractMethod* ComputeMethodReferencedFromCompilingMethod(ScopedObjectAccess& soa,
+ OatCompilationUnit* mUnit,
+ uint32_t method_idx,
+ InvokeType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DexCache* dex_cache = mUnit->class_linker_->FindDexCache(*mUnit->dex_file_);
- ClassLoader* class_loader = soa.Decode<ClassLoader*>(mUnit->class_loader_);
+ mirror::DexCache* dex_cache = mUnit->class_linker_->FindDexCache(*mUnit->dex_file_);
+ mirror::ClassLoader* class_loader = soa.Decode<mirror::ClassLoader*>(mUnit->class_loader_);
return mUnit->class_linker_->ResolveMethod(*mUnit->dex_file_, method_idx, dex_cache,
class_loader, NULL, type);
}
@@ -698,11 +705,11 @@
field_offset = -1;
is_volatile = true;
// Try to resolve field and ignore if an Incompatible Class Change Error (ie is static).
- Field* resolved_field = ComputeFieldReferencedFromCompilingMethod(soa, mUnit, field_idx);
+ mirror::Field* resolved_field = ComputeFieldReferencedFromCompilingMethod(soa, mUnit, field_idx);
if (resolved_field != NULL && !resolved_field->IsStatic()) {
- Class* referrer_class = ComputeCompilingMethodsClass(soa, mUnit);
+ mirror::Class* referrer_class = ComputeCompilingMethodsClass(soa, mUnit);
if (referrer_class != NULL) {
- Class* fields_class = resolved_field->GetDeclaringClass();
+ mirror::Class* fields_class = resolved_field->GetDeclaringClass();
bool access_ok = referrer_class->CanAccess(fields_class) &&
referrer_class->CanAccessMember(fields_class,
resolved_field->GetAccessFlags());
@@ -711,7 +718,7 @@
// protected field being made public by a sub-class. Resort to the dex file to determine
// the correct class for the access check.
const DexFile& dex_file = *referrer_class->GetDexCache()->GetDexFile();
- Class* dex_fields_class = mUnit->class_linker_->ResolveType(dex_file,
+ mirror::Class* dex_fields_class = mUnit->class_linker_->ResolveType(dex_file,
dex_file.GetFieldId(field_idx).class_idx_,
referrer_class);
access_ok = referrer_class->CanAccess(dex_fields_class) &&
@@ -746,11 +753,11 @@
is_referrers_class = false;
is_volatile = true;
// Try to resolve field and ignore if an Incompatible Class Change Error (ie isn't static).
- Field* resolved_field = ComputeFieldReferencedFromCompilingMethod(soa, mUnit, field_idx);
+ mirror::Field* resolved_field = ComputeFieldReferencedFromCompilingMethod(soa, mUnit, field_idx);
if (resolved_field != NULL && resolved_field->IsStatic()) {
- Class* referrer_class = ComputeCompilingMethodsClass(soa, mUnit);
+ mirror::Class* referrer_class = ComputeCompilingMethodsClass(soa, mUnit);
if (referrer_class != NULL) {
- Class* fields_class = resolved_field->GetDeclaringClass();
+ mirror::Class* fields_class = resolved_field->GetDeclaringClass();
if (fields_class == referrer_class) {
is_referrers_class = true; // implies no worrying about class initialization
field_offset = resolved_field->GetOffset().Int32Value();
@@ -767,7 +774,7 @@
// the correct class for the access check. Don't change the field's class as that is
// used to identify the SSB.
const DexFile& dex_file = *referrer_class->GetDexCache()->GetDexFile();
- Class* dex_fields_class =
+ mirror::Class* dex_fields_class =
mUnit->class_linker_->ResolveType(dex_file,
dex_file.GetFieldId(field_idx).class_idx_,
referrer_class);
@@ -781,7 +788,7 @@
// in its static storage base (which may fail if it doesn't have a slot for it)
// TODO: for images we can elide the static storage base null check
// if we know there's a non-null entry in the image
- DexCache* dex_cache = mUnit->class_linker_->FindDexCache(*mUnit->dex_file_);
+ mirror::DexCache* dex_cache = mUnit->class_linker_->FindDexCache(*mUnit->dex_file_);
if (fields_class->GetDexCache() == dex_cache) {
// common case where the dex cache of both the referrer and the field are the same,
// no need to search the dex file
@@ -820,7 +827,8 @@
return false; // Incomplete knowledge needs slow path.
}
-void Compiler::GetCodeAndMethodForDirectCall(InvokeType type, InvokeType sharp_type, AbstractMethod* method,
+void Compiler::GetCodeAndMethodForDirectCall(InvokeType type, InvokeType sharp_type,
+ mirror::AbstractMethod* method,
uintptr_t& direct_code, uintptr_t& direct_method) {
// For direct and static methods compute possible direct_code and direct_method values, ie
// an address for the Method* being invoked and an address of the code for that Method*.
@@ -876,15 +884,15 @@
vtable_idx = -1;
direct_code = 0;
direct_method = 0;
- AbstractMethod* resolved_method =
+ mirror::AbstractMethod* resolved_method =
ComputeMethodReferencedFromCompilingMethod(soa, mUnit, method_idx, type);
if (resolved_method != NULL) {
// Don't try to fast-path if we don't understand the caller's class or this appears to be an
// Incompatible Class Change Error.
- Class* referrer_class = ComputeCompilingMethodsClass(soa, mUnit);
+ mirror::Class* referrer_class = ComputeCompilingMethodsClass(soa, mUnit);
bool icce = resolved_method->CheckIncompatibleClassChange(type);
if (referrer_class != NULL && !icce) {
- Class* methods_class = resolved_method->GetDeclaringClass();
+ mirror::Class* methods_class = resolved_method->GetDeclaringClass();
if (!referrer_class->CanAccess(methods_class) ||
!referrer_class->CanAccessMember(methods_class,
resolved_method->GetAccessFlags())) {
@@ -1068,7 +1076,7 @@
// classes found in the boot classpath. Since at runtime we will
// select the class from the boot classpath, do not attempt to resolve
// or compile it now.
-static bool SkipClass(ClassLoader* class_loader,
+static bool SkipClass(mirror::ClassLoader* class_loader,
const DexFile& dex_file,
const DexFile::ClassDef& class_def)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -1077,7 +1085,7 @@
}
const char* descriptor = dex_file.GetClassDescriptor(class_def);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- Class* klass = class_linker->FindClass(descriptor, NULL);
+ mirror::Class* klass = class_linker->FindClass(descriptor, NULL);
if (klass == NULL) {
Thread* self = Thread::Current();
CHECK(self->IsExceptionPending());
@@ -1090,7 +1098,7 @@
static void ResolveClassFieldsAndMethods(const CompilationContext* context, size_t class_def_index)
LOCKS_EXCLUDED(Locks::mutator_lock_) {
ScopedObjectAccess soa(Thread::Current());
- ClassLoader* class_loader = soa.Decode<ClassLoader*>(context->GetClassLoader());
+ mirror::ClassLoader* class_loader = soa.Decode<mirror::ClassLoader*>(context->GetClassLoader());
const DexFile& dex_file = *context->GetDexFile();
// Method and Field are the worst. We can't resolve without either
@@ -1115,11 +1123,11 @@
}
Thread* self = Thread::Current();
ClassLinker* class_linker = context->GetClassLinker();
- DexCache* dex_cache = class_linker->FindDexCache(dex_file);
+ mirror::DexCache* dex_cache = class_linker->FindDexCache(dex_file);
ClassDataItemIterator it(dex_file, class_data);
while (it.HasNextStaticField()) {
- Field* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(), dex_cache,
- class_loader, true);
+ mirror::Field* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(), dex_cache,
+ class_loader, true);
if (field == NULL) {
CHECK(self->IsExceptionPending());
self->ClearException();
@@ -1134,8 +1142,8 @@
requires_constructor_barrier = true;
}
- Field* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(), dex_cache,
- class_loader, false);
+ mirror::Field* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(), dex_cache,
+ class_loader, false);
if (field == NULL) {
CHECK(self->IsExceptionPending());
self->ClearException();
@@ -1147,9 +1155,9 @@
class_def_index);
}
while (it.HasNextDirectMethod()) {
- AbstractMethod* method = class_linker->ResolveMethod(dex_file, it.GetMemberIndex(), dex_cache,
- class_loader, NULL,
- it.GetMethodInvokeType(class_def));
+ mirror::AbstractMethod* method = class_linker->ResolveMethod(dex_file, it.GetMemberIndex(),
+ dex_cache, class_loader, NULL,
+ it.GetMethodInvokeType(class_def));
if (method == NULL) {
CHECK(self->IsExceptionPending());
self->ClearException();
@@ -1157,9 +1165,9 @@
it.Next();
}
while (it.HasNextVirtualMethod()) {
- AbstractMethod* method = class_linker->ResolveMethod(dex_file, it.GetMemberIndex(), dex_cache,
- class_loader, NULL,
- it.GetMethodInvokeType(class_def));
+ mirror::AbstractMethod* method = class_linker->ResolveMethod(dex_file, it.GetMemberIndex(),
+ dex_cache, class_loader, NULL,
+ it.GetMethodInvokeType(class_def));
if (method == NULL) {
CHECK(self->IsExceptionPending());
self->ClearException();
@@ -1175,9 +1183,9 @@
ScopedObjectAccess soa(Thread::Current());
ClassLinker* class_linker = context->GetClassLinker();
const DexFile& dex_file = *context->GetDexFile();
- DexCache* dex_cache = class_linker->FindDexCache(dex_file);
- ClassLoader* class_loader = soa.Decode<ClassLoader*>(context->GetClassLoader());
- Class* klass = class_linker->ResolveType(dex_file, type_idx, dex_cache, class_loader);
+ mirror::DexCache* dex_cache = class_linker->FindDexCache(dex_file);
+ mirror::ClassLoader* class_loader = soa.Decode<mirror::ClassLoader*>(context->GetClassLoader());
+ mirror::Class* klass = class_linker->ResolveType(dex_file, type_idx, dex_cache, class_loader);
if (klass == NULL) {
CHECK(soa.Self()->IsExceptionPending());
@@ -1214,9 +1222,9 @@
ScopedObjectAccess soa(Thread::Current());
const DexFile::ClassDef& class_def = context->GetDexFile()->GetClassDef(class_def_index);
const char* descriptor = context->GetDexFile()->GetClassDescriptor(class_def);
- Class* klass =
+ mirror::Class* klass =
context->GetClassLinker()->FindClass(descriptor,
- soa.Decode<ClassLoader*>(context->GetClassLoader()));
+ soa.Decode<mirror::ClassLoader*>(context->GetClassLoader()));
if (klass == NULL) {
Thread* self = Thread::Current();
CHECK(self->IsExceptionPending());
@@ -1227,11 +1235,11 @@
* This is to ensure the class is structurally sound for compilation. An unsound class
* will be rejected by the verifier and later skipped during compilation in the compiler.
*/
- DexCache* dex_cache = context->GetClassLinker()->FindDexCache(*context->GetDexFile());
+ mirror::DexCache* dex_cache = context->GetClassLinker()->FindDexCache(*context->GetDexFile());
std::string error_msg;
if (verifier::MethodVerifier::VerifyClass(context->GetDexFile(),
dex_cache,
- soa.Decode<ClassLoader*>(context->GetClassLoader()),
+ soa.Decode<mirror::ClassLoader*>(context->GetClassLoader()),
class_def_index, error_msg) ==
verifier::MethodVerifier::kHardFailure) {
const DexFile::ClassDef& class_def = context->GetDexFile()->GetClassDef(class_def_index);
@@ -1446,9 +1454,9 @@
LOCKS_EXCLUDED(Locks::mutator_lock_) {
const DexFile::ClassDef& class_def = context->GetDexFile()->GetClassDef(class_def_index);
ScopedObjectAccess soa(Thread::Current());
- ClassLoader* class_loader = soa.Decode<ClassLoader*>(context->GetClassLoader());
+ mirror::ClassLoader* class_loader = soa.Decode<mirror::ClassLoader*>(context->GetClassLoader());
const char* descriptor = context->GetDexFile()->GetClassDescriptor(class_def);
- Class* klass = context->GetClassLinker()->FindClass(descriptor, class_loader);
+ mirror::Class* klass = context->GetClassLinker()->FindClass(descriptor, class_loader);
Thread* self = Thread::Current();
bool compiling_boot = Runtime::Current()->GetHeap()->GetSpaces().size() == 1;
bool can_init_static_fields = compiling_boot &&
@@ -1480,10 +1488,10 @@
LOG(INFO) << "Initializing: " << descriptor;
if (StringPiece(descriptor) == "Ljava/lang/Void;"){
// Hand initialize j.l.Void to avoid Dex file operations in un-started runtime.
- ObjectArray<Field>* fields = klass->GetSFields();
+ mirror::ObjectArray<mirror::Field>* fields = klass->GetSFields();
CHECK_EQ(fields->GetLength(), 1);
fields->Get(0)->SetObj(klass, context->GetClassLinker()->FindPrimitiveClass('V'));
- klass->SetStatus(Class::kStatusInitialized);
+ klass->SetStatus(mirror::Class::kStatusInitialized);
} else {
context->GetClassLinker()->EnsureInitialized(klass, true, can_init_static_fields);
}
@@ -1497,7 +1505,7 @@
}
}
// Record the final class status if necessary.
- Class::Status status = klass->GetStatus();
+ mirror::Class::Status status = klass->GetStatus();
Compiler::ClassReference ref(context->GetDexFile(), class_def_index);
CompiledClass* compiled_class = context->GetCompiler()->GetCompiledClass(ref);
if (compiled_class == NULL) {
@@ -1550,7 +1558,7 @@
const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
{
ScopedObjectAccess soa(Thread::Current());
- ClassLoader* class_loader = soa.Decode<ClassLoader*>(context->GetClassLoader());
+ mirror::ClassLoader* class_loader = soa.Decode<mirror::ClassLoader*>(context->GetClassLoader());
if (SkipClass(class_loader, dex_file, class_def)) {
return;
}
diff --git a/src/compiler.h b/src/compiler.h
index 13130d7..0f99f4d 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -24,12 +24,10 @@
#include "base/mutex.h"
#include "compiled_class.h"
#include "compiled_method.h"
-#include "dex_cache.h"
#include "dex_file.h"
#include "instruction_set.h"
#include "invoke_type.h"
#include "oat_file.h"
-#include "object.h"
#include "runtime.h"
#include "safe_map.h"
#include "thread_pool.h"
@@ -79,7 +77,7 @@
LOCKS_EXCLUDED(Locks::mutator_lock_);
// Compile a single Method
- void CompileOne(const AbstractMethod* method)
+ void CompileOne(const mirror::AbstractMethod* method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsDebuggingSupported() {
@@ -101,16 +99,16 @@
CompilerTls* GetTls();
// Stub to throw AbstractMethodError
- static ByteArray* CreateAbstractMethodErrorStub(InstructionSet instruction_set)
+ static mirror::ByteArray* CreateAbstractMethodErrorStub(InstructionSet instruction_set)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Generate the trampoline that's invoked by unresolved direct methods
- static ByteArray* CreateResolutionStub(InstructionSet instruction_set,
- Runtime::TrampolineType type)
+ static mirror::ByteArray* CreateResolutionStub(InstructionSet instruction_set,
+ Runtime::TrampolineType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static ByteArray* CreateJniDlsymLookupStub(InstructionSet instruction_set)
+ static mirror::ByteArray* CreateJniDlsymLookupStub(InstructionSet instruction_set)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// A class is uniquely located by its DexFile and the class_defs_ table index into that DexFile
@@ -273,7 +271,8 @@
private:
// Compute constant code and method pointers when possible
- void GetCodeAndMethodForDirectCall(InvokeType type, InvokeType sharp_type, AbstractMethod* method,
+ void GetCodeAndMethodForDirectCall(InvokeType type, InvokeType sharp_type,
+ mirror::AbstractMethod* method,
uintptr_t& direct_code, uintptr_t& direct_method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -397,11 +396,11 @@
CompilerEnableAutoElfLoadingFn compiler_enable_auto_elf_loading_;
typedef const void* (*CompilerGetMethodCodeAddrFn)
- (const Compiler& compiler, const CompiledMethod* cm, const AbstractMethod* method);
+ (const Compiler& compiler, const CompiledMethod* cm, const mirror::AbstractMethod* method);
CompilerGetMethodCodeAddrFn compiler_get_method_code_addr_;
- typedef const AbstractMethod::InvokeStub* (*CompilerGetMethodInvokeStubAddrFn)
- (const Compiler& compiler, const CompiledInvokeStub* cm, const AbstractMethod* method);
+ typedef const mirror::AbstractMethod::InvokeStub* (*CompilerGetMethodInvokeStubAddrFn)
+ (const Compiler& compiler, const CompiledInvokeStub* cm, const mirror::AbstractMethod* method);
CompilerGetMethodInvokeStubAddrFn compiler_get_method_invoke_stub_addr_;
diff --git a/src/compiler/codegen/arm/call_arm.cc b/src/compiler/codegen/arm/call_arm.cc
index cb3af5e..47306f4 100644
--- a/src/compiler/codegen/arm/call_arm.cc
+++ b/src/compiler/codegen/arm/call_arm.cc
@@ -489,7 +489,7 @@
GenNullCheck(cu, rl_src.s_reg_low, r0, opt_flags);
LoadWordDisp(cu, rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
NewLIR3(cu, kThumb2Ldrex, r1, r0,
- Object::MonitorOffset().Int32Value() >> 2); // Get object->lock
+ mirror::Object::MonitorOffset().Int32Value() >> 2); // Get object->lock
// Align owner
OpRegImm(cu, kOpLsl, r2, LW_LOCK_OWNER_SHIFT);
// Is lock unheld on lock or held by us (==thread_id) on unlock?
@@ -498,7 +498,7 @@
OpRegImm(cu, kOpCmp, r1, 0);
OpIT(cu, kCondEq, "");
NewLIR4(cu, kThumb2Strex, r1, r2, r0,
- Object::MonitorOffset().Int32Value() >> 2);
+ mirror::Object::MonitorOffset().Int32Value() >> 2);
OpRegImm(cu, kOpCmp, r1, 0);
OpIT(cu, kCondNe, "T");
// Go expensive route - artLockObjectFromCode(self, obj);
@@ -522,7 +522,7 @@
LoadValueDirectFixed(cu, rl_src, r0); // Get obj
LockCallTemps(cu); // Prepare for explicit register usage
GenNullCheck(cu, rl_src.s_reg_low, r0, opt_flags);
- LoadWordDisp(cu, r0, Object::MonitorOffset().Int32Value(), r1); // Get lock
+ LoadWordDisp(cu, r0, mirror::Object::MonitorOffset().Int32Value(), r1); // Get lock
LoadWordDisp(cu, rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
// Is lock unheld on lock or held by us (==thread_id) on unlock?
OpRegRegImm(cu, kOpAnd, r3, r1,
@@ -532,7 +532,7 @@
NewLIR3(cu, kThumb2Bfc, r1, LW_HASH_STATE_SHIFT, LW_LOCK_OWNER_SHIFT - 1);
OpRegReg(cu, kOpSub, r1, r2);
OpIT(cu, kCondEq, "EE");
- StoreWordDisp(cu, r0, Object::MonitorOffset().Int32Value(), r3);
+ StoreWordDisp(cu, r0, mirror::Object::MonitorOffset().Int32Value(), r3);
// Go expensive route - UnlockObjectFromCode(obj);
LoadWordDisp(cu, rARM_SELF, ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rARM_LR);
ClobberCalleeSave(cu);
diff --git a/src/compiler/codegen/arm/int_arm.cc b/src/compiler/codegen/arm/int_arm.cc
index e86f379..6a1178e 100644
--- a/src/compiler/codegen/arm/int_arm.cc
+++ b/src/compiler/codegen/arm/int_arm.cc
@@ -565,16 +565,16 @@
RegLocation rl_index, RegLocation rl_dest, int scale)
{
RegisterClass reg_class = oat_reg_class_by_size(size);
- int len_offset = Array::LengthOffset().Int32Value();
+ int len_offset = mirror::Array::LengthOffset().Int32Value();
int data_offset;
RegLocation rl_result;
rl_array = LoadValue(cu, rl_array, kCoreReg);
rl_index = LoadValue(cu, rl_index, kCoreReg);
if (rl_dest.wide) {
- data_offset = Array::DataOffset(sizeof(int64_t)).Int32Value();
+ data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
} else {
- data_offset = Array::DataOffset(sizeof(int32_t)).Int32Value();
+ data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
}
/* null object? */
@@ -637,13 +637,13 @@
RegLocation rl_index, RegLocation rl_src, int scale)
{
RegisterClass reg_class = oat_reg_class_by_size(size);
- int len_offset = Array::LengthOffset().Int32Value();
+ int len_offset = mirror::Array::LengthOffset().Int32Value();
int data_offset;
if (size == kLong || size == kDouble) {
- data_offset = Array::DataOffset(sizeof(int64_t)).Int32Value();
+ data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
} else {
- data_offset = Array::DataOffset(sizeof(int32_t)).Int32Value();
+ data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
}
rl_array = LoadValue(cu, rl_array, kCoreReg);
@@ -706,8 +706,8 @@
void ArmCodegen::GenArrayObjPut(CompilationUnit* cu, int opt_flags, RegLocation rl_array,
RegLocation rl_index, RegLocation rl_src, int scale)
{
- int len_offset = Array::LengthOffset().Int32Value();
- int data_offset = Array::DataOffset(sizeof(Object*)).Int32Value();
+ int len_offset = mirror::Array::LengthOffset().Int32Value();
+ int data_offset = mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value();
FlushAllRegs(cu); // Use explicit registers
LockCallTemps(cu);
@@ -727,7 +727,7 @@
LIR* null_value_check = OpCmpImmBranch(cu, kCondEq, r_value, 0, NULL);
// Get the array's class.
- LoadWordDisp(cu, r_array, Object::ClassOffset().Int32Value(), r_array_class);
+ LoadWordDisp(cu, r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class);
CallRuntimeHelperRegReg(cu, ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
r_array_class, true);
// Redo LoadValues in case they didn't survive the call.
diff --git a/src/compiler/codegen/gen_common.cc b/src/compiler/codegen/gen_common.cc
index e1054db..acaad5b 100644
--- a/src/compiler/codegen/gen_common.cc
+++ b/src/compiler/codegen/gen_common.cc
@@ -330,7 +330,7 @@
SRegOffset(cu, rl_first.s_reg_low));
// Set up the target pointer
OpRegRegImm(cu, kOpAdd, r_dst, TargetReg(kRet0),
- Array::DataOffset(component_size).Int32Value());
+ mirror::Array::DataOffset(component_size).Int32Value());
// Set up the loop counter (known to be > 0)
LoadConstant(cu, r_idx, elems - 1);
// Generate the copy loop. Going backwards for convenience
@@ -342,14 +342,15 @@
OpDecAndBranch(cu, kCondGe, r_idx, target);
if (cu->instruction_set == kX86) {
// Restore the target pointer
- OpRegRegImm(cu, kOpAdd, TargetReg(kRet0), r_dst, -Array::DataOffset(component_size).Int32Value());
+ OpRegRegImm(cu, kOpAdd, TargetReg(kRet0), r_dst,
+ -mirror::Array::DataOffset(component_size).Int32Value());
}
} else if (!info->is_range) {
// TUNING: interleave
for (int i = 0; i < elems; i++) {
RegLocation rl_arg = LoadValue(cu, info->args[i], kCoreReg);
StoreBaseDisp(cu, TargetReg(kRet0),
- Array::DataOffset(component_size).Int32Value() +
+ mirror::Array::DataOffset(component_size).Int32Value() +
i * 4, rl_arg.low_reg, kWord);
// If the LoadValue caused a temp to be allocated, free it
if (IsTemp(cu, rl_arg.low_reg)) {
@@ -386,7 +387,7 @@
RegLocation rl_method = LoadCurrMethod(cu);
rBase = AllocTemp(cu);
LoadWordDisp(cu, rl_method.low_reg,
- AbstractMethod::DeclaringClassOffset().Int32Value(), rBase);
+ mirror::AbstractMethod::DeclaringClassOffset().Int32Value(), rBase);
if (IsTemp(cu, rl_method.low_reg)) {
FreeTemp(cu, rl_method.low_reg);
}
@@ -404,10 +405,10 @@
rBase = TargetReg(kArg0);
LockTemp(cu, rBase);
LoadWordDisp(cu, r_method,
- AbstractMethod::DexCacheInitializedStaticStorageOffset().Int32Value(),
+ mirror::AbstractMethod::DexCacheInitializedStaticStorageOffset().Int32Value(),
rBase);
LoadWordDisp(cu, rBase,
- Array::DataOffset(sizeof(Object*)).Int32Value() +
+ mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
sizeof(int32_t*) * ssb_index, rBase);
// rBase now points at appropriate static storage base (Class*)
// or NULL if not initialized. Check for NULL and call helper if NULL.
@@ -480,7 +481,7 @@
RegLocation rl_method = LoadCurrMethod(cu);
rBase = AllocTemp(cu);
LoadWordDisp(cu, rl_method.low_reg,
- AbstractMethod::DeclaringClassOffset().Int32Value(), rBase);
+ mirror::AbstractMethod::DeclaringClassOffset().Int32Value(), rBase);
} else {
// Medium path, static storage base in a different class which
// requires checks that the other class is initialized
@@ -495,10 +496,10 @@
rBase = TargetReg(kArg0);
LockTemp(cu, rBase);
LoadWordDisp(cu, r_method,
- AbstractMethod::DexCacheInitializedStaticStorageOffset().Int32Value(),
+ mirror::AbstractMethod::DexCacheInitializedStaticStorageOffset().Int32Value(),
rBase);
LoadWordDisp(cu, rBase,
- Array::DataOffset(sizeof(Object*)).Int32Value() +
+ mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
sizeof(int32_t*) * ssb_index, rBase);
// rBase now points at appropriate static storage base (Class*)
// or NULL if not initialized. Check for NULL and call helper if NULL.
@@ -619,7 +620,7 @@
OpRegCopy(cu, TargetReg(kArg0), v1);
if (target_x86) {
// x86 leaves the array pointer in v2, so load the array length that the handler expects
- OpRegMem(cu, kOpMov, TargetReg(kArg1), v2, Array::LengthOffset().Int32Value());
+ OpRegMem(cu, kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value());
} else {
OpRegCopy(cu, TargetReg(kArg1), v2);
}
@@ -629,7 +630,7 @@
OpRegCopy(cu, TargetReg(kArg2), v1);
if (target_x86) {
// x86 leaves the array pointer in v2; load the array length that the handler expects
- OpRegMem(cu, kOpMov, TargetReg(kArg1), v2, Array::LengthOffset().Int32Value());
+ OpRegMem(cu, kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value());
} else {
OpRegCopy(cu, TargetReg(kArg1), v2);
}
@@ -637,7 +638,7 @@
} else {
if (target_x86) {
// x86 leaves the array pointer in v2; load the array length that the handler expects
- OpRegMem(cu, kOpMov, TargetReg(kArg1), v2, Array::LengthOffset().Int32Value());
+ OpRegMem(cu, kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value());
} else {
OpRegCopy(cu, TargetReg(kArg1), v2);
}
@@ -799,10 +800,10 @@
} else {
      // We don't need access checks, load type from dex cache
int32_t dex_cache_offset =
- AbstractMethod::DexCacheResolvedTypesOffset().Int32Value();
+ mirror::AbstractMethod::DexCacheResolvedTypesOffset().Int32Value();
LoadWordDisp(cu, rl_method.low_reg, dex_cache_offset, res_reg);
int32_t offset_of_type =
- Array::DataOffset(sizeof(Class*)).Int32Value() + (sizeof(Class*)
+ mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*)
* type_idx);
LoadWordDisp(cu, res_reg, offset_of_type, rl_result.low_reg);
if (!cu->compiler->CanAssumeTypeIsPresentInDexCache(*cu->dex_file,
@@ -844,8 +845,8 @@
void Codegen::GenConstString(CompilationUnit* cu, uint32_t string_idx, RegLocation rl_dest)
{
/* NOTE: Most strings should be available at compile time */
- int32_t offset_of_string = Array::DataOffset(sizeof(String*)).Int32Value() +
- (sizeof(String*) * string_idx);
+ int32_t offset_of_string = mirror::Array::DataOffset(sizeof(mirror::String*)).Int32Value() +
+ (sizeof(mirror::String*) * string_idx);
if (!cu->compiler->CanAssumeStringIsPresentInDexCache(
*cu->dex_file, string_idx) || SLOW_STRING_PATH) {
// slow path, resolve string if not in dex cache
@@ -853,7 +854,7 @@
LockCallTemps(cu); // Using explicit registers
LoadCurrMethodDirect(cu, TargetReg(kArg2));
LoadWordDisp(cu, TargetReg(kArg2),
- AbstractMethod::DexCacheStringsOffset().Int32Value(), TargetReg(kArg0));
+ mirror::AbstractMethod::DexCacheStringsOffset().Int32Value(), TargetReg(kArg0));
// Might call out to helper, which will return resolved string in kRet0
int r_tgt = CallHelperSetup(cu, ENTRYPOINT_OFFSET(pResolveStringFromCode));
LoadWordDisp(cu, TargetReg(kArg0), offset_of_string, TargetReg(kRet0));
@@ -888,7 +889,7 @@
int res_reg = AllocTemp(cu);
RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
LoadWordDisp(cu, rl_method.low_reg,
- AbstractMethod::DexCacheStringsOffset().Int32Value(), res_reg);
+ mirror::AbstractMethod::DexCacheStringsOffset().Int32Value(), res_reg);
LoadWordDisp(cu, res_reg, offset_of_string, rl_result.low_reg);
StoreValue(cu, rl_dest, rl_result);
}
@@ -942,9 +943,9 @@
// Load dex cache entry into class_reg (kArg2)
LoadValueDirectFixed(cu, rl_src, TargetReg(kArg0)); // kArg0 <= ref
LoadWordDisp(cu, TargetReg(kArg1),
- AbstractMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg);
+ mirror::AbstractMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg);
int32_t offset_of_type =
- Array::DataOffset(sizeof(Class*)).Int32Value() + (sizeof(Class*)
+ mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*)
* type_idx);
LoadWordDisp(cu, class_reg, offset_of_type, class_reg);
if (!cu->compiler->CanAssumeTypeIsPresentInDexCache(
@@ -968,8 +969,8 @@
}
LIR* branch1 = OpCmpImmBranch(cu, kCondEq, TargetReg(kArg0), 0, NULL);
/* load object->klass_ */
- DCHECK_EQ(Object::ClassOffset().Int32Value(), 0);
- LoadWordDisp(cu, TargetReg(kArg0), Object::ClassOffset().Int32Value(), TargetReg(kArg1));
+ DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
+ LoadWordDisp(cu, TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
/* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
LIR* call_inst;
LIR* branchover = NULL;
@@ -1026,10 +1027,10 @@
} else {
// Load dex cache entry into class_reg (kArg2)
LoadWordDisp(cu, TargetReg(kArg1),
- AbstractMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg);
+ mirror::AbstractMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg);
int32_t offset_of_type =
- Array::DataOffset(sizeof(Class*)).Int32Value() +
- (sizeof(Class*) * type_idx);
+ mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
+ (sizeof(mirror::Class*) * type_idx);
LoadWordDisp(cu, class_reg, offset_of_type, class_reg);
if (!cu->compiler->CanAssumeTypeIsPresentInDexCache(
*cu->dex_file, type_idx)) {
@@ -1051,8 +1052,8 @@
/* Null is OK - continue */
LIR* branch1 = OpCmpImmBranch(cu, kCondEq, TargetReg(kArg0), 0, NULL);
/* load object->klass_ */
- DCHECK_EQ(Object::ClassOffset().Int32Value(), 0);
- LoadWordDisp(cu, TargetReg(kArg0), Object::ClassOffset().Int32Value(), TargetReg(kArg1));
+ DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
+ LoadWordDisp(cu, TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
/* kArg1 now contains object->klass_ */
LIR* branch2;
if (cu->instruction_set == kThumb2) {
diff --git a/src/compiler/codegen/gen_invoke.cc b/src/compiler/codegen/gen_invoke.cc
index fe60aff..78425c4 100644
--- a/src/compiler/codegen/gen_invoke.cc
+++ b/src/compiler/codegen/gen_invoke.cc
@@ -365,7 +365,7 @@
break;
case 1: // Get method->dex_cache_resolved_methods_
cg->LoadWordDisp(cu, cg->TargetReg(kArg0),
- AbstractMethod::DexCacheResolvedMethodsOffset().Int32Value(), cg->TargetReg(kArg0));
+ mirror::AbstractMethod::DexCacheResolvedMethodsOffset().Int32Value(), cg->TargetReg(kArg0));
// Set up direct code if known.
if (direct_code != 0) {
if (direct_code != static_cast<unsigned int>(-1)) {
@@ -384,13 +384,14 @@
break;
case 2: // Grab target method*
cg->LoadWordDisp(cu, cg->TargetReg(kArg0),
- Array::DataOffset(sizeof(Object*)).Int32Value() + dex_idx * 4,
+ mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() + dex_idx * 4,
cg-> TargetReg(kArg0));
break;
case 3: // Grab the code from the method*
if (cu->instruction_set != kX86) {
if (direct_code == 0) {
- cg->LoadWordDisp(cu, cg->TargetReg(kArg0), AbstractMethod::GetCodeOffset().Int32Value(),
+ cg->LoadWordDisp(cu, cg->TargetReg(kArg0),
+ mirror::AbstractMethod::GetCodeOffset().Int32Value(),
cg->TargetReg(kInvokeTgt));
}
break;
@@ -428,20 +429,22 @@
case 1: // Is "this" null? [use kArg1]
cg->GenNullCheck(cu, info->args[0].s_reg_low, cg->TargetReg(kArg1), info->opt_flags);
// get this->klass_ [use kArg1, set kInvokeTgt]
- cg->LoadWordDisp(cu, cg->TargetReg(kArg1), Object::ClassOffset().Int32Value(),
+ cg->LoadWordDisp(cu, cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
cg->TargetReg(kInvokeTgt));
break;
case 2: // Get this->klass_->vtable [usr kInvokeTgt, set kInvokeTgt]
- cg->LoadWordDisp(cu, cg->TargetReg(kInvokeTgt), Class::VTableOffset().Int32Value(),
+ cg->LoadWordDisp(cu, cg->TargetReg(kInvokeTgt), mirror::Class::VTableOffset().Int32Value(),
cg->TargetReg(kInvokeTgt));
break;
case 3: // Get target method [use kInvokeTgt, set kArg0]
cg->LoadWordDisp(cu, cg->TargetReg(kInvokeTgt), (method_idx * 4) +
- Array::DataOffset(sizeof(Object*)).Int32Value(), cg->TargetReg(kArg0));
+ mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(),
+ cg->TargetReg(kArg0));
break;
case 4: // Get the compiled code address [uses kArg0, sets kInvokeTgt]
if (cu->instruction_set != kX86) {
- cg->LoadWordDisp(cu, cg->TargetReg(kArg0), AbstractMethod::GetCodeOffset().Int32Value(),
+ cg->LoadWordDisp(cu, cg->TargetReg(kArg0),
+ mirror::AbstractMethod::GetCodeOffset().Int32Value(),
cg->TargetReg(kInvokeTgt));
break;
}
@@ -503,12 +506,12 @@
break;
case 1: // Get method->dex_cache_resolved_methods_ [set/use kArg0]
cg->LoadWordDisp(cu, cg->TargetReg(kArg0),
- AbstractMethod::DexCacheResolvedMethodsOffset().Int32Value(),
+ mirror::AbstractMethod::DexCacheResolvedMethodsOffset().Int32Value(),
cg->TargetReg(kArg0));
break;
case 2: // Grab target method* [set/use kArg0]
cg->LoadWordDisp(cu, cg->TargetReg(kArg0),
- Array::DataOffset(sizeof(Object*)).Int32Value() + dex_idx * 4,
+ mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() + dex_idx * 4,
cg->TargetReg(kArg0));
break;
default:
@@ -837,13 +840,13 @@
return false;
}
// Location of reference to data array
- int value_offset = String::ValueOffset().Int32Value();
+ int value_offset = mirror::String::ValueOffset().Int32Value();
// Location of count
- int count_offset = String::CountOffset().Int32Value();
+ int count_offset = mirror::String::CountOffset().Int32Value();
// Starting offset within data array
- int offset_offset = String::OffsetOffset().Int32Value();
+ int offset_offset = mirror::String::OffsetOffset().Int32Value();
// Start of char data with array_
- int data_offset = Array::DataOffset(sizeof(uint16_t)).Int32Value();
+ int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();
RegLocation rl_obj = info->args[0];
RegLocation rl_idx = info->args[1];
@@ -921,7 +924,7 @@
RegLocation rl_dest = InlineTarget(cu, info);
RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
GenNullCheck(cu, rl_obj.s_reg_low, rl_obj.low_reg, info->opt_flags);
- LoadWordDisp(cu, rl_obj.low_reg, String::CountOffset().Int32Value(),
+ LoadWordDisp(cu, rl_obj.low_reg, mirror::String::CountOffset().Int32Value(),
rl_result.low_reg);
if (is_empty) {
// dst = (dst == 0);
@@ -1284,7 +1287,7 @@
} else {
if (fast_path && info->type != kInterface) {
call_inst = OpMem(cu, kOpBlx, TargetReg(kArg0),
- AbstractMethod::GetCodeOffset().Int32Value());
+ mirror::AbstractMethod::GetCodeOffset().Int32Value());
} else {
int trampoline = 0;
switch (info->type) {
diff --git a/src/compiler/codegen/mips/int_mips.cc b/src/compiler/codegen/mips/int_mips.cc
index e2a5a02..86f0527 100644
--- a/src/compiler/codegen/mips/int_mips.cc
+++ b/src/compiler/codegen/mips/int_mips.cc
@@ -439,16 +439,16 @@
RegLocation rl_index, RegLocation rl_dest, int scale)
{
RegisterClass reg_class = oat_reg_class_by_size(size);
- int len_offset = Array::LengthOffset().Int32Value();
+ int len_offset = mirror::Array::LengthOffset().Int32Value();
int data_offset;
RegLocation rl_result;
rl_array = LoadValue(cu, rl_array, kCoreReg);
rl_index = LoadValue(cu, rl_index, kCoreReg);
if (size == kLong || size == kDouble) {
- data_offset = Array::DataOffset(sizeof(int64_t)).Int32Value();
+ data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
} else {
- data_offset = Array::DataOffset(sizeof(int32_t)).Int32Value();
+ data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
}
/* null object? */
@@ -511,13 +511,13 @@
RegLocation rl_index, RegLocation rl_src, int scale)
{
RegisterClass reg_class = oat_reg_class_by_size(size);
- int len_offset = Array::LengthOffset().Int32Value();
+ int len_offset = mirror::Array::LengthOffset().Int32Value();
int data_offset;
if (size == kLong || size == kDouble) {
- data_offset = Array::DataOffset(sizeof(int64_t)).Int32Value();
+ data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
} else {
- data_offset = Array::DataOffset(sizeof(int32_t)).Int32Value();
+ data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
}
rl_array = LoadValue(cu, rl_array, kCoreReg);
@@ -583,8 +583,8 @@
void MipsCodegen::GenArrayObjPut(CompilationUnit* cu, int opt_flags, RegLocation rl_array,
RegLocation rl_index, RegLocation rl_src, int scale)
{
- int len_offset = Array::LengthOffset().Int32Value();
- int data_offset = Array::DataOffset(sizeof(Object*)).Int32Value();
+ int len_offset = mirror::Array::LengthOffset().Int32Value();
+ int data_offset = mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value();
FlushAllRegs(cu); // Use explicit registers
LockCallTemps(cu);
@@ -604,7 +604,7 @@
LIR* null_value_check = OpCmpImmBranch(cu, kCondEq, r_value, 0, NULL);
// Get the array's class.
- LoadWordDisp(cu, r_array, Object::ClassOffset().Int32Value(), r_array_class);
+ LoadWordDisp(cu, r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class);
CallRuntimeHelperRegReg(cu, ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
r_array_class, true);
// Redo LoadValues in case they didn't survive the call.
diff --git a/src/compiler/codegen/mir_to_gbc.cc b/src/compiler/codegen/mir_to_gbc.cc
index e38977a..2657b79 100644
--- a/src/compiler/codegen/mir_to_gbc.cc
+++ b/src/compiler/codegen/mir_to_gbc.cc
@@ -2684,7 +2684,7 @@
cg->GenNullCheck(cu, rl_src.s_reg_low, rl_src.low_reg, opt_flags->getZExtValue());
RegLocation rl_dest = GetLoc(cu, call_inst);
RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
- int len_offset = Array::LengthOffset().Int32Value();
+ int len_offset = mirror::Array::LengthOffset().Int32Value();
cg->LoadWordDisp(cu, rl_src.low_reg, len_offset, rl_result.low_reg);
cg->StoreValue(cu, rl_dest, rl_result);
}
diff --git a/src/compiler/codegen/mir_to_lir.cc b/src/compiler/codegen/mir_to_lir.cc
index 2bc0f86..77d581f 100644
--- a/src/compiler/codegen/mir_to_lir.cc
+++ b/src/compiler/codegen/mir_to_lir.cc
@@ -210,7 +210,7 @@
case Instruction::ARRAY_LENGTH:
int len_offset;
- len_offset = Array::LengthOffset().Int32Value();
+ len_offset = mirror::Array::LengthOffset().Int32Value();
rl_src[0] = cg->LoadValue(cu, rl_src[0], kCoreReg);
cg->GenNullCheck(cu, rl_src[0].s_reg_low, rl_src[0].low_reg, opt_flags);
rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
diff --git a/src/compiler/codegen/x86/call_x86.cc b/src/compiler/codegen/x86/call_x86.cc
index 727c5e8..f9b25c8 100644
--- a/src/compiler/codegen/x86/call_x86.cc
+++ b/src/compiler/codegen/x86/call_x86.cc
@@ -163,7 +163,7 @@
NewLIR2(cu, kX86Mov32RT, rDX, Thread::ThinLockIdOffset().Int32Value());
NewLIR2(cu, kX86Sal32RI, rDX, LW_LOCK_OWNER_SHIFT);
NewLIR2(cu, kX86Xor32RR, rAX, rAX);
- NewLIR3(cu, kX86LockCmpxchgMR, rCX, Object::MonitorOffset().Int32Value(), rDX);
+ NewLIR3(cu, kX86LockCmpxchgMR, rCX, mirror::Object::MonitorOffset().Int32Value(), rDX);
LIR* branch = NewLIR2(cu, kX86Jcc8, 0, kX86CondEq);
// If lock is held, go the expensive route - artLockObjectFromCode(self, obj);
CallRuntimeHelperReg(cu, ENTRYPOINT_OFFSET(pLockObjectFromCode), rCX, true);
@@ -180,10 +180,10 @@
// TODO: clear hash state?
NewLIR2(cu, kX86Mov32RT, rDX, Thread::ThinLockIdOffset().Int32Value());
NewLIR2(cu, kX86Sal32RI, rDX, LW_LOCK_OWNER_SHIFT);
- NewLIR3(cu, kX86Mov32RM, rCX, rAX, Object::MonitorOffset().Int32Value());
+ NewLIR3(cu, kX86Mov32RM, rCX, rAX, mirror::Object::MonitorOffset().Int32Value());
OpRegReg(cu, kOpSub, rCX, rDX);
LIR* branch = NewLIR2(cu, kX86Jcc8, 0, kX86CondNe);
- NewLIR3(cu, kX86Mov32MR, rAX, Object::MonitorOffset().Int32Value(), rCX);
+ NewLIR3(cu, kX86Mov32MR, rAX, mirror::Object::MonitorOffset().Int32Value(), rCX);
LIR* branch2 = NewLIR1(cu, kX86Jmp8, 0);
branch->target = NewLIR0(cu, kPseudoTargetLabel);
// Otherwise, go the expensive route - UnlockObjectFromCode(obj);
diff --git a/src/compiler/codegen/x86/int_x86.cc b/src/compiler/codegen/x86/int_x86.cc
index 0f1fc53..f2d023c 100644
--- a/src/compiler/codegen/x86/int_x86.cc
+++ b/src/compiler/codegen/x86/int_x86.cc
@@ -446,16 +446,16 @@
RegLocation rl_index, RegLocation rl_dest, int scale)
{
RegisterClass reg_class = oat_reg_class_by_size(size);
- int len_offset = Array::LengthOffset().Int32Value();
+ int len_offset = mirror::Array::LengthOffset().Int32Value();
int data_offset;
RegLocation rl_result;
rl_array = LoadValue(cu, rl_array, kCoreReg);
rl_index = LoadValue(cu, rl_index, kCoreReg);
if (size == kLong || size == kDouble) {
- data_offset = Array::DataOffset(sizeof(int64_t)).Int32Value();
+ data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
} else {
- data_offset = Array::DataOffset(sizeof(int32_t)).Int32Value();
+ data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
}
/* null object? */
@@ -494,13 +494,13 @@
RegLocation rl_index, RegLocation rl_src, int scale)
{
RegisterClass reg_class = oat_reg_class_by_size(size);
- int len_offset = Array::LengthOffset().Int32Value();
+ int len_offset = mirror::Array::LengthOffset().Int32Value();
int data_offset;
if (size == kLong || size == kDouble) {
- data_offset = Array::DataOffset(sizeof(int64_t)).Int32Value();
+ data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
} else {
- data_offset = Array::DataOffset(sizeof(int32_t)).Int32Value();
+ data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
}
rl_array = LoadValue(cu, rl_array, kCoreReg);
@@ -537,8 +537,8 @@
void X86Codegen::GenArrayObjPut(CompilationUnit* cu, int opt_flags, RegLocation rl_array,
RegLocation rl_index, RegLocation rl_src, int scale)
{
- int len_offset = Array::LengthOffset().Int32Value();
- int data_offset = Array::DataOffset(sizeof(Object*)).Int32Value();
+ int len_offset = mirror::Array::LengthOffset().Int32Value();
+ int data_offset = mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value();
FlushAllRegs(cu); // Use explicit registers
LockCallTemps(cu);
@@ -558,7 +558,7 @@
LIR* null_value_check = OpCmpImmBranch(cu, kCondEq, r_value, 0, NULL);
// Get the array's class.
- LoadWordDisp(cu, r_array, Object::ClassOffset().Int32Value(), r_array_class);
+ LoadWordDisp(cu, r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class);
CallRuntimeHelperRegReg(cu, ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
r_array_class, true);
// Redo LoadValues in case they didn't survive the call.
diff --git a/src/compiler/compiler_internals.h b/src/compiler/compiler_internals.h
index 1379a3b..746999d 100644
--- a/src/compiler/compiler_internals.h
+++ b/src/compiler/compiler_internals.h
@@ -28,11 +28,10 @@
#include "compiler.h"
#include "compiler_ir.h"
#include "compiler_utility.h"
-#include "dex_cache.h"
#include "frontend.h"
#include "gc/card_table.h"
+#include "mirror/dex_cache.h"
#include "monitor.h"
-#include "object.h"
#include "ralloc.h"
#include "thread.h"
#include "utils.h"
diff --git a/src/compiler/frontend.cc b/src/compiler/frontend.cc
index 31423ef..6ccbc07 100644
--- a/src/compiler/frontend.cc
+++ b/src/compiler/frontend.cc
@@ -19,7 +19,7 @@
#include "dataflow.h"
#include "ssa_transformation.h"
#include "leb128.h"
-#include "object.h"
+#include "mirror/object.h"
#include "runtime.h"
#include "codegen/codegen_util.h"
#include "codegen/mir_to_gbc.h"
diff --git a/src/compiler_llvm/compiler_llvm.h b/src/compiler_llvm/compiler_llvm.h
index 9aa9791..bb14c49 100644
--- a/src/compiler_llvm/compiler_llvm.h
+++ b/src/compiler_llvm/compiler_llvm.h
@@ -21,7 +21,7 @@
#include "compiler.h"
#include "dex_file.h"
#include "instruction_set.h"
-#include "object.h"
+#include "mirror/object.h"
#include "procedure_linkage_table.h"
#include <UniquePtr.h>
@@ -31,13 +31,15 @@
#include <vector>
namespace art {
- class ClassLoader;
class CompiledInvokeStub;
class CompiledMethod;
class Compiler;
class OatCompilationUnit;
- class AbstractMethod;
-}
+ namespace mirror {
+ class AbstractMethod;
+ class ClassLoader;
+ } // namespace mirror
+} // namespace art
namespace llvm {
@@ -47,7 +49,7 @@
class PointerType;
class StructType;
class Type;
-}
+} // namespace llvm
namespace art {
diff --git a/src/compiler_llvm/gbc_expander.cc b/src/compiler_llvm/gbc_expander.cc
index a7970d5..0348874 100644
--- a/src/compiler_llvm/gbc_expander.cc
+++ b/src/compiler_llvm/gbc_expander.cc
@@ -19,8 +19,8 @@
#include "compiler.h"
#include "greenland/intrinsic_helper.h"
+#include "mirror/object.h"
#include "oat_compilation_unit.h"
-#include "object.h"
#include "thread.h"
#include "verifier/method_verifier.h"
diff --git a/src/compiler_llvm/jni_compiler.cc b/src/compiler_llvm/jni_compiler.cc
index da55d94..e81afed 100644
--- a/src/compiler_llvm/jni_compiler.cc
+++ b/src/compiler_llvm/jni_compiler.cc
@@ -23,8 +23,8 @@
#include "compiler.h"
#include "compiler_llvm.h"
#include "ir_builder.h"
+#include "mirror/object.h"
#include "oat_compilation_unit.h"
-#include "object.h"
#include "runtime.h"
#include "runtime_support_func.h"
#include "utils_llvm.h"
diff --git a/src/compiler_llvm/jni_compiler.h b/src/compiler_llvm/jni_compiler.h
index c428775..0731e92 100644
--- a/src/compiler_llvm/jni_compiler.h
+++ b/src/compiler_llvm/jni_compiler.h
@@ -21,14 +21,16 @@
namespace art {
class ClassLinker;
- class ClassLoader;
class CompiledMethod;
class Compiler;
- class DexCache;
class DexFile;
- class AbstractMethod;
class OatCompilationUnit;
-}
+ namespace mirror {
+ class AbstractMethod;
+ class ClassLoader;
+ class DexCache;
+ } // namespace mirror
+} // namespace art
namespace llvm {
class AllocaInst;
@@ -39,7 +41,7 @@
class Module;
class Type;
class Value;
-}
+} // namespace llvm
namespace art {
namespace compiler_llvm {
diff --git a/src/compiler_llvm/method_compiler.cc b/src/compiler_llvm/method_compiler.cc
index ccec7e9..bc3fb92 100644
--- a/src/compiler_llvm/method_compiler.cc
+++ b/src/compiler_llvm/method_compiler.cc
@@ -25,8 +25,8 @@
#include "dalvik_reg.h"
#include "greenland/inferred_reg_category_map.h"
#include "ir_builder.h"
+#include "mirror/object.h"
#include "oat_compilation_unit.h"
-#include "object.h"
#include "object_utils.h"
#include "runtime_support_func.h"
#include "runtime_support_llvm.h"
diff --git a/src/compiler_llvm/method_compiler.h b/src/compiler_llvm/method_compiler.h
index f67866a..dd9d182 100644
--- a/src/compiler_llvm/method_compiler.h
+++ b/src/compiler_llvm/method_compiler.h
@@ -34,17 +34,20 @@
namespace art {
class ClassLinker;
- class ClassLoader;
class CompiledMethod;
class Compiler;
- class DexCache;
- class Field;
class OatCompilationUnit;
+ namespace mirror {
+ class ClassLoader;
+ class DexCache;
+ class Field;
+ } // namespace mirror
+
namespace greenland {
class InferredRegCategoryMap;
- }
-}
+ } // namespace greenland
+} // namespace art
namespace llvm {
@@ -55,7 +58,7 @@
class LLVMContext;
class Module;
class Type;
-}
+} // namespace llvm
namespace art {
diff --git a/src/compiler_llvm/runtime_support_builder.cc b/src/compiler_llvm/runtime_support_builder.cc
index 169f8e8..36b5fa1 100644
--- a/src/compiler_llvm/runtime_support_builder.cc
+++ b/src/compiler_llvm/runtime_support_builder.cc
@@ -19,7 +19,7 @@
#include "gc/card_table.h"
#include "ir_builder.h"
#include "monitor.h"
-#include "object.h"
+#include "mirror/object.h"
#include "thread.h"
#include <llvm/DerivedTypes.h>
@@ -179,7 +179,7 @@
kTBAARuntimeInfo);
Value* monitor =
irb_.LoadFromObjectOffset(object,
- Object::MonitorOffset().Int32Value(),
+ mirror::Object::MonitorOffset().Int32Value(),
irb_.getJIntTy(),
kTBAARuntimeInfo);
@@ -199,7 +199,7 @@
irb_.SetInsertPoint(bb_fast);
// Set all bits to zero (except hash state)
irb_.StoreToObjectOffset(object,
- Object::MonitorOffset().Int32Value(),
+ mirror::Object::MonitorOffset().Int32Value(),
hash_state,
kTBAARuntimeInfo);
irb_.CreateBr(bb_cont);
diff --git a/src/compiler_llvm/runtime_support_builder_thumb2.cc b/src/compiler_llvm/runtime_support_builder_thumb2.cc
index 3299afe..c18ae83 100644
--- a/src/compiler_llvm/runtime_support_builder_thumb2.cc
+++ b/src/compiler_llvm/runtime_support_builder_thumb2.cc
@@ -17,8 +17,8 @@
#include "runtime_support_builder_thumb2.h"
#include "ir_builder.h"
+#include "mirror/object.h"
#include "monitor.h"
-#include "object.h"
#include "thread.h"
#include "utils_llvm.h"
diff --git a/src/compiler_llvm/runtime_support_llvm.cc b/src/compiler_llvm/runtime_support_llvm.cc
index d3552e9..8de90ff 100644
--- a/src/compiler_llvm/runtime_support_llvm.cc
+++ b/src/compiler_llvm/runtime_support_llvm.cc
@@ -22,8 +22,8 @@
#include "compiler_runtime_func_list.h"
#include "dex_file.h"
#include "dex_instruction.h"
+#include "mirror/object.h"
#include "nth_caller_visitor.h"
-#include "object.h"
#include "object_utils.h"
#include "reflection.h"
#include "runtime_support.h"
@@ -60,7 +60,7 @@
return NULL;
}
-void art_lock_object_from_code(Object* obj, Thread* thread)
+void art_lock_object_from_code(mirror::Object* obj, Thread* thread)
EXCLUSIVE_LOCK_FUNCTION(monitor_lock_) {
DCHECK(obj != NULL); // Assumed to have been checked before entry
obj->MonitorEnter(thread); // May block
@@ -69,7 +69,7 @@
DCHECK(!thread->IsExceptionPending());
}
-void art_unlock_object_from_code(Object* obj, Thread* thread)
+void art_unlock_object_from_code(mirror::Object* obj, Thread* thread)
UNLOCK_FUNCTION(monitor_lock_) {
DCHECK(obj != NULL); // Assumed to have been checked before entry
// MonitorExit may throw exception
@@ -82,7 +82,7 @@
}
ShadowFrame* art_push_shadow_frame_from_code(Thread* thread, ShadowFrame* new_shadow_frame,
- AbstractMethod* method, uint32_t num_vregs) {
+ mirror::AbstractMethod* method, uint32_t num_vregs) {
ShadowFrame* old_frame = thread->PushShadowFrame(new_shadow_frame);
new_shadow_frame->SetMethod(method);
new_shadow_frame->SetNumberOfVRegs(num_vregs);
@@ -120,13 +120,13 @@
void art_throw_no_such_method_from_code(int32_t method_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// We need the calling method as context for the method_idx.
- AbstractMethod* method = Thread::Current()->GetCurrentMethod();
+ mirror::AbstractMethod* method = Thread::Current()->GetCurrentMethod();
ThrowNoSuchMethodError(method_idx, method);
}
void art_throw_null_pointer_exception_from_code(uint32_t dex_pc)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- AbstractMethod* throw_method =
+ mirror::AbstractMethod* throw_method =
Thread::Current()->GetManagedStack()->GetTopShadowFrame()->GetMethod();
ThrowNullPointerExceptionFromDexPC(throw_method, dex_pc);
}
@@ -135,21 +135,21 @@
ThrowStackOverflowError(Thread::Current());
}
-void art_throw_exception_from_code(Object* exception) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Thread::Current()->DeliverException(static_cast<Throwable*>(exception));
+void art_throw_exception_from_code(mirror::Object* exception) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Thread::Current()->DeliverException(static_cast<mirror::Throwable*>(exception));
}
void* art_get_and_clear_exception(Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(self->IsExceptionPending());
- Throwable* exception = self->GetException();
+ mirror::Throwable* exception = self->GetException();
self->ClearException();
return exception;
}
-int32_t art_find_catch_block_from_code(AbstractMethod* current_method, uint32_t ti_offset)
+int32_t art_find_catch_block_from_code(mirror::AbstractMethod* current_method, uint32_t ti_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Class* exception_type = Thread::Current()->GetException()->GetClass();
+ mirror::Class* exception_type = Thread::Current()->GetException()->GetClass();
MethodHelper mh(current_method);
const DexFile::CodeItem* code_item = mh.GetCodeItem();
DCHECK_LT(ti_offset, code_item->tries_size_);
@@ -164,7 +164,7 @@
return iter_index;
}
// Does this catch exception type apply?
- Class* iter_exception_type = mh.GetDexCacheResolvedType(iter_type_idx);
+ mirror::Class* iter_exception_type = mh.GetDexCacheResolvedType(iter_type_idx);
if (iter_exception_type == NULL) {
// The verifier should take care of resolving all exception classes early
LOG(WARNING) << "Unresolved exception class when finding catch block: "
@@ -183,57 +183,57 @@
// Object Space
//----------------------------------------------------------------------------
-Object* art_alloc_object_from_code(uint32_t type_idx,
- AbstractMethod* referrer,
+mirror::Object* art_alloc_object_from_code(uint32_t type_idx,
+ mirror::AbstractMethod* referrer,
Thread* thread)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return AllocObjectFromCode(type_idx, referrer, thread, false);
}
-Object* art_alloc_object_from_code_with_access_check(uint32_t type_idx,
- AbstractMethod* referrer,
+mirror::Object* art_alloc_object_from_code_with_access_check(uint32_t type_idx,
+ mirror::AbstractMethod* referrer,
Thread* thread)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return AllocObjectFromCode(type_idx, referrer, thread, true);
}
-Object* art_alloc_array_from_code(uint32_t type_idx,
- AbstractMethod* referrer,
+mirror::Object* art_alloc_array_from_code(uint32_t type_idx,
+ mirror::AbstractMethod* referrer,
uint32_t length,
Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return AllocArrayFromCode(type_idx, referrer, length, self, false);
}
-Object* art_alloc_array_from_code_with_access_check(uint32_t type_idx,
- AbstractMethod* referrer,
+mirror::Object* art_alloc_array_from_code_with_access_check(uint32_t type_idx,
+ mirror::AbstractMethod* referrer,
uint32_t length,
Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return AllocArrayFromCode(type_idx, referrer, length, self, true);
}
-Object* art_check_and_alloc_array_from_code(uint32_t type_idx,
- AbstractMethod* referrer,
+mirror::Object* art_check_and_alloc_array_from_code(uint32_t type_idx,
+ mirror::AbstractMethod* referrer,
uint32_t length,
Thread* thread)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return CheckAndAllocArrayFromCode(type_idx, referrer, length, thread, false);
}
-Object* art_check_and_alloc_array_from_code_with_access_check(uint32_t type_idx,
- AbstractMethod* referrer,
+mirror::Object* art_check_and_alloc_array_from_code_with_access_check(uint32_t type_idx,
+ mirror::AbstractMethod* referrer,
uint32_t length,
Thread* thread)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return CheckAndAllocArrayFromCode(type_idx, referrer, length, thread, true);
}
-static AbstractMethod* FindMethodHelper(uint32_t method_idx, Object* this_object,
- AbstractMethod* caller_method, bool access_check,
+static mirror::AbstractMethod* FindMethodHelper(uint32_t method_idx, mirror::Object* this_object,
+ mirror::AbstractMethod* caller_method, bool access_check,
InvokeType type, Thread* thread)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- AbstractMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check, type);
+ mirror::AbstractMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check, type);
if (UNLIKELY(method == NULL)) {
method = FindMethodFromCode(method_idx, this_object, caller_method,
thread, access_check, type);
@@ -254,71 +254,71 @@
return method;
}
-Object* art_find_static_method_from_code_with_access_check(uint32_t method_idx,
- Object* this_object,
- AbstractMethod* referrer,
+mirror::Object* art_find_static_method_from_code_with_access_check(uint32_t method_idx,
+ mirror::Object* this_object,
+ mirror::AbstractMethod* referrer,
Thread* thread)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return FindMethodHelper(method_idx, this_object, referrer, true, kStatic, thread);
}
-Object* art_find_direct_method_from_code_with_access_check(uint32_t method_idx,
- Object* this_object,
- AbstractMethod* referrer,
+mirror::Object* art_find_direct_method_from_code_with_access_check(uint32_t method_idx,
+ mirror::Object* this_object,
+ mirror::AbstractMethod* referrer,
Thread* thread)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return FindMethodHelper(method_idx, this_object, referrer, true, kDirect, thread);
}
-Object* art_find_virtual_method_from_code_with_access_check(uint32_t method_idx,
- Object* this_object,
- AbstractMethod* referrer,
+mirror::Object* art_find_virtual_method_from_code_with_access_check(uint32_t method_idx,
+ mirror::Object* this_object,
+ mirror::AbstractMethod* referrer,
Thread* thread)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return FindMethodHelper(method_idx, this_object, referrer, true, kVirtual, thread);
}
-Object* art_find_super_method_from_code_with_access_check(uint32_t method_idx,
- Object* this_object,
- AbstractMethod* referrer,
+mirror::Object* art_find_super_method_from_code_with_access_check(uint32_t method_idx,
+ mirror::Object* this_object,
+ mirror::AbstractMethod* referrer,
Thread* thread)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return FindMethodHelper(method_idx, this_object, referrer, true, kSuper, thread);
}
-Object*
+mirror::Object*
art_find_interface_method_from_code_with_access_check(uint32_t method_idx,
- Object* this_object,
- AbstractMethod* referrer,
+ mirror::Object* this_object,
+ mirror::AbstractMethod* referrer,
Thread* thread)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return FindMethodHelper(method_idx, this_object, referrer, true, kInterface, thread);
}
-Object* art_find_interface_method_from_code(uint32_t method_idx,
- Object* this_object,
- AbstractMethod* referrer,
+mirror::Object* art_find_interface_method_from_code(uint32_t method_idx,
+ mirror::Object* this_object,
+ mirror::AbstractMethod* referrer,
Thread* thread)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return FindMethodHelper(method_idx, this_object, referrer, false, kInterface, thread);
}
-Object* art_initialize_static_storage_from_code(uint32_t type_idx,
- AbstractMethod* referrer,
+mirror::Object* art_initialize_static_storage_from_code(uint32_t type_idx,
+ mirror::AbstractMethod* referrer,
Thread* thread)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return ResolveVerifyAndClinit(type_idx, referrer, thread, true, false);
}
-Object* art_initialize_type_from_code(uint32_t type_idx,
- AbstractMethod* referrer,
+mirror::Object* art_initialize_type_from_code(uint32_t type_idx,
+ mirror::AbstractMethod* referrer,
Thread* thread)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return ResolveVerifyAndClinit(type_idx, referrer, thread, false, false);
}
-Object* art_initialize_type_and_verify_access_from_code(uint32_t type_idx,
- AbstractMethod* referrer,
+mirror::Object* art_initialize_type_and_verify_access_from_code(uint32_t type_idx,
+ mirror::AbstractMethod* referrer,
Thread* thread)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Called when caller isn't guaranteed to have access to a type and the dex cache may be
@@ -326,14 +326,14 @@
return ResolveVerifyAndClinit(type_idx, referrer, thread, false, true);
}
-Object* art_resolve_string_from_code(AbstractMethod* referrer, uint32_t string_idx)
+mirror::Object* art_resolve_string_from_code(mirror::AbstractMethod* referrer, uint32_t string_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return ResolveStringFromCode(referrer, string_idx);
}
-int32_t art_set32_static_from_code(uint32_t field_idx, AbstractMethod* referrer, int32_t new_value)
+int32_t art_set32_static_from_code(uint32_t field_idx, mirror::AbstractMethod* referrer, int32_t new_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(uint32_t));
+ mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(uint32_t));
if (LIKELY(field != NULL)) {
field->Set32(field->GetDeclaringClass(), new_value);
return 0;
@@ -347,9 +347,9 @@
return -1;
}
-int32_t art_set64_static_from_code(uint32_t field_idx, AbstractMethod* referrer, int64_t new_value)
+int32_t art_set64_static_from_code(uint32_t field_idx, mirror::AbstractMethod* referrer, int64_t new_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(uint64_t));
+ mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(uint64_t));
if (LIKELY(field != NULL)) {
field->Set64(field->GetDeclaringClass(), new_value);
return 0;
@@ -363,15 +363,15 @@
return -1;
}
-int32_t art_set_obj_static_from_code(uint32_t field_idx, AbstractMethod* referrer, Object* new_value)
+int32_t art_set_obj_static_from_code(uint32_t field_idx, mirror::AbstractMethod* referrer, mirror::Object* new_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Field* field = FindFieldFast(field_idx, referrer, StaticObjectWrite, sizeof(Object*));
+ mirror::Field* field = FindFieldFast(field_idx, referrer, StaticObjectWrite, sizeof(mirror::Object*));
if (LIKELY(field != NULL)) {
field->SetObj(field->GetDeclaringClass(), new_value);
return 0;
}
field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
- StaticObjectWrite, sizeof(Object*));
+ StaticObjectWrite, sizeof(mirror::Object*));
if (LIKELY(field != NULL)) {
field->SetObj(field->GetDeclaringClass(), new_value);
return 0;
@@ -379,9 +379,9 @@
return -1;
}
-int32_t art_get32_static_from_code(uint32_t field_idx, AbstractMethod* referrer)
+int32_t art_get32_static_from_code(uint32_t field_idx, mirror::AbstractMethod* referrer)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint32_t));
+ mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint32_t));
if (LIKELY(field != NULL)) {
return field->Get32(field->GetDeclaringClass());
}
@@ -393,9 +393,9 @@
return 0;
}
-int64_t art_get64_static_from_code(uint32_t field_idx, AbstractMethod* referrer)
+int64_t art_get64_static_from_code(uint32_t field_idx, mirror::AbstractMethod* referrer)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint64_t));
+ mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint64_t));
if (LIKELY(field != NULL)) {
return field->Get64(field->GetDeclaringClass());
}
@@ -407,24 +407,24 @@
return 0;
}
-Object* art_get_obj_static_from_code(uint32_t field_idx, AbstractMethod* referrer)
+mirror::Object* art_get_obj_static_from_code(uint32_t field_idx, mirror::AbstractMethod* referrer)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Field* field = FindFieldFast(field_idx, referrer, StaticObjectRead, sizeof(Object*));
+ mirror::Field* field = FindFieldFast(field_idx, referrer, StaticObjectRead, sizeof(mirror::Object*));
if (LIKELY(field != NULL)) {
return field->GetObj(field->GetDeclaringClass());
}
field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
- StaticObjectRead, sizeof(Object*));
+ StaticObjectRead, sizeof(mirror::Object*));
if (LIKELY(field != NULL)) {
return field->GetObj(field->GetDeclaringClass());
}
return 0;
}
-int32_t art_set32_instance_from_code(uint32_t field_idx, AbstractMethod* referrer,
- Object* obj, uint32_t new_value)
+int32_t art_set32_instance_from_code(uint32_t field_idx, mirror::AbstractMethod* referrer,
+ mirror::Object* obj, uint32_t new_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(uint32_t));
+ mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(uint32_t));
if (LIKELY(field != NULL)) {
field->Set32(obj, new_value);
return 0;
@@ -438,10 +438,10 @@
return -1;
}
-int32_t art_set64_instance_from_code(uint32_t field_idx, AbstractMethod* referrer,
- Object* obj, int64_t new_value)
+int32_t art_set64_instance_from_code(uint32_t field_idx, mirror::AbstractMethod* referrer,
+ mirror::Object* obj, int64_t new_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(uint64_t));
+ mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(uint64_t));
if (LIKELY(field != NULL)) {
field->Set64(obj, new_value);
return 0;
@@ -455,16 +455,16 @@
return -1;
}
-int32_t art_set_obj_instance_from_code(uint32_t field_idx, AbstractMethod* referrer,
- Object* obj, Object* new_value)
+int32_t art_set_obj_instance_from_code(uint32_t field_idx, mirror::AbstractMethod* referrer,
+ mirror::Object* obj, mirror::Object* new_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Field* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite, sizeof(Object*));
+ mirror::Field* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite, sizeof(mirror::Object*));
if (LIKELY(field != NULL)) {
field->SetObj(obj, new_value);
return 0;
}
field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
- InstanceObjectWrite, sizeof(Object*));
+ InstanceObjectWrite, sizeof(mirror::Object*));
if (LIKELY(field != NULL)) {
field->SetObj(obj, new_value);
return 0;
@@ -472,9 +472,9 @@
return -1;
}
-int32_t art_get32_instance_from_code(uint32_t field_idx, AbstractMethod* referrer, Object* obj)
+int32_t art_get32_instance_from_code(uint32_t field_idx, mirror::AbstractMethod* referrer, mirror::Object* obj)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(uint32_t));
+ mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(uint32_t));
if (LIKELY(field != NULL)) {
return field->Get32(obj);
}
@@ -486,9 +486,9 @@
return 0;
}
-int64_t art_get64_instance_from_code(uint32_t field_idx, AbstractMethod* referrer, Object* obj)
+int64_t art_get64_instance_from_code(uint32_t field_idx, mirror::AbstractMethod* referrer, mirror::Object* obj)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(uint64_t));
+ mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(uint64_t));
if (LIKELY(field != NULL)) {
return field->Get64(obj);
}
@@ -500,22 +500,22 @@
return 0;
}
-Object* art_get_obj_instance_from_code(uint32_t field_idx, AbstractMethod* referrer, Object* obj)
+mirror::Object* art_get_obj_instance_from_code(uint32_t field_idx, mirror::AbstractMethod* referrer, mirror::Object* obj)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Field* field = FindFieldFast(field_idx, referrer, InstanceObjectRead, sizeof(Object*));
+ mirror::Field* field = FindFieldFast(field_idx, referrer, InstanceObjectRead, sizeof(mirror::Object*));
if (LIKELY(field != NULL)) {
return field->GetObj(obj);
}
field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
- InstanceObjectRead, sizeof(Object*));
+ InstanceObjectRead, sizeof(mirror::Object*));
if (LIKELY(field != NULL)) {
return field->GetObj(obj);
}
return 0;
}
-void art_fill_array_data_from_code(AbstractMethod* method, uint32_t dex_pc,
- Array* array, uint32_t payload_offset)
+void art_fill_array_data_from_code(mirror::AbstractMethod* method, uint32_t dex_pc,
+ mirror::Array* array, uint32_t payload_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Test: Is array equal to null? (Guard NullPointerException)
if (UNLIKELY(array == NULL)) {
@@ -555,14 +555,14 @@
// Type checking, in the nature of casting
//----------------------------------------------------------------------------
-int32_t art_is_assignable_from_code(const Class* dest_type, const Class* src_type)
+int32_t art_is_assignable_from_code(const mirror::Class* dest_type, const mirror::Class* src_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(dest_type != NULL);
DCHECK(src_type != NULL);
return dest_type->IsAssignableFrom(src_type) ? 1 : 0;
}
-void art_check_cast_from_code(const Class* dest_type, const Class* src_type)
+void art_check_cast_from_code(const mirror::Class* dest_type, const mirror::Class* src_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(dest_type->IsClass()) << PrettyClass(dest_type);
DCHECK(src_type->IsClass()) << PrettyClass(src_type);
@@ -574,16 +574,16 @@
}
}
-void art_check_put_array_element_from_code(const Object* element, const Object* array)
+void art_check_put_array_element_from_code(const mirror::Object* element, const mirror::Object* array)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (element == NULL) {
return;
}
DCHECK(array != NULL);
- Class* array_class = array->GetClass();
+ mirror::Class* array_class = array->GetClass();
DCHECK(array_class != NULL);
- Class* component_type = array_class->GetComponentType();
- Class* element_class = element->GetClass();
+ mirror::Class* component_type = array_class->GetComponentType();
+ mirror::Class* element_class = element->GetClass();
if (UNLIKELY(!component_type->IsAssignableFrom(element_class))) {
Thread::Current()->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;",
"%s cannot be stored in an array of type %s",
@@ -634,11 +634,11 @@
PopLocalReferences(saved_local_ref_cookie, self);
}
-Object* art_jni_method_end_with_reference(jobject result, uint32_t saved_local_ref_cookie,
+mirror::Object* art_jni_method_end_with_reference(jobject result, uint32_t saved_local_ref_cookie,
Thread* self)
SHARED_LOCK_FUNCTION(Locks::mutator_lock_) {
self->TransitionFromSuspendedToRunnable();
- Object* o = self->DecodeJObject(result); // Must decode before pop.
+ mirror::Object* o = self->DecodeJObject(result); // Must decode before pop.
PopLocalReferences(saved_local_ref_cookie, self);
// Process result.
if (UNLIKELY(self->GetJniEnv()->check_jni)) {
@@ -650,13 +650,13 @@
return o;
}
-Object* art_jni_method_end_with_reference_synchronized(jobject result,
+mirror::Object* art_jni_method_end_with_reference_synchronized(jobject result,
uint32_t saved_local_ref_cookie,
jobject locked, Thread* self)
SHARED_LOCK_FUNCTION(Locks::mutator_lock_) {
self->TransitionFromSuspendedToRunnable();
UnlockJniSynchronizedMethod(locked, self); // Must decode before pop.
- Object* o = self->DecodeJObject(result);
+ mirror::Object* o = self->DecodeJObject(result);
PopLocalReferences(saved_local_ref_cookie, self);
// Process result.
if (UNLIKELY(self->GetJniEnv()->check_jni)) {
@@ -711,12 +711,12 @@
// Handler for invocation on proxy methods. Create a boxed argument array and invoke the invocation
// handler which is a field within the proxy object receiver. The var args encode the arguments
// with the last argument being a pointer to a JValue to store the result in.
-void art_proxy_invoke_handler_from_code(AbstractMethod* proxy_method, ...)
+void art_proxy_invoke_handler_from_code(mirror::AbstractMethod* proxy_method, ...)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
va_list ap;
va_start(ap, proxy_method);
- Object* receiver = va_arg(ap, Object*);
+ mirror::Object* receiver = va_arg(ap, mirror::Object*);
Thread* self = va_arg(ap, Thread*);
MethodHelper proxy_mh(proxy_method);
@@ -734,19 +734,19 @@
jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);
// Convert proxy method into expected interface method.
- AbstractMethod* interface_method = proxy_method->FindOverriddenMethod();
+ mirror::AbstractMethod* interface_method = proxy_method->FindOverriddenMethod();
DCHECK(interface_method != NULL);
DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_method);
- // Record arguments and turn Object* arguments into jobject to survive GC.
+ // Record arguments and turn mirror::Object* arguments into jobject to survive GC.
std::vector<jvalue> args;
const size_t num_params = proxy_mh.NumArgs();
for (size_t i = 1; i < num_params; ++i) {
jvalue val;
switch (proxy_mh.GetParamPrimitiveType(i)) {
case Primitive::kPrimNot:
- val.l = soa.AddLocalReference<jobject>(va_arg(ap, Object*));
+ val.l = soa.AddLocalReference<jobject>(va_arg(ap, mirror::Object*));
break;
case Primitive::kPrimBoolean: // Fall-through.
case Primitive::kPrimByte: // Fall-through.
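The field and method entry points rewritten above all share one shape: try a non-resolving fast lookup, fall back to the slow resolving path, and return a sentinel on failure so the caller can check the pending exception. A simplified stand-alone sketch of that control flow (FindFast and FindSlow are stand-ins for FindFieldFast and FindFieldFromCode, not the real helpers):

#include <cstdint>

namespace demo {

struct Field { int32_t value; };

static Field g_resolved = { 42 };

// Stand-in fast path: succeeds only for entries that are already resolved.
Field* FindFast(uint32_t field_idx) { return field_idx == 0 ? &g_resolved : nullptr; }
// Stand-in slow path: performs resolution and may still fail (the real helper
// also raises the exception that the caller later inspects).
Field* FindSlow(uint32_t field_idx) { return field_idx < 8 ? &g_resolved : nullptr; }

int32_t Get32StaticFromCode(uint32_t field_idx) {
  Field* field = FindFast(field_idx);
  if (field != nullptr) {
    return field->value;          // fast path hit
  }
  field = FindSlow(field_idx);    // slow path: resolve, then retry the access
  return field != nullptr ? field->value : 0;
}

}  // namespace demo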
diff --git a/src/compiler_llvm/runtime_support_llvm.h b/src/compiler_llvm/runtime_support_llvm.h
index 6c133c9..6a0b339 100644
--- a/src/compiler_llvm/runtime_support_llvm.h
+++ b/src/compiler_llvm/runtime_support_llvm.h
@@ -17,7 +17,7 @@
#ifndef ART_SRC_COMPILER_LLVM_RUNTIME_SUPPORT_LLVM_H_
#define ART_SRC_COMPILER_LLVM_RUNTIME_SUPPORT_LLVM_H_
-#include "object.h"
+#include "mirror/object.h"
namespace art {
diff --git a/src/compiler_llvm/stub_compiler.cc b/src/compiler_llvm/stub_compiler.cc
index d03400f..3a28b87 100644
--- a/src/compiler_llvm/stub_compiler.cc
+++ b/src/compiler_llvm/stub_compiler.cc
@@ -22,7 +22,7 @@
#include "compiler.h"
#include "compiler_llvm.h"
#include "ir_builder.h"
-#include "object.h"
+#include "mirror/object.h"
#include "runtime_support_func.h"
#include "utils_llvm.h"
diff --git a/src/compiler_test.cc b/src/compiler_test.cc
index f513511..bd25eb3 100644
--- a/src/compiler_test.cc
+++ b/src/compiler_test.cc
@@ -22,10 +22,13 @@
#include "UniquePtr.h"
#include "class_linker.h"
#include "common_test.h"
-#include "dex_cache.h"
#include "dex_file.h"
#include "heap.h"
-#include "object.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/class.h"
+#include "mirror/class-inl.h"
+#include "mirror/dex_cache.h"
+#include "mirror/object_array-inl.h"
namespace art {
@@ -69,7 +72,7 @@
const DexFile::ClassDef& class_def = dex_file.GetClassDef(i);
const char* descriptor = dex_file.GetClassDescriptor(class_def);
ScopedObjectAccess soa(Thread::Current());
- Class* c = class_linker->FindClass(descriptor, soa.Decode<ClassLoader*>(class_loader));
+ mirror::Class* c = class_linker->FindClass(descriptor, soa.Decode<mirror::ClassLoader*>(class_loader));
CHECK(c != NULL);
for (size_t i = 0; i < c->NumDirectMethods(); i++) {
MakeExecutable(c->GetDirectMethod(i));
@@ -92,21 +95,21 @@
// All libcore references should resolve
ScopedObjectAccess soa(Thread::Current());
const DexFile* dex = java_lang_dex_file_;
- DexCache* dex_cache = class_linker_->FindDexCache(*dex);
+ mirror::DexCache* dex_cache = class_linker_->FindDexCache(*dex);
EXPECT_EQ(dex->NumStringIds(), dex_cache->NumStrings());
for (size_t i = 0; i < dex_cache->NumStrings(); i++) {
- const String* string = dex_cache->GetResolvedString(i);
+ const mirror::String* string = dex_cache->GetResolvedString(i);
EXPECT_TRUE(string != NULL) << "string_idx=" << i;
}
EXPECT_EQ(dex->NumTypeIds(), dex_cache->NumResolvedTypes());
for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) {
- Class* type = dex_cache->GetResolvedType(i);
+ mirror::Class* type = dex_cache->GetResolvedType(i);
EXPECT_TRUE(type != NULL) << "type_idx=" << i
<< " " << dex->GetTypeDescriptor(dex->GetTypeId(i));
}
EXPECT_EQ(dex->NumMethodIds(), dex_cache->NumResolvedMethods());
for (size_t i = 0; i < dex_cache->NumResolvedMethods(); i++) {
- AbstractMethod* method = dex_cache->GetResolvedMethod(i);
+ mirror::AbstractMethod* method = dex_cache->GetResolvedMethod(i);
EXPECT_TRUE(method != NULL) << "method_idx=" << i
<< " " << dex->GetMethodDeclaringClassDescriptor(dex->GetMethodId(i))
<< " " << dex->GetMethodName(dex->GetMethodId(i));
@@ -117,7 +120,7 @@
}
EXPECT_EQ(dex->NumFieldIds(), dex_cache->NumResolvedFields());
for (size_t i = 0; i < dex_cache->NumResolvedFields(); i++) {
- Field* field = dex_cache->GetResolvedField(i);
+ mirror::Field* field = dex_cache->GetResolvedField(i);
EXPECT_TRUE(field != NULL) << "field_idx=" << i
<< " " << dex->GetFieldDeclaringClassDescriptor(dex->GetFieldId(i))
<< " " << dex->GetFieldName(dex->GetFieldId(i));
diff --git a/src/debugger.cc b/src/debugger.cc
index 3e93511..3121725 100644
--- a/src/debugger.cc
+++ b/src/debugger.cc
@@ -21,10 +21,19 @@
#include <set>
#include "class_linker.h"
-#include "class_loader.h"
+#include "class_linker-inl.h"
#include "dex_instruction.h"
+#include "gc/card_table-inl.h"
#include "gc/large_object_space.h"
#include "gc/space.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/class.h"
+#include "mirror/class-inl.h"
+#include "mirror/class_loader.h"
+#include "mirror/field-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
+#include "mirror/throwable.h"
#include "oat/runtime/context.h"
#include "object_utils.h"
#include "safe_map.h"
@@ -34,6 +43,7 @@
#include "sirt_ref.h"
#include "stack_indirect_reference_table.h"
#include "thread_list.h"
+#include "utf.h"
#include "well_known_classes.h"
namespace art {
@@ -42,14 +52,14 @@
static const size_t kNumAllocRecords = 512; // Must be power of 2.
static const uintptr_t kInvalidId = 1;
-static const Object* kInvalidObject = reinterpret_cast<Object*>(kInvalidId);
+static const mirror::Object* kInvalidObject = reinterpret_cast<mirror::Object*>(kInvalidId);
class ObjectRegistry {
public:
ObjectRegistry() : lock_("ObjectRegistry lock") {
}
- JDWP::ObjectId Add(Object* o) {
+ JDWP::ObjectId Add(mirror::Object* o) {
if (o == NULL) {
return 0;
}
@@ -76,14 +86,14 @@
}
MutexLock mu(Thread::Current(), lock_);
- typedef SafeMap<JDWP::ObjectId, Object*>::iterator It; // C++0x auto
+ typedef SafeMap<JDWP::ObjectId, mirror::Object*>::iterator It; // C++0x auto
It it = map_.find(id);
return (it != map_.end()) ? reinterpret_cast<T>(it->second) : reinterpret_cast<T>(kInvalidId);
}
- void VisitRoots(Heap::RootVisitor* visitor, void* arg) {
+ void VisitRoots(RootVisitor* visitor, void* arg) {
MutexLock mu(Thread::Current(), lock_);
- typedef SafeMap<JDWP::ObjectId, Object*>::iterator It; // C++0x auto
+ typedef SafeMap<JDWP::ObjectId, mirror::Object*>::iterator It; // C++0x auto
for (It it = map_.begin(); it != map_.end(); ++it) {
visitor(it->second, arg);
}
@@ -91,11 +101,11 @@
private:
Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
- SafeMap<JDWP::ObjectId, Object*> map_;
+ SafeMap<JDWP::ObjectId, mirror::Object*> map_;
};
struct AllocRecordStackTraceElement {
- AbstractMethod* method;
+ mirror::AbstractMethod* method;
uint32_t dex_pc;
int32_t LineNumber() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -104,7 +114,7 @@
};
struct AllocRecord {
- Class* type;
+ mirror::Class* type;
size_t byte_count;
uint16_t thin_lock_id;
AllocRecordStackTraceElement stack[kMaxAllocRecordStackDepth]; // Unused entries have NULL method.
@@ -119,9 +129,9 @@
};
struct Breakpoint {
- AbstractMethod* method;
+ mirror::AbstractMethod* method;
uint32_t dex_pc;
- Breakpoint(AbstractMethod* method, uint32_t dex_pc) : method(method), dex_pc(dex_pc) {}
+ Breakpoint(mirror::AbstractMethod* method, uint32_t dex_pc) : method(method), dex_pc(dex_pc) {}
};
static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs)
@@ -138,7 +148,7 @@
JDWP::JdwpStepSize step_size;
JDWP::JdwpStepDepth step_depth;
- const AbstractMethod* method;
+ const mirror::AbstractMethod* method;
int32_t line_number; // Or -1 for native methods.
std::set<uint32_t> dex_pcs;
int stack_depth;
@@ -180,7 +190,7 @@
static std::vector<Breakpoint> gBreakpoints GUARDED_BY(Locks::breakpoint_lock_);
static SingleStepControl gSingleStepControl GUARDED_BY(Locks::breakpoint_lock_);
-static bool IsBreakpoint(AbstractMethod* m, uint32_t dex_pc)
+static bool IsBreakpoint(mirror::AbstractMethod* m, uint32_t dex_pc)
LOCKS_EXCLUDED(Locks::breakpoint_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
@@ -200,9 +210,9 @@
return thread->IsSuspended() && thread->GetDebugSuspendCount() > 0;
}
-static Array* DecodeArray(JDWP::RefTypeId id, JDWP::JdwpError& status)
+static mirror::Array* DecodeArray(JDWP::RefTypeId id, JDWP::JdwpError& status)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Object* o = gRegistry->Get<Object*>(id);
+ mirror::Object* o = gRegistry->Get<mirror::Object*>(id);
if (o == NULL || o == kInvalidObject) {
status = JDWP::ERR_INVALID_OBJECT;
return NULL;
@@ -215,9 +225,9 @@
return o->AsArray();
}
-static Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError& status)
+static mirror::Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError& status)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Object* o = gRegistry->Get<Object*>(id);
+ mirror::Object* o = gRegistry->Get<mirror::Object*>(id);
if (o == NULL || o == kInvalidObject) {
status = JDWP::ERR_INVALID_OBJECT;
return NULL;
@@ -234,13 +244,13 @@
EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Object* thread_peer = gRegistry->Get<Object*>(thread_id);
+ mirror::Object* thread_peer = gRegistry->Get<mirror::Object*>(thread_id);
if (thread_peer == NULL || thread_peer == kInvalidObject) {
// This isn't even an object.
return JDWP::ERR_INVALID_OBJECT;
}
- Class* java_lang_Thread = soa.Decode<Class*>(WellKnownClasses::java_lang_Thread);
+ mirror::Class* java_lang_Thread = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
if (!java_lang_Thread->IsAssignableFrom(thread_peer->GetClass())) {
// This isn't a thread.
return JDWP::ERR_INVALID_THREAD;
@@ -260,7 +270,7 @@
return static_cast<JDWP::JdwpTag>(descriptor[0]);
}
-static JDWP::JdwpTag TagFromClass(Class* c)
+static JDWP::JdwpTag TagFromClass(mirror::Class* c)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
CHECK(c != NULL);
if (c->IsArrayClass()) {
@@ -291,7 +301,7 @@
*
* Null objects are tagged JT_OBJECT.
*/
-static JDWP::JdwpTag TagFromObject(const Object* o)
+static JDWP::JdwpTag TagFromObject(const mirror::Object* o)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return (o == NULL) ? JDWP::JT_OBJECT : TagFromClass(o->GetClass());
}
@@ -567,14 +577,14 @@
exit(status); // This is all dalvik did.
}
-void Dbg::VisitRoots(Heap::RootVisitor* visitor, void* arg) {
+void Dbg::VisitRoots(RootVisitor* visitor, void* arg) {
if (gRegistry != NULL) {
gRegistry->VisitRoots(visitor, arg);
}
}
std::string Dbg::GetClassName(JDWP::RefTypeId class_id) {
- Object* o = gRegistry->Get<Object*>(class_id);
+ mirror::Object* o = gRegistry->Get<mirror::Object*>(class_id);
if (o == NULL) {
return "NULL";
}
@@ -589,7 +599,7 @@
JDWP::JdwpError Dbg::GetClassObject(JDWP::RefTypeId id, JDWP::ObjectId& class_object_id) {
JDWP::JdwpError status;
- Class* c = DecodeClass(id, status);
+ mirror::Class* c = DecodeClass(id, status);
if (c == NULL) {
return status;
}
@@ -599,7 +609,7 @@
JDWP::JdwpError Dbg::GetSuperclass(JDWP::RefTypeId id, JDWP::RefTypeId& superclass_id) {
JDWP::JdwpError status;
- Class* c = DecodeClass(id, status);
+ mirror::Class* c = DecodeClass(id, status);
if (c == NULL) {
return status;
}
@@ -613,7 +623,7 @@
}
JDWP::JdwpError Dbg::GetClassLoader(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) {
- Object* o = gRegistry->Get<Object*>(id);
+ mirror::Object* o = gRegistry->Get<mirror::Object*>(id);
if (o == NULL || o == kInvalidObject) {
return JDWP::ERR_INVALID_OBJECT;
}
@@ -623,7 +633,7 @@
JDWP::JdwpError Dbg::GetModifiers(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) {
JDWP::JdwpError status;
- Class* c = DecodeClass(id, status);
+ mirror::Class* c = DecodeClass(id, status);
if (c == NULL) {
return status;
}
@@ -641,7 +651,7 @@
JDWP::JdwpError Dbg::GetMonitorInfo(JDWP::ObjectId object_id, JDWP::ExpandBuf* reply)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Object* o = gRegistry->Get<Object*>(object_id);
+ mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
if (o == NULL || o == kInvalidObject) {
return JDWP::ERR_INVALID_OBJECT;
}
@@ -699,14 +709,14 @@
return true;
}
- static void AppendOwnedMonitors(Object* owned_monitor, void* arg) {
+ static void AppendOwnedMonitors(mirror::Object* owned_monitor, void* arg) {
OwnedMonitorVisitor* visitor = reinterpret_cast<OwnedMonitorVisitor*>(arg);
visitor->monitors.push_back(owned_monitor);
visitor->stack_depths.push_back(visitor->current_stack_depth);
}
size_t current_stack_depth;
- std::vector<Object*> monitors;
+ std::vector<mirror::Object*> monitors;
std::vector<uint32_t> stack_depths;
};
UniquePtr<Context> context(Context::Create());
@@ -743,11 +753,11 @@
std::vector<uint64_t>& counts)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- std::vector<Class*> classes;
+ std::vector<mirror::Class*> classes;
counts.clear();
for (size_t i = 0; i < class_ids.size(); ++i) {
JDWP::JdwpError status;
- Class* c = DecodeClass(class_ids[i], status);
+ mirror::Class* c = DecodeClass(class_ids[i], status);
if (c == NULL) {
return status;
}
@@ -762,12 +772,12 @@
JDWP::JdwpError Dbg::GetInstances(JDWP::RefTypeId class_id, int32_t max_count, std::vector<JDWP::ObjectId>& instances)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
JDWP::JdwpError status;
- Class* c = DecodeClass(class_id, status);
+ mirror::Class* c = DecodeClass(class_id, status);
if (c == NULL) {
return status;
}
- std::vector<Object*> raw_instances;
+ std::vector<mirror::Object*> raw_instances;
Runtime::Current()->GetHeap()->GetInstances(c, max_count, raw_instances);
for (size_t i = 0; i < raw_instances.size(); ++i) {
instances.push_back(gRegistry->Add(raw_instances[i]));
@@ -778,12 +788,12 @@
JDWP::JdwpError Dbg::GetReferringObjects(JDWP::ObjectId object_id, int32_t max_count,
std::vector<JDWP::ObjectId>& referring_objects)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Object* o = gRegistry->Get<Object*>(object_id);
+ mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
if (o == NULL || o == kInvalidObject) {
return JDWP::ERR_INVALID_OBJECT;
}
- std::vector<Object*> raw_instances;
+ std::vector<mirror::Object*> raw_instances;
Runtime::Current()->GetHeap()->GetReferringObjects(o, max_count, raw_instances);
for (size_t i = 0; i < raw_instances.size(); ++i) {
referring_objects.push_back(gRegistry->Add(raw_instances[i]));
@@ -793,7 +803,7 @@
JDWP::JdwpError Dbg::GetReflectedType(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) {
JDWP::JdwpError status;
- Class* c = DecodeClass(class_id, status);
+ mirror::Class* c = DecodeClass(class_id, status);
if (c == NULL) {
return status;
}
@@ -811,11 +821,11 @@
explicit ClassListCreator(std::vector<JDWP::RefTypeId>& classes) : classes(classes) {
}
- static bool Visit(Class* c, void* arg) {
+ static bool Visit(mirror::Class* c, void* arg) {
return reinterpret_cast<ClassListCreator*>(arg)->Visit(c);
}
- bool Visit(Class* c) {
+ bool Visit(mirror::Class* c) {
if (!c->IsPrimitive()) {
classes.push_back(static_cast<JDWP::RefTypeId>(gRegistry->Add(c)));
}
@@ -831,7 +841,7 @@
JDWP::JdwpError Dbg::GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* pTypeTag, uint32_t* pStatus, std::string* pDescriptor) {
JDWP::JdwpError status;
- Class* c = DecodeClass(class_id, status);
+ mirror::Class* c = DecodeClass(class_id, status);
if (c == NULL) {
return status;
}
@@ -855,7 +865,7 @@
}
void Dbg::FindLoadedClassBySignature(const char* descriptor, std::vector<JDWP::RefTypeId>& ids) {
- std::vector<Class*> classes;
+ std::vector<mirror::Class*> classes;
Runtime::Current()->GetClassLinker()->LookupClasses(descriptor, classes);
ids.clear();
for (size_t i = 0; i < classes.size(); ++i) {
@@ -864,7 +874,7 @@
}
JDWP::JdwpError Dbg::GetReferenceType(JDWP::ObjectId object_id, JDWP::ExpandBuf* pReply) {
- Object* o = gRegistry->Get<Object*>(object_id);
+ mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
if (o == NULL || o == kInvalidObject) {
return JDWP::ERR_INVALID_OBJECT;
}
@@ -887,7 +897,7 @@
JDWP::JdwpError Dbg::GetSignature(JDWP::RefTypeId class_id, std::string& signature) {
JDWP::JdwpError status;
- Class* c = DecodeClass(class_id, status);
+ mirror::Class* c = DecodeClass(class_id, status);
if (c == NULL) {
return status;
}
@@ -897,7 +907,7 @@
JDWP::JdwpError Dbg::GetSourceFile(JDWP::RefTypeId class_id, std::string& result) {
JDWP::JdwpError status;
- Class* c = DecodeClass(class_id, status);
+ mirror::Class* c = DecodeClass(class_id, status);
if (c == NULL) {
return status;
}
@@ -906,7 +916,7 @@
}
JDWP::JdwpError Dbg::GetObjectTag(JDWP::ObjectId object_id, uint8_t& tag) {
- Object* o = gRegistry->Get<Object*>(object_id);
+ mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
if (o == kInvalidObject) {
return JDWP::ERR_INVALID_OBJECT;
}
@@ -946,7 +956,7 @@
JDWP::JdwpError Dbg::GetArrayLength(JDWP::ObjectId array_id, int& length) {
JDWP::JdwpError status;
- Array* a = DecodeArray(array_id, status);
+ mirror::Array* a = DecodeArray(array_id, status);
if (a == NULL) {
return status;
}
@@ -956,7 +966,7 @@
JDWP::JdwpError Dbg::OutputArray(JDWP::ObjectId array_id, int offset, int count, JDWP::ExpandBuf* pReply) {
JDWP::JdwpError status;
- Array* a = DecodeArray(array_id, status);
+ mirror::Array* a = DecodeArray(array_id, status);
if (a == NULL) {
return status;
}
@@ -988,9 +998,9 @@
memcpy(dst, &src[offset * width], count * width);
}
} else {
- ObjectArray<Object>* oa = a->AsObjectArray<Object>();
+ mirror::ObjectArray<mirror::Object>* oa = a->AsObjectArray<mirror::Object>();
for (int i = 0; i < count; ++i) {
- Object* element = oa->Get(offset + i);
+ mirror::Object* element = oa->Get(offset + i);
JDWP::JdwpTag specific_tag = (element != NULL) ? TagFromObject(element) : tag;
expandBufAdd1(pReply, specific_tag);
expandBufAddObjectId(pReply, gRegistry->Add(element));
@@ -1004,7 +1014,7 @@
const uint8_t* src)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
JDWP::JdwpError status;
- Array* a = DecodeArray(array_id, status);
+ mirror::Array* a = DecodeArray(array_id, status);
if (a == NULL) {
return status;
}
@@ -1040,10 +1050,10 @@
memcpy(&dst[offset * width], src, count * width);
}
} else {
- ObjectArray<Object>* oa = a->AsObjectArray<Object>();
+ mirror::ObjectArray<mirror::Object>* oa = a->AsObjectArray<mirror::Object>();
for (int i = 0; i < count; ++i) {
JDWP::ObjectId id = JDWP::ReadObjectId(&src);
- Object* o = gRegistry->Get<Object*>(id);
+ mirror::Object* o = gRegistry->Get<mirror::Object*>(id);
if (o == kInvalidObject) {
return JDWP::ERR_INVALID_OBJECT;
}
@@ -1055,12 +1065,12 @@
}
JDWP::ObjectId Dbg::CreateString(const std::string& str) {
- return gRegistry->Add(String::AllocFromModifiedUtf8(Thread::Current(), str.c_str()));
+ return gRegistry->Add(mirror::String::AllocFromModifiedUtf8(Thread::Current(), str.c_str()));
}
JDWP::JdwpError Dbg::CreateObject(JDWP::RefTypeId class_id, JDWP::ObjectId& new_object) {
JDWP::JdwpError status;
- Class* c = DecodeClass(class_id, status);
+ mirror::Class* c = DecodeClass(class_id, status);
if (c == NULL) {
return status;
}
@@ -1074,24 +1084,24 @@
JDWP::JdwpError Dbg::CreateArrayObject(JDWP::RefTypeId array_class_id, uint32_t length,
JDWP::ObjectId& new_array) {
JDWP::JdwpError status;
- Class* c = DecodeClass(array_class_id, status);
+ mirror::Class* c = DecodeClass(array_class_id, status);
if (c == NULL) {
return status;
}
- new_array = gRegistry->Add(Array::Alloc(Thread::Current(), c, length));
+ new_array = gRegistry->Add(mirror::Array::Alloc(Thread::Current(), c, length));
return JDWP::ERR_NONE;
}
bool Dbg::MatchType(JDWP::RefTypeId instance_class_id, JDWP::RefTypeId class_id) {
JDWP::JdwpError status;
- Class* c1 = DecodeClass(instance_class_id, status);
+ mirror::Class* c1 = DecodeClass(instance_class_id, status);
CHECK(c1 != NULL);
- Class* c2 = DecodeClass(class_id, status);
+ mirror::Class* c2 = DecodeClass(class_id, status);
CHECK(c2 != NULL);
return c1->IsAssignableFrom(c2);
}
-static JDWP::FieldId ToFieldId(const Field* f)
+static JDWP::FieldId ToFieldId(const mirror::Field* f)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#ifdef MOVING_GARBAGE_COLLECTOR
UNIMPLEMENTED(FATAL);
@@ -1100,7 +1110,7 @@
#endif
}
-static JDWP::MethodId ToMethodId(const AbstractMethod* m)
+static JDWP::MethodId ToMethodId(const mirror::AbstractMethod* m)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#ifdef MOVING_GARBAGE_COLLECTOR
UNIMPLEMENTED(FATAL);
@@ -1109,30 +1119,30 @@
#endif
}
-static Field* FromFieldId(JDWP::FieldId fid)
+static mirror::Field* FromFieldId(JDWP::FieldId fid)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#ifdef MOVING_GARBAGE_COLLECTOR
UNIMPLEMENTED(FATAL);
#else
- return reinterpret_cast<Field*>(static_cast<uintptr_t>(fid));
+ return reinterpret_cast<mirror::Field*>(static_cast<uintptr_t>(fid));
#endif
}
-static AbstractMethod* FromMethodId(JDWP::MethodId mid)
+static mirror::AbstractMethod* FromMethodId(JDWP::MethodId mid)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#ifdef MOVING_GARBAGE_COLLECTOR
UNIMPLEMENTED(FATAL);
#else
- return reinterpret_cast<AbstractMethod*>(static_cast<uintptr_t>(mid));
+ return reinterpret_cast<mirror::AbstractMethod*>(static_cast<uintptr_t>(mid));
#endif
}
-static void SetLocation(JDWP::JdwpLocation& location, AbstractMethod* m, uint32_t dex_pc)
+static void SetLocation(JDWP::JdwpLocation& location, mirror::AbstractMethod* m, uint32_t dex_pc)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (m == NULL) {
memset(&location, 0, sizeof(location));
} else {
- Class* c = m->GetDeclaringClass();
+ mirror::Class* c = m->GetDeclaringClass();
location.type_tag = c->IsInterface() ? JDWP::TT_INTERFACE : JDWP::TT_CLASS;
location.class_id = gRegistry->Add(c);
location.method_id = ToMethodId(m);
@@ -1142,13 +1152,13 @@
std::string Dbg::GetMethodName(JDWP::MethodId method_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- AbstractMethod* m = FromMethodId(method_id);
+ mirror::AbstractMethod* m = FromMethodId(method_id);
return MethodHelper(m).GetName();
}
std::string Dbg::GetFieldName(JDWP::FieldId field_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Field* f = FromFieldId(field_id);
+ mirror::Field* f = FromFieldId(field_id);
return FieldHelper(f).GetName();
}
@@ -1190,7 +1200,7 @@
return newSlot;
}
-static uint16_t DemangleSlot(uint16_t slot, AbstractMethod* m)
+static uint16_t DemangleSlot(uint16_t slot, mirror::AbstractMethod* m)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (slot == kEclipseWorkaroundSlot) {
return 0;
@@ -1204,7 +1214,7 @@
JDWP::JdwpError Dbg::OutputDeclaredFields(JDWP::RefTypeId class_id, bool with_generic, JDWP::ExpandBuf* pReply) {
JDWP::JdwpError status;
- Class* c = DecodeClass(class_id, status);
+ mirror::Class* c = DecodeClass(class_id, status);
if (c == NULL) {
return status;
}
@@ -1215,7 +1225,7 @@
expandBufAdd4BE(pReply, instance_field_count + static_field_count);
for (size_t i = 0; i < instance_field_count + static_field_count; ++i) {
- Field* f = (i < instance_field_count) ? c->GetInstanceField(i) : c->GetStaticField(i - instance_field_count);
+ mirror::Field* f = (i < instance_field_count) ? c->GetInstanceField(i) : c->GetStaticField(i - instance_field_count);
FieldHelper fh(f);
expandBufAddFieldId(pReply, ToFieldId(f));
expandBufAddUtf8String(pReply, fh.GetName());
@@ -1232,7 +1242,7 @@
JDWP::JdwpError Dbg::OutputDeclaredMethods(JDWP::RefTypeId class_id, bool with_generic,
JDWP::ExpandBuf* pReply) {
JDWP::JdwpError status;
- Class* c = DecodeClass(class_id, status);
+ mirror::Class* c = DecodeClass(class_id, status);
if (c == NULL) {
return status;
}
@@ -1243,7 +1253,7 @@
expandBufAdd4BE(pReply, direct_method_count + virtual_method_count);
for (size_t i = 0; i < direct_method_count + virtual_method_count; ++i) {
- AbstractMethod* m = (i < direct_method_count) ? c->GetDirectMethod(i) : c->GetVirtualMethod(i - direct_method_count);
+ mirror::AbstractMethod* m = (i < direct_method_count) ? c->GetDirectMethod(i) : c->GetVirtualMethod(i - direct_method_count);
MethodHelper mh(m);
expandBufAddMethodId(pReply, ToMethodId(m));
expandBufAddUtf8String(pReply, mh.GetName());
@@ -1259,7 +1269,7 @@
JDWP::JdwpError Dbg::OutputDeclaredInterfaces(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) {
JDWP::JdwpError status;
- Class* c = DecodeClass(class_id, status);
+ mirror::Class* c = DecodeClass(class_id, status);
if (c == NULL) {
return status;
}
@@ -1287,7 +1297,7 @@
return true;
}
};
- AbstractMethod* m = FromMethodId(method_id);
+ mirror::AbstractMethod* m = FromMethodId(method_id);
MethodHelper mh(m);
uint64_t start, end;
if (m->IsNative()) {
@@ -1341,14 +1351,14 @@
++pContext->variable_count;
}
};
- AbstractMethod* m = FromMethodId(method_id);
+ mirror::AbstractMethod* m = FromMethodId(method_id);
MethodHelper mh(m);
const DexFile::CodeItem* code_item = mh.GetCodeItem();
// arg_count considers doubles and longs to take 2 units.
// variable_count considers everything to take 1 unit.
std::string shorty(mh.GetShorty());
- expandBufAdd4BE(pReply, AbstractMethod::NumArgRegisters(shorty));
+ expandBufAdd4BE(pReply, mirror::AbstractMethod::NumArgRegisters(shorty));
// We don't know the total number of variables yet, so leave a blank and update it later.
size_t variable_count_offset = expandBufGetLength(pReply);
@@ -1368,7 +1378,7 @@
JDWP::JdwpError Dbg::GetBytecodes(JDWP::RefTypeId, JDWP::MethodId method_id,
std::vector<uint8_t>& bytecodes)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- AbstractMethod* m = FromMethodId(method_id);
+ mirror::AbstractMethod* m = FromMethodId(method_id);
if (m == NULL) {
return JDWP::ERR_INVALID_METHODID;
}
@@ -1396,18 +1406,18 @@
bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
JDWP::JdwpError status;
- Class* c = DecodeClass(ref_type_id, status);
+ mirror::Class* c = DecodeClass(ref_type_id, status);
if (ref_type_id != 0 && c == NULL) {
return status;
}
- Object* o = gRegistry->Get<Object*>(object_id);
+ mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
if ((!is_static && o == NULL) || o == kInvalidObject) {
return JDWP::ERR_INVALID_OBJECT;
}
- Field* f = FromFieldId(field_id);
+ mirror::Field* f = FromFieldId(field_id);
- Class* receiver_class = c;
+ mirror::Class* receiver_class = c;
if (receiver_class == NULL && o != NULL) {
receiver_class = o->GetClass();
}
@@ -1448,7 +1458,7 @@
LOG(FATAL) << "Unknown tag: " << tag;
}
} else {
- Object* value = f->GetObject(o);
+ mirror::Object* value = f->GetObject(o);
expandBufAdd1(pReply, TagFromObject(value));
expandBufAddObjectId(pReply, gRegistry->Add(value));
}
@@ -1467,11 +1477,11 @@
static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId object_id, JDWP::FieldId field_id,
uint64_t value, int width, bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Object* o = gRegistry->Get<Object*>(object_id);
+ mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
if ((!is_static && o == NULL) || o == kInvalidObject) {
return JDWP::ERR_INVALID_OBJECT;
}
- Field* f = FromFieldId(field_id);
+ mirror::Field* f = FromFieldId(field_id);
// The RI only enforces the static/non-static mismatch in one direction.
// TODO: should we change the tests and check both?
@@ -1499,12 +1509,12 @@
f->Set32(o, value);
}
} else {
- Object* v = gRegistry->Get<Object*>(value);
+ mirror::Object* v = gRegistry->Get<mirror::Object*>(value);
if (v == kInvalidObject) {
return JDWP::ERR_INVALID_OBJECT;
}
if (v != NULL) {
- Class* field_type = FieldHelper(f).GetType();
+ mirror::Class* field_type = FieldHelper(f).GetType();
if (!field_type->IsAssignableFrom(v->GetClass())) {
return JDWP::ERR_INVALID_OBJECT;
}
@@ -1525,7 +1535,7 @@
}
std::string Dbg::StringToUtf8(JDWP::ObjectId string_id) {
- String* s = gRegistry->Get<String*>(string_id);
+ mirror::String* s = gRegistry->Get<mirror::String*>(string_id);
return s->ToModifiedUtf8();
}
@@ -1539,9 +1549,11 @@
}
// We still need to report the zombie threads' names, so we can't just call Thread::GetThreadName.
- Object* thread_object = gRegistry->Get<Object*>(thread_id);
- Field* java_lang_Thread_name_field = soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
- String* s = reinterpret_cast<String*>(java_lang_Thread_name_field->GetObject(thread_object));
+ mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id);
+ mirror::Field* java_lang_Thread_name_field =
+ soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
+ mirror::String* s =
+ reinterpret_cast<mirror::String*>(java_lang_Thread_name_field->GetObject(thread_object));
if (s != NULL) {
name = s->ToModifiedUtf8();
}
@@ -1550,7 +1562,7 @@
JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) {
ScopedObjectAccess soa(Thread::Current());
- Object* thread_object = gRegistry->Get<Object*>(thread_id);
+ mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id);
if (thread_object == kInvalidObject) {
return JDWP::ERR_INVALID_OBJECT;
}
@@ -1568,11 +1580,11 @@
return error;
}
- Class* c = Runtime::Current()->GetClassLinker()->FindSystemClass("Ljava/lang/Thread;");
+ mirror::Class* c = Runtime::Current()->GetClassLinker()->FindSystemClass("Ljava/lang/Thread;");
CHECK(c != NULL);
- Field* f = c->FindInstanceField("group", "Ljava/lang/ThreadGroup;");
+ mirror::Field* f = c->FindInstanceField("group", "Ljava/lang/ThreadGroup;");
CHECK(f != NULL);
- Object* group = f->GetObject(thread_object);
+ mirror::Object* group = f->GetObject(thread_object);
CHECK(group != NULL);
JDWP::ObjectId thread_group_id = gRegistry->Add(group);
@@ -1582,40 +1594,40 @@
std::string Dbg::GetThreadGroupName(JDWP::ObjectId thread_group_id) {
ScopedObjectAccess soa(Thread::Current());
- Object* thread_group = gRegistry->Get<Object*>(thread_group_id);
+ mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id);
CHECK(thread_group != NULL);
- Class* c = Runtime::Current()->GetClassLinker()->FindSystemClass("Ljava/lang/ThreadGroup;");
+ mirror::Class* c = Runtime::Current()->GetClassLinker()->FindSystemClass("Ljava/lang/ThreadGroup;");
CHECK(c != NULL);
- Field* f = c->FindInstanceField("name", "Ljava/lang/String;");
+ mirror::Field* f = c->FindInstanceField("name", "Ljava/lang/String;");
CHECK(f != NULL);
- String* s = reinterpret_cast<String*>(f->GetObject(thread_group));
+ mirror::String* s = reinterpret_cast<mirror::String*>(f->GetObject(thread_group));
return s->ToModifiedUtf8();
}
JDWP::ObjectId Dbg::GetThreadGroupParent(JDWP::ObjectId thread_group_id) {
- Object* thread_group = gRegistry->Get<Object*>(thread_group_id);
+ mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id);
CHECK(thread_group != NULL);
- Class* c = Runtime::Current()->GetClassLinker()->FindSystemClass("Ljava/lang/ThreadGroup;");
+ mirror::Class* c = Runtime::Current()->GetClassLinker()->FindSystemClass("Ljava/lang/ThreadGroup;");
CHECK(c != NULL);
- Field* f = c->FindInstanceField("parent", "Ljava/lang/ThreadGroup;");
+ mirror::Field* f = c->FindInstanceField("parent", "Ljava/lang/ThreadGroup;");
CHECK(f != NULL);
- Object* parent = f->GetObject(thread_group);
+ mirror::Object* parent = f->GetObject(thread_group);
return gRegistry->Add(parent);
}
JDWP::ObjectId Dbg::GetSystemThreadGroupId() {
ScopedObjectAccessUnchecked soa(Thread::Current());
- Field* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup);
- Object* group = f->GetObject(f->GetDeclaringClass());
+ mirror::Field* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup);
+ mirror::Object* group = f->GetObject(f->GetDeclaringClass());
return gRegistry->Add(group);
}
JDWP::ObjectId Dbg::GetMainThreadGroupId() {
ScopedObjectAccess soa(Thread::Current());
- Field* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup);
- Object* group = f->GetObject(f->GetDeclaringClass());
+ mirror::Field* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup);
+ mirror::Object* group = f->GetObject(f->GetDeclaringClass());
return gRegistry->Add(group);
}
@@ -1691,7 +1703,7 @@
void Dbg::GetThreads(JDWP::ObjectId thread_group_id, std::vector<JDWP::ObjectId>& thread_ids) {
class ThreadListVisitor {
public:
- ThreadListVisitor(const ScopedObjectAccessUnchecked& soa, Object* desired_thread_group,
+ ThreadListVisitor(const ScopedObjectAccessUnchecked& soa, mirror::Object* desired_thread_group,
std::vector<JDWP::ObjectId>& thread_ids)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
: soa_(soa), desired_thread_group_(desired_thread_group), thread_ids_(thread_ids) {}
@@ -1708,20 +1720,20 @@
// query all threads, so it's easier if we just don't tell them about this thread.
return;
}
- Object* peer = t->GetPeer();
+ mirror::Object* peer = t->GetPeer();
if (IsInDesiredThreadGroup(peer)) {
thread_ids_.push_back(gRegistry->Add(peer));
}
}
private:
- bool IsInDesiredThreadGroup(Object* peer)
+ bool IsInDesiredThreadGroup(mirror::Object* peer)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// peer might be NULL if the thread is still starting up.
if (peer == NULL) {
// We can't tell the debugger about this thread yet.
// TODO: if we identified threads to the debugger by their Thread*
- // rather than their peer's Object*, we could fix this.
+ // rather than their peer's mirror::Object*, we could fix this.
// Doing so might help us report ZOMBIE threads too.
return false;
}
@@ -1729,17 +1741,17 @@
if (desired_thread_group_ == NULL) {
return true;
}
- Object* group = soa_.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(peer);
+ mirror::Object* group = soa_.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(peer);
return (group == desired_thread_group_);
}
const ScopedObjectAccessUnchecked& soa_;
- Object* const desired_thread_group_;
+ mirror::Object* const desired_thread_group_;
std::vector<JDWP::ObjectId>& thread_ids_;
};
ScopedObjectAccessUnchecked soa(Thread::Current());
- Object* thread_group = gRegistry->Get<Object*>(thread_group_id);
+ mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id);
ThreadListVisitor tlv(soa, thread_group, thread_ids);
MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
Runtime::Current()->GetThreadList()->ForEach(ThreadListVisitor::Visit, &tlv);
@@ -1747,16 +1759,17 @@
void Dbg::GetChildThreadGroups(JDWP::ObjectId thread_group_id, std::vector<JDWP::ObjectId>& child_thread_group_ids) {
ScopedObjectAccess soa(Thread::Current());
- Object* thread_group = gRegistry->Get<Object*>(thread_group_id);
+ mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id);
// Get the ArrayList<ThreadGroup> "groups" out of this thread group...
- Field* groups_field = thread_group->GetClass()->FindInstanceField("groups", "Ljava/util/List;");
- Object* groups_array_list = groups_field->GetObject(thread_group);
+ mirror::Field* groups_field = thread_group->GetClass()->FindInstanceField("groups", "Ljava/util/List;");
+ mirror::Object* groups_array_list = groups_field->GetObject(thread_group);
// Get the array and size out of the ArrayList<ThreadGroup>...
- Field* array_field = groups_array_list->GetClass()->FindInstanceField("array", "[Ljava/lang/Object;");
- Field* size_field = groups_array_list->GetClass()->FindInstanceField("size", "I");
- ObjectArray<Object>* groups_array = array_field->GetObject(groups_array_list)->AsObjectArray<Object>();
+ mirror::Field* array_field = groups_array_list->GetClass()->FindInstanceField("array", "[Ljava/lang/Object;");
+ mirror::Field* size_field = groups_array_list->GetClass()->FindInstanceField("size", "I");
+ mirror::ObjectArray<mirror::Object>* groups_array =
+ array_field->GetObject(groups_array_list)->AsObjectArray<mirror::Object>();
const int32_t size = size_field->GetInt(groups_array_list);
// Copy the first 'size' elements out of the array into the result.
@@ -1871,7 +1884,7 @@
ScopedLocalRef<jobject> peer(Thread::Current()->GetJniEnv(), NULL);
{
ScopedObjectAccess soa(Thread::Current());
- peer.reset(soa.AddLocalReference<jobject>(gRegistry->Get<Object*>(thread_id)));
+ peer.reset(soa.AddLocalReference<jobject>(gRegistry->Get<mirror::Object*>(thread_id)));
}
if (peer.get() == NULL) {
return JDWP::ERR_THREAD_NOT_ALIVE;
@@ -1890,7 +1903,7 @@
void Dbg::ResumeThread(JDWP::ObjectId thread_id) {
ScopedObjectAccessUnchecked soa(Thread::Current());
- Object* peer = gRegistry->Get<Object*>(thread_id);
+ mirror::Object* peer = gRegistry->Get<mirror::Object*>(thread_id);
Thread* thread;
{
MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
@@ -1925,21 +1938,21 @@
if (frame_id != GetFrameId()) {
return true; // continue
}
- AbstractMethod* m = GetMethod();
+ mirror::AbstractMethod* m = GetMethod();
if (m->IsNative() || m->IsStatic()) {
this_object = NULL;
} else {
uint16_t reg = DemangleSlot(0, m);
- this_object = reinterpret_cast<Object*>(GetVReg(m, reg, kReferenceVReg));
+ this_object = reinterpret_cast<mirror::Object*>(GetVReg(m, reg, kReferenceVReg));
}
return false;
}
- Object* this_object;
+ mirror::Object* this_object;
JDWP::FrameId frame_id;
};
-static Object* GetThis(Thread* self, AbstractMethod* m, size_t frame_id)
+static mirror::Object* GetThis(Thread* self, mirror::AbstractMethod* m, size_t frame_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// TODO: should we return the 'this' we passed through to non-static native methods?
if (m->IsNative() || m->IsStatic()) {
@@ -1989,7 +2002,7 @@
return true; // Not our frame, carry on.
}
// TODO: check that the tag is compatible with the actual type of the slot!
- AbstractMethod* m = GetMethod();
+ mirror::AbstractMethod* m = GetMethod();
uint16_t reg = DemangleSlot(slot_, m);
switch (tag_) {
@@ -2037,7 +2050,7 @@
case JDWP::JT_ARRAY:
{
CHECK_EQ(width_, sizeof(JDWP::ObjectId));
- Object* o = reinterpret_cast<Object*>(GetVReg(m, reg, kReferenceVReg));
+ mirror::Object* o = reinterpret_cast<mirror::Object*>(GetVReg(m, reg, kReferenceVReg));
VLOG(jdwp) << "get array local " << reg << " = " << o;
if (!Runtime::Current()->GetHeap()->IsHeapAddress(o)) {
LOG(FATAL) << "Register " << reg << " expected to hold array: " << o;
@@ -2053,7 +2066,7 @@
case JDWP::JT_THREAD_GROUP:
{
CHECK_EQ(width_, sizeof(JDWP::ObjectId));
- Object* o = reinterpret_cast<Object*>(GetVReg(m, reg, kReferenceVReg));
+ mirror::Object* o = reinterpret_cast<mirror::Object*>(GetVReg(m, reg, kReferenceVReg));
VLOG(jdwp) << "get object local " << reg << " = " << o;
if (!Runtime::Current()->GetHeap()->IsHeapAddress(o)) {
LOG(FATAL) << "Register " << reg << " expected to hold object: " << o;
@@ -2128,7 +2141,7 @@
return true; // Not our frame, carry on.
}
// TODO: check that the tag is compatible with the actual type of the slot!
- AbstractMethod* m = GetMethod();
+ mirror::AbstractMethod* m = GetMethod();
uint16_t reg = DemangleSlot(slot_, m);
switch (tag_) {
@@ -2155,7 +2168,7 @@
case JDWP::JT_STRING:
{
CHECK_EQ(width_, sizeof(JDWP::ObjectId));
- Object* o = gRegistry->Get<Object*>(static_cast<JDWP::ObjectId>(value_));
+ mirror::Object* o = gRegistry->Get<mirror::Object*>(static_cast<JDWP::ObjectId>(value_));
if (o == kInvalidObject) {
UNIMPLEMENTED(FATAL) << "return an error code when given an invalid object to store";
}
@@ -2198,8 +2211,8 @@
visitor.WalkStack();
}
-void Dbg::PostLocationEvent(const AbstractMethod* m, int dex_pc, Object* this_object, int event_flags) {
- Class* c = m->GetDeclaringClass();
+void Dbg::PostLocationEvent(const mirror::AbstractMethod* m, int dex_pc, mirror::Object* this_object, int event_flags) {
+ mirror::Class* c = m->GetDeclaringClass();
JDWP::JdwpLocation location;
location.type_tag = c->IsInterface() ? JDWP::TT_INTERFACE : JDWP::TT_CLASS;
@@ -2220,8 +2233,9 @@
}
void Dbg::PostException(Thread* thread,
- JDWP::FrameId throw_frame_id, AbstractMethod* throw_method, uint32_t throw_dex_pc,
- AbstractMethod* catch_method, uint32_t catch_dex_pc, Throwable* exception) {
+ JDWP::FrameId throw_frame_id, mirror::AbstractMethod* throw_method,
+ uint32_t throw_dex_pc, mirror::AbstractMethod* catch_method,
+ uint32_t catch_dex_pc, mirror::Throwable* exception) {
if (!IsDebuggerActive()) {
return;
}
@@ -2252,7 +2266,7 @@
gJdwpState->PostException(&throw_location, exception_id, exception_class_id, &catch_location, this_id);
}
-void Dbg::PostClassPrepare(Class* c) {
+void Dbg::PostClassPrepare(mirror::Class* c) {
if (!IsDebuggerActive()) {
return;
}
@@ -2271,7 +2285,7 @@
}
size_t frame_id;
- AbstractMethod* m = self->GetCurrentMethod(NULL, &frame_id);
+ mirror::AbstractMethod* m = self->GetCurrentMethod(NULL, &frame_id);
//LOG(INFO) << "UpdateDebugger " << PrettyMethod(m) << "@" << dex_pc << " frame " << frame_id;
if (dex_pc == -1) {
@@ -2375,14 +2389,14 @@
void Dbg::WatchLocation(const JDWP::JdwpLocation* location) {
MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
- AbstractMethod* m = FromMethodId(location->method_id);
+ mirror::AbstractMethod* m = FromMethodId(location->method_id);
gBreakpoints.push_back(Breakpoint(m, location->dex_pc));
VLOG(jdwp) << "Set breakpoint #" << (gBreakpoints.size() - 1) << ": " << gBreakpoints[gBreakpoints.size() - 1];
}
void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location) {
MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
- AbstractMethod* m = FromMethodId(location->method_id);
+ mirror::AbstractMethod* m = FromMethodId(location->method_id);
for (size_t i = 0; i < gBreakpoints.size(); ++i) {
if (gBreakpoints[i].method == m && gBreakpoints[i].dex_pc == location->dex_pc) {
VLOG(jdwp) << "Removed breakpoint #" << i << ": " << gBreakpoints[i];
@@ -2428,11 +2442,11 @@
// annotalysis.
bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
Locks::breakpoint_lock_->AssertHeld(Thread::Current());
- const AbstractMethod* m = GetMethod();
+ const mirror::AbstractMethod* m = GetMethod();
if (!m->IsRuntimeMethod()) {
++gSingleStepControl.stack_depth;
if (gSingleStepControl.method == NULL) {
- const DexCache* dex_cache = m->GetDeclaringClass()->GetDexCache();
+ const mirror::DexCache* dex_cache = m->GetDeclaringClass()->GetDexCache();
gSingleStepControl.method = m;
gSingleStepControl.line_number = -1;
if (dex_cache != NULL) {
@@ -2497,7 +2511,7 @@
uint32_t last_pc;
};
gSingleStepControl.dex_pcs.clear();
- const AbstractMethod* m = gSingleStepControl.method;
+ const mirror::AbstractMethod* m = gSingleStepControl.method;
if (m->IsNative()) {
gSingleStepControl.line_number = -1;
} else {
@@ -2618,23 +2632,23 @@
}
JDWP::JdwpError status;
- Object* receiver = gRegistry->Get<Object*>(object_id);
+ mirror::Object* receiver = gRegistry->Get<mirror::Object*>(object_id);
if (receiver == kInvalidObject) {
return JDWP::ERR_INVALID_OBJECT;
}
- Object* thread = gRegistry->Get<Object*>(thread_id);
+ mirror::Object* thread = gRegistry->Get<mirror::Object*>(thread_id);
if (thread == kInvalidObject) {
return JDWP::ERR_INVALID_OBJECT;
}
// TODO: check that 'thread' is actually a java.lang.Thread!
- Class* c = DecodeClass(class_id, status);
+ mirror::Class* c = DecodeClass(class_id, status);
if (c == NULL) {
return status;
}
- AbstractMethod* m = FromMethodId(method_id);
+ mirror::AbstractMethod* m = FromMethodId(method_id);
if (m->IsStatic() != (receiver == NULL)) {
return JDWP::ERR_INVALID_METHODID;
}
@@ -2739,13 +2753,13 @@
// We can be called while an exception is pending. We need
// to preserve that across the method invocation.
- SirtRef<Throwable> old_exception(soa.Self(), soa.Self()->GetException());
+ SirtRef<mirror::Throwable> old_exception(soa.Self(), soa.Self()->GetException());
soa.Self()->ClearException();
// Translate the method through the vtable, unless the debugger wants to suppress it.
- AbstractMethod* m = pReq->method_;
+ mirror::AbstractMethod* m = pReq->method_;
if ((pReq->options_ & JDWP::INVOKE_NONVIRTUAL) == 0 && pReq->receiver_ != NULL) {
- AbstractMethod* actual_method = pReq->class_->FindVirtualMethodForVirtualOrInterface(pReq->method_);
+ mirror::AbstractMethod* actual_method = pReq->class_->FindVirtualMethodForVirtualOrInterface(pReq->method_);
if (actual_method != m) {
VLOG(jdwp) << "ExecuteMethod translated " << PrettyMethod(m) << " to " << PrettyMethod(actual_method);
m = actual_method;
@@ -2764,7 +2778,7 @@
pReq->exception = gRegistry->Add(soa.Self()->GetException());
pReq->result_tag = BasicTagFromDescriptor(MethodHelper(m).GetShorty());
if (pReq->exception != 0) {
- Object* exc = soa.Self()->GetException();
+ mirror::Object* exc = soa.Self()->GetException();
VLOG(jdwp) << " JDWP invocation returning with exception=" << exc << " " << PrettyTypeOf(exc);
soa.Self()->ClearException();
pReq->result_value.SetJ(0);
@@ -2801,7 +2815,7 @@
* throwing exceptions) we really want to do the registration late.
*/
void Dbg::RegisterObjectId(JDWP::ObjectId id) {
- gRegistry->Add(reinterpret_cast<Object*>(id));
+ gRegistry->Add(reinterpret_cast<mirror::Object*>(id));
}
/*
@@ -2949,7 +2963,7 @@
} else {
CHECK(type == CHUNK_TYPE("THCR") || type == CHUNK_TYPE("THNM")) << type;
ScopedObjectAccessUnchecked soa(Thread::Current());
- SirtRef<String> name(soa.Self(), t->GetThreadName(soa));
+ SirtRef<mirror::String> name(soa.Self(), t->GetThreadName(soa));
size_t char_count = (name.get() != NULL) ? name->GetLength() : 0;
const jchar* chars = (name.get() != NULL) ? name->GetCharArray()->GetData() : NULL;
@@ -3239,7 +3253,7 @@
Flush();
}
}
- const Object *obj = (const Object *)start;
+ const mirror::Object* obj = reinterpret_cast<const mirror::Object*>(start);
// Determine the type of this chunk.
// OLD-TODO: if context.merge, see if this chunk is different from the last chunk.
@@ -3282,7 +3296,7 @@
*p_++ = length - 1;
}
- uint8_t ExamineObject(const Object* o, bool is_native_heap)
+ uint8_t ExamineObject(const mirror::Object* o, bool is_native_heap)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
if (o == NULL) {
return HPSG_STATE(SOLIDITY_FREE, 0);
@@ -3300,7 +3314,7 @@
return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
}
- Class* c = o->GetClass();
+ mirror::Class* c = o->GetClass();
if (c == NULL) {
// The object was probably just created but hasn't been initialized yet.
return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
@@ -3416,7 +3430,7 @@
if (depth >= kMaxAllocRecordStackDepth) {
return false;
}
- AbstractMethod* m = GetMethod();
+ mirror::AbstractMethod* m = GetMethod();
if (!m->IsRuntimeMethod()) {
record->stack[depth].method = m;
record->stack[depth].dex_pc = GetDexPc();
@@ -3437,7 +3451,7 @@
size_t depth;
};
-void Dbg::RecordAllocation(Class* type, size_t byte_count) {
+void Dbg::RecordAllocation(mirror::Class* type, size_t byte_count) {
Thread* self = Thread::Current();
CHECK(self != NULL);
@@ -3499,7 +3513,7 @@
<< PrettyClass(record->type);
for (size_t stack_frame = 0; stack_frame < kMaxAllocRecordStackDepth; ++stack_frame) {
- const AbstractMethod* m = record->stack[stack_frame].method;
+ const mirror::AbstractMethod* m = record->stack[stack_frame].method;
if (m == NULL) {
break;
}
@@ -3619,7 +3633,7 @@
MethodHelper mh;
for (size_t i = 0; i < kMaxAllocRecordStackDepth; i++) {
- AbstractMethod* m = record->stack[i].method;
+ mirror::AbstractMethod* m = record->stack[i].method;
if (m != NULL) {
mh.ChangeMethod(m);
class_names.Add(mh.GetDeclaringClassDescriptor());
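Dbg::GetChildThreadGroups above walks a java.util.ArrayList by looking up its "array" and "size" fields reflectively. For illustration, the same walk factored into a helper, written only with the mirror:: calls already used in this change; the helper name is hypothetical and the ArrayList field names depend on the libcore version:

#include <vector>

#include "locks.h"
#include "mirror/field-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"

namespace art {

// Copies the live elements of a java.util.ArrayList into 'out'.
static void CopyArrayListElements(mirror::Object* array_list, std::vector<mirror::Object*>& out)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  // ArrayList keeps its elements in an Object[] "array" field and its logical
  // length in an int "size" field; entries past 'size' are unused slack.
  mirror::Field* array_field =
      array_list->GetClass()->FindInstanceField("array", "[Ljava/lang/Object;");
  mirror::Field* size_field = array_list->GetClass()->FindInstanceField("size", "I");
  mirror::ObjectArray<mirror::Object>* elements =
      array_field->GetObject(array_list)->AsObjectArray<mirror::Object>();
  const int32_t size = size_field->GetInt(array_list);
  for (int32_t i = 0; i < size; ++i) {
    out.push_back(elements->Get(i));
  }
}

}  // namespace art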
diff --git a/src/debugger.h b/src/debugger.h
index b34a401..a796349 100644
--- a/src/debugger.h
+++ b/src/debugger.h
@@ -26,10 +26,17 @@
#include <string>
#include "jdwp/jdwp.h"
-#include "object.h"
+#include "jni.h"
+#include "jvalue.h"
+#include "root_visitor.h"
namespace art {
-
+namespace mirror {
+class AbstractMethod;
+class Class;
+class Object;
+class Throwable;
+} // namespace mirror
struct AllocRecord;
class Thread;
@@ -53,10 +60,10 @@
bool invoke_needed_;
/* request */
- Object* receiver_; /* not used for ClassType.InvokeMethod */
- Object* thread_;
- Class* class_;
- AbstractMethod* method_;
+ mirror::Object* receiver_; /* not used for ClassType.InvokeMethod */
+ mirror::Object* thread_;
+ mirror::Class* class_;
+ mirror::AbstractMethod* method_;
uint32_t arg_count_;
uint64_t* arg_values_; /* will be NULL if arg_count_ == 0 */
uint32_t options_;
@@ -118,7 +125,7 @@
static void Exit(int status);
- static void VisitRoots(Heap::RootVisitor* visitor, void* arg);
+ static void VisitRoots(RootVisitor* visitor, void* arg);
/*
* Class, Object, Array
@@ -311,17 +318,19 @@
kMethodEntry = 0x04,
kMethodExit = 0x08,
};
- static void PostLocationEvent(const AbstractMethod* method, int pcOffset, Object* thisPtr, int eventFlags)
+ static void PostLocationEvent(const mirror::AbstractMethod* method, int pcOffset,
+ mirror::Object* thisPtr, int eventFlags)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static void PostException(Thread* thread, JDWP::FrameId throw_frame_id, AbstractMethod* throw_method,
- uint32_t throw_dex_pc, AbstractMethod* catch_method, uint32_t catch_dex_pc,
- Throwable* exception)
+ static void PostException(Thread* thread, JDWP::FrameId throw_frame_id,
+ mirror::AbstractMethod* throw_method,
+ uint32_t throw_dex_pc, mirror::AbstractMethod* catch_method,
+ uint32_t catch_dex_pc, mirror::Throwable* exception)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void PostThreadStart(Thread* t)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void PostThreadDeath(Thread* t)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static void PostClassPrepare(Class* c)
+ static void PostClassPrepare(mirror::Class* c)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void UpdateDebugger(int32_t dex_pc, Thread* self)
@@ -373,12 +382,11 @@
/*
* Recent allocation tracking support.
*/
- static void RecordAllocation(Class* type, size_t byte_count)
+ static void RecordAllocation(mirror::Class* type, size_t byte_count)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void SetAllocTrackingEnabled(bool enabled);
static inline bool IsAllocTrackingEnabled() { return recent_allocation_records_ != NULL; }
- static jbyteArray GetRecentAllocations()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static jbyteArray GetRecentAllocations() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void DumpRecentAllocations();
enum HpifWhen {
diff --git a/src/dex2oat.cc b/src/dex2oat.cc
index a2d35e4..bc38bdc 100644
--- a/src/dex2oat.cc
+++ b/src/dex2oat.cc
@@ -27,10 +27,14 @@
#include "base/stringpiece.h"
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
-#include "class_loader.h"
#include "compiler.h"
#include "image_writer.h"
#include "leb128.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/class_loader.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
#include "oat_writer.h"
#include "object_utils.h"
#include "os.h"
@@ -161,7 +165,7 @@
continue;
}
std::string descriptor(DotToDescriptor(dot.c_str()));
- SirtRef<Class> klass(self, class_linker->FindSystemClass(descriptor.c_str()));
+ SirtRef<mirror::Class> klass(self, class_linker->FindSystemClass(descriptor.c_str()));
if (klass.get() == NULL) {
LOG(WARNING) << "Failed to find class " << descriptor;
Thread::Current()->ClearException();
@@ -173,7 +177,7 @@
// exceptions are resolved by the verifier when there is a catch block in an interested method.
// Do this here so that exception classes appear to have been specified image classes.
std::set<std::pair<uint16_t, const DexFile*> > unresolved_exception_types;
- SirtRef<Class> java_lang_Throwable(self,
+ SirtRef<mirror::Class> java_lang_Throwable(self,
class_linker->FindSystemClass("Ljava/lang/Throwable;"));
do {
unresolved_exception_types.clear();
@@ -185,10 +189,10 @@
it != end; ++it) {
uint16_t exception_type_idx = it->first;
const DexFile* dex_file = it->second;
- DexCache* dex_cache = class_linker->FindDexCache(*dex_file);
- ClassLoader* class_loader = NULL;
- SirtRef<Class> klass(self, class_linker->ResolveType(*dex_file, exception_type_idx,
- dex_cache, class_loader));
+ mirror::DexCache* dex_cache = class_linker->FindDexCache(*dex_file);
+ mirror::ClassLoader* class_loader = NULL;
+ SirtRef<mirror::Class> klass(self, class_linker->ResolveType(*dex_file, exception_type_idx,
+ dex_cache, class_loader));
if (klass.get() == NULL) {
const DexFile::TypeId& type_id = dex_file->GetTypeId(exception_type_idx);
const char* descriptor = dex_file->GetTypeDescriptor(type_id);
@@ -404,25 +408,25 @@
}
}
- static bool ResolveCatchBlockExceptionsClassVisitor(Class* c, void* arg)
+ static bool ResolveCatchBlockExceptionsClassVisitor(mirror::Class* c, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::set<std::pair<uint16_t, const DexFile*> >* exceptions_to_resolve =
reinterpret_cast<std::set<std::pair<uint16_t, const DexFile*> >*>(arg);
MethodHelper mh;
for (size_t i = 0; i < c->NumVirtualMethods(); ++i) {
- AbstractMethod* m = c->GetVirtualMethod(i);
+ mirror::AbstractMethod* m = c->GetVirtualMethod(i);
mh.ChangeMethod(m);
ResolveExceptionsForMethod(&mh, *exceptions_to_resolve);
}
for (size_t i = 0; i < c->NumDirectMethods(); ++i) {
- AbstractMethod* m = c->GetDirectMethod(i);
+ mirror::AbstractMethod* m = c->GetDirectMethod(i);
mh.ChangeMethod(m);
ResolveExceptionsForMethod(&mh, *exceptions_to_resolve);
}
return true;
}
- static bool RecordImageClassesVisitor(Class* klass, void* arg)
+ static bool RecordImageClassesVisitor(mirror::Class* klass, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::set<std::string>* image_classes = reinterpret_cast<std::set<std::string>*>(arg);
if (klass->IsArrayClass() || klass->IsPrimitive()) {
diff --git a/src/dex_file.cc b/src/dex_file.cc
index 7398616..e67e767 100644
--- a/src/dex_file.cc
+++ b/src/dex_file.cc
@@ -30,7 +30,10 @@
#include "dex_file_verifier.h"
#include "globals.h"
#include "leb128.h"
-#include "object.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/field.h"
+#include "mirror/field-inl.h"
+#include "mirror/string.h"
#include "os.h"
#include "safe_map.h"
#include "thread.h"
@@ -94,6 +97,14 @@
return DexFile::OpenFile(filename, location, true);
}
+int DexFile::GetPermissions() const {
+ if (mem_map_.get() == NULL) {
+ return 0;
+ } else {
+ return mem_map_->GetProtect();
+ }
+}
+
const DexFile* DexFile::OpenFile(const std::string& filename,
const std::string& location,
bool verify) {
@@ -146,7 +157,6 @@
const char* DexFile::kClassesDex = "classes.dex";
-// Open classes.dex from within a .zip, .jar, .apk, ...
const DexFile* DexFile::OpenZip(const std::string& filename,
const std::string& location) {
UniquePtr<ZipArchive> zip_archive(ZipArchive::Open(filename));
@@ -157,6 +167,16 @@
return DexFile::Open(*zip_archive.get(), location);
}
+const DexFile* DexFile::OpenMemory(const std::string& location,
+ uint32_t location_checksum,
+ MemMap* mem_map) {
+ return OpenMemory(mem_map->Begin(),
+ mem_map->Size(),
+ location,
+ location_checksum,
+ mem_map);
+}
+
const DexFile* DexFile::Open(const ZipArchive& zip_archive, const std::string& location) {
CHECK(!location.empty());
UniquePtr<ZipEntry> zip_entry(zip_archive.Find(kClassesDex));
@@ -584,7 +604,7 @@
return descriptor;
}
-int32_t DexFile::GetLineNumFromPC(const AbstractMethod* method, uint32_t rel_pc) const {
+int32_t DexFile::GetLineNumFromPC(const mirror::AbstractMethod* method, uint32_t rel_pc) const {
// For native method, lineno should be -2 to indicate it is native. Note that
// "line number == -2" is how libcore tells from StackTraceElement.
if (method->GetCodeItemOffset() == 0) {
@@ -601,6 +621,12 @@
return context.line_num_;
}
+const DexFile::TryItem* DexFile::GetTryItems(const CodeItem& code_item, uint32_t offset) {
+ const uint16_t* insns_end_ = &code_item.insns_[code_item.insns_size_in_code_units_];
+ return reinterpret_cast<const TryItem*>
+ (RoundUp(reinterpret_cast<uint32_t>(insns_end_), 4)) + offset;
+}
+
int32_t DexFile::FindCatchHandlerOffset(const CodeItem &code_item, int32_t tries_size,
uint32_t address) {
// Note: Signed type is important for max and min.
@@ -900,8 +926,8 @@
}
EncodedStaticFieldValueIterator::EncodedStaticFieldValueIterator(const DexFile& dex_file,
- DexCache* dex_cache,
- ClassLoader* class_loader,
+ mirror::DexCache* dex_cache,
+ mirror::ClassLoader* class_loader,
ClassLinker* linker,
const DexFile::ClassDef& class_def)
: dex_file_(dex_file), dex_cache_(dex_cache), class_loader_(class_loader), linker_(linker),
@@ -976,7 +1002,7 @@
ptr_ += width;
}
-void EncodedStaticFieldValueIterator::ReadValueToField(Field* field) const {
+void EncodedStaticFieldValueIterator::ReadValueToField(mirror::Field* field) const {
switch (type_) {
case kBoolean: field->SetBoolean(field->GetDeclaringClass(), jval_.z); break;
case kByte: field->SetByte(field->GetDeclaringClass(), jval_.b); break;
@@ -988,12 +1014,12 @@
case kDouble: field->SetDouble(field->GetDeclaringClass(), jval_.d); break;
case kNull: field->SetObject(field->GetDeclaringClass(), NULL); break;
case kString: {
- String* resolved = linker_->ResolveString(dex_file_, jval_.i, dex_cache_);
+ mirror::String* resolved = linker_->ResolveString(dex_file_, jval_.i, dex_cache_);
field->SetObject(field->GetDeclaringClass(), resolved);
break;
}
case kType: {
- Class* resolved = linker_->ResolveType(dex_file_, jval_.i, dex_cache_, class_loader_);
+ mirror::Class* resolved = linker_->ResolveType(dex_file_, jval_.i, dex_cache_, class_loader_);
field->SetObject(field->GetDeclaringClass(), resolved);
break;
}
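DexFile::GetTryItems above encodes the dex layout rule that the try_items array starts at the first 4-byte-aligned address after the insns array. A minimal standalone sketch of that round-up arithmetic, with hypothetical names and example offsets:

#include <cassert>
#include <cstdint>

// Rounds x up to the next multiple of n, where n is a power of two.
static inline uintptr_t RoundUpToPowerOfTwo(uintptr_t x, uintptr_t n) {
  return (x + n - 1) & ~(n - 1);
}

int main() {
  // An insns array ending at offset 0x106 is padded with 2 bytes, so the
  // try_items begin at 0x108.
  assert(RoundUpToPowerOfTwo(0x106, 4) == 0x108);
  // An already-aligned end needs no padding.
  assert(RoundUpToPowerOfTwo(0x108, 4) == 0x108);
  return 0;
}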
diff --git a/src/dex_file.h b/src/dex_file.h
index 184d950..14b4ba0 100644
--- a/src/dex_file.h
+++ b/src/dex_file.h
@@ -29,10 +29,16 @@
#include "modifiers.h"
#include "safe_map.h"
#include "UniquePtr.h"
-#include "utils.h"
namespace art {
+namespace mirror {
+class AbstractMethod;
+class ClassLoader;
+class DexCache;
+class Field;
+} // namespace mirror
+class ClassLinker;
class ZipArchive;
// TODO: move all of the macro functionality into the DexCache class.
@@ -675,11 +681,7 @@
}
}
- static const TryItem* GetTryItems(const CodeItem& code_item, uint32_t offset) {
- const uint16_t* insns_end_ = &code_item.insns_[code_item.insns_size_in_code_units_];
- return reinterpret_cast<const TryItem*>
- (RoundUp(reinterpret_cast<uint32_t>(insns_end_), 4)) + offset;
- }
+ static const TryItem* GetTryItems(const CodeItem& code_item, uint32_t offset);
// Get the base of the encoded data for the given DexCode.
static const byte* GetCatchHandlerData(const CodeItem& code_item, uint32_t offset) {
@@ -775,7 +777,7 @@
// Returns -2 for native methods (as expected in exception traces).
//
// This is used by runtime; therefore use art::Method not art::DexFile::Method.
- int32_t GetLineNumFromPC(const AbstractMethod* method, uint32_t rel_pc) const
+ int32_t GetLineNumFromPC(const mirror::AbstractMethod* method, uint32_t rel_pc) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void DecodeDebugInfo(const CodeItem* code_item, bool is_static, uint32_t method_idx,
@@ -790,13 +792,7 @@
}
}
- int GetPermissions() const {
- if (mem_map_.get() == NULL) {
- return 0;
- } else {
- return mem_map_->GetProtect();
- }
- }
+ int GetPermissions() const;
private:
// Opens a .dex file
@@ -811,13 +807,7 @@
// Opens a .dex file at the given address backed by a MemMap
static const DexFile* OpenMemory(const std::string& location,
uint32_t location_checksum,
- MemMap* mem_map) {
- return OpenMemory(mem_map->Begin(),
- mem_map->Size(),
- location,
- location_checksum,
- mem_map);
- }
+ MemMap* mem_map);
// Opens a .dex file at the given address, optionally backed by a MemMap
static const DexFile* OpenMemory(const byte* dex_file,
@@ -1116,19 +1106,14 @@
DISALLOW_IMPLICIT_CONSTRUCTORS(ClassDataItemIterator);
};
-class ClassLinker;
-class ClassLoader;
-class DexCache;
-class Field;
-
class EncodedStaticFieldValueIterator {
public:
- EncodedStaticFieldValueIterator(const DexFile& dex_file, DexCache* dex_cache, ClassLoader* class_loader,
+ EncodedStaticFieldValueIterator(const DexFile& dex_file, mirror::DexCache* dex_cache,
+ mirror::ClassLoader* class_loader,
ClassLinker* linker, const DexFile::ClassDef& class_def)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void ReadValueToField(Field* field) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void ReadValueToField(mirror::Field* field) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool HasNext() { return pos_ < array_size_; }
@@ -1158,14 +1143,14 @@
static const byte kEncodedValueArgShift = 5;
const DexFile& dex_file_;
- DexCache* dex_cache_; // dex cache to resolve literal objects
- ClassLoader* class_loader_; // ClassLoader to resolve types
- ClassLinker* linker_; // linker to resolve literal objects
- size_t array_size_; // size of array
- size_t pos_; // current position
- const byte* ptr_; // pointer into encoded data array
- ValueType type_; // type of current encoded value
- jvalue jval_; // value of current encoded value
+ mirror::DexCache* dex_cache_; // Dex cache to resolve literal objects.
+ mirror::ClassLoader* class_loader_; // ClassLoader to resolve types.
+ ClassLinker* linker_; // Linker to resolve literal objects.
+ size_t array_size_; // Size of array.
+ size_t pos_; // Current position.
+ const byte* ptr_; // Pointer into encoded data array.
+ ValueType type_; // Type of current encoded value.
+ jvalue jval_; // Value of current encoded value.
DISALLOW_IMPLICIT_CONSTRUCTORS(EncodedStaticFieldValueIterator);
};
std::ostream& operator<<(std::ostream& os, const EncodedStaticFieldValueIterator::ValueType& code);
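The EncodedStaticFieldValueIterator declared above is driven in a HasNext()/Next() loop, decoding one constant per static field and writing it with ReadValueToField(). A hedged sketch of that loop; Next() and Class::GetStaticField() are assumed from the existing interfaces rather than introduced here:

// Applies a class_def's encoded static initial values to klass's static fields.
static void ApplyEncodedStaticValues(const DexFile& dex_file,
                                     const DexFile::ClassDef& class_def,
                                     mirror::Class* klass,
                                     mirror::DexCache* dex_cache,
                                     mirror::ClassLoader* class_loader,
                                     ClassLinker* linker)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  EncodedStaticFieldValueIterator it(dex_file, dex_cache, class_loader, linker, class_def);
  // Encoded values appear in static-field declaration order.
  for (size_t i = 0; it.HasNext(); ++i, it.Next()) {
    it.ReadValueToField(klass->GetStaticField(i));  // GetStaticField(i) is an assumed accessor.
  }
}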
diff --git a/src/dex_file_verifier.cc b/src/dex_file_verifier.cc
index 83ef31a..2f9054e 100644
--- a/src/dex_file_verifier.cc
+++ b/src/dex_file_verifier.cc
@@ -18,9 +18,10 @@
#include "base/stringprintf.h"
#include "leb128.h"
-#include "object.h"
#include "safe_map.h"
#include "UniquePtr.h"
+#include "utf.h"
+#include "utils.h"
#include "zip_archive.h"
namespace art {
diff --git a/src/dex_instruction.cc b/src/dex_instruction.cc
index d3aa238..55f6eca 100644
--- a/src/dex_instruction.cc
+++ b/src/dex_instruction.cc
@@ -17,6 +17,7 @@
#include "dex_instruction.h"
#include "dex_file.h"
+#include "utils.h"
#include <iomanip>
namespace art {
diff --git a/src/exception_test.cc b/src/exception_test.cc
index 58e6533..240bf95 100644
--- a/src/exception_test.cc
+++ b/src/exception_test.cc
@@ -18,6 +18,8 @@
#include "common_test.h"
#include "dex_file.h"
#include "gtest/gtest.h"
+#include "mirror/object_array-inl.h"
+#include "mirror/stack_trace_element.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "sirt_ref.h"
@@ -32,8 +34,8 @@
CommonTest::SetUp();
ScopedObjectAccess soa(Thread::Current());
- SirtRef<ClassLoader> class_loader(soa.Self(),
- soa.Decode<ClassLoader*>(LoadDex("ExceptionHandle")));
+ SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
+ soa.Decode<mirror::ClassLoader*>(LoadDex("ExceptionHandle")));
my_klass_ = class_linker_->FindClass("LExceptionHandle;", class_loader.get());
ASSERT_TRUE(my_klass_ != NULL);
class_linker_->EnsureInitialized(my_klass_, false, true);
@@ -90,11 +92,11 @@
std::vector<uint16_t> fake_vmap_table_data_;
std::vector<uint8_t> fake_gc_map_;
- AbstractMethod* method_f_;
- AbstractMethod* method_g_;
+ mirror::AbstractMethod* method_f_;
+ mirror::AbstractMethod* method_g_;
private:
- Class* my_klass_;
+ mirror::Class* my_klass_;
};
TEST_F(ExceptionTest, FindCatchHandler) {
@@ -193,8 +195,8 @@
ASSERT_TRUE(internal != NULL);
jobjectArray ste_array = Thread::InternalStackTraceToStackTraceElementArray(env, internal);
ASSERT_TRUE(ste_array != NULL);
- ObjectArray<StackTraceElement>* trace_array =
- soa.Decode<ObjectArray<StackTraceElement>*>(ste_array);
+ mirror::ObjectArray<mirror::StackTraceElement>* trace_array =
+ soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(ste_array);
ASSERT_TRUE(trace_array != NULL);
ASSERT_TRUE(trace_array->Get(0) != NULL);
diff --git a/src/gc/atomic_stack.h b/src/gc/atomic_stack.h
index cd1781d..0197bce 100644
--- a/src/gc/atomic_stack.h
+++ b/src/gc/atomic_stack.h
@@ -101,11 +101,11 @@
}
T* Begin() {
- return const_cast<Object**>(begin_ + front_index_);
+ return const_cast<mirror::Object**>(begin_ + front_index_);
}
T* End() {
- return const_cast<Object**>(begin_ + back_index_);
+ return const_cast<mirror::Object**>(begin_ + back_index_);
}
size_t Capacity() const {
@@ -159,6 +159,8 @@
DISALLOW_COPY_AND_ASSIGN(AtomicStack);
};
+typedef AtomicStack<mirror::Object*> ObjectStack;
+
} // namespace art
#endif // ART_SRC_MARK_STACK_H_
diff --git a/src/gc/card_table-inl.h b/src/gc/card_table-inl.h
new file mode 100644
index 0000000..13590b7
--- /dev/null
+++ b/src/gc/card_table-inl.h
@@ -0,0 +1,209 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_CARDTABLE_INL_H_
+#define ART_SRC_GC_CARDTABLE_INL_H_
+
+#include "base/logging.h"
+#include "card_table.h"
+#include "cutils/atomic-inline.h"
+#include "space_bitmap.h"
+#include "utils.h"
+
+namespace art {
+
+static inline bool byte_cas(byte old_value, byte new_value, byte* address) {
+ // Little endian means most significant byte is on the left.
+ const size_t shift = reinterpret_cast<uintptr_t>(address) % sizeof(uintptr_t);
+ // Align the address down.
+ address -= shift;
+ int32_t* word_address = reinterpret_cast<int32_t*>(address);
+ // Word with the byte we are trying to cas cleared.
+ const int32_t cur_word = *word_address & ~(0xFF << shift);
+ const int32_t old_word = cur_word | (static_cast<int32_t>(old_value) << shift);
+ const int32_t new_word = cur_word | (static_cast<int32_t>(new_value) << shift);
+ bool success = android_atomic_cas(old_word, new_word, word_address) == 0;
+ return success;
+}
+
+template <typename Visitor, typename FingerVisitor>
+inline void CardTable::Scan(SpaceBitmap* bitmap, byte* scan_begin, byte* scan_end,
+ const Visitor& visitor, const FingerVisitor& finger_visitor,
+ const byte minimum_age) const {
+ DCHECK(bitmap->HasAddress(scan_begin));
+ DCHECK(bitmap->HasAddress(scan_end - 1)); // scan_end is the byte after the last byte we scan.
+ byte* card_cur = CardFromAddr(scan_begin);
+ byte* card_end = CardFromAddr(scan_end);
+ CheckCardValid(card_cur);
+ CheckCardValid(card_end);
+
+ // Handle any unaligned cards at the start.
+ while (!IsAligned<sizeof(word)>(card_cur) && card_cur < card_end) {
+ if (*card_cur >= minimum_age) {
+ uintptr_t start = reinterpret_cast<uintptr_t>(AddrFromCard(card_cur));
+ uintptr_t end = start + kCardSize;
+ bitmap->VisitMarkedRange(start, end, visitor, finger_visitor);
+ }
+ ++card_cur;
+ }
+
+ byte* aligned_end = card_end -
+ (reinterpret_cast<uintptr_t>(card_end) & (sizeof(uintptr_t) - 1));
+
+ // Now we have the words, we can send these to be processed in parallel.
+ uintptr_t* word_cur = reinterpret_cast<uintptr_t*>(card_cur);
+ uintptr_t* word_end = reinterpret_cast<uintptr_t*>(aligned_end);
+
+ // TODO: Parallelize
+ while (word_cur < word_end) {
+ // Find the first dirty card.
+ while (*word_cur == 0 && word_cur < word_end) {
+ word_cur++;
+ }
+ if (word_cur >= word_end) {
+ break;
+ }
+ uintptr_t start_word = *word_cur;
+ for (size_t i = 0; i < sizeof(uintptr_t); ++i) {
+ if ((start_word & 0xFF) >= minimum_age) {
+ byte* card = reinterpret_cast<byte*>(word_cur) + i;
+ const byte card_byte = *card;
+ DCHECK(card_byte == (start_word & 0xFF) || card_byte == kCardDirty)
+ << "card " << static_cast<size_t>(card_byte) << " word " << (start_word & 0xFF);
+ uintptr_t start = reinterpret_cast<uintptr_t>(AddrFromCard(card));
+ uintptr_t end = start + kCardSize;
+ bitmap->VisitMarkedRange(start, end, visitor, finger_visitor);
+ }
+ start_word >>= 8;
+ }
+ ++word_cur;
+ }
+
+ // Handle any unaligned cards at the end.
+ card_cur = reinterpret_cast<byte*>(word_end);
+ while (card_cur < card_end) {
+ if (*card_cur >= minimum_age) {
+ uintptr_t start = reinterpret_cast<uintptr_t>(AddrFromCard(card_cur));
+ uintptr_t end = start + kCardSize;
+ bitmap->VisitMarkedRange(start, end, visitor, finger_visitor);
+ }
+ ++card_cur;
+ }
+}
+
+/*
+ * Visitor is expected to take in a card and return the new value. When a value is modified, the
+ * modify visitor is called.
+ * visitor: The visitor which modifies the cards. Returns the new value for a card given an old
+ * value.
+ * modified: Whenever the visitor modifies a card, this visitor is called on the card. Enables
+ * us to know which cards got cleared.
+ */
+template <typename Visitor, typename ModifiedVisitor>
+inline void CardTable::ModifyCardsAtomic(byte* scan_begin, byte* scan_end, const Visitor& visitor,
+ const ModifiedVisitor& modified) {
+ byte* card_cur = CardFromAddr(scan_begin);
+ byte* card_end = CardFromAddr(scan_end);
+ CheckCardValid(card_cur);
+ CheckCardValid(card_end);
+
+ // Handle any unaligned cards at the start.
+ while (!IsAligned<sizeof(word)>(card_cur) && card_cur < card_end) {
+ byte expected, new_value;
+ do {
+ expected = *card_cur;
+ new_value = visitor(expected);
+ } while (expected != new_value && UNLIKELY(!byte_cas(expected, new_value, card_cur)));
+ if (expected != new_value) {
+ modified(card_cur, expected, new_value);
+ }
+ ++card_cur;
+ }
+
+ // Handle unaligned cards at the end.
+ while (!IsAligned<sizeof(word)>(card_end) && card_end > card_cur) {
+ --card_end;
+ byte expected, new_value;
+ do {
+ expected = *card_end;
+ new_value = visitor(expected);
+ } while (expected != new_value && UNLIKELY(!byte_cas(expected, new_value, card_end)));
+ if (expected != new_value) {
+ modified(card_cur, expected, new_value);
+ }
+ }
+
+ // Now we have the words, we can process words in parallel.
+ uintptr_t* word_cur = reinterpret_cast<uintptr_t*>(card_cur);
+ uintptr_t* word_end = reinterpret_cast<uintptr_t*>(card_end);
+ uintptr_t expected_word;
+ uintptr_t new_word;
+
+ // TODO: Parallelize.
+ while (word_cur < word_end) {
+ while ((expected_word = *word_cur) != 0) {
+ new_word =
+ (visitor((expected_word >> 0) & 0xFF) << 0) |
+ (visitor((expected_word >> 8) & 0xFF) << 8) |
+ (visitor((expected_word >> 16) & 0xFF) << 16) |
+ (visitor((expected_word >> 24) & 0xFF) << 24);
+ if (new_word == expected_word) {
+ // No need to do a cas.
+ break;
+ }
+ if (LIKELY(android_atomic_cas(expected_word, new_word,
+ reinterpret_cast<int32_t*>(word_cur)) == 0)) {
+ for (size_t i = 0; i < sizeof(uintptr_t); ++i) {
+ const byte expected_byte = (expected_word >> (8 * i)) & 0xFF;
+ const byte new_byte = (new_word >> (8 * i)) & 0xFF;
+ if (expected_byte != new_byte) {
+ modified(reinterpret_cast<byte*>(word_cur) + i, expected_byte, new_byte);
+ }
+ }
+ break;
+ }
+ }
+ ++word_cur;
+ }
+}
+
+inline void* CardTable::AddrFromCard(const byte *card_addr) const {
+ DCHECK(IsValidCard(card_addr))
+ << " card_addr: " << reinterpret_cast<const void*>(card_addr)
+ << " begin: " << reinterpret_cast<void*>(mem_map_->Begin() + offset_)
+ << " end: " << reinterpret_cast<void*>(mem_map_->End());
+ uintptr_t offset = card_addr - biased_begin_;
+ return reinterpret_cast<void*>(offset << kCardShift);
+}
+
+inline byte* CardTable::CardFromAddr(const void *addr) const {
+ byte *card_addr = biased_begin_ + (reinterpret_cast<uintptr_t>(addr) >> kCardShift);
+ // Sanity check the caller was asking for address covered by the card table
+ DCHECK(IsValidCard(card_addr)) << "addr: " << addr
+ << " card_addr: " << reinterpret_cast<void*>(card_addr);
+ return card_addr;
+}
+
+inline void CardTable::CheckCardValid(byte* card) const {
+ DCHECK(IsValidCard(card))
+ << " card_addr: " << reinterpret_cast<const void*>(card)
+ << " begin: " << reinterpret_cast<void*>(mem_map_->Begin() + offset_)
+ << " end: " << reinterpret_cast<void*>(mem_map_->End());
+}
+
+} // namespace art
+
+#endif // ART_SRC_GC_CARDTABLE_INL_H_
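The byte_cas helper above emulates a single-byte compare-and-swap with a word-wide CAS: align down to the containing word, splice the expected and desired byte into that word, then CAS the whole word. A self-contained sketch of the same idea in portable C++, with std::atomic standing in for android_atomic_cas and the alignment step replaced by an explicit byte index; all names are illustrative:

#include <atomic>
#include <cstdint>

// Compare-and-swap the byte at byte_index inside a little-endian 32-bit word,
// using only a word-wide CAS.
static bool ByteCasInWord(std::atomic<uint32_t>& word, size_t byte_index,
                          uint8_t old_value, uint8_t new_value) {
  const uint32_t shift = static_cast<uint32_t>(byte_index) * 8;  // byte offset -> bit offset
  uint32_t expected = word.load(std::memory_order_relaxed);
  // Fail fast if the byte does not currently hold old_value.
  if (static_cast<uint8_t>((expected >> shift) & 0xFF) != old_value) {
    return false;
  }
  const uint32_t desired =
      (expected & ~(UINT32_C(0xFF) << shift)) | (static_cast<uint32_t>(new_value) << shift);
  return word.compare_exchange_strong(expected, desired);
}

int main() {
  std::atomic<uint32_t> word(0x44332211);
  // Replace the byte 0x33 at byte index 2 with 0xAA; the word becomes 0x44AA2211.
  const bool ok = ByteCasInWord(word, 2, 0x33, 0xAA);
  return (ok && word.load() == 0x44AA2211) ? 0 : 1;
}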
diff --git a/src/gc/card_table.cc b/src/gc/card_table.cc
index f27777b..4331270 100644
--- a/src/gc/card_table.cc
+++ b/src/gc/card_table.cc
@@ -19,6 +19,7 @@
#include <dynamic_annotations.h>
#include "base/logging.h"
+#include "gc/card_table-inl.h"
#include "heap.h"
#include "heap_bitmap.h"
#include "runtime.h"
diff --git a/src/gc/card_table.h b/src/gc/card_table.h
index 8f1bc92..842fcc3 100644
--- a/src/gc/card_table.h
+++ b/src/gc/card_table.h
@@ -17,19 +17,18 @@
#ifndef ART_SRC_GC_CARDTABLE_H_
#define ART_SRC_GC_CARDTABLE_H_
-#include "base/logging.h"
#include "globals.h"
+#include "locks.h"
#include "mem_map.h"
-#include "space_bitmap.h"
#include "UniquePtr.h"
-#include "utils.h"
namespace art {
-
+namespace mirror {
+class Object;
+} // namespace mirror
class Heap;
class ContinuousSpace;
class SpaceBitmap;
-class Object;
// Maintain a card table from the the write barrier. All writes of
// non-NULL values to heap addresses should go through an entry in
@@ -50,12 +49,12 @@
}
// Is the object on a dirty card?
- bool IsDirty(const Object* obj) const {
+ bool IsDirty(const mirror::Object* obj) const {
return GetCard(obj) == kCardDirty;
}
// Return the state of the card at an address.
- byte GetCard(const Object* obj) const {
+ byte GetCard(const mirror::Object* obj) const {
return *CardFromAddr(obj);
}
@@ -88,71 +87,7 @@
*/
template <typename Visitor, typename ModifiedVisitor>
void ModifyCardsAtomic(byte* scan_begin, byte* scan_end, const Visitor& visitor,
- const ModifiedVisitor& modified = VoidFunctor()) {
- byte* card_cur = CardFromAddr(scan_begin);
- byte* card_end = CardFromAddr(scan_end);
- CheckCardValid(card_cur);
- CheckCardValid(card_end);
-
- // Handle any unaligned cards at the start.
- while (!IsAligned<sizeof(word)>(card_cur) && card_cur < card_end) {
- byte expected, new_value;
- do {
- expected = *card_cur;
- new_value = visitor(expected);
- } while (expected != new_value && UNLIKELY(byte_cas(expected, new_value, card_cur) != 0));
- if (expected != new_value) {
- modified(card_cur, expected, new_value);
- }
- ++card_cur;
- }
-
- // Handle unaligned cards at the end.
- while (!IsAligned<sizeof(word)>(card_end) && card_end > card_cur) {
- --card_end;
- byte expected, new_value;
- do {
- expected = *card_end;
- new_value = visitor(expected);
- } while (expected != new_value && UNLIKELY(byte_cas(expected, new_value, card_end) != 0));
- if (expected != new_value) {
- modified(card_cur, expected, new_value);
- }
- }
-
- // Now we have the words, we can process words in parallel.
- uintptr_t* word_cur = reinterpret_cast<uintptr_t*>(card_cur);
- uintptr_t* word_end = reinterpret_cast<uintptr_t*>(card_end);
- uintptr_t expected_word;
- uintptr_t new_word;
-
- // TODO: Parallelize.
- while (word_cur < word_end) {
- while ((expected_word = *word_cur) != 0) {
- new_word =
- (visitor((expected_word >> 0) & 0xFF) << 0) |
- (visitor((expected_word >> 8) & 0xFF) << 8) |
- (visitor((expected_word >> 16) & 0xFF) << 16) |
- (visitor((expected_word >> 24) & 0xFF) << 24);
- if (new_word == expected_word) {
- // No need to do a cas.
- break;
- }
- if (LIKELY(android_atomic_cas(expected_word, new_word,
- reinterpret_cast<int32_t*>(word_cur)) == 0)) {
- for (size_t i = 0; i < sizeof(uintptr_t); ++i) {
- const byte expected_byte = (expected_word >> (8 * i)) & 0xFF;
- const byte new_byte = (new_word >> (8 * i)) & 0xFF;
- if (expected_byte != new_byte) {
- modified(reinterpret_cast<byte*>(word_cur) + i, expected_byte, new_byte);
- }
- }
- break;
- }
- }
- ++word_cur;
- }
- }
+ const ModifiedVisitor& modified);
// For every dirty card of at least minimum age between begin and end, invoke the visitor with the
// specified argument.
@@ -161,67 +96,7 @@
const Visitor& visitor, const FingerVisitor& finger_visitor,
const byte minimum_age = kCardDirty) const
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(bitmap->HasAddress(scan_begin));
- DCHECK(bitmap->HasAddress(scan_end - 1)); // scan_end is the byte after the last byte we scan.
- byte* card_cur = CardFromAddr(scan_begin);
- byte* card_end = CardFromAddr(scan_end);
- CheckCardValid(card_cur);
- CheckCardValid(card_end);
-
- // Handle any unaligned cards at the start.
- while (!IsAligned<sizeof(word)>(card_cur) && card_cur < card_end) {
- if (*card_cur >= minimum_age) {
- uintptr_t start = reinterpret_cast<uintptr_t>(AddrFromCard(card_cur));
- uintptr_t end = start + kCardSize;
- bitmap->VisitMarkedRange(start, end, visitor, finger_visitor);
- }
- ++card_cur;
- }
-
- byte* aligned_end = card_end -
- (reinterpret_cast<uintptr_t>(card_end) & (sizeof(uintptr_t) - 1));
-
- // Now we have the words, we can send these to be processed in parallel.
- uintptr_t* word_cur = reinterpret_cast<uintptr_t*>(card_cur);
- uintptr_t* word_end = reinterpret_cast<uintptr_t*>(aligned_end);
-
- // TODO: Parallelize
- while (word_cur < word_end) {
- // Find the first dirty card.
- while (*word_cur == 0 && word_cur < word_end) {
- word_cur++;
- }
- if (word_cur >= word_end) {
- break;
- }
- uintptr_t start_word = *word_cur;
- for (size_t i = 0; i < sizeof(uintptr_t); ++i) {
- if ((start_word & 0xFF) >= minimum_age) {
- byte* card = reinterpret_cast<byte*>(word_cur) + i;
- const byte card_byte = *card;
- DCHECK(card_byte == (start_word & 0xFF) || card_byte == kCardDirty)
- << "card " << static_cast<size_t>(card_byte) << " word " << (start_word & 0xFF);
- uintptr_t start = reinterpret_cast<uintptr_t>(AddrFromCard(card));
- uintptr_t end = start + kCardSize;
- bitmap->VisitMarkedRange(start, end, visitor, finger_visitor);
- }
- start_word >>= 8;
- }
- ++word_cur;
- }
-
- // Handle any unaligned cards at the end.
- card_cur = reinterpret_cast<byte*>(word_end);
- while (card_cur < card_end) {
- if (*card_cur >= minimum_age) {
- uintptr_t start = reinterpret_cast<uintptr_t>(AddrFromCard(card_cur));
- uintptr_t end = start + kCardSize;
- bitmap->VisitMarkedRange(start, end, visitor, finger_visitor);
- }
- ++card_cur;
- }
- }
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Assertion used to check the given address is covered by the card table
void CheckAddrIsInCardTable(const byte* addr) const;
@@ -233,40 +108,14 @@
void ClearSpaceCards(ContinuousSpace* space);
// Returns the first address in the heap which maps to this card.
- void* AddrFromCard(const byte *card_addr) const {
- DCHECK(IsValidCard(card_addr))
- << " card_addr: " << reinterpret_cast<const void*>(card_addr)
- << " begin: " << reinterpret_cast<void*>(mem_map_->Begin() + offset_)
- << " end: " << reinterpret_cast<void*>(mem_map_->End());
- uintptr_t offset = card_addr - biased_begin_;
- return reinterpret_cast<void*>(offset << kCardShift);
- }
+ void* AddrFromCard(const byte *card_addr) const;
// Returns the address of the relevant byte in the card table, given an address on the heap.
- byte* CardFromAddr(const void *addr) const {
- byte *card_addr = biased_begin_ + (reinterpret_cast<uintptr_t>(addr) >> kCardShift);
- // Sanity check the caller was asking for address covered by the card table
- DCHECK(IsValidCard(card_addr)) << "addr: " << addr
- << " card_addr: " << reinterpret_cast<void*>(card_addr);
- return card_addr;
- }
+ byte* CardFromAddr(const void *addr) const;
bool AddrIsInCardTable(const void* addr) const;
private:
- static int byte_cas(byte old_value, byte new_value, byte* address) {
- // Little endian means most significant byte is on the left.
- const size_t shift = reinterpret_cast<uintptr_t>(address) % sizeof(uintptr_t);
- // Align the address down.
- address -= shift;
- int32_t* word_address = reinterpret_cast<int32_t*>(address);
- // Word with the byte we are trying to cas cleared.
- const int32_t cur_word = *word_address & ~(0xFF << shift);
- const int32_t old_word = cur_word | (static_cast<int32_t>(old_value) << shift);
- const int32_t new_word = cur_word | (static_cast<int32_t>(new_value) << shift);
- return android_atomic_cas(old_word, new_word, word_address);
- }
-
CardTable(MemMap* begin, byte* biased_begin, size_t offset);
// Returns true iff the card table address is within the bounds of the card table.
@@ -276,12 +125,7 @@
return card_addr >= begin && card_addr < end;
}
- void CheckCardValid(byte* card) const {
- DCHECK(IsValidCard(card))
- << " card_addr: " << reinterpret_cast<const void*>(card)
- << " begin: " << reinterpret_cast<void*>(mem_map_->Begin() + offset_)
- << " end: " << reinterpret_cast<void*>(mem_map_->End());
- }
+ void CheckCardValid(byte* card) const;
// Verifies that all gray objects are on a dirty card.
void VerifyCardTable();
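ModifyCardsAtomic's visitor/modified protocol, documented above, can be exercised with two small functors, for example to age dirty cards while counting how many actually changed. A hedged sketch using ART's byte typedef; the functor names and the space->Begin()/End() usage are illustrative, not part of this change:

#include "gc/card_table.h"
#include "globals.h"

namespace art {

// Maps a dirty card to an "aged" value and clears every other card.
struct AgeCardVisitor {
  byte operator()(byte card) const {
    return (card == CardTable::kCardDirty) ? card - 1 : 0;
  }
};

// Invoked only for cards the CAS loop actually modified.
struct CountModifiedVisitor {
  explicit CountModifiedVisitor(size_t* counter) : counter_(counter) {}
  void operator()(byte* /*card*/, byte /*old_value*/, byte /*new_value*/) const {
    ++*counter_;
  }
  size_t* const counter_;
};

}  // namespace art

// Usage, assuming a ContinuousSpace* space and a CardTable* card_table are at hand:
//   size_t modified = 0;
//   card_table->ModifyCardsAtomic(space->Begin(), space->End(),
//                                 AgeCardVisitor(), CountModifiedVisitor(&modified));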
diff --git a/src/gc/garbage_collector.cc b/src/gc/garbage_collector.cc
index bcc7b63..fbcdbaf 100644
--- a/src/gc/garbage_collector.cc
+++ b/src/gc/garbage_collector.cc
@@ -15,6 +15,7 @@
*/
#include "garbage_collector.h"
+#include "thread.h"
#include "thread_list.h"
namespace art {
diff --git a/src/gc/garbage_collector.h b/src/gc/garbage_collector.h
index 9ddf45f..a1014c2 100644
--- a/src/gc/garbage_collector.h
+++ b/src/gc/garbage_collector.h
@@ -14,11 +14,13 @@
* limitations under the License.
*/
-#ifndef ART_SRC_GC_GARBAGE_COLLECTR_H_
-#define ART_SRC_GC_GARBAGE_COLLECTR_H_
+#ifndef ART_SRC_GC_GARBAGE_COLLECTOR_H_
+#define ART_SRC_GC_GARBAGE_COLLECTOR_H_
#include "locks.h"
-#include "utils.h"
+
+#include <stdint.h>
+#include <vector>
namespace art {
@@ -56,7 +58,7 @@
void RegisterPause(uint64_t nano_length);
protected:
- // The initial phase. Done with mutators upaused.
+ // The initial phase. Done without mutators paused.
virtual void InitializePhase() = 0;
// Mark all reachable objects, done concurrently.
@@ -68,7 +70,7 @@
// Called with mutators running.
virtual void ReclaimPhase() = 0;
- // Called after the GC is finished. Done with mutators upaused.
+ // Called after the GC is finished. Done without mutators paused.
virtual void FinishPhase() = 0;
Heap* heap_;
@@ -78,4 +80,4 @@
} // namespace art
-#endif // ART_SRC_GC_GARBAGE_COLLECTR_H_
+#endif // ART_SRC_GC_GARBAGE_COLLECTOR_H_
diff --git a/src/gc/gc_type.h b/src/gc/gc_type.h
new file mode 100644
index 0000000..908f038
--- /dev/null
+++ b/src/gc/gc_type.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_GC_TYPE_H_
+#define ART_SRC_GC_GC_TYPE_H_
+
+namespace art {
+
+// The ordering of the enum matters, it is used to determine which GCs are run first.
+enum GcType {
+ // No Gc
+ kGcTypeNone,
+ // Sticky mark bits "generational" GC.
+ kGcTypeSticky,
+ // Partial GC, over only the alloc space.
+ kGcTypePartial,
+ // Full GC
+ kGcTypeFull,
+ // Number of different Gc types.
+ kGcTypeMax,
+};
+std::ostream& operator<<(std::ostream& os, const GcType& policy);
+
+} // namespace art
+
+#endif // ART_SRC_GC_GC_TYPE_H_
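Because the GcType comment stresses that the enum order determines which collections run first, a caller can escalate from the cheapest to the most thorough collector by iterating the enum in order. A standalone sketch, with an illustrative stand-in for actually running a collector:

#include <iostream>

// Mirrors the ordering above: sticky < partial < full.
enum GcType { kGcTypeNone, kGcTypeSticky, kGcTypePartial, kGcTypeFull, kGcTypeMax };

// Stand-in for running a collector of the given type; pretend only a full GC
// recovers enough memory.
static bool CollectGarbage(GcType type) {
  return type == kGcTypeFull;
}

int main() {
  for (int type = kGcTypeSticky; type < kGcTypeMax; ++type) {
    if (CollectGarbage(static_cast<GcType>(type))) {
      std::cout << "freed enough memory with GC type " << type << "\n";
      break;
    }
  }
  return 0;
}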
diff --git a/src/gc/heap_bitmap-inl.h b/src/gc/heap_bitmap-inl.h
new file mode 100644
index 0000000..2811183
--- /dev/null
+++ b/src/gc/heap_bitmap-inl.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_HEAP_BITMAP_INL_H_
+#define ART_SRC_GC_HEAP_BITMAP_INL_H_
+
+#include "heap_bitmap.h"
+
+namespace art {
+
+template <typename Visitor>
+inline void HeapBitmap::Visit(const Visitor& visitor) {
+ // TODO: C++0x auto
+ for (Bitmaps::iterator it = bitmaps_.begin(); it != bitmaps_.end(); ++it) {
+ SpaceBitmap* bitmap = *it;
+ bitmap->VisitMarkedRange(bitmap->HeapBegin(), bitmap->HeapLimit(), visitor, VoidFunctor());
+ }
+ large_objects_->Visit(visitor);
+}
+
+} // namespace art
+
+#endif // ART_SRC_GC_HEAP_BITMAP_INL_H_
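HeapBitmap::Visit above simply forwards the visitor to each space bitmap's marked range and to the large-object set, so a caller only needs a functor that accepts a mirror::Object pointer. A hedged sketch that counts live objects; the CountObjectsVisitor name and the Heap::GetLiveBitmap() accessor are assumptions, not part of this change:

#include "gc/heap_bitmap-inl.h"
#include "mirror/object.h"

namespace art {

// Counts every object the heap bitmaps consider live.
class CountObjectsVisitor {
 public:
  explicit CountObjectsVisitor(size_t* count) : count_(count) {}
  void operator()(const mirror::Object* /*obj*/) const {
    ++*count_;
  }
 private:
  size_t* const count_;
};

}  // namespace art

// Usage, with the heap bitmap lock held:
//   size_t live_objects = 0;
//   heap->GetLiveBitmap()->Visit(CountObjectsVisitor(&live_objects));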
diff --git a/src/gc/heap_bitmap.h b/src/gc/heap_bitmap.h
index 42c4166..87e0848 100644
--- a/src/gc/heap_bitmap.h
+++ b/src/gc/heap_bitmap.h
@@ -14,96 +14,91 @@
* limitations under the License.
*/
-#ifndef ART_SRC_HEAP_BITMAP_H_
-#define ART_SRC_HEAP_BITMAP_H_
+#ifndef ART_SRC_GC_HEAP_BITMAP_H_
+#define ART_SRC_GC_HEAP_BITMAP_H_
+#include "locks.h"
#include "space_bitmap.h"
namespace art {
- class Heap;
- class SpaceBitmap;
+class Heap;
- class HeapBitmap {
- public:
- bool Test(const Object* obj) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
- SpaceBitmap* bitmap = GetSpaceBitmap(obj);
- if (LIKELY(bitmap != NULL)) {
- return bitmap->Test(obj);
- } else {
- return large_objects_->Test(obj);
+class HeapBitmap {
+ public:
+ bool Test(const mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ SpaceBitmap* bitmap = GetSpaceBitmap(obj);
+ if (LIKELY(bitmap != NULL)) {
+ return bitmap->Test(obj);
+ } else {
+ return large_objects_->Test(obj);
+ }
+ }
+
+ void Clear(const mirror::Object* obj)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ SpaceBitmap* bitmap = GetSpaceBitmap(obj);
+ if (LIKELY(bitmap != NULL)) {
+ bitmap->Clear(obj);
+ } else {
+ large_objects_->Clear(obj);
+ }
+ }
+
+ void Set(const mirror::Object* obj)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ SpaceBitmap* bitmap = GetSpaceBitmap(obj);
+ if (LIKELY(bitmap != NULL)) {
+ bitmap->Set(obj);
+ } else {
+ large_objects_->Set(obj);
+ }
+ }
+
+ SpaceBitmap* GetSpaceBitmap(const mirror::Object* obj) {
+ // TODO: C++0x auto
+ for (Bitmaps::iterator it = bitmaps_.begin(); it != bitmaps_.end(); ++it) {
+ if ((*it)->HasAddress(obj)) {
+ return *it;
}
}
+ return NULL;
+ }
- void Clear(const Object* obj)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
- SpaceBitmap* bitmap = GetSpaceBitmap(obj);
- if (LIKELY(bitmap != NULL)) {
- bitmap->Clear(obj);
- } else {
- large_objects_->Clear(obj);
- }
- }
+ void Walk(SpaceBitmap::Callback* callback, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- void Set(const Object* obj)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
- SpaceBitmap* bitmap = GetSpaceBitmap(obj);
- if (LIKELY(bitmap != NULL)) {
- bitmap->Set(obj);
- } else {
- large_objects_->Set(obj);
- }
- }
+ template <typename Visitor>
+ void Visit(const Visitor& visitor)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- SpaceBitmap* GetSpaceBitmap(const Object* obj) {
- // TODO: C++0x auto
- for (Bitmaps::iterator it = bitmaps_.begin(); it != bitmaps_.end(); ++it) {
- if ((*it)->HasAddress(obj)) {
- return *it;
- }
- }
- return NULL;
- }
+ // Find and replace a bitmap pointer; this is used for the bitmap swapping in the GC.
+ void ReplaceBitmap(SpaceBitmap* old_bitmap, SpaceBitmap* new_bitmap)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- void Walk(SpaceBitmap::Callback* callback, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ HeapBitmap(Heap* heap);
- template <typename Visitor>
- void Visit(const Visitor& visitor)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
- // TODO: C++0x auto
- for (Bitmaps::iterator it = bitmaps_.begin(); it != bitmaps_.end(); ++it) {
- SpaceBitmap* bitmap = *it;
- bitmap->VisitMarkedRange(bitmap->HeapBegin(), bitmap->HeapLimit(), visitor, VoidFunctor());
- }
- large_objects_->Visit(visitor);
- }
+ inline SpaceSetMap* GetLargeObjects() const {
+ return large_objects_;
+ }
- // Find and replace a bitmap pointer, this is used by for the bitmap swapping in the GC.
- void ReplaceBitmap(SpaceBitmap* old_bitmap, SpaceBitmap* new_bitmap)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ void SetLargeObjects(SpaceSetMap* large_objects);
- HeapBitmap(Heap* heap);
+ private:
- inline SpaceSetMap* GetLargeObjects() const {
- return large_objects_;
- }
+ const Heap* const heap_;
- void SetLargeObjects(SpaceSetMap* large_objects);
+ void AddSpaceBitmap(SpaceBitmap* bitmap);
- private:
+ typedef std::vector<SpaceBitmap*> Bitmaps;
+ Bitmaps bitmaps_;
- const Heap* const heap_;
+ // Large object sets.
+ SpaceSetMap* large_objects_;
- void AddSpaceBitmap(SpaceBitmap* bitmap);
+ friend class Heap;
+};
- typedef std::vector<SpaceBitmap*> Bitmaps;
- Bitmaps bitmaps_;
-
- // Large object sets.
- SpaceSetMap* large_objects_;
-
- friend class Heap;
- };
} // namespace art
-#endif // ART_SRC_HEAP_BITMAP_H_
+#endif // ART_SRC_GC_HEAP_BITMAP_H_
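
For orientation, a minimal sketch of how the relocated HeapBitmap API is used: Test, Set and Clear dispatch to the continuous-space bitmap covering the object and fall back to the large object set, while Visit (now defined in heap_bitmap-inl.h) walks every marked object. MarkedObjectCounter and MarkAndCount are hypothetical names and lock annotations are omitted.

#include "gc/heap_bitmap.h"
#include "gc/heap_bitmap-inl.h"

namespace art {

// Visit() takes a const reference, so operator() must be const.
class MarkedObjectCounter {
 public:
  MarkedObjectCounter() : count_(0) {}
  void operator()(const mirror::Object* /* obj */) const { ++count_; }
  size_t Count() const { return count_; }
 private:
  mutable size_t count_;
};

size_t MarkAndCount(HeapBitmap* live_bitmap, const mirror::Object* obj) {
  // Test/Set dispatch to the space bitmap that contains obj, or to the
  // large object set when no continuous space claims the address.
  if (!live_bitmap->Test(obj)) {
    live_bitmap->Set(obj);
  }
  MarkedObjectCounter counter;
  live_bitmap->Visit(counter);  // Walks all space bitmaps plus the large objects.
  return counter.Count();
}

}  // namespace art
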
diff --git a/src/gc/large_object_space.cc b/src/gc/large_object_space.cc
index 1b93e5d..69320fa 100644
--- a/src/gc/large_object_space.cc
+++ b/src/gc/large_object_space.cc
@@ -22,6 +22,7 @@
#include "image.h"
#include "os.h"
#include "space_bitmap.h"
+#include "thread.h"
#include "utils.h"
namespace art {
@@ -58,13 +59,13 @@
return new LargeObjectMapSpace(name);
}
-Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes) {
+mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes) {
MemMap* mem_map = MemMap::MapAnonymous("allocation", NULL, num_bytes, PROT_READ | PROT_WRITE);
if (mem_map == NULL) {
return NULL;
}
MutexLock mu(self, lock_);
- Object* obj = reinterpret_cast<Object*>(mem_map->Begin());
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(mem_map->Begin());
large_objects_.push_back(obj);
mem_maps_.Put(obj, mem_map);
size_t allocation_size = mem_map->Size();
@@ -75,7 +76,7 @@
return obj;
}
-size_t LargeObjectMapSpace::Free(Thread* self, Object* ptr) {
+size_t LargeObjectMapSpace::Free(Thread* self, mirror::Object* ptr) {
MutexLock mu(self, lock_);
MemMaps::iterator found = mem_maps_.find(ptr);
CHECK(found != mem_maps_.end()) << "Attempted to free large object which was not live";
@@ -88,14 +89,14 @@
return allocation_size;
}
-size_t LargeObjectMapSpace::AllocationSize(const Object* obj) {
+size_t LargeObjectMapSpace::AllocationSize(const mirror::Object* obj) {
MutexLock mu(Thread::Current(), lock_);
- MemMaps::iterator found = mem_maps_.find(const_cast<Object*>(obj));
+ MemMaps::iterator found = mem_maps_.find(const_cast<mirror::Object*>(obj));
CHECK(found != mem_maps_.end()) << "Attempted to get size of a large object which is not live";
return found->second->Size();
}
-size_t LargeObjectSpace::FreeList(Thread* self, size_t num_ptrs, Object** ptrs) {
+size_t LargeObjectSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
size_t total = 0;
for (size_t i = 0; i < num_ptrs; ++i) {
if (kDebugSpaces) {
@@ -115,9 +116,9 @@
}
}
-bool LargeObjectMapSpace::Contains(const Object* obj) const {
+bool LargeObjectMapSpace::Contains(const mirror::Object* obj) const {
MutexLock mu(Thread::Current(), lock_);
- return mem_maps_.find(const_cast<Object*>(obj)) != mem_maps_.end();
+ return mem_maps_.find(const_cast<mirror::Object*>(obj)) != mem_maps_.end();
}
FreeListSpace* FreeListSpace::Create(const std::string& name, byte* requested_begin, size_t size) {
@@ -191,7 +192,7 @@
}
}
-size_t FreeListSpace::Free(Thread* self, Object* obj) {
+size_t FreeListSpace::Free(Thread* self, mirror::Object* obj) {
MutexLock mu(self, lock_);
CHECK(Contains(obj));
// Check adjacent chunks to see if we need to combine.
@@ -220,7 +221,7 @@
return allocation_size;
}
-bool FreeListSpace::Contains(const Object* obj) const {
+bool FreeListSpace::Contains(const mirror::Object* obj) const {
return mem_map_->HasAddress(obj);
}
@@ -228,13 +229,13 @@
return chunk + chunk->GetSize() / kAlignment;
}
-size_t FreeListSpace::AllocationSize(const Object* obj) {
- Chunk* chunk = ChunkFromAddr(const_cast<Object*>(obj));
+size_t FreeListSpace::AllocationSize(const mirror::Object* obj) {
+ Chunk* chunk = ChunkFromAddr(const_cast<mirror::Object*>(obj));
CHECK(!chunk->IsFree());
return chunk->GetSize();
}
-Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes) {
+mirror::Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes) {
MutexLock mu(self, lock_);
num_bytes = RoundUp(num_bytes, kAlignment);
Chunk temp;
@@ -261,7 +262,7 @@
total_objects_allocated_++;
num_bytes_allocated_ += num_bytes;
total_bytes_allocated_ += num_bytes;
- return reinterpret_cast<Object*>(addr);
+ return reinterpret_cast<mirror::Object*>(addr);
}
void FreeListSpace::Dump(std::ostream& os) const{
diff --git a/src/gc/large_object_space.h b/src/gc/large_object_space.h
index 979fce6..c34dbcc 100644
--- a/src/gc/large_object_space.h
+++ b/src/gc/large_object_space.h
@@ -18,8 +18,13 @@
#define ART_SRC_GC_LARGE_OBJECT_SPACE_H_
#include "space.h"
+#include "safe_map.h"
+
+#include <set>
+#include <vector>
namespace art {
+class SpaceSetMap;
class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
public:
@@ -64,7 +69,7 @@
return total_objects_allocated_;
}
- size_t FreeList(Thread* self, size_t num_ptrs, Object** ptrs);
+ size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs);
protected:
@@ -90,19 +95,19 @@
static LargeObjectMapSpace* Create(const std::string& name);
// Return the storage space required by obj.
- virtual size_t AllocationSize(const Object* obj);
- virtual Object* Alloc(Thread* self, size_t num_bytes);
- size_t Free(Thread* self, Object* ptr);
+ virtual size_t AllocationSize(const mirror::Object* obj);
+ virtual mirror::Object* Alloc(Thread* self, size_t num_bytes);
+ size_t Free(Thread* self, mirror::Object* ptr);
virtual void Walk(DlMallocSpace::WalkCallback, void* arg);
- virtual bool Contains(const Object* obj) const;
+ virtual bool Contains(const mirror::Object* obj) const;
private:
LargeObjectMapSpace(const std::string& name);
virtual ~LargeObjectMapSpace() {}
// Used to ensure mutual exclusion when the allocation spaces data structures are being modified.
mutable Mutex lock_;
- std::vector<Object*> large_objects_;
- typedef SafeMap<Object*, MemMap*> MemMaps;
+ std::vector<mirror::Object*> large_objects_;
+ typedef SafeMap<mirror::Object*, MemMap*> MemMaps;
MemMaps mem_maps_;
};
@@ -111,10 +116,10 @@
virtual ~FreeListSpace();
static FreeListSpace* Create(const std::string& name, byte* requested_begin, size_t capacity);
- size_t AllocationSize(const Object* obj);
- Object* Alloc(Thread* self, size_t num_bytes);
- size_t Free(Thread* self, Object* obj);
- bool Contains(const Object* obj) const;
+ size_t AllocationSize(const mirror::Object* obj);
+ mirror::Object* Alloc(Thread* self, size_t num_bytes);
+ size_t Free(Thread* self, mirror::Object* obj);
+ bool Contains(const mirror::Object* obj) const;
void Walk(DlMallocSpace::WalkCallback callback, void* arg);
// Address at which the space begins
diff --git a/src/gc/mark_sweep-inl.h b/src/gc/mark_sweep-inl.h
new file mode 100644
index 0000000..7265023
--- /dev/null
+++ b/src/gc/mark_sweep-inl.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_MARK_SWEEP_INL_H_
+#define ART_SRC_GC_MARK_SWEEP_INL_H_
+
+#include "heap.h"
+#include "mirror/class.h"
+#include "mirror/field.h"
+#include "mirror/object_array.h"
+
+namespace art {
+
+template <typename MarkVisitor>
+inline void MarkSweep::ScanObjectVisit(const mirror::Object* obj, const MarkVisitor& visitor) {
+ DCHECK(obj != NULL);
+ if (kIsDebugBuild && !IsMarked(obj)) {
+ heap_->DumpSpaces();
+ LOG(FATAL) << "Scanning unmarked object " << obj;
+ }
+ mirror::Class* klass = obj->GetClass();
+ DCHECK(klass != NULL);
+ if (klass == java_lang_Class_) {
+ DCHECK_EQ(klass->GetClass(), java_lang_Class_);
+ if (kCountScannedTypes) {
+ ++class_count_;
+ }
+ VisitClassReferences(klass, obj, visitor);
+ } else if (klass->IsArrayClass()) {
+ if (kCountScannedTypes) {
+ ++array_count_;
+ }
+ visitor(obj, klass, mirror::Object::ClassOffset(), false);
+ if (klass->IsObjectArrayClass()) {
+ VisitObjectArrayReferences(obj->AsObjectArray<mirror::Object>(), visitor);
+ }
+ } else {
+ if (kCountScannedTypes) {
+ ++other_count_;
+ }
+ VisitOtherReferences(klass, obj, visitor);
+ if (UNLIKELY(klass->IsReferenceClass())) {
+ DelayReferenceReferent(const_cast<mirror::Object*>(obj));
+ }
+ }
+}
+
+template <typename Visitor>
+inline void MarkSweep::VisitObjectReferences(const mirror::Object* obj, const Visitor& visitor)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
+ Locks::mutator_lock_) {
+ DCHECK(obj != NULL);
+ DCHECK(obj->GetClass() != NULL);
+
+ mirror::Class* klass = obj->GetClass();
+ DCHECK(klass != NULL);
+ if (klass == mirror::Class::GetJavaLangClass()) {
+ DCHECK_EQ(klass->GetClass(), mirror::Class::GetJavaLangClass());
+ VisitClassReferences(klass, obj, visitor);
+ } else {
+ if (klass->IsArrayClass()) {
+ visitor(obj, klass, mirror::Object::ClassOffset(), false);
+ if (klass->IsObjectArrayClass()) {
+ VisitObjectArrayReferences(obj->AsObjectArray<mirror::Object>(), visitor);
+ }
+ } else {
+ VisitOtherReferences(klass, obj, visitor);
+ }
+ }
+}
+
+template <typename Visitor>
+inline void MarkSweep::VisitInstanceFieldsReferences(const mirror::Class* klass,
+ const mirror::Object* obj,
+ const Visitor& visitor)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+ DCHECK(obj != NULL);
+ DCHECK(klass != NULL);
+ VisitFieldsReferences(obj, klass->GetReferenceInstanceOffsets(), false, visitor);
+}
+
+template <typename Visitor>
+inline void MarkSweep::VisitClassReferences(const mirror::Class* klass, const mirror::Object* obj,
+ const Visitor& visitor)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+ VisitInstanceFieldsReferences(klass, obj, visitor);
+ VisitStaticFieldsReferences(obj->AsClass(), visitor);
+}
+
+template <typename Visitor>
+inline void MarkSweep::VisitStaticFieldsReferences(const mirror::Class* klass,
+ const Visitor& visitor)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+ DCHECK(klass != NULL);
+ VisitFieldsReferences(klass, klass->GetReferenceStaticOffsets(), true, visitor);
+}
+
+template <typename Visitor>
+inline void MarkSweep::VisitFieldsReferences(const mirror::Object* obj, uint32_t ref_offsets,
+ bool is_static, const Visitor& visitor) {
+ if (LIKELY(ref_offsets != CLASS_WALK_SUPER)) {
+ // Found a reference offset bitmap. Mark the specified offsets.
+ while (ref_offsets != 0) {
+ size_t right_shift = CLZ(ref_offsets);
+ MemberOffset field_offset = CLASS_OFFSET_FROM_CLZ(right_shift);
+ const mirror::Object* ref = obj->GetFieldObject<const mirror::Object*>(field_offset, false);
+ visitor(obj, ref, field_offset, is_static);
+ ref_offsets &= ~(CLASS_HIGH_BIT >> right_shift);
+ }
+ } else {
+ // There is no reference offset bitmap. In the non-static case,
+ // walk up the class inheritance hierarchy and find reference
+ // offsets the hard way. In the static case, just consider this
+ // class.
+ for (const mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
+ klass != NULL;
+ klass = is_static ? NULL : klass->GetSuperClass()) {
+ size_t num_reference_fields = (is_static
+ ? klass->NumReferenceStaticFields()
+ : klass->NumReferenceInstanceFields());
+ for (size_t i = 0; i < num_reference_fields; ++i) {
+ mirror::Field* field = (is_static ? klass->GetStaticField(i)
+ : klass->GetInstanceField(i));
+ MemberOffset field_offset = field->GetOffset();
+ const mirror::Object* ref = obj->GetFieldObject<const mirror::Object*>(field_offset, false);
+ visitor(obj, ref, field_offset, is_static);
+ }
+ }
+ }
+}
+
+template <typename Visitor>
+inline void MarkSweep::VisitObjectArrayReferences(const mirror::ObjectArray<mirror::Object>* array,
+ const Visitor& visitor) {
+ const int32_t length = array->GetLength();
+ for (int32_t i = 0; i < length; ++i) {
+ const mirror::Object* element = array->GetWithoutChecks(i);
+ const size_t width = sizeof(mirror::Object*);
+ MemberOffset offset = MemberOffset(i * width + mirror::Array::DataOffset(width).Int32Value());
+ visitor(array, element, offset, false);
+ }
+}
+
+} // namespace art
+
+#endif // ART_SRC_GC_MARK_SWEEP_INL_H_
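
The ref_offsets word consumed by VisitFieldsReferences above is a per-class bitmap: each set bit, counted from the most significant end, stands for one reference field, CLZ finds the next set bit, and the loop clears it after visiting. A standalone sketch of the same peeling loop, using plain uint32_t arithmetic instead of the CLASS_* macros (the bitmap value and field numbering are made up for illustration):

#include <stdint.h>
#include <stdio.h>

int main() {
  // Hypothetical bitmap: the two most significant set bits stand for two reference fields.
  uint32_t ref_offsets = 0x90000000u;
  while (ref_offsets != 0) {
    uint32_t right_shift = __builtin_clz(ref_offsets);  // CLZ() in the code above
    // MarkSweep turns right_shift into a MemberOffset via CLASS_OFFSET_FROM_CLZ.
    printf("visit reference field #%u\n", right_shift);
    ref_offsets &= ~(0x80000000u >> right_shift);       // clear the bit just visited
  }
  return 0;
}
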
diff --git a/src/gc/mark_sweep.cc b/src/gc/mark_sweep.cc
index 7c52c83..40102b2 100644
--- a/src/gc/mark_sweep.cc
+++ b/src/gc/mark_sweep.cc
@@ -25,22 +25,32 @@
#include "base/logging.h"
#include "base/macros.h"
#include "card_table.h"
-#include "class_loader.h"
-#include "dex_cache.h"
+#include "card_table-inl.h"
#include "heap.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "large_object_space.h"
#include "monitor.h"
-#include "object.h"
+#include "mark_sweep-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/class_loader.h"
+#include "mirror/dex_cache.h"
+#include "mirror/field.h"
+#include "mirror/field-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array.h"
+#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "space.h"
+#include "space_bitmap-inl.h"
#include "timing_logger.h"
#include "thread.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"
+using namespace art::mirror;
+
namespace art {
// Performance options.
@@ -186,7 +196,7 @@
timings_.AddSplit("ReMarkRoots");
// Scan dirty objects, this is only required if we are not doing concurrent GC.
- RecursiveMarkDirtyObjects();
+ RecursiveMarkDirtyObjects(CardTable::kCardDirty);
}
ProcessReferences(self);
@@ -700,7 +710,7 @@
Runtime::Current()->VisitRoots(ReMarkObjectVisitor, this);
}
-void MarkSweep::SweepJniWeakGlobals(Heap::IsMarkedTester is_marked, void* arg) {
+void MarkSweep::SweepJniWeakGlobals(IsMarkedTester is_marked, void* arg) {
JavaVMExt* vm = Runtime::Current()->GetJavaVM();
MutexLock mu(Thread::Current(), vm->weak_globals_lock);
IndirectReferenceTable* table = &vm->weak_globals;
diff --git a/src/gc/mark_sweep.h b/src/gc/mark_sweep.h
index 3581d98..0d43bee 100644
--- a/src/gc/mark_sweep.h
+++ b/src/gc/mark_sweep.h
@@ -14,28 +14,38 @@
* limitations under the License.
*/
-#ifndef ART_SRC_MARK_SWEEP_H_
-#define ART_SRC_MARK_SWEEP_H_
+#ifndef ART_SRC_GC_MARK_SWEEP_H_
+#define ART_SRC_GC_MARK_SWEEP_H_
-#include "atomic_stack.h"
+#include "atomic_integer.h"
#include "base/macros.h"
+#include "base/mutex.h"
#include "garbage_collector.h"
-#include "heap_bitmap.h"
-#include "object.h"
+#include "gc_type.h"
#include "offsets.h"
+#include "root_visitor.h"
+#include "timing_logger.h"
+#include "UniquePtr.h"
namespace art {
-
+namespace mirror {
+class Class;
+class Object;
+template<class T> class ObjectArray;
+}
+template <typename T> class AtomicStack;
class Barrier;
class CheckObjectVisitor;
-class Class;
+class ContinuousSpace;
class Heap;
class MarkIfReachesAllocspaceVisitor;
class ModUnionClearCardVisitor;
class ModUnionVisitor;
class ModUnionTableBitmap;
-class Object;
-class TimingLogger;
+typedef AtomicStack<mirror::Object*> ObjectStack;
+class SpaceBitmap;
+class StackVisitor;
+class Thread;
class MarkStackChunk;
class MarkSweep : public GarbageCollector {
@@ -79,7 +89,9 @@
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
// Verify that image roots point to only marked objects within the alloc space.
- void VerifyImageRoots() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ void VerifyImageRoots()
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Builds a mark stack and recursively mark until it empties.
void RecursiveMark()
@@ -88,8 +100,8 @@
// Make a space immune, immune spaces are assumed to have all live objects marked.
void ImmuneSpace(ContinuousSpace* space)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);;
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Bind the live bits to the mark bits of bitmaps based on the gc type.
virtual void BindBitmaps()
@@ -102,7 +114,7 @@
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
// Builds a mark stack with objects on dirty cards and recursively mark until it empties.
- void RecursiveMarkDirtyObjects(byte minimum_age = CardTable::kCardDirty)
+ void RecursiveMarkDirtyObjects(byte minimum_age)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -130,61 +142,31 @@
virtual void SwapBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
void SwapLargeObjects() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- Object* GetClearedReferences() {
+ mirror::Object* GetClearedReferences() {
return cleared_reference_list_;
}
// Proxy for external access to ScanObject.
- void ScanRoot(const Object* obj)
+ void ScanRoot(const mirror::Object* obj)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Blackens an object.
- void ScanObject(const Object* obj)
+ void ScanObject(const mirror::Object* obj)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // TODO: enable thread safety analysis when in use by multiple worker threads.
template <typename MarkVisitor>
- void ScanObjectVisit(const Object* obj, const MarkVisitor& visitor)
- NO_THREAD_SAFETY_ANALYSIS {
- DCHECK(obj != NULL);
- if (kIsDebugBuild && !IsMarked(obj)) {
- heap_->DumpSpaces();
- LOG(FATAL) << "Scanning unmarked object " << obj;
- }
- Class* klass = obj->GetClass();
- DCHECK(klass != NULL);
- if (klass == java_lang_Class_) {
- DCHECK_EQ(klass->GetClass(), java_lang_Class_);
- if (kCountScannedTypes) {
- ++class_count_;
- }
- VisitClassReferences(klass, obj, visitor);
- } else if (klass->IsArrayClass()) {
- if (kCountScannedTypes) {
- ++array_count_;
- }
- visitor(obj, klass, Object::ClassOffset(), false);
- if (klass->IsObjectArrayClass()) {
- VisitObjectArrayReferences(obj->AsObjectArray<Object>(), visitor);
- }
- } else {
- if (kCountScannedTypes) {
- ++other_count_;
- }
- VisitOtherReferences(klass, obj, visitor);
- if (UNLIKELY(klass->IsReferenceClass())) {
- DelayReferenceReferent(const_cast<Object*>(obj));
- }
- }
- }
+ void ScanObjectVisit(const mirror::Object* obj, const MarkVisitor& visitor)
+ NO_THREAD_SAFETY_ANALYSIS;
- void SetFinger(Object* new_finger) {
+ void SetFinger(mirror::Object* new_finger) {
finger_ = new_finger;
}
void DisableFinger() {
- SetFinger(reinterpret_cast<Object*>(~static_cast<uintptr_t>(0)));
+ SetFinger(reinterpret_cast<mirror::Object*>(~static_cast<uintptr_t>(0)));
}
size_t GetFreedBytes() const {
@@ -212,7 +194,7 @@
}
// Everything inside the immune range is assumed to be marked.
- void SetImmuneRange(Object* begin, Object* end);
+ void SetImmuneRange(mirror::Object* begin, mirror::Object* end);
void SweepSystemWeaks()
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
@@ -221,52 +203,33 @@
void SweepSystemWeaksArray(ObjectStack* allocations)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- static bool VerifyIsLiveCallback(const Object* obj, void* arg)
+ static bool VerifyIsLiveCallback(const mirror::Object* obj, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
void VerifySystemWeaks()
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
// Verify that an object is live, either in a live bitmap or in the allocation stack.
- void VerifyIsLive(const Object* obj)
+ void VerifyIsLive(const mirror::Object* obj)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
template <typename Visitor>
- static void VisitObjectReferences(const Object* obj, const Visitor& visitor)
+ static void VisitObjectReferences(const mirror::Object* obj, const Visitor& visitor)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
- Locks::mutator_lock_) {
- DCHECK(obj != NULL);
- DCHECK(obj->GetClass() != NULL);
+ Locks::mutator_lock_);
- Class* klass = obj->GetClass();
- DCHECK(klass != NULL);
- if (klass == Class::GetJavaLangClass()) {
- DCHECK_EQ(klass->GetClass(), Class::GetJavaLangClass());
- VisitClassReferences(klass, obj, visitor);
- } else {
- if (klass->IsArrayClass()) {
- visitor(obj, klass, Object::ClassOffset(), false);
- if (klass->IsObjectArrayClass()) {
- VisitObjectArrayReferences(obj->AsObjectArray<Object>(), visitor);
- }
- } else {
- VisitOtherReferences(klass, obj, visitor);
- }
- }
- }
-
- static void MarkObjectCallback(const Object* root, void* arg)
+ static void MarkObjectCallback(const mirror::Object* root, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- static void MarkRootParallelCallback(const Object* root, void* arg);
+ static void MarkRootParallelCallback(const mirror::Object* root, void* arg);
// Marks an object.
- void MarkObject(const Object* obj)
+ void MarkObject(const mirror::Object* obj)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- void MarkRoot(const Object* obj)
+ void MarkRoot(const mirror::Object* obj)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
@@ -277,45 +240,46 @@
protected:
// Returns true if the object has its bit set in the mark bitmap.
- bool IsMarked(const Object* object) const;
+ bool IsMarked(const mirror::Object* object) const;
- static bool IsMarkedCallback(const Object* object, void* arg)
+ static bool IsMarkedCallback(const mirror::Object* object, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- static bool IsMarkedArrayCallback(const Object* object, void* arg)
+ static bool IsMarkedArrayCallback(const mirror::Object* object, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- static void ReMarkObjectVisitor(const Object* root, void* arg)
+ static void ReMarkObjectVisitor(const mirror::Object* root, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- static void VerifyImageRootVisitor(Object* root, void* arg)
+ static void VerifyImageRootVisitor(mirror::Object* root, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
Locks::mutator_lock_);
- void MarkObjectNonNull(const Object* obj, bool check_finger)
+ void MarkObjectNonNull(const mirror::Object* obj, bool check_finger)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- void MarkObjectNonNullParallel(const Object* obj, bool check_finger);
+ void MarkObjectNonNullParallel(const mirror::Object* obj, bool check_finger);
- bool MarkLargeObject(const Object* obj)
+ bool MarkLargeObject(const mirror::Object* obj)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
// Returns true if we need to add obj to a mark stack.
- bool MarkObjectParallel(const Object* obj) NO_THREAD_SAFETY_ANALYSIS;
+ bool MarkObjectParallel(const mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
- static void SweepCallback(size_t num_ptrs, Object** ptrs, void* arg)
+ static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
// Special sweep for zygote that just marks objects / dirties cards.
- static void ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg)
+ static void ZygoteSweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- void CheckReference(const Object* obj, const Object* ref, MemberOffset offset, bool is_static)
+ void CheckReference(const mirror::Object* obj, const mirror::Object* ref, MemberOffset offset,
+ bool is_static)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
- void CheckObject(const Object* obj)
+ void CheckObject(const mirror::Object* obj)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
// Verify the roots of the heap and print out information related to any invalid roots.
@@ -326,90 +290,41 @@
// Expand mark stack to 2x its current size. Thread safe.
void ExpandMarkStack();
- static void VerifyRootCallback(const Object* root, void* arg, size_t vreg,
+ static void VerifyRootCallback(const mirror::Object* root, void* arg, size_t vreg,
const StackVisitor *visitor);
- void VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor)
+ void VerifyRoot(const mirror::Object* root, size_t vreg, const StackVisitor* visitor)
NO_THREAD_SAFETY_ANALYSIS;
template <typename Visitor>
- static void VisitInstanceFieldsReferences(const Class* klass, const Object* obj,
+ static void VisitInstanceFieldsReferences(const mirror::Class* klass, const mirror::Object* obj,
const Visitor& visitor)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
- DCHECK(obj != NULL);
- DCHECK(klass != NULL);
- VisitFieldsReferences(obj, klass->GetReferenceInstanceOffsets(), false, visitor);
- }
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
// Visit the header, static field references, and interface pointers of a class object.
template <typename Visitor>
- static void VisitClassReferences(const Class* klass, const Object* obj,
+ static void VisitClassReferences(const mirror::Class* klass, const mirror::Object* obj,
const Visitor& visitor)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
- VisitInstanceFieldsReferences(klass, obj, visitor);
- VisitStaticFieldsReferences(obj->AsClass(), visitor);
- }
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
template <typename Visitor>
- static void VisitStaticFieldsReferences(const Class* klass, const Visitor& visitor)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
- DCHECK(klass != NULL);
- VisitFieldsReferences(klass, klass->GetReferenceStaticOffsets(), true, visitor);
- }
+ static void VisitStaticFieldsReferences(const mirror::Class* klass, const Visitor& visitor)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
template <typename Visitor>
- static void VisitFieldsReferences(const Object* obj, uint32_t ref_offsets, bool is_static,
- const Visitor& visitor)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
- if (LIKELY(ref_offsets != CLASS_WALK_SUPER)) {
- // Found a reference offset bitmap. Mark the specified offsets.
- while (ref_offsets != 0) {
- size_t right_shift = CLZ(ref_offsets);
- MemberOffset field_offset = CLASS_OFFSET_FROM_CLZ(right_shift);
- const Object* ref = obj->GetFieldObject<const Object*>(field_offset, false);
- visitor(obj, ref, field_offset, is_static);
- ref_offsets &= ~(CLASS_HIGH_BIT >> right_shift);
- }
- } else {
- // There is no reference offset bitmap. In the non-static case,
- // walk up the class inheritance hierarchy and find reference
- // offsets the hard way. In the static case, just consider this
- // class.
- for (const Class* klass = is_static ? obj->AsClass() : obj->GetClass();
- klass != NULL;
- klass = is_static ? NULL : klass->GetSuperClass()) {
- size_t num_reference_fields = (is_static
- ? klass->NumReferenceStaticFields()
- : klass->NumReferenceInstanceFields());
- for (size_t i = 0; i < num_reference_fields; ++i) {
- Field* field = (is_static
- ? klass->GetStaticField(i)
- : klass->GetInstanceField(i));
- MemberOffset field_offset = field->GetOffset();
- const Object* ref = obj->GetFieldObject<const Object*>(field_offset, false);
- visitor(obj, ref, field_offset, is_static);
- }
- }
- }
- }
+ static void VisitFieldsReferences(const mirror::Object* obj, uint32_t ref_offsets, bool is_static,
+ const Visitor& visitor)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
// Visit all of the references in an object array.
template <typename Visitor>
- static void VisitObjectArrayReferences(const ObjectArray<Object>* array,
+ static void VisitObjectArrayReferences(const mirror::ObjectArray<mirror::Object>* array,
const Visitor& visitor)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
- const int32_t length = array->GetLength();
- for (int32_t i = 0; i < length; ++i) {
- const Object* element = array->GetWithoutChecks(i);
- const size_t width = sizeof(Object*);
- MemberOffset offset = MemberOffset(i * width + Array::DataOffset(width).Int32Value());
- visitor(array, element, offset, false);
- }
- }
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
// Visits the header and field references of a data object.
template <typename Visitor>
- static void VisitOtherReferences(const Class* klass, const Object* obj,
+ static void VisitOtherReferences(const mirror::Class* klass, const mirror::Object* obj,
const Visitor& visitor)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
return VisitInstanceFieldsReferences(klass, obj, visitor);
@@ -421,7 +336,7 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Schedules an unmarked object for reference processing.
- void DelayReferenceReferent(Object* reference)
+ void DelayReferenceReferent(mirror::Object* reference)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
// Recursively blackens objects on the mark stack.
@@ -433,25 +348,25 @@
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void EnqueueFinalizerReferences(Object** ref)
+ void EnqueueFinalizerReferences(mirror::Object** ref)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void PreserveSomeSoftReferences(Object** ref)
+ void PreserveSomeSoftReferences(mirror::Object** ref)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void ClearWhiteReferences(Object** list)
+ void ClearWhiteReferences(mirror::Object** list)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
- void ProcessReferences(Object** soft_references, bool clear_soft_references,
- Object** weak_references,
- Object** finalizer_references,
- Object** phantom_references)
+ void ProcessReferences(mirror::Object** soft_references, bool clear_soft_references,
+ mirror::Object** weak_references,
+ mirror::Object** finalizer_references,
+ mirror::Object** phantom_references)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SweepJniWeakGlobals(Heap::IsMarkedTester is_marked, void* arg)
+ void SweepJniWeakGlobals(IsMarkedTester is_marked, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
// Whether or not we count how many of each type of object were scanned.
@@ -461,21 +376,21 @@
SpaceBitmap* current_mark_bitmap_;
// Cache java.lang.Class for optimization.
- Class* java_lang_Class_;
+ mirror::Class* java_lang_Class_;
ObjectStack* mark_stack_;
- Object* finger_;
+ mirror::Object* finger_;
// Immune range, every object inside the immune range is assumed to be marked.
- Object* immune_begin_;
- Object* immune_end_;
+ mirror::Object* immune_begin_;
+ mirror::Object* immune_end_;
- Object* soft_reference_list_;
- Object* weak_reference_list_;
- Object* finalizer_reference_list_;
- Object* phantom_reference_list_;
- Object* cleared_reference_list_;
+ mirror::Object* soft_reference_list_;
+ mirror::Object* weak_reference_list_;
+ mirror::Object* finalizer_reference_list_;
+ mirror::Object* phantom_reference_list_;
+ mirror::Object* cleared_reference_list_;
AtomicInteger freed_bytes_;
AtomicInteger freed_objects_;
@@ -529,4 +444,4 @@
} // namespace art
-#endif // ART_SRC_MARK_SWEEP_H_
+#endif // ART_SRC_GC_MARK_SWEEP_H_
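
The template bodies removed from this header now live in mark_sweep-inl.h, so callers only see declarations here; any MarkVisitor handed to ScanObjectVisit must be callable as visitor(obj, ref, offset, is_static). A minimal conforming functor, sketched on the assumption that mirror/object.h and offsets.h are the only headers needed (NullReferenceCounter is a hypothetical name, not part of this change):

#include "mirror/object.h"
#include "offsets.h"

namespace art {

// Counts null reference fields seen while an object is scanned.
class NullReferenceCounter {
 public:
  explicit NullReferenceCounter(size_t* counter) : counter_(counter) {}
  void operator()(const mirror::Object* /* obj */, const mirror::Object* ref,
                  MemberOffset /* offset */, bool /* is_static */) const {
    if (ref == NULL) {
      ++*counter_;
    }
  }
 private:
  size_t* const counter_;
};

}  // namespace art
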
diff --git a/src/gc/mod_union_table-inl.h b/src/gc/mod_union_table-inl.h
new file mode 100644
index 0000000..c1c69fb
--- /dev/null
+++ b/src/gc/mod_union_table-inl.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_MOD_UNION_TABLE_INL_H_
+#define ART_SRC_GC_MOD_UNION_TABLE_INL_H_
+
+#include "mod_union_table.h"
+
+namespace art {
+
+template <typename Implementation>
+class ModUnionTableToZygoteAllocspace : public Implementation {
+public:
+ ModUnionTableToZygoteAllocspace(Heap* heap) : Implementation(heap) {
+ }
+
+ bool AddReference(const mirror::Object* /* obj */, const mirror::Object* ref) {
+ const Spaces& spaces = Implementation::GetHeap()->GetSpaces();
+ for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
+ if ((*it)->Contains(ref)) {
+ return (*it)->IsAllocSpace();
+ }
+ }
+ // Assume it points to a large object.
+ // TODO: Check.
+ return true;
+ }
+};
+
+template <typename Implementation>
+class ModUnionTableToAllocspace : public Implementation {
+public:
+ ModUnionTableToAllocspace(Heap* heap) : Implementation(heap) {
+ }
+
+ bool AddReference(const mirror::Object* /* obj */, const mirror::Object* ref) {
+ const Spaces& spaces = Implementation::GetHeap()->GetSpaces();
+ for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
+ if ((*it)->Contains(ref)) {
+ return (*it)->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect;
+ }
+ }
+ // Assume it points to a large object.
+ // TODO: Check.
+ return true;
+ }
+};
+
+} // namespace art
+
+#endif // ART_SRC_GC_MOD_UNION_TABLE_INL_H_
diff --git a/src/gc/mod_union_table.cc b/src/gc/mod_union_table.cc
index 8953c5a..da950bb 100644
--- a/src/gc/mod_union_table.cc
+++ b/src/gc/mod_union_table.cc
@@ -17,12 +17,22 @@
#include "mod_union_table.h"
#include "base/stl_util.h"
+#include "card_table-inl.h"
#include "heap.h"
#include "heap_bitmap.h"
#include "mark_sweep.h"
+#include "mark_sweep-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/field-inl.h"
+#include "mirror/object_array-inl.h"
#include "space.h"
+#include "space_bitmap-inl.h"
+#include "thread.h"
#include "UniquePtr.h"
+using namespace art::mirror;
+
namespace art {
class MarkIfReachesAllocspaceVisitor {
@@ -260,7 +270,7 @@
  // TODO: Fixme when annotalysis works with visitors.
void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */,
bool /* is_static */) const
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
Heap* heap = mod_union_table_->GetHeap();
if (ref != NULL && mod_union_table_->AddReference(obj, ref) &&
references_.find(ref) == references_.end()) {
diff --git a/src/gc/mod_union_table.h b/src/gc/mod_union_table.h
index f3da41c..23c0a51 100644
--- a/src/gc/mod_union_table.h
+++ b/src/gc/mod_union_table.h
@@ -14,23 +14,30 @@
* limitations under the License.
*/
-#ifndef ART_SRC_MOD_UNION_TABLE_H_
-#define ART_SRC_MOD_UNION_TABLE_H_
+#ifndef ART_SRC_GC_MOD_UNION_TABLE_H_
+#define ART_SRC_GC_MOD_UNION_TABLE_H_
-#include "heap.h"
+#include "globals.h"
#include "safe_map.h"
-#include "space.h"
+
+#include <set>
+#include <vector>
namespace art {
-
+namespace mirror {
+class Object;
+}
+class ContinuousSpace;
class Heap;
class HeapBitmap;
+class MarkSweep;
class Space;
+class SpaceBitmap;
// Base class
class ModUnionTable {
public:
- typedef std::vector<const Object*> ReferenceArray;
+ typedef std::vector<const mirror::Object*> ReferenceArray;
typedef std::set<byte*> ClearedCards;
ModUnionTable(Heap* heap) : heap_(heap) {
@@ -118,7 +125,7 @@
void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
// Function that tells whether or not to add a reference to the table.
- virtual bool AddReference(const Object* obj, const Object* ref) = 0;
+ virtual bool AddReference(const mirror::Object* obj, const mirror::Object* ref) = 0;
protected:
// Cleared card array, used to update the mod-union table.
@@ -155,44 +162,6 @@
ClearedCards cleared_cards_;
};
-template <typename Implementation>
-class ModUnionTableToZygoteAllocspace : public Implementation {
-public:
- ModUnionTableToZygoteAllocspace(Heap* heap) : Implementation(heap) {
- }
-
- bool AddReference(const Object* /* obj */, const Object* ref) {
- const Spaces& spaces = Implementation::GetHeap()->GetSpaces();
- for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
- if ((*it)->Contains(ref)) {
- return (*it)->IsAllocSpace();
- }
- }
- // Assume it points to a large object.
- // TODO: Check.
- return true;
- }
-};
-
-template <typename Implementation>
-class ModUnionTableToAllocspace : public Implementation {
-public:
- ModUnionTableToAllocspace(Heap* heap) : Implementation(heap) {
- }
-
- bool AddReference(const Object* /* obj */, const Object* ref) {
- const Spaces& spaces = Implementation::GetHeap()->GetSpaces();
- for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
- if ((*it)->Contains(ref)) {
- return (*it)->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect;
- }
- }
- // Assume it points to a large object.
- // TODO: Check.
- return true;
- }
-};
-
} // namespace art
-#endif // ART_SRC_MOD_UNION_TABLE_H_
+#endif // ART_SRC_GC_MOD_UNION_TABLE_H_
diff --git a/src/gc/partial_mark_sweep.cc b/src/gc/partial_mark_sweep.cc
index 64f09ff..f9c1787 100644
--- a/src/gc/partial_mark_sweep.cc
+++ b/src/gc/partial_mark_sweep.cc
@@ -14,32 +14,38 @@
* limitations under the License.
*/
+#include "partial_mark_sweep.h"
+
+#include "heap.h"
#include "large_object_space.h"
#include "partial_mark_sweep.h"
#include "space.h"
+#include "thread.h"
namespace art {
- PartialMarkSweep::PartialMarkSweep(Heap* heap, bool is_concurrent)
- : MarkSweep(heap, is_concurrent) {
- cumulative_timings_.SetName(GetName());
- }
- PartialMarkSweep::~PartialMarkSweep() {
+PartialMarkSweep::PartialMarkSweep(Heap* heap, bool is_concurrent)
+ : MarkSweep(heap, is_concurrent) {
+ cumulative_timings_.SetName(GetName());
+}
- }
+PartialMarkSweep::~PartialMarkSweep() {
- void PartialMarkSweep::BindBitmaps() {
- MarkSweep::BindBitmaps();
+}
- Spaces& spaces = GetHeap()->GetSpaces();
- WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
- // For partial GCs we need to bind the bitmap of the zygote space so that all objects in the
- // zygote space are viewed as marked.
- for (Spaces::iterator it = spaces.begin(); it != spaces.end(); ++it) {
- ContinuousSpace* space = *it;
- if (space->GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect) {
- ImmuneSpace(space);
- }
+void PartialMarkSweep::BindBitmaps() {
+ MarkSweep::BindBitmaps();
+
+ Spaces& spaces = GetHeap()->GetSpaces();
+ WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ // For partial GCs we need to bind the bitmap of the zygote space so that all objects in the
+ // zygote space are viewed as marked.
+ for (Spaces::iterator it = spaces.begin(); it != spaces.end(); ++it) {
+ ContinuousSpace* space = *it;
+ if (space->GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect) {
+ ImmuneSpace(space);
}
}
+}
+
} // namespace art
diff --git a/src/gc/partial_mark_sweep.h b/src/gc/partial_mark_sweep.h
index 80a1563..64c0bcd 100644
--- a/src/gc/partial_mark_sweep.h
+++ b/src/gc/partial_mark_sweep.h
@@ -14,27 +14,14 @@
* limitations under the License.
*/
-#ifndef ART_SRC_PARTIAL_MARK_SWEEP_H_
-#define ART_SRC_PARTIAL_MARK_SWEEP_H_
+#ifndef ART_SRC_GC_PARTIAL_MARK_SWEEP_H_
+#define ART_SRC_GC_PARTIAL_MARK_SWEEP_H_
#include "locks.h"
#include "mark_sweep.h"
-#include "utils.h"
namespace art {
-class Barrier;
-class CheckObjectVisitor;
-class Class;
-class Heap;
-class MarkIfReachesAllocspaceVisitor;
-class ModUnionClearCardVisitor;
-class ModUnionVisitor;
-class ModUnionTableBitmap;
-class Object;
-class TimingLogger;
-class MarkStackChunk;
-
class PartialMarkSweep : public MarkSweep {
public:
virtual GcType GetGcType() const {
@@ -53,4 +40,4 @@
} // namespace art
-#endif // ART_SRC_PARTIAL_MARK_SWEEP_H_
+#endif // ART_SRC_GC_PARTIAL_MARK_SWEEP_H_
diff --git a/src/gc/space.cc b/src/gc/space.cc
index 04f932d..9db84f2 100644
--- a/src/gc/space.cc
+++ b/src/gc/space.cc
@@ -19,10 +19,16 @@
#include "base/logging.h"
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
+#include "card_table.h"
#include "dlmalloc.h"
#include "image.h"
+#include "mirror/array.h"
+#include "mirror/abstract_method.h"
#include "os.h"
+#include "runtime.h"
#include "space_bitmap.h"
+#include "space_bitmap-inl.h"
+#include "thread.h"
#include "UniquePtr.h"
#include "utils.h"
@@ -204,12 +210,12 @@
mark_bitmap_->SetName(temp_name);
}
-Object* DlMallocSpace::AllocWithoutGrowthLocked(size_t num_bytes) {
+mirror::Object* DlMallocSpace::AllocWithoutGrowthLocked(size_t num_bytes) {
if (kDebugSpaces) {
num_bytes += sizeof(word);
}
- Object* result = reinterpret_cast<Object*>(mspace_calloc(mspace_, 1, num_bytes));
+ mirror::Object* result = reinterpret_cast<mirror::Object*>(mspace_calloc(mspace_, 1, num_bytes));
if (kDebugSpaces && result != NULL) {
CHECK(Contains(result)) << "Allocation (" << reinterpret_cast<void*>(result)
<< ") not in bounds of allocation space " << *this;
@@ -225,18 +231,18 @@
return result;
}
-Object* DlMallocSpace::Alloc(Thread* self, size_t num_bytes) {
+mirror::Object* DlMallocSpace::Alloc(Thread* self, size_t num_bytes) {
MutexLock mu(self, lock_);
return AllocWithoutGrowthLocked(num_bytes);
}
-Object* DlMallocSpace::AllocWithGrowth(Thread* self, size_t num_bytes) {
+mirror::Object* DlMallocSpace::AllocWithGrowth(Thread* self, size_t num_bytes) {
MutexLock mu(self, lock_);
// Grow as much as possible within the mspace.
size_t max_allowed = Capacity();
mspace_set_footprint_limit(mspace_, max_allowed);
// Try the allocation.
- Object* result = AllocWithoutGrowthLocked(num_bytes);
+ mirror::Object* result = AllocWithoutGrowthLocked(num_bytes);
// Shrink back down as small as possible.
size_t footprint = mspace_footprint(mspace_);
mspace_set_footprint_limit(mspace_, footprint);
@@ -301,7 +307,7 @@
return alloc_space;
}
-size_t DlMallocSpace::Free(Thread* self, Object* ptr) {
+size_t DlMallocSpace::Free(Thread* self, mirror::Object* ptr) {
MutexLock mu(self, lock_);
if (kDebugSpaces) {
CHECK(ptr != NULL);
@@ -317,13 +323,13 @@
return bytes_freed;
}
-size_t DlMallocSpace::FreeList(Thread* self, size_t num_ptrs, Object** ptrs) {
+size_t DlMallocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
DCHECK(ptrs != NULL);
// Don't need the lock to calculate the size of the freed pointers.
size_t bytes_freed = 0;
for (size_t i = 0; i < num_ptrs; i++) {
- Object* ptr = ptrs[i];
+ mirror::Object* ptr = ptrs[i];
const size_t look_ahead = 8;
if (kPrefetchDuringDlMallocFreeList && i + look_ahead < num_ptrs) {
// The head of chunk for the allocation is sizeof(size_t) behind the allocation.
@@ -397,12 +403,12 @@
}
// Virtual functions can't get inlined.
-inline size_t DlMallocSpace::InternalAllocationSize(const Object* obj) {
+inline size_t DlMallocSpace::InternalAllocationSize(const mirror::Object* obj) {
return mspace_usable_size(const_cast<void*>(reinterpret_cast<const void*>(obj))) +
kChunkOverhead;
}
-size_t DlMallocSpace::AllocationSize(const Object* obj) {
+size_t DlMallocSpace::AllocationSize(const mirror::Object* obj) {
return InternalAllocationSize(obj);
}
@@ -504,29 +510,29 @@
DCHECK_EQ(0, memcmp(&image_header, map->Begin(), sizeof(ImageHeader)));
Runtime* runtime = Runtime::Current();
- Object* jni_stub_array = image_header.GetImageRoot(ImageHeader::kJniStubArray);
- runtime->SetJniDlsymLookupStub(down_cast<ByteArray*>(jni_stub_array));
+ mirror::Object* jni_stub_array = image_header.GetImageRoot(ImageHeader::kJniStubArray);
+ runtime->SetJniDlsymLookupStub(down_cast<mirror::ByteArray*>(jni_stub_array));
- Object* ame_stub_array = image_header.GetImageRoot(ImageHeader::kAbstractMethodErrorStubArray);
- runtime->SetAbstractMethodErrorStubArray(down_cast<ByteArray*>(ame_stub_array));
+ mirror::Object* ame_stub_array = image_header.GetImageRoot(ImageHeader::kAbstractMethodErrorStubArray);
+ runtime->SetAbstractMethodErrorStubArray(down_cast<mirror::ByteArray*>(ame_stub_array));
- Object* resolution_stub_array =
+ mirror::Object* resolution_stub_array =
image_header.GetImageRoot(ImageHeader::kStaticResolutionStubArray);
runtime->SetResolutionStubArray(
- down_cast<ByteArray*>(resolution_stub_array), Runtime::kStaticMethod);
+ down_cast<mirror::ByteArray*>(resolution_stub_array), Runtime::kStaticMethod);
resolution_stub_array = image_header.GetImageRoot(ImageHeader::kUnknownMethodResolutionStubArray);
runtime->SetResolutionStubArray(
- down_cast<ByteArray*>(resolution_stub_array), Runtime::kUnknownMethod);
+ down_cast<mirror::ByteArray*>(resolution_stub_array), Runtime::kUnknownMethod);
- Object* resolution_method = image_header.GetImageRoot(ImageHeader::kResolutionMethod);
- runtime->SetResolutionMethod(down_cast<AbstractMethod*>(resolution_method));
+ mirror::Object* resolution_method = image_header.GetImageRoot(ImageHeader::kResolutionMethod);
+ runtime->SetResolutionMethod(down_cast<mirror::AbstractMethod*>(resolution_method));
- Object* callee_save_method = image_header.GetImageRoot(ImageHeader::kCalleeSaveMethod);
- runtime->SetCalleeSaveMethod(down_cast<AbstractMethod*>(callee_save_method), Runtime::kSaveAll);
+ mirror::Object* callee_save_method = image_header.GetImageRoot(ImageHeader::kCalleeSaveMethod);
+ runtime->SetCalleeSaveMethod(down_cast<mirror::AbstractMethod*>(callee_save_method), Runtime::kSaveAll);
callee_save_method = image_header.GetImageRoot(ImageHeader::kRefsOnlySaveMethod);
- runtime->SetCalleeSaveMethod(down_cast<AbstractMethod*>(callee_save_method), Runtime::kRefsOnly);
+ runtime->SetCalleeSaveMethod(down_cast<mirror::AbstractMethod*>(callee_save_method), Runtime::kRefsOnly);
callee_save_method = image_header.GetImageRoot(ImageHeader::kRefsAndArgsSaveMethod);
- runtime->SetCalleeSaveMethod(down_cast<AbstractMethod*>(callee_save_method), Runtime::kRefsAndArgs);
+ runtime->SetCalleeSaveMethod(down_cast<mirror::AbstractMethod*>(callee_save_method), Runtime::kRefsAndArgs);
ImageSpace* space = new ImageSpace(image_file_name, map.release());
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
@@ -548,7 +554,7 @@
byte* end = End();
while (current < end) {
DCHECK_ALIGNED(current, kObjectAlignment);
- const Object* obj = reinterpret_cast<const Object*>(current);
+ const mirror::Object* obj = reinterpret_cast<const mirror::Object*>(current);
live_bitmap->Set(obj);
current += RoundUp(obj->SizeOf(), kObjectAlignment);
}
diff --git a/src/gc/space.h b/src/gc/space.h
index 2ed4988..d2bcd53 100644
--- a/src/gc/space.h
+++ b/src/gc/space.h
@@ -31,10 +31,12 @@
static const bool kDebugSpaces = kIsDebugBuild;
+namespace mirror {
+class Object;
+} // namespace mirror
class DlMallocSpace;
class ImageSpace;
class LargeObjectSpace;
-class Object;
class SpaceBitmap;
enum GcRetentionPolicy {
@@ -57,7 +59,7 @@
public:
virtual bool CanAllocateInto() const = 0;
virtual bool IsCompactible() const = 0;
- virtual bool Contains(const Object* obj) const = 0;
+ virtual bool Contains(const mirror::Object* obj) const = 0;
virtual SpaceType GetType() const = 0;
virtual GcRetentionPolicy GetGcRetentionPolicy() const = 0;
virtual std::string GetName() const = 0;
@@ -108,16 +110,16 @@
virtual uint64_t GetTotalObjectsAllocated() const = 0;
// Allocate num_bytes without allowing growth.
- virtual Object* Alloc(Thread* self, size_t num_bytes) = 0;
+ virtual mirror::Object* Alloc(Thread* self, size_t num_bytes) = 0;
// Return the storage space required by obj.
- virtual size_t AllocationSize(const Object* obj) = 0;
+ virtual size_t AllocationSize(const mirror::Object* obj) = 0;
// Returns how many bytes were freed.
- virtual size_t Free(Thread* self, Object* ptr) = 0;
+ virtual size_t Free(Thread* self, mirror::Object* ptr) = 0;
// Returns how many bytes were freed.
- virtual size_t FreeList(Thread* self, size_t num_ptrs, Object** ptrs) = 0;
+ virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) = 0;
protected:
AllocSpace() {}
@@ -149,12 +151,12 @@
virtual SpaceBitmap* GetMarkBitmap() const = 0;
// Is object within this space?
- bool HasAddress(const Object* obj) const {
+ bool HasAddress(const mirror::Object* obj) const {
const byte* byte_ptr = reinterpret_cast<const byte*>(obj);
return Begin() <= byte_ptr && byte_ptr < End();
}
- virtual bool Contains(const Object* obj) const {
+ virtual bool Contains(const mirror::Object* obj) const {
return HasAddress(obj);
}
@@ -188,7 +190,7 @@
class DiscontinuousSpace : public virtual Space {
public:
// Is object within this space?
- virtual bool Contains(const Object* obj) const = 0;
+ virtual bool Contains(const mirror::Object* obj) const = 0;
virtual std::string GetName() const {
return name_;
@@ -267,15 +269,15 @@
size_t capacity, byte* requested_begin);
// Allocate num_bytes without allowing the underlying mspace to grow.
- virtual Object* AllocWithGrowth(Thread* self, size_t num_bytes);
+ virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes);
// Allocate num_bytes allowing the underlying mspace to grow.
- virtual Object* Alloc(Thread* self, size_t num_bytes);
+ virtual mirror::Object* Alloc(Thread* self, size_t num_bytes);
// Return the storage space required by obj.
- virtual size_t AllocationSize(const Object* obj);
- virtual size_t Free(Thread* self, Object* ptr);
- virtual size_t FreeList(Thread* self, size_t num_ptrs, Object** ptrs);
+ virtual size_t AllocationSize(const mirror::Object* obj);
+ virtual size_t Free(Thread* self, mirror::Object* ptr);
+ virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs);
void* MoreCore(intptr_t increment);
@@ -353,8 +355,8 @@
}
private:
- size_t InternalAllocationSize(const Object* obj);
- Object* AllocWithoutGrowthLocked(size_t num_bytes) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ size_t InternalAllocationSize(const mirror::Object* obj);
+ mirror::Object* AllocWithoutGrowthLocked(size_t num_bytes) EXCLUSIVE_LOCKS_REQUIRED(lock_);
UniquePtr<SpaceBitmap> live_bitmap_;
UniquePtr<SpaceBitmap> mark_bitmap_;
diff --git a/src/gc/space_bitmap-inl.h b/src/gc/space_bitmap-inl.h
new file mode 100644
index 0000000..e1fdd29
--- /dev/null
+++ b/src/gc/space_bitmap-inl.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_SPACE_BITMAP_INL_H_
+#define ART_SRC_GC_SPACE_BITMAP_INL_H_
+
+#include "base/logging.h"
+#include "cutils/atomic.h"
+
+namespace art {
+
+inline bool SpaceBitmap::AtomicTestAndSet(const mirror::Object* obj) {
+ uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
+ DCHECK_GE(addr, heap_begin_);
+ const uintptr_t offset = addr - heap_begin_;
+ const size_t index = OffsetToIndex(offset);
+ const word mask = OffsetToMask(offset);
+ word* const address = &bitmap_begin_[index];
+ DCHECK_LT(index, bitmap_size_ / kWordSize) << " bitmap_size_ = " << bitmap_size_;
+ word old_word;
+ do {
+ old_word = *address;
+ // Fast path: The bit is already set.
+ if ((old_word & mask) != 0) {
+ return true;
+ }
+ } while (UNLIKELY(android_atomic_cas(old_word, old_word | mask, address) != 0));
+ return false;
+}
+
+inline bool SpaceBitmap::Test(const mirror::Object* obj) const {
+ uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
+ DCHECK(HasAddress(obj)) << obj;
+ DCHECK(bitmap_begin_ != NULL);
+ DCHECK_GE(addr, heap_begin_);
+ const uintptr_t offset = addr - heap_begin_;
+ return (bitmap_begin_[OffsetToIndex(offset)] & OffsetToMask(offset)) != 0;
+}
+
+template <typename Visitor, typename FingerVisitor>
+void SpaceBitmap::VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end,
+ const Visitor& visitor,
+ const FingerVisitor& finger_visitor) const {
+ DCHECK_LT(visit_begin, visit_end);
+
+ const size_t word_span = kAlignment * kBitsPerWord; // Equals IndexToOffset(1).
+ const size_t bit_index_start = (visit_begin - heap_begin_) / kAlignment;
+ const size_t bit_index_end = (visit_end - heap_begin_ - 1) / kAlignment;
+
+ size_t word_start = bit_index_start / kBitsPerWord;
+ size_t word_end = bit_index_end / kBitsPerWord;
+ DCHECK_LT(word_end * kWordSize, Size());
+
+  // Left edge word; the leading left_bits bits (addresses below visit_begin) are trimmed below.
+ size_t edge_word = bitmap_begin_[word_start];
+
+ // Handle bits on the left first as a special case
+ size_t left_bits = bit_index_start & (kBitsPerWord - 1);
+ if (left_bits != 0) {
+ edge_word &= (1 << (kBitsPerWord - left_bits)) - 1;
+ }
+
+ // If word_start == word_end then handle this case at the same place we handle the right edge.
+ if (edge_word != 0 && word_start < word_end) {
+ uintptr_t ptr_base = IndexToOffset(word_start) + heap_begin_;
+ finger_visitor(reinterpret_cast<void*>(ptr_base + word_span));
+ do {
+ const size_t shift = CLZ(edge_word);
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
+ visitor(obj);
+ edge_word ^= static_cast<size_t>(kWordHighBitMask) >> shift;
+ } while (edge_word != 0);
+ }
+ word_start++;
+
+ for (size_t i = word_start; i < word_end; i++) {
+ size_t w = bitmap_begin_[i];
+ if (w != 0) {
+ uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
+ finger_visitor(reinterpret_cast<void*>(ptr_base + word_span));
+ do {
+ const size_t shift = CLZ(w);
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
+ visitor(obj);
+ w ^= static_cast<size_t>(kWordHighBitMask) >> shift;
+ } while (w != 0);
+ }
+ }
+
+ // Handle the right edge, and also the left edge if both edges are on the same word.
+ size_t right_bits = bit_index_end & (kBitsPerWord - 1);
+
+  // If the range fit in a single word, keep edge_word (it already has the left bits trimmed);
+  // otherwise load the right edge word.
+ if (word_start <= word_end) {
+ edge_word = bitmap_begin_[word_end];
+ }
+
+ // Bits that we trim off the right.
+ edge_word &= ~((static_cast<size_t>(kWordHighBitMask) >> right_bits) - 1);
+ uintptr_t ptr_base = IndexToOffset(word_end) + heap_begin_;
+ finger_visitor(reinterpret_cast<void*>(ptr_base + word_span));
+ while (edge_word != 0) {
+ const size_t shift = CLZ(edge_word);
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
+ visitor(obj);
+ edge_word ^= static_cast<size_t>(kWordHighBitMask) >> shift;
+ }
+}
+
+inline bool SpaceBitmap::Modify(const mirror::Object* obj, bool do_set) {
+ uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
+ DCHECK_GE(addr, heap_begin_);
+ const uintptr_t offset = addr - heap_begin_;
+ const size_t index = OffsetToIndex(offset);
+ const word mask = OffsetToMask(offset);
+ DCHECK_LT(index, bitmap_size_ / kWordSize) << " bitmap_size_ = " << bitmap_size_;
+ word* address = &bitmap_begin_[index];
+ word old_word = *address;
+ if (do_set) {
+ *address = old_word | mask;
+ } else {
+ *address = old_word & ~mask;
+ }
+ return (old_word & mask) != 0;
+}
+} // namespace art
+
+#endif // ART_SRC_GC_SPACE_BITMAP_INL_H_
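
VisitMarkedRange scans the bitmap one word at a time: every set bit covers one kAlignment-sized slot, the most significant bit is the lowest address, and CLZ turns the highest remaining set bit back into an object address. A self-contained sketch of that inner loop over a single word, assuming the 32-bit configuration (8-byte alignment, 32-bit words); the address and bit pattern are invented:

#include <stdint.h>
#include <stdio.h>

int main() {
  const uintptr_t kAlignment = 8;         // assumed object alignment
  const uint32_t kHighBit = 0x80000000u;  // kWordHighBitMask for a 32-bit word
  uintptr_t ptr_base = 0x40001000;        // heap address covered by this bitmap word
  uint32_t word = 0xA0000001u;            // slots 0, 2 and 31 of this word are marked

  while (word != 0) {
    uint32_t shift = __builtin_clz(word);
    uintptr_t obj_addr = ptr_base + shift * kAlignment;
    printf("marked object at %#lx\n", (unsigned long)obj_addr);
    word ^= kHighBit >> shift;  // clear the bit just visited
  }
  return 0;
}
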
diff --git a/src/gc/space_bitmap.cc b/src/gc/space_bitmap.cc
index 25fa672..d90c090 100644
--- a/src/gc/space_bitmap.cc
+++ b/src/gc/space_bitmap.cc
@@ -17,6 +17,11 @@
#include "heap_bitmap.h"
#include "base/logging.h"
+#include "mirror/class-inl.h"
+#include "mirror/field-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
+#include "space_bitmap-inl.h"
#include "UniquePtr.h"
#include "utils.h"
@@ -32,7 +37,7 @@
void SpaceSetMap::Walk(SpaceBitmap::Callback* callback, void* arg) {
for (Objects::iterator it = contained_.begin(); it != contained_.end(); ++it) {
- callback(const_cast<Object*>(*it), arg);
+ callback(const_cast<mirror::Object*>(*it), arg);
}
}
@@ -98,7 +103,7 @@
uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
do {
const size_t shift = CLZ(w);
- Object* obj = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
(*callback)(obj, arg);
w ^= static_cast<size_t>(kWordHighBitMask) >> shift;
} while (w != 0);
@@ -127,10 +132,10 @@
return;
}
- // TODO: rewrite the callbacks to accept a std::vector<Object*> rather than a Object**?
+ // TODO: rewrite the callbacks to accept a std::vector<mirror::Object*> rather than a mirror::Object**?
const size_t buffer_size = kWordSize * kBitsPerWord;
- Object* pointer_buf[buffer_size];
- Object** pb = &pointer_buf[0];
+ mirror::Object* pointer_buf[buffer_size];
+ mirror::Object** pb = &pointer_buf[0];
size_t start = OffsetToIndex(sweep_begin - live_bitmap.heap_begin_);
size_t end = OffsetToIndex(sweep_end - live_bitmap.heap_begin_ - 1);
CHECK_LT(end, live_bitmap.Size() / kWordSize);
@@ -143,7 +148,7 @@
do {
const size_t shift = CLZ(garbage);
garbage ^= static_cast<size_t>(kWordHighBitMask) >> shift;
- *pb++ = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
+ *pb++ = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
} while (garbage != 0);
// Make sure that there are always enough slots available for an
// entire word of one bits.
@@ -161,32 +166,32 @@
} // namespace art
// Support needed for in order traversal
-#include "object.h"
+#include "mirror/object.h"
#include "object_utils.h"
namespace art {
-static void WalkFieldsInOrder(SpaceBitmap* visited, SpaceBitmap::Callback* callback, Object* obj,
+static void WalkFieldsInOrder(SpaceBitmap* visited, SpaceBitmap::Callback* callback, mirror::Object* obj,
void* arg);
// Walk instance fields of the given Class. Separate function to allow recursion on the super
// class.
-static void WalkInstanceFields(SpaceBitmap* visited, SpaceBitmap::Callback* callback, Object* obj,
- Class* klass, void* arg)
+static void WalkInstanceFields(SpaceBitmap* visited, SpaceBitmap::Callback* callback, mirror::Object* obj,
+ mirror::Class* klass, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Visit fields of parent classes first.
- Class* super = klass->GetSuperClass();
+ mirror::Class* super = klass->GetSuperClass();
if (super != NULL) {
WalkInstanceFields(visited, callback, obj, super, arg);
}
// Walk instance fields
- ObjectArray<Field>* fields = klass->GetIFields();
+ mirror::ObjectArray<mirror::Field>* fields = klass->GetIFields();
if (fields != NULL) {
for (int32_t i = 0; i < fields->GetLength(); i++) {
- Field* field = fields->Get(i);
+ mirror::Field* field = fields->Get(i);
FieldHelper fh(field);
if (!fh.IsPrimitiveType()) {
- Object* value = field->GetObj(obj);
+ mirror::Object* value = field->GetObj(obj);
if (value != NULL) {
WalkFieldsInOrder(visited, callback, value, arg);
}
@@ -196,7 +201,7 @@
}
// For an unvisited object, visit it then all its children found via fields.
-static void WalkFieldsInOrder(SpaceBitmap* visited, SpaceBitmap::Callback* callback, Object* obj,
+static void WalkFieldsInOrder(SpaceBitmap* visited, SpaceBitmap::Callback* callback, mirror::Object* obj,
void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (visited->Test(obj)) {
@@ -206,17 +211,17 @@
(*callback)(obj, arg);
visited->Set(obj);
// Walk instance fields of all objects
- Class* klass = obj->GetClass();
+ mirror::Class* klass = obj->GetClass();
WalkInstanceFields(visited, callback, obj, klass, arg);
// Walk static fields of a Class
if (obj->IsClass()) {
- ObjectArray<Field>* fields = klass->GetSFields();
+ mirror::ObjectArray<mirror::Field>* fields = klass->GetSFields();
if (fields != NULL) {
for (int32_t i = 0; i < fields->GetLength(); i++) {
- Field* field = fields->Get(i);
+ mirror::Field* field = fields->Get(i);
FieldHelper fh(field);
if (!fh.IsPrimitiveType()) {
- Object* value = field->GetObj(NULL);
+ mirror::Object* value = field->GetObj(NULL);
if (value != NULL) {
WalkFieldsInOrder(visited, callback, value, arg);
}
@@ -225,10 +230,10 @@
}
} else if (obj->IsObjectArray()) {
// Walk elements of an object array
- ObjectArray<Object>* obj_array = obj->AsObjectArray<Object>();
+ mirror::ObjectArray<mirror::Object>* obj_array = obj->AsObjectArray<mirror::Object>();
int32_t length = obj_array->GetLength();
for (int32_t i = 0; i < length; i++) {
- Object* value = obj_array->Get(i);
+ mirror::Object* value = obj_array->Get(i);
if (value != NULL) {
WalkFieldsInOrder(visited, callback, value, arg);
}
@@ -251,7 +256,7 @@
uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
while (w != 0) {
const size_t shift = CLZ(w);
- Object* obj = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
WalkFieldsInOrder(visited.get(), callback, obj, arg);
w ^= static_cast<size_t>(kWordHighBitMask) >> shift;
}
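
For orientation, the WalkFieldsInOrder/WalkInstanceFields pair above is a depth-first traversal of the object graph guarded by a visited bitmap: visit the object, mark it, then recurse into reference-typed instance fields, static fields of classes, and object-array elements. A toy sketch of the same shape, using a hypothetical Node type and a std::set in place of the SpaceBitmap (not the mirror:: API):

#include <cstddef>
#include <set>
#include <vector>

// Hypothetical stand-in for mirror::Object; references covers fields and array slots.
struct Node {
  std::vector<Node*> references;
};

typedef void Callback(Node* node, void* arg);

// Depth-first, in-order walk with a visited set, mirroring WalkFieldsInOrder.
static void WalkInOrder(std::set<Node*>* visited, Callback* callback, Node* node, void* arg) {
  if (node == NULL || visited->count(node) != 0) {
    return;  // SpaceBitmap::Test()/Set() play this role in the real code
  }
  callback(node, arg);
  visited->insert(node);
  for (size_t i = 0; i < node->references.size(); ++i) {
    WalkInOrder(visited, callback, node->references[i], arg);
  }
}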
diff --git a/src/gc/space_bitmap.h b/src/gc/space_bitmap.h
index dd2f47d..6bc06d6 100644
--- a/src/gc/space_bitmap.h
+++ b/src/gc/space_bitmap.h
@@ -14,35 +14,33 @@
* limitations under the License.
*/
-#ifndef ART_SRC_SPACE_BITMAP_H_
-#define ART_SRC_SPACE_BITMAP_H_
+#ifndef ART_SRC_GC_SPACE_BITMAP_H_
+#define ART_SRC_GC_SPACE_BITMAP_H_
+
+#include "locks.h"
+#include "globals.h"
+#include "mem_map.h"
+#include "UniquePtr.h"
#include <limits.h>
#include <set>
#include <stdint.h>
#include <vector>
-#include "base/logging.h"
-#include "cutils/atomic.h"
-#include "cutils/atomic-inline.h"
-#include "UniquePtr.h"
-#include "globals.h"
-#include "mem_map.h"
-#include "utils.h"
-
namespace art {
-
+namespace mirror {
class Object;
+} // namespace mirror
class SpaceBitmap {
public:
static const size_t kAlignment = 8;
- typedef void Callback(Object* obj, void* arg);
+ typedef void Callback(mirror::Object* obj, void* arg);
- typedef void ScanCallback(Object* obj, void* finger, void* arg);
+ typedef void ScanCallback(mirror::Object* obj, void* finger, void* arg);
- typedef void SweepCallback(size_t ptr_count, Object** ptrs, void* arg);
+ typedef void SweepCallback(size_t ptr_count, mirror::Object** ptrs, void* arg);
// Initialize a HeapBitmap so that it points to a bitmap large enough to cover a heap at
// heap_begin of heap_capacity bytes, where objects are guaranteed to be kAlignment-aligned.
@@ -66,44 +64,20 @@
return static_cast<uintptr_t>(kWordHighBitMask) >> ((offset_ / kAlignment) % kBitsPerWord);
}
- inline bool Set(const Object* obj) {
+ inline bool Set(const mirror::Object* obj) {
return Modify(obj, true);
}
- inline bool Clear(const Object* obj) {
+ inline bool Clear(const mirror::Object* obj) {
return Modify(obj, false);
}
// Returns true if the object was previously marked.
- inline bool AtomicTestAndSet(const Object* obj) {
- uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
- DCHECK_GE(addr, heap_begin_);
- const uintptr_t offset = addr - heap_begin_;
- const size_t index = OffsetToIndex(offset);
- const word mask = OffsetToMask(offset);
- word* const address = &bitmap_begin_[index];
- DCHECK_LT(index, bitmap_size_ / kWordSize) << " bitmap_size_ = " << bitmap_size_;
- word old_word;
- do {
- old_word = *address;
- // Fast path: The bit is already set.
- if ((old_word & mask) != 0) {
- return true;
- }
- } while (UNLIKELY(android_atomic_cas(old_word, old_word | mask, address) != 0));
- return false;
- }
+ bool AtomicTestAndSet(const mirror::Object* obj);
void Clear();
- inline bool Test(const Object* obj) const {
- uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
- DCHECK(HasAddress(obj)) << obj;
- DCHECK(bitmap_begin_ != NULL);
- DCHECK_GE(addr, heap_begin_);
- const uintptr_t offset = addr - heap_begin_;
- return (bitmap_begin_[OffsetToIndex(offset)] & OffsetToMask(offset)) != 0;
- }
+ bool Test(const mirror::Object* obj) const;
// Return true iff <obj> is within the range of pointers that this bitmap could potentially cover,
// even if a bit has not been set for it.
@@ -123,7 +97,7 @@
: bitmap_(bitmap) {
}
- void operator ()(Object* obj) const {
+ void operator ()(mirror::Object* obj) const {
bitmap_->Clear(obj);
}
private:
@@ -133,86 +107,21 @@
template <typename Visitor>
void VisitRange(uintptr_t visit_begin, uintptr_t visit_end, const Visitor& visitor) const {
for (; visit_begin < visit_end; visit_begin += kAlignment ) {
- visitor(reinterpret_cast<Object*>(visit_begin));
+ visitor(reinterpret_cast<mirror::Object*>(visit_begin));
}
}
template <typename Visitor, typename FingerVisitor>
void VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end,
const Visitor& visitor, const FingerVisitor& finger_visitor) const
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
- DCHECK_LT(visit_begin, visit_end);
-
- const size_t word_span = kAlignment * kBitsPerWord; // Equals IndexToOffset(1).
- const size_t bit_index_start = (visit_begin - heap_begin_) / kAlignment;
- const size_t bit_index_end = (visit_end - heap_begin_ - 1) / kAlignment;
-
- size_t word_start = bit_index_start / kBitsPerWord;
- size_t word_end = bit_index_end / kBitsPerWord;
- DCHECK_LT(word_end * kWordSize, Size());
-
- // Trim off left_bits of left bits.
- size_t edge_word = bitmap_begin_[word_start];
-
- // Handle bits on the left first as a special case
- size_t left_bits = bit_index_start & (kBitsPerWord - 1);
- if (left_bits != 0) {
- edge_word &= (1 << (kBitsPerWord - left_bits)) - 1;
- }
-
- // If word_start == word_end then handle this case at the same place we handle the right edge.
- if (edge_word != 0 && word_start < word_end) {
- uintptr_t ptr_base = IndexToOffset(word_start) + heap_begin_;
- finger_visitor(reinterpret_cast<void*>(ptr_base + word_span));
- do {
- const size_t shift = CLZ(edge_word);
- Object* obj = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
- visitor(obj);
- edge_word ^= static_cast<size_t>(kWordHighBitMask) >> shift;
- } while (edge_word != 0);
- }
- word_start++;
-
- for (size_t i = word_start; i < word_end; i++) {
- size_t w = bitmap_begin_[i];
- if (w != 0) {
- uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
- finger_visitor(reinterpret_cast<void*>(ptr_base + word_span));
- do {
- const size_t shift = CLZ(w);
- Object* obj = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
- visitor(obj);
- w ^= static_cast<size_t>(kWordHighBitMask) >> shift;
- } while (w != 0);
- }
- }
-
- // Handle the right edge, and also the left edge if both edges are on the same word.
- size_t right_bits = bit_index_end & (kBitsPerWord - 1);
-
- // If word_start == word_end then we need to use the word which we removed the left bits.
- if (word_start <= word_end) {
- edge_word = bitmap_begin_[word_end];
- }
-
- // Bits that we trim off the right.
- edge_word &= ~((static_cast<size_t>(kWordHighBitMask) >> right_bits) - 1);
- uintptr_t ptr_base = IndexToOffset(word_end) + heap_begin_;
- finger_visitor(reinterpret_cast<void*>(ptr_base + word_span));
- while (edge_word != 0) {
- const size_t shift = CLZ(edge_word);
- Object* obj = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
- visitor(obj);
- edge_word ^= static_cast<size_t>(kWordHighBitMask) >> shift;
- }
- }
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void Walk(Callback* callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
void InOrderWalk(Callback* callback, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
static void SweepWalk(const SpaceBitmap& live,
const SpaceBitmap& mark,
@@ -251,7 +160,7 @@
std::string GetName() const;
void SetName(const std::string& name);
- const void* GetObjectWordAddress(const Object* obj) const {
+ const void* GetObjectWordAddress(const mirror::Object* obj) const {
uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
const uintptr_t offset = addr - heap_begin_;
const size_t index = OffsetToIndex(offset);
@@ -265,22 +174,7 @@
heap_begin_(reinterpret_cast<uintptr_t>(heap_begin)),
name_(name) {}
- inline bool Modify(const Object* obj, bool do_set) {
- uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
- DCHECK_GE(addr, heap_begin_);
- const uintptr_t offset = addr - heap_begin_;
- const size_t index = OffsetToIndex(offset);
- const word mask = OffsetToMask(offset);
- DCHECK_LT(index, bitmap_size_ / kWordSize) << " bitmap_size_ = " << bitmap_size_;
- word* address = &bitmap_begin_[index];
- word old_word = *address;
- if (do_set) {
- *address = old_word | mask;
- } else {
- *address = old_word & ~mask;
- }
- return (old_word & mask) != 0;
- }
+ bool Modify(const mirror::Object* obj, bool do_set);
// Backing storage for bitmap.
UniquePtr<MemMap> mem_map_;
@@ -302,17 +196,17 @@
// Like a bitmap except it keeps track of objects using sets.
class SpaceSetMap {
public:
- typedef std::set<const Object*> Objects;
+ typedef std::set<const mirror::Object*> Objects;
bool IsEmpty() const {
return contained_.empty();
}
- inline void Set(const Object* obj) {
+ inline void Set(const mirror::Object* obj) {
contained_.insert(obj);
}
- inline void Clear(const Object* obj) {
+ inline void Clear(const mirror::Object* obj) {
Objects::iterator found = contained_.find(obj);
if (found != contained_.end()) {
contained_.erase(found);
@@ -323,7 +217,7 @@
contained_.clear();
}
- inline bool Test(const Object* obj) const {
+ inline bool Test(const mirror::Object* obj) const {
return contained_.find(obj) != contained_.end();
}
@@ -357,4 +251,4 @@
} // namespace art
-#endif // ART_SRC_SPACE_BITMAP_H_
+#endif // ART_SRC_GC_SPACE_BITMAP_H_
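
The reshaped header above is the pattern this commit applies throughout: the .h keeps declarations plus a forward declaration of mirror::Object, while the inline bodies move into a matching -inl.h that only the .cc files which call them include. A skeletal sketch of that split for a hypothetical FooBitmap follows; the two halves are shown as one compilable unit here, whereas in the tree they would live in foo_bitmap.h and foo_bitmap-inl.h, with only the -inl.h pulling in mirror/object.h.

#include <cstddef>

// foo_bitmap.h -- declarations only; a forward declaration suffices for a pointer parameter.
namespace art {
namespace mirror {
class Object;  // no need to #include the mirror headers here
}  // namespace mirror

class FooBitmap {
 public:
  bool Set(const mirror::Object* obj);  // inline body lives in foo_bitmap-inl.h
};

}  // namespace art

// foo_bitmap-inl.h -- included only by callers; the one place that needs mirror/object.h.
namespace art {

inline bool FooBitmap::Set(const mirror::Object* obj) {
  return obj != NULL;  // placeholder body; SpaceBitmap::Set() forwards to Modify() above
}

}  // namespace art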
diff --git a/src/gc/space_bitmap_test.cc b/src/gc/space_bitmap_test.cc
index a2f1afc..5a829e4 100644
--- a/src/gc/space_bitmap_test.cc
+++ b/src/gc/space_bitmap_test.cc
@@ -19,6 +19,7 @@
#include "common_test.h"
#include "dlmalloc.h"
#include "globals.h"
+#include "space_bitmap-inl.h"
#include "UniquePtr.h"
#include <stdint.h>
@@ -39,20 +40,20 @@
class BitmapVerify {
public:
- BitmapVerify(SpaceBitmap* bitmap, const Object* begin, const Object* end)
+ BitmapVerify(SpaceBitmap* bitmap, const mirror::Object* begin, const mirror::Object* end)
: bitmap_(bitmap),
begin_(begin),
end_(end) {}
- void operator ()(const Object* obj) {
+ void operator ()(const mirror::Object* obj) {
EXPECT_TRUE(obj >= begin_);
EXPECT_TRUE(obj <= end_);
EXPECT_TRUE(bitmap_->Test(obj) == ((reinterpret_cast<uintptr_t>(obj) & 0xF) != 0));
}
SpaceBitmap* bitmap_;
- const Object* begin_;
- const Object* end_;
+ const mirror::Object* begin_;
+ const mirror::Object* end_;
};
TEST_F(SpaceBitmapTest, ScanRange) {
@@ -65,7 +66,8 @@
// Set all the odd bits in the first BitsPerWord * 3 to one.
for (size_t j = 0;j < kBitsPerWord * 3; ++j) {
- const Object* obj = reinterpret_cast<Object*>(heap_begin + j * SpaceBitmap::kAlignment);
+ const mirror::Object* obj =
+ reinterpret_cast<mirror::Object*>(heap_begin + j * SpaceBitmap::kAlignment);
if (reinterpret_cast<uintptr_t>(obj) & 0xF) {
space_bitmap->Set(obj);
}
@@ -75,9 +77,11 @@
// This handles all the cases, having runs which start and end on the same word, and different
// words.
for (size_t i = 0; i < static_cast<size_t>(kBitsPerWord); ++i) {
- Object* start = reinterpret_cast<Object*>(heap_begin + i * SpaceBitmap::kAlignment);
+ mirror::Object* start =
+ reinterpret_cast<mirror::Object*>(heap_begin + i * SpaceBitmap::kAlignment);
for (size_t j = 0; j < static_cast<size_t>(kBitsPerWord * 2); ++j) {
- Object* end = reinterpret_cast<Object*>(heap_begin + (i + j) * SpaceBitmap::kAlignment);
+ mirror::Object* end =
+ reinterpret_cast<mirror::Object*>(heap_begin + (i + j) * SpaceBitmap::kAlignment);
BitmapVerify(space_bitmap.get(), start, end);
}
}
diff --git a/src/gc/space_test.cc b/src/gc/space_test.cc
index 2e03eae..372ec77 100644
--- a/src/gc/space_test.cc
+++ b/src/gc/space_test.cc
@@ -83,23 +83,23 @@
Thread* self = Thread::Current();
// Succeeds, fits without adjusting the footprint limit.
- Object* ptr1 = space->Alloc(self, 1 * MB);
+ mirror::Object* ptr1 = space->Alloc(self, 1 * MB);
EXPECT_TRUE(ptr1 != NULL);
// Fails, requires a higher footprint limit.
- Object* ptr2 = space->Alloc(self, 8 * MB);
+ mirror::Object* ptr2 = space->Alloc(self, 8 * MB);
EXPECT_TRUE(ptr2 == NULL);
// Succeeds, adjusts the footprint.
- Object* ptr3 = space->AllocWithGrowth(self, 8 * MB);
+ mirror::Object* ptr3 = space->AllocWithGrowth(self, 8 * MB);
EXPECT_TRUE(ptr3 != NULL);
// Fails, requires a higher footprint limit.
- Object* ptr4 = space->Alloc(self, 8 * MB);
+ mirror::Object* ptr4 = space->Alloc(self, 8 * MB);
EXPECT_TRUE(ptr4 == NULL);
// Also fails, requires a higher allowed footprint.
- Object* ptr5 = space->AllocWithGrowth(self, 8 * MB);
+ mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB);
EXPECT_TRUE(ptr5 == NULL);
// Release some memory.
@@ -151,23 +151,23 @@
Runtime::Current()->GetHeap()->AddSpace(space);
// Succeeds, fits without adjusting the footprint limit.
- Object* ptr1 = space->Alloc(self, 1 * MB);
+ mirror::Object* ptr1 = space->Alloc(self, 1 * MB);
EXPECT_TRUE(ptr1 != NULL);
// Fails, requires a higher footprint limit.
- Object* ptr2 = space->Alloc(self, 8 * MB);
+ mirror::Object* ptr2 = space->Alloc(self, 8 * MB);
EXPECT_TRUE(ptr2 == NULL);
// Succeeds, adjusts the footprint.
- Object* ptr3 = space->AllocWithGrowth(self, 8 * MB);
+ mirror::Object* ptr3 = space->AllocWithGrowth(self, 8 * MB);
EXPECT_TRUE(ptr3 != NULL);
// Fails, requires a higher footprint limit.
- Object* ptr4 = space->Alloc(self, 8 * MB);
+ mirror::Object* ptr4 = space->Alloc(self, 8 * MB);
EXPECT_TRUE(ptr4 == NULL);
// Also fails, requires a higher allowed footprint.
- Object* ptr5 = space->AllocWithGrowth(self, 8 * MB);
+ mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB);
EXPECT_TRUE(ptr5 == NULL);
// Release some memory.
@@ -194,7 +194,7 @@
Thread* self = Thread::Current();
// Succeeds, fits without adjusting the max allowed footprint.
- Object* lots_of_objects[1024];
+ mirror::Object* lots_of_objects[1024];
for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
lots_of_objects[i] = space->Alloc(self, 16);
EXPECT_TRUE(lots_of_objects[i] != NULL);
@@ -252,7 +252,7 @@
// Fill the space with lots of small objects up to the growth limit
size_t max_objects = (growth_limit / (object_size > 0 ? object_size : 8)) + 1;
- UniquePtr<Object*[]> lots_of_objects(new Object*[max_objects]);
+ UniquePtr<mirror::Object*[]> lots_of_objects(new mirror::Object*[max_objects]);
size_t last_object = 0; // last object for which allocation succeeded
size_t amount_allocated = 0; // amount of space allocated
Thread* self = Thread::Current();
@@ -269,7 +269,7 @@
alloc_size = 8;
}
}
- Object* object;
+ mirror::Object* object;
if (round <= 1) {
object = space->Alloc(self, alloc_size);
} else {
@@ -326,7 +326,7 @@
// Free some objects
for (size_t i = 0; i < last_object; i += free_increment) {
- Object* object = lots_of_objects.get()[i];
+ mirror::Object* object = lots_of_objects.get()[i];
if (object == NULL) {
continue;
}
@@ -347,7 +347,7 @@
}
// All memory was released, try a large allocation to check freed memory is being coalesced
- Object* large_object;
+ mirror::Object* large_object;
size_t three_quarters_space = (growth_limit / 2) + (growth_limit / 4);
if (round <= 1) {
large_object = space->Alloc(self, three_quarters_space);
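
The allocations checked above exercise a two-tier policy: Alloc() must fit under the space's current footprint limit and returns NULL otherwise, while AllocWithGrowth() may raise that limit up to a hard capacity. A toy model of the policy, with hypothetical names and byte counters instead of a real space:

#include <cstddef>

class ToySpace {
 public:
  ToySpace(size_t initial_footprint, size_t capacity)
      : footprint_limit_(initial_footprint), capacity_(capacity), used_(0) {}

  // Fails when the request does not fit under the current footprint limit.
  bool Alloc(size_t bytes) {
    if (used_ + bytes > footprint_limit_) {
      return false;
    }
    used_ += bytes;
    return true;
  }

  // May grow the footprint limit, but never past the hard capacity.
  bool AllocWithGrowth(size_t bytes) {
    if (used_ + bytes > capacity_) {
      return false;
    }
    if (used_ + bytes > footprint_limit_) {
      footprint_limit_ = used_ + bytes;  // "adjusts the footprint", as with ptr3 above
    }
    used_ += bytes;
    return true;
  }

 private:
  size_t footprint_limit_;
  const size_t capacity_;
  size_t used_;
};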
diff --git a/src/gc/sticky_mark_sweep.cc b/src/gc/sticky_mark_sweep.cc
index 23196fd..988d4e7 100644
--- a/src/gc/sticky_mark_sweep.cc
+++ b/src/gc/sticky_mark_sweep.cc
@@ -14,47 +14,51 @@
* limitations under the License.
*/
+#include "heap.h"
#include "large_object_space.h"
#include "space.h"
#include "sticky_mark_sweep.h"
+#include "thread.h"
namespace art {
- StickyMarkSweep::StickyMarkSweep(Heap* heap, bool is_concurrent)
- : PartialMarkSweep(heap, is_concurrent) {
- cumulative_timings_.SetName(GetName());
- }
- StickyMarkSweep::~StickyMarkSweep() {
+StickyMarkSweep::StickyMarkSweep(Heap* heap, bool is_concurrent)
+ : PartialMarkSweep(heap, is_concurrent) {
+ cumulative_timings_.SetName(GetName());
+}
- }
+StickyMarkSweep::~StickyMarkSweep() {
- void StickyMarkSweep::BindBitmaps() {
- PartialMarkSweep::BindBitmaps();
+}
- Spaces& spaces = GetHeap()->GetSpaces();
- WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
- // For sticky GC, we want to bind the bitmaps of both the zygote space and the alloc space.
- // This lets us start with the mark bitmap of the previous garbage collection as the current
- // mark bitmap of the alloc space. After the sticky GC finishes, we then unbind the bitmaps,
- // making it so that the live bitmap of the alloc space is contains the newly marked objects
- // from the sticky GC.
- for (Spaces::iterator it = spaces.begin(); it != spaces.end(); ++it) {
- if ((*it)->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect) {
- BindLiveToMarkBitmap(*it);
- }
+void StickyMarkSweep::BindBitmaps() {
+ PartialMarkSweep::BindBitmaps();
+
+ Spaces& spaces = GetHeap()->GetSpaces();
+ WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ // For sticky GC, we want to bind the bitmaps of both the zygote space and the alloc space.
+ // This lets us start with the mark bitmap of the previous garbage collection as the current
+ // mark bitmap of the alloc space. After the sticky GC finishes, we then unbind the bitmaps,
+ // making it so that the live bitmap of the alloc space contains the newly marked objects
+ // from the sticky GC.
+ for (Spaces::iterator it = spaces.begin(); it != spaces.end(); ++it) {
+ if ((*it)->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect) {
+ BindLiveToMarkBitmap(*it);
}
-
- GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();
}
- void StickyMarkSweep::MarkReachableObjects() {
- DisableFinger();
- RecursiveMarkDirtyObjects(CardTable::kCardDirty - 1);
- }
+ GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();
+}
- void StickyMarkSweep::Sweep(TimingLogger& timings, bool swap_bitmaps) {
- ObjectStack* live_stack = GetHeap()->GetLiveStack();
- SweepArray(timings_, live_stack, false);
- timings_.AddSplit("SweepArray");
- }
+void StickyMarkSweep::MarkReachableObjects() {
+ DisableFinger();
+ RecursiveMarkDirtyObjects(CardTable::kCardDirty - 1);
+}
+
+void StickyMarkSweep::Sweep(TimingLogger& timings, bool swap_bitmaps) {
+ ObjectStack* live_stack = GetHeap()->GetLiveStack();
+ SweepArray(timings_, live_stack, false);
+ timings_.AddSplit("SweepArray");
+}
+
} // namespace art
diff --git a/src/gc/sticky_mark_sweep.h b/src/gc/sticky_mark_sweep.h
index 8396bbe..41ab0cc 100644
--- a/src/gc/sticky_mark_sweep.h
+++ b/src/gc/sticky_mark_sweep.h
@@ -14,28 +14,15 @@
* limitations under the License.
*/
-#ifndef ART_SRC_STICKY_MARK_SWEEP_H_
-#define ART_SRC_STICKY_MARK_SWEEP_H_
+#ifndef ART_SRC_GC_STICKY_MARK_SWEEP_H_
+#define ART_SRC_GC_STICKY_MARK_SWEEP_H_
#include "base/macros.h"
#include "locks.h"
#include "partial_mark_sweep.h"
-#include "utils.h"
namespace art {
-class Barrier;
-class CheckObjectVisitor;
-class Class;
-class Heap;
-class MarkIfReachesAllocspaceVisitor;
-class ModUnionClearCardVisitor;
-class ModUnionVisitor;
-class ModUnionTableBitmap;
-class Object;
-class TimingLogger;
-class MarkStackChunk;
-
class StickyMarkSweep : public PartialMarkSweep {
public:
virtual GcType GetGcType() const {
@@ -60,4 +47,4 @@
} // namespace art
-#endif // ART_SRC_STICKY_MARK_SWEEP_H_
+#endif // ART_SRC_GC_STICKY_MARK_SWEEP_H_
diff --git a/src/heap.cc b/src/heap.cc
index 805d63c..5c96dec 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -27,15 +27,24 @@
#include "debugger.h"
#include "gc/atomic_stack.h"
#include "gc/card_table.h"
+#include "gc/card_table-inl.h"
#include "gc/heap_bitmap.h"
+#include "gc/heap_bitmap-inl.h"
#include "gc/large_object_space.h"
#include "gc/mark_sweep.h"
+#include "gc/mark_sweep-inl.h"
#include "gc/partial_mark_sweep.h"
+#include "gc/space_bitmap-inl.h"
#include "gc/sticky_mark_sweep.h"
#include "gc/mod_union_table.h"
+#include "gc/mod_union_table-inl.h"
#include "gc/space.h"
#include "image.h"
-#include "object.h"
+#include "mirror/class-inl.h"
+#include "mirror/field-inl.h"
+#include "mirror/object.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
#include "object_utils.h"
#include "os.h"
#include "ScopedLocalRef.h"
@@ -431,7 +440,7 @@
delete gc_complete_lock_;
}
-ContinuousSpace* Heap::FindSpaceFromObject(const Object* obj) const {
+ContinuousSpace* Heap::FindSpaceFromObject(const mirror::Object* obj) const {
// TODO: C++0x auto
for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
if ((*it)->Contains(obj)) {
@@ -465,13 +474,13 @@
}
}
-Object* Heap::AllocObject(Thread* self, Class* c, size_t byte_count) {
- DCHECK(c == NULL || (c->IsClassClass() && byte_count >= sizeof(Class)) ||
+mirror::Object* Heap::AllocObject(Thread* self, mirror::Class* c, size_t byte_count) {
+ DCHECK(c == NULL || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
(c->IsVariableSize() || c->GetObjectSize() == byte_count) ||
strlen(ClassHelper(c).GetDescriptor()) == 0);
- DCHECK_GE(byte_count, sizeof(Object));
+ DCHECK_GE(byte_count, sizeof(mirror::Object));
- Object* obj = NULL;
+ mirror::Object* obj = NULL;
size_t size = 0;
uint64_t allocation_start = 0;
if (measure_allocation_time_) {
@@ -513,7 +522,7 @@
// concurrent_start_bytes_.
concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
// The SirtRef is necessary since the calls in RequestConcurrentGC are a safepoint.
- SirtRef<Object> ref(self, obj);
+ SirtRef<mirror::Object> ref(self, obj);
RequestConcurrentGC(self);
}
VerifyObject(obj);
@@ -547,7 +556,7 @@
return NULL;
}
-bool Heap::IsHeapAddress(const Object* obj) {
+bool Heap::IsHeapAddress(const mirror::Object* obj) {
// Note: we deliberately don't take the lock here, and mustn't test anything that would
// require taking the lock.
if (obj == NULL) {
@@ -566,7 +575,7 @@
return large_object_space_->Contains(obj);
}
-bool Heap::IsLiveObjectLocked(const Object* obj) {
+bool Heap::IsLiveObjectLocked(const mirror::Object* obj) {
Locks::heap_bitmap_lock_->AssertReaderHeld(Thread::Current());
return IsHeapAddress(obj) && GetLiveBitmap()->Test(obj);
}
@@ -596,7 +605,7 @@
}
}
-void Heap::VerifyObjectBody(const Object* obj) {
+void Heap::VerifyObjectBody(const mirror::Object* obj) {
if (!IsAligned<kObjectAlignment>(obj)) {
LOG(FATAL) << "Object isn't aligned: " << obj;
}
@@ -618,8 +627,8 @@
// Ignore early dawn of the universe verifications
if (!VERIFY_OBJECT_FAST && GetObjectsAllocated() > 10) {
const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
- Object::ClassOffset().Int32Value();
- const Class* c = *reinterpret_cast<Class* const *>(raw_addr);
+ mirror::Object::ClassOffset().Int32Value();
+ const mirror::Class* c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
if (c == NULL) {
LOG(FATAL) << "Null class in object: " << obj;
} else if (!IsAligned<kObjectAlignment>(c)) {
@@ -630,15 +639,15 @@
// Check obj.getClass().getClass() == obj.getClass().getClass().getClass()
// Note: we don't use the accessors here as they have internal sanity checks
// that we don't want to run
- raw_addr = reinterpret_cast<const byte*>(c) + Object::ClassOffset().Int32Value();
- const Class* c_c = *reinterpret_cast<Class* const *>(raw_addr);
- raw_addr = reinterpret_cast<const byte*>(c_c) + Object::ClassOffset().Int32Value();
- const Class* c_c_c = *reinterpret_cast<Class* const *>(raw_addr);
+ raw_addr = reinterpret_cast<const byte*>(c) + mirror::Object::ClassOffset().Int32Value();
+ const mirror::Class* c_c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
+ raw_addr = reinterpret_cast<const byte*>(c_c) + mirror::Object::ClassOffset().Int32Value();
+ const mirror::Class* c_c_c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
CHECK_EQ(c_c, c_c_c);
}
}
-void Heap::VerificationCallback(Object* obj, void* arg) {
+void Heap::VerificationCallback(mirror::Object* obj, void* arg) {
DCHECK(obj != NULL);
reinterpret_cast<Heap*>(arg)->VerifyObjectBody(obj);
}
@@ -648,7 +657,7 @@
GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
}
-void Heap::RecordAllocation(size_t size, Object* obj) {
+void Heap::RecordAllocation(size_t size, mirror::Object* obj) {
DCHECK(obj != NULL);
DCHECK_GT(size, 0u);
num_bytes_allocated_ += size;
@@ -687,7 +696,7 @@
}
}
-Object* Heap::TryToAllocate(Thread* self, AllocSpace* space, size_t alloc_size, bool grow) {
+mirror::Object* Heap::TryToAllocate(Thread* self, AllocSpace* space, size_t alloc_size, bool grow) {
// Should we try to use a CAS here and fix up num_bytes_allocated_ later with AllocationSize?
if (num_bytes_allocated_ + alloc_size > max_allowed_footprint_) {
// max_allowed_footprint_ <= growth_limit_ so it is safe to check in here.
@@ -711,13 +720,13 @@
return space->Alloc(self, alloc_size);
}
-Object* Heap::Allocate(Thread* self, AllocSpace* space, size_t alloc_size) {
+mirror::Object* Heap::Allocate(Thread* self, AllocSpace* space, size_t alloc_size) {
// Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
// done in the runnable state where suspension is expected.
DCHECK_EQ(self->GetState(), kRunnable);
self->AssertThreadSuspensionIsAllowable();
- Object* ptr = TryToAllocate(self, space, alloc_size, false);
+ mirror::Object* ptr = TryToAllocate(self, space, alloc_size, false);
if (ptr != NULL) {
return ptr;
}
@@ -838,14 +847,14 @@
class InstanceCounter {
public:
- InstanceCounter(const std::vector<Class*>& classes, bool use_is_assignable_from, uint64_t* counts)
+ InstanceCounter(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from, uint64_t* counts)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
: classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) {
}
- void operator()(const Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void operator()(const mirror::Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
for (size_t i = 0; i < classes_.size(); ++i) {
- const Class* instance_class = o->GetClass();
+ const mirror::Class* instance_class = o->GetClass();
if (use_is_assignable_from_) {
if (instance_class != NULL && classes_[i]->IsAssignableFrom(instance_class)) {
++counts_[i];
@@ -859,14 +868,14 @@
}
private:
- const std::vector<Class*>& classes_;
+ const std::vector<mirror::Class*>& classes_;
bool use_is_assignable_from_;
uint64_t* const counts_;
DISALLOW_COPY_AND_ASSIGN(InstanceCounter);
};
-void Heap::CountInstances(const std::vector<Class*>& classes, bool use_is_assignable_from,
+void Heap::CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
uint64_t* counts) {
// We only want reachable instances, so do a GC. This also ensures that the alloc stack
// is empty, so the live bitmap is the only place we need to look.
@@ -882,29 +891,30 @@
class InstanceCollector {
public:
- InstanceCollector(Class* c, int32_t max_count, std::vector<Object*>& instances)
+ InstanceCollector(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
: class_(c), max_count_(max_count), instances_(instances) {
}
- void operator()(const Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- const Class* instance_class = o->GetClass();
+ void operator()(const mirror::Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const mirror::Class* instance_class = o->GetClass();
if (instance_class == class_) {
if (max_count_ == 0 || instances_.size() < max_count_) {
- instances_.push_back(const_cast<Object*>(o));
+ instances_.push_back(const_cast<mirror::Object*>(o));
}
}
}
private:
- Class* class_;
+ mirror::Class* class_;
uint32_t max_count_;
- std::vector<Object*>& instances_;
+ std::vector<mirror::Object*>& instances_;
DISALLOW_COPY_AND_ASSIGN(InstanceCollector);
};
-void Heap::GetInstances(Class* c, int32_t max_count, std::vector<Object*>& instances) {
+void Heap::GetInstances(mirror::Class* c, int32_t max_count,
+ std::vector<mirror::Object*>& instances) {
// We only want reachable instances, so do a GC. This also ensures that the alloc stack
// is empty, so the live bitmap is the only place we need to look.
Thread* self = Thread::Current();
@@ -919,7 +929,8 @@
class ReferringObjectsFinder {
public:
- ReferringObjectsFinder(Object* object, int32_t max_count, std::vector<Object*>& referring_objects)
+ ReferringObjectsFinder(mirror::Object* object, int32_t max_count,
+ std::vector<mirror::Object*>& referring_objects)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
: object_(object), max_count_(max_count), referring_objects_(referring_objects) {
}
@@ -927,27 +938,28 @@
// For bitmap Visit.
// TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
// annotalysis on visitors.
- void operator()(const Object* o) const NO_THREAD_SAFETY_ANALYSIS {
+ void operator()(const mirror::Object* o) const NO_THREAD_SAFETY_ANALYSIS {
MarkSweep::VisitObjectReferences(o, *this);
}
// For MarkSweep::VisitObjectReferences.
- void operator ()(const Object* referrer, const Object* object, const MemberOffset&, bool) const {
+ void operator ()(const mirror::Object* referrer, const mirror::Object* object,
+ const MemberOffset&, bool) const {
if (object == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
- referring_objects_.push_back(const_cast<Object*>(referrer));
+ referring_objects_.push_back(const_cast<mirror::Object*>(referrer));
}
}
private:
- Object* object_;
+ mirror::Object* object_;
uint32_t max_count_;
- std::vector<Object*>& referring_objects_;
+ std::vector<mirror::Object*>& referring_objects_;
DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
};
-void Heap::GetReferringObjects(Object* o, int32_t max_count,
- std::vector<Object*>& referring_objects) {
+void Heap::GetReferringObjects(mirror::Object* o, int32_t max_count,
+ std::vector<mirror::Object*>& referring_objects) {
// We only want reachable instances, so do a GC. This also ensures that the alloc stack
// is empty, so the live bitmap is the only place we need to look.
Thread* self = Thread::Current();
@@ -1026,9 +1038,9 @@
}
void Heap::MarkAllocStack(SpaceBitmap* bitmap, SpaceSetMap* large_objects, ObjectStack* stack) {
- Object** limit = stack->End();
- for (Object** it = stack->Begin(); it != limit; ++it) {
- const Object* obj = *it;
+ mirror::Object** limit = stack->End();
+ for (mirror::Object** it = stack->Begin(); it != limit; ++it) {
+ const mirror::Object* obj = *it;
DCHECK(obj != NULL);
if (LIKELY(bitmap->HasAddress(obj))) {
bitmap->Set(obj);
@@ -1039,9 +1051,9 @@
}
void Heap::UnMarkAllocStack(SpaceBitmap* bitmap, SpaceSetMap* large_objects, ObjectStack* stack) {
- Object** limit = stack->End();
- for (Object** it = stack->Begin(); it != limit; ++it) {
- const Object* obj = *it;
+ mirror::Object** limit = stack->End();
+ for (mirror::Object** it = stack->Begin(); it != limit; ++it) {
+ const mirror::Object* obj = *it;
DCHECK(obj != NULL);
if (LIKELY(bitmap->HasAddress(obj))) {
bitmap->Clear(obj);
@@ -1187,8 +1199,8 @@
timings.AddSplit("MarkImageToAllocSpaceReferences");
}
-void Heap::RootMatchesObjectVisitor(const Object* root, void* arg) {
- Object* obj = reinterpret_cast<Object*>(arg);
+void Heap::RootMatchesObjectVisitor(const mirror::Object* root, void* arg) {
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(arg);
if (root == obj) {
LOG(INFO) << "Object " << obj << " is a root";
}
@@ -1196,7 +1208,7 @@
class ScanVisitor {
public:
- void operator ()(const Object* obj) const {
+ void operator ()(const mirror::Object* obj) const {
LOG(INFO) << "Would have rescanned object " << obj;
}
};
@@ -1212,8 +1224,9 @@
// TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for smarter
// analysis.
- void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */,
- bool /* is_static */) const NO_THREAD_SAFETY_ANALYSIS {
+ void operator ()(const mirror::Object* obj, const mirror::Object* ref,
+ const MemberOffset& /* offset */, bool /* is_static */) const
+ NO_THREAD_SAFETY_ANALYSIS {
// Verify that the reference is live.
if (ref != NULL && !IsLive(ref)) {
CardTable* card_table = heap_->GetCardTable();
@@ -1260,7 +1273,7 @@
}
}
- bool IsLive(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
+ bool IsLive(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
if (heap_->GetLiveBitmap()->Test(obj)) {
return true;
}
@@ -1284,7 +1297,7 @@
}
- void operator ()(const Object* obj) const
+ void operator ()(const mirror::Object* obj) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
VerifyReferenceVisitor visitor(heap_, const_cast<bool*>(&failed_));
MarkSweep::VisitObjectReferences(obj, visitor);
@@ -1328,8 +1341,8 @@
// TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
// annotalysis on visitors.
- void operator ()(const Object* obj, const Object* ref, const MemberOffset& offset,
- bool is_static) const NO_THREAD_SAFETY_ANALYSIS {
+ void operator ()(const mirror::Object* obj, const mirror::Object* ref, const MemberOffset& offset,
+ bool is_static) const NO_THREAD_SAFETY_ANALYSIS {
// Filter out class references since changing an object's class does not mark the card as dirty.
// Also handles large objects, since the only reference they hold is a class reference.
if (ref != NULL && !ref->IsClass()) {
@@ -1355,12 +1368,13 @@
// Print which field of the object is dead.
if (!obj->IsObjectArray()) {
- const Class* klass = is_static ? obj->AsClass() : obj->GetClass();
+ const mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
CHECK(klass != NULL);
- const ObjectArray<Field>* fields = is_static ? klass->GetSFields() : klass->GetIFields();
+ const mirror::ObjectArray<mirror::Field>* fields = is_static ? klass->GetSFields()
+ : klass->GetIFields();
CHECK(fields != NULL);
for (int32_t i = 0; i < fields->GetLength(); ++i) {
- const Field* cur = fields->Get(i);
+ const mirror::Field* cur = fields->Get(i);
if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is "
<< PrettyField(cur);
@@ -1368,7 +1382,8 @@
}
}
} else {
- const ObjectArray<Object>* object_array = obj->AsObjectArray<Object>();
+ const mirror::ObjectArray<mirror::Object>* object_array =
+ obj->AsObjectArray<mirror::Object>();
for (int32_t i = 0; i < object_array->GetLength(); ++i) {
if (object_array->Get(i) == ref) {
LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref";
@@ -1395,7 +1410,7 @@
}
- void operator ()(const Object* obj) const
+ void operator ()(const mirror::Object* obj) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
MarkSweep::VisitObjectReferences(obj, visitor);
@@ -1419,7 +1434,7 @@
GetLiveBitmap()->Visit(visitor);
// We can verify objects in the live stack since none of these should reference dead objects.
- for (Object** it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
+ for (mirror::Object** it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
visitor(*it);
}
@@ -1636,34 +1651,36 @@
CHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
}
-Object* Heap::GetReferenceReferent(Object* reference) {
+mirror::Object* Heap::GetReferenceReferent(mirror::Object* reference) {
DCHECK(reference != NULL);
DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
- return reference->GetFieldObject<Object*>(reference_referent_offset_, true);
+ return reference->GetFieldObject<mirror::Object*>(reference_referent_offset_, true);
}
-void Heap::ClearReferenceReferent(Object* reference) {
+void Heap::ClearReferenceReferent(mirror::Object* reference) {
DCHECK(reference != NULL);
DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
reference->SetFieldObject(reference_referent_offset_, NULL, true);
}
// Returns true if the reference object has not yet been enqueued.
-bool Heap::IsEnqueuable(const Object* ref) {
+bool Heap::IsEnqueuable(const mirror::Object* ref) {
DCHECK(ref != NULL);
- const Object* queue = ref->GetFieldObject<Object*>(reference_queue_offset_, false);
- const Object* queue_next = ref->GetFieldObject<Object*>(reference_queueNext_offset_, false);
+ const mirror::Object* queue =
+ ref->GetFieldObject<mirror::Object*>(reference_queue_offset_, false);
+ const mirror::Object* queue_next =
+ ref->GetFieldObject<mirror::Object*>(reference_queueNext_offset_, false);
return (queue != NULL) && (queue_next == NULL);
}
-void Heap::EnqueueReference(Object* ref, Object** cleared_reference_list) {
+void Heap::EnqueueReference(mirror::Object* ref, mirror::Object** cleared_reference_list) {
DCHECK(ref != NULL);
- CHECK(ref->GetFieldObject<Object*>(reference_queue_offset_, false) != NULL);
- CHECK(ref->GetFieldObject<Object*>(reference_queueNext_offset_, false) == NULL);
+ CHECK(ref->GetFieldObject<mirror::Object*>(reference_queue_offset_, false) != NULL);
+ CHECK(ref->GetFieldObject<mirror::Object*>(reference_queueNext_offset_, false) == NULL);
EnqueuePendingReference(ref, cleared_reference_list);
}
-void Heap::EnqueuePendingReference(Object* ref, Object** list) {
+void Heap::EnqueuePendingReference(mirror::Object* ref, mirror::Object** list) {
DCHECK(ref != NULL);
DCHECK(list != NULL);
@@ -1673,17 +1690,19 @@
ref->SetFieldObject(reference_pendingNext_offset_, ref, false);
*list = ref;
} else {
- Object* head = (*list)->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
+ mirror::Object* head =
+ (*list)->GetFieldObject<mirror::Object*>(reference_pendingNext_offset_, false);
ref->SetFieldObject(reference_pendingNext_offset_, head, false);
(*list)->SetFieldObject(reference_pendingNext_offset_, ref, false);
}
}
-Object* Heap::DequeuePendingReference(Object** list) {
+mirror::Object* Heap::DequeuePendingReference(mirror::Object** list) {
DCHECK(list != NULL);
DCHECK(*list != NULL);
- Object* head = (*list)->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
- Object* ref;
+ mirror::Object* head = (*list)->GetFieldObject<mirror::Object*>(reference_pendingNext_offset_,
+ false);
+ mirror::Object* ref;
// Note: the following code is thread-safe because it is only called from ProcessReferences which
// is single threaded.
@@ -1691,7 +1710,8 @@
ref = *list;
*list = NULL;
} else {
- Object* next = head->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
+ mirror::Object* next = head->GetFieldObject<mirror::Object*>(reference_pendingNext_offset_,
+ false);
(*list)->SetFieldObject(reference_pendingNext_offset_, next, false);
ref = head;
}
@@ -1699,7 +1719,7 @@
return ref;
}
-void Heap::AddFinalizerReference(Thread* self, Object* object) {
+void Heap::AddFinalizerReference(Thread* self, mirror::Object* object) {
ScopedObjectAccess soa(self);
JValue args[1];
args[0].SetL(object);
@@ -1731,7 +1751,7 @@
return concurrent_min_free_;
}
-void Heap::EnqueueClearedReferences(Object** cleared) {
+void Heap::EnqueueClearedReferences(mirror::Object** cleared) {
DCHECK(cleared != NULL);
if (*cleared != NULL) {
// When a runtime isn't started there are no reference queues to care about so ignore.
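
The reference-queue hunks above keep pending references on an intrusive circular singly linked list: *list points at the tail, the tail's pendingNext points at the head, and a single element points at itself. A minimal sketch of that structure with a plain struct (hypothetical Reference type; the final detach step is part of the sketch, not copied from the hunk):

#include <cstddef>

// Hypothetical stand-in for a managed reference object with a pendingNext slot.
struct Reference {
  Reference* pending_next;  // NULL when not enqueued
};

// Append ref to the circular list whose tail is *list (shape of EnqueuePendingReference).
void EnqueuePending(Reference* ref, Reference** list) {
  if (*list == NULL) {
    ref->pending_next = ref;  // single element: points at itself
    *list = ref;
  } else {
    Reference* head = (*list)->pending_next;
    ref->pending_next = head;
    (*list)->pending_next = ref;
  }
}

// Remove and return the head of the circular list (shape of DequeuePendingReference).
Reference* DequeuePending(Reference** list) {
  Reference* head = (*list)->pending_next;
  Reference* ref;
  if (head == *list) {
    ref = *list;   // last element, so the list becomes empty
    *list = NULL;
  } else {
    (*list)->pending_next = head->pending_next;
    ref = head;
  }
  ref->pending_next = NULL;  // sketch-only: mark the node as no longer enqueued
  return ref;
}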
diff --git a/src/heap.h b/src/heap.h
index b7fc34d..9981f83 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -24,6 +24,7 @@
#include "atomic_integer.h"
#include "gc/atomic_stack.h"
#include "gc/card_table.h"
+#include "gc/gc_type.h"
#include "gc/heap_bitmap.h"
#include "globals.h"
#include "gtest/gtest.h"
@@ -39,9 +40,11 @@
#define VERIFY_OBJECT_FAST 1
namespace art {
-
-class AllocSpace;
+namespace mirror {
class Class;
+class Object;
+} // namespace mirror
+class AllocSpace;
class ConditionVariable;
class DlMallocSpace;
class GarbageCollector;
@@ -51,14 +54,12 @@
class MarkSweep;
class ModUnionTable;
class Mutex;
-class Object;
class Space;
class SpaceTest;
class StackVisitor;
class Thread;
class TimingLogger;
-typedef AtomicStack<Object*> ObjectStack;
typedef std::vector<ContinuousSpace*> Spaces;
class AgeCardVisitor {
@@ -72,21 +73,6 @@
}
};
-// The ordering of the enum matters, it is used to determine which GCs are run first.
-enum GcType {
- // No Gc
- kGcTypeNone,
- // Sticky mark bits "generational" GC.
- kGcTypeSticky,
- // Partial GC, over only the alloc space.
- kGcTypePartial,
- // Full GC
- kGcTypeFull,
- // Number of different Gc types.
- kGcTypeMax,
-};
-std::ostream& operator<<(std::ostream& os, const GcType& policy);
-
enum GcCause {
kGcCauseForAlloc,
kGcCauseBackground,
@@ -107,11 +93,6 @@
// Used so that we don't overflow the allocation time atomic integer.
static const size_t kTimeAdjust = 1024;
- typedef void (RootVisitor)(const Object* root, void* arg);
- typedef void (VerifyRootVisitor)(const Object* root, void* arg, size_t vreg,
- const StackVisitor* visitor);
- typedef bool (IsMarkedTester)(const Object* object, void* arg);
-
// Create a heap with the requested sizes. The possibly empty
// image_file_names specify Spaces to load based on
// ImageWriter output.
@@ -122,19 +103,19 @@
~Heap();
// Allocates and initializes storage for an object instance.
- Object* AllocObject(Thread* self, Class* klass, size_t num_bytes)
+ mirror::Object* AllocObject(Thread* self, mirror::Class* klass, size_t num_bytes)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Check sanity of given reference. Requires the heap lock.
#if VERIFY_OBJECT_ENABLED
- void VerifyObject(const Object* o);
+ void VerifyObject(const mirror::Object* o);
#else
- void VerifyObject(const Object*) {}
+ void VerifyObject(const mirror::Object*) {}
#endif
// Check sanity of all live references. Requires the heap lock.
void VerifyHeap() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
- static void RootMatchesObjectVisitor(const Object* root, void* arg);
+ static void RootMatchesObjectVisitor(const mirror::Object* root, void* arg);
bool VerifyHeapReferences()
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -145,11 +126,11 @@
// A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock,
// and doesn't abort on error, allowing the caller to report more
// meaningful diagnostics.
- bool IsHeapAddress(const Object* obj);
+ bool IsHeapAddress(const mirror::Object* obj);
// Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses).
// Requires the heap lock to be held.
- bool IsLiveObjectLocked(const Object* obj)
+ bool IsLiveObjectLocked(const mirror::Object* obj)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
// Initiates an explicit garbage collection.
@@ -169,16 +150,16 @@
// Implements VMDebug.countInstancesOfClass and JDWP VM_InstanceCount.
// The boolean decides whether to use IsAssignableFrom or == when comparing classes.
- void CountInstances(const std::vector<Class*>& classes, bool use_is_assignable_from,
+ void CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
uint64_t* counts)
LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Implements JDWP RT_Instances.
- void GetInstances(Class* c, int32_t max_count, std::vector<Object*>& instances)
+ void GetInstances(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Implements JDWP OR_ReferringObjects.
- void GetReferringObjects(Object* o, int32_t max_count, std::vector<Object*>& referring_objects)
+ void GetReferringObjects(mirror::Object* o, int32_t max_count, std::vector<mirror::Object*>& referring_objects)
LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -218,15 +199,15 @@
MemberOffset reference_pendingNext_offset,
MemberOffset finalizer_reference_zombie_offset);
- Object* GetReferenceReferent(Object* reference);
- void ClearReferenceReferent(Object* reference) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Object* GetReferenceReferent(mirror::Object* reference);
+ void ClearReferenceReferent(mirror::Object* reference) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Returns true if the reference object has not yet been enqueued.
- bool IsEnqueuable(const Object* ref);
- void EnqueueReference(Object* ref, Object** list) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void EnqueuePendingReference(Object* ref, Object** list)
+ bool IsEnqueuable(const mirror::Object* ref);
+ void EnqueueReference(mirror::Object* ref, mirror::Object** list) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void EnqueuePendingReference(mirror::Object* ref, mirror::Object** list)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- Object* DequeuePendingReference(Object** list) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Object* DequeuePendingReference(mirror::Object** list) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
MemberOffset GetReferencePendingNextOffset() {
DCHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);
@@ -257,12 +238,12 @@
// Must be called if a field of an Object in the heap changes, and before any GC safe-point.
// The call is not needed if NULL is stored in the field.
- void WriteBarrierField(const Object* dst, MemberOffset /*offset*/, const Object* /*new_value*/) {
+ void WriteBarrierField(const mirror::Object* dst, MemberOffset /*offset*/, const mirror::Object* /*new_value*/) {
card_table_->MarkCard(dst);
}
// Write barrier for array operations that update many field positions
- void WriteBarrierArray(const Object* dst, int /*start_offset*/,
+ void WriteBarrierArray(const mirror::Object* dst, int /*start_offset*/,
size_t /*length TODO: element_count or byte_count?*/) {
card_table_->MarkCard(dst);
}
@@ -271,7 +252,7 @@
return card_table_.get();
}
- void AddFinalizerReference(Thread* self, Object* object);
+ void AddFinalizerReference(Thread* self, mirror::Object* object);
size_t GetBytesAllocated() const;
size_t GetObjectsAllocated() const;
@@ -293,7 +274,7 @@
// Functions for getting the bitmap which corresponds to an object's address.
// This is probably slow; TODO: use a better data structure, such as a binary tree.
- ContinuousSpace* FindSpaceFromObject(const Object*) const;
+ ContinuousSpace* FindSpaceFromObject(const mirror::Object*) const;
void DumpForSigQuit(std::ostream& os);
@@ -354,22 +335,22 @@
private:
// Allocates uninitialized storage. Passing in a null space tries to place the object in the
// large object space.
- Object* Allocate(Thread* self, AllocSpace* space, size_t num_bytes)
+ mirror::Object* Allocate(Thread* self, AllocSpace* space, size_t num_bytes)
LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Try to allocate a number of bytes, this function never does any GCs.
- Object* TryToAllocate(Thread* self, AllocSpace* space, size_t alloc_size, bool grow)
+ mirror::Object* TryToAllocate(Thread* self, AllocSpace* space, size_t alloc_size, bool grow)
LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Pushes a list of cleared references out to the managed heap.
- void EnqueueClearedReferences(Object** cleared_references);
+ void EnqueueClearedReferences(mirror::Object** cleared_references);
void RequestHeapTrim() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
void RequestConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
- void RecordAllocation(size_t size, Object* object)
+ void RecordAllocation(size_t size, mirror::Object* object)
LOCKS_EXCLUDED(GlobalSynchronization::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -395,9 +376,9 @@
// No thread safety analysis since we call this everywhere and it is impossible to find a proper
// lock ordering for it.
- void VerifyObjectBody(const Object *obj) NO_THREAD_SAFETY_ANALYSIS;
+ void VerifyObjectBody(const mirror::Object *obj) NO_THREAD_SAFETY_ANALYSIS;
- static void VerificationCallback(Object* obj, void* arg)
+ static void VerificationCallback(mirror::Object* obj, void* arg)
SHARED_LOCKS_REQUIRED(GlobalSychronization::heap_bitmap_lock_);
// Swap the allocation stack with the live stack.
diff --git a/src/heap_test.cc b/src/heap_test.cc
index 6db7416..79cc835 100644
--- a/src/heap_test.cc
+++ b/src/heap_test.cc
@@ -15,6 +15,11 @@
*/
#include "common_test.h"
+#include "gc/card_table-inl.h"
+#include "gc/space_bitmap-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
#include "sirt_ref.h"
namespace art {
@@ -37,12 +42,12 @@
ScopedObjectAccess soa(Thread::Current());
// garbage is created during ClassLinker::Init
- Class* c = class_linker_->FindSystemClass("[Ljava/lang/Object;");
+ mirror::Class* c = class_linker_->FindSystemClass("[Ljava/lang/Object;");
for (size_t i = 0; i < 1024; ++i) {
- SirtRef<ObjectArray<Object> > array(soa.Self(),
- ObjectArray<Object>::Alloc(soa.Self(), c, 2048));
+ SirtRef<mirror::ObjectArray<mirror::Object> > array(soa.Self(),
+ mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), c, 2048));
for (size_t j = 0; j < 2048; ++j) {
- array->Set(j, String::AllocFromModifiedUtf8(soa.Self(), "hello, world!"));
+ array->Set(j, mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!"));
}
}
}
@@ -53,7 +58,7 @@
byte* heap_begin = reinterpret_cast<byte*>(0x1000);
const size_t heap_capacity = SpaceBitmap::kAlignment * (sizeof(intptr_t) * 8 + 1);
UniquePtr<SpaceBitmap> bitmap(SpaceBitmap::Create("test-bitmap", heap_begin, heap_capacity));
- bitmap->Set(reinterpret_cast<const Object*>(&heap_begin[heap_capacity - SpaceBitmap::kAlignment]));
+ bitmap->Set(reinterpret_cast<const mirror::Object*>(&heap_begin[heap_capacity - SpaceBitmap::kAlignment]));
}
} // namespace art
diff --git a/src/hprof/hprof.cc b/src/hprof/hprof.cc
index c0e73bc..e0a4c05 100644
--- a/src/hprof/hprof.cc
+++ b/src/hprof/hprof.cc
@@ -44,7 +44,11 @@
#include "debugger.h"
#include "globals.h"
#include "heap.h"
-#include "object.h"
+#include "mirror/class.h"
+#include "mirror/class-inl.h"
+#include "mirror/field.h"
+#include "mirror/field-inl.h"
+#include "mirror/object-inl.h"
#include "object_utils.h"
#include "os.h"
#include "safe_map.h"
@@ -165,8 +169,8 @@
typedef HprofId HprofStringId;
typedef HprofId HprofObjectId;
typedef HprofId HprofClassObjectId;
-typedef std::set<Class*> ClassSet;
-typedef std::set<Class*>::iterator ClassSetIterator;
+typedef std::set<mirror::Class*> ClassSet;
+typedef std::set<mirror::Class*>::iterator ClassSetIterator;
typedef SafeMap<std::string, size_t> StringMap;
typedef SafeMap<std::string, size_t>::iterator StringMapIterator;
@@ -480,14 +484,14 @@
}
private:
- static void RootVisitor(const Object* obj, void* arg)
+ static void RootVisitor(const mirror::Object* obj, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
CHECK(arg != NULL);
Hprof* hprof = reinterpret_cast<Hprof*>(arg);
hprof->VisitRoot(obj);
}
- static void HeapBitmapCallback(Object* obj, void* arg)
+ static void HeapBitmapCallback(mirror::Object* obj, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
CHECK(obj != NULL);
CHECK(arg != NULL);
@@ -495,9 +499,9 @@
hprof->DumpHeapObject(obj);
}
- void VisitRoot(const Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void VisitRoot(const mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- int DumpHeapObject(Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ int DumpHeapObject(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void Finish() {
}
@@ -507,7 +511,7 @@
uint32_t nextSerialNumber = 1;
for (ClassSetIterator it = classes_.begin(); it != classes_.end(); ++it) {
- const Class* c = *it;
+ const mirror::Class* c = *it;
CHECK(c != NULL);
int err = current_record_.StartNewRecord(header_fp_, HPROF_TAG_LOAD_CLASS, HPROF_TIME);
@@ -567,9 +571,9 @@
current_heap_ = HPROF_HEAP_DEFAULT;
}
- int MarkRootObject(const Object* obj, jobject jniObj);
+ int MarkRootObject(const mirror::Object* obj, jobject jniObj);
- HprofClassObjectId LookupClassId(Class* c)
+ HprofClassObjectId LookupClassId(mirror::Class* c)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (c == NULL) {
// c is the superclass of java.lang.Object or a primitive
@@ -577,7 +581,7 @@
}
std::pair<ClassSetIterator, bool> result = classes_.insert(c);
- const Class* present = *result.first;
+ const mirror::Class* present = *result.first;
// Make sure that we've assigned a string ID for this class' name
LookupClassNameId(c);
@@ -586,7 +590,7 @@
return (HprofStringId) present;
}
- HprofStringId LookupStringId(String* string) {
+ HprofStringId LookupStringId(mirror::String* string) {
return LookupStringId(string->ToModifiedUtf8());
}
@@ -604,7 +608,7 @@
return id;
}
- HprofStringId LookupClassNameId(const Class* c)
+ HprofStringId LookupClassNameId(const mirror::Class* c)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return LookupStringId(PrettyDescriptor(c));
}
@@ -740,7 +744,7 @@
// something when ctx->gc_scan_state_ is non-zero, which is usually
// only true when marking the root set or unreachable
// objects. Used to add rootset references to obj.
-int Hprof::MarkRootObject(const Object* obj, jobject jniObj) {
+int Hprof::MarkRootObject(const mirror::Object* obj, jobject jniObj) {
HprofRecord* rec = &current_record_;
HprofHeapTag heapTag = (HprofHeapTag)gc_scan_state_;
@@ -823,11 +827,11 @@
return 0;
}
-static int StackTraceSerialNumber(const Object* /*obj*/) {
+static int StackTraceSerialNumber(const mirror::Object* /*obj*/) {
return HPROF_NULL_STACK_TRACE;
}
-int Hprof::DumpHeapObject(Object* obj) {
+int Hprof::DumpHeapObject(mirror::Object* obj) {
HprofRecord* rec = &current_record_;
HprofHeapId desiredHeap = false ? HPROF_HEAP_ZYGOTE : HPROF_HEAP_APP; // TODO: zygote objects?
@@ -859,7 +863,7 @@
current_heap_ = desiredHeap;
}
- Class* c = obj->GetClass();
+ mirror::Class* c = obj->GetClass();
if (c == NULL) {
// This object will bother HprofReader, because it has a NULL
// class, so just don't dump it. It could be
@@ -867,7 +871,7 @@
// allocated which hasn't been initialized yet.
} else {
if (obj->IsClass()) {
- Class* thisClass = obj->AsClass();
+ mirror::Class* thisClass = obj->AsClass();
// obj is a ClassObject.
size_t sFieldCount = thisClass->NumStaticFields();
if (sFieldCount != 0) {
@@ -896,7 +900,7 @@
if (thisClass->IsClassClass()) {
// ClassObjects have their static fields appended, so aren't all the same size.
// But they're at least this size.
- rec->AddU4(sizeof(Class)); // instance size
+ rec->AddU4(sizeof(mirror::Class)); // instance size
} else if (thisClass->IsArrayClass() || thisClass->IsPrimitive()) {
rec->AddU4(0);
} else {
@@ -917,7 +921,7 @@
rec->AddId(CLASS_STATICS_ID(obj));
for (size_t i = 0; i < sFieldCount; ++i) {
- Field* f = thisClass->GetStaticField(i);
+ mirror::Field* f = thisClass->GetStaticField(i);
fh.ChangeField(f);
size_t size;
@@ -942,14 +946,14 @@
int iFieldCount = thisClass->IsObjectClass() ? 0 : thisClass->NumInstanceFields();
rec->AddU2((uint16_t)iFieldCount);
for (int i = 0; i < iFieldCount; ++i) {
- Field* f = thisClass->GetInstanceField(i);
+ mirror::Field* f = thisClass->GetInstanceField(i);
fh.ChangeField(f);
HprofBasicType t = SignatureToBasicTypeAndSize(fh.GetTypeDescriptor(), NULL);
rec->AddId(LookupStringId(fh.GetName()));
rec->AddU1(t);
}
} else if (c->IsArrayClass()) {
- const Array* aobj = obj->AsArray();
+ const mirror::Array* aobj = obj->AsArray();
uint32_t length = aobj->GetLength();
if (obj->IsObjectArray()) {
@@ -962,7 +966,7 @@
rec->AddId(LookupClassId(c));
// Dump the elements, which are always objects or NULL.
- rec->AddIdList((const HprofObjectId*)aobj->GetRawData(sizeof(Object*)), length);
+ rec->AddIdList((const HprofObjectId*)aobj->GetRawData(sizeof(mirror::Object*)), length);
} else {
size_t size;
HprofBasicType t = PrimitiveToBasicTypeAndSize(c->GetComponentType()->GetPrimitiveType(), &size);
@@ -1000,12 +1004,12 @@
// Write the instance data; fields for this class, followed by super class fields,
// and so on. Don't write the klass or monitor fields of Object.class.
- const Class* sclass = c;
+ const mirror::Class* sclass = c;
FieldHelper fh;
while (!sclass->IsObjectClass()) {
int ifieldCount = sclass->NumInstanceFields();
for (int i = 0; i < ifieldCount; ++i) {
- Field* f = sclass->GetInstanceField(i);
+ mirror::Field* f = sclass->GetInstanceField(i);
fh.ChangeField(f);
size_t size;
SignatureToBasicTypeAndSize(fh.GetTypeDescriptor(), &size);
@@ -1034,7 +1038,7 @@
return 0;
}
-void Hprof::VisitRoot(const Object* obj) {
+void Hprof::VisitRoot(const mirror::Object* obj) {
uint32_t threadId = 0; // TODO
/*RootType*/ size_t type = 0; // TODO
diff --git a/src/image.cc b/src/image.cc
index a190f10..8eeb772 100644
--- a/src/image.cc
+++ b/src/image.cc
@@ -16,9 +16,47 @@
#include "image.h"
+#include "mirror/object_array.h"
+#include "mirror/object_array-inl.h"
+#include "utils.h"
+
namespace art {
const byte ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
const byte ImageHeader::kImageVersion[] = { '0', '0', '2', '\0' };
+ImageHeader::ImageHeader(uint32_t image_begin,
+ uint32_t image_roots,
+ uint32_t oat_checksum,
+ uint32_t oat_file_begin,
+ uint32_t oat_data_begin,
+ uint32_t oat_data_end,
+ uint32_t oat_file_end)
+ : image_begin_(image_begin),
+ oat_checksum_(oat_checksum),
+ oat_file_begin_(oat_file_begin),
+ oat_data_begin_(oat_data_begin),
+ oat_data_end_(oat_data_end),
+ oat_file_end_(oat_file_end),
+ image_roots_(image_roots) {
+ CHECK_EQ(image_begin, RoundUp(image_begin, kPageSize));
+ CHECK_EQ(oat_file_begin, RoundUp(oat_file_begin, kPageSize));
+ CHECK_EQ(oat_data_begin, RoundUp(oat_data_begin, kPageSize));
+ CHECK_LT(image_begin, image_roots);
+ CHECK_LT(image_roots, oat_file_begin);
+ CHECK_LE(oat_file_begin, oat_data_begin);
+ CHECK_LT(oat_data_begin, oat_data_end);
+ CHECK_LE(oat_data_end, oat_file_end);
+ memcpy(magic_, kImageMagic, sizeof(kImageMagic));
+ memcpy(version_, kImageVersion, sizeof(kImageVersion));
+}
+
+mirror::Object* ImageHeader::GetImageRoot(ImageRoot image_root) const {
+ return GetImageRoots()->Get(image_root);
+}
+
+mirror::ObjectArray<mirror::Object>* ImageHeader::GetImageRoots() const {
+ return reinterpret_cast<mirror::ObjectArray<mirror::Object>*>(image_roots_);
+}
+
} // namespace art
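
The ImageHeader constructor moved out of image.h above enforces the boot-image layout ordering at construction time: image pages come first, then the image roots, then the oat file with its data section inside it. A minimal standalone sketch of the same invariant checks, using plain asserts instead of ART's CHECK macros; the 4 KB page size, RoundUp helper, and example addresses are assumptions here, not values from the diff.

#include <cassert>
#include <cstdint>

namespace {

constexpr uint32_t kPageSize = 4096;  // assumed page size

uint32_t RoundUp(uint32_t x, uint32_t n) {
  return (x + n - 1) / n * n;
}

// Mirrors the ordering the ImageHeader constructor checks:
// image_begin < image_roots < oat_file_begin <= oat_data_begin
//             < oat_data_end <= oat_file_end
void CheckImageLayout(uint32_t image_begin, uint32_t image_roots,
                      uint32_t oat_file_begin, uint32_t oat_data_begin,
                      uint32_t oat_data_end, uint32_t oat_file_end) {
  assert(image_begin == RoundUp(image_begin, kPageSize));
  assert(oat_file_begin == RoundUp(oat_file_begin, kPageSize));
  assert(oat_data_begin == RoundUp(oat_data_begin, kPageSize));
  assert(image_begin < image_roots);
  assert(image_roots < oat_file_begin);
  assert(oat_file_begin <= oat_data_begin);
  assert(oat_data_begin < oat_data_end);
  assert(oat_data_end <= oat_file_end);
}

}  // namespace

int main() {
  // Example layout: image pages first, then the oat file after them.
  CheckImageLayout(0x70000000, 0x70000040,
                   0x70100000, 0x70101000,
                   0x70180000, 0x70190000);
  return 0;
}
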
diff --git a/src/image.h b/src/image.h
index f38f04b..6501328 100644
--- a/src/image.h
+++ b/src/image.h
@@ -20,7 +20,7 @@
#include <string.h>
#include "globals.h"
-#include "object.h"
+#include "mirror/object.h"
namespace art {
@@ -35,25 +35,7 @@
uint32_t oat_file_begin,
uint32_t oat_data_begin,
uint32_t oat_data_end,
- uint32_t oat_file_end)
- : image_begin_(image_begin),
- oat_checksum_(oat_checksum),
- oat_file_begin_(oat_file_begin),
- oat_data_begin_(oat_data_begin),
- oat_data_end_(oat_data_end),
- oat_file_end_(oat_file_end),
- image_roots_(image_roots) {
- CHECK_EQ(image_begin, RoundUp(image_begin, kPageSize));
- CHECK_EQ(oat_file_begin, RoundUp(oat_file_begin, kPageSize));
- CHECK_EQ(oat_data_begin, RoundUp(oat_data_begin, kPageSize));
- CHECK_LT(image_begin, image_roots);
- CHECK_LT(image_roots, oat_file_begin);
- CHECK_LE(oat_file_begin, oat_data_begin);
- CHECK_LT(oat_data_begin, oat_data_end);
- CHECK_LE(oat_data_end, oat_file_end);
- memcpy(magic_, kImageMagic, sizeof(kImageMagic));
- memcpy(version_, kImageVersion, sizeof(kImageVersion));
- }
+ uint32_t oat_file_end);
bool IsValid() const {
if (memcmp(magic_, kImageMagic, sizeof(kImageMagic)) != 0) {
@@ -113,15 +95,11 @@
kImageRootsMax,
};
- Object* GetImageRoot(ImageRoot image_root) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetImageRoots()->Get(image_root);
- }
+ mirror::Object* GetImageRoot(ImageRoot image_root) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
- ObjectArray<Object>* GetImageRoots() const {
- return reinterpret_cast<ObjectArray<Object>*>(image_roots_);
- }
+ mirror::ObjectArray<mirror::Object>* GetImageRoots() const;
static const byte kImageMagic[4];
static const byte kImageVersion[4];
diff --git a/src/image_test.cc b/src/image_test.cc
index 89e3a05..ed6426b 100644
--- a/src/image_test.cc
+++ b/src/image_test.cc
@@ -54,7 +54,7 @@
for (size_t i = 0; i < java_lang_dex_file_->NumClassDefs(); ++i) {
const DexFile::ClassDef& class_def = java_lang_dex_file_->GetClassDef(i);
const char* descriptor = java_lang_dex_file_->GetClassDescriptor(class_def);
- Class* klass = class_linker_->FindSystemClass(descriptor);
+ mirror::Class* klass = class_linker_->FindSystemClass(descriptor);
EXPECT_TRUE(klass != NULL) << descriptor;
}
}
@@ -139,7 +139,7 @@
for (size_t i = 0; i < dex->NumClassDefs(); ++i) {
const DexFile::ClassDef& class_def = dex->GetClassDef(i);
const char* descriptor = dex->GetClassDescriptor(class_def);
- Class* klass = class_linker_->FindSystemClass(descriptor);
+ mirror::Class* klass = class_linker_->FindSystemClass(descriptor);
EXPECT_TRUE(klass != NULL) << descriptor;
EXPECT_LT(image_begin, reinterpret_cast<byte*>(klass)) << descriptor;
EXPECT_LT(reinterpret_cast<byte*>(klass), image_end) << descriptor;
diff --git a/src/image_writer.cc b/src/image_writer.cc
index fc88cbb..dc19d72 100644
--- a/src/image_writer.cc
+++ b/src/image_writer.cc
@@ -23,19 +23,25 @@
#include "base/logging.h"
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
-#include "class_loader.h"
#include "compiled_method.h"
#include "compiler.h"
-#include "dex_cache.h"
+#include "gc/card_table-inl.h"
#include "gc/large_object_space.h"
#include "gc/space.h"
#include "globals.h"
#include "heap.h"
#include "image.h"
#include "intern_table.h"
+#include "mirror/array-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/class_loader.h"
+#include "mirror/dex_cache.h"
+#include "mirror/field-inl.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
#include "oat.h"
#include "oat_file.h"
-#include "object.h"
#include "object_utils.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
@@ -43,6 +49,8 @@
#include "UniquePtr.h"
#include "utils.h"
+using namespace art::mirror;
+
namespace art {
bool ImageWriter::Write(const std::string& image_filename,
@@ -442,7 +450,7 @@
DCHECK_LT(offset + n, image_writer->image_->Size());
memcpy(dst, src, n);
Object* copy = reinterpret_cast<Object*>(dst);
- copy->monitor_ = 0; // We may have inflated the lock during compilation.
+ copy->SetField32(Object::MonitorOffset(), 0, false); // We may have inflated the lock during compilation.
image_writer->FixupObject(obj, copy);
}
@@ -476,13 +484,13 @@
// Every type of method can have an invoke stub
uint32_t invoke_stub_offset = orig->GetOatInvokeStubOffset();
const byte* invoke_stub = GetOatAddress(invoke_stub_offset);
- copy->invoke_stub_ = reinterpret_cast<AbstractMethod::InvokeStub*>(const_cast<byte*>(invoke_stub));
+ copy->SetInvokeStub(reinterpret_cast<AbstractMethod::InvokeStub*>(const_cast<byte*>(invoke_stub)));
if (orig->IsAbstract()) {
// Abstract methods are pointed to a stub that will throw AbstractMethodError if they are called
ByteArray* orig_ame_stub_array_ = Runtime::Current()->GetAbstractMethodErrorStubArray();
ByteArray* copy_ame_stub_array_ = down_cast<ByteArray*>(GetImageAddress(orig_ame_stub_array_));
- copy->code_ = copy_ame_stub_array_->GetData();
+ copy->SetCode(copy_ame_stub_array_->GetData());
return;
}
@@ -492,7 +500,7 @@
Runtime::Current()->GetResolutionStubArray(Runtime::kUnknownMethod);
CHECK(orig->GetCode() == orig_res_stub_array_->GetData());
ByteArray* copy_res_stub_array_ = down_cast<ByteArray*>(GetImageAddress(orig_res_stub_array_));
- copy->code_ = copy_res_stub_array_->GetData();
+ copy->SetCode(copy_res_stub_array_->GetData());
return;
}
@@ -511,27 +519,27 @@
if (code == NULL) {
code = GetOatAddress(code_offset);
}
- copy->code_ = code;
+ copy->SetCode(code);
if (orig->IsNative()) {
// The native method's pointer is directed to a stub to lookup via dlsym.
// Note this is not the code_ pointer, that is handled above.
ByteArray* orig_jni_stub_array_ = Runtime::Current()->GetJniDlsymLookupStub();
ByteArray* copy_jni_stub_array_ = down_cast<ByteArray*>(GetImageAddress(orig_jni_stub_array_));
- copy->native_method_ = copy_jni_stub_array_->GetData();
+ copy->SetNativeMethod(copy_jni_stub_array_->GetData());
} else {
// normal (non-abstract non-native) methods have mapping tables to relocate
uint32_t mapping_table_off = orig->GetOatMappingTableOffset();
const byte* mapping_table = GetOatAddress(mapping_table_off);
- copy->mapping_table_ = reinterpret_cast<const uint32_t*>(mapping_table);
+ copy->SetMappingTable(reinterpret_cast<const uint32_t*>(mapping_table));
uint32_t vmap_table_offset = orig->GetOatVmapTableOffset();
const byte* vmap_table = GetOatAddress(vmap_table_offset);
- copy->vmap_table_ = reinterpret_cast<const uint16_t*>(vmap_table);
+ copy->SetVmapTable(reinterpret_cast<const uint16_t*>(vmap_table));
uint32_t native_gc_map_offset = orig->GetOatNativeGcMapOffset();
const byte* native_gc_map = GetOatAddress(native_gc_map_offset);
- copy->native_gc_map_ = reinterpret_cast<const uint8_t*>(native_gc_map);
+ copy->SetNativeGcMap(reinterpret_cast<const uint8_t*>(native_gc_map));
}
}
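
With object.h broken into mirror/ headers, the image writer above stops poking raw fields such as monitor_, code_ and mapping_table_ and goes through setters (SetField32, SetCode, SetMappingTable, ...) instead, keeping field layout private to the mirror classes. A hypothetical pared-down sketch of that pattern; the Method class, its members, and the offsets below are stand-ins, not ART's real mirror types.

#include <cstdint>

// Hypothetical stand-in for a mirror object: its raw fields are private,
// so the image writer has to go through setters.
class Method {
 public:
  void SetCode(const void* code) { code_ = code; }
  void SetMappingTable(const uint32_t* table) { mapping_table_ = table; }
  const void* GetCode() const { return code_; }
  const uint32_t* GetMappingTable() const { return mapping_table_; }

 private:
  const void* code_ = nullptr;
  const uint32_t* mapping_table_ = nullptr;
};

// Relocation only touches the copy through its accessors, mirroring the
// copy->SetCode(...) / copy->SetMappingTable(...) calls in the diff.
void FixupMethod(Method* copy, const uint8_t* oat_begin,
                 uint32_t code_offset, uint32_t mapping_table_offset) {
  copy->SetCode(oat_begin + code_offset);
  copy->SetMappingTable(
      reinterpret_cast<const uint32_t*>(oat_begin + mapping_table_offset));
}

int main() {
  static const uint8_t oat[64] = {};
  Method copy;
  FixupMethod(&copy, oat, 16, 32);
  return copy.GetCode() == oat + 16 ? 0 : 1;
}
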
diff --git a/src/image_writer.h b/src/image_writer.h
index 64bac2e..eff9ffb 100644
--- a/src/image_writer.h
+++ b/src/image_writer.h
@@ -24,10 +24,9 @@
#include <string>
#include "compiler.h"
-#include "dex_cache.h"
#include "mem_map.h"
#include "oat_file.h"
-#include "object.h"
+#include "mirror/dex_cache.h"
#include "os.h"
#include "safe_map.h"
#include "gc/space.h"
@@ -59,7 +58,7 @@
bool AllocMemory();
// we use the lock word to store the offset of the object in the image
- void AssignImageOffset(Object* object)
+ void AssignImageOffset(mirror::Object* object)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(object != NULL);
SetImageOffset(object, image_end_);
@@ -67,35 +66,35 @@
DCHECK_LT(image_end_, image_->Size());
}
- void SetImageOffset(Object* object, size_t offset) {
+ void SetImageOffset(mirror::Object* object, size_t offset) {
DCHECK(object != NULL);
DCHECK_NE(offset, 0U);
DCHECK(!IsImageOffsetAssigned(object));
offsets_.Put(object, offset);
}
- size_t IsImageOffsetAssigned(const Object* object) const {
+ size_t IsImageOffsetAssigned(const mirror::Object* object) const {
DCHECK(object != NULL);
return offsets_.find(object) != offsets_.end();
}
- size_t GetImageOffset(const Object* object) const {
+ size_t GetImageOffset(const mirror::Object* object) const {
DCHECK(object != NULL);
DCHECK(IsImageOffsetAssigned(object));
return offsets_.find(object)->second;
}
- Object* GetImageAddress(const Object* object) const {
+ mirror::Object* GetImageAddress(const mirror::Object* object) const {
if (object == NULL) {
return NULL;
}
- return reinterpret_cast<Object*>(image_begin_ + GetImageOffset(object));
+ return reinterpret_cast<mirror::Object*>(image_begin_ + GetImageOffset(object));
}
- Object* GetLocalAddress(const Object* object) const {
+ mirror::Object* GetLocalAddress(const mirror::Object* object) const {
size_t offset = GetImageOffset(object);
byte* dst = image_->Begin() + offset;
- return reinterpret_cast<Object*>(dst);
+ return reinterpret_cast<mirror::Object*>(dst);
}
const byte* GetOatAddress(uint32_t offset) const {
@@ -106,50 +105,52 @@
return oat_data_begin_ + offset;
}
- bool IsImageClass(const Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsImageClass(const mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void DumpImageClasses();
void ComputeLazyFieldsForImageClasses()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static bool ComputeLazyFieldsForClassesVisitor(Class* klass, void* arg)
+ static bool ComputeLazyFieldsForClassesVisitor(mirror::Class* klass, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Wire dex cache resolved strings to strings in the image to avoid runtime resolution
void ComputeEagerResolvedStrings();
- static void ComputeEagerResolvedStringsCallback(Object* obj, void* arg)
+ static void ComputeEagerResolvedStringsCallback(mirror::Object* obj, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void PruneNonImageClasses() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static bool NonImageClassesVisitor(Class* c, void* arg)
+ static bool NonImageClassesVisitor(mirror::Class* c, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CheckNonImageClassesRemoved();
- static void CheckNonImageClassesRemovedCallback(Object* obj, void* arg)
+ static void CheckNonImageClassesRemovedCallback(mirror::Object* obj, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CalculateNewObjectOffsets(size_t oat_loaded_size, size_t oat_data_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ObjectArray<Object>* CreateImageRoots() const
+ mirror::ObjectArray<mirror::Object>* CreateImageRoots() const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static void CalculateNewObjectOffsetsCallback(Object* obj, void* arg)
+ static void CalculateNewObjectOffsetsCallback(mirror::Object* obj, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CopyAndFixupObjects();
- static void CopyAndFixupObjectsCallback(Object* obj, void* arg)
+ static void CopyAndFixupObjectsCallback(mirror::Object* obj, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void FixupClass(const Class* orig, Class* copy)
+ void FixupClass(const mirror::Class* orig, mirror::Class* copy)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void FixupMethod(const AbstractMethod* orig, AbstractMethod* copy)
+ void FixupMethod(const mirror::AbstractMethod* orig, mirror::AbstractMethod* copy)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void FixupObject(const Object* orig, Object* copy)
+ void FixupObject(const mirror::Object* orig, mirror::Object* copy)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void FixupObjectArray(const ObjectArray<Object>* orig, ObjectArray<Object>* copy)
+ void FixupObjectArray(const mirror::ObjectArray<mirror::Object>* orig,
+ mirror::ObjectArray<mirror::Object>* copy)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void FixupInstanceFields(const Object* orig, Object* copy)
+ void FixupInstanceFields(const mirror::Object* orig, mirror::Object* copy)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void FixupStaticFields(const Class* orig, Class* copy)
+ void FixupStaticFields(const mirror::Class* orig, mirror::Class* copy)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void FixupFields(const Object* orig, Object* copy, uint32_t ref_offsets, bool is_static)
+ void FixupFields(const mirror::Object* orig, mirror::Object* copy, uint32_t ref_offsets,
+ bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void PatchOatCodeAndMethods(const Compiler& compiler)
@@ -157,7 +158,7 @@
void SetPatchLocation(const Compiler::PatchInformation* patch, uint32_t value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- SafeMap<const Object*, size_t> offsets_;
+ SafeMap<const mirror::Object*, size_t> offsets_;
// oat file with code for this image
OatFile* oat_file_;
@@ -178,7 +179,7 @@
const byte* oat_data_begin_;
// DexCaches seen while scanning for fixing up CodeAndDirectMethods
- typedef std::set<DexCache*> Set;
+ typedef std::set<mirror::DexCache*> Set;
Set dex_caches_;
};
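
ImageWriter's offsets_ map above records, per object, where its copy will land in the image; GetImageAddress then rebases that offset onto image_begin_. A small sketch of the same bookkeeping, with std::map standing in for ART's SafeMap; the class and member names here are illustrative only.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <map>

struct Obj {};  // stand-in for mirror::Object

class OffsetTable {
 public:
  explicit OffsetTable(uintptr_t image_begin) : image_begin_(image_begin) {}

  void SetImageOffset(const Obj* o, size_t offset) {
    assert(offsets_.count(o) == 0);  // each object is assigned exactly once
    offsets_[o] = offset;
  }

  // Address the object will have once the image is mapped at image_begin_.
  uintptr_t GetImageAddress(const Obj* o) const {
    return image_begin_ + offsets_.at(o);
  }

 private:
  uintptr_t image_begin_;
  std::map<const Obj*, size_t> offsets_;  // SafeMap in ART
};

int main() {
  Obj o;
  OffsetTable table(0x70000000);
  table.SetImageOffset(&o, 0x40);
  return table.GetImageAddress(&o) == 0x70000040 ? 0 : 1;
}
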
diff --git a/src/indirect_reference_table.cc b/src/indirect_reference_table.cc
index 9bb6edc..720380a 100644
--- a/src/indirect_reference_table.cc
+++ b/src/indirect_reference_table.cc
@@ -40,9 +40,9 @@
CHECK_LE(initialCount, maxCount);
CHECK_NE(desiredKind, kSirtOrInvalid);
- table_ = reinterpret_cast<const Object**>(malloc(initialCount * sizeof(const Object*)));
+ table_ = reinterpret_cast<const mirror::Object**>(malloc(initialCount * sizeof(const mirror::Object*)));
CHECK(table_ != NULL);
- memset(table_, 0xd1, initialCount * sizeof(const Object*));
+ memset(table_, 0xd1, initialCount * sizeof(const mirror::Object*));
slot_data_ = reinterpret_cast<IndirectRefSlot*>(calloc(initialCount, sizeof(IndirectRefSlot)));
CHECK(slot_data_ != NULL);
@@ -63,7 +63,7 @@
// Make sure that the entry at "idx" is correctly paired with "iref".
bool IndirectReferenceTable::CheckEntry(const char* what, IndirectRef iref, int idx) const {
- const Object* obj = table_[idx];
+ const mirror::Object* obj = table_[idx];
IndirectRef checkRef = ToIndirectRef(obj, idx);
if (checkRef != iref) {
LOG(ERROR) << "JNI ERROR (app bug): attempt to " << what
@@ -75,7 +75,7 @@
return true;
}
-IndirectRef IndirectReferenceTable::Add(uint32_t cookie, const Object* obj) {
+IndirectRef IndirectReferenceTable::Add(uint32_t cookie, const mirror::Object* obj) {
IRTSegmentState prevState;
prevState.all = cookie;
size_t topIndex = segment_state_.parts.topIndex;
@@ -101,7 +101,7 @@
}
DCHECK_GT(newSize, alloc_entries_);
- table_ = reinterpret_cast<const Object**>(realloc(table_, newSize * sizeof(const Object*)));
+ table_ = reinterpret_cast<const mirror::Object**>(realloc(table_, newSize * sizeof(const mirror::Object*)));
slot_data_ = reinterpret_cast<IndirectRefSlot*>(realloc(slot_data_,
newSize * sizeof(IndirectRefSlot)));
if (table_ == NULL || slot_data_ == NULL) {
@@ -126,7 +126,7 @@
if (numHoles > 0) {
DCHECK_GT(topIndex, 1U);
// Find the first hole; likely to be near the end of the list.
- const Object** pScan = &table_[topIndex - 1];
+ const mirror::Object** pScan = &table_[topIndex - 1];
DCHECK(*pScan != NULL);
while (*--pScan != NULL) {
DCHECK_GE(pScan, table_ + prevState.parts.topIndex);
@@ -194,7 +194,7 @@
return true;
}
-static int Find(Object* direct_pointer, int bottomIndex, int topIndex, const Object** table) {
+static int Find(mirror::Object* direct_pointer, int bottomIndex, int topIndex, const mirror::Object** table) {
for (int i = bottomIndex; i < topIndex; ++i) {
if (table[i] == direct_pointer) {
return i;
@@ -203,7 +203,7 @@
return -1;
}
-bool IndirectReferenceTable::ContainsDirectPointer(Object* direct_pointer) const {
+bool IndirectReferenceTable::ContainsDirectPointer(mirror::Object* direct_pointer) const {
return Find(direct_pointer, 0, segment_state_.parts.topIndex, table_) != -1;
}
@@ -234,7 +234,7 @@
return true;
}
if (GetIndirectRefKind(iref) == kSirtOrInvalid && vm->work_around_app_jni_bugs) {
- Object* direct_pointer = reinterpret_cast<Object*>(iref);
+ mirror::Object* direct_pointer = reinterpret_cast<mirror::Object*>(iref);
idx = Find(direct_pointer, bottomIndex, topIndex, table_);
if (idx == -1) {
LOG(WARNING) << "trying to work around app JNI bugs, but didn't find " << iref << " in table!";
@@ -308,7 +308,7 @@
return true;
}
-void IndirectReferenceTable::VisitRoots(Heap::RootVisitor* visitor, void* arg) {
+void IndirectReferenceTable::VisitRoots(RootVisitor* visitor, void* arg) {
typedef IndirectReferenceTable::iterator It; // TODO: C++0x auto
for (It it = begin(), end = this->end(); it != end; ++it) {
visitor(**it, arg);
@@ -317,7 +317,7 @@
void IndirectReferenceTable::Dump(std::ostream& os) const {
os << kind_ << " table dump:\n";
- std::vector<const Object*> entries(table_, table_ + Capacity());
+ std::vector<const mirror::Object*> entries(table_, table_ + Capacity());
// Remove NULLs.
for (int i = entries.size() - 1; i >= 0; --i) {
if (entries[i] == NULL) {
diff --git a/src/indirect_reference_table.h b/src/indirect_reference_table.h
index cd358e9..e09043d 100644
--- a/src/indirect_reference_table.h
+++ b/src/indirect_reference_table.h
@@ -23,11 +23,13 @@
#include <string>
#include "base/logging.h"
-#include "heap.h"
+#include "offsets.h"
+#include "root_visitor.h"
namespace art {
-
+namespace mirror {
class Object;
+} // namespace mirror
/*
* Maintain a table of indirect references. Used for local/global JNI
@@ -98,8 +100,8 @@
typedef void* IndirectRef;
// Magic failure values; must not pass Heap::ValidateObject() or Heap::IsHeapAddress().
-static Object* const kInvalidIndirectRefObject = reinterpret_cast<Object*>(0xdead4321);
-static Object* const kClearedJniWeakGlobal = reinterpret_cast<Object*>(0xdead1234);
+static mirror::Object* const kInvalidIndirectRefObject = reinterpret_cast<mirror::Object*>(0xdead4321);
+static mirror::Object* const kClearedJniWeakGlobal = reinterpret_cast<mirror::Object*>(0xdead1234);
/*
* Indirect reference kind, used as the two low bits of IndirectRef.
@@ -128,7 +130,7 @@
static const size_t kIRTPrevCount = 4;
struct IndirectRefSlot {
uint32_t serial;
- const Object* previous[kIRTPrevCount];
+ const mirror::Object* previous[kIRTPrevCount];
};
/* use as initial value for "cookie", and when table has only one segment */
@@ -204,7 +206,7 @@
class IrtIterator {
public:
- explicit IrtIterator(const Object** table, size_t i, size_t capacity)
+ explicit IrtIterator(const mirror::Object** table, size_t i, size_t capacity)
: table_(table), i_(i), capacity_(capacity) {
SkipNullsAndTombstones();
}
@@ -215,7 +217,7 @@
return *this;
}
- const Object** operator*() {
+ const mirror::Object** operator*() {
return &table_[i_];
}
@@ -231,7 +233,7 @@
}
}
- const Object** table_;
+ const mirror::Object** table_;
size_t i_;
size_t capacity_;
};
@@ -258,7 +260,7 @@
* Returns NULL if the table is full (max entries reached, or alloc
* failed during expansion).
*/
- IndirectRef Add(uint32_t cookie, const Object* obj)
+ IndirectRef Add(uint32_t cookie, const mirror::Object* obj)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
@@ -266,7 +268,7 @@
*
* Returns kInvalidIndirectRefObject if iref is invalid.
*/
- const Object* Get(IndirectRef iref) const {
+ const mirror::Object* Get(IndirectRef iref) const {
if (!GetChecked(iref)) {
return kInvalidIndirectRefObject;
}
@@ -274,7 +276,7 @@
}
// TODO: remove when we remove work_around_app_jni_bugs support.
- bool ContainsDirectPointer(Object* direct_pointer) const;
+ bool ContainsDirectPointer(mirror::Object* direct_pointer) const;
/*
* Remove an existing entry.
@@ -307,7 +309,7 @@
return iterator(table_, Capacity(), Capacity());
}
- void VisitRoots(Heap::RootVisitor* visitor, void* arg);
+ void VisitRoots(RootVisitor* visitor, void* arg);
uint32_t GetSegmentState() const {
return segment_state_.all;
@@ -334,7 +336,7 @@
* The object pointer itself is subject to relocation in some GC
* implementations, so we shouldn't really be using it here.
*/
- IndirectRef ToIndirectRef(const Object* /*o*/, uint32_t tableIndex) const {
+ IndirectRef ToIndirectRef(const mirror::Object* /*o*/, uint32_t tableIndex) const {
DCHECK_LT(tableIndex, 65536U);
uint32_t serialChunk = slot_data_[tableIndex].serial;
uint32_t uref = serialChunk << 20 | (tableIndex << 2) | kind_;
@@ -347,7 +349,7 @@
* We advance the serial number, invalidating any outstanding references to
* this slot.
*/
- void UpdateSlotAdd(const Object* obj, int slot) {
+ void UpdateSlotAdd(const mirror::Object* obj, int slot) {
if (slot_data_ != NULL) {
IndirectRefSlot* pSlot = &slot_data_[slot];
pSlot->serial++;
@@ -363,7 +365,7 @@
IRTSegmentState segment_state_;
/* bottom of the stack */
- const Object** table_;
+ const mirror::Object** table_;
/* bit mask, ORed into all irefs */
IndirectRefKind kind_;
/* extended debugging info */
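
ToIndirectRef above packs a per-slot serial number, the table index, and the reference kind into one opaque word. A compile-and-run sketch of that encoding and its decode side; the field widths follow the shifts visible in the diff (serial above bit 20, index in bits 2..17, kind in the two low bits), while the enum values and helper names are assumptions.

#include <cassert>
#include <cstdint>

enum IndirectRefKind {  // two low bits of an IndirectRef (values assumed)
  kSirtOrInvalid = 0,
  kLocal = 1,
  kGlobal = 2,
  kWeakGlobal = 3,
};

// serial in bits [20..], table index in bits [2..17], kind in bits [0..1].
uint32_t ToIndirectRef(uint32_t serial, uint32_t table_index,
                       IndirectRefKind kind) {
  assert(table_index < 65536U);
  return (serial << 20) | (table_index << 2) | kind;
}

uint32_t ExtractIndex(uint32_t iref) { return (iref >> 2) & 0xffff; }

IndirectRefKind ExtractKind(uint32_t iref) {
  return static_cast<IndirectRefKind>(iref & 0x03);
}

int main() {
  uint32_t iref = ToIndirectRef(/*serial=*/5, /*table_index=*/42, kGlobal);
  return (ExtractIndex(iref) == 42 && ExtractKind(iref) == kGlobal) ? 0 : 1;
}
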
diff --git a/src/indirect_reference_table_test.cc b/src/indirect_reference_table_test.cc
index b5a05ec..bd2890c 100644
--- a/src/indirect_reference_table_test.cc
+++ b/src/indirect_reference_table_test.cc
@@ -47,15 +47,15 @@
static const size_t kTableMax = 20;
IndirectReferenceTable irt(kTableInitial, kTableMax, kGlobal);
- Class* c = class_linker_->FindSystemClass("Ljava/lang/Object;");
+ mirror::Class* c = class_linker_->FindSystemClass("Ljava/lang/Object;");
ASSERT_TRUE(c != NULL);
- Object* obj0 = c->AllocObject(soa.Self());
+ mirror::Object* obj0 = c->AllocObject(soa.Self());
ASSERT_TRUE(obj0 != NULL);
- Object* obj1 = c->AllocObject(soa.Self());
+ mirror::Object* obj1 = c->AllocObject(soa.Self());
ASSERT_TRUE(obj1 != NULL);
- Object* obj2 = c->AllocObject(soa.Self());
+ mirror::Object* obj2 = c->AllocObject(soa.Self());
ASSERT_TRUE(obj2 != NULL);
- Object* obj3 = c->AllocObject(soa.Self());
+ mirror::Object* obj3 = c->AllocObject(soa.Self());
ASSERT_TRUE(obj3 != NULL);
const uint32_t cookie = IRT_FIRST_SEGMENT;
diff --git a/src/instrumentation.cc b/src/instrumentation.cc
index 065758d..e3d4d28 100644
--- a/src/instrumentation.cc
+++ b/src/instrumentation.cc
@@ -21,7 +21,10 @@
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
#include "debugger.h"
-#include "dex_cache.h"
+#include "mirror/class-inl.h"
+#include "mirror/dex_cache.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object_array-inl.h"
#if !defined(ART_USE_LLVM_COMPILER)
#include "oat/runtime/oat_support_entrypoints.h"
#endif
@@ -34,18 +37,18 @@
namespace art {
-static bool InstallStubsClassVisitor(Class* klass, void*)
+static bool InstallStubsClassVisitor(mirror::Class* klass, void*)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
for (size_t i = 0; i < klass->NumDirectMethods(); i++) {
- AbstractMethod* method = klass->GetDirectMethod(i);
+ mirror::AbstractMethod* method = klass->GetDirectMethod(i);
if (instrumentation->GetSavedCodeFromMap(method) == NULL) {
instrumentation->SaveAndUpdateCode(method);
}
}
for (size_t i = 0; i < klass->NumVirtualMethods(); i++) {
- AbstractMethod* method = klass->GetVirtualMethod(i);
+ mirror::AbstractMethod* method = klass->GetVirtualMethod(i);
if (instrumentation->GetSavedCodeFromMap(method) == NULL) {
instrumentation->SaveAndUpdateCode(method);
}
@@ -53,18 +56,18 @@
return true;
}
-static bool UninstallStubsClassVisitor(Class* klass, void*)
+static bool UninstallStubsClassVisitor(mirror::Class* klass, void*)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
for (size_t i = 0; i < klass->NumDirectMethods(); i++) {
- AbstractMethod* method = klass->GetDirectMethod(i);
+ mirror::AbstractMethod* method = klass->GetDirectMethod(i);
if (instrumentation->GetSavedCodeFromMap(method) != NULL) {
instrumentation->ResetSavedCode(method);
}
}
for (size_t i = 0; i < klass->NumVirtualMethods(); i++) {
- AbstractMethod* method = klass->GetVirtualMethod(i);
+ mirror::AbstractMethod* method = klass->GetVirtualMethod(i);
if (instrumentation->GetSavedCodeFromMap(method) != NULL) {
instrumentation->ResetSavedCode(method);
}
@@ -83,7 +86,7 @@
if (GetCurrentQuickFrame() == NULL) {
return true; // Ignore shadow frames.
}
- AbstractMethod* m = GetMethod();
+ mirror::AbstractMethod* m = GetMethod();
if (m == NULL) {
return true; // Ignore upcalls.
}
@@ -125,7 +128,7 @@
if (self_->IsInstrumentationStackEmpty()) {
return false; // Stop.
}
- AbstractMethod* m = GetMethod();
+ mirror::AbstractMethod* m = GetMethod();
if (m == NULL) {
return true; // Ignore upcalls.
}
@@ -171,16 +174,16 @@
Runtime::Current()->GetThreadList()->ForEach(InstrumentationRestoreStack, NULL);
}
-void Instrumentation::AddSavedCodeToMap(const AbstractMethod* method, const void* code) {
+void Instrumentation::AddSavedCodeToMap(const mirror::AbstractMethod* method, const void* code) {
saved_code_map_.Put(method, code);
}
-void Instrumentation::RemoveSavedCodeFromMap(const AbstractMethod* method) {
+void Instrumentation::RemoveSavedCodeFromMap(const mirror::AbstractMethod* method) {
saved_code_map_.erase(method);
}
-const void* Instrumentation::GetSavedCodeFromMap(const AbstractMethod* method) {
- typedef SafeMap<const AbstractMethod*, const void*>::const_iterator It; // TODO: C++0x auto
+const void* Instrumentation::GetSavedCodeFromMap(const mirror::AbstractMethod* method) {
+ typedef SafeMap<const mirror::AbstractMethod*, const void*>::const_iterator It; // TODO: C++0x auto
It it = saved_code_map_.find(method);
if (it == saved_code_map_.end()) {
return NULL;
@@ -189,7 +192,7 @@
}
}
-void Instrumentation::SaveAndUpdateCode(AbstractMethod* method) {
+void Instrumentation::SaveAndUpdateCode(mirror::AbstractMethod* method) {
#if defined(ART_USE_LLVM_COMPILER)
UNUSED(method);
UNIMPLEMENTED(FATAL);
@@ -201,7 +204,7 @@
#endif
}
-void Instrumentation::ResetSavedCode(AbstractMethod* method) {
+void Instrumentation::ResetSavedCode(mirror::AbstractMethod* method) {
CHECK(GetSavedCodeFromMap(method) != NULL);
method->SetCode(GetSavedCodeFromMap(method));
RemoveSavedCodeFromMap(method);
@@ -223,7 +226,7 @@
uint32_t InstrumentationMethodUnwindFromCode(Thread* self) {
Trace* trace = Runtime::Current()->GetInstrumentation()->GetTrace();
InstrumentationStackFrame instrumentation_frame = self->PopInstrumentationStackFrame();
- AbstractMethod* method = instrumentation_frame.method_;
+ mirror::AbstractMethod* method = instrumentation_frame.method_;
uint32_t lr = instrumentation_frame.return_pc_;
trace->LogMethodTraceEvent(self, method, Trace::kMethodTraceUnwind);
diff --git a/src/instrumentation.h b/src/instrumentation.h
index 00060ce..fb49bf8 100644
--- a/src/instrumentation.h
+++ b/src/instrumentation.h
@@ -17,29 +17,27 @@
#ifndef ART_SRC_INSTRUMENTATION_H_
#define ART_SRC_INSTRUMENTATION_H_
-#include <ostream>
-#include <set>
-#include <string>
-
#include "base/macros.h"
-#include "globals.h"
#include "safe_map.h"
-#include "trace.h"
-#include "UniquePtr.h"
+
+#include <stdint.h>
namespace art {
+namespace mirror {
class AbstractMethod;
+}
class Thread;
+class Trace;
uint32_t InstrumentationMethodUnwindFromCode(Thread* self);
struct InstrumentationStackFrame {
InstrumentationStackFrame() : method_(NULL), return_pc_(0), frame_id_(0) {}
- InstrumentationStackFrame(AbstractMethod* method, uintptr_t return_pc, size_t frame_id)
+ InstrumentationStackFrame(mirror::AbstractMethod* method, uintptr_t return_pc, size_t frame_id)
: method_(method), return_pc_(return_pc), frame_id_(frame_id) {
}
- AbstractMethod* method_;
+ mirror::AbstractMethod* method_;
uintptr_t return_pc_;
size_t frame_id_;
};
@@ -55,20 +53,20 @@
// Restores original code for each method and fixes the return values of each thread's stack.
void UninstallStubs() LOCKS_EXCLUDED(Locks::thread_list_lock_);
- const void* GetSavedCodeFromMap(const AbstractMethod* method);
- void SaveAndUpdateCode(AbstractMethod* method);
- void ResetSavedCode(AbstractMethod* method);
+ const void* GetSavedCodeFromMap(const mirror::AbstractMethod* method);
+ void SaveAndUpdateCode(mirror::AbstractMethod* method);
+ void ResetSavedCode(mirror::AbstractMethod* method);
Trace* GetTrace() const;
void SetTrace(Trace* trace);
void RemoveTrace();
private:
- void AddSavedCodeToMap(const AbstractMethod* method, const void* code);
- void RemoveSavedCodeFromMap(const AbstractMethod* method);
+ void AddSavedCodeToMap(const mirror::AbstractMethod* method, const void* code);
+ void RemoveSavedCodeFromMap(const mirror::AbstractMethod* method);
// Maps a method to its original code pointer.
- SafeMap<const AbstractMethod*, const void*> saved_code_map_;
+ SafeMap<const mirror::AbstractMethod*, const void*> saved_code_map_;
Trace* trace_;
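
The instrumentation.h hunk above drops trace.h, globals.h, UniquePtr.h and the STL includes in favour of forward declarations of mirror::AbstractMethod and Trace, which is the include-reduction pattern this change applies across the headers. A hedged sketch of the idiom in one translation unit; the file split markers, names, and classes below are illustrative, not ART's.

#include <map>

// --- what the header (registry.h) would contain: ---
namespace demo {
namespace mirror { class Widget; }  // forward declaration; no include needed

class Registry {
 public:
  void Register(const mirror::Widget* w, int id);
 private:
  std::map<const mirror::Widget*, int> ids_;  // pointers only, so a
                                              // forward declaration suffices
};
}  // namespace demo

// --- what the source file (registry.cc) would contain, where the full
// --- mirror/widget.h would actually be #included:
namespace demo {
namespace mirror { class Widget { /* full definition lives here */ }; }

void Registry::Register(const mirror::Widget* w, int id) { ids_[w] = id; }
}  // namespace demo

int main() {
  demo::mirror::Widget w;
  demo::Registry r;
  r.Register(&w, 7);
  return 0;
}
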
diff --git a/src/intern_table.cc b/src/intern_table.cc
index 817ce1e..fa3c075 100644
--- a/src/intern_table.cc
+++ b/src/intern_table.cc
@@ -16,6 +16,8 @@
#include "intern_table.h"
+#include "mirror/string.h"
+#include "thread.h"
#include "UniquePtr.h"
#include "utf.h"
@@ -36,7 +38,7 @@
<< image_strong_interns_.size() << " image strong\n";
}
-void InternTable::VisitRoots(Heap::RootVisitor* visitor, void* arg) {
+void InternTable::VisitRoots(RootVisitor* visitor, void* arg) {
MutexLock mu(Thread::Current(), intern_table_lock_);
typedef Table::const_iterator It; // TODO: C++0x auto
for (It it = strong_interns_.begin(), end = strong_interns_.end(); it != end; ++it) {
@@ -46,11 +48,11 @@
// Note: we deliberately don't visit the weak_interns_ table and the immutable image roots.
}
-String* InternTable::Lookup(Table& table, String* s, uint32_t hash_code) {
+mirror::String* InternTable::Lookup(Table& table, mirror::String* s, uint32_t hash_code) {
intern_table_lock_.AssertHeld(Thread::Current());
typedef Table::const_iterator It; // TODO: C++0x auto
for (It it = table.find(hash_code), end = table.end(); it != end; ++it) {
- String* existing_string = it->second;
+ mirror::String* existing_string = it->second;
if (existing_string->Equals(s)) {
return existing_string;
}
@@ -58,18 +60,18 @@
return NULL;
}
-String* InternTable::Insert(Table& table, String* s, uint32_t hash_code) {
+mirror::String* InternTable::Insert(Table& table, mirror::String* s, uint32_t hash_code) {
intern_table_lock_.AssertHeld(Thread::Current());
table.insert(std::make_pair(hash_code, s));
return s;
}
-void InternTable::RegisterStrong(String* s) {
+void InternTable::RegisterStrong(mirror::String* s) {
MutexLock mu(Thread::Current(), intern_table_lock_);
Insert(image_strong_interns_, s, s->GetHashCode());
}
-void InternTable::Remove(Table& table, const String* s, uint32_t hash_code) {
+void InternTable::Remove(Table& table, const mirror::String* s, uint32_t hash_code) {
intern_table_lock_.AssertHeld(Thread::Current());
typedef Table::iterator It; // TODO: C++0x auto
for (It it = table.find(hash_code), end = table.end(); it != end; ++it) {
@@ -80,7 +82,7 @@
}
}
-String* InternTable::Insert(String* s, bool is_strong) {
+mirror::String* InternTable::Insert(mirror::String* s, bool is_strong) {
MutexLock mu(Thread::Current(), intern_table_lock_);
DCHECK(s != NULL);
@@ -88,12 +90,12 @@
if (is_strong) {
// Check the strong table for a match.
- String* strong = Lookup(strong_interns_, s, hash_code);
+ mirror::String* strong = Lookup(strong_interns_, s, hash_code);
if (strong != NULL) {
return strong;
}
// Check the image table for a match.
- String* image = Lookup(image_strong_interns_, s, hash_code);
+ mirror::String* image = Lookup(image_strong_interns_, s, hash_code);
if (image != NULL) {
return image;
}
@@ -102,7 +104,7 @@
Dirty();
// There is no match in the strong table, check the weak table.
- String* weak = Lookup(weak_interns_, s, hash_code);
+ mirror::String* weak = Lookup(weak_interns_, s, hash_code);
if (weak != NULL) {
// A match was found in the weak table. Promote to the strong table.
Remove(weak_interns_, weak, hash_code);
@@ -114,17 +116,17 @@
}
// Check the strong table for a match.
- String* strong = Lookup(strong_interns_, s, hash_code);
+ mirror::String* strong = Lookup(strong_interns_, s, hash_code);
if (strong != NULL) {
return strong;
}
// Check the image table for a match.
- String* image = Lookup(image_strong_interns_, s, hash_code);
+ mirror::String* image = Lookup(image_strong_interns_, s, hash_code);
if (image != NULL) {
return image;
}
// Check the weak table for a match.
- String* weak = Lookup(weak_interns_, s, hash_code);
+ mirror::String* weak = Lookup(weak_interns_, s, hash_code);
if (weak != NULL) {
return weak;
}
@@ -132,39 +134,39 @@
return Insert(weak_interns_, s, hash_code);
}
-String* InternTable::InternStrong(int32_t utf16_length, const char* utf8_data) {
- return InternStrong(String::AllocFromModifiedUtf8(Thread::Current(), utf16_length, utf8_data));
+mirror::String* InternTable::InternStrong(int32_t utf16_length, const char* utf8_data) {
+ return InternStrong(mirror::String::AllocFromModifiedUtf8(Thread::Current(), utf16_length, utf8_data));
}
-String* InternTable::InternStrong(const char* utf8_data) {
- return InternStrong(String::AllocFromModifiedUtf8(Thread::Current(), utf8_data));
+mirror::String* InternTable::InternStrong(const char* utf8_data) {
+ return InternStrong(mirror::String::AllocFromModifiedUtf8(Thread::Current(), utf8_data));
}
-String* InternTable::InternStrong(String* s) {
+mirror::String* InternTable::InternStrong(mirror::String* s) {
if (s == NULL) {
return NULL;
}
return Insert(s, true);
}
-String* InternTable::InternWeak(String* s) {
+mirror::String* InternTable::InternWeak(mirror::String* s) {
if (s == NULL) {
return NULL;
}
return Insert(s, false);
}
-bool InternTable::ContainsWeak(String* s) {
+bool InternTable::ContainsWeak(mirror::String* s) {
MutexLock mu(Thread::Current(), intern_table_lock_);
- const String* found = Lookup(weak_interns_, s, s->GetHashCode());
+ const mirror::String* found = Lookup(weak_interns_, s, s->GetHashCode());
return found == s;
}
-void InternTable::SweepInternTableWeaks(Heap::IsMarkedTester is_marked, void* arg) {
+void InternTable::SweepInternTableWeaks(IsMarkedTester is_marked, void* arg) {
MutexLock mu(Thread::Current(), intern_table_lock_);
typedef Table::iterator It; // TODO: C++0x auto
for (It it = weak_interns_.begin(), end = weak_interns_.end(); it != end;) {
- Object* object = it->second;
+ mirror::Object* object = it->second;
if (!is_marked(object, arg)) {
weak_interns_.erase(it++);
} else {
diff --git a/src/intern_table.h b/src/intern_table.h
index 06a2b89..3018317 100644
--- a/src/intern_table.h
+++ b/src/intern_table.h
@@ -17,14 +17,15 @@
#ifndef ART_SRC_INTERN_TABLE_H_
#define ART_SRC_INTERN_TABLE_H_
-#include <iosfwd>
-
#include "base/mutex.h"
-#include "heap.h"
-#include "object.h"
-#include "safe_map.h"
+#include "root_visitor.h"
+
+#include <map>
namespace art {
+namespace mirror {
+class String;
+} // namespace mirror
/**
* Used to intern strings.
@@ -41,31 +42,31 @@
InternTable();
// Interns a potentially new string in the 'strong' table. (See above.)
- String* InternStrong(int32_t utf16_length, const char* utf8_data)
+ mirror::String* InternStrong(int32_t utf16_length, const char* utf8_data)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Interns a potentially new string in the 'strong' table. (See above.)
- String* InternStrong(const char* utf8_data)
+ mirror::String* InternStrong(const char* utf8_data)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Interns a potentially new string in the 'strong' table. (See above.)
- String* InternStrong(String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::String* InternStrong(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Interns a potentially new string in the 'weak' table. (See above.)
- String* InternWeak(String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::String* InternWeak(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Register a String trusting that it is safe to intern.
// Used when reinitializing InternTable from an image.
- void RegisterStrong(String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void RegisterStrong(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SweepInternTableWeaks(Heap::IsMarkedTester is_marked, void* arg)
+ void SweepInternTableWeaks(IsMarkedTester is_marked, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- bool ContainsWeak(String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool ContainsWeak(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
size_t Size() const;
- void VisitRoots(Heap::RootVisitor* visitor, void* arg);
+ void VisitRoots(RootVisitor* visitor, void* arg);
void DumpForSigQuit(std::ostream& os) const;
@@ -75,15 +76,15 @@
}
private:
- typedef std::multimap<int32_t, String*> Table;
+ typedef std::multimap<int32_t, mirror::String*> Table;
- String* Insert(String* s, bool is_strong)
+ mirror::String* Insert(mirror::String* s, bool is_strong)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- String* Lookup(Table& table, String* s, uint32_t hash_code)
+ mirror::String* Lookup(Table& table, mirror::String* s, uint32_t hash_code)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- String* Insert(Table& table, String* s, uint32_t hash_code);
- void Remove(Table& table, const String* s, uint32_t hash_code);
+ mirror::String* Insert(Table& table, mirror::String* s, uint32_t hash_code);
+ void Remove(Table& table, const mirror::String* s, uint32_t hash_code);
mutable Mutex intern_table_lock_;
bool is_dirty_;
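
The intern table above keys a multimap on the string's hash code, scans the matching bucket with Equals, and chains lookups through the strong, image, and weak tables, promoting a weak entry when a strong intern matches it. A simplified single-table sketch of the hash-multimap lookup and insert; std::string stands in for mirror::String, the hash function is an assumption, and there is no strong/weak split here.

#include <cstdint>
#include <iostream>
#include <map>
#include <string>

class SimpleInternTable {
 public:
  // Returns the canonical copy of s, inserting it if not already present.
  const std::string* Intern(const std::string& s) {
    uint32_t hash = Hash(s);
    // Several strings can share a hash, so scan the bucket for equality.
    for (auto it = table_.find(hash); it != table_.end() && it->first == hash;
         ++it) {
      if (it->second == s) {
        return &it->second;
      }
    }
    auto it = table_.insert(std::make_pair(hash, s));
    return &it->second;
  }

 private:
  static uint32_t Hash(const std::string& s) {
    uint32_t h = 0;
    for (char c : s) h = 31 * h + static_cast<uint8_t>(c);  // Java-style hash
    return h;
  }

  std::multimap<uint32_t, std::string> table_;
};

int main() {
  SimpleInternTable t;
  const std::string* a = t.Intern("foo");
  const std::string* b = t.Intern("foo");
  std::cout << (a == b) << "\n";  // 1: both point at the same interned copy
  return 0;
}
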
diff --git a/src/intern_table_test.cc b/src/intern_table_test.cc
index ee9165e..f6b040d 100644
--- a/src/intern_table_test.cc
+++ b/src/intern_table_test.cc
@@ -17,7 +17,7 @@
#include "intern_table.h"
#include "common_test.h"
-#include "object.h"
+#include "mirror/object.h"
#include "sirt_ref.h"
namespace art {
@@ -27,10 +27,10 @@
TEST_F(InternTableTest, Intern) {
ScopedObjectAccess soa(Thread::Current());
InternTable intern_table;
- SirtRef<String> foo_1(soa.Self(), intern_table.InternStrong(3, "foo"));
- SirtRef<String> foo_2(soa.Self(), intern_table.InternStrong(3, "foo"));
- SirtRef<String> foo_3(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "foo"));
- SirtRef<String> bar(soa.Self(), intern_table.InternStrong(3, "bar"));
+ SirtRef<mirror::String> foo_1(soa.Self(), intern_table.InternStrong(3, "foo"));
+ SirtRef<mirror::String> foo_2(soa.Self(), intern_table.InternStrong(3, "foo"));
+ SirtRef<mirror::String> foo_3(soa.Self(), mirror::String::AllocFromModifiedUtf8(soa.Self(), "foo"));
+ SirtRef<mirror::String> bar(soa.Self(), intern_table.InternStrong(3, "bar"));
EXPECT_TRUE(foo_1->Equals("foo"));
EXPECT_TRUE(foo_2->Equals("foo"));
EXPECT_TRUE(foo_3->Equals("foo"));
@@ -47,7 +47,7 @@
InternTable t;
EXPECT_EQ(0U, t.Size());
t.InternStrong(3, "foo");
- SirtRef<String> foo(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "foo"));
+ SirtRef<mirror::String> foo(soa.Self(), mirror::String::AllocFromModifiedUtf8(soa.Self(), "foo"));
t.InternWeak(foo.get());
EXPECT_EQ(1U, t.Size());
t.InternStrong(3, "bar");
@@ -56,9 +56,9 @@
class TestPredicate {
public:
- bool IsMarked(const Object* s) const {
+ bool IsMarked(const mirror::Object* s) const {
bool erased = false;
- typedef std::vector<const String*>::iterator It; // TODO: C++0x auto
+ typedef std::vector<const mirror::String*>::iterator It; // TODO: C++0x auto
for (It it = expected_.begin(), end = expected_.end(); it != end; ++it) {
if (*it == s) {
expected_.erase(it);
@@ -70,7 +70,7 @@
return false;
}
- void Expect(const String* s) {
+ void Expect(const mirror::String* s) {
expected_.push_back(s);
}
@@ -79,10 +79,10 @@
}
private:
- mutable std::vector<const String*> expected_;
+ mutable std::vector<const mirror::String*> expected_;
};
-bool IsMarked(const Object* object, void* arg) {
+bool IsMarked(const mirror::Object* object, void* arg) {
return reinterpret_cast<TestPredicate*>(arg)->IsMarked(object);
}
@@ -91,10 +91,12 @@
InternTable t;
t.InternStrong(3, "foo");
t.InternStrong(3, "bar");
- SirtRef<String> hello(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "hello"));
- SirtRef<String> world(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "world"));
- SirtRef<String> s0(soa.Self(), t.InternWeak(hello.get()));
- SirtRef<String> s1(soa.Self(), t.InternWeak(world.get()));
+ SirtRef<mirror::String> hello(soa.Self(),
+ mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello"));
+ SirtRef<mirror::String> world(soa.Self(),
+ mirror::String::AllocFromModifiedUtf8(soa.Self(), "world"));
+ SirtRef<mirror::String> s0(soa.Self(), t.InternWeak(hello.get()));
+ SirtRef<mirror::String> s1(soa.Self(), t.InternWeak(world.get()));
EXPECT_EQ(4U, t.Size());
@@ -110,7 +112,8 @@
EXPECT_EQ(2U, t.Size());
// Just check that we didn't corrupt the map.
- SirtRef<String> still_here(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "still here"));
+ SirtRef<mirror::String> still_here(soa.Self(),
+ mirror::String::AllocFromModifiedUtf8(soa.Self(), "still here"));
t.InternWeak(still_here.get());
EXPECT_EQ(3U, t.Size());
}
@@ -120,9 +123,9 @@
{
// Strongs are never weak.
InternTable t;
- SirtRef<String> interned_foo_1(soa.Self(), t.InternStrong(3, "foo"));
+ SirtRef<mirror::String> interned_foo_1(soa.Self(), t.InternStrong(3, "foo"));
EXPECT_FALSE(t.ContainsWeak(interned_foo_1.get()));
- SirtRef<String> interned_foo_2(soa.Self(), t.InternStrong(3, "foo"));
+ SirtRef<mirror::String> interned_foo_2(soa.Self(), t.InternStrong(3, "foo"));
EXPECT_FALSE(t.ContainsWeak(interned_foo_2.get()));
EXPECT_EQ(interned_foo_1.get(), interned_foo_2.get());
}
@@ -130,11 +133,13 @@
{
// Weaks are always weak.
InternTable t;
- SirtRef<String> foo_1(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "foo"));
- SirtRef<String> foo_2(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "foo"));
+ SirtRef<mirror::String> foo_1(soa.Self(),
+ mirror::String::AllocFromModifiedUtf8(soa.Self(), "foo"));
+ SirtRef<mirror::String> foo_2(soa.Self(),
+ mirror::String::AllocFromModifiedUtf8(soa.Self(), "foo"));
EXPECT_NE(foo_1.get(), foo_2.get());
- SirtRef<String> interned_foo_1(soa.Self(), t.InternWeak(foo_1.get()));
- SirtRef<String> interned_foo_2(soa.Self(), t.InternWeak(foo_2.get()));
+ SirtRef<mirror::String> interned_foo_1(soa.Self(), t.InternWeak(foo_1.get()));
+ SirtRef<mirror::String> interned_foo_2(soa.Self(), t.InternWeak(foo_2.get()));
EXPECT_TRUE(t.ContainsWeak(interned_foo_2.get()));
EXPECT_EQ(interned_foo_1.get(), interned_foo_2.get());
}
@@ -142,10 +147,10 @@
{
// A weak can be promoted to a strong.
InternTable t;
- SirtRef<String> foo(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "foo"));
- SirtRef<String> interned_foo_1(soa.Self(), t.InternWeak(foo.get()));
+ SirtRef<mirror::String> foo(soa.Self(), mirror::String::AllocFromModifiedUtf8(soa.Self(), "foo"));
+ SirtRef<mirror::String> interned_foo_1(soa.Self(), t.InternWeak(foo.get()));
EXPECT_TRUE(t.ContainsWeak(interned_foo_1.get()));
- SirtRef<String> interned_foo_2(soa.Self(), t.InternStrong(3, "foo"));
+ SirtRef<mirror::String> interned_foo_2(soa.Self(), t.InternStrong(3, "foo"));
EXPECT_FALSE(t.ContainsWeak(interned_foo_2.get()));
EXPECT_EQ(interned_foo_1.get(), interned_foo_2.get());
}
@@ -153,10 +158,11 @@
{
// Interning a weak after a strong gets you the strong.
InternTable t;
- SirtRef<String> interned_foo_1(soa.Self(), t.InternStrong(3, "foo"));
+ SirtRef<mirror::String> interned_foo_1(soa.Self(), t.InternStrong(3, "foo"));
EXPECT_FALSE(t.ContainsWeak(interned_foo_1.get()));
- SirtRef<String> foo(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "foo"));
- SirtRef<String> interned_foo_2(soa.Self(), t.InternWeak(foo.get()));
+ SirtRef<mirror::String> foo(soa.Self(),
+ mirror::String::AllocFromModifiedUtf8(soa.Self(), "foo"));
+ SirtRef<mirror::String> interned_foo_2(soa.Self(), t.InternWeak(foo.get()));
EXPECT_FALSE(t.ContainsWeak(interned_foo_2.get()));
EXPECT_EQ(interned_foo_1.get(), interned_foo_2.get());
}
diff --git a/src/interpreter/interpreter.cc b/src/interpreter/interpreter.cc
index 820348e..65729c9 100644
--- a/src/interpreter/interpreter.cc
+++ b/src/interpreter/interpreter.cc
@@ -19,18 +19,28 @@
#include <math.h>
#include "base/logging.h"
+#include "class_linker-inl.h"
#include "common_throws.h"
#include "debugger.h"
#include "dex_instruction.h"
+#include "gc/card_table-inl.h"
#include "invoke_arg_array_builder.h"
#include "nth_caller_visitor.h"
-#include "object.h"
+#include "mirror/class.h"
+#include "mirror/class-inl.h"
+#include "mirror/field-inl.h"
+#include "mirror/abstract_method.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
#include "object_utils.h"
#include "runtime_support.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "thread.h"
+using namespace art::mirror;
+
namespace art {
namespace interpreter {
diff --git a/src/interpreter/interpreter.h b/src/interpreter/interpreter.h
index 6990458..eee13dc 100644
--- a/src/interpreter/interpreter.h
+++ b/src/interpreter/interpreter.h
@@ -21,18 +21,20 @@
#include "locks.h"
namespace art {
-
+namespace mirror {
class AbstractMethod;
+class Object;
+} // namespace mirror
+
union JValue;
class MethodHelper;
-class Object;
class ShadowFrame;
class Thread;
namespace interpreter {
-extern void EnterInterpreterFromInvoke(Thread* self, AbstractMethod* method, Object* receiver,
- JValue* args, JValue* result)
+extern void EnterInterpreterFromInvoke(Thread* self, mirror::AbstractMethod* method,
+ mirror::Object* receiver, JValue* args, JValue* result)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
extern JValue EnterInterpreterFromDeoptimize(Thread* self, ShadowFrame& shadow_frame,
diff --git a/src/invoke_arg_array_builder.h b/src/invoke_arg_array_builder.h
index 16eedfe..19c42ac 100644
--- a/src/invoke_arg_array_builder.h
+++ b/src/invoke_arg_array_builder.h
@@ -17,7 +17,7 @@
#ifndef ART_SRC_INVOKE_ARG_ARRAY_BUILDER_H_
#define ART_SRC_INVOKE_ARG_ARRAY_BUILDER_H_
-#include "object.h"
+#include "mirror/object.h"
#include "scoped_thread_state_change.h"
namespace art {
@@ -31,7 +31,7 @@
} else if (ch == 'L') {
// Argument is a reference or an array. The shorty descriptor
// does not distinguish between these types.
- num_bytes += sizeof(Object*);
+ num_bytes += sizeof(mirror::Object*);
} else {
num_bytes += 4;
}
@@ -78,7 +78,7 @@
arg_array_[offset].SetF(va_arg(ap, jdouble));
break;
case 'L':
- arg_array_[offset].SetL(soa.Decode<Object*>(va_arg(ap, jobject)));
+ arg_array_[offset].SetL(soa.Decode<mirror::Object*>(va_arg(ap, jobject)));
break;
case 'D':
arg_array_[offset].SetD(va_arg(ap, jdouble));
@@ -113,7 +113,7 @@
arg_array_[offset].SetF(args[offset].f);
break;
case 'L':
- arg_array_[offset].SetL(soa.Decode<Object*>(args[offset].l));
+ arg_array_[offset].SetL(soa.Decode<mirror::Object*>(args[offset].l));
break;
case 'D':
arg_array_[offset].SetD(args[offset].d);
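
The arg-array builder above walks a method shorty and reserves sizeof(mirror::Object*) bytes for each reference ('L') and four bytes otherwise. A small sketch of that size computation over a shorty string; skipping the leading return-type character and treating 'J'/'D' as eight bytes are assumptions carried over from code not shown in this hunk, and void* stands in for mirror::Object*.

#include <cstddef>
#include <iostream>
#include <string>

// Computes how many argument bytes a shorty describes. The first shorty
// character is the return type, so it is skipped (assumption). Treating
// 'J' and 'D' as eight bytes is likewise assumed; the diff only shows the
// 'L' and default cases explicitly.
size_t NumArgArrayBytes(const std::string& shorty) {
  size_t num_bytes = 0;
  for (size_t i = 1; i < shorty.size(); ++i) {
    char ch = shorty[i];
    if (ch == 'D' || ch == 'J') {
      num_bytes += 8;
    } else if (ch == 'L') {
      // Reference or array argument; the shorty does not distinguish.
      num_bytes += sizeof(void*);
    } else {
      num_bytes += 4;
    }
  }
  return num_bytes;
}

int main() {
  // "VLID": void return, one reference, one int, one double.
  std::cout << NumArgArrayBytes("VLID") << "\n";
  return 0;
}
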
diff --git a/src/jdwp/jdwp.h b/src/jdwp/jdwp.h
index 6cac0f6..71bae08 100644
--- a/src/jdwp/jdwp.h
+++ b/src/jdwp/jdwp.h
@@ -30,8 +30,9 @@
struct iovec;
namespace art {
-
+namespace mirror {
class AbstractMethod;
+} // namespace mirror
class Thread;
namespace JDWP {
diff --git a/src/jdwp/jdwp_event.cc b/src/jdwp/jdwp_event.cc
index ba2d8d2..71e91d4 100644
--- a/src/jdwp/jdwp_event.cc
+++ b/src/jdwp/jdwp_event.cc
@@ -28,6 +28,7 @@
#include "jdwp/jdwp_expand_buf.h"
#include "jdwp/jdwp_handler.h"
#include "jdwp/jdwp_priv.h"
+#include "thread.h"
/*
General notes:
diff --git a/src/jdwp/jdwp_handler.cc b/src/jdwp/jdwp_handler.cc
index cb13695..aa5a8a0 100644
--- a/src/jdwp/jdwp_handler.cc
+++ b/src/jdwp/jdwp_handler.cc
@@ -42,6 +42,9 @@
#include "jdwp/jdwp_event.h"
#include "jdwp/jdwp_expand_buf.h"
#include "jdwp/jdwp_priv.h"
+#include "runtime.h"
+#include "thread.h"
+#include "UniquePtr.h"
namespace art {
diff --git a/src/jni_compiler_test.cc b/src/jni_compiler_test.cc
index 16976d4..4ed7898 100644
--- a/src/jni_compiler_test.cc
+++ b/src/jni_compiler_test.cc
@@ -21,6 +21,11 @@
#include "in