Merge "ART: Fix class-linker handling" into lmp-dev
diff --git a/runtime/atomic.h b/runtime/atomic.h
index 5ddafb4..e57c0c0 100644
--- a/runtime/atomic.h
+++ b/runtime/atomic.h
@@ -17,14 +17,8 @@
 #ifndef ART_RUNTIME_ATOMIC_H_
 #define ART_RUNTIME_ATOMIC_H_
 
-#ifdef __clang__
-#define ART_HAVE_STDATOMIC 1
-#endif
-
 #include <stdint.h>
-#if ART_HAVE_STDATOMIC
 #include <atomic>
-#endif
 #include <limits>
 #include <vector>
 
@@ -157,8 +151,6 @@
     return kNeedSwapMutexes;
   }
 
-  #if ART_HAVE_STDATOMIC
-
   static void ThreadFenceAcquire() {
     std::atomic_thread_fence(std::memory_order_acquire);
   }
@@ -179,66 +171,6 @@
     std::atomic_thread_fence(std::memory_order_seq_cst);
   }
 
-  #else
-
-  static void ThreadFenceAcquire() {
-  #if defined(__arm__) || defined(__aarch64__)
-    __asm__ __volatile__("dmb ish" : : : "memory");
-    // Could possibly use dmb ishld on aarch64
-    // But currently we also use this on volatile loads
-    // to enforce store atomicity.  Ishld is
-    // insufficient for that purpose.
-  #elif defined(__i386__) || defined(__x86_64__)
-    __asm__ __volatile__("" : : : "memory");
-  #elif defined(__mips__)
-    __asm__ __volatile__("sync" : : : "memory");
-  #else
-  #error Unexpected architecture
-  #endif
-  }
-
-  static void ThreadFenceRelease() {
-  #if defined(__arm__) || defined(__aarch64__)
-    __asm__ __volatile__("dmb ish" : : : "memory");
-    // ishst doesn't order load followed by store.
-  #elif defined(__i386__) || defined(__x86_64__)
-    __asm__ __volatile__("" : : : "memory");
-  #elif defined(__mips__)
-    __asm__ __volatile__("sync" : : : "memory");
-  #else
-  #error Unexpected architecture
-  #endif
-  }
-
-  // Fence at the end of a constructor with final fields
-  // or allocation.  We believe this
-  // only has to order stores, and can thus be weaker than
-  // release on aarch64.
-  static void ThreadFenceForConstructor() {
-  #if defined(__arm__) || defined(__aarch64__)
-    __asm__ __volatile__("dmb ishst" : : : "memory");
-  #elif defined(__i386__) || defined(__x86_64__)
-    __asm__ __volatile__("" : : : "memory");
-  #elif defined(__mips__)
-    __asm__ __volatile__("sync" : : : "memory");
-  #else
-  #error Unexpected architecture
-  #endif
-  }
-
-  static void ThreadFenceSequentiallyConsistent() {
-  #if defined(__arm__) || defined(__aarch64__)
-    __asm__ __volatile__("dmb ish" : : : "memory");
-  #elif defined(__i386__) || defined(__x86_64__)
-    __asm__ __volatile__("mfence" : : : "memory");
-  #elif defined(__mips__)
-    __asm__ __volatile__("sync" : : : "memory");
-  #else
-  #error Unexpected architecture
-  #endif
-  }
-  #endif
-
  private:
   static Mutex* GetSwapMutex(const volatile int64_t* addr);
   static int64_t SwapMutexRead64(volatile const int64_t* addr);
@@ -252,11 +184,10 @@
   DISALLOW_COPY_AND_ASSIGN(QuasiAtomic);
 };
 
-#if ART_HAVE_STDATOMIC
 template<typename T>
-class Atomic : public std::atomic<T> {
+class PACKED(sizeof(T)) Atomic : public std::atomic<T> {
  public:
-  Atomic<T>() : std::atomic<T>() { }
+  Atomic<T>() : std::atomic<T>(0) { }
 
   explicit Atomic<T>(T value) : std::atomic<T>(value) { }
 
@@ -360,292 +291,20 @@
   }
 };
 
-#else
-
-template<typename T> class Atomic;
-
-// Helper class for Atomic to deal separately with size 8 and small
-// objects.  Should not be used directly.
-
-template<int SZ, class T> struct AtomicHelper {
-  friend class Atomic<T>;
-
- private:
-  COMPILE_ASSERT(sizeof(T) <= 4, bad_atomic_helper_arg);
-
-  static T LoadRelaxed(const volatile T* loc) {
-    // sizeof(T) <= 4
-    return *loc;
-  }
-
-  static void StoreRelaxed(volatile T* loc, T desired) {
-    // sizeof(T) <= 4
-    *loc = desired;
-  }
-
-  static bool CompareExchangeStrongSequentiallyConsistent(volatile T* loc,
-                                                  T expected_value, T desired_value) {
-    // sizeof(T) <= 4
-    return __sync_bool_compare_and_swap(loc, expected_value, desired_value);
-  }
-};
-
-// Interpret the bit pattern of input (type U) as type V. Requires the size
-// of V >= size of U (compile-time checked).
-// Reproduced here from utils.h to keep dependencies small.
-template<typename U, typename V>
-static inline V bit_cast_atomic(U in) {
-  COMPILE_ASSERT(sizeof(U) == sizeof(V), size_of_u_not_eq_size_of_v);
-  union {
-    U u;
-    V v;
-  } tmp;
-  tmp.u = in;
-  return tmp.v;
-}
-
-template<class T> struct AtomicHelper<8, T> {
-  friend class Atomic<T>;
-
- private:
-  COMPILE_ASSERT(sizeof(T) == 8, bad_large_atomic_helper_arg);
-
-  static T LoadRelaxed(const volatile T* loc) {
-    // sizeof(T) == 8
-    volatile const int64_t* loc_ptr =
-              reinterpret_cast<volatile const int64_t*>(loc);
-    return bit_cast_atomic<int64_t, T>(QuasiAtomic::Read64(loc_ptr));
-  }
-
-  static void StoreRelaxed(volatile T* loc, T desired) {
-    // sizeof(T) == 8
-    volatile int64_t* loc_ptr =
-                reinterpret_cast<volatile int64_t*>(loc);
-    QuasiAtomic::Write64(loc_ptr, bit_cast_atomic<T, int64_t>(desired));
-  }
-
-
-  static bool CompareExchangeStrongSequentiallyConsistent(volatile T* loc,
-                                                  T expected_value, T desired_value) {
-    // sizeof(T) == 8
-    volatile int64_t* loc_ptr = reinterpret_cast<volatile int64_t*>(loc);
-    return QuasiAtomic::Cas64(bit_cast_atomic<T, int64_t>(expected_value),
-                              bit_cast_atomic<T, int64_t>(desired_value),
-                              loc_ptr);
-  }
-};
-
-template<typename T>
-class PACKED(sizeof(T)) Atomic {
- private:
-  COMPILE_ASSERT(sizeof(T) <= 4 || sizeof(T) == 8, bad_atomic_arg);
-
- public:
-  Atomic<T>() : value_(0) { }
-
-  explicit Atomic<T>(T value) : value_(value) { }
-
-  // Load from memory without ordering or synchronization constraints.
-  T LoadRelaxed() const {
-    return AtomicHelper<sizeof(T), T>::LoadRelaxed(&value_);
-  }
-
-  // Word tearing allowed, but may race.
-  T LoadJavaData() const {
-    return value_;
-  }
-
-  // Load from memory with a total ordering.
-  T LoadSequentiallyConsistent() const;
-
-  // Store to memory without ordering or synchronization constraints.
-  void StoreRelaxed(T desired) {
-    AtomicHelper<sizeof(T), T>::StoreRelaxed(&value_, desired);
-  }
-
-  // Word tearing allowed, but may race.
-  void StoreJavaData(T desired) {
-    value_ = desired;
-  }
-
-  // Store to memory with release ordering.
-  void StoreRelease(T desired);
-
-  // Store to memory with a total ordering.
-  void StoreSequentiallyConsistent(T desired);
-
-  // Atomically replace the value with desired value if it matches the expected value.
-  // Participates in total ordering of atomic operations.
-  bool CompareExchangeStrongSequentiallyConsistent(T expected_value, T desired_value) {
-    return AtomicHelper<sizeof(T), T>::
-        CompareExchangeStrongSequentiallyConsistent(&value_, expected_value, desired_value);
-  }
-
-  // The same, but may fail spuriously.
-  bool CompareExchangeWeakSequentiallyConsistent(T expected_value, T desired_value) {
-    // TODO: Take advantage of the fact that it may fail spuriously.
-    return AtomicHelper<sizeof(T), T>::
-        CompareExchangeStrongSequentiallyConsistent(&value_, expected_value, desired_value);
-  }
-
-  // Atomically replace the value with desired value if it matches the expected value. Doesn't
-  // imply ordering or synchronization constraints.
-  bool CompareExchangeStrongRelaxed(T expected_value, T desired_value) {
-    // TODO: make this relaxed.
-    return CompareExchangeStrongSequentiallyConsistent(expected_value, desired_value);
-  }
-
-  // The same, but may fail spuriously.
-  bool CompareExchangeWeakRelaxed(T expected_value, T desired_value) {
-    // TODO: Take advantage of the fact that it may fail spuriously.
-    // TODO: make this relaxed.
-    return CompareExchangeStrongSequentiallyConsistent(expected_value, desired_value);
-  }
-
-  // Atomically replace the value with desired value if it matches the expected value. Prior accesses
-  // made to other memory locations by the thread that did the release become visible in this
-  // thread.
-  bool CompareExchangeWeakAcquire(T expected_value, T desired_value) {
-    // TODO: make this acquire.
-    return CompareExchangeWeakSequentiallyConsistent(expected_value, desired_value);
-  }
-
-  // Atomically replace the value with desired value if it matches the expected value. Prior accesses
-  // to other memory locations become visible to the threads that do a consume or an acquire on the
-  // same location.
-  bool CompareExchangeWeakRelease(T expected_value, T desired_value) {
-    // TODO: make this release.
-    return CompareExchangeWeakSequentiallyConsistent(expected_value, desired_value);
-  }
-
-  volatile T* Address() {
-    return &value_;
-  }
-
-  T FetchAndAddSequentiallyConsistent(const T value) {
-    if (sizeof(T) <= 4) {
-      return __sync_fetch_and_add(&value_, value);  // Return old value.
-    } else {
-      T expected;
-      do {
-        expected = LoadRelaxed();
-      } while (!CompareExchangeWeakSequentiallyConsistent(expected, expected + value));
-      return expected;
-    }
-  }
-
-  T FetchAndSubSequentiallyConsistent(const T value) {
-    if (sizeof(T) <= 4) {
-      return __sync_fetch_and_sub(&value_, value);  // Return old value.
-    } else {
-      return FetchAndAddSequentiallyConsistent(-value);
-    }
-  }
-
-  T FetchAndOrSequentiallyConsistent(const T value) {
-    if (sizeof(T) <= 4) {
-      return __sync_fetch_and_or(&value_, value);  // Return old value.
-    } else {
-      T expected;
-      do {
-        expected = LoadRelaxed();
-      } while (!CompareExchangeWeakSequentiallyConsistent(expected, expected | value));
-      return expected;
-    }
-  }
-
-  T FetchAndAndSequentiallyConsistent(const T value) {
-    if (sizeof(T) <= 4) {
-      return __sync_fetch_and_and(&value_, value);  // Return old value.
-    } else {
-      T expected;
-      do {
-        expected = LoadRelaxed();
-      } while (!CompareExchangeWeakSequentiallyConsistent(expected, expected & value));
-      return expected;
-    }
-  }
-
-  T operator++() {  // Prefix operator.
-    if (sizeof(T) <= 4) {
-      return __sync_add_and_fetch(&value_, 1);  // Return new value.
-    } else {
-      return FetchAndAddSequentiallyConsistent(1) + 1;
-    }
-  }
-
-  T operator++(int) {  // Postfix operator.
-    return FetchAndAddSequentiallyConsistent(1);
-  }
-
-  T operator--() {  // Prefix operator.
-    if (sizeof(T) <= 4) {
-      return __sync_sub_and_fetch(&value_, 1);  // Return new value.
-    } else {
-      return FetchAndSubSequentiallyConsistent(1) - 1;
-    }
-  }
-
-  T operator--(int) {  // Postfix operator.
-    return FetchAndSubSequentiallyConsistent(1);
-  }
-
-  static T MaxValue() {
-    return std::numeric_limits<T>::max();
-  }
-
-
- private:
-  volatile T value_;
-};
-#endif
-
 typedef Atomic<int32_t> AtomicInteger;
 
 COMPILE_ASSERT(sizeof(AtomicInteger) == sizeof(int32_t), weird_atomic_int_size);
 COMPILE_ASSERT(alignof(AtomicInteger) == alignof(int32_t),
                atomic_int_alignment_differs_from_that_of_underlying_type);
 COMPILE_ASSERT(sizeof(Atomic<int64_t>) == sizeof(int64_t), weird_atomic_int64_size);
+
+// Assert that 64-bit integers are 64-bit aligned. This isn't true on certain 32-bit
+// architectures (e.g. x86-32), but there we explicitly arrange 64-bit fields to be 8-byte
+// aligned, so the check only applies to 64-bit builds.
 #if defined(__LP64__)
   COMPILE_ASSERT(alignof(Atomic<int64_t>) == alignof(int64_t),
                  atomic_int64_alignment_differs_from_that_of_underlying_type);
 #endif
-// The above fails on x86-32.
-// This is OK, since we explicitly arrange for alignment of 8-byte fields.
-
-
-#if !ART_HAVE_STDATOMIC
-template<typename T>
-inline T Atomic<T>::LoadSequentiallyConsistent() const {
-  T result = value_;
-  if (sizeof(T) != 8 || !QuasiAtomic::LongAtomicsUseMutexes()) {
-    QuasiAtomic::ThreadFenceAcquire();
-    // We optimistically assume this suffices for store atomicity.
-    // On ARMv8 we strengthen ThreadFenceAcquire to make that true.
-  }
-  return result;
-}
-
-template<typename T>
-inline void Atomic<T>::StoreRelease(T desired) {
-  if (sizeof(T) != 8 || !QuasiAtomic::LongAtomicsUseMutexes()) {
-    QuasiAtomic::ThreadFenceRelease();
-  }
-  StoreRelaxed(desired);
-}
-
-template<typename T>
-inline void Atomic<T>::StoreSequentiallyConsistent(T desired) {
-  if (sizeof(T) != 8 || !QuasiAtomic::LongAtomicsUseMutexes()) {
-    QuasiAtomic::ThreadFenceRelease();
-  }
-  StoreRelaxed(desired);
-  if (sizeof(T) != 8 || !QuasiAtomic::LongAtomicsUseMutexes()) {
-    QuasiAtomic::ThreadFenceSequentiallyConsistent();
-  }
-}
-
-#endif
 
 }  // namespace art
 
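With the fallback removed, atomic.h is clang/<atomic>-only: the per-architecture fences and CAS-loop arithmetic deleted above are now provided by the standard library. As a rough standalone sketch (using plain std::atomic rather than ART's Atomic<T> wrapper), the removed code corresponds to:

#include <atomic>
#include <cstdint>

// The compiler and <atomic> now emit the per-architecture barriers
// (dmb ish, mfence, sync, ...) that the removed #else branch spelled out by hand.
inline void ThreadFenceAcquire() { std::atomic_thread_fence(std::memory_order_acquire); }
inline void ThreadFenceRelease() { std::atomic_thread_fence(std::memory_order_release); }
inline void ThreadFenceSequentiallyConsistent() {
  std::atomic_thread_fence(std::memory_order_seq_cst);
}

// Counterpart of the removed 64-bit FetchAndAddSequentiallyConsistent CAS loop;
// returns the old value, matching __sync_fetch_and_add semantics.
inline int64_t FetchAndAddSeqCst(std::atomic<int64_t>* addr, int64_t value) {
  int64_t expected = addr->load(std::memory_order_relaxed);
  while (!addr->compare_exchange_weak(expected, expected + value,
                                      std::memory_order_seq_cst)) {
    // On failure, 'expected' is updated to the current value; retry.
  }
  return expected;
}
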
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index f158463..e3341ce 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -448,7 +448,7 @@
     if (m != nullptr) {
       shorty = m->GetShorty(&len);
     }
-    return NativeBridge::GetTrampoline(handle_, symbol_name.c_str(), shorty, len);
+    return NativeBridgeGetTrampoline(handle_, symbol_name.c_str(), shorty, len);
   }
 
   void VisitRoots(RootCallback* visitor, void* arg) {
@@ -3309,8 +3309,8 @@
   void* handle = dlopen(path_str, RTLD_LAZY);
   bool needs_native_bridge = false;
   if (handle == nullptr) {
-    if (NativeBridge::IsSupported(path_str)) {
-      handle = NativeBridge::LoadLibrary(path_str, RTLD_LAZY);
+    if (NativeBridgeIsSupported(path_str)) {
+      handle = NativeBridgeLoadLibrary(path_str, RTLD_LAZY);
       needs_native_bridge = true;
     }
   }
diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc
index 5f718ba..047e9f6 100644
--- a/runtime/native/dalvik_system_VMStack.cc
+++ b/runtime/native/dalvik_system_VMStack.cc
@@ -76,6 +76,10 @@
   ScopedFastNativeObjectAccess soa(env);
   NthCallerVisitor visitor(soa.Self(), 2);
   visitor.WalkStack();
+  if (UNLIKELY(visitor.caller == nullptr)) {
+    // The caller is an attached native thread.
+    return nullptr;
+  }
   return soa.AddLocalReference<jobject>(visitor.caller->GetDeclaringClass()->GetClassLoader());
 }
 
diff --git a/runtime/native_bridge.cc b/runtime/native_bridge.cc
index ad26ee4..d0b516b 100644
--- a/runtime/native_bridge.cc
+++ b/runtime/native_bridge.cc
@@ -34,25 +34,20 @@
 
 namespace art {
 
-// Is native-bridge support enabled?
-static constexpr bool kNativeBridgeEnabled = true;
-
-// Default library name for native-bridge.
-static constexpr const char* kDefaultNativeBridge = "libnativebridge.so";
-
-#ifdef HAVE_ANDROID_OS
-// TODO: This will be removed once we have native-bridge command-line arguments.
-
-// Property that defines the library name of native-bridge.
-static constexpr const char* kPropNativeBridge = "persist.native.bridge";
-
-// Property that enables native-bridge.
-static constexpr const char* kPropEnableNativeBridge = "persist.enable.native.bridge";
-#endif
-
 // The symbol name exposed by native-bridge with the type of NativeBridgeCallbacks.
 static constexpr const char* kNativeBridgeInterfaceSymbol = "NativeBridgeItf";
 
+// The library name we are supposed to load.
+static std::string native_bridge_library_string = "";
+
+// Whether a native bridge is available (loaded and ready).
+static bool available = false;
+// Whether we have already initialized (or tried to).
+static bool initialized = false;
+
+struct NativeBridgeCallbacks;
+static NativeBridgeCallbacks* callbacks = nullptr;
+
 // ART interfaces to native-bridge.
 struct NativeBridgeArtCallbacks {
   // Get shorty of a Java method. The shorty is supposed to be persistent in memory.
@@ -71,7 +66,7 @@
   //   clazz [IN] Java class object.
   // Returns:
   //   number of native methods.
-  int (*getNativeMethodCount)(JNIEnv* env, jclass clazz);
+  uint32_t (*getNativeMethodCount)(JNIEnv* env, jclass clazz);
 
   // Get at most 'method_count' native methods for the specified class 'clazz'. Results are output
   // via 'methods' [OUT]. The signature pointer in JNINativeMethod is reused as the method shorty.
@@ -83,7 +78,8 @@
   //   method_count [IN] max number of elements in methods.
   // Returns:
   //   number of methods actually written to 'methods'.
-  int (*getNativeMethods)(JNIEnv* env, jclass clazz, JNINativeMethod* methods, uint32_t method_count);
+  uint32_t (*getNativeMethods)(JNIEnv* env, jclass clazz, JNINativeMethod* methods,
+                               uint32_t method_count);
 };
 
 // Native-bridge interfaces to ART
@@ -135,148 +131,137 @@
   return mh.GetShorty();
 }
 
-static int GetNativeMethodCount(JNIEnv* env, jclass clazz) {
+static uint32_t GetNativeMethodCount(JNIEnv* env, jclass clazz) {
   if (clazz == nullptr)
     return 0;
 
   ScopedObjectAccess soa(env);
   mirror::Class* c = soa.Decode<mirror::Class*>(clazz);
 
-  size_t method_count = 0;
-  for (size_t i = 0; i < c->NumDirectMethods(); ++i) {
+  uint32_t native_method_count = 0;
+  for (uint32_t i = 0; i < c->NumDirectMethods(); ++i) {
     mirror::ArtMethod* m = c->GetDirectMethod(i);
-    if (m->IsNative())
-      method_count++;
+    if (m->IsNative()) {
+      native_method_count++;
+    }
   }
-  for (size_t i = 0; i < c->NumVirtualMethods(); ++i) {
+  for (uint32_t i = 0; i < c->NumVirtualMethods(); ++i) {
     mirror::ArtMethod* m = c->GetVirtualMethod(i);
-    if (m->IsNative())
-      method_count++;
+    if (m->IsNative()) {
+      native_method_count++;
+    }
   }
-  return method_count;
+  return native_method_count;
 }
 
-static int GetNativeMethods(JNIEnv* env, jclass clazz, JNINativeMethod* methods,
-                            uint32_t method_count) {
-  if ((clazz == nullptr) || (methods == nullptr))
+static uint32_t GetNativeMethods(JNIEnv* env, jclass clazz, JNINativeMethod* methods,
+                               uint32_t method_count) {
+  if ((clazz == nullptr) || (methods == nullptr)) {
     return 0;
-
+  }
   ScopedObjectAccess soa(env);
   mirror::Class* c = soa.Decode<mirror::Class*>(clazz);
 
-  size_t count = 0;
-  for (size_t i = 0; i < c->NumDirectMethods(); ++i) {
+  uint32_t count = 0;
+  for (uint32_t i = 0; i < c->NumDirectMethods(); ++i) {
     mirror::ArtMethod* m = c->GetDirectMethod(i);
-    if (m->IsNative() && count < method_count) {
-      methods[count].name = m->GetName();
-      methods[count].signature = m->GetShorty();
-      methods[count].fnPtr = const_cast<void*>(m->GetNativeMethod());
-      count++;
+    if (m->IsNative()) {
+      if (count < method_count) {
+        methods[count].name = m->GetName();
+        methods[count].signature = m->GetShorty();
+        methods[count].fnPtr = const_cast<void*>(m->GetNativeMethod());
+        count++;
+      } else {
+        LOG(WARNING) << "Output native method array too small. Skipping " << PrettyMethod(m);
+      }
     }
   }
-  for (size_t i = 0; i < c->NumVirtualMethods(); ++i) {
+  for (uint32_t i = 0; i < c->NumVirtualMethods(); ++i) {
     mirror::ArtMethod* m = c->GetVirtualMethod(i);
-    if (m->IsNative() && count < method_count) {
-      methods[count].name = m->GetName();
-      methods[count].signature = m->GetShorty();
-      methods[count].fnPtr = const_cast<void*>(m->GetNativeMethod());
-      count++;
+    if (m->IsNative()) {
+      if (count < method_count) {
+        methods[count].name = m->GetName();
+        methods[count].signature = m->GetShorty();
+        methods[count].fnPtr = const_cast<void*>(m->GetNativeMethod());
+        count++;
+      } else {
+        LOG(WARNING) << "Output native method array too small. Skipping " << PrettyMethod(m);
+      }
     }
   }
   return count;
 }
 
-NativeBridgeArtCallbacks NativeBridgeArtItf = {
+static NativeBridgeArtCallbacks NativeBridgeArtItf = {
   GetMethodShorty,
   GetNativeMethodCount,
   GetNativeMethods
 };
 
-void NativeBridge::SetNativeBridgeLibraryString(std::string& native_bridge_library_string) {
-  native_bridge_library_string_ = native_bridge_library_string;
-  // TODO: when given an empty string, set initialized_ to true and available_ to false. This
-  //       change is dependent on the property removal in Initialize().
+void SetNativeBridgeLibraryString(const std::string& nb_library_string) {
+  // This is called when the runtime starts and nothing else is running concurrently,
+  // so we don't need a lock here.
+
+  native_bridge_library_string = nb_library_string;
+
+  if (native_bridge_library_string.empty()) {
+    initialized = true;
+    available = false;
+  }
 }
 
-bool NativeBridge::Initialize() {
-  if (!kNativeBridgeEnabled) {
-    return false;
-  }
+static bool NativeBridgeInitialize() {
+  // TODO: Missing annotalysis static lock ordering of DEFAULT_MUTEX_ACQUIRED, place lock into
+  // global order or remove.
+  static Mutex lock("native bridge lock");
+  MutexLock mu(Thread::Current(), lock);
 
-  MutexLock mu(Thread::Current(), lock_);
-
-  if (initialized_) {
+  if (initialized) {
     // Somebody did it before.
-    return available_;
+    return available;
   }
 
-  available_ = false;
+  available = false;
 
-  const char* libnb_path;
-
-  if (!native_bridge_library_string_.empty()) {
-    libnb_path = native_bridge_library_string_.c_str();
-  } else {
-    // TODO: Remove this once the frameworks side is completely implemented.
-
-    libnb_path = kDefaultNativeBridge;
-#ifdef HAVE_ANDROID_OS
-    char prop_buf[PROP_VALUE_MAX];
-    property_get(kPropEnableNativeBridge, prop_buf, "false");
-    if (strcmp(prop_buf, "true") != 0) {
-      initialized_ = true;
-      return false;
-    }
-
-    // If prop persist.native.bridge set, overwrite the default name.
-    int name_len = property_get(kPropNativeBridge, prop_buf, kDefaultNativeBridge);
-    if (name_len > 0)
-      libnb_path = prop_buf;
-#endif
-  }
-
-  void* handle = dlopen(libnb_path, RTLD_LAZY);
+  void* handle = dlopen(native_bridge_library_string.c_str(), RTLD_LAZY);
   if (handle != nullptr) {
-    callbacks_ = reinterpret_cast<NativeBridgeCallbacks*>(dlsym(handle,
-                                                                kNativeBridgeInterfaceSymbol));
+    callbacks = reinterpret_cast<NativeBridgeCallbacks*>(dlsym(handle,
+                                                               kNativeBridgeInterfaceSymbol));
 
-    if (callbacks_ != nullptr) {
-      available_ = callbacks_->initialize(&NativeBridgeArtItf);
+    if (callbacks != nullptr) {
+      available = callbacks->initialize(&NativeBridgeArtItf);
     }
 
-    if (!available_) {
+    if (!available) {
       dlclose(handle);
     }
   }
 
-  initialized_ = true;
+  initialized = true;
 
-  return available_;
+  return available;
 }
 
-void* NativeBridge::LoadLibrary(const char* libpath, int flag) {
-  if (Initialize())
-    return callbacks_->loadLibrary(libpath, flag);
+void* NativeBridgeLoadLibrary(const char* libpath, int flag) {
+  if (NativeBridgeInitialize()) {
+    return callbacks->loadLibrary(libpath, flag);
+  }
   return nullptr;
 }
 
-void* NativeBridge::GetTrampoline(void* handle, const char* name, const char* shorty,
+void* NativeBridgeGetTrampoline(void* handle, const char* name, const char* shorty,
                                   uint32_t len) {
-  if (Initialize())
-    return callbacks_->getTrampoline(handle, name, shorty, len);
+  if (NativeBridgeInitialize()) {
+    return callbacks->getTrampoline(handle, name, shorty, len);
+  }
   return nullptr;
 }
 
-bool NativeBridge::IsSupported(const char* libpath) {
-  if (Initialize())
-    return callbacks_->isSupported(libpath);
+bool NativeBridgeIsSupported(const char* libpath) {
+  if (NativeBridgeInitialize()) {
+    return callbacks->isSupported(libpath);
+  }
   return false;
 }
 
-bool NativeBridge::available_ = false;
-bool NativeBridge::initialized_ = false;
-Mutex NativeBridge::lock_("native bridge lock");
-std::string NativeBridge::native_bridge_library_string_ = "";
-NativeBridgeCallbacks* NativeBridge::callbacks_ = nullptr;
-
 };  // namespace art
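
The full NativeBridgeCallbacks definition lies outside the hunks shown here; judging only from the calls above (initialize, loadLibrary, getTrampoline, isSupported), it has roughly the following shape — a sketch for orientation, not the authoritative declaration:

// Function table exported by the native bridge under kNativeBridgeInterfaceSymbol.
struct NativeBridgeCallbacks {
  // Hand the bridge ART's callback table; returns true if the bridge is usable.
  bool (*initialize)(NativeBridgeArtCallbacks* art_callbacks);
  // dlopen()-like entry point for foreign-ABI libraries.
  void* (*loadLibrary)(const char* libpath, int flag);
  // Return a trampoline for the named native method with the given shorty.
  void* (*getTrampoline)(void* handle, const char* name, const char* shorty, uint32_t len);
  // Whether the library at 'libpath' is one the bridge can handle.
  bool (*isSupported)(const char* libpath);
};
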
diff --git a/runtime/native_bridge.h b/runtime/native_bridge.h
index 3d20fe4..be647fc 100644
--- a/runtime/native_bridge.h
+++ b/runtime/native_bridge.h
@@ -17,42 +17,22 @@
 #ifndef ART_RUNTIME_NATIVE_BRIDGE_H_
 #define ART_RUNTIME_NATIVE_BRIDGE_H_
 
-#include "base/mutex.h"
-
 #include <string>
 
 namespace art {
 
-struct NativeBridgeCallbacks;
+// Initialize the native bridge, if any. Should be called by Runtime::Init(). An empty string
+// signals that we do not want to load a native bridge.
+void SetNativeBridgeLibraryString(const std::string& native_bridge_library_string);
 
-class NativeBridge {
- public:
-  // Initialize the native bridge, if any. Should be called by Runtime::Init(). An empty string
-  // signals that we do not want to load a native bridge.
-  static void SetNativeBridgeLibraryString(std::string& native_bridge_library_string);
+// Load a shared library that is supported by the native-bridge.
+void* NativeBridgeLoadLibrary(const char* libpath, int flag);
 
-  // Load a shared library that is supported by the native-bridge.
-  static void* LoadLibrary(const char* libpath, int flag);
-  // Get a native-bridge trampoline for specified native method.
-  static void* GetTrampoline(void* handle, const char* name, const char* shorty, uint32_t len);
-  // True if native library is valid and is for an ABI that is supported by native-bridge.
-  static bool IsSupported(const char* libpath);
+// Get a native-bridge trampoline for specified native method.
+void* NativeBridgeGetTrampoline(void* handle, const char* name, const char* shorty, uint32_t len);
 
- private:
-  static bool Initialize();
-
-  // The library name we are supposed to load.
-  static std::string native_bridge_library_string_;
-
-  // Whether we have already initialized (or tried to).
-  static bool initialized_ GUARDED_BY(lock_);
-  static Mutex lock_;
-
-  // Whether a native bridge is available (loaded and ready).
-  static bool available_;
-
-  static NativeBridgeCallbacks* callbacks_;
-};
+// True if native library is valid and is for an ABI that is supported by native-bridge.
+bool NativeBridgeIsSupported(const char* libpath);
 
 };  // namespace art
 
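A minimal caller-side sketch of the reworked free-function interface, mirroring the dlopen()-with-fallback pattern in jni_internal.cc above (OpenNativeLibrary is a hypothetical helper, not part of this patch):

#include <dlfcn.h>

#include "native_bridge.h"

// Try the host dynamic linker first; if that fails and the library targets an
// ABI the bridge supports, load it through the native bridge instead.
static void* OpenNativeLibrary(const char* path, bool* needs_native_bridge) {
  *needs_native_bridge = false;
  void* handle = dlopen(path, RTLD_LAZY);
  if (handle == nullptr && art::NativeBridgeIsSupported(path)) {
    handle = art::NativeBridgeLoadLibrary(path, RTLD_LAZY);
    *needs_native_bridge = (handle != nullptr);
  }
  return handle;
}
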
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index e0c0d63..6a3673c 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -708,7 +708,7 @@
   self->ClearException();
 
   // Look for a native bridge.
-  NativeBridge::SetNativeBridgeLibraryString(options->native_bridge_library_string_);
+  SetNativeBridgeLibraryString(options->native_bridge_library_string_);
 
   VLOG(startup) << "Runtime::Init exiting";
   return true;
diff --git a/test/004-JniTest/jni_test.cc b/test/004-JniTest/jni_test.cc
index 554712a..9a2fbdf 100644
--- a/test/004-JniTest/jni_test.cc
+++ b/test/004-JniTest/jni_test.cc
@@ -28,162 +28,133 @@
 static JavaVM* jvm = NULL;
 
 extern "C" JNIEXPORT jint JNI_OnLoad(JavaVM *vm, void *) {
-  assert(vm != NULL);
-  assert(jvm == NULL);
+  assert(vm != nullptr);
+  assert(jvm == nullptr);
   jvm = vm;
   return JNI_VERSION_1_6;
 }
 
-static void* testFindClassOnAttachedNativeThread(void*) {
-  assert(jvm != NULL);
+static void* AttachHelper(void* arg) {
+  assert(jvm != nullptr);
 
-  JNIEnv* env = NULL;
+  JNIEnv* env = nullptr;
   JavaVMAttachArgs args = { JNI_VERSION_1_6, __FUNCTION__, NULL };
   int attach_result = jvm->AttachCurrentThread(&env, &args);
   assert(attach_result == 0);
 
-  jclass clazz = env->FindClass("Main");
-  assert(clazz != NULL);
-  assert(!env->ExceptionCheck());
-
-  jobjectArray array = env->NewObjectArray(0, clazz, NULL);
-  assert(array != NULL);
-  assert(!env->ExceptionCheck());
+  typedef void (*Fn)(JNIEnv*);
+  Fn fn = reinterpret_cast<Fn>(arg);
+  fn(env);
 
   int detach_result = jvm->DetachCurrentThread();
   assert(detach_result == 0);
-  return NULL;
+  return nullptr;
+}
+
+static void PthreadHelper(void (*fn)(JNIEnv*)) {
+  pthread_t pthread;
+  int pthread_create_result = pthread_create(&pthread, nullptr, AttachHelper,
+                                             reinterpret_cast<void*>(fn));
+  assert(pthread_create_result == 0);
+  int pthread_join_result = pthread_join(pthread, nullptr);
+  assert(pthread_join_result == 0);
+}
+
+static void testFindClassOnAttachedNativeThread(JNIEnv* env) {
+  jclass clazz = env->FindClass("Main");
+  assert(clazz != nullptr);
+  assert(!env->ExceptionCheck());
+
+  jobjectArray array = env->NewObjectArray(0, clazz, nullptr);
+  assert(array != nullptr);
+  assert(!env->ExceptionCheck());
 }
 
 // http://b/10994325
-extern "C" JNIEXPORT void JNICALL Java_Main_testFindClassOnAttachedNativeThread(JNIEnv*,
-                                                                                   jclass) {
-  pthread_t pthread;
-  int pthread_create_result = pthread_create(&pthread,
-                                             NULL,
-                                             testFindClassOnAttachedNativeThread,
-                                             NULL);
-  assert(pthread_create_result == 0);
-  int pthread_join_result = pthread_join(pthread, NULL);
-  assert(pthread_join_result == 0);
+extern "C" JNIEXPORT void JNICALL Java_Main_testFindClassOnAttachedNativeThread(JNIEnv*, jclass) {
+  PthreadHelper(&testFindClassOnAttachedNativeThread);
 }
 
-static void* testFindFieldOnAttachedNativeThread(void*) {
-  assert(jvm != NULL);
-
-  JNIEnv* env = NULL;
-  JavaVMAttachArgs args = { JNI_VERSION_1_6, __FUNCTION__, NULL };
-  int attach_result = jvm->AttachCurrentThread(&env, &args);
-  assert(attach_result == 0);
-
+static void testFindFieldOnAttachedNativeThread(JNIEnv* env) {
   jclass clazz = env->FindClass("Main");
-  assert(clazz != NULL);
+  assert(clazz != nullptr);
   assert(!env->ExceptionCheck());
 
   jfieldID field = env->GetStaticFieldID(clazz, "testFindFieldOnAttachedNativeThreadField", "Z");
-  assert(field != NULL);
+  assert(field != nullptr);
   assert(!env->ExceptionCheck());
 
   env->SetStaticBooleanField(clazz, field, JNI_TRUE);
-
-  int detach_result = jvm->DetachCurrentThread();
-  assert(detach_result == 0);
-  return NULL;
 }
 
 extern "C" JNIEXPORT void JNICALL Java_Main_testFindFieldOnAttachedNativeThreadNative(JNIEnv*,
-                                                                                         jclass) {
-  pthread_t pthread;
-  int pthread_create_result = pthread_create(&pthread,
-                                             NULL,
-                                             testFindFieldOnAttachedNativeThread,
-                                             NULL);
-  assert(pthread_create_result == 0);
-  int pthread_join_result = pthread_join(pthread, NULL);
-  assert(pthread_join_result == 0);
+                                                                                      jclass) {
+  PthreadHelper(&testFindFieldOnAttachedNativeThread);
 }
 
-static void* testReflectFieldGetFromAttachedNativeThread(void*) {
-  assert(jvm != NULL);
-
-  JNIEnv* env = NULL;
-  JavaVMAttachArgs args = { JNI_VERSION_1_6, __FUNCTION__, NULL };
-  int attach_result = jvm->AttachCurrentThread(&env, &args);
-  assert(attach_result == 0);
-
+static void testReflectFieldGetFromAttachedNativeThread(JNIEnv* env) {
   jclass clazz = env->FindClass("Main");
-  assert(clazz != NULL);
+  assert(clazz != nullptr);
   assert(!env->ExceptionCheck());
 
   jclass class_clazz = env->FindClass("java/lang/Class");
-  assert(class_clazz != NULL);
+  assert(class_clazz != nullptr);
   assert(!env->ExceptionCheck());
 
   jmethodID getFieldMetodId = env->GetMethodID(class_clazz, "getField",
                                                "(Ljava/lang/String;)Ljava/lang/reflect/Field;");
-  assert(getFieldMetodId != NULL);
+  assert(getFieldMetodId != nullptr);
   assert(!env->ExceptionCheck());
 
   jstring field_name = env->NewStringUTF("testReflectFieldGetFromAttachedNativeThreadField");
-  assert(field_name != NULL);
+  assert(field_name != nullptr);
   assert(!env->ExceptionCheck());
 
   jobject field = env->CallObjectMethod(clazz, getFieldMetodId, field_name);
-  assert(field != NULL);
+  assert(field != nullptr);
   assert(!env->ExceptionCheck());
 
   jclass field_clazz = env->FindClass("java/lang/reflect/Field");
-  assert(field_clazz != NULL);
+  assert(field_clazz != nullptr);
   assert(!env->ExceptionCheck());
 
   jmethodID getBooleanMetodId = env->GetMethodID(field_clazz, "getBoolean",
                                                  "(Ljava/lang/Object;)Z");
-  assert(getBooleanMetodId != NULL);
+  assert(getBooleanMetodId != nullptr);
   assert(!env->ExceptionCheck());
 
   jboolean value = env->CallBooleanMethod(field, getBooleanMetodId, /* ignored */ clazz);
   assert(value == false);
   assert(!env->ExceptionCheck());
-
-  int detach_result = jvm->DetachCurrentThread();
-  assert(detach_result == 0);
-  return NULL;
 }
 
 // http://b/15539150
 extern "C" JNIEXPORT void JNICALL Java_Main_testReflectFieldGetFromAttachedNativeThreadNative(
     JNIEnv*, jclass) {
-  pthread_t pthread;
-  int pthread_create_result = pthread_create(&pthread,
-                                             NULL,
-                                             testReflectFieldGetFromAttachedNativeThread,
-                                             NULL);
-  assert(pthread_create_result == 0);
-  int pthread_join_result = pthread_join(pthread, NULL);
-  assert(pthread_join_result == 0);
+  PthreadHelper(&testReflectFieldGetFromAttachedNativeThread);
 }
 
 
 // http://b/11243757
 extern "C" JNIEXPORT void JNICALL Java_Main_testCallStaticVoidMethodOnSubClassNative(JNIEnv* env,
-                                                                                        jclass) {
+                                                                                     jclass) {
   jclass super_class = env->FindClass("Main$testCallStaticVoidMethodOnSubClass_SuperClass");
-  assert(super_class != NULL);
+  assert(super_class != nullptr);
 
   jmethodID execute = env->GetStaticMethodID(super_class, "execute", "()V");
-  assert(execute != NULL);
+  assert(execute != nullptr);
 
   jclass sub_class = env->FindClass("Main$testCallStaticVoidMethodOnSubClass_SubClass");
-  assert(sub_class != NULL);
+  assert(sub_class != nullptr);
 
   env->CallStaticVoidMethod(sub_class, execute);
 }
 
 extern "C" JNIEXPORT jobject JNICALL Java_Main_testGetMirandaMethodNative(JNIEnv* env, jclass) {
   jclass abstract_class = env->FindClass("Main$testGetMirandaMethod_MirandaAbstract");
-  assert(abstract_class != NULL);
+  assert(abstract_class != nullptr);
   jmethodID miranda_method = env->GetMethodID(abstract_class, "inInterface", "()Z");
-  assert(miranda_method != NULL);
+  assert(miranda_method != nullptr);
   return env->ToReflectedMethod(abstract_class, miranda_method, JNI_FALSE);
 }
 
@@ -191,7 +162,7 @@
 extern "C" void JNICALL Java_Main_testZeroLengthByteBuffers(JNIEnv* env, jclass) {
   std::vector<uint8_t> buffer(1);
   jobject byte_buffer = env->NewDirectByteBuffer(&buffer[0], 0);
-  assert(byte_buffer != NULL);
+  assert(byte_buffer != nullptr);
   assert(!env->ExceptionCheck());
 
   assert(env->GetDirectBufferAddress(byte_buffer) == &buffer[0]);
@@ -202,8 +173,8 @@
 jbyte byte_returns[kByteReturnSize] = { 0, 1, 2, 127, -1, -2, -128 };
 
 extern "C" jbyte JNICALL Java_Main_byteMethod(JNIEnv* env, jclass klass, jbyte b1, jbyte b2,
-                                                    jbyte b3, jbyte b4, jbyte b5, jbyte b6,
-                                                    jbyte b7, jbyte b8, jbyte b9, jbyte b10) {
+                                              jbyte b3, jbyte b4, jbyte b5, jbyte b6,
+                                              jbyte b7, jbyte b8, jbyte b9, jbyte b10) {
   // We use b1 to drive the output.
   assert(b2 == 2);
   assert(b3 == -3);
@@ -227,8 +198,8 @@
 // The weird static_cast is because short int is only guaranteed down to -32767, not Java's -32768.
 
 extern "C" jshort JNICALL Java_Main_shortMethod(JNIEnv* env, jclass klass, jshort s1, jshort s2,
-                                                    jshort s3, jshort s4, jshort s5, jshort s6,
-                                                    jshort s7, jshort s8, jshort s9, jshort s10) {
+                                                jshort s3, jshort s4, jshort s5, jshort s6,
+                                                jshort s7, jshort s8, jshort s9, jshort s10) {
   // We use s1 to drive the output.
   assert(s2 == 2);
   assert(s3 == -3);
@@ -247,9 +218,9 @@
 }
 
 extern "C" jboolean JNICALL Java_Main_booleanMethod(JNIEnv* env, jclass klass, jboolean b1,
-                                                       jboolean b2, jboolean b3, jboolean b4,
-                                                       jboolean b5, jboolean b6, jboolean b7,
-                                                       jboolean b8, jboolean b9, jboolean b10) {
+                                                    jboolean b2, jboolean b3, jboolean b4,
+                                                    jboolean b5, jboolean b6, jboolean b7,
+                                                    jboolean b8, jboolean b9, jboolean b10) {
   // We use b1 to drive the output.
   assert(b2 == JNI_TRUE);
   assert(b3 == JNI_FALSE);
@@ -269,8 +240,8 @@
 jchar char_returns[kCharReturnSize] = { 0, 1, 2, 127, 255, 256, 15000, 34000 };
 
 extern "C" jchar JNICALL Java_Main_charMethod(JNIEnv* env, jclass klacc, jchar c1, jchar c2,
-                                                    jchar c3, jchar c4, jchar c5, jchar c6,
-                                                    jchar c7, jchar c8, jchar c9, jchar c10) {
+                                              jchar c3, jchar c4, jchar c5, jchar c6, jchar c7,
+                                              jchar c8, jchar c9, jchar c10) {
   // We use c1 to drive the output.
   assert(c2 == 'a');
   assert(c3 == 'b');
@@ -291,3 +262,57 @@
                                                                        jclass from, jclass to) {
   return env->IsAssignableFrom(from, to);
 }
+
+static void testShallowGetCallingClassLoader(JNIEnv* env) {
+  // Test direct call.
+  {
+    jclass vmstack_clazz = env->FindClass("dalvik/system/VMStack");
+    assert(vmstack_clazz != nullptr);
+    assert(!env->ExceptionCheck());
+
+    jmethodID getCallingClassLoaderMethodId = env->GetStaticMethodID(vmstack_clazz,
+                                                                     "getCallingClassLoader",
+                                                                     "()Ljava/lang/ClassLoader;");
+    assert(getCallingClassLoaderMethodId != nullptr);
+    assert(!env->ExceptionCheck());
+
+    jobject class_loader = env->CallStaticObjectMethod(vmstack_clazz,
+                                                       getCallingClassLoaderMethodId);
+    assert(class_loader == nullptr);
+    assert(!env->ExceptionCheck());
+  }
+
+  // Test one-level call. Use System.loadLibrary().
+  {
+    jclass system_clazz = env->FindClass("java/lang/System");
+    assert(system_clazz != nullptr);
+    assert(!env->ExceptionCheck());
+
+    jmethodID loadLibraryMethodId = env->GetStaticMethodID(system_clazz, "loadLibrary",
+                                                           "(Ljava/lang/String;)V");
+    assert(loadLibraryMethodId != nullptr);
+    assert(!env->ExceptionCheck());
+
+    // Create a string object.
+    jobject library_string = env->NewStringUTF("arttest");
+    assert(library_string != nullptr);
+    assert(!env->ExceptionCheck());
+
+    env->CallStaticVoidMethod(system_clazz, loadLibraryMethodId, library_string);
+    if (env->ExceptionCheck()) {
+      // At most we expect UnsatisfiedLinkError.
+      jthrowable thrown = env->ExceptionOccurred();
+      env->ExceptionClear();
+
+      jclass unsatisfied_link_error_clazz = env->FindClass("java/lang/UnsatisfiedLinkError");
+      jclass thrown_class = env->GetObjectClass(thrown);
+      assert(env->IsSameObject(unsatisfied_link_error_clazz, thrown_class));
+    }
+  }
+}
+
+// http://b/16867274
+extern "C" JNIEXPORT void JNICALL Java_Main_nativeTestShallowGetCallingClassLoader(JNIEnv* env,
+                                                                                   jclass) {
+  PthreadHelper(&testShallowGetCallingClassLoader);
+}
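
The AttachHelper/PthreadHelper pair factored out above turns each attached-native-thread test into a plain JNIEnv* function. A further test added to this same jni_test.cc would follow the same pattern (testSomethingOnAttachedNativeThread and its JNI entry point are hypothetical placeholders):

// Runs on a freshly attached native thread; PthreadHelper wraps the call in
// AttachCurrentThread()/DetachCurrentThread().
static void testSomethingOnAttachedNativeThread(JNIEnv* env) {
  jclass clazz = env->FindClass("Main");
  assert(clazz != nullptr);
  assert(!env->ExceptionCheck());
}

extern "C" JNIEXPORT void JNICALL Java_Main_testSomethingOnAttachedNativeThread(JNIEnv*, jclass) {
  PthreadHelper(&testSomethingOnAttachedNativeThread);
}
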
diff --git a/test/004-JniTest/src/Main.java b/test/004-JniTest/src/Main.java
index ae133be..6d7d647 100644
--- a/test/004-JniTest/src/Main.java
+++ b/test/004-JniTest/src/Main.java
@@ -30,6 +30,7 @@
         testBooleanMethod();
         testCharMethod();
         testIsAssignableFromOnPrimitiveTypes();
+        testShallowGetCallingClassLoader();
     }
 
     private static native void testFindClassOnAttachedNativeThread();
@@ -167,4 +168,10 @@
     }
 
     native static boolean nativeIsAssignableFrom(Class<?> from, Class<?> to);
+
+    static void testShallowGetCallingClassLoader() {
+        nativeTestShallowGetCallingClassLoader();
+    }
+
+    native static void nativeTestShallowGetCallingClassLoader();
 }
diff --git a/test/115-native-bridge/nativebridge.cc b/test/115-native-bridge/nativebridge.cc
index 82211a5..268f0be 100644
--- a/test/115-native-bridge/nativebridge.cc
+++ b/test/115-native-bridge/nativebridge.cc
@@ -32,9 +32,9 @@
 
 struct NativeBridgeArtCallbacks {
   const char* (*getMethodShorty)(JNIEnv* env, jmethodID mid);
-  int (*getNativeMethodCount)(JNIEnv* env, jclass clazz);
-  int (*getNativeMethods)(JNIEnv* env, jclass clazz, JNINativeMethod* methods,
-       uint32_t method_count);
+  uint32_t (*getNativeMethodCount)(JNIEnv* env, jclass clazz);
+  uint32_t (*getNativeMethods)(JNIEnv* env, jclass clazz, JNINativeMethod* methods,
+                               uint32_t method_count);
 };
 
 struct NativeBridgeCallbacks {