Native allocation accounting

Added two new functions: registerNativeAllocation and registerNativeFree.
These functions let the GC know about native allocations that are kept
alive by Java objects and released in their finalizers. A GC is performed
or requested from within registerNativeAllocation when the total number
of accounted native bytes exceeds a certain threshold. After a GC occurs
in registerNativeAllocation, finalizers are run so that the native memory
is freed. Added a test which shows how to use these functions.
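
A typical caller pairs the two calls around the lifetime of a Java object
that owns a native allocation. The following is a minimal sketch only:
VMRuntime is an internal dalvik.system API (the added test reaches it via
reflection), the Java-side method declarations are assumed to match the
natives registered here, and nativeAlloc/nativeFree are hypothetical JNI
helpers standing in for the real native resource.

    import dalvik.system.VMRuntime;

    class NativeBuffer {
        private final long ptr;   // native memory owned by this object
        private final int bytes;

        NativeBuffer(int bytes) {
            this.bytes = bytes;
            this.ptr = nativeAlloc(bytes);  // hypothetical JNI allocator
            // Tell the GC how much native memory this object keeps alive.
            VMRuntime.getRuntime().registerNativeAllocation(bytes);
        }

        @Override protected void finalize() throws Throwable {
            try {
                nativeFree(ptr);            // hypothetical JNI release
                VMRuntime.getRuntime().registerNativeFree(bytes);
            } finally {
                super.finalize();
            }
        }

        private static native long nativeAlloc(int bytes);
        private static native void nativeFree(long ptr);
    }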

Change-Id: I40f3c79e1c02d5008dec7d58d61c5bb97ec2fc1b
diff --git a/build/Android.oattest.mk b/build/Android.oattest.mk
index 6ee8492..726298f 100644
--- a/build/Android.oattest.mk
+++ b/build/Android.oattest.mk
@@ -40,7 +40,7 @@
 TEST_OAT_DIRECTORIES := \
 	Main \
 	HelloWorld \
-	\
+	NativeAllocations \
 	ParallelGC \
 	ReferenceMap \
 	StackWalk \
diff --git a/src/gc/heap.cc b/src/gc/heap.cc
index a68cc02..eae1520 100644
--- a/src/gc/heap.cc
+++ b/src/gc/heap.cc
@@ -25,6 +25,7 @@
 #include <vector>
 
 #include "base/stl_util.h"
+#include "common_throws.h"
 #include "cutils/sched_policy.h"
 #include "debugger.h"
 #include "gc/accounting/atomic_stack.h"
@@ -170,12 +171,15 @@
       capacity_(capacity),
       growth_limit_(growth_limit),
       max_allowed_footprint_(initial_size),
+      native_footprint_gc_watermark_(initial_size),
+      native_footprint_limit_(2 * initial_size),
       concurrent_start_bytes_(concurrent_gc ? initial_size - (kMinConcurrentRemainingBytes)
                                             :  std::numeric_limits<size_t>::max()),
       total_bytes_freed_ever_(0),
       total_objects_freed_ever_(0),
       large_object_threshold_(3 * kPageSize),
       num_bytes_allocated_(0),
+      native_bytes_allocated_(0),
       verify_missing_card_marks_(false),
       verify_system_weaks_(false),
       verify_pre_gc_heap_(false),
@@ -569,9 +573,6 @@
       Dbg::RecordAllocation(c, byte_count);
     }
     if (static_cast<size_t>(num_bytes_allocated_) >= concurrent_start_bytes_) {
-      // We already have a request pending, no reason to start more until we update
-      // concurrent_start_bytes_.
-      concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
       // The SirtRef is necessary since the calls in RequestConcurrentGC are a safepoint.
       SirtRef<mirror::Object> ref(self, obj);
       RequestConcurrentGC(self);
@@ -1690,6 +1691,19 @@
   max_allowed_footprint_ = max_allowed_footprint;
 }
 
+void Heap::UpdateMaxNativeFootprint() {
+  size_t native_size = native_bytes_allocated_;
+  // TODO: Tune the native heap utilization to be a value other than the java heap utilization.
+  size_t target_size = native_size / GetTargetHeapUtilization();
+  if (target_size > native_size + max_free_) {
+    target_size = native_size + max_free_;
+  } else if (target_size < native_size + min_free_) {
+    target_size = native_size + min_free_;
+  }
+  native_footprint_gc_watermark_ = target_size;
+  native_footprint_limit_ = 2 * target_size - native_size;
+}
+
 void Heap::GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration) {
   // We know what our utilization is at this moment.
   // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
@@ -1746,6 +1760,8 @@
     DCHECK_LE(concurrent_start_bytes_, max_allowed_footprint_);
     DCHECK_LE(max_allowed_footprint_, growth_limit_);
   }
+
+  UpdateMaxNativeFootprint();
 }
 
 void Heap::ClearGrowthLimit() {
@@ -1881,6 +1897,10 @@
     return;
   }
 
+  // We already have a request pending, no reason to start more until we update
+  // concurrent_start_bytes_.
+  concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
+
   JNIEnv* env = self->GetJniEnv();
   DCHECK(WellKnownClasses::java_lang_Daemons != NULL);
   DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != NULL);
@@ -1958,5 +1978,64 @@
   return alloc_space_->Trim();
 }
 
+bool Heap::IsGCRequestPending() const {
+  return concurrent_start_bytes_ != std::numeric_limits<size_t>::max();
+}
+
+void Heap::RegisterNativeAllocation(int bytes) {
+  // Total number of native bytes allocated.
+  native_bytes_allocated_ += bytes;
+  Thread* self = Thread::Current();
+  if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_gc_watermark_) {
+    // The second watermark is higher than the gc watermark. If you hit this it means you are
+    // allocating native objects faster than the GC can keep up with.
+    if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
+        JNIEnv* env = self->GetJniEnv();
+        // Can't do this in WellKnownClasses::Init since System is not properly set up at that
+        // point.
+        if (WellKnownClasses::java_lang_System_runFinalization == NULL) {
+          DCHECK(WellKnownClasses::java_lang_System != NULL);
+          WellKnownClasses::java_lang_System_runFinalization =
+              CacheMethod(env, WellKnownClasses::java_lang_System, true, "runFinalization", "()V");
+          assert(WellKnownClasses::java_lang_System_runFinalization != NULL);
+        }
+        if (WaitForConcurrentGcToComplete(self) != collector::kGcTypeNone) {
+          // Just finished a GC, attempt to run finalizers.
+          env->CallStaticVoidMethod(WellKnownClasses::java_lang_System,
+                                    WellKnownClasses::java_lang_System_runFinalization);
+          CHECK(!env->ExceptionCheck());
+        }
+
+        // If we still are over the watermark, attempt a GC for alloc and run finalizers.
+        if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
+          CollectGarbageInternal(collector::kGcTypePartial, kGcCauseForAlloc, false);
+          env->CallStaticVoidMethod(WellKnownClasses::java_lang_System,
+                                    WellKnownClasses::java_lang_System_runFinalization);
+          CHECK(!env->ExceptionCheck());
+        }
+        // We have just run finalizers, update the native watermark since it is very likely that
+        // finalizers released native managed allocations.
+        UpdateMaxNativeFootprint();
+    } else {
+      if (!IsGCRequestPending()) {
+        RequestConcurrentGC(self);
+      }
+    }
+  }
+}
+
+void Heap::RegisterNativeFree(int bytes) {
+  int expected_size, new_size;
+  do {
+      expected_size = native_bytes_allocated_.get();
+      new_size = expected_size - bytes;
+      if (new_size < 0) {
+        ThrowRuntimeException("attempted to free %d native bytes with only %d native bytes registered as allocated",
+                              bytes, expected_size);
+        break;
+      }
+  } while (!native_bytes_allocated_.CompareAndSwap(expected_size, new_size));
+}
+
 }  // namespace gc
 }  // namespace art
diff --git a/src/gc/heap.h b/src/gc/heap.h
index 790ab02..980f3bc 100644
--- a/src/gc/heap.h
+++ b/src/gc/heap.h
@@ -126,6 +126,10 @@
   mirror::Object* AllocObject(Thread* self, mirror::Class* klass, size_t num_bytes)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  void RegisterNativeAllocation(int bytes)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void RegisterNativeFree(int bytes) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   // The given reference is believed to be to an object in the Java heap, check the soundness of it.
   void VerifyObjectImpl(const mirror::Object* o);
   void VerifyObject(const mirror::Object* o) {
@@ -403,6 +407,7 @@
 
   void RequestHeapTrim() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
   void RequestConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
+  bool IsGCRequestPending() const;
 
   void RecordAllocation(size_t size, mirror::Object* object)
       LOCKS_EXCLUDED(GlobalSynchronization::heap_bitmap_lock_)
@@ -421,6 +426,10 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void PostGcVerification(collector::GarbageCollector* gc);
 
+  // Update the watermark for the native allocated bytes based on the current number of native
+  // bytes allocated and the target utilization ratio.
+  void UpdateMaxNativeFootprint();
+
   // Given the current contents of the alloc space, increase the allowed heap footprint to match
   // the target utilization ratio.  This should only be called immediately after a full garbage
   // collection.
@@ -498,6 +507,10 @@
   // When the number of bytes allocated exceeds the footprint TryAllocate returns NULL indicating
   // a GC should be triggered.
   size_t max_allowed_footprint_;
+  // The watermark at which a concurrent GC is requested by registerNativeAllocation.
+  size_t native_footprint_gc_watermark_;
+  // The watermark at which a GC is performed inside of registerNativeAllocation.
+  size_t native_footprint_limit_;
 
   // When num_bytes_allocated_ exceeds this amount then a concurrent GC should be requested so that
   // it completes ahead of an allocation failing.
@@ -515,6 +528,9 @@
   // Number of bytes allocated.  Adjusted after each allocation and free.
   AtomicInteger num_bytes_allocated_;
 
+  // Bytes which are allocated and managed by native code but still need to be accounted for.
+  AtomicInteger native_bytes_allocated_;
+
   // Heap verification flags.
   const bool verify_missing_card_marks_;
   const bool verify_system_weaks_;
diff --git a/src/native/dalvik_system_VMRuntime.cc b/src/native/dalvik_system_VMRuntime.cc
index ce3cc93..baae8a3 100644
--- a/src/native/dalvik_system_VMRuntime.cc
+++ b/src/native/dalvik_system_VMRuntime.cc
@@ -165,6 +165,24 @@
   }
 }
 
+static void VMRuntime_registerNativeAllocation(JNIEnv* env, jobject, jint bytes) {
+  ScopedObjectAccess soa(env);
+  if (bytes < 0) {
+    ThrowRuntimeException("allocation size negative %d", bytes);
+    return;
+  }
+  Runtime::Current()->GetHeap()->RegisterNativeAllocation(bytes);
+}
+
+static void VMRuntime_registerNativeFree(JNIEnv* env, jobject, jint bytes) {
+  ScopedObjectAccess soa(env);
+  if (bytes < 0) {
+    ThrowRuntimeException("allocation size negative %d", bytes);
+    return;
+  }
+  Runtime::Current()->GetHeap()->RegisterNativeFree(bytes);
+}
+
 static void VMRuntime_trimHeap(JNIEnv*, jobject) {
   uint64_t start_ns = NanoTime();
 
@@ -210,10 +228,13 @@
   NATIVE_METHOD(VMRuntime, newNonMovableArray, "(Ljava/lang/Class;I)Ljava/lang/Object;"),
   NATIVE_METHOD(VMRuntime, properties, "()[Ljava/lang/String;"),
   NATIVE_METHOD(VMRuntime, setTargetSdkVersion, "(I)V"),
+  NATIVE_METHOD(VMRuntime, registerNativeAllocation, "(I)V"),
+  NATIVE_METHOD(VMRuntime, registerNativeFree, "(I)V"),
   NATIVE_METHOD(VMRuntime, startJitCompilation, "()V"),
   NATIVE_METHOD(VMRuntime, trimHeap, "()V"),
   NATIVE_METHOD(VMRuntime, vmVersion, "()Ljava/lang/String;"),
   NATIVE_METHOD(VMRuntime, vmLibrary, "()Ljava/lang/String;"),
+
 };
 
 void register_dalvik_system_VMRuntime(JNIEnv* env) {
diff --git a/src/well_known_classes.cc b/src/well_known_classes.cc
index 4d34c73..434fcf0 100644
--- a/src/well_known_classes.cc
+++ b/src/well_known_classes.cc
@@ -37,6 +37,7 @@
 jclass WellKnownClasses::java_lang_reflect_Proxy;
 jclass WellKnownClasses::java_lang_RuntimeException;
 jclass WellKnownClasses::java_lang_StackOverflowError;
+jclass WellKnownClasses::java_lang_System;
 jclass WellKnownClasses::java_lang_Thread;
 jclass WellKnownClasses::java_lang_Thread$UncaughtExceptionHandler;
 jclass WellKnownClasses::java_lang_ThreadGroup;
@@ -63,6 +64,7 @@
 jmethodID WellKnownClasses::java_lang_reflect_InvocationHandler_invoke;
 jmethodID WellKnownClasses::java_lang_Runtime_nativeLoad;
 jmethodID WellKnownClasses::java_lang_Short_valueOf;
+jmethodID WellKnownClasses::java_lang_System_runFinalization = NULL;
 jmethodID WellKnownClasses::java_lang_Thread_init;
 jmethodID WellKnownClasses::java_lang_Thread_run;
 jmethodID WellKnownClasses::java_lang_Thread$UncaughtExceptionHandler_uncaughtException;
@@ -105,7 +107,7 @@
   return fid;
 }
 
-static jmethodID CacheMethod(JNIEnv* env, jclass c, bool is_static, const char* name, const char* signature) {
+jmethodID CacheMethod(JNIEnv* env, jclass c, bool is_static, const char* name, const char* signature) {
   jmethodID mid = is_static ? env->GetStaticMethodID(c, name, signature) : env->GetMethodID(c, name, signature);
   if (mid == NULL) {
     LOG(FATAL) << "Couldn't find method \"" << name << "\" with signature \"" << signature << "\"";
@@ -132,6 +134,7 @@
   java_lang_reflect_Proxy = CacheClass(env, "java/lang/reflect/Proxy");
   java_lang_RuntimeException = CacheClass(env, "java/lang/RuntimeException");
   java_lang_StackOverflowError = CacheClass(env, "java/lang/StackOverflowError");
+  java_lang_System = CacheClass(env, "java/lang/System");
   java_lang_Thread = CacheClass(env, "java/lang/Thread");
   java_lang_Thread$UncaughtExceptionHandler = CacheClass(env, "java/lang/Thread$UncaughtExceptionHandler");
   java_lang_ThreadGroup = CacheClass(env, "java/lang/ThreadGroup");
diff --git a/src/well_known_classes.h b/src/well_known_classes.h
index 8170520..6e19f86 100644
--- a/src/well_known_classes.h
+++ b/src/well_known_classes.h
@@ -29,6 +29,8 @@
 // them up. Similar to libcore's JniConstants (except there's no overlap, so
 // we keep them separate).
 
+jmethodID CacheMethod(JNIEnv* env, jclass c, bool is_static, const char* name, const char* signature);
+
 struct WellKnownClasses {
   static void InitClasses(JNIEnv* env);
   static void Init(JNIEnv* env);  // Run before native methods are registered.
@@ -49,6 +51,7 @@
   static jclass java_lang_reflect_Proxy;
   static jclass java_lang_RuntimeException;
   static jclass java_lang_StackOverflowError;
+  static jclass java_lang_System;
   static jclass java_lang_Thread;
   static jclass java_lang_ThreadGroup;
   static jclass java_lang_Thread$UncaughtExceptionHandler;
@@ -75,6 +78,7 @@
   static jmethodID java_lang_reflect_InvocationHandler_invoke;
   static jmethodID java_lang_Runtime_nativeLoad;
   static jmethodID java_lang_Short_valueOf;
+  static jmethodID java_lang_System_runFinalization;
   static jmethodID java_lang_Thread_init;
   static jmethodID java_lang_Thread_run;
   static jmethodID java_lang_Thread$UncaughtExceptionHandler_uncaughtException;
diff --git a/test/NativeAllocations/NativeAllocations.java b/test/NativeAllocations/NativeAllocations.java
new file mode 100644
index 0000000..9423b91
--- /dev/null
+++ b/test/NativeAllocations/NativeAllocations.java
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.*;
+
+class NativeAllocations {
+    static Object nativeLock = new Object();
+    static int nativeBytes = 0;
+    static Object runtime;
+    static Method register_native_allocation;
+    static Method register_native_free;
+    static int maxMem = 64 * 1024 * 1024;
+
+    static class NativeAllocation {
+        private int bytes;
+
+        NativeAllocation(int bytes) throws Exception {
+            this.bytes = bytes;
+            register_native_allocation.invoke(runtime, bytes);
+            synchronized (nativeLock) {
+                nativeBytes += bytes;
+                if (nativeBytes > maxMem) {
+                    throw new OutOfMemoryError();
+                }
+            }
+        }
+
+        protected void finalize() throws Exception {
+            synchronized (nativeLock) {
+                nativeBytes -= bytes;
+            }
+            register_native_free.invoke(runtime, bytes);
+        }
+    }
+
+    public static void main(String[] args) throws Exception {
+        Class<?> vm_runtime = Class.forName("dalvik.system.VMRuntime");
+        Method get_runtime = vm_runtime.getDeclaredMethod("getRuntime");
+        runtime = get_runtime.invoke(null);
+        register_native_allocation = vm_runtime.getDeclaredMethod("registerNativeAllocation", Integer.TYPE);
+        register_native_free = vm_runtime.getDeclaredMethod("registerNativeFree", Integer.TYPE);
+        int count = 16;
+        int size = 512 * 0x400;
+        int allocation_count = 256;
+        NativeAllocation[] allocations = new NativeAllocation[count];
+        for (int i = 0; i < allocation_count; ++i) {
+            allocations[i % count] = new NativeAllocation(size);
+        }
+        System.out.println("Test complete");
+    }
+}
+