/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <stdint.h>
#include <sys/mman.h>
#include <errno.h>

#include <cutils/ashmem.h>

#define SIZE_MAX UINT_MAX  // TODO: get SIZE_MAX from stdint.h

#include "Dalvik.h"
#include "alloc/DlMalloc.h"
#include "alloc/Heap.h"
#include "alloc/HeapInternal.h"
#include "alloc/HeapSource.h"
#include "alloc/HeapBitmap.h"
#include "alloc/HeapBitmapInlines.h"
static void dvmHeapSourceUpdateMaxNativeFootprint();
static void snapIdealFootprint();
static void setIdealFootprint(size_t max);
static size_t getMaximumSize(const HeapSource *hs);
static void trimHeaps();

#define HEAP_UTILIZATION_MAX 1024

/* How long to wait after a GC before performing a heap trim
 * operation to reclaim unused pages.
 */
#define HEAP_TRIM_IDLE_TIME_MS (5 * 1000)

/* Start a concurrent collection when free memory falls under this
 * many bytes.
 */
#define CONCURRENT_START (128 << 10)

/* The next GC will not be concurrent when free memory after a GC is
 * under this many bytes.
 */
#define CONCURRENT_MIN_FREE (CONCURRENT_START + (128 << 10))

#define HS_BOILERPLATE() \
    do { \
        assert(gDvm.gcHeap != NULL); \
        assert(gDvm.gcHeap->heapSource != NULL); \
        assert(gHs == gDvm.gcHeap->heapSource); \
    } while (0)
struct Heap {
    /* The mspace to allocate from.
     */
    mspace msp;

    /* The largest size that this heap is allowed to grow to.
     */
    size_t maximumSize;

    /* Number of bytes allocated from this mspace for objects,
     * including any overhead.  This value is NOT exact, and
     * should only be used as an input for certain heuristics.
     */
    size_t bytesAllocated;

    /* Number of bytes allocated from this mspace at which a
     * concurrent garbage collection will be started.
     */
    size_t concurrentStartBytes;

    /* Number of objects currently allocated from this mspace.
     */
    size_t objectsAllocated;

    /*
     * The lowest address of this heap, inclusive.
     */
    char *base;

    /*
     * The highest address of this heap, exclusive.
     */
    char *limit;

    /*
     * If the heap has an mspace, the current high water mark in
     * allocations requested via dvmHeapSourceMorecore.
     */
    char *brk;
};
struct HeapSource {
    /* Target ideal heap utilization ratio; range 1..HEAP_UTILIZATION_MAX
     */
    size_t targetUtilization;

    /* The starting heap size.
     */
    size_t startSize;

    /* The largest that the heap source as a whole is allowed to grow.
     */
    size_t maximumSize;

    /*
     * The largest size we permit the heap to grow to.  This value allows
     * the user to limit heap growth below the maximum size.  This is a
     * workaround until we can dynamically set the maximum size.  This
     * value can range between the starting size and the maximum size but
     * should never be set below the current footprint of the heap.
     */
    size_t growthLimit;

    /* The desired max size of the heap source as a whole.
     */
    size_t idealSize;

    /* The maximum number of bytes allowed to be allocated from the
     * active heap before a GC is forced.  This is used to "shrink" the
     * heap in lieu of actual compaction.
     */
    size_t softLimit;

    /* Minimum number of free bytes.  Used with the target utilization when
     * setting the softLimit.  Never allows fewer bytes than this to be free
     * when the heap size is below the maximum size or growth limit.
     */
    size_t minFree;

    /* Maximum number of free bytes.  Used with the target utilization when
     * setting the softLimit.  Never allows more bytes than this to be free
     * when the heap size is below the maximum size or growth limit.
     */
    size_t maxFree;

    /* The heaps; heaps[0] is always the active heap,
     * which new objects should be allocated from.
     */
    Heap heaps[HEAP_SOURCE_MAX_HEAP_COUNT];

    /* The current number of heaps.
     */
    size_t numHeaps;

    /* True if zygote mode was active when the HeapSource was created.
     */
    bool sawZygote;

    /*
     * The base address of the virtual memory reservation.
     */
    char *heapBase;

    /*
     * The length in bytes of the virtual memory reservation.
     */
    size_t heapLength;

    /*
     * The live object bitmap.
     */
    HeapBitmap liveBits;

    /*
     * The mark bitmap.
     */
    HeapBitmap markBits;

    /*
     * Native allocations.
     */
    int32_t nativeBytesAllocated;
    size_t nativeFootprintGCWatermark;
    size_t nativeFootprintLimit;
    bool nativeNeedToRunFinalization;

    /*
     * State for the GC daemon.
     */
    bool hasGcThread;
    pthread_t gcThread;
    bool gcThreadShutdown;
    pthread_mutex_t gcThreadMutex;
    pthread_cond_t gcThreadCond;
    bool gcThreadTrimNeeded;
};
#define hs2heap(hs_) (&((hs_)->heaps[0]))

/*
 * Returns true iff a soft limit is in effect for the active heap.
 */
static bool isSoftLimited(const HeapSource *hs)
{
    /* softLimit will be either SIZE_MAX or the limit for the
     * active mspace.  idealSize can be greater than softLimit
     * if there is more than one heap.  If there is only one
     * heap, a non-SIZE_MAX softLimit should always be the same
     * as idealSize.
     */
    return hs->softLimit <= hs->idealSize;
}

/*
 * Returns approximately the maximum number of bytes allowed to be
 * allocated from the active heap before a GC is forced.
 */
static size_t getAllocLimit(const HeapSource *hs)
{
    if (isSoftLimited(hs)) {
        return hs->softLimit;
    } else {
        return mspace_footprint_limit(hs2heap(hs)->msp);
    }
}

/*
 * Returns the current footprint of all heaps.  If includeActive
 * is false, don't count the heap at index 0.
 */
static size_t oldHeapOverhead(const HeapSource *hs, bool includeActive)
{
    size_t footprint = 0;
    size_t i;

    if (includeActive) {
        i = 0;
    } else {
        i = 1;
    }
    for (/* i = i */; i < hs->numHeaps; i++) {
        //TODO: include size of bitmaps?  If so, don't use bitsLen, listen to .max
        footprint += mspace_footprint(hs->heaps[i].msp);
    }
    return footprint;
}

/*
 * Returns the heap that <ptr> could have come from, or NULL
 * if it could not have come from any heap.
 */
static Heap *ptr2heap(const HeapSource *hs, const void *ptr)
{
    const size_t numHeaps = hs->numHeaps;

    if (ptr != NULL) {
        for (size_t i = 0; i < numHeaps; i++) {
            const Heap *const heap = &hs->heaps[i];

            if ((const char *)ptr >= heap->base && (const char *)ptr < heap->limit) {
                return (Heap *)heap;
            }
        }
    }
    return NULL;
}
/*
 * Functions to update heapSource->bytesAllocated when an object
 * is allocated or freed.  mspace_usable_size() will give
 * us a much more accurate picture of heap utilization than
 * the requested byte sizes would.
 *
 * These aren't exact, and should not be treated as such.
 */
static void countAllocation(Heap *heap, const void *ptr)
{
    assert(heap->bytesAllocated < mspace_footprint(heap->msp));

    heap->bytesAllocated += mspace_usable_size(ptr) +
            HEAP_SOURCE_CHUNK_OVERHEAD;
    heap->objectsAllocated++;
    HeapSource* hs = gDvm.gcHeap->heapSource;
    dvmHeapBitmapSetObjectBit(&hs->liveBits, ptr);

    assert(heap->bytesAllocated < mspace_footprint(heap->msp));
}

static void countFree(Heap *heap, const void *ptr, size_t *numBytes)
{
    size_t delta = mspace_usable_size(ptr) + HEAP_SOURCE_CHUNK_OVERHEAD;
    assert(delta > 0);
    if (delta < heap->bytesAllocated) {
        heap->bytesAllocated -= delta;
    } else {
        heap->bytesAllocated = 0;
    }
    HeapSource* hs = gDvm.gcHeap->heapSource;
    dvmHeapBitmapClearObjectBit(&hs->liveBits, ptr);
    if (heap->objectsAllocated > 0) {
        heap->objectsAllocated--;
    }
    *numBytes += delta;
}

static HeapSource *gHs = NULL;
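
/*
 * Creates a dlmalloc mspace backed by the reserved region starting at
 * begin.  The mspace initially spans morecoreStart bytes and its
 * footprint is capped at startingSize until the limit is raised.
 */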
static mspace createMspace(void* begin, size_t morecoreStart, size_t startingSize)
{
    // Clear errno to allow strerror on error.
    errno = 0;
    // Allow access to initial pages that will hold mspace.
    mprotect(begin, morecoreStart, PROT_READ | PROT_WRITE);
    // Create mspace using our backing storage starting at begin and with a footprint of
    // morecoreStart. Don't use an internal dlmalloc lock. When morecoreStart bytes of
    // memory are exhausted morecore will be called.
    mspace msp = create_mspace_with_base(begin, morecoreStart, false /*locked*/);
    if (msp != NULL) {
        // Do not allow morecore requests to succeed beyond the starting size of the heap.
        mspace_set_footprint_limit(msp, startingSize);
    } else {
        ALOGE("create_mspace_with_base failed %s", strerror(errno));
    }
    return msp;
}
/*
 * Service request from DlMalloc to increase heap size.
 */
void* dvmHeapSourceMorecore(void* mspace, intptr_t increment)
{
    Heap* heap = NULL;
    for (size_t i = 0; i < gHs->numHeaps; i++) {
        if (gHs->heaps[i].msp == mspace) {
            heap = &gHs->heaps[i];
            break;
        }
    }
    if (heap == NULL) {
        ALOGE("Failed to find heap for mspace %p", mspace);
        dvmAbort();
    }
    char* original_brk = heap->brk;
    if (increment != 0) {
        char* new_brk = original_brk + increment;
        if (increment > 0) {
            // Should never be asked to increase the allocation beyond the capacity of the
            // space. Enforced by mspace_set_footprint_limit.
            assert(new_brk <= heap->limit);
            mprotect(original_brk, increment, PROT_READ | PROT_WRITE);
        } else {
            // Should never be asked for negative footprint (ie before base).
            assert(original_brk + increment > heap->base);
            // Advise we don't need the pages and protect them.
            size_t size = -increment;
            madvise(new_brk, size, MADV_DONTNEED);
            mprotect(new_brk, size, PROT_NONE);
        }
        // Update brk.
        heap->brk = new_brk;
    }
    return original_brk;
}
const size_t kInitialMorecoreStart = SYSTEM_PAGE_SIZE;

/*
 * Add the initial heap.  Returns false if the initial heap was
 * already added to the heap source.
 */
static bool addInitialHeap(HeapSource *hs, mspace msp, size_t maximumSize)
{
    assert(hs != NULL);
    assert(msp != NULL);
    if (hs->numHeaps != 0) {
        return false;
    }
    hs->heaps[0].msp = msp;
    hs->heaps[0].maximumSize = maximumSize;
    hs->heaps[0].concurrentStartBytes = SIZE_MAX;
    hs->heaps[0].base = hs->heapBase;
    hs->heaps[0].limit = hs->heapBase + maximumSize;
    hs->heaps[0].brk = hs->heapBase + kInitialMorecoreStart;
    hs->numHeaps = 1;
    return true;
}
/*
 * A helper for addNewHeap(). Remap the new heap so that it will have
 * a separate ashmem region with possibly a different name, etc. In
 * practice, this is used to give the app heap a separate ashmem
 * region from the zygote heap's.
 */
static bool remapNewHeap(HeapSource* hs, Heap* newHeap)
{
    char* newHeapBase = newHeap->base;
    size_t rem_size = hs->heapBase + hs->heapLength - newHeapBase;
    munmap(newHeapBase, rem_size);
    int fd = ashmem_create_region("dalvik-heap", rem_size);
    if (fd == -1) {
        ALOGE("Unable to create an ashmem region for the new heap");
        return false;
    }
    void* addr = mmap(newHeapBase, rem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    int ret = close(fd);
    if (addr == MAP_FAILED) {
        ALOGE("Unable to map an ashmem region for the new heap");
        return false;
    }
    if (ret == -1) {
        ALOGE("Unable to close fd for the ashmem region for the new heap");
        munmap(newHeapBase, rem_size);
        return false;
    }
    return true;
}
/*
 * Adds an additional heap to the heap source.  Returns false if there
 * are too many heaps or insufficient free space to add another heap.
 */
static bool addNewHeap(HeapSource *hs)
{
    Heap heap;

    assert(hs != NULL);
    if (hs->numHeaps >= HEAP_SOURCE_MAX_HEAP_COUNT) {
        ALOGE("Attempt to create too many heaps (%zd >= %zd)",
              hs->numHeaps, HEAP_SOURCE_MAX_HEAP_COUNT);
        dvmAbort();
        return false;
    }

    memset(&heap, 0, sizeof(heap));

    /*
     * Heap storage comes from a common virtual memory reservation.
     * The new heap will start on the page after the old heap.
     */
    char *base = hs->heaps[0].brk;
    size_t overhead = base - hs->heaps[0].base;
    assert(((size_t)hs->heaps[0].base & (SYSTEM_PAGE_SIZE - 1)) == 0);

    if (overhead + hs->minFree >= hs->maximumSize) {
        LOGE_HEAP("No room to create any more heaps "
                  "(%zd overhead, %zd max)",
                  overhead, hs->maximumSize);
        return false;
    }
    size_t morecoreStart = SYSTEM_PAGE_SIZE;
    heap.maximumSize = hs->growthLimit - overhead;
    heap.concurrentStartBytes = hs->minFree - CONCURRENT_START;
    heap.base = base;
    heap.limit = heap.base + heap.maximumSize;
    heap.brk = heap.base + morecoreStart;
    if (!remapNewHeap(hs, &heap)) {
        return false;
    }
    heap.msp = createMspace(base, morecoreStart, hs->minFree);
    if (heap.msp == NULL) {
        return false;
    }

    /* Don't let the soon-to-be-old heap grow any further.
     */
    hs->heaps[0].maximumSize = overhead;
    hs->heaps[0].limit = base;
    mspace_set_footprint_limit(hs->heaps[0].msp, overhead);

    /* Put the new heap in the list, at heaps[0].
     * Shift existing heaps down.
     */
    memmove(&hs->heaps[1], &hs->heaps[0], hs->numHeaps * sizeof(hs->heaps[0]));
    hs->heaps[0] = heap;
    hs->numHeaps++;

    return true;
}
/*
 * The garbage collection daemon.  Initiates a concurrent collection
 * when signaled.  Also periodically trims the heaps when a few seconds
 * have elapsed since the last concurrent GC.
 */
static void *gcDaemonThread(void* arg)
{
    dvmChangeStatus(NULL, THREAD_VMWAIT);
    dvmLockMutex(&gHs->gcThreadMutex);
    while (gHs->gcThreadShutdown != true) {
        bool trim = false;
        if (gHs->gcThreadTrimNeeded) {
            int result = dvmRelativeCondWait(&gHs->gcThreadCond, &gHs->gcThreadMutex,
                    HEAP_TRIM_IDLE_TIME_MS, 0);
            if (result == ETIMEDOUT) {
                /* Timed out waiting for a GC request, schedule a heap trim. */
                trim = true;
            }
        } else {
            dvmWaitCond(&gHs->gcThreadCond, &gHs->gcThreadMutex);
        }

        // Many JDWP requests cause allocation. We can't take the heap lock and wait to
        // transition to runnable so we can start a GC if a debugger is connected, because
        // we don't know that the JDWP thread isn't about to allocate and require the
        // heap lock itself, leading to deadlock. http://b/8191824.
        if (gDvm.debuggerConnected) {
            continue;
        }

        dvmLockHeap();
        /*
         * Another thread may have started a concurrent garbage
         * collection before we were scheduled.  Check for this
         * condition before proceeding.
         */
        if (!gDvm.gcHeap->gcRunning) {
            dvmChangeStatus(NULL, THREAD_RUNNING);
            if (trim) {
                trimHeaps();
                gHs->gcThreadTrimNeeded = false;
            } else {
                dvmCollectGarbageInternal(GC_CONCURRENT);
                gHs->gcThreadTrimNeeded = true;
            }
            dvmChangeStatus(NULL, THREAD_VMWAIT);
        }
        dvmUnlockHeap();
    }
    dvmChangeStatus(NULL, THREAD_RUNNING);
    return NULL;
}
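
/*
 * Creates and starts the GC daemon thread.  Returns true if the thread
 * was started successfully.
 */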
static bool gcDaemonStartup()
{
    dvmInitMutex(&gHs->gcThreadMutex);
    pthread_cond_init(&gHs->gcThreadCond, NULL);
    gHs->gcThreadShutdown = false;
    gHs->hasGcThread = dvmCreateInternalThread(&gHs->gcThread, "GC",
                                               gcDaemonThread, NULL);
    return gHs->hasGcThread;
}
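
/*
 * Signals the GC daemon thread to shut down and waits for it to exit.
 */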
static void gcDaemonShutdown()
{
    if (gHs->hasGcThread) {
        dvmLockMutex(&gHs->gcThreadMutex);
        gHs->gcThreadShutdown = true;
        dvmSignalCond(&gHs->gcThreadCond);
        dvmUnlockMutex(&gHs->gcThreadMutex);
        pthread_join(gHs->gcThread, NULL);
    }
}
/*
 * Create a stack big enough for the worst possible case, where the
 * heap is perfectly full of the smallest object.
 * TODO: be better about memory usage; use a smaller stack with
 *       overflow detection and recovery.
 */
static bool allocMarkStack(GcMarkStack *stack, size_t maximumSize)
{
    const char *name = "dalvik-mark-stack";
    void *addr;

    assert(stack != NULL);
    stack->length = maximumSize * sizeof(Object*) /
            (sizeof(Object) + HEAP_SOURCE_CHUNK_OVERHEAD);
    addr = dvmAllocRegion(stack->length, PROT_READ | PROT_WRITE, name);
    if (addr == NULL) {
        return false;
    }
    stack->base = (const Object **)addr;
    stack->limit = (const Object **)((char *)addr + stack->length);
    stack->top = NULL;
    madvise(stack->base, stack->length, MADV_DONTNEED);
    return true;
}

static void freeMarkStack(GcMarkStack *stack)
{
    assert(stack != NULL);
    munmap(stack->base, stack->length);
    memset(stack, 0, sizeof(*stack));
}
/*
 * Initializes the heap source; must be called before any other
 * dvmHeapSource*() functions.  Returns a GcHeap structure
 * allocated from the heap source.
 */
GcHeap* dvmHeapSourceStartup(size_t startSize, size_t maximumSize,
                             size_t growthLimit)
{
    GcHeap *gcHeap;
    HeapSource *hs;
    mspace msp;
    size_t length;
    void *base;

    assert(gHs == NULL);

    if (!(startSize <= growthLimit && growthLimit <= maximumSize)) {
        ALOGE("Bad heap size parameters (start=%zd, max=%zd, limit=%zd)",
              startSize, maximumSize, growthLimit);
        return NULL;
    }

    /*
     * Allocate a contiguous region of virtual memory to be subdivided
     * among the heaps managed by the garbage collector.
     */
    length = ALIGN_UP_TO_PAGE_SIZE(maximumSize);
    base = dvmAllocRegion(length, PROT_NONE, gDvm.zygote ? "dalvik-zygote" : "dalvik-heap");
    if (base == NULL) {
        return NULL;
    }

    /* Create an unlocked dlmalloc mspace to use as
     * a heap source.
     */
    msp = createMspace(base, kInitialMorecoreStart, startSize);
    if (msp == NULL) {
        goto fail;
    }

    gcHeap = (GcHeap *)calloc(1, sizeof(*gcHeap));
    if (gcHeap == NULL) {
        LOGE_HEAP("Can't allocate heap descriptor");
        goto fail;
    }

    hs = (HeapSource *)calloc(1, sizeof(*hs));
    if (hs == NULL) {
        LOGE_HEAP("Can't allocate heap source");
        free(gcHeap);
        goto fail;
    }

    hs->targetUtilization = gDvm.heapTargetUtilization * HEAP_UTILIZATION_MAX;
    hs->minFree = gDvm.heapMinFree;
    hs->maxFree = gDvm.heapMaxFree;
    hs->startSize = startSize;
    hs->maximumSize = maximumSize;
    hs->growthLimit = growthLimit;
    hs->idealSize = startSize;
    hs->softLimit = SIZE_MAX;    // no soft limit at first
    hs->numHeaps = 0;
    hs->sawZygote = gDvm.zygote;
    hs->nativeBytesAllocated = 0;
    hs->nativeFootprintGCWatermark = startSize;
    hs->nativeFootprintLimit = startSize * 2;
    hs->nativeNeedToRunFinalization = false;
    hs->hasGcThread = false;
    hs->heapBase = (char *)base;
    hs->heapLength = length;

    if (hs->maxFree > hs->maximumSize) {
        hs->maxFree = hs->maximumSize;
    }
    if (hs->minFree < CONCURRENT_START) {
        hs->minFree = CONCURRENT_START;
    } else if (hs->minFree > hs->maxFree) {
        hs->minFree = hs->maxFree;
    }

    if (!addInitialHeap(hs, msp, growthLimit)) {
        LOGE_HEAP("Can't add initial heap");
        goto fail;
    }
    if (!dvmHeapBitmapInit(&hs->liveBits, base, length, "dalvik-bitmap-1")) {
        LOGE_HEAP("Can't create liveBits");
        goto fail;
    }
    if (!dvmHeapBitmapInit(&hs->markBits, base, length, "dalvik-bitmap-2")) {
        LOGE_HEAP("Can't create markBits");
        dvmHeapBitmapDelete(&hs->liveBits);
        goto fail;
    }
    if (!allocMarkStack(&gcHeap->markContext.stack, hs->maximumSize)) {
        ALOGE("Can't create markStack");
        dvmHeapBitmapDelete(&hs->markBits);
        dvmHeapBitmapDelete(&hs->liveBits);
        goto fail;
    }
    gcHeap->markContext.bitmap = &hs->markBits;
    gcHeap->heapSource = hs;

    gHs = hs;
    return gcHeap;

fail:
    munmap(base, length);
    return NULL;
}
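
/*
 * Completes heap source startup once zygote initialization is done:
 * starts the GC daemon thread if the concurrent collector is enabled.
 */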
bool dvmHeapSourceStartupAfterZygote()
{
    return gDvm.concurrentMarkSweep ? gcDaemonStartup() : true;
}

/*
 * This is called while in zygote mode, right before we fork() for the
 * first time.  We create a heap for all future zygote process allocations,
 * in an attempt to avoid touching pages in the zygote heap.  (This would
 * probably be unnecessary if we had a compacting GC -- the source of our
 * troubles is small allocations filling in the gaps from larger ones.)
 */
bool dvmHeapSourceStartupBeforeFork()
{
    HeapSource *hs = gHs; // use a local to avoid the implicit "volatile"

    HS_BOILERPLATE();

    assert(gDvm.zygote);

    if (!gDvm.newZygoteHeapAllocated) {
        /* Ensure heaps are trimmed to minimize footprint pre-fork.
         */
        trimHeaps();
        /* Create a new heap for post-fork zygote allocations.  We only
         * try once, even if it fails.
         */
        ALOGV("Splitting out new zygote heap");
        gDvm.newZygoteHeapAllocated = true;
        return addNewHeap(hs);
    }
    return true;
}
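
/*
 * Shuts down the GC daemon thread, if one was started.
 */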
void dvmHeapSourceThreadShutdown()
{
    if (gDvm.gcHeap != NULL && gDvm.concurrentMarkSweep) {
        gcDaemonShutdown();
    }
}
/*
 * Tears down the entire GcHeap structure and all of the substructures
 * attached to it.  This call has the side effect of setting the given
 * gcHeap pointer and gHs to NULL.
 */
void dvmHeapSourceShutdown(GcHeap **gcHeap)
{
    assert(gcHeap != NULL);
    if (*gcHeap != NULL && (*gcHeap)->heapSource != NULL) {
        HeapSource *hs = (*gcHeap)->heapSource;
        dvmHeapBitmapDelete(&hs->liveBits);
        dvmHeapBitmapDelete(&hs->markBits);
        freeMarkStack(&(*gcHeap)->markContext.stack);
        munmap(hs->heapBase, hs->heapLength);
        free(hs);
        gHs = NULL;
        free(*gcHeap);
        *gcHeap = NULL;
    }
}
/*
 * Gets the beginning of the allocation for the HeapSource.
 */
void *dvmHeapSourceGetBase()
{
    return gHs->heapBase;
}

/*
 * Returns a high water mark; all objects must have been allocated
 * between the base and this limit.
 */
void *dvmHeapSourceGetLimit()
{
    HeapSource *hs = gHs;
    void *max_brk = hs->heaps[0].brk;

#ifndef NDEBUG
    for (size_t i = 1; i < hs->numHeaps; i++) {
        Heap *const heap = &hs->heaps[i];
        void *heap_brk = heap->brk;
        assert (max_brk > heap_brk);
    }
#endif
    return max_brk;
}
/*
 * Returns the requested value. If the per-heap stats are requested, fill
 * them as well.
 *
 * Caller must hold the heap lock.
 */
size_t dvmHeapSourceGetValue(HeapSourceValueSpec spec, size_t perHeapStats[],
                             size_t arrayLen)
{
    HeapSource *hs = gHs;
    size_t value = 0;
    size_t total = 0;

    HS_BOILERPLATE();

    assert(arrayLen >= hs->numHeaps || perHeapStats == NULL);
    for (size_t i = 0; i < hs->numHeaps; i++) {
        Heap *const heap = &hs->heaps[i];

        switch (spec) {
        case HS_FOOTPRINT:
            value = heap->brk - heap->base;
            assert(value == mspace_footprint(heap->msp));
            break;
        case HS_ALLOWED_FOOTPRINT:
            value = mspace_footprint_limit(heap->msp);
            break;
        case HS_BYTES_ALLOCATED:
            value = heap->bytesAllocated;
            break;
        case HS_OBJECTS_ALLOCATED:
            value = heap->objectsAllocated;
            break;
        default:
            // quiet gcc
            break;
        }

        if (perHeapStats) {
            perHeapStats[i] = value;
        }
        total += value;
    }
    return total;
}
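
/*
 * Fills base[] and max[] with the address range of each heap, up to
 * numHeaps entries.  Each max entry is clamped to the highest address
 * covered by the mark bitmap.
 */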
void dvmHeapSourceGetRegions(uintptr_t *base, uintptr_t *max, size_t numHeaps)
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    assert(numHeaps <= hs->numHeaps);
    for (size_t i = 0; i < numHeaps; ++i) {
        base[i] = (uintptr_t)hs->heaps[i].base;
        max[i] = MIN((uintptr_t)hs->heaps[i].limit - 1, hs->markBits.max);
    }
}
/*
 * Get the bitmap representing all live objects.
 */
HeapBitmap *dvmHeapSourceGetLiveBits()
{
    HS_BOILERPLATE();

    return &gHs->liveBits;
}

/*
 * Get the bitmap representing all marked objects.
 */
HeapBitmap *dvmHeapSourceGetMarkBits()
{
    HS_BOILERPLATE();

    return &gHs->markBits;
}
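
/*
 * Exchanges the live and mark bitmaps, so that the set of marked
 * objects becomes the new live set.
 */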
void dvmHeapSourceSwapBitmaps()
{
    HeapBitmap tmp = gHs->liveBits;
    gHs->liveBits = gHs->markBits;
    gHs->markBits = tmp;
}
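
/*
 * Clears every bit in the mark bitmap.
 */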
void dvmHeapSourceZeroMarkBitmap()
{
    HS_BOILERPLATE();

    dvmHeapBitmapZero(&gHs->markBits);
}
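
/*
 * Marks every object in the immune heaps (those entirely below
 * immuneLimit) by copying their live bits into the mark bitmap.
 */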
void dvmMarkImmuneObjects(const char *immuneLimit)
{
    /*
     * Copy the contents of the live bit vector for immune object
     * range into the mark bit vector.
     */
    /* The only values generated by dvmHeapSourceGetImmuneLimit() */
    assert(immuneLimit == gHs->heaps[0].base ||
           immuneLimit == NULL);
    assert(gHs->liveBits.base == gHs->markBits.base);
    assert(gHs->liveBits.bitsLen == gHs->markBits.bitsLen);
    /* heap[0] is never immune */
    assert(gHs->heaps[0].base >= immuneLimit);
    assert(gHs->heaps[0].limit > immuneLimit);

    for (size_t i = 1; i < gHs->numHeaps; ++i) {
        if (gHs->heaps[i].base < immuneLimit) {
            assert(gHs->heaps[i].limit <= immuneLimit);
            /* Compute the number of words to copy in the bitmap. */
            size_t index = HB_OFFSET_TO_INDEX(
                (uintptr_t)gHs->heaps[i].base - gHs->liveBits.base);
            /* Compute the starting offset in the live and mark bits. */
            char *src = (char *)(gHs->liveBits.bits + index);
            char *dst = (char *)(gHs->markBits.bits + index);
            /* Compute the number of bytes of the live bitmap to copy. */
            size_t length = HB_OFFSET_TO_BYTE_INDEX(
                gHs->heaps[i].limit - gHs->heaps[i].base);
            /* Do the copy. */
            memcpy(dst, src, length);
            /* Make sure max points to the address of the highest set bit. */
            if (gHs->markBits.max < (uintptr_t)gHs->heaps[i].limit) {
                gHs->markBits.max = (uintptr_t)gHs->heaps[i].limit;
            }
        }
    }
}
/*
 * Allocates <n> bytes of zeroed data.
 */
void* dvmHeapSourceAlloc(size_t n)
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    Heap* heap = hs2heap(hs);
    if (heap->bytesAllocated + n > hs->softLimit) {
        /*
         * This allocation would push us over the soft limit; act as
         * if the heap is full.
         */
        LOGV_HEAP("softLimit of %zd.%03zdMB hit for %zd-byte allocation",
                  FRACTIONAL_MB(hs->softLimit), n);
        return NULL;
    }
    void* ptr;
    if (gDvm.lowMemoryMode) {
        /* This is only necessary because mspace_calloc always memsets the
         * allocated memory to 0. This is bad for memory usage since it leads
         * to dirty zero pages. If low memory mode is enabled, we use
         * mspace_malloc which doesn't memset the allocated memory and madvise
         * the page aligned region back to the kernel.
         */
        ptr = mspace_malloc(heap->msp, n);
        if (ptr == NULL) {
            return NULL;
        }
        uintptr_t zero_begin = (uintptr_t)ptr;
        uintptr_t zero_end = (uintptr_t)ptr + n;
        /* Calculate the page aligned region.
         */
        uintptr_t begin = ALIGN_UP_TO_PAGE_SIZE(zero_begin);
        uintptr_t end = zero_end & ~(uintptr_t)(SYSTEM_PAGE_SIZE - 1);
        /* If our allocation spans more than one page, we attempt to madvise.
         */
        if (begin < end) {
            /* madvise the page aligned region to kernel.
             */
            madvise((void*)begin, end - begin, MADV_DONTNEED);
            /* Zero the region after the page aligned region.
             */
            memset((void*)end, 0, zero_end - end);
            /* Zero out the region before the page aligned region.
             */
            zero_end = begin;
        }
        memset((void*)zero_begin, 0, zero_end - zero_begin);
    } else {
        ptr = mspace_calloc(heap->msp, 1, n);
        if (ptr == NULL) {
            return NULL;
        }
    }

    countAllocation(heap, ptr);
    /*
     * Check to see if a concurrent GC should be initiated.
     */
    if (gDvm.gcHeap->gcRunning || !hs->hasGcThread) {
        /*
         * The garbage collector thread is already running or has yet
         * to be started.  Do nothing.
         */
        return ptr;
    }
    if (heap->bytesAllocated > heap->concurrentStartBytes) {
        /*
         * We have exceeded the allocation threshold.  Wake up the
         * garbage collector.
         */
        dvmSignalCond(&gHs->gcThreadCond);
    }
    return ptr;
}
/* Remove any hard limits, try to allocate, and shrink back down.
 * Last resort when trying to allocate an object.
 */
static void* heapAllocAndGrow(HeapSource *hs, Heap *heap, size_t n)
{
    /* Grow as much as possible, but don't let the real footprint
     * go over the absolute max.
     */
    size_t max = heap->maximumSize;

    mspace_set_footprint_limit(heap->msp, max);
    void* ptr = dvmHeapSourceAlloc(n);

    /* Shrink back down as small as possible.  Our caller may
     * readjust max_allowed to a more appropriate value.
     */
    mspace_set_footprint_limit(heap->msp,
                               mspace_footprint(heap->msp));
    return ptr;
}

/*
 * Allocates <n> bytes of zeroed data, growing as much as possible
 * if necessary.
 */
void* dvmHeapSourceAllocAndGrow(size_t n)
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    Heap* heap = hs2heap(hs);
    void* ptr = dvmHeapSourceAlloc(n);
    if (ptr != NULL) {
        return ptr;
    }

    size_t oldIdealSize = hs->idealSize;
    if (isSoftLimited(hs)) {
        /* We're soft-limited.  Try removing the soft limit to
         * see if we can allocate without actually growing.
         */
        hs->softLimit = SIZE_MAX;
        ptr = dvmHeapSourceAlloc(n);
        if (ptr != NULL) {
            /* Removing the soft limit worked;  fix things up to
             * reflect the new effective ideal size.
             */
            snapIdealFootprint();
            return ptr;
        }
        // softLimit intentionally left at SIZE_MAX.
    }

    /* We're not soft-limited.  Grow the heap to satisfy the request.
     * If this call fails, no footprints will have changed.
     */
    ptr = heapAllocAndGrow(hs, heap, n);
    if (ptr != NULL) {
        /* The allocation succeeded.  Fix up the ideal size to
         * reflect any footprint modifications that had to happen.
         */
        snapIdealFootprint();
    } else {
        /* We just couldn't do it.  Restore the original ideal size,
         * fixing up softLimit if necessary.
         */
        setIdealFootprint(oldIdealSize);
    }
    return ptr;
}
/*
 * Frees the first numPtrs objects in the ptrs list and returns the
 * amount of reclaimed storage.  The list must contain addresses all in
 * the same mspace, and must be in increasing order. This implies that
 * there are no duplicates, and no entries are NULL.
 */
size_t dvmHeapSourceFreeList(size_t numPtrs, void **ptrs)
{
    HS_BOILERPLATE();

    if (numPtrs == 0) {
        return 0;
    }

    assert(ptrs != NULL);
    assert(*ptrs != NULL);
    Heap* heap = ptr2heap(gHs, *ptrs);
    size_t numBytes = 0;
    if (heap != NULL) {
        mspace msp = heap->msp;
        // Calling mspace_free on shared heaps disrupts sharing too
        // much. For heap[0] -- the 'active heap' -- we call
        // mspace_free, but on the other heaps we only do some
        // accounting.
        if (heap == gHs->heaps) {
            // Count freed objects.
            for (size_t i = 0; i < numPtrs; i++) {
                assert(ptrs[i] != NULL);
                assert(ptr2heap(gHs, ptrs[i]) == heap);
                countFree(heap, ptrs[i], &numBytes);
            }
            // Bulk free ptrs.
            mspace_bulk_free(msp, ptrs, numPtrs);
        } else {
            // This is not an 'active heap'. Only do the accounting.
            for (size_t i = 0; i < numPtrs; i++) {
                assert(ptrs[i] != NULL);
                assert(ptr2heap(gHs, ptrs[i]) == heap);
                countFree(heap, ptrs[i], &numBytes);
            }
        }
    }
    return numBytes;
}

/*
 * Returns true iff <ptr> is in the heap source.
 */
bool dvmHeapSourceContainsAddress(const void *ptr)
{
    HS_BOILERPLATE();

    return (dvmHeapSourceGetBase() <= ptr) && (ptr <= dvmHeapSourceGetLimit());
}

/*
 * Returns true iff <ptr> was allocated from the heap source.
 */
bool dvmHeapSourceContains(const void *ptr)
{
    HS_BOILERPLATE();

    if (dvmHeapSourceContainsAddress(ptr)) {
        return dvmHeapBitmapIsObjectBitSet(&gHs->liveBits, ptr) != 0;
    }
    return false;
}
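
/*
 * Returns true iff <obj> lives in a zygote heap, i.e. the heap source
 * saw zygote mode and the object is in a heap other than the active one.
 */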
bool dvmIsZygoteObject(const Object* obj)
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    if (dvmHeapSourceContains(obj) && hs->sawZygote) {
        Heap *heap = ptr2heap(hs, obj);
        if (heap != NULL) {
            /* If the object is not in the active heap, we assume that
             * it was allocated as part of zygote.
             */
            return heap != hs->heaps;
        }
    }
    /* The pointer is outside of any known heap, or we are not
     * running in zygote mode.
     */
    return false;
}

/*
 * Returns the number of usable bytes in an allocated chunk; the size
 * may be larger than the size passed to dvmHeapSourceAlloc().
 */
size_t dvmHeapSourceChunkSize(const void *ptr)
{
    HS_BOILERPLATE();

    Heap* heap = ptr2heap(gHs, ptr);
    if (heap != NULL) {
        return mspace_usable_size(ptr);
    }
    return 0;
}

/*
 * Returns the number of bytes that the heap source has allocated
 * from the system using sbrk/mmap, etc.
 *
 * Caller must hold the heap lock.
 */
size_t dvmHeapSourceFootprint()
{
    HS_BOILERPLATE();

    //TODO: include size of bitmaps?
    return oldHeapOverhead(gHs, true);
}

static size_t getMaximumSize(const HeapSource *hs)
{
    return hs->growthLimit;
}
/*
 * Returns the current maximum size of the heap source respecting any
 * growth limits.
 */
size_t dvmHeapSourceGetMaximumSize()
{
    HS_BOILERPLATE();

    return getMaximumSize(gHs);
}

/*
 * Removes any growth limits.  Allows the user to allocate up to the
 * maximum heap size.
 */
void dvmClearGrowthLimit()
{
    HS_BOILERPLATE();

    dvmLockHeap();
    dvmWaitForConcurrentGcToComplete();
    gDvm.gcHeap->cardTableLength = gDvm.gcHeap->cardTableMaxLength;
    gHs->growthLimit = gHs->maximumSize;
    size_t overhead = oldHeapOverhead(gHs, false);
    gHs->heaps[0].maximumSize = gHs->maximumSize - overhead;
    gHs->heaps[0].limit = gHs->heaps[0].base + gHs->heaps[0].maximumSize;
    dvmUnlockHeap();
}

/*
 * Return the real bytes used by old heaps plus the soft usage of the
 * current heap.  When a soft limit is in effect, this is effectively
 * what it's compared against (though, in practice, it only looks at
 * the current heap).
 */
static size_t getSoftFootprint(bool includeActive)
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    size_t ret = oldHeapOverhead(hs, false);
    if (includeActive) {
        ret += hs->heaps[0].bytesAllocated;
    }
    return ret;
}
/*
 * Gets the maximum number of bytes that the heap source is allowed
 * to allocate from the system.
 */
size_t dvmHeapSourceGetIdealFootprint()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    return hs->idealSize;
}

/*
 * Sets the soft limit, handling any necessary changes to the allowed
 * footprint of the active heap.
 */
static void setSoftLimit(HeapSource *hs, size_t softLimit)
{
    /* Compare against the actual footprint, rather than the
     * max_allowed, because the heap may not have grown all the
     * way to the allowed size yet.
     */
    mspace msp = hs->heaps[0].msp;
    size_t currentHeapSize = mspace_footprint(msp);
    if (softLimit < currentHeapSize) {
        /* Don't let the heap grow any more, and impose a soft limit.
         */
        mspace_set_footprint_limit(msp, currentHeapSize);
        hs->softLimit = softLimit;
    } else {
        /* Let the heap grow to the requested max, and remove any
         * soft limit, if set.
         */
        mspace_set_footprint_limit(msp, softLimit);
        hs->softLimit = SIZE_MAX;
    }
}

/*
 * Sets the maximum number of bytes that the heap source is allowed
 * to allocate from the system.  Clamps to the appropriate maximum
 * value.
 */
static void setIdealFootprint(size_t max)
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    size_t maximumSize = getMaximumSize(hs);
    if (max > maximumSize) {
        LOGI_HEAP("Clamp target GC heap from %zd.%03zdMB to %u.%03uMB",
                  FRACTIONAL_MB(max),
                  FRACTIONAL_MB(maximumSize));
        max = maximumSize;
    }

    /* Convert max into a size that applies to the active heap.
     * Old heaps will count against the ideal size.
     */
    size_t overhead = getSoftFootprint(false);
    size_t activeMax;
    if (overhead < max) {
        activeMax = max - overhead;
    } else {
        activeMax = 0;
    }

    setSoftLimit(hs, activeMax);
    hs->idealSize = max;
}

/*
 * Make the ideal footprint equal to the current footprint.
 */
static void snapIdealFootprint()
{
    HS_BOILERPLATE();

    setIdealFootprint(getSoftFootprint(true));
}
/*
 * Gets the current ideal heap utilization, represented as a number
 * between zero and one.
 */
float dvmGetTargetHeapUtilization()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    return (float)hs->targetUtilization / (float)HEAP_UTILIZATION_MAX;
}

/*
 * Sets the new ideal heap utilization, represented as a number
 * between zero and one.
 */
void dvmSetTargetHeapUtilization(float newTarget)
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    /* Clamp it to a reasonable range.
     */
    // TODO: This may need some tuning.
    if (newTarget < 0.2) {
        newTarget = 0.2;
    } else if (newTarget > 0.8) {
        newTarget = 0.8;
    }

    hs->targetUtilization =
            (size_t)(newTarget * (float)HEAP_UTILIZATION_MAX);
    ALOGV("Set heap target utilization to %zd/%d (%f)",
          hs->targetUtilization, HEAP_UTILIZATION_MAX, newTarget);
}

/*
 * Given the size of a live set, returns the ideal heap size given
 * the current target utilization and MIN/MAX values.
 */
static size_t getUtilizationTarget(const HeapSource* hs, size_t liveSize)
{
    /* Use the current target utilization ratio to determine the
     * ideal heap size based on the size of the live set.
     */
    size_t targetSize = (liveSize / hs->targetUtilization) * HEAP_UTILIZATION_MAX;

    /* Cap the amount of free space, though, so we don't end up
     * with, e.g., 8MB of free space when the live set size hits 8MB.
     */
    if (targetSize > liveSize + hs->maxFree) {
        targetSize = liveSize + hs->maxFree;
    } else if (targetSize < liveSize + hs->minFree) {
        targetSize = liveSize + hs->minFree;
    }
    return targetSize;
}

/*
 * Given the current contents of the active heap, increase the allowed
 * heap footprint to match the target utilization ratio.  This
 * should only be called immediately after a full mark/sweep.
 */
void dvmHeapSourceGrowForUtilization()
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    Heap* heap = hs2heap(hs);

    /* Use the current target utilization ratio to determine the
     * ideal heap size based on the size of the live set.
     * Note that only the active heap plays any part in this.
     *
     * Avoid letting the old heaps influence the target free size,
     * because they may be full of objects that aren't actually
     * in the working set.  Just look at the allocated size of
     * the current heap.
     */
    size_t currentHeapUsed = heap->bytesAllocated;
    size_t targetHeapSize = getUtilizationTarget(hs, currentHeapUsed);

    /* The ideal size includes the old heaps; add overhead so that
     * it can be immediately subtracted again in setIdealFootprint().
     * If the target heap size would exceed the max, setIdealFootprint()
     * will clamp it to a legal value.
     */
    size_t overhead = getSoftFootprint(false);
    setIdealFootprint(targetHeapSize + overhead);

    size_t freeBytes = getAllocLimit(hs);
    if (freeBytes < CONCURRENT_MIN_FREE) {
        /* Not enough free memory to allow a concurrent GC. */
        heap->concurrentStartBytes = SIZE_MAX;
    } else {
        heap->concurrentStartBytes = freeBytes - CONCURRENT_START;
    }

    /* Mark that we need to run finalizers and update the native watermarks
     * next time we attempt to register a native allocation.
     */
    gHs->nativeNeedToRunFinalization = true;
}
/*
 * Return free pages to the system.
 * TODO: move this somewhere else, especially the native heap part.
 */
static void releasePagesInRange(void* start, void* end, size_t used_bytes,
                                void* releasedBytes)
{
    if (used_bytes == 0) {
        /*
         * We have a range of memory we can try to madvise()
         * back. Linux requires that the madvise() start address is
         * page-aligned.  We also align the end address.
         */
        start = (void *)ALIGN_UP_TO_PAGE_SIZE(start);
        end = (void *)((size_t)end & ~(SYSTEM_PAGE_SIZE - 1));
        if (end > start) {
            size_t length = (char *)end - (char *)start;
            madvise(start, length, MADV_DONTNEED);
            *(size_t *)releasedBytes += length;
        }
    }
}

/*
 * Return unused memory to the system if possible.
 */
static void trimHeaps()
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    size_t heapBytes = 0;
    for (size_t i = 0; i < hs->numHeaps; i++) {
        Heap *heap = &hs->heaps[i];

        /* Return the wilderness chunk to the system. */
        mspace_trim(heap->msp, 0);

        /* Return any whole free pages to the system. */
        mspace_inspect_all(heap->msp, releasePagesInRange, &heapBytes);
    }

    /* Same for the native heap. */
    dlmalloc_trim(0);
    size_t nativeBytes = 0;
    dlmalloc_inspect_all(releasePagesInRange, &nativeBytes);

    LOGD_HEAP("madvised %zd (GC) + %zd (native) = %zd total bytes",
              heapBytes, nativeBytes, heapBytes + nativeBytes);
}
/*
 * Walks over the heap source and passes every allocated and
 * free chunk to the callback.
 */
void dvmHeapSourceWalk(void(*callback)(void* start, void* end,
                                       size_t used_bytes, void* arg),
                       void *arg)
{
    HS_BOILERPLATE();

    /* Walk the heaps from oldest to newest.
     */
    //TODO: do this in address order
    HeapSource *hs = gHs;
    for (size_t i = hs->numHeaps; i > 0; --i) {
        mspace_inspect_all(hs->heaps[i-1].msp, callback, arg);
        callback(NULL, NULL, 0, arg);  // Indicate end of a heap.
    }
}

/*
 * Gets the number of heaps available in the heap source.
 *
 * Caller must hold the heap lock, because gHs caches a field
 * in gDvm.gcHeap.
 */
size_t dvmHeapSourceGetNumHeaps()
{
    HS_BOILERPLATE();

    return gHs->numHeaps;
}
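
/*
 * Returns the limit below which objects are treated as immune during a
 * partial collection: the base of the active heap when isPartial is
 * true, or NULL for a full collection.
 */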
void *dvmHeapSourceGetImmuneLimit(bool isPartial)
{
    if (isPartial) {
        return hs2heap(gHs)->base;
    } else {
        return NULL;
    }
}
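
/*
 * Recomputes the native GC watermark and native footprint limit from
 * the current native allocation size and the target utilization.
 */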
static void dvmHeapSourceUpdateMaxNativeFootprint()
{
    /* Use the current target utilization ratio to determine the new native GC
     * watermarks.
     */
    size_t nativeSize = gHs->nativeBytesAllocated;
    size_t targetSize =
            (nativeSize / gHs->targetUtilization) * HEAP_UTILIZATION_MAX;

    if (targetSize > nativeSize + gHs->maxFree) {
        targetSize = nativeSize + gHs->maxFree;
    } else if (targetSize < nativeSize + gHs->minFree) {
        targetSize = nativeSize + gHs->minFree;
    }
    gHs->nativeFootprintGCWatermark = targetSize;
    gHs->nativeFootprintLimit = 2 * targetSize - nativeSize;
}
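
/*
 * Called from VMRuntime.registerNativeAllocation.  Accounts for the
 * native allocation and runs finalization and/or a GC when the native
 * watermarks are exceeded.
 */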
void dvmHeapSourceRegisterNativeAllocation(int bytes)
{
    /* If we have just done a GC, ensure that the finalizers are done and update
     * the native watermarks.
     */
    if (gHs->nativeNeedToRunFinalization) {
        dvmRunFinalization();
        dvmHeapSourceUpdateMaxNativeFootprint();
        gHs->nativeNeedToRunFinalization = false;
    }

    android_atomic_add(bytes, &gHs->nativeBytesAllocated);

    if ((size_t)gHs->nativeBytesAllocated > gHs->nativeFootprintGCWatermark) {
        /* The second watermark is higher than the gc watermark. If you hit
         * this it means you are allocating native objects faster than the GC
         * can keep up with. If this occurs, we do a GC for alloc.
         */
        if ((size_t)gHs->nativeBytesAllocated > gHs->nativeFootprintLimit) {
            Thread* self = dvmThreadSelf();
            dvmRunFinalization();
            if (dvmCheckException(self)) {
                return;
            }
            dvmLockHeap();
            bool waited = dvmWaitForConcurrentGcToComplete();
            dvmUnlockHeap();
            if (waited) {
                // Just finished a GC, attempt to run finalizers.
                dvmRunFinalization();
                if (dvmCheckException(self)) {
                    return;
                }
            }

            // If we still are over the watermark, attempt a GC for alloc and run finalizers.
            if ((size_t)gHs->nativeBytesAllocated > gHs->nativeFootprintLimit) {
                dvmLockHeap();
                dvmWaitForConcurrentGcToComplete();
                dvmCollectGarbageInternal(GC_FOR_MALLOC);
                dvmUnlockHeap();
                dvmRunFinalization();
                gHs->nativeNeedToRunFinalization = false;
                if (dvmCheckException(self)) {
                    return;
                }
            }
            /* We have just run finalizers, update the native watermark since
             * it is very likely that finalizers released native managed
             * allocations.
             */
            dvmHeapSourceUpdateMaxNativeFootprint();
        } else {
            dvmSignalCond(&gHs->gcThreadCond);
        }
    }
}
/*
 * Called from VMRuntime.registerNativeFree.
 */
void dvmHeapSourceRegisterNativeFree(int bytes)
{
    int expected_size, new_size;
    do {
        expected_size = gHs->nativeBytesAllocated;
        new_size = expected_size - bytes;
        if (new_size < 0) {
            break;
        }
    } while (android_atomic_cas(expected_size, new_size,
                                &gHs->nativeBytesAllocated));
}