/*
 * Functions to update heapSource->bytesAllocated when an object
 * is allocated or freed.  mspace_usable_size() will give
 * us a much more accurate picture of heap utilization than
 * the requested byte sizes would.
 *
 * These aren't exact, and should not be treated as such.
 */
static void countAllocation(Heap *heap, const void *ptr)
{
    assert(heap->bytesAllocated < mspace_footprint(heap->msp));

    heap->bytesAllocated += mspace_usable_size(ptr) +
            HEAP_SOURCE_CHUNK_OVERHEAD;
    heap->objectsAllocated++;
    HeapSource* hs = gDvm.gcHeap->heapSource;
    dvmHeapBitmapSetObjectBit(&hs->liveBits, ptr);

    assert(heap->bytesAllocated < mspace_footprint(heap->msp));
}
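Only the allocation side is shown above. A minimal sketch of the matching free-side bookkeeping, modeled on Dalvik's countFree() but simplified (the live-bitmap clearing is omitted, and the field names are assumed to match the Heap struct used above):

static void countFree(Heap *heap, const void *ptr)
{
    /* Reverse the accounting done in countAllocation(), again using
     * the usable size rather than the requested size. */
    size_t delta = mspace_usable_size(ptr) + HEAP_SOURCE_CHUNK_OVERHEAD;
    assert(delta > 0);
    if (delta < heap->bytesAllocated) {
        heap->bytesAllocated -= delta;
    } else {
        heap->bytesAllocated = 0;
    }
    if (heap->objectsAllocated > 0) {
        heap->objectsAllocated--;
    }
}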
void MemoryHeap::initialize()
{
    mMspace = create_mspace_with_granularity(mPageSize, 0, this);
    mspace_track_large_chunks(mMspace, 1);
    lassert(mPageSize == mspace_footprint(mMspace));
}
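create_mspace_with_granularity() and lassert() come from this project's patched dlmalloc. The standalone sketch below uses only the stock dlmalloc mspace API to demonstrate the semantics the assertion relies on: mspace_footprint() reports memory actually obtained from the system, not bytes requested (the include path for a dlmalloc built with MSPACES=1 is an assumption):

#include <assert.h>
#include <stdio.h>
#include "malloc.h"   /* dlmalloc headers, built with MSPACES=1 */

int main(void)
{
    mspace msp = create_mspace(0 /* default capacity */, 0 /* no locking */);
    size_t before = mspace_footprint(msp);

    /* A 1 MiB request forces the mspace to obtain more system memory. */
    void *p = mspace_malloc(msp, (size_t)1 << 20);
    assert(p != NULL);

    /* The footprint tracks system memory, so it can only have grown
     * (or stayed equal) after the large allocation. */
    printf("footprint: %zu -> %zu\n", before, mspace_footprint(msp));

    mspace_free(msp, p);
    destroy_mspace(msp);
    return 0;
}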
/*
 * Adds an additional heap to the heap source.  Returns false if there
 * are too many heaps or insufficient free space to add another heap.
 */
static bool addNewHeap(HeapSource *hs)
{
    Heap heap;

    assert(hs != NULL);
    if (hs->numHeaps >= HEAP_SOURCE_MAX_HEAP_COUNT) {
        ALOGE("Attempt to create too many heaps (%zd >= %zd)",
                hs->numHeaps, HEAP_SOURCE_MAX_HEAP_COUNT);
        dvmAbort();
        return false;
    }

    memset(&heap, 0, sizeof(heap));

    /*
     * Heap storage comes from a common virtual memory reservation.
     * The new heap will start on the page after the old heap.
     */
    void *sbrk0 = contiguous_mspace_sbrk0(hs->heaps[0].msp);
    char *base = (char *)ALIGN_UP_TO_PAGE_SIZE(sbrk0);
    size_t overhead = base - hs->heaps[0].base;
    assert(((size_t)hs->heaps[0].base & (SYSTEM_PAGE_SIZE - 1)) == 0);

    if (overhead + HEAP_MIN_FREE >= hs->maximumSize) {
        LOGE_HEAP("No room to create any more heaps "
                  "(%zd overhead, %zd max)",
                  overhead, hs->maximumSize);
        return false;
    }

    size_t startSize = gDvm.heapStartingSize;
    heap.maximumSize = hs->growthLimit - overhead;
    heap.concurrentStartBytes = startSize - concurrentStart;
    heap.base = base;
    heap.limit = heap.base + heap.maximumSize;
    heap.msp = createMspace(base, startSize * 2, hs->maximumSize - overhead);
    if (heap.msp == NULL) {
        return false;
    }

    /* Don't let the soon-to-be-old heap grow any further.
     */
    hs->heaps[0].maximumSize = overhead;
    hs->heaps[0].limit = base;
    mspace msp = hs->heaps[0].msp;
    mspace_set_max_allowed_footprint(msp, mspace_footprint(msp));

    /* Put the new heap in the list, at heaps[0].
     * Shift existing heaps down.
     */
    memmove(&hs->heaps[1], &hs->heaps[0], hs->numHeaps * sizeof(hs->heaps[0]));
    hs->heaps[0] = heap;
    hs->numHeaps++;

    return true;
}
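For context, a sketch of the typical caller, modeled on (and simplified from) Dalvik's dvmHeapSourceStartupBeforeFork(): splitting off a new active heap just before the zygote forks freezes the old heap, keeping its pages copy-on-write shared with child processes.

bool dvmHeapSourceStartupBeforeFork()
{
    HeapSource *hs = gHs;

    assert(gDvm.zygote);
    if (!gDvm.newZygoteHeapAllocated) {
        /* Create a new heap for post-fork zygote allocations.  We
         * only try once, even if it fails. */
        gDvm.newZygoteHeapAllocated = true;
        return addNewHeap(hs);
    }
    return true;
}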
bool dvmHeapSourceStartupAfterZygote()
{
    // For each new application forked, we need to reset softLimit and
    // concurrentStartBytes to the correct expected values, not the ones
    // inherited from the zygote.
    HeapSource *hs = gHs;
    hs->softLimit = SIZE_MAX;
    hs->heaps[0].concurrentStartBytes =
            mspace_footprint(hs->heaps[0].msp) - concurrentStart;
    return gDvm.concurrentMarkSweep ? gcDaemonStartup() : true;
}
bool dvmHeapSourceStartupAfterZygote()
{
    if (lowmem) {
        return gDvm.concurrentMarkSweep ? gcDaemonStartup() : true;
    } else {
        HeapSource* hs = gHs;
        hs->softLimit = SIZE_MAX;
        hs->heaps[0].concurrentStartBytes =
                mspace_footprint(hs->heaps[0].msp) - concurrentStart;
        return gDvm.concurrentMarkSweep ? gcDaemonStartup() : true;
    }
}
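Both variants derive concurrentStartBytes from the current footprint. A sketch of how that threshold is typically consumed on the allocation path (a hypothetical helper for illustration; in Dalvik the check is inlined into the allocator):

static bool shouldScheduleConcurrentGc(const Heap *heap)
{
    /* Once allocated bytes cross the threshold, the concurrent GC
     * daemon is signaled so collection can finish before the heap
     * runs out of headroom. */
    return heap->bytesAllocated > heap->concurrentStartBytes;
}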
/*
 * Returns the current footprint of all heaps.  If includeActive
 * is false, don't count the heap at index 0.
 */
static size_t oldHeapOverhead(const HeapSource *hs, bool includeActive)
{
    size_t footprint = 0;
    size_t i;

    if (includeActive) {
        i = 0;
    } else {
        i = 1;
    }
    for (/* i = i */; i < hs->numHeaps; i++) {
        //TODO: include size of bitmaps?  If so, don't use bitsLen, listen to .max
        footprint += mspace_footprint(hs->heaps[i].msp);
    }
    return footprint;
}
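A typical call site, modeled on (and simplified from) Dalvik's setIdealFootprint(): the frozen heaps count against the ideal size, and only the remainder is granted to the active heap via setSoftLimit(), which appears later in this section.

static void setIdealFootprint(HeapSource *hs, size_t max)
{
    /* Convert max into a size that applies to the active heap.
     * Old heaps count against the ideal size. */
    size_t overhead = oldHeapOverhead(hs, false);
    size_t activeMax;

    if (overhead < max) {
        activeMax = max - overhead;
    } else {
        activeMax = 0;
    }
    setSoftLimit(hs, activeMax);
}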
/* Remove any hard limits, try to allocate, and shrink back down.
 * Last resort when trying to allocate an object.
 */
static void* heapAllocAndGrow(HeapSource *hs, Heap *heap, size_t n)
{
    /* Grow as much as possible, but don't let the real footprint
     * go over the absolute max.
     */
    size_t max = heap->maximumSize;

    mspace_set_footprint_limit(heap->msp, max);
    void* ptr = dvmHeapSourceAlloc(n);

    /* Shrink back down as small as possible.  Our caller may
     * readjust max_allowed to a more appropriate value.
     */
    mspace_set_footprint_limit(heap->msp, mspace_footprint(heap->msp));
    return ptr;
}
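The "last resort" framing is clearer with the caller in view; a sketch modeled on Dalvik's dvmHeapSourceAllocAndGrow(), which lifts the footprint limit only after a normal allocation has already failed:

void* dvmHeapSourceAllocAndGrow(size_t n)
{
    HeapSource *hs = gHs;
    Heap *heap = &hs->heaps[0];   /* the active heap */

    /* Try the normal, soft-limited path first. */
    void* ptr = dvmHeapSourceAlloc(n);
    if (ptr == NULL) {
        ptr = heapAllocAndGrow(hs, heap, n);
    }
    return ptr;
}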
static jlong getHeapSize(JNIEnv *env, jobject clazz)
{
#if !NO_MALLINFO
    struct mallinfo info = mspace_mallinfo(sqlite3_get_mspace());
    return (jlong) info.usmblks;
#elif USE_MSPACE
    mspace space = sqlite3_get_mspace();
    if (space != 0) {
        return mspace_footprint(space);
    } else {
        return 0;
    }
#else
    return 0;
#endif
}
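For completeness, a sketch of how such a native method is typically registered with JNI; the registration function and the Java peer class name here are assumptions for illustration, not taken from the source above:

static const JNINativeMethod gMethods[] = {
    /* "()J": no arguments, returns a jlong */
    { "getHeapSize", "()J", (void*) getHeapSize },
};

int register_android_database_SQLiteDebug(JNIEnv *env)
{
    /* Class name is illustrative; adjust to the actual Java peer. */
    return jniRegisterNativeMethods(env,
            "android/database/sqlite/SQLiteDebug",
            gMethods, NELEM(gMethods));
}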
/*
 * Sets the soft limit, handling any necessary changes to the allowed
 * footprint of the active heap.
 */
static void setSoftLimit(HeapSource *hs, size_t softLimit)
{
    /* Compare against the actual footprint, rather than the
     * max_allowed, because the heap may not have grown all the
     * way to the allowed size yet.
     */
    mspace msp = hs->heaps[0].msp;
    size_t currentHeapSize = mspace_footprint(msp);
    if (softLimit < currentHeapSize) {
        /* Don't let the heap grow any more, and impose a soft limit.
         */
        mspace_set_footprint_limit(msp, currentHeapSize);
        hs->softLimit = softLimit;
    } else {
        /* Let the heap grow to the requested max, and remove any
         * soft limit, if set.
         */
        mspace_set_footprint_limit(msp, softLimit);
        hs->softLimit = SIZE_MAX;
    }
}
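SIZE_MAX doubles as the "no soft limit" sentinel, which the allocation path tests with a small predicate; a sketch modeled on Dalvik's isSoftLimited():

static inline bool isSoftLimited(const HeapSource *hs)
{
    /* softLimit is either SIZE_MAX or the limit for the active
     * mspace, as maintained by setSoftLimit() above. */
    return hs->softLimit != SIZE_MAX;
}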
/*
 * Returns the requested value.  If the per-heap stats are requested, fill
 * them as well.
 *
 * Caller must hold the heap lock.
 */
size_t dvmHeapSourceGetValue(HeapSourceValueSpec spec, size_t perHeapStats[],
                             size_t arrayLen)
{
    HeapSource *hs = gHs;
    size_t value = 0;
    size_t total = 0;

    HS_BOILERPLATE();

    assert(arrayLen >= hs->numHeaps || perHeapStats == NULL);
    for (size_t i = 0; i < hs->numHeaps; i++) {
        Heap *const heap = &hs->heaps[i];
        switch (spec) {
        case HS_FOOTPRINT:
            value = heap->brk - heap->base;
            assert(value == mspace_footprint(heap->msp));
            break;
        case HS_ALLOWED_FOOTPRINT:
            value = mspace_footprint_limit(heap->msp);
            break;
        case HS_BYTES_ALLOCATED:
            value = heap->bytesAllocated;
            break;
        case HS_OBJECTS_ALLOCATED:
            value = heap->objectsAllocated;
            break;
        default:
            // quiet gcc
            break;
        }
        if (perHeapStats) {
            perHeapStats[i] = value;
        }
        total += value;
    }
    return total;
}
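A usage sketch, assuming the caller takes the heap lock as the contract above requires; HS_FOOTPRINT returns the total across all heaps plus an optional per-heap breakdown:

size_t perHeap[HEAP_SOURCE_MAX_HEAP_COUNT];
size_t total;

dvmLockHeap();
total = dvmHeapSourceGetValue(HS_FOOTPRINT, perHeap,
                              HEAP_SOURCE_MAX_HEAP_COUNT);
dvmUnlockHeap();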
size_t mm_mspace_getfootprint(mm_mspace_t space)
{
    return mspace_footprint(space.opaque);
}