Example #1
/*
 * Return unused memory to the system if possible.
 */
static void trimHeaps()
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    size_t heapBytes = 0;
    for (size_t i = 0; i < hs->numHeaps; i++) {
        Heap *heap = &hs->heaps[i];

        /* Return the wilderness chunk to the system. */
        mspace_trim(heap->msp, 0);

        /* Return any whole free pages to the system. */
        mspace_inspect_all(heap->msp, releasePagesInRange, &heapBytes);
    }

    /* Same for the native heap. */
    dlmalloc_trim(0);
    size_t nativeBytes = 0;
    dlmalloc_inspect_all(releasePagesInRange, &nativeBytes);

    LOGD_HEAP("madvised %zd (GC) + %zd (native) = %zd total bytes",
            heapBytes, nativeBytes, heapBytes + nativeBytes);
}
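The releasePagesInRange callback passed to mspace_inspect_all and dlmalloc_inspect_all above is not shown in this example. A minimal sketch of what it could look like, assuming dlmalloc's standard inspect_all callback signature (start, end, used_bytes, arg) and the SYSTEM_PAGE_SIZE / ALIGN_UP_TO_PAGE_SIZE helpers that also appear in Example #2; this is an illustration, not the actual Dalvik implementation:

/*
 * Sketch: release whole free pages inside a chunk back to the kernel
 * and account for the number of bytes released.
 */
static void releasePagesInRange(void* start, void* end, size_t used_bytes,
                                void* releasedBytes)
{
    if (used_bytes == 0) {
        /* The chunk is entirely free; find the whole pages it covers. */
        uintptr_t begin = ALIGN_UP_TO_PAGE_SIZE(start);
        uintptr_t last = (uintptr_t)end & ~(uintptr_t)(SYSTEM_PAGE_SIZE - 1);
        if (begin < last) {
            madvise((void*)begin, last - begin, MADV_DONTNEED);
            *(size_t*)releasedBytes += last - begin;
        }
    }
}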
Example #2
/*
 * Allocates <n> bytes of zeroed data.
 */
void* dvmHeapSourceAlloc(size_t n)
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    Heap* heap = hs2heap(hs);
    if (heap->bytesAllocated + n > hs->softLimit) {
        /*
         * This allocation would push us over the soft limit; act as
         * if the heap is full.
         */
        LOGV_HEAP("softLimit of %zd.%03zdMB hit for %zd-byte allocation",
                  FRACTIONAL_MB(hs->softLimit), n);
        return NULL;
    }
    void* ptr;
    if (gDvm.lowMemoryMode) {
        /* This is only necessary because mspace_calloc always memsets the
         * allocated memory to 0, which is bad for memory usage since it
         * leads to dirty zero pages. If low memory mode is enabled, we
         * instead use mspace_malloc, which doesn't memset the allocated
         * memory, and madvise the page-aligned region back to the kernel.
         */
        ptr = mspace_malloc(heap->msp, n);
        if (ptr == NULL) {
            return NULL;
        }
        uintptr_t zero_begin = (uintptr_t)ptr;
        uintptr_t zero_end = (uintptr_t)ptr + n;
        /* Compute the page-aligned region inside the allocation. */
        uintptr_t begin = ALIGN_UP_TO_PAGE_SIZE(zero_begin);
        uintptr_t end = zero_end & ~(uintptr_t)(SYSTEM_PAGE_SIZE - 1);
        /* If the allocation contains at least one whole page, we can
         * madvise that part instead of zeroing it by hand.
         */
        if (begin < end) {
            /* Return the page-aligned region to the kernel; its pages
             * will read back as zeros.
             */
            madvise((void*)begin, end - begin, MADV_DONTNEED);
            /* Zero the tail that follows the page-aligned region. */
            memset((void*)end, 0, zero_end - end);
            /* Shrink the pending zero range to the head that precedes
             * the page-aligned region; it is zeroed below.
             */
            zero_end = begin;
        }
        memset((void*)zero_begin, 0, zero_end - zero_begin);
    } else {
        ptr = mspace_calloc(heap->msp, 1, n);
        if (ptr == NULL) {
            return NULL;
        }
    }

    countAllocation(heap, ptr);
    /*
     * Check to see if a concurrent GC should be initiated.
     */
    if (gDvm.gcHeap->gcRunning || !hs->hasGcThread) {
        /*
         * The garbage collector thread is already running or has yet
         * to be started.  Do nothing.
         */
        return ptr;
    }
    if (heap->bytesAllocated > heap->concurrentStartBytes) {
        /*
         * We have exceeded the allocation threshold.  Wake up the
         * garbage collector.
         */
        dvmSignalCond(&gHs->gcThreadCond);
    }
    return ptr;
}
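The low-memory path above depends on a page-alignment helper that is not shown. A minimal sketch, assuming SYSTEM_PAGE_SIZE is a power of two (typically 4096); the downward alignment of end is done inline in the function itself:

/* Round an address up to the next page boundary (identity if already
 * aligned). Assumes SYSTEM_PAGE_SIZE is a power of two. */
#define ALIGN_UP_TO_PAGE_SIZE(p) \
    (((uintptr_t)(p) + (SYSTEM_PAGE_SIZE - 1)) & ~(uintptr_t)(SYSTEM_PAGE_SIZE - 1))

Worked example with 4 KB pages: an allocation at 0x10f80 of 0x2100 bytes spans [0x10f80, 0x13080). begin rounds up to 0x11000 and end rounds down to 0x13000, so the 8 KB in between is madvised back to the kernel and only the 0x80-byte head and 0x80-byte tail are memset to zero.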
Example #3
void dvmHeapSourceZeroMarkBitmap()
{
    HS_BOILERPLATE();

    dvmHeapBitmapZero(&gHs->markBits);
}
Example #4
/*
 * Get the bitmap representing all marked objects.
 */
HeapBitmap *dvmHeapSourceGetMarkBits()
{
    HS_BOILERPLATE();

    return &gHs->markBits;
}
Example #5
/*
 * Get the bitmap representing all live objects.
 */
HeapBitmap *dvmHeapSourceGetLiveBits()
{
    HS_BOILERPLATE();

    return &gHs->liveBits;
}
Example #6
/*
 * Gets the number of heaps available in the heap source.
 *
 * Caller must hold the heap lock, because gHs caches a field
 * in gDvm.gcHeap.
 */
size_t dvmHeapSourceGetNumHeaps()
{
    HS_BOILERPLATE();

    return gHs->numHeaps;
}
Example #7
/*
 * Make the ideal footprint equal to the current footprint.
 */
static void snapIdealFootprint()
{
    HS_BOILERPLATE();

    setIdealFootprint(getSoftFootprint(true));
}
Example #8
/*
 * Returns the current maximum size of the heap source respecting any
 * growth limits.
 */
size_t dvmHeapSourceGetMaximumSize()
{
    HS_BOILERPLATE();
    return getMaximumSize(gHs);
}
Example #9
/*
 * Returns true iff <ptr> is in the heap source.
 */
bool dvmHeapSourceContainsAddress(const void *ptr)
{
    HS_BOILERPLATE();

    return (dvmHeapSourceGetBase() <= ptr) && (ptr <= dvmHeapSourceGetLimit());
}
Example #10
/*
 * Returns true iff <ptr> is in the heap source.
 */
bool dvmHeapSourceContainsAddress(const void *ptr)
{
    HS_BOILERPLATE();

    return dvmHeapBitmapCoversAddress(&gHs->liveBits, ptr);
}
Example #11
/*
 * Frees the first numPtrs objects in the ptrs list and returns the
 * amount of reclaimed storage. The list must contain addresses all in
 * the same mspace, and must be in increasing order. This implies that
 * there are no duplicates, and no entries are NULL.
 */
size_t dvmHeapSourceFreeList(size_t numPtrs, void **ptrs)
{
    HS_BOILERPLATE();

    if (numPtrs == 0) {
        return 0;
    }

    assert(ptrs != NULL);
    assert(*ptrs != NULL);
    Heap* heap = ptr2heap(gHs, *ptrs);
    size_t numBytes = 0;
    if (heap != NULL) {
        mspace msp = heap->msp;
        // Calling mspace_free on shared heaps disrupts sharing too
        // much. For heap[0] -- the 'active heap' -- we call
        // mspace_free, but on the other heaps we only do some
        // accounting.
        if (heap == gHs->heaps) {
            // mspace_merge_objects takes two allocated objects, and
            // if the second immediately follows the first, will merge
            // them, returning a larger object occupying the same
            // memory. This is a local operation, and doesn't require
            // dlmalloc to manipulate any freelists. It's pretty
            // inexpensive compared to free().

            // ptrs is an array of objects all in memory order, and if
            // client code has been allocating lots of short-lived
            // objects, this is likely to contain runs of objects all
            // now garbage, and thus highly amenable to this optimization.

            // Unroll the 0th iteration of the loop below, counting
            // ptrs[0] as freed and initializing merged.
            assert(ptrs[0] != NULL);
            assert(ptr2heap(gHs, ptrs[0]) == heap);
            countFree(heap, ptrs[0], &numBytes);
            void *merged = ptrs[0];
            for (size_t i = 1; i < numPtrs; i++) {
                assert(merged != NULL);
                assert(ptrs[i] != NULL);
                assert((intptr_t)merged < (intptr_t)ptrs[i]);
                assert(ptr2heap(gHs, ptrs[i]) == heap);
                countFree(heap, ptrs[i], &numBytes);
                // Try to merge. If it works, merged now includes the
                // memory of ptrs[i]. If it doesn't, free merged, and
                // see if ptrs[i] starts a new run of adjacent
                // objects to merge.
                if (mspace_merge_objects(msp, merged, ptrs[i]) == NULL) {
                    mspace_free(msp, merged);
                    merged = ptrs[i];
                }
            }
            assert(merged != NULL);
            mspace_free(msp, merged);
        } else {
            // This is not an 'active heap'. Only do the accounting.
            for (size_t i = 0; i < numPtrs; i++) {
                assert(ptrs[i] != NULL);
                assert(ptr2heap(gHs, ptrs[i]) == heap);
                countFree(heap, ptrs[i], &numBytes);
            }
        }
    }
    return numBytes;
}
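A hypothetical call site illustrating the contract; the name below is illustrative and not part of the example:

/* Sketch: free a batch of dead objects swept from a single mspace. The
 * caller guarantees the pointers are sorted in increasing address order,
 * so runs of adjacent garbage can be merged before a single mspace_free. */
static size_t sweepDeadObjects(void **dead, size_t count)
{
    size_t reclaimed = dvmHeapSourceFreeList(count, dead);
    LOGD_HEAP("sweep reclaimed %zd bytes", reclaimed);
    return reclaimed;
}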
Example #12
/*
 * Returns concurrentStart, the concurrent GC start threshold.
 */
int dvmGetTargetHeapConcurrentStart()
{
    HS_BOILERPLATE();
    LOGD_HEAP("dvmGetTargetHeapConcurrentStart %d", concurrentStart);
    return concurrentStart;
}
Example #13
/*
 * Returns the target heap minimum free size (minFree).
 */
int dvmGetTargetHeapMinFree()
{
    HS_BOILERPLATE();
    LOGD_HEAP("dvmGetTargetHeapMinFree %zd", gHs->minFree);
    return gHs->minFree;
}
Example #14
/*
 * Sets the target heap minimum free size (minFree).
 */
void dvmSetTargetHeapMinFree(size_t size)
{
    HS_BOILERPLATE();
    gHs->minFree = size;
    LOGD_HEAP("dvmSetTargetHeapMinFree %zd", gHs->minFree);
}