Example 1
/*
 * Given the current contents of the active heap, increase the allowed
 * heap footprint to match the target utilization ratio.  This
 * should only be called immediately after a full mark/sweep.
 */
void dvmHeapSourceGrowForUtilization()
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    Heap* heap = hs2heap(hs);

    /* Use the current target utilization ratio to determine the
     * ideal heap size based on the size of the live set.
     * Note that only the active heap plays any part in this.
     *
     * Avoid letting the old heaps influence the target free size,
     * because they may be full of objects that aren't actually
     * in the working set.  Just look at the allocated size of
     * the current heap.
     */
    size_t currentHeapUsed = heap->bytesAllocated;
    size_t targetHeapSize = getUtilizationTarget(hs, currentHeapUsed);

    /* The ideal size includes the old heaps; add overhead so that
     * it can be immediately subtracted again in setIdealFootprint().
     * If the target heap size would exceed the max, setIdealFootprint()
     * will clamp it to a legal value.
     */
    size_t overhead = getSoftFootprint(false);
    setIdealFootprint(targetHeapSize + overhead);

    size_t freeBytes = getAllocLimit(hs);
    if (freeBytes < CONCURRENT_MIN_FREE) {
        /* Not enough free memory to allow a concurrent GC. */
        heap->concurrentStartBytes = SIZE_MAX;
    } else {
        heap->concurrentStartBytes = freeBytes - CONCURRENT_START;
    }
}
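getUtilizationTarget() is not reproduced in these examples. As a rough sketch of what such a helper is expected to compute, assuming the target ratio is stored as a fixed-point fraction and that there are configurable bounds on free headroom (UTILIZATION_SCALE, minFree and maxFree below are illustrative names, not necessarily the real HeapSource fields):

#include <stddef.h>

/* Illustrative constants and fields, not the real HeapSource layout. */
#define UTILIZATION_SCALE 1024          /* ratio stored as a fraction of 1024 */

struct UtilParams {
    size_t targetUtilization;           /* e.g. 512 means a 50% target */
    size_t minFree;                     /* lower bound on free headroom */
    size_t maxFree;                     /* upper bound on free headroom */
};

/* Pick an ideal heap size so that liveSize / idealSize is roughly the
 * target utilization, clamped so the free headroom stays within
 * [minFree, maxFree].
 */
static size_t utilizationTarget(const struct UtilParams* p, size_t liveSize)
{
    size_t target = (liveSize / p->targetUtilization) * UTILIZATION_SCALE;
    if (target < liveSize + p->minFree) {
        target = liveSize + p->minFree;
    } else if (target > liveSize + p->maxFree) {
        target = liveSize + p->maxFree;
    }
    return target;
}

With liveSize = 8 MB and targetUtilization = 512, the unclamped target works out to 16 MB, i.e. the collector aims to keep the heap about half full.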
Example 2
/*
 * Returns approximately the maximum number of bytes allowed to be
 * allocated from the active heap before a GC is forced.
 */
static size_t getAllocLimit(const HeapSource *hs)
{
    if (isSoftLimited(hs)) {
        return hs->softLimit;
    } else {
        return mspace_footprint_limit(hs2heap(hs)->msp);
    }
}
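isSoftLimited() is not shown in these examples, but dvmHeapSourceAllocAndGrow() below parks softLimit at SIZE_MAX to mean "no soft limit", so the check is presumably just a sentinel comparison, roughly:

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

/* Minimal stand-in for the one field this check needs. */
struct SoftLimitState {
    size_t softLimit;                   /* SIZE_MAX means "no soft limit" */
};

static bool isSoftLimitedSketch(const struct SoftLimitState *hs)
{
    return hs->softLimit != SIZE_MAX;
}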
Example 3
void *dvmHeapSourceGetImmuneLimit(bool isPartial)
{
    if (isPartial) {
        return hs2heap(gHs)->base;
    } else {
        return NULL;
    }
}
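For orientation only (this is not the real mark code): a partial collection treats everything below this limit as belonging to an older heap, so the marker can assume such objects are live without tracing them, while the NULL returned for a full collection immunizes nothing. A minimal sketch of that check:

#include <stdbool.h>
#include <stddef.h>

static bool isImmuneSketch(const void *obj, const void *immuneLimit)
{
    return immuneLimit != NULL &&
           (const char *)obj < (const char *)immuneLimit;
}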
Example 4
/*
 * Allocates <n> bytes of zeroed data, growing as much as possible
 * if necessary.
 */
void* dvmHeapSourceAllocAndGrow(size_t n)
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    Heap* heap = hs2heap(hs);
    void* ptr = dvmHeapSourceAlloc(n);
    if (ptr != NULL) {
        return ptr;
    }

    size_t oldIdealSize = hs->idealSize;
    if (isSoftLimited(hs)) {
        /* We're soft-limited.  Try removing the soft limit to
         * see if we can allocate without actually growing.
         */
        hs->softLimit = SIZE_MAX;
        ptr = dvmHeapSourceAlloc(n);
        if (ptr != NULL) {
            /* Removing the soft limit worked;  fix things up to
             * reflect the new effective ideal size.
             */
            snapIdealFootprint();
            return ptr;
        }
        // softLimit intentionally left at SIZE_MAX.
    }

    /* Either we weren't soft-limited to begin with, or lifting the soft
     * limit wasn't enough.  Grow the heap to satisfy the request.
     * If this call fails, no footprints will have changed.
     */
    ptr = heapAllocAndGrow(hs, heap, n);
    if (ptr != NULL) {
        /* The allocation succeeded.  Fix up the ideal size to
         * reflect any footprint modifications that had to happen.
         */
        snapIdealFootprint();
    } else {
        /* We just couldn't do it.  Restore the original ideal size,
         * fixing up softLimit if necessary.
         */
        setIdealFootprint(oldIdealSize);
    }
    return ptr;
}
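For context, a compressed sketch of how an allocator front end would be expected to use these two entry points; the escalation order (plain allocation, then a collection, then footprint growth) is the usual pattern, but collectGarbageForAlloc() is a hypothetical stand-in, not the VM's real routine:

#include <stdbool.h>
#include <stddef.h>

/* Entry points shown in this file. */
void *dvmHeapSourceAlloc(size_t n);
void *dvmHeapSourceAllocAndGrow(size_t n);

/* Hypothetical stand-in for "run a blocking GC on behalf of an
 * allocation"; it is a no-op here so the sketch stays self-contained.
 */
static void collectGarbageForAlloc(bool clearSoftRefs)
{
    (void)clearSoftRefs;
}

static void *tryAllocSketch(size_t n)
{
    void *ptr = dvmHeapSourceAlloc(n);       /* within the current footprint */
    if (ptr != NULL) {
        return ptr;
    }
    collectGarbageForAlloc(false);           /* reclaim space, keep soft refs */
    ptr = dvmHeapSourceAlloc(n);
    if (ptr != NULL) {
        return ptr;
    }
    return dvmHeapSourceAllocAndGrow(n);     /* lift the soft limit / grow */
}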
Example 5
/*
 * Allocates <n> bytes of zeroed data.
 */
void* dvmHeapSourceAlloc(size_t n)
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    Heap* heap = hs2heap(hs);
    if (heap->bytesAllocated + n > hs->softLimit) {
        /*
         * This allocation would push us over the soft limit; act as
         * if the heap is full.
         */
        LOGV_HEAP("softLimit of %zd.%03zdMB hit for %zd-byte allocation",
                  FRACTIONAL_MB(hs->softLimit), n);
        return NULL;
    }
    void* ptr = mspace_calloc(heap->msp, 1, n);
    if (ptr == NULL) {
        return NULL;
    }
    countAllocation(heap, ptr);
    /*
     * Check to see if a concurrent GC should be initiated.
     */
    if (gDvm.gcHeap->gcRunning || !hs->hasGcThread) {
        /*
         * The garbage collector thread is already running or has yet
         * to be started.  Do nothing.
         */
        return ptr;
    }
    if (heap->bytesAllocated > heap->concurrentStartBytes) {
        /*
         * We have exceeded the allocation threshold.  Wake up the
         * garbage collector.
         */
        dvmSignalCond(&gHs->gcThreadCond);
    }
    return ptr;
}
Example 6
/*
 * Allocates <n> bytes of zeroed data.
 */
void* dvmHeapSourceAlloc(size_t n)
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    Heap* heap = hs2heap(hs);
    if (heap->bytesAllocated + n > hs->softLimit) {
        /*
         * This allocation would push us over the soft limit; act as
         * if the heap is full.
         */
        LOGV_HEAP("softLimit of %zd.%03zdMB hit for %zd-byte allocation",
                  FRACTIONAL_MB(hs->softLimit), n);
        return NULL;
    }
    void* ptr;
    if (gDvm.lowMemoryMode) {
        /* This is only necessary because mspace_calloc always memsets the
         * allocated memory to 0, which is bad for memory usage since it
         * produces dirty zero pages.  In low memory mode we therefore use
         * mspace_malloc, which doesn't touch the memory, and hand the
         * page-aligned portion back to the kernel with madvise instead.
         */
        ptr = mspace_malloc(heap->msp, n);
        if (ptr == NULL) {
            return NULL;
        }
        uintptr_t zero_begin = (uintptr_t)ptr;
        uintptr_t zero_end = (uintptr_t)ptr + n;
        /* Calculate the page aligned region.
         */
        uintptr_t begin = ALIGN_UP_TO_PAGE_SIZE(zero_begin);
        uintptr_t end = zero_end & ~(uintptr_t)(SYSTEM_PAGE_SIZE - 1);
        /* Only if the allocation contains at least one whole aligned page
         * is there anything to hand back to the kernel.
         */
        if (begin < end) {
            /* Release the page-aligned interior to the kernel; this
             * anonymous mapping will read back as zero-filled pages.
             */
            madvise((void*)begin, end - begin, MADV_DONTNEED);
            /* Zero the unaligned tail after the page-aligned region.
             */
            memset((void*)end, 0, zero_end - end);
            /* Leave only the unaligned head before the page-aligned
             * region for the final memset below.
             */
            zero_end = begin;
        }
        memset((void*)zero_begin, 0, zero_end - zero_begin);
    } else {
        ptr = mspace_calloc(heap->msp, 1, n);
        if (ptr == NULL) {
            return NULL;
        }
    }

    countAllocation(heap, ptr);
    /*
     * Check to see if a concurrent GC should be initiated.
     */
    if (gDvm.gcHeap->gcRunning || !hs->hasGcThread) {
        /*
         * The garbage collector thread is already running or has yet
         * to be started.  Do nothing.
         */
        return ptr;
    }
    if (heap->bytesAllocated > heap->concurrentStartBytes) {
        /*
         * We have exceeded the allocation threshold.  Wake up the
         * garbage collector.
         */
        dvmSignalCond(&gHs->gcThreadCond);
    }
    return ptr;
}
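A self-contained toy of just the splitting arithmetic used by the low-memory path above, assuming 4 KB pages (the real code uses SYSTEM_PAGE_SIZE and the ALIGN_UP_TO_PAGE_SIZE macro):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE_SKETCH 4096u

/* Given an allocation [start, start + n), compute the interior region made
 * of whole pages.  For a private anonymous mapping that region can be handed
 * back to the kernel with madvise(MADV_DONTNEED) and will read back as
 * zero-filled pages, so only the unaligned head and tail need a memset.
 */
static void splitForZeroing(uintptr_t start, size_t n,
                            uintptr_t *pageBegin, uintptr_t *pageEnd)
{
    uintptr_t end = start + n;
    *pageBegin = (start + PAGE_SIZE_SKETCH - 1) & ~(uintptr_t)(PAGE_SIZE_SKETCH - 1);
    *pageEnd = end & ~(uintptr_t)(PAGE_SIZE_SKETCH - 1);
}

int main(void)
{
    /* A 20 KB allocation that starts 256 bytes into a page. */
    uintptr_t start = 0x10000 + 256;
    size_t n = 20 * 1024;
    uintptr_t begin, end;
    splitForZeroing(start, n, &begin, &end);
    if (begin < end) {
        printf("madvise [%#lx, %#lx)\n", (unsigned long)begin, (unsigned long)end);
        printf("memset  [%#lx, %#lx) and [%#lx, %#lx)\n",
               (unsigned long)start, (unsigned long)begin,
               (unsigned long)end, (unsigned long)(start + n));
    } else {
        printf("no whole page inside; memset the entire range\n");
    }
    return 0;
}

In this case the interior [0x11000, 0x15000) is returned to the kernel and only the 3,840-byte head and 256-byte tail are zeroed explicitly.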