Example no. 1
static bool
createMarkStack(GcMarkStack *stack)
{
    const Object **limit;
    size_t size;
    int fd;

    /* Create a stack big enough for the worst possible case,
     * where the heap is perfectly full of the smallest object.
     * TODO: be better about memory usage; use a smaller stack with
     *       overflow detection and recovery.
     */
    size = dvmHeapSourceGetIdealFootprint() * sizeof(Object*) /
            (sizeof(Object) + HEAP_SOURCE_CHUNK_OVERHEAD);
    size = ALIGN_UP_TO_PAGE_SIZE(size);
    fd = ashmem_create_region("dalvik-heap-markstack", size);
    if (fd < 0) {
        LOGE_GC("Could not create %d-byte ashmem mark stack\n", size);
        return false;
    }
    limit = (const Object **)mmap(NULL, size, PROT_READ | PROT_WRITE,
            MAP_PRIVATE, fd, 0);
    close(fd);
    if (limit == MAP_FAILED) {
        LOGE_GC("Could not mmap %d-byte ashmem mark stack\n", size);
        return false;
    }

    memset(stack, 0, sizeof(*stack));
    stack->limit = limit;
    stack->base = (const Object **)((uintptr_t)limit + size);
    stack->top = stack->base;

    return true;
}
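
The initialization above leaves top equal to base (the high end of the
mapping), with limit marking the low end, so the stack grows downward. A
minimal push/pop sketch under that assumption; these helpers are
illustrative, not part of the original source:

/* Illustrative helpers (hypothetical): push moves top from base toward
 * limit; the stack is full when top reaches limit, empty at base. */
static bool markStackPush(GcMarkStack *stack, const Object *obj)
{
    if (stack->top == stack->limit) {
        return false;    /* full: a real collector must recover here */
    }
    *--stack->top = obj;
    return true;
}

static const Object *markStackPop(GcMarkStack *stack)
{
    assert(stack->top != stack->base);    /* caller ensures non-empty */
    return *stack->top++;
}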
Example no. 2
static bool createMarkStack(GcMarkStack *stack)
{
    const Object **limit;
    const char *name;
    size_t size;

    /* Create a stack big enough for the worst possible case,
     * where the heap is perfectly full of the smallest object.
     * TODO: be better about memory usage; use a smaller stack with
     *       overflow detection and recovery.
     */
    size = dvmHeapSourceGetIdealFootprint() * sizeof(Object*) /
            (sizeof(Object) + HEAP_SOURCE_CHUNK_OVERHEAD);
    size = ALIGN_UP_TO_PAGE_SIZE(size);
    name = "dalvik-mark-stack";
    limit = dvmAllocRegion(size, PROT_READ | PROT_WRITE, name);
    if (limit == NULL) {
        LOGE_GC("Could not mmap %zd-byte ashmem region '%s'", size, name);
        return false;
    }
    stack->limit = limit;
    stack->base = (const Object **)((uintptr_t)limit + size);
    stack->top = stack->base;
    return true;
}
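
To make the sizing formula concrete, take hypothetical numbers: a 64 MiB
ideal footprint, 4-byte pointers, a 16-byte smallest Object, and 8 bytes
of HEAP_SOURCE_CHUNK_OVERHEAD give 64 MiB * 4 / (16 + 8), roughly
10.7 MiB, which ALIGN_UP_TO_PAGE_SIZE then rounds up to a whole number
of pages. (All of these sizes are assumptions for illustration only.)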
Example no. 3
/*
 * Adds an additional heap to the heap source.  Returns false if there
 * are too many heaps or insufficient free space to add another heap.
 */
static bool addNewHeap(HeapSource *hs)
{
    Heap heap;

    assert(hs != NULL);
    if (hs->numHeaps >= HEAP_SOURCE_MAX_HEAP_COUNT) {
        ALOGE("Attempt to create too many heaps (%zd >= %zd)",
                hs->numHeaps, HEAP_SOURCE_MAX_HEAP_COUNT);
        dvmAbort();
        return false;
    }

    memset(&heap, 0, sizeof(heap));

    /*
     * Heap storage comes from a common virtual memory reservation.
     * The new heap will start on the page after the old heap.
     */
    void *sbrk0 = contiguous_mspace_sbrk0(hs->heaps[0].msp);
    char *base = (char *)ALIGN_UP_TO_PAGE_SIZE(sbrk0);
    size_t overhead = base - hs->heaps[0].base;
    assert(((size_t)hs->heaps[0].base & (SYSTEM_PAGE_SIZE - 1)) == 0);

    if (overhead + HEAP_MIN_FREE >= hs->maximumSize) {
        LOGE_HEAP("No room to create any more heaps "
                  "(%zd overhead, %zd max)",
                  overhead, hs->maximumSize);
        return false;
    }

    size_t startSize = gDvm.heapStartingSize;
    heap.maximumSize = hs->growthLimit - overhead;
    heap.concurrentStartBytes = startSize - concurrentStart;
    heap.base = base;
    heap.limit = heap.base + heap.maximumSize;
    heap.msp = createMspace(base, startSize * 2, hs->maximumSize - overhead);
    if (heap.msp == NULL) {
        return false;
    }

    /* Don't let the soon-to-be-old heap grow any further.
     */
    hs->heaps[0].maximumSize = overhead;
    hs->heaps[0].limit = base;
    mspace msp = hs->heaps[0].msp;
    mspace_set_max_allowed_footprint(msp, mspace_footprint(msp));

    /* Put the new heap in the list, at heaps[0].
     * Shift existing heaps down.
     */
    memmove(&hs->heaps[1], &hs->heaps[0], hs->numHeaps * sizeof(hs->heaps[0]));
    hs->heaps[0] = heap;
    hs->numHeaps++;

    return true;
}
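
Because the newest heap always lands in heaps[0], code that maps an
object back to its heap can simply scan the list; below is a minimal
sketch of such a lookup, using only the base/limit fields populated
above (the real source has a similar helper, but this version is
illustrative):

/* Sketch: find the heap whose [base, limit) range contains ptr. */
static Heap *ptrToHeap(HeapSource *hs, const void *ptr)
{
    for (size_t i = 0; i < hs->numHeaps; i++) {
        Heap *heap = &hs->heaps[i];
        if ((const char *)ptr >= heap->base &&
                (const char *)ptr < heap->limit) {
            return heap;
        }
    }
    return NULL;    /* not a pointer into any managed heap */
}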
Example no. 4
/*
 * Clean up any resources associated with the bitmap.
 */
void
dvmHeapBitmapDelete(HeapBitmap *hb)
{
    assert(hb != NULL);

    if (hb->bits != NULL) {
        // Re-calculate the size we passed to mmap().
        size_t allocLen = ALIGN_UP_TO_PAGE_SIZE(hb->bitsLen);
        munmap((char *)hb->bits, allocLen);
    }
    memset(hb, 0, sizeof(*hb));
}
Example no. 5
/*
 * Return free pages to the system.
 * TODO: move this somewhere else, especially the native heap part.
 */
static void releasePagesInRange(void *start, void *end, void *nbytes)
{
    /* Linux requires that the madvise() start address is page-aligned.
     * We also align the end address.
     */
    start = (void *)ALIGN_UP_TO_PAGE_SIZE(start);
    end = (void *)((size_t)end & ~(SYSTEM_PAGE_SIZE - 1));
    if (start < end) {
        size_t length = (char *)end - (char *)start;
        madvise(start, length, MADV_DONTNEED);
        *(size_t *)nbytes += length;
    }
}
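
A hypothetical call site (the pointer names are illustrative): release
the unused tail of a heap segment and accumulate the bytes returned to
the kernel.

    /* Hypothetical fragment: tailStart/segmentLimit bound a free range. */
    size_t released = 0;
    releasePagesInRange(tailStart, segmentLimit, &released);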
Example no. 6
/*
 * Return free pages to the system.
 * TODO: move this somewhere else, especially the native heap part.
 */
static void releasePagesInRange(void* start, void* end, size_t used_bytes,
                                void* releasedBytes)
{
    if (used_bytes == 0) {
        /*
         * We have a range of memory we can try to madvise()
         * back. Linux requires that the madvise() start address is
         * page-aligned.  We also align the end address.
         */
        start = (void *)ALIGN_UP_TO_PAGE_SIZE(start);
        end = (void *)((size_t)end & ~(SYSTEM_PAGE_SIZE - 1));
        if (end > start) {
            size_t length = (char *)end - (char *)start;
            madvise(start, length, MADV_DONTNEED);
            *(size_t *)releasedBytes += length;
        }
    }
}
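
This four-argument form matches the callback shape of dlmalloc's
mspace_inspect_all, which reports each chunk as (start, end, used_bytes,
arg) with used_bytes == 0 for free chunks. A sketch of driving it that
way (heap->msp is assumed to be an mspace, as in the earlier examples):

    /* Sketch: walk every chunk in the mspace, madvising the free ones
     * and tallying the released bytes into 'released'. */
    size_t released = 0;
    mspace_inspect_all(heap->msp, releasePagesInRange, &released);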
Example no. 7
/*
 *  Allocates a memory region using ashmem and mmap, initialized to
 *  zero.  Actual allocation rounded up to page multiple.  Returns
 *  NULL on failure.
 */
void *dvmAllocRegion(size_t byteCount, int prot, const char *name) {
    void *base;
    int fd, ret;

    byteCount = ALIGN_UP_TO_PAGE_SIZE(byteCount);
    fd = ashmem_create_region(name, byteCount);
    if (fd == -1) {
        return NULL;
    }
    base = mmap(NULL, byteCount, prot, MAP_PRIVATE, fd, 0);
    ret = close(fd);
    if (base == MAP_FAILED) {
        return NULL;
    }
    if (ret == -1) {
        munmap(base, byteCount);
        return NULL;
    }
    return base;
}
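
A hypothetical use (size and name are illustrative): reserve a
zero-filled, page-rounded scratch region backed by ashmem.

    /* Hypothetical usage; the region comes back zeroed and rounded up
     * to a page multiple, with NULL signaling failure. */
    void *scratch = dvmAllocRegion(64 * 1024, PROT_READ | PROT_WRITE,
            "dalvik-example-scratch");
    if (scratch == NULL) {
        /* ashmem or mmap failure */
    }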
Example no. 8
/*
 * Initialize a HeapBitmap so that it points to a bitmap large
 * enough to cover a heap at <base> of <maxSize> bytes, where
 * objects are guaranteed to be HB_OBJECT_ALIGNMENT-aligned.
 */
bool
dvmHeapBitmapInit(HeapBitmap *hb, const void *base, size_t maxSize,
        const char *name)
{
    void *bits;
    size_t bitsLen;
    size_t allocLen;
    int fd;
    char nameBuf[ASHMEM_NAME_LEN] = HB_ASHMEM_NAME;

    assert(hb != NULL);

    bitsLen = HB_OFFSET_TO_INDEX(maxSize) * sizeof(*hb->bits);
    allocLen = ALIGN_UP_TO_PAGE_SIZE(bitsLen);   // required by ashmem

    if (name != NULL) {
        snprintf(nameBuf, sizeof(nameBuf), HB_ASHMEM_NAME "/%s", name);
    }
    fd = ashmem_create_region(nameBuf, allocLen);
    if (fd < 0) {
        LOGE("Could not create %zu-byte ashmem region \"%s\" to cover "
                "%zu-byte heap (%d)\n",
                allocLen, nameBuf, maxSize, fd);
        return false;
    }

    bits = mmap(NULL, bitsLen, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    close(fd);
    if (bits == MAP_FAILED) {
        LOGE("Could not mmap %d-byte ashmem region \"%s\"\n",
                bitsLen, nameBuf);
        return false;
    }

    memset(hb, 0, sizeof(*hb));
    hb->bits = bits;
    hb->bitsLen = bitsLen;
    hb->base = (uintptr_t)base;
    hb->max = hb->base - 1;

    return true;
}
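
For a sense of scale, assume HB_OBJECT_ALIGNMENT is 8 and hb->bits is an
array of 32-bit words (both assumptions about headers not shown here):
bitsLen = HB_OFFSET_TO_INDEX(maxSize) * sizeof(*hb->bits) is then
maxSize / 8 / 32 words of 4 bytes each, i.e. one bitmap byte per 64 heap
bytes, so a 64 MiB heap gets a 1 MiB bitmap before page rounding.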
Example no. 9
/*
 * Allocates <n> bytes of zeroed data.
 */
void* dvmHeapSourceAlloc(size_t n)
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    Heap* heap = hs2heap(hs);
    if (heap->bytesAllocated + n > hs->softLimit) {
        /*
         * This allocation would push us over the soft limit; act as
         * if the heap is full.
         */
        LOGV_HEAP("softLimit of %zd.%03zdMB hit for %zd-byte allocation",
                  FRACTIONAL_MB(hs->softLimit), n);
        return NULL;
    }
    void* ptr;
    if (gDvm.lowMemoryMode) {
        /* This is only necessary because mspace_calloc always memsets the
         * allocated memory to 0. This is bad for memory usage since it leads
         * to dirty zero pages. If low memory mode is enabled, we use
         * mspace_malloc which doesn't memset the allocated memory and madvise
         * the page aligned region back to the kernel.
         */
        ptr = mspace_malloc(heap->msp, n);
        if (ptr == NULL) {
            return NULL;
        }
        uintptr_t zero_begin = (uintptr_t)ptr;
        uintptr_t zero_end = (uintptr_t)ptr + n;
        /* Calculate the page aligned region.
         */
        uintptr_t begin = ALIGN_UP_TO_PAGE_SIZE(zero_begin);
        uintptr_t end = zero_end & ~(uintptr_t)(SYSTEM_PAGE_SIZE - 1);
        /* If the allocation contains at least one whole aligned page,
         * we can madvise that page range instead of zeroing it.
         */
        if (begin < end) {
            /* madvise the page aligned region to kernel.
             */
            madvise((void*)begin, end - begin, MADV_DONTNEED);
            /* Zero the region after the page aligned region.
             */
            memset((void*)end, 0, zero_end - end);
            /* Zero out the region before the page aligned region.
             */
            zero_end = begin;
        }
        memset((void*)zero_begin, 0, zero_end - zero_begin);
    } else {
        ptr = mspace_calloc(heap->msp, 1, n);
        if (ptr == NULL) {
            return NULL;
        }
    }

    countAllocation(heap, ptr);
    /*
     * Check to see if a concurrent GC should be initiated.
     */
    if (gDvm.gcHeap->gcRunning || !hs->hasGcThread) {
        /*
         * The garbage collector thread is already running or has yet
         * to be started.  Do nothing.
         */
        return ptr;
    }
    if (heap->bytesAllocated > heap->concurrentStartBytes) {
        /*
         * We have exceeded the allocation threshold.  Wake up the
         * garbage collector.
         */
        dvmSignalCond(&gHs->gcThreadCond);
    }
    return ptr;
}
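
A concrete walk-through of the low-memory path, with hypothetical 4 KiB
pages and addresses: if mspace_malloc returns ptr = 0x10300 and
n = 0x2100, then zero_end = 0x12400, begin = 0x11000, and end = 0x12000.
The whole page [0x11000, 0x12000) is madvised rather than memset, the
tail [0x12000, 0x12400) is zeroed, and after zero_end is pulled back to
begin, the final memset zeroes the head [0x10300, 0x11000). Every byte
of the allocation ends up zero, but one page stays clean.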
Example no. 10
/*
 * Initializes the heap source; must be called before any other
 * dvmHeapSource*() functions.  Returns a GcHeap structure
 * allocated from the heap source.
 */
GcHeap* dvmHeapSourceStartup(size_t startSize, size_t maximumSize,
                             size_t growthLimit)
{
    GcHeap *gcHeap = NULL;
    HeapSource *hs = NULL;
    mspace msp;
    size_t length;
    void *base;

    assert(gHs == NULL);

    if (!(startSize <= growthLimit && growthLimit <= maximumSize)) {
        ALOGE("Bad heap size parameters (start=%zd, max=%zd, limit=%zd)",
              startSize, maximumSize, growthLimit);
        return NULL;
    }

    /*
     * Allocate a contiguous region of virtual memory to be subdivided
     * among the heaps managed by the garbage collector.
     */
    length = ALIGN_UP_TO_PAGE_SIZE(maximumSize);
    base = dvmAllocRegion(length, PROT_NONE, gDvm.zygote ? "dalvik-zygote" : "dalvik-heap");
    if (base == NULL) {
        dvmAbort();
    }

    /* Create an unlocked dlmalloc mspace to use as
     * a heap source.
     */
    msp = createMspace(base, kInitialMorecoreStart, startSize);
    if (msp == NULL) {
        dvmAbort();
    }

    gcHeap = (GcHeap *)calloc(1, sizeof(*gcHeap));
    if (gcHeap == NULL) {
        LOGE_HEAP("Can't allocate heap descriptor");
        dvmAbort();
    }

    hs = (HeapSource *)calloc(1, sizeof(*hs));
    if (hs == NULL) {
        LOGE_HEAP("Can't allocate heap source");
        dvmAbort();
    }

    hs->targetUtilization = gDvm.heapTargetUtilization * HEAP_UTILIZATION_MAX;
    hs->minFree = gDvm.heapMinFree;
    hs->maxFree = gDvm.heapMaxFree;
    hs->startSize = startSize;
    hs->maximumSize = maximumSize;
    hs->growthLimit = growthLimit;
    hs->idealSize = startSize;
    hs->softLimit = SIZE_MAX;    // no soft limit at first
    hs->numHeaps = 0;
    hs->sawZygote = gDvm.zygote;
    hs->nativeBytesAllocated = 0;
    hs->nativeFootprintGCWatermark = startSize;
    hs->nativeFootprintLimit = startSize * 2;
    hs->nativeNeedToRunFinalization = false;
    hs->hasGcThread = false;
    hs->heapBase = (char *)base;
    hs->heapLength = length;

    if (hs->maxFree > hs->maximumSize) {
        hs->maxFree = hs->maximumSize;
    }
    if (hs->minFree < CONCURRENT_START) {
        hs->minFree = CONCURRENT_START;
    } else if (hs->minFree > hs->maxFree) {
        hs->minFree = hs->maxFree;
    }

    if (!addInitialHeap(hs, msp, growthLimit)) {
        LOGE_HEAP("Can't add initial heap");
        dvmAbort();
    }
    if (!dvmHeapBitmapInit(&hs->liveBits, base, length, "dalvik-bitmap-1")) {
        LOGE_HEAP("Can't create liveBits");
        dvmAbort();
    }
    if (!dvmHeapBitmapInit(&hs->markBits, base, length, "dalvik-bitmap-2")) {
        LOGE_HEAP("Can't create markBits");
        dvmHeapBitmapDelete(&hs->liveBits);
        dvmAbort();
    }
    if (!allocMarkStack(&gcHeap->markContext.stack, hs->maximumSize)) {
        ALOGE("Can't create markStack");
        dvmHeapBitmapDelete(&hs->markBits);
        dvmHeapBitmapDelete(&hs->liveBits);
        dvmAbort();
    }
    gcHeap->markContext.bitmap = &hs->markBits;
    gcHeap->heapSource = hs;

    gHs = hs;
    return gcHeap;
}
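
A hypothetical startup call (sizes illustrative): a 4 MiB initial heap
allowed to grow to 96 MiB inside a 256 MiB reserved region. Note the
argument order is (startSize, maximumSize, growthLimit).

    GcHeap *gcHeap = dvmHeapSourceStartup(4 * 1024 * 1024,
            256 * 1024 * 1024, 96 * 1024 * 1024);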
Example no. 11
/*
 * Initializes the heap source; must be called before any other
 * dvmHeapSource*() functions.  Returns a GcHeap structure
 * allocated from the heap source.
 */
GcHeap* dvmHeapSourceStartup(size_t startSize, size_t maximumSize,
                             size_t growthLimit)
{
    GcHeap *gcHeap;
    HeapSource *hs;
    mspace msp;
    size_t length;
    void *base;

    assert(gHs == NULL);

    if (!(startSize <= growthLimit && growthLimit <= maximumSize)) {
        ALOGE("Bad heap size parameters (start=%zd, max=%zd, limit=%zd)",
             startSize, maximumSize, growthLimit);
        return NULL;
    }

    /*
     * Allocate a contiguous region of virtual memory to be subdivided
     * among the heaps managed by the garbage collector.
     */
    length = ALIGN_UP_TO_PAGE_SIZE(maximumSize);
    base = dvmAllocRegion(length, PROT_NONE, "dalvik-heap");
    if (base == NULL) {
        return NULL;
    }

    /* Create an unlocked dlmalloc mspace to use as
     * a heap source.
     */
    msp = createMspace(base, startSize, maximumSize);
    if (msp == NULL) {
        goto fail;
    }

    gcHeap = (GcHeap *)calloc(1, sizeof(*gcHeap));
    if (gcHeap == NULL) {
        LOGE_HEAP("Can't allocate heap descriptor");
        goto fail;
    }

    hs = (HeapSource *)calloc(1, sizeof(*hs));
    if (hs == NULL) {
        LOGE_HEAP("Can't allocate heap source");
        free(gcHeap);
        goto fail;
    }

    hs->targetUtilization = DEFAULT_HEAP_UTILIZATION;
    hs->startSize = startSize;
    hs->maximumSize = maximumSize;
    hs->growthLimit = growthLimit;
    hs->idealSize = startSize;
    hs->softLimit = SIZE_MAX;    // no soft limit at first
    hs->numHeaps = 0;
    hs->sawZygote = gDvm.zygote;
    hs->hasGcThread = false;
    hs->heapBase = (char *)base;
    hs->heapLength = length;
    if (!addInitialHeap(hs, msp, growthLimit)) {
        LOGE_HEAP("Can't add initial heap");
        goto fail;
    }
    if (!dvmHeapBitmapInit(&hs->liveBits, base, length, "dalvik-bitmap-1")) {
        LOGE_HEAP("Can't create liveBits");
        goto fail;
    }
    if (!dvmHeapBitmapInit(&hs->markBits, base, length, "dalvik-bitmap-2")) {
        LOGE_HEAP("Can't create markBits");
        dvmHeapBitmapDelete(&hs->liveBits);
        goto fail;
    }
    if (!allocMarkStack(&gcHeap->markContext.stack, hs->maximumSize)) {
        ALOGE("Can't create markStack");
        dvmHeapBitmapDelete(&hs->markBits);
        dvmHeapBitmapDelete(&hs->liveBits);
        goto fail;
    }
    gcHeap->markContext.bitmap = &hs->markBits;
    gcHeap->heapSource = hs;

    gHs = hs;
    return gcHeap;

fail:
    munmap(base, length);
    return NULL;
}