static mspace createMspace(void *base, size_t startSize, size_t maximumSize)
{
    /* Create an unlocked dlmalloc mspace to use as
     * a heap source.
     *
     * We start off reserving startSize / 2 bytes but
     * letting the heap grow to startSize.  This saves
     * memory in the case where a process uses even less
     * than the starting size.
     */
    LOGV_HEAP("Creating VM heap of size %zu", startSize);
    errno = 0;

    mspace msp = create_contiguous_mspace_with_base(startSize/2,
            maximumSize, /*locked=*/false, base);
    if (msp != NULL) {
        /* Don't let the heap grow past the starting size without
         * our intervention.
         */
        mspace_set_max_allowed_footprint(msp, startSize);
    } else {
        /* There's no guarantee that errno has meaning when the call
         * fails, but it often does.
         */
        LOGE_HEAP("Can't create VM heap of size (%zu,%zu): %s",
            startSize/2, maximumSize, strerror(errno));
    }

    return msp;
}
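For context, here is a minimal sketch of how this helper pairs with an address-space reservation. A plain mmap stands in for dvmAllocRegion, and the function name and sizes are hypothetical; the real callers appear in the dvmHeapSourceStartup examples further down.

#include <sys/mman.h>

/* Illustrative only: reserve a region and carve an mspace out of it,
 * roughly what dvmHeapSourceStartup() does via dvmAllocRegion(). */
static mspace exampleCreateHeapMspace(size_t startSize, size_t maximumSize)
{
    /* Reserve the whole range up front; the mspace commits pages as its
     * footprint grows. */
    void *base = mmap(NULL, maximumSize, PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (base == MAP_FAILED) {
        return NULL;
    }

    mspace msp = createMspace(base, startSize, maximumSize);
    if (msp == NULL) {
        /* Nothing usable was created; give the reservation back. */
        munmap(base, maximumSize);
    }
    return msp;
}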
Example #2
/*
 * Initialize the GC heap.
 *
 * Returns true if successful, false otherwise.
 */
bool dvmHeapStartup()
{
    GcHeap *gcHeap;

    if (gDvm.heapGrowthLimit == 0) {
        gDvm.heapGrowthLimit = gDvm.heapMaximumSize;
    }

    gcHeap = dvmHeapSourceStartup(gDvm.heapStartingSize,
                                  gDvm.heapMaximumSize,
                                  gDvm.heapGrowthLimit);
    if (gcHeap == NULL) {
        return false;
    }
    gcHeap->ddmHpifWhen = 0;
    gcHeap->ddmHpsgWhen = 0;
    gcHeap->ddmHpsgWhat = 0;
    gcHeap->ddmNhsgWhen = 0;
    gcHeap->ddmNhsgWhat = 0;
    gDvm.gcHeap = gcHeap;

    /* Set up the lists we'll use for cleared reference objects.
     */
    gcHeap->clearedReferences = NULL;

    if (!dvmCardTableStartup(gDvm.heapMaximumSize, gDvm.heapGrowthLimit)) {
        LOGE_HEAP("card table startup failed.");
        return false;
    }

    return true;
}
Example #3
/*
 * Adds an additional heap to the heap source.  Returns false if there
 * are too many heaps or insufficient free space to add another heap.
 */
static bool addNewHeap(HeapSource *hs)
{
    Heap heap;

    assert(hs != NULL);
    if (hs->numHeaps >= HEAP_SOURCE_MAX_HEAP_COUNT) {
        ALOGE("Attempt to create too many heaps (%zd >= %zd)",
              hs->numHeaps, HEAP_SOURCE_MAX_HEAP_COUNT);
        dvmAbort();
        return false;
    }

    memset(&heap, 0, sizeof(heap));

    /*
     * Heap storage comes from a common virtual memory reservation.
     * The new heap will start on the page after the old heap.
     */
    char *base = hs->heaps[0].brk;
    size_t overhead = base - hs->heaps[0].base;
    assert(((size_t)hs->heaps[0].base & (SYSTEM_PAGE_SIZE - 1)) == 0);

    if (overhead + hs->minFree >= hs->maximumSize) {
        LOGE_HEAP("No room to create any more heaps "
                  "(%zd overhead, %zd max)",
                  overhead, hs->maximumSize);
        return false;
    }
    size_t morecoreStart = SYSTEM_PAGE_SIZE;
    heap.maximumSize = hs->growthLimit - overhead;
    heap.concurrentStartBytes = hs->minFree - CONCURRENT_START;
    heap.base = base;
    heap.limit = heap.base + heap.maximumSize;
    heap.brk = heap.base + morecoreStart;
    if (!remapNewHeap(hs, &heap)) {
        return false;
    }
    heap.msp = createMspace(base, morecoreStart, hs->minFree);
    if (heap.msp == NULL) {
        return false;
    }

    /* Don't let the soon-to-be-old heap grow any further.
     */
    hs->heaps[0].maximumSize = overhead;
    hs->heaps[0].limit = base;
    mspace_set_footprint_limit(hs->heaps[0].msp, overhead);

    /* Put the new heap in the list, at heaps[0].
     * Shift existing heaps down.
     */
    memmove(&hs->heaps[1], &hs->heaps[0], hs->numHeaps * sizeof(hs->heaps[0]));
    hs->heaps[0] = heap;
    hs->numHeaps++;

    return true;
}
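The pointer arithmetic in the addNewHeap() variant above is easier to follow with concrete numbers. The addresses and sizes below are purely hypothetical and only use the fields the function itself reads.

/* Hypothetical layout of the shared reservation, for illustration only:
 *
 *   hs->heaps[0].base == 0x40000000     start of the reservation
 *   hs->heaps[0].brk  == 0x40300000     old heap has committed 3 MiB so far
 *   hs->growthLimit   == 32 MiB
 *
 * Inside addNewHeap():
 *
 *   base             = 0x40300000                 new heap starts at the old brk
 *   overhead         = 3 MiB                      space the old heap keeps
 *   heap.maximumSize = 32 MiB - 3 MiB = 29 MiB    remaining growth budget
 *   heap.limit       = base + heap.maximumSize
 *
 * heaps[0] is then clamped (maximumSize = overhead, limit = base), so the old
 * and new heaps can never grow into each other inside the reservation.
 */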
/*
 * Adds an additional heap to the heap source.  Returns false if there
 * are too many heaps or insufficient free space to add another heap.
 */
static bool addNewHeap(HeapSource *hs)
{
    Heap heap;

    assert(hs != NULL);
    if (hs->numHeaps >= HEAP_SOURCE_MAX_HEAP_COUNT) {
        ALOGE("Attempt to create too many heaps (%zd >= %zd)",
                hs->numHeaps, HEAP_SOURCE_MAX_HEAP_COUNT);
        dvmAbort();
        return false;
    }

    memset(&heap, 0, sizeof(heap));

    /*
     * Heap storage comes from a common virtual memory reservation.
     * The new heap will start on the page after the old heap.
     */
    void *sbrk0 = contiguous_mspace_sbrk0(hs->heaps[0].msp);
    char *base = (char *)ALIGN_UP_TO_PAGE_SIZE(sbrk0);
    size_t overhead = base - hs->heaps[0].base;
    assert(((size_t)hs->heaps[0].base & (SYSTEM_PAGE_SIZE - 1)) == 0);

    if (overhead + HEAP_MIN_FREE >= hs->maximumSize) {
        LOGE_HEAP("No room to create any more heaps "
                  "(%zd overhead, %zd max)",
                  overhead, hs->maximumSize);
        return false;
    }

    size_t startSize = gDvm.heapStartingSize;
    heap.maximumSize = hs->growthLimit - overhead;
    heap.concurrentStartBytes = startSize - concurrentStart;
    heap.base = base;
    heap.limit = heap.base + heap.maximumSize;
    heap.msp = createMspace(base, startSize * 2, hs->maximumSize - overhead);
    if (heap.msp == NULL) {
        return false;
    }

    /* Don't let the soon-to-be-old heap grow any further.
     */
    hs->heaps[0].maximumSize = overhead;
    hs->heaps[0].limit = base;
    mspace msp = hs->heaps[0].msp;
    mspace_set_max_allowed_footprint(msp, mspace_footprint(msp));

    /* Put the new heap in the list, at heaps[0].
     * Shift existing heaps down.
     */
    memmove(&hs->heaps[1], &hs->heaps[0], hs->numHeaps * sizeof(hs->heaps[0]));
    hs->heaps[0] = heap;
    hs->numHeaps++;

    return true;
}
Example #5
bool dvmHeapAddTableToLargeTable(LargeHeapRefTable **tableP, HeapRefTable *refs)
{
    LargeHeapRefTable *table;

    /* Allocate a node.
     */
    table = calloc(1, sizeof(LargeHeapRefTable));
    if (table == NULL) {
        LOGE_HEAP("Can't allocate a new large ref table\n");
        return false;
    }
    table->refs = *refs;

    /* Insert the table into the list.
     */
    table->next = *tableP;
    *tableP = table;

    return true;
}
Example #6
/*
 * Initializes the heap source; must be called before any other
 * dvmHeapSource*() functions.  Returns a GcHeap structure
 * allocated from the heap source.
 */
GcHeap* dvmHeapSourceStartup(size_t startSize, size_t maximumSize,
                             size_t growthLimit)
{
    GcHeap *gcHeap = NULL;
    HeapSource *hs = NULL;
    mspace msp;
    size_t length;
    void *base;

    assert(gHs == NULL);

    if (!(startSize <= growthLimit && growthLimit <= maximumSize)) {
        ALOGE("Bad heap size parameters (start=%zd, max=%zd, limit=%zd)",
              startSize, maximumSize, growthLimit);
        return NULL;
    }

    /*
     * Allocate a contiguous region of virtual memory to be subdivided
     * among the heaps managed by the garbage collector.
     */
    length = ALIGN_UP_TO_PAGE_SIZE(maximumSize);
    base = dvmAllocRegion(length, PROT_NONE, gDvm.zygote ? "dalvik-zygote" : "dalvik-heap");
    if (base == NULL) {
        dvmAbort();
    }

    /* Create an unlocked dlmalloc mspace to use as
     * a heap source.
     */
    msp = createMspace(base, kInitialMorecoreStart, startSize);
    if (msp == NULL) {
        dvmAbort();
    }

    gcHeap = (GcHeap *)calloc(1, sizeof(*gcHeap));
    if (gcHeap == NULL) {
        LOGE_HEAP("Can't allocate heap descriptor");
        dvmAbort();
    }

    hs = (HeapSource *)calloc(1, sizeof(*hs));
    if (hs == NULL) {
        LOGE_HEAP("Can't allocate heap source");
        dvmAbort();
    }

    hs->targetUtilization = gDvm.heapTargetUtilization * HEAP_UTILIZATION_MAX;
    hs->minFree = gDvm.heapMinFree;
    hs->maxFree = gDvm.heapMaxFree;
    hs->startSize = startSize;
    hs->maximumSize = maximumSize;
    hs->growthLimit = growthLimit;
    hs->idealSize = startSize;
    hs->softLimit = SIZE_MAX;    // no soft limit at first
    hs->numHeaps = 0;
    hs->sawZygote = gDvm.zygote;
    hs->nativeBytesAllocated = 0;
    hs->nativeFootprintGCWatermark = startSize;
    hs->nativeFootprintLimit = startSize * 2;
    hs->nativeNeedToRunFinalization = false;
    hs->hasGcThread = false;
    hs->heapBase = (char *)base;
    hs->heapLength = length;

    if (hs->maxFree > hs->maximumSize) {
        hs->maxFree = hs->maximumSize;
    }
    if (hs->minFree < CONCURRENT_START) {
        hs->minFree = CONCURRENT_START;
    } else if (hs->minFree > hs->maxFree) {
        hs->minFree = hs->maxFree;
    }

    if (!addInitialHeap(hs, msp, growthLimit)) {
        LOGE_HEAP("Can't add initial heap");
        dvmAbort();
    }
    if (!dvmHeapBitmapInit(&hs->liveBits, base, length, "dalvik-bitmap-1")) {
        LOGE_HEAP("Can't create liveBits");
        dvmAbort();
    }
    if (!dvmHeapBitmapInit(&hs->markBits, base, length, "dalvik-bitmap-2")) {
        LOGE_HEAP("Can't create markBits");
        dvmHeapBitmapDelete(&hs->liveBits);
        dvmAbort();
    }
    if (!allocMarkStack(&gcHeap->markContext.stack, hs->maximumSize)) {
        ALOGE("Can't create markStack");
        dvmHeapBitmapDelete(&hs->markBits);
        dvmHeapBitmapDelete(&hs->liveBits);
        dvmAbort();
    }
    gcHeap->markContext.bitmap = &hs->markBits;
    gcHeap->heapSource = hs;

    gHs = hs;
    return gcHeap;
}
Example #7
/*
 * Initiate garbage collection.
 *
 * NOTES:
 * - If we don't hold gDvm.threadListLock, it's possible for a thread to
 *   be added to the thread list while we work.  The thread should NOT
 *   start executing, so this is only interesting when we start chasing
 *   thread stacks.  (Before we do so, grab the lock.)
 *
 * We are not allowed to GC when the debugger has suspended the VM, which
 * is awkward because debugger requests can cause allocations.  The easiest
 * way to enforce this is to refuse to GC on an allocation made by the
 * JDWP thread -- we have to expand the heap or fail.
 */
void dvmCollectGarbageInternal(const GcSpec* spec)
{
    GcHeap *gcHeap = gDvm.gcHeap;
    u4 gcEnd = 0;
    u4 rootStart = 0, rootEnd = 0;
    u4 dirtyStart = 0, dirtyEnd = 0;
    size_t numObjectsFreed, numBytesFreed;
    size_t currAllocated, currFootprint;
    size_t percentFree;
    int oldThreadPriority = INT_MAX;

    /* The heap lock must be held.
     */

    if (gcHeap->gcRunning) {
        LOGW_HEAP("Attempted recursive GC");
        return;
    }

    gcHeap->gcRunning = true;

    rootStart = dvmGetRelativeTimeMsec();
    dvmSuspendAllThreads(SUSPEND_FOR_GC);

    /*
     * If we are not marking concurrently raise the priority of the
     * thread performing the garbage collection.
     */
    if (!spec->isConcurrent) {
        oldThreadPriority = os_raiseThreadPriority();
    }
    if (gDvm.preVerify) {
        LOGV_HEAP("Verifying roots and heap before GC");
        verifyRootsAndHeap();
    }

    dvmMethodTraceGCBegin();

    /* Set up the marking context.
     */
    if (!dvmHeapBeginMarkStep(spec->isPartial)) {
        LOGE_HEAP("dvmHeapBeginMarkStep failed; aborting");
        dvmAbort();
    }

    /* Mark the set of objects that are strongly reachable from the roots.
     */
    LOGD_HEAP("Marking...");
    dvmHeapMarkRootSet();

    /* dvmHeapScanMarkedObjects() will build the lists of known
     * instances of the Reference classes.
     */
    assert(gcHeap->softReferences == NULL);
    assert(gcHeap->weakReferences == NULL);
    assert(gcHeap->finalizerReferences == NULL);
    assert(gcHeap->phantomReferences == NULL);
    assert(gcHeap->clearedReferences == NULL);

    if (spec->isConcurrent) {
        /*
         * Resume threads while tracing from the roots.  We unlock the
         * heap to allow mutator threads to allocate from free space.
         */
        dvmClearCardTable();
        dvmUnlockHeap();
        dvmResumeAllThreads(SUSPEND_FOR_GC);
        rootEnd = dvmGetRelativeTimeMsec();
    }

    /* Recursively mark any objects that marked objects point to strongly.
     * If we're not collecting soft references, soft-reachable
     * objects will also be marked.
     */
    LOGD_HEAP("Recursing...");
    dvmHeapScanMarkedObjects();

    if (spec->isConcurrent) {
        /*
         * Re-acquire the heap lock and perform the final thread
         * suspension.
         */
        dirtyStart = dvmGetRelativeTimeMsec();
        dvmLockHeap();
        dvmSuspendAllThreads(SUSPEND_FOR_GC);
        /*
         * As no barrier intercepts root updates, we conservatively
         * assume all roots may be gray and re-mark them.
         */
        dvmHeapReMarkRootSet();
        /*
         * With the exception of reference objects and weak interned
         * strings, all gray objects should now be on dirty cards.
         */
        if (gDvm.verifyCardTable) {
            dvmVerifyCardTable();
        }
        /*
         * Recursively mark gray objects pointed to by the roots or by
         * heap objects dirtied during the concurrent mark.
         */
        dvmHeapReScanMarkedObjects();
    }

    /*
     * All strongly-reachable objects have now been marked.  Process
     * weakly-reachable objects discovered while tracing.
     */
    dvmHeapProcessReferences(&gcHeap->softReferences,
                             spec->doPreserve == false,
                             &gcHeap->weakReferences,
                             &gcHeap->finalizerReferences,
                             &gcHeap->phantomReferences);

#if defined(WITH_JIT)
    /*
     * Patching a chaining cell is very cheap as it only updates 4 words. It's
     * the overhead of stopping all threads and synchronizing the I/D cache
     * that makes it expensive.
     *
     * Therefore we batch those work orders in a queue and go through them
     * when threads are suspended for GC.
     */
    dvmCompilerPerformSafePointChecks();
#endif

    LOGD_HEAP("Sweeping...");

    dvmHeapSweepSystemWeaks();

    /*
     * Live objects have a bit set in the mark bitmap, swap the mark
     * and live bitmaps.  The sweep can proceed concurrently viewing
     * the new live bitmap as the old mark bitmap, and vice versa.
     */
    dvmHeapSourceSwapBitmaps();

    if (gDvm.postVerify) {
        LOGV_HEAP("Verifying roots and heap after GC");
        verifyRootsAndHeap();
    }

    if (spec->isConcurrent) {
        dvmUnlockHeap();
        dvmResumeAllThreads(SUSPEND_FOR_GC);
        dirtyEnd = dvmGetRelativeTimeMsec();
    }
    dvmHeapSweepUnmarkedObjects(spec->isPartial, spec->isConcurrent,
                                &numObjectsFreed, &numBytesFreed);
    LOGD_HEAP("Cleaning up...");
    dvmHeapFinishMarkStep();
    if (spec->isConcurrent) {
        dvmLockHeap();
    }

    LOGD_HEAP("Done.");

    /* Now's a good time to adjust the heap size, since
     * we know what our utilization is.
     *
     * This doesn't actually resize any memory;
     * it just lets the heap grow more when necessary.
     */
    dvmHeapSourceGrowForUtilization();

    currAllocated = dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, NULL, 0);
    currFootprint = dvmHeapSourceGetValue(HS_FOOTPRINT, NULL, 0);

    dvmMethodTraceGCEnd();
    LOGV_HEAP("GC finished");

    gcHeap->gcRunning = false;

    LOGV_HEAP("Resuming threads");

    if (spec->isConcurrent) {
        /*
         * Wake up any threads that blocked after a failed allocation
         * request.
         */
        dvmBroadcastCond(&gDvm.gcHeapCond);
    }

    if (!spec->isConcurrent) {
        dvmResumeAllThreads(SUSPEND_FOR_GC);
        dirtyEnd = dvmGetRelativeTimeMsec();
        /*
         * Restore the original thread scheduling priority if it was
         * changed at the start of the current garbage collection.
         */
        if (oldThreadPriority != INT_MAX) {
            os_lowerThreadPriority(oldThreadPriority);
        }
    }

    /*
     * Move queue of pending references back into Java.
     */
    dvmEnqueueClearedReferences(&gDvm.gcHeap->clearedReferences);

    gcEnd = dvmGetRelativeTimeMsec();
    percentFree = 100 - (size_t)(100.0f * (float)currAllocated / currFootprint);
    if (!spec->isConcurrent) {
        u4 markSweepTime = dirtyEnd - rootStart;
        u4 gcTime = gcEnd - rootStart;
        bool isSmall = numBytesFreed > 0 && numBytesFreed < 1024;
        ALOGD("%s freed %s%zdK, %d%% free %zdK/%zdK, paused %ums, total %ums",
             spec->reason,
             isSmall ? "<" : "",
             numBytesFreed ? MAX(numBytesFreed / 1024, 1) : 0,
             percentFree,
             currAllocated / 1024, currFootprint / 1024,
             markSweepTime, gcTime);
    } else {
        u4 rootTime = rootEnd - rootStart;
        u4 dirtyTime = dirtyEnd - dirtyStart;
        u4 gcTime = gcEnd - rootStart;
        bool isSmall = numBytesFreed > 0 && numBytesFreed < 1024;
        ALOGD("%s freed %s%zdK, %d%% free %zdK/%zdK, paused %ums+%ums, total %ums",
             spec->reason,
             isSmall ? "<" : "",
             numBytesFreed ? MAX(numBytesFreed / 1024, 1) : 0,
             percentFree,
             currAllocated / 1024, currFootprint / 1024,
             rootTime, dirtyTime, gcTime);
    }
    if (gcHeap->ddmHpifWhen != 0) {
        LOGD_HEAP("Sending VM heap info to DDM");
        dvmDdmSendHeapInfo(gcHeap->ddmHpifWhen, false);
    }
    if (gcHeap->ddmHpsgWhen != 0) {
        LOGD_HEAP("Dumping VM heap to DDM");
        dvmDdmSendHeapSegments(false, false);
    }
    if (gcHeap->ddmNhsgWhen != 0) {
        LOGD_HEAP("Dumping native heap to DDM");
        dvmDdmSendHeapSegments(false, true);
    }
}
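The GcSpec fields consulted above (isPartial, isConcurrent, doPreserve, reason) are enough to sketch what a collection request looks like. The values below are assumptions for illustration, not one of the VM's predefined specs.

/* Hypothetical spec, limited to the fields dvmCollectGarbageInternal()
 * actually reads; the reason string is made up. */
static const GcSpec kExampleConcurrentGc = {
    .isPartial    = false,  /* trace the whole heap, not just the newest one */
    .isConcurrent = true,   /* resume mutators while tracing from the roots  */
    .doPreserve   = true,   /* do not clear softly-reachable objects         */
    .reason       = "GC_EXAMPLE",
};

/* The caller must already hold the heap lock, as noted in the comments above:
 *     dvmLockHeap();
 *     dvmCollectGarbageInternal(&kExampleConcurrentGc);
 *     dvmUnlockHeap();
 */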
Example #8
/* Try as hard as possible to allocate some memory.
 */
static void *tryMalloc(size_t size)
{
    void *ptr;

    /* Don't try too hard if there's no way the allocation is
     * going to succeed.  We have to collect SoftReferences before
     * throwing an OOME, though.
     */
    if (size >= gDvm.heapGrowthLimit) {
        ALOGW("%zd byte allocation exceeds the %zd byte maximum heap size",
             size, gDvm.heapGrowthLimit);
        ptr = NULL;
        goto collect_soft_refs;
    }

//TODO: figure out better heuristics
//    There will be a lot of churn if someone allocates a bunch of
//    big objects in a row, and we hit the frag case each time.
//    A full GC for each.
//    Maybe we grow the heap in bigger leaps
//    Maybe we skip the GC if the size is large and we did one recently
//      (number of allocations ago) (watch for thread effects)
//    DeflateTest allocs a bunch of ~128k buffers w/in 0-5 allocs of each other
//      (or, at least, there are only 0-5 objects swept each time)

    ptr = dvmHeapSourceAlloc(size);
    if (ptr != NULL) {
        return ptr;
    }

    /*
     * The allocation failed.  If the GC is running, block until it
     * completes and retry.
     */
    if (gDvm.gcHeap->gcRunning) {
        /*
         * The GC is concurrently tracing the heap.  Release the heap
         * lock, wait for the GC to complete, and retry the allocation.
         */
        dvmWaitForConcurrentGcToComplete();
        ptr = dvmHeapSourceAlloc(size);
        if (ptr != NULL) {
            return ptr;
        }
    }
    /*
     * Another failure.  Our thread was starved or there may be too
     * many live objects.  Try a foreground GC.  This will have no
     * effect if the concurrent GC is already running.
     */
    gcForMalloc(false);
    ptr = dvmHeapSourceAlloc(size);
    if (ptr != NULL) {
        return ptr;
    }

    /* Even that didn't work;  this is an exceptional state.
     * Try harder, growing the heap if necessary.
     */
    ptr = dvmHeapSourceAllocAndGrow(size);
    if (ptr != NULL) {
        size_t newHeapSize;

        newHeapSize = dvmHeapSourceGetIdealFootprint();
//TODO: may want to grow a little bit more so that the amount of free
//      space is equal to the old free space + the utilization slop for
//      the new allocation.
        LOGI_HEAP("Grow heap (frag case) to "
                "%zu.%03zuMB for %zu-byte allocation",
                FRACTIONAL_MB(newHeapSize), size);
        return ptr;
    }

    /* Most allocations should have succeeded by now, so the heap
     * is really full, really fragmented, or the requested size is
     * really big.  Do another GC, collecting SoftReferences this
     * time.  The VM spec requires that all SoftReferences have
     * been collected and cleared before throwing an OOME.
     */
//TODO: wait for the finalizers from the previous GC to finish
collect_soft_refs:
    LOGI_HEAP("Forcing collection of SoftReferences for %zu-byte allocation",
            size);
    gcForMalloc(true);
    ptr = dvmHeapSourceAllocAndGrow(size);
    if (ptr != NULL) {
        return ptr;
    }
//TODO: maybe wait for finalizers and try one last time

    LOGE_HEAP("Out of memory on a %zd-byte allocation.", size);
//TODO: tell the HeapSource to dump its state
    dvmDumpThread(dvmThreadSelf(), false);

    return NULL;
}
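For orientation, a caller-side sketch of how tryMalloc's NULL result becomes an error. This is not the real dvmMalloc; the wrapper name is hypothetical, and dvmThrowOutOfMemoryError is assumed to be the usual Dalvik helper for raising the exception.

/* Hypothetical wrapper, not the actual dvmMalloc() implementation. */
static void *allocObjectOrThrow(size_t size)
{
    dvmLockHeap();
    void *ptr = tryMalloc(size);
    dvmUnlockHeap();

    if (ptr == NULL) {
        /* tryMalloc() has already collected SoftReferences by this point,
         * so an OutOfMemoryError is the only option left. */
        dvmThrowOutOfMemoryError("allocation failed");
    }
    return ptr;
}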
Example #9
/* All objects for stronger reference levels have been
 * marked before this is called.
 */
void dvmHeapHandleReferences(Object *refListHead, enum RefType refType)
{
    Object *reference;
    GcMarkContext *markContext = &gDvm.gcHeap->markContext;
    const int offVmData = gDvm.offJavaLangRefReference_vmData;
    const int offReferent = gDvm.offJavaLangRefReference_referent;
    bool workRequired = false;

    size_t numCleared = 0;
    size_t numEnqueued = 0;
    reference = refListHead;
    while (reference != NULL) {
        Object *next;
        Object *referent;

        /* Pull the interesting fields out of the Reference object.
         */
        next = dvmGetFieldObject(reference, offVmData);
        referent = dvmGetFieldObject(reference, offReferent);

        //TODO: when handling REF_PHANTOM, unlink any references
        //      that fail this initial if().  We need to re-walk
        //      the list, and it would be nice to avoid the extra
        //      work.
        if (referent != NULL && !isMarked(ptr2chunk(referent), markContext)) {
            bool schedClear, schedEnqueue;

            /* This is the strongest reference that refers to referent.
             * Do the right thing.
             */
            switch (refType) {
            case REF_SOFT:
            case REF_WEAK:
                schedClear = clearReference(reference);
                schedEnqueue = enqueueReference(reference);
                break;
            case REF_PHANTOM:
                /* PhantomReferences are not cleared automatically.
                 * Until someone clears it (or the reference itself
                 * is collected), the referent must remain alive.
                 *
                 * It's necessary to fully mark the referent because
                 * it will still be present during the next GC, and
                 * all objects that it points to must be valid.
                 * (The referent will be marked outside of this loop,
                 * after handling all references of this strength, in
                 * case multiple references point to the same object.)
                 */
                schedClear = false;

                /* A PhantomReference is only useful with a
                 * queue, but since it's possible to create one
                 * without a queue, we need to check.
                 */
                schedEnqueue = enqueueReference(reference);
                break;
            default:
                assert(!"Bad reference type");
                schedClear = false;
                schedEnqueue = false;
                break;
            }
            numCleared += schedClear ? 1 : 0;
            numEnqueued += schedEnqueue ? 1 : 0;

            if (schedClear || schedEnqueue) {
                uintptr_t workBits;

                /* Stuff the clear/enqueue bits in the bottom of
                 * the pointer.  Assumes that objects are 8-byte
                 * aligned.
                 *
                 * Note that we are adding the *Reference* (which
                 * is by definition already marked at this point) to
                 * this list; we're not adding the referent (which
                 * has already been cleared).
                 */
                assert(((intptr_t)reference & 3) == 0);
                assert(((WORKER_CLEAR | WORKER_ENQUEUE) & ~3) == 0);
                workBits = (schedClear ? WORKER_CLEAR : 0) |
                           (schedEnqueue ? WORKER_ENQUEUE : 0);
                if (!dvmHeapAddRefToLargeTable(
                        &gDvm.gcHeap->referenceOperations,
                        (Object *)((uintptr_t)reference | workBits)))
                {
                    LOGE_HEAP("dvmMalloc(): no room for any more "
                            "reference operations\n");
                    dvmAbort();
                }
                workRequired = true;
            }

            if (refType != REF_PHANTOM) {
                /* Let later GCs know not to reschedule this reference.
                 */
                dvmSetFieldObject(reference, offVmData,
                        SCHEDULED_REFERENCE_MAGIC);
            } // else this is handled later for REF_PHANTOM

        } // else there was a stronger reference to the referent.

        reference = next;
    }
#define refType2str(r) \
    ((r) == REF_SOFT ? "soft" : ( \
     (r) == REF_WEAK ? "weak" : ( \
     (r) == REF_PHANTOM ? "phantom" : "UNKNOWN" )))
    LOGD_HEAP("dvmHeapHandleReferences(): cleared %zd, enqueued %zd %s references\n",
            numCleared, numEnqueued, refType2str(refType));

    /* Walk though the reference list again, and mark any non-clear/marked
     * referents.  Only PhantomReferences can have non-clear referents
     * at this point.
     */
    if (refType == REF_PHANTOM) {
        bool scanRequired = false;

        HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_REFERENCE_CLEANUP, 0);
        reference = refListHead;
        while (reference != NULL) {
            Object *next;
            Object *referent;

            /* Pull the interesting fields out of the Reference object.
             */
            next = dvmGetFieldObject(reference, offVmData);
            referent = dvmGetFieldObject(reference, offReferent);

            if (referent != NULL && !isMarked(ptr2chunk(referent), markContext)) {
                markObjectNonNull(referent, markContext);
                scanRequired = true;

                /* Let later GCs know not to reschedule this reference.
                 */
                dvmSetFieldObject(reference, offVmData,
                        SCHEDULED_REFERENCE_MAGIC);
            }

            reference = next;
        }
        HPROF_CLEAR_GC_SCAN_STATE();

        if (scanRequired) {
            processMarkStack(markContext);
        }
    }

    if (workRequired) {
        dvmSignalHeapWorker(false);
    }
}
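The low pointer bits written into referenceOperations above imply a matching decode step on the heap-worker side. Here is a minimal sketch of that decoding, assuming the same WORKER_CLEAR/WORKER_ENQUEUE bit layout; the function name is hypothetical.

/* Sketch only: split one tagged entry from referenceOperations back into
 * the Reference object and the work scheduled for it. */
static void decodeReferenceOperation(Object *taggedRef, Object **outReference,
                                     bool *outClear, bool *outEnqueue)
{
    uintptr_t bits = (uintptr_t)taggedRef;

    *outClear   = (bits & WORKER_CLEAR) != 0;
    *outEnqueue = (bits & WORKER_ENQUEUE) != 0;
    /* Mask off the low work bits to recover the real pointer. */
    *outReference = (Object *)(bits & ~(uintptr_t)(WORKER_CLEAR | WORKER_ENQUEUE));
}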
/*
 * Initializes the heap source; must be called before any other
 * dvmHeapSource*() functions.  Returns a GcHeap structure
 * allocated from the heap source.
 */
GcHeap* dvmHeapSourceStartup(size_t startSize, size_t maximumSize,
                             size_t growthLimit)
{
    GcHeap *gcHeap;
    HeapSource *hs;
    mspace msp;
    size_t length;
    void *base;

    assert(gHs == NULL);

    if (!(startSize <= growthLimit && growthLimit <= maximumSize)) {
        ALOGE("Bad heap size parameters (start=%zd, max=%zd, limit=%zd)",
             startSize, maximumSize, growthLimit);
        return NULL;
    }

    /*
     * Allocate a contiguous region of virtual memory to be subdivided
     * among the heaps managed by the garbage collector.
     */
    length = ALIGN_UP_TO_PAGE_SIZE(maximumSize);
    base = dvmAllocRegion(length, PROT_NONE, "dalvik-heap");
    if (base == NULL) {
        return NULL;
    }

    /* Create an unlocked dlmalloc mspace to use as
     * a heap source.
     */
    msp = createMspace(base, startSize, maximumSize);
    if (msp == NULL) {
        goto fail;
    }

    gcHeap = (GcHeap *)calloc(1, sizeof(*gcHeap));
    if (gcHeap == NULL) {
        LOGE_HEAP("Can't allocate heap descriptor");
        goto fail;
    }

    hs = (HeapSource *)calloc(1, sizeof(*hs));
    if (hs == NULL) {
        LOGE_HEAP("Can't allocate heap source");
        free(gcHeap);
        goto fail;
    }

    hs->targetUtilization = DEFAULT_HEAP_UTILIZATION;
    hs->startSize = startSize;
    hs->maximumSize = maximumSize;
    hs->growthLimit = growthLimit;
    hs->idealSize = startSize;
    hs->softLimit = SIZE_MAX;    // no soft limit at first
    hs->numHeaps = 0;
    hs->sawZygote = gDvm.zygote;
    hs->hasGcThread = false;
    hs->heapBase = (char *)base;
    hs->heapLength = length;
    if (!addInitialHeap(hs, msp, growthLimit)) {
        LOGE_HEAP("Can't add initial heap");
        goto fail;
    }
    if (!dvmHeapBitmapInit(&hs->liveBits, base, length, "dalvik-bitmap-1")) {
        LOGE_HEAP("Can't create liveBits");
        goto fail;
    }
    if (!dvmHeapBitmapInit(&hs->markBits, base, length, "dalvik-bitmap-2")) {
        LOGE_HEAP("Can't create markBits");
        dvmHeapBitmapDelete(&hs->liveBits);
        goto fail;
    }
    if (!allocMarkStack(&gcHeap->markContext.stack, hs->maximumSize)) {
        ALOGE("Can't create markStack");
        dvmHeapBitmapDelete(&hs->markBits);
        dvmHeapBitmapDelete(&hs->liveBits);
        goto fail;
    }
    gcHeap->markContext.bitmap = &hs->markBits;
    gcHeap->heapSource = hs;

    gHs = hs;
    return gcHeap;

fail:
    munmap(base, length);
    return NULL;
}
Example #11
bool dvmHeapAddRefToLargeTable(LargeHeapRefTable **tableP, Object *ref)
{
    LargeHeapRefTable *table;

    assert(tableP != NULL);
    assert(ref != NULL);

    /* Make sure that a table with a free slot is
     * at the head of the list.
     */
    if (*tableP != NULL) {
        table = *tableP;
        LargeHeapRefTable *prevTable;

        /* Find an empty slot for this reference.
         */
        prevTable = NULL;
        while (table != NULL && heapRefTableIsFull(&table->refs)) {
            prevTable = table;
            table = table->next;
        }
        if (table != NULL) {
            if (prevTable != NULL) {
                /* Move the table to the head of the list.
                 */
                prevTable->next = table->next;
                table->next = *tableP;
                *tableP = table;
            }
            /* else it's already at the head. */

            goto insert;
        }
        /* else all tables are already full;
         * fall through to the alloc case.
         */
    }

    /* Allocate a new table.
     */
    table = calloc(1, sizeof(LargeHeapRefTable));
    if (table == NULL) {
        LOGE_HEAP("Can't allocate a new large ref table\n");
        return false;
    }
    if (!dvmInitReferenceTable(&table->refs,
                               kLargeHeapRefTableNElems,
                               INT_MAX)) {
        LOGE_HEAP("Can't initialize a new large ref table\n");
        dvmHeapHeapTableFree(table);
        return false;
    }

    /* Stick it at the head.
     */
    table->next = *tableP;
    *tableP = table;

insert:
    /* Insert the reference.
     */
    assert(table == *tableP);
    assert(table != NULL);
    assert(!heapRefTableIsFull(&table->refs));
    *table->refs.nextEntry++ = ref;

    return true;
}
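A small sketch of consuming the resulting list. It assumes HeapRefTable exposes the table/nextEntry pair used by the insert path above; the function name is hypothetical.

/* Sketch only: total number of references held across a chain of
 * LargeHeapRefTable nodes. */
static size_t countLargeHeapRefs(const LargeHeapRefTable *table)
{
    size_t count = 0;
    for (; table != NULL; table = table->next) {
        /* nextEntry points one past the last used slot in refs.table. */
        count += table->refs.nextEntry - table->refs.table;
    }
    return count;
}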