/*
 * Return unused memory to the system if possible.
 */
static void trimHeaps()
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    size_t heapBytes = 0;
    for (size_t i = 0; i < hs->numHeaps; i++) {
        Heap *heap = &hs->heaps[i];

        /* Return the wilderness chunk to the system.
         */
        mspace_trim(heap->msp, 0);

        /* Return any whole free pages to the system.
         */
        mspace_walk_free_pages(heap->msp, releasePagesInRange, &heapBytes);
    }

    /* Same for the native heap.
     */
    dlmalloc_trim(0);
    size_t nativeBytes = 0;
    dlmalloc_walk_free_pages(releasePagesInRange, &nativeBytes);

    LOGD_HEAP("madvised %zd (GC) + %zd (native) = %zd total bytes",
            heapBytes, nativeBytes, heapBytes + nativeBytes);
}
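/*
 * The releasePagesInRange() callback used above is not shown in this
 * example.  A minimal sketch (assumed, not taken from this file) of what
 * such a callback could look like: round the range in to page boundaries,
 * madvise() the pages back to the kernel, and accumulate the released byte
 * count through the cookie pointer (&heapBytes / &nativeBytes above).
 */
#include <stdint.h>
#include <sys/mman.h>

static void releasePagesInRangeSketch(void *start, void *end, void *nbytes)
{
    const uintptr_t pageSize = 4096;    /* assumed page size, for illustration */

    /* madvise() requires page-aligned addresses: round start up, end down. */
    uintptr_t s = ((uintptr_t)start + pageSize - 1) & ~(pageSize - 1);
    uintptr_t e = (uintptr_t)end & ~(pageSize - 1);
    if (s < e) {
        size_t length = e - s;
        madvise((void *)s, length, MADV_DONTNEED);
        *(size_t *)nbytes += length;
    }
}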
/*
 * Sets concurrentStart
 */
void dvmSetTargetHeapConcurrentStart(size_t size)
{
    HS_BOILERPLATE();

    concurrentStart = size;
    LOGD_HEAP("dvmSetTargetHeapConcurrentStart %zu", size);
}
/*
 * Gets concurrentStart
 */
int dvmGetTargetHeapConcurrentStart()
{
    HS_BOILERPLATE();

    LOGD_HEAP("dvmGetTargetHeapConcurrentStart %zu", concurrentStart);
    return concurrentStart;
}
/*
 * Gets heapIdeaFree
 */
int dvmGetTargetHeapIdealFree()
{
    HS_BOILERPLATE();

    LOGD_HEAP("dvmGetTargetHeapIdealFree %zu", heapIdeaFree);
    return heapIdeaFree;
}
/*
 * Sets heapIdeaFree
 */
void dvmSetTargetHeapIdealFree(size_t size)
{
    HS_BOILERPLATE();

    heapIdeaFree = size;
    LOGD_HEAP("dvmSetTargetHeapIdealFree %zu", size);
}
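/*
 * Hypothetical usage sketch (not part of the original file): a tuning path
 * that widens the ideal-free window and starts the concurrent collector
 * earlier.  The byte values and the function name are illustrative only.
 */
#include <assert.h>

static void tuneHeapForForegroundSketch(void)
{
    /* Allow up to 8 MiB of free space before the heap is considered bloated. */
    dvmSetTargetHeapIdealFree(8 * 1024 * 1024);

    /* Begin the concurrent GC when roughly 2 MiB of free space remain. */
    dvmSetTargetHeapConcurrentStart(2 * 1024 * 1024);

    /* The concurrent-start margin should sit inside the ideal-free window. */
    assert(dvmGetTargetHeapConcurrentStart() < dvmGetTargetHeapIdealFree());
}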
Example #6
/*
 * Initiate garbage collection.
 *
 * NOTES:
 * - If we don't hold gDvm.threadListLock, it's possible for a thread to
 *   be added to the thread list while we work.  The thread should NOT
 *   start executing, so this is only interesting when we start chasing
 *   thread stacks.  (Before we do so, grab the lock.)
 *
 * We are not allowed to GC when the debugger has suspended the VM, which
 * is awkward because debugger requests can cause allocations.  The easiest
 * way to enforce this is to refuse to GC on an allocation made by the
 * JDWP thread -- we have to expand the heap or fail.
 */
void dvmCollectGarbageInternal(const GcSpec* spec)
{
    GcHeap *gcHeap = gDvm.gcHeap;
    u4 gcEnd = 0;
    u4 rootStart = 0, rootEnd = 0;
    u4 dirtyStart = 0, dirtyEnd = 0;
    size_t numObjectsFreed, numBytesFreed;
    size_t currAllocated, currFootprint;
    size_t percentFree;
    int oldThreadPriority = INT_MAX;

    /* The heap lock must be held.
     */

    if (gcHeap->gcRunning) {
        LOGW_HEAP("Attempted recursive GC");
        return;
    }

    gcHeap->gcRunning = true;

    rootStart = dvmGetRelativeTimeMsec();
    dvmSuspendAllThreads(SUSPEND_FOR_GC);

    /*
     * If we are not marking concurrently raise the priority of the
     * thread performing the garbage collection.
     */
    if (!spec->isConcurrent) {
        oldThreadPriority = os_raiseThreadPriority();
    }
    if (gDvm.preVerify) {
        LOGV_HEAP("Verifying roots and heap before GC");
        verifyRootsAndHeap();
    }

    dvmMethodTraceGCBegin();

    /* Set up the marking context.
     */
    if (!dvmHeapBeginMarkStep(spec->isPartial)) {
        LOGE_HEAP("dvmHeapBeginMarkStep failed; aborting");
        dvmAbort();
    }

    /* Mark the set of objects that are strongly reachable from the roots.
     */
    LOGD_HEAP("Marking...");
    dvmHeapMarkRootSet();

    /* dvmHeapScanMarkedObjects() will build the lists of known
     * instances of the Reference classes.
     */
    assert(gcHeap->softReferences == NULL);
    assert(gcHeap->weakReferences == NULL);
    assert(gcHeap->finalizerReferences == NULL);
    assert(gcHeap->phantomReferences == NULL);
    assert(gcHeap->clearedReferences == NULL);

    if (spec->isConcurrent) {
        /*
         * Resume threads while tracing from the roots.  We unlock the
         * heap to allow mutator threads to allocate from free space.
         */
        dvmClearCardTable();
        dvmUnlockHeap();
        dvmResumeAllThreads(SUSPEND_FOR_GC);
        rootEnd = dvmGetRelativeTimeMsec();
    }

    /* Recursively mark any objects that marked objects point to strongly.
     * If we're not collecting soft references, soft-reachable
     * objects will also be marked.
     */
    LOGD_HEAP("Recursing...");
    dvmHeapScanMarkedObjects();

    if (spec->isConcurrent) {
        /*
         * Re-acquire the heap lock and perform the final thread
         * suspension.
         */
        dirtyStart = dvmGetRelativeTimeMsec();
        dvmLockHeap();
        dvmSuspendAllThreads(SUSPEND_FOR_GC);
        /*
         * As no barrier intercepts root updates, we conservatively
         * assume all roots may be gray and re-mark them.
         */
        dvmHeapReMarkRootSet();
        /*
         * With the exception of reference objects and weak interned
         * strings, all gray objects should now be on dirty cards.
         */
        if (gDvm.verifyCardTable) {
            dvmVerifyCardTable();
        }
        /*
         * Recursively mark gray objects pointed to by the roots or by
         * heap objects dirtied during the concurrent mark.
         */
        dvmHeapReScanMarkedObjects();
    }

    /*
     * All strongly-reachable objects have now been marked.  Process
     * weakly-reachable objects discovered while tracing.
     */
    dvmHeapProcessReferences(&gcHeap->softReferences,
                             spec->doPreserve == false,
                             &gcHeap->weakReferences,
                             &gcHeap->finalizerReferences,
                             &gcHeap->phantomReferences);

#if defined(WITH_JIT)
    /*
     * Patching a chaining cell is very cheap as it only updates 4 words. It's
     * the overhead of stopping all threads and synchronizing the I/D cache
     * that makes it expensive.
     *
     * Therefore we batch those work orders in a queue and go through them
     * when threads are suspended for GC.
     */
    dvmCompilerPerformSafePointChecks();
#endif

    LOGD_HEAP("Sweeping...");

    dvmHeapSweepSystemWeaks();

    /*
     * Live objects have a bit set in the mark bitmap, swap the mark
     * and live bitmaps.  The sweep can proceed concurrently viewing
     * the new live bitmap as the old mark bitmap, and vice versa.
     */
    dvmHeapSourceSwapBitmaps();

    if (gDvm.postVerify) {
        LOGV_HEAP("Verifying roots and heap after GC");
        verifyRootsAndHeap();
    }

    if (spec->isConcurrent) {
        dvmUnlockHeap();
        dvmResumeAllThreads(SUSPEND_FOR_GC);
        dirtyEnd = dvmGetRelativeTimeMsec();
    }
    dvmHeapSweepUnmarkedObjects(spec->isPartial, spec->isConcurrent,
                                &numObjectsFreed, &numBytesFreed);
    LOGD_HEAP("Cleaning up...");
    dvmHeapFinishMarkStep();
    if (spec->isConcurrent) {
        dvmLockHeap();
    }

    LOGD_HEAP("Done.");

    /* Now's a good time to adjust the heap size, since
     * we know what our utilization is.
     *
     * This doesn't actually resize any memory;
     * it just lets the heap grow more when necessary.
     */
    dvmHeapSourceGrowForUtilization();

    currAllocated = dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, NULL, 0);
    currFootprint = dvmHeapSourceGetValue(HS_FOOTPRINT, NULL, 0);

    dvmMethodTraceGCEnd();
    LOGV_HEAP("GC finished");

    gcHeap->gcRunning = false;

    LOGV_HEAP("Resuming threads");

    if (spec->isConcurrent) {
        /*
         * Wake-up any threads that blocked after a failed allocation
         * request.
         */
        dvmBroadcastCond(&gDvm.gcHeapCond);
    }

    if (!spec->isConcurrent) {
        dvmResumeAllThreads(SUSPEND_FOR_GC);
        dirtyEnd = dvmGetRelativeTimeMsec();
        /*
         * Restore the original thread scheduling priority if it was
         * changed at the start of the current garbage collection.
         */
        if (oldThreadPriority != INT_MAX) {
            os_lowerThreadPriority(oldThreadPriority);
        }
    }

    /*
     * Move queue of pending references back into Java.
     */
    dvmEnqueueClearedReferences(&gDvm.gcHeap->clearedReferences);

    gcEnd = dvmGetRelativeTimeMsec();
    percentFree = 100 - (size_t)(100.0f * (float)currAllocated / currFootprint);
    if (!spec->isConcurrent) {
        u4 markSweepTime = dirtyEnd - rootStart;
        u4 gcTime = gcEnd - rootStart;
        bool isSmall = numBytesFreed > 0 && numBytesFreed < 1024;
        ALOGD("%s freed %s%zdK, %d%% free %zdK/%zdK, paused %ums, total %ums",
             spec->reason,
             isSmall ? "<" : "",
             numBytesFreed ? MAX(numBytesFreed / 1024, 1) : 0,
             percentFree,
             currAllocated / 1024, currFootprint / 1024,
             markSweepTime, gcTime);
    } else {
        u4 rootTime = rootEnd - rootStart;
        u4 dirtyTime = dirtyEnd - dirtyStart;
        u4 gcTime = gcEnd - rootStart;
        bool isSmall = numBytesFreed > 0 && numBytesFreed < 1024;
        ALOGD("%s freed %s%zdK, %d%% free %zdK/%zdK, paused %ums+%ums, total %ums",
             spec->reason,
             isSmall ? "<" : "",
             numBytesFreed ? MAX(numBytesFreed / 1024, 1) : 0,
             percentFree,
             currAllocated / 1024, currFootprint / 1024,
             rootTime, dirtyTime, gcTime);
    }
    if (gcHeap->ddmHpifWhen != 0) {
        LOGD_HEAP("Sending VM heap info to DDM");
        dvmDdmSendHeapInfo(gcHeap->ddmHpifWhen, false);
    }
    if (gcHeap->ddmHpsgWhen != 0) {
        LOGD_HEAP("Dumping VM heap to DDM");
        dvmDdmSendHeapSegments(false, false);
    }
    if (gcHeap->ddmNhsgWhen != 0) {
        LOGD_HEAP("Dumping native heap to DDM");
        dvmDdmSendHeapSegments(false, true);
    }
}
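/*
 * dvmCollectGarbageInternal() reads four fields from the GcSpec it is given:
 * isPartial, isConcurrent, doPreserve and reason.  A minimal sketch of such a
 * spec (assumed here; the real GcSpec definition is not part of this example)
 * and a concurrent-collection instance:
 */
#include <stdbool.h>

struct GcSpecSketch {
    bool isPartial;       /* collect only the active heap, leave older heaps alone */
    bool isConcurrent;    /* resume mutator threads while tracing from the roots   */
    bool doPreserve;      /* false => soft references may be cleared aggressively  */
    const char *reason;   /* tag printed in the ALOGD summary line                 */
};

static const struct GcSpecSketch kGcConcurrentSketch = {
    true,               /* isPartial    */
    true,               /* isConcurrent */
    true,               /* doPreserve   */
    "GC_CONCURRENT",    /* reason       */
};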
Example #7
/* All objects for stronger reference levels have been
 * marked before this is called.
 */
void dvmHeapHandleReferences(Object *refListHead, enum RefType refType)
{
    Object *reference;
    GcMarkContext *markContext = &gDvm.gcHeap->markContext;
    const int offVmData = gDvm.offJavaLangRefReference_vmData;
    const int offReferent = gDvm.offJavaLangRefReference_referent;
    bool workRequired = false;

    size_t numCleared = 0;
    size_t numEnqueued = 0;
    reference = refListHead;
    while (reference != NULL) {
        Object *next;
        Object *referent;

        /* Pull the interesting fields out of the Reference object.
         */
        next = dvmGetFieldObject(reference, offVmData);
        referent = dvmGetFieldObject(reference, offReferent);

        //TODO: when handling REF_PHANTOM, unlink any references
        //      that fail this initial if().  We need to re-walk
        //      the list, and it would be nice to avoid the extra
        //      work.
        if (referent != NULL && !isMarked(ptr2chunk(referent), markContext)) {
            bool schedClear, schedEnqueue;

            /* This is the strongest reference that refers to referent.
             * Do the right thing.
             */
            switch (refType) {
            case REF_SOFT:
            case REF_WEAK:
                schedClear = clearReference(reference);
                schedEnqueue = enqueueReference(reference);
                break;
            case REF_PHANTOM:
                /* PhantomReferences are not cleared automatically.
                 * Until someone clears it (or the reference itself
                 * is collected), the referent must remain alive.
                 *
                 * It's necessary to fully mark the referent because
                 * it will still be present during the next GC, and
                 * all objects that it points to must be valid.
                 * (The referent will be marked outside of this loop,
                 * after handling all references of this strength, in
                 * case multiple references point to the same object.)
                 */
                schedClear = false;

                /* A PhantomReference is only useful with a
                 * queue, but since it's possible to create one
                 * without a queue, we need to check.
                 */
                schedEnqueue = enqueueReference(reference);
                break;
            default:
                assert(!"Bad reference type");
                schedClear = false;
                schedEnqueue = false;
                break;
            }
            numCleared += schedClear ? 1 : 0;
            numEnqueued += schedEnqueue ? 1 : 0;

            if (schedClear || schedEnqueue) {
                uintptr_t workBits;

                /* Stuff the clear/enqueue bits in the bottom of
                 * the pointer.  Assumes that objects are at least
                 * 4-byte aligned, so the two low bits are free.
                 *
                 * Note that we are adding the *Reference* (which
                 * is by definition already marked at this point) to
                 * this list; we're not adding the referent (which
                 * has already been cleared).
                 */
                assert(((intptr_t)reference & 3) == 0);
                assert(((WORKER_CLEAR | WORKER_ENQUEUE) & ~3) == 0);
                workBits = (schedClear ? WORKER_CLEAR : 0) |
                           (schedEnqueue ? WORKER_ENQUEUE : 0);
                if (!dvmHeapAddRefToLargeTable(
                        &gDvm.gcHeap->referenceOperations,
                        (Object *)((uintptr_t)reference | workBits)))
                {
                    LOGE_HEAP("dvmHeapHandleReferences(): no room for any more "
                            "reference operations\n");
                    dvmAbort();
                }
                workRequired = true;
            }

            if (refType != REF_PHANTOM) {
                /* Let later GCs know not to reschedule this reference.
                 */
                dvmSetFieldObject(reference, offVmData,
                        SCHEDULED_REFERENCE_MAGIC);
            } // else this is handled later for REF_PHANTOM

        } // else there was a stronger reference to the referent.

        reference = next;
    }
#define refType2str(r) \
    ((r) == REF_SOFT ? "soft" : ( \
     (r) == REF_WEAK ? "weak" : ( \
     (r) == REF_PHANTOM ? "phantom" : "UNKNOWN" )))

    LOGD_HEAP("dvmHeapHandleReferences(): cleared %zd, enqueued %zd %s references\n",
            numCleared, numEnqueued, refType2str(refType));

    /* Walk though the reference list again, and mark any non-clear/marked
     * referents.  Only PhantomReferences can have non-clear referents
     * at this point.
     */
    if (refType == REF_PHANTOM) {
        bool scanRequired = false;

        HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_REFERENCE_CLEANUP, 0);
        reference = refListHead;
        while (reference != NULL) {
            Object *next;
            Object *referent;

            /* Pull the interesting fields out of the Reference object.
             */
            next = dvmGetFieldObject(reference, offVmData);
            referent = dvmGetFieldObject(reference, offReferent);

            if (referent != NULL && !isMarked(ptr2chunk(referent), markContext)) {
                markObjectNonNull(referent, markContext);
                scanRequired = true;

                /* Let later GCs know not to reschedule this reference.
                 */
                dvmSetFieldObject(reference, offVmData,
                        SCHEDULED_REFERENCE_MAGIC);
            }

            reference = next;
        }
        HPROF_CLEAR_GC_SCAN_STATE();

        if (scanRequired) {
            processMarkStack(markContext);
        }
    }

    if (workRequired) {
        dvmSignalHeapWorker(false);
    }
}
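/*
 * clearReference() and enqueueReference() are not shown in this example.
 * A minimal sketch of their assumed contracts: clearing nulls the referent
 * field, and a reference is only worth enqueueing if it was constructed with
 * a queue and has not been enqueued yet.  The queue/queueNext offsets are
 * assumed to exist alongside the vmData/referent offsets used above.
 */
static bool clearReferenceSketch(Object *reference)
{
    /* Null out the referent so the mutator can no longer resurrect it. */
    dvmSetFieldObject(reference, gDvm.offJavaLangRefReference_referent, NULL);
    return true;    /* a clear operation should be scheduled */
}

static bool enqueueReferenceSketch(Object *reference)
{
    Object *queue = dvmGetFieldObject(reference,
            gDvm.offJavaLangRefReference_queue);
    Object *queueNext = dvmGetFieldObject(reference,
            gDvm.offJavaLangRefReference_queueNext);

    /* Only enqueue references that have a queue and are not already queued. */
    return queue != NULL && queueNext == NULL;
}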
Example #8
/*
 * Sets concurrentStart
 */
void dvmSetTargetHeapConcurrentStart(size_t size)
{
    concurrentStart = size;
    LOGD_HEAP("dvmSetTargetHeapConcurrentStart %zu", size);
}
Example #9
/*
 * Gets TargetHeapMinFree
 */
int dvmGetTargetHeapMinFree()
{
    HS_BOILERPLATE();
    LOGD_HEAP("dvmGetTargetHeapMinFree %zu", gHs->minFree);
    return gHs->minFree;
}
Example #10
/*
 * Sets TargetHeapMinFree
 */
void dvmSetTargetHeapMinFree(size_t size)
{
    HS_BOILERPLATE();
    gHs->minFree = size;
    LOGD_HEAP("dvmSetTargetHeapMinFree %zu", gHs->minFree);
}
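/*
 * Sketch (assumed, not from this file) of how minFree and the ideal-free
 * value above could bound the free headroom that a utilization-based growth
 * policy leaves after a GC.
 */
static size_t clampTargetFootprintSketch(size_t liveBytes, float targetUtilization,
                                         size_t minFree, size_t idealFree)
{
    /* Size the heap so that liveBytes / footprint == targetUtilization. */
    size_t target = (size_t)((float)liveBytes / targetUtilization);
    size_t freeBytes = target - liveBytes;

    if (freeBytes < minFree) {
        target = liveBytes + minFree;       /* never leave less than minFree   */
    } else if (freeBytes > idealFree) {
        target = liveBytes + idealFree;     /* never leave more than idealFree */
    }
    return target;
}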