Example #1
/*
 * Initiate garbage collection.
 *
 * NOTES:
 * - If we don't hold gDvm.threadListLock, it's possible for a thread to
 *   be added to the thread list while we work.  The thread should NOT
 *   start executing, so this is only interesting when we start chasing
 *   thread stacks.  (Before we do so, grab the lock.)
 *
 * We are not allowed to GC when the debugger has suspended the VM, which
 * is awkward because debugger requests can cause allocations.  The easiest
 * way to enforce this is to refuse to GC on an allocation made by the
 * JDWP thread -- we have to expand the heap or fail.
 */
void dvmCollectGarbageInternal(const GcSpec* spec)
{
    GcHeap *gcHeap = gDvm.gcHeap;
    u4 gcEnd = 0;
    u4 rootStart = 0, rootEnd = 0;
    u4 dirtyStart = 0, dirtyEnd = 0;
    size_t numObjectsFreed, numBytesFreed;
    size_t currAllocated, currFootprint;
    size_t percentFree;
    int oldThreadPriority = INT_MAX;

    /* The heap lock must be held.
     */

    if (gcHeap->gcRunning) {
        LOGW_HEAP("Attempted recursive GC");
        return;
    }

    gcHeap->gcRunning = true;

    rootStart = dvmGetRelativeTimeMsec();
    dvmSuspendAllThreads(SUSPEND_FOR_GC);

    /*
     * If we are not marking concurrently, raise the priority of the
     * thread performing the garbage collection.
     */
    if (!spec->isConcurrent) {
        oldThreadPriority = os_raiseThreadPriority();
    }
    if (gDvm.preVerify) {
        LOGV_HEAP("Verifying roots and heap before GC");
        verifyRootsAndHeap();
    }

    dvmMethodTraceGCBegin();

    /* Set up the marking context.
     */
    if (!dvmHeapBeginMarkStep(spec->isPartial)) {
        LOGE_HEAP("dvmHeapBeginMarkStep failed; aborting");
        dvmAbort();
    }

    /* Mark the set of objects that are strongly reachable from the roots.
     */
    LOGD_HEAP("Marking...");
    dvmHeapMarkRootSet();

    /* dvmHeapScanMarkedObjects() will build the lists of known
     * instances of the Reference classes.
     */
    assert(gcHeap->softReferences == NULL);
    assert(gcHeap->weakReferences == NULL);
    assert(gcHeap->finalizerReferences == NULL);
    assert(gcHeap->phantomReferences == NULL);
    assert(gcHeap->clearedReferences == NULL);

    if (spec->isConcurrent) {
        /*
         * Resume threads while tracing from the roots.  We unlock the
         * heap to allow mutator threads to allocate from free space.
         */
        dvmClearCardTable();
        dvmUnlockHeap();
        dvmResumeAllThreads(SUSPEND_FOR_GC);
        rootEnd = dvmGetRelativeTimeMsec();
    }

    /* Recursively mark any objects that marked objects point to strongly.
     * If we're not collecting soft references, soft-reachable
     * objects will also be marked.
     */
    LOGD_HEAP("Recursing...");
    dvmHeapScanMarkedObjects();

    if (spec->isConcurrent) {
        /*
         * Re-acquire the heap lock and perform the final thread
         * suspension.
         */
        dirtyStart = dvmGetRelativeTimeMsec();
        dvmLockHeap();
        dvmSuspendAllThreads(SUSPEND_FOR_GC);
        /*
         * As no barrier intercepts root updates, we conservatively
         * assume all roots may be gray and re-mark them.
         */
        dvmHeapReMarkRootSet();
        /*
         * With the exception of reference objects and weak interned
         * strings, all gray objects should now be on dirty cards.
         */
        if (gDvm.verifyCardTable) {
            dvmVerifyCardTable();
        }
        /*
         * Recursively mark gray objects pointed to by the roots or by
         * heap objects dirtied during the concurrent mark.
         */
        dvmHeapReScanMarkedObjects();
    }

    /*
     * All strongly-reachable objects have now been marked.  Process
     * weakly-reachable objects discovered while tracing.
     */
    dvmHeapProcessReferences(&gcHeap->softReferences,
                             spec->doPreserve == false,
                             &gcHeap->weakReferences,
                             &gcHeap->finalizerReferences,
                             &gcHeap->phantomReferences);

#if defined(WITH_JIT)
    /*
     * Patching a chaining cell is very cheap as it only updates 4 words. It's
     * the overhead of stopping all threads and synchronizing the I/D cache
     * that makes it expensive.
     *
     * Therefore we batch those work orders in a queue and go through them
     * when threads are suspended for GC.
     */
    dvmCompilerPerformSafePointChecks();
#endif

    LOGD_HEAP("Sweeping...");

    dvmHeapSweepSystemWeaks();

    /*
     * Live objects have a bit set in the mark bitmap; swap the mark
     * and live bitmaps.  The sweep can proceed concurrently, viewing
     * the new live bitmap as the old mark bitmap, and vice versa.
     */
    dvmHeapSourceSwapBitmaps();

    if (gDvm.postVerify) {
        LOGV_HEAP("Verifying roots and heap after GC");
        verifyRootsAndHeap();
    }

    if (spec->isConcurrent) {
        dvmUnlockHeap();
        dvmResumeAllThreads(SUSPEND_FOR_GC);
        dirtyEnd = dvmGetRelativeTimeMsec();
    }
    dvmHeapSweepUnmarkedObjects(spec->isPartial, spec->isConcurrent,
                                &numObjectsFreed, &numBytesFreed);
    LOGD_HEAP("Cleaning up...");
    dvmHeapFinishMarkStep();
    if (spec->isConcurrent) {
        dvmLockHeap();
    }

    LOGD_HEAP("Done.");

    /* Now's a good time to adjust the heap size, since
     * we know what our utilization is.
     *
     * This doesn't actually resize any memory;
     * it just lets the heap grow more when necessary.
     */
    dvmHeapSourceGrowForUtilization();

    currAllocated = dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, NULL, 0);
    currFootprint = dvmHeapSourceGetValue(HS_FOOTPRINT, NULL, 0);

    dvmMethodTraceGCEnd();
    LOGV_HEAP("GC finished");

    gcHeap->gcRunning = false;

    LOGV_HEAP("Resuming threads");

    if (spec->isConcurrent) {
        /*
         * Wake up any threads that blocked after a failed allocation
         * request.
         */
        dvmBroadcastCond(&gDvm.gcHeapCond);
    }

    if (!spec->isConcurrent) {
        dvmResumeAllThreads(SUSPEND_FOR_GC);
        dirtyEnd = dvmGetRelativeTimeMsec();
        /*
         * Restore the original thread scheduling priority if it was
         * changed at the start of the current garbage collection.
         */
        if (oldThreadPriority != INT_MAX) {
            os_lowerThreadPriority(oldThreadPriority);
        }
    }

    /*
     * Move queue of pending references back into Java.
     */
    dvmEnqueueClearedReferences(&gDvm.gcHeap->clearedReferences);

    gcEnd = dvmGetRelativeTimeMsec();
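    /* Express unused space as an integer percentage of the footprint.
     * The float math sidesteps overflowing 100 * currAllocated in
     * 32-bit size_t arithmetic, at the cost of a little precision.
     */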
    percentFree = 100 - (size_t)(100.0f * (float)currAllocated / currFootprint);
    if (!spec->isConcurrent) {
        u4 markSweepTime = dirtyEnd - rootStart;
        u4 gcTime = gcEnd - rootStart;
        bool isSmall = numBytesFreed > 0 && numBytesFreed < 1024;
        ALOGD("%s freed %s%zdK, %d%% free %zdK/%zdK, paused %ums, total %ums",
             spec->reason,
             isSmall ? "<" : "",
             numBytesFreed ? MAX(numBytesFreed / 1024, 1) : 0,
             percentFree,
             currAllocated / 1024, currFootprint / 1024,
             markSweepTime, gcTime);
    } else {
        u4 rootTime = rootEnd - rootStart;
        u4 dirtyTime = dirtyEnd - dirtyStart;
        u4 gcTime = gcEnd - rootStart;
        bool isSmall = numBytesFreed > 0 && numBytesFreed < 1024;
        ALOGD("%s freed %s%zdK, %d%% free %zdK/%zdK, paused %ums+%ums, total %ums",
             spec->reason,
             isSmall ? "<" : "",
             numBytesFreed ? MAX(numBytesFreed / 1024, 1) : 0,
             percentFree,
             currAllocated / 1024, currFootprint / 1024,
             rootTime, dirtyTime, gcTime);
    }
    if (gcHeap->ddmHpifWhen != 0) {
        LOGD_HEAP("Sending VM heap info to DDM");
        dvmDdmSendHeapInfo(gcHeap->ddmHpifWhen, false);
    }
    if (gcHeap->ddmHpsgWhen != 0) {
        LOGD_HEAP("Dumping VM heap to DDM");
        dvmDdmSendHeapSegments(false, false);
    }
    if (gcHeap->ddmNhsgWhen != 0) {
        LOGD_HEAP("Dumping native heap to DDM");
        dvmDdmSendHeapSegments(false, true);
    }
}
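
The block comment above stipulates that the heap lock is already held when dvmCollectGarbageInternal() runs. Below is a minimal caller sketch under that contract, assuming the dvmLockHeap()/dvmUnlockHeap() and dvmWaitForConcurrentGcToComplete() helpers that appear in these listings plus a predefined GC_EXPLICIT GcSpec; the wrapper name and the spec are illustrative assumptions, not part of the listing.

/* Hypothetical wrapper: run an explicit, blocking GC while honoring the
 * "heap lock must be held" precondition of dvmCollectGarbageInternal().
 */
void collectGarbageForExample()
{
    dvmLockHeap();
    /* If a concurrent GC is already in flight, wait it out instead of
     * hitting the "Attempted recursive GC" early return above.
     */
    while (gDvm.gcHeap->gcRunning) {
        dvmWaitForConcurrentGcToComplete();
    }
    dvmCollectGarbageInternal(GC_EXPLICIT);  /* GC_EXPLICIT: assumed GcSpec */
    dvmUnlockHeap();
}
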
Example #2
/*
 * The heap worker thread sits quietly until the GC tells it there's work
 * to do.
 */
static void* heapWorkerThreadStart(void* arg)
{
    Thread *self = dvmThreadSelf();

    UNUSED_PARAMETER(arg);

    LOGV("HeapWorker thread started (threadid=%d)\n", self->threadId);

    /* tell the main thread that we're ready */
    lockMutex(&gDvm.heapWorkerLock);
    gDvm.heapWorkerReady = true;
    dvmSignalCond(&gDvm.heapWorkerCond);
    dvmUnlockMutex(&gDvm.heapWorkerLock);

    lockMutex(&gDvm.heapWorkerLock);
    while (!gDvm.haltHeapWorker) {
        struct timespec trimtime;
        bool timedwait = false;

        /* We're done running interpreted code for now. */
        dvmChangeStatus(NULL, THREAD_VMWAIT);

        /* Signal anyone who wants to know when we're done. */
        dvmBroadcastCond(&gDvm.heapWorkerIdleCond);

        /* Trim the heap if we were asked to. */
        trimtime = gDvm.gcHeap->heapWorkerNextTrim;
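        /* A zeroed timespec is the sentinel for "no trim scheduled";
         * both fields are reset to zero below once a trim completes.
         */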
        if (trimtime.tv_sec != 0 || trimtime.tv_nsec != 0) {
            struct timespec now;

#ifdef HAVE_TIMEDWAIT_MONOTONIC
            clock_gettime(CLOCK_MONOTONIC, &now);       // relative time
#else
            struct timeval tvnow;
            gettimeofday(&tvnow, NULL);                 // absolute time
            now.tv_sec = tvnow.tv_sec;
            now.tv_nsec = tvnow.tv_usec * 1000;
#endif

            if (trimtime.tv_sec < now.tv_sec ||
                (trimtime.tv_sec == now.tv_sec &&
                 trimtime.tv_nsec <= now.tv_nsec))
            {
                size_t madvisedSizes[HEAP_SOURCE_MAX_HEAP_COUNT];

                /*
                 * Acquire the gcHeapLock.  This requires releasing the
                 * heapWorkerLock before the gcHeapLock is acquired.
                 * It is possible that the gcHeapLock may be acquired
                 * during a concurrent GC in which case heapWorkerLock
                 * is held by the GC and we are unable to make forward
                 * progress.  We avoid deadlock by releasing the
                 * gcHeapLock and then waiting to be signaled when the
                 * GC completes.  There is no guarantee that the next
                 * time we are run will coincide with GC inactivity so
                 * the check and wait must be performed within a loop.
                 */
                dvmUnlockMutex(&gDvm.heapWorkerLock);
                dvmLockHeap();
                while (gDvm.gcHeap->gcRunning) {
                    dvmWaitForConcurrentGcToComplete();
                }
                dvmLockMutex(&gDvm.heapWorkerLock);

                memset(madvisedSizes, 0, sizeof(madvisedSizes));
                dvmHeapSourceTrim(madvisedSizes, HEAP_SOURCE_MAX_HEAP_COUNT);
                dvmLogMadviseStats(madvisedSizes, HEAP_SOURCE_MAX_HEAP_COUNT);

                dvmUnlockHeap();

                trimtime.tv_sec = 0;
                trimtime.tv_nsec = 0;
                gDvm.gcHeap->heapWorkerNextTrim = trimtime;
            } else {
                timedwait = true;
            }
        }

        /* sleep until signaled */
        if (timedwait) {
            int cc __attribute__ ((__unused__));
#ifdef HAVE_TIMEDWAIT_MONOTONIC
            cc = pthread_cond_timedwait_monotonic(&gDvm.heapWorkerCond,
                    &gDvm.heapWorkerLock, &trimtime);
#else
            cc = pthread_cond_timedwait(&gDvm.heapWorkerCond,
                    &gDvm.heapWorkerLock, &trimtime);
#endif
            assert(cc == 0 || cc == ETIMEDOUT);
        } else {
            dvmWaitCond(&gDvm.heapWorkerCond, &gDvm.heapWorkerLock);
        }

        /*
         * Return to the running state before doing heap work.  This
         * will block if the GC has initiated a suspend.  We release
         * the heapWorkerLock beforehand for the GC to make progress
         * and wait to be signaled after the GC completes.  There is
         * no guarantee that the next time we are run will coincide
         * with GC inactivity so the check and wait must be performed
         * within a loop.
         */
        dvmUnlockMutex(&gDvm.heapWorkerLock);
        dvmChangeStatus(NULL, THREAD_RUNNING);
        dvmLockHeap();
        while (gDvm.gcHeap->gcRunning) {
            dvmWaitForConcurrentGcToComplete();
        }
        dvmLockMutex(&gDvm.heapWorkerLock);
        dvmUnlockHeap();
        LOGV("HeapWorker is awake\n");

        /* Process any events in the queue.
         */
        doHeapWork(self);
    }
    dvmUnlockMutex(&gDvm.heapWorkerLock);

    if (gDvm.verboseShutdown)
        LOGD("HeapWorker thread shutting down\n");
    return NULL;
}
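
The worker above trims only when heapWorkerNextTrim holds a nonzero deadline; otherwise it just sleeps on heapWorkerCond. For completeness, here is a sketch of the producing side. The fields, locks, and the HAVE_TIMEDWAIT_MONOTONIC clock selection mirror the listing; the helper name is hypothetical.

/* Hypothetical scheduler: ask the heap worker to trim the heap roughly
 * timeoutSec seconds from now.  Uses the same clock as
 * heapWorkerThreadStart() so the deadline comparison stays consistent.
 */
static void scheduleHeapTrimForExample(size_t timeoutSec)
{
    struct timespec timeout;

#ifdef HAVE_TIMEDWAIT_MONOTONIC
    clock_gettime(CLOCK_MONOTONIC, &timeout);
#else
    struct timeval now;
    gettimeofday(&now, NULL);
    timeout.tv_sec = now.tv_sec;
    timeout.tv_nsec = now.tv_usec * 1000;
#endif
    timeout.tv_sec += timeoutSec;

    lockMutex(&gDvm.heapWorkerLock);
    gDvm.gcHeap->heapWorkerNextTrim = timeout;
    /* Wake the worker so it re-arms its timed wait with the new deadline. */
    dvmSignalCond(&gDvm.heapWorkerCond);
    dvmUnlockMutex(&gDvm.heapWorkerLock);
}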