/*
 * public static native void getHeapSpaceStats(long[] data)
 */
static void Dalvik_dalvik_system_VMDebug_getHeapSpaceStats(const u4* args,
    JValue* pResult)
{
    ArrayObject* dataArray = (ArrayObject*) args[0];

    if (dataArray == NULL || dataArray->length < 6) {
        RETURN_VOID();
    }

    jlong* arr = (jlong*)(void*)dataArray->contents;

    int j = 0;
    size_t per_heap_allocated[2];
    size_t per_heap_size[2];
    memset(per_heap_allocated, 0, sizeof(per_heap_allocated));
    memset(per_heap_size, 0, sizeof(per_heap_size));
    dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, per_heap_allocated, 2);
    dvmHeapSourceGetValue(HS_FOOTPRINT, per_heap_size, 2);
    jlong heapSize = per_heap_size[0];
    jlong heapUsed = per_heap_allocated[0];
    jlong heapFree = heapSize - heapUsed;
    jlong zygoteSize = per_heap_size[1];
    jlong zygoteUsed = per_heap_allocated[1];
    jlong zygoteFree = zygoteSize - zygoteUsed;
    arr[j++] = heapSize;
    arr[j++] = heapUsed;
    arr[j++] = heapFree;
    arr[j++] = zygoteSize;
    arr[j++] = zygoteUsed;
    arr[j++] = zygoteFree;

    RETURN_VOID();
}
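For context, here is a minimal caller-side sketch (not part of the original source) showing how the six jlong slots written above would be interpreted; the slot order follows the assignments in the function (active heap first, then zygote heap), and the jlong typedef is an assumption for a standalone build.

/* Hypothetical caller sketch for the buffer filled by getHeapSpaceStats(). */
#include <stdio.h>

typedef long long jlong;  /* assumption: jlong is a 64-bit integer */

static void printHeapSpaceStats(const jlong stats[6])
{
    printf("app:    %lld used / %lld size (%lld free)\n",
           stats[1], stats[0], stats[2]);
    printf("zygote: %lld used / %lld size (%lld free)\n",
           stats[4], stats[3], stats[5]);
}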
Example No. 2
int dvmGetHeapDebugInfo(HeapDebugInfoType info)
{
    switch (info) {
    case kVirtualHeapSize:
        return (int)dvmHeapSourceGetValue(HS_FOOTPRINT, NULL, 0);
    case kVirtualHeapAllocated:
        return (int)dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, NULL, 0);
    default:
        return -1;
    }
}
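A hypothetical usage sketch (not in the source) showing the intended calling pattern: query both counters and treat -1 as "unknown info type", mirroring the default case above.

/* Hypothetical caller sketch for dvmGetHeapDebugInfo(). */
static void logVirtualHeapUsage(void)
{
    int size = dvmGetHeapDebugInfo(kVirtualHeapSize);
    int allocated = dvmGetHeapDebugInfo(kVirtualHeapAllocated);
    if (size >= 0 && allocated >= 0) {
        LOGD("virtual heap: %d of %d bytes in use", allocated, size);
    }
}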
Example No. 3
/* Walk through the list of objects that haven't been
 * marked and free them.
 */
void
dvmHeapSweepUnmarkedObjects(int *numFreed, size_t *sizeFreed)
{
    const HeapBitmap *markBitmaps;
    const GcMarkContext *markContext;
    HeapBitmap objectBitmaps[HEAP_SOURCE_MAX_HEAP_COUNT];
    size_t origObjectsAllocated;
    size_t origBytesAllocated;
    size_t numBitmaps;

    /* All reachable objects have been marked.
     * Detach any unreachable interned strings before
     * we sweep.
     */
    dvmGcDetachDeadInternedStrings(isUnmarkedObject);

    /* Free any known objects that are not marked.
     */
    origObjectsAllocated = dvmHeapSourceGetValue(HS_OBJECTS_ALLOCATED, NULL, 0);
    origBytesAllocated = dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, NULL, 0);

    markContext = &gDvm.gcHeap->markContext;
    markBitmaps = markContext->bitmaps;
    numBitmaps = dvmHeapSourceGetObjectBitmaps(objectBitmaps,
            HEAP_SOURCE_MAX_HEAP_COUNT);
#ifndef NDEBUG
    if (numBitmaps != markContext->numBitmaps) {
        LOGE("heap bitmap count mismatch: %zd != %zd\n",
                numBitmaps, markContext->numBitmaps);
        dvmAbort();
    }
#endif

#if WITH_HPROF && WITH_HPROF_UNREACHABLE
    hprofDumpUnmarkedObjects(markBitmaps, objectBitmaps, numBitmaps);
#endif

    dvmHeapBitmapXorWalkLists(markBitmaps, objectBitmaps, numBitmaps,
            sweepBitmapCallback, NULL);

    *numFreed = origObjectsAllocated -
            dvmHeapSourceGetValue(HS_OBJECTS_ALLOCATED, NULL, 0);
    *sizeFreed = origBytesAllocated -
            dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, NULL, 0);

#ifdef WITH_PROFILER
    if (gDvm.allocProf.enabled) {
        gDvm.allocProf.freeCount += *numFreed;
        gDvm.allocProf.freeSize += *sizeFreed;
    }
#endif
}
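Note that the freed totals above are derived as before/after deltas of the heap-source counters rather than being accumulated inside the sweep callback. A standalone sketch of that pattern (the helper name is hypothetical):

/* Sketch of the delta pattern used above: snapshot the counters, run an
 * arbitrary freeing step, and report the difference. */
static void countFreedByDelta(void (*freeStep)(void),
                              size_t *numFreed, size_t *sizeFreed)
{
    size_t origObjects = dvmHeapSourceGetValue(HS_OBJECTS_ALLOCATED, NULL, 0);
    size_t origBytes   = dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, NULL, 0);
    freeStep();
    *numFreed  = origObjects - dvmHeapSourceGetValue(HS_OBJECTS_ALLOCATED, NULL, 0);
    *sizeFreed = origBytes   - dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, NULL, 0);
}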
Example No. 4
void dvmDdmSendHeapInfo(int reason, bool shouldLock)
{
    struct timeval now;
    u8 nowMs;
    u1 *buf, *b;

    buf = (u1 *)malloc(HPIF_SIZE(1));
    if (buf == NULL) {
        return;
    }
    b = buf;

    /* If there's a one-shot 'when', reset it.
     */
    if (reason == gDvm.gcHeap->ddmHpifWhen) {
        if (shouldLock && ! dvmLockHeap()) {
            ALOGW("%s(): can't lock heap to clear when", __func__);
            goto skip_when;
        }
        if (reason == gDvm.gcHeap->ddmHpifWhen) {
            if (gDvm.gcHeap->ddmHpifWhen == HPIF_WHEN_NEXT_GC) {
                gDvm.gcHeap->ddmHpifWhen = HPIF_WHEN_NEVER;
            }
        }
        if (shouldLock) {
            dvmUnlockHeap();
        }
    }
skip_when:

    /* The current time, in milliseconds since 0:00 GMT, 1/1/70.
     */
    if (gettimeofday(&now, NULL) < 0) {
        nowMs = 0;
    } else {
        nowMs = (u8)now.tv_sec * 1000 + now.tv_usec / 1000;
    }

    /* number of heaps */
    set4BE(b, 1); b += 4;

    /* For each heap (of which there is one) */
    {
        /* heap ID */
        set4BE(b, DEFAULT_HEAP_ID); b += 4;

        /* timestamp */
        set8BE(b, nowMs); b += 8;

        /* 'when' value */
        *b++ = (u1)reason;

        /* max allowed heap size in bytes */
        set4BE(b, dvmHeapSourceGetMaximumSize()); b += 4;

        /* current heap size in bytes */
        set4BE(b, dvmHeapSourceGetValue(HS_FOOTPRINT, NULL, 0)); b += 4;

        /* number of bytes allocated */
        set4BE(b, dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, NULL, 0)); b += 4;

        /* number of objects allocated */
        set4BE(b, dvmHeapSourceGetValue(HS_OBJECTS_ALLOCATED, NULL, 0)); b += 4;
    }
    assert((intptr_t)b == (intptr_t)buf + (intptr_t)HPIF_SIZE(1));

    dvmDbgDdmSendChunk(CHUNK_TYPE("HPIF"), b - buf, buf);
    free(buf);
}
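The set4BE/set8BE macros pack values into the HPIF chunk in big-endian byte order; their definitions are not shown here, but a minimal sketch of the 4-byte case, assuming the usual semantics of Dalvik's Bits.h helpers, looks like this:

/* Sketch of big-endian packing as used above (assumed semantics of
 * set4BE: most significant byte first). */
static void sketch_set4BE(u1 *buf, u4 val)
{
    buf[0] = (u1)(val >> 24);
    buf[1] = (u1)(val >> 16);
    buf[2] = (u1)(val >> 8);
    buf[3] = (u1)val;
}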
Example No. 5
/*
 * Initiate garbage collection.
 *
 * NOTES:
 * - If we don't hold gDvm.threadListLock, it's possible for a thread to
 *   be added to the thread list while we work.  The thread should NOT
 *   start executing, so this is only interesting when we start chasing
 *   thread stacks.  (Before we do so, grab the lock.)
 *
 * We are not allowed to GC when the debugger has suspended the VM, which
 * is awkward because debugger requests can cause allocations.  The easiest
 * way to enforce this is to refuse to GC on an allocation made by the
 * JDWP thread -- we have to expand the heap or fail.
 */
void dvmCollectGarbageInternal(const GcSpec* spec)
{
    GcHeap *gcHeap = gDvm.gcHeap;
    u4 gcEnd = 0;
    u4 rootStart = 0, rootEnd = 0;
    u4 dirtyStart = 0, dirtyEnd = 0;
    size_t numObjectsFreed, numBytesFreed;
    size_t currAllocated, currFootprint;
    size_t percentFree;
    int oldThreadPriority = INT_MAX;

    /* The heap lock must be held.
     */

    if (gcHeap->gcRunning) {
        LOGW_HEAP("Attempted recursive GC");
        return;
    }

    gcHeap->gcRunning = true;

    rootStart = dvmGetRelativeTimeMsec();
    dvmSuspendAllThreads(SUSPEND_FOR_GC);

    /*
     * If we are not marking concurrently raise the priority of the
     * thread performing the garbage collection.
     */
    if (!spec->isConcurrent) {
        oldThreadPriority = os_raiseThreadPriority();
    }
    if (gDvm.preVerify) {
        LOGV_HEAP("Verifying roots and heap before GC");
        verifyRootsAndHeap();
    }

    dvmMethodTraceGCBegin();

    /* Set up the marking context.
     */
    if (!dvmHeapBeginMarkStep(spec->isPartial)) {
        LOGE_HEAP("dvmHeapBeginMarkStep failed; aborting");
        dvmAbort();
    }

    /* Mark the set of objects that are strongly reachable from the roots.
     */
    LOGD_HEAP("Marking...");
    dvmHeapMarkRootSet();

    /* dvmHeapScanMarkedObjects() will build the lists of known
     * instances of the Reference classes.
     */
    assert(gcHeap->softReferences == NULL);
    assert(gcHeap->weakReferences == NULL);
    assert(gcHeap->finalizerReferences == NULL);
    assert(gcHeap->phantomReferences == NULL);
    assert(gcHeap->clearedReferences == NULL);

    if (spec->isConcurrent) {
        /*
         * Resume threads while tracing from the roots.  We unlock the
         * heap to allow mutator threads to allocate from free space.
         */
        dvmClearCardTable();
        dvmUnlockHeap();
        dvmResumeAllThreads(SUSPEND_FOR_GC);
        rootEnd = dvmGetRelativeTimeMsec();
    }

    /* Recursively mark any objects that marked objects point to strongly.
     * If we're not collecting soft references, soft-reachable
     * objects will also be marked.
     */
    LOGD_HEAP("Recursing...");
    dvmHeapScanMarkedObjects();

    if (spec->isConcurrent) {
        /*
         * Re-acquire the heap lock and perform the final thread
         * suspension.
         */
        dirtyStart = dvmGetRelativeTimeMsec();
        dvmLockHeap();
        dvmSuspendAllThreads(SUSPEND_FOR_GC);
        /*
         * As no barrier intercepts root updates, we conservatively
         * assume all roots may be gray and re-mark them.
         */
        dvmHeapReMarkRootSet();
        /*
         * With the exception of reference objects and weak interned
         * strings, all gray objects should now be on dirty cards.
         */
        if (gDvm.verifyCardTable) {
            dvmVerifyCardTable();
        }
        /*
         * Recursively mark gray objects pointed to by the roots or by
         * heap objects dirtied during the concurrent mark.
         */
        dvmHeapReScanMarkedObjects();
    }

    /*
     * All strongly-reachable objects have now been marked.  Process
     * weakly-reachable objects discovered while tracing.
     */
    dvmHeapProcessReferences(&gcHeap->softReferences,
                             spec->doPreserve == false,
                             &gcHeap->weakReferences,
                             &gcHeap->finalizerReferences,
                             &gcHeap->phantomReferences);

#if defined(WITH_JIT)
    /*
     * Patching a chaining cell is very cheap as it only updates 4 words. It's
     * the overhead of stopping all threads and synchronizing the I/D cache
     * that makes it expensive.
     *
     * Therefore we batch those work orders in a queue and go through them
     * when threads are suspended for GC.
     */
    dvmCompilerPerformSafePointChecks();
#endif

    LOGD_HEAP("Sweeping...");

    dvmHeapSweepSystemWeaks();

    /*
     * Live objects have a bit set in the mark bitmap, swap the mark
     * and live bitmaps.  The sweep can proceed concurrently viewing
     * the new live bitmap as the old mark bitmap, and vice versa.
     */
    dvmHeapSourceSwapBitmaps();

    if (gDvm.postVerify) {
        LOGV_HEAP("Verifying roots and heap after GC");
        verifyRootsAndHeap();
    }

    if (spec->isConcurrent) {
        dvmUnlockHeap();
        dvmResumeAllThreads(SUSPEND_FOR_GC);
        dirtyEnd = dvmGetRelativeTimeMsec();
    }
    dvmHeapSweepUnmarkedObjects(spec->isPartial, spec->isConcurrent,
                                &numObjectsFreed, &numBytesFreed);
    LOGD_HEAP("Cleaning up...");
    dvmHeapFinishMarkStep();
    if (spec->isConcurrent) {
        dvmLockHeap();
    }

    LOGD_HEAP("Done.");

    /* Now's a good time to adjust the heap size, since
     * we know what our utilization is.
     *
     * This doesn't actually resize any memory;
     * it just lets the heap grow more when necessary.
     */
    dvmHeapSourceGrowForUtilization();

    currAllocated = dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, NULL, 0);
    currFootprint = dvmHeapSourceGetValue(HS_FOOTPRINT, NULL, 0);

    dvmMethodTraceGCEnd();
    LOGV_HEAP("GC finished");

    gcHeap->gcRunning = false;

    LOGV_HEAP("Resuming threads");

    if (spec->isConcurrent) {
        /*
         * Wake-up any threads that blocked after a failed allocation
         * request.
         */
        dvmBroadcastCond(&gDvm.gcHeapCond);
    }

    if (!spec->isConcurrent) {
        dvmResumeAllThreads(SUSPEND_FOR_GC);
        dirtyEnd = dvmGetRelativeTimeMsec();
        /*
         * Restore the original thread scheduling priority if it was
         * changed at the start of the current garbage collection.
         */
        if (oldThreadPriority != INT_MAX) {
            os_lowerThreadPriority(oldThreadPriority);
        }
    }

    /*
     * Move queue of pending references back into Java.
     */
    dvmEnqueueClearedReferences(&gDvm.gcHeap->clearedReferences);

    gcEnd = dvmGetRelativeTimeMsec();
    percentFree = 100 - (size_t)(100.0f * (float)currAllocated / currFootprint);
    if (!spec->isConcurrent) {
        u4 markSweepTime = dirtyEnd - rootStart;
        u4 gcTime = gcEnd - rootStart;
        bool isSmall = numBytesFreed > 0 && numBytesFreed < 1024;
        ALOGD("%s freed %s%zdK, %d%% free %zdK/%zdK, paused %ums, total %ums",
             spec->reason,
             isSmall ? "<" : "",
             numBytesFreed ? MAX(numBytesFreed / 1024, 1) : 0,
             percentFree,
             currAllocated / 1024, currFootprint / 1024,
             markSweepTime, gcTime);
    } else {
        u4 rootTime = rootEnd - rootStart;
        u4 dirtyTime = dirtyEnd - dirtyStart;
        u4 gcTime = gcEnd - rootStart;
        bool isSmall = numBytesFreed > 0 && numBytesFreed < 1024;
        ALOGD("%s freed %s%zdK, %d%% free %zdK/%zdK, paused %ums+%ums, total %ums",
             spec->reason,
             isSmall ? "<" : "",
             numBytesFreed ? MAX(numBytesFreed / 1024, 1) : 0,
             percentFree,
             currAllocated / 1024, currFootprint / 1024,
             rootTime, dirtyTime, gcTime);
    }
    if (gcHeap->ddmHpifWhen != 0) {
        LOGD_HEAP("Sending VM heap info to DDM");
        dvmDdmSendHeapInfo(gcHeap->ddmHpifWhen, false);
    }
    if (gcHeap->ddmHpsgWhen != 0) {
        LOGD_HEAP("Dumping VM heap to DDM");
        dvmDdmSendHeapSegments(false, false);
    }
    if (gcHeap->ddmNhsgWhen != 0) {
        LOGD_HEAP("Dumping native heap to DDM");
        dvmDdmSendHeapSegments(false, true);
    }
}
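A worked instance of the utilization math in the log lines above, with hypothetical numbers: 3 MiB allocated in a 4 MiB footprint yields 25% free, which would be logged as "25% free 3072K/4096K".

/* Sketch isolating the percentFree computation used above. */
static size_t percentFreeOf(size_t allocated, size_t footprint)
{
    return 100 - (size_t)(100.0f * (float)allocated / footprint);
}
/* percentFreeOf(3 << 20, 4 << 20) == 25 */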
Example No. 6
void dvmLogGcStats(size_t numFreed, size_t sizeFreed, size_t gcTimeMs)
{
    size_t perHeapActualSize[HEAP_SOURCE_MAX_HEAP_COUNT],
           perHeapAllowedSize[HEAP_SOURCE_MAX_HEAP_COUNT],
           perHeapNumAllocated[HEAP_SOURCE_MAX_HEAP_COUNT],
           perHeapSizeAllocated[HEAP_SOURCE_MAX_HEAP_COUNT];
    unsigned char eventBuf[1 + (1 + sizeof(long long)) * 4];
    size_t actualSize, allowedSize, numAllocated, sizeAllocated;
    size_t softLimit = dvmHeapSourceGetIdealFootprint();
    size_t nHeaps = dvmHeapSourceGetNumHeaps();

    /* Enough to quiet gcc's uninitialized-variable warning. */
    perHeapActualSize[0] = perHeapAllowedSize[0] = perHeapNumAllocated[0] =
                           perHeapSizeAllocated[0] = 0;
    actualSize = dvmHeapSourceGetValue(HS_FOOTPRINT, perHeapActualSize,
                                       HEAP_SOURCE_MAX_HEAP_COUNT);
    allowedSize = dvmHeapSourceGetValue(HS_ALLOWED_FOOTPRINT,
                      perHeapAllowedSize, HEAP_SOURCE_MAX_HEAP_COUNT);
    numAllocated = dvmHeapSourceGetValue(HS_OBJECTS_ALLOCATED,
                      perHeapNumAllocated, HEAP_SOURCE_MAX_HEAP_COUNT);
    sizeAllocated = dvmHeapSourceGetValue(HS_BYTES_ALLOCATED,
                      perHeapSizeAllocated, HEAP_SOURCE_MAX_HEAP_COUNT);

    /*
     * Construct the first 64-bit value to write to the log.
     * Global information:
     *
     * [63   ] Must be zero
     * [62-24] ASCII process identifier
     * [23-12] GC time in ms
     * [11- 0] Bytes freed
     *
     */
    long long event0;
    event0 = 0LL << 63 |
            (long long)intToFloat12(gcTimeMs) << 12 |
            (long long)intToFloat12(sizeFreed);
    insertProcessName(&event0);

    /*
     * Aggregated heap stats:
     *
     * [63-62] 10
     * [61-60] Reserved; must be zero
     * [59-48] Objects freed
     * [47-36] Actual size (current footprint)
     * [35-24] Allowed size (current hard max)
     * [23-12] Objects allocated
     * [11- 0] Bytes allocated
     */
    long long event1;
    event1 = 2LL << 62 |
            (long long)intToFloat12(numFreed) << 48 |
            (long long)intToFloat12(actualSize) << 36 |
            (long long)intToFloat12(allowedSize) << 24 |
            (long long)intToFloat12(numAllocated) << 12 |
            (long long)intToFloat12(sizeAllocated);

    /*
     * Report the current state of the zygote heap(s).
     *
     * The active heap is always heap[0].  We can be in one of three states
     * at present:
     *
     *  (1) Still in the zygote.  Zygote using heap[0].
     *  (2) In the zygote, when the first child is started.  We created a
     *      new heap just before the first fork() call, so the original
     *      "zygote heap" is now heap[1], and we have a small heap[0] for
     *      anything we do from here on.
     *  (3) In an app process.  The app gets a new heap[0], and can also
     *      see the two zygote heaps [1] and [2] (probably unwise to
     *      assume any specific ordering).
     *
     * So if nHeaps == 1, we want the stats from heap[0]; else we want
     * the sum of the values from heap[1] to heap[nHeaps-1].
     *
     *
     * Zygote heap stats (except for the soft limit, which belongs to the
     * active heap):
     *
     * [63-62] 11
     * [61-60] Reserved; must be zero
     * [59-48] Soft Limit (for the active heap)
     * [47-36] Actual size (current footprint)
     * [35-24] Allowed size (current hard max)
     * [23-12] Objects allocated
     * [11- 0] Bytes allocated
     */
    long long event2;
    size_t zActualSize, zAllowedSize, zNumAllocated, zSizeAllocated;
    int firstHeap = (nHeaps == 1) ? 0 : 1;
    size_t hh;

    zActualSize = zAllowedSize = zNumAllocated = zSizeAllocated = 0;
    for (hh = firstHeap; hh < nHeaps; hh++) {
        zActualSize += perHeapActualSize[hh];
        zAllowedSize += perHeapAllowedSize[hh];
        zNumAllocated += perHeapNumAllocated[hh];
        zSizeAllocated += perHeapSizeAllocated[hh];
    }
    event2 = 3LL << 62 |
            (long long)intToFloat12(softLimit) << 48 |
            (long long)intToFloat12(zActualSize) << 36 |
            (long long)intToFloat12(zAllowedSize) << 24 |
            (long long)intToFloat12(zNumAllocated) << 12 |
            (long long)intToFloat12(zSizeAllocated);

    /*
     * Report the current external allocation stats and the native heap
     * summary.
     *
     * [63-48] Reserved; must be zero (TODO: put new data in these slots)
     * [47-36] dlmalloc_footprint
     * [35-24] mallinfo: total allocated space
     * [23-12] External byte limit
     * [11- 0] External bytes allocated
     */
    long long event3;
    size_t externalLimit, externalBytesAllocated;
    size_t uordblks, footprint;

#if 0
    /*
     * This adds 2-5msec to the GC cost on a DVT, or about 2-3% of the cost
     * of a GC, so it's not horribly expensive but it's not free either.
     */
    extern size_t dlmalloc_footprint(void);
    struct mallinfo mi;
    //u8 start, end;

    //start = dvmGetRelativeTimeNsec();
    mi = mallinfo();
    uordblks = mi.uordblks;
    footprint = dlmalloc_footprint();
    //end = dvmGetRelativeTimeNsec();
    //LOGD("mallinfo+footprint took %dusec; used=%zd footprint=%zd\n",
    //    (int)((end - start) / 1000), mi.uordblks, footprint);
#else
    uordblks = footprint = 0;
#endif

    externalLimit =
            dvmHeapSourceGetValue(HS_EXTERNAL_LIMIT, NULL, 0);
    externalBytesAllocated =
            dvmHeapSourceGetValue(HS_EXTERNAL_BYTES_ALLOCATED, NULL, 0);
    event3 =
            (long long)intToFloat12(footprint) << 36 |
            (long long)intToFloat12(uordblks) << 24 |
            (long long)intToFloat12(externalLimit) << 12 |
            (long long)intToFloat12(externalBytesAllocated);

    /* Build the event data.
     * [ 0: 0] item count (4)
     * [ 1: 1] EVENT_TYPE_LONG
     * [ 2: 9] event0
     * [10:10] EVENT_TYPE_LONG
     * [11:18] event1
     * [19:19] EVENT_TYPE_LONG
     * [20:27] event2
     * [28:28] EVENT_TYPE_LONG
     * [29:36] event3
     */
    unsigned char *c = eventBuf;
    *c++ = 4;
    *c++ = EVENT_TYPE_LONG;
    memcpy(c, &event0, sizeof(event0));
    c += sizeof(event0);
    *c++ = EVENT_TYPE_LONG;
    memcpy(c, &event1, sizeof(event1));
    c += sizeof(event1);
    *c++ = EVENT_TYPE_LONG;
    memcpy(c, &event2, sizeof(event2));
    c += sizeof(event2);
    *c++ = EVENT_TYPE_LONG;
    memcpy(c, &event3, sizeof(event3));

    (void) android_btWriteLog(EVENT_LOG_TAG_dvm_gc_info, EVENT_TYPE_LIST,
            eventBuf, sizeof(eventBuf));
}
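The layout table in the final comment can be checked at compile time: one count byte plus four (type byte + 8-byte long) records is 1 + 4 * 9 = 37 bytes, i.e. offsets [0..36], which matches the eventBuf declaration. A small sketch of such a check:

/* Compile-time sanity check (sketch) for the event buffer layout above:
 * the array size is negative, and compilation fails, if the arithmetic
 * doesn't come out to 37 bytes. */
typedef char eventBufSizeCheck[
    (1 + (1 + sizeof(long long)) * 4 == 37) ? 1 : -1];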