Example #1
void dvmHeapSourceRegisterNativeAllocation(int bytes)
{
    /* If we have just done a GC, ensure that the finalizers are done and update
     * the native watermarks.
     */
    if (gHs->nativeNeedToRunFinalization) {
        dvmRunFinalization();
        dvmHeapSourceUpdateMaxNativeFootprint();
        gHs->nativeNeedToRunFinalization = false;
    }

    android_atomic_add(bytes, &gHs->nativeBytesAllocated);

    if ((size_t)gHs->nativeBytesAllocated > gHs->nativeFootprintGCWatermark) {
        /* The second watermark is higher than the GC watermark. Hitting
         * it means native objects are being allocated faster than the GC
         * can reclaim them; when that happens we do a GC for alloc.
         */
        if ((size_t)gHs->nativeBytesAllocated > gHs->nativeFootprintLimit) {
            Thread* self = dvmThreadSelf();
            dvmRunFinalization();
            if (dvmCheckException(self)) {
                return;
            }
            dvmLockHeap();
            bool waited = dvmWaitForConcurrentGcToComplete();
            dvmUnlockHeap();
            if (waited) {
                // Just finished a GC, attempt to run finalizers.
                dvmRunFinalization();
                if (dvmCheckException(self)) {
                    return;
                }
            }

            // If we are still over the watermark, attempt a GC for alloc and run finalizers.
            if ((size_t)gHs->nativeBytesAllocated > gHs->nativeFootprintLimit) {
                dvmLockHeap();
                dvmWaitForConcurrentGcToComplete();
                dvmCollectGarbageInternal(GC_FOR_MALLOC);
                dvmUnlockHeap();
                dvmRunFinalization();
                gHs->nativeNeedToRunFinalization = false;
                if (dvmCheckException(self)) {
                    return;
                }
            }
            /* We have just run finalizers, update the native watermark since
             * it is very likely that finalizers released native managed
             * allocations.
             */
            dvmHeapSourceUpdateMaxNativeFootprint();
        } else {
            dvmSignalCond(&gHs->gcThreadCond);
        }
    }
}
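This register path only ever adds to nativeBytesAllocated; the matching free path is not shown on this page. A minimal sketch of what that counterpart could look like, assuming the android_atomic_cas primitive from libcutils (the function name and clamping behavior are illustrative, not the verified Dalvik implementation):
/* Hypothetical release-side counterpart to the atomic add above.
 * Loops on compare-and-swap so concurrent updates cannot drive the
 * counter negative; android_atomic_cas() returns 0 on success.
 */
void dvmHeapSourceRegisterNativeFreeSketch(int bytes)
{
    int expected, updated;
    do {
        expected = gHs->nativeBytesAllocated;
        updated = expected - bytes;
        if (updated < 0) {
            break;  /* over-reported free; leave the counter untouched */
        }
    } while (android_atomic_cas(expected, updated,
                                &gHs->nativeBytesAllocated) != 0);
}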
Example #2
static void gcDaemonShutdown()
{
    if (gHs->hasGcThread) {
        dvmLockMutex(&gHs->gcThreadMutex);
        gHs->gcThreadShutdown = true;
        dvmSignalCond(&gHs->gcThreadCond);
        dvmUnlockMutex(&gHs->gcThreadMutex);
        pthread_join(gHs->gcThread, NULL);
    }
}
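gcDaemonShutdown() flips gcThreadShutdown under gcThreadMutex and signals gcThreadCond, so the daemon thread must be blocked waiting on that same condition. A hedged sketch of the loop on the other side, reusing the dvmWaitCond and dvmCollectGarbageInternal calls that appear elsewhere on this page (GC_CONCURRENT as the GC spec is an assumption):
/* Sketch of the GC daemon loop that pairs with gcDaemonShutdown().
 * It sleeps on gcThreadCond until dvmHeapSourceAlloc() signals it
 * (allocation threshold crossed) or shutdown is requested.
 */
static void* gcDaemonThreadSketch(void* arg)
{
    dvmLockMutex(&gHs->gcThreadMutex);
    while (!gHs->gcThreadShutdown) {
        dvmWaitCond(&gHs->gcThreadCond, &gHs->gcThreadMutex);
        if (gHs->gcThreadShutdown) {
            break;
        }
        dvmLockHeap();
        dvmCollectGarbageInternal(GC_CONCURRENT);   /* assumed GC spec */
        dvmUnlockHeap();
    }
    dvmUnlockMutex(&gHs->gcThreadMutex);
    return NULL;
}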
/*
 * Wake up the heap worker to let it know that there's work to be done.
 */
void dvmSignalHeapWorker(bool shouldLock)
{
    if (shouldLock) {
        dvmLockMutex(&gDvm.heapWorkerLock);
    }

    dvmSignalCond(&gDvm.heapWorkerCond);

    if (shouldLock) {
        dvmUnlockMutex(&gDvm.heapWorkerLock);
    }
}
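The shouldLock flag lets callers that already hold heapWorkerLock signal without re-acquiring it. Two hypothetical call sites sketching that contract:
/* Hypothetical call sites for dvmSignalHeapWorker(). */
static void signalFromUnlockedContext(void)
{
    dvmSignalHeapWorker(true);    /* takes and releases heapWorkerLock */
}

static void signalFromLockedContext(void)
{
    dvmLockMutex(&gDvm.heapWorkerLock);
    /* ...update state the worker thread will act on... */
    dvmSignalHeapWorker(false);   /* lock already held by this thread */
    dvmUnlockMutex(&gDvm.heapWorkerLock);
}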
/*
 * Allocates <n> bytes of zeroed data.
 */
void* dvmHeapSourceAlloc(size_t n)
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    Heap* heap = hs2heap(hs);
    if (heap->bytesAllocated + n > hs->softLimit) {
        /*
         * This allocation would push us over the soft limit; act as
         * if the heap is full.
         */
        LOGV_HEAP("softLimit of %zd.%03zdMB hit for %zd-byte allocation",
                  FRACTIONAL_MB(hs->softLimit), n);
        return NULL;
    }
    void* ptr = mspace_calloc(heap->msp, 1, n);
    if (ptr == NULL) {
        return NULL;
    }
    countAllocation(heap, ptr);
    /*
     * Check to see if a concurrent GC should be initiated.
     */
    if (gDvm.gcHeap->gcRunning || !hs->hasGcThread) {
        /*
         * The garbage collector thread is already running or has yet
         * to be started.  Do nothing.
         */
        return ptr;
    }
    if (heap->bytesAllocated > heap->concurrentStartBytes) {
        /*
         * We have exceeded the allocation threshold.  Wake up the
         * garbage collector.
         */
        dvmSignalCond(&gHs->gcThreadCond);
    }
    return ptr;
}
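A NULL return here means the heap is logically full (soft limit or mspace exhaustion), so callers are expected to collect and retry. A hypothetical retry wrapper, built only from functions visible in the examples on this page (allocWithRetry itself is illustrative):
/* Illustrative caller: on NULL, wait out any concurrent GC, force a
 * blocking collection, and retry once before reporting OOM upstream.
 */
static void* allocWithRetry(size_t n)
{
    void* ptr = dvmHeapSourceAlloc(n);
    if (ptr == NULL) {
        dvmLockHeap();
        dvmWaitForConcurrentGcToComplete();
        dvmCollectGarbageInternal(GC_FOR_MALLOC);
        dvmUnlockHeap();
        ptr = dvmHeapSourceAlloc(n);   /* may still be NULL */
    }
    return ptr;
}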
Example #5
static CompilerWorkOrder workDequeue(void)
{
    assert(gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkDequeueIndex].kind
           != kWorkOrderInvalid);
    CompilerWorkOrder work =
        gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkDequeueIndex];
    gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkDequeueIndex++].kind =
        kWorkOrderInvalid;
    if (gDvmJit.compilerWorkDequeueIndex == COMPILER_WORK_QUEUE_SIZE) {
        gDvmJit.compilerWorkDequeueIndex = 0;
    }
    gDvmJit.compilerQueueLength--;
    if (gDvmJit.compilerQueueLength == 0) {
        dvmSignalCond(&gDvmJit.compilerQueueEmpty);
    }

    /* Remember the high water mark of the queue length */
    if (gDvmJit.compilerQueueLength > gDvmJit.compilerMaxQueued)
        gDvmJit.compilerMaxQueued = gDvmJit.compilerQueueLength;

    return work;
}
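workDequeue() assumes a producer filling the ring buffer from the other end. A hedged sketch of that enqueue side, mirroring the wrap-around and length bookkeeping above; the compilerWorkEnqueueIndex and compilerQueueActivity names are assumptions, and the caller is presumed to hold the compiler queue lock just as the dequeue side does:
/* Sketch of the producer that pairs with workDequeue(); caller holds
 * the compiler queue lock.
 */
static bool workEnqueueSketch(const CompilerWorkOrder* order)
{
    if (gDvmJit.compilerQueueLength == COMPILER_WORK_QUEUE_SIZE) {
        return false;   /* queue full; caller drops or retries */
    }
    gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkEnqueueIndex++] = *order;
    if (gDvmJit.compilerWorkEnqueueIndex == COMPILER_WORK_QUEUE_SIZE) {
        gDvmJit.compilerWorkEnqueueIndex = 0;   /* wrap like the dequeue index */
    }
    gDvmJit.compilerQueueLength++;
    dvmSignalCond(&gDvmJit.compilerQueueActivity);   /* wake the compiler thread */
    return true;
}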
Example #6
/**
 * @brief Dequeue one entry from the compiler work queue.
 * @return The dequeued CompilerWorkOrder structure.
 */
static CompilerWorkOrder workDequeue(void)
{
    /* Assert that the current work entry is valid. */
    assert(gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkDequeueIndex].kind
           != kWorkOrderInvalid);
    CompilerWorkOrder work =
        gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkDequeueIndex];            /* copy out the current entry */
    gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkDequeueIndex++].kind =
        kWorkOrderInvalid;                                                      /* mark the slot invalid and advance the index */
    /* Wrap the index back to 0 once it reaches the end of the queue. */
    if (gDvmJit.compilerWorkDequeueIndex == COMPILER_WORK_QUEUE_SIZE) {
        gDvmJit.compilerWorkDequeueIndex = 0;
    }
    gDvmJit.compilerQueueLength--;              /* decrement the queue length */
    if (gDvmJit.compilerQueueLength == 0) {
        dvmSignalCond(&gDvmJit.compilerQueueEmpty);
    }

    /* Remember the high water mark of the queue length */
    if (gDvmJit.compilerQueueLength > gDvmJit.compilerMaxQueued)
        gDvmJit.compilerMaxQueued = gDvmJit.compilerQueueLength;

    return work;
}
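The compilerQueueEmpty condition signaled above is only useful to a thread waiting for the queue to drain. A minimal hedged sketch of such a consumer; the compilerLock mutex name is an assumption, and the wait sits in a loop because the signal fires only at length zero and spurious wakeups are possible:
/* Sketch: block until the compiler work queue is empty. */
static void waitForQueueEmptySketch(void)
{
    dvmLockMutex(&gDvmJit.compilerLock);          /* assumed mutex name */
    while (gDvmJit.compilerQueueLength != 0) {
        dvmWaitCond(&gDvmJit.compilerQueueEmpty, &gDvmJit.compilerLock);
    }
    dvmUnlockMutex(&gDvmJit.compilerLock);
}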
Example #7
/*
 * Allocates <n> bytes of zeroed data.
 */
void* dvmHeapSourceAlloc(size_t n)
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    Heap* heap = hs2heap(hs);
    if (heap->bytesAllocated + n > hs->softLimit) {
        /*
         * This allocation would push us over the soft limit; act as
         * if the heap is full.
         */
        LOGV_HEAP("softLimit of %zd.%03zdMB hit for %zd-byte allocation",
                  FRACTIONAL_MB(hs->softLimit), n);
        return NULL;
    }
    void* ptr;
    if (gDvm.lowMemoryMode) {
        /* This is only necessary because mspace_calloc always memsets the
         * allocated memory to 0. This is bad for memory usage since it leads
         * to dirty zero pages. If low memory mode is enabled, we use
         * mspace_malloc which doesn't memset the allocated memory and madvise
         * the page aligned region back to the kernel.
         */
        ptr = mspace_malloc(heap->msp, n);
        if (ptr == NULL) {
            return NULL;
        }
        uintptr_t zero_begin = (uintptr_t)ptr;
        uintptr_t zero_end = (uintptr_t)ptr + n;
        /* Calculate the page aligned region.
         */
        uintptr_t begin = ALIGN_UP_TO_PAGE_SIZE(zero_begin);
        uintptr_t end = zero_end & ~(uintptr_t)(SYSTEM_PAGE_SIZE - 1);
        /* If our allocation spans more than one page, we attempt to madvise.
         */
        if (begin < end) {
            /* madvise the page-aligned region back to the kernel.
             */
            madvise((void*)begin, end - begin, MADV_DONTNEED);
            /* Zero the region after the page aligned region.
             */
            memset((void*)end, 0, zero_end - end);
            /* Zero out the region before the page aligned region.
             */
            zero_end = begin;
        }
        memset((void*)zero_begin, 0, zero_end - zero_begin);
    } else {
        ptr = mspace_calloc(heap->msp, 1, n);
        if (ptr == NULL) {
            return NULL;
        }
    }

    countAllocation(heap, ptr);
    /*
     * Check to see if a concurrent GC should be initiated.
     */
    if (gDvm.gcHeap->gcRunning || !hs->hasGcThread) {
        /*
         * The garbage collector thread is already running or has yet
         * to be started.  Do nothing.
         */
        return ptr;
    }
    if (heap->bytesAllocated > heap->concurrentStartBytes) {
        /*
         * We have exceeded the allocation threshold.  Wake up the
         * garbage collector.
         */
        dvmSignalCond(&gHs->gcThreadCond);
    }
    return ptr;
}
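The low-memory branch depends on the page-rounding arithmetic in ALIGN_UP_TO_PAGE_SIZE, whose definition is not shown on this page. A sketch of the usual form, assuming SYSTEM_PAGE_SIZE is a power of two, with a worked example of the regions it produces:
/* Likely shape of the alignment macro (assumption: SYSTEM_PAGE_SIZE is
 * a power of two, e.g. 4096).
 *
 * Worked example with 4 KiB pages: ptr = 0x1010, n = 0x3000 gives
 * zero_end = 0x4010, begin = 0x2000, end = 0x4000.  [0x2000,0x4000) is
 * madvised; only [0x1010,0x2000) and [0x4000,0x4010) are memset to 0.
 */
#define ALIGN_UP_TO_PAGE_SIZE(p) \
    (((uintptr_t)(p) + (SYSTEM_PAGE_SIZE - 1)) & \
     ~(uintptr_t)(SYSTEM_PAGE_SIZE - 1))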
/*
 * The heap worker thread sits quietly until the GC tells it there's work
 * to do.
 */
static void* heapWorkerThreadStart(void* arg)
{
    Thread *self = dvmThreadSelf();

    UNUSED_PARAMETER(arg);

    LOGV("HeapWorker thread started (threadid=%d)\n", self->threadId);

    /* tell the main thread that we're ready */
    lockMutex(&gDvm.heapWorkerLock);
    gDvm.heapWorkerReady = true;
    dvmSignalCond(&gDvm.heapWorkerCond);
    dvmUnlockMutex(&gDvm.heapWorkerLock);

    lockMutex(&gDvm.heapWorkerLock);
    while (!gDvm.haltHeapWorker) {
        struct timespec trimtime;
        bool timedwait = false;

        /* We're done running interpreted code for now. */
        dvmChangeStatus(NULL, THREAD_VMWAIT);

        /* Signal anyone who wants to know when we're done. */
        dvmBroadcastCond(&gDvm.heapWorkerIdleCond);

        /* Trim the heap if we were asked to. */
        trimtime = gDvm.gcHeap->heapWorkerNextTrim;
        if (trimtime.tv_sec != 0 && trimtime.tv_nsec != 0) {
            struct timespec now;

#ifdef HAVE_TIMEDWAIT_MONOTONIC
            clock_gettime(CLOCK_MONOTONIC, &now);       // relative time
#else
            struct timeval tvnow;
            gettimeofday(&tvnow, NULL);                 // absolute time
            now.tv_sec = tvnow.tv_sec;
            now.tv_nsec = tvnow.tv_usec * 1000;
#endif

            if (trimtime.tv_sec < now.tv_sec ||
                (trimtime.tv_sec == now.tv_sec &&
                 trimtime.tv_nsec <= now.tv_nsec))
            {
                size_t madvisedSizes[HEAP_SOURCE_MAX_HEAP_COUNT];

                /*
                 * Acquire the gcHeapLock.  This requires releasing the
                 * heapWorkerLock before the gcHeapLock is acquired.
                 * It is possible that the gcHeapLock may be acquired
                 * during a concurrent GC in which case heapWorkerLock
                 * is held by the GC and we are unable to make forward
                 * progress.  We avoid deadlock by releasing the
                 * gcHeapLock and then waiting to be signaled when the
                 * GC completes.  There is no guarantee that the next
                 * time we are run will coincide with GC inactivity so
                 * the check and wait must be performed within a loop.
                 */
                dvmUnlockMutex(&gDvm.heapWorkerLock);
                dvmLockHeap();
                while (gDvm.gcHeap->gcRunning) {
                    dvmWaitForConcurrentGcToComplete();
                }
                dvmLockMutex(&gDvm.heapWorkerLock);

                memset(madvisedSizes, 0, sizeof(madvisedSizes));
                dvmHeapSourceTrim(madvisedSizes, HEAP_SOURCE_MAX_HEAP_COUNT);
                dvmLogMadviseStats(madvisedSizes, HEAP_SOURCE_MAX_HEAP_COUNT);

                dvmUnlockHeap();

                trimtime.tv_sec = 0;
                trimtime.tv_nsec = 0;
                gDvm.gcHeap->heapWorkerNextTrim = trimtime;
            } else {
                timedwait = true;
            }
        }

        /* sleep until signaled */
        if (timedwait) {
            int cc __attribute__ ((__unused__));
#ifdef HAVE_TIMEDWAIT_MONOTONIC
            cc = pthread_cond_timedwait_monotonic(&gDvm.heapWorkerCond,
                    &gDvm.heapWorkerLock, &trimtime);
#else
            cc = pthread_cond_timedwait(&gDvm.heapWorkerCond,
                    &gDvm.heapWorkerLock, &trimtime);
#endif
            assert(cc == 0 || cc == ETIMEDOUT);
        } else {
            dvmWaitCond(&gDvm.heapWorkerCond, &gDvm.heapWorkerLock);
        }

        /*
         * Return to the running state before doing heap work.  This
         * will block if the GC has initiated a suspend.  We release
         * the heapWorkerLock beforehand for the GC to make progress
         * and wait to be signaled after the GC completes.  There is
         * no guarantee that the next time we are run will coincide
         * with GC inactivity so the check and wait must be performed
         * within a loop.
         */
        dvmUnlockMutex(&gDvm.heapWorkerLock);
        dvmChangeStatus(NULL, THREAD_RUNNING);
        dvmLockHeap();
        while (gDvm.gcHeap->gcRunning) {
            dvmWaitForConcurrentGcToComplete();
        }
        dvmLockMutex(&gDvm.heapWorkerLock);
        dvmUnlockHeap();
        LOGV("HeapWorker is awake\n");

        /* Process any events in the queue.
         */
        doHeapWork(self);
    }
    dvmUnlockMutex(&gDvm.heapWorkerLock);

    if (gDvm.verboseShutdown)
        LOGD("HeapWorker thread shutting down\n");
    return NULL;
}
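The worker consumes heapWorkerNextTrim but never sets it; some scheduler must publish the deadline under heapWorkerLock. A hypothetical sketch of that producer, reusing the same HAVE_TIMEDWAIT_MONOTONIC clock selection as the loop above (the function name is illustrative):
/* Illustrative producer for the trim deadline read by the worker. */
static void scheduleHeapTrimSketch(size_t timeoutSec)
{
    struct timespec deadline;
#ifdef HAVE_TIMEDWAIT_MONOTONIC
    clock_gettime(CLOCK_MONOTONIC, &deadline);
#else
    struct timeval now;
    gettimeofday(&now, NULL);
    deadline.tv_sec = now.tv_sec;
    deadline.tv_nsec = now.tv_usec * 1000;
#endif
    deadline.tv_sec += timeoutSec;

    dvmLockMutex(&gDvm.heapWorkerLock);
    gDvm.gcHeap->heapWorkerNextTrim = deadline;
    dvmSignalHeapWorker(false);   /* lock already held; worker re-arms wait */
    dvmUnlockMutex(&gDvm.heapWorkerLock);
}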