Example #1
/**
 * @brief Reset the code cache
 */
static void resetCodeCache(void)
{
    Thread* thread;
    u8 startTime = dvmGetRelativeTimeUsec();		/* start time (relative, in usec) for the duration log below */
    int inJit = 0;
    int byteUsed = gDvmJit.codeCacheByteUsed;		/* bytes currently used in the code cache */

    /* If any thread is found stuck in the JIT state, don't reset the cache  */
    dvmLockThreadList(NULL);
    for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
        /*
         * Crawl the stack to wipe out the returnAddr field so that
         * 1) the soon-to-be-deleted code in the JIT cache won't be used
         * 2) or the thread stuck in the JIT land will soon return
         *    to the interpreter land
         */
        crawlDalvikStack(thread, false);
        if (thread->inJitCodeCache) {
            inJit++;
        }
        /* Cancel any ongoing trace selection */
        dvmDisableSubMode(thread, kSubModeJitTraceBuild);
    }
    dvmUnlockThreadList();

    if (inJit) {
        ALOGD("JIT code cache reset delayed (%d bytes %d/%d)",
             gDvmJit.codeCacheByteUsed, gDvmJit.numCodeCacheReset,
             ++gDvmJit.numCodeCacheResetDelayed);
        return;
    }

    /* Lock the mutex to clean up the work queue */
    dvmLockMutex(&gDvmJit.compilerLock);

    /* Update the translation cache version */
    /* Incremented on every code cache reset so work orders issued against the old cache can be detected and discarded */
    gDvmJit.cacheVersion++;

    /* Drain the work queue to free the work orders */
    while (workQueueLength()) {
        CompilerWorkOrder work = workDequeue();
        free(work.info);
    }

    /* Reset the JitEntry table contents to the initial unpopulated state */
    dvmJitResetTable();

    UNPROTECT_CODE_CACHE(gDvmJit.codeCache, gDvmJit.codeCacheByteUsed);
    /*
     * Wipe out the code cache content to force immediate crashes if
     * stale JIT'ed code is invoked.
     */
    dvmCompilerCacheClear((char *) gDvmJit.codeCache + gDvmJit.templateSize,
                          gDvmJit.codeCacheByteUsed - gDvmJit.templateSize);

    dvmCompilerCacheFlush((intptr_t) gDvmJit.codeCache,
                          (intptr_t) gDvmJit.codeCache +
                          gDvmJit.codeCacheByteUsed, 0);

    PROTECT_CODE_CACHE(gDvmJit.codeCache, gDvmJit.codeCacheByteUsed);

    /* Reset the current mark of used bytes to the end of template code */
    gDvmJit.codeCacheByteUsed = gDvmJit.templateSize;
    gDvmJit.numCompilations = 0;	/* reset the compilation count */

    /* Reset the work queue */
    memset(gDvmJit.compilerWorkQueue, 0,
           sizeof(CompilerWorkOrder) * COMPILER_WORK_QUEUE_SIZE);
    gDvmJit.compilerWorkEnqueueIndex = gDvmJit.compilerWorkDequeueIndex = 0;
    gDvmJit.compilerQueueLength = 0;

    /* Reset the IC patch work queue */
    dvmLockMutex(&gDvmJit.compilerICPatchLock);
    gDvmJit.compilerICPatchIndex = 0;
    dvmUnlockMutex(&gDvmJit.compilerICPatchLock);

    /*
     * Reset the inflight compilation address (can only be done in safe points
     * or by the compiler thread when its thread state is RUNNING).
     */
    gDvmJit.inflightBaseAddr = NULL;

    /* All clear now */
    gDvmJit.codeCacheFull = false;

    dvmUnlockMutex(&gDvmJit.compilerLock);

    ALOGD("JIT code cache reset in %lld ms (%d bytes %d/%d)",
         (dvmGetRelativeTimeUsec() - startTime) / 1000,
         byteUsed, ++gDvmJit.numCodeCacheReset,
         gDvmJit.numCodeCacheResetDelayed);
}
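The heart of the reset is the unprotect / clear / flush / re-protect sequence. Below is a minimal standalone sketch of that idea using plain mprotect and the GCC/Clang cache-flush builtin; the names and protections are illustrative assumptions, not Dalvik's actual UNPROTECT_CODE_CACHE / dvmCompilerCacheClear / dvmCompilerCacheFlush definitions.

#include <string.h>
#include <sys/mman.h>

/* Sketch only: assumes codeCache is page-aligned and "used" spans whole pages. */
static void wipeAndReprotect(void *codeCache, size_t used)
{
    /* make the region writable (what UNPROTECT_CODE_CACHE stands for here) */
    mprotect(codeCache, used, PROT_READ | PROT_WRITE);

    /* wipe the old translations; Dalvik fills the region with values that
     * trap when executed, zero is used here purely for illustration */
    memset(codeCache, 0, used);

    /* keep the instruction cache coherent with the new contents */
    __builtin___clear_cache((char *)codeCache, (char *)codeCache + used);

    /* drop write permission again (what PROTECT_CODE_CACHE stands for here) */
    mprotect(codeCache, used, PROT_READ | PROT_EXEC);
}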
Example #2
/**
 * @brief Enqueue a compilation work order for the compiler thread
 * @param pc pointer to the Dalvik instruction at the trace head
 * @param kind kind of work order
 * @param info pointer to a JitTraceDescription structure
 * @return false if the work queue or the code cache is full, true otherwise
 */
bool dvmCompilerWorkEnqueue(const u2 *pc, WorkOrderKind kind, void* info)
{
    int cc;
    int i;
    int numWork;
    bool result = true;

    /* acquire the compiler lock */
    dvmLockMutex(&gDvmJit.compilerLock);

    /*
     * Return if queue or code cache is full.
     */
    if (gDvmJit.compilerQueueLength == COMPILER_WORK_QUEUE_SIZE ||
        gDvmJit.codeCacheFull == true) {
        dvmUnlockMutex(&gDvmJit.compilerLock);
        return false;
    }

    for (numWork = gDvmJit.compilerQueueLength,
           i = gDvmJit.compilerWorkDequeueIndex;
         numWork > 0;
         numWork--) {
        /* Already enqueued? Walk the pending entries and bail out if this PC is present */
        if (gDvmJit.compilerWorkQueue[i++].pc == pc) {
            dvmUnlockMutex(&gDvmJit.compilerLock);
            return true;
        }
        /* Wrap around */
        if (i == COMPILER_WORK_QUEUE_SIZE)
            i = 0;
    }

    /* Grab the next free work-order slot */
    CompilerWorkOrder *newOrder =
        &gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkEnqueueIndex];
    newOrder->pc = pc;					/* trace head PC */
    newOrder->kind = kind;				/* work order kind */
    newOrder->info = info;				/* JitTraceDescription */
    newOrder->result.methodCompilationAborted = NULL;		/* clear the JitTranslationInfo result */
    newOrder->result.codeAddress = NULL;
    /* Discard the compiled result when this is a debug work order */
    newOrder->result.discardResult =
        (kind == kWorkOrderTraceDebug) ? true : false;
    newOrder->result.cacheVersion = gDvmJit.cacheVersion;	/* cache version at request time */
    newOrder->result.requestingThread = dvmThreadSelf();	/* requesting thread */

    gDvmJit.compilerWorkEnqueueIndex++;		/* advance the enqueue index */
    /* wrap around at the end of the queue */
    if (gDvmJit.compilerWorkEnqueueIndex == COMPILER_WORK_QUEUE_SIZE)
        gDvmJit.compilerWorkEnqueueIndex = 0;
    gDvmJit.compilerQueueLength++;			/* one more pending work order */
    /* wake the compiler thread */
    cc = pthread_cond_signal(&gDvmJit.compilerQueueActivity);
    assert(cc == 0);

    dvmUnlockMutex(&gDvmJit.compilerLock);
    return result;
}
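The queue manipulated above is a fixed-size circular buffer with a duplicate check over the pending entries. Here is a stripped-down sketch of that structure using hypothetical names; the real state lives in gDvmJit and is protected by compilerLock.

#include <stdbool.h>
#include <stddef.h>

#define QUEUE_SIZE 100                  /* stand-in for COMPILER_WORK_QUEUE_SIZE */

typedef struct {
    const void *pc;                     /* trace head, used for deduplication */
} WorkItem;

typedef struct {
    WorkItem items[QUEUE_SIZE];
    int enqueueIndex;                   /* next free slot */
    int dequeueIndex;                   /* oldest pending item */
    int length;                         /* number of pending items */
} WorkQueue;

/* Returns false when the queue is full, true if enqueued or already pending. */
static bool enqueueUnique(WorkQueue *q, const void *pc)
{
    if (q->length == QUEUE_SIZE)
        return false;

    /* scan only the pending entries, wrapping at the end of the array */
    int i = q->dequeueIndex;
    for (int n = q->length; n > 0; n--) {
        if (q->items[i].pc == pc)
            return true;                /* already enqueued, nothing to do */
        if (++i == QUEUE_SIZE)
            i = 0;
    }

    q->items[q->enqueueIndex].pc = pc;
    if (++q->enqueueIndex == QUEUE_SIZE)
        q->enqueueIndex = 0;
    q->length++;
    return true;
}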
Example #3
/*
 * Update the read/write status of one or more pages.
 */
static void updatePages(Object* classLoader, void* mem, int direction)
{
    LinearAllocHdr* pHdr = getHeader(classLoader);
    dvmLockMutex(&pHdr->lock);

    /* make sure we have the right region */
    assert(mem >= (void*) pHdr->mapAddr &&
           mem < (void*) (pHdr->mapAddr + pHdr->curOffset));

    u4* pLen = getBlockHeader(mem);
    u4 len = *pLen & LENGTHFLAG_MASK;
    int firstPage, lastPage;

    firstPage = ((u1*)pLen - (u1*)pHdr->mapAddr) / SYSTEM_PAGE_SIZE;
    lastPage = ((u1*)mem - (u1*)pHdr->mapAddr + (len-1)) / SYSTEM_PAGE_SIZE;
    LOGVV("--- updating pages %d-%d (%d)", firstPage, lastPage, direction);

    int i, cc;

    /*
     * Update individual pages.  We could do some sort of "lazy update" to
     * combine mprotect calls, but that's almost certainly more trouble
     * than it's worth.
     */
    for (i = firstPage; i <= lastPage; i++) {
        if (direction < 0) {
            /*
             * Trying to mark read-only.
             */
            if (i == firstPage) {
                if ((*pLen & LENGTHFLAG_RW) == 0) {
                    ALOGW("Double RO on %p", mem);
                    dvmAbort();
                } else
                    *pLen &= ~LENGTHFLAG_RW;
            }

            if (pHdr->writeRefCount[i] == 0) {
                ALOGE("Can't make page %d any less writable", i);
                dvmAbort();
            }
            pHdr->writeRefCount[i]--;
            if (pHdr->writeRefCount[i] == 0) {
                LOGVV("---  prot page %d RO", i);
                cc = mprotect(pHdr->mapAddr + SYSTEM_PAGE_SIZE * i,
                        SYSTEM_PAGE_SIZE, PROT_READ);
                assert(cc == 0);
            }
        } else {
            /*
             * Trying to mark writable.
             */
            if (pHdr->writeRefCount[i] >= 32767) {
                ALOGE("Can't make page %d any more writable", i);
                dvmAbort();
            }
            if (pHdr->writeRefCount[i] == 0) {
                LOGVV("---  prot page %d RW", i);
                cc = mprotect(pHdr->mapAddr + SYSTEM_PAGE_SIZE * i,
                        SYSTEM_PAGE_SIZE, PROT_READ | PROT_WRITE);
                assert(cc == 0);
            }
            pHdr->writeRefCount[i]++;

            if (i == firstPage) {
                if ((*pLen & LENGTHFLAG_RW) != 0) {
                    ALOGW("Double RW on %p", mem);
                    dvmAbort();
                } else
                    *pLen |= LENGTHFLAG_RW;
            }
        }
    }

    dvmUnlockMutex(&pHdr->lock);
}
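updatePages keeps a per-page count of outstanding writers and only calls mprotect on the 0-to-1 and 1-to-0 transitions. A minimal sketch of that reference-counting idea follows, with hypothetical types and sizes; the real counters live in LinearAllocHdr::writeRefCount.

#include <stddef.h>
#include <sys/mman.h>

#define PAGE_SZ 4096                    /* stand-in for SYSTEM_PAGE_SIZE */

struct Region {
    char *base;                         /* page-aligned mapping */
    short writeRefCount[256];           /* one counter per page */
};

/* direction > 0: one more writer on the page; direction < 0: one writer done */
static void adjustPage(struct Region *r, int page, int direction)
{
    if (direction > 0) {
        /* first writer flips the page from read-only to read-write */
        if (r->writeRefCount[page]++ == 0)
            mprotect(r->base + (size_t)page * PAGE_SZ, PAGE_SZ,
                     PROT_READ | PROT_WRITE);
    } else {
        /* last writer flips it back to read-only */
        if (--r->writeRefCount[page] == 0)
            mprotect(r->base + (size_t)page * PAGE_SZ, PAGE_SZ, PROT_READ);
    }
}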
Example #4
/*
 * Allocate "size" bytes of storage, associated with a particular class
 * loader.
 *
 * It's okay for size to be zero.
 *
 * We always leave "curOffset" pointing at the next place where we will
 * store the header that precedes the returned storage.
 *
 * This aborts the VM on failure, so it's not necessary to check for a
 * NULL return value.
 */
void* dvmLinearAlloc(Object* classLoader, size_t size)
{
    LinearAllocHdr* pHdr = getHeader(classLoader);
    int startOffset, nextOffset;
    int lastGoodOff, firstWriteOff, lastWriteOff;

#ifdef DISABLE_LINEAR_ALLOC
    return calloc(1, size);
#endif

    LOGVV("--- LinearAlloc(%p, %d)", classLoader, size);

    /*
     * What we'd like to do is just determine the new end-of-alloc size
     * and atomic-swap the updated value in.  The trouble is that, the
     * first time we reach a new page, we need to call mprotect() to
     * make the page available, and we don't want to call mprotect() on
     * every allocation.  The troubled situation is:
     *  - thread A allocs across a page boundary, but gets preempted
     *    before mprotect() completes
     *  - thread B allocs within the new page, and doesn't call mprotect()
     */
    dvmLockMutex(&pHdr->lock);

    startOffset = pHdr->curOffset;
    assert(((startOffset + HEADER_EXTRA) & (BLOCK_ALIGN-1)) == 0);

    /*
     * Compute the new offset.  The old offset points at the address where
     * we will store the hidden block header, so we advance past that,
     * add the size of data they want, add another header's worth so we
     * know we have room for that, and round up to BLOCK_ALIGN.  That's
     * the next location where we'll put user data.  We then subtract the
     * chunk header size off so we're back to the header pointer.
     *
     * Examples:
     *   old=12 size=3 new=((12+(4*2)+3+7) & ~7)-4 = 24-4 --> 20
     *   old=12 size=5 new=((12+(4*2)+5+7) & ~7)-4 = 32-4 --> 28
     */
    nextOffset = ((startOffset + HEADER_EXTRA*2 + size + (BLOCK_ALIGN-1))
                    & ~(BLOCK_ALIGN-1)) - HEADER_EXTRA;
    LOGVV("--- old=%d size=%d new=%d", startOffset, size, nextOffset);

    if (nextOffset > pHdr->mapLength) {
        /*
         * We don't have to abort here.  We could fall back on the system
         * malloc(), and have our "free" call figure out what to do.  Only
         * works if the users of these functions actually free everything
         * they allocate.
         */
        ALOGE("LinearAlloc exceeded capacity (%d), last=%d",
            pHdr->mapLength, (int) size);
        dvmAbort();
    }

    /*
     * Round up "size" to encompass the entire region, including the 0-7
     * pad bytes before the next chunk header.  This way we get maximum
     * utility out of "realloc", and when we're doing ENFORCE_READ_ONLY
     * stuff we always treat the full extent.
     */
    size = nextOffset - (startOffset + HEADER_EXTRA);
    LOGVV("--- (size now %d)", size);

    /*
     * See if we are starting on or have crossed into a new page.  If so,
     * call mprotect on the page(s) we're about to write to.  We have to
     * page-align the start address, but don't have to make the length a
     * SYSTEM_PAGE_SIZE multiple (but we do it anyway).
     *
     * Note that "startOffset" is not the last *allocated* byte, but rather
     * the offset of the first *unallocated* byte (which we are about to
     * write the chunk header to).  "nextOffset" is similar.
     *
     * If ENFORCE_READ_ONLY is enabled, we have to call mprotect even if
     * we've written to this page before, because it might be read-only.
     */
    lastGoodOff = (startOffset-1) & ~(SYSTEM_PAGE_SIZE-1);
    firstWriteOff = startOffset & ~(SYSTEM_PAGE_SIZE-1);
    lastWriteOff = (nextOffset-1) & ~(SYSTEM_PAGE_SIZE-1);
    LOGVV("---  lastGood=0x%04x firstWrite=0x%04x lastWrite=0x%04x",
        lastGoodOff, firstWriteOff, lastWriteOff);
    if (lastGoodOff != lastWriteOff || ENFORCE_READ_ONLY) {
        int cc, start, len;

        start = firstWriteOff;
        assert(start <= nextOffset);
        len = (lastWriteOff - firstWriteOff) + SYSTEM_PAGE_SIZE;

        LOGVV("---    calling mprotect(start=%d len=%d RW)", start, len);
        cc = mprotect(pHdr->mapAddr + start, len, PROT_READ | PROT_WRITE);
        if (cc != 0) {
            ALOGE("LinearAlloc mprotect (+%d %d) failed: %s",
                start, len, strerror(errno));
            /* we're going to fail soon, might as well do it now */
            dvmAbort();
        }
    }

    /* update the ref counts on the now-writable pages */
    if (ENFORCE_READ_ONLY) {
        int i, start, end;

        start = firstWriteOff / SYSTEM_PAGE_SIZE;
        end = lastWriteOff / SYSTEM_PAGE_SIZE;

        LOGVV("---  marking pages %d-%d RW (alloc %d at %p)",
            start, end, size, pHdr->mapAddr + startOffset + HEADER_EXTRA);
        for (i = start; i <= end; i++)
            pHdr->writeRefCount[i]++;
    }

    /* stow the size in the header */
    if (ENFORCE_READ_ONLY)
        *(u4*)(pHdr->mapAddr + startOffset) = size | LENGTHFLAG_RW;
    else
        *(u4*)(pHdr->mapAddr + startOffset) = size;

    /*
     * Update data structure.
     */
    pHdr->curOffset = nextOffset;

    dvmUnlockMutex(&pHdr->lock);
    return pHdr->mapAddr + startOffset + HEADER_EXTRA;
}
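The worked examples in the offset comment inside dvmLinearAlloc can be reproduced directly. Below is a small check of that formula, assuming HEADER_EXTRA == 4 and BLOCK_ALIGN == 8 as those examples imply.

#include <stdio.h>

#define HEADER_EXTRA 4
#define BLOCK_ALIGN  8

static int nextOffsetFor(int startOffset, int size)
{
    /* advance past the header, the data, and a spare header, then round up
     * to BLOCK_ALIGN and step back to the next header position */
    return ((startOffset + HEADER_EXTRA * 2 + size + (BLOCK_ALIGN - 1))
                & ~(BLOCK_ALIGN - 1)) - HEADER_EXTRA;
}

int main(void)
{
    printf("%d\n", nextOffsetFor(12, 3));   /* prints 20, matching the comment */
    printf("%d\n", nextOffsetFor(12, 5));   /* prints 28, matching the comment */
    return 0;
}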
Example #5
/*
 * The heap worker thread sits quietly until the GC tells it there's work
 * to do.
 */
static void* heapWorkerThreadStart(void* arg)
{
    Thread *self = dvmThreadSelf();
    int cc;

    UNUSED_PARAMETER(arg);

    LOGV("HeapWorker thread started (threadid=%d)\n", self->threadId);

    /* tell the main thread that we're ready */
    dvmLockMutex(&gDvm.heapWorkerLock);
    gDvm.heapWorkerReady = true;
    cc = pthread_cond_signal(&gDvm.heapWorkerCond);
    assert(cc == 0);
    dvmUnlockMutex(&gDvm.heapWorkerLock);

    dvmLockMutex(&gDvm.heapWorkerLock);
    while (!gDvm.haltHeapWorker) {
        struct timespec trimtime;
        bool timedwait = false;

        /* We're done running interpreted code for now. */
        dvmChangeStatus(NULL, THREAD_VMWAIT);

        /* Signal anyone who wants to know when we're done. */
        cc = pthread_cond_broadcast(&gDvm.heapWorkerIdleCond);
        assert(cc == 0);

        /* Trim the heap if we were asked to. */
        trimtime = gDvm.gcHeap->heapWorkerNextTrim;
        if (trimtime.tv_sec != 0 && trimtime.tv_nsec != 0) {
            struct timespec now;

#ifdef HAVE_TIMEDWAIT_MONOTONIC
            clock_gettime(CLOCK_MONOTONIC, &now);       // relative time
#else
            struct timeval tvnow;
            gettimeofday(&tvnow, NULL);                 // absolute time
            now.tv_sec = tvnow.tv_sec;
            now.tv_nsec = tvnow.tv_usec * 1000;
#endif

            if (trimtime.tv_sec < now.tv_sec ||
                (trimtime.tv_sec == now.tv_sec && 
                 trimtime.tv_nsec <= now.tv_nsec))
            {
                size_t madvisedSizes[HEAP_SOURCE_MAX_HEAP_COUNT];

                /* The heap must be locked before the HeapWorker;
                 * unroll and re-order the locks.  dvmLockHeap()
                 * will put us in VMWAIT if necessary.  Once it
                 * returns, there shouldn't be any contention on
                 * heapWorkerLock.
                 */
                dvmUnlockMutex(&gDvm.heapWorkerLock);
                dvmLockHeap();
                dvmLockMutex(&gDvm.heapWorkerLock);

                memset(madvisedSizes, 0, sizeof(madvisedSizes));
                dvmHeapSourceTrim(madvisedSizes, HEAP_SOURCE_MAX_HEAP_COUNT);
                dvmLogMadviseStats(madvisedSizes, HEAP_SOURCE_MAX_HEAP_COUNT);

                dvmUnlockHeap();

                trimtime.tv_sec = 0;
                trimtime.tv_nsec = 0;
                gDvm.gcHeap->heapWorkerNextTrim = trimtime;
            } else {
                timedwait = true;
            }
        }

        /* sleep until signaled */
        if (timedwait) {
#ifdef HAVE_TIMEDWAIT_MONOTONIC
            cc = pthread_cond_timedwait_monotonic(&gDvm.heapWorkerCond,
                    &gDvm.heapWorkerLock, &trimtime);
#else
            cc = pthread_cond_timedwait(&gDvm.heapWorkerCond,
                    &gDvm.heapWorkerLock, &trimtime);
#endif
            assert(cc == 0 || cc == ETIMEDOUT || cc == EINTR);
        } else {
            cc = pthread_cond_wait(&gDvm.heapWorkerCond, &gDvm.heapWorkerLock);
            assert(cc == 0);
        }

        /* dvmChangeStatus() may block;  don't hold heapWorkerLock.
         */
        dvmUnlockMutex(&gDvm.heapWorkerLock);
        dvmChangeStatus(NULL, THREAD_RUNNING);
        dvmLockMutex(&gDvm.heapWorkerLock);
        LOGV("HeapWorker is awake\n");

        /* Process any events in the queue.
         */
        doHeapWork(self);
    }
    dvmUnlockMutex(&gDvm.heapWorkerLock);

    if (gDvm.verboseShutdown)
        LOGD("HeapWorker thread shutting down\n");
    return NULL;
}
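The wait logic in the loop above boils down to comparing the scheduled trim time against "now" and then choosing between a timed and an untimed condition wait. Here is a condensed sketch of that decision with hypothetical names; it uses the plain pthread_cond_timedwait path rather than the HAVE_TIMEDWAIT_MONOTONIC variant.

#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

/* true when the deadline is at or before "now" (same field-by-field test) */
static bool deadlinePassed(const struct timespec *deadline,
                           const struct timespec *now)
{
    return deadline->tv_sec < now->tv_sec ||
           (deadline->tv_sec == now->tv_sec &&
            deadline->tv_nsec <= now->tv_nsec);
}

/* Caller already holds *lock, mirroring heapWorkerLock above. */
static void waitForWork(pthread_mutex_t *lock, pthread_cond_t *cond,
                        const struct timespec *deadline, bool haveDeadline)
{
    if (haveDeadline) {
        /* ETIMEDOUT simply means the trim deadline arrived */
        int cc = pthread_cond_timedwait(cond, lock, deadline);
        assert(cc == 0 || cc == ETIMEDOUT || cc == EINTR);
    } else {
        pthread_cond_wait(cond, lock);
    }
}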
Example #6
static void Dalvik_dalvik_system_VMRuntime_preloadDexCaches(const u4* args, JValue* pResult)
{
    if (!kPreloadDexCachesEnabled) {
        return;
    }

    DexCacheStats total;
    DexCacheStats before;
    if (kPreloadDexCachesCollectStats) {
        ALOGI("VMRuntime.preloadDexCaches starting");
        preloadDexCachesStatsTotal(&total);
        preloadDexCachesStatsFilled(&before);
    }

    // We use a std::map to avoid heap allocating StringObjects to lookup in gDvm.literalStrings
    StringTable strings;
    if (kPreloadDexCachesStrings) {
        dvmLockMutex(&gDvm.internLock);
        dvmHashTableLock(gDvm.literalStrings);
        for (int i = 0; i < gDvm.literalStrings->tableSize; ++i) {
            HashEntry *entry = &gDvm.literalStrings->pEntries[i];
            if (entry->data != NULL && entry->data != HASH_TOMBSTONE) {
                preloadDexCachesStringsVisitor(&entry->data, 0, ROOT_INTERNED_STRING, &strings);
            }
        }
        dvmHashTableUnlock(gDvm.literalStrings);
        dvmUnlockMutex(&gDvm.internLock);
    }

    for (ClassPathEntry* cpe = gDvm.bootClassPath; cpe->kind != kCpeLastEntry; cpe++) {
        DvmDex* pDvmDex = getDvmDexFromClassPathEntry(cpe);
        const DexHeader* pHeader = pDvmDex->pHeader;
        const DexFile* pDexFile = pDvmDex->pDexFile;

        if (kPreloadDexCachesStrings) {
            for (size_t i = 0; i < pHeader->stringIdsSize; i++) {
                preloadDexCachesResolveString(pDvmDex, i, strings);
            }
        }

        if (kPreloadDexCachesTypes) {
            for (size_t i = 0; i < pHeader->typeIdsSize; i++) {
                preloadDexCachesResolveType(pDvmDex, i);
            }
        }

        if (kPreloadDexCachesFieldsAndMethods) {
            for (size_t classDefIndex = 0;
                 classDefIndex < pHeader->classDefsSize;
                 classDefIndex++) {
                const DexClassDef* pClassDef = dexGetClassDef(pDexFile, classDefIndex);
                const u1* pEncodedData = dexGetClassData(pDexFile, pClassDef);
                UniquePtr<DexClassData> pClassData(dexReadAndVerifyClassData(&pEncodedData, NULL));
                if (pClassData.get() == NULL) {
                    continue;
                }
                for (uint32_t fieldIndex = 0;
                     fieldIndex < pClassData->header.staticFieldsSize;
                     fieldIndex++) {
                    const DexField* pField = &pClassData->staticFields[fieldIndex];
                    preloadDexCachesResolveField(pDvmDex, pField->fieldIdx, false);
                }
                for (uint32_t fieldIndex = 0;
                     fieldIndex < pClassData->header.instanceFieldsSize;
                     fieldIndex++) {
                    const DexField* pField = &pClassData->instanceFields[fieldIndex];
                    preloadDexCachesResolveField(pDvmDex, pField->fieldIdx, true);
                }
                for (uint32_t methodIndex = 0;
                     methodIndex < pClassData->header.directMethodsSize;
                     methodIndex++) {
                    const DexMethod* pDexMethod = &pClassData->directMethods[methodIndex];
                    MethodType methodType = (((pDexMethod->accessFlags & ACC_STATIC) != 0) ?
                                             METHOD_STATIC :
                                             METHOD_DIRECT);
                    preloadDexCachesResolveMethod(pDvmDex, pDexMethod->methodIdx, methodType);
                }
                for (uint32_t methodIndex = 0;
                     methodIndex < pClassData->header.virtualMethodsSize;
                     methodIndex++) {
                    const DexMethod* pDexMethod = &pClassData->virtualMethods[methodIndex];
                    preloadDexCachesResolveMethod(pDvmDex, pDexMethod->methodIdx, METHOD_VIRTUAL);
                }
            }
        }
    }

    if (kPreloadDexCachesCollectStats) {
        DexCacheStats after;
        preloadDexCachesStatsFilled(&after);
        ALOGI("VMRuntime.preloadDexCaches strings total=%d before=%d after=%d",
              total.numStrings, before.numStrings, after.numStrings);
        ALOGI("VMRuntime.preloadDexCaches types total=%d before=%d after=%d",
              total.numTypes, before.numTypes, after.numTypes);
        ALOGI("VMRuntime.preloadDexCaches fields total=%d before=%d after=%d",
              total.numFields, before.numFields, after.numFields);
        ALOGI("VMRuntime.preloadDexCaches methods total=%d before=%d after=%d",
              total.numMethods, before.numMethods, after.numMethods);
        ALOGI("VMRuntime.preloadDexCaches finished");
    }

    RETURN_VOID();
}
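The interned-string walk near the top of this function is a plain scan over a hash table that skips empty slots and tombstones. A generic sketch of that traversal follows, with hypothetical types; the tombstone value here is a stand-in, not Dalvik's actual HASH_TOMBSTONE definition.

#include <stddef.h>

#define TOMBSTONE ((void *)-1)          /* stand-in for HASH_TOMBSTONE */

struct Entry { void *data; };
struct Table { struct Entry *entries; int size; };

/* Call "visit" on every live entry, skipping empty slots and tombstones. */
static void visitLiveEntries(struct Table *t, void (*visit)(void *))
{
    for (int i = 0; i < t->size; i++) {
        struct Entry *e = &t->entries[i];
        if (e->data != NULL && e->data != TOMBSTONE)
            visit(e->data);
    }
}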