Example #1
/*
 * Visits roots.  TODO: visit cached global references.
 */
void dvmVisitRoots(RootVisitor *visitor, void *arg)
{
    assert(visitor != NULL);
    //u4 t0 = dvmGetRelativeTimeMsec();

#ifndef FASTIVA_PRELOAD_STATIC_INSTANCE
    visitHashTable(visitor, gDvm.loadedClasses, ROOT_STICKY_CLASS, arg);
    visitPrimitiveTypes(visitor, arg);
#endif
    if (gDvm.dbgRegistry != NULL) {
        visitHashTable(visitor, gDvm.dbgRegistry, ROOT_DEBUGGER, arg);
    }
    if (gDvm.literalStrings != NULL) {
        visitHashTable(visitor, gDvm.literalStrings, ROOT_INTERNED_STRING, arg);
    }
    dvmLockMutex(&gDvm.jniGlobalRefLock);
    visitIndirectRefTable(visitor, &gDvm.jniGlobalRefTable, 0, ROOT_JNI_GLOBAL, arg);
    dvmUnlockMutex(&gDvm.jniGlobalRefLock);
    dvmLockMutex(&gDvm.jniPinRefLock);
    visitReferenceTable(visitor, &gDvm.jniPinRefTable, 0, ROOT_VM_INTERNAL, arg);
    dvmUnlockMutex(&gDvm.jniPinRefLock);
    visitThreads(visitor, arg);
    (*visitor)(&gDvm.outOfMemoryObj, 0, ROOT_VM_INTERNAL, arg);
    (*visitor)(&gDvm.internalErrorObj, 0, ROOT_VM_INTERNAL, arg);
    (*visitor)(&gDvm.noClassDefFoundErrorObj, 0, ROOT_VM_INTERNAL, arg);
#ifdef FASTIVA
	(*visitor)(&kernelData.g_pAnnotationsList, 0, ROOT_VM_INTERNAL, arg);
#endif
}
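From the calls above, the visitor receives the address of a root slot, a thread id (0 for VM-wide roots), a ROOT_* tag, and the caller's opaque arg. As an illustration only (not from the Dalvik sources, with stand-in type definitions), a hypothetical visitor that simply counts the roots it is shown could look like this:

#include <stddef.h>
#include <stdint.h>

/* Stand-ins for the Dalvik types used above (assumptions for illustration). */
typedef uint32_t u4;
typedef int RootType;   /* the ROOT_* enumerators would live here */

/* Hypothetical visitor: counts every root slot it is handed.  The parameter
 * order mirrors the (*visitor)(&slot, threadId, rootType, arg) calls above. */
static void countRootsVisitor(void* addr, u4 threadId, RootType type, void* arg)
{
    size_t* count = (size_t*)arg;
    (void)addr;
    (void)threadId;
    (void)type;
    (*count)++;
}

/* Usage sketch: size_t n = 0; dvmVisitRoots(countRootsVisitor, &n); */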
Example #2
/*
 * Crank up the heap worker thread.
 *
 * Does not return until the thread is ready for business.
 */
bool dvmHeapWorkerStartup(void)
{
    assert(!gDvm.haltHeapWorker);
    assert(!gDvm.heapWorkerReady);
    assert(gDvm.heapWorkerHandle == 0);
    assert(gDvm.heapWorkerInitialized);

    /* use heapWorkerLock/heapWorkerCond to communicate readiness */
    dvmLockMutex(&gDvm.heapWorkerLock);

//BUG: If a GC happens in here or in the new thread while we hold the lock,
//     the GC will deadlock when trying to acquire heapWorkerLock.
    if (!dvmCreateInternalThread(&gDvm.heapWorkerHandle,
                "HeapWorker", heapWorkerThreadStart, NULL))
    {
        dvmUnlockMutex(&gDvm.heapWorkerLock);
        return false;
    }

    /*
     * Wait for the heap worker to come up.  We know the thread was created,
     * so this should not get stuck.
     */
    while (!gDvm.heapWorkerReady) {
        dvmWaitCond(&gDvm.heapWorkerCond, &gDvm.heapWorkerLock);
    }

    dvmUnlockMutex(&gDvm.heapWorkerLock);
    return true;
}
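The startup handshake in dvmHeapWorkerStartup - take the lock, spawn the thread, then sleep on a condition variable until the new thread flags itself ready - is a general pattern. Below is a minimal, self-contained sketch of the same handshake using plain POSIX threads; all names are hypothetical and none of this is Dalvik API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for gDvm.heapWorkerLock / heapWorkerCond / heapWorkerReady. */
static pthread_mutex_t gLock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  gCond  = PTHREAD_COND_INITIALIZER;
static bool            gReady = false;

static void* workerThreadStart(void* arg)
{
    (void)arg;
    /* ...per-thread setup would go here... */
    pthread_mutex_lock(&gLock);
    gReady = true;                   /* publish readiness under the lock */
    pthread_cond_broadcast(&gCond);  /* wake the thread blocked in startup */
    pthread_mutex_unlock(&gLock);
    /* ...worker main loop... */
    return NULL;
}

static bool workerStartup(pthread_t* handle)
{
    pthread_mutex_lock(&gLock);
    if (pthread_create(handle, NULL, workerThreadStart, NULL) != 0) {
        pthread_mutex_unlock(&gLock);   /* don't leave the lock held on failure */
        return false;
    }
    while (!gReady)                     /* loop guards against spurious wakeups */
        pthread_cond_wait(&gCond, &gLock);
    pthread_mutex_unlock(&gLock);
    return true;
}

int main(void)
{
    pthread_t worker;
    if (workerStartup(&worker)) {
        printf("worker is ready\n");
        pthread_join(worker, NULL);
    }
    return 0;
}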
Example #3
/*
 * Implement java.lang.Thread.interrupt().
 */
void dvmThreadInterrupt(Thread* thread)
{
    assert(thread != NULL);

    dvmLockMutex(&thread->waitMutex);

    /*
     * If the interrupted flag is already set no additional action is
     * required.
     */
    if (thread->interrupted == true) {
        dvmUnlockMutex(&thread->waitMutex);
        return;
    }

    /*
     * Raise the "interrupted" flag.  This will cause it to bail early out
     * of the next wait() attempt, if it's not currently waiting on
     * something.
     */
    thread->interrupted = true;

    /*
     * Is the thread waiting?
     *
     * Note that fat vs. thin doesn't matter here;  waitMonitor
     * is only set when a thread actually waits on a monitor,
     * which implies that the monitor has already been fattened.
     */
    if (thread->waitMonitor != NULL) {
        pthread_cond_signal(&thread->waitCond);
    }

    dvmUnlockMutex(&thread->waitMutex);
}
Example #4
/*
 * Notify one thread waiting on this monitor.
 */
static void notifyMonitor(Thread* self, Monitor* mon)
{
    Thread* thread;

    assert(self != NULL);
    assert(mon != NULL);

    /* Make sure that we hold the lock. */
    if (mon->owner != self) {
        dvmThrowIllegalMonitorStateException(
            "object not locked by thread before notify()");
        return;
    }
    /* Signal the first waiting thread in the wait set. */
    while (mon->waitSet != NULL) {
        thread = mon->waitSet;
        mon->waitSet = thread->waitNext;
        thread->waitNext = NULL;
        dvmLockMutex(&thread->waitMutex);
        /* Check to see if the thread is still waiting. */
        if (thread->waitMonitor != NULL) {
            pthread_cond_signal(&thread->waitCond);
            dvmUnlockMutex(&thread->waitMutex);
            return;
        }
        dvmUnlockMutex(&thread->waitMutex);
    }
}
Example #5
void dvmCompilerUpdateGlobalState()
{
    bool jitActive;
    bool jitActivate;
    bool needUnchain = false;

    /*
     * The tableLock might not be initialized yet by the compiler thread if
 * a debugger is attached from the very beginning of the VM launch. If
     * pProfTableCopy is NULL, the lock is not initialized yet and we don't
     * need to refresh anything either.
     */
    if (gDvmJit.pProfTableCopy == NULL) {
        return;
    }

    /*
     * On the first enabling of method tracing, switch the compiler
     * into a mode that includes trace support for invokes and returns.
     * If there are any existing translations, flush them.  NOTE:  we
     * can't blindly flush the translation cache because this code
     * may be executed before the compiler thread has finished
     * initialization.
     */
    if ((gDvm.activeProfilers != 0) &&
        !gDvmJit.methodTraceSupport) {
        bool resetRequired;
        /*
         * compilerLock will prevent new compilations from being
         * installed while we are working.
         */
        dvmLockMutex(&gDvmJit.compilerLock);
        gDvmJit.cacheVersion++; // invalidate compilations in flight
        gDvmJit.methodTraceSupport = true;
        resetRequired = (gDvmJit.numCompilations != 0);
        dvmUnlockMutex(&gDvmJit.compilerLock);
        if (resetRequired) {
            dvmSuspendAllThreads(SUSPEND_FOR_CC_RESET);
            resetCodeCache();
            dvmResumeAllThreads(SUSPEND_FOR_CC_RESET);
        }
    }

    dvmLockMutex(&gDvmJit.tableLock);
    jitActive = gDvmJit.pProfTable != NULL;
    jitActivate = !dvmDebuggerOrProfilerActive();

    if (jitActivate && !jitActive) {
        gDvmJit.pProfTable = gDvmJit.pProfTableCopy;
    } else if (!jitActivate && jitActive) {
        gDvmJit.pProfTable = NULL;
        needUnchain = true;
    }
    dvmUnlockMutex(&gDvmJit.tableLock);
    if (needUnchain)
        dvmJitUnchainAll();
    // Make sure all threads have current values
    dvmJitUpdateThreadStateAll();
}
Example #6
/*
 * Attempt to enqueue a work order, returning true if successful.
 *
 * NOTE: Make sure that the caller frees the info pointer if the return value
 * is false.
 */
bool dvmCompilerWorkEnqueue(const u2 *pc, WorkOrderKind kind, void* info)
{
    int cc;
    int i;
    int numWork;
    bool result = true;

    dvmLockMutex(&gDvmJit.compilerLock);

    /*
     * Return if queue or code cache is full.
     */
    if (gDvmJit.compilerQueueLength == COMPILER_WORK_QUEUE_SIZE ||
        gDvmJit.codeCacheFull == true) {
        dvmUnlockMutex(&gDvmJit.compilerLock);
        return false;
    }

    for (numWork = gDvmJit.compilerQueueLength,
           i = gDvmJit.compilerWorkDequeueIndex;
         numWork > 0;
         numWork--) {
        /* Already enqueued */
        if (gDvmJit.compilerWorkQueue[i++].pc == pc) {
            dvmUnlockMutex(&gDvmJit.compilerLock);
            return true;
        }
        /* Wrap around */
        if (i == COMPILER_WORK_QUEUE_SIZE)
            i = 0;
    }

    CompilerWorkOrder *newOrder =
        &gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkEnqueueIndex];
    newOrder->pc = pc;
    newOrder->kind = kind;
    newOrder->info = info;
    newOrder->result.methodCompilationAborted = false;
    newOrder->result.codeAddress = NULL;
    newOrder->result.discardResult =
        (kind == kWorkOrderTraceDebug) ? true : false;
    newOrder->result.cacheVersion = gDvmJit.cacheVersion;
    newOrder->result.requestingThread = dvmThreadSelf();

    gDvmJit.compilerWorkEnqueueIndex++;
    if (gDvmJit.compilerWorkEnqueueIndex == COMPILER_WORK_QUEUE_SIZE)
        gDvmJit.compilerWorkEnqueueIndex = 0;
    gDvmJit.compilerQueueLength++;
    cc = pthread_cond_signal(&gDvmJit.compilerQueueActivity);
    assert(cc == 0);

    dvmUnlockMutex(&gDvmJit.compilerLock);
    return result;
}
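dvmCompilerWorkEnqueue treats compilerWorkQueue as a fixed-size ring buffer: it first scans the pending entries (starting at the dequeue index and wrapping) to suppress duplicate orders, then writes at the enqueue index and wraps that as well. Here is a stripped-down sketch of the same bookkeeping with hypothetical names, leaving out the locking and the condition-variable signalling of the original:

#include <stdbool.h>

#define QUEUE_SIZE 100  /* stands in for COMPILER_WORK_QUEUE_SIZE */

typedef struct {
    const void* pc;     /* key used for duplicate detection */
} WorkOrder;

typedef struct {
    WorkOrder orders[QUEUE_SIZE];
    int enqueueIndex;   /* next free slot */
    int dequeueIndex;   /* oldest pending entry */
    int length;         /* number of pending entries */
} WorkQueue;

/* Returns false only if the queue is full; duplicates report success. */
static bool workEnqueue(WorkQueue* q, const void* pc)
{
    if (q->length == QUEUE_SIZE)
        return false;

    /* Scan the pending entries, wrapping at the end of the array. */
    int i = q->dequeueIndex;
    for (int n = q->length; n > 0; n--) {
        if (q->orders[i].pc == pc)
            return true;            /* already queued; nothing to do */
        if (++i == QUEUE_SIZE)
            i = 0;
    }

    q->orders[q->enqueueIndex].pc = pc;
    if (++q->enqueueIndex == QUEUE_SIZE)
        q->enqueueIndex = 0;
    q->length++;
    return true;
}

/* Removes the oldest entry; returns false if the queue is empty. */
static bool workDequeue(WorkQueue* q, WorkOrder* out)
{
    if (q->length == 0)
        return false;
    *out = q->orders[q->dequeueIndex];
    if (++q->dequeueIndex == QUEUE_SIZE)
        q->dequeueIndex = 0;
    q->length--;
    return true;
}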
Example #7
/**
 * JIT compiler entry point.
 * @retval 0 on failure
 * @retval 1 on success
 */
bool dvmCompilerStartup(void)
{
    /* Initialize the thread-synchronization primitives */
    dvmInitMutex(&gDvmJit.compilerLock);
    dvmInitMutex(&gDvmJit.compilerICPatchLock);
    dvmInitMutex(&gDvmJit.codeCacheProtectionLock);
    dvmLockMutex(&gDvmJit.compilerLock);
    pthread_cond_init(&gDvmJit.compilerQueueActivity, NULL);
    pthread_cond_init(&gDvmJit.compilerQueueEmpty, NULL);

    /* Reset the work queue */
    gDvmJit.compilerWorkEnqueueIndex = gDvmJit.compilerWorkDequeueIndex = 0;
    gDvmJit.compilerQueueLength = 0;
    dvmUnlockMutex(&gDvmJit.compilerLock);

    /*
     * Defer rest of initialization until we're sure JIT'ng makes sense. Launch
     * the compiler thread, which will do the real initialization if and
     * when it is signalled to do so.
     */
    /* dvmCreateInternalThread() below spawns the compilerThreadStart thread,
     * which performs the real initialization. */
    return dvmCreateInternalThread(&gDvmJit.compilerHandle, "Compiler",
                                   compilerThreadStart, NULL);
}
Example #8
/*
 * For debugging, dump the contents of a linear alloc area.
 *
 * We grab the lock so that the header contents and list output are
 * consistent.
 */
void dvmLinearAllocDump(Object* classLoader)
{
#ifdef DISABLE_LINEAR_ALLOC
    return;
#endif
    LinearAllocHdr* pHdr = getHeader(classLoader);

    dvmLockMutex(&pHdr->lock);

    LOGI("LinearAlloc classLoader=%p\n", classLoader);
    LOGI("  mapAddr=%p mapLength=%d firstOffset=%d\n",
        pHdr->mapAddr, pHdr->mapLength, pHdr->firstOffset);
    LOGI("  curOffset=%d\n", pHdr->curOffset);

    int off = pHdr->firstOffset;
    u4 rawLen, fullLen;

    while (off < pHdr->curOffset) {
        rawLen = *(u4*) (pHdr->mapAddr + off);
        fullLen = ((HEADER_EXTRA*2 + (rawLen & LENGTHFLAG_MASK))
                    & ~(BLOCK_ALIGN-1));

        LOGI("  %p (%3d): %clen=%d%s\n", pHdr->mapAddr + off + HEADER_EXTRA,
            (int) ((off + HEADER_EXTRA) / PAGESIZE),
            (rawLen & LENGTHFLAG_FREE) != 0 ? '*' : ' ',
            rawLen & LENGTHFLAG_MASK,
            (rawLen & LENGTHFLAG_RW) != 0 ? " [RW]" : "");

        off += fullLen;
    }

    if (ENFORCE_READ_ONLY) {
        LOGI("writeRefCount map:\n");

        int numPages = (pHdr->mapLength+PAGESIZE-1) / PAGESIZE;
        int zstart = 0;
        int i;

        for (i = 0; i < numPages; i++) {
            int count = pHdr->writeRefCount[i];

            if (count != 0) {
                if (zstart < i-1)
                    printf(" %d-%d: zero\n", zstart, i-1);
                else if (zstart == i-1)
                    printf(" %d: zero\n", zstart);
                zstart = i+1;
                printf(" %d: %d\n", i, count);
            }
        }
        if (zstart < i)
            printf(" %d-%d: zero\n", zstart, i-1);
    }

    LOGD("LinearAlloc %p using %d of %d (%d%%)\n",
        classLoader, pHdr->curOffset, pHdr->mapLength,
        (pHdr->curOffset * 100) / pHdr->mapLength);

    dvmUnlockMutex(&pHdr->lock);
}
Example #9
/*
 * Unlock a monitor.
 *
 * Returns true if the unlock succeeded.
 * If the unlock failed, an exception will be pending.
 */
static bool unlockMonitor(Thread* self, Monitor* mon)
{
    assert(self != NULL);
    assert(mon != NULL);
    if (mon->owner == self) {
        /*
         * We own the monitor, so nobody else can be in here.
         */
        if (mon->lockCount == 0) {
            mon->owner = NULL;
            mon->ownerMethod = NULL;
            mon->ownerPc = 0;
            dvmUnlockMutex(&mon->lock);
        } else {
            mon->lockCount--;
        }
    } else {
        /*
         * We don't own this, so we're not allowed to unlock it.
         * The JNI spec says that we should throw IllegalMonitorStateException
         * in this case.
         */
        dvmThrowIllegalMonitorStateException("unlock of unowned monitor");
        return false;
    }
    return true;
}
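unlockMonitor leans on the monitor's owner field and lockCount: lockCount counts re-entrant acquisitions by the owning thread, and only the outermost unlock actually releases the underlying mutex. As a rough, illustrative sketch of that bookkeeping - not the Dalvik Monitor implementation, and for real use pthreads already offers PTHREAD_MUTEX_RECURSIVE - with all names hypothetical:

#include <pthread.h>
#include <stdbool.h>

/* Owner-counted lock, loosely mirroring Monitor::owner and lockCount. */
typedef struct {
    pthread_mutex_t mutex;
    pthread_t owner;        /* valid only while 'held' is true */
    bool held;
    int lockCount;          /* number of *extra* acquisitions by the owner */
} CountedLock;

static void countedLockInit(CountedLock* m)
{
    pthread_mutex_init(&m->mutex, NULL);
    m->held = false;
    m->lockCount = 0;
}

static void countedLockAcquire(CountedLock* m)
{
    /* If we already own it, 'owner' cannot change under us, so this check is
     * safe - the same reasoning the monitor code uses for mon->owner == self. */
    if (m->held && pthread_equal(m->owner, pthread_self())) {
        m->lockCount++;
        return;
    }
    pthread_mutex_lock(&m->mutex);
    m->owner = pthread_self();
    m->held = true;
}

/* Returns false if the caller does not own the lock (the case where the
 * monitor code throws IllegalMonitorStateException). */
static bool countedLockRelease(CountedLock* m)
{
    if (!m->held || !pthread_equal(m->owner, pthread_self()))
        return false;
    if (m->lockCount == 0) {
        m->held = false;            /* outermost release: drop the mutex */
        pthread_mutex_unlock(&m->mutex);
    } else {
        m->lockCount--;
    }
    return true;
}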
Example #10
/*
 * Verify that all blocks are freed.
 *
 * This should only be done as we're shutting down, but there could be a
 * daemon thread that's still trying to do something, so we grab the locks.
 */
static void checkAllFree(Object* classLoader)
{
#ifdef DISABLE_LINEAR_ALLOC
    return;
#endif
    LinearAllocHdr* pHdr = getHeader(classLoader);

    dvmLockMutex(&pHdr->lock);

    int off = pHdr->firstOffset;
    u4 rawLen, fullLen;

    while (off < pHdr->curOffset) {
        rawLen = *(u4*) (pHdr->mapAddr + off);
        fullLen = ((HEADER_EXTRA*2 + (rawLen & LENGTHFLAG_MASK))
                    & ~(BLOCK_ALIGN-1));

        if ((rawLen & LENGTHFLAG_FREE) == 0) {
            LOGW("LinearAlloc %p not freed: %p len=%d\n", classLoader,
                pHdr->mapAddr + off + HEADER_EXTRA, rawLen & LENGTHFLAG_MASK);
        }

        off += fullLen;
    }

    dvmUnlockMutex(&pHdr->lock);
}
Example #11
/*
Change the 2-byte value at the specified address to a new value.  If the
location already has the new value, do nothing.

Otherwise works like dvmDexChangeDex1.
*/
bool dvmDexChangeDex2(DvmDex* pDvmDex, u2* addr, u2 newVal)
{
    if (*addr == newVal) {
        ALOGV("+++ value at %p is already 0x%04x", addr, newVal);
        return true;
    }

    /*
     * We're not holding this for long, so we don't bother with switching
     * to VMWAIT.
     */
    dvmLockMutex(&pDvmDex->modLock);

    ALOGV("+++ change 2byte at %p from 0x%04x to 0x%04x", addr, *addr, newVal);
    if (sysChangeMapAccess(addr, 2, true, &pDvmDex->memMap) != 0) {
        ALOGD("NOTE: DEX page access change (->RW) failed");
        /* expected on files mounted from FAT; keep going (may crash) */
    }

    *addr = newVal;

    if (sysChangeMapAccess(addr, 2, false, &pDvmDex->memMap) != 0) {
        ALOGD("NOTE: DEX page access change (->RO) failed");
        /* expected on files mounted from FAT; keep going */
    }

    dvmUnlockMutex(&pDvmDex->modLock);

    return true;
}
Example #12
static StringObject* lookupInternedString(StringObject* strObj, bool isLiteral)
{
    StringObject* found;

    assert(strObj != NULL);
    u4 key = dvmComputeStringHash(strObj);
    dvmLockMutex(&gDvm.internLock);
    if (isLiteral) {
        /*
         * Check the literal table for a match.
         */
        StringObject* literal = lookupString(gDvm.literalStrings, key, strObj);
        if (literal != NULL) {
            /*
             * A match was found in the literal table, the easy case.
             */
            found = literal;
        } else {
            /*
             * There is no match in the literal table, check the
             * interned string table.
             */
            StringObject* interned = lookupString(gDvm.internedStrings, key, strObj);
            if (interned != NULL) {
                /*
                 * A match was found in the interned table.  Move the
                 * matching string to the literal table.
                 */
                dvmHashTableRemove(gDvm.internedStrings, key, interned);
                found = insertString(gDvm.literalStrings, key, interned);
                assert(found == interned);
            } else {
                /*
                 * No match in the literal table or the interned
                 * table.  Insert into the literal table.
                 */
                found = insertString(gDvm.literalStrings, key, strObj);
                assert(found == strObj);
            }
        }
    } else {
        /*
         * Check the literal table for a match.
         */
        found = lookupString(gDvm.literalStrings, key, strObj);
        if (found == NULL) {
            /*
             * No match was found in the literal table.  Insert into
             * the intern table if it does not already exist.
             */
            found = insertString(gDvm.internedStrings, key, strObj);
        }
    }
    assert(found != NULL);
    dvmUnlockMutex(&gDvm.internLock);
    return found;
}
Example #13
static void gcDaemonShutdown()
{
    if (gHs->hasGcThread) {
        dvmLockMutex(&gHs->gcThreadMutex);
        gHs->gcThreadShutdown = true;
        dvmSignalCond(&gHs->gcThreadCond);
        dvmUnlockMutex(&gHs->gcThreadMutex);
        pthread_join(gHs->gcThread, NULL);
    }
}
Example #14
/*
 * Clear white references from the intern table.
 */
void dvmGcDetachDeadInternedStrings(int (*isUnmarkedObject)(void *))
{
    /* It's possible for a GC to happen before dvmStringInternStartup()
     * is called.
     */
    if (gDvm.internedStrings != NULL) {
        dvmLockMutex(&gDvm.internLock);
        dvmHashForeachRemove(gDvm.internedStrings, isUnmarkedObject);
        dvmUnlockMutex(&gDvm.internLock);
    }
}
Example #15
/*
 * Returns true if the object is a weak interned string.  Any string
 * interned by the user is weak.
 */
bool dvmIsWeakInternedString(StringObject* strObj)
{
    assert(strObj != NULL);
    if (gDvm.internedStrings == NULL) {
        return false;
    }
    dvmLockMutex(&gDvm.internLock);
    u4 key = dvmComputeStringHash(strObj);
    StringObject* found = lookupString(gDvm.internedStrings, key, strObj);
    dvmUnlockMutex(&gDvm.internLock);
    return found == strObj;
}
Example #16
/*
 * Crank up the stdout/stderr converter thread.
 *
 * Does not return until the thread is ready for business.
 */
bool dvmStdioConverterStartup()
{
    gDvm.haltStdioConverter = false;

    dvmInitMutex(&gDvm.stdioConverterLock);
    pthread_cond_init(&gDvm.stdioConverterCond, NULL);

    if (pipe(gDvm.stdoutPipe) != 0) {
        ALOGW("pipe failed: %s", strerror(errno));
        return false;
    }
    if (pipe(gDvm.stderrPipe) != 0) {
        ALOGW("pipe failed: %s", strerror(errno));
        return false;
    }

    if (dup2(gDvm.stdoutPipe[1], kFilenoStdout) != kFilenoStdout) {
        ALOGW("dup2(1) failed: %s", strerror(errno));
        return false;
    }
    close(gDvm.stdoutPipe[1]);
    gDvm.stdoutPipe[1] = -1;
#ifdef HAVE_ANDROID_OS
    /* don't redirect stderr on sim -- logs get written there! */
    /* (don't need this on the sim anyway) */
    if (dup2(gDvm.stderrPipe[1], kFilenoStderr) != kFilenoStderr) {
        ALOGW("dup2(2) failed: %d %s", errno, strerror(errno));
        return false;
    }
    close(gDvm.stderrPipe[1]);
    gDvm.stderrPipe[1] = -1;
#endif


    /*
     * Create the thread.
     */
    dvmLockMutex(&gDvm.stdioConverterLock);

    if (!dvmCreateInternalThread(&gDvm.stdioConverterHandle,
                                 "Stdio Converter",
                                 stdioConverterThreadStart,
                                 NULL)) {
        dvmUnlockMutex(&gDvm.stdioConverterLock);   /* don't leave the lock held */
        return false;
    }

    while (!gDvm.stdioConverterReady) {
        dvmWaitCond(&gDvm.stdioConverterCond, &gDvm.stdioConverterLock);
    }
    dvmUnlockMutex(&gDvm.stdioConverterLock);

    return true;
}
Example #17
/*
 * Wake up the heap worker to let it know that there's work to be done.
 */
void dvmSignalHeapWorker(bool shouldLock)
{
    if (shouldLock) {
        dvmLockMutex(&gDvm.heapWorkerLock);
    }

    dvmSignalCond(&gDvm.heapWorkerCond);

    if (shouldLock) {
        dvmUnlockMutex(&gDvm.heapWorkerLock);
    }
}
Example #18
/**
 * Shut down the JIT compiler.
 */
void dvmCompilerShutdown(void)
{
    void *threadReturn;

    /* Disable new translation requests */
    gDvmJit.pProfTable = NULL;
    gDvmJit.pProfTableCopy = NULL;
    dvmJitUpdateThreadStateAll();       /* update the state of all threads */

    /*
     * Before the VM shuts down, dump the compiler statistics and wait for
     * the pending work orders to drain; gDvmJit.compilerQueueLength tracks
     * how many orders are still outstanding.
     */
    if (gDvm.verboseShutdown ||
            gDvmJit.profileMode == kTraceProfilingContinuous) {
        dvmCompilerDumpStats();
        while (gDvmJit.compilerQueueLength)
          sleep(5);
    }

    /* If the compiler thread exists */
    if (gDvmJit.compilerHandle) {

        gDvmJit.haltCompilerThread = true;      /* set the halt flag */

        /* Send the shutdown signal */
        dvmLockMutex(&gDvmJit.compilerLock);
        pthread_cond_signal(&gDvmJit.compilerQueueActivity);
        dvmUnlockMutex(&gDvmJit.compilerLock);

        /* Shut down (join) the compilerThreadStart thread */
        if (pthread_join(gDvmJit.compilerHandle, &threadReturn) != 0)
            ALOGW("Compiler thread join failed");
        else if (gDvm.verboseShutdown)
            ALOGD("Compiler thread has shut down");
    }

    /* Break loops within the translation cache */
    dvmJitUnchainAll();

    /*
     * NOTE: our current implementation doesn't allow for the compiler
     * thread to be restarted after it exits here.  We aren't freeing
     * the JitTable or the ProfTable because threads which still may be
     * running or in the process of shutting down may hold references to
     * them.
     */
}
Example #19
static void callMethod(Thread *self, Object *obj, Method *method)
{
    JValue unused;

    /* Keep track of the method we're about to call and
     * the current time so that other threads can detect
     * when this thread wedges and provide useful information.
     */
    gDvm.gcHeap->heapWorkerInterpStartTime = dvmGetRelativeTimeUsec();
    gDvm.gcHeap->heapWorkerInterpCpuStartTime = dvmGetThreadCpuTimeUsec();
    gDvm.gcHeap->heapWorkerCurrentMethod = method;
    gDvm.gcHeap->heapWorkerCurrentObject = obj;

    /* Call the method.
     *
     * Don't hold the lock when executing interpreted
     * code.  It may suspend, and the GC needs to grab
     * heapWorkerLock.
     */
    dvmUnlockMutex(&gDvm.heapWorkerLock);
    if (false) {
        /* Log entry/exit; this will likely flood the log enough to
         * cause "logcat" to drop entries.
         */
        char tmpTag[16];
        sprintf(tmpTag, "HW%d", self->systemTid);
        LOG(LOG_DEBUG, tmpTag, "Call %s\n", method->clazz->descriptor);
        dvmCallMethod(self, method, obj, &unused);
        LOG(LOG_DEBUG, tmpTag, " done\n");
    } else {
        dvmCallMethod(self, method, obj, &unused);
    }
    /*
     * Reacquire the heap worker lock in a suspend-friendly way.
     */
    lockMutex(&gDvm.heapWorkerLock);

    gDvm.gcHeap->heapWorkerCurrentObject = NULL;
    gDvm.gcHeap->heapWorkerCurrentMethod = NULL;
    gDvm.gcHeap->heapWorkerInterpStartTime = 0LL;

    /* Exceptions thrown during these calls interrupt
     * the method, but are otherwise ignored.
     */
    if (dvmCheckException(self)) {
#if DVM_SHOW_EXCEPTION >= 1
        LOGI("Uncaught exception thrown by finalizer (will be discarded):\n");
        dvmLogExceptionStackTrace();
#endif
        dvmClearException(self);
    }
}
Example #20
/*
 * public native void startJitCompilation()
 *
 * Callback function from the framework to indicate that an app has gone
 * through the startup phase and it is time to enable the JIT compiler.
 */
static void Dalvik_dalvik_system_VMRuntime_startJitCompilation(const u4* args,
    JValue* pResult)
{
#if defined(WITH_JIT)
    if (gDvm.executionMode == kExecutionModeJit &&
        gDvmJit.disableJit == false) {
        dvmLockMutex(&gDvmJit.compilerLock);
        gDvmJit.alreadyEnabledViaFramework = true;
        pthread_cond_signal(&gDvmJit.compilerQueueActivity);
        dvmUnlockMutex(&gDvmJit.compilerLock);
    }
#endif
    RETURN_VOID();
}
Example #21
/*
 * Wake up the heap worker to let it know that there's work to be done.
 */
void dvmSignalHeapWorker(bool shouldLock)
{
    int cc;

    if (shouldLock) {
        dvmLockMutex(&gDvm.heapWorkerLock);
    }

    cc = pthread_cond_signal(&gDvm.heapWorkerCond);
    assert(cc == 0);

    if (shouldLock) {
        dvmUnlockMutex(&gDvm.heapWorkerLock);
    }
}
Example #22
/* Block until the queue length is 0, or there is a pending suspend request */
void dvmCompilerDrainQueue(void)
{
    Thread *self = dvmThreadSelf();

    dvmLockMutex(&gDvmJit.compilerLock);
    while (workQueueLength() != 0 && !gDvmJit.haltCompilerThread &&
           self->suspendCount == 0) {
        /*
         * Use a timed wait here - more than one mutator thread may be blocked
         * but the compiler thread will only signal once when the queue is
         * emptied.  Furthermore, the compiler thread may have been shut down,
         * so a blocked thread might never get the wakeup signal.
         */
        dvmRelativeCondWait(&gDvmJit.compilerQueueEmpty, &gDvmJit.compilerLock,
                            1000, 0);
    }
    dvmUnlockMutex(&gDvmJit.compilerLock);
}
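The comment explains why a plain pthread_cond_wait would be risky here: the drain may be signalled only once, or never. dvmRelativeCondWait (its implementation is not shown in these examples) waits with a relative timeout so a missed signal only costs about a second. Assuming it behaves roughly like a thin wrapper over pthread_cond_timedwait, a sketch of such a relative-timeout wait could look like this:

#include <errno.h>
#include <pthread.h>
#include <time.h>

/*
 * Wait on 'cond' for at most 'msec' milliseconds plus 'nsec' nanoseconds.
 * Returns 0 if signalled, ETIMEDOUT if the deadline passed.  The mutex must
 * be held on entry and is held again on return, as with pthread_cond_wait().
 */
static int relativeCondWait(pthread_cond_t* cond, pthread_mutex_t* mutex,
                            long msec, long nsec)
{
    struct timespec deadline;
    clock_gettime(CLOCK_REALTIME, &deadline);   /* default condvar clock */
    deadline.tv_sec  += msec / 1000;
    deadline.tv_nsec += (msec % 1000) * 1000000L + nsec;
    while (deadline.tv_nsec >= 1000000000L) {   /* normalize nanoseconds */
        deadline.tv_sec++;
        deadline.tv_nsec -= 1000000000L;
    }
    return pthread_cond_timedwait(cond, mutex, &deadline);
}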
Example #23
/*
 * Do not return until any pending heap work has finished.  This may
 * or may not happen in the context of the calling thread.
 * No exceptions will escape.
 */
void dvmRunFinalizationSync()
{
    if (gDvm.zygote) {
        assert(!gDvm.heapWorkerReady);

        /* When in zygote mode, there is no heap worker.
         * Do the work in the current thread.
         */
        dvmLockMutex(&gDvm.heapWorkerLock);
        doHeapWork(dvmThreadSelf());
        dvmUnlockMutex(&gDvm.heapWorkerLock);
    } else {
        /* Outside of zygote mode, we can just ask the
         * heap worker thread to do the work.
         */
        dvmWaitForHeapWorkerIdle();
    }
}
Example #24
/*
 * Block until all pending heap worker work has finished.
 */
void dvmWaitForHeapWorkerIdle()
{
    assert(gDvm.heapWorkerReady);

    dvmChangeStatus(NULL, THREAD_VMWAIT);

    dvmLockMutex(&gDvm.heapWorkerLock);

    /* Wake up the heap worker and wait for it to finish. */
    //TODO(http://b/issue?id=699704): This will deadlock if
    //     called from finalize(), enqueue(), or clear().  We
    //     need to detect when this is called from the HeapWorker
    //     context and just give up.
    dvmSignalHeapWorker(false);
    dvmWaitCond(&gDvm.heapWorkerIdleCond, &gDvm.heapWorkerLock);

    dvmUnlockMutex(&gDvm.heapWorkerLock);

    dvmChangeStatus(NULL, THREAD_RUNNING);
}
Example #25
/**
 * @brief Drain the compiler work queue
 */
void dvmCompilerDrainQueue(void)
{
    Thread *self = dvmThreadSelf();

    dvmLockMutex(&gDvmJit.compilerLock);
    /* Loop until the queue is empty, checking that the compiler thread has
     * not been halted and that this thread has no pending suspend request */
    while (workQueueLength() != 0 && !gDvmJit.haltCompilerThread &&
           self->suspendCount == 0) {
        /*
         * Use a timed wait here - more than one mutator thread may be blocked
         * but the compiler thread will only signal once when the queue is
         * emptied.  Furthermore, the compiler thread may have been shut down,
         * so a blocked thread might never get the wakeup signal.
         */
        dvmRelativeCondWait(&gDvmJit.compilerQueueEmpty, &gDvmJit.compilerLock,
                            1000, 0);
    }
    dvmUnlockMutex(&gDvmJit.compilerLock);
}
Example #26
/*
 * Grab the lock, but put ourselves into THREAD_VMWAIT if it looks like
 * we're going to have to wait on the mutex.
 */
bool dvmLockHeap()
{
    /* This is hacked a bit to avoid deadlocks.  Basically I don't want a
     * thread to suspend itself holding the heap lock. */
    int res;
    while ((res = dvmTryLockMutex(&gDvm.gcHeapLock)) != 0) {
        assert(res == EBUSY);

        Thread *self;
        ThreadStatus oldStatus;

        self = dvmThreadSelf();
        oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
        dvmLockMutex(&gDvm.gcHeapLock);
        dvmUnlockMutex(&gDvm.gcHeapLock);
        dvmChangeStatus(self, oldStatus);
    }

    return true;
}
Example #27
void dvmCompilerShutdown(void)
{
    void *threadReturn;

    /* Disable new translation requests */
    gDvmJit.pProfTable = NULL;
    gDvmJit.pProfTableCopy = NULL;
    dvmJitUpdateThreadStateAll();

    if (gDvm.verboseShutdown ||
            gDvmJit.profileMode == kTraceProfilingContinuous) {
        dvmCompilerDumpStats();
        while (gDvmJit.compilerQueueLength)
          sleep(5);
    }

    if (gDvmJit.compilerHandle) {

        gDvmJit.haltCompilerThread = true;

        dvmLockMutex(&gDvmJit.compilerLock);
        pthread_cond_signal(&gDvmJit.compilerQueueActivity);
        dvmUnlockMutex(&gDvmJit.compilerLock);

        if (pthread_join(gDvmJit.compilerHandle, &threadReturn) != 0)
            ALOGW("Compiler thread join failed");
        else if (gDvm.verboseShutdown)
            ALOGD("Compiler thread has shut down");
    }

    /* Break loops within the translation cache */
    dvmJitUnchainAll();

    /*
     * NOTE: our current implementation doesn't allow for the compiler
     * thread to be restarted after it exits here.  We aren't freeing
     * the JitTable or the ProfTable because threads which still may be
     * running or in the process of shutting down may hold references to
     * them.
     */
}
Example #28
void dvmCompilerForceWorkEnqueue(const u2 *pc, WorkOrderKind kind, void* info)
{
    bool success;
    int retries = 0;
    do {
        success = dvmCompilerWorkEnqueue(pc, kind, info);
        if (!success) {
            retries++;
            if (retries > ENQUEUE_MAX_RETRIES) {
                ALOGE("JIT: compiler queue wedged - forcing reset");
                gDvmJit.codeCacheFull = true;  // Force reset
                success = true;  // Because we'll drop the order now anyway
            } else {
                dvmLockMutex(&gDvmJit.compilerLock);
                pthread_cond_wait(&gDvmJit.compilerQueueActivity,
                                  &gDvmJit.compilerLock);
                dvmUnlockMutex(&gDvmJit.compilerLock);

            }
        }
    } while (!success);
}
Example #29
bool dvmCompilerStartup(void)
{

    dvmInitMutex(&gDvmJit.compilerLock);
    dvmInitMutex(&gDvmJit.compilerICPatchLock);
    dvmInitMutex(&gDvmJit.codeCacheProtectionLock);
    dvmLockMutex(&gDvmJit.compilerLock);
    dvmInitCondForTimedWait(&gDvmJit.compilerQueueActivity);
    dvmInitCondForTimedWait(&gDvmJit.compilerQueueEmpty);

    /* Reset the work queue */
    gDvmJit.compilerWorkEnqueueIndex = gDvmJit.compilerWorkDequeueIndex = 0;
    gDvmJit.compilerQueueLength = 0;
    dvmUnlockMutex(&gDvmJit.compilerLock);

    /*
     * Defer rest of initialization until we're sure JIT'ng makes sense. Launch
     * the compiler thread, which will do the real initialization if and
     * when it is signalled to do so.
     */
    return dvmCreateInternalThread(&gDvmJit.compilerHandle, "Compiler",
                                   compilerThreadStart, NULL);
}
Example #30
void fastiva_Dalvik_dalvik_system_VMRuntime_preloadDexCaches(dalvik_system_VMRuntime_p self) {
    if (!kPreloadDexCachesEnabled) {
        return;
    }

    DexCacheStats total;
    DexCacheStats before;
    if (kPreloadDexCachesCollectStats) {
        ALOGI("VMRuntime.preloadDexCaches starting");
        preloadDexCachesStatsTotal(&total);
        preloadDexCachesStatsFilled(&before);
    }
#ifdef FASTIVA
	return;
#endif

    // We use a std::map to avoid heap allocating StringObjects to lookup in gDvm.literalStrings
    StringTable strings;
    if (kPreloadDexCachesStrings) {
        dvmLockMutex(&gDvm.internLock);
        dvmHashTableLock(gDvm.literalStrings);
        for (int i = 0; i < gDvm.literalStrings->tableSize; ++i) {
            HashEntry *entry = &gDvm.literalStrings->pEntries[i];
            if (entry->data != NULL && entry->data != HASH_TOMBSTONE) {
                preloadDexCachesStringsVisitor(&entry->data, 0, ROOT_INTERNED_STRING, &strings);
            }
        }
        dvmHashTableUnlock(gDvm.literalStrings);
        dvmUnlockMutex(&gDvm.internLock);
    }

    for (ClassPathEntry* cpe = gDvm.bootClassPath; cpe->kind != kCpeLastEntry; cpe++) {
        DvmDex* pDvmDex = getDvmDexFromClassPathEntry(cpe);
        const DexHeader* pHeader = pDvmDex->pHeader;
        const DexFile* pDexFile = pDvmDex->pDexFile;

        if (kPreloadDexCachesStrings) {
            for (size_t i = 0; i < pHeader->stringIdsSize; i++) {
                preloadDexCachesResolveString(pDvmDex, i, strings);
            }
        }

        if (kPreloadDexCachesTypes) {
            for (size_t i = 0; i < pHeader->typeIdsSize; i++) {
                preloadDexCachesResolveType(pDvmDex, i);
            }
        }

        if (kPreloadDexCachesFieldsAndMethods) {
            for (size_t classDefIndex = 0;
                 classDefIndex < pHeader->classDefsSize;
                 classDefIndex++) {
                const DexClassDef* pClassDef = dexGetClassDef(pDexFile, classDefIndex);
                const u1* pEncodedData = dexGetClassData(pDexFile, pClassDef);
                UniquePtr<DexClassData> pClassData(dexReadAndVerifyClassData(&pEncodedData, NULL));
                if (pClassData.get() == NULL) {
                    continue;
                }
                for (uint32_t fieldIndex = 0;
                     fieldIndex < pClassData->header.staticFieldsSize;
                     fieldIndex++) {
                    const DexField* pField = &pClassData->staticFields[fieldIndex];
                    preloadDexCachesResolveField(pDvmDex, pField->fieldIdx, false);
                }
                for (uint32_t fieldIndex = 0;
                     fieldIndex < pClassData->header.instanceFieldsSize;
                     fieldIndex++) {
                    const DexField* pField = &pClassData->instanceFields[fieldIndex];
                    preloadDexCachesResolveField(pDvmDex, pField->fieldIdx, true);
                }
                for (uint32_t methodIndex = 0;
                     methodIndex < pClassData->header.directMethodsSize;
                     methodIndex++) {
                    const DexMethod* pDexMethod = &pClassData->directMethods[methodIndex];
                    MethodType methodType = (((pDexMethod->accessFlags & ACC_STATIC) != 0) ?
                                             METHOD_STATIC :
                                             METHOD_DIRECT);
                    preloadDexCachesResolveMethod(pDvmDex, pDexMethod->methodIdx, methodType);
                }
                for (uint32_t methodIndex = 0;
                     methodIndex < pClassData->header.virtualMethodsSize;
                     methodIndex++) {
                    const DexMethod* pDexMethod = &pClassData->virtualMethods[methodIndex];
                    preloadDexCachesResolveMethod(pDvmDex, pDexMethod->methodIdx, METHOD_VIRTUAL);
                }
            }
        }
    }

    if (kPreloadDexCachesCollectStats) {
        DexCacheStats after;
        preloadDexCachesStatsFilled(&after);
        ALOGI("VMRuntime.preloadDexCaches strings total=%d before=%d after=%d",
              total.numStrings, before.numStrings, after.numStrings);
        ALOGI("VMRuntime.preloadDexCaches types total=%d before=%d after=%d",
              total.numTypes, before.numTypes, after.numTypes);
        ALOGI("VMRuntime.preloadDexCaches fields total=%d before=%d after=%d",
              total.numFields, before.numFields, after.numFields);
        ALOGI("VMRuntime.preloadDexCaches methods total=%d before=%d after=%d",
              total.numMethods, before.numMethods, after.numMethods);
        ALOGI("VMRuntime.preloadDexCaches finished");
    }

    RETURN_VOID();
}