/*
 * void nameChanged(String newName)
 *
 * The name of the target thread has changed.  We may need to alert DDMS.
 */
static void Dalvik_java_lang_VMThread_nameChanged(const u4* args,
    JValue* pResult)
{
    Object* vmThreadObj = (Object*) args[0];
    StringObject* newName = (StringObject*) args[1];
    int threadId = -1;

    /*
     * Map the VMThread object back to its Thread and grab its id.  Hold
     * the thread list lock so the Thread can't be torn down under us.
     * If the thread is already gone, threadId stays -1.
     */
    dvmLockThreadList(NULL);
    Thread* target = dvmGetThreadFromThreadObject(vmThreadObj);
    if (target != NULL)
        threadId = target->threadId;
    dvmUnlockThreadList();

    dvmDdmSendThreadNameChange(threadId, newName);

    RETURN_VOID();
}
/*
 * Find the specified thread and return its stack trace as an array of
 * StackTraceElement objects.
 */
ArrayObject* dvmDdmGetStackTraceById(u4 threadId)
{
    Thread* self = dvmThreadSelf();

    dvmLockThreadList(self);

    /* Walk the thread list looking for a matching thread id. */
    Thread* target = gDvm.threadList;
    while (target != NULL && target->threadId != threadId)
        target = target->next;

    if (target == NULL) {
        LOGI("dvmDdmGetStackTraceById: threadid=%d not found\n", threadId);
        dvmUnlockThreadList();
        return NULL;
    }

    /*
     * Suspend the thread, pull out the stack trace, then resume the thread
     * and release the thread list lock.  If we're being asked to examine
     * our own stack trace, skip the suspend/resume.
     */
    int stackDepth = -1;
    if (target != self)
        dvmSuspendThread(target);
    int* traceBuf = dvmFillInStackTraceRaw(target, &stackDepth);
    if (target != self)
        dvmResumeThread(target);
    dvmUnlockThreadList();

    /* Convert the raw buffer into an array of StackTraceElement. */
    ArrayObject* trace = dvmGetStackTraceRaw(traceBuf, stackDepth);
    free(traceBuf);
    return trace;
}
/*
 * Throw away everything in the JIT code cache and return it to its initial
 * (template-only) state.  Bails out — deferring the reset — if any thread is
 * still executing inside the cache.  Touches the thread list lock, the
 * compiler lock, and the IC patch lock in that order; the UNPROTECT/clear/
 * flush/PROTECT sequence below must stay exactly as written.
 */
static void resetCodeCache(void)
{
    Thread* thread;
    u8 startTime = dvmGetRelativeTimeUsec();
    int inJit = 0;
    /* remember the pre-reset usage for the final log line */
    int byteUsed = gDvmJit.codeCacheByteUsed;

    /* If any thread is found stuck in the JIT state, don't reset the cache */
    dvmLockThreadList(NULL);
    for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
        /*
         * Crawl the stack to wipe out the returnAddr field so that
         * 1) the soon-to-be-deleted code in the JIT cache won't be used
         * 2) or the thread stuck in the JIT land will soon return
         *    to the interpreter land
         */
        crawlDalvikStack(thread, false);
        if (thread->inJitCodeCache) {
            inJit++;
        }
        /* Cancel any ongoing trace selection */
        dvmDisableSubMode(thread, kSubModeJitTraceBuild);
    }
    dvmUnlockThreadList();

    if (inJit) {
        /* Somebody is still in the cache; count the delay and try later. */
        ALOGD("JIT code cache reset delayed (%d bytes %d/%d)",
             gDvmJit.codeCacheByteUsed, gDvmJit.numCodeCacheReset,
             ++gDvmJit.numCodeCacheResetDelayed);
        return;
    }

    /* Lock the mutex to clean up the work queue */
    dvmLockMutex(&gDvmJit.compilerLock);

    /* Update the translation cache version */
    gDvmJit.cacheVersion++;

    /* Drain the work queue to free the work orders */
    while (workQueueLength()) {
        CompilerWorkOrder work = workDequeue();
        free(work.info);
    }

    /* Reset the JitEntry table contents to the initial unpopulated state */
    dvmJitResetTable();

    UNPROTECT_CODE_CACHE(gDvmJit.codeCache, gDvmJit.codeCacheByteUsed);
    /*
     * Wipe out the code cache content to force immediate crashes if
     * stale JIT'ed code is invoked.
     */
    dvmCompilerCacheClear((char *) gDvmJit.codeCache + gDvmJit.templateSize,
                          gDvmJit.codeCacheByteUsed -
                          gDvmJit.templateSize);
    dvmCompilerCacheFlush((intptr_t) gDvmJit.codeCache,
                          (intptr_t) gDvmJit.codeCache +
                          gDvmJit.codeCacheByteUsed);
    PROTECT_CODE_CACHE(gDvmJit.codeCache, gDvmJit.codeCacheByteUsed);

    /* Reset the current mark of used bytes to the end of template code */
    gDvmJit.codeCacheByteUsed = gDvmJit.templateSize;
    gDvmJit.numCompilations = 0;

    /* Reset the work queue */
    memset(gDvmJit.compilerWorkQueue, 0,
           sizeof(CompilerWorkOrder) * COMPILER_WORK_QUEUE_SIZE);
    gDvmJit.compilerWorkEnqueueIndex = gDvmJit.compilerWorkDequeueIndex = 0;
    gDvmJit.compilerQueueLength = 0;

    /* Reset the IC patch work queue */
    dvmLockMutex(&gDvmJit.compilerICPatchLock);
    gDvmJit.compilerICPatchIndex = 0;
    dvmUnlockMutex(&gDvmJit.compilerICPatchLock);

    /*
     * Reset the inflight compilation address (can only be done in safe points
     * or by the compiler thread when its thread state is RUNNING).
     */
    gDvmJit.inflightBaseAddr = NULL;

    /* All clear now */
    gDvmJit.codeCacheFull = false;

    dvmUnlockMutex(&gDvmJit.compilerLock);

    ALOGD("JIT code cache reset in %lld ms (%d bytes %d/%d)",
         (dvmGetRelativeTimeUsec() - startTime) / 1000,
         byteUsed, ++gDvmJit.numCodeCacheReset,
         gDvmJit.numCodeCacheResetDelayed);
}
/* * Generate the contents of a THST chunk. The data encompasses all known * threads. * * Response has: * (1b) header len * (1b) bytes per entry * (2b) thread count * Then, for each thread: * (4b) threadId * (1b) thread status * (4b) tid * (4b) utime * (4b) stime * (1b) is daemon? * * The length fields exist in anticipation of adding additional fields * without wanting to break ddms or bump the full protocol version. I don't * think it warrants full versioning. They might be extraneous and could * be removed from a future version. * * Returns a new byte[] with the data inside, or NULL on failure. The * caller must call dvmReleaseTrackedAlloc() on the array. */ ArrayObject* dvmDdmGenerateThreadStats(void) { const int kHeaderLen = 4; const int kBytesPerEntry = 18; dvmLockThreadList(NULL); Thread* thread; int threadCount = 0; for (thread = gDvm.threadList; thread != NULL; thread = thread->next) threadCount++; /* * Create a temporary buffer. We can't perform heap allocation with * the thread list lock held (could cause a GC). The output is small * enough to sit on the stack. */ int bufLen = kHeaderLen + threadCount * kBytesPerEntry; u1 tmpBuf[bufLen]; u1* buf = tmpBuf; set1(buf+0, kHeaderLen); set1(buf+1, kBytesPerEntry); set2BE(buf+2, (u2) threadCount); buf += kHeaderLen; pid_t pid = getpid(); for (thread = gDvm.threadList; thread != NULL; thread = thread->next) { unsigned long utime, stime; bool isDaemon = false; if (!getThreadStats(pid, thread->systemTid, &utime, &stime)) { // failed; drop in empty values utime = stime = 0; } Object* threadObj = thread->threadObj; if (threadObj != NULL) { isDaemon = dvmGetFieldBoolean(threadObj, gDvm.offJavaLangThread_daemon); } set4BE(buf+0, thread->threadId); set1(buf+4, thread->status); set4BE(buf+5, thread->systemTid); set4BE(buf+9, utime); set4BE(buf+13, stime); set1(buf+17, isDaemon); buf += kBytesPerEntry; } dvmUnlockThreadList(); /* * Create a byte array to hold the data. 
*/ ArrayObject* arrayObj = dvmAllocPrimitiveArray('B', bufLen, ALLOC_DEFAULT); if (arrayObj != NULL) memcpy(arrayObj->contents, tmpBuf, bufLen); return arrayObj; }
/* Make sure that the HeapWorker thread hasn't spent an inordinate * amount of time inside a finalizer. * * Aborts the VM if the thread appears to be wedged. * * The caller must hold the heapWorkerLock to guarantee an atomic * read of the watchdog values. */ void dvmAssertHeapWorkerThreadRunning() { if (gDvm.gcHeap->heapWorkerCurrentObject != NULL) { static const u8 HEAP_WORKER_WATCHDOG_TIMEOUT = 10*1000*1000LL; // 10sec u8 heapWorkerInterpStartTime = gDvm.gcHeap->heapWorkerInterpStartTime; u8 now = dvmGetRelativeTimeUsec(); u8 delta = now - heapWorkerInterpStartTime; if (delta > HEAP_WORKER_WATCHDOG_TIMEOUT && (gDvm.debuggerActive || gDvm.nativeDebuggerActive)) { /* * Debugger suspension can block the thread indefinitely. For * best results we should reset this explicitly whenever the * HeapWorker thread is resumed. Unfortunately this is also * affected by native debuggers, and we have no visibility * into how they're manipulating us. So, we ignore the * watchdog and just reset the timer. */ LOGI("Debugger is attached -- suppressing HeapWorker watchdog\n"); gDvm.gcHeap->heapWorkerInterpStartTime = now; /* reset timer */ } else if (delta > HEAP_WORKER_WATCHDOG_TIMEOUT) { /* * Before we give up entirely, see if maybe we're just not * getting any CPU time because we're stuck in a background * process group. If we successfully move the thread into the * foreground we'll just leave it there (it doesn't do anything * if the process isn't GCing). 
*/ dvmLockThreadList(NULL); Thread* thread = dvmGetThreadByHandle(gDvm.heapWorkerHandle); dvmUnlockThreadList(); if (thread != NULL) { int priChangeFlags, threadPrio; SchedPolicy threadPolicy; priChangeFlags = dvmRaiseThreadPriorityIfNeeded(thread, &threadPrio, &threadPolicy); if (priChangeFlags != 0) { LOGI("HeapWorker watchdog expired, raising priority" " and retrying\n"); gDvm.gcHeap->heapWorkerInterpStartTime = now; return; } } char* desc = dexProtoCopyMethodDescriptor( &gDvm.gcHeap->heapWorkerCurrentMethod->prototype); LOGE("HeapWorker is wedged: %lldms spent inside %s.%s%s\n", delta / 1000, gDvm.gcHeap->heapWorkerCurrentObject->clazz->descriptor, gDvm.gcHeap->heapWorkerCurrentMethod->name, desc); free(desc); dvmDumpAllThreads(true); /* try to get a debuggerd dump from the target thread */ dvmNukeThread(thread); /* abort the VM */ dvmAbort(); } else if (delta > HEAP_WORKER_WATCHDOG_TIMEOUT / 2) { char* desc = dexProtoCopyMethodDescriptor( &gDvm.gcHeap->heapWorkerCurrentMethod->prototype); LOGW("HeapWorker may be wedged: %lldms spent inside %s.%s%s\n", delta / 1000, gDvm.gcHeap->heapWorkerCurrentObject->clazz->descriptor, gDvm.gcHeap->heapWorkerCurrentMethod->name, desc); free(desc); } } }