/*
 * If the concurrent GC is running, wait for it to finish. The caller
 * must hold the heap lock.
 *
 * Note: the second dvmChangeStatus() could stall if we were in RUNNING
 * on entry, and some other thread has asked us to suspend. In that
 * case we will be suspended with the heap lock held, which can lead to
 * deadlock if the other thread tries to do something with the managed heap.
 * For example, the debugger might suspend us and then execute a method that
 * allocates memory. We can avoid this situation by releasing the lock
 * before self-suspending. (The developer can work around this specific
 * situation by single-stepping the VM. Alternatively, we could disable
 * concurrent GC when the debugger is attached, but that might change
 * behavior more than is desirable.)
 *
 * This should not be a problem in production, because any GC-related
 * activity will grab the lock before issuing a suspend-all. (We may briefly
 * suspend when the GC thread calls dvmUnlockHeap before dvmResumeAllThreads,
 * but there's no risk of deadlock.)
 */
bool dvmWaitForConcurrentGcToComplete()
{
    ATRACE_BEGIN("GC: Wait For Concurrent");
    bool waited = gDvm.gcHeap->gcRunning;
    Thread *self = dvmThreadSelf();
    assert(self != NULL);
    u4 start = dvmGetRelativeTimeMsec();
#ifdef FASTIVA
    // Ensure no Java-object references are used on the local stack,
    // and save any Java-object references that may be held in registers.
    FASTIVA_SUSPEND_STACK_unsafe(self);
    ThreadStatus oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
    while (gDvm.gcHeap->gcRunning) {
        dvmWaitCond(&gDvm.gcHeapCond, &gDvm.gcHeapLock);
    }
    dvmChangeStatus(self, oldStatus);
    FASTIVA_RESUME_STACK_unsafe(self);
#else
    while (gDvm.gcHeap->gcRunning) {
        ThreadStatus oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
        dvmWaitCond(&gDvm.gcHeapCond, &gDvm.gcHeapLock);
        dvmChangeStatus(self, oldStatus);
    }
#endif
    u4 end = dvmGetRelativeTimeMsec();
    if (end - start > 0) {
        ALOGD("WAIT_FOR_CONCURRENT_GC blocked %ums", end - start);
    }
    ATRACE_END();
    return waited;
}
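The wait loop above is the classic condition-variable idiom: dvmWaitCond() atomically releases the heap lock while blocked, and the predicate (gcHeap->gcRunning) is re-checked after every wakeup, since condition waits are subject to spurious wakeups and stale signals. Here is a minimal standalone sketch of the same idiom in raw pthreads; all names are illustrative stand-ins, not part of the VM:

#include <pthread.h>
#include <stdbool.h>

/* Illustrative stand-ins for gDvm.gcHeapLock / gcHeapCond / gcRunning. */
static pthread_mutex_t gc_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  gc_cond = PTHREAD_COND_INITIALIZER;
static bool gc_running = false;

/* Caller must hold gc_lock, mirroring dvmWaitForConcurrentGcToComplete(). */
static void wait_for_gc_to_complete(void)
{
    /* Re-check the predicate on every wakeup: pthread_cond_wait() may
     * return spuriously, and another waiter may have run first. */
    while (gc_running) {
        /* Atomically releases gc_lock while blocked; re-acquires it
         * before returning. */
        pthread_cond_wait(&gc_cond, &gc_lock);
    }
}

/* The GC side: clear the flag and wake all waiters under the same lock. */
static void signal_gc_complete(void)
{
    pthread_mutex_lock(&gc_lock);
    gc_running = false;
    pthread_cond_broadcast(&gc_cond);
    pthread_mutex_unlock(&gc_lock);
}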
/*
 * Crank up the heap worker thread.
 *
 * Does not return until the thread is ready for business.
 */
bool dvmHeapWorkerStartup(void)
{
    assert(!gDvm.haltHeapWorker);
    assert(!gDvm.heapWorkerReady);
    assert(gDvm.heapWorkerHandle == 0);
    assert(gDvm.heapWorkerInitialized);

    /* use heapWorkerLock/heapWorkerCond to communicate readiness */
    dvmLockMutex(&gDvm.heapWorkerLock);

    //BUG: If a GC happens in here or in the new thread while we hold the lock,
    //     the GC will deadlock when trying to acquire heapWorkerLock.
    if (!dvmCreateInternalThread(&gDvm.heapWorkerHandle, "HeapWorker",
            heapWorkerThreadStart, NULL)) {
        dvmUnlockMutex(&gDvm.heapWorkerLock);
        return false;
    }

    /*
     * Wait for the heap worker to come up. We know the thread was created,
     * so this should not get stuck.
     */
    while (!gDvm.heapWorkerReady) {
        dvmWaitCond(&gDvm.heapWorkerCond, &gDvm.heapWorkerLock);
    }

    dvmUnlockMutex(&gDvm.heapWorkerLock);
    return true;
}
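This startup sequence is a standard thread-creation handshake: take the lock, spawn the thread, then sleep on a condition variable until the child flips a ready flag (the child's side appears in heapWorkerThreadStart(), shown later). A self-contained sketch of the pattern, with illustrative names:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t ready_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  ready_cond = PTHREAD_COND_INITIALIZER;
static bool worker_ready = false;

static void* worker_main(void* arg)
{
    /* Announce readiness under the same lock the parent waits on. */
    pthread_mutex_lock(&ready_lock);
    worker_ready = true;
    pthread_cond_signal(&ready_cond);
    pthread_mutex_unlock(&ready_lock);
    /* ... worker loop ... */
    return NULL;
}

static bool start_worker(pthread_t* handle)
{
    pthread_mutex_lock(&ready_lock);
    if (pthread_create(handle, NULL, worker_main, NULL) != 0) {
        pthread_mutex_unlock(&ready_lock);  /* don't leave the lock held */
        return false;
    }
    /* The thread was created, so this loop is guaranteed to terminate. */
    while (!worker_ready) {
        pthread_cond_wait(&ready_cond, &ready_lock);
    }
    pthread_mutex_unlock(&ready_lock);
    return true;
}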
/*
 * Crank up the stdout/stderr converter thread.
 *
 * Does not return until the thread is ready for business.
 */
bool dvmStdioConverterStartup()
{
    gDvm.haltStdioConverter = false;
    dvmInitMutex(&gDvm.stdioConverterLock);
    pthread_cond_init(&gDvm.stdioConverterCond, NULL);

    if (pipe(gDvm.stdoutPipe) != 0) {
        ALOGW("pipe failed: %s", strerror(errno));
        return false;
    }
    if (pipe(gDvm.stderrPipe) != 0) {
        ALOGW("pipe failed: %s", strerror(errno));
        return false;
    }

    if (dup2(gDvm.stdoutPipe[1], kFilenoStdout) != kFilenoStdout) {
        ALOGW("dup2(1) failed: %s", strerror(errno));
        return false;
    }
    close(gDvm.stdoutPipe[1]);
    gDvm.stdoutPipe[1] = -1;
#ifdef HAVE_ANDROID_OS
    /* don't redirect stderr on sim -- logs get written there! */
    /* (don't need this on the sim anyway) */
    if (dup2(gDvm.stderrPipe[1], kFilenoStderr) != kFilenoStderr) {
        ALOGW("dup2(2) failed: %d %s", errno, strerror(errno));
        return false;
    }
    close(gDvm.stderrPipe[1]);
    gDvm.stderrPipe[1] = -1;
#endif

    /*
     * Create the thread.
     */
    dvmLockMutex(&gDvm.stdioConverterLock);
    if (!dvmCreateInternalThread(&gDvm.stdioConverterHandle, "Stdio Converter",
            stdioConverterThreadStart, NULL)) {
        /* release the lock on the failure path rather than leaving it held */
        dvmUnlockMutex(&gDvm.stdioConverterLock);
        return false;
    }

    while (!gDvm.stdioConverterReady) {
        dvmWaitCond(&gDvm.stdioConverterCond, &gDvm.stdioConverterLock);
    }
    dvmUnlockMutex(&gDvm.stdioConverterLock);
    return true;
}
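The redirection works by pointing the process's stdout file descriptor at the write end of a pipe: after dup2(), anything written to fd 1 lands in the pipe, and the converter thread drains the read end and forwards lines to the log. A minimal sketch of the same fd plumbing, with error handling trimmed and illustrative names:

#include <stdio.h>
#include <unistd.h>

/* Redirect stdout into a pipe; returns the read end, or -1 on failure. */
static int redirect_stdout(void)
{
    int fds[2];
    if (pipe(fds) != 0)
        return -1;
    /* Make fd 1 refer to the pipe's write end; stdout writes now enter
     * the pipe instead of the original terminal/file. */
    if (dup2(fds[1], STDOUT_FILENO) != STDOUT_FILENO) {
        close(fds[0]);
        close(fds[1]);
        return -1;
    }
    close(fds[1]);  /* fd 1 keeps the write end alive; drop the extra ref */
    return fds[0];  /* a reader thread would drain this end */
}

int main(void)
{
    int readFd = redirect_stdout();
    if (readFd < 0)
        return 1;
    printf("hello\n");
    fflush(stdout);
    char buf[64];
    ssize_t n = read(readFd, buf, sizeof(buf) - 1);
    if (n > 0) {
        buf[n] = '\0';
        /* stderr was not redirected in this sketch, so this is visible. */
        fprintf(stderr, "captured: %s", buf);
    }
    return 0;
}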
/*
 * If the concurrent GC is running, wait for it to finish. The caller
 * must hold the heap lock.
 *
 * Note: the second dvmChangeStatus() could stall if we were in RUNNING
 * on entry, and some other thread has asked us to suspend. In that
 * case we will be suspended with the heap lock held, which can lead to
 * deadlock if the other thread tries to do something with the managed heap.
 * For example, the debugger might suspend us and then execute a method that
 * allocates memory. We can avoid this situation by releasing the lock
 * before self-suspending. (The developer can work around this specific
 * situation by single-stepping the VM. Alternatively, we could disable
 * concurrent GC when the debugger is attached, but that might change
 * behavior more than is desirable.)
 *
 * This should not be a problem in production, because any GC-related
 * activity will grab the lock before issuing a suspend-all. (We may briefly
 * suspend when the GC thread calls dvmUnlockHeap before dvmResumeAllThreads,
 * but there's no risk of deadlock.)
 */
void dvmWaitForConcurrentGcToComplete()
{
    Thread *self = dvmThreadSelf();
    assert(self != NULL);
    u4 start = dvmGetRelativeTimeMsec();
    while (gDvm.gcHeap->gcRunning) {
        ThreadStatus oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
        dvmWaitCond(&gDvm.gcHeapCond, &gDvm.gcHeapLock);
        dvmChangeStatus(self, oldStatus);
    }
    u4 end = dvmGetRelativeTimeMsec();
    ALOGD("WAIT_FOR_CONCURRENT_GC blocked %ums", end - start);
}
/*
 * The garbage collection daemon. Initiates a concurrent collection
 * when signaled. Also periodically trims the heaps when a few seconds
 * have elapsed since the last concurrent GC.
 */
static void *gcDaemonThread(void* arg)
{
    dvmChangeStatus(NULL, THREAD_VMWAIT);
    dvmLockMutex(&gHs->gcThreadMutex);
    while (gHs->gcThreadShutdown != true) {
        bool trim = false;
        if (gHs->gcThreadTrimNeeded) {
            int result = dvmRelativeCondWait(&gHs->gcThreadCond, &gHs->gcThreadMutex,
                    HEAP_TRIM_IDLE_TIME_MS, 0);
            if (result == ETIMEDOUT) {
                /* Timed out waiting for a GC request, schedule a heap trim. */
                trim = true;
            }
        } else {
            dvmWaitCond(&gHs->gcThreadCond, &gHs->gcThreadMutex);
        }

        // Many JDWP requests cause allocation. We can't take the heap lock and wait to
        // transition to runnable so we can start a GC if a debugger is connected, because
        // we don't know that the JDWP thread isn't about to allocate and require the
        // heap lock itself, leading to deadlock. http://b/8191824.
        if (gDvm.debuggerConnected) {
            continue;
        }

        dvmLockHeap();
        /*
         * Another thread may have started a concurrent garbage
         * collection before we were scheduled. Check for this
         * condition before proceeding.
         */
        if (!gDvm.gcHeap->gcRunning) {
            dvmChangeStatus(NULL, THREAD_RUNNING);
            if (trim) {
                trimHeaps();
                gHs->gcThreadTrimNeeded = false;
            } else {
                dvmCollectGarbageInternal(GC_CONCURRENT);
                gHs->gcThreadTrimNeeded = true;
            }
            dvmChangeStatus(NULL, THREAD_VMWAIT);
        }
        dvmUnlockHeap();
    }
    dvmChangeStatus(NULL, THREAD_RUNNING);
    return NULL;
}
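dvmRelativeCondWait() takes a relative timeout in milliseconds, while a POSIX timed wait needs an absolute deadline. The following is a hedged sketch of how such a relative wait can be built on pthread_cond_timedwait(); it assumes the condition variable was created with pthread_condattr_setclock(CLOCK_MONOTONIC), which is one portable approach, and is not necessarily how the Dalvik wrapper is implemented:

#include <errno.h>
#include <pthread.h>
#include <time.h>

/* Wait on `cond` for up to `msec` milliseconds plus `nsec` nanoseconds.
 * Returns 0 if signaled, ETIMEDOUT on timeout. Assumes `cond` uses
 * CLOCK_MONOTONIC via pthread_condattr_setclock(). */
static int relative_cond_wait(pthread_cond_t* cond, pthread_mutex_t* mutex,
                              long msec, long nsec)
{
    struct timespec deadline;
    clock_gettime(CLOCK_MONOTONIC, &deadline);

    /* Convert the relative timeout to an absolute deadline. */
    deadline.tv_sec  += msec / 1000;
    deadline.tv_nsec += (msec % 1000) * 1000000L + nsec;
    while (deadline.tv_nsec >= 1000000000L) {   /* normalize the carry */
        deadline.tv_sec  += 1;
        deadline.tv_nsec -= 1000000000L;
    }

    return pthread_cond_timedwait(cond, mutex, &deadline);
}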
/*
 * Block until all pending heap worker work has finished.
 */
void dvmWaitForHeapWorkerIdle()
{
    assert(gDvm.heapWorkerReady);

    dvmChangeStatus(NULL, THREAD_VMWAIT);

    dvmLockMutex(&gDvm.heapWorkerLock);

    /* Wake up the heap worker and wait for it to finish. */
    //TODO(http://b/issue?id=699704): This will deadlock if
    //     called from finalize(), enqueue(), or clear(). We
    //     need to detect when this is called from the HeapWorker
    //     context and just give up.
    dvmSignalHeapWorker(false);
    dvmWaitCond(&gDvm.heapWorkerIdleCond, &gDvm.heapWorkerLock);

    dvmUnlockMutex(&gDvm.heapWorkerLock);

    dvmChangeStatus(NULL, THREAD_RUNNING);
}
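The TODO above suggests detecting when the caller is the HeapWorker thread itself (e.g., inside finalize()) and giving up instead of waiting on a condition it can never satisfy. A hedged sketch of one way to do that with pthread_equal(); heap_worker_handle is an illustrative stand-in for gDvm.heapWorkerHandle, and this guard is not part of the VM:

#include <pthread.h>
#include <stdbool.h>

/* Illustrative stand-in for gDvm.heapWorkerHandle. */
static pthread_t heap_worker_handle;

/* Returns true if the calling thread is the heap worker itself, in which
 * case waiting for the worker to go idle would self-deadlock. */
static bool on_heap_worker_thread(void)
{
    return pthread_equal(pthread_self(), heap_worker_handle) != 0;
}

/* Guard the wait as the TODO suggests: give up instead of deadlocking. */
static bool wait_for_worker_idle_checked(void)
{
    if (on_heap_worker_thread())
        return false;  /* caller must cope without the idle guarantee */
    /* ... signal the worker and wait on the idle condition, as above ... */
    return true;
}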
/*
 * If the concurrent GC is running, wait for it to finish. The caller
 * must hold the heap lock.
 *
 * Note: the second dvmChangeStatus() could stall if we were in RUNNING
 * on entry, and some other thread has asked us to suspend. In that
 * case we will be suspended with the heap lock held, which can lead to
 * deadlock if the other thread tries to do something with the managed heap.
 * For example, the debugger might suspend us and then execute a method that
 * allocates memory. We can avoid this situation by releasing the lock
 * before self-suspending. (The developer can work around this specific
 * situation by single-stepping the VM. Alternatively, we could disable
 * concurrent GC when the debugger is attached, but that might change
 * behavior more than is desirable.)
 *
 * This should not be a problem in production, because any GC-related
 * activity will grab the lock before issuing a suspend-all. (We may briefly
 * suspend when the GC thread calls dvmUnlockHeap before dvmResumeAllThreads,
 * but there's no risk of deadlock.)
 */
bool dvmWaitForConcurrentGcToComplete()
{
    bool waited = gDvm.gcHeap->gcRunning;
    Thread *self = dvmThreadSelf();
    assert(self != NULL);
#ifdef DEBUG
    u4 start = dvmGetRelativeTimeMsec();
#endif
    while (gDvm.gcHeap->gcRunning) {
        ThreadStatus oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
        dvmWaitCond(&gDvm.gcHeapCond, &gDvm.gcHeapLock);
        dvmChangeStatus(self, oldStatus);
    }
#ifdef DEBUG
    u4 end = dvmGetRelativeTimeMsec();
    if (end - start > 0) {
        ALOGD("WAIT_FOR_CONCURRENT_GC blocked %ums", end - start);
    }
#endif
    return waited;
}
/*
 * The garbage collection daemon. Initiates a concurrent collection
 * when signaled. Also periodically trims the heaps when a few seconds
 * have elapsed since the last concurrent GC.
 */
static void *gcDaemonThread(void* arg)
{
    dvmChangeStatus(NULL, THREAD_VMWAIT);
    dvmLockMutex(&gHs->gcThreadMutex);
    while (gHs->gcThreadShutdown != true) {
        bool trim = false;
        if (gHs->gcThreadTrimNeeded) {
            int result = dvmRelativeCondWait(&gHs->gcThreadCond, &gHs->gcThreadMutex,
                    HEAP_TRIM_IDLE_TIME_MS, 0);
            if (result == ETIMEDOUT) {
                /* Timed out waiting for a GC request, schedule a heap trim. */
                trim = true;
            }
        } else {
            dvmWaitCond(&gHs->gcThreadCond, &gHs->gcThreadMutex);
        }

        dvmLockHeap();
        /*
         * Another thread may have started a concurrent garbage
         * collection before we were scheduled. Check for this
         * condition before proceeding.
         */
        if (!gDvm.gcHeap->gcRunning) {
            dvmChangeStatus(NULL, THREAD_RUNNING);
            if (trim) {
                trimHeaps();
                gHs->gcThreadTrimNeeded = false;
            } else {
                dvmCollectGarbageInternal(GC_CONCURRENT);
                gHs->gcThreadTrimNeeded = true;
            }
            dvmChangeStatus(NULL, THREAD_VMWAIT);
        }
        dvmUnlockHeap();
    }
    dvmChangeStatus(NULL, THREAD_RUNNING);
    return NULL;
}
/*
 * The heap worker thread sits quietly until the GC tells it there's work
 * to do.
 */
static void* heapWorkerThreadStart(void* arg)
{
    Thread *self = dvmThreadSelf();

    UNUSED_PARAMETER(arg);

    LOGV("HeapWorker thread started (threadid=%d)\n", self->threadId);

    /* tell the main thread that we're ready */
    lockMutex(&gDvm.heapWorkerLock);
    gDvm.heapWorkerReady = true;
    dvmSignalCond(&gDvm.heapWorkerCond);
    dvmUnlockMutex(&gDvm.heapWorkerLock);

    lockMutex(&gDvm.heapWorkerLock);
    while (!gDvm.haltHeapWorker) {
        struct timespec trimtime;
        bool timedwait = false;

        /* We're done running interpreted code for now. */
        dvmChangeStatus(NULL, THREAD_VMWAIT);

        /* Signal anyone who wants to know when we're done. */
        dvmBroadcastCond(&gDvm.heapWorkerIdleCond);

        /* Trim the heap if we were asked to. */
        trimtime = gDvm.gcHeap->heapWorkerNextTrim;
        if (trimtime.tv_sec != 0 && trimtime.tv_nsec != 0) {
            struct timespec now;

#ifdef HAVE_TIMEDWAIT_MONOTONIC
            clock_gettime(CLOCK_MONOTONIC, &now);       // relative time
#else
            struct timeval tvnow;
            gettimeofday(&tvnow, NULL);                 // absolute time
            now.tv_sec = tvnow.tv_sec;
            now.tv_nsec = tvnow.tv_usec * 1000;
#endif

            if (trimtime.tv_sec < now.tv_sec ||
                (trimtime.tv_sec == now.tv_sec &&
                 trimtime.tv_nsec <= now.tv_nsec)) {
                size_t madvisedSizes[HEAP_SOURCE_MAX_HEAP_COUNT];

                /*
                 * Acquire the gcHeapLock. This requires releasing the
                 * heapWorkerLock before the gcHeapLock is acquired.
                 * It is possible that the gcHeapLock may be acquired
                 * during a concurrent GC in which case heapWorkerLock
                 * is held by the GC and we are unable to make forward
                 * progress. We avoid deadlock by releasing the
                 * gcHeapLock and then waiting to be signaled when the
                 * GC completes. There is no guarantee that the next
                 * time we are run will coincide with GC inactivity so
                 * the check and wait must be performed within a loop.
                 */
                dvmUnlockMutex(&gDvm.heapWorkerLock);
                dvmLockHeap();
                while (gDvm.gcHeap->gcRunning) {
                    dvmWaitForConcurrentGcToComplete();
                }
                dvmLockMutex(&gDvm.heapWorkerLock);
                memset(madvisedSizes, 0, sizeof(madvisedSizes));
                dvmHeapSourceTrim(madvisedSizes, HEAP_SOURCE_MAX_HEAP_COUNT);
                dvmLogMadviseStats(madvisedSizes, HEAP_SOURCE_MAX_HEAP_COUNT);

                dvmUnlockHeap();

                trimtime.tv_sec = 0;
                trimtime.tv_nsec = 0;
                gDvm.gcHeap->heapWorkerNextTrim = trimtime;
            } else {
                timedwait = true;
            }
        }

        /* sleep until signaled */
        if (timedwait) {
            int cc __attribute__ ((__unused__));
#ifdef HAVE_TIMEDWAIT_MONOTONIC
            cc = pthread_cond_timedwait_monotonic(&gDvm.heapWorkerCond,
                    &gDvm.heapWorkerLock, &trimtime);
#else
            cc = pthread_cond_timedwait(&gDvm.heapWorkerCond,
                    &gDvm.heapWorkerLock, &trimtime);
#endif
            assert(cc == 0 || cc == ETIMEDOUT);
        } else {
            dvmWaitCond(&gDvm.heapWorkerCond, &gDvm.heapWorkerLock);
        }

        /*
         * Return to the running state before doing heap work. This
         * will block if the GC has initiated a suspend. We release
         * the heapWorkerLock beforehand for the GC to make progress
         * and wait to be signaled after the GC completes. There is
         * no guarantee that the next time we are run will coincide
         * with GC inactivity so the check and wait must be performed
         * within a loop.
         */
        dvmUnlockMutex(&gDvm.heapWorkerLock);
        dvmChangeStatus(NULL, THREAD_RUNNING);
        dvmLockHeap();
        while (gDvm.gcHeap->gcRunning) {
            dvmWaitForConcurrentGcToComplete();
        }
        dvmLockMutex(&gDvm.heapWorkerLock);
        dvmUnlockHeap();
        LOGV("HeapWorker is awake\n");

        /* Process any events in the queue. */
        doHeapWork(self);
    }

    dvmUnlockMutex(&gDvm.heapWorkerLock);
    if (gDvm.verboseShutdown)
        LOGD("HeapWorker thread shutting down\n");
    return NULL;
}
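The trim path above encodes a lock-ordering rule: heapWorkerLock may not be held while acquiring the heap lock, so the worker drops its own lock, takes the heap lock, waits out any concurrent GC, and only then re-acquires heapWorkerLock. A stripped-down sketch of that drop-and-reacquire ordering, with illustrative names standing in for the VM's locks:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t worker_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t heap_lock   = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  gc_cond     = PTHREAD_COND_INITIALIZER;
static bool gc_running = false;

/* Called with worker_lock held; returns with heap_lock and worker_lock held.
 * Enforces a single global order (heap_lock before worker_lock) by
 * releasing worker_lock first, so a thread that already holds heap_lock
 * and wants worker_lock can never deadlock against us. */
static void acquire_heap_then_worker(void)
{
    pthread_mutex_unlock(&worker_lock);
    pthread_mutex_lock(&heap_lock);
    /* The GC may be mid-cycle; wait it out. Loop because there is no
     * guarantee the next wakeup coincides with GC inactivity. */
    while (gc_running) {
        pthread_cond_wait(&gc_cond, &heap_lock);
    }
    pthread_mutex_lock(&worker_lock);
}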