/*
 * Explicitly initiate garbage collection.
 */
void dvmCollectGarbage(bool collectSoftReferences)
{
    dvmLockHeap();
    while (gDvm.gcHeap->gcRunning) {
        dvmWaitForConcurrentGcToComplete();
    }
    dvmCollectGarbageInternal(collectSoftReferences, GC_EXPLICIT);
    dvmUnlockHeap();
}
void dvmHeapSourceRegisterNativeAllocation(int bytes)
{
    /* If we have just done a GC, ensure that the finalizers are done and update
     * the native watermarks.
     */
    if (gHs->nativeNeedToRunFinalization) {
        dvmRunFinalization();
        dvmHeapSourceUpdateMaxNativeFootprint();
        gHs->nativeNeedToRunFinalization = false;
    }

    android_atomic_add(bytes, &gHs->nativeBytesAllocated);

    if ((size_t)gHs->nativeBytesAllocated > gHs->nativeFootprintGCWatermark) {
        /* The second watermark is higher than the gc watermark. If you hit
         * this it means you are allocating native objects faster than the GC
         * can keep up with. If this occurs, we do a GC for alloc.
         */
        if ((size_t)gHs->nativeBytesAllocated > gHs->nativeFootprintLimit) {
            Thread* self = dvmThreadSelf();
            dvmRunFinalization();
            if (dvmCheckException(self)) {
                return;
            }
            dvmLockHeap();
            bool waited = dvmWaitForConcurrentGcToComplete();
            dvmUnlockHeap();
            if (waited) {
                // Just finished a GC, attempt to run finalizers.
                dvmRunFinalization();
                if (dvmCheckException(self)) {
                    return;
                }
            }

            // If we still are over the watermark, attempt a GC for alloc and run finalizers.
            if ((size_t)gHs->nativeBytesAllocated > gHs->nativeFootprintLimit) {
                dvmLockHeap();
                dvmWaitForConcurrentGcToComplete();
                dvmCollectGarbageInternal(GC_FOR_MALLOC);
                dvmUnlockHeap();
                dvmRunFinalization();
                gHs->nativeNeedToRunFinalization = false;
                if (dvmCheckException(self)) {
                    return;
                }
            }
            /* We have just run finalizers, update the native watermark since
             * it is very likely that finalizers released native managed
             * allocations.
             */
            dvmHeapSourceUpdateMaxNativeFootprint();
        } else {
            dvmSignalCond(&gHs->gcThreadCond);
        }
    }
}
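/* For context: the registration path above is normally paired with a release
 * path that atomically decrements nativeBytesAllocated when native memory is
 * freed. The sketch below is a minimal illustration of that counterpart,
 * assuming the android_atomic_cas() helper (returns non-zero on failure) and
 * the gHs fields used above; it is not necessarily the verbatim Dalvik code.
 */
void dvmHeapSourceRegisterNativeFree(int bytes)
{
    int expected_size, new_size;
    do {
        expected_size = gHs->nativeBytesAllocated;
        new_size = expected_size - bytes;
        if (new_size < 0) {
            /* Never let the counter go negative if frees are over-reported. */
            break;
        }
        /* Retry until the compare-and-swap installs the decremented value. */
    } while (android_atomic_cas(expected_size, new_size,
                                &gHs->nativeBytesAllocated));
}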
/*
 * Explicitly initiate garbage collection.
 */
void dvmCollectGarbage(bool collectSoftReferences)
{
    dvmLockHeap();
    LOGVV("Explicit GC\n");
    dvmCollectGarbageInternal(collectSoftReferences, GC_EXPLICIT);
    dvmUnlockHeap();
}
/*
 * Explicitly initiate garbage collection.
 */
void dvmCollectGarbage()
{
    if (gDvm.disableExplicitGc) {
        return;
    }
    dvmLockHeap();
    dvmWaitForConcurrentGcToComplete();
    dvmCollectGarbageInternal(GC_EXPLICIT);
    dvmUnlockHeap();
}
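/* For reference: the explicit-GC entry points above are ultimately reached
 * from managed code via Runtime.gc(). The sketch below shows how such a
 * native binding could look, assuming Dalvik's internal-native calling
 * convention (const u4* args, JValue* pResult) and the RETURN_VOID() macro;
 * treat it as illustrative rather than the exact upstream source.
 */
static void Dalvik_java_lang_Runtime_gc(const u4* args, JValue* pResult)
{
    UNUSED_PARAMETER(args);

    /* Delegate to the VM-level entry point, which takes the heap lock,
     * waits out any concurrent collection, and runs an explicit GC. */
    dvmCollectGarbage();
    RETURN_VOID();
}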
/* Do a full garbage collection, which may grow the
 * heap as a side-effect if the live set is large.
 */
static void gcForMalloc(bool clearSoftReferences)
{
    if (gDvm.allocProf.enabled) {
        Thread* self = dvmThreadSelf();
        gDvm.allocProf.gcCount++;
        if (self != NULL) {
            self->allocProf.gcCount++;
        }
    }
    /* This may adjust the soft limit as a side-effect. */
    const GcSpec *spec = clearSoftReferences ? GC_BEFORE_OOM : GC_FOR_MALLOC;
    dvmCollectGarbageInternal(spec);
}
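/* gcForMalloc() is normally invoked from an allocation-retry path: try the
 * allocation, collect, retry, grow the heap, and only clear soft references
 * as a last resort before reporting an out-of-memory condition. The sketch
 * below illustrates that escalation, assuming dvmHeapSourceAlloc() and
 * dvmHeapSourceAllocAndGrow() helpers with the obvious semantics; it is a
 * simplified illustration, not the full Dalvik retry logic.
 */
static void *tryMallocSketch(size_t size)
{
    void *ptr = dvmHeapSourceAlloc(size);
    if (ptr != NULL) {
        return ptr;
    }

    /* A plain collection may free enough space to satisfy the request. */
    gcForMalloc(false);
    ptr = dvmHeapSourceAlloc(size);
    if (ptr != NULL) {
        return ptr;
    }

    /* Otherwise allow the heap to grow toward its maximum size. */
    ptr = dvmHeapSourceAllocAndGrow(size);
    if (ptr != NULL) {
        return ptr;
    }

    /* Last resort: clear soft references, then grow again; a NULL result
     * here means the allocation genuinely cannot be satisfied. */
    gcForMalloc(true);
    return dvmHeapSourceAllocAndGrow(size);
}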
/*
 * The garbage collection daemon. Initiates a concurrent collection
 * when signaled. Also periodically trims the heaps when a few seconds
 * have elapsed since the last concurrent GC.
 */
static void *gcDaemonThread(void* arg)
{
    dvmChangeStatus(NULL, THREAD_VMWAIT);
    dvmLockMutex(&gHs->gcThreadMutex);
    while (gHs->gcThreadShutdown != true) {
        bool trim = false;
        if (gHs->gcThreadTrimNeeded) {
            int result = dvmRelativeCondWait(&gHs->gcThreadCond, &gHs->gcThreadMutex,
                    HEAP_TRIM_IDLE_TIME_MS, 0);
            if (result == ETIMEDOUT) {
                /* Timed out waiting for a GC request, schedule a heap trim. */
                trim = true;
            }
        } else {
            dvmWaitCond(&gHs->gcThreadCond, &gHs->gcThreadMutex);
        }

        // Many JDWP requests cause allocation. We can't take the heap lock and wait to
        // transition to runnable so we can start a GC if a debugger is connected, because
        // we don't know that the JDWP thread isn't about to allocate and require the
        // heap lock itself, leading to deadlock. http://b/8191824.
        if (gDvm.debuggerConnected) {
            continue;
        }

        dvmLockHeap();
        /*
         * Another thread may have started a concurrent garbage
         * collection before we were scheduled. Check for this
         * condition before proceeding.
         */
        if (!gDvm.gcHeap->gcRunning) {
            dvmChangeStatus(NULL, THREAD_RUNNING);
            if (trim) {
                trimHeaps();
                gHs->gcThreadTrimNeeded = false;
            } else {
                dvmCollectGarbageInternal(GC_CONCURRENT);
                gHs->gcThreadTrimNeeded = true;
            }
            dvmChangeStatus(NULL, THREAD_VMWAIT);
        }
        dvmUnlockHeap();
    }
    dvmChangeStatus(NULL, THREAD_RUNNING);
    return NULL;
}
/*
 * The garbage collection daemon. Initiates a concurrent collection
 * when signaled. Also periodically trims the heaps when a few seconds
 * have elapsed since the last concurrent GC.
 */
static void *gcDaemonThread(void* arg)
{
    dvmChangeStatus(NULL, THREAD_VMWAIT);
    dvmLockMutex(&gHs->gcThreadMutex);
    while (gHs->gcThreadShutdown != true) {
        bool trim = false;
        if (gHs->gcThreadTrimNeeded) {
            int result = dvmRelativeCondWait(&gHs->gcThreadCond, &gHs->gcThreadMutex,
                    HEAP_TRIM_IDLE_TIME_MS, 0);
            if (result == ETIMEDOUT) {
                /* Timed out waiting for a GC request, schedule a heap trim. */
                trim = true;
            }
        } else {
            dvmWaitCond(&gHs->gcThreadCond, &gHs->gcThreadMutex);
        }

        dvmLockHeap();
        /*
         * Another thread may have started a concurrent garbage
         * collection before we were scheduled. Check for this
         * condition before proceeding.
         */
        if (!gDvm.gcHeap->gcRunning) {
            dvmChangeStatus(NULL, THREAD_RUNNING);
            if (trim) {
                trimHeaps();
                gHs->gcThreadTrimNeeded = false;
            } else {
                dvmCollectGarbageInternal(GC_CONCURRENT);
                gHs->gcThreadTrimNeeded = true;
            }
            dvmChangeStatus(NULL, THREAD_VMWAIT);
        }
        dvmUnlockHeap();
    }
    dvmChangeStatus(NULL, THREAD_RUNNING);
    return NULL;
}
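/* The daemon above is typically created once at VM startup and torn down at
 * shutdown by setting gcThreadShutdown and signaling the condition variable
 * it waits on. The sketch below shows that lifecycle, assuming a
 * dvmCreateInternalThread() helper and the gHs thread fields used above; it
 * is an illustrative outline, not necessarily the verbatim Dalvik code.
 */
static bool gcDaemonStartup()
{
    dvmInitMutex(&gHs->gcThreadMutex);
    pthread_cond_init(&gHs->gcThreadCond, NULL);
    gHs->gcThreadShutdown = false;
    /* Spawn gcDaemonThread as an internal VM thread named "GC". */
    gHs->hasGcThread = dvmCreateInternalThread(&gHs->gcThread, "GC",
                                               gcDaemonThread, NULL);
    return gHs->hasGcThread;
}

static void gcDaemonShutdown()
{
    if (gHs->hasGcThread) {
        /* Ask the daemon loop to exit, wake it, and wait for it to finish. */
        dvmLockMutex(&gHs->gcThreadMutex);
        gHs->gcThreadShutdown = true;
        dvmSignalCond(&gHs->gcThreadCond);
        dvmUnlockMutex(&gHs->gcThreadMutex);
        pthread_join(gHs->gcThread, NULL);
    }
}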
/* Runs the main thread daemon loop looking for incoming messages from its
 * parallel thread on what action it should take.
 */
static void* thread_daemon(void* pself)
{
    Thread* self = (Thread*)pself;
    while(1) {
        u1 event = offReadU1(self);
        if(!gDvm.offConnected) {
            ALOGI("THREAD %d LOST CONNECTION", self->threadId);
            return NULL;
        }
        ALOGI("THREAD %d GOT EVENT %d", self->threadId, event);
        switch(event) {
            case OFF_ACTION_RESUME: {
                /* We got a resume message, drop back to our caller. */
                return NULL;
            } break;
            case OFF_ACTION_LOCK: {
                offPerformLock(self);
            } break;
            case OFF_ACTION_NOTIFY: {
                offPerformNotify(self);
            } break;
            case OFF_ACTION_BROADCAST: {
                offPerformNotifyAll(self);
            } break;
            case OFF_ACTION_DEX_QUERYDEX: {
                offPerformQueryDex(self);
            } break;
            case OFF_ACTION_SYNC: {
                offSyncPull();
                offWriteU1(self, 0);
            } break;
            case OFF_ACTION_INTERRUPT: {
                offPerformInterrupt(self);
            } break;
            case OFF_ACTION_TRIMGC: {
                dvmLockHeap();
                self->offTrimSignaled = true;
                if (gDvm.gcHeap->gcRunning) {
                    dvmWaitForConcurrentGcToComplete();
                }
                dvmCollectGarbageInternal(GC_BEFORE_OOM);
                self->offTrimSignaled = false;
                dvmUnlockHeap();
            } break;
            case OFF_ACTION_GRABVOL: {
                offPerformGrabVolatiles(self);
            } break;
            case OFF_ACTION_MIGRATE: {
                if(offPerformMigrate(self)) {
                    return NULL;
                }
            } break;
            case OFF_ACTION_CLINIT: {
                offPerformClinit(self);
            } break;
            case OFF_ACTION_DEATH: {
                self->offFlagDeath = true;
                return NULL;
            } break;
            default: {
                ALOGE("Unknown action %d sent to thread %d", event, self->threadId);
                dvmAbort();
            }
        }
    }
}