void dvmHeapSourceRegisterNativeAllocation(int bytes)
{
    /* If we have just done a GC, ensure that the finalizers are done and update
     * the native watermarks.
     */
    if (gHs->nativeNeedToRunFinalization) {
        dvmRunFinalization();
        dvmHeapSourceUpdateMaxNativeFootprint();
        gHs->nativeNeedToRunFinalization = false;
    }

    android_atomic_add(bytes, &gHs->nativeBytesAllocated);

    if ((size_t)gHs->nativeBytesAllocated > gHs->nativeFootprintGCWatermark) {
        /* The second watermark is higher than the gc watermark. If you hit
         * this it means you are allocating native objects faster than the GC
         * can keep up with. If this occurs, we do a GC for alloc.
         */
        if ((size_t)gHs->nativeBytesAllocated > gHs->nativeFootprintLimit) {
            Thread* self = dvmThreadSelf();
            dvmRunFinalization();
            if (dvmCheckException(self)) {
                return;
            }
            dvmLockHeap();
            bool waited = dvmWaitForConcurrentGcToComplete();
            dvmUnlockHeap();
            if (waited) {
                // Just finished a GC, attempt to run finalizers.
                dvmRunFinalization();
                if (dvmCheckException(self)) {
                    return;
                }
            }

            // If we still are over the watermark, attempt a GC for alloc and run finalizers.
            if ((size_t)gHs->nativeBytesAllocated > gHs->nativeFootprintLimit) {
                dvmLockHeap();
                dvmWaitForConcurrentGcToComplete();
                dvmCollectGarbageInternal(GC_FOR_MALLOC);
                dvmUnlockHeap();
                dvmRunFinalization();
                gHs->nativeNeedToRunFinalization = false;
                if (dvmCheckException(self)) {
                    return;
                }
            }
            /* We have just run finalizers, update the native watermark since
             * it is very likely that finalizers released native managed
             * allocations.
             */
            dvmHeapSourceUpdateMaxNativeFootprint();
        } else {
            dvmSignalCond(&gHs->gcThreadCond);
        }
    }
}
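For context, this bookkeeping has a release side that decrements nativeBytesAllocated when native memory is freed. The sketch below is an assumption about its shape, not a verbatim copy: it assumes the counter is lowered with a compare-and-swap loop (android_atomic_release_cas) so a racing free can never drive the counter negative.

/* Sketch only: the decrement loop and the negative clamp are assumptions. */
void dvmHeapSourceRegisterNativeFree(int bytes)
{
    int expected_size, new_size;
    do {
        expected_size = gHs->nativeBytesAllocated;
        new_size = expected_size - bytes;
        if (new_size < 0) {
            break;  /* never let the counter underflow */
        }
    } while (android_atomic_release_cas(expected_size, new_size,
                                        &gHs->nativeBytesAllocated));
}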
bool dvmDdmHandleHpifChunk(int when)
{
    switch (when) {
    case HPIF_WHEN_NOW:
        dvmDdmSendHeapInfo(when, true);
        break;
    case HPIF_WHEN_NEVER:
    case HPIF_WHEN_NEXT_GC:
    case HPIF_WHEN_EVERY_GC:
        if (dvmLockHeap()) {
            gDvm.gcHeap->ddmHpifWhen = when;
            dvmUnlockHeap();
        } else {
            LOGI("%s(): can't lock heap to set when\n", __func__);
            return false;
        }
        break;
    default:
        LOGI("%s(): bad when value 0x%08x\n", __func__, when);
        return false;
    }

    return true;
}
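This handler is reached from the DDM native bridge when the debugger sends an HPIF request. A hedged sketch of that bridge, assuming Dalvik's usual (const u4* args, JValue* pResult) native-method convention; the function name and the RETURN_BOOLEAN macro usage here are assumptions:

static void Dalvik_org_apache_harmony_dalvik_ddmc_DdmVmInternal_heapInfoNotify(
    const u4* args, JValue* pResult)
{
    int when = args[0];                       /* HPIF_WHEN_* value from the debugger */
    bool ret = dvmDdmHandleHpifChunk(when);   /* record or act on the request */
    RETURN_BOOLEAN(ret);
}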
void dvmDdmSendHeapSegments(bool shouldLock, bool native)
{
    u1 heapId[sizeof(u4)];
    GcHeap *gcHeap = gDvm.gcHeap;
    int when, what;
    bool merge;

    /* Don't even grab the lock if there's nothing to do when we're called.
     */
    if (!native) {
        when = gcHeap->ddmHpsgWhen;
        what = gcHeap->ddmHpsgWhat;
        if (when == HPSG_WHEN_NEVER) {
            return;
        }
    } else {
        when = gcHeap->ddmNhsgWhen;
        what = gcHeap->ddmNhsgWhat;
        if (when == HPSG_WHEN_NEVER) {
            return;
        }
    }
    if (shouldLock && !dvmLockHeap()) {
        LOGW("Can't lock heap for DDM HPSx dump\n");
        return;
    }

    /* Figure out what kind of chunks we'll be sending.
     */
    if (what == HPSG_WHAT_MERGED_OBJECTS) {
        merge = true;
    } else if (what == HPSG_WHAT_DISTINCT_OBJECTS) {
        merge = false;
    } else {
        assert(!"bad HPSG.what value");
        return;
    }

    /* First, send a heap start chunk.
     */
    set4BE(heapId, DEFAULT_HEAP_ID);
    dvmDbgDdmSendChunk(native ? CHUNK_TYPE("NHST") : CHUNK_TYPE("HPST"),
        sizeof(u4), heapId);

    /* Send a series of heap segment chunks.
     */
    walkHeap(merge, native);

    /* Finally, send a heap end chunk.
     */
    dvmDbgDdmSendChunk(native ? CHUNK_TYPE("NHEN") : CHUNK_TYPE("HPEN"),
        sizeof(u4), heapId);

    if (shouldLock) {
        dvmUnlockHeap();
    }
}
size_t dvmCountAssignableInstancesOfClass(const ClassObject *clazz)
{
    CountContext ctx = { clazz, 0 };
    dvmLockHeap();
    HeapBitmap *bitmap = dvmHeapSourceGetLiveBits();
    dvmHeapBitmapWalk(bitmap, countAssignableInstancesOfClassCallback, &ctx);
    dvmUnlockHeap();
    return ctx.count;
}
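The function above depends on a CountContext and a bitmap-walk callback that are not shown. The sketch below assumes dvmHeapBitmapWalk() hands each live object pointer to the callback as (void *ptr, void *arg); the exact callback signature is dictated by the bitmap code, so treat this as illustrative:

struct CountContext {
    const ClassObject *clazz;  /* class whose assignable instances we count */
    size_t count;              /* running total returned to the caller */
};

/* Assumed callback shape; counts objects whose class is assignable to ctx->clazz. */
static void countAssignableInstancesOfClassCallback(void *ptr, void *arg)
{
    CountContext *ctx = (CountContext *)arg;
    const Object *obj = (const Object *)ptr;
    if (obj->clazz != NULL && dvmInstanceof(obj->clazz, ctx->clazz)) {
        ctx->count += 1;
    }
}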
/*
 * Explicitly initiate garbage collection.
 */
void dvmCollectGarbage(bool collectSoftReferences)
{
    dvmLockHeap();
    while (gDvm.gcHeap->gcRunning) {
        dvmWaitForConcurrentGcToComplete();
    }
    dvmCollectGarbageInternal(collectSoftReferences, GC_EXPLICIT);
    dvmUnlockHeap();
}
/*
 * Explicitly initiate garbage collection.
 */
void dvmCollectGarbage(bool collectSoftReferences)
{
    dvmLockHeap();
    LOGVV("Explicit GC\n");
    dvmCollectGarbageInternal(collectSoftReferences, GC_EXPLICIT);
    dvmUnlockHeap();
}
/*
 * Explicitly initiate garbage collection.
 */
void dvmCollectGarbage()
{
    if (gDvm.disableExplicitGc) {
        return;
    }
    dvmLockHeap();
    dvmWaitForConcurrentGcToComplete();
    dvmCollectGarbageInternal(GC_EXPLICIT);
    dvmUnlockHeap();
}
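The explicit-GC entry points above are normally reached from java.lang.Runtime.gc(). A hedged sketch of that native bridge, assuming Dalvik's (const u4* args, JValue* pResult) convention; the exact wrapper body is an assumption:

static void Dalvik_java_lang_Runtime_gc(const u4* args, JValue* pResult)
{
    UNUSED_PARAMETER(args);

    /* All the real work, including the disableExplicitGc check,
     * happens inside dvmCollectGarbage(). */
    dvmCollectGarbage();
    RETURN_VOID();
}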
/*
 * Allocate storage on the GC heap.  We guarantee 8-byte alignment.
 *
 * The new storage is zeroed out.
 *
 * Note that, in rare cases, this could get called while a GC is in
 * progress.  If a non-VM thread tries to attach itself through JNI,
 * it will need to allocate some objects.  If this becomes annoying to
 * deal with, we can block it at the source, but holding the allocation
 * mutex should be enough.
 *
 * In rare circumstances (JNI AttachCurrentThread) we can be called
 * from a non-VM thread.
 *
 * Use ALLOC_DONT_TRACK when we either don't want to track an allocation
 * (because it's being done for the interpreter "new" operation and will
 * be part of the root set immediately) or we can't (because this allocation
 * is for a brand new thread).
 *
 * Returns NULL and throws an exception on failure.
 *
 * TODO: don't do a GC if the debugger thinks all threads are suspended
 */
void* dvmMalloc(size_t size, int flags)
{
    void *ptr;

    dvmLockHeap();

    /* Try as hard as possible to allocate some memory.
     */
    ptr = tryMalloc(size);
    if (ptr != NULL) {
        /* We've got the memory.
         */
        if (gDvm.allocProf.enabled) {
            Thread* self = dvmThreadSelf();
            gDvm.allocProf.allocCount++;
            gDvm.allocProf.allocSize += size;
            if (self != NULL) {
                self->allocProf.allocCount++;
                self->allocProf.allocSize += size;
            }
        }
    } else {
        /* The allocation failed.
         */
        if (gDvm.allocProf.enabled) {
            Thread* self = dvmThreadSelf();
            gDvm.allocProf.failedAllocCount++;
            gDvm.allocProf.failedAllocSize += size;
            if (self != NULL) {
                self->allocProf.failedAllocCount++;
                self->allocProf.failedAllocSize += size;
            }
        }
    }

    dvmUnlockHeap();

    if (ptr != NULL) {
        /*
         * If caller hasn't asked us not to track it, add it to the
         * internal tracking list.
         */
        if ((flags & ALLOC_DONT_TRACK) == 0) {
            dvmAddTrackedAlloc((Object*)ptr, NULL);
        }
    } else {
        /*
         * The allocation failed; throw an OutOfMemoryError.
         */
        throwOOME();
    }

    return ptr;
}
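A typical object allocation sits on top of dvmMalloc(): allocate zeroed storage, then install the class pointer. The helper below is an illustrative sketch (the name allocObjectSketch is hypothetical); it reuses only calls visible elsewhere in this excerpt, such as DVM_OBJECT_INIT().

/* Sketch: an object-allocation wrapper over dvmMalloc(). */
Object* allocObjectSketch(ClassObject* clazz, int flags)
{
    /* clazz->objectSize covers the instance fields plus the Object header. */
    Object* newObj = (Object*)dvmMalloc(clazz->objectSize, flags);
    if (newObj != NULL) {
        /* dvmMalloc() returned zeroed storage; install the class pointer. */
        DVM_OBJECT_INIT(newObj, clazz);
    }
    return newObj;  /* NULL on failure; dvmMalloc() already threw OOME */
}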
/*
 * Removes any growth limits.  Allows the user to allocate up to the
 * maximum heap size.
 */
void dvmClearGrowthLimit()
{
    HS_BOILERPLATE();

    dvmLockHeap();
    dvmWaitForConcurrentGcToComplete();
    gDvm.gcHeap->cardTableLength = gDvm.gcHeap->cardTableMaxLength;
    gHs->growthLimit = gHs->maximumSize;
    size_t overhead = oldHeapOverhead(gHs, false);
    gHs->heaps[0].maximumSize = gHs->maximumSize - overhead;
    gHs->heaps[0].limit = gHs->heaps[0].base + gHs->heaps[0].maximumSize;
    dvmUnlockHeap();
}
/*
 * The garbage collection daemon.  Initiates a concurrent collection
 * when signaled.  Also periodically trims the heaps when a few seconds
 * have elapsed since the last concurrent GC.
 */
static void *gcDaemonThread(void* arg)
{
    dvmChangeStatus(NULL, THREAD_VMWAIT);
    dvmLockMutex(&gHs->gcThreadMutex);
    while (gHs->gcThreadShutdown != true) {
        bool trim = false;
        if (gHs->gcThreadTrimNeeded) {
            int result = dvmRelativeCondWait(&gHs->gcThreadCond, &gHs->gcThreadMutex,
                    HEAP_TRIM_IDLE_TIME_MS, 0);
            if (result == ETIMEDOUT) {
                /* Timed out waiting for a GC request, schedule a heap trim. */
                trim = true;
            }
        } else {
            dvmWaitCond(&gHs->gcThreadCond, &gHs->gcThreadMutex);
        }

        // Many JDWP requests cause allocation. We can't take the heap lock and wait to
        // transition to runnable so we can start a GC if a debugger is connected, because
        // we don't know that the JDWP thread isn't about to allocate and require the
        // heap lock itself, leading to deadlock. http://b/8191824.
        if (gDvm.debuggerConnected) {
            continue;
        }

        dvmLockHeap();
        /*
         * Another thread may have started a concurrent garbage
         * collection before we were scheduled.  Check for this
         * condition before proceeding.
         */
        if (!gDvm.gcHeap->gcRunning) {
            dvmChangeStatus(NULL, THREAD_RUNNING);
            if (trim) {
                trimHeaps();
                gHs->gcThreadTrimNeeded = false;
            } else {
                dvmCollectGarbageInternal(GC_CONCURRENT);
                gHs->gcThreadTrimNeeded = true;
            }
            dvmChangeStatus(NULL, THREAD_VMWAIT);
        }
        dvmUnlockHeap();
    }
    dvmChangeStatus(NULL, THREAD_RUNNING);
    return NULL;
}
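The daemon above only wakes when gHs->gcThreadCond is signaled (as the native-allocation path does via dvmSignalCond) or when the trim timeout fires. A hedged sketch of a notify helper consistent with that handshake; the helper name and the gcThreadTrimNeeded reset are assumptions:

/* Sketch only: wake the GC daemon to start a concurrent collection. */
static void gcDaemonNotify()
{
    dvmLockMutex(&gHs->gcThreadMutex);
    /* A concurrent GC request supersedes any pending heap trim. */
    gHs->gcThreadTrimNeeded = false;
    dvmSignalCond(&gHs->gcThreadCond);
    dvmUnlockMutex(&gHs->gcThreadMutex);
}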
bool dvmDdmHandleHpsgNhsgChunk(int when, int what, bool native)
{
    LOGI("dvmDdmHandleHpsgChunk(when %d, what %d, heap %d)\n", when, what, native);
    switch (when) {
    case HPSG_WHEN_NEVER:
    case HPSG_WHEN_EVERY_GC:
        break;
    default:
        LOGI("%s(): bad when value 0x%08x\n", __func__, when);
        return false;
    }

    switch (what) {
    case HPSG_WHAT_MERGED_OBJECTS:
    case HPSG_WHAT_DISTINCT_OBJECTS:
        break;
    default:
        LOGI("%s(): bad what value 0x%08x\n", __func__, what);
        return false;
    }

    if (dvmLockHeap()) {
        if (!native) {
            gDvm.gcHeap->ddmHpsgWhen = when;
            gDvm.gcHeap->ddmHpsgWhat = what;
        } else {
            gDvm.gcHeap->ddmNhsgWhen = when;
            gDvm.gcHeap->ddmNhsgWhat = what;
        }
        //TODO: if what says we should dump immediately, signal (or do) it from here
        dvmUnlockHeap();
    } else {
        LOGI("%s(): can't lock heap to set when/what\n", __func__);
        return false;
    }

    return true;
}
/*
 * The garbage collection daemon.  Initiates a concurrent collection
 * when signaled.  Also periodically trims the heaps when a few seconds
 * have elapsed since the last concurrent GC.
 */
static void *gcDaemonThread(void* arg)
{
    dvmChangeStatus(NULL, THREAD_VMWAIT);
    dvmLockMutex(&gHs->gcThreadMutex);
    while (gHs->gcThreadShutdown != true) {
        bool trim = false;
        if (gHs->gcThreadTrimNeeded) {
            int result = dvmRelativeCondWait(&gHs->gcThreadCond, &gHs->gcThreadMutex,
                    HEAP_TRIM_IDLE_TIME_MS, 0);
            if (result == ETIMEDOUT) {
                /* Timed out waiting for a GC request, schedule a heap trim. */
                trim = true;
            }
        } else {
            dvmWaitCond(&gHs->gcThreadCond, &gHs->gcThreadMutex);
        }

        dvmLockHeap();
        /*
         * Another thread may have started a concurrent garbage
         * collection before we were scheduled.  Check for this
         * condition before proceeding.
         */
        if (!gDvm.gcHeap->gcRunning) {
            dvmChangeStatus(NULL, THREAD_RUNNING);
            if (trim) {
                trimHeaps();
                gHs->gcThreadTrimNeeded = false;
            } else {
                dvmCollectGarbageInternal(GC_CONCURRENT);
                gHs->gcThreadTrimNeeded = true;
            }
            dvmChangeStatus(NULL, THREAD_VMWAIT);
        }
        dvmUnlockHeap();
    }
    dvmChangeStatus(NULL, THREAD_RUNNING);
    return NULL;
}
/* Runs the main thread daemon loop looking for incoming messages from its
 * parallel thread on what action it should take. */
static void* thread_daemon(void* pself)
{
    Thread* self = (Thread*)pself;
    while(1) {
        u1 event = offReadU1(self);
        if(!gDvm.offConnected) {
            ALOGI("THREAD %d LOST CONNECTION", self->threadId);
            return NULL;
        }
        ALOGI("THREAD %d GOT EVENT %d", self->threadId, event);
        switch(event) {
            case OFF_ACTION_RESUME: {
                /* We got a resume message, drop back to our caller. */
                return NULL;
            } break;
            case OFF_ACTION_LOCK: {
                offPerformLock(self);
            } break;
            case OFF_ACTION_NOTIFY: {
                offPerformNotify(self);
            } break;
            case OFF_ACTION_BROADCAST: {
                offPerformNotifyAll(self);
            } break;
            case OFF_ACTION_DEX_QUERYDEX: {
                offPerformQueryDex(self);
            } break;
            case OFF_ACTION_SYNC: {
                offSyncPull();
                offWriteU1(self, 0);
            } break;
            case OFF_ACTION_INTERRUPT: {
                offPerformInterrupt(self);
            } break;
            case OFF_ACTION_TRIMGC: {
                dvmLockHeap();
                self->offTrimSignaled = true;
                if (gDvm.gcHeap->gcRunning) {
                    dvmWaitForConcurrentGcToComplete();
                }
                dvmCollectGarbageInternal(GC_BEFORE_OOM);
                self->offTrimSignaled = false;
                dvmUnlockHeap();
            } break;
            case OFF_ACTION_GRABVOL: {
                offPerformGrabVolatiles(self);
            } break;
            case OFF_ACTION_MIGRATE: {
                if(offPerformMigrate(self)) {
                    return NULL;
                }
            } break;
            case OFF_ACTION_CLINIT: {
                offPerformClinit(self);
            } break;
            case OFF_ACTION_DEATH: {
                self->offFlagDeath = true;
                return NULL;
            } break;
            default: {
                ALOGE("Unknown action %d sent to thread %d",
                      event, self->threadId);
                dvmAbort();
            }
        }
    }
}
/*
 * Initiate garbage collection.
 *
 * NOTES:
 * - If we don't hold gDvm.threadListLock, it's possible for a thread to
 *   be added to the thread list while we work.  The thread should NOT
 *   start executing, so this is only interesting when we start chasing
 *   thread stacks.  (Before we do so, grab the lock.)
 *
 * We are not allowed to GC when the debugger has suspended the VM, which
 * is awkward because debugger requests can cause allocations.  The easiest
 * way to enforce this is to refuse to GC on an allocation made by the
 * JDWP thread -- we have to expand the heap or fail.
 */
void dvmCollectGarbageInternal(const GcSpec* spec)
{
    GcHeap *gcHeap = gDvm.gcHeap;
    u4 gcEnd = 0;
    u4 rootStart = 0, rootEnd = 0;
    u4 dirtyStart = 0, dirtyEnd = 0;
    size_t numObjectsFreed, numBytesFreed;
    size_t currAllocated, currFootprint;
    size_t percentFree;
    int oldThreadPriority = INT_MAX;

    /* The heap lock must be held.
     */

    if (gcHeap->gcRunning) {
        LOGW_HEAP("Attempted recursive GC");
        return;
    }

    gcHeap->gcRunning = true;

    rootStart = dvmGetRelativeTimeMsec();
    dvmSuspendAllThreads(SUSPEND_FOR_GC);

    /*
     * If we are not marking concurrently raise the priority of the
     * thread performing the garbage collection.
     */
    if (!spec->isConcurrent) {
        oldThreadPriority = os_raiseThreadPriority();
    }
    if (gDvm.preVerify) {
        LOGV_HEAP("Verifying roots and heap before GC");
        verifyRootsAndHeap();
    }

    dvmMethodTraceGCBegin();

    /* Set up the marking context.
     */
    if (!dvmHeapBeginMarkStep(spec->isPartial)) {
        LOGE_HEAP("dvmHeapBeginMarkStep failed; aborting");
        dvmAbort();
    }

    /* Mark the set of objects that are strongly reachable from the roots.
     */
    LOGD_HEAP("Marking...");
    dvmHeapMarkRootSet();

    /* dvmHeapScanMarkedObjects() will build the lists of known
     * instances of the Reference classes.
     */
    assert(gcHeap->softReferences == NULL);
    assert(gcHeap->weakReferences == NULL);
    assert(gcHeap->finalizerReferences == NULL);
    assert(gcHeap->phantomReferences == NULL);
    assert(gcHeap->clearedReferences == NULL);

    if (spec->isConcurrent) {
        /*
         * Resume threads while tracing from the roots.  We unlock the
         * heap to allow mutator threads to allocate from free space.
         */
        dvmClearCardTable();
        dvmUnlockHeap();
        dvmResumeAllThreads(SUSPEND_FOR_GC);
        rootEnd = dvmGetRelativeTimeMsec();
    }

    /* Recursively mark any objects that marked objects point to strongly.
     * If we're not collecting soft references, soft-reachable
     * objects will also be marked.
     */
    LOGD_HEAP("Recursing...");
    dvmHeapScanMarkedObjects();

    if (spec->isConcurrent) {
        /*
         * Re-acquire the heap lock and perform the final thread
         * suspension.
         */
        dirtyStart = dvmGetRelativeTimeMsec();
        dvmLockHeap();
        dvmSuspendAllThreads(SUSPEND_FOR_GC);
        /*
         * As no barrier intercepts root updates, we conservatively
         * assume all roots may be gray and re-mark them.
         */
        dvmHeapReMarkRootSet();
        /*
         * With the exception of reference objects and weak interned
         * strings, all gray objects should now be on dirty cards.
         */
        if (gDvm.verifyCardTable) {
            dvmVerifyCardTable();
        }
        /*
         * Recursively mark gray objects pointed to by the roots or by
         * heap objects dirtied during the concurrent mark.
         */
        dvmHeapReScanMarkedObjects();
    }

    /*
     * All strongly-reachable objects have now been marked.  Process
     * weakly-reachable objects discovered while tracing.
     */
    dvmHeapProcessReferences(&gcHeap->softReferences,
                             spec->doPreserve == false,
                             &gcHeap->weakReferences,
                             &gcHeap->finalizerReferences,
                             &gcHeap->phantomReferences);

#if defined(WITH_JIT)
    /*
     * Patching a chaining cell is very cheap as it only updates 4 words.  It's
     * the overhead of stopping all threads and synchronizing the I/D cache
     * that makes it expensive.
     *
     * Therefore we batch those work orders in a queue and go through them
     * when threads are suspended for GC.
     */
    dvmCompilerPerformSafePointChecks();
#endif

    LOGD_HEAP("Sweeping...");

    dvmHeapSweepSystemWeaks();

    /*
     * Live objects have a bit set in the mark bitmap, swap the mark
     * and live bitmaps.  The sweep can proceed concurrently viewing
     * the new live bitmap as the old mark bitmap, and vice versa.
     */
    dvmHeapSourceSwapBitmaps();

    if (gDvm.postVerify) {
        LOGV_HEAP("Verifying roots and heap after GC");
        verifyRootsAndHeap();
    }

    if (spec->isConcurrent) {
        dvmUnlockHeap();
        dvmResumeAllThreads(SUSPEND_FOR_GC);
        dirtyEnd = dvmGetRelativeTimeMsec();
    }
    dvmHeapSweepUnmarkedObjects(spec->isPartial, spec->isConcurrent,
                                &numObjectsFreed, &numBytesFreed);
    LOGD_HEAP("Cleaning up...");
    dvmHeapFinishMarkStep();
    if (spec->isConcurrent) {
        dvmLockHeap();
    }

    LOGD_HEAP("Done.");

    /* Now's a good time to adjust the heap size, since
     * we know what our utilization is.
     *
     * This doesn't actually resize any memory;
     * it just lets the heap grow more when necessary.
     */
    dvmHeapSourceGrowForUtilization();

    currAllocated = dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, NULL, 0);
    currFootprint = dvmHeapSourceGetValue(HS_FOOTPRINT, NULL, 0);

    dvmMethodTraceGCEnd();
    LOGV_HEAP("GC finished");

    gcHeap->gcRunning = false;

    LOGV_HEAP("Resuming threads");

    if (spec->isConcurrent) {
        /*
         * Wake-up any threads that blocked after a failed allocation
         * request.
         */
        dvmBroadcastCond(&gDvm.gcHeapCond);
    }

    if (!spec->isConcurrent) {
        dvmResumeAllThreads(SUSPEND_FOR_GC);
        dirtyEnd = dvmGetRelativeTimeMsec();
        /*
         * Restore the original thread scheduling priority if it was
         * changed at the start of the current garbage collection.
         */
        if (oldThreadPriority != INT_MAX) {
            os_lowerThreadPriority(oldThreadPriority);
        }
    }

    /*
     * Move queue of pending references back into Java.
     */
    dvmEnqueueClearedReferences(&gDvm.gcHeap->clearedReferences);

    gcEnd = dvmGetRelativeTimeMsec();
    percentFree = 100 - (size_t)(100.0f * (float)currAllocated / currFootprint);
    if (!spec->isConcurrent) {
        u4 markSweepTime = dirtyEnd - rootStart;
        u4 gcTime = gcEnd - rootStart;
        bool isSmall = numBytesFreed > 0 && numBytesFreed < 1024;
        ALOGD("%s freed %s%zdK, %d%% free %zdK/%zdK, paused %ums, total %ums",
             spec->reason,
             isSmall ? "<" : "",
             numBytesFreed ? MAX(numBytesFreed / 1024, 1) : 0,
             percentFree,
             currAllocated / 1024, currFootprint / 1024,
             markSweepTime, gcTime);
    } else {
        u4 rootTime = rootEnd - rootStart;
        u4 dirtyTime = dirtyEnd - dirtyStart;
        u4 gcTime = gcEnd - rootStart;
        bool isSmall = numBytesFreed > 0 && numBytesFreed < 1024;
        ALOGD("%s freed %s%zdK, %d%% free %zdK/%zdK, paused %ums+%ums, total %ums",
             spec->reason,
             isSmall ? "<" : "",
             numBytesFreed ? MAX(numBytesFreed / 1024, 1) : 0,
             percentFree,
             currAllocated / 1024, currFootprint / 1024,
             rootTime, dirtyTime, gcTime);
    }
    if (gcHeap->ddmHpifWhen != 0) {
        LOGD_HEAP("Sending VM heap info to DDM");
        dvmDdmSendHeapInfo(gcHeap->ddmHpifWhen, false);
    }
    if (gcHeap->ddmHpsgWhen != 0) {
        LOGD_HEAP("Dumping VM heap to DDM");
        dvmDdmSendHeapSegments(false, false);
    }
    if (gcHeap->ddmNhsgWhen != 0) {
        LOGD_HEAP("Dumping native heap to DDM");
        dvmDdmSendHeapSegments(false, true);
    }
}
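dvmCollectGarbageInternal() reads four fields off the GcSpec it is handed: isPartial, isConcurrent, doPreserve, and reason. A sketch of the structure consistent with those uses; the field comments are inferred from the call sites above rather than copied from the header:

struct GcSpec {
    /* If true, only part of the heap is threatened (partial collection). */
    bool isPartial;
    /* If true, the mark phase runs concurrently with the mutator threads. */
    bool isConcurrent;
    /* If false, softly-reachable objects are not preserved
     * (see the spec->doPreserve == false argument to dvmHeapProcessReferences). */
    bool doPreserve;
    /* Human-readable name printed in the GC summary log line. */
    const char *reason;
};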
/*
 * The heap worker thread sits quietly until the GC tells it there's work
 * to do.
 */
static void* heapWorkerThreadStart(void* arg)
{
    Thread *self = dvmThreadSelf();
    int cc;

    UNUSED_PARAMETER(arg);

    LOGV("HeapWorker thread started (threadid=%d)\n", self->threadId);

    /* tell the main thread that we're ready */
    dvmLockMutex(&gDvm.heapWorkerLock);
    gDvm.heapWorkerReady = true;
    cc = pthread_cond_signal(&gDvm.heapWorkerCond);
    assert(cc == 0);
    dvmUnlockMutex(&gDvm.heapWorkerLock);

    dvmLockMutex(&gDvm.heapWorkerLock);
    while (!gDvm.haltHeapWorker) {
        struct timespec trimtime;
        bool timedwait = false;

        /* We're done running interpreted code for now. */
        dvmChangeStatus(NULL, THREAD_VMWAIT);

        /* Signal anyone who wants to know when we're done. */
        cc = pthread_cond_broadcast(&gDvm.heapWorkerIdleCond);
        assert(cc == 0);

        /* Trim the heap if we were asked to. */
        trimtime = gDvm.gcHeap->heapWorkerNextTrim;
        if (trimtime.tv_sec != 0 && trimtime.tv_nsec != 0) {
            struct timespec now;

#ifdef HAVE_TIMEDWAIT_MONOTONIC
            clock_gettime(CLOCK_MONOTONIC, &now);       // relative time
#else
            struct timeval tvnow;
            gettimeofday(&tvnow, NULL);                 // absolute time
            now.tv_sec = tvnow.tv_sec;
            now.tv_nsec = tvnow.tv_usec * 1000;
#endif

            if (trimtime.tv_sec < now.tv_sec ||
                (trimtime.tv_sec == now.tv_sec &&
                 trimtime.tv_nsec <= now.tv_nsec))
            {
                size_t madvisedSizes[HEAP_SOURCE_MAX_HEAP_COUNT];

                /* The heap must be locked before the HeapWorker;
                 * unroll and re-order the locks.  dvmLockHeap()
                 * will put us in VMWAIT if necessary.  Once it
                 * returns, there shouldn't be any contention on
                 * heapWorkerLock.
                 */
                dvmUnlockMutex(&gDvm.heapWorkerLock);
                dvmLockHeap();
                dvmLockMutex(&gDvm.heapWorkerLock);

                memset(madvisedSizes, 0, sizeof(madvisedSizes));
                dvmHeapSourceTrim(madvisedSizes, HEAP_SOURCE_MAX_HEAP_COUNT);
                dvmLogMadviseStats(madvisedSizes, HEAP_SOURCE_MAX_HEAP_COUNT);

                dvmUnlockHeap();

                trimtime.tv_sec = 0;
                trimtime.tv_nsec = 0;
                gDvm.gcHeap->heapWorkerNextTrim = trimtime;
            } else {
                timedwait = true;
            }
        }

        /* sleep until signaled */
        if (timedwait) {
#ifdef HAVE_TIMEDWAIT_MONOTONIC
            cc = pthread_cond_timedwait_monotonic(&gDvm.heapWorkerCond,
                    &gDvm.heapWorkerLock, &trimtime);
#else
            cc = pthread_cond_timedwait(&gDvm.heapWorkerCond,
                    &gDvm.heapWorkerLock, &trimtime);
#endif
            assert(cc == 0 || cc == ETIMEDOUT || cc == EINTR);
        } else {
            cc = pthread_cond_wait(&gDvm.heapWorkerCond, &gDvm.heapWorkerLock);
            assert(cc == 0);
        }

        /* dvmChangeStatus() may block; don't hold heapWorkerLock. */
        dvmUnlockMutex(&gDvm.heapWorkerLock);
        dvmChangeStatus(NULL, THREAD_RUNNING);
        dvmLockMutex(&gDvm.heapWorkerLock);

        LOGV("HeapWorker is awake\n");

        /* Process any events in the queue. */
        doHeapWork(self);
    }
    dvmUnlockMutex(&gDvm.heapWorkerLock);

    if (gDvm.verboseShutdown)
        LOGD("HeapWorker thread shutting down\n");
    return NULL;
}
/*
 * The heap worker thread sits quietly until the GC tells it there's work
 * to do.
 */
static void* heapWorkerThreadStart(void* arg)
{
    Thread *self = dvmThreadSelf();

    UNUSED_PARAMETER(arg);

    LOGV("HeapWorker thread started (threadid=%d)\n", self->threadId);

    /* tell the main thread that we're ready */
    lockMutex(&gDvm.heapWorkerLock);
    gDvm.heapWorkerReady = true;
    dvmSignalCond(&gDvm.heapWorkerCond);
    dvmUnlockMutex(&gDvm.heapWorkerLock);

    lockMutex(&gDvm.heapWorkerLock);
    while (!gDvm.haltHeapWorker) {
        struct timespec trimtime;
        bool timedwait = false;

        /* We're done running interpreted code for now. */
        dvmChangeStatus(NULL, THREAD_VMWAIT);

        /* Signal anyone who wants to know when we're done. */
        dvmBroadcastCond(&gDvm.heapWorkerIdleCond);

        /* Trim the heap if we were asked to. */
        trimtime = gDvm.gcHeap->heapWorkerNextTrim;
        if (trimtime.tv_sec != 0 && trimtime.tv_nsec != 0) {
            struct timespec now;

#ifdef HAVE_TIMEDWAIT_MONOTONIC
            clock_gettime(CLOCK_MONOTONIC, &now);       // relative time
#else
            struct timeval tvnow;
            gettimeofday(&tvnow, NULL);                 // absolute time
            now.tv_sec = tvnow.tv_sec;
            now.tv_nsec = tvnow.tv_usec * 1000;
#endif

            if (trimtime.tv_sec < now.tv_sec ||
                (trimtime.tv_sec == now.tv_sec &&
                 trimtime.tv_nsec <= now.tv_nsec))
            {
                size_t madvisedSizes[HEAP_SOURCE_MAX_HEAP_COUNT];

                /*
                 * Acquire the gcHeapLock.  This requires releasing the
                 * heapWorkerLock before the gcHeapLock is acquired.
                 * It is possible that the gcHeapLock may be acquired
                 * during a concurrent GC in which case heapWorkerLock
                 * is held by the GC and we are unable to make forward
                 * progress.  We avoid deadlock by releasing the
                 * gcHeapLock and then waiting to be signaled when the
                 * GC completes.  There is no guarantee that the next
                 * time we are run will coincide with GC inactivity so
                 * the check and wait must be performed within a loop.
                 */
                dvmUnlockMutex(&gDvm.heapWorkerLock);
                dvmLockHeap();
                while (gDvm.gcHeap->gcRunning) {
                    dvmWaitForConcurrentGcToComplete();
                }
                dvmLockMutex(&gDvm.heapWorkerLock);

                memset(madvisedSizes, 0, sizeof(madvisedSizes));
                dvmHeapSourceTrim(madvisedSizes, HEAP_SOURCE_MAX_HEAP_COUNT);
                dvmLogMadviseStats(madvisedSizes, HEAP_SOURCE_MAX_HEAP_COUNT);

                dvmUnlockHeap();

                trimtime.tv_sec = 0;
                trimtime.tv_nsec = 0;
                gDvm.gcHeap->heapWorkerNextTrim = trimtime;
            } else {
                timedwait = true;
            }
        }

        /* sleep until signaled */
        if (timedwait) {
            int cc __attribute__ ((__unused__));
#ifdef HAVE_TIMEDWAIT_MONOTONIC
            cc = pthread_cond_timedwait_monotonic(&gDvm.heapWorkerCond,
                    &gDvm.heapWorkerLock, &trimtime);
#else
            cc = pthread_cond_timedwait(&gDvm.heapWorkerCond,
                    &gDvm.heapWorkerLock, &trimtime);
#endif
            assert(cc == 0 || cc == ETIMEDOUT);
        } else {
            dvmWaitCond(&gDvm.heapWorkerCond, &gDvm.heapWorkerLock);
        }

        /*
         * Return to the running state before doing heap work.  This
         * will block if the GC has initiated a suspend.  We release
         * the heapWorkerLock beforehand for the GC to make progress
         * and wait to be signaled after the GC completes.  There is
         * no guarantee that the next time we are run will coincide
         * with GC inactivity so the check and wait must be performed
         * within a loop.
         */
        dvmUnlockMutex(&gDvm.heapWorkerLock);
        dvmChangeStatus(NULL, THREAD_RUNNING);
        dvmLockHeap();
        while (gDvm.gcHeap->gcRunning) {
            dvmWaitForConcurrentGcToComplete();
        }
        dvmLockMutex(&gDvm.heapWorkerLock);
        dvmUnlockHeap();

        LOGV("HeapWorker is awake\n");

        /* Process any events in the queue. */
        doHeapWork(self);
    }
    dvmUnlockMutex(&gDvm.heapWorkerLock);

    if (gDvm.verboseShutdown)
        LOGD("HeapWorker thread shutting down\n");
    return NULL;
}
void dvmDdmSendHeapInfo(int reason, bool shouldLock)
{
    struct timeval now;
    u8 nowMs;
    u1 *buf, *b;

    buf = (u1 *)malloc(HPIF_SIZE(1));
    if (buf == NULL) {
        return;
    }
    b = buf;

    /* If there's a one-shot 'when', reset it.
     */
    if (reason == gDvm.gcHeap->ddmHpifWhen) {
        if (shouldLock && ! dvmLockHeap()) {
            ALOGW("%s(): can't lock heap to clear when", __func__);
            goto skip_when;
        }
        if (reason == gDvm.gcHeap->ddmHpifWhen) {
            if (gDvm.gcHeap->ddmHpifWhen == HPIF_WHEN_NEXT_GC) {
                gDvm.gcHeap->ddmHpifWhen = HPIF_WHEN_NEVER;
            }
        }
        if (shouldLock) {
            dvmUnlockHeap();
        }
    }
skip_when:

    /* The current time, in milliseconds since 0:00 GMT, 1/1/70.
     */
    if (gettimeofday(&now, NULL) < 0) {
        nowMs = 0;
    } else {
        nowMs = (u8)now.tv_sec * 1000 + now.tv_usec / 1000;
    }

    /* number of heaps */
    set4BE(b, 1); b += 4;

    /* For each heap (of which there is one) */
    {
        /* heap ID */
        set4BE(b, DEFAULT_HEAP_ID); b += 4;

        /* timestamp */
        set8BE(b, nowMs); b += 8;

        /* 'when' value */
        *b++ = (u1)reason;

        /* max allowed heap size in bytes */
        set4BE(b, dvmHeapSourceGetMaximumSize()); b += 4;

        /* current heap size in bytes */
        set4BE(b, dvmHeapSourceGetValue(HS_FOOTPRINT, NULL, 0)); b += 4;

        /* number of bytes allocated */
        set4BE(b, dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, NULL, 0)); b += 4;

        /* number of objects allocated */
        set4BE(b, dvmHeapSourceGetValue(HS_OBJECTS_ALLOCATED, NULL, 0)); b += 4;
    }
    assert((intptr_t)b == (intptr_t)buf + (intptr_t)HPIF_SIZE(1));

    dvmDbgDdmSendChunk(CHUNK_TYPE("HPIF"), b - buf, buf);
}
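The buffer sizing relies on HPIF_SIZE(1), which is not shown above. Counting the bytes actually written (a 4-byte heap count, then per heap: 4-byte heap ID, 8-byte timestamp, 1-byte 'when', and four 4-byte counters) gives 4 + 29 bytes per heap, so the macro is presumably along these lines; this is inferred, not copied from the header:

/* Inferred from the writes in dvmDdmSendHeapInfo(). */
#define HPIF_SIZE(heapCount) \
        (4 + (heapCount) * (4 + 8 + 1 + 4 + 4 + 4 + 4))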
void* fastiva_dvmMalloc(size_t size, int flags, ClassObject* clazz) {
#endif
    void *ptr;

    dvmLockHeap();

    /* Try as hard as possible to allocate some memory.
     */
    ptr = tryMalloc(size);
    if (ptr != NULL) {
        /* We've got the memory.
         */
        if (gDvm.allocProf.enabled) {
            Thread* self = dvmThreadSelf();
            gDvm.allocProf.allocCount++;
            gDvm.allocProf.allocSize += size;
            if (self != NULL) {
                self->allocProf.allocCount++;
                self->allocProf.allocSize += size;
            }
        }
    } else {
        /* The allocation failed.
         */
        if (gDvm.allocProf.enabled) {
            Thread* self = dvmThreadSelf();
            gDvm.allocProf.failedAllocCount++;
            gDvm.allocProf.failedAllocSize += size;
            if (self != NULL) {
                self->allocProf.failedAllocCount++;
                self->allocProf.failedAllocSize += size;
            }
        }
    }

#ifdef FASTIVA
    if (clazz != NULL && ptr != NULL) {
        // for suspend-by-signal it must be in HeapLocked scope.
        DVM_OBJECT_INIT((Object*)ptr, clazz);
        d2f_initObject((Object*)ptr, clazz);
    }
#endif
    dvmUnlockHeap();

    if (ptr != NULL) {
        /*
         * If caller hasn't asked us not to track it, add it to the
         * internal tracking list.
         */
        if ((flags & ALLOC_DONT_TRACK) == 0) {
            dvmAddTrackedAlloc((Object*)ptr, NULL);
        }
    } else {
        /*
         * The allocation failed; throw an OutOfMemoryError.
         */
        throwOOME();
    }

    return ptr;
}