/* Process one singly-linked list of Reference objects of a single strength
 * (soft, weak, or phantom), clearing and/or enqueueing those whose referents
 * are unmarked, and scheduling the corresponding heap-worker operations.
 *
 * All objects for stronger reference levels have been
 * marked before this is called.
 *
 * refListHead: head of the list built by scanObject() via the Reference
 *              objects' vmData "next" fields (NULL-terminated).
 * refType:     the strength of every Reference on this list.
 */
void dvmHeapHandleReferences(Object *refListHead, enum RefType refType)
{
    Object *reference;
    GcMarkContext *markContext = &gDvm.gcHeap->markContext;
    const int offVmData = gDvm.offJavaLangRefReference_vmData;
    const int offReferent = gDvm.offJavaLangRefReference_referent;
    bool workRequired = false;
    size_t numCleared = 0;
    size_t numEnqueued = 0;

    /* First pass: decide, per reference, whether to clear and/or enqueue. */
    reference = refListHead;
    while (reference != NULL) {
        Object *next;
        Object *referent;

        /* Pull the interesting fields out of the Reference object.
         * (next must be read before we overwrite vmData below.)
         */
        next = dvmGetFieldObject(reference, offVmData);
        referent = dvmGetFieldObject(reference, offReferent);

        //TODO: when handling REF_PHANTOM, unlink any references
        //      that fail this initial if().  We need to re-walk
        //      the list, and it would be nice to avoid the extra
        //      work.
        if (referent != NULL && !isMarked(ptr2chunk(referent), markContext)) {
            bool schedClear, schedEnqueue;

            /* This is the strongest reference that refers to referent.
             * Do the right thing.
             */
            switch (refType) {
            case REF_SOFT:
            case REF_WEAK:
                schedClear = clearReference(reference);
                schedEnqueue = enqueueReference(reference);
                break;
            case REF_PHANTOM:
                /* PhantomReferences are not cleared automatically.
                 * Until someone clears it (or the reference itself
                 * is collected), the referent must remain alive.
                 *
                 * It's necessary to fully mark the referent because
                 * it will still be present during the next GC, and
                 * all objects that it points to must be valid.
                 * (The referent will be marked outside of this loop,
                 * after handling all references of this strength, in
                 * case multiple references point to the same object.)
                 */
                schedClear = false;

                /* A PhantomReference is only useful with a
                 * queue, but since it's possible to create one
                 * without a queue, we need to check.
                 */
                schedEnqueue = enqueueReference(reference);
                break;
            default:
                assert(!"Bad reference type");
                schedClear = false;
                schedEnqueue = false;
                break;
            }
            numCleared += schedClear ? 1 : 0;
            numEnqueued += schedEnqueue ? 1 : 0;

            if (schedClear || schedEnqueue) {
                uintptr_t workBits;

                /* Stuff the clear/enqueue bits in the bottom of
                 * the pointer.  Only relies on 4-byte object alignment,
                 * which is what the asserts below actually check
                 * (two tag bits need two zero low bits).
                 *
                 * Note that we are adding the *Reference* (which
                 * is by definition already marked at this point) to
                 * this list; we're not adding the referent (which
                 * has already been cleared).
                 */
                assert(((intptr_t)reference & 3) == 0);
                assert(((WORKER_CLEAR | WORKER_ENQUEUE) & ~3) == 0);
                workBits = (schedClear ? WORKER_CLEAR : 0) |
                           (schedEnqueue ? WORKER_ENQUEUE : 0);
                if (!dvmHeapAddRefToLargeTable(
                        &gDvm.gcHeap->referenceOperations,
                        (Object *)((uintptr_t)reference | workBits)))
                {
                    LOGE_HEAP("dvmMalloc(): no room for any more "
                            "reference operations\n");
                    dvmAbort();
                }
                workRequired = true;
            }

            if (refType != REF_PHANTOM) {
                /* Let later GCs know not to reschedule this reference. */
                dvmSetFieldObject(reference, offVmData,
                        SCHEDULED_REFERENCE_MAGIC);
            }   // else this is handled later for REF_PHANTOM

        }   // else there was a stronger reference to the referent.

        reference = next;
    }

#define refType2str(r) \
    ((r) == REF_SOFT ? "soft" : ( \
     (r) == REF_WEAK ? "weak" : ( \
     (r) == REF_PHANTOM ? "phantom" : "UNKNOWN" )))

    /* NOTE(review): %zd with size_t arguments; %zu would match exactly. */
    LOGD_HEAP("dvmHeapHandleReferences(): cleared %zd, enqueued %zd %s references\n",
            numCleared, numEnqueued, refType2str(refType));

    /* Walk through the reference list again, and mark any non-clear/marked
     * referents.  Only PhantomReferences can have non-clear referents
     * at this point.
     */
    if (refType == REF_PHANTOM) {
        bool scanRequired = false;

        HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_REFERENCE_CLEANUP, 0);
        reference = refListHead;
        while (reference != NULL) {
            Object *next;
            Object *referent;

            /* Pull the interesting fields out of the Reference object. */
            next = dvmGetFieldObject(reference, offVmData);
            referent = dvmGetFieldObject(reference, offReferent);

            if (referent != NULL && !isMarked(ptr2chunk(referent), markContext)) {
                markObjectNonNull(referent, markContext);
                scanRequired = true;

                /* Let later GCs know not to reschedule this reference. */
                dvmSetFieldObject(reference, offVmData,
                        SCHEDULED_REFERENCE_MAGIC);
            }

            reference = next;
        }
        HPROF_CLEAR_GC_SCAN_STATE();

        if (scanRequired) {
            /* Transitively mark everything reachable from the referents
             * we just marked, so they stay valid until the next GC.
             */
            processMarkStack(markContext);
        }
    }

    if (workRequired) {
        /* Wake the heap worker to run the scheduled clear/enqueue ops. */
        dvmSignalHeapWorker(false);
    }
}
/* Mark all objects that obj refers to.
 *
 * Called on every object in markList.  Marks the object's class, then
 * its references (array elements or instance fields).  For subclasses
 * of java/lang/ref/Reference the referent field is handled specially:
 * live references are chained onto the per-strength lists in gDvm.gcHeap
 * (softReferences/weakReferences/phantomReferences) via their vmData
 * field, for later processing by dvmHeapHandleReferences().
 */
static void scanObject(const Object *obj, GcMarkContext *ctx)
{
    ClassObject *clazz;

    assert(dvmIsValidObject(obj));
    LOGV_SCAN("0x%08x %s\n", (uint)obj, obj->clazz->name);
#if WITH_HPROF
    if (gDvm.gcHeap->hprofContext != NULL) {
        hprofDumpHeapObject(gDvm.gcHeap->hprofContext, obj);
    }
#endif

    /* Get and mark the class object for this particular instance.
     */
    clazz = obj->clazz;
    if (clazz == NULL) {
        /* This can happen if we catch an object between
         * dvmMalloc() and DVM_OBJECT_INIT().  The object
         * won't contain any references yet, so we can
         * just skip it.
         */
        return;
    } else if (clazz == gDvm.unlinkedJavaLangClass) {
        /* This class hasn't been linked yet.  We're guaranteed
         * that the object doesn't contain any references that
         * aren't already tracked, so we can skip scanning it.
         *
         * NOTE: unlinkedJavaLangClass is not on the heap, so
         * it's very important that we don't try marking it.
         */
        return;
    }
#if WITH_OBJECT_HEADERS
    gMarkParent = obj;
    if (ptr2chunk(obj)->scanGeneration == gGeneration) {
        LOGE("object 0x%08x was already scanned this generation\n",
                (uintptr_t)obj);
        dvmAbort();
    }
    ptr2chunk(obj)->oldScanGeneration = ptr2chunk(obj)->scanGeneration;
    ptr2chunk(obj)->scanGeneration = gGeneration;
    ptr2chunk(obj)->scanCount++;
#endif

    assert(dvmIsValidObject((Object *)clazz));
    markObjectNonNull((Object *)clazz, ctx);

    /* Mark any references in this object.
     */
    if (IS_CLASS_FLAG_SET(clazz, CLASS_ISARRAY)) {
        /* It's an array object.
         */
        if (IS_CLASS_FLAG_SET(clazz, CLASS_ISOBJECTARRAY)) {
            /* It's an array of object references.
             */
            scanObjectArray((ArrayObject *)obj, ctx);
        }
        // else there's nothing else to scan
    } else {
        /* It's a DataObject-compatible object.
         */
        scanInstanceFields((DataObject *)obj, clazz, ctx);

        if (IS_CLASS_FLAG_SET(clazz, CLASS_ISREFERENCE)) {
            GcHeap *gcHeap = gDvm.gcHeap;
            Object *referent;

            /* It's a subclass of java/lang/ref/Reference.
             * The fields in this class have been arranged
             * such that scanInstanceFields() did not actually
             * mark the "referent" field;  we need to handle
             * it specially.
             *
             * If the referent already has a strong mark (isMarked(referent)),
             * we don't care about its reference status.
             */
            referent = dvmGetFieldObject(obj,
                    gDvm.offJavaLangRefReference_referent);
            if (referent != NULL &&
                    !isMarked(ptr2chunk(referent), &gcHeap->markContext))
            {
                u4 refFlags;

                if (gcHeap->markAllReferents) {
                    LOG_REF("Hard-marking a reference\n");

                    /* Don't bother with normal reference-following
                     * behavior, just mark the referent.  This should
                     * only be used when following objects that just
                     * became scheduled for finalization.
                     */
                    markObjectNonNull(referent, ctx);
                    goto skip_reference;
                }

                /* See if this reference was handled by a previous GC.
                 */
                if (dvmGetFieldObject(obj,
                        gDvm.offJavaLangRefReference_vmData) ==
                        SCHEDULED_REFERENCE_MAGIC)
                {
                    LOG_REF("Skipping scheduled reference\n");

                    /* Don't reschedule it, but make sure that its
                     * referent doesn't get collected (in case it's
                     * a PhantomReference and wasn't cleared automatically).
                     */
                    //TODO: Mark these after handling all new refs of
                    //      this strength, in case the new refs refer
                    //      to the same referent.  Not a very common
                    //      case, though.
                    markObjectNonNull(referent, ctx);
                    goto skip_reference;
                }

                /* Find out what kind of reference is pointing
                 * to referent.
                 */
                refFlags = GET_CLASS_FLAG_GROUP(clazz,
                        CLASS_ISREFERENCE |
                        CLASS_ISWEAKREFERENCE |
                        CLASS_ISPHANTOMREFERENCE);

            /* We use the vmData field of Reference objects
             * as a next pointer in a singly-linked list.
             * That way, we don't need to allocate any memory
             * while we're doing a GC.
             */
#define ADD_REF_TO_LIST(list, ref) \
            do { \
                Object *ARTL_ref_ = (/*de-const*/Object *)(ref); \
                dvmSetFieldObject(ARTL_ref_, \
                        gDvm.offJavaLangRefReference_vmData, list); \
                list = ARTL_ref_; \
            } while (false)

                /* At this stage, we just keep track of all of
                 * the live references that we've seen.  Later,
                 * we'll walk through each of these lists and
                 * deal with the referents.
                 */
                if (refFlags == CLASS_ISREFERENCE) {
                    /* It's a soft reference.  Depending on the state,
                     * we'll attempt to collect all of them, some of
                     * them, or none of them.
                     */
                    if (gcHeap->softReferenceCollectionState ==
                            SR_COLLECT_NONE)
                    {
                sr_collect_none:
                        markObjectNonNull(referent, ctx);
                    } else if (gcHeap->softReferenceCollectionState ==
                            SR_COLLECT_ALL)
                    {
                sr_collect_all:
                        ADD_REF_TO_LIST(gcHeap->softReferences, obj);
                    } else {
                        /* We'll only try to collect half of the
                         * referents.
                         */
                        if (gcHeap->softReferenceColor++ & 1) {
                            goto sr_collect_none;
                        }
                        goto sr_collect_all;
                    }
                } else {
                    /* It's a weak or phantom reference.
                     * Clearing CLASS_ISREFERENCE will reveal which.
                     */
                    refFlags &= ~CLASS_ISREFERENCE;
                    if (refFlags == CLASS_ISWEAKREFERENCE) {
                        ADD_REF_TO_LIST(gcHeap->weakReferences, obj);
                    } else if (refFlags == CLASS_ISPHANTOMREFERENCE) {
                        ADD_REF_TO_LIST(gcHeap->phantomReferences, obj);
                    } else {
                        assert(!"Unknown reference type");
                    }
                }
#undef ADD_REF_TO_LIST
            }
        }

    skip_reference:
        /* If this is a class object, mark various other things that
         * its internals point to.
         *
         * All class objects are instances of java.lang.Class,
         * including the java.lang.Class class object.
         */
        if (clazz == gDvm.classJavaLangClass) {
            scanClassObject((ClassObject *)obj, ctx);
        }
    }

#if WITH_OBJECT_HEADERS
    gMarkParent = NULL;
#endif
}
/* Set the mark bit for a non-NULL object, and push it on the mark stack
 * if it hasn't been marked before and needs a later scan.
 *
 * obj:         the object to mark; must be non-NULL.
 * ctx:         the current mark context.
 * checkFinger: if true, only push objects below ctx->finger (objects at
 *              or above the finger will be reached by the finger sweep).
 * forceStack:  if true, always push newly-marked objects.
 *
 * NOTE(review): the leading-underscore name is in the implementation's
 * reserved namespace at file scope; consider renaming — confirm against
 * callers before changing.
 */
static void _markObjectNonNullCommon(const Object *obj, GcMarkContext *ctx,
        bool checkFinger, bool forceStack)
{
    DvmHeapChunk *hc;

    assert(obj != NULL);

#if GC_DEBUG(GC_DEBUG_PARANOID)
//TODO: make sure we're locked
    assert(obj != (Object *)gDvm.unlinkedJavaLangClass);
    assert(dvmIsValidObject(obj));
#endif

    hc = ptr2chunk(obj);
    if (!setAndReturnMarkBit(ctx, hc)) {
        /* This object was not previously marked.
         */
        if (forceStack || (checkFinger && (void *)hc < ctx->finger)) {
            /* This object will need to go on the mark stack.
             */
            MARK_STACK_PUSH(ctx->stack, obj);
        }

#if WITH_OBJECT_HEADERS
        /* Per-chunk mark/scan bookkeeping, with sanity checks against
         * double-marks and missed scans from the previous generation.
         */
        if (hc->scanGeneration != hc->markGeneration) {
            LOGE("markObject(0x%08x): wasn't scanned last time\n", (uint)obj);
            dvmAbort();
        }
        if (hc->markGeneration == gGeneration) {
            LOGE("markObject(0x%08x): already marked this generation\n",
                    (uint)obj);
            dvmAbort();
        }
        hc->oldMarkGeneration = hc->markGeneration;
        hc->markGeneration = gGeneration;
        hc->markFingerOld = hc->markFinger;
        hc->markFinger = ctx->finger;
        if (gMarkParent != NULL) {
            hc->parentOld = hc->parent;
            hc->parent = gMarkParent;
        } else {
            /* No parent; tag the old parent pointer's low bit instead. */
            hc->parent = (const Object *)((uintptr_t)hc->parent | 1);
        }
        hc->markCount++;
#endif
#if WITH_HPROF
        if (gDvm.gcHeap->hprofContext != NULL) {
            hprofMarkRootObject(gDvm.gcHeap->hprofContext, obj, 0);
        }
#endif
#if DVM_TRACK_HEAP_MARKING
        gDvm.gcHeap->markCount++;
        gDvm.gcHeap->markSize += dvmHeapSourceChunkSize((void *)hc) +
                HEAP_SOURCE_CHUNK_OVERHEAD;
#endif

        /* obj->clazz can be NULL if we catch an object between
         * dvmMalloc() and DVM_OBJECT_INIT().  This is ok.
         */
        LOGV_MARK("0x%08x %s\n", (uint)obj,
                obj->clazz == NULL ? "<null class>" : obj->clazz->name);
    }
}
/* A function suitable for passing to dvmHashForeachRemove() * to clear out any unmarked objects. Clears the low bits * of the pointer because the intern table may set them. */ static int isUnmarkedObject(void *object) { return !isMarked(ptr2chunk((uintptr_t)object & ~(HB_OBJECT_ALIGNMENT-1)), &gDvm.gcHeap->markContext); }
/* Find unreachable objects that need to be finalized,
 * and schedule them for finalization.
 *
 * Moves every unmarked entry from gcHeap->finalizableRefs into
 * pendingFinalizationRefs, re-marks the moved objects (and, via
 * markAllReferents, everything they reference) so they survive this
 * collection, and signals the heap worker to run the finalizers.
 */
void dvmHeapScheduleFinalizations()
{
    HeapRefTable newPendingRefs;
    LargeHeapRefTable *finRefs = gDvm.gcHeap->finalizableRefs;
    Object **ref;
    Object **lastRef;
    size_t totalPendCount;
    GcMarkContext *markContext = &gDvm.gcHeap->markContext;

    /*
     * All reachable objects have been marked.
     * Any unmarked finalizable objects need to be finalized.
     */

    /* Create a table that the new pending refs will
     * be added to.
     */
    if (!dvmHeapInitHeapRefTable(&newPendingRefs, 128)) {
        //TODO: mark all finalizable refs and hope that
        //      we can schedule them next time.  Watch out,
        //      because we may be expecting to free up space
        //      by calling finalizers.
        LOGE_GC("dvmHeapScheduleFinalizations(): no room for "
                "pending finalizations\n");
        dvmAbort();
    }

    /* Walk through finalizableRefs and move any unmarked references
     * to the list of new pending refs.  Marked references are compacted
     * in place (via gapRef) to fill the holes left by the moved ones.
     */
    totalPendCount = 0;
    while (finRefs != NULL) {
        Object **gapRef;
        size_t newPendCount = 0;

        gapRef = ref = finRefs->refs.table;
        lastRef = finRefs->refs.nextEntry;
        while (ref < lastRef) {
            DvmHeapChunk *hc;

            hc = ptr2chunk(*ref);
            if (!isMarked(hc, markContext)) {
                if (!dvmHeapAddToHeapRefTable(&newPendingRefs, *ref)) {
                    //TODO: add the current table and allocate
                    //      a new, smaller one.
                    LOGE_GC("dvmHeapScheduleFinalizations(): "
                            "no room for any more pending finalizations: %zd\n",
                            dvmHeapNumHeapRefTableEntries(&newPendingRefs));
                    dvmAbort();
                }
                newPendCount++;
            } else {
                /* This ref is marked, so will remain on finalizableRefs.
                 */
                if (newPendCount > 0) {
                    /* Copy it up to fill the holes.
                     */
                    *gapRef++ = *ref;
                } else {
                    /* No holes yet; don't bother copying.
                     */
                    gapRef++;
                }
            }
            ref++;
        }
        finRefs->refs.nextEntry = gapRef;
        //TODO: if the table is empty when we're done, free it.
        totalPendCount += newPendCount;
        finRefs = finRefs->next;
    }
    LOGD_GC("dvmHeapScheduleFinalizations(): %zd finalizers triggered.\n",
            totalPendCount);
    if (totalPendCount == 0) {
        /* No objects required finalization.
         * Free the empty temporary table.
         */
        dvmClearReferenceTable(&newPendingRefs);
        return;
    }

    /* Add the new pending refs to the main list.
     */
    if (!dvmHeapAddTableToLargeTable(&gDvm.gcHeap->pendingFinalizationRefs,
                &newPendingRefs))
    {
        LOGE_GC("dvmHeapScheduleFinalizations(): can't insert new "
                "pending finalizations\n");
        dvmAbort();
    }

    //TODO: try compacting the main list with a memcpy loop

    /* Mark the refs we just moved;  we don't want them or their
     * children to get swept yet.
     */
    ref = newPendingRefs.table;
    lastRef = newPendingRefs.nextEntry;
    assert(ref < lastRef);
    HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_FINALIZING, 0);
    while (ref < lastRef) {
        markObjectNonNull(*ref, markContext);
        ref++;
    }
    HPROF_CLEAR_GC_SCAN_STATE();

    /* Set markAllReferents so that we don't collect referents whose
     * only references are in final-reachable objects.
     * TODO: eventually provide normal reference behavior by properly
     *       marking these references.
     */
    gDvm.gcHeap->markAllReferents = true;
    processMarkStack(markContext);
    gDvm.gcHeap->markAllReferents = false;

    dvmSignalHeapWorker(false);
}
void hprofFillInStackTrace(void *objectPtr) { DvmHeapChunk *chunk; StackTraceEntry stackTraceEntry; Thread* self; void* fp; int i; if (objectPtr == NULL) { return; } self = dvmThreadSelf(); if (self == NULL) { return; } fp = self->curFrame; /* Serial number to be filled in later. */ stackTraceEntry.trace.serialNumber = -1; /* * TODO - The HAT tool doesn't care about thread data, so we can defer * actually emitting thread records and assigning thread serial numbers. */ stackTraceEntry.trace.threadSerialNumber = (int) self; memset(&stackTraceEntry.trace.frameIds, 0, sizeof(stackTraceEntry.trace.frameIds)); i = 0; while ((fp != NULL) && (i < STACK_DEPTH)) { const StackSaveArea* saveArea = SAVEAREA_FROM_FP(fp); const Method* method = saveArea->method; StackFrameEntry frame; if (!dvmIsBreakFrame(fp)) { frame.frame.method = method; if (dvmIsNativeMethod(method)) { frame.frame.pc = 0; /* no saved PC for native methods */ } else { assert(saveArea->xtra.currentPc >= method->insns && saveArea->xtra.currentPc < method->insns + dvmGetMethodInsnsSize(method)); frame.frame.pc = (int) (saveArea->xtra.currentPc - method->insns); } // Canonicalize the frame and cache it in the hprof context stackTraceEntry.trace.frameIds[i++] = hprofLookupStackFrameId(&frame); } assert(fp != saveArea->prevFrame); fp = saveArea->prevFrame; } /* Store the stack trace serial number in the object header */ chunk = ptr2chunk(objectPtr); chunk->stackTraceSerialNumber = hprofLookupStackSerialNumber(&stackTraceEntry); }