/*
 * Pop a frame we added.  There should be one method frame and one break
 * frame.
 *
 * If JNI Push/PopLocalFrame calls were mismatched, we might end up
 * popping multiple method frames before we find the break.
 *
 * Returns "false" if there was no frame to pop.
 */
static bool dvmPopFrame(Thread* self)
{
    StackSaveArea* saveBlock;

    if (self->interpSave.curFrame == NULL)
        return false;

    saveBlock = SAVEAREA_FROM_FP(self->interpSave.curFrame);
    assert(!dvmIsBreakFrame((u4*)self->interpSave.curFrame));

    /*
     * Remove everything up to the break frame.  If this was a call into
     * native code, pop the JNI local references table.
     */
    while (saveBlock->prevFrame != NULL && saveBlock->method != NULL) {
        /* probably a native->native JNI call */
        if (dvmIsNativeMethod(saveBlock->method)) {
            LOGVV("Popping JNI stack frame for %s.%s%s",
                saveBlock->method->clazz->descriptor,
                saveBlock->method->name,
                (SAVEAREA_FROM_FP(saveBlock->prevFrame)->method == NULL) ?
                "" : " (JNI local)");
            dvmPopJniLocals(self, saveBlock);
        }

        saveBlock = SAVEAREA_FROM_FP(saveBlock->prevFrame);
    }

    if (saveBlock->method != NULL) {
        ALOGE("PopFrame missed the break");
        assert(false);
        dvmAbort();     // stack trashed -- nowhere to go in this thread
    }

    LOGVV("POP frame: cur=%p new=%p",
        self->interpSave.curFrame, saveBlock->prevFrame);

    self->interpSave.curFrame = saveBlock->prevFrame;

#ifdef WITH_OFFLOAD
    offStackFramePopped(self);
    self->breakFrames--;
    CHECK_BREAK_FRAMES();
#endif

    return true;
}

/*
 * This is the constructor for a generated proxy object.
 */
static void proxyConstructor(const u4* args, JValue* pResult,
    const Method* method, Thread* self)
{
    Object* obj = (Object*) args[0];
    Object* handler = (Object*) args[1];
    ClassObject* clazz = obj->clazz;
    int fieldOffset;

    fieldOffset = dvmFindFieldOffset(clazz, "h",
                    "Ljava/lang/reflect/InvocationHandler;");
    if (fieldOffset < 0) {
        LOGE("Unable to find 'h' in Proxy object\n");
        //dvmDumpClass(clazz, kDumpClassFullDetail);
        dvmAbort();     // this should never happen
    }
    dvmSetFieldObject(obj, fieldOffset, handler);
}

/*
 * Convert an array of char* into a String[].
 *
 * Returns NULL on failure, with an exception raised.
 */
static ArrayObject* convertStringArray(char** strings, size_t count)
{
    Thread* self = dvmThreadSelf();

    /*
     * Allocate an array to hold the String objects.
     */
    ClassObject* stringArrayClass =
        dvmFindArrayClass("[Ljava/lang/String;", NULL);
    if (stringArrayClass == NULL) {
        /* shouldn't happen */
        LOGE("Unable to find [Ljava/lang/String;\n");
        dvmAbort();
    }

    ArrayObject* stringArray =
        dvmAllocArrayByClass(stringArrayClass, count, ALLOC_DEFAULT);
    if (stringArray == NULL) {
        /* probably OOM */
        LOGD("Failed allocating array of %d strings\n", count);
        assert(dvmCheckException(self));
        return NULL;
    }

    /*
     * Create the individual String objects and add them to the array.
     */
    size_t i;
    for (i = 0; i < count; i++) {
        Object* str = (Object*) dvmCreateStringFromCstr(strings[i]);
        if (str == NULL) {
            /* probably OOM; drop out now */
            assert(dvmCheckException(self));
            dvmReleaseTrackedAlloc((Object*)stringArray, self);
            return NULL;
        }
        dvmSetObjectArrayElement(stringArray, i, str);
        /* stored in tracked array, okay to release */
        dvmReleaseTrackedAlloc(str, self);
    }

    dvmReleaseTrackedAlloc((Object*)stringArray, self);
    return stringArray;
}

/*
 * Open up the reserved area and throw an exception.  The reserved area
 * should only be needed to create and initialize the exception itself.
 *
 * If we already opened it and we're continuing to overflow, abort the VM.
 *
 * We have to leave the "reserved" area open until the "catch" handler has
 * finished doing its processing.  This is because the catch handler may
 * need to resolve classes, which requires calling into the class loader if
 * the classes aren't already in the "initiating loader" list.
 */
void dvmHandleStackOverflow(Thread* self, const Method* method)
{
    /*
     * Can we make the reserved area available?
     */
    if (self->stackOverflowed) {
        /*
         * Already did, nothing to do but bail.
         */
        LOGE("DalvikVM: double-overflow of stack in threadid=%d; aborting\n",
            self->threadId);
        dvmDumpThread(self, false);
        dvmAbort();
    }

    /* open it up to the full range */
    LOGI("threadid=%d: stack overflow on call to %s.%s:%s\n",
        self->threadId,
        method->clazz->descriptor, method->name, method->shorty);
    StackSaveArea* saveArea = SAVEAREA_FROM_FP(self->curFrame);
    LOGI("  method requires %d+%d+%d=%d bytes, fp is %p (%d left)\n",
        method->registersSize * 4, sizeof(StackSaveArea),
        method->outsSize * 4,
        (method->registersSize + method->outsSize) * 4 +
            sizeof(StackSaveArea),
        saveArea, (u1*) saveArea - self->interpStackEnd);
    LOGI("  expanding stack end (%p to %p)\n", self->interpStackEnd,
        self->interpStackStart - self->interpStackSize);
    //dvmDumpThread(self, false);
    self->interpStackEnd = self->interpStackStart - self->interpStackSize;
    self->stackOverflowed = true;

    /*
     * If we were trying to throw an exception when the stack overflowed,
     * we will blow up when doing the class lookup on StackOverflowError
     * because of the pending exception.  So, we clear it and make it
     * the cause of the SOE.
     */
    Object* excep = dvmGetException(self);
    if (excep != NULL) {
        LOGW("Stack overflow while throwing exception\n");
        dvmClearException(self);
    }
    dvmThrowChainedExceptionByClass(gDvm.classJavaLangStackOverflowError,
        NULL, excep);
}

/*
 * Pop a frame we added.  There should be one method frame and one break
 * frame.
 *
 * If JNI Push/PopLocalFrame calls were mismatched, we might end up
 * popping multiple method frames before we find the break.
 *
 * Returns "false" if there was no frame to pop.
 */
static bool dvmPopFrame(Thread* self)
{
    StackSaveArea* saveBlock;

    if (self->curFrame == NULL)
        return false;

    saveBlock = SAVEAREA_FROM_FP(self->curFrame);
    assert(!dvmIsBreakFrame(self->curFrame));

    /*
     * Remove everything up to the break frame.  If this was a call into
     * native code, pop the JNI local references table.
     */
    while (saveBlock->prevFrame != NULL && saveBlock->method != NULL) {
        /* probably a native->native JNI call */
        if (dvmIsNativeMethod(saveBlock->method)) {
            LOGVV("Popping JNI stack frame for %s.%s%s\n",
                saveBlock->method->clazz->descriptor,
                saveBlock->method->name,
                (SAVEAREA_FROM_FP(saveBlock->prevFrame)->method == NULL) ?
                "" : " (JNI local)");
            assert(saveBlock->xtra.localRefCookie != 0);
            //assert(saveBlock->xtra.localRefCookie >= self->jniLocalRefTable.table &&
            //    saveBlock->xtra.localRefCookie <= self->jniLocalRefTable.nextEntry);

            dvmPopJniLocals(self, saveBlock);
        }

        saveBlock = SAVEAREA_FROM_FP(saveBlock->prevFrame);
    }

    if (saveBlock->method != NULL) {
        LOGE("PopFrame missed the break\n");
        assert(false);
        dvmAbort();     // stack trashed -- nowhere to go in this thread
    }

    LOGVV("POP frame: cur=%p new=%p\n",
        self->curFrame, saveBlock->prevFrame);

    self->curFrame = saveBlock->prevFrame;
    return true;
}

/*
 * native public static long nativePreFork()
 */
static void Dalvik_dalvik_system_ZygoteHooks_preFork(const u4* args,
    JValue* pResult)
{
    dvmDumpLoaderStats("zygote");

    if (!gDvm.zygote) {
        dvmThrowIllegalStateException(
            "VM instance not started with -Xzygote");
        RETURN_LONG(-1L);
    }

    if (!dvmGcPreZygoteFork()) {
        ALOGE("pre-fork heap failed");
        dvmAbort();
    }

    RETURN_LONG(0L);
}

/*
 * Stop tracking an object.
 *
 * We allow attempts to delete NULL "obj" so that callers don't have to wrap
 * calls with "if != NULL".
 */
void dvmReleaseTrackedAlloc(Object* obj, Thread* self)
{
    if (obj == NULL)
        return;

    if (self == NULL)
        self = dvmThreadSelf();
    assert(self != NULL);

    //LOGI("TRACK REM %p (%s)\n", obj,
    //    (obj->clazz != NULL) ? obj->clazz->name : "");

    if (!dvmRemoveFromReferenceTable(&self->internalLocalRefTable,
            self->internalLocalRefTable.table, obj))
    {
        LOGE("threadid=%d: failed to remove %p from internal ref table\n",
            self->threadId, obj);
        dvmAbort();
    }
}

/*
 * Get the SHA-1 signature.
 */
static const u1* getSignature(const ClassPathEntry* cpe)
{
    DvmDex* pDvmDex;

    switch (cpe->kind) {
    case kCpeJar:
        pDvmDex = dvmGetJarFileDex((JarFile*) cpe->ptr);
        break;
    case kCpeDex:
        pDvmDex = dvmGetRawDexFileDex((RawDexFile*) cpe->ptr);
        break;
    default:
        LOGE("unexpected cpe kind %d\n", cpe->kind);
        dvmAbort();
        pDvmDex = NULL;     // make gcc happy
    }

    assert(pDvmDex != NULL);
    return pDvmDex->pDexFile->pHeader->signature;
}

/*
 * native public static int nativePostForkChild(long token, int debug_flags),
 */
static void Dalvik_dalvik_system_ZygoteHooks_postForkChild(
        const u4* args, JValue* pResult)
{
    /*
     * Our system thread ID has changed.  Get the new one.
     */
    Thread* thread = dvmThreadSelf();
    thread->systemTid = dvmGetSysThreadId();

    /* configure additional debug options */
    enableDebugFeatures(args[1]);

    gDvm.zygote = false;
    if (!dvmInitAfterZygote()) {
        ALOGE("error in post-zygote initialization");
        dvmAbort();
    }

    RETURN_VOID();
}

/*
 * Create and initialize a monitor.
 */
Monitor* dvmCreateMonitor(Object* obj)
{
    Monitor* mon;

    mon = (Monitor*) calloc(1, sizeof(Monitor));
    if (mon == NULL) {
        ALOGE("Unable to allocate monitor");
        dvmAbort();
    }
    mon->obj = obj;
    dvmInitMutex(&mon->lock);

    /* replace the head of the list with the new monitor */
    do {
        mon->next = gDvm.monitorList;
    } while (android_atomic_release_cas((int32_t)mon->next, (int32_t)mon,
            (int32_t*)(void*)&gDvm.monitorList) != 0);

    return mon;
}

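/*
 * Aside: the retry loop in dvmCreateMonitor() is a standard lock-free list
 * prepend.  A minimal, self-contained sketch of the same pattern using C11
 * atomics -- Node, listHead, and push are illustrative names, not Dalvik
 * APIs:
 */
#include <stdatomic.h>
#include <stddef.h>

typedef struct Node { struct Node* next; } Node;
static _Atomic(Node*) listHead = NULL;

static void push(Node* n)
{
    Node* old = atomic_load(&listHead);
    do {
        n->next = old;      /* speculatively link to the current head */
    } while (!atomic_compare_exchange_weak(&listHead, &old, n));
    /* on failure, "old" is reloaded with the current head and we retry */
}
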
// work-around to get a reference wrapper to an object so that it can be used
// for certain calls to the JNI environment. almost verbatim copy from Jni.cpp
static jobject dexspyAddLocalReference(::Thread* self, Object* obj)
{
    if (obj == NULL) {
        return NULL;
    }

    IndirectRefTable* pRefTable = &self->jniLocalRefTable;
    void* curFrame = self->interpSave.curFrame;
    u4 cookie = SAVEAREA_FROM_FP(curFrame)->xtra.localRefCookie;
    jobject jobj = (jobject) pRefTable->add(cookie, obj);
    if (UNLIKELY(jobj == NULL)) {
        pRefTable->dump("JNI local");
        ALOGE("Failed adding to JNI local ref table (has %zd entries)",
            pRefTable->capacity());
        dvmDumpThread(self, false);
        dvmAbort();     // spec says call FatalError; this is equivalent
    }
    if (UNLIKELY(gDvmJni.workAroundAppJniBugs)) {
        // Hand out direct pointers to support broken old apps.
        return reinterpret_cast<jobject>(obj);
    }
    return jobj;
}

/*
 * Reduce the available stack size.  By this point we should have finished
 * our overflow processing.
 */
void dvmCleanupStackOverflow(Thread* self)
{
    const u1* newStackEnd;

    assert(self->stackOverflowed);

    newStackEnd = (self->interpStackStart - self->interpStackSize) +
        STACK_OVERFLOW_RESERVE;
    if ((u1*)self->curFrame <= newStackEnd) {
        LOGE("Can't shrink stack: curFrame is in reserved area (%p %p)\n",
            self->interpStackEnd, self->curFrame);
        dvmDumpThread(self, false);
        dvmAbort();
    }

    self->interpStackEnd = newStackEnd;
    self->stackOverflowed = false;

    LOGI("Shrank stack (to %p, curFrame is %p)\n", self->interpStackEnd,
        self->curFrame);
}

/* * Perform a "reg cmp reg" operation and jump to the PCR region if condition * satisfies. */ static MipsLIR *genRegRegCheck(CompilationUnit *cUnit, MipsConditionCode cond, int reg1, int reg2, int dOffset, MipsLIR *pcrLabel) { MipsLIR *res = NULL; if (cond == kMipsCondGe) { /* signed >= case */ int tReg = dvmCompilerAllocTemp(cUnit); res = newLIR3(cUnit, kMipsSlt, tReg, reg1, reg2); MipsLIR *branch = opCompareBranch(cUnit, kMipsBeqz, tReg, -1); genCheckCommon(cUnit, dOffset, branch, pcrLabel); } else if (cond == kMipsCondCs) { /* unsigned >= case */ int tReg = dvmCompilerAllocTemp(cUnit); res = newLIR3(cUnit, kMipsSltu, tReg, reg1, reg2); MipsLIR *branch = opCompareBranch(cUnit, kMipsBeqz, tReg, -1); genCheckCommon(cUnit, dOffset, branch, pcrLabel); } else { ALOGE("Unexpected condition in genRegRegCheck: %d\n", (int) cond); dvmAbort(); } return res; }
/* Return TRUE if error happens */
static bool assembleInstructions(CompilationUnit *cUnit, intptr_t startAddr)
{
    short *bufferAddr = (short *) cUnit->codeBuffer;
    ArmLIR *lir;

    for (lir = (ArmLIR *) cUnit->firstLIRInsn; lir; lir = NEXT_LIR(lir)) {
        if (lir->opCode < 0) {
            if ((lir->opCode == ARM_PSEUDO_ALIGN4) &&
                /* 1 means padding is needed */
                (lir->operands[0] == 1)) {
                *bufferAddr++ = PADDING_MOV_R0_R0;
            }
            continue;
        }

        if (lir->isNop) {
            continue;
        }

        if (lir->opCode == THUMB_LDR_PC_REL ||
            lir->opCode == THUMB_ADD_PC_REL) {
            ArmLIR *lirTarget = (ArmLIR *) lir->generic.target;
            intptr_t pc = (lir->generic.offset + 4) & ~3;
            /*
             * Allow an offset (stored in operands[2]) to be added to the
             * PC-relative target. Useful to get to a fixed field inside a
             * chaining cell.
             */
            intptr_t target = lirTarget->generic.offset + lir->operands[2];
            int delta = target - pc;
            if (delta & 0x3) {
                LOGE("PC-rel distance is not multiples of 4: %d\n", delta);
                dvmAbort();
            }
            if (delta > 1023) {
                return true;
            }
            lir->operands[1] = delta >> 2;
        } else if (lir->opCode == THUMB2_CBNZ || lir->opCode == THUMB2_CBZ) {

/*
 * Allocate a new instance of the class String, performing first-use
 * initialization of the class if necessary. Upon success, the
 * returned value will have all its fields except hashCode already
 * filled in, including a reference to a newly-allocated char[] for
 * the contents, sized as given. Additionally, a reference to the
 * chars array is stored to the pChars pointer. Callers must
 * subsequently call dvmReleaseTrackedAlloc() on the result pointer.
 * This function returns NULL on failure.
 */
static StringObject* makeStringObject(u4 charsLength, ArrayObject** pChars)
{
    /*
     * The String class should have already gotten found (but not
     * necessarily initialized) before making it here. We assert it
     * explicitly, since historically speaking, we have had bugs with
     * regard to when the class String gets set up. The assert helps
     * make any regressions easier to diagnose.
     */
    assert(gDvm.classJavaLangString != NULL);

    if (!dvmIsClassInitialized(gDvm.classJavaLangString)) {
        /* Perform first-time use initialization of the class. */
        if (!dvmInitClass(gDvm.classJavaLangString)) {
            LOGE("FATAL: Could not initialize class String");
            dvmAbort();
        }
    }

    Object* result = dvmAllocObject(gDvm.classJavaLangString, ALLOC_DEFAULT);
    if (result == NULL) {
        return NULL;
    }

    ArrayObject* chars = dvmAllocPrimitiveArray('C', charsLength,
            ALLOC_DEFAULT);
    if (chars == NULL) {
        dvmReleaseTrackedAlloc(result, NULL);
        return NULL;
    }

    dvmSetFieldInt(result, STRING_FIELDOFF_COUNT, charsLength);
    dvmSetFieldObject(result, STRING_FIELDOFF_VALUE, (Object*) chars);
    dvmReleaseTrackedAlloc((Object*) chars, NULL);

    /* Leave offset and hashCode set to zero. */

    *pChars = chars;
    return (StringObject*) result;
}

/*
 * Stop tracking an object.
 *
 * We allow attempts to delete NULL "obj" so that callers don't have to wrap
 * calls with "if != NULL".
 */
void dvmReleaseTrackedAlloc(Object* obj, Thread* self)
{
    if (obj == NULL)
        return;

    if (self == NULL)
        self = dvmThreadSelf();
    assert(self != NULL);

    pthread_mutex_lock(&gDvm.s_mtx);
    if (gDvm.freeObjHook) {
        gDvm.freeObjHook(obj, self);
    }
    pthread_mutex_unlock(&gDvm.s_mtx);

    if (!dvmRemoveFromReferenceTable(&self->internalLocalRefTable,
            self->internalLocalRefTable.table, obj))
    {
        ALOGE("threadid=%d: failed to remove %p from internal ref table",
            self->threadId, obj);
        dvmAbort();
    }
}

/*
 * Utility function when we're evaluating alternative implementations.
 */
static void badMatch(StringObject* thisStrObj, StringObject* compStrObj,
    int expectResult, int newResult, const char* compareType)
{
    ArrayObject* thisArray;
    ArrayObject* compArray;
    const char* thisStr;
    const char* compStr;
    int thisOffset, compOffset, thisCount, compCount;

    thisCount = dvmGetFieldInt((Object*) thisStrObj, STRING_FIELDOFF_COUNT);
    compCount = dvmGetFieldInt((Object*) compStrObj, STRING_FIELDOFF_COUNT);
    thisOffset = dvmGetFieldInt((Object*) thisStrObj, STRING_FIELDOFF_OFFSET);
    compOffset = dvmGetFieldInt((Object*) compStrObj, STRING_FIELDOFF_OFFSET);
    thisArray = (ArrayObject*)
        dvmGetFieldObject((Object*) thisStrObj, STRING_FIELDOFF_VALUE);
    compArray = (ArrayObject*)
        dvmGetFieldObject((Object*) compStrObj, STRING_FIELDOFF_VALUE);

    thisStr = dvmCreateCstrFromString(thisStrObj);
    compStr = dvmCreateCstrFromString(compStrObj);

    ALOGE("%s expected %d got %d", compareType, expectResult, newResult);
    ALOGE(" this (o=%d l=%d) '%s'", thisOffset, thisCount, thisStr);
    ALOGE(" comp (o=%d l=%d) '%s'", compOffset, compCount, compStr);
    dvmPrintHexDumpEx(ANDROID_LOG_INFO, LOG_TAG,
        ((const u2*) thisArray->contents) + thisOffset,
        thisCount*2, kHexDumpLocal);
    dvmPrintHexDumpEx(ANDROID_LOG_INFO, LOG_TAG,
        ((const u2*) compArray->contents) + compOffset,
        compCount*2, kHexDumpLocal);
    dvmAbort();
}

/*
 * Add given DexOrJar to the hash table of user-loaded dex files.
 */
static void addToDexFileTable(DexOrJar* pDexOrJar) {
    /*
     * Later on, we will receive this pointer as an argument and need
     * to find it in the hash table without knowing if it's valid or
     * not, which means we can't compute a hash value from anything
     * inside DexOrJar. We don't share DexOrJar structs when the same
     * file is opened multiple times, so we can just use the low 32
     * bits of the pointer as the hash.
     */
    u4 hash = (u4) pDexOrJar;
    void* result;

    dvmHashTableLock(gDvm.userDexFiles);
    result = dvmHashTableLookup(gDvm.userDexFiles, hash, pDexOrJar,
            hashcmpDexOrJar, true);
    dvmHashTableUnlock(gDvm.userDexFiles);

    if (result != pDexOrJar) {
        ALOGE("Pointer has already been added?");
        dvmAbort();
    }

    pDexOrJar->okayToFree = true;
}

/* Used for normalized loop exit condition checks */
static Opcode negateOpcode(Opcode opcode)
{
    switch (opcode) {
        /* reg/reg cmp */
        case OP_IF_EQ: return OP_IF_NE;
        case OP_IF_NE: return OP_IF_EQ;
        case OP_IF_LT: return OP_IF_GE;
        case OP_IF_GE: return OP_IF_LT;
        case OP_IF_GT: return OP_IF_LE;
        case OP_IF_LE: return OP_IF_GT;
        /* reg/zero cmp */
        case OP_IF_EQZ: return OP_IF_NEZ;
        case OP_IF_NEZ: return OP_IF_EQZ;
        case OP_IF_LTZ: return OP_IF_GEZ;
        case OP_IF_GEZ: return OP_IF_LTZ;
        case OP_IF_GTZ: return OP_IF_LEZ;
        case OP_IF_LEZ: return OP_IF_GTZ;
        default:
            LOGE("opcode %d cannot be negated", opcode);
            dvmAbort();
            break;
    }
    return (Opcode)-1;  // unreached
}

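/*
 * A quick sanity check (illustrative only, assuming the OP_IF_* enumerators
 * from libdex): negation must be an involution, i.e. negating twice yields
 * the original opcode.
 */
#include <assert.h>
#include <stddef.h>

static void checkNegateOpcodeIsInvolution(void)
{
    static const Opcode ops[] = {
        OP_IF_EQ, OP_IF_NE, OP_IF_LT, OP_IF_GE, OP_IF_GT, OP_IF_LE,
        OP_IF_EQZ, OP_IF_NEZ, OP_IF_LTZ, OP_IF_GEZ, OP_IF_GTZ, OP_IF_LEZ,
    };
    for (size_t i = 0; i < sizeof(ops) / sizeof(ops[0]); i++) {
        assert(negateOpcode(negateOpcode(ops[i])) == ops[i]);
    }
}
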
/* Find unreachable objects that need to be finalized,
 * and schedule them for finalization.
 */
void dvmHeapScheduleFinalizations()
{
    HeapRefTable newPendingRefs;
    LargeHeapRefTable *finRefs = gDvm.gcHeap->finalizableRefs;
    Object **ref;
    Object **lastRef;
    size_t totalPendCount;
    GcMarkContext *markContext = &gDvm.gcHeap->markContext;

    /*
     * All reachable objects have been marked.
     * Any unmarked finalizable objects need to be finalized.
     */

    /* Create a table that the new pending refs will
     * be added to.
     */
    if (!dvmHeapInitHeapRefTable(&newPendingRefs, 128)) {
        //TODO: mark all finalizable refs and hope that
        //      we can schedule them next time.  Watch out,
        //      because we may be expecting to free up space
        //      by calling finalizers.
        LOGE_GC("dvmHeapScheduleFinalizations(): no room for "
                "pending finalizations\n");
        dvmAbort();
    }

    /* Walk through finalizableRefs and move any unmarked references
     * to the list of new pending refs.
     */
    totalPendCount = 0;
    while (finRefs != NULL) {
        Object **gapRef;
        size_t newPendCount = 0;

        gapRef = ref = finRefs->refs.table;
        lastRef = finRefs->refs.nextEntry;
        while (ref < lastRef) {
            DvmHeapChunk *hc;

            hc = ptr2chunk(*ref);
            if (!isMarked(hc, markContext)) {
                if (!dvmHeapAddToHeapRefTable(&newPendingRefs, *ref)) {
                    //TODO: add the current table and allocate
                    //      a new, smaller one.
                    LOGE_GC("dvmHeapScheduleFinalizations(): "
                            "no room for any more pending finalizations: %zd\n",
                            dvmHeapNumHeapRefTableEntries(&newPendingRefs));
                    dvmAbort();
                }
                newPendCount++;
            } else {
                /* This ref is marked, so will remain on finalizableRefs.
                 */
                if (newPendCount > 0) {
                    /* Copy it up to fill the holes.
                     */
                    *gapRef++ = *ref;
                } else {
                    /* No holes yet; don't bother copying.
                     */
                    gapRef++;
                }
            }
            ref++;
        }
        finRefs->refs.nextEntry = gapRef;
        //TODO: if the table is empty when we're done, free it.
        totalPendCount += newPendCount;
        finRefs = finRefs->next;
    }
    LOGD_GC("dvmHeapScheduleFinalizations(): %zd finalizers triggered.\n",
            totalPendCount);
    if (totalPendCount == 0) {
        /* No objects required finalization.
         * Free the empty temporary table.
         */
        dvmClearReferenceTable(&newPendingRefs);
        return;
    }

    /* Add the new pending refs to the main list.
     */
    if (!dvmHeapAddTableToLargeTable(&gDvm.gcHeap->pendingFinalizationRefs,
                &newPendingRefs))
    {
        LOGE_GC("dvmHeapScheduleFinalizations(): can't insert new "
                "pending finalizations\n");
        dvmAbort();
    }

    //TODO: try compacting the main list with a memcpy loop

    /* Mark the refs we just moved; we don't want them or their
     * children to get swept yet.
     */
    ref = newPendingRefs.table;
    lastRef = newPendingRefs.nextEntry;
    assert(ref < lastRef);
    HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_FINALIZING, 0);
    while (ref < lastRef) {
        markObjectNonNull(*ref, markContext);
        ref++;
    }
    HPROF_CLEAR_GC_SCAN_STATE();

    /* Set markAllReferents so that we don't collect referents whose
     * only references are in final-reachable objects.
     * TODO: eventually provide normal reference behavior by properly
     *       marking these references.
     */
    gDvm.gcHeap->markAllReferents = true;
    processMarkStack(markContext);
    gDvm.gcHeap->markAllReferents = false;

    dvmSignalHeapWorker(false);
}

/*
 * Dump the stack for the specified thread, which is still running.
 *
 * This is very dangerous, because stack frames are being pushed on and
 * popped off, and if the thread exits we'll be looking at freed memory.
 * The plan here is to take a snapshot of the stack and then dump that
 * to try to minimize the chances of catching it mid-update.  This should
 * work reasonably well on a single-CPU system.
 *
 * There is a small chance that calling here will crash the VM.
 */
void dvmDumpRunningThreadStack(const DebugOutputTarget* target, Thread* thread)
{
    StackSaveArea* saveArea;
    const u1* origStack;
    u1* stackCopy = NULL;
    int origSize, fpOffset;
    void* fp;
    int depthLimit = 200;

    if (thread == NULL || thread->interpSave.curFrame == NULL) {
        dvmPrintDebugMessage(target,
            "DumpRunning: Thread at %p has no curFrame (threadid=%d)\n",
            thread, (thread != NULL) ? thread->threadId : 0);
        return;
    }

    /* wait for a full quantum */
    sched_yield();

    /* copy the info we need, then the stack itself */
    origSize = thread->interpStackSize;
    origStack = (const u1*) thread->interpStackStart - origSize;
    stackCopy = (u1*) malloc(origSize);
    fpOffset = (u1*) thread->interpSave.curFrame - origStack;
    memcpy(stackCopy, origStack, origSize);

    /*
     * Run through the stack and rewrite the "prev" pointers.
     */
    //ALOGI("DR: fpOff=%d (from %p %p)", fpOffset, origStack,
    //    thread->interpSave.curFrame);
    fp = stackCopy + fpOffset;
    while (true) {
        int prevOffset;

        if (depthLimit-- < 0) {
            /* we're probably screwed */
            dvmPrintDebugMessage(target, "DumpRunning: depth limit hit\n");
            dvmAbort();
        }
        saveArea = SAVEAREA_FROM_FP(fp);
        if (saveArea->prevFrame == NULL)
            break;

        prevOffset = (u1*) saveArea->prevFrame - origStack;
        if (prevOffset < 0 || prevOffset > origSize) {
            dvmPrintDebugMessage(target,
                "DumpRunning: bad offset found: %d (from %p %p)\n",
                prevOffset, origStack, saveArea->prevFrame);
            saveArea->prevFrame = NULL;
            break;
        }

        saveArea->prevFrame = (u4*)(stackCopy + prevOffset);
        fp = saveArea->prevFrame;
    }

    /*
     * We still need to pass the Thread for some monitor wait stuff.
     */
    dumpFrames(target, stackCopy + fpOffset, thread);

    free(stackCopy);
}

/* Runs the main thread daemon loop looking for incoming messages from its
 * parallel thread on what action it should take. */
static void* thread_daemon(void* pself)
{
    Thread* self = (Thread*)pself;
    while (1) {
        u1 event = offReadU1(self);
        if (!gDvm.offConnected) {
            ALOGI("THREAD %d LOST CONNECTION", self->threadId);
            return NULL;
        }
        ALOGI("THREAD %d GOT EVENT %d", self->threadId, event);
        switch (event) {
            case OFF_ACTION_RESUME: {
                /* We got a resume message, drop back to our caller. */
                return NULL;
            } break;
            case OFF_ACTION_LOCK: {
                offPerformLock(self);
            } break;
            case OFF_ACTION_NOTIFY: {
                offPerformNotify(self);
            } break;
            case OFF_ACTION_BROADCAST: {
                offPerformNotifyAll(self);
            } break;
            case OFF_ACTION_DEX_QUERYDEX: {
                offPerformQueryDex(self);
            } break;
            case OFF_ACTION_SYNC: {
                offSyncPull();
                offWriteU1(self, 0);
            } break;
            case OFF_ACTION_INTERRUPT: {
                offPerformInterrupt(self);
            } break;
            case OFF_ACTION_TRIMGC: {
                dvmLockHeap();
                self->offTrimSignaled = true;
                if (gDvm.gcHeap->gcRunning) {
                    dvmWaitForConcurrentGcToComplete();
                }
                dvmCollectGarbageInternal(GC_BEFORE_OOM);
                self->offTrimSignaled = false;
                dvmUnlockHeap();
            } break;
            case OFF_ACTION_GRABVOL: {
                offPerformGrabVolatiles(self);
            } break;
            case OFF_ACTION_MIGRATE: {
                if (offPerformMigrate(self)) {
                    return NULL;
                }
            } break;
            case OFF_ACTION_CLINIT: {
                offPerformClinit(self);
            } break;
            case OFF_ACTION_DEATH: {
                self->offFlagDeath = true;
                return NULL;
            } break;
            default: {
                ALOGE("Unknown action %d sent to thread %d",
                      event, self->threadId);
                dvmAbort();
            }
        }
    }
}

/*
 * Initializes the heap source; must be called before any other
 * dvmHeapSource*() functions.  Returns a GcHeap structure
 * allocated from the heap source.
 */
GcHeap* dvmHeapSourceStartup(size_t startSize, size_t maximumSize,
                             size_t growthLimit)
{
    GcHeap *gcHeap = NULL;
    HeapSource *hs = NULL;
    mspace msp;
    size_t length;
    void *base;

    assert(gHs == NULL);

    if (!(startSize <= growthLimit && growthLimit <= maximumSize)) {
        ALOGE("Bad heap size parameters (start=%zd, max=%zd, limit=%zd)",
             startSize, maximumSize, growthLimit);
        return NULL;
    }

    /*
     * Allocate a contiguous region of virtual memory to be subdivided
     * among the heaps managed by the garbage collector.
     */
    length = ALIGN_UP_TO_PAGE_SIZE(maximumSize);
    base = dvmAllocRegion(length, PROT_NONE,
                          gDvm.zygote ? "dalvik-zygote" : "dalvik-heap");
    if (base == NULL) {
        dvmAbort();
    }

    /* Create an unlocked dlmalloc mspace to use as
     * a heap source.
     */
    msp = createMspace(base, kInitialMorecoreStart, startSize);
    if (msp == NULL) {
        dvmAbort();
    }

    gcHeap = (GcHeap *)calloc(1, sizeof(*gcHeap));
    if (gcHeap == NULL) {
        LOGE_HEAP("Can't allocate heap descriptor");
        dvmAbort();
    }

    hs = (HeapSource *)calloc(1, sizeof(*hs));
    if (hs == NULL) {
        LOGE_HEAP("Can't allocate heap source");
        dvmAbort();
    }

    hs->targetUtilization = gDvm.heapTargetUtilization * HEAP_UTILIZATION_MAX;
    hs->minFree = gDvm.heapMinFree;
    hs->maxFree = gDvm.heapMaxFree;
    hs->startSize = startSize;
    hs->maximumSize = maximumSize;
    hs->growthLimit = growthLimit;
    hs->idealSize = startSize;
    hs->softLimit = SIZE_MAX;    // no soft limit at first
    hs->numHeaps = 0;
    hs->sawZygote = gDvm.zygote;
    hs->nativeBytesAllocated = 0;
    hs->nativeFootprintGCWatermark = startSize;
    hs->nativeFootprintLimit = startSize * 2;
    hs->nativeNeedToRunFinalization = false;
    hs->hasGcThread = false;
    hs->heapBase = (char *)base;
    hs->heapLength = length;

    if (hs->maxFree > hs->maximumSize) {
        hs->maxFree = hs->maximumSize;
    }
    if (hs->minFree < CONCURRENT_START) {
        hs->minFree = CONCURRENT_START;
    } else if (hs->minFree > hs->maxFree) {
        hs->minFree = hs->maxFree;
    }

    if (!addInitialHeap(hs, msp, growthLimit)) {
        LOGE_HEAP("Can't add initial heap");
        dvmAbort();
    }
    if (!dvmHeapBitmapInit(&hs->liveBits, base, length, "dalvik-bitmap-1")) {
        LOGE_HEAP("Can't create liveBits");
        dvmAbort();
    }
    if (!dvmHeapBitmapInit(&hs->markBits, base, length, "dalvik-bitmap-2")) {
        LOGE_HEAP("Can't create markBits");
        dvmHeapBitmapDelete(&hs->liveBits);
        dvmAbort();
    }
    if (!allocMarkStack(&gcHeap->markContext.stack, hs->maximumSize)) {
        ALOGE("Can't create markStack");
        dvmHeapBitmapDelete(&hs->markBits);
        dvmHeapBitmapDelete(&hs->liveBits);
        dvmAbort();
    }
    gcHeap->markContext.bitmap = &hs->markBits;
    gcHeap->heapSource = hs;

    gHs = hs;
    return gcHeap;
}

/*
 * Initialize an hprof context struct.
 *
 * This will take ownership of "fileName".
 *
 * NOTE: ctx is expected to have been zeroed out prior to calling this
 * function.
 */
void hprofContextInit(hprof_context_t *ctx, char *fileName, int fd,
                      bool writeHeader, bool directToDdms)
{
    /*
     * Have to do this here, because it must happen after we
     * memset the struct (want to treat fileDataPtr/fileDataSize
     * as read-only while the file is open).
     */
    FILE* fp = open_memstream(&ctx->fileDataPtr, &ctx->fileDataSize);
    if (fp == NULL) {
        /* not expected */
        ALOGE("hprof: open_memstream failed: %s", strerror(errno));
        dvmAbort();
    }

    ctx->directToDdms = directToDdms;
    ctx->fileName = fileName;
    ctx->memFp = fp;
    ctx->fd = fd;

    ctx->curRec.allocLen = 128;
    ctx->curRec.body = (unsigned char *)malloc(ctx->curRec.allocLen);
    //xxx check for/return an error

    if (writeHeader) {
        char magic[] = HPROF_MAGIC_STRING;
        unsigned char buf[4];
        struct timeval now;
        u8 nowMs;

        /* Write the file header.
         *
         * [u1]*: NUL-terminated magic string.
         */
        fwrite(magic, 1, sizeof(magic), fp);

        /* u4: size of identifiers.  We're using addresses
         *     as IDs, so make sure a pointer fits.
         */
        U4_TO_BUF_BE(buf, 0, sizeof(void *));
        fwrite(buf, 1, sizeof(u4), fp);

        /* The current time, in milliseconds since 0:00 GMT, 1/1/70.
         */
        if (gettimeofday(&now, NULL) < 0) {
            nowMs = 0;
        } else {
            nowMs = (u8)now.tv_sec * 1000 + now.tv_usec / 1000;
        }

        /* u4: high word of the 64-bit time.
         */
        U4_TO_BUF_BE(buf, 0, (u4)(nowMs >> 32));
        fwrite(buf, 1, sizeof(u4), fp);

        /* u4: low word of the 64-bit time.
         */
        U4_TO_BUF_BE(buf, 0, (u4)(nowMs & 0xffffffffULL));
        fwrite(buf, 1, sizeof(u4), fp);    //xxx fix the time
    }
}

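/*
 * open_memstream() is the POSIX.1-2008 call used above: it returns a stdio
 * stream whose output accumulates in a malloc'd buffer, with the pointer
 * and size stored through the two out-parameters once the stream is flushed
 * or closed.  A standalone usage sketch (not Dalvik code):
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    char* buf = NULL;
    size_t len = 0;
    FILE* fp = open_memstream(&buf, &len);
    if (fp == NULL)
        return 1;
    fprintf(fp, "hprof-style record, %d bytes of payload", 128);
    fclose(fp);                 /* buf and len are valid from here on */
    printf("captured %zu bytes: %s\n", len, buf);
    free(buf);
    return 0;
}
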
/*
 * Initiate garbage collection.
 *
 * NOTES:
 * - If we don't hold gDvm.threadListLock, it's possible for a thread to
 *   be added to the thread list while we work.  The thread should NOT
 *   start executing, so this is only interesting when we start chasing
 *   thread stacks.  (Before we do so, grab the lock.)
 *
 * We are not allowed to GC when the debugger has suspended the VM, which
 * is awkward because debugger requests can cause allocations.  The easiest
 * way to enforce this is to refuse to GC on an allocation made by the
 * JDWP thread -- we have to expand the heap or fail.
 */
void dvmCollectGarbageInternal(const GcSpec* spec)
{
    GcHeap *gcHeap = gDvm.gcHeap;
    u4 gcEnd = 0;
    u4 rootStart = 0, rootEnd = 0;
    u4 dirtyStart = 0, dirtyEnd = 0;
    size_t numObjectsFreed, numBytesFreed;
    size_t currAllocated, currFootprint;
    size_t percentFree;
    int oldThreadPriority = INT_MAX;

    /* The heap lock must be held.
     */

    if (gcHeap->gcRunning) {
        LOGW_HEAP("Attempted recursive GC");
        return;
    }

    gcHeap->gcRunning = true;

    rootStart = dvmGetRelativeTimeMsec();
    dvmSuspendAllThreads(SUSPEND_FOR_GC);

    /*
     * If we are not marking concurrently raise the priority of the
     * thread performing the garbage collection.
     */
    if (!spec->isConcurrent) {
        oldThreadPriority = os_raiseThreadPriority();
    }
    if (gDvm.preVerify) {
        LOGV_HEAP("Verifying roots and heap before GC");
        verifyRootsAndHeap();
    }

    dvmMethodTraceGCBegin();

    /* Set up the marking context.
     */
    if (!dvmHeapBeginMarkStep(spec->isPartial)) {
        LOGE_HEAP("dvmHeapBeginMarkStep failed; aborting");
        dvmAbort();
    }

    /* Mark the set of objects that are strongly reachable from the roots.
     */
    LOGD_HEAP("Marking...");
    dvmHeapMarkRootSet();

    /* dvmHeapScanMarkedObjects() will build the lists of known
     * instances of the Reference classes.
     */
    assert(gcHeap->softReferences == NULL);
    assert(gcHeap->weakReferences == NULL);
    assert(gcHeap->finalizerReferences == NULL);
    assert(gcHeap->phantomReferences == NULL);
    assert(gcHeap->clearedReferences == NULL);

    if (spec->isConcurrent) {
        /*
         * Resume threads while tracing from the roots.  We unlock the
         * heap to allow mutator threads to allocate from free space.
         */
        dvmClearCardTable();
        dvmUnlockHeap();
        dvmResumeAllThreads(SUSPEND_FOR_GC);
        rootEnd = dvmGetRelativeTimeMsec();
    }

    /* Recursively mark any objects that marked objects point to strongly.
     * If we're not collecting soft references, soft-reachable
     * objects will also be marked.
     */
    LOGD_HEAP("Recursing...");
    dvmHeapScanMarkedObjects();

    if (spec->isConcurrent) {
        /*
         * Re-acquire the heap lock and perform the final thread
         * suspension.
         */
        dirtyStart = dvmGetRelativeTimeMsec();
        dvmLockHeap();
        dvmSuspendAllThreads(SUSPEND_FOR_GC);
        /*
         * As no barrier intercepts root updates, we conservatively
         * assume all roots may be gray and re-mark them.
         */
        dvmHeapReMarkRootSet();
        /*
         * With the exception of reference objects and weak interned
         * strings, all gray objects should now be on dirty cards.
         */
        if (gDvm.verifyCardTable) {
            dvmVerifyCardTable();
        }
        /*
         * Recursively mark gray objects pointed to by the roots or by
         * heap objects dirtied during the concurrent mark.
         */
        dvmHeapReScanMarkedObjects();
    }

    /*
     * All strongly-reachable objects have now been marked.  Process
     * weakly-reachable objects discovered while tracing.
     */
    dvmHeapProcessReferences(&gcHeap->softReferences,
                             spec->doPreserve == false,
                             &gcHeap->weakReferences,
                             &gcHeap->finalizerReferences,
                             &gcHeap->phantomReferences);

#if defined(WITH_JIT)
    /*
     * Patching a chaining cell is very cheap as it only updates 4 words.  It's
     * the overhead of stopping all threads and synchronizing the I/D cache
     * that makes it expensive.
     *
     * Therefore we batch those work orders in a queue and go through them
     * when threads are suspended for GC.
     */
    dvmCompilerPerformSafePointChecks();
#endif

    LOGD_HEAP("Sweeping...");

    dvmHeapSweepSystemWeaks();

    /*
     * Live objects have a bit set in the mark bitmap, swap the mark
     * and live bitmaps.  The sweep can proceed concurrently viewing
     * the new live bitmap as the old mark bitmap, and vice versa.
     */
    dvmHeapSourceSwapBitmaps();

    if (gDvm.postVerify) {
        LOGV_HEAP("Verifying roots and heap after GC");
        verifyRootsAndHeap();
    }

    if (spec->isConcurrent) {
        dvmUnlockHeap();
        dvmResumeAllThreads(SUSPEND_FOR_GC);
        dirtyEnd = dvmGetRelativeTimeMsec();
    }
    dvmHeapSweepUnmarkedObjects(spec->isPartial, spec->isConcurrent,
                                &numObjectsFreed, &numBytesFreed);
    LOGD_HEAP("Cleaning up...");
    dvmHeapFinishMarkStep();
    if (spec->isConcurrent) {
        dvmLockHeap();
    }

    LOGD_HEAP("Done.");

    /* Now's a good time to adjust the heap size, since
     * we know what our utilization is.
     *
     * This doesn't actually resize any memory;
     * it just lets the heap grow more when necessary.
     */
    dvmHeapSourceGrowForUtilization();

    currAllocated = dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, NULL, 0);
    currFootprint = dvmHeapSourceGetValue(HS_FOOTPRINT, NULL, 0);

    dvmMethodTraceGCEnd();
    LOGV_HEAP("GC finished");

    gcHeap->gcRunning = false;

    LOGV_HEAP("Resuming threads");

    if (spec->isConcurrent) {
        /*
         * Wake-up any threads that blocked after a failed allocation
         * request.
         */
        dvmBroadcastCond(&gDvm.gcHeapCond);
    }

    if (!spec->isConcurrent) {
        dvmResumeAllThreads(SUSPEND_FOR_GC);
        dirtyEnd = dvmGetRelativeTimeMsec();
        /*
         * Restore the original thread scheduling priority if it was
         * changed at the start of the current garbage collection.
         */
        if (oldThreadPriority != INT_MAX) {
            os_lowerThreadPriority(oldThreadPriority);
        }
    }

    /*
     * Move queue of pending references back into Java.
     */
    dvmEnqueueClearedReferences(&gDvm.gcHeap->clearedReferences);

    gcEnd = dvmGetRelativeTimeMsec();
    percentFree = 100 - (size_t)(100.0f * (float)currAllocated / currFootprint);
    if (!spec->isConcurrent) {
        u4 markSweepTime = dirtyEnd - rootStart;
        u4 gcTime = gcEnd - rootStart;
        bool isSmall = numBytesFreed > 0 && numBytesFreed < 1024;
        ALOGD("%s freed %s%zdK, %d%% free %zdK/%zdK, paused %ums, total %ums",
             spec->reason,
             isSmall ? "<" : "",
             numBytesFreed ? MAX(numBytesFreed / 1024, 1) : 0,
             percentFree,
             currAllocated / 1024, currFootprint / 1024,
             markSweepTime, gcTime);
    } else {
        u4 rootTime = rootEnd - rootStart;
        u4 dirtyTime = dirtyEnd - dirtyStart;
        u4 gcTime = gcEnd - rootStart;
        bool isSmall = numBytesFreed > 0 && numBytesFreed < 1024;
        ALOGD("%s freed %s%zdK, %d%% free %zdK/%zdK, paused %ums+%ums, total %ums",
             spec->reason,
             isSmall ? "<" : "",
             numBytesFreed ? MAX(numBytesFreed / 1024, 1) : 0,
             percentFree,
             currAllocated / 1024, currFootprint / 1024,
             rootTime, dirtyTime, gcTime);
    }
    if (gcHeap->ddmHpifWhen != 0) {
        LOGD_HEAP("Sending VM heap info to DDM");
        dvmDdmSendHeapInfo(gcHeap->ddmHpifWhen, false);
    }
    if (gcHeap->ddmHpsgWhen != 0) {
        LOGD_HEAP("Dumping VM heap to DDM");
        dvmDdmSendHeapSegments(false, false);
    }
    if (gcHeap->ddmNhsgWhen != 0) {
        LOGD_HEAP("Dumping native heap to DDM");
        dvmDdmSendHeapSegments(false, true);
    }
}

static bool sweepBitmapCallback(size_t numPtrs, void **ptrs,
                                const void *finger, void *arg)
{
    const ClassObject *const classJavaLangClass = gDvm.classJavaLangClass;
    size_t i;

    for (i = 0; i < numPtrs; i++) {
        DvmHeapChunk *hc;
        Object *obj;

        /* The pointers we're getting back are DvmHeapChunks, not
         * Objects.
         */
        hc = (DvmHeapChunk *)*ptrs++;
        obj = (Object *)chunk2ptr(hc);

#if WITH_OBJECT_HEADERS
        if (hc->markGeneration == gGeneration) {
            LOGE("sweeping marked object: 0x%08x\n", (uint)obj);
            dvmAbort();
        }
#endif

        /* Free the monitor associated with the object.
         */
        dvmFreeObjectMonitor(obj);

        /* NOTE: Dereferencing clazz is dangerous.  If obj was the last
         * one to reference its class object, the class object could be
         * on the sweep list, and could already have been swept, leaving
         * us with a stale pointer.
         */
        LOGV_SWEEP("FREE: 0x%08x %s\n", (uint)obj, obj->clazz->name);

        /* This assumes that java.lang.Class will never go away.
         * If it can, and we were the last reference to it, it
         * could have already been swept.  However, even in that case,
         * gDvm.classJavaLangClass should still have a useful
         * value.
         */
        if (obj->clazz == classJavaLangClass) {
            LOGV_SWEEP("---------------> %s\n", ((ClassObject *)obj)->name);
            /* dvmFreeClassInnards() may have already been called,
             * but it's safe to call on the same ClassObject twice.
             */
            dvmFreeClassInnards((ClassObject *)obj);
        }

#if 0
        /* Overwrite the to-be-freed object to make stale references
         * more obvious.
         */
        {
            int chunklen;
            ClassObject *clazz = obj->clazz;
#if WITH_OBJECT_HEADERS
            DvmHeapChunk chunk = *hc;
            chunk.header = ~OBJECT_HEADER | 1;
#endif
            chunklen = dvmHeapSourceChunkSize(hc);
            memset(hc, 0xa5, chunklen);
            obj->clazz = (ClassObject *)((uintptr_t)clazz ^ 0xffffffff);
#if WITH_OBJECT_HEADERS
            *hc = chunk;
#endif
        }
#endif

        //TODO: provide a heapsource function that takes a list of pointers
        //      to free and call it outside of this loop.
        dvmHeapSourceFree(hc);
    }

    return true;
}

static void _markObjectNonNullCommon(const Object *obj, GcMarkContext *ctx,
                                     bool checkFinger, bool forceStack)
{
    DvmHeapChunk *hc;

    assert(obj != NULL);

#if GC_DEBUG(GC_DEBUG_PARANOID)
    //TODO: make sure we're locked
    assert(obj != (Object *)gDvm.unlinkedJavaLangClass);
    assert(dvmIsValidObject(obj));
#endif

    hc = ptr2chunk(obj);
    if (!setAndReturnMarkBit(ctx, hc)) {
        /* This object was not previously marked.
         */
        if (forceStack || (checkFinger && (void *)hc < ctx->finger)) {
            /* This object will need to go on the mark stack.
             */
            MARK_STACK_PUSH(ctx->stack, obj);
        }

#if WITH_OBJECT_HEADERS
        if (hc->scanGeneration != hc->markGeneration) {
            LOGE("markObject(0x%08x): wasn't scanned last time\n", (uint)obj);
            dvmAbort();
        }
        if (hc->markGeneration == gGeneration) {
            LOGE("markObject(0x%08x): already marked this generation\n",
                 (uint)obj);
            dvmAbort();
        }
        hc->oldMarkGeneration = hc->markGeneration;
        hc->markGeneration = gGeneration;
        hc->markFingerOld = hc->markFinger;
        hc->markFinger = ctx->finger;
        if (gMarkParent != NULL) {
            hc->parentOld = hc->parent;
            hc->parent = gMarkParent;
        } else {
            hc->parent = (const Object *)((uintptr_t)hc->parent | 1);
        }
        hc->markCount++;
#endif
#if WITH_HPROF
        if (gDvm.gcHeap->hprofContext != NULL) {
            hprofMarkRootObject(gDvm.gcHeap->hprofContext, obj, 0);
        }
#endif
#if DVM_TRACK_HEAP_MARKING
        gDvm.gcHeap->markCount++;
        gDvm.gcHeap->markSize += dvmHeapSourceChunkSize((void *)hc) +
                HEAP_SOURCE_CHUNK_OVERHEAD;
#endif

        /* obj->clazz can be NULL if we catch an object between
         * dvmMalloc() and DVM_OBJECT_INIT().  This is ok.
         */
        LOGV_MARK("0x%08x %s\n", (uint)obj,
                  obj->clazz == NULL ? "<null class>" : obj->clazz->name);
    }
}

/*
 * Convert primitive, boxed data from "srcPtr" to "dstPtr".
 *
 * Section v2 2.6 lists the various conversions and promotions.  We
 * allow the "widening" and "identity" conversions, but don't allow the
 * "narrowing" conversions.
 *
 * Allowed:
 *  byte to short, int, long, float, double
 *  short to int, long, float, double
 *  char to int, long, float, double
 *  int to long, float, double
 *  long to float, double
 *  float to double
 * Values of types byte, char, and short are "internally" widened to int.
 *
 * Returns the width in 32-bit words of the destination primitive, or
 * -1 if the conversion is not allowed.
 *
 * TODO? use JValue rather than u4 pointers
 */
int dvmConvertPrimitiveValue(PrimitiveType srcType,
    PrimitiveType dstType, const s4* srcPtr, s4* dstPtr)
{
    enum Conversion {
        OK4, OK8, ItoJ,
        ItoD, JtoD, FtoD,
        ItoF, JtoF, bad
    };

    enum Conversion conv;
#ifdef ARCH_HAVE_ALIGNED_DOUBLES
    double ret;
#endif

    assert((srcType != PRIM_VOID) && (srcType != PRIM_NOT));
    assert((dstType != PRIM_VOID) && (dstType != PRIM_NOT));

    switch (dstType) {
        case PRIM_BOOLEAN:
        case PRIM_CHAR:
        case PRIM_BYTE: {
            conv = (srcType == dstType) ? OK4 : bad;
            break;
        }
        case PRIM_SHORT: {
            switch (srcType) {
                case PRIM_BYTE:
                case PRIM_SHORT: conv = OK4; break;
                default:         conv = bad; break;
            }
            break;
        }
        case PRIM_INT: {
            switch (srcType) {
                case PRIM_BYTE:
                case PRIM_CHAR:
                case PRIM_SHORT:
                case PRIM_INT:   conv = OK4; break;
                default:         conv = bad; break;
            }
            break;
        }
        case PRIM_LONG: {
            switch (srcType) {
                case PRIM_BYTE:
                case PRIM_CHAR:
                case PRIM_SHORT:
                case PRIM_INT:   conv = ItoJ; break;
                case PRIM_LONG:  conv = OK8;  break;
                default:         conv = bad;  break;
            }
            break;
        }
        case PRIM_FLOAT: {
            switch (srcType) {
                case PRIM_BYTE:
                case PRIM_CHAR:
                case PRIM_SHORT:
                case PRIM_INT:   conv = ItoF; break;
                case PRIM_LONG:  conv = JtoF; break;
                case PRIM_FLOAT: conv = OK4;  break;
                default:         conv = bad;  break;
            }
            break;
        }
        case PRIM_DOUBLE: {
            switch (srcType) {
                case PRIM_BYTE:
                case PRIM_CHAR:
                case PRIM_SHORT:
                case PRIM_INT:    conv = ItoD; break;
                case PRIM_LONG:   conv = JtoD; break;
                case PRIM_FLOAT:  conv = FtoD; break;
                case PRIM_DOUBLE: conv = OK8;  break;
                default:          conv = bad;  break;
            }
            break;
        }
        case PRIM_VOID:
        case PRIM_NOT:
        default: {
            conv = bad;
            break;
        }
    }

    switch (conv) {
        case OK4:  *dstPtr = *srcPtr;                    return 1;
        case OK8:  *(s8*) dstPtr = *(s8*)srcPtr;         return 2;
        case ItoJ: *(s8*) dstPtr = (s8) (*(s4*) srcPtr); return 2;
#ifndef ARCH_HAVE_ALIGNED_DOUBLES
        case ItoD: *(double*) dstPtr = (double) (*(s4*) srcPtr);        return 2;
        case JtoD: *(double*) dstPtr = (double) (*(long long*) srcPtr); return 2;
        case FtoD: *(double*) dstPtr = (double) (*(float*) srcPtr);     return 2;
#else
        case ItoD: ret = (double) (*(s4*) srcPtr);
                   memcpy(dstPtr, &ret, 8);              return 2;
        case JtoD: ret = (double) (*(long long*) srcPtr);
                   memcpy(dstPtr, &ret, 8);              return 2;
        case FtoD: ret = (double) (*(float*) srcPtr);
                   memcpy(dstPtr, &ret, 8);              return 2;
#endif
        case ItoF: *(float*) dstPtr = (float) (*(int*) srcPtr);       return 1;
        case JtoF: *(float*) dstPtr = (float) (*(long long*) srcPtr); return 1;
        case bad: {
            ALOGV("illegal primitive conversion: '%s' to '%s'",
                dexGetPrimitiveTypeDescriptor(srcType),
                dexGetPrimitiveTypeDescriptor(dstType));
            return -1;
        }
        default: {
            dvmAbort();
            return -1;  // Keep the compiler happy.
        }
    }
}

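/*
 * Why the widening cases return 2: a Dalvik long or double occupies two
 * 32-bit register words.  A self-contained illustration of the ItoJ case
 * (plain C, no Dalvik types; memcpy sidesteps alignment issues just as the
 * ARCH_HAVE_ALIGNED_DOUBLES path above does):
 */
#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
    int32_t src = -7;
    int32_t dst[2];                   /* two 32-bit destination words */
    int64_t widened = (int64_t)src;   /* sign-extending int -> long   */
    memcpy(dst, &widened, sizeof(widened));

    int64_t readBack;
    memcpy(&readBack, dst, sizeof(readBack));
    assert(readBack == -7);
    return 0;
}
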
/* All objects for stronger reference levels have been
 * marked before this is called.
 */
void dvmHeapHandleReferences(Object *refListHead, enum RefType refType)
{
    Object *reference;
    GcMarkContext *markContext = &gDvm.gcHeap->markContext;
    const int offVmData = gDvm.offJavaLangRefReference_vmData;
    const int offReferent = gDvm.offJavaLangRefReference_referent;
    bool workRequired = false;
    size_t numCleared = 0;
    size_t numEnqueued = 0;

    reference = refListHead;
    while (reference != NULL) {
        Object *next;
        Object *referent;

        /* Pull the interesting fields out of the Reference object.
         */
        next = dvmGetFieldObject(reference, offVmData);
        referent = dvmGetFieldObject(reference, offReferent);

        //TODO: when handling REF_PHANTOM, unlink any references
        //      that fail this initial if().  We need to re-walk
        //      the list, and it would be nice to avoid the extra
        //      work.
        if (referent != NULL && !isMarked(ptr2chunk(referent), markContext)) {
            bool schedClear, schedEnqueue;

            /* This is the strongest reference that refers to referent.
             * Do the right thing.
             */
            switch (refType) {
            case REF_SOFT:
            case REF_WEAK:
                schedClear = clearReference(reference);
                schedEnqueue = enqueueReference(reference);
                break;
            case REF_PHANTOM:
                /* PhantomReferences are not cleared automatically.
                 * Until someone clears it (or the reference itself
                 * is collected), the referent must remain alive.
                 *
                 * It's necessary to fully mark the referent because
                 * it will still be present during the next GC, and
                 * all objects that it points to must be valid.
                 * (The referent will be marked outside of this loop,
                 * after handling all references of this strength, in
                 * case multiple references point to the same object.)
                 */
                schedClear = false;

                /* A PhantomReference is only useful with a
                 * queue, but since it's possible to create one
                 * without a queue, we need to check.
                 */
                schedEnqueue = enqueueReference(reference);
                break;
            default:
                assert(!"Bad reference type");
                schedClear = false;
                schedEnqueue = false;
                break;
            }
            numCleared += schedClear ? 1 : 0;
            numEnqueued += schedEnqueue ? 1 : 0;

            if (schedClear || schedEnqueue) {
                uintptr_t workBits;

                /* Stuff the clear/enqueue bits in the bottom of
                 * the pointer.  Assumes that objects are 8-byte
                 * aligned.
                 *
                 * Note that we are adding the *Reference* (which
                 * is by definition already marked at this point) to
                 * this list; we're not adding the referent (which
                 * has already been cleared).
                 */
                assert(((intptr_t)reference & 3) == 0);
                assert(((WORKER_CLEAR | WORKER_ENQUEUE) & ~3) == 0);
                workBits = (schedClear ? WORKER_CLEAR : 0) |
                           (schedEnqueue ? WORKER_ENQUEUE : 0);
                if (!dvmHeapAddRefToLargeTable(
                        &gDvm.gcHeap->referenceOperations,
                        (Object *)((uintptr_t)reference | workBits)))
                {
                    LOGE_HEAP("dvmMalloc(): no room for any more "
                            "reference operations\n");
                    dvmAbort();
                }
                workRequired = true;
            }

            if (refType != REF_PHANTOM) {
                /* Let later GCs know not to reschedule this reference.
                 */
                dvmSetFieldObject(reference, offVmData,
                        SCHEDULED_REFERENCE_MAGIC);
            }   // else this is handled later for REF_PHANTOM

        }   // else there was a stronger reference to the referent.

        reference = next;
    }

#define refType2str(r) \
    ((r) == REF_SOFT ? "soft" : ( \
     (r) == REF_WEAK ? "weak" : ( \
     (r) == REF_PHANTOM ? "phantom" : "UNKNOWN" )))

    LOGD_HEAP("dvmHeapHandleReferences(): cleared %zd, enqueued %zd %s references\n",
            numCleared, numEnqueued, refType2str(refType));

    /* Walk through the reference list again, and mark any non-clear/marked
     * referents.  Only PhantomReferences can have non-clear referents
     * at this point.
     */
    if (refType == REF_PHANTOM) {
        bool scanRequired = false;

        HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_REFERENCE_CLEANUP, 0);
        reference = refListHead;
        while (reference != NULL) {
            Object *next;
            Object *referent;

            /* Pull the interesting fields out of the Reference object.
             */
            next = dvmGetFieldObject(reference, offVmData);
            referent = dvmGetFieldObject(reference, offReferent);

            if (referent != NULL &&
                !isMarked(ptr2chunk(referent), markContext))
            {
                markObjectNonNull(referent, markContext);
                scanRequired = true;

                /* Let later GCs know not to reschedule this reference.
                 */
                dvmSetFieldObject(reference, offVmData,
                        SCHEDULED_REFERENCE_MAGIC);
            }

            reference = next;
        }
        HPROF_CLEAR_GC_SCAN_STATE();
        if (scanRequired) {
            processMarkStack(markContext);
        }
    }

    if (workRequired) {
        dvmSignalHeapWorker(false);
    }
}

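/*
 * The workBits trick above is ordinary pointer tagging: since objects are
 * at least 4-byte aligned, the two low bits of a pointer are free to carry
 * flags.  A generic sketch (TAG_* names are illustrative; the real
 * WORKER_CLEAR/WORKER_ENQUEUE values are assumed to fit in those two bits,
 * as the asserts in the loop above require):
 */
#include <assert.h>
#include <stdint.h>

#define TAG_CLEAR   1u
#define TAG_ENQUEUE 2u

static inline void* tagPointer(void* p, uintptr_t bits)
{
    assert(((uintptr_t)p & 3) == 0);    /* alignment keeps the low bits free */
    return (void*)((uintptr_t)p | (bits & 3));
}

static inline void* untagPointer(void* tagged, uintptr_t* bitsOut)
{
    *bitsOut = (uintptr_t)tagged & 3;
    return (void*)((uintptr_t)tagged & ~(uintptr_t)3);
}
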
/* Mark all objects that obj refers to.
 *
 * Called on every object in markList.
 */
static void scanObject(const Object *obj, GcMarkContext *ctx)
{
    ClassObject *clazz;

    assert(dvmIsValidObject(obj));
    LOGV_SCAN("0x%08x %s\n", (uint)obj, obj->clazz->name);

#if WITH_HPROF
    if (gDvm.gcHeap->hprofContext != NULL) {
        hprofDumpHeapObject(gDvm.gcHeap->hprofContext, obj);
    }
#endif

    /* Get and mark the class object for this particular instance.
     */
    clazz = obj->clazz;
    if (clazz == NULL) {
        /* This can happen if we catch an object between
         * dvmMalloc() and DVM_OBJECT_INIT().  The object
         * won't contain any references yet, so we can
         * just skip it.
         */
        return;
    } else if (clazz == gDvm.unlinkedJavaLangClass) {
        /* This class hasn't been linked yet.  We're guaranteed
         * that the object doesn't contain any references that
         * aren't already tracked, so we can skip scanning it.
         *
         * NOTE: unlinkedJavaLangClass is not on the heap, so
         * it's very important that we don't try marking it.
         */
        return;
    }

#if WITH_OBJECT_HEADERS
    gMarkParent = obj;
    if (ptr2chunk(obj)->scanGeneration == gGeneration) {
        LOGE("object 0x%08x was already scanned this generation\n",
                (uintptr_t)obj);
        dvmAbort();
    }
    ptr2chunk(obj)->oldScanGeneration = ptr2chunk(obj)->scanGeneration;
    ptr2chunk(obj)->scanGeneration = gGeneration;
    ptr2chunk(obj)->scanCount++;
#endif

    assert(dvmIsValidObject((Object *)clazz));
    markObjectNonNull((Object *)clazz, ctx);

    /* Mark any references in this object.
     */
    if (IS_CLASS_FLAG_SET(clazz, CLASS_ISARRAY)) {
        /* It's an array object.
         */
        if (IS_CLASS_FLAG_SET(clazz, CLASS_ISOBJECTARRAY)) {
            /* It's an array of object references.
             */
            scanObjectArray((ArrayObject *)obj, ctx);
        }
        // else there's nothing else to scan
    } else {
        /* It's a DataObject-compatible object.
         */
        scanInstanceFields((DataObject *)obj, clazz, ctx);

        if (IS_CLASS_FLAG_SET(clazz, CLASS_ISREFERENCE)) {
            GcHeap *gcHeap = gDvm.gcHeap;
            Object *referent;

            /* It's a subclass of java/lang/ref/Reference.
             * The fields in this class have been arranged
             * such that scanInstanceFields() did not actually
             * mark the "referent" field; we need to handle
             * it specially.
             *
             * If the referent already has a strong mark (isMarked(referent)),
             * we don't care about its reference status.
             */
            referent = dvmGetFieldObject(obj,
                    gDvm.offJavaLangRefReference_referent);
            if (referent != NULL &&
                !isMarked(ptr2chunk(referent), &gcHeap->markContext))
            {
                u4 refFlags;

                if (gcHeap->markAllReferents) {
                    LOG_REF("Hard-marking a reference\n");

                    /* Don't bother with normal reference-following
                     * behavior, just mark the referent.  This should
                     * only be used when following objects that just
                     * became scheduled for finalization.
                     */
                    markObjectNonNull(referent, ctx);
                    goto skip_reference;
                }

                /* See if this reference was handled by a previous GC.
                 */
                if (dvmGetFieldObject(obj,
                        gDvm.offJavaLangRefReference_vmData) ==
                        SCHEDULED_REFERENCE_MAGIC)
                {
                    LOG_REF("Skipping scheduled reference\n");

                    /* Don't reschedule it, but make sure that its
                     * referent doesn't get collected (in case it's
                     * a PhantomReference and wasn't cleared automatically).
                     */
                    //TODO: Mark these after handling all new refs of
                    //      this strength, in case the new refs refer
                    //      to the same referent.  Not a very common
                    //      case, though.
                    markObjectNonNull(referent, ctx);
                    goto skip_reference;
                }

                /* Find out what kind of reference is pointing
                 * to referent.
                 */
                refFlags = GET_CLASS_FLAG_GROUP(clazz,
                        CLASS_ISREFERENCE |
                        CLASS_ISWEAKREFERENCE |
                        CLASS_ISPHANTOMREFERENCE);

                /* We use the vmData field of Reference objects
                 * as a next pointer in a singly-linked list.
                 * That way, we don't need to allocate any memory
                 * while we're doing a GC.
                 */
#define ADD_REF_TO_LIST(list, ref) \
            do { \
                Object *ARTL_ref_ = (/*de-const*/Object *)(ref); \
                dvmSetFieldObject(ARTL_ref_, \
                        gDvm.offJavaLangRefReference_vmData, list); \
                list = ARTL_ref_; \
            } while (false)

                /* At this stage, we just keep track of all of
                 * the live references that we've seen.  Later,
                 * we'll walk through each of these lists and
                 * deal with the referents.
                 */
                if (refFlags == CLASS_ISREFERENCE) {
                    /* It's a soft reference.  Depending on the state,
                     * we'll attempt to collect all of them, some of
                     * them, or none of them.
                     */
                    if (gcHeap->softReferenceCollectionState ==
                            SR_COLLECT_NONE)
                    {
                sr_collect_none:
                        markObjectNonNull(referent, ctx);
                    } else if (gcHeap->softReferenceCollectionState ==
                            SR_COLLECT_ALL)
                    {
                sr_collect_all:
                        ADD_REF_TO_LIST(gcHeap->softReferences, obj);
                    } else {
                        /* We'll only try to collect half of the
                         * referents.
                         */
                        if (gcHeap->softReferenceColor++ & 1) {
                            goto sr_collect_none;
                        }
                        goto sr_collect_all;
                    }
                } else {
                    /* It's a weak or phantom reference.
                     * Clearing CLASS_ISREFERENCE will reveal which.
                     */
                    refFlags &= ~CLASS_ISREFERENCE;
                    if (refFlags == CLASS_ISWEAKREFERENCE) {
                        ADD_REF_TO_LIST(gcHeap->weakReferences, obj);
                    } else if (refFlags == CLASS_ISPHANTOMREFERENCE) {
                        ADD_REF_TO_LIST(gcHeap->phantomReferences, obj);
                    } else {
                        assert(!"Unknown reference type");
                    }
                }
#undef ADD_REF_TO_LIST
            }
        }

    skip_reference:
        /* If this is a class object, mark various other things that
         * its internals point to.
         *
         * All class objects are instances of java.lang.Class,
         * including the java.lang.Class class object.
         */
        if (clazz == gDvm.classJavaLangClass) {
            scanClassObject((ClassObject *)obj, ctx);
        }
    }

#if WITH_OBJECT_HEADERS
    gMarkParent = NULL;
#endif
}