/*
 * Marks a known-non-NULL object as live.  Objects below the immune
 * limit are assumed to be pre-marked and are skipped.  A newly marked
 * object lying behind the current mark finger is pushed onto the mark
 * stack so that it gets scanned later.
 */
static void markObjectNonNull(const Object *obj, GcMarkContext *ctx,
                              bool checkFinger)
{
    assert(ctx != NULL);
    assert(obj != NULL);
    assert(dvmIsValidObject(obj));

    /* Immune objects never need (re)marking; they must already be marked. */
    if (obj < (Object *)ctx->immuneLimit) {
        assert(isMarked(obj, ctx));
        return;
    }

    /* setAndReturnMarkBit() reports the previous state of the bit. */
    if (setAndReturnMarkBit(ctx, obj)) {
        /* Already marked; nothing more to do. */
        return;
    }

    /* First time we've seen this object. */
    if (checkFinger && (void *)obj < ctx->finger) {
        /* The scan has already passed this address; queue it. */
        MARK_STACK_PUSH(ctx->stack, obj);
    }
#if WITH_HPROF
    if (gDvm.gcHeap->hprofContext != NULL) {
        hprofMarkRootObject(gDvm.gcHeap->hprofContext, obj, 0);
    }
#endif
}
/* Process all enqueued heap work, including finalizers and reference
 * enqueueing. Clearing has already been done by the VM.
 *
 * Caller must hold gDvm.heapWorkerLock.
 */
static void doHeapWork(Thread *self)
{
    Object *obj;
    HeapWorkerOperation op;
    int numFinalizersCalled, numReferencesEnqueued;

    assert(gDvm.voffJavaLangObject_finalize >= 0);
    assert(gDvm.methJavaLangRefReference_enqueueInternal != NULL);

    numFinalizersCalled = 0;
    numReferencesEnqueued = 0;
    /* Drain the worker queue one object at a time; "op" tells us which
     * kind of work was scheduled for this object.
     */
    while ((obj = dvmGetNextHeapWorkerObject(&op)) != NULL) {
        Method *method = NULL;

        /* Make sure the object hasn't been collected since
         * being scheduled.
         */
        assert(dvmIsValidObject(obj));

        /* Call the appropriate method(s). */
        if (op == WORKER_FINALIZE) {
            numFinalizersCalled++;
            /* Resolve the concrete finalize() override via the vtable. */
            method = obj->clazz->vtable[gDvm.voffJavaLangObject_finalize];
            assert(dvmCompareNameDescriptorAndMethod("finalize", "()V",
                            method) == 0);
            /* Object.finalize() is a no-op; it should never get scheduled. */
            assert(method->clazz != gDvm.classJavaLangObject);
            callMethod(self, obj, method);
        } else {
            assert(op == WORKER_ENQUEUE);
            /* A scheduled reference must have a queue and must not
             * already be linked into one.
             */
            assert(dvmGetFieldObject(
                        obj, gDvm.offJavaLangRefReference_queue) != NULL);
            assert(dvmGetFieldObject(
                        obj, gDvm.offJavaLangRefReference_queueNext) == NULL);
            numReferencesEnqueued++;
            callMethod(self, obj,
                    gDvm.methJavaLangRefReference_enqueueInternal);
        }

        /* Let the GC collect the object. */
        dvmReleaseTrackedAlloc(obj, self);
    }
    LOGV("Called %d finalizers\n", numFinalizersCalled);
    LOGV("Enqueued %d references\n", numReferencesEnqueued);
}
/*
 * Create a copy of an object, for Object.clone().
 *
 * We use the size actually allocated, rather than obj->clazz->objectSize,
 * because the latter doesn't work for array objects.
 *
 * Returns NULL (with an exception pending) if allocation fails.  The
 * caller is responsible for calling dvmReleaseTrackedAlloc() on the
 * returned object.
 */
Object* dvmCloneObject(Object* obj)
{
    Object* copy;
    int size;
    int flags;

    assert(dvmIsValidObject(obj));

    /* Class.java shouldn't let us get here (java.lang.Class is final
     * and does not implement Clonable), but make extra sure.
     * A memcpy() clone will wreak havoc on a ClassObject's "innards".
     */
    assert(obj->clazz != gDvm.classJavaLangClass);

    if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISFINALIZABLE))
        flags = ALLOC_DEFAULT | ALLOC_FINALIZABLE;
    else
        flags = ALLOC_DEFAULT;

    //TODO: use clazz->objectSize for non-arrays
    size = dvmObjectSizeInHeap(obj);

    copy = dvmMalloc(size, flags);
    if (copy == NULL)
        return NULL;
#if WITH_HPROF && WITH_HPROF_STACK
    hprofFillInStackTrace(copy);
    dvmTrackAllocation(obj->clazz, size);
#endif

    memcpy(copy, obj, size);
    /* BUG FIX: this call was garbled to "DVM_LOCK_INIT(©->lock)" by an
     * HTML-entity substitution of "&copy" -- restore "&copy->lock" so the
     * clone starts with a fresh, unlocked lock word instead of failing to
     * compile.
     */
    DVM_LOCK_INIT(&copy->lock);
    /* NOTE(review): the lines below override the thin-lock init with a fat
     * lock whose Monitor pointer is NULL; any synchronization on the clone
     * would then dereference NULL.  Confirm this fork really intends this
     * (dvmCreateMonitor() is commented out) -- TODO verify.
     */
    Monitor* mon = NULL;//dvmCreateMonitor(copy);
    copy->lock = (u4)mon | LW_SHAPE_FAT;

    //LOGV("CloneObject: %p->%p %s (%d)\n", obj, copy, obj->clazz->name, size);

    // TODO: deal with reference classes

    /* don't call dvmReleaseTrackedAlloc -- the caller must do that */

    return copy;
}
/*
 * Create a copy of an object, for Object.clone().
 *
 * We use the size actually allocated, rather than obj->clazz->objectSize,
 * because the latter doesn't work for array objects.
 *
 * Returns NULL (with an exception pending) if allocation fails; the
 * caller owns the returned reference.
 */
Object* dvmCloneObject(Object* obj, int flags)
{
    assert(dvmIsValidObject(obj));
    ClassObject* clazz = obj->clazz;

    /* Class.java shouldn't let us get here (java.lang.Class is final
     * and does not implement Clonable), but make extra sure.
     * A memcpy() clone will wreak havoc on a ClassObject's "innards".
     */
    assert(!dvmIsTheClassClass(clazz));

    size_t size;
    if (IS_CLASS_FLAG_SET(clazz, CLASS_ISARRAY)) {
        size = dvmArrayObjectSize((ArrayObject *)obj);
    } else {
        size = clazz->objectSize;
    }

    Object* copy = (Object*)dvmMalloc(size, flags);
    if (copy == NULL)
        return NULL;
    /* BUG FIX: this store used to precede the NULL check above, so an
     * allocation failure dereferenced a NULL pointer.  Clear the tag only
     * once we know the allocation succeeded.
     */
    copy->tag = 0;

    DVM_OBJECT_INIT(copy, clazz);
    size_t offset = sizeof(Object);
    /* Copy instance data.  We assume memcpy copies by words. */
    memcpy((char*)copy + offset, (char*)obj + offset, size - offset);

    /* Mark the clone as finalizable if appropriate. */
    if (IS_CLASS_FLAG_SET(clazz, CLASS_ISFINALIZABLE)) {
        dvmSetFinalizable(copy);
    }

    dvmTrackAllocation(clazz, size);    /* notify DDMS */

    /* Notify any registered object-creation hook under the hook mutex. */
    pthread_mutex_lock(&gDvm.s_mtx);
    if (gDvm.newObjHook) {
        gDvm.newObjHook(copy);
    }
    pthread_mutex_unlock(&gDvm.s_mtx);

    return copy;
}
/*
 * Set the mark bit for a known-valid, non-NULL object.
 *
 * Objects below the immune limit are expected to be marked already and
 * are left alone.  If the object gains its mark bit here and lies behind
 * the current finger, it is queued on the mark stack for later scanning.
 */
static void markObjectNonNull(const Object *obj, GcMarkContext *ctx,
                              bool checkFinger)
{
    assert(ctx != NULL);
    assert(obj != NULL);
    assert(dvmIsValidObject(obj));

    /* Everything below the immune limit is outside this collection. */
    if (obj < (Object *)ctx->immuneLimit) {
        assert(isMarked(obj, ctx));
        return;
    }

    /* setAndReturnMarkBit() yields the bit's prior value. */
    if (setAndReturnMarkBit(ctx, obj)) {
        /* Already marked. */
        return;
    }

    /* Newly marked: queue it if the scan already passed its address. */
    if (checkFinger && (void *)obj < ctx->finger) {
        markStackPush(&ctx->stack, obj);
    }
}
/*
 * Check to see if "obj" is NULL.  If so, throw an exception.  Assumes the
 * pc has already been exported to the stack.
 *
 * Perform additional checks on debug builds.
 *
 * Use this to check for NULL when the instruction handler calls into
 * something that could throw an exception (so we have already called
 * EXPORT_PC at the top).
 */
static inline bool checkForNull(Object* obj)
{
    if (obj == NULL) {
        /* Leaves the NPE pending; caller must bail out of the handler. */
        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
        return false;
    }
#ifdef WITH_EXTRA_OBJECT_VALIDATION
    if (!dvmIsValidObject(obj)) {
        LOGE("Invalid object %p\n", obj);
        dvmAbort();
    }
#endif
#ifndef NDEBUG
    {
        /* A class pointer in the first 64KiB of the address space almost
         * certainly means the heap has been stomped on.
         */
        ClassObject* objClass = obj->clazz;
        if (objClass == NULL || ((u4) objClass) <= 65536) {
            /* probable heap corruption */
            LOGE("Invalid object class %p (in %p)\n", objClass, obj);
            dvmAbort();
        }
    }
#endif
    return true;
}
/*
 * Create a copy of an object, for Object.clone().
 *
 * We use the size actually allocated, rather than obj->clazz->objectSize,
 * because the latter doesn't work for array objects.
 *
 * Returns NULL (with an exception pending) if allocation fails.  The
 * caller owns the returned reference.
 */
Object* dvmCloneObject(Object* obj)
{
    Object* copy;
    int size;
    int flags;

    assert(dvmIsValidObject(obj));

    /* Class.java shouldn't let us get here (java.lang.Class is final
     * and does not implement Clonable), but make extra sure.
     * A memcpy() clone will wreak havoc on a ClassObject's "innards".
     */
    assert(obj->clazz != gDvm.classJavaLangClass);

    if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISFINALIZABLE))
        flags = ALLOC_DEFAULT | ALLOC_FINALIZABLE;
    else
        flags = ALLOC_DEFAULT;

    if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISARRAY)) {
        size = dvmArrayObjectSize((ArrayObject *)obj);
    } else {
        size = obj->clazz->objectSize;
    }

    copy = dvmMalloc(size, flags);
    if (copy == NULL)
        return NULL;
#if WITH_HPROF && WITH_HPROF_STACK
    hprofFillInStackTrace(copy);
    dvmTrackAllocation(obj->clazz, size);
#endif

    memcpy(copy, obj, size);
    /* BUG FIX: this call was garbled to "DVM_LOCK_INIT(©->lock)" by an
     * HTML-entity substitution of "&copy" -- restore "&copy->lock" so the
     * clone starts with a fresh, unlocked lock word instead of failing to
     * compile.
     */
    DVM_LOCK_INIT(&copy->lock);
    dvmWriteBarrierObject(copy);

    return copy;
}
/*
 * Add "obj" to "pRef", doubling the table (capped at maxEntries) when the
 * currently allocated space is exhausted.
 *
 * Returns false if the table has hit its hard maximum or cannot be
 * expanded; the table is left unchanged in that case.
 */
bool dvmAddToReferenceTable(ReferenceTable* pRef, Object* obj)
{
    /* BUG FIX: check for NULL before asking dvmIsValidObject() about the
     * pointer; the original asserted validity first, which defeats the
     * explicit NULL assert.
     */
    assert(obj != NULL);
    assert(dvmIsValidObject(obj));
    assert(pRef->table != NULL);
    assert(pRef->allocEntries <= pRef->maxEntries);

    if (pRef->nextEntry == pRef->table + pRef->allocEntries) {
        /* reached end of allocated space; did we hit buffer max? */
        if (pRef->nextEntry == pRef->table + pRef->maxEntries) {
            LOGW("ReferenceTable overflow (max=%d)\n", pRef->maxEntries);
            return false;
        }

        Object** newTable;
        int newSize;

        newSize = pRef->allocEntries * 2;
        if (newSize > pRef->maxEntries)
            newSize = pRef->maxEntries;
        assert(newSize > pRef->allocEntries);

        newTable = (Object**) realloc(pRef->table, newSize * sizeof(Object*));
        if (newTable == NULL) {
            /* BUG FIX: sizeof yields size_t; %zd matches it (was %d). */
            LOGE("Unable to expand ref table (from %d to %d %zd-byte entries)\n",
                pRef->allocEntries, newSize, sizeof(Object*));
            return false;
        }
        LOGVV("Growing %p from %d to %d\n", pRef, pRef->allocEntries, newSize);

        /* update entries; adjust "nextEntry" in case memory moved */
        pRef->nextEntry = newTable + (pRef->nextEntry - pRef->table);
        pRef->table = newTable;
        pRef->allocEntries = newSize;
    }

    *pRef->nextEntry++ = obj;
    return true;
}
/* Common helper for the mark routines: sets the mark bit on the heap
 * chunk backing "obj" and, if the object was not already marked, decides
 * whether it must be queued for later scanning.
 *
 * "checkFinger" queues the object only when it lies behind the current
 * mark finger (i.e. the bitmap sweep has already passed it);
 * "forceStack" queues every newly marked object unconditionally.
 */
static void _markObjectNonNullCommon(const Object *obj, GcMarkContext *ctx,
        bool checkFinger, bool forceStack)
{
    DvmHeapChunk *hc;

    assert(obj != NULL);

#if GC_DEBUG(GC_DEBUG_PARANOID)
    //TODO: make sure we're locked
    assert(obj != (Object *)gDvm.unlinkedJavaLangClass);
    assert(dvmIsValidObject(obj));
#endif

    /* Mark bits are tracked per heap chunk, not per Object pointer. */
    hc = ptr2chunk(obj);
    if (!setAndReturnMarkBit(ctx, hc)) {
        /* This object was not previously marked.
         */
        if (forceStack || (checkFinger && (void *)hc < ctx->finger)) {
            /* This object will need to go on the mark stack.
             */
            MARK_STACK_PUSH(ctx->stack, obj);
        }

#if WITH_OBJECT_HEADERS
        /* Debug bookkeeping: verify mark/scan generation invariants and
         * record who marked this chunk and when.
         */
        if (hc->scanGeneration != hc->markGeneration) {
            LOGE("markObject(0x%08x): wasn't scanned last time\n", (uint)obj);
            dvmAbort();
        }
        if (hc->markGeneration == gGeneration) {
            LOGE("markObject(0x%08x): already marked this generation\n",
                    (uint)obj);
            dvmAbort();
        }
        hc->oldMarkGeneration = hc->markGeneration;
        hc->markGeneration = gGeneration;
        hc->markFingerOld = hc->markFinger;
        hc->markFinger = ctx->finger;
        if (gMarkParent != NULL) {
            hc->parentOld = hc->parent;
            hc->parent = gMarkParent;
        } else {
            /* No parent known; tag the existing parent pointer instead. */
            hc->parent = (const Object *)((uintptr_t)hc->parent | 1);
        }
        hc->markCount++;
#endif
#if WITH_HPROF
        if (gDvm.gcHeap->hprofContext != NULL) {
            hprofMarkRootObject(gDvm.gcHeap->hprofContext, obj, 0);
        }
#endif
#if DVM_TRACK_HEAP_MARKING
        gDvm.gcHeap->markCount++;
        gDvm.gcHeap->markSize += dvmHeapSourceChunkSize((void *)hc) +
                HEAP_SOURCE_CHUNK_OVERHEAD;
#endif

        /* obj->clazz can be NULL if we catch an object between
         * dvmMalloc() and DVM_OBJECT_INIT().  This is ok.
         */
        LOGV_MARK("0x%08x %s\n", (uint)obj,
                obj->clazz == NULL ? "<null class>" : obj->clazz->name);
    }
}
/* Callback invoked for every chunk while walking the heap to build an
 * HPSG/NHSG (heap segment) report for DDMS.  Classifies each chunk and
 * appends its run-length-encoded descriptor to the output buffer,
 * flushing to the debugger when the buffer fills.
 */
static void heap_chunk_callback(const void *chunkptr, size_t chunklen,
                                const void *userptr, size_t userlen,
                                void *arg)
{
    HeapChunkContext *ctx = (HeapChunkContext *)arg;
    u1 state;

    UNUSED_PARAMETER(userlen);

    assert((chunklen & (ALLOCATION_UNIT_SIZE-1)) == 0);

    /* Make sure there's enough room left in the buffer.
     * We need to use two bytes for every fractional 256
     * allocation units used by the chunk.
     */
    {
        size_t needed = (((chunklen/ALLOCATION_UNIT_SIZE + 255) / 256) * 2);
        size_t bytesLeft = ctx->bufLen - (size_t)(ctx->p - ctx->buf);
        if (bytesLeft < needed) {
            flush_hpsg_chunk(ctx);
        }
        bytesLeft = ctx->bufLen - (size_t)(ctx->p - ctx->buf);
        if (bytesLeft < needed) {
            /* Even an empty buffer can't hold it; drop this chunk. */
            LOGW("chunk is too big to transmit (chunklen=%zd, %zd bytes)\n",
                chunklen, needed);
            return;
        }
    }

//TODO: notice when there's a gap and start a new heap, or at least a new range.
    if (ctx->needHeader) {
        /*
         * Start a new HPSx chunk.
         */

        /* [u4]: heap ID */
        set4BE(ctx->p, DEFAULT_HEAP_ID); ctx->p += 4;

        /* [u1]: size of allocation unit, in bytes */
        *ctx->p++ = 8;

        /* [u4]: virtual address of segment start */
        set4BE(ctx->p, (uintptr_t)chunkptr); ctx->p += 4;

        /* [u4]: offset of this piece (relative to the virtual address) */
        set4BE(ctx->p, 0); ctx->p += 4;

        /* [u4]: length of piece, in allocation units
         * We won't know this until we're done, so save the offset
         * and stuff in a dummy value.
         */
        ctx->pieceLenField = ctx->p;
        set4BE(ctx->p, 0x55555555); ctx->p += 4;

        ctx->needHeader = false;
    }

    /* Determine the type of this chunk.
     */
    if (userptr == NULL) {
        /* It's a free chunk.
         */
        state = HPSG_STATE(SOLIDITY_FREE, 0);
    } else {
        const DvmHeapChunk *hc = (const DvmHeapChunk *)userptr;
        const Object *obj = chunk2ptr(hc);
        /* If we're looking at the native heap, we'll just return
         * (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks
         */
        bool native = ctx->type == CHUNK_TYPE("NHSG");

        /* It's an allocated chunk.  Figure out what it is.
         */
//TODO: if ctx.merge, see if this chunk is different from the last chunk.
//      If it's the same, we should combine them.
        if (!native && dvmIsValidObject(obj)) {
            ClassObject *clazz = obj->clazz;
            if (clazz == NULL) {
                /* The object was probably just created
                 * but hasn't been initialized yet.
                 */
                state = HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
            } else if (clazz == gDvm.unlinkedJavaLangClass ||
                       clazz == gDvm.classJavaLangClass)
            {
                state = HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT);
            } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISARRAY)) {
                if (IS_CLASS_FLAG_SET(clazz, CLASS_ISOBJECTARRAY)) {
                    state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
                } else {
                    /* The element width picks the reported array kind. */
                    switch (clazz->elementClass->primitiveType) {
                    case PRIM_BOOLEAN:
                    case PRIM_BYTE:
                        state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_1);
                        break;
                    case PRIM_CHAR:
                    case PRIM_SHORT:
                        state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_2);
                        break;
                    case PRIM_INT:
                    case PRIM_FLOAT:
                        state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
                        break;
                    case PRIM_DOUBLE:
                    case PRIM_LONG:
                        state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_8);
                        break;
                    default:
                        assert(!"Unknown GC heap object type");
                        state = HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
                        break;
                    }
                }
            } else {
                state = HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
            }
        } else {
            obj = NULL; // it's not actually an object
            state = HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
        }
    }

    /* Write out the chunk description.
     */
    chunklen /= ALLOCATION_UNIT_SIZE;   // convert to allocation units
    ctx->totalAllocationUnits += chunklen;
    /* Runs longer than 256 units are emitted as 255-unit PARTIAL records. */
    while (chunklen > 256) {
        *ctx->p++ = state | HPSG_PARTIAL;
        *ctx->p++ = 255;     // length - 1
        chunklen -= 256;
    }
    *ctx->p++ = state;
    *ctx->p++ = chunklen - 1;
}
/*
 * Update the debugger on interesting events, such as hitting a breakpoint
 * or a single-step point.  This is called from the top of the interpreter
 * loop, before the current instruction is processed.
 *
 * Set "methodEntry" if we've just entered the method.  This detects
 * method exit by checking to see if the next instruction is "return".
 *
 * This can't catch native method entry/exit, so we have to handle that
 * at the point of invocation.  We also need to catch it in dvmCallMethod
 * if we want to capture native->native calls made through JNI.
 *
 * Notes to self:
 * - Don't want to switch to VMWAIT while posting events to the debugger.
 *   Let the debugger code decide if we need to change state.
 * - We may want to check for debugger-induced thread suspensions on
 *   every instruction.  That would make a "suspend all" more responsive
 *   and reduce the chances of multiple simultaneous events occurring.
 *   However, it could change the behavior some.
 *
 * TODO: method entry/exit events are probably less common than location
 * breakpoints.  We may be able to speed things up a bit if we don't query
 * the event list unless we know there's at least one lurking within.
 */
static void updateDebugger(const Method* method, const u2* pc, const u4* fp,
    bool methodEntry, Thread* self)
{
    int eventFlags = 0;

    /*
     * Update xtra.currentPc on every instruction.  We need to do this if
     * there's a chance that we could get suspended.  This can happen if
     * eventFlags != 0 here, or somebody manually requests a suspend
     * (which gets handled at PERIOD_CHECKS time).  One place where this
     * needs to be correct is in dvmAddSingleStep().
     */
    EXPORT_PC();

    if (methodEntry)
        eventFlags |= DBG_METHOD_ENTRY;

    /*
     * See if we have a breakpoint here.
     *
     * Depending on the "mods" associated with event(s) on this address,
     * we may or may not actually send a message to the debugger.
     */
#ifdef WITH_DEBUGGER
    if (INST_INST(*pc) == OP_BREAKPOINT) {
        LOGV("+++ breakpoint hit at %p\n", pc);
        eventFlags |= DBG_BREAKPOINT;
    }
#endif

    /*
     * If the debugger is single-stepping one of our threads, check to
     * see if we're that thread and we've reached a step point.
     */
    const StepControl* pCtrl = &gDvm.stepControl;
    if (pCtrl->active && pCtrl->thread == self) {
        int line, frameDepth;
        bool doStop = false;
        const char* msg = NULL;

        assert(!dvmIsNativeMethod(method));

        if (pCtrl->depth == SD_INTO) {
            /*
             * Step into method calls.  We break when the line number
             * or method pointer changes.  If we're in SS_MIN mode, we
             * always stop.
             */
            if (pCtrl->method != method) {
                doStop = true;
                msg = "new method";
            } else if (pCtrl->size == SS_MIN) {
                doStop = true;
                msg = "new instruction";
            } else if (!dvmAddressSetGet(
                    pCtrl->pAddressSet, pc - method->insns)) {
                doStop = true;
                msg = "new line";
            }
        } else if (pCtrl->depth == SD_OVER) {
            /*
             * Step over method calls.  We break when the line number is
             * different and the frame depth is <= the original frame
             * depth.  (We can't just compare on the method, because we
             * might get unrolled past it by an exception, and it's tricky
             * to identify recursion.)
             */
            frameDepth = dvmComputeVagueFrameDepth(self, fp);
            if (frameDepth < pCtrl->frameDepth) {
                /* popped up one or more frames, always trigger */
                doStop = true;
                msg = "method pop";
            } else if (frameDepth == pCtrl->frameDepth) {
                /* same depth, see if we moved */
                if (pCtrl->size == SS_MIN) {
                    doStop = true;
                    msg = "new instruction";
                } else if (!dvmAddressSetGet(pCtrl->pAddressSet,
                            pc - method->insns)) {
                    doStop = true;
                    msg = "new line";
                }
            }
        } else {
            assert(pCtrl->depth == SD_OUT);
            /*
             * Return from the current method.  We break when the frame
             * depth pops up.
             *
             * This differs from the "method exit" break in that it stops
             * with the PC at the next instruction in the returned-to
             * function, rather than the end of the returning function.
             */
            frameDepth = dvmComputeVagueFrameDepth(self, fp);
            if (frameDepth < pCtrl->frameDepth) {
                doStop = true;
                msg = "method pop";
            }
        }

        if (doStop) {
            LOGV("#####S %s\n", msg);
            eventFlags |= DBG_SINGLE_STEP;
        }
    }

    /*
     * Check to see if this is a "return" instruction.  JDWP says we should
     * send the event *after* the code has been executed, but it also says
     * the location we provide is the last instruction.  Since the "return"
     * instruction has no interesting side effects, we should be safe.
     * (We can't just move this down to the returnFromMethod label because
     * we potentially need to combine it with other events.)
     *
     * We're also not supposed to generate a method exit event if the method
     * terminates "with a thrown exception".
     */
    u2 inst = INST_INST(FETCH(0));
    if (inst == OP_RETURN_VOID || inst == OP_RETURN ||
        inst == OP_RETURN_WIDE || inst == OP_RETURN_OBJECT)
    {
        eventFlags |= DBG_METHOD_EXIT;
    }

    /*
     * If there's something interesting going on, see if it matches one
     * of the debugger filters.
     */
    if (eventFlags != 0) {
        Object* thisPtr = dvmGetThisPtr(method, fp);
        if (thisPtr != NULL && !dvmIsValidObject(thisPtr)) {
            /*
             * TODO: remove this check if we're confident that the "this"
             * pointer is where it should be -- slows us down, especially
             * during single-step.
             */
            char* desc = dexProtoCopyMethodDescriptor(&method->prototype);
            LOGE("HEY: invalid 'this' ptr %p (%s.%s %s)\n", thisPtr,
                method->clazz->descriptor, method->name, desc);
            free(desc);
            dvmAbort();
        }

        dvmDbgPostLocationEvent(method, pc - method->insns, thisPtr,
            eventFlags);
    }
}
/*
 * Visits all stack slots except those belonging to native method
 * arguments.
 */
static void visitThreadStack(RootVisitor *visitor, Thread *thread, void *arg)
{
    assert(visitor != NULL);
    assert(thread != NULL);
    u4 threadId = thread->threadId;
    const StackSaveArea *saveArea;
    /* Walk interpreted frames from the innermost frame out to the base. */
    for (u4 *fp = (u4 *)thread->interpSave.curFrame;
         fp != NULL;
         fp = (u4 *)saveArea->prevFrame) {
        Method *method;
        saveArea = SAVEAREA_FROM_FP(fp);
        method = (Method *)saveArea->method;
        if (method != NULL && !dvmIsNativeMethod(method)) {
#ifdef FASTIVA
            // @zee do not call any malloc in gc task.
            // cf) dvmGetExpandedRegisterMap()
            const RegisterMap* pMap = NULL;
#else
            const RegisterMap* pMap = dvmGetExpandedRegisterMap(method);
#endif
            const u1* regVector = NULL;
#ifndef FASTIVA
            if (pMap != NULL) {
                /* found map, get registers for this address */
                int addr = saveArea->xtra.currentPc - method->insns;
                regVector = dvmRegisterMapGetLine(pMap, addr);
            }
#endif
            if (regVector == NULL) {
                /*
                 * Either there was no register map or there is no
                 * info for the current PC.  Perform a conservative
                 * scan.
                 */
                for (size_t i = 0; i < method->registersSize; ++i) {
                    if (dvmIsValidObject((Object *)fp[i])) {
                        (*visitor)(&fp[i], threadId, ROOT_JAVA_FRAME, arg);
                    }
                }
            } else {
                /*
                 * Precise scan.  v0 is at the lowest address on the
                 * interpreted stack, and is the first bit in the
                 * register vector, so we can walk through the
                 * register map and memory in the same direction.
                 *
                 * A '1' bit indicates a live reference.
                 */
                u2 bits = 1 << 1;
                for (size_t i = 0; i < method->registersSize; ++i) {
                    bits >>= 1;
                    if (bits == 1) {
                        /* set bit 9 so we can tell when we're empty */
                        bits = *regVector++ | 0x0100;
                    }
                    if ((bits & 0x1) != 0) {
                        /*
                         * Register is marked as live, it's a valid root.
                         */
#if WITH_EXTRA_GC_CHECKS
                        if (fp[i] != 0 && !dvmIsValidObject((Object *)fp[i])) {
                            /* this is very bad */
                            ALOGE("PGC: invalid ref in reg %d: %#x",
                                  method->registersSize - 1 - i, fp[i]);
                            ALOGE("PGC: %s.%s addr %#x",
                                  method->clazz->descriptor, method->name,
                                  saveArea->xtra.currentPc - method->insns);
                            continue;
                        }
#endif
                        (*visitor)(&fp[i], threadId, ROOT_JAVA_FRAME, arg);
                    }
                }
                dvmReleaseRegisterMapLine(pMap, regVector);
            }
        }
        /*
         * Don't fall into an infinite loop if things get corrupted.
         */
        assert((uintptr_t)saveArea->prevFrame > (uintptr_t)fp ||
               saveArea->prevFrame == NULL);
    }
#ifdef FASTIVA
    /* FASTIVA: also scan the thread's native C stack conservatively,
     * since Java references may live in native frames in this fork.
     */
    int* stack_bottom = (int*)thread->m_pNativeStackBottom;
    int* stack_top = (int*)thread->m_pNativeStackPointer;
    const bool DUMP_STACK = 0;
    if (DUMP_STACK) {
        ALOGE("##### scan_stack %i %p~%p", thread->systemTid,
              stack_top, stack_bottom);
    }
    assert(thread->status != THREAD_RUNNING || thread == dvmThreadSelf());
    while (stack_top < stack_bottom) {
        if (dvmIsValidObject((Object*)stack_top[0])) {
            (*visitor)(stack_top, threadId, ROOT_JAVA_FRAME, arg);
        }
        stack_top ++;
    }
#endif
}
/*
 * Extract the object that is the target of a monitor-enter instruction
 * in the top stack frame of "thread".
 *
 * The other thread might be alive, so this has to work carefully.
 *
 * We assume the thread list lock is currently held.
 *
 * Returns "true" if we successfully recover the object.  "*pOwner" will
 * be NULL if we can't determine the owner for some reason (e.g. race
 * condition on ownership transfer).
 */
static bool extractMonitorEnterObject(Thread* thread, Object** pLockObj,
    Thread** pOwner)
{
    void* framePtr = thread->curFrame;

    if (framePtr == NULL || dvmIsBreakFrame(framePtr))
        return false;

    const StackSaveArea* saveArea = SAVEAREA_FROM_FP(framePtr);
    const Method* method = saveArea->method;
    const u2* currentPc = saveArea->xtra.currentPc;

    /* check Method* -- it must live in the linear-alloc region */
    if (!dvmLinearAllocContains(method, sizeof(Method))) {
        LOGD("ExtrMon: method %p not valid\n", method);
        return false;
    }

    /* check currentPc -- must point within the method's bytecode */
    u4 insnsSize = dvmGetMethodInsnsSize(method);
    if (currentPc < method->insns ||
        currentPc >= method->insns + insnsSize)
    {
        LOGD("ExtrMon: insns %p not valid (%p - %p)\n",
            currentPc, method->insns, method->insns + insnsSize);
        return false;
    }

    /* check the instruction -- low byte of the code unit is the opcode */
    if ((*currentPc & 0xff) != OP_MONITOR_ENTER) {
        LOGD("ExtrMon: insn at %p is not monitor-enter (0x%02x)\n",
            currentPc, *currentPc & 0xff);
        return false;
    }

    /* get and check the register index -- high byte is vAA */
    unsigned int reg = *currentPc >> 8;
    if (reg >= method->registersSize) {
        LOGD("ExtrMon: invalid register %d (max %d)\n",
            reg, method->registersSize);
        return false;
    }

    /* get and check the object in that register */
    u4* fp = (u4*) framePtr;
    Object* obj = (Object*) fp[reg];
    if (!dvmIsValidObject(obj)) {
        LOGD("ExtrMon: invalid object %p at %p[%d]\n", obj, fp, reg);
        return false;
    }
    *pLockObj = obj;

    /*
     * Try to determine the object's lock holder; it's okay if this fails.
     *
     * We're assuming the thread list lock is already held by this thread.
     * If it's not, we may be living dangerously if we have to scan through
     * the thread list to find a match.  (The VM will generally be in a
     * suspended state when executing here, so this is a minor concern
     * unless we're dumping while threads are running, in which case there's
     * a good chance of stuff blowing up anyway.)
     */
    *pOwner = dvmGetObjectLockHolder(obj);

    return true;
}
/*
 * Visits all stack slots.  TODO: visit native methods.
 */
static void visitThreadStack(Visitor *visitor, Thread *thread, void *arg)
{
    const StackSaveArea *saveArea;
    u4 *framePtr;

    assert(visitor != NULL);
    assert(thread != NULL);
    framePtr = (u4 *)thread->curFrame;
    /* Walk interpreted frames from the innermost frame out to the base. */
    for (; framePtr != NULL; framePtr = saveArea->prevFrame) {
        Method *method;
        saveArea = SAVEAREA_FROM_FP(framePtr);
        method = (Method *)saveArea->method;
        if (method != NULL && !dvmIsNativeMethod(method)) {
            const RegisterMap* pMap = dvmGetExpandedRegisterMap(method);
            const u1* regVector = NULL;
            size_t i;

            if (pMap != NULL) {
                /* found map, get registers for this address */
                int addr = saveArea->xtra.currentPc - method->insns;
                regVector = dvmRegisterMapGetLine(pMap, addr);
            }
            if (regVector == NULL) {
                /*
                 * Either there was no register map or there is no
                 * info for the current PC.  Perform a conservative
                 * scan.
                 */
                for (i = 0; i < method->registersSize; ++i) {
                    if (dvmIsValidObject((Object *)framePtr[i])) {
                        (*visitor)(&framePtr[i], arg);
                    }
                }
            } else {
                /*
                 * Precise scan.  v0 is at the lowest address on the
                 * interpreted stack, and is the first bit in the
                 * register vector, so we can walk through the
                 * register map and memory in the same direction.
                 *
                 * A '1' bit indicates a live reference.
                 */
                u2 bits = 1 << 1;
                for (i = 0; i < method->registersSize; ++i) {
                    bits >>= 1;
                    if (bits == 1) {
                        /* set bit 9 so we can tell when we're empty */
                        bits = *regVector++ | 0x0100;
                    }
                    if ((bits & 0x1) != 0) {
                        /*
                         * Register is marked as live, it's a valid root.
                         */
                        (*visitor)(&framePtr[i], arg);
                    }
                }
                dvmReleaseRegisterMapLine(pMap, regVector);
            }
        }
        /*
         * Don't fall into an infinite loop if things get corrupted.
         */
        assert((uintptr_t)saveArea->prevFrame > (uintptr_t)framePtr ||
               saveArea->prevFrame == NULL);
    }
}
/* Mark all objects that obj refers to.
 *
 * Called on every object in markList.
 */
static void scanObject(const Object *obj, GcMarkContext *ctx)
{
    ClassObject *clazz;

    assert(dvmIsValidObject(obj));
    LOGV_SCAN("0x%08x %s\n", (uint)obj, obj->clazz->name);

#if WITH_HPROF
    if (gDvm.gcHeap->hprofContext != NULL) {
        hprofDumpHeapObject(gDvm.gcHeap->hprofContext, obj);
    }
#endif

    /* Get and mark the class object for this particular instance.
     */
    clazz = obj->clazz;
    if (clazz == NULL) {
        /* This can happen if we catch an object between
         * dvmMalloc() and DVM_OBJECT_INIT().  The object
         * won't contain any references yet, so we can
         * just skip it.
         */
        return;
    } else if (clazz == gDvm.unlinkedJavaLangClass) {
        /* This class hasn't been linked yet.  We're guaranteed
         * that the object doesn't contain any references that
         * aren't already tracked, so we can skip scanning it.
         *
         * NOTE: unlinkedJavaLangClass is not on the heap, so
         * it's very important that we don't try marking it.
         */
        return;
    }

#if WITH_OBJECT_HEADERS
    /* Debug bookkeeping: each object should be scanned at most once
     * per generation.
     */
    gMarkParent = obj;
    if (ptr2chunk(obj)->scanGeneration == gGeneration) {
        LOGE("object 0x%08x was already scanned this generation\n",
                (uintptr_t)obj);
        dvmAbort();
    }
    ptr2chunk(obj)->oldScanGeneration = ptr2chunk(obj)->scanGeneration;
    ptr2chunk(obj)->scanGeneration = gGeneration;
    ptr2chunk(obj)->scanCount++;
#endif

    assert(dvmIsValidObject((Object *)clazz));
    markObjectNonNull((Object *)clazz, ctx);

    /* Mark any references in this object.
     */
    if (IS_CLASS_FLAG_SET(clazz, CLASS_ISARRAY)) {
        /* It's an array object.
         */
        if (IS_CLASS_FLAG_SET(clazz, CLASS_ISOBJECTARRAY)) {
            /* It's an array of object references.
             */
            scanObjectArray((ArrayObject *)obj, ctx);
        }
        // else there's nothing else to scan
    } else {
        /* It's a DataObject-compatible object.
         */
        scanInstanceFields((DataObject *)obj, clazz, ctx);

        if (IS_CLASS_FLAG_SET(clazz, CLASS_ISREFERENCE)) {
            GcHeap *gcHeap = gDvm.gcHeap;
            Object *referent;

            /* It's a subclass of java/lang/ref/Reference.
             * The fields in this class have been arranged
             * such that scanInstanceFields() did not actually
             * mark the "referent" field; we need to handle
             * it specially.
             *
             * If the referent already has a strong mark (isMarked(referent)),
             * we don't care about its reference status.
             */
            referent = dvmGetFieldObject(obj,
                    gDvm.offJavaLangRefReference_referent);
            if (referent != NULL &&
                    !isMarked(ptr2chunk(referent), &gcHeap->markContext))
            {
                u4 refFlags;

                if (gcHeap->markAllReferents) {
                    LOG_REF("Hard-marking a reference\n");

                    /* Don't bother with normal reference-following
                     * behavior, just mark the referent.  This should
                     * only be used when following objects that just
                     * became scheduled for finalization.
                     */
                    markObjectNonNull(referent, ctx);
                    goto skip_reference;
                }

                /* See if this reference was handled by a previous GC.
                 */
                if (dvmGetFieldObject(obj,
                        gDvm.offJavaLangRefReference_vmData) ==
                        SCHEDULED_REFERENCE_MAGIC)
                {
                    LOG_REF("Skipping scheduled reference\n");

                    /* Don't reschedule it, but make sure that its
                     * referent doesn't get collected (in case it's
                     * a PhantomReference and wasn't cleared automatically).
                     */
                    //TODO: Mark these after handling all new refs of
                    //      this strength, in case the new refs refer
                    //      to the same referent.  Not a very common
                    //      case, though.
                    markObjectNonNull(referent, ctx);
                    goto skip_reference;
                }

                /* Find out what kind of reference is pointing
                 * to referent.
                 */
                refFlags = GET_CLASS_FLAG_GROUP(clazz,
                        CLASS_ISREFERENCE |
                        CLASS_ISWEAKREFERENCE |
                        CLASS_ISPHANTOMREFERENCE);

                /* We use the vmData field of Reference objects
                 * as a next pointer in a singly-linked list.
                 * That way, we don't need to allocate any memory
                 * while we're doing a GC.
                 */
#define ADD_REF_TO_LIST(list, ref) \
            do { \
                Object *ARTL_ref_ = (/*de-const*/Object *)(ref); \
                dvmSetFieldObject(ARTL_ref_, \
                        gDvm.offJavaLangRefReference_vmData, list); \
                list = ARTL_ref_; \
            } while (false)

                /* At this stage, we just keep track of all of
                 * the live references that we've seen.  Later,
                 * we'll walk through each of these lists and
                 * deal with the referents.
                 */
                if (refFlags == CLASS_ISREFERENCE) {
                    /* It's a soft reference.  Depending on the state,
                     * we'll attempt to collect all of them, some of
                     * them, or none of them.
                     */
                    if (gcHeap->softReferenceCollectionState ==
                            SR_COLLECT_NONE)
                    {
                sr_collect_none:
                        markObjectNonNull(referent, ctx);
                    } else if (gcHeap->softReferenceCollectionState ==
                            SR_COLLECT_ALL)
                    {
                sr_collect_all:
                        ADD_REF_TO_LIST(gcHeap->softReferences, obj);
                    } else {
                        /* We'll only try to collect half of the
                         * referents.
                         */
                        if (gcHeap->softReferenceColor++ & 1) {
                            goto sr_collect_none;
                        }
                        goto sr_collect_all;
                    }
                } else {
                    /* It's a weak or phantom reference.
                     * Clearing CLASS_ISREFERENCE will reveal which.
                     */
                    refFlags &= ~CLASS_ISREFERENCE;
                    if (refFlags == CLASS_ISWEAKREFERENCE) {
                        ADD_REF_TO_LIST(gcHeap->weakReferences, obj);
                    } else if (refFlags == CLASS_ISPHANTOMREFERENCE) {
                        ADD_REF_TO_LIST(gcHeap->phantomReferences, obj);
                    } else {
                        assert(!"Unknown reference type");
                    }
                }
#undef ADD_REF_TO_LIST
            }
        }

    skip_reference:
        /* If this is a class object, mark various other things that
         * its internals point to.
         *
         * All class objects are instances of java.lang.Class,
         * including the java.lang.Class class object.
         */
        if (clazz == gDvm.classJavaLangClass) {
            scanClassObject((ClassObject *)obj, ctx);
        }
    }

#if WITH_OBJECT_HEADERS
    gMarkParent = NULL;
#endif
}
/*
 * Called by dlmalloc_inspect_all.  If used_bytes != 0 then start is
 * the start of a malloc-ed piece of memory of size used_bytes.  If
 * start is 0 then start is the beginning of any free space not
 * including dlmalloc's book keeping and end the start of the next
 * dlmalloc chunk.  Regions purely containing book keeping don't
 * callback.
 */
static void heap_chunk_callback(void* start, void* end, size_t used_bytes,
                                void* arg)
{
    u1 state;
    HeapChunkContext *ctx = (HeapChunkContext *)arg;
    UNUSED_PARAMETER(end);

    if (used_bytes == 0) {
        if (start == NULL) {
            // Reset for start of new heap.
            ctx->startOfNextMemoryChunk = NULL;
            flush_hpsg_chunk(ctx);
        }
        // Only process in use memory so that free region information
        // also includes dlmalloc book keeping.
        return;
    }

    /* If we're looking at the native heap, we'll just return
     * (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks
     */
    bool native = ctx->type == CHUNK_TYPE("NHSG");

    if (ctx->startOfNextMemoryChunk != NULL) {
        // Transmit any pending free memory.  Native free memory of
        // over kMaxFreeLen could be because of the use of mmaps, so
        // don't report.  If not free memory then start a new segment.
        bool flush = true;
        if (start > ctx->startOfNextMemoryChunk) {
            const size_t kMaxFreeLen = 2 * SYSTEM_PAGE_SIZE;
            void* freeStart = ctx->startOfNextMemoryChunk;
            void* freeEnd = start;
            size_t freeLen = (char*)freeEnd - (char*)freeStart;
            if (!native || freeLen < kMaxFreeLen) {
                append_chunk(ctx, HPSG_STATE(SOLIDITY_FREE, 0),
                             freeStart, freeLen);
                flush = false;
            }
        }
        if (flush) {
            ctx->startOfNextMemoryChunk = NULL;
            flush_hpsg_chunk(ctx);
        }
    }
    const Object *obj = (const Object *)start;

    /* It's an allocated chunk.  Figure out what it is.
     */
//TODO: if ctx.merge, see if this chunk is different from the last chunk.
//      If it's the same, we should combine them.
    if (!native && dvmIsValidObject(obj)) {
        ClassObject *clazz = obj->clazz;
        if (clazz == NULL) {
            /* The object was probably just created
             * but hasn't been initialized yet.
             */
            state = HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
        } else if (dvmIsTheClassClass(clazz)) {
            state = HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT);
        } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISARRAY)) {
            if (IS_CLASS_FLAG_SET(clazz, CLASS_ISOBJECTARRAY)) {
                state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
            } else {
                /* The element width picks the reported array kind. */
                switch (clazz->elementClass->primitiveType) {
                case PRIM_BOOLEAN:
                case PRIM_BYTE:
                    state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_1);
                    break;
                case PRIM_CHAR:
                case PRIM_SHORT:
                    state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_2);
                    break;
                case PRIM_INT:
                case PRIM_FLOAT:
                    state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
                    break;
                case PRIM_DOUBLE:
                case PRIM_LONG:
                    state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_8);
                    break;
                default:
                    assert(!"Unknown GC heap object type");
                    state = HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
                    break;
                }
            }
        } else {
            state = HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
        }
    } else {
        obj = NULL; // it's not actually an object
        state = HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
    }
    append_chunk(ctx, state, start, used_bytes + HEAP_SOURCE_CHUNK_OVERHEAD);
    /* Remember where this chunk ends so the next callback can detect and
     * report the intervening free gap, if any.
     */
    ctx->startOfNextMemoryChunk =
        (char*)start + used_bytes + HEAP_SOURCE_CHUNK_OVERHEAD;
}
/*
 * Issue a method call with a variable number of arguments.  We process
 * the contents of "args" by scanning the method signature.
 *
 * Pass in NULL for "obj" on calls to static methods.
 *
 * We don't need to take the class as an argument because, in Dalvik,
 * we don't need to worry about static synchronized methods.
 */
void dvmCallMethodV(Thread* self, const Method* method, Object* obj,
    bool fromJni, JValue* pResult, va_list args)
{
    const char* desc = &(method->shorty[1]); // [0] is the return type.
    int verifyCount = 0;
    ClassObject* clazz;
    u4* ins;

    clazz = callPrep(self, method, obj, false);
    if (clazz == NULL)
        return;

    /* "ins" for new frame start at frame pointer plus locals */
    ins = ((u4*)self->curFrame) + (method->registersSize - method->insSize);

    //LOGD("  FP is %p, INs live at >= %p\n", self->curFrame, ins);

    /* put "this" pointer into in0 if appropriate */
    if (!dvmIsStaticMethod(method)) {
#ifdef WITH_EXTRA_OBJECT_VALIDATION
        assert(obj != NULL && dvmIsValidObject(obj));
#endif
        *ins++ = (u4) obj;
        verifyCount++;
    }

    /* Marshal each argument into the ins according to its shorty char;
     * verifyCount tracks how many register slots we've filled.
     */
    JNIEnv* env = self->jniEnv;
    while (*desc != '\0') {
        switch (*(desc++)) {
            case 'D': case 'J': {
                /* 64-bit values occupy two register slots. */
                u8 val = va_arg(args, u8);
                memcpy(ins, &val, 8);       // EABI prevents direct store
                ins += 2;
                verifyCount += 2;
                break;
            }
            case 'F': {
                /* floats were normalized to doubles; convert back */
                float f = (float) va_arg(args, double);
                *ins++ = dvmFloatToU4(f);
                verifyCount++;
                break;
            }
            case 'L': {     /* 'shorty' descr uses L for all refs, incl array */
                void* argObj = va_arg(args, void*);
                assert(obj == NULL || dvmIsValidObject(obj));
                /* JNI callers pass indirect references; decode them first. */
                if (fromJni)
                    *ins++ = (u4) dvmDecodeIndirectRef(env, argObj);
                else
                    *ins++ = (u4) argObj;
                verifyCount++;
                break;
            }
            default: {
                /* Z B C S I -- all passed as 32-bit integers */
                *ins++ = va_arg(args, u4);
                verifyCount++;
                break;
            }
        }
    }

#ifndef NDEBUG
    if (verifyCount != method->insSize) {
        LOGE("Got vfycount=%d insSize=%d for %s.%s\n", verifyCount,
            method->insSize, clazz->descriptor, method->name);
        assert(false);
        goto bail;
    }
#endif

    //dvmDumpThreadStack(dvmThreadSelf());

    if (dvmIsNativeMethod(method)) {
#ifdef WITH_PROFILER
        TRACE_METHOD_ENTER(self, method);
#endif
        /*
         * Because we leave no space for local variables, "curFrame" points
         * directly at the method arguments.
         */
        (*method->nativeFunc)(self->curFrame, pResult, method, self);
#ifdef WITH_PROFILER
        TRACE_METHOD_EXIT(self, method);
#endif
    } else {
        dvmInterpret(self, method, pResult);
    }

bail:
    dvmPopFrame(self);
}