/*
 * public native int preloadClasses()
 *
 * Loads every class named in preloadClassesTable using the caller's
 * class loader.  Load failures are logged and cleared rather than
 * propagated.  Returns the number of classes that loaded successfully,
 * or 0 if the calling class cannot be determined.
 */
static void Dalvik_dalvik_system_VMRuntime_preloadClasses(const u4* args,
    JValue* pResult)
{
    UNUSED_PARAMETER(args);

    ClassObject* caller = dvmGetCallerClass(dvmThreadSelf()->curFrame);
    if (caller == NULL)
        RETURN_INT(0);

    Object* loader = (Object*) caller->classLoader;
    const size_t numEntries = sizeof(preloadClassesTable) / sizeof(char*);
    int loadedCount = 0;

    for (size_t i = 0; i < numEntries; i++) {
        ClassObject* clazz =
            dvmFindClassByCstrName(preloadClassesTable[i], loader);
        if (clazz != NULL) {
            loadedCount++;
        } else {
            /* Log and swallow the failure; keep preloading the rest. */
            dvmLogExceptionStackTrace();
            dvmClearException(dvmThreadSelf());
        }
    }

    RETURN_INT(loadedCount);
}
/*
 * Resolve a static field reference.  The DexFile format doesn't distinguish
 * between static and instance field references, so the "resolved" pointer
 * in the Dex struct will have the wrong type.  We trivially cast it here.
 *
 * Causes the field's class to be initialized.
 *
 * Returns NULL (with an exception pending) on failure.
 */
StaticField* dvmResolveStaticField(const ClassObject* referrer, u4 sfieldIdx)
{
    DvmDex* pDvmDex = referrer->pDvmDex;
    ClassObject* resClass;
    const DexFieldId* pFieldId;
    StaticField* resField;

    pFieldId = dexGetFieldId(pDvmDex->pDexFile, sfieldIdx);

    /*
     * Find the field's class.
     */
    resClass = dvmResolveClass(referrer, pFieldId->classIdx, false);
    if (resClass == NULL) {
        /* class resolution already raised the exception */
        assert(dvmCheckException(dvmThreadSelf()));
        return NULL;
    }

    /* search resClass and its superclasses by name + type descriptor */
    resField = dvmFindStaticFieldHier(resClass,
                dexStringById(pDvmDex->pDexFile, pFieldId->nameIdx),
                dexStringByTypeIdx(pDvmDex->pDexFile, pFieldId->typeIdx));
    if (resField == NULL) {
        dvmThrowNoSuchFieldError(
            dexStringById(pDvmDex->pDexFile, pFieldId->nameIdx));
        return NULL;
    }

    /*
     * If we're the first to resolve the field in which this class resides,
     * we need to do it now.  Note that, if the field was inherited from
     * a superclass, it is not necessarily the same as "resClass".
     */
    if (!dvmIsClassInitialized(resField->clazz) &&
        !dvmInitClass(resField->clazz))
    {
        assert(dvmCheckException(dvmThreadSelf()));
        return NULL;
    }

    /*
     * If the class has been initialized, add a pointer to our data structure
     * so we don't have to jump through the hoops again.  If it's still
     * initializing (i.e. this thread is executing <clinit>), don't do
     * the store, otherwise other threads could use the field without waiting
     * for class init to finish.
     */
    if (dvmIsClassInitialized(resField->clazz)) {
        dvmDexSetResolvedField(pDvmDex, sfieldIdx, (Field*) resField);
    } else {
        LOGVV("--- not caching resolved field %s.%s (class init=%d/%d)",
            resField->clazz->descriptor, resField->name,
            dvmIsClassInitializing(resField->clazz),
            dvmIsClassInitialized(resField->clazz));
    }

    return resField;
}
/*
 * Native backend for XposedBridge.setObjectClassNative().
 *
 * Forcibly replaces the runtime class of an existing object, initializing
 * the target class first if needed.  No layout-compatibility check is
 * performed here; the Java-side caller is responsible for ensuring the new
 * class is compatible with the object's fields.
 */
static void de_robv_android_xposed_XposedBridge_setObjectClassNative(JNIEnv* env,
        jclass clazz, jobject objIndirect, jclass clzIndirect)
{
    // FIX: guard against null arguments; decoding a null indirect
    // reference would otherwise crash the VM.
    if (objIndirect == NULL || clzIndirect == NULL) {
        dvmThrowIllegalArgumentException("obj and clz must not be null");
        return;
    }

    Object* obj = (Object*) dvmDecodeIndirectRef(dvmThreadSelf(), objIndirect);
    ClassObject* clz = (ClassObject*) dvmDecodeIndirectRef(dvmThreadSelf(), clzIndirect);

    // Make sure the target class is initialized before any object claims it.
    if (clz->status < CLASS_INITIALIZED && !dvmInitClass(clz)) {
        ALOGE("Could not initialize class %s", clz->descriptor);
        return;
    }

    obj->clazz = clz;
}
/* * Allocate storage on the GC heap. We guarantee 8-byte alignment. * * The new storage is zeroed out. * * Note that, in rare cases, this could get called while a GC is in * progress. If a non-VM thread tries to attach itself through JNI, * it will need to allocate some objects. If this becomes annoying to * deal with, we can block it at the source, but holding the allocation * mutex should be enough. * * In rare circumstances (JNI AttachCurrentThread) we can be called * from a non-VM thread. * * Use ALLOC_DONT_TRACK when we either don't want to track an allocation * (because it's being done for the interpreter "new" operation and will * be part of the root set immediately) or we can't (because this allocation * is for a brand new thread). * * Returns NULL and throws an exception on failure. * * TODO: don't do a GC if the debugger thinks all threads are suspended */ void* dvmMalloc(size_t size, int flags) { void *ptr; dvmLockHeap(); /* Try as hard as possible to allocate some memory. */ ptr = tryMalloc(size); if (ptr != NULL) { /* We've got the memory. */ if (gDvm.allocProf.enabled) { Thread* self = dvmThreadSelf(); gDvm.allocProf.allocCount++; gDvm.allocProf.allocSize += size; if (self != NULL) { self->allocProf.allocCount++; self->allocProf.allocSize += size; } } } else { /* The allocation failed. */ if (gDvm.allocProf.enabled) { Thread* self = dvmThreadSelf(); gDvm.allocProf.failedAllocCount++; gDvm.allocProf.failedAllocSize += size; if (self != NULL) { self->allocProf.failedAllocCount++; self->allocProf.failedAllocSize += size; } } } dvmUnlockHeap(); if (ptr != NULL) { /* * If caller hasn't asked us not to track it, add it to the * internal tracking list. */ if ((flags & ALLOC_DONT_TRACK) == 0) { dvmAddTrackedAlloc((Object*)ptr, NULL); } } else { /* * The allocation failed; throw an OutOfMemoryError. */ throwOOME(); } return ptr; }
static void de_robv_android_xposed_XposedBridge_hookMethodNative(JNIEnv* env, jclass clazz, jobject reflectedMethodIndirect, jobject declaredClassIndirect, jint slot, jobject additionalInfoIndirect) { // Usage errors? if (declaredClassIndirect == NULL || reflectedMethodIndirect == NULL) { dvmThrowIllegalArgumentException( "method and declaredClass must not be null"); return; } // Find the internal representation of the method ClassObject* declaredClass = (ClassObject*) dvmDecodeIndirectRef( dvmThreadSelf(), declaredClassIndirect); Method* method = dvmSlotToMethod(declaredClass, slot); if (method == NULL) { dvmThrowNoSuchMethodError( "could not get internal representation for method"); return; } if (xposedIsHooked(method)) { ALOGD("Hook: Ignored! [%s] [%s]\n", declaredClass->descriptor, method->name); // already hooked return; } else { ALOGD("Hook: [%s] [%s]\n", declaredClass->descriptor, method->name); } // Save a copy of the original method and other hook info XposedHookInfo* hookInfo = (XposedHookInfo*) calloc(1, sizeof(XposedHookInfo)); memcpy(hookInfo, method, sizeof(hookInfo->originalMethodStruct)); hookInfo->reflectedMethod = dvmDecodeIndirectRef(dvmThreadSelf(), env->NewGlobalRef(reflectedMethodIndirect)); hookInfo->additionalInfo = dvmDecodeIndirectRef(dvmThreadSelf(), env->NewGlobalRef(additionalInfoIndirect)); // Replace method with our own code SET_METHOD_FLAG(method, ACC_NATIVE); method->nativeFunc = &xposedCallHandler; method->insns = (const u2*) hookInfo; method->registersSize = method->insSize; method->outsSize = 0; if (PTR_gDvmJit != NULL) { // reset JIT cache MEMBER_VAL(PTR_gDvmJit, DvmJitGlobals, codeCacheFull) = true; } }
/*
 * Resolve a string reference.
 *
 * Finding the string is easy.  We need to return a reference to a
 * java/lang/String object, not a bunch of characters, which means the
 * first time we get here we need to create an interned string.
 *
 * Returns NULL (with an exception pending) on failure.
 */
StringObject* dvmResolveString(const ClassObject* referrer, u4 stringIdx)
{
    DvmDex* pDvmDex = referrer->pDvmDex;
    StringObject* strObj;
    StringObject* internStrObj;
    const char* utf8;
    u4 utf16Size;

    LOGVV("+++ resolving string, referrer is %s\n", referrer->descriptor);

    /*
     * Create a UTF-16 version so we can trivially compare it to what's
     * already interned.
     */
    utf8 = dexStringAndSizeById(pDvmDex->pDexFile, stringIdx, &utf16Size);
    strObj = dvmCreateStringFromCstrAndLength(utf8, utf16Size,
                ALLOC_DEFAULT);
    if (strObj == NULL) {
        /* ran out of space in GC heap? */
        assert(dvmCheckException(dvmThreadSelf()));
        goto bail;
    }

    /*
     * Add it to the intern list.  The return value is the one in the
     * intern list, which (due to race conditions) may or may not be
     * the one we just created.  The intern list is synchronized, so
     * there will be only one "live" version.
     *
     * By requesting an immortal interned string, we guarantee that
     * the returned object will never be collected by the GC.
     *
     * A NULL return here indicates some sort of hashing failure.
     */
    internStrObj = dvmLookupImmortalInternedString(strObj);
    /* drop our tracked ref on the temporary; the interned copy is immortal */
    dvmReleaseTrackedAlloc((Object*) strObj, NULL);
    strObj = internStrObj;
    if (strObj == NULL) {
        assert(dvmCheckException(dvmThreadSelf()));
        goto bail;
    }

    /* save a reference so we can go straight to the object next time */
    dvmDexSetResolvedString(pDvmDex, stringIdx, strObj);

bail:
    return strObj;
}
/*
 * Unlinks a thread from a monitor's wait set.  The monitor lock must
 * be held by the caller of this routine.  Silently does nothing if the
 * thread is not present in the set.
 */
static void waitSetRemove(Monitor *mon, Thread *thread)
{
    Thread **link;

    assert(mon != NULL);
    assert(mon->owner == dvmThreadSelf());
    assert(thread != NULL);
    assert(waitSetCheck(mon) == 0);

    /*
     * Walk the singly-linked wait set through a pointer-to-pointer so
     * the head and interior cases are handled uniformly.
     */
    for (link = &mon->waitSet; *link != NULL; link = &(*link)->waitNext) {
        if (*link == thread) {
            *link = thread->waitNext;
            thread->waitNext = NULL;
            return;
        }
    }
}
/* * Print the stack trace of the current exception on stderr. This is called * from the JNI ExceptionDescribe call. * * For consistency we just invoke the Throwable printStackTrace method, * which might be overridden in the exception object. * * Exceptions thrown during the course of printing the stack trace are * ignored. */ void dvmPrintExceptionStackTrace(void) { Thread* self = dvmThreadSelf(); Object* exception; Method* printMethod; exception = self->exception; if (exception == NULL) return; self->exception = NULL; printMethod = dvmFindVirtualMethodHierByDescriptor(exception->clazz, "printStackTrace", "()V"); if (printMethod != NULL) { JValue unused; dvmCallMethod(self, printMethod, exception, &unused); } else { LOGW("WARNING: could not find printStackTrace in %s\n", exception->clazz->descriptor); } if (self->exception != NULL) { LOGW("NOTE: exception thrown while printing stack trace: %s\n", self->exception->clazz->descriptor); } self->exception = exception; }
/*
 * If the concurrent GC is running, wait for it to finish.  The caller
 * must hold the heap lock.
 *
 * Note: the second dvmChangeStatus() could stall if we were in RUNNING
 * on entry, and some other thread has asked us to suspend.  In that
 * case we will be suspended with the heap lock held, which can lead to
 * deadlock if the other thread tries to do something with the managed heap.
 * For example, the debugger might suspend us and then execute a method that
 * allocates memory.  We can avoid this situation by releasing the lock
 * before self-suspending.  (The developer can work around this specific
 * situation by single-stepping the VM.  Alternatively, we could disable
 * concurrent GC when the debugger is attached, but that might change
 * behavior more than is desirable.)
 *
 * This should not be a problem in production, because any GC-related
 * activity will grab the lock before issuing a suspend-all.  (We may briefly
 * suspend when the GC thread calls dvmUnlockHeap before dvmResumeAllThreads,
 * but there's no risk of deadlock.)
 *
 * Returns true if a GC was in progress when we arrived (i.e. we waited).
 */
bool dvmWaitForConcurrentGcToComplete()
{
    ATRACE_BEGIN("GC: Wait For Concurrent");
    /* capture before the loop: the return value reports whether a GC was
     * already running on entry, not whether one is running on exit */
    bool waited = gDvm.gcHeap->gcRunning;
    Thread *self = dvmThreadSelf();
    assert(self != NULL);
    u4 start = dvmGetRelativeTimeMsec();
#ifdef FASTIVA
    // Ensure no Java-object reference is used in local-stack.
    // and save Java-object reference maybe in registers.
    FASTIVA_SUSPEND_STACK_unsafe(self);
    ThreadStatus oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
    while (gDvm.gcHeap->gcRunning) {
        dvmWaitCond(&gDvm.gcHeapCond, &gDvm.gcHeapLock);
    }
    dvmChangeStatus(self, oldStatus);
    FASTIVA_RESUME_STACK_unsafe(self);
#else
    while (gDvm.gcHeap->gcRunning) {
        /* drop to VMWAIT while blocked so a suspend-all can proceed */
        ThreadStatus oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
        dvmWaitCond(&gDvm.gcHeapCond, &gDvm.gcHeapLock);
        dvmChangeStatus(self, oldStatus);
    }
#endif
    u4 end = dvmGetRelativeTimeMsec();
    if (end - start > 0) {
        ALOGD("WAIT_FOR_CONCURRENT_GC blocked %ums", end - start);
    }
    ATRACE_END();
    return waited;
}
/*
 * Dump the fixed-purpose ARM registers, along with some other info.
 *
 * This function MUST be compiled in ARM mode -- THUMB will yield bogus
 * results.
 *
 * This will NOT preserve r0-r3/ip.
 *
 * The register-asm locals below pin each variable to the physical
 * register the mterp assembly dedicates to that role; do not reorder
 * or remove them.
 */
void dvmMterpDumpArmRegs(uint32_t r0, uint32_t r1, uint32_t r2, uint32_t r3)
{
    register uint32_t rPC       asm("r4");
    register uint32_t rFP       asm("r5");
    register uint32_t rSELF     asm("r6");
    register uint32_t rINST     asm("r7");
    register uint32_t rIBASE    asm("r8");
    register uint32_t r9        asm("r9");
    register uint32_t r10       asm("r10");

    //extern char dvmAsmInstructionStart[];

    printf("REGS: r0=%08x r1=%08x r2=%08x r3=%08x\n", r0, r1, r2, r3);
    printf("    : rPC=%08x rFP=%08x rSELF=%08x rINST=%08x\n",
        rPC, rFP, rSELF, rINST);
    printf("    : rIBASE=%08x r9=%08x r10=%08x\n", rIBASE, r9, r10);

    //Thread* self = (Thread*) rSELF;
    //const Method* method = self->method;
    printf("    + self is %p\n", dvmThreadSelf());
    //printf("    + currently in %s.%s %s\n",
    //    method->clazz->descriptor, method->name, method->shorty);
    //printf("    + dvmAsmInstructionStart = %p\n", dvmAsmInstructionStart);
    //printf("    + next handler for 0x%02x = %p\n",
    //    rINST & 0xff, dvmAsmInstructionStart + (rINST & 0xff) * 64);
}
/*
 * Native backend for DexspyInstaller.hookMethodNative().
 *
 * Replaces the method identified by (declaredClass, slot) with the native
 * trampoline dexspyCallHandler.  A full copy of the original Method struct
 * is saved in dexspyOriginalMethods first so the handler can invoke the
 * original implementation later.
 */
static void miui_dexspy_DexspyInstaller_hookMethodNative(JNIEnv* env, jclass clazz,
    jobject declaredClassIndirect, jint slot)
{
    // Usage errors?
    if (declaredClassIndirect == NULL) {
        dvmThrowIllegalArgumentException("declaredClass must not be null");
        return;
    }

    // Find the internal representation of the method
    ClassObject* declaredClass = (ClassObject*) dvmDecodeIndirectRef(dvmThreadSelf(), declaredClassIndirect);
    Method* method = dvmSlotToMethod(declaredClass, slot);
    if (method == NULL) {
        dvmThrowNoSuchMethodError("could not get internal representation for method");
        return;
    }

    if (findOriginalMethod(method) != dexspyOriginalMethods.end()) {
        ALOGD("why this method already hooked: %s:%s(%s)",
            method->clazz->descriptor, method->name, method->shorty);
        // already hooked
        return;
    }

    // Save a copy of the original method (struct copy, not a pointer)
    dexspyOriginalMethods.push_front(*method);

    // Replace method with our own code
    SET_METHOD_FLAG(method, ACC_NATIVE);
    method->nativeFunc = &dexspyCallHandler;
    /* native methods keep their arguments in the in-registers */
    method->registersSize = method->insSize;
    method->outsSize = 0;
#ifdef WITH_JIT
    // reset JIT cache so compiled copies of the original don't bypass the hook
    gDvmJit.codeCacheFull = true;
#endif
}
/*
 * static Thread currentThread()
 *
 * Returns the managed java.lang.Thread object for the calling thread.
 */
static void Dalvik_java_lang_VMThread_currentThread(const u4* args,
    JValue* pResult)
{
    UNUSED_PARAMETER(args);

    Thread* self = dvmThreadSelf();
    RETURN_PTR(self->threadObj);
}
/*
 * Returns the VM's property list as a String[].
 */
static void Dalvik_dalvik_system_VMRuntime_properties(const u4* args,
    JValue* pResult)
{
    /* Materialize the property list, then stop tracking the array before
     * handing it back to the caller. */
    ArrayObject* propArray = dvmCreateStringArray(*gDvm.properties);
    dvmReleaseTrackedAlloc((Object*) propArray, dvmThreadSelf());
    RETURN_PTR(propArray);
}
/*
 * fastiva entry point for VMRuntime.newNonMovableArray().
 * NOTE(review): the #endif below pairs with an #if outside this view
 * (presumably selecting between this and an alternate signature) --
 * confirm against the full file.
 */
java_lang_Object_p fastiva_Dalvik_dalvik_system_VMRuntime_newNonMovableArray(dalvik_system_VMRuntime_p self, java_lang_Class_p elementClass, jint length) {
#endif
    /* Validate arguments before touching the allocator. */
    if (elementClass == NULL) {
        dvmThrowNullPointerException("elementClass == null");
        THROW_V();
    }
    if (length < 0) {
        dvmThrowNegativeArraySizeException(length);
        THROW_V();
    }

    // TODO: right now, we don't have a copying collector, so there's no need
    // to do anything special here, but we ought to pass the non-movability
    // through to the allocator.
    ClassObject* arrayClass = dvmFindArrayClassForElement(elementClass);
    ArrayObject* newArray = dvmAllocArrayByClass(arrayClass, length,
                                                 ALLOC_NON_MOVING);
    if (newArray == NULL) {
        /* allocator raised OOME (or similar) */
        assert(dvmCheckException(dvmThreadSelf()));
        THROW_V();
    }
    dvmReleaseTrackedAlloc((Object*) newArray, NULL);
    RETURN_PTR(newArray);
}
/*
 * Native backend for XposedBridge.hookMethodNative() (older 2-arg variant).
 *
 * Replaces the method identified by (declaredClass, slot) with the native
 * trampoline xposedCallHandler, saving a copy of the original Method
 * (widened to MethodXposedExt) in xposedOriginalMethods first.
 */
static void de_robv_android_xposed_XposedBridge_hookMethodNative(JNIEnv* env, jclass clazz,
    jobject declaredClassIndirect, jint slot)
{
    // Usage errors?
    if (declaredClassIndirect == NULL) {
        dvmThrowIllegalArgumentException("declaredClass must not be null");
        return;
    }

    // Find the internal representation of the method
    ClassObject* declaredClass = (ClassObject*) dvmDecodeIndirectRef(dvmThreadSelf(), declaredClassIndirect);
    Method* method = dvmSlotToMethod(declaredClass, slot);
    if (method == NULL) {
        dvmThrowNoSuchMethodError("could not get internal representation for method");
        return;
    }

    if (findXposedOriginalMethod(method) != xposedOriginalMethods.end()) {
        // already hooked
        return;
    }

    // Save a copy of the original method (struct copy, not a pointer)
    xposedOriginalMethods.push_front(*((MethodXposedExt*)method));

    // Replace method with our own code
    SET_METHOD_FLAG(method, ACC_NATIVE);
    method->nativeFunc = &xposedCallHandler;
    /* native methods keep their arguments in the in-registers */
    method->registersSize = method->insSize;
    method->outsSize = 0;

    if (PTR_gDvmJit != NULL) {
        // reset JIT cache so compiled copies don't bypass the hook
        MEMBER_VAL(PTR_gDvmJit, DvmJitGlobals, codeCacheFull) = true;
    }
}
/*
 * fastiva entry point for VMRuntime.properties(); returns the VM property
 * list as a String[].  NOTE(review): the #endif below pairs with an #if
 * outside this view -- confirm against the full file.
 */
java_lang_String_ap fastiva_Dalvik_dalvik_system_VMRuntime_properties(dalvik_system_VMRuntime_p self) {
#endif
    /* Materialize the list, then stop tracking it before returning. */
    ArrayObject* result = dvmCreateStringArray(*gDvm.properties);
    dvmReleaseTrackedAlloc((Object*) result, dvmThreadSelf());
    RETURN_PTR((java_lang_String_ap)result);
}
/*
 * Mterp entry point.  Configures the interpreter state hanging off
 * "self" and hands control to the assembly interpreter loop.
 */
void dvmMterpStd(Thread* self)
{
    /* configure mterp items */
    self->interpSave.methodClassDex = self->interpSave.method->clazz->pDvmDex;

    IF_LOGVV() {
        char* desc = dexProtoCopyMethodDescriptor(
                         &self->interpSave.method->prototype);
        LOGVV("mterp threadid=%d : %s.%s %s",
            dvmThreadSelf()->threadId,
            self->interpSave.method->clazz->descriptor,
            self->interpSave.method->name,
            desc);
        free(desc);
    }
    //ALOGI("self is %p, pc=%p, fp=%p", self, self->interpSave.pc,
    //      self->interpSave.curFrame);
    //ALOGI("first instruction is 0x%04x", self->interpSave.pc[0]);

    /*
     * Handle any ongoing profiling and prep for debugging
     */
    if (self->interpBreak.ctl.subMode != 0) {
        TRACE_METHOD_ENTER(self, self->interpSave.method);
        self->debugIsMethodEntry = true;   // Always true on startup
    }

    dvmMterpStdRun(self);

#ifdef LOG_INSTR
    ALOGD("|-- Leaving interpreter loop");
#endif
}
/*
 * fastiva binding for Object.notify().  NOTE(review): the #endif below
 * pairs with an #if outside this view -- confirm against the full file.
 */
void fastiva_Dalvik_java_lang_Object_notify(java_lang_Object_p thisPtr) {
	Thread* self = dvmThreadSelf();
#endif
    /* Notify a waiter on this object's monitor; any exception raised by
     * dvmObjectNotify is propagated by MAY_THROW_VOID. */
    dvmObjectNotify(self, thisPtr);
    MAY_THROW_VOID();
}
/*
 * Resolve a native method and invoke it.
 *
 * This is executed as if it were a native bridge or function.  If the
 * resolution succeeds, method->insns is replaced, and we don't go through
 * here again.
 *
 * Initializes method's class if necessary.
 *
 * An exception is thrown on resolution failure.
 */
void dvmResolveNativeMethod(const u4* args, JValue* pResult,
    const Method* method, Thread* self)
{
    ClassObject* clazz = method->clazz;
    void* func;

    /*
     * If this is a static method, it could be called before the class
     * has been initialized.
     */
    if (dvmIsStaticMethod(method)) {
        if (!dvmIsClassInitialized(clazz) && !dvmInitClass(clazz)) {
            /* initialization failed; exception already pending */
            assert(dvmCheckException(dvmThreadSelf()));
            return;
        }
    } else {
        assert(dvmIsClassInitialized(clazz) ||
               dvmIsClassInitializing(clazz));
    }

    /* start with our internal-native methods */
    func = dvmLookupInternalNativeMethod(method);
    if (func != NULL) {
        /* resolution always gets the same answer, so no race here */
        IF_LOGVV() {
            char* desc = dexProtoCopyMethodDescriptor(&method->prototype);
            LOGVV("+++ resolved native %s.%s %s, invoking\n",
                clazz->descriptor, method->name, desc);
            free(desc);
        }
        /* NOTE(review): this chunk is truncated here -- the tail of the
         * function (marking the method resolved and dispatching the call)
         * is not visible in this view. */
/*
 * C mterp entry point.  This just calls the various C fallbacks, making
 * this a slow but portable interpreter.
 *
 * This is only used for the "allstubs" variant.
 */
void dvmMterpStdRun(Thread* self)
{
    jmp_buf jmpBuf;

    self->interpSave.bailPtr = &jmpBuf;

    /* We exit via a longjmp */
    if (setjmp(jmpBuf)) {
        LOGVV("mterp threadid=%d returning", dvmThreadSelf()->threadId);
        return;
    }

    /* run until somebody longjmp()s out */
    while (true) {
        typedef void (*Handler)(Thread* self);

        /* NOTE(review): 'pc' and 'fp' are not declared in this chunk --
         * presumably file-scope cached interpreter state or macros; confirm
         * against the full file. */
        u2 inst = /*self->interpSave.*/pc[0];
        /*
         * In mterp, dvmCheckBefore is handled via the altHandlerTable,
         * while in the portable interpreter it is part of the handler
         * FINISH code.  For allstubs, we must do an explicit check
         * in the interpretation loop.
         */
        if (self->interpBreak.ctl.subMode) {
            dvmCheckBefore(pc, fp, self);
        }
        Handler handler = (Handler) gDvmMterpHandlers[inst & 0xff];
        (void) gDvmMterpHandlerNames;   /* avoid gcc "defined but not used" */
        LOGVV("handler %p %s",
              handler, (const char*) gDvmMterpHandlerNames[inst & 0xff]);
        (*handler)(self);
    }
}
/*
 * Allocates a new array that the GC will not move, for use by native code
 * that holds raw pointers into it.
 */
static void Dalvik_dalvik_system_VMRuntime_newNonMovableArray(const u4* args,
    JValue* pResult)
{
    ClassObject* elementClass = (ClassObject*) args[1];
    int length = args[2];

    /* Validate arguments before touching the allocator. */
    if (elementClass == NULL) {
        dvmThrowNullPointerException("elementClass == null");
        RETURN_VOID();
    }
    if (length < 0) {
        dvmThrowNegativeArraySizeException(length);
        RETURN_VOID();
    }

    // TODO: right now, we don't have a copying collector, so there's no need
    // to do anything special here, but we ought to pass the non-movability
    // through to the allocator.
    ClassObject* arrayClass = dvmFindArrayClassForElement(elementClass);
    ArrayObject* result =
        dvmAllocArrayByClass(arrayClass, length, ALLOC_NON_MOVING);
    if (result == NULL) {
        /* allocator raised the exception (e.g. OOME) */
        assert(dvmCheckException(dvmThreadSelf()));
        RETURN_VOID();
    }
    dvmReleaseTrackedAlloc((Object*) result, NULL);
    RETURN_PTR(result);
}
/*
 * fastiva binding for Object.wait(millis, nanos).  NOTE(review): the
 * #endif below pairs with an #if outside this view -- confirm against
 * the full file.
 */
void fastiva_Dalvik_java_lang_Object_wait(java_lang_Object_p thisPtr, jlonglong millis, jint nanos) {
	Thread* self = dvmThreadSelf();
#endif
    /* Block on the object's monitor (interruptible: last arg true); any
     * exception raised by dvmObjectWait is propagated by MAY_THROW_VOID. */
    dvmObjectWait(self, thisPtr, millis, nanos, true);
    MAY_THROW_VOID();
}
/*
 * Resolve a static field reference.  The DexFile format doesn't distinguish
 * between static and instance field references, so the "resolved" pointer
 * in the Dex struct will have the wrong type.  We trivially cast it here.
 *
 * Causes the field's class to be initialized.
 *
 * Returns NULL (with an exception pending) on failure.
 */
StaticField* dvmResolveStaticField(const ClassObject* referrer, u4 sfieldIdx)
{
    DvmDex* pDvmDex = referrer->pDvmDex;
    ClassObject* resClass;
    const DexFieldId* pFieldId;
    StaticField* resField;

    pFieldId = dexGetFieldId(pDvmDex->pDexFile, sfieldIdx);

    /*
     * Find the field's class.
     */
    resClass = dvmResolveClass(referrer, pFieldId->classIdx, false);
    if (resClass == NULL) {
        /* class resolution already raised the exception */
        assert(dvmCheckException(dvmThreadSelf()));
        return NULL;
    }

    /* search resClass and its superclasses by name + type descriptor */
    resField = dvmFindStaticFieldHier(resClass,
                dexStringById(pDvmDex->pDexFile, pFieldId->nameIdx),
                dexStringByTypeIdx(pDvmDex->pDexFile, pFieldId->typeIdx));
    if (resField == NULL) {
        dvmThrowException("Ljava/lang/NoSuchFieldError;",
            dexStringById(pDvmDex->pDexFile, pFieldId->nameIdx));
        return NULL;
    }

    /*
     * If we're the first to resolve the field in which this class resides,
     * we need to do it now.  Note that, if the field was inherited from
     * a superclass, it is not necessarily the same as "resClass".
     */
    if (!dvmIsClassInitialized(resField->field.clazz) &&
        !dvmInitClass(resField->field.clazz))
    {
        assert(dvmCheckException(dvmThreadSelf()));
        return NULL;
    }

    /*
     * FIX: only cache the resolved field once the class is fully
     * initialized.  If it is still initializing (i.e. this thread is
     * executing <clinit>), storing the resolved pointer now would let
     * other threads use the field without waiting for class init to
     * finish.  Resolution always yields the same answer, so skipping
     * the store here only costs a re-resolution later.
     */
    if (dvmIsClassInitialized(resField->field.clazz)) {
        dvmDexSetResolvedField(pDvmDex, sfieldIdx, (Field*) resField);
    }

    return resField;
}
/*
 * Create a new java.lang.reflect.Field object from "field".
 *
 * The Field spec doesn't specify the constructor.  We're going to use the
 * one from our existing class libs:
 *
 *   private Field(Class declaringClass, Class type, String name, int slot)
 *
 * Returns NULL on failure (an exception may be pending).  On success the
 * caller owns a tracked reference and must dvmReleaseTrackedAlloc(result).
 */
static Object* createFieldObject(Field* field, const ClassObject* clazz)
{
    Object* result = NULL;
    Object* fieldObj = NULL;
    StringObject* nameObj = NULL;
    ClassObject* type;
    char* mangle;
    char* cp;
    int slot, field_idx;

    assert(dvmIsClassInitialized(gDvm.classJavaLangReflectField));

    fieldObj = dvmAllocObject(gDvm.classJavaLangReflectField, ALLOC_DEFAULT);
    if (fieldObj == NULL)
        goto bail;

    /* convert the field's type signature into a Class object; the strdup
     * is needed because convertSignaturePartToClass advances the cursor */
    cp = mangle = strdup(field->signature);
    type = convertSignaturePartToClass(&cp, clazz);
    free(mangle);
    if (type == NULL)
        goto bail;

    nameObj = dvmCreateStringFromCstr(field->name);
    if (nameObj == NULL)
        goto bail;

    slot = fieldToSlot(field, clazz);
    field_idx = dvmGetFieldIdx(field);

    /* invoke the Field constructor on the freshly allocated object */
    JValue unused;
    dvmCallMethod(dvmThreadSelf(), gDvm.methJavaLangReflectField_init,
        fieldObj, &unused, clazz, type, nameObj, slot, field_idx);
    if (dvmCheckException(dvmThreadSelf())) {
        ALOGD("Field class init threw exception");
        goto bail;
    }

    result = fieldObj;

bail:
    /* release our tracked refs; fieldObj is only released on failure,
     * since on success it is returned to the caller still tracked */
    dvmReleaseTrackedAlloc((Object*) nameObj, NULL);
    if (result == NULL)
        dvmReleaseTrackedAlloc((Object*) fieldObj, NULL);
    /* caller must dvmReleaseTrackedAlloc(result) */
    return result;
}
/*
 * public static void resetAllocCount(int kinds)
 *
 * Clears allocation-profiling counters.  The low 16 bits of "kinds"
 * select global counter fields, the high 16 bits select the calling
 * thread's counter fields.
 */
static void Dalvik_dalvik_system_VMDebug_resetAllocCount(const u4* args,
    JValue* pResult)
{
    unsigned int kinds = args[0];
    Thread* self = dvmThreadSelf();

    clearAllocProfStateFields(&gDvm.allocProf, kinds & 0xffff);
    clearAllocProfStateFields(&self->allocProf, kinds >> 16);
    RETURN_VOID();
}
/*
 * Native backend for XposedBridge.cloneToSubclassNative().
 *
 * Allocates a fresh instance of "clz" and copies all instance-field bytes
 * from "obj" into it, returning a local reference to the copy (or NULL if
 * allocation failed, with an exception pending from AllocObject).
 */
static jobject de_robv_android_xposed_XposedBridge_cloneToSubclassNative(JNIEnv* env, jclass clazz,
    jobject objIndirect, jclass clzIndirect)
{
    Object* obj = (Object*) dvmDecodeIndirectRef(dvmThreadSelf(), objIndirect);
    ClassObject* clz = (ClassObject*) dvmDecodeIndirectRef(dvmThreadSelf(), clzIndirect);

    /* allocate the new instance via JNI so it is a proper instance of clz */
    jobject copyIndirect = env->AllocObject(clzIndirect);
    if (copyIndirect == NULL)
        return NULL;

    Object* copy = (Object*) dvmDecodeIndirectRef(dvmThreadSelf(), copyIndirect);

    /* Copy everything after the Object header.
     * NOTE(review): this assumes clz is a subclass of obj->clazz, so the
     * copy's objectSize is at least obj's; otherwise this memcpy writes
     * past the new object.  Confirm the Java caller enforces this. */
    size_t size = obj->clazz->objectSize;
    size_t offset = sizeof(Object);
    memcpy((char*)copy + offset, (char*)obj + offset, size - offset);

    /* keep finalizer semantics for finalizable target classes */
    if (IS_CLASS_FLAG_SET(clz, CLASS_ISFINALIZABLE))
        dvmSetFinalizable(copy);

    return copyIndirect;
}
/*
 * "Standard" mterp entry point.  This sets up a "glue" structure and then
 * calls into the assembly interpreter implementation.
 *
 * (There is presently no "debug" entry point.)
 *
 * Returns false on a normal exit, true when the caller should switch to
 * the "debug" interpreter.
 */
bool dvmMterpStd(Thread* self, InterpState* glue)
{
    int changeInterp;

    /* configure mterp items */
    glue->self = self;
    glue->methodClassDex = glue->method->clazz->pDvmDex;

    glue->interpStackEnd = self->interpStackEnd;
    glue->pSelfSuspendCount = &self->suspendCount;
    glue->cardTable = gDvm.biasedCardTableBase;
#if defined(WITH_JIT)
    glue->pJitProfTable = gDvmJit.pProfTable;
    glue->ppJitProfTable = &gDvmJit.pProfTable;
    glue->jitThreshold = gDvmJit.threshold;
#endif
    /* only expose the debugger-active flag if JDWP is actually configured */
    if (gDvm.jdwpConfigured) {
        glue->pDebuggerActive = &gDvm.debuggerActive;
    } else {
        glue->pDebuggerActive = NULL;
    }
    glue->pActiveProfilers = &gDvm.activeProfilers;

    IF_LOGVV() {
        char* desc = dexProtoCopyMethodDescriptor(&glue->method->prototype);
        LOGVV("mterp threadid=%d entry %d: %s.%s %s\n",
            dvmThreadSelf()->threadId,
            glue->entryPoint,
            glue->method->clazz->descriptor,
            glue->method->name,
            desc);
        free(desc);
    }
    //LOGI("glue is %p, pc=%p, fp=%p\n", glue, glue->pc, glue->fp);
    //LOGI("first instruction is 0x%04x\n", glue->pc[0]);

    changeInterp = dvmMterpStdRun(glue);

#if defined(WITH_JIT)
    /* clear the JIT code-cache cookie unless we bailed for single-step */
    if (glue->jitState != kJitSingleStep) {
        glue->self->inJitCodeCache = NULL;
    }
#endif

    if (!changeInterp) {
        /* this is a "normal" exit; we're not coming back */
#ifdef LOG_INSTR
        LOGD("|-- Leaving interpreter loop");
#endif
        return false;
    } else {
        /* we're "standard", so switch to "debug" */
        LOGVV("  mterp returned, changeInterp=%d\n", changeInterp);
        glue->nextMode = INTERP_DBG;
        return true;
    }
}
/*
 * Account for "bytes" of native (non-GC-heap) allocation.
 *
 * If the running total crosses the GC watermark, the GC thread is signaled;
 * if it crosses the higher footprint limit, we synchronously run finalizers
 * and garbage collections until we drop back under (native allocation is
 * outpacing the GC at that point).
 */
void dvmHeapSourceRegisterNativeAllocation(int bytes)
{
    /* If we have just done a GC, ensure that the finalizers are done and update
     * the native watermarks.
     */
    if (gHs->nativeNeedToRunFinalization) {
        dvmRunFinalization();
        dvmHeapSourceUpdateMaxNativeFootprint();
        gHs->nativeNeedToRunFinalization = false;
    }

    android_atomic_add(bytes, &gHs->nativeBytesAllocated);

    if ((size_t)gHs->nativeBytesAllocated > gHs->nativeFootprintGCWatermark) {
        /* The second watermark is higher than the gc watermark. If you hit
         * this it means you are allocating native objects faster than the GC
         * can keep up with. If this occurs, we do a GC for alloc.
         */
        if ((size_t)gHs->nativeBytesAllocated > gHs->nativeFootprintLimit) {
            Thread* self = dvmThreadSelf();
            dvmRunFinalization();
            if (dvmCheckException(self)) {
                return;
            }
            /* an in-flight concurrent GC may already free enough */
            dvmLockHeap();
            bool waited = dvmWaitForConcurrentGcToComplete();
            dvmUnlockHeap();
            if (waited) {
                // Just finished a GC, attempt to run finalizers.
                dvmRunFinalization();
                if (dvmCheckException(self)) {
                    return;
                }
            }

            // If we still are over the watermark, attempt a GC for alloc and run finalizers.
            if ((size_t)gHs->nativeBytesAllocated > gHs->nativeFootprintLimit) {
                dvmLockHeap();
                dvmWaitForConcurrentGcToComplete();
                dvmCollectGarbageInternal(GC_FOR_MALLOC);
                dvmUnlockHeap();
                dvmRunFinalization();
                gHs->nativeNeedToRunFinalization = false;
                if (dvmCheckException(self)) {
                    return;
                }
            }
            /* We have just run finalizers, update the native watermark since
             * it is very likely that finalizers released native managed
             * allocations.
             */
            dvmHeapSourceUpdateMaxNativeFootprint();
        } else {
            /* over the GC watermark only: wake the concurrent GC thread */
            dvmSignalCond(&gHs->gcThreadCond);
        }
    }
}
/*
 * Try to load all classes in the specified DEX.  If they have some sort
 * of broken dependency, e.g. their superclass lives in a different DEX
 * that wasn't previously loaded into the bootstrap class path, loading
 * will fail.  This is the desired behavior.
 *
 * We have no notion of class loader at this point, so we load all of
 * the classes with the bootstrap class loader.  It turns out this has
 * exactly the behavior we want, and has no ill side effects because we're
 * running in a separate process and anything we load here will be forgotten.
 *
 * We set the CLASS_MULTIPLE_DEFS flag here if we see multiple definitions.
 * This works because we only call here as part of optimization / pre-verify,
 * not during verification as part of loading a class into a running VM.
 *
 * This returns "false" if the world is too screwed up to do anything
 * useful at all.
 */
static bool loadAllClasses(DvmDex* pDvmDex)
{
    u4 count = pDvmDex->pDexFile->pHeader->classDefsSize;
    u4 idx;
    int loaded = 0;

    LOGV("DexOpt: +++ trying to load %d classes\n", count);

    dvmSetBootPathExtraDex(pDvmDex);

    /*
     * We have some circularity issues with Class and Object that are most
     * easily avoided by ensuring that Object is never the first thing we
     * try to find.  Take care of that here.  (We only need to do this when
     * loading classes from the DEX file that contains Object, and only
     * when Object comes first in the list, but it costs very little to
     * do it in all cases.)
     */
    if (dvmFindSystemClass("Ljava/lang/Class;") == NULL) {
        LOGE("ERROR: java.lang.Class does not exist!\n");
        return false;
    }

    for (idx = 0; idx < count; idx++) {
        const DexClassDef* pClassDef;
        const char* classDescriptor;
        ClassObject* newClass;

        pClassDef = dexGetClassDef(pDvmDex->pDexFile, idx);
        classDescriptor =
            dexStringByTypeIdx(pDvmDex->pDexFile, pClassDef->classIdx);

        LOGV("+++ loading '%s'", classDescriptor);
        //newClass = dvmDefineClass(pDexFile, classDescriptor,
        //        NULL);
        newClass = dvmFindSystemClassNoInit(classDescriptor);
        if (newClass == NULL) {
            /* broken dependency; clear the pending error and keep going */
            LOGV("DexOpt: failed loading '%s'\n", classDescriptor);
            dvmClearOptException(dvmThreadSelf());
        } else if (newClass->pDvmDex != pDvmDex) {
            /*
             * We don't load the new one, and we tag the first one found
             * with the "multiple def" flag so the resolver doesn't try
             * to make it available.
             */
            LOGD("DexOpt: '%s' has an earlier definition; blocking out\n",
                classDescriptor);
            SET_CLASS_FLAG(newClass, CLASS_MULTIPLE_DEFS);
        } else {
            loaded++;
        }
    }
    LOGV("DexOpt: +++ successfully loaded %d classes\n", loaded);

    dvmSetBootPathExtraDex(NULL);
    return true;
}
/*
 * static void crash()
 *
 * Dump the current thread's interpreted stack and abort the VM.  Useful
 * for seeing both interpreted and native stack traces.
 *
 * (Might want to restrict this to debuggable processes as a security
 * measure, or check SecurityManager.checkExit().)
 */
static void Dalvik_dalvik_system_VMDebug_crash(const u4* args,
    JValue* pResult)
{
    UNUSED_PARAMETER(args);
    UNUSED_PARAMETER(pResult);

    ALOGW("Crashing VM on request");

    /* Dump our own interpreted stack first, then take the VM down. */
    Thread* self = dvmThreadSelf();
    dvmDumpThread(self, false);
    dvmAbort();
}