/*
 * JIT compiler entry point.
 *
 * Returns true on success, false on failure.
 */
bool dvmCompilerStartup(void)
{
    /* Set up the synchronization primitives used by the compiler. */
    dvmInitMutex(&gDvmJit.compilerLock);
    dvmInitMutex(&gDvmJit.compilerICPatchLock);
    dvmInitMutex(&gDvmJit.codeCacheProtectionLock);

    dvmLockMutex(&gDvmJit.compilerLock);
    pthread_cond_init(&gDvmJit.compilerQueueActivity, NULL);
    pthread_cond_init(&gDvmJit.compilerQueueEmpty, NULL);

    /* Reset the work queue to an empty state. */
    gDvmJit.compilerWorkEnqueueIndex = gDvmJit.compilerWorkDequeueIndex = 0;
    gDvmJit.compilerQueueLength = 0;
    dvmUnlockMutex(&gDvmJit.compilerLock);

    /*
     * Defer the rest of initialization until we're sure JIT'ng makes sense.
     * Launch the compiler thread (compilerThreadStart), which performs the
     * real initialization if and when it is signalled to do so.
     */
    return dvmCreateInternalThread(&gDvmJit.compilerHandle, "Compiler",
                                   compilerThreadStart, NULL);
}
/*
 * Create and initialize a hash table.
 *
 * "initialSize" is rounded up to the next power of two; "freeFunc" (may be
 * NULL) is invoked on entries when they are removed/destroyed.
 *
 * Returns NULL on allocation failure; the caller owns the returned table.
 */
HashTable* dvmHashTableCreate(size_t initialSize, HashFreeFunc freeFunc)
{
    HashTable* pHashTable;

    assert(initialSize > 0);

    pHashTable = (HashTable*) malloc(sizeof(*pHashTable));
    if (pHashTable == NULL)
        return NULL;

    dvmInitMutex(&pHashTable->lock);

    pHashTable->tableSize = dexRoundUpPower2(initialSize);
    pHashTable->numEntries = pHashTable->numDeadEntries = 0;
    pHashTable->freeFunc = freeFunc;
    /*
     * calloc zero-fills the bucket array (replacing malloc+memset) and
     * checks the count*size multiplication for overflow.
     */
    pHashTable->pEntries =
        (HashEntry*) calloc(pHashTable->tableSize, sizeof(HashEntry));
    if (pHashTable->pEntries == NULL) {
        free(pHashTable);
        return NULL;
    }

    return pHashTable;
}
/*
 * Start the concurrent GC daemon thread.
 *
 * Returns true if the thread was created; also records the result in
 * gHs->hasGcThread.
 */
static bool gcDaemonStartup()
{
    /* Mutex + condition variable coordinate the daemon's wake/shutdown. */
    dvmInitMutex(&gHs->gcThreadMutex);
    pthread_cond_init(&gHs->gcThreadCond, NULL);
    gHs->gcThreadShutdown = false;

    bool started = dvmCreateInternalThread(&gHs->gcThread, "GC",
                                           gcDaemonThread, NULL);
    gHs->hasGcThread = started;
    return started;
}
/*
 * Initialize any HeapWorker state that Heap.c cares about.  This lets the
 * GC start before the HeapWorker thread is initialized.
 *
 * Must only be called once (asserted via gDvm.heapWorkerInitialized).
 */
void dvmInitializeHeapWorkerState()
{
    assert(!gDvm.heapWorkerInitialized);

    /* Lock plus the two conditions the worker/waiters block on. */
    dvmInitMutex(&gDvm.heapWorkerLock);
    pthread_cond_init(&gDvm.heapWorkerCond, NULL);
    pthread_cond_init(&gDvm.heapWorkerIdleCond, NULL);

    gDvm.heapWorkerInitialized = true;
}
/*
 * Prep string interning: create the two hash tables backing the intern
 * pools.
 *
 * Returns false if either table cannot be allocated.
 */
bool dvmStringInternStartup()
{
    dvmInitMutex(&gDvm.internLock);

    gDvm.internedStrings = dvmHashTableCreate(256, NULL);
    if (gDvm.internedStrings == NULL)
        return false;

    gDvm.literalStrings = dvmHashTableCreate(256, NULL);
    /*
     * NOTE(review): if this second allocation fails, internedStrings is not
     * freed here — presumably shutdown tears it down; confirm.
     */
    return gDvm.literalStrings != NULL;
}
/* * Crank up the stdout/stderr converter thread. * * Returns immediately. */ bool dvmStdioConverterStartup() { gDvm.haltStdioConverter = false; dvmInitMutex(&gDvm.stdioConverterLock); pthread_cond_init(&gDvm.stdioConverterCond, NULL); if (pipe(gDvm.stdoutPipe) != 0) { ALOGW("pipe failed: %s", strerror(errno)); return false; } if (pipe(gDvm.stderrPipe) != 0) { ALOGW("pipe failed: %s", strerror(errno)); return false; } if (dup2(gDvm.stdoutPipe[1], kFilenoStdout) != kFilenoStdout) { ALOGW("dup2(1) failed: %s", strerror(errno)); return false; } close(gDvm.stdoutPipe[1]); gDvm.stdoutPipe[1] = -1; #ifdef HAVE_ANDROID_OS /* don't redirect stderr on sim -- logs get written there! */ /* (don't need this on the sim anyway) */ if (dup2(gDvm.stderrPipe[1], kFilenoStderr) != kFilenoStderr) { ALOGW("dup2(2) failed: %d %s", errno, strerror(errno)); return false; } close(gDvm.stderrPipe[1]); gDvm.stderrPipe[1] = -1; #endif /* * Create the thread. */ dvmLockMutex(&gDvm.stdioConverterLock); if (!dvmCreateInternalThread(&gDvm.stdioConverterHandle, "Stdio Converter", stdioConverterThreadStart, NULL)) { return false; } while (!gDvm.stdioConverterReady) { dvmWaitCond(&gDvm.stdioConverterCond, &gDvm.stdioConverterLock); } dvmUnlockMutex(&gDvm.stdioConverterLock); return true; }
/*
 * JIT compiler entry point: set up locks, condition variables, and the
 * work queue, then spawn the compiler thread which completes the real
 * initialization lazily.
 *
 * Returns true if the compiler thread was created.
 */
bool dvmCompilerStartup(void)
{
    dvmInitMutex(&gDvmJit.compilerLock);
    dvmInitMutex(&gDvmJit.compilerICPatchLock);
    dvmInitMutex(&gDvmJit.codeCacheProtectionLock);

    dvmLockMutex(&gDvmJit.compilerLock);

    /* Conditions set up for timed waits (see dvmInitCondForTimedWait). */
    dvmInitCondForTimedWait(&gDvmJit.compilerQueueActivity);
    dvmInitCondForTimedWait(&gDvmJit.compilerQueueEmpty);

    /* Reset the work queue */
    gDvmJit.compilerWorkEnqueueIndex = 0;
    gDvmJit.compilerWorkDequeueIndex = 0;
    gDvmJit.compilerQueueLength = 0;

    dvmUnlockMutex(&gDvmJit.compilerLock);

    /*
     * Defer rest of initialization until we're sure JIT'ng makes sense.
     * Launch the compiler thread, which will do the real initialization
     * if and when it is signalled to do so.
     */
    return dvmCreateInternalThread(&gDvmJit.compilerHandle, "Compiler",
                                   compilerThreadStart, NULL);
}
static bool gcDaemonStartup() { dvmInitMutex(&gHs->gcThreadMutex); #if defined(__APPLE__) pthread_cond_init(&gHs->gcThreadCond, NULL); #else pthread_condattr_t condAttr; pthread_condattr_init(&condAttr); pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC); pthread_cond_init(&gHs->gcThreadCond, &condAttr); #endif // defined(__APPLE__) gHs->gcThreadShutdown = false; gHs->hasGcThread = dvmCreateInternalThread(&gHs->gcThread, "GC", gcDaemonThread, NULL); return gHs->hasGcThread; }
/*
 * Given an open optimized DEX file, map it into read-only shared memory and
 * parse the contents.
 *
 * On success, *ppDvmDex receives the newly-allocated DvmDex (which retains
 * the memory mapping) and 0 is returned.
 *
 * Returns nonzero on error; on every error path any mapping created here
 * is released before returning.
 */
int dvmDexFileOpenFromFd(int fd, DvmDex** ppDvmDex)
{
    DvmDex* pDvmDex;
    DexFile* pDexFile;
    MemMapping memMap;
    int parseFlags = kDexParseDefault;
    int result = -1;

    /* Optionally verify the file checksum during parsing. */
    if (gDvm.verifyDexChecksum)
        parseFlags |= kDexParseVerifyChecksum;

    /* The fd may have been read already; rewind before mapping. */
    if (lseek(fd, 0, SEEK_SET) < 0) {
        LOGE("lseek rewind failed\n");
        goto bail;
    }

    if (sysMapFileInShmemWritableReadOnly(fd, &memMap) != 0) {
        LOGE("Unable to map file\n");
        goto bail;
    }

    pDexFile = dexFileParse(memMap.addr, memMap.length, parseFlags);
    if (pDexFile == NULL) {
        LOGE("DEX parse failed\n");
        /* release the mapping created above before bailing */
        sysReleaseShmem(&memMap);
        goto bail;
    }

    pDvmDex = allocateAuxStructures(pDexFile);
    if (pDvmDex == NULL) {
        /* free in reverse order of acquisition */
        dexFileFree(pDexFile);
        sysReleaseShmem(&memMap);
        goto bail;
    }

    //This seems like the best place to initialize the DvmDex mutex.
    dvmInitMutex(&pDvmDex->modLock);

    /* tuck this into the DexFile so it gets released later */
    sysCopyMap(&pDvmDex->memMap, &memMap);
    *ppDvmDex = pDvmDex;
    result = 0;

bail:
    return result;
}
/*
 * Allocate the auxiliary resolution tables (strings, classes, methods,
 * fields) for a parsed DEX file as a single contiguous region, with the
 * DvmDex header at the front and each table carved out behind it.
 *
 * Returns NULL if the region cannot be mapped.
 */
static DvmDex* allocateAuxStructures(DexFile* pDexFile)
{
    const DexHeader* pHeader = pDexFile->pHeader;

    /* Byte sizes of the four pointer tables. */
    u4 stringSize = pHeader->stringIdsSize * sizeof(struct StringObject*);
    u4 classSize  = pHeader->typeIdsSize   * sizeof(struct ClassObject*);
    u4 methodSize = pHeader->methodIdsSize * sizeof(struct Method*);
    u4 fieldSize  = pHeader->fieldIdsSize  * sizeof(struct Field*);

    u4 totalSize = sizeof(DvmDex) +
                   stringSize + classSize + methodSize + fieldSize;

    u1* cursor = (u1*) dvmAllocRegion(totalSize, PROT_READ | PROT_WRITE,
                                      "dalvik-aux-structure");
    if ((void*) cursor == MAP_FAILED)
        return NULL;

    /* The DvmDex header sits at the start of the blob. */
    DvmDex* pDvmDex = (DvmDex*) cursor;
    cursor += sizeof(DvmDex);

    pDvmDex->pDexFile = pDexFile;
    pDvmDex->pHeader = pHeader;

    /* Bump-allocate each table out of the remainder of the blob. */
    pDvmDex->pResStrings = (struct StringObject**) cursor;
    cursor += stringSize;
    pDvmDex->pResClasses = (struct ClassObject**) cursor;
    cursor += classSize;
    pDvmDex->pResMethods = (struct Method**) cursor;
    cursor += methodSize;
    pDvmDex->pResFields = (struct Field**) cursor;

    ALOGV("+++ DEX %p: allocateAux (%d+%d+%d+%d)*4 = %d bytes",
          pDvmDex, stringSize/4, classSize/4, methodSize/4, fieldSize/4,
          stringSize + classSize + methodSize + fieldSize);

    pDvmDex->pInterfaceCache = dvmAllocAtomicCache(DEX_INTERFACE_CACHE_SIZE);

    /* NOTE(review): modLock is also initialized by the caller in
     * dvmDexFileOpenFromFd — looks redundant; confirm which one should own
     * the init. */
    dvmInitMutex(&pDvmDex->modLock);

    return pDvmDex;
}
/*
 * Create and initialize a monitor.
 *
 * Aborts the VM (dvmAbort) if allocation fails, so the return value is
 * never NULL.  The new monitor is pushed onto the global monitor list.
 */
Monitor* dvmCreateMonitor(Object* obj)
{
    Monitor* mon;

    mon = (Monitor*) calloc(1, sizeof(Monitor));
    if (mon == NULL) {
        ALOGE("Unable to allocate monitor");
        dvmAbort();
    }
    mon->obj = obj;
    dvmInitMutex(&mon->lock);

    /* replace the head of the list with the new monitor */
    /*
     * Lock-free push: retry the CAS until no other thread has changed the
     * list head between the read and the swap.  The int32_t casts assume
     * pointers fit in 32 bits — TODO confirm on 64-bit builds.
     */
    do {
        mon->next = gDvm.monitorList;
    } while (android_atomic_release_cas((int32_t)mon->next, (int32_t)mon,
            (int32_t*)(void*)&gDvm.monitorList) != 0);

    return mon;
}
/*
 * Initialize the GC universe.
 *
 * We're currently using a memory-mapped arena to keep things off of the
 * main heap.  This needs to be replaced with something real.
 *
 * Returns the result of dvmHeapStartup().
 */
bool dvmGcStartup()
{
    /* Heap lock and its condition variable come up first. */
    dvmInitMutex(&gDvm.gcHeapLock);
    pthread_cond_init(&gDvm.gcHeapCond, NULL);

    /* Bring up the heap itself. */
    return dvmHeapStartup();
}
/*
 * Initialize the GC universe.
 *
 * We're currently using a memory-mapped arena to keep things off of the
 * main heap.  This needs to be replaced with something real.
 *
 * Returns whatever dvmHeapStartup() reports.
 */
bool dvmGcStartup(void)
{
    /* Only the heap lock is initialized here; the heap does the rest. */
    dvmInitMutex(&gDvm.gcHeapLock);
    return dvmHeapStartup();
}
/*
 * One-time initialization performed on the compiler thread itself:
 * architecture setup, code cache, compiler arena, method-stats table,
 * the JitTable, the (global, allocate-once) profiling table, and the
 * trace-profiling counters.  Finally publishes the new tables and forces
 * all running threads to refresh their cached pointers.
 *
 * Returns true on success; on failure, partially-allocated tables are
 * freed before bailing (NOTE(review): methodStatsTable is not freed on
 * the failure path — presumably reclaimed at shutdown; confirm).
 */
static bool compilerThreadStartup(void)
{
    JitEntry *pJitTable = NULL;
    unsigned char *pJitProfTable = NULL;
    JitTraceProfCounters *pJitTraceProfCounters = NULL;
    unsigned int i;

    if (!dvmCompilerArchInit())
        goto fail;

    /*
     * Setup the code cache if we have not inherited a valid code cache
     * from the zygote.
     */
    if (gDvmJit.codeCache == NULL) {
        if (!dvmCompilerSetupCodeCache())
            goto fail;
    }

    /* Allocate the initial arena block */
    if (dvmCompilerHeapInit() == false) {
        goto fail;
    }

    /* Cache the thread pointer */
    gDvmJit.compilerThread = dvmThreadSelf();

    dvmLockMutex(&gDvmJit.compilerLock);

    /* Track method-level compilation statistics */
    gDvmJit.methodStatsTable = dvmHashTableCreate(32, NULL);

#if defined(WITH_JIT_TUNING)
    gDvm.verboseShutdown = true;
#endif

    dvmUnlockMutex(&gDvmJit.compilerLock);

    /* Set up the JitTable */

    /* Power of 2? */
    assert(gDvmJit.jitTableSize &&
           !(gDvmJit.jitTableSize & (gDvmJit.jitTableSize - 1)));

    dvmInitMutex(&gDvmJit.tableLock);
    dvmLockMutex(&gDvmJit.tableLock);
    pJitTable = (JitEntry*)
                calloc(gDvmJit.jitTableSize, sizeof(*pJitTable));
    if (!pJitTable) {
        ALOGE("jit table allocation failed");
        dvmUnlockMutex(&gDvmJit.tableLock);
        goto fail;
    }
    /*
     * NOTE: the profile table must only be allocated once, globally.
     * Profiling is turned on and off by nulling out gDvm.pJitProfTable
     * and then restoring its original value.  However, this action
     * is not synchronized for speed so threads may continue to hold
     * and update the profile table after profiling has been turned
     * off by null'ng the global pointer.  Be aware.
     */
    pJitProfTable = (unsigned char *)malloc(JIT_PROF_SIZE);
    if (!pJitProfTable) {
        ALOGE("jit prof table allocation failed");
        free(pJitTable);
        dvmUnlockMutex(&gDvmJit.tableLock);
        goto fail;
    }
    /* Every counter starts at the compilation threshold. */
    memset(pJitProfTable, gDvmJit.threshold, JIT_PROF_SIZE);
    /* A chain value equal to the table size marks end-of-chain. */
    for (i=0; i < gDvmJit.jitTableSize; i++) {
        pJitTable[i].u.info.chain = gDvmJit.jitTableSize;
    }
    /* Is chain field wide enough for termination pattern? */
    assert(pJitTable[0].u.info.chain == gDvmJit.jitTableSize);

    /* Allocate the trace profiling structure */
    pJitTraceProfCounters = (JitTraceProfCounters*)
                            calloc(1, sizeof(*pJitTraceProfCounters));
    if (!pJitTraceProfCounters) {
        ALOGE("jit trace prof counters allocation failed");
        free(pJitTable);
        free(pJitProfTable);
        dvmUnlockMutex(&gDvmJit.tableLock);
        goto fail;
    }

    /* Publish the fully-built tables under the table lock. */
    gDvmJit.pJitEntryTable = pJitTable;
    gDvmJit.jitTableMask = gDvmJit.jitTableSize - 1;
    gDvmJit.jitTableEntriesUsed = 0;
    gDvmJit.compilerHighWater =
        COMPILER_WORK_QUEUE_SIZE - (COMPILER_WORK_QUEUE_SIZE/4);
    /*
     * If the VM is launched with wait-on-the-debugger, we will need to hide
     * the profile table here
     */
    gDvmJit.pProfTable = dvmDebuggerOrProfilerActive() ? NULL : pJitProfTable;
    gDvmJit.pProfTableCopy = pJitProfTable;
    gDvmJit.pJitTraceProfCounters = pJitTraceProfCounters;
    dvmJitUpdateThreadStateAll();
    dvmUnlockMutex(&gDvmJit.tableLock);

    /* Signal running threads to refresh their cached pJitTable pointers */
    dvmSuspendAllThreads(SUSPEND_FOR_REFRESH);
    dvmResumeAllThreads(SUSPEND_FOR_REFRESH);

    /* Enable signature breakpoints by customizing the following code */
#if defined(SIGNATURE_BREAKPOINT)
    /*
     * Suppose one sees the following native crash in the bugreport:
     * I/DEBUG ( 1638): Build fingerprint: 'unknown'
     * I/DEBUG ( 1638): pid: 2468, tid: 2507 >>> com.google.android.gallery3d
     * I/DEBUG ( 1638): signal 11 (SIGSEGV), fault addr 00001400
     * I/DEBUG ( 1638): r0 44ea7190 r1 44e4f7b8 r2 44ebc710 r3 00000000
     * I/DEBUG ( 1638): r4 00000a00 r5 41862dec r6 4710dc10 r7 00000280
     * I/DEBUG ( 1638): r8 ad010f40 r9 46a37a12 10 001116b0 fp 42a78208
     * I/DEBUG ( 1638): ip 00000090 sp 4710dbc8 lr ad060e67 pc 46b90682
     * cpsr 00000030
     * I/DEBUG ( 1638): #00 pc 46b90682 /dev/ashmem/dalvik-jit-code-cache
     * I/DEBUG ( 1638): #01 pc 00060e62 /system/lib/libdvm.so
     *
     * I/DEBUG ( 1638): code around pc:
     * I/DEBUG ( 1638): 46b90660 6888d01c 34091dcc d2174287 4a186b68
     * I/DEBUG ( 1638): 46b90670 d0052800 68006809 28004790 6b68d00e
     * I/DEBUG ( 1638): 46b90680 512000bc 37016eaf 6ea866af 6f696028
     * I/DEBUG ( 1638): 46b90690 682a6069 429a686b e003da08 6df1480b
     * I/DEBUG ( 1638): 46b906a0 1c2d4788 47806d70 46a378fa 47806d70
     *
     * Clearly it is a JIT bug.  To find out which translation contains the
     * offending code, the content of the memory dump around the faulting PC
     * can be pasted into the gDvmJit.signatureBreakpoint[] array and next time
     * when a similar compilation is being created, the JIT compiler replay the
     * trace in the verbose mode and one can investigate the instruction
     * sequence in details.
     *
     * The length of the signature may need additional experiments to determine.
     * The rule of thumb is don't include PC-relative instructions in the
     * signature since it may be affected by the alignment of the compiled code.
     * However, a signature that's too short might increase the chance of false
     * positive matches.  Using gdbjithelper to disassembly the memory content
     * first might be a good companion approach.
     *
     * For example, if the next 4 words starting from 46b90680 is pasted into
     * the data structure:
     */
    gDvmJit.signatureBreakpointSize = 4;
    gDvmJit.signatureBreakpoint =
        malloc(sizeof(u4) * gDvmJit.signatureBreakpointSize);
    gDvmJit.signatureBreakpoint[0] = 0x512000bc;
    gDvmJit.signatureBreakpoint[1] = 0x37016eaf;
    gDvmJit.signatureBreakpoint[2] = 0x6ea866af;
    gDvmJit.signatureBreakpoint[3] = 0x6f696028;

    /*
     * The following log will be printed when a match is found in subsequent
     * testings:
     *
     * D/dalvikvm( 2468): Signature match starting from offset 0x34 (4 words)
     * D/dalvikvm( 2468): --------
     * D/dalvikvm( 2468): Compiler: Building trace for computeVisibleItems,
     * offset 0x1f7
     * D/dalvikvm( 2468): 0x46a37a12: 0x0090 add-int v42, v5, v26
     * D/dalvikvm( 2468): 0x46a37a16: 0x004d aput-object v13, v14, v42
     * D/dalvikvm( 2468): 0x46a37a1a: 0x0028 goto, (#0), (#0)
     * D/dalvikvm( 2468): 0x46a3794e: 0x00d8 add-int/lit8 v26, v26, (#1)
     * D/dalvikvm( 2468): 0x46a37952: 0x0028 goto, (#0), (#0)
     * D/dalvikvm( 2468): 0x46a378ee: 0x0002 move/from16 v0, v26, (#0)
     * D/dalvikvm( 2468): 0x46a378f2: 0x0002 move/from16 v1, v29, (#0)
     * D/dalvikvm( 2468): 0x46a378f6: 0x0035 if-ge v0, v1, (#10)
     * D/dalvikvm( 2468): TRACEINFO (554): 0x46a37624
     * Lcom/cooliris/media/GridLayer;computeVisibleItems 0x1f7 14 of 934, 8
     * blocks
     *     :
     *     :
     * D/dalvikvm( 2468): 0x20 (0020): ldr r0, [r5, #52]
     * D/dalvikvm( 2468): 0x22 (0022): ldr r2, [pc, #96]
     * D/dalvikvm( 2468): 0x24 (0024): cmp r0, #0
     * D/dalvikvm( 2468): 0x26 (0026): beq 0x00000034
     * D/dalvikvm( 2468): 0x28 (0028): ldr r1, [r1, #0]
     * D/dalvikvm( 2468): 0x2a (002a): ldr r0, [r0, #0]
     * D/dalvikvm( 2468): 0x2c (002c): blx r2
     * D/dalvikvm( 2468): 0x2e (002e): cmp r0, #0
     * D/dalvikvm( 2468): 0x30 (0030): beq 0x00000050
     * D/dalvikvm( 2468): 0x32 (0032): ldr r0, [r5, #52]
     * D/dalvikvm( 2468): 0x34 (0034): lsls r4, r7, #2
     * D/dalvikvm( 2468): 0x36 (0036): str r0, [r4, r4]
     * D/dalvikvm( 2468): -------- dalvik offset: 0x01fb @ goto, (#0), (#0)
     * D/dalvikvm( 2468): L0x0195:
     * D/dalvikvm( 2468): -------- dalvik offset: 0x0195 @ add-int/lit8 v26,
     * v26, (#1)
     * D/dalvikvm( 2468): 0x38 (0038): ldr r7, [r5, #104]
     * D/dalvikvm( 2468): 0x3a (003a): adds r7, r7, #1
     * D/dalvikvm( 2468): 0x3c (003c): str r7, [r5, #104]
     * D/dalvikvm( 2468): -------- dalvik offset: 0x0197 @ goto, (#0), (#0)
     * D/dalvikvm( 2468): L0x0165:
     * D/dalvikvm( 2468): -------- dalvik offset: 0x0165 @ move/from16 v0, v26,
     * (#0)
     * D/dalvikvm( 2468): 0x3e (003e): ldr r0, [r5, #104]
     * D/dalvikvm( 2468): 0x40 (0040): str r0, [r5, #0]
     *
     * The "str r0, [r4, r4]" is indeed the culprit of the native crash.
     */
#endif

    return true;

fail:
    return false;
}
/*
 * Create a new linear allocation block.
 *
 * Maps a DEFAULT_MAX_LENGTH region (ashmem-backed when USE_ASHMEM),
 * protects it PROT_NONE except for the first usable page, and returns a
 * header describing it.  Returns NULL on any failure; the caller owns the
 * returned header.
 */
LinearAllocHdr* dvmLinearAllocCreate(Object* classLoader)
{
#ifdef DISABLE_LINEAR_ALLOC
    return (LinearAllocHdr*) 0x12345;
#endif
    LinearAllocHdr* pHdr;

    pHdr = (LinearAllocHdr*) malloc(sizeof(*pHdr));
    if (pHdr == NULL) {
        /* BUG FIX: the result was previously dereferenced without a check */
        return NULL;
    }

    /*
     * "curOffset" points to the location of the next pre-block header,
     * which means we have to advance to the next BLOCK_ALIGN address and
     * back up.
     *
     * Note we leave the first page empty (see below), and start the
     * first entry on the second page at an offset that ensures the next
     * chunk of data will be properly aligned.
     */
    assert(BLOCK_ALIGN >= HEADER_EXTRA);
    pHdr->curOffset = pHdr->firstOffset =
        (BLOCK_ALIGN-HEADER_EXTRA) + PAGESIZE;
    pHdr->mapLength = DEFAULT_MAX_LENGTH;

#ifdef USE_ASHMEM
    int fd;

    fd = ashmem_create_region("dalvik-LinearAlloc", DEFAULT_MAX_LENGTH);
    if (fd < 0) {
        LOGE("ashmem LinearAlloc failed %s", strerror(errno));
        free(pHdr);
        return NULL;
    }

    pHdr->mapAddr = mmap(NULL, pHdr->mapLength, PROT_READ | PROT_WRITE,
        MAP_PRIVATE, fd, 0);
    if (pHdr->mapAddr == MAP_FAILED) {
        LOGE("LinearAlloc mmap(%d) failed: %s\n", pHdr->mapLength,
            strerror(errno));
        free(pHdr);
        close(fd);
        return NULL;
    }

    close(fd);
#else /*USE_ASHMEM*/
    // MAP_ANON is listed as "deprecated" on Linux,
    // but MAP_ANONYMOUS is not defined under Mac OS X.
    pHdr->mapAddr = mmap(NULL, pHdr->mapLength, PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANON, -1, 0);
    if (pHdr->mapAddr == MAP_FAILED) {
        LOGE("LinearAlloc mmap(%d) failed: %s\n", pHdr->mapLength,
            strerror(errno));
        free(pHdr);
        return NULL;
    }
#endif /*USE_ASHMEM*/

    /* region expected to begin on a page boundary */
    assert(((int) pHdr->mapAddr & (PAGESIZE-1)) == 0);

    /* the system should initialize newly-mapped memory to zero */
    assert(*(u4*) (pHdr->mapAddr + pHdr->curOffset) == 0);

    /*
     * Disable access to all except starting page.  We will enable pages
     * as we use them.  This helps prevent bad pointers from working.  The
     * pages start out PROT_NONE, become read/write while we access them,
     * then go to read-only after we finish our changes.
     *
     * We have to make the first page readable because we have 4 pad bytes,
     * followed by 4 length bytes, giving an initial offset of 8.  The
     * generic code below assumes that there could have been a previous
     * allocation that wrote into those 4 pad bytes, therefore the page
     * must have been marked readable by the previous allocation.
     *
     * We insert an extra page in here to force a break in the memory map
     * so we can see ourselves more easily in "showmap".  Otherwise this
     * stuff blends into the neighboring pages.  [TODO: do we still need
     * the extra page now that we have ashmem?]
     */
    if (mprotect(pHdr->mapAddr, pHdr->mapLength, PROT_NONE) != 0) {
        LOGW("LinearAlloc init mprotect failed: %s\n", strerror(errno));
        /* BUG FIX: the mapping was previously leaked on this path */
        munmap(pHdr->mapAddr, pHdr->mapLength);
        free(pHdr);
        return NULL;
    }
    if (mprotect(pHdr->mapAddr + PAGESIZE, PAGESIZE,
            ENFORCE_READ_ONLY ? PROT_READ : PROT_READ|PROT_WRITE) != 0)
    {
        LOGW("LinearAlloc init mprotect #2 failed: %s\n", strerror(errno));
        /* BUG FIX: the mapping was previously leaked on this path */
        munmap(pHdr->mapAddr, pHdr->mapLength);
        free(pHdr);
        return NULL;
    }

    if (ENFORCE_READ_ONLY) {
        /* allocate the per-page ref count */
        int numPages = (pHdr->mapLength+PAGESIZE-1) / PAGESIZE;
        pHdr->writeRefCount = calloc(numPages, sizeof(short));
        if (pHdr->writeRefCount == NULL) {
            /* BUG FIX: the mapping was previously leaked on this path */
            munmap(pHdr->mapAddr, pHdr->mapLength);
            free(pHdr);
            return NULL;
        }
    }

    dvmInitMutex(&pHdr->lock);

    LOGV("LinearAlloc: created region at %p-%p\n",
        pHdr->mapAddr, pHdr->mapAddr + pHdr->mapLength-1);

    return pHdr;
}