/*
 * Dump a bytecode disassembly of one method.
 *
 * Walks the method's instruction stream, computing the width of each
 * instruction (including the three variable-width data payloads: packed
 * switch, sparse switch, and fill-array data), decoding it, and handing
 * it to dumpInstruction() for printing.
 *
 * pDexFile:   the DEX file containing the method
 * pDexMethod: the method whose code should be disassembled
 */
void dumpBytecodes(DexFile* pDexFile, const DexMethod* pDexMethod)
{
    const DexCode* pCode = dexGetCode(pDexFile, pDexMethod);
    const u2* insns;
    int insnIdx;
    FieldMethodInfo methInfo;
    int startAddr;
    char* className = NULL;

    assert(pCode->insnsSize > 0);
    insns = pCode->insns;

    getMethodInfo(pDexFile, pDexMethod->methodIdx, &methInfo);

    /* file offset of the code item, used as the disassembly base address */
    startAddr = ((u1*)pCode - pDexFile->baseAddr);
    /* descriptorToDot allocates; released via free(className) below */
    className = descriptorToDot(methInfo.classDescriptor);

    printf("%06x: |[%06x] %s.%s:%s\n", startAddr, startAddr, className, methInfo.name, methInfo.signature);

    insnIdx = 0;
    while (insnIdx < (int) pCode->insnsSize) {
        int insnWidth;
        OpCode opCode;
        DecodedInstruction decInsn;
        u2 instr;

        /*
         * Variable-width payloads are identified by a signature word
         * rather than an opcode; compute their width from the embedded
         * element count.  Widths are in 16-bit code units.
         */
        instr = get2LE((const u1*)insns);
        if (instr == kPackedSwitchSignature) {
            /* ident + size + first_key (2) + size targets (2 units each) */
            insnWidth = 4 + get2LE((const u1*)(insns+1)) * 2;
        } else if (instr == kSparseSwitchSignature) {
            /* ident + size + size keys and size targets (4 units per entry) */
            insnWidth = 2 + get2LE((const u1*)(insns+1)) * 4;
        } else if (instr == kArrayDataSignature) {
            int width = get2LE((const u1*)(insns+1));
            int size = get2LE((const u1*)(insns+2)) |
                       (get2LE((const u1*)(insns+3))<<16);
            // The plus 1 is to round up for odd size and width
            insnWidth = 4 + ((size * width) + 1) / 2;
        } else {
            /* ordinary instruction: width comes from the opcode table */
            opCode = instr & 0xff;
            insnWidth = dexGetInstrWidthAbs(gInstrWidth, opCode);
            if (insnWidth == 0) {
                /* zero width would loop forever; bail out of this method */
                fprintf(stderr,
                    "GLITCH: zero-width instruction at idx=0x%04x\n", insnIdx);
                break;
            }
        }

        dexDecodeInstruction(gInstrFormat, insns, &decInsn);
        dumpInstruction(pDexFile, pCode, insnIdx, insnWidth, &decInsn);

        insns += insnWidth;
        insnIdx += insnWidth;
    }

    free(className);
}
/*
 * Perform static verification on instructions.
 *
 * As a side effect, this sets the "branch target" flags in InsnFlags.
 *
 * "(CF)" items are handled during code-flow analysis.
 *
 * v3 4.10.1
 * - target of each jump and branch instruction must be valid
 * - targets of switch statements must be valid
 * - operands referencing constant pool entries must be valid
 * - (CF) operands of getfield, putfield, getstatic, putstatic must be valid
 * - (new) verify operands of "quick" field ops
 * - (CF) operands of method invocation instructions must be valid
 * - (new) verify operands of "quick" method invoke ops
 * - (CF) only invoke-direct can call a method starting with '<'
 * - (CF) <clinit> must never be called explicitly
 * - operands of instanceof, checkcast, new (and variants) must be valid
 * - new-array[-type] limited to 255 dimensions
 * - can't use "new" on an array class
 * - (?) limit dimensions in multi-array creation
 * - local variable load/store register values must be in valid range
 *
 * v3 4.11.1.2
 * - branches must be within the bounds of the code array
 * - targets of all control-flow instructions are the start of an instruction
 * - register accesses fall within range of allocated registers
 * - (N/A) access to constant pool must be of appropriate type
 * - code does not end in the middle of an instruction
 * - execution cannot fall off the end of the code
 * - (earlier) for each exception handler, the "try" area must begin and
 *   end at the start of an instruction (end can be at the end of the code)
 * - (earlier) for each exception handler, the handler must start at a
 *   valid instruction
 *
 * Returns "true" if all instructions pass static verification.
 */
static bool verifyInstructions(VerifierData* vdata)
{
    const Method* meth = vdata->method;
    const DvmDex* pDvmDex = meth->clazz->pDvmDex;
    InsnFlags* insnFlags = vdata->insnFlags;
    unsigned int codeOffset;

    /* the start of the method is a "branch target" */
    dvmInsnSetBranchTarget(insnFlags, 0, true);

    for (codeOffset = 0; codeOffset < vdata->insnsSize; /**/) {
        /*
         * Pull the instruction apart.
         */
        int width = dvmInsnGetWidth(insnFlags, codeOffset);
        DecodedInstruction decInsn;
        bool okay = true;

        dexDecodeInstruction(meth->insns + codeOffset, &decInsn);

        /*
         * Check register, type, class, field, method, and string indices
         * for out-of-range values.  Do additional checks on branch targets
         * and some special cases like new-instance and new-array.
         */
        switch (decInsn.opcode) {
        case OP_NOP:
        case OP_RETURN_VOID:
            /* nothing to check */
            break;
        case OP_MOVE_RESULT:
        case OP_MOVE_RESULT_OBJECT:
        case OP_MOVE_EXCEPTION:
        case OP_RETURN:
        case OP_RETURN_OBJECT:
        case OP_CONST_4:
        case OP_CONST_16:
        case OP_CONST:
        case OP_CONST_HIGH16:
        case OP_MONITOR_ENTER:
        case OP_MONITOR_EXIT:
        case OP_THROW:
            okay &= checkRegisterIndex(meth, decInsn.vA);
            break;
        case OP_MOVE_RESULT_WIDE:
        case OP_RETURN_WIDE:
        case OP_CONST_WIDE_16:
        case OP_CONST_WIDE_32:
        case OP_CONST_WIDE:
        case OP_CONST_WIDE_HIGH16:
            okay &= checkWideRegisterIndex(meth, decInsn.vA);
            break;
        case OP_GOTO:
        case OP_GOTO_16:
            okay &= checkBranchTarget(meth, insnFlags, codeOffset, false);
            break;
        case OP_GOTO_32:
            okay &= checkBranchTarget(meth, insnFlags, codeOffset, true);
            break;
        case OP_MOVE:
        case OP_MOVE_FROM16:
        case OP_MOVE_16:
        case OP_MOVE_OBJECT:
        case OP_MOVE_OBJECT_FROM16:
        case OP_MOVE_OBJECT_16:
        case OP_ARRAY_LENGTH:
        case OP_NEG_INT:
        case OP_NOT_INT:
        case OP_NEG_FLOAT:
        case OP_INT_TO_FLOAT:
        case OP_FLOAT_TO_INT:
        case OP_INT_TO_BYTE:
        case OP_INT_TO_CHAR:
        case OP_INT_TO_SHORT:
        case OP_ADD_INT_2ADDR:
        case OP_SUB_INT_2ADDR:
        case OP_MUL_INT_2ADDR:
        case OP_DIV_INT_2ADDR:
        case OP_REM_INT_2ADDR:
        case OP_AND_INT_2ADDR:
        case OP_OR_INT_2ADDR:
        case OP_XOR_INT_2ADDR:
        case OP_SHL_INT_2ADDR:
        case OP_SHR_INT_2ADDR:
        case OP_USHR_INT_2ADDR:
        case OP_ADD_FLOAT_2ADDR:
        case OP_SUB_FLOAT_2ADDR:
        case OP_MUL_FLOAT_2ADDR:
        case OP_DIV_FLOAT_2ADDR:
        case OP_REM_FLOAT_2ADDR:
        case OP_ADD_INT_LIT16:
        case OP_RSUB_INT:
        case OP_MUL_INT_LIT16:
        case OP_DIV_INT_LIT16:
        case OP_REM_INT_LIT16:
        case OP_AND_INT_LIT16:
        case OP_OR_INT_LIT16:
        case OP_XOR_INT_LIT16:
        case OP_ADD_INT_LIT8:
        case OP_RSUB_INT_LIT8:
        case OP_MUL_INT_LIT8:
        case OP_DIV_INT_LIT8:
        case OP_REM_INT_LIT8:
        case OP_AND_INT_LIT8:
        case OP_OR_INT_LIT8:
        case OP_XOR_INT_LIT8:
        case OP_SHL_INT_LIT8:
        case OP_SHR_INT_LIT8:
        case OP_USHR_INT_LIT8:
            okay &= checkRegisterIndex(meth, decInsn.vA);
            okay &= checkRegisterIndex(meth, decInsn.vB);
            break;
        case OP_INT_TO_LONG:
        case OP_INT_TO_DOUBLE:
        case OP_FLOAT_TO_LONG:
        case OP_FLOAT_TO_DOUBLE:
        case OP_SHL_LONG_2ADDR:
        case OP_SHR_LONG_2ADDR:
        case OP_USHR_LONG_2ADDR:
            okay &= checkWideRegisterIndex(meth, decInsn.vA);
            okay &= checkRegisterIndex(meth, decInsn.vB);
            break;
        case OP_LONG_TO_INT:
        case OP_LONG_TO_FLOAT:
        case OP_DOUBLE_TO_INT:
        case OP_DOUBLE_TO_FLOAT:
            okay &= checkRegisterIndex(meth, decInsn.vA);
            okay &= checkWideRegisterIndex(meth, decInsn.vB);
            break;
        case OP_MOVE_WIDE:
        case OP_MOVE_WIDE_FROM16:
        case OP_MOVE_WIDE_16:
        case OP_DOUBLE_TO_LONG:
        case OP_LONG_TO_DOUBLE:
        case OP_NEG_DOUBLE:
        case OP_NEG_LONG:
        case OP_NOT_LONG:
        case OP_ADD_LONG_2ADDR:
        case OP_SUB_LONG_2ADDR:
        case OP_MUL_LONG_2ADDR:
        case OP_DIV_LONG_2ADDR:
        case OP_REM_LONG_2ADDR:
        case OP_AND_LONG_2ADDR:
        case OP_OR_LONG_2ADDR:
        case OP_XOR_LONG_2ADDR:
        case OP_ADD_DOUBLE_2ADDR:
        case OP_SUB_DOUBLE_2ADDR:
        case OP_MUL_DOUBLE_2ADDR:
        case OP_DIV_DOUBLE_2ADDR:
        case OP_REM_DOUBLE_2ADDR:
            okay &= checkWideRegisterIndex(meth, decInsn.vA);
            okay &= checkWideRegisterIndex(meth, decInsn.vB);
            break;
        case OP_CONST_STRING:
        case OP_CONST_STRING_JUMBO:
            okay &= checkRegisterIndex(meth, decInsn.vA);
            okay &= checkStringIndex(pDvmDex, decInsn.vB);
            break;
        case OP_CONST_CLASS:
        case OP_CHECK_CAST:
            okay &= checkRegisterIndex(meth, decInsn.vA);
            okay &= checkTypeIndex(pDvmDex, decInsn.vB);
            break;
        case OP_INSTANCE_OF:
            okay &= checkRegisterIndex(meth, decInsn.vA);
            okay &= checkRegisterIndex(meth, decInsn.vB);
            okay &= checkTypeIndex(pDvmDex, decInsn.vC);
            break;
        case OP_NEW_INSTANCE:
            okay &= checkRegisterIndex(meth, decInsn.vA);
            okay &= checkNewInstance(pDvmDex, decInsn.vB);
            break;
        case OP_NEW_ARRAY:
            okay &= checkRegisterIndex(meth, decInsn.vA);
            okay &= checkRegisterIndex(meth, decInsn.vB);
            okay &= checkNewArray(pDvmDex, decInsn.vC);
            break;
        case OP_FILL_ARRAY_DATA:
            okay &= checkRegisterIndex(meth, decInsn.vA);
            okay &= checkArrayData(meth, codeOffset);
            break;
        case OP_PACKED_SWITCH:
            okay &= checkRegisterIndex(meth, decInsn.vA);
            okay &= checkSwitchTargets(meth, insnFlags, codeOffset);
            break;
        case OP_SPARSE_SWITCH:
            okay &= checkRegisterIndex(meth, decInsn.vA);
            okay &= checkSwitchTargets(meth, insnFlags, codeOffset);
            break;
        case OP_CMPL_FLOAT:
        case OP_CMPG_FLOAT:
        case OP_AGET:
        case OP_AGET_OBJECT:
        case OP_AGET_BOOLEAN:
        case OP_AGET_BYTE:
        case OP_AGET_CHAR:
        case OP_AGET_SHORT:
        case OP_APUT:
        case OP_APUT_OBJECT:
        case OP_APUT_BOOLEAN:
        case OP_APUT_BYTE:
        case OP_APUT_CHAR:
        case OP_APUT_SHORT:
        case OP_ADD_INT:
        case OP_SUB_INT:
        case OP_MUL_INT:
        case OP_DIV_INT:
        case OP_REM_INT:
        case OP_AND_INT:
        case OP_OR_INT:
        case OP_XOR_INT:
        case OP_SHL_INT:
        case OP_SHR_INT:
        case OP_USHR_INT:
        case OP_ADD_FLOAT:
        case OP_SUB_FLOAT:
        case OP_MUL_FLOAT:
        case OP_DIV_FLOAT:
        case OP_REM_FLOAT:
            okay &= checkRegisterIndex(meth, decInsn.vA);
            okay &= checkRegisterIndex(meth, decInsn.vB);
            okay &= checkRegisterIndex(meth, decInsn.vC);
            break;
        case OP_AGET_WIDE:
        case OP_APUT_WIDE:
            okay &= checkWideRegisterIndex(meth, decInsn.vA);
            okay &= checkRegisterIndex(meth, decInsn.vB);
            okay &= checkRegisterIndex(meth, decInsn.vC);
            break;
        case OP_CMPL_DOUBLE:
        case OP_CMPG_DOUBLE:
        case OP_CMP_LONG:
            okay &= checkRegisterIndex(meth, decInsn.vA);
            okay &= checkWideRegisterIndex(meth, decInsn.vB);
            okay &= checkWideRegisterIndex(meth, decInsn.vC);
            break;
        case OP_ADD_DOUBLE:
        case OP_SUB_DOUBLE:
        case OP_MUL_DOUBLE:
        case OP_DIV_DOUBLE:
        case OP_REM_DOUBLE:
        case OP_ADD_LONG:
        case OP_SUB_LONG:
        case OP_MUL_LONG:
        case OP_DIV_LONG:
        case OP_REM_LONG:
        case OP_AND_LONG:
        case OP_OR_LONG:
        case OP_XOR_LONG:
            okay &= checkWideRegisterIndex(meth, decInsn.vA);
            okay &= checkWideRegisterIndex(meth, decInsn.vB);
            okay &= checkWideRegisterIndex(meth, decInsn.vC);
            break;
        case OP_SHL_LONG:
        case OP_SHR_LONG:
        case OP_USHR_LONG:
            okay &= checkWideRegisterIndex(meth, decInsn.vA);
            okay &= checkWideRegisterIndex(meth, decInsn.vB);
            okay &= checkRegisterIndex(meth, decInsn.vC);
            break;
        case OP_IF_EQ:
        case OP_IF_NE:
        case OP_IF_LT:
        case OP_IF_GE:
        case OP_IF_GT:
        case OP_IF_LE:
            okay &= checkRegisterIndex(meth, decInsn.vA);
            okay &= checkRegisterIndex(meth, decInsn.vB);
            okay &= checkBranchTarget(meth, insnFlags, codeOffset, false);
            break;
        case OP_IF_EQZ:
        case OP_IF_NEZ:
        case OP_IF_LTZ:
        case OP_IF_GEZ:
        case OP_IF_GTZ:
        case OP_IF_LEZ:
            okay &= checkRegisterIndex(meth, decInsn.vA);
            okay &= checkBranchTarget(meth, insnFlags, codeOffset, false);
            break;
        case OP_IGET:
        case OP_IGET_OBJECT:
        case OP_IGET_BOOLEAN:
        case OP_IGET_BYTE:
        case OP_IGET_CHAR:
        case OP_IGET_SHORT:
        case OP_IPUT:
        case OP_IPUT_OBJECT:
        case OP_IPUT_BOOLEAN:
        case OP_IPUT_BYTE:
        case OP_IPUT_CHAR:
        case OP_IPUT_SHORT:
            okay &= checkRegisterIndex(meth, decInsn.vA);
            okay &= checkRegisterIndex(meth, decInsn.vB);
            okay &= checkFieldIndex(pDvmDex, decInsn.vC);
            break;
        case OP_IGET_WIDE:
        case OP_IPUT_WIDE:
            okay &= checkWideRegisterIndex(meth, decInsn.vA);
            okay &= checkRegisterIndex(meth, decInsn.vB);
            okay &= checkFieldIndex(pDvmDex, decInsn.vC);
            break;
        case OP_SGET:
        case OP_SGET_OBJECT:
        case OP_SGET_BOOLEAN:
        case OP_SGET_BYTE:
        case OP_SGET_CHAR:
        case OP_SGET_SHORT:
        case OP_SPUT:
        case OP_SPUT_OBJECT:
        case OP_SPUT_BOOLEAN:
        case OP_SPUT_BYTE:
        case OP_SPUT_CHAR:
        case OP_SPUT_SHORT:
            okay &= checkRegisterIndex(meth, decInsn.vA);
            okay &= checkFieldIndex(pDvmDex, decInsn.vB);
            break;
        case OP_SGET_WIDE:
        case OP_SPUT_WIDE:
            okay &= checkWideRegisterIndex(meth, decInsn.vA);
            okay &= checkFieldIndex(pDvmDex, decInsn.vB);
            break;
        case OP_FILLED_NEW_ARRAY:
            /* decoder uses B, not C, for type ref */
            okay &= checkTypeIndex(pDvmDex, decInsn.vB);
            okay &= checkVarargRegs(meth, &decInsn);
            break;
        case OP_FILLED_NEW_ARRAY_RANGE:
            okay &= checkTypeIndex(pDvmDex, decInsn.vB);
            okay &= checkVarargRangeRegs(meth, &decInsn);
            break;
        case OP_INVOKE_VIRTUAL:
        case OP_INVOKE_SUPER:
        case OP_INVOKE_DIRECT:
        case OP_INVOKE_STATIC:
        case OP_INVOKE_INTERFACE:
            /* decoder uses B, not C, for type ref */
            okay &= checkMethodIndex(pDvmDex, decInsn.vB);
            okay &= checkVarargRegs(meth, &decInsn);
            break;
        case OP_INVOKE_VIRTUAL_RANGE:
        case OP_INVOKE_SUPER_RANGE:
        case OP_INVOKE_DIRECT_RANGE:
        case OP_INVOKE_STATIC_RANGE:
        case OP_INVOKE_INTERFACE_RANGE:
            okay &= checkMethodIndex(pDvmDex, decInsn.vB);
            okay &= checkVarargRangeRegs(meth, &decInsn);
            break;

        /* verifier/optimizer output; we should never see these */
        case OP_IGET_VOLATILE:
        case OP_IPUT_VOLATILE:
        case OP_SGET_VOLATILE:
        case OP_SPUT_VOLATILE:
        case OP_IGET_OBJECT_VOLATILE:
        case OP_IPUT_OBJECT_VOLATILE:
        case OP_SGET_OBJECT_VOLATILE:
        case OP_SPUT_OBJECT_VOLATILE:
        case OP_IGET_WIDE_VOLATILE:
        case OP_IPUT_WIDE_VOLATILE:
        case OP_SGET_WIDE_VOLATILE:
        case OP_SPUT_WIDE_VOLATILE:
        case OP_BREAKPOINT:
        case OP_THROW_VERIFICATION_ERROR:
        case OP_EXECUTE_INLINE:
        case OP_EXECUTE_INLINE_RANGE:
        case OP_INVOKE_OBJECT_INIT_RANGE:
        case OP_RETURN_VOID_BARRIER:
        case OP_IGET_QUICK:
        case OP_IGET_WIDE_QUICK:
        case OP_IGET_OBJECT_QUICK:
        case OP_IPUT_QUICK:
        case OP_IPUT_WIDE_QUICK:
        case OP_IPUT_OBJECT_QUICK:
        case OP_INVOKE_VIRTUAL_QUICK:
        case OP_INVOKE_VIRTUAL_QUICK_RANGE:
        case OP_INVOKE_SUPER_QUICK:
        case OP_INVOKE_SUPER_QUICK_RANGE:
        case OP_UNUSED_3E:
        case OP_UNUSED_3F:
        case OP_UNUSED_40:
        case OP_UNUSED_41:
        case OP_UNUSED_42:
        case OP_UNUSED_43:
        case OP_UNUSED_73:
        case OP_UNUSED_79:
        case OP_UNUSED_7A:
        case OP_UNUSED_FF:
            ALOGE("VFY: unexpected opcode %04x", decInsn.opcode);
            okay = false;
            break;

        /*
         * DO NOT add a "default" clause here.  Without it the compiler will
         * complain if an instruction is missing (which is desirable).
         */
        }

        if (!okay) {
            LOG_VFY_METH(meth, "VFY: rejecting opcode 0x%02x at 0x%04x",
                decInsn.opcode, codeOffset);
            return false;
        }

        OpcodeFlags opFlags = dexGetFlagsFromOpcode(decInsn.opcode);
        if ((opFlags & VERIFY_GC_INST_MASK) != 0) {
            /*
             * This instruction is a GC point.  If space is a concern,
             * the set of GC points could be reduced by eliminating
             * forward branches.
             *
             * TODO: we could also scan the targets of a "switch" statement,
             * and if none of them branch backward we could ignore that
             * instruction as well.
             */
            dvmInsnSetGcPoint(insnFlags, codeOffset, true);
        }

        assert(width > 0);
        codeOffset += width;
    }

    /* make sure the last instruction ends at the end of the insn area */
    if (codeOffset != vdata->insnsSize) {
        LOG_VFY_METH(meth,
            "VFY: code did not end when expected (end at %d, count %d)",
            codeOffset, vdata->insnsSize);
        return false;
    }

    return true;
}
/*
 * Process a single instruction for liveness analysis.
 *
 * Adds the instruction's uses to the "GEN" set and its definitions to the
 * "KILL" set in workBits.  We want to do KILL before GEN within a case to
 * handle instructions where the source and destination register is the
 * same.  Wide (64-bit) values occupy a register pair, so their defs/uses
 * go through KILLW/GENW, which cover both vN and vN+1.
 *
 * Returns "false" if something goes fatally wrong (an opcode that must
 * never appear at this stage).
 */
static bool processInstruction(VerifierData* vdata, u4 insnIdx,
    BitVector* workBits)
{
    const Method* meth = vdata->method;
    const u2* insns = meth->insns + insnIdx;
    DecodedInstruction decInsn;

    dexDecodeInstruction(insns, &decInsn);

    switch (decInsn.opcode) {
    case OP_NOP:
    case OP_RETURN_VOID:
    case OP_GOTO:
    case OP_GOTO_16:
    case OP_GOTO_32:
        /* no registers are used */
        break;

    case OP_RETURN:
    case OP_RETURN_OBJECT:
    case OP_MONITOR_ENTER:
    case OP_MONITOR_EXIT:
    case OP_CHECK_CAST:
    case OP_THROW:
    case OP_PACKED_SWITCH:
    case OP_SPARSE_SWITCH:
    case OP_FILL_ARRAY_DATA:
    case OP_IF_EQZ:
    case OP_IF_NEZ:
    case OP_IF_LTZ:
    case OP_IF_GEZ:
    case OP_IF_GTZ:
    case OP_IF_LEZ:
    case OP_SPUT:
    case OP_SPUT_BOOLEAN:
    case OP_SPUT_BYTE:
    case OP_SPUT_CHAR:
    case OP_SPUT_SHORT:
    case OP_SPUT_OBJECT:
        /* action <- vA */
        GEN(workBits, decInsn.vA);
        break;

    case OP_RETURN_WIDE:
    case OP_SPUT_WIDE:
        /* action <- vA(wide) */
        GENW(workBits, decInsn.vA);
        break;

    case OP_IF_EQ:
    case OP_IF_NE:
    case OP_IF_LT:
    case OP_IF_GE:
    case OP_IF_GT:
    case OP_IF_LE:
    case OP_IPUT:
    case OP_IPUT_BOOLEAN:
    case OP_IPUT_BYTE:
    case OP_IPUT_CHAR:
    case OP_IPUT_SHORT:
    case OP_IPUT_OBJECT:
        /* action <- vA, vB */
        GEN(workBits, decInsn.vA);
        GEN(workBits, decInsn.vB);
        break;

    case OP_IPUT_WIDE:
        /* action <- vA(wide), vB */
        GENW(workBits, decInsn.vA);
        GEN(workBits, decInsn.vB);
        break;

    case OP_APUT:
    case OP_APUT_BOOLEAN:
    case OP_APUT_BYTE:
    case OP_APUT_CHAR:
    case OP_APUT_SHORT:
    case OP_APUT_OBJECT:
        /* action <- vA, vB, vC */
        GEN(workBits, decInsn.vA);
        GEN(workBits, decInsn.vB);
        GEN(workBits, decInsn.vC);
        break;

    case OP_APUT_WIDE:
        /* action <- vA(wide), vB, vC */
        GENW(workBits, decInsn.vA);
        GEN(workBits, decInsn.vB);
        GEN(workBits, decInsn.vC);
        break;

    case OP_FILLED_NEW_ARRAY:
    case OP_INVOKE_VIRTUAL:
    case OP_INVOKE_SUPER:
    case OP_INVOKE_DIRECT:
    case OP_INVOKE_STATIC:
    case OP_INVOKE_INTERFACE:
        /* action <- vararg */
        {
            unsigned int idx;
            for (idx = 0; idx < decInsn.vA; idx++) {
                GEN(workBits, decInsn.arg[idx]);
            }
        }
        break;

    case OP_FILLED_NEW_ARRAY_RANGE:
    case OP_INVOKE_VIRTUAL_RANGE:
    case OP_INVOKE_SUPER_RANGE:
    case OP_INVOKE_DIRECT_RANGE:
    case OP_INVOKE_STATIC_RANGE:
    case OP_INVOKE_INTERFACE_RANGE:
        /* action <- vararg/range */
        {
            unsigned int idx;
            for (idx = 0; idx < decInsn.vA; idx++) {
                GEN(workBits, decInsn.vC + idx);
            }
        }
        break;

    case OP_MOVE_RESULT:
    case OP_MOVE_RESULT_OBJECT:
    case OP_MOVE_EXCEPTION:
    case OP_CONST_4:
    case OP_CONST_16:
    case OP_CONST:
    case OP_CONST_HIGH16:
    case OP_CONST_STRING:
    case OP_CONST_STRING_JUMBO:
    case OP_CONST_CLASS:
    case OP_NEW_INSTANCE:
    case OP_SGET:
    case OP_SGET_BOOLEAN:
    case OP_SGET_BYTE:
    case OP_SGET_CHAR:
    case OP_SGET_SHORT:
    case OP_SGET_OBJECT:
        /* vA <- value */
        KILL(workBits, decInsn.vA);
        break;

    /*
     * FIX: OP_MOVE_RESULT_WIDE defines a register *pair*; it was
     * previously grouped with the single-register KILL cases above,
     * which left vA+1 spuriously live.
     */
    case OP_MOVE_RESULT_WIDE:
    case OP_CONST_WIDE_16:
    case OP_CONST_WIDE_32:
    case OP_CONST_WIDE:
    case OP_CONST_WIDE_HIGH16:
    case OP_SGET_WIDE:
        /* vA(wide) <- value */
        KILLW(workBits, decInsn.vA);
        break;

    case OP_MOVE:
    case OP_MOVE_FROM16:
    case OP_MOVE_16:
    case OP_MOVE_OBJECT:
    case OP_MOVE_OBJECT_FROM16:
    case OP_MOVE_OBJECT_16:
    case OP_INSTANCE_OF:
    case OP_ARRAY_LENGTH:
    case OP_NEW_ARRAY:
    case OP_IGET:
    case OP_IGET_BOOLEAN:
    case OP_IGET_BYTE:
    case OP_IGET_CHAR:
    case OP_IGET_SHORT:
    case OP_IGET_OBJECT:
    case OP_NEG_INT:
    case OP_NOT_INT:
    case OP_NEG_FLOAT:
    case OP_INT_TO_FLOAT:
    case OP_FLOAT_TO_INT:
    case OP_INT_TO_BYTE:
    case OP_INT_TO_CHAR:
    case OP_INT_TO_SHORT:
    case OP_ADD_INT_LIT16:
    case OP_RSUB_INT:
    case OP_MUL_INT_LIT16:
    case OP_DIV_INT_LIT16:
    case OP_REM_INT_LIT16:
    case OP_AND_INT_LIT16:
    case OP_OR_INT_LIT16:
    case OP_XOR_INT_LIT16:
    case OP_ADD_INT_LIT8:
    case OP_RSUB_INT_LIT8:
    case OP_MUL_INT_LIT8:
    case OP_DIV_INT_LIT8:
    case OP_REM_INT_LIT8:
    case OP_SHL_INT_LIT8:
    case OP_SHR_INT_LIT8:
    case OP_USHR_INT_LIT8:
    case OP_AND_INT_LIT8:
    case OP_OR_INT_LIT8:
    case OP_XOR_INT_LIT8:
        /* vA <- vB */
        KILL(workBits, decInsn.vA);
        GEN(workBits, decInsn.vB);
        break;

    case OP_IGET_WIDE:
    case OP_INT_TO_LONG:
    case OP_INT_TO_DOUBLE:
    case OP_FLOAT_TO_LONG:
    case OP_FLOAT_TO_DOUBLE:
        /* vA(wide) <- vB */
        KILLW(workBits, decInsn.vA);
        GEN(workBits, decInsn.vB);
        break;

    case OP_LONG_TO_INT:
    case OP_LONG_TO_FLOAT:
    case OP_DOUBLE_TO_INT:
    case OP_DOUBLE_TO_FLOAT:
        /* vA <- vB(wide) */
        KILL(workBits, decInsn.vA);
        GENW(workBits, decInsn.vB);
        break;

    case OP_MOVE_WIDE:
    case OP_MOVE_WIDE_FROM16:
    case OP_MOVE_WIDE_16:
    case OP_NEG_LONG:
    case OP_NOT_LONG:
    case OP_NEG_DOUBLE:
    case OP_LONG_TO_DOUBLE:
    case OP_DOUBLE_TO_LONG:
        /* vA(wide) <- vB(wide) */
        KILLW(workBits, decInsn.vA);
        GENW(workBits, decInsn.vB);
        break;

    case OP_CMPL_FLOAT:
    case OP_CMPG_FLOAT:
    case OP_AGET:
    case OP_AGET_BOOLEAN:
    case OP_AGET_BYTE:
    case OP_AGET_CHAR:
    case OP_AGET_SHORT:
    case OP_AGET_OBJECT:
    case OP_ADD_INT:
    case OP_SUB_INT:
    case OP_MUL_INT:
    case OP_REM_INT:
    case OP_DIV_INT:
    case OP_AND_INT:
    case OP_OR_INT:
    case OP_XOR_INT:
    case OP_SHL_INT:
    case OP_SHR_INT:
    case OP_USHR_INT:
    case OP_ADD_FLOAT:
    case OP_SUB_FLOAT:
    case OP_MUL_FLOAT:
    case OP_DIV_FLOAT:
    case OP_REM_FLOAT:
        /* vA <- vB, vC */
        KILL(workBits, decInsn.vA);
        GEN(workBits, decInsn.vB);
        GEN(workBits, decInsn.vC);
        break;

    case OP_AGET_WIDE:
        /* vA(wide) <- vB, vC */
        KILLW(workBits, decInsn.vA);
        GEN(workBits, decInsn.vB);
        GEN(workBits, decInsn.vC);
        break;

    case OP_CMPL_DOUBLE:
    case OP_CMPG_DOUBLE:
    case OP_CMP_LONG:
        /* vA <- vB(wide), vC(wide) */
        KILL(workBits, decInsn.vA);
        GENW(workBits, decInsn.vB);
        GENW(workBits, decInsn.vC);
        break;

    case OP_SHL_LONG:
    case OP_SHR_LONG:
    case OP_USHR_LONG:
        /* vA(wide) <- vB(wide), vC */
        KILLW(workBits, decInsn.vA);
        GENW(workBits, decInsn.vB);
        GEN(workBits, decInsn.vC);
        break;

    case OP_ADD_LONG:
    case OP_SUB_LONG:
    case OP_MUL_LONG:
    case OP_DIV_LONG:
    case OP_REM_LONG:
    case OP_AND_LONG:
    case OP_OR_LONG:
    case OP_XOR_LONG:
    case OP_ADD_DOUBLE:
    case OP_SUB_DOUBLE:
    case OP_MUL_DOUBLE:
    case OP_DIV_DOUBLE:
    case OP_REM_DOUBLE:
        /* vA(wide) <- vB(wide), vC(wide) */
        KILLW(workBits, decInsn.vA);
        GENW(workBits, decInsn.vB);
        GENW(workBits, decInsn.vC);
        break;

    /*
     * FIX: the OP_*_FLOAT_2ADDR opcodes operate on single (category-1)
     * registers — this file's own verifyInstructions() checks them with
     * checkRegisterIndex — so they belong in this single-width group,
     * not the wide GENW group below where they previously sat.
     */
    case OP_ADD_INT_2ADDR:
    case OP_SUB_INT_2ADDR:
    case OP_MUL_INT_2ADDR:
    case OP_REM_INT_2ADDR:
    case OP_SHL_INT_2ADDR:
    case OP_SHR_INT_2ADDR:
    case OP_USHR_INT_2ADDR:
    case OP_AND_INT_2ADDR:
    case OP_OR_INT_2ADDR:
    case OP_XOR_INT_2ADDR:
    case OP_DIV_INT_2ADDR:
    case OP_ADD_FLOAT_2ADDR:
    case OP_SUB_FLOAT_2ADDR:
    case OP_MUL_FLOAT_2ADDR:
    case OP_DIV_FLOAT_2ADDR:
    case OP_REM_FLOAT_2ADDR:
        /* vA <- vA, vB */
        /* KILL(workBits, decInsn.vA); */
        GEN(workBits, decInsn.vA);
        GEN(workBits, decInsn.vB);
        break;

    case OP_SHL_LONG_2ADDR:
    case OP_SHR_LONG_2ADDR:
    case OP_USHR_LONG_2ADDR:
        /* vA(wide) <- vA(wide), vB */
        /* KILLW(workBits, decInsn.vA); */
        GENW(workBits, decInsn.vA);
        GEN(workBits, decInsn.vB);
        break;

    case OP_ADD_LONG_2ADDR:
    case OP_SUB_LONG_2ADDR:
    case OP_MUL_LONG_2ADDR:
    case OP_DIV_LONG_2ADDR:
    case OP_REM_LONG_2ADDR:
    case OP_AND_LONG_2ADDR:
    case OP_OR_LONG_2ADDR:
    case OP_XOR_LONG_2ADDR:
    case OP_ADD_DOUBLE_2ADDR:
    case OP_SUB_DOUBLE_2ADDR:
    case OP_MUL_DOUBLE_2ADDR:
    case OP_DIV_DOUBLE_2ADDR:
    case OP_REM_DOUBLE_2ADDR:
        /* vA(wide) <- vA(wide), vB(wide) */
        /* KILLW(workBits, decInsn.vA); */
        GENW(workBits, decInsn.vA);
        GENW(workBits, decInsn.vB);
        break;

    /* we will only see this if liveness analysis is done after general vfy */
    case OP_THROW_VERIFICATION_ERROR:
        /* no registers used */
        break;

    /* quickened instructions, not expected to appear */
    case OP_EXECUTE_INLINE:
    case OP_EXECUTE_INLINE_RANGE:
    case OP_IGET_QUICK:
    case OP_IGET_WIDE_QUICK:
    case OP_IGET_OBJECT_QUICK:
    case OP_IPUT_QUICK:
    case OP_IPUT_WIDE_QUICK:
    case OP_IPUT_OBJECT_QUICK:
    case OP_INVOKE_VIRTUAL_QUICK:
    case OP_INVOKE_VIRTUAL_QUICK_RANGE:
    case OP_INVOKE_SUPER_QUICK:
    case OP_INVOKE_SUPER_QUICK_RANGE:
        /* fall through to failure */

    /* correctness fixes, not expected to appear */
    case OP_INVOKE_OBJECT_INIT_RANGE:
    case OP_RETURN_VOID_BARRIER:
    case OP_SPUT_VOLATILE:
    case OP_SPUT_OBJECT_VOLATILE:
    case OP_SPUT_WIDE_VOLATILE:
    case OP_IPUT_VOLATILE:
    case OP_IPUT_OBJECT_VOLATILE:
    case OP_IPUT_WIDE_VOLATILE:
    case OP_SGET_VOLATILE:
    case OP_SGET_OBJECT_VOLATILE:
    case OP_SGET_WIDE_VOLATILE:
    case OP_IGET_VOLATILE:
    case OP_IGET_OBJECT_VOLATILE:
    case OP_IGET_WIDE_VOLATILE:
        /* fall through to failure */

    /* these should never appear during verification */
    case OP_UNUSED_3E:
    case OP_UNUSED_3F:
    case OP_UNUSED_40:
    case OP_UNUSED_41:
    case OP_UNUSED_42:
    case OP_UNUSED_43:
    case OP_UNUSED_73:
    case OP_UNUSED_79:
    case OP_UNUSED_7A:
    case OP_BREAKPOINT:
    case OP_UNUSED_FF:
        return false;
    }

    return true;
}
/*
 * Inline the single instruction of a getter callee into the trace in place
 * of the invoke, rewriting its registers into the caller's frame.
 *
 * cUnit:        compilation unit being built (unused directly here)
 * calleeMethod: the resolved getter whose first instruction is inlined
 * invokeMIR:    the invoke MIR being replaced
 * invokeBB:     basic block containing the invoke
 * isPredicted:  true for a polymorphic (predicted) callsite; keeps a slow
 *               path and guards it with kMirOpCheckInlinePrediction
 * isRange:      true if the invoke is the /range variant
 */
static void inlineGetter(CompilationUnit *cUnit,
                         const Method *calleeMethod,
                         MIR *invokeMIR,
                         BasicBlock *invokeBB,
                         bool isPredicted,
                         bool isRange)
{
    /* NOTE(review): assumes invokeBB->fallThrough is non-NULL — confirm
     * callers guarantee a fall-through block exists for invoke sites. */
    BasicBlock *moveResultBB = invokeBB->fallThrough;
    MIR *moveResultMIR = moveResultBB->firstMIRInsn;
    MIR *newGetterMIR = dvmCompilerNew(sizeof(MIR), true);
    DecodedInstruction getterInsn;

    /* decode the callee's first (and, presumably, only) real instruction */
    dexDecodeInstruction(gDvm.instrFormat, calleeMethod->insns, &getterInsn);
    if (!dvmCompilerCanIncludeThisInstruction(calleeMethod, &getterInsn))
        return;

    /*
     * Some getters (especially invoked through interface) are not followed
     * by a move result.
     */
    if ((moveResultMIR == NULL) ||
        (moveResultMIR->dalvikInsn.opCode != OP_MOVE_RESULT &&
         moveResultMIR->dalvikInsn.opCode != OP_MOVE_RESULT_OBJECT &&
         moveResultMIR->dalvikInsn.opCode != OP_MOVE_RESULT_WIDE)) {
        return;
    }

    int dfFlags = dvmCompilerDataFlowAttributes[getterInsn.opCode];

    /* Expecting vA to be the destination register */
    if (dfFlags & (DF_UA | DF_UA_WIDE)) {
        LOGE("opcode %d has DF_UA set (not expected)", getterInsn.opCode);
        dvmAbort();
    }

    /* remap callee-frame source registers into the caller's frame */
    if (dfFlags & DF_UB) {
        getterInsn.vB = convertRegId(&invokeMIR->dalvikInsn, calleeMethod,
                                     getterInsn.vB, isRange);
    }
    if (dfFlags & DF_UC) {
        getterInsn.vC = convertRegId(&invokeMIR->dalvikInsn, calleeMethod,
                                     getterInsn.vC, isRange);
    }

    /* the destination becomes the move-result's destination register */
    getterInsn.vA = moveResultMIR->dalvikInsn.vA;

    /* Now setup the Dalvik instruction with converted src/dst registers */
    newGetterMIR->dalvikInsn = getterInsn;
    newGetterMIR->width = gDvm.instrWidth[getterInsn.opCode];
    newGetterMIR->OptimizationFlags |= MIR_CALLEE;

    /*
     * If the getter instruction is about to raise any exception, punt to the
     * interpreter and re-execute the invoke.
     */
    newGetterMIR->offset = invokeMIR->offset;
    newGetterMIR->meta.calleeMethod = calleeMethod;
    dvmCompilerInsertMIRAfter(invokeBB, invokeMIR, newGetterMIR);

    if (isPredicted) {
        /* keep a copy of the original invoke as the slow path */
        MIR *invokeMIRSlow = dvmCompilerNew(sizeof(MIR), true);
        *invokeMIRSlow = *invokeMIR;
        invokeMIR->dalvikInsn.opCode = kMirOpCheckInlinePrediction;

        /* Use vC to denote the first argument (ie this) */
        if (!isRange) {
            invokeMIR->dalvikInsn.vC = invokeMIRSlow->dalvikInsn.arg[0];
        }

        moveResultMIR->OptimizationFlags |= MIR_INLINED_PRED;
        dvmCompilerInsertMIRAfter(invokeBB, newGetterMIR, invokeMIRSlow);
        invokeMIRSlow->OptimizationFlags |= MIR_INLINED_PRED;
#if defined(WITH_JIT_TUNING)
        gDvmJit.invokePolyGetterInlined++;
#endif
    } else {
        /* monomorphic: invoke and move-result both become no-ops */
        invokeMIR->OptimizationFlags |= MIR_INLINED;
        moveResultMIR->OptimizationFlags |= MIR_INLINED;
#if defined(WITH_JIT_TUNING)
        gDvmJit.invokeMonoGetterInlined++;
#endif
    }

    return;
}
/*
 * Inline the single instruction of a setter callee into the trace in place
 * of the invoke, rewriting its registers into the caller's frame.
 *
 * cUnit:        compilation unit being built (unused directly here)
 * calleeMethod: the resolved setter whose first instruction is inlined
 * invokeMIR:    the invoke MIR being replaced
 * invokeBB:     basic block containing the invoke
 * isPredicted:  true for a polymorphic (predicted) callsite; keeps a slow
 *               path and guards it with kMirOpCheckInlinePrediction
 * isRange:      true if the invoke is the /range variant
 */
static void inlineSetter(CompilationUnit *cUnit,
                         const Method *calleeMethod,
                         MIR *invokeMIR,
                         BasicBlock *invokeBB,
                         bool isPredicted,
                         bool isRange)
{
    MIR *newSetterMIR = dvmCompilerNew(sizeof(MIR), true);
    DecodedInstruction setterInsn;

    /* decode the callee's first (and, presumably, only) real instruction */
    dexDecodeInstruction(gDvm.instrFormat, calleeMethod->insns, &setterInsn);
    if (!dvmCompilerCanIncludeThisInstruction(calleeMethod, &setterInsn))
        return;

    int dfFlags = dvmCompilerDataFlowAttributes[setterInsn.opCode];

    /* unlike a getter, a setter uses vA too — remap all used registers */
    if (dfFlags & (DF_UA | DF_UA_WIDE)) {
        setterInsn.vA = convertRegId(&invokeMIR->dalvikInsn, calleeMethod,
                                     setterInsn.vA, isRange);
    }
    if (dfFlags & DF_UB) {
        setterInsn.vB = convertRegId(&invokeMIR->dalvikInsn, calleeMethod,
                                     setterInsn.vB, isRange);
    }
    if (dfFlags & DF_UC) {
        setterInsn.vC = convertRegId(&invokeMIR->dalvikInsn, calleeMethod,
                                     setterInsn.vC, isRange);
    }

    /* Now setup the Dalvik instruction with converted src/dst registers */
    newSetterMIR->dalvikInsn = setterInsn;
    newSetterMIR->width = gDvm.instrWidth[setterInsn.opCode];
    newSetterMIR->OptimizationFlags |= MIR_CALLEE;

    /*
     * If the setter instruction is about to raise any exception, punt to the
     * interpreter and re-execute the invoke.
     */
    newSetterMIR->offset = invokeMIR->offset;
    newSetterMIR->meta.calleeMethod = calleeMethod;
    dvmCompilerInsertMIRAfter(invokeBB, invokeMIR, newSetterMIR);

    if (isPredicted) {
        /* keep a copy of the original invoke as the slow path */
        MIR *invokeMIRSlow = dvmCompilerNew(sizeof(MIR), true);
        *invokeMIRSlow = *invokeMIR;
        invokeMIR->dalvikInsn.opCode = kMirOpCheckInlinePrediction;

        /* Use vC to denote the first argument (ie this) */
        if (!isRange) {
            invokeMIR->dalvikInsn.vC = invokeMIRSlow->dalvikInsn.arg[0];
        }

        dvmCompilerInsertMIRAfter(invokeBB, newSetterMIR, invokeMIRSlow);
        invokeMIRSlow->OptimizationFlags |= MIR_INLINED_PRED;
#if defined(WITH_JIT_TUNING)
        gDvmJit.invokePolySetterInlined++;
#endif
    } else {
        /*
         * The invoke becomes no-op so it needs an explicit branch to jump to
         * the chaining cell.
         */
        invokeBB->needFallThroughBranch = true;
        invokeMIR->OptimizationFlags |= MIR_INLINED;
#if defined(WITH_JIT_TUNING)
        gDvmJit.invokeMonoSetterInlined++;
#endif
    }

    return;
}
/*
 * Decode the instruction at the given code-unit index of a method.
 *
 * meth:     method whose instruction stream is read
 * insnIdx:  index (in 16-bit code units) of the instruction to decode
 * pDecInsn: receives the decoded instruction
 */
static void decodeInstruction(const Method* meth, int insnIdx,
    DecodedInstruction* pDecInsn)
{
    const u2* insnPtr = meth->insns + insnIdx;
    dexDecodeInstruction(gDvm.instrFormat, insnPtr, pDecInsn);
}