Example #1
/*
 * Compute the width of the instruction at each address in the instruction
 * stream.  Addresses that are in the middle of an instruction, or that
 * are part of switch table data, are not set (so the caller should probably
 * initialize "insnFlags" to zero).
 *
 * If "pNewInstanceCount" is not NULL, it will be set to the number of
 * new-instance instructions in the method.
 *
 * Performs some static checks, notably:
 * - opcode of first instruction begins at index 0
 * - only documented instructions may appear
 * - each instruction follows the last
 * - last byte of last instruction is at (code_length-1)
 *
 * Logs an error and returns "false" on failure.
 */
bool dvmComputeCodeWidths(const Method* meth, InsnFlags* insnFlags,
    int* pNewInstanceCount)
{
    size_t insnCount = dvmGetMethodInsnsSize(meth);
    const u2* insns = meth->insns;
    bool result = false;
    int newInstanceCount = 0;
    int i;

    for (i = 0; i < (int) insnCount; /**/) {
        size_t width = dexGetInstrOrTableWidthAbs(gDvm.instrWidth, insns);
        if (width == 0) {
            LOG_VFY_METH(meth,
                "VFY: invalid post-opt instruction (0x%04x)\n", *insns);
            goto bail;
        }

        if ((*insns & 0xff) == OP_NEW_INSTANCE)
            newInstanceCount++;

        if (width > 65535) {
            LOG_VFY_METH(meth, "VFY: insane width %d\n", width);
            goto bail;
        }

        insnFlags[i] |= width;
        i += width;
        insns += width;
    }
    if (i != (int) dvmGetMethodInsnsSize(meth)) {
        LOG_VFY_METH(meth, "VFY: code did not end where expected (%d vs. %d)\n",
            i, dvmGetMethodInsnsSize(meth));
        goto bail;
    }

    result = true;
    if (pNewInstanceCount != NULL)
        *pNewInstanceCount = newInstanceCount;

bail:
    return result;
}
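
A note on what the width walk above actually produces: each instruction's width (in 16-bit code units) is OR-ed into the low bits of its insnFlags entry, and addresses that fall inside an instruction keep a width of zero. The standalone sketch below uses made-up types and data rather than the Dalvik headers; it mimics that walk and then reads the widths back to enumerate instruction starts.

#include <cstdint>
#include <cstdio>
#include <vector>

/* Hypothetical stand-ins for the Dalvik types used above. */
typedef uint16_t u2;
typedef uint32_t InsnFlags;     /* low bits hold the instruction width */

int main()
{
    /* Toy "method": three instructions of width 1, 2 and 3 code units.
       The code-unit values and the decoded widths are made up. */
    std::vector<u2> insns = { 0, 0, 0, 0, 0, 0 };
    std::vector<size_t> widths = { 1, 2, 3 };   /* pretend decoder output */

    std::vector<InsnFlags> insnFlags(insns.size(), 0);

    size_t i = 0, next = 0;
    while (i < insns.size()) {
        size_t width = widths[next++];
        insnFlags[i] |= width;      /* only the first code unit gets a width */
        i += width;
    }

    /* Entries left at zero are addresses inside an instruction. */
    for (size_t addr = 0; addr < insnFlags.size(); ++addr) {
        if (insnFlags[addr] != 0)
            printf("addr %zu: instruction start, width %u\n",
                addr, (unsigned) insnFlags[addr]);
        else
            printf("addr %zu: inside an instruction\n", addr);
    }
    return 0;
}
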
Example #2
/*
 * Verify an array data table.  "curOffset" is the offset of the fill-array-data
 * instruction.
 */
static bool checkArrayData(const Method* meth, int curOffset)
{
    const int insnCount = dvmGetMethodInsnsSize(meth);
    const u2* insns = meth->insns + curOffset;
    const u2* arrayData;
    int valueCount, valueWidth, tableSize;
    int offsetToArrayData;

    assert(curOffset >= 0 && curOffset < insnCount);

    /* make sure the start of the array data table is in range */
    offsetToArrayData = insns[1] | (((s4)insns[2]) << 16);
    if (curOffset + offsetToArrayData < 0 ||
        curOffset + offsetToArrayData + 2 >= insnCount)
    {
        LOG_VFY_METH(meth,
            "VFY: invalid array data start: at %d, data offset %d, count %d\n",
            curOffset, offsetToArrayData, insnCount);
        return false;
    }

    /* offset to array data table is a relative branch-style offset */
    arrayData = insns + offsetToArrayData;

    /* make sure the table is 32-bit aligned */
    if ((((u4) arrayData) & 0x03) != 0) {
        LOG_VFY_METH(meth,
            "VFY: unaligned array data table: at %d, data offset %d\n",
            curOffset, offsetToArrayData);
        return false;
    }

    valueWidth = arrayData[1];
    valueCount = *(u4*)(&arrayData[2]);

    tableSize = 4 + (valueWidth * valueCount + 1) / 2;

    /* make sure the end of the array data table is in range */
    if (curOffset + offsetToArrayData + tableSize > insnCount) {
        LOG_VFY_METH(meth,
            "VFY: invalid array data end: at %d, data offset %d, end %d, "
            "count %d\n",
            curOffset, offsetToArrayData, 
            curOffset + offsetToArrayData + tableSize,
            insnCount);
        return false;
    }

    return true;
}
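
For reference, the tableSize expression above follows from the fill-array-data payload layout in the dex format: a 16-bit ident, a 16-bit element width, a 32-bit element count, then the packed element data, all measured in 16-bit code units. A minimal sketch of the same arithmetic:

#include <cstdio>

/* Size, in 16-bit code units, of a fill-array-data payload:
   ident (1) + element_width (1) + size (2) + ceil(width * count / 2).
   This mirrors the tableSize expression in checkArrayData above. */
static unsigned arrayDataTableSize(unsigned valueWidth, unsigned valueCount)
{
    return 4 + (valueWidth * valueCount + 1) / 2;
}

int main()
{
    /* int[5]: 4-byte elements, 5 values -> 4 + 10 = 14 code units */
    printf("int[5]  -> %u code units\n", arrayDataTableSize(4, 5));
    /* byte[3]: 1-byte elements, 3 values -> 4 + 2 = 6 code units */
    printf("byte[3] -> %u code units\n", arrayDataTableSize(1, 3));
    return 0;
}
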
Example #3
/*
Compute the width of the instruction at each address in the instruction
stream, and store it in vdata->insnFlags.  Addresses that are in the
middle of an instruction, or that are part of switch table data, are not
touched (so the caller should probably initialize "insnFlags" to zero).

The "newInstanceCount" and "monitorEnterCount" fields in vdata are
also set.

Performs some static checks, notably:
- opcode of first instruction begins at index 0
- only documented instructions may appear
- each instruction follows the last
- last byte of last instruction is at (code_length-1)

Logs an error and returns "false" on failure.
*/
static bool computeWidthsAndCountOps(VerifierData* vdata)
{
    const Method* meth = vdata->method;
    InsnFlags* insnFlags = vdata->insnFlags;
    size_t insnCount = vdata->insnsSize;
    const u2* insns = meth->insns;
    bool result = false;
    int newInstanceCount = 0;
    int monitorEnterCount = 0;
    int i;

    for (i = 0; i < (int) insnCount; /**/) {
        size_t width = dexGetWidthFromInstruction(insns);
        if (width == 0) {
            LOG_VFY_METH(meth, "VFY: invalid instruction (0x%04x)", *insns);
            goto bail;
        } else if (width > 65535) {
            LOG_VFY_METH(meth,
                "VFY: warning: unusually large instr width (%d)", width);
        }
        /*
        Code unit (relates to question #1): the dex file is mapped into memory
        as raw bytecode; the value obtained after decoding a code unit is the
        opcode, while the raw 16-bit value before decoding is the code unit.
        */
        Opcode opcode = dexOpcodeFromCodeUnit(*insns);
        if (opcode == OP_NEW_INSTANCE)
            newInstanceCount++;
        if (opcode == OP_MONITOR_ENTER)
            monitorEnterCount++;

        insnFlags[i] |= width;
        i += width;
        insns += width;
    }
    if (i != (int) vdata->insnsSize) {
        LOG_VFY_METH(meth, "VFY: code did not end where expected (%d vs. %d)",
            i, dvmGetMethodInsnsSize(meth));
        goto bail;
    }

    result = true;
    vdata->newInstanceCount = newInstanceCount;
    vdata->monitorEnterCount = monitorEnterCount;

bail:
    return result;
}
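
To make the code-unit note above concrete: for the ordinary (non-extended) opcodes, decoding a code unit amounts to taking its low byte, which is also what the *insns & 0xff tests elsewhere in these examples rely on. A tiny sketch; the encoding below is illustrative, not taken from a real dex file.

#include <cstdint>
#include <cstdio>

typedef uint16_t u2;

/* For ordinary (non-extended) opcodes the opcode number is the low byte of
   the instruction's first code unit; the high byte carries the first
   operand field. */
static unsigned opcodeOf(u2 codeUnit)
{
    return codeUnit & 0xff;
}

int main()
{
    u2 unit = 0x0122;   /* illustrative: low byte 0x22, high byte 0x01 */
    printf("code unit 0x%04x decodes to opcode 0x%02x\n",
        unit, opcodeOf(unit));
    return 0;
}
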
Example #4
/*
Verify that the target of a branch instruction is valid.

We don't expect code to jump directly into an exception handler, but
it's valid to do so as long as the target isn't a "move-exception"
instruction.  We verify that in a later stage.

The VM spec doesn't forbid an instruction from branching to itself,
but the Dalvik spec declares that only certain instructions can do so.

Updates "insnFlags", setting the "branch target" flag.

校验分支指令目标是否有效
*/
static bool checkBranchTarget(const Method* meth, InsnFlags* insnFlags,
    int curOffset, bool selfOkay)
{
    const int insnCount = dvmGetMethodInsnsSize(meth);
    s4 offset, absOffset;
    bool isConditional;

    if (!dvmGetBranchOffset(meth, insnFlags, curOffset, &offset,
            &isConditional))
        return false;

    if (!selfOkay && offset == 0) {
        LOG_VFY_METH(meth, "VFY: branch offset of zero not allowed at %#x",
            curOffset);
        return false;
    }

    /*
    Check for 32-bit overflow.  This isn't strictly necessary if we can
    depend on the VM to have identical "wrap-around" behavior, but
    it's unwise to depend on that.
    */
    if (((s8) curOffset + (s8) offset) != (s8)(curOffset + offset)) {
        LOG_VFY_METH(meth, "VFY: branch target overflow %#x +%d",
            curOffset, offset);
        return false;
    }
    absOffset = curOffset + offset;
    if (absOffset < 0 || absOffset >= insnCount ||
        !dvmInsnIsOpcode(insnFlags, absOffset))
    {
        LOG_VFY_METH(meth,
            "VFY: invalid branch target %d (-> %#x) at %#x",
            offset, absOffset, curOffset);
        return false;
    }
    dvmInsnSetBranchTarget(insnFlags, absOffset, true);

    return true;
}
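
The 64-bit comparison above is the whole overflow check: compute the sum in 64 bits, wrap it back to 32 bits, and see whether anything was lost. A self-contained sketch of the same idea; the wrap here goes through an unsigned cast so the sketch itself avoids signed overflow.

#include <cstdint>
#include <cstdio>

typedef int32_t s4;
typedef int64_t s8;

/* True if curOffset + offset does not fit in 32 bits -- the same idea as
   the check in checkBranchTarget above. */
static bool branchOverflows(s4 curOffset, s4 offset)
{
    s8 wide = (s8) curOffset + (s8) offset;
    s4 narrow = (s4) (uint32_t) wide;       /* wrap to 32 bits */
    return wide != (s8) narrow;
}

int main()
{
    printf("%d\n", branchOverflows(100, 25));        /* 0: fits */
    printf("%d\n", branchOverflows(INT32_MAX, 1));   /* 1: overflows */
    return 0;
}
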
Example #5
File: Stack.cpp  Project: nesl/CAreDroid
/*
 * Extract the object that is the target of a monitor-enter instruction
 * in the top stack frame of "thread".
 *
 * The other thread might be alive, so this has to work carefully.
 *
 * The thread list lock must be held.
 *
 * Returns "true" if we successfully recover the object.  "*pOwner" will
 * be NULL if we can't determine the owner for some reason (e.g. race
 * condition on ownership transfer).
 */
static bool extractMonitorEnterObject(Thread* thread, Object** pLockObj,
    Thread** pOwner)
{
    void* framePtr = thread->interpSave.curFrame;

    if (framePtr == NULL || dvmIsBreakFrame((u4*)framePtr))
        return false;

    const StackSaveArea* saveArea = SAVEAREA_FROM_FP(framePtr);
    const Method* method = saveArea->method;
    const u2* currentPc = saveArea->xtra.currentPc;

    /* check Method* */
    if (!dvmLinearAllocContains(method, sizeof(Method))) {
        ALOGD("ExtrMon: method %p not valid", method);
        return false;
    }

    /* check currentPc */
    u4 insnsSize = dvmGetMethodInsnsSize(method);
    if (currentPc < method->insns ||
        currentPc >= method->insns + insnsSize)
    {
        ALOGD("ExtrMon: insns %p not valid (%p - %p)",
            currentPc, method->insns, method->insns + insnsSize);
        return false;
    }

    /* check the instruction */
    if ((*currentPc & 0xff) != OP_MONITOR_ENTER) {
        ALOGD("ExtrMon: insn at %p is not monitor-enter (0x%02x)",
            currentPc, *currentPc & 0xff);
        return false;
    }

    /* get and check the register index */
    unsigned int reg = *currentPc >> 8;
    if (reg >= method->registersSize) {
        ALOGD("ExtrMon: invalid register %d (max %d)",
            reg, method->registersSize);
        return false;
    }

    /* get and check the object in that register */
    u4* fp = (u4*) framePtr;
    Object* obj = (Object*) fp[reg];
    if (obj != NULL && !dvmIsHeapAddress(obj)) {
        ALOGD("ExtrMon: invalid object %p at %p[%d]", obj, fp, reg);
        return false;
    }
    *pLockObj = obj;

    /*
     * Try to determine the object's lock holder; it's okay if this fails.
     *
     * We're assuming the thread list lock is already held by this thread.
     * If it's not, we may be living dangerously if we have to scan through
     * the thread list to find a match.  (The VM will generally be in a
     * suspended state when executing here, so this is a minor concern
     * unless we're dumping while threads are running, in which case there's
     * a good chance of stuff blowing up anyway.)
     */
    *pOwner = dvmGetObjectLockHolder(obj);

    return true;
}
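
For context on the register read above: monitor-enter is a single-code-unit instruction whose register number vAA sits in the high byte, and each register is a 32-bit slot in the frame's register array. A toy decode, with invented values and no real VM state involved:

#include <cstdint>
#include <cstdio>

typedef uint16_t u2;
typedef uint32_t u4;

int main()
{
    u4 frame[4] = { 0, 0, 0xcafe0000u, 0 };   /* pretend register slots */
    u2 insn = 0x021d;       /* low byte 0x1d (monitor-enter), vAA = 2 */

    unsigned opcode = insn & 0xff;
    unsigned reg = insn >> 8;
    printf("opcode 0x%02x, register v%u holds 0x%08x\n",
        opcode, reg, (unsigned) frame[reg]);
    return 0;
}
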
Example #6
void
hprofFillInStackTrace(void *objectPtr)
{
    DvmHeapChunk *chunk;
    StackTraceEntry stackTraceEntry;
    Thread* self;
    void* fp;
    int i;
    
    if (objectPtr == NULL) {
        return;
    }
    self = dvmThreadSelf();
    if (self == NULL) {
        return;
    }
    fp = self->curFrame;

    /* Serial number to be filled in later. */
    stackTraceEntry.trace.serialNumber = -1;

    /*
     * TODO - The HAT tool doesn't care about thread data, so we can defer
     * actually emitting thread records and assigning thread serial numbers.
     */
    stackTraceEntry.trace.threadSerialNumber = (int) self;

    memset(&stackTraceEntry.trace.frameIds, 0,
            sizeof(stackTraceEntry.trace.frameIds));

    i = 0;
    while ((fp != NULL) && (i < STACK_DEPTH)) {
        const StackSaveArea* saveArea = SAVEAREA_FROM_FP(fp);
        const Method* method = saveArea->method;
        StackFrameEntry frame;

        if (!dvmIsBreakFrame(fp)) {
            frame.frame.method = method;
            if (dvmIsNativeMethod(method)) {
                frame.frame.pc = 0; /* no saved PC for native methods */
            } else {
                assert(saveArea->xtra.currentPc >= method->insns &&
                        saveArea->xtra.currentPc <
                        method->insns + dvmGetMethodInsnsSize(method));
                frame.frame.pc = (int) (saveArea->xtra.currentPc -
                        method->insns);
            }

            // Canonicalize the frame and cache it in the hprof context
            stackTraceEntry.trace.frameIds[i++] =
                hprofLookupStackFrameId(&frame);
        }

        assert(fp != saveArea->prevFrame);
        fp = saveArea->prevFrame;
    }

    /* Store the stack trace serial number in the object header */
    chunk = ptr2chunk(objectPtr);
    chunk->stackTraceSerialNumber =
            hprofLookupStackSerialNumber(&stackTraceEntry);
}
Example #7
/*
 * Perform some operations at the "top" of the interpreter loop.
 * This stuff is required to support debugging and profiling.
 *
 * Using" __attribute__((noinline))" seems to do more harm than good.  This
 * is best when inlined due to the large number of parameters, most of
 * which are local vars in the main interp loop.
 */
static void checkDebugAndProf(const u2* pc, const u4* fp, Thread* self,
    const Method* method, bool* pIsMethodEntry)
{
    /* check to see if we've run off end of method */
    assert(pc >= method->insns && pc <
            method->insns + dvmGetMethodInsnsSize(method));

#if 0
    /*
     * When we hit a specific method, enable verbose instruction logging.
     * Sometimes it's helpful to use the debugger attach as a trigger too.
     */
    if (*pIsMethodEntry) {
        static const char* cd = "Landroid/test/Arithmetic;";
        static const char* mn = "shiftTest2";
        static const char* sg = "()V";

        if (/*gDvm.debuggerActive &&*/
            strcmp(method->clazz->descriptor, cd) == 0 &&
            strcmp(method->name, mn) == 0 &&
            strcmp(method->shorty, sg) == 0)
        {
            LOGW("Reached %s.%s, enabling verbose mode\n",
                method->clazz->descriptor, method->name);
            android_setMinPriority(LOG_TAG"i", ANDROID_LOG_VERBOSE);
            dumpRegs(method, fp, true);
        }

        if (!gDvm.debuggerActive)
            *pIsMethodEntry = false;
    }
#endif

    /*
     * If the debugger is attached, check for events.  If the profiler is
     * enabled, update that too.
     *
     * This code is executed for every instruction we interpret, so for
     * performance we use a couple of #ifdef blocks instead of runtime tests.
     */
#ifdef WITH_PROFILER
    /* profiler and probably debugger */
    bool isEntry = *pIsMethodEntry;
    if (isEntry) {
        *pIsMethodEntry = false;
        TRACE_METHOD_ENTER(self, method);
    }
    if (gDvm.debuggerActive) {
        updateDebugger(method, pc, fp, isEntry, self);
    }
    if (gDvm.instructionCountEnableCount != 0) {
        /*
         * Count up the #of executed instructions.  This isn't synchronized
         * for thread-safety; if we need that we should make this
         * thread-local and merge counts into the global area when threads
         * exit (perhaps suspending all other threads GC-style and pulling
         * the data out of them).
         */
        int inst = *pc & 0xff;
        gDvm.executedInstrCounts[inst]++;
    }
#else
    /* debugger only */
    if (gDvm.debuggerActive) {
        bool isEntry = *pIsMethodEntry;
        updateDebugger(method, pc, fp, isEntry, self);
        if (isEntry)
            *pIsMethodEntry = false;
    }
#endif
}
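
A small sketch of the instruction-count bookkeeping described in the WITH_PROFILER branch: one counter per possible low byte, bumped once per interpreted instruction. The names and code units here are illustrative, not the actual gDvm fields.

#include <cstdint>
#include <cstdio>

typedef uint16_t u2;

/* One slot per possible opcode byte, as in the profiler branch above. */
static unsigned executedInstrCounts[256];

static void countInstruction(const u2* pc)
{
    executedInstrCounts[*pc & 0xff]++;
}

int main()
{
    u2 toy[3] = { 0x0112, 0x0212, 0x000e };   /* made-up code units */
    for (int i = 0; i < 3; ++i)
        countInstruction(&toy[i]);
    printf("opcode 0x12 executed %u times\n", executedInstrCounts[0x12]);
    return 0;
}
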
Example #8
/*
Verify a switch table.  "curOffset" is the offset of the switch
instruction.

Updates "insnFlags", setting the "branch target" flag.
*/
static bool checkSwitchTargets(const Method* meth, InsnFlags* insnFlags,
    u4 curOffset)
{
    const u4 insnCount = dvmGetMethodInsnsSize(meth);
    const u2* insns = meth->insns + curOffset;
    const u2* switchInsns;
    u2 expectedSignature;
    u4 switchCount, tableSize;
    s4 offsetToSwitch, offsetToKeys, offsetToTargets;
    s4 offset, absOffset;
    u4 targ;

    assert(curOffset < insnCount);

    /* make sure the start of the switch is in range */
    offsetToSwitch = insns[1] | ((s4) insns[2]) << 16;
    if ((s4) curOffset + offsetToSwitch < 0 ||
        curOffset + offsetToSwitch + 2 >= insnCount)
    {
        LOG_VFY("VFY: invalid switch start: at %d, switch offset %d, "
                "count %d",
            curOffset, offsetToSwitch, insnCount);
        return false;
    }

    /* offset to switch table is a relative branch-style offset */
    switchInsns = insns + offsetToSwitch;

    /* make sure the table is 32-bit aligned */
    if ((((u4) switchInsns) & 0x03) != 0) {
        LOG_VFY("VFY: unaligned switch table: at %d, switch offset %d",
            curOffset, offsetToSwitch);
        return false;
    }

    switchCount = switchInsns[1];

    if ((*insns & 0xff) == OP_PACKED_SWITCH) {
        /* 0=sig, 1=count, 2/3=firstKey */
        offsetToTargets = 4;
        offsetToKeys = -1;
        expectedSignature = kPackedSwitchSignature;
    } else {
        /* 0=sig, 1=count, 2..count*2 = keys */
        offsetToKeys = 2;
        offsetToTargets = 2 + 2*switchCount;
        expectedSignature = kSparseSwitchSignature;
    }
    tableSize = offsetToTargets + switchCount*2;

    if (switchInsns[0] != expectedSignature) {
        LOG_VFY("VFY: wrong signature for switch table (0x%04x, wanted 0x%04x)",
            switchInsns[0], expectedSignature);
        return false;
    }

    /* make sure the end of the switch is in range */
    if (curOffset + offsetToSwitch + tableSize > (u4) insnCount) {
        LOG_VFY("VFY: invalid switch end: at %d, switch offset %d, end %d, "
                "count %d",
            curOffset, offsetToSwitch, curOffset + offsetToSwitch + tableSize,
            insnCount);
        return false;
    }

    /* for a sparse switch, verify the keys are in ascending order */
    if (offsetToKeys > 0 && switchCount > 1) {
        s4 lastKey;

        lastKey = switchInsns[offsetToKeys] |
                  (switchInsns[offsetToKeys+1] << 16);
        for (targ = 1; targ < switchCount; targ++) {
            s4 key = (s4) switchInsns[offsetToKeys + targ*2] |
                    (s4) (switchInsns[offsetToKeys + targ*2 +1] << 16);
            if (key <= lastKey) {
                LOG_VFY("VFY: invalid packed switch: last key=%d, this=%d",
                    lastKey, key);
                return false;
            }

            lastKey = key;
        }
    }

    /* verify each switch target */
    for (targ = 0; targ < switchCount; targ++) {
        offset = (s4) switchInsns[offsetToTargets + targ*2] |
                (s4) (switchInsns[offsetToTargets + targ*2 +1] << 16);
        absOffset = curOffset + offset;

        if (absOffset < 0 || absOffset >= (s4)insnCount ||
            !dvmInsnIsOpcode(insnFlags, absOffset))
        {
            LOG_VFY("VFY: invalid switch target %d (-> %#x) at %#x[%d]",
                offset, absOffset, curOffset, targ);
            return false;
        }
        dvmInsnSetBranchTarget(insnFlags, absOffset, true);
    }

    return true;
}
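
The table-size arithmetic above comes straight from the two switch payload layouts: packed-switch stores an ident, a count and one 32-bit first key before count 32-bit targets, while sparse-switch stores count 32-bit keys followed by count 32-bit targets. In 16-bit code units that is 4 + 2*count and 2 + 4*count respectively; a minimal sketch:

#include <cstdio>

/* Sizes, in 16-bit code units, of the two switch payloads checked above. */
static unsigned packedSwitchSize(unsigned count)
{
    return 4 + count * 2;   /* ident + count + first_key + targets */
}

static unsigned sparseSwitchSize(unsigned count)
{
    return 2 + count * 4;   /* ident + count + keys + targets */
}

int main()
{
    printf("packed, 3 targets: %u code units\n", packedSwitchSize(3));  /* 10 */
    printf("sparse, 3 targets: %u code units\n", sparseSwitchSize(3));  /* 14 */
    return 0;
}
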
Example #9
/*
Verify an array data table.  "curOffset" is the offset of the
fill-array-data instruction.
*/
static bool checkArrayData(const Method* meth, u4 curOffset)
{
    const u4 insnCount = dvmGetMethodInsnsSize(meth);
    const u2* insns = meth->insns + curOffset;
    const u2* arrayData;
    u4 valueCount, valueWidth, tableSize;
    s4 offsetToArrayData;

    assert(curOffset < insnCount);

    /* make sure the start of the array data table is in range */
    offsetToArrayData = insns[1] | (((s4)insns[2]) << 16);
    if ((s4)curOffset + offsetToArrayData < 0 ||
        curOffset + offsetToArrayData + 2 >= insnCount)
    {
        LOG_VFY("VFY: invalid array data start: at %d, data offset %d, "
                "count %d",
            curOffset, offsetToArrayData, insnCount);
        return false;
    }

    /* offset to array data table is a relative branch-style offset */
    arrayData = insns + offsetToArrayData;

    /* make sure the table is 32-bit aligned */
    if ((((u4) arrayData) & 0x03) != 0) {
        LOG_VFY("VFY: unaligned array data table: at %d, data offset %d",
            curOffset, offsetToArrayData);
        return false;
    }

    valueWidth = arrayData[1];
    valueCount = *(u4*)(&arrayData[2]);

    tableSize = 4 + (valueWidth * valueCount + 1) / 2;

    /* make sure the end of the array data table is in range */
    if (curOffset + offsetToArrayData + tableSize > insnCount) {
        LOG_VFY("VFY: invalid array data end: at %d, data offset %d, end %d, "
                "count %d",
            curOffset, offsetToArrayData,
            curOffset + offsetToArrayData + tableSize, insnCount);
        return false;
    }

    return true;
}
Example #10
/*
Perform verification on a single method.

We do this in three passes:
 (1) Walk through all code units, determining instruction locations,
     widths, and other characteristics.
 (2) Walk through all code units, performing static checks on
     operands.
 (3) Iterate through the method, checking type safety and looking
     for code flow problems.

Some checks may be bypassed depending on the verification mode.  We can't
turn this stuff off completely if we want to do "exact" GC.

TODO: cite source?
Confirmed here:
- code array must not be empty
- (N/A) code_length must be less than 65536
Confirmed by computeWidthsAndCountOps():
- opcode of first instruction begins at index 0
- only documented instructions may appear
- each instruction follows the last
- last byte of last instruction is at (code_length-1)
*/
static bool verifyMethod(Method* meth)
{
    bool result = false;

    /*
    Verifier state blob.  Various values will be cached here so we
    can avoid expensive lookups and pass fewer arguments around.

    The data structure is defined in the CodeVerify.h header file.
    */
    VerifierData vdata;
#if 1   // ndef NDEBUG
    memset(&vdata, 0x99, sizeof(vdata));
#endif
    
    // initialize the verifier data
    vdata.method = meth;
    vdata.insnsSize = dvmGetMethodInsnsSize(meth);
    vdata.insnRegCount = meth->registersSize;
    vdata.insnFlags = NULL;
    vdata.uninitMap = NULL;
    vdata.basicBlocks = NULL;

    /*
    If there aren't any instructions, make sure that's expected, then
    exit successfully.  Note: for native methods, meth->insns gets set
    to a native function pointer on first call, so don't use that as
    an indicator.
    */
    if (vdata.insnsSize == 0) {
        if (!dvmIsNativeMethod(meth) && !dvmIsAbstractMethod(meth)) {
            LOG_VFY_METH(meth,
                "VFY: zero-length code in concrete non-native method");
            goto bail;
        }

        goto success;
    }

    /*
    Sanity-check the register counts.  ins + locals = registers, so make
    sure that ins <= registers.
    */
    if (meth->insSize > meth->registersSize) {
        LOG_VFY_METH(meth, "VFY: bad register counts (ins=%d regs=%d)",
            meth->insSize, meth->registersSize);
        goto bail;
    }

    /*
    Allocate and populate an array to hold instruction data.

    TODO: Consider keeping a reusable pre-allocated array sitting
    around for smaller methods.
    */
    vdata.insnFlags = (InsnFlags*) calloc(vdata.insnsSize, sizeof(InsnFlags));
    if (vdata.insnFlags == NULL)
        goto bail;

    /*
    Compute the width of each instruction and store the result in insnFlags.
    Count up the #of occurrences of certain opcodes while we're at it.
    */
    if (!computeWidthsAndCountOps(&vdata))
        goto bail;

    /*
    Allocate a map to hold the classes of uninitialized instances.
    */
    vdata.uninitMap = dvmCreateUninitInstanceMap(meth, vdata.insnFlags,
        vdata.newInstanceCount);
    if (vdata.uninitMap == NULL)
        goto bail;

    /*
    Set the "in try" flags for all instructions guarded by a "try" block.
    Also sets the "branch target" flag on exception handlers.
    */
    if (!scanTryCatchBlocks(meth, vdata.insnFlags))
        goto bail;

    /*
    Perform static instruction verification.  Also sets the "branch
    target" flags.
    */
    if (!verifyInstructions(&vdata))
        goto bail;

    /*
    Do code-flow analysis.

    We could probably skip this for a method with no registers, but
    that's so rare that there's little point in checking.
    */
    if (!dvmVerifyCodeFlow(&vdata)) {
        //ALOGD("+++ %s failed code flow", meth->name);
        goto bail;
    }

success:
    result = true;

bail:
    dvmFreeVfyBasicBlocks(&vdata);
    dvmFreeUninitInstanceMap(vdata.uninitMap);
    free(vdata.insnFlags);
    return result;
}
Example #11
/*
Set the "in try" flags for all instructions protected by "try" statements.
Also sets the "branch target" flags for exception handlers.

Call this after widths have been set in "insnFlags".

Returns "false" if something in the exception table looks fishy, but
we're expecting the exception table to be somewhat sane.
*/
static bool scanTryCatchBlocks(const Method* meth, InsnFlags* insnFlags)
{
    u4 insnsSize = dvmGetMethodInsnsSize(meth);
    const DexCode* pCode = dvmGetMethodCode(meth);
    u4 triesSize = pCode->triesSize;
    const DexTry* pTries;
    u4 idx;

    if (triesSize == 0) {
        return true;
    }

    pTries = dexGetTries(pCode);

    for (idx = 0; idx < triesSize; idx++) {
        const DexTry* pTry = &pTries[idx];
        u4 start = pTry->startAddr;
        u4 end = start + pTry->insnCount;
        u4 addr;

        if ((start >= end) || (start >= insnsSize) || (end > insnsSize)) {
            LOG_VFY_METH(meth,
                "VFY: bad exception entry: startAddr=%d endAddr=%d (size=%d)",
                start, end, insnsSize);
            return false;
        }

        if (dvmInsnGetWidth(insnFlags, start) == 0) {
            LOG_VFY_METH(meth,
                "VFY: 'try' block starts inside an instruction (%d)",
                start);
            return false;
        }

        for (addr = start; addr < end;
            addr += dvmInsnGetWidth(insnFlags, addr))
        {
            assert(dvmInsnGetWidth(insnFlags, addr) != 0);
            dvmInsnSetInTry(insnFlags, addr, true);
        }
    }

    /* Iterate over each of the handlers to verify target addresses. */
    u4 handlersSize = dexGetHandlersSize(pCode);
    u4 offset = dexGetFirstHandlerOffset(pCode);
    for (idx = 0; idx < handlersSize; idx++) {
        DexCatchIterator iterator;
        dexCatchIteratorInit(&iterator, pCode, offset);

        for (;;) {
            DexCatchHandler* handler = dexCatchIteratorNext(&iterator);
            u4 addr;

            if (handler == NULL) {
                break;
            }

            addr = handler->address;
            if (dvmInsnGetWidth(insnFlags, addr) == 0) {
                LOG_VFY_METH(meth,
                    "VFY: exception handler starts at bad address (%d)",
                    addr);
                return false;
            }

            dvmInsnSetBranchTarget(insnFlags, addr, true);
        }

        offset = dexCatchIteratorGetEndOffset(&iterator, pCode);
    }

    return true;
}
Example #12
/*
 * Optimize instructions in a method.
 *
 * This does a single pass through the code, examining each instruction.
 *
 * This is not expected to fail if the class was successfully verified.
 * The only significant failure modes on unverified code occur when an
 * "essential" update fails, but we can't generally identify those: if we
 * can't look up a field, we can't know if the field access was supposed
 * to be handled as volatile.
 *
 * Instead, we give it our best effort, and hope for the best.  For 100%
 * reliability, only optimize a class after verification succeeds.
 */
static void optimizeMethod(Method* method, bool essentialOnly)
{
    bool needRetBar, forSmp;
    u4 insnsSize;
    u2* insns;

    if (dvmIsNativeMethod(method) || dvmIsAbstractMethod(method))
        return;

    forSmp = gDvm.dexOptForSmp;
    needRetBar = needsReturnBarrier(method);

    insns = (u2*) method->insns;
    assert(insns != NULL);
    insnsSize = dvmGetMethodInsnsSize(method);

    while (insnsSize > 0) {
        Opcode opc, quickOpc, volatileOpc;
        size_t width;
        bool matched = true;

        opc = dexOpcodeFromCodeUnit(*insns);
        width = dexGetWidthFromInstruction(insns);
        volatileOpc = OP_NOP;

        /*
         * Each instruction may have:
         * - "volatile" replacement
         *   - may be essential or essential-on-SMP
         * - correctness replacement
         *   - may be essential or essential-on-SMP
         * - performance replacement
         *   - always non-essential
         *
         * Replacements are considered in the order shown, and the first
         * match is applied.  For example, iget-wide will convert to
         * iget-wide-volatile rather than iget-wide-quick if the target
         * field is volatile.
         */

        /*
         * essential substitutions:
         *  {iget,iput,sget,sput}-wide --> {op}-wide-volatile
         *  invoke-direct[/range] --> invoke-object-init/range
         *
         * essential-on-SMP substitutions:
         *  {iget,iput,sget,sput}-* --> {op}-volatile
         *  return-void --> return-void-barrier
         *
         * non-essential substitutions:
         *  {iget,iput}-* --> {op}-quick
         *
         * TODO: might be time to merge this with the other two switches
         */
        switch (opc) {
        case OP_IGET:
        case OP_IGET_BOOLEAN:
        case OP_IGET_BYTE:
        case OP_IGET_CHAR:
        case OP_IGET_SHORT:
            quickOpc = OP_IGET_QUICK;
            if (forSmp)
                volatileOpc = OP_IGET_VOLATILE;
            goto rewrite_inst_field;
        case OP_IGET_WIDE:
            quickOpc = OP_IGET_WIDE_QUICK;
            volatileOpc = OP_IGET_WIDE_VOLATILE;
            goto rewrite_inst_field;
        case OP_IGET_OBJECT:
            quickOpc = OP_IGET_OBJECT_QUICK;
            if (forSmp)
                volatileOpc = OP_IGET_OBJECT_VOLATILE;
            goto rewrite_inst_field;
        case OP_IPUT:
        case OP_IPUT_BOOLEAN:
        case OP_IPUT_BYTE:
        case OP_IPUT_CHAR:
        case OP_IPUT_SHORT:
            quickOpc = OP_IPUT_QUICK;
            if (forSmp)
                volatileOpc = OP_IPUT_VOLATILE;
            goto rewrite_inst_field;
        case OP_IPUT_WIDE:
            quickOpc = OP_IPUT_WIDE_QUICK;
            volatileOpc = OP_IPUT_WIDE_VOLATILE;
            goto rewrite_inst_field;
        case OP_IPUT_OBJECT:
            quickOpc = OP_IPUT_OBJECT_QUICK;
            if (forSmp)
                volatileOpc = OP_IPUT_OBJECT_VOLATILE;
            /* fall through */
rewrite_inst_field:
            if (essentialOnly)
                quickOpc = OP_NOP;      /* if essential-only, no "-quick" sub */
            if (quickOpc != OP_NOP || volatileOpc != OP_NOP)
                rewriteInstField(method, insns, quickOpc, volatileOpc);
            break;

        case OP_SGET:
        case OP_SGET_BOOLEAN:
        case OP_SGET_BYTE:
        case OP_SGET_CHAR:
        case OP_SGET_SHORT:
            if (forSmp)
                volatileOpc = OP_SGET_VOLATILE;
            goto rewrite_static_field;
        case OP_SGET_WIDE:
            volatileOpc = OP_SGET_WIDE_VOLATILE;
            goto rewrite_static_field;
        case OP_SGET_OBJECT:
            if (forSmp)
                volatileOpc = OP_SGET_OBJECT_VOLATILE;
            goto rewrite_static_field;
        case OP_SPUT:
        case OP_SPUT_BOOLEAN:
        case OP_SPUT_BYTE:
        case OP_SPUT_CHAR:
        case OP_SPUT_SHORT:
            if (forSmp)
                volatileOpc = OP_SPUT_VOLATILE;
            goto rewrite_static_field;
        case OP_SPUT_WIDE:
            volatileOpc = OP_SPUT_WIDE_VOLATILE;
            goto rewrite_static_field;
        case OP_SPUT_OBJECT:
            if (forSmp)
                volatileOpc = OP_SPUT_OBJECT_VOLATILE;
            /* fall through */
rewrite_static_field:
            if (volatileOpc != OP_NOP)
                rewriteStaticField(method, insns, volatileOpc);
            break;

        case OP_INVOKE_DIRECT:
        case OP_INVOKE_DIRECT_RANGE:
            if (!rewriteInvokeObjectInit(method, insns)) {
                /* may want to try execute-inline, below */
                matched = false;
            }
            break;
        case OP_RETURN_VOID:
            if (needRetBar)
                rewriteReturnVoid(method, insns);
            break;
        default:
            matched = false;
            break;
        }

        /*
         * non-essential substitutions:
         *  invoke-{virtual,direct,static}[/range] --> execute-inline
         *  invoke-{virtual,super}[/range] --> invoke-*-quick
         */
        if (!matched && !essentialOnly) {
            switch (opc) {
            case OP_INVOKE_VIRTUAL:
                if (!rewriteExecuteInline(method, insns, METHOD_VIRTUAL)) {
                    rewriteVirtualInvoke(method, insns,
                        OP_INVOKE_VIRTUAL_QUICK);
                }
                break;
            case OP_INVOKE_VIRTUAL_RANGE:
                if (!rewriteExecuteInlineRange(method, insns, METHOD_VIRTUAL)) {
                    rewriteVirtualInvoke(method, insns,
                        OP_INVOKE_VIRTUAL_QUICK_RANGE);
                }
                break;
            case OP_INVOKE_SUPER:
                rewriteVirtualInvoke(method, insns, OP_INVOKE_SUPER_QUICK);
                break;
            case OP_INVOKE_SUPER_RANGE:
                rewriteVirtualInvoke(method, insns, OP_INVOKE_SUPER_QUICK_RANGE);
                break;
            case OP_INVOKE_DIRECT:
                rewriteExecuteInline(method, insns, METHOD_DIRECT);
                break;
            case OP_INVOKE_DIRECT_RANGE:
                rewriteExecuteInlineRange(method, insns, METHOD_DIRECT);
                break;
            case OP_INVOKE_STATIC:
                rewriteExecuteInline(method, insns, METHOD_STATIC);
                break;
            case OP_INVOKE_STATIC_RANGE:
                rewriteExecuteInlineRange(method, insns, METHOD_STATIC);
                break;
            default:
                /* nothing to do for this instruction */
                ;
            }
        }

        assert(width > 0);
        assert(width <= insnsSize);
        assert(width == dexGetWidthFromInstruction(insns));

        insns += width;
        insnsSize -= width;
    }

    assert(insnsSize == 0);
}
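
The rewrite helpers called above are not shown here. As a rough illustration of what an in-place substitution involves (an assumption about the mechanics, not the dexopt sources): the opcode lives in the low byte of the instruction's first code unit, so swapping it leaves the operand fields where they were; the "-quick" rewrites additionally replace the field index in the following code unit with a resolved offset, which this sketch does not attempt.

#include <cstdint>
#include <cstdio>

typedef uint16_t u2;

/* Illustrative only: replace the opcode byte of an instruction in place,
   leaving the rest of the code unit untouched. */
static void replaceOpcode(u2* insns, uint8_t newOpcode)
{
    insns[0] = (u2) ((insns[0] & 0xff00) | newOpcode);
}

int main()
{
    u2 code[2] = { 0x2052, 0x0007 };   /* made-up iget-style encoding */
    replaceOpcode(code, 0xf2);         /* pretend quickened opcode number */
    printf("first code unit is now 0x%04x\n", code[0]);
    return 0;
}
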
Example #13
/*
 * We have to carry the exception's stack trace around, but in many cases
 * it will never be examined.  It makes sense to keep it in a compact,
 * VM-specific object, rather than an array of Objects with strings.
 *
 * Pass in the thread whose stack we're interested in.  If "thread" is
 * not self, the thread must be suspended.  This implies that the thread
 * list lock is held, which means we can't allocate objects or we risk
 * jamming the GC.  So, we allow this function to return different formats.
 * (This shouldn't be called directly -- see the inline functions in the
 * header file.)
 *
 * If "wantObject" is true, this returns a newly-allocated Object, which is
 * presently an array of integers, but could become something else in the
 * future.  If "wantObject" is false, return plain malloc data.
 *
 * NOTE: if we support class unloading, we will need to scan the class
 * object references out of these arrays.
 */
void* dvmFillInStackTraceInternal(Thread* thread, bool wantObject, int* pCount)
{
    ArrayObject* stackData = NULL;
    int* simpleData = NULL;
    void* fp;
    void* startFp;
    int stackDepth;
    int* intPtr;

    if (pCount != NULL)
        *pCount = 0;
    fp = thread->curFrame;

    assert(thread == dvmThreadSelf() || dvmIsSuspended(thread));

    /*
     * We're looking at a stack frame for code running below a Throwable
     * constructor.  We want to remove the Throwable methods and the
     * superclass initializations so the user doesn't see them when they
     * read the stack dump.
     *
     * TODO: this just scrapes off the top layers of Throwable.  Might not do
     * the right thing if we create an exception object or cause a VM
     * exception while in a Throwable method.
     */
    while (fp != NULL) {
        const StackSaveArea* saveArea = SAVEAREA_FROM_FP(fp);
        const Method* method = saveArea->method;

        if (dvmIsBreakFrame(fp))
            break;
        if (!dvmInstanceof(method->clazz, gDvm.classJavaLangThrowable))
            break;
        //LOGD("EXCEP: ignoring %s.%s\n",
        //         method->clazz->descriptor, method->name);
        fp = saveArea->prevFrame;
    }
    startFp = fp;

    /*
     * Compute the stack depth.
     */
    stackDepth = 0;
    while (fp != NULL) {
        const StackSaveArea* saveArea = SAVEAREA_FROM_FP(fp);

        if (!dvmIsBreakFrame(fp))
            stackDepth++;

        assert(fp != saveArea->prevFrame);
        fp = saveArea->prevFrame;
    }
    //LOGD("EXCEP: stack depth is %d\n", stackDepth);

    if (!stackDepth)
        goto bail;

    /*
     * We need to store a pointer to the Method and the program counter.
     * We have 4-byte pointers, so we use '[I'.
     */
    if (wantObject) {
        assert(sizeof(Method*) == 4);
        stackData = dvmAllocPrimitiveArray('I', stackDepth*2, ALLOC_DEFAULT);
        if (stackData == NULL) {
            assert(dvmCheckException(dvmThreadSelf()));
            goto bail;
        }
        intPtr = (int*) stackData->contents;
    } else {
        /* array of ints; first entry is stack depth */
        assert(sizeof(Method*) == sizeof(int));
        simpleData = (int*) malloc(sizeof(int) * stackDepth*2);
        if (simpleData == NULL)
            goto bail;

        assert(pCount != NULL);
        intPtr = simpleData;
    }
    if (pCount != NULL)
        *pCount = stackDepth;

    fp = startFp;
    while (fp != NULL) {
        const StackSaveArea* saveArea = SAVEAREA_FROM_FP(fp);
        const Method* method = saveArea->method;

        if (!dvmIsBreakFrame(fp)) {
            //LOGD("EXCEP keeping %s.%s\n", method->clazz->descriptor,
            //         method->name);

            *intPtr++ = (int) method;
            if (dvmIsNativeMethod(method)) {
                *intPtr++ = 0;      /* no saved PC for native methods */
            } else {
                assert(saveArea->xtra.currentPc >= method->insns &&
                        saveArea->xtra.currentPc <
                        method->insns + dvmGetMethodInsnsSize(method));
                *intPtr++ = (int) (saveArea->xtra.currentPc - method->insns);
            }

            stackDepth--;       // for verification
        }

        assert(fp != saveArea->prevFrame);
        fp = saveArea->prevFrame;
    }
    assert(stackDepth == 0);

bail:
    if (wantObject) {
        dvmReleaseTrackedAlloc((Object*) stackData, dvmThreadSelf());
        return stackData;
    } else {
        return simpleData;
    }
}
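
The array built above holds two entries per frame: the Method pointer and the PC expressed as an offset into method->insns (or zero for native frames). The sketch below walks such a flat array; it uses intptr_t so it also runs on 64-bit hosts, whereas the VM code assumes 4-byte pointers and stores plain ints. The Method struct here is a hypothetical stand-in.

#include <cstdint>
#include <cstdio>

/* Hypothetical stand-in for the VM's Method; only a name for printing. */
struct Method {
    const char* name;
};

/* Walk a flat (method, pc-offset) trace, two entries per frame. */
static void printTrace(const intptr_t* data, int depth)
{
    for (int i = 0; i < depth; ++i) {
        const Method* method = (const Method*) data[i * 2];
        long pcOffset = (long) data[i * 2 + 1];
        printf("  at %s (pc offset %ld)\n", method->name, pcOffset);
    }
}

int main()
{
    Method m1 = { "Foo.bar" };
    Method m2 = { "Foo.main" };
    intptr_t data[4] = { (intptr_t) &m1, 7, (intptr_t) &m2, 0 };
    printTrace(data, 2);
    return 0;
}
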
Example #14
/*
 * Perform static verification on instructions.
 *
 * As a side effect, this sets the "branch target" flags in InsnFlags.
 *
 * "(CF)" items are handled during code-flow analysis.
 *
 * v3 4.10.1
 * - target of each jump and branch instruction must be valid
 * - targets of switch statements must be valid
 * - (CF) operands referencing constant pool entries must be valid
 * - (CF) operands of getfield, putfield, getstatic, putstatic must be valid
 * - (new) verify operands of "quick" field ops
 * - (CF) operands of method invocation instructions must be valid
 * - (new) verify operands of "quick" method invoke ops
 * - (CF) only invoke-direct can call a method starting with '<'
 * - (CF) <clinit> must never be called explicitly
 * - (CF) operands of instanceof, checkcast, new (and variants) must be valid
 * - new-array[-type] limited to 255 dimensions
 * - can't use "new" on an array class
 * - (?) limit dimensions in multi-array creation
 * - (CF) local variable load/store register values must be in valid range
 *
 * v3 4.11.1.2
 * - branches must be within the bounds of the code array
 * - targets of all control-flow instructions are the start of an instruction
 * - (CF) register accesses fall within range of allocated registers
 * - (N/A) access to constant pool must be of appropriate type
 * - (CF) code does not end in the middle of an instruction
 * - (CF) execution cannot fall off the end of the code
 * - (earlier) for each exception handler, the "try" area must begin and
 *   end at the start of an instruction (end can be at the end of the code)
 * - (earlier) for each exception handler, the handler must start at a valid
 *   instruction
 *
 * TODO: move some of the "CF" items in here for better performance (the
 * code-flow analysis sometimes has to process the same instruction several
 * times).
 */
static bool verifyInstructions(const Method* meth, InsnFlags* insnFlags,
    int verifyFlags)
{
    const int insnCount = dvmGetMethodInsnsSize(meth);
    const u2* insns = meth->insns;
    int i;

    /* the start of the method is a "branch target" */
    dvmInsnSetBranchTarget(insnFlags, 0, true);

    for (i = 0; i < insnCount; /**/) {
        /*
         * These types of instructions can be GC points.  To support precise
         * GC, all such instructions must export the PC in the interpreter,
         * or the GC won't be able to identify the current PC for the thread.
         */
        static const int gcMask = kInstrCanBranch | kInstrCanSwitch |
            kInstrCanThrow | kInstrCanReturn;

        int width = dvmInsnGetWidth(insnFlags, i);
        OpCode opcode = *insns & 0xff;
        InstructionFlags opFlags = dexGetInstrFlags(gDvm.instrFlags, opcode);
        int offset, absOffset;

        if ((opFlags & gcMask) != 0) {
            /*
             * This instruction is probably a GC point.  Branch instructions
             * only qualify if they go backward, so we need to check the
             * offset.
             */
            int offset = -1;
            bool unused;
            if (dvmGetBranchTarget(meth, insnFlags, i, &offset, &unused)) {
                if (offset < 0) {
                    dvmInsnSetGcPoint(insnFlags, i, true);
                }
            } else {
                /* not a branch target */
                dvmInsnSetGcPoint(insnFlags, i, true);
            }
        }

        switch (opcode) {
        case OP_NOP:
            /* plain no-op or switch table data; nothing to do here */
            break;

        case OP_CONST_STRING:
        case OP_CONST_STRING_JUMBO:
            if (!checkStringIndex(meth, i))
                return false;
            break;

        case OP_CONST_CLASS:
        case OP_CHECK_CAST:
            if (!checkTypeIndex(meth, i, true))
                return false;
            break;
        case OP_INSTANCE_OF:
            if (!checkTypeIndex(meth, i, false))
                return false;
            break;

        case OP_PACKED_SWITCH:
        case OP_SPARSE_SWITCH:
            /* verify the associated table */
            if (!dvmCheckSwitchTargets(meth, insnFlags, i))
                return false;
            break;

        case OP_FILL_ARRAY_DATA:
            /* verify the associated table */
            if (!checkArrayData(meth, i))
                return false;
            break;

        case OP_GOTO:
        case OP_GOTO_16:
        case OP_IF_EQ:
        case OP_IF_NE:
        case OP_IF_LT:
        case OP_IF_GE:
        case OP_IF_GT:
        case OP_IF_LE:
        case OP_IF_EQZ:
        case OP_IF_NEZ:
        case OP_IF_LTZ:
        case OP_IF_GEZ:
        case OP_IF_GTZ:
        case OP_IF_LEZ:
            /* check the destination */
            if (!dvmCheckBranchTarget(meth, insnFlags, i, false))
                return false;
            break;
        case OP_GOTO_32:
            /* check the destination; self-branch is okay */
            if (!dvmCheckBranchTarget(meth, insnFlags, i, true))
                return false;
            break;

        case OP_NEW_INSTANCE:
            if (!checkNewInstance(meth, i))
                return false;
            break;

        case OP_NEW_ARRAY:
            if (!checkNewArray(meth, i))
                return false;
            break;

        case OP_FILLED_NEW_ARRAY:
            if (!checkTypeIndex(meth, i, true))
                return false;
            break;
        case OP_FILLED_NEW_ARRAY_RANGE:
            if (!checkTypeIndex(meth, i, true))
                return false;
            break;

        case OP_IGET:
        case OP_IGET_WIDE:
        case OP_IGET_OBJECT:
        case OP_IGET_BOOLEAN:
        case OP_IGET_BYTE:
        case OP_IGET_CHAR:
        case OP_IGET_SHORT:
        case OP_IPUT:
        case OP_IPUT_WIDE:
        case OP_IPUT_OBJECT:
        case OP_IPUT_BOOLEAN:
        case OP_IPUT_BYTE:
        case OP_IPUT_CHAR:
        case OP_IPUT_SHORT:
            /* check the field index */
            if (!checkFieldIndex(meth, i, false))
                return false;
            break;
        case OP_SGET:
        case OP_SGET_WIDE:
        case OP_SGET_OBJECT:
        case OP_SGET_BOOLEAN:
        case OP_SGET_BYTE:
        case OP_SGET_CHAR:
        case OP_SGET_SHORT:
        case OP_SPUT:
        case OP_SPUT_WIDE:
        case OP_SPUT_OBJECT:
        case OP_SPUT_BOOLEAN:
        case OP_SPUT_BYTE:
        case OP_SPUT_CHAR:
        case OP_SPUT_SHORT:
            /* check the field index */
            if (!checkFieldIndex(meth, i, true))
                return false;
            break;

        case OP_INVOKE_VIRTUAL:
        case OP_INVOKE_SUPER:
        case OP_INVOKE_DIRECT:
        case OP_INVOKE_STATIC:
        case OP_INVOKE_INTERFACE:
        case OP_INVOKE_VIRTUAL_RANGE:
        case OP_INVOKE_SUPER_RANGE:
        case OP_INVOKE_DIRECT_RANGE:
        case OP_INVOKE_STATIC_RANGE:
        case OP_INVOKE_INTERFACE_RANGE:
            /* check the method index */
            if (!checkMethodIndex(meth, i))
                return false;
            break;

        case OP_EXECUTE_INLINE:
        case OP_INVOKE_DIRECT_EMPTY:
        case OP_IGET_QUICK:
        case OP_IGET_WIDE_QUICK:
        case OP_IGET_OBJECT_QUICK:
        case OP_IPUT_QUICK:
        case OP_IPUT_WIDE_QUICK:
        case OP_IPUT_OBJECT_QUICK:
        case OP_INVOKE_VIRTUAL_QUICK:
        case OP_INVOKE_VIRTUAL_QUICK_RANGE:
        case OP_INVOKE_SUPER_QUICK:
        case OP_INVOKE_SUPER_QUICK_RANGE:
            if ((verifyFlags & VERIFY_ALLOW_OPT_INSTRS) == 0) {
                LOG_VFY("VFY: not expecting optimized instructions\n");
                return false;
            }
            break;

        default:
            /* nothing to do */
            break;
        }

        assert(width > 0);
        i += width;
        insns += width;
    }

    /* make sure the last instruction ends at the end of the insn area */
    if (i != insnCount) {
        LOG_VFY_METH(meth,
            "VFY: code did not end when expected (end at %d, count %d)\n",
            i, insnCount);
        return false;
    }

    return true;
}
Example #15
/*
 * Perform verification on a single method.
 *
 * We do this in three passes:
 *  (1) Walk through all code units, determining instruction lengths.
 *  (2) Do static checks, including branch target and operand validation.
 *  (3) Do structural checks, including data-flow analysis.
 *
 * Some checks may be bypassed depending on the verification mode.  We can't
 * turn this stuff off completely if we want to do "exact" GC.
 *
 * - operands of getfield, putfield, getstatic, putstatic must be valid
 * - operands of method invocation instructions must be valid
 *
 * - code array must not be empty
 * - (N/A) code_length must be less than 65536
 * - opcode of first instruction begins at index 0
 * - only documented instructions may appear
 * - each instruction follows the last
 * - (below) last byte of last instruction is at (code_length-1)
 */
static bool verifyMethod(Method* meth, int verifyFlags)
{
    bool result = false;
    UninitInstanceMap* uninitMap = NULL;
    InsnFlags* insnFlags = NULL;
    int i, newInstanceCount;

    /*
     * If there aren't any instructions, make sure that's expected, then
     * exit successfully. Note: meth->insns gets set to a native function
     * pointer on first call.
     */
    if (dvmGetMethodInsnsSize(meth) == 0) {
        if (!dvmIsNativeMethod(meth) && !dvmIsAbstractMethod(meth)) {
            LOG_VFY_METH(meth,
                "VFY: zero-length code in concrete non-native method\n");
            goto bail;
        }

        goto success;
    }

    /*
     * Sanity-check the register counts.  ins + locals = registers, so make
     * sure that ins <= registers.
     */
    if (meth->insSize > meth->registersSize) {
        LOG_VFY_METH(meth, "VFY: bad register counts (ins=%d regs=%d)\n",
            meth->insSize, meth->registersSize);
        goto bail;
    }

    /*
     * Allocate and populate an array to hold instruction data.
     *
     * TODO: Consider keeping a reusable pre-allocated array sitting
     * around for smaller methods.
     */
    insnFlags = (InsnFlags*)
        calloc(dvmGetMethodInsnsSize(meth), sizeof(InsnFlags));
    if (insnFlags == NULL)
        goto bail;

    /*
     * Compute the width of each instruction and store the result in insnFlags.
     * Count up the #of occurrences of new-instance instructions while we're
     * at it.
     */
    if (!dvmComputeCodeWidths(meth, insnFlags, &newInstanceCount))
        goto bail;

    /*
     * Allocate a map to hold the classes of uninitialized instances.
     */
    uninitMap = dvmCreateUninitInstanceMap(meth, insnFlags, newInstanceCount);
    if (uninitMap == NULL)
        goto bail;

    /*
     * Set the "in try" flags for all instructions guarded by a "try" block.
     */
    if (!dvmSetTryFlags(meth, insnFlags))
        goto bail;

    /*
     * Perform static instruction verification.
     */
    if (!verifyInstructions(meth, insnFlags, verifyFlags))
        goto bail;

    /*
     * Do code-flow analysis.  Do this after verifying the branch targets
     * so we don't need to worry about it here.
     *
     * If there are no registers, we don't need to do much in the way of
     * analysis, but we still need to verify that nothing actually tries
     * to use a register.
     */
    if (!dvmVerifyCodeFlow(meth, insnFlags, uninitMap)) {
        //LOGD("+++ %s failed code flow\n", meth->name);
        goto bail;
    }

success:
    result = true;

bail:
    dvmFreeUninitInstanceMap(uninitMap);
    free(insnFlags);
    return result;
}
Example #16
/*
 * Compute the width of the instruction at each address in the instruction
 * stream.  Addresses that are in the middle of an instruction, or that
 * are part of switch table data, are not set (so the caller should probably
 * initialize "insnFlags" to zero).
 *
 * If "pNewInstanceCount" is not NULL, it will be set to the number of
 * new-instance instructions in the method.
 *
 * Logs an error and returns "false" on failure.
 */
bool dvmComputeCodeWidths(const Method* meth, InsnFlags* insnFlags,
    int* pNewInstanceCount)
{
    const int insnCount = dvmGetMethodInsnsSize(meth);
    const u2* insns = meth->insns;
    bool result = false;
    int newInstanceCount = 0;
    int i;

    for (i = 0; i < insnCount; /**/) {
        int width;

        /*
         * Switch tables and array data tables are identified with
         * "extended NOP" opcodes.  They contain no executable code,
         * so we can just skip past them.
         */
        if (*insns == kPackedSwitchSignature) {
            width = 4 + insns[1] * 2;
        } else if (*insns == kSparseSwitchSignature) {
            width = 2 + insns[1] * 4;
        } else if (*insns == kArrayDataSignature) {
            u4 size = insns[2] | (((u4)insns[3]) << 16);
            width = 4 + (insns[1] * size + 1) / 2;
        } else {
            int instr = *insns & 0xff;
            width = dexGetInstrWidthAbs(gDvm.instrWidth, instr);
            if (width == 0) {
                LOG_VFY_METH(meth,
                    "VFY: invalid post-opt instruction (0x%x)\n", instr);
                LOGI("### instr=%d width=%d table=%d\n",
                    instr, width, dexGetInstrWidthAbs(gDvm.instrWidth, instr));
                goto bail;
            }
            if (width < 0 || width > 5) {
                LOGE("VFY: bizarre width value %d\n", width);
                dvmAbort();
            }

            if (instr == OP_NEW_INSTANCE)
                newInstanceCount++;
        }

        if (width > 65535) {
            LOG_VFY_METH(meth, "VFY: insane width %d\n", width);
            goto bail;
        }

        insnFlags[i] |= width;
        i += width;
        insns += width;
    }
    if (i != (int) dvmGetMethodInsnsSize(meth)) {
        LOG_VFY_METH(meth, "VFY: code did not end where expected (%d vs. %d)\n",
            i, dvmGetMethodInsnsSize(meth));
        goto bail;
    }

    result = true;
    if (pNewInstanceCount != NULL)
        *pNewInstanceCount = newInstanceCount;

bail:
    return result;
}