Example 1
static void genMulLong(CompilationUnit *cUnit, RegLocation rlDest,
                       RegLocation rlSrc1, RegLocation rlSrc2)
{
    RegLocation rlResult;
    loadValueDirectWideFixed(cUnit, rlSrc1, r0, r1);
    loadValueDirectWideFixed(cUnit, rlSrc2, r2, r3);
    genDispatchToHandler(cUnit, TEMPLATE_MUL_LONG);
    rlResult = dvmCompilerGetReturnWide(cUnit);
    storeValueWide(cUnit, rlDest, rlResult);
}
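The shape of this helper recurs throughout the file: load the wide operands into the fixed pairs r0/r1 and r2/r3, dispatch to the assembly template, then pick the result up from the return pair. What TEMPLATE_MUL_LONG itself must compute is the low 64 bits of the 64x64-bit product; a minimal C sketch of that arithmetic, with the helper name mulLongModel purely hypothetical:

#include <stdint.h>

/*
 * Hypothetical C model of the multiply the template has to perform:
 * only the low 64 bits of the 128-bit product survive, so the two
 * cross terms contribute to the high word alone and their carries
 * out of bit 63 can be discarded.
 */
static uint64_t mulLongModel(uint64_t a, uint64_t b)
{
    uint32_t aLo = (uint32_t)a, aHi = (uint32_t)(a >> 32);
    uint32_t bLo = (uint32_t)b, bHi = (uint32_t)(b >> 32);
    uint64_t lo  = (uint64_t)aLo * bLo;          /* full 32x32 -> 64 */
    uint32_t hi  = (uint32_t)(lo >> 32)
                 + aLo * bHi + aHi * bLo;        /* cross terms mod 2^32 */
    return ((uint64_t)hi << 32) | (uint32_t)lo;
}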
Example 2
static bool genInlineSqrt(CompilationUnit *cUnit, MIR *mir)
{
    RegLocation rlSrc = dvmCompilerGetSrcWide(cUnit, mir, 0, 1);
    RegLocation rlResult = LOC_C_RETURN_WIDE;
    RegLocation rlDest = LOC_DALVIK_RETURN_VAL_WIDE;
    loadValueAddressDirect(cUnit, rlSrc, r2);
    genDispatchToHandler(cUnit, TEMPLATE_SQRT_DOUBLE_VFP);
    storeValueWide(cUnit, rlDest, rlResult);
    return false;
}
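Only the operand's address goes out (in r2); the template dereferences it and hands the result back through the core register pair named by LOC_C_RETURN_WIDE. A minimal C model of that by-address convention, assuming a soft-float-style ABI where a returned double occupies r0/r1; the function name is hypothetical:

#include <math.h>

/* Hypothetical model of TEMPLATE_SQRT_DOUBLE_VFP's interface: the caller
 * passes the operand's address (r2 above), and returning the double by
 * value corresponds to the r0/r1 pair that LOC_C_RETURN_WIDE describes. */
static double sqrtDoubleModel(const double *src)
{
    return sqrt(*src);
}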
Example 3
/*
 * For monitor unlock, we don't have to use ldrex/strex.  Once
 * we've determined that the lock is thin and that we own it with
 * a zero recursion count, it's safe to punch it back to the
 * initial, unlocked thin state with a store word.
 */
static void genMonitorExit(CompilationUnit *cUnit, MIR *mir)
{
    RegLocation rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
    ArmLIR *target;
    ArmLIR *branch;
    ArmLIR *hopTarget;
    ArmLIR *hopBranch;

    assert(LW_SHAPE_THIN == 0);
    loadValueDirectFixed(cUnit, rlSrc, r1);  // Get obj
    dvmCompilerLockAllTemps(cUnit);  // Prepare for explicit register usage
    dvmCompilerFreeTemp(cUnit, r4PC);  // Free up r4 for general use
    genNullCheck(cUnit, rlSrc.sRegLow, r1, mir->offset, NULL);
    loadWordDisp(cUnit, r1, offsetof(Object, lock), r2); // Get object->lock
    loadWordDisp(cUnit, r6SELF, offsetof(Thread, threadId), r3); // Get threadId
    // Is lock unheld on lock or held by us (==threadId) on unlock?
    opRegRegImm(cUnit, kOpAnd, r7, r2,
                (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT));
    opRegImm(cUnit, kOpLsl, r3, LW_LOCK_OWNER_SHIFT); // Align owner
    newLIR3(cUnit, kThumb2Bfc, r2, LW_HASH_STATE_SHIFT,
            LW_LOCK_OWNER_SHIFT - 1);
    opRegReg(cUnit, kOpSub, r2, r3);
    hopBranch = opCondBranch(cUnit, kArmCondNe);
    dvmCompilerGenMemBarrier(cUnit, kSY);
    storeWordDisp(cUnit, r1, offsetof(Object, lock), r7);
    branch = opNone(cUnit, kOpUncondBr);

    hopTarget = newLIR0(cUnit, kArmPseudoTargetLabel);
    hopTarget->defMask = ENCODE_ALL;
    hopBranch->generic.target = (LIR *)hopTarget;

    // Export PC (part 1)
    loadConstant(cUnit, r3, (int) (cUnit->method->insns + mir->offset));

    LOAD_FUNC_ADDR(cUnit, r7, (int)dvmUnlockObject);
    genRegCopy(cUnit, r0, r6SELF);
    // Export PC (part 2)
    newLIR3(cUnit, kThumb2StrRRI8Predec, r3, r5FP,
            sizeof(StackSaveArea) -
            offsetof(StackSaveArea, xtra.currentPc));
    opReg(cUnit, kOpBlx, r7);
    /* Did we throw? */
    ArmLIR *branchOver = genCmpImmBranch(cUnit, kArmCondNe, r0, 0);
    loadConstant(cUnit, r0,
                 (int) (cUnit->method->insns + mir->offset +
                 dexGetWidthFromOpcode(OP_MONITOR_EXIT)));
    genDispatchToHandler(cUnit, TEMPLATE_THROW_EXCEPTION_COMMON);

    // Resume here
    target = newLIR0(cUnit, kArmPseudoTargetLabel);
    target->defMask = ENCODE_ALL;
    branch->generic.target = (LIR *)target;
    branchOver->generic.target = (LIR *) target;
}
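Stripped of the LIR plumbing, the fast path evaluates a single predicate and, if it holds, performs one plain word store. A hedged C sketch of that logic; the LW_* values are assumed to mirror the thin-lock layout described in Sync.c, and unlockThinModel is hypothetical:

#include <stdbool.h>
#include <stdint.h>

/* Assumed thin-lock layout, mirroring the LW_* constants in Sync.h:
 * bit 0 = shape (0 == thin), bits 1-2 = hash state, and above that the
 * owner threadId and recursion count. */
#define LW_HASH_STATE_SHIFT 1
#define LW_HASH_STATE_MASK  0x3
#define LW_LOCK_OWNER_SHIFT 3

static bool unlockThinModel(uint32_t *lockWord, uint32_t threadId)
{
    uint32_t lock      = *lockWord;
    uint32_t hashState = lock & (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT);
    /* Ours iff, with the hash-state bits masked out, the word equals
     * threadId << LW_LOCK_OWNER_SHIFT: thin shape, owner == us, zero
     * recursion count.  This is the bfc/sub/bne sequence above. */
    if ((lock & ~(LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT)) !=
            (threadId << LW_LOCK_OWNER_SHIFT))
        return false;                  /* bail to dvmUnlockObject() */
    /* The real code issues a memory barrier (dvmCompilerGenMemBarrier)
     * before this store. */
    *lockWord = hashState;             /* back to the unlocked thin state */
    return true;
}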
Example 4
/*
 * Handle simple case (thin lock) inline.  If it's complicated, bail
 * out to the heavyweight lock/unlock routines.  We'll use dedicated
 * registers here in order to be in the right position in case we
 * need to bail to dvm[Lock/Unlock]Object(self, object).
 *
 * r0 -> self pointer [arg0 for dvm[Lock/Unlock]Object]
 * r1 -> object [arg1 for dvm[Lock/Unlock]Object]
 * r2 -> initial contents of object->lock, later result of strex
 * r3 -> self->threadId
 * r7 -> temp to hold new lock value [unlock only]
 * r4 -> allowed to be used by utilities as a general temp
 *
 * The result of the strex is 0 if we acquire the lock.
 *
 * See comments in Sync.c for the layout of the lock word.
 * Of particular interest to this code is the test for the
 * simple case - which we handle inline.  For monitor enter, the
 * simple case is thin lock, held by no-one.  For monitor exit,
 * the simple case is thin lock, held by the unlocking thread with
 * a recursion count of 0.
 *
 * A minor complication is that there is a field in the lock word
 * unrelated to locking: the hash state.  This field must be ignored, but
 * preserved.
 *
 */
static void genMonitorEnter(CompilationUnit *cUnit, MIR *mir)
{
    RegLocation rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
    ArmLIR *target;
    ArmLIR *hopTarget;
    ArmLIR *branch;
    ArmLIR *hopBranch;

    assert(LW_SHAPE_THIN == 0);
    loadValueDirectFixed(cUnit, rlSrc, r1);  // Get obj
    dvmCompilerLockAllTemps(cUnit);  // Prepare for explicit register usage
    dvmCompilerFreeTemp(cUnit, r4PC);  // Free up r4 for general use
    loadWordDisp(cUnit, rGLUE, offsetof(InterpState, self), r0); // Get self
    genNullCheck(cUnit, rlSrc.sRegLow, r1, mir->offset, NULL);
    loadWordDisp(cUnit, r0, offsetof(Thread, threadId), r3); // Get threadId
    newLIR3(cUnit, kThumb2Ldrex, r2, r1,
            offsetof(Object, lock) >> 2); // Get object->lock
    opRegImm(cUnit, kOpLsl, r3, LW_LOCK_OWNER_SHIFT); // Align owner
    // Is lock unheld on lock or held by us (==threadId) on unlock?
    newLIR4(cUnit, kThumb2Bfi, r3, r2, 0, LW_LOCK_OWNER_SHIFT - 1);
    newLIR3(cUnit, kThumb2Bfc, r2, LW_HASH_STATE_SHIFT,
            LW_LOCK_OWNER_SHIFT - 1);
    hopBranch = newLIR2(cUnit, kThumb2Cbnz, r2, 0);
    newLIR4(cUnit, kThumb2Strex, r2, r3, r1, offsetof(Object, lock) >> 2);
    branch = newLIR2(cUnit, kThumb2Cbz, r2, 0);

    hopTarget = newLIR0(cUnit, kArmPseudoTargetLabel);
    hopTarget->defMask = ENCODE_ALL;
    hopBranch->generic.target = (LIR *)hopTarget;

    // Clear the lock
    ArmLIR *inst = newLIR0(cUnit, kThumb2Clrex);
    // ...and make it a scheduling barrier
    inst->defMask = ENCODE_ALL;

    // Export PC (part 1)
    loadConstant(cUnit, r3, (int) (cUnit->method->insns + mir->offset));

    /* Get dPC of next insn */
    loadConstant(cUnit, r4PC, (int)(cUnit->method->insns + mir->offset +
                                    dexGetInstrWidthAbs(gDvm.instrWidth, OP_MONITOR_ENTER)));
    // Export PC (part 2)
    newLIR3(cUnit, kThumb2StrRRI8Predec, r3, rFP,
            sizeof(StackSaveArea) -
            offsetof(StackSaveArea, xtra.currentPc));
    /* Call template, and don't return */
    genDispatchToHandler(cUnit, TEMPLATE_MONITOR_ENTER);
    // Resume here
    target = newLIR0(cUnit, kArmPseudoTargetLabel);
    target->defMask = ENCODE_ALL;
    branch->generic.target = (LIR *)target;
}
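The ldrex/strex pair is a hand-rolled compare-and-swap: the cbnz bails unless the word, hash bits aside, is entirely zero (thin and unheld), the bfi merges the preserved low bits into the shifted threadId, and the strex result in r2 is zero exactly when the store won the race. A hedged C11 rendering of that fast path, with an atomic compare-exchange standing in for the exclusive pair; LW_* values as assumed in the unlock sketch above, lockThinModel hypothetical:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define LW_HASH_STATE_SHIFT 1
#define LW_HASH_STATE_MASK  0x3
#define LW_LOCK_OWNER_SHIFT 3

static bool lockThinModel(_Atomic uint32_t *lockWord, uint32_t threadId)
{
    uint32_t old = atomic_load_explicit(lockWord, memory_order_relaxed);
    /* The cbnz above: anything set outside the hash-state bits means the
     * lock is fat or already held, so take the slow path. */
    if ((old & ~(LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT)) != 0)
        return false;                  /* bail to dvmLockObject() via the
                                          TEMPLATE_MONITOR_ENTER slow path */
    /* The bfi above: install our shifted threadId while preserving the
     * hash-state (and shape) bits. */
    uint32_t desired = old | (threadId << LW_LOCK_OWNER_SHIFT);
    /* strex returning nonzero == compare-exchange failing: another thread
     * touched the word between the exclusive load and store. */
    return atomic_compare_exchange_strong_explicit(
            lockWord, &old, desired,
            memory_order_acquire, memory_order_relaxed);
}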
Example 5
/*
 * TUNING: On some implementations, it is quicker to pass addresses
 * to the handlers rather than load the operands into core registers
 * and then move the values to FP regs in the handlers.  Other implementations
 * may prefer passing data in registers (and the latter approach would
 * yield cleaner register handling - avoiding the requirement that operands
 * be flushed to memory prior to the call).
 */
static bool genArithOpFloat(CompilationUnit *cUnit, MIR *mir,
                            RegLocation rlDest, RegLocation rlSrc1,
                            RegLocation rlSrc2)
{
    TemplateOpCode opCode;

    /*
     * Don't attempt to optimize register usage since these opcodes call out to
     * the handlers.
     */
    switch (mir->dalvikInsn.opCode) {
        case OP_ADD_FLOAT_2ADDR:
        case OP_ADD_FLOAT:
            opCode = TEMPLATE_ADD_FLOAT_VFP;
            break;
        case OP_SUB_FLOAT_2ADDR:
        case OP_SUB_FLOAT:
            opCode = TEMPLATE_SUB_FLOAT_VFP;
            break;
        case OP_DIV_FLOAT_2ADDR:
        case OP_DIV_FLOAT:
            opCode = TEMPLATE_DIV_FLOAT_VFP;
            break;
        case OP_MUL_FLOAT_2ADDR:
        case OP_MUL_FLOAT:
            opCode = TEMPLATE_MUL_FLOAT_VFP;
            break;
        case OP_REM_FLOAT_2ADDR:
        case OP_REM_FLOAT:
        case OP_NEG_FLOAT: {
            return genArithOpFloatPortable(cUnit, mir, rlDest, rlSrc1, rlSrc2);
        }
        default:
            return true;
    }
    loadValueAddressDirect(cUnit, rlDest, r0);
    loadValueAddressDirect(cUnit, rlSrc1, r1);
    loadValueAddressDirect(cUnit, rlSrc2, r2);
    genDispatchToHandler(cUnit, opCode);
    rlDest = dvmCompilerUpdateLoc(cUnit, rlDest);
    if (rlDest.location == kLocPhysReg) {
        dvmCompilerClobber(cUnit, rlDest.lowReg);
    }
    return false;
}
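The tuning note above is the crux of the calling convention here: because the handler receives addresses, loadValueAddressDirect must first ensure each operand lives in its Dalvik frame slot. A minimal sketch of the two conventions being weighed (function names hypothetical):

/* By-address, as used above: the handler dereferences its arguments and
 * writes the result back, which is why operands must be flushed to
 * memory before the call. */
static void addFloatByAddress(float *dst, const float *src1,
                              const float *src2)
{
    *dst = *src1 + *src2;
}

/* The register-passing alternative the comment mentions: no flush
 * needed, and the result stays in a register for the allocator. */
static float addFloatByValue(float src1, float src2)
{
    return src1 + src2;
}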
Example 6
static bool genArithOpDouble(CompilationUnit *cUnit, MIR *mir,
                             RegLocation rlDest, RegLocation rlSrc1,
                             RegLocation rlSrc2)
{
    TemplateOpCode opCode;

    switch (mir->dalvikInsn.opCode) {
        case OP_ADD_DOUBLE_2ADDR:
        case OP_ADD_DOUBLE:
            opCode = TEMPLATE_ADD_DOUBLE_VFP;
            break;
        case OP_SUB_DOUBLE_2ADDR:
        case OP_SUB_DOUBLE:
            opCode = TEMPLATE_SUB_DOUBLE_VFP;
            break;
        case OP_DIV_DOUBLE_2ADDR:
        case OP_DIV_DOUBLE:
            opCode = TEMPLATE_DIV_DOUBLE_VFP;
            break;
        case OP_MUL_DOUBLE_2ADDR:
        case OP_MUL_DOUBLE:
            opCode = TEMPLATE_MUL_DOUBLE_VFP;
            break;
        case OP_REM_DOUBLE_2ADDR:
        case OP_REM_DOUBLE:
        case OP_NEG_DOUBLE: {
            return genArithOpDoublePortable(cUnit, mir, rlDest, rlSrc1,
                                            rlSrc2);
        }
        default:
            return true;
    }
    loadValueAddressDirect(cUnit, rlDest, r0);
    loadValueAddressDirect(cUnit, rlSrc1, r1);
    loadValueAddressDirect(cUnit, rlSrc2, r2);
    genDispatchToHandler(cUnit, opCode);
    rlDest = dvmCompilerUpdateLocWide(cUnit, rlDest);
    if (rlDest.location == kLocPhysReg) {
        dvmCompilerClobber(cUnit, rlDest.lowReg);
        dvmCompilerClobber(cUnit, rlDest.highReg);
    }
    return false;
}