Example 1
void JIT::emit_op_urshift(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    // Slow case of urshift makes assumptions about what registers hold the
    // shift arguments, so any changes must be updated there as well.
    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitFastArithImmToInt(regT0);
        int shift = getConstantOperand(op2).asInt32();
        if (shift)
            urshift32(Imm32(shift & 0x1f), regT0);
        // unsigned shift < 0 or shift = k*2^32 may result in (essentially)
        // a toUint conversion, which can result in a value we cannot represent
        // as an immediate int.
        if (shift < 0 || !(shift & 31))
            addSlowCase(branch32(LessThan, regT0, Imm32(0)));
        emitFastArithReTagImmediate(regT0, regT0);
        emitPutVirtualRegister(dst, regT0);
        return;
    }
    emitGetVirtualRegisters(op1, regT0, op2, regT1);
    if (!isOperandConstantImmediateInt(op1))
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
    emitFastArithImmToInt(regT0);
    emitFastArithImmToInt(regT1);
    urshift32(regT1, regT0);
    addSlowCase(branch32(LessThan, regT0, Imm32(0)));
    emitFastArithReTagImmediate(regT0, regT0);
    emitPutVirtualRegister(dst, regT0);
}
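
Note: the branch32(LessThan, ...) guard is only planted when the effective shift amount can be zero. A non-zero unsigned shift of a 32-bit value always produces a result that fits in a signed int32; a zero shift merely reinterprets a negative int32 as a uint32, which can exceed the immediate-int range. A minimal standalone sketch of that invariant (plain C++, not JSC code; urshiftValue is a hypothetical helper):

#include <cassert>
#include <cstdint>

// Effective shift is the constant masked to its low five bits, as in the fast path above.
static int64_t urshiftValue(int32_t lhs, int32_t shiftConstant)
{
    uint32_t result = static_cast<uint32_t>(lhs) >> (shiftConstant & 0x1f);
    return static_cast<int64_t>(result);
}

int main()
{
    assert(urshiftValue(-1, 1) == 0x7FFFFFFF);  // non-zero shift: always representable as int32
    assert(urshiftValue(-1, 0) == 0xFFFFFFFF);  // zero shift: may exceed INT32_MAX, hence the slow case
    return 0;
}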
Example 2
void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    if (isOperandConstantImmediateInt(op2)) {
        int shift = getConstantOperand(op2).asInt32();
        // op1 = regT0
        linkSlowCase(iter); // int32 check
        if (supportsFloatingPointTruncate()) {
            JumpList failures;
            failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
            addPtr(tagTypeNumberRegister, regT0);
            movePtrToDouble(regT0, fpRegT0);
            failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
            if (shift)
                urshift32(Imm32(shift & 0x1f), regT0);
            if (shift < 0 || !(shift & 31))
                failures.append(branch32(LessThan, regT0, Imm32(0)));
            emitFastArithReTagImmediate(regT0, regT0);
            emitPutVirtualRegister(dst, regT0);
            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
            failures.link(this);
        }
        if (shift < 0 || !(shift & 31))
            linkSlowCase(iter); // failed to box in hot path
    } else {
        // op1 = regT0
        // op2 = regT1
        if (!isOperandConstantImmediateInt(op1)) {
            linkSlowCase(iter); // int32 check -- op1 is not an int
            if (supportsFloatingPointTruncate()) {
                JumpList failures;
                failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
                addPtr(tagTypeNumberRegister, regT0);
                movePtrToDouble(regT0, fpRegT0);
                failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
                failures.append(emitJumpIfNotImmediateInteger(regT1)); // op2 is not an int
                emitFastArithImmToInt(regT1);
                urshift32(regT1, regT0);
                failures.append(branch32(LessThan, regT0, Imm32(0)));
                emitFastArithReTagImmediate(regT0, regT0);
                emitPutVirtualRegister(dst, regT0);
                emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
                failures.link(this);
            }
        }
        
        linkSlowCase(iter); // int32 check - op2 is not an int
        linkSlowCase(iter); // Can't represent unsigned result as an immediate
    }
    
    JITStubCall stubCall(this, cti_op_urshift);
    stubCall.addArgument(op1, regT0);
    stubCall.addArgument(op2, regT1);
    stubCall.call(dst);
}
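
Note: when the target supports floating-point truncation, the slow path above first retries with the left operand unboxed as a double, and only falls through to the cti_op_urshift stub call if that also fails. A rough standalone sketch of that decision (hypothetical helper; branchTruncateDoubleToInt32 is approximated here by an explicit range check):

#include <cstdint>
#include <optional>

std::optional<uint32_t> tryFastUrshift(double lhs, int32_t shift)
{
    // Bail out (take the stub call) when the double cannot be truncated to an int32;
    // NaN also fails this comparison and falls through to the stub.
    if (!(lhs >= INT32_MIN && lhs <= INT32_MAX))
        return std::nullopt;
    return static_cast<uint32_t>(static_cast<int32_t>(lhs)) >> (shift & 0x1f);
}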
Example 3
void JIT::compileLoadVarargs(Instruction* instruction)
{
    int thisValue = instruction[2].u.operand;
    int arguments = instruction[3].u.operand;
    int firstFreeRegister = instruction[4].u.operand;

    killLastResultRegister();

    JumpList slowCase;
    JumpList end;
    if (m_codeBlock->usesArguments() && arguments == m_codeBlock->argumentsRegister()) {
        emitGetVirtualRegister(arguments, regT0);
        slowCase.append(branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(JSValue()))));

        emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
        slowCase.append(branch32(Above, regT0, TrustedImm32(Arguments::MaxArguments + 1)));
        // regT0: argumentCountIncludingThis

        move(regT0, regT1);
        add32(TrustedImm32(firstFreeRegister + RegisterFile::CallFrameHeaderSize), regT1);
        lshift32(TrustedImm32(3), regT1);
        addPtr(callFrameRegister, regT1);
        // regT1: newCallFrame

        slowCase.append(branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), regT1));

        // Initialize ArgumentCount.
        emitFastArithReTagImmediate(regT0, regT2);
        storePtr(regT2, Address(regT1, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register))));

        // Initialize 'this'.
        emitGetVirtualRegister(thisValue, regT2);
        storePtr(regT2, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));

        // Copy arguments.
        neg32(regT0);
        signExtend32ToPtr(regT0, regT0);
        end.append(branchAddPtr(Zero, Imm32(1), regT0));
        // regT0: -argumentCount

        Label copyLoop = label();
        loadPtr(BaseIndex(callFrameRegister, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT2);
        storePtr(regT2, BaseIndex(regT1, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
        branchAddPtr(NonZero, Imm32(1), regT0).linkTo(copyLoop, this);

        end.append(jump());
    }

    if (m_codeBlock->usesArguments() && arguments == m_codeBlock->argumentsRegister())
        slowCase.link(this);

    JITStubCall stubCall(this, cti_op_load_varargs);
    stubCall.addArgument(thisValue, regT0);
    stubCall.addArgument(arguments, regT0);
    stubCall.addArgument(Imm32(firstFreeRegister));
    stubCall.call(regT1);

    if (m_codeBlock->usesArguments() && arguments == m_codeBlock->argumentsRegister())
        end.link(this);
}
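
Note: the copy loop above uses a negative-index idiom: regT0 counts up from a negative value toward zero, so a single add-and-branch both advances the index and terminates the loop, and the slot at index 0 ('this', already stored explicitly) is never revisited. A standalone sketch of the same idiom (plain C++, hypothetical names; the end pointers stand in for the old and new frame bases):

#include <cstdint>
#include <cstdio>

// srcEnd/dstEnd point one past the last element; elements 1..count-1 are copied and
// element 0 is left alone, mirroring how the JIT handles 'this' outside the loop.
static void copyWithNegativeIndex(const int64_t* srcEnd, int64_t* dstEnd, int32_t count)
{
    intptr_t i = -static_cast<intptr_t>(count);  // mirrors neg32/signExtend32ToPtr: i = -count
    if (++i == 0)                                // mirrors branchAddPtr(Zero, Imm32(1), regT0)
        return;
    do {
        dstEnd[i] = srcEnd[i];
    } while (++i != 0);                          // mirrors branchAddPtr(NonZero, ...).linkTo(copyLoop)
}

int main()
{
    int64_t src[4] = { 10, 20, 30, 40 };
    int64_t dst[4] = { 0, 0, 0, 0 };
    copyWithNegativeIndex(src + 4, dst + 4, 4);
    std::printf("%lld %lld %lld\n", (long long)dst[1], (long long)dst[2], (long long)dst[3]);
    return 0;
}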
Example 4
void JIT::emit_op_mul(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    // For now, only plant a fast int case if the constant operand is greater than zero.
    int32_t value;
    if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
        emitFastArithReTagImmediate(regT0, regT0);
    } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
        emitFastArithReTagImmediate(regT0, regT0);
    } else
        compileBinaryArithOp(op_mul, result, op1, op2, types);

    emitPutVirtualRegister(result);
}
Example 5
void JIT::emit_op_lshift(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(op1, regT0, op2, regT2);
    // FIXME: would we be better using 'emitJumpSlowCaseIfNotImmediateIntegers'? - we *probably* ought to be consistent.
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT2);
    emitFastArithImmToInt(regT0);
    emitFastArithImmToInt(regT2);
    lshift32(regT2, regT0);
    emitFastArithReTagImmediate(regT0, regT0);
    emitPutVirtualRegister(result);
}
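
Note: lshift32 only has to honour the low five bits of the shift count, since that is what ECMA-262 specifies for <<. A minimal C++ statement of the semantics being emitted (hypothetical helper):

#include <cstdint>

int32_t jsLeftShift(int32_t lhs, int32_t rhs)
{
    // ECMA-262 masks the shift count to five bits; the cast avoids signed-shift UB in C++.
    return static_cast<int32_t>(static_cast<uint32_t>(lhs) << (static_cast<uint32_t>(rhs) & 0x1f));
}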
Example 6
void JIT::emit_op_mod(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

#if CPU(X86) || CPU(X86_64)
    // Make sure registers are correct for x86 IDIV instructions.
    ASSERT(regT0 == X86Registers::eax);
    ASSERT(regT1 == X86Registers::edx);
    ASSERT(regT2 == X86Registers::ecx);
#endif

    emitGetVirtualRegisters(op1, regT0, op2, regT2);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT2);

    addSlowCase(branchPtr(Equal, regT2, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))));
    m_assembler.cdq();
    m_assembler.idivl_r(regT2);
    emitFastArithReTagImmediate(regT1, regT0);
    emitPutVirtualRegister(result);
}
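
Note: the asserts pin regT0/regT1/regT2 to eax/edx/ecx because x86 idiv takes its dividend in edx:eax (cdq sign-extends eax into edx) and leaves the quotient in eax and the remainder in edx, which is why regT1 is the value retagged as the result. The zero-divisor slow case exists because idiv faults on division by zero. A standalone sketch of what the fast path computes (plain C++, hypothetical helper):

#include <cstdint>
#include <optional>

std::optional<int32_t> fastIntMod(int32_t dividend, int32_t divisor)
{
    if (divisor == 0)
        return std::nullopt;                     // mirrors the addSlowCase(...) zero-divisor check
    if (dividend == INT32_MIN && divisor == -1)
        return 0;                                // idiv (and C++ %) would trap here; the remainder is 0
    return dividend % divisor;                   // truncated remainder, matching idiv's edx output
}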