Example #1
void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned thisReg = currentInstruction[2].u.operand;

    // We could JIT generate the deref, only calling out to C when the refcount hits zero.
    if (m_codeBlock->needsFullScopeChain())
        JITStubCall(this, cti_op_ret_scopeChain).call();

    emitLoad(result, regT1, regT0);
    Jump notJSCell = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    Jump notObject = branch8(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType));

    emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT2);
    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);

    restoreReturnAddressBeforeReturn(regT2);
    ret();

    notJSCell.link(this);
    notObject.link(this);
    emitLoad(thisReg, regT1, regT0);

    emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT2);
    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);

    restoreReturnAddressBeforeReturn(regT2);
    ret();
}
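
// The branches above re-implement "is the returned value an object?" on the
// JSVALUE32_64 representation, where emitLoad splits a JSValue into a 32-bit
// tag (regT1) and a 32-bit payload (regT0). A minimal self-contained sketch of
// that check, using hypothetical stand-in types; the CellTag and ObjectType
// values mirror the assertions in Example #2:
#include <cstdint>

enum : uint32_t { SketchCellTag = static_cast<uint32_t>(-5) }; // JSValue::CellTag
enum : uint8_t { SketchObjectType = 17 };                      // TypeInfo m_type for objects

struct FakeCell { uint8_t m_type; }; // stands in for JSCell -> Structure -> TypeInfo::m_type
struct FakeValue {
    uint32_t tag;            // what emitLoad leaves in regT1
    const FakeCell* payload; // what emitLoad leaves in regT0 (a 32-bit word in real JSVALUE32_64)
};

// Equivalent of the two bail-out branches: not a cell, or a cell that is not an object.
inline bool shouldReturnThisInstead(const FakeValue& result)
{
    if (result.tag != SketchCellTag)                   // branch32(NotEqual, regT1, Imm32(CellTag))
        return true;                                   // notJSCell
    return result.payload->m_type != SketchObjectType; // branch8(NotEqual, ..., Imm32(ObjectType))
}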
Example #2
void Data::performAssertions(VM& vm)
{
    UNUSED_PARAM(vm);

    // Assertions to match LowLevelInterpreter.asm.  If you change any of this code, be
    // prepared to change LowLevelInterpreter.asm as well!!
    ASSERT(JSStack::CallFrameHeaderSize * 8 == 48);
    ASSERT(JSStack::ArgumentCount * 8 == -48);
    ASSERT(JSStack::CallerFrame * 8 == -40);
    ASSERT(JSStack::Callee * 8 == -32);
    ASSERT(JSStack::ScopeChain * 8 == -24);
    ASSERT(JSStack::ReturnPC * 8 == -16);
    ASSERT(JSStack::CodeBlock * 8 == -8);
    ASSERT(CallFrame::argumentOffsetIncludingThis(0) == -JSStack::CallFrameHeaderSize - 1);
#if CPU(BIG_ENDIAN)
    ASSERT(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag) == 0);
    ASSERT(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload) == 4);
#else
    ASSERT(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag) == 4);
    ASSERT(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload) == 0);
#endif
#if USE(JSVALUE32_64)
    ASSERT(JSValue::Int32Tag == static_cast<unsigned>(-1));
    ASSERT(JSValue::BooleanTag == static_cast<unsigned>(-2));
    ASSERT(JSValue::NullTag == static_cast<unsigned>(-3));
    ASSERT(JSValue::UndefinedTag == static_cast<unsigned>(-4));
    ASSERT(JSValue::CellTag == static_cast<unsigned>(-5));
    ASSERT(JSValue::EmptyValueTag == static_cast<unsigned>(-6));
    ASSERT(JSValue::DeletedValueTag == static_cast<unsigned>(-7));
    ASSERT(JSValue::LowestTag == static_cast<unsigned>(-7));
#else
    ASSERT(TagBitTypeOther == 0x2);
    ASSERT(TagBitBool == 0x4);
    ASSERT(TagBitUndefined == 0x8);
    ASSERT(ValueEmpty == 0x0);
    ASSERT(ValueFalse == (TagBitTypeOther | TagBitBool));
    ASSERT(ValueTrue == (TagBitTypeOther | TagBitBool | 1));
    ASSERT(ValueUndefined == (TagBitTypeOther | TagBitUndefined));
    ASSERT(ValueNull == TagBitTypeOther);
#endif
    ASSERT(StringType == 5);
    ASSERT(ObjectType == 17);
    ASSERT(MasqueradesAsUndefined == 1);
    ASSERT(ImplementsHasInstance == 2);
    ASSERT(ImplementsDefaultHasInstance == 8);
    ASSERT(FirstConstantRegisterIndex == 0x40000000);
    ASSERT(GlobalCode == 0);
    ASSERT(EvalCode == 1);
    ASSERT(FunctionCode == 2);

    // FIXME: make these assertions less horrible.
#if !ASSERT_DISABLED
    Vector<int> testVector;
    testVector.resize(42);
    ASSERT(bitwise_cast<uint32_t*>(&testVector)[sizeof(void*)/sizeof(uint32_t) + 1] == 42);
    ASSERT(bitwise_cast<int**>(&testVector)[0] == testVector.begin());
#endif

    ASSERT(StringImpl::s_hashFlag8BitBuffer == 64);
}
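
// The testVector ASSERTs above hard-code WTF::Vector's field layout: a buffer
// pointer first, then a 32-bit capacity, then a 32-bit size, so resize(42)
// leaves 42 at 32-bit word index sizeof(void*)/4 + 1. A self-contained sketch
// of the same layout probe, with a hypothetical struct standing in for Vector:
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>

struct VectorHeaderModel { // hypothetical mirror of the assumed Vector header
    int* buffer;
    uint32_t capacity;
    uint32_t size;
};

static_assert(offsetof(VectorHeaderModel, size) == sizeof(void*) + sizeof(uint32_t),
    "size is the second 32-bit word after the buffer pointer");

inline void probeLayout()
{
    VectorHeaderModel v { nullptr, 64, 42 };
    uint32_t words[sizeof(VectorHeaderModel) / sizeof(uint32_t)];
    std::memcpy(words, &v, sizeof v);                          // same idea as bitwise_cast
    assert(words[sizeof(void*) / sizeof(uint32_t) + 1] == 42); // the probe in the ASSERT above
}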
Example #3
void JIT::compileOpCallInitializeCallFrame()
{
    // regT0 holds callee, regT1 holds argCount
    store32(regT1, Address(callFrameRegister, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register))));
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT3); // scopeChain
    storePtr(regT0, Address(callFrameRegister, RegisterFile::Callee * static_cast<int>(sizeof(Register)))); // callee
    storePtr(regT3, Address(callFrameRegister, RegisterFile::ScopeChain * static_cast<int>(sizeof(Register)))); // scopeChain
}
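
// Every header access above is "slot index * sizeof(Register)". A sketch of
// that offset arithmetic; the static_cast<int> in the real code matters
// because header slot indices can be negative (compare the ASSERTs in
// Example #2, e.g. JSStack::ArgumentCount * 8 == -48). Indices here are
// illustrative, taken from those assertions:
#include <cstddef>

constexpr std::ptrdiff_t registerByteOffset(int slotIndex, std::size_t registerSize = 8)
{
    return slotIndex * static_cast<std::ptrdiff_t>(registerSize); // registerSize models sizeof(Register)
}

static_assert(registerByteOffset(-6) == -48, "matches ArgumentCount * 8 == -48 in Example #2");
static_assert(registerByteOffset(-1) == -8, "matches CodeBlock * 8 == -8 in Example #2");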
Example #4
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
    int callee = instruction[1].u.operand;
    int argCount = instruction[2].u.operand;
    int registerOffset = instruction[3].u.operand;

    Jump wasEval;
    if (opcodeID == op_call_eval) {
        JITStubCall stubCall(this, cti_op_call_eval);
        stubCall.addArgument(callee);
        stubCall.addArgument(JIT::Imm32(registerOffset));
        stubCall.addArgument(JIT::Imm32(argCount));
        stubCall.call();
        wasEval = branch32(NotEqual, regT1, Imm32(JSValue::EmptyValueTag));
    }

    emitLoad(callee, regT1, regT0);

    DataLabelPtr addressOfLinkedFunctionCheck;

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceOpCall);

    Jump jumpToSlow = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, ImmPtr(0));

    END_UNINTERRUPTED_SEQUENCE(sequenceOpCall);

    addSlowCase(jumpToSlow);
    ASSERT(differenceBetween(addressOfLinkedFunctionCheck, jumpToSlow) == patchOffsetOpCallCompareToJump);
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;

    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));

    // The following is the fast case, only used when a callee can be linked.

    // Fast version of stack frame initialization, directly relative to edi.
    // Note that this does not set up RegisterFile::CodeBlock, which is set in the callee.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT2);

    store32(Imm32(argCount), Address(callFrameRegister, (registerOffset + RegisterFile::ArgumentCount) * static_cast<int>(sizeof(Register))));
    storePtr(callFrameRegister, Address(callFrameRegister, (registerOffset + RegisterFile::CallerFrame) * static_cast<int>(sizeof(Register))));
    emitStore(registerOffset + RegisterFile::Callee, regT1, regT0);
    storePtr(regT2, Address(callFrameRegister, (registerOffset + RegisterFile::ScopeChain) * static_cast<int>(sizeof(Register))));
    addPtr(Imm32(registerOffset * sizeof(Register)), callFrameRegister);

    // Call to the callee
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();
    
    if (opcodeID == op_call_eval)
        wasEval.link(this);

    sampleCodeBlock(m_codeBlock);
}
Example #5
0
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
    int callee = instruction[1].u.operand;

    /* Caller always:
        - Updates callFrameRegister to callee callFrame.
        - Initializes ArgumentCount; CallerFrame; Callee.

       For a JS call:
        - Caller initializes ScopeChain.
        - Callee initializes ReturnPC; CodeBlock.
        - Callee restores callFrameRegister before return.

       For a non-JS call:
        - Caller initializes ScopeChain; ReturnPC; CodeBlock.
        - Caller restores callFrameRegister after return.
    */
    
    if (opcodeID == op_call_varargs)
        compileLoadVarargs(instruction);
    else {
        int argCount = instruction[2].u.operand;
        int registerOffset = instruction[3].u.operand;

        addPtr(TrustedImm32(registerOffset * sizeof(Register)), callFrameRegister, regT3);

        store32(TrustedImm32(argCount), payloadFor(RegisterFile::ArgumentCount, regT3));
    } // regT3 holds newCallFrame with ArgumentCount initialized.
    emitLoad(callee, regT1, regT0); // regT1, regT0 holds callee.

    storePtr(callFrameRegister, Address(regT3, RegisterFile::CallerFrame * static_cast<int>(sizeof(Register))));
    emitStore(RegisterFile::Callee, regT1, regT0, regT3);
    move(regT3, callFrameRegister);

    if (opcodeID == op_call_eval) {
        compileCallEval();
        return;
    }

    DataLabelPtr addressOfLinkedFunctionCheck;
    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
    Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0));
    END_UNINTERRUPTED_SEQUENCE(sequenceOpCall);

    addSlowCase(slowCase);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));

    ASSERT_JIT_OFFSET(differenceBetween(addressOfLinkedFunctionCheck, slowCase), patchOffsetOpCallCompareToJump);
    ASSERT(m_callStructureStubCompilationInfo.size() == callLinkInfoIndex);
    m_callStructureStubCompilationInfo.append(StructureStubCompilationInfo());
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
    m_callStructureStubCompilationInfo[callLinkInfoIndex].callType = CallLinkInfo::callTypeFor(opcodeID);
    m_callStructureStubCompilationInfo[callLinkInfoIndex].bytecodeIndex = m_bytecodeOffset;

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain)), regT1);
    emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();

    sampleCodeBlock(m_codeBlock);
}
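
// A plain-C++ model of the calling convention spelled out in the comment at
// the top of this function (all names hypothetical; a sketch of the division
// of labour, not JSC's real CallFrame):
struct CallFrameModel {
    CallFrameModel* callerFrame;
    void* callee;
    unsigned argumentCount;
    void* scopeChain;
    void* returnPC;
    void* codeBlock;
};

// The caller's share for a JS call, mirroring the stores above: ArgumentCount,
// CallerFrame and Callee (plus ScopeChain for a linked JS callee), then adopt
// the new frame, as move(regT3, callFrameRegister) does.
inline CallFrameModel* callerSetUpJSCall(CallFrameModel* current, CallFrameModel* fresh,
    void* callee, unsigned argCount, void* scope)
{
    fresh->argumentCount = argCount; // store32(..., payloadFor(ArgumentCount, regT3))
    fresh->callerFrame = current;    // storePtr(callFrameRegister, ...CallerFrame...)
    fresh->callee = callee;          // emitStore(RegisterFile::Callee, regT1, regT0, regT3)
    fresh->scopeChain = scope;       // emitPutCellToCallFrameHeader(regT1, ScopeChain)
    return fresh;                    // ReturnPC and CodeBlock are the callee's job here
}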
Example #6
0
void JIT::compileOpCallInitializeCallFrame()
{
    // regT0 holds callee, regT1 holds argCount
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain)), regT3); // scopeChain
    emitPutIntToCallFrameHeader(regT1, RegisterFile::ArgumentCount);
    emitPutCellToCallFrameHeader(regT0, RegisterFile::Callee);
    emitPutCellToCallFrameHeader(regT3, RegisterFile::ScopeChain);
}
Example #7
0
void InferredType::InferredStructureWatchpoint::fireInternal(const FireDetail&)
{
    InferredStructure* inferredStructure =
        bitwise_cast<InferredStructure*>(
            bitwise_cast<char*>(this) - OBJECT_OFFSETOF(InferredStructure, m_watchpoint));

    inferredStructure->m_parent->removeStructure();
}
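
// fireInternal above is the classic container_of idiom: given a pointer to an
// embedded member, subtract the member's offset to recover the owning object.
// A minimal sketch with hypothetical types:
#include <cassert>
#include <cstddef>

struct WatchpointModel { int state; };

struct OwnerModel {
    int other;
    WatchpointModel m_watchpoint; // embedded, like InferredStructure::m_watchpoint
};

inline OwnerModel* ownerOf(WatchpointModel* w)
{
    // bitwise_cast<char*>(this) - OBJECT_OFFSETOF(Owner, m_watchpoint), in plain C++.
    return reinterpret_cast<OwnerModel*>(
        reinterpret_cast<char*>(w) - offsetof(OwnerModel, m_watchpoint));
}

inline void containerOfDemo()
{
    OwnerModel owner;
    assert(ownerOf(&owner.m_watchpoint) == &owner);
}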
void JSFloat64Array::finishCreation(VM& vm)
{
    Base::finishCreation(vm);
    TypedArrayDescriptor descriptor(&JSFloat64Array::s_info, OBJECT_OFFSETOF(JSFloat64Array, m_storage), OBJECT_OFFSETOF(JSFloat64Array, m_storageLength));
    vm.registerTypedArrayDescriptor(impl(), descriptor);
    m_storage = impl()->data();
    m_storageLength = impl()->length();
    ASSERT(inherits(&s_info));
}
std::unique_ptr<ConcurrentPtrHashSet::Table> ConcurrentPtrHashSet::Table::create(unsigned size)
{
    std::unique_ptr<ConcurrentPtrHashSet::Table> result(new (fastMalloc(OBJECT_OFFSETOF(Table, array) + sizeof(Atomic<void*>) * size)) Table());
    result->size = size;
    result->mask = size - 1;
    result->load.storeRelaxed(0);
    for (unsigned i = 0; i < size; ++i)
        result->array[i].storeRelaxed(nullptr);
    return result;
}
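
// Table::create above over-allocates one block for the header plus `size`
// trailing slots, then placement-news the header at the front. A sketch of the
// same pattern with malloc standing in for fastMalloc (hypothetical types;
// like the original, it indexes past a one-element trailing array):
#include <cstddef>
#include <cstdlib>
#include <new>

struct TrailingArrayTable {
    unsigned size;
    unsigned mask;
    void* array[1]; // storage actually extends `size` slots past the header
};

inline TrailingArrayTable* createTable(unsigned size) // size assumed a power of two
{
    void* memory = std::malloc(offsetof(TrailingArrayTable, array) + sizeof(void*) * size);
    TrailingArrayTable* result = new (memory) TrailingArrayTable;
    result->size = size;
    result->mask = size - 1; // power-of-two size makes (hash & mask) a cheap modulo
    for (unsigned i = 0; i < size; ++i)
        result->array[i] = nullptr;
    return result;
}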
Example #10
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
    int callee = instruction[1].u.operand;
    int argCount = instruction[2].u.operand;
    int registerOffset = instruction[3].u.operand;

    // Handle eval
    Jump wasEval;
    if (opcodeID == op_call_eval) {
        JITStubCall stubCall(this, cti_op_call_eval);
        stubCall.addArgument(callee, regT0);
        stubCall.addArgument(JIT::Imm32(registerOffset));
        stubCall.addArgument(JIT::Imm32(argCount));
        stubCall.call();
        wasEval = branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(JSValue())));
    }

    // This plants a check for a cached JSFunction value, so we can plant a fast link to the callee.
    // This deliberately leaves the callee in ecx, used when setting up the stack frame below
    emitGetVirtualRegister(callee, regT0);
    DataLabelPtr addressOfLinkedFunctionCheck;

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceOpCall);

    Jump jumpToSlow = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(JSValue::encode(JSValue())));

    END_UNINTERRUPTED_SEQUENCE(sequenceOpCall);

    addSlowCase(jumpToSlow);
    ASSERT_JIT_OFFSET(differenceBetween(addressOfLinkedFunctionCheck, jumpToSlow), patchOffsetOpCallCompareToJump);
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
    m_callStructureStubCompilationInfo[callLinkInfoIndex].isCall = opcodeID != op_construct;

    // The following is the fast case, only used when a callee can be linked.

    // Fast version of stack frame initialization, directly relative to edi.
    // Note that this does not set up RegisterFile::CodeBlock, which is set in the callee.

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain)), regT1); // newScopeChain
    
    store32(TrustedImm32(Int32Tag), intTagFor(registerOffset + RegisterFile::ArgumentCount));
    store32(Imm32(argCount), intPayloadFor(registerOffset + RegisterFile::ArgumentCount));
    storePtr(callFrameRegister, Address(callFrameRegister, (registerOffset + RegisterFile::CallerFrame) * static_cast<int>(sizeof(Register))));
    storePtr(regT0, Address(callFrameRegister, (registerOffset + RegisterFile::Callee) * static_cast<int>(sizeof(Register))));
    storePtr(regT1, Address(callFrameRegister, (registerOffset + RegisterFile::ScopeChain) * static_cast<int>(sizeof(Register))));
    addPtr(Imm32(registerOffset * sizeof(Register)), callFrameRegister);

    // Call to the callee
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();
    
    if (opcodeID == op_call_eval)
        wasEval.link(this);

    sampleCodeBlock(m_codeBlock);
}
void AdaptiveInferredPropertyValueWatchpoint::PropertyWatchpoint::fireInternal(
    const FireDetail& detail)
{
    ptrdiff_t myOffset = OBJECT_OFFSETOF(
        AdaptiveInferredPropertyValueWatchpoint, m_propertyWatchpoint);
    
    AdaptiveInferredPropertyValueWatchpoint* parent =
        bitwise_cast<AdaptiveInferredPropertyValueWatchpoint*>(
            bitwise_cast<char*>(this) - myOffset);
    
    parent->fire(detail);
}
Example #12
void JIT::compileLoadVarargs(Instruction* instruction)
{
    int thisValue = instruction[2].u.operand;
    int arguments = instruction[3].u.operand;
    int firstFreeRegister = instruction[4].u.operand;

    JumpList slowCase;
    JumpList end;
    if (m_codeBlock->usesArguments() && arguments == m_codeBlock->argumentsRegister()) {
        emitLoadTag(arguments, regT1);
        slowCase.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::EmptyValueTag)));

        load32(payloadFor(RegisterFile::ArgumentCount), regT2);
        slowCase.append(branch32(Above, regT2, TrustedImm32(Arguments::MaxArguments + 1)));
        // regT2: argumentCountIncludingThis

        move(regT2, regT3);
        add32(TrustedImm32(firstFreeRegister + RegisterFile::CallFrameHeaderSize), regT3);
        lshift32(TrustedImm32(3), regT3);
        addPtr(callFrameRegister, regT3);
        // regT3: newCallFrame

        slowCase.append(branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), regT3));

        // Initialize ArgumentCount.
        store32(regT2, payloadFor(RegisterFile::ArgumentCount, regT3));

        // Initialize 'this'.
        emitLoad(thisValue, regT1, regT0);
        store32(regT0, Address(regT3, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
        store32(regT1, Address(regT3, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));

        // Copy arguments.
        neg32(regT2);
        end.append(branchAdd32(Zero, TrustedImm32(1), regT2));
        // regT2: -argumentCount;

        Label copyLoop = label();
        load32(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))), regT0);
        load32(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))), regT1);
        store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
        store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
        branchAdd32(NonZero, TrustedImm32(1), regT2).linkTo(copyLoop, this);

        end.append(jump());
    }

    if (m_codeBlock->usesArguments() && arguments == m_codeBlock->argumentsRegister())
        slowCase.link(this);

    JITStubCall stubCall(this, cti_op_load_varargs);
    stubCall.addArgument(thisValue);
    stubCall.addArgument(arguments);
    stubCall.addArgument(Imm32(firstFreeRegister));
    stubCall.call(regT3);

    if (m_codeBlock->usesArguments() && arguments == m_codeBlock->argumentsRegister())
        end.link(this);
}
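
// The copy loop above uses a negative-index trick: negate the count, then let
// the add's zero flag double as the loop-exit test, so no separate compare is
// needed. The same shape in plain C++ (hypothetical buffers; src and dst point
// at the 'this' slot, so the arguments sit at negative indices, as in the
// frame layout being copied):
inline void copyArgumentsUpward(const long long* src, long long* dst, int argumentCountIncludingThis)
{
    int i = -argumentCountIncludingThis;
    if (++i == 0)        // branchAdd32(Zero, TrustedImm32(1), regT2): only 'this', nothing to copy
        return;
    do {
        dst[i] = src[i]; // the load32/store32 pairs via BaseIndex(..., regT2, TimesEight, ...)
    } while (++i != 0);  // branchAdd32(NonZero, TrustedImm32(1), regT2).linkTo(copyLoop, this)
}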
Example #13
void InferredType::InferredStructureFinalizer::finalizeUnconditionally()
{
    InferredStructure* inferredStructure =
        bitwise_cast<InferredStructure*>(
            bitwise_cast<char*>(this) - OBJECT_OFFSETOF(InferredStructure, m_finalizer));
    
    ASSERT(Heap::isMarked(inferredStructure->m_parent));
    
    // Monotonicity ensures that we shouldn't see a new structure that is different from us,
    // but we could have been nulled. We rely on it being the null case only in debug.
    if (inferredStructure == inferredStructure->m_parent->m_structure.get()) {
        if (!Heap::isMarked(inferredStructure->m_structure.get()))
            inferredStructure->m_parent->removeStructure();
    } else
        ASSERT(!inferredStructure->m_parent->m_structure);
    
    inferredStructure->deref();
}
Example #14
void JIT::compileLoadVarargs(Instruction* instruction)
{
    int thisValue = instruction[3].u.operand;
    int arguments = instruction[4].u.operand;
    int firstFreeRegister = instruction[5].u.operand;

    JumpList slowCase;
    JumpList end;
    bool canOptimize = m_codeBlock->usesArguments()
        && VirtualRegister(arguments) == m_codeBlock->argumentsRegister()
        && !m_codeBlock->symbolTable()->slowArguments();

    if (canOptimize) {
        emitLoadTag(arguments, regT1);
        slowCase.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::EmptyValueTag)));

        load32(payloadFor(JSStack::ArgumentCount), regT2);
        slowCase.append(branch32(Above, regT2, TrustedImm32(Arguments::MaxArguments + 1)));
        // regT2: argumentCountIncludingThis

        move(regT2, regT3);
        neg32(regT3);
        add32(TrustedImm32(firstFreeRegister - JSStack::CallFrameHeaderSize), regT3);
        lshift32(TrustedImm32(3), regT3);
        addPtr(callFrameRegister, regT3);
        // regT3: newCallFrame

        slowCase.append(branchPtr(Above, AbsoluteAddress(m_vm->interpreter->stack().addressOfEnd()), regT3));

        // Initialize ArgumentCount.
        store32(regT2, payloadFor(JSStack::ArgumentCount, regT3));

        // Initialize 'this'.
        emitLoad(thisValue, regT1, regT0);
        store32(regT0, Address(regT3, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
        store32(regT1, Address(regT3, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));

        // Copy arguments.
        end.append(branchSub32(Zero, TrustedImm32(1), regT2));
        // regT2: argumentCount;

        Label copyLoop = label();
        load32(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))), regT0);
        load32(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))), regT1);
        store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
        store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
        branchSub32(NonZero, TrustedImm32(1), regT2).linkTo(copyLoop, this);

        end.append(jump());
    }

    if (canOptimize)
        slowCase.link(this);

    emitLoad(thisValue, regT1, regT0);
    emitLoad(arguments, regT3, regT2);
    callOperation(operationLoadVarargs, regT1, regT0, regT3, regT2, firstFreeRegister);
    move(returnValueRegister, regT3);

    if (canOptimize)
        end.link(this);
}
Example #15
void Data::performAssertions(VM& vm)
{
    UNUSED_PARAM(vm);
    
    // Assertions to match LowLevelInterpreter.asm.  If you change any of this code, be
    // prepared to change LowLevelInterpreter.asm as well!!

#if USE(JSVALUE64)
    const ptrdiff_t PtrSize = 8;
    const ptrdiff_t CallFrameHeaderSlots = 5;
#else // USE(JSVALUE64) // i.e. 32-bit version
    const ptrdiff_t PtrSize = 4;
    const ptrdiff_t CallFrameHeaderSlots = 4;
#endif
    const ptrdiff_t SlotSize = 8;

    STATIC_ASSERT(sizeof(void*) == PtrSize);
    STATIC_ASSERT(sizeof(Register) == SlotSize);
    STATIC_ASSERT(JSStack::CallFrameHeaderSize == CallFrameHeaderSlots);

    ASSERT(!CallFrame::callerFrameOffset());
    STATIC_ASSERT(JSStack::CallerFrameAndPCSize == (PtrSize * 2) / SlotSize);
    ASSERT(CallFrame::returnPCOffset() == CallFrame::callerFrameOffset() + PtrSize);
    ASSERT(JSStack::CodeBlock * sizeof(Register) == CallFrame::returnPCOffset() + PtrSize);
    STATIC_ASSERT(JSStack::Callee * sizeof(Register) == JSStack::CodeBlock * sizeof(Register) + SlotSize);
    STATIC_ASSERT(JSStack::ArgumentCount * sizeof(Register) == JSStack::Callee * sizeof(Register) + SlotSize);
    STATIC_ASSERT(JSStack::ThisArgument * sizeof(Register) == JSStack::ArgumentCount * sizeof(Register) + SlotSize);
    STATIC_ASSERT(JSStack::CallFrameHeaderSize == JSStack::ThisArgument);

    ASSERT(CallFrame::argumentOffsetIncludingThis(0) == JSStack::ThisArgument);

#if CPU(BIG_ENDIAN)
    ASSERT(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag) == 0);
    ASSERT(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload) == 4);
#else
    ASSERT(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag) == 4);
    ASSERT(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload) == 0);
#endif
#if USE(JSVALUE32_64)
    STATIC_ASSERT(JSValue::Int32Tag == static_cast<unsigned>(-1));
    STATIC_ASSERT(JSValue::BooleanTag == static_cast<unsigned>(-2));
    STATIC_ASSERT(JSValue::NullTag == static_cast<unsigned>(-3));
    STATIC_ASSERT(JSValue::UndefinedTag == static_cast<unsigned>(-4));
    STATIC_ASSERT(JSValue::CellTag == static_cast<unsigned>(-5));
    STATIC_ASSERT(JSValue::EmptyValueTag == static_cast<unsigned>(-6));
    STATIC_ASSERT(JSValue::DeletedValueTag == static_cast<unsigned>(-7));
    STATIC_ASSERT(JSValue::LowestTag == static_cast<unsigned>(-7));
#else
    STATIC_ASSERT(TagBitTypeOther == 0x2);
    STATIC_ASSERT(TagBitBool == 0x4);
    STATIC_ASSERT(TagBitUndefined == 0x8);
    STATIC_ASSERT(ValueEmpty == 0x0);
    STATIC_ASSERT(ValueFalse == (TagBitTypeOther | TagBitBool));
    STATIC_ASSERT(ValueTrue == (TagBitTypeOther | TagBitBool | 1));
    STATIC_ASSERT(ValueUndefined == (TagBitTypeOther | TagBitUndefined));
    STATIC_ASSERT(ValueNull == TagBitTypeOther);
#endif
#if (CPU(X86_64) && !OS(WINDOWS)) || CPU(ARM64) || !ENABLE(JIT)
    STATIC_ASSERT(!maxFrameExtentForSlowPathCall);
#elif CPU(ARM) || CPU(SH4)
    STATIC_ASSERT(maxFrameExtentForSlowPathCall == 24);
#elif CPU(X86) || CPU(MIPS)
    STATIC_ASSERT(maxFrameExtentForSlowPathCall == 40);
#elif CPU(X86_64) && OS(WINDOWS)
    STATIC_ASSERT(maxFrameExtentForSlowPathCall == 64);
#endif

#if !ENABLE(JIT) || USE(JSVALUE32_64)
    ASSERT(!CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters());
#elif (CPU(X86_64) && !OS(WINDOWS))  || CPU(ARM64)
    ASSERT(CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters() == 3);
#elif (CPU(X86_64) && OS(WINDOWS))
    ASSERT(CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters() == 3);
#endif
    
    STATIC_ASSERT(StringType == 6);
    STATIC_ASSERT(SymbolType == 7);
    STATIC_ASSERT(ObjectType == 20);
    STATIC_ASSERT(FinalObjectType == 21);
    STATIC_ASSERT(JSFunctionType == 23);
    STATIC_ASSERT(MasqueradesAsUndefined == 1);
    STATIC_ASSERT(ImplementsDefaultHasInstance == 2);
    STATIC_ASSERT(FirstConstantRegisterIndex == 0x40000000);
    STATIC_ASSERT(GlobalCode == 0);
    STATIC_ASSERT(EvalCode == 1);
    STATIC_ASSERT(FunctionCode == 2);
    STATIC_ASSERT(ModuleCode == 3);

    ASSERT(!(reinterpret_cast<ptrdiff_t>((reinterpret_cast<WriteBarrier<JSCell>*>(0x4000)->slot())) - 0x4000));
    static_assert(PutByIdPrimaryTypeMask == 0x6, "LLInt assumes PutByIdPrimaryTypeMask is == 0x6");
    static_assert(PutByIdPrimaryTypeSecondary == 0x0, "LLInt assumes PutByIdPrimaryTypeSecondary is == 0x0");
    static_assert(PutByIdPrimaryTypeObjectWithStructure == 0x2, "LLInt assumes PutByIdPrimaryTypeObjectWithStructure is == 0x2");
    static_assert(PutByIdPrimaryTypeObjectWithStructureOrOther == 0x4, "LLInt assumes PutByIdPrimaryTypeObjectWithStructureOrOther is == 0x4");
    static_assert(PutByIdSecondaryTypeMask == -0x8, "LLInt assumes PutByIdSecondaryTypeMask is == -0x8");
    static_assert(PutByIdSecondaryTypeBottom == 0x0, "LLInt assumes PutByIdSecondaryTypeBottom is == 0x0");
    static_assert(PutByIdSecondaryTypeBoolean == 0x8, "LLInt assumes PutByIdSecondaryTypeBoolean is == 0x8");
    static_assert(PutByIdSecondaryTypeOther == 0x10, "LLInt assumes PutByIdSecondaryTypeOther is == 0x10");
    static_assert(PutByIdSecondaryTypeInt32 == 0x18, "LLInt assumes PutByIdSecondaryTypeInt32 is == 0x18");
    static_assert(PutByIdSecondaryTypeNumber == 0x20, "LLInt assumes PutByIdSecondaryTypeNumber is == 0x20");
    static_assert(PutByIdSecondaryTypeString == 0x28, "LLInt assumes PutByIdSecondaryTypeString is == 0x28");
    static_assert(PutByIdSecondaryTypeSymbol == 0x30, "LLInt assumes PutByIdSecondaryTypeSymbol is == 0x30");
    static_assert(PutByIdSecondaryTypeObject == 0x38, "LLInt assumes PutByIdSecondaryTypeObject is == 0x38");
    static_assert(PutByIdSecondaryTypeObjectOrOther == 0x40, "LLInt assumes PutByIdSecondaryTypeObjectOrOther is == 0x40");
    static_assert(PutByIdSecondaryTypeTop == 0x48, "LLInt assumes PutByIdSecondaryTypeTop is == 0x48");

    static_assert(GlobalProperty == 0, "LLInt assumes GlobalProperty ResultType is == 0");
    static_assert(GlobalVar == 1, "LLInt assumes GlobalVar ResultType is == 1");
    static_assert(GlobalLexicalVar == 2, "LLInt assumes GlobalLexicalVar ResultType is == 2");
    static_assert(ClosureVar == 3, "LLInt assumes ClosureVar ResultType is == 3");
    static_assert(LocalClosureVar == 4, "LLInt assumes LocalClosureVar ResultType is == 4");
    static_assert(ModuleVar == 5, "LLInt assumes ModuleVar ResultType is == 5");
    static_assert(GlobalPropertyWithVarInjectionChecks == 6, "LLInt assumes GlobalPropertyWithVarInjectionChecks ResultType is == 6");
    static_assert(GlobalVarWithVarInjectionChecks == 7, "LLInt assumes GlobalVarWithVarInjectionChecks ResultType is == 7");
    static_assert(GlobalLexicalVarWithVarInjectionChecks == 8, "LLInt assumes GlobalLexicalVarWithVarInjectionChecks ResultType is == 8");
    static_assert(ClosureVarWithVarInjectionChecks == 9, "LLInt assumes ClosureVarWithVarInjectionChecks ResultType is == 9");

    static_assert(static_cast<unsigned>(InitializationMode::NotInitialization) == 2, "LLInt assumes that InitializationMode::NotInitialization is 2");
    
    STATIC_ASSERT(GetPutInfo::typeBits == 0x3ff);
    STATIC_ASSERT(GetPutInfo::initializationShift == 10);
    STATIC_ASSERT(GetPutInfo::initializationBits == 0xffc00);

    STATIC_ASSERT(MarkedBlock::blockMask == ~static_cast<decltype(MarkedBlock::blockMask)>(0x3fff));

    // FIXME: make these assertions less horrible.
#if !ASSERT_DISABLED
    Vector<int> testVector;
    testVector.resize(42);
    ASSERT(bitwise_cast<uint32_t*>(&testVector)[sizeof(void*)/sizeof(uint32_t) + 1] == 42);
    ASSERT(bitwise_cast<int**>(&testVector)[0] == testVector.begin());
#endif

    ASSERT(StringImpl::s_hashFlag8BitBuffer == 8);
}
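
// The three GetPutInfo STATIC_ASSERTs above pin down a bitfield packing: the
// low 10 bits and the next 10 bits are two adjacent fields. A sketch of that
// packing (the pack/unpack helpers and field meanings are hypothetical; only
// the constants come from the assertions):
constexpr unsigned typeBits = 0x3ff;
constexpr unsigned initializationShift = 10;
constexpr unsigned initializationBits = 0xffc00;

static_assert(initializationBits == (typeBits << initializationShift),
    "the two fields are adjacent, as the LLInt assumes");

constexpr unsigned packGetPutInfo(unsigned type, unsigned initialization)
{
    return (type & typeBits) | ((initialization << initializationShift) & initializationBits);
}

constexpr unsigned typeOf(unsigned packed) { return packed & typeBits; }
constexpr unsigned initializationOf(unsigned packed) { return (packed & initializationBits) >> initializationShift; }

static_assert(typeOf(packGetPutInfo(5, 2)) == 5, "round-trips the type field");
static_assert(initializationOf(packGetPutInfo(5, 2)) == 2, "round-trips the initialization field");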
Example #16
void JIT::compileLoadVarargs(Instruction* instruction)
{
    int thisValue = instruction[2].u.operand;
    int arguments = instruction[3].u.operand;
    int firstFreeRegister = instruction[4].u.operand;

    killLastResultRegister();

    JumpList slowCase;
    JumpList end;
    bool canOptimize = m_codeBlock->usesArguments()
        && arguments == m_codeBlock->argumentsRegister()
        && !m_codeBlock->symbolTable()->slowArguments();

    if (canOptimize) {
        emitGetVirtualRegister(arguments, regT0);
        slowCase.append(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(JSValue()))));

        emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT0);
        slowCase.append(branch32(Above, regT0, TrustedImm32(Arguments::MaxArguments + 1)));
        // regT0: argumentCountIncludingThis

        move(regT0, regT1);
        add32(TrustedImm32(firstFreeRegister + JSStack::CallFrameHeaderSize), regT1);
        lshift32(TrustedImm32(3), regT1);
        addPtr(callFrameRegister, regT1);
        // regT1: newCallFrame

        slowCase.append(branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->stack().addressOfEnd()), regT1));

        // Initialize ArgumentCount.
        store32(regT0, Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));

        // Initialize 'this'.
        emitGetVirtualRegister(thisValue, regT2);
        store64(regT2, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));

        // Copy arguments.
        neg32(regT0);
        signExtend32ToPtr(regT0, regT0);
        end.append(branchAdd64(Zero, TrustedImm32(1), regT0));
        // regT0: -argumentCount

        Label copyLoop = label();
        load64(BaseIndex(callFrameRegister, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT2);
        store64(regT2, BaseIndex(regT1, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
        branchAdd64(NonZero, TrustedImm32(1), regT0).linkTo(copyLoop, this);

        end.append(jump());
    }

    if (canOptimize)
        slowCase.link(this);

    JITStubCall stubCall(this, cti_op_load_varargs);
    stubCall.addArgument(thisValue, regT0);
    stubCall.addArgument(arguments, regT0);
    stubCall.addArgument(Imm32(firstFreeRegister));
    stubCall.call(regT1);

    if (canOptimize)
        end.link(this);
}
Example #17
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
    int callee = instruction[1].u.operand;

    /* Caller always:
        - Updates callFrameRegister to callee callFrame.
        - Initializes ArgumentCount; CallerFrame; Callee.

       For a JS call:
        - Caller initializes ScopeChain.
        - Callee initializes ReturnPC; CodeBlock.
        - Callee restores callFrameRegister before return.

       For a non-JS call:
        - Caller initializes ScopeChain; ReturnPC; CodeBlock.
        - Caller restores callFrameRegister after return.
    */

    if (opcodeID == op_call_varargs)
        compileLoadVarargs(instruction);
    else {
        int argCount = instruction[2].u.operand;
        int registerOffset = instruction[3].u.operand;

        if (opcodeID == op_call && canBeOptimized()) {
            emitGetVirtualRegister(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0);
            Jump done = emitJumpIfNotJSCell(regT0);
            loadPtr(Address(regT0, JSCell::structureOffset()), regT0);
            storePtr(regT0, instruction[5].u.arrayProfile->addressOfLastSeenStructure());
            done.link(this);
        }
    
        addPtr(TrustedImm32(registerOffset * sizeof(Register)), callFrameRegister, regT1);
        store32(TrustedImm32(argCount), Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
    } // regT1 holds newCallFrame with ArgumentCount initialized.
    
    store32(TrustedImm32(instruction - m_codeBlock->instructions().begin()), Address(callFrameRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
    emitGetVirtualRegister(callee, regT0); // regT0 holds callee.

    store64(callFrameRegister, Address(regT1, JSStack::CallerFrame * static_cast<int>(sizeof(Register))));
    store64(regT0, Address(regT1, JSStack::Callee * static_cast<int>(sizeof(Register))));
    move(regT1, callFrameRegister);

    if (opcodeID == op_call_eval) {
        compileCallEval();
        return;
    }

    DataLabelPtr addressOfLinkedFunctionCheck;
    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
    Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0));
    END_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
    addSlowCase(slowCase);

    ASSERT(m_callStructureStubCompilationInfo.size() == callLinkInfoIndex);
    m_callStructureStubCompilationInfo.append(StructureStubCompilationInfo());
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
    m_callStructureStubCompilationInfo[callLinkInfoIndex].callType = CallLinkInfo::callTypeFor(opcodeID);
    m_callStructureStubCompilationInfo[callLinkInfoIndex].bytecodeIndex = m_bytecodeOffset;

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT1);
    emitPutToCallFrameHeader(regT1, JSStack::ScopeChain);
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();

    sampleCodeBlock(m_codeBlock);
}
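
// Note the ArgumentCount slot above does double duty on JSVALUE64: the payload
// half (byte offset 0) holds the argument count, and the tag half (byte offset
// 4) holds the caller's bytecode offset. A sketch of that split, assuming a
// little-endian layout so the offsets match the EncodedValueDescriptor
// assertions in Example #2:
#include <cassert>
#include <cstdint>
#include <cstring>

struct RegisterSlotModel { uint64_t bits; }; // one 64-bit Register

inline void setArgumentCountSlot(RegisterSlotModel& slot, uint32_t argCount, uint32_t bytecodeOffset)
{
    std::memcpy(reinterpret_cast<char*>(&slot) + 0, &argCount, 4);       // asBits.payload
    std::memcpy(reinterpret_cast<char*>(&slot) + 4, &bytecodeOffset, 4); // asBits.tag
}

inline void slotDemo()
{
    RegisterSlotModel slot { 0 };
    setArgumentCountSlot(slot, 3, 42);
    assert((slot.bits & 0xffffffffu) == 3); // count in the low (payload) half
    assert((slot.bits >> 32) == 42);        // bytecode offset in the high (tag) half
}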
Example #18
void JIT::compileLoadVarargs(Instruction* instruction)
{
    int thisValue = instruction[3].u.operand;
    int arguments = instruction[4].u.operand;
    int firstFreeRegister = instruction[5].u.operand;

    JumpList slowCase;
    JumpList end;
    bool canOptimize = m_codeBlock->usesArguments()
        && arguments == m_codeBlock->argumentsRegister().offset()
        && !m_codeBlock->symbolTable()->slowArguments();

    if (canOptimize) {
        emitGetVirtualRegister(arguments, regT0);
        slowCase.append(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(JSValue()))));

        emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT0);
        slowCase.append(branch32(Above, regT0, TrustedImm32(Arguments::MaxArguments + 1)));
        // regT0: argumentCountIncludingThis

        move(regT0, regT1);
        neg64(regT1);
        add64(TrustedImm32(firstFreeRegister - JSStack::CallFrameHeaderSize), regT1);
        lshift64(TrustedImm32(3), regT1);
        addPtr(callFrameRegister, regT1);
        // regT1: newCallFrame

        slowCase.append(branchPtr(Above, AbsoluteAddress(m_vm->addressOfJSStackLimit()), regT1));

        // Initialize ArgumentCount.
        store32(regT0, Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));

        // Initialize 'this'.
        emitGetVirtualRegister(thisValue, regT2);
        store64(regT2, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));

        // Copy arguments.
        signExtend32ToPtr(regT0, regT0);
        end.append(branchSub64(Zero, TrustedImm32(1), regT0));
        // regT0: argumentCount

        Label copyLoop = label();
        load64(BaseIndex(callFrameRegister, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT2);
        store64(regT2, BaseIndex(regT1, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
        branchSub64(NonZero, TrustedImm32(1), regT0).linkTo(copyLoop, this);

        end.append(jump());
    }

    if (canOptimize)
        slowCase.link(this);

    emitGetVirtualRegister(arguments, regT1);
    callOperation(operationSizeAndAllocFrameForVarargs, regT1, firstFreeRegister);
    emitGetVirtualRegister(thisValue, regT1);
    emitGetVirtualRegister(arguments, regT2);
    callOperation(operationLoadVarargs, returnValueGPR, regT1, regT2);
    move(returnValueGPR, regT1);

    if (canOptimize)
        end.link(this);
}
Example #19
PassRefPtr<ByteArray> ByteArray::create(size_t size)
{
    unsigned char* buffer = new unsigned char[size + OBJECT_OFFSETOF(ByteArray, m_data)];
    ASSERT((reinterpret_cast<size_t>(buffer) & 3) == 0);
    return adoptRef(new (buffer) ByteArray(size));
}
AssemblyHelpers::Jump AssemblyHelpers::emitExceptionCheck(ExceptionCheckKind kind)
{
    callExceptionFuzz();
    
#if USE(JSVALUE64)
    return branchTest64(kind == NormalExceptionCheck ? NonZero : Zero, AbsoluteAddress(vm()->addressOfException()));
#elif USE(JSVALUE32_64)
    return branch32(kind == NormalExceptionCheck ? NotEqual : Equal, AbsoluteAddress(reinterpret_cast<char*>(vm()->addressOfException()) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
#endif
}
Example #21
static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind kind)
{
    int executableOffsetToFunction = NativeExecutable::offsetOfNativeFunctionFor(kind);

    JSInterfaceJIT jit;

    jit.emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);

#if CPU(X86)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT0);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.peek(JSInterfaceJIT::regT1);
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ReturnPC);

    // Calling convention:      f(ecx, edx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    jit.subPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(void*)), JSInterfaceJIT::stackPointerRegister); // Align stack after call.

    // call the function
    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::regT1);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT1);
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(void*)), JSInterfaceJIT::stackPointerRegister);

#elif CPU(X86_64)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT0);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.peek(JSInterfaceJIT::regT1);
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ReturnPC);

#if !OS(WINDOWS)
    // Calling convention:      f(edi, esi, edx, ecx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::edi);

    jit.subPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister); // Align stack after call.

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::esi);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::esi, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#else
    // Calling convention:      f(ecx, edx, r8, r9, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    // Leave space for the callee parameter home addresses and align the stack.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t) + 16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::edx);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::edx, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t) + 16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif

#elif CPU(ARM)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT2);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);

    // Calling convention:      f(r0 == regT0, r1 == regT1, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, ARMRegisters::r0);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, ARMRegisters::r1);
    jit.move(JSInterfaceJIT::regT2, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.loadPtr(JSInterfaceJIT::Address(ARMRegisters::r1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));

    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);

#elif CPU(SH4)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT2);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);

    // Calling convention: f(r0 == regT4, r1 == regT5, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT4);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::regT5);
    jit.move(JSInterfaceJIT::regT2, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT5, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);

    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction), JSInterfaceJIT::regT0);
    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);

#elif CPU(MIPS)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT0);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);

    // Calling convention:      f(a0, a1, a2, a3);
    // Host function signature: f(ExecState*);

    // Allocate stack space for 16 bytes (8-byte aligned)
    // 16 bytes (unused) for 4 arguments
    jit.subPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);

    // Setup arg0
    jit.move(JSInterfaceJIT::callFrameRegister, MIPSRegisters::a0);

    // Call
    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, MIPSRegisters::a2);
    jit.loadPtr(JSInterfaceJIT::Address(MIPSRegisters::a2, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));

    // Restore stack space
    jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);

    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
#else
#error "JIT not supported on this platform."
    UNUSED_PARAM(executableOffsetToFunction);
    breakpoint();
#endif

    // Check for an exception
#if USE(JSVALUE64)
    jit.load64(&(vm->exception), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Jump exceptionHandler = jit.branchTest64(JSInterfaceJIT::NonZero, JSInterfaceJIT::regT2);
#else
    JSInterfaceJIT::Jump exceptionHandler = jit.branch32(
            JSInterfaceJIT::NotEqual,
            JSInterfaceJIT::AbsoluteAddress(reinterpret_cast<char*>(&vm->exception) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)),
            JSInterfaceJIT::TrustedImm32(JSValue::EmptyValueTag));
#endif

    // Return.
    jit.ret();

    // Handle an exception
    exceptionHandler.link(&jit);

    // Grab the return address.
    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT1);

    jit.move(JSInterfaceJIT::TrustedImmPtr(&vm->exceptionLocation), JSInterfaceJIT::regT2);
    jit.storePtr(JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);

    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);

    jit.move(JSInterfaceJIT::TrustedImmPtr(FunctionPtr(ctiVMThrowTrampolineSlowpath).value()), JSInterfaceJIT::regT1);
    jit.jump(JSInterfaceJIT::regT1);

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("native %s trampoline", toCString(kind).data()));
}
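
// The subPtr(16 - sizeof(void*), sp) adjustments in the x86 paths above are
// alignment arithmetic: the call pushed a sizeof(void*)-byte return address
// past a 16-byte boundary, so subtracting the complement realigns the stack
// for the outgoing C call. A sketch of that arithmetic (illustrative only,
// not an ABI reference):
#include <cstddef>

constexpr std::size_t paddingToRealign16(std::size_t bytesPastBoundary)
{
    return (16 - bytesPastBoundary % 16) % 16;
}

static_assert(paddingToRealign16(sizeof(void*)) == 16 - sizeof(void*),
    "exactly what the thunk subtracts after the return address is pushed");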
Example #22
static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind kind, ThunkEntryType entryType = EnterViaCall)
{
    int executableOffsetToFunction = NativeExecutable::offsetOfNativeFunctionFor(kind);
    
    JSInterfaceJIT jit(vm);

    if (entryType == EnterViaCall)
        jit.emitFunctionPrologue();

    jit.emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);

#if CPU(X86)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetCallerFrameFromCallFrameHeaderPtr(JSInterfaceJIT::regT0);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    // Calling convention:      f(ecx, edx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    jit.subPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister); // Align stack after prologue.

    // call the function
    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::regT1);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT1);
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister);

#elif CPU(X86_64)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetCallerFrameFromCallFrameHeaderPtr(JSInterfaceJIT::regT0);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
#if !OS(WINDOWS)
    // Calling convention:      f(edi, esi, edx, ecx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::edi);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::esi);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::esi, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

#else
    // Calling convention:      f(ecx, edx, r8, r9, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    // Leave space for the callee parameter home addresses and align the stack.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t) + 16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::edx);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::edx, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t) + 16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif

#elif CPU(ARM64)
    COMPILE_ASSERT(ARM64Registers::x3 != JSInterfaceJIT::regT1, prev_callframe_not_trampled_by_T1);
    COMPILE_ASSERT(ARM64Registers::x3 != JSInterfaceJIT::regT3, prev_callframe_not_trampled_by_T3);
    COMPILE_ASSERT(ARM64Registers::x0 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_0);
    COMPILE_ASSERT(ARM64Registers::x1 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_1);
    COMPILE_ASSERT(ARM64Registers::x2 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_2);

    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetCallerFrameFromCallFrameHeaderPtr(ARM64Registers::x3);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, ARM64Registers::x3);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, ARM64Registers::x0);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, ARM64Registers::x1);
    jit.loadPtr(JSInterfaceJIT::Address(ARM64Registers::x1, JSFunction::offsetOfExecutable()), ARM64Registers::x2);
    jit.call(JSInterfaceJIT::Address(ARM64Registers::x2, executableOffsetToFunction));
#elif CPU(ARM) || CPU(SH4) || CPU(MIPS)
    // Load caller frame's scope chain into this callframe so that whatever we call can get to its global data.
    jit.emitGetCallerFrameFromCallFrameHeaderPtr(JSInterfaceJIT::regT2);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

#if CPU(MIPS)
    // Allocate stack space for (unused) 16 bytes (8-byte aligned) for 4 arguments.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#endif

    // Calling convention is f(argumentGPR0, argumentGPR1, ...).
    // Host function signature is f(ExecState*).
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::argumentGPR1);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::argumentGPR1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));

#if CPU(MIPS)
    // Restore stack space
    jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#endif
#else
#error "JIT not supported on this platform."
    UNUSED_PARAM(executableOffsetToFunction);
    breakpoint();
#endif

    // Check for an exception
#if USE(JSVALUE64)
    jit.load64(vm->addressOfException(), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Jump exceptionHandler = jit.branchTest64(JSInterfaceJIT::NonZero, JSInterfaceJIT::regT2);
#else
    JSInterfaceJIT::Jump exceptionHandler = jit.branch32(
        JSInterfaceJIT::NotEqual,
        JSInterfaceJIT::AbsoluteAddress(reinterpret_cast<char*>(vm->addressOfException()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)),
        JSInterfaceJIT::TrustedImm32(JSValue::EmptyValueTag));
#endif

    jit.emitFunctionEpilogue();
    // Return.
    jit.ret();

    // Handle an exception
    exceptionHandler.link(&jit);

    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);

#if CPU(X86) && USE(JSVALUE32_64)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(-12), JSInterfaceJIT::stackPointerRegister);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister), JSInterfaceJIT::regT0);
    jit.push(JSInterfaceJIT::regT0);
#else
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister), JSInterfaceJIT::argumentGPR0);
#endif
    jit.move(JSInterfaceJIT::TrustedImmPtr(FunctionPtr(operationVMHandleException).value()), JSInterfaceJIT::regT3);
    jit.call(JSInterfaceJIT::regT3);
#if CPU(X86) && USE(JSVALUE32_64)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#endif

    jit.jumpToExceptionHandler();

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("native %s%s trampoline", entryType == EnterViaJump ? "Tail " : "", toCString(kind).data()));
}
Example #23
static MacroAssemblerCodeRef virtualForThunkGenerator(
    JSGlobalData* globalData, CodeSpecializationKind kind)
{
    // The return address is on the stack, or in the link register. We will hence
    // jump to the callee, or save the return address to the call frame while we
    // make a C++ function call to the appropriate DFG operation.

    CCallHelpers jit(globalData);
    
    CCallHelpers::JumpList slowCase;

    // FIXME: we should have a story for eliminating these checks. In many cases,
    // the DFG knows that the value is definitely a cell, or definitely a function.
    
#if USE(JSVALUE64)
    slowCase.append(
        jit.branchTestPtr(
            CCallHelpers::NonZero, GPRInfo::nonArgGPR0, GPRInfo::tagMaskRegister));
#else
    slowCase.append(
        jit.branch32(
            CCallHelpers::NotEqual, GPRInfo::nonArgGPR1,
            CCallHelpers::TrustedImm32(JSValue::CellTag)));
#endif
    jit.loadPtr(CCallHelpers::Address(GPRInfo::nonArgGPR0, JSCell::structureOffset()), GPRInfo::nonArgGPR2);
    slowCase.append(
        jit.branchPtr(
            CCallHelpers::NotEqual,
            CCallHelpers::Address(GPRInfo::nonArgGPR2, Structure::classInfoOffset()),
            CCallHelpers::TrustedImmPtr(&JSFunction::s_info)));
    
    // Now we know we have a JSFunction.
    
    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::nonArgGPR0, JSFunction::offsetOfExecutable()),
        GPRInfo::nonArgGPR2);
    slowCase.append(
        jit.branch32(
            CCallHelpers::LessThan,
            CCallHelpers::Address(
                GPRInfo::nonArgGPR2, ExecutableBase::offsetOfNumParametersFor(kind)),
            CCallHelpers::TrustedImm32(0)));
    
    // Now we know that we have a CodeBlock, and we're committed to making a fast
    // call.
    
    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::nonArgGPR0, JSFunction::offsetOfScopeChain()),
        GPRInfo::nonArgGPR1);
#if USE(JSVALUE64)
    jit.storePtr(
        GPRInfo::nonArgGPR1,
        CCallHelpers::Address(
            GPRInfo::callFrameRegister,
            static_cast<ptrdiff_t>(sizeof(Register)) * RegisterFile::ScopeChain));
#else
    jit.storePtr(
        GPRInfo::nonArgGPR1,
        CCallHelpers::Address(
            GPRInfo::callFrameRegister,
            static_cast<ptrdiff_t>(sizeof(Register)) * RegisterFile::ScopeChain +
            OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
    jit.store32(
        CCallHelpers::TrustedImm32(JSValue::CellTag),
        CCallHelpers::Address(
            GPRInfo::callFrameRegister,
            static_cast<ptrdiff_t>(sizeof(Register)) * RegisterFile::ScopeChain +
            OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
#endif
    
    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::nonArgGPR2, ExecutableBase::offsetOfJITCodeWithArityCheckFor(kind)),
        GPRInfo::regT0);
    
    // Make a tail call. This will return back to DFG code.
    emitPointerValidation(jit, GPRInfo::regT0);
    jit.jump(GPRInfo::regT0);

    slowCase.link(&jit);
    
    // Here we don't know anything, so revert to the full slow path.
    
    slowPathFor(jit, globalData, kind == CodeForCall ? operationVirtualCall : operationVirtualConstruct);
    
    LinkBuffer patchBuffer(*globalData, &jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(
        patchBuffer,
        ("DFG virtual %s slow path thunk", kind == CodeForCall ? "call" : "construct"));
}
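
Restated at the C level, the fast path this thunk emits is three guarded loads followed by a tail call. The sketch below uses stand-in types and a dummy slow path; none of these layouts or names are JSC's real JSCell/JSFunction/operationVirtualCall, and it shows only the CodeForCall flavor.

// Sketch of the thunk's fast-path logic with illustrative stand-in types.
#include <cstdio>

struct ClassInfo {};
static const ClassInfo jsFunctionClassInfo = {};  // stand-in for JSFunction::s_info

struct StructureSketch { const ClassInfo* classInfo; };
struct JSCellSketch { StructureSketch* structure; };
struct ExecutableSketch {
    int numParametersForCall;     // negative until a CodeBlock exists
    void* jitCodeWithArityCheck;
};
struct JSFunctionSketch : JSCellSketch {
    ExecutableSketch* executable;
    void* scopeChain;
};

static void* slowPath(JSCellSketch*) { return nullptr; }  // operationVirtualCall stand-in

static void* virtualCallFastPath(JSCellSketch* callee, bool calleeIsCell, void** scopeChainSlot)
{
    if (!calleeIsCell)                                         // the tag check
        return slowPath(callee);
    if (callee->structure->classInfo != &jsFunctionClassInfo)  // the classInfo check
        return slowPath(callee);
    JSFunctionSketch* function = static_cast<JSFunctionSketch*>(callee);
    if (function->executable->numParametersForCall < 0)        // no CodeBlock yet
        return slowPath(callee);
    *scopeChainSlot = function->scopeChain;       // the ScopeChain store to the frame
    return function->executable->jitCodeWithArityCheck;        // the tail-call target
}

int main()
{
    StructureSketch structure = { &jsFunctionClassInfo };
    ExecutableSketch executable = { 2, reinterpret_cast<void*>(0x1234) };
    JSFunctionSketch function;
    function.structure = &structure;
    function.executable = &executable;
    function.scopeChain = nullptr;
    void* scopeChainSlot = nullptr;
    std::printf("fast path target: %p\n", virtualCallFastPath(&function, true, &scopeChainSlot));
    return 0;
}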
void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery)
{
    // 1) Pro-forma stuff.
    if (Options::printEachOSRExit()) {
        SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
        debugInfo->codeBlock = m_jit.codeBlock();
        debugInfo->kind = exit.m_kind;
        debugInfo->bytecodeOffset = exit.m_codeOrigin.bytecodeIndex;
        
        m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
    }
    
    // Need to ensure that the stack pointer accounts for the worst-case stack usage at exit.
    m_jit.addPtr(
        CCallHelpers::TrustedImm32(
            -m_jit.codeBlock()->jitCode()->dfgCommon()->requiredRegisterCountForExit * sizeof(Register)),
        CCallHelpers::framePointerRegister, CCallHelpers::stackPointerRegister);
    
    // 2) Perform speculation recovery. This only comes into play when an operation
    //    starts mutating state before verifying the speculation it has already made.
    
    if (recovery) {
        switch (recovery->type()) {
        case SpeculativeAdd:
            m_jit.sub32(recovery->src(), recovery->dest());
            break;
            
        case BooleanSpeculationCheck:
            break;
            
        default:
            break;
        }
    }

    // 3) Refine some value profile, if appropriate.
    
    if (!!exit.m_jsValueSource) {
        if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
            // If the instruction that this originated from has an array profile, then
            // refine it. If it doesn't, then do nothing. The latter could happen for
            // hoisted checks, or for checks emitted for operations that didn't have
            // array profiling - either ops that aren't array accesses at all, or ops
            // that weren't known to be array accesses in the bytecode. The latter
            // case is a FIXME, while the former case is an outcome of a CheckStructure
            // not knowing why it was emitted (could be either due to an inline cache
            // of a property access, or due to an array profile).
            
            // Note: We are free to assume that the jsValueSource is already known to
            // be a cell, since both BadCache and BadIndexingType exits occur after
            // the cell check has already happened.
            
            CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
            if (ArrayProfile* arrayProfile = m_jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex)) {
                GPRReg usedRegister1;
                GPRReg usedRegister2;
                if (exit.m_jsValueSource.isAddress()) {
                    usedRegister1 = exit.m_jsValueSource.base();
                    usedRegister2 = InvalidGPRReg;
                } else {
                    usedRegister1 = exit.m_jsValueSource.payloadGPR();
                    if (exit.m_jsValueSource.hasKnownTag())
                        usedRegister2 = InvalidGPRReg;
                    else
                        usedRegister2 = exit.m_jsValueSource.tagGPR();
                }
                
                GPRReg scratch1;
                GPRReg scratch2;
                scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2);
                scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2, scratch1);
                
#if CPU(ARM64)
                m_jit.pushToSave(scratch1);
                m_jit.pushToSave(scratch2);
#else
                m_jit.push(scratch1);
                m_jit.push(scratch2);
#endif
                
                GPRReg value;
                if (exit.m_jsValueSource.isAddress()) {
                    value = scratch1;
                    m_jit.loadPtr(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), value);
                } else
                    value = exit.m_jsValueSource.payloadGPR();
                
                m_jit.loadPtr(AssemblyHelpers::Address(value, JSCell::structureIDOffset()), scratch1);
                m_jit.storePtr(scratch1, arrayProfile->addressOfLastSeenStructureID());
                m_jit.load8(AssemblyHelpers::Address(scratch1, Structure::indexingTypeOffset()), scratch1);
                m_jit.move(AssemblyHelpers::TrustedImm32(1), scratch2);
                m_jit.lshift32(scratch1, scratch2);
                m_jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes()));
                
#if CPU(ARM64)
                m_jit.popToRestore(scratch2);
                m_jit.popToRestore(scratch1);
#else
                m_jit.pop(scratch2);
                m_jit.pop(scratch1);
#endif
            }
        }
        
        if (!!exit.m_valueProfile) {
            EncodedJSValue* bucket = exit.m_valueProfile.getSpecFailBucket(0);
        
            if (exit.m_jsValueSource.isAddress()) {
                // Save a register so we can use it.
                GPRReg scratch = AssemblyHelpers::selectScratchGPR(exit.m_jsValueSource.base());
                
#if CPU(ARM64)
                m_jit.pushToSave(scratch);
#else
                m_jit.push(scratch);
#endif

                m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), scratch);
                m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
                m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), scratch);
                m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
                
#if CPU(ARM64)
                m_jit.popToRestore(scratch);
#else
                m_jit.pop(scratch);
#endif
            } else if (exit.m_jsValueSource.hasKnownTag()) {
                m_jit.store32(AssemblyHelpers::TrustedImm32(exit.m_jsValueSource.tag()), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
                m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
            } else {
                m_jit.store32(exit.m_jsValueSource.tagGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
                m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
            }
        }
    }
    
    // Do a simplified OSR exit. See DFGOSRExitCompiler64.cpp's comment regarding how
    // and why we take this simple approach.

    // 4) Save all state from GPRs into the scratch buffer.
    
    ScratchBuffer* scratchBuffer = m_jit.vm()->scratchBufferForSize(sizeof(EncodedJSValue) * operands.size());
    EncodedJSValue* scratch = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
    
    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        
        switch (recovery.technique()) {
        case UnboxedInt32InGPR:
        case UnboxedBooleanInGPR:
        case UnboxedCellInGPR:
            m_jit.store32(
                recovery.gpr(),
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload);
            break;
            
        case InPair:
            m_jit.store32(
                recovery.tagGPR(),
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.tag);
            m_jit.store32(
                recovery.payloadGPR(),
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload);
            break;
            
        default:
            break;
        }
    }
    
    // Now all GPRs are free to reuse.
    
    // 5) Save all state from FPRs into the scratch buffer.
    
    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        
        switch (recovery.technique()) {
        case InFPR:
            m_jit.move(AssemblyHelpers::TrustedImmPtr(scratch + index), GPRInfo::regT0);
            m_jit.storeDouble(recovery.fpr(), MacroAssembler::Address(GPRInfo::regT0));
            break;
            
        default:
            break;
        }
    }
    
    // Now all FPRs are free to reuse.
    
    // 6) Save all state from the stack into the scratch buffer. For simplicity we
    //    do this even for state that's already in the right place on the stack;
    //    having everything in the scratch buffer makes the later steps uniform.
    
    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        
        switch (recovery.technique()) {
        case DisplacedInJSStack:
        case Int32DisplacedInJSStack:
        case DoubleDisplacedInJSStack:
        case CellDisplacedInJSStack:
        case BooleanDisplacedInJSStack:
            m_jit.load32(
                AssemblyHelpers::tagFor(recovery.virtualRegister()),
                GPRInfo::regT0);
            m_jit.load32(
                AssemblyHelpers::payloadFor(recovery.virtualRegister()),
                GPRInfo::regT1);
            m_jit.store32(
                GPRInfo::regT0,
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.tag);
            m_jit.store32(
                GPRInfo::regT1,
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload);
            break;
            
        default:
            break;
        }
    }
    
    // 7) Do all data format conversions and store the results into the stack.
    
    bool haveArguments = false;
    
    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        int operand = operands.operandForIndex(index);
        
        switch (recovery.technique()) {
        case InPair:
        case DisplacedInJSStack:
            m_jit.load32(
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.tag,
                GPRInfo::regT0);
            m_jit.load32(
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload,
                GPRInfo::regT1);
            m_jit.store32(
                GPRInfo::regT0,
                AssemblyHelpers::tagFor(operand));
            m_jit.store32(
                GPRInfo::regT1,
                AssemblyHelpers::payloadFor(operand));
            break;
            
        case InFPR:
        case DoubleDisplacedInJSStack:
            m_jit.move(AssemblyHelpers::TrustedImmPtr(scratch + index), GPRInfo::regT0);
            m_jit.loadDouble(GPRInfo::regT0, FPRInfo::fpRegT0);
            m_jit.purifyNaN(FPRInfo::fpRegT0);
            m_jit.storeDouble(FPRInfo::fpRegT0, AssemblyHelpers::addressFor(operand));
            break;

        case UnboxedInt32InGPR:
        case Int32DisplacedInJSStack:
            m_jit.load32(
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload,
                GPRInfo::regT0);
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(JSValue::Int32Tag),
                AssemblyHelpers::tagFor(operand));
            m_jit.store32(
                GPRInfo::regT0,
                AssemblyHelpers::payloadFor(operand));
            break;
            
        case UnboxedCellInGPR:
        case CellDisplacedInJSStack:
            m_jit.load32(
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload,
                GPRInfo::regT0);
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(JSValue::CellTag),
                AssemblyHelpers::tagFor(operand));
            m_jit.store32(
                GPRInfo::regT0,
                AssemblyHelpers::payloadFor(operand));
            break;
            
        case UnboxedBooleanInGPR:
        case BooleanDisplacedInJSStack:
            m_jit.load32(
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload,
                GPRInfo::regT0);
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(JSValue::BooleanTag),
                AssemblyHelpers::tagFor(operand));
            m_jit.store32(
                GPRInfo::regT0,
                AssemblyHelpers::payloadFor(operand));
            break;
            
        case Constant:
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(recovery.constant().tag()),
                AssemblyHelpers::tagFor(operand));
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(recovery.constant().payload()),
                AssemblyHelpers::payloadFor(operand));
            break;
            
        case ArgumentsThatWereNotCreated:
            haveArguments = true;
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(JSValue().tag()),
                AssemblyHelpers::tagFor(operand));
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(JSValue().payload()),
                AssemblyHelpers::payloadFor(operand));
            break;
            
        default:
            break;
        }
    }
    
    // 8) Adjust the old JIT's execute counter. Since we are exiting OSR, we know
    //    that all new calls into this code will go to the new JIT, so the execute
    //    counter only affects call frames that performed OSR exit and call frames
    //    that were still executing the old JIT at the time of another call frame's
    //    OSR exit. We want to ensure that the following is true:
    //
    //    (a) Code that performs an OSR exit gets a chance to reenter optimized
    //        code eventually, since optimized code is faster. But we don't
    //        want to do such reentry too aggressively (see (c) below).
    //
    //    (b) If there is code on the call stack that is still running the old
    //        JIT's code and has never OSR'd, then it should get a chance to
    //        perform OSR entry despite the fact that we've exited.
    //
    //    (c) Code that performs an OSR exit should not immediately retry OSR
    //        entry, since both forms of OSR are expensive. OSR entry is
    //        particularly expensive.
    //
    //    (d) Frequent OSR failures, even those that do not result in the code
    //        running in a hot loop, result in recompilation getting triggered.
    //
    //    To ensure (c), we'd like to set the execute counter to
    //    counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
    //    (a) and (b), since then every OSR exit would delay the opportunity for
    //    every call frame to perform OSR entry. Essentially, if OSR exit happens
    //    frequently and the function has few loops, then the counter will never
    //    become non-negative and OSR entry will never be triggered. OSR entry
    //    will only happen if a loop gets hot in the old JIT, which does a pretty
    //    good job of ensuring (a) and (b). But that doesn't take care of (d),
    //    since each speculation failure would reset the execute counter.
    //    So we check here if the number of speculation failures is significantly
    //    larger than the number of successes (we want 90% success rate), and if
    //    there have been a large enough number of failures. If so, we set the
    //    counter to 0; otherwise we set the counter to
    //    counterValueForOptimizeAfterWarmUp(). (A C-level sketch of this policy
    //    follows this function.)
    
    handleExitCounts(m_jit, exit);
    
    // 9) Reify inlined call frames.
    
    reifyInlinedCallFrames(m_jit, exit);
    
    // 10) Create arguments if necessary and place them into the appropriate aliased
    //     registers.
    
    if (haveArguments) {
        ArgumentsRecoveryGenerator argumentsRecovery;

        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != ArgumentsThatWereNotCreated)
                continue;
            argumentsRecovery.generateFor(
                operands.operandForIndex(index), exit.m_codeOrigin, m_jit);
        }
    }

    // 11) And finish.
    adjustAndJumpToTarget(m_jit, exit);
}
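
As promised in step 8, here is a C-level sketch of the exit-counter policy. The names ExitCounts, minimumFailureCount, and newExecuteCounter are hypothetical illustrations of the described heuristic; the real logic lives in handleExitCounts and operates on CodeBlock's actual counters.

// Hypothetical sketch of the step-8 counter policy; not the real handleExitCounts.
#include <cstdio>

struct ExitCounts {
    unsigned speculationFailures;
    unsigned speculationSuccesses;
};

// Returns the value to install in the baseline code's execute counter.
static int newExecuteCounter(
    const ExitCounts& counts, unsigned minimumFailureCount,
    int counterValueForOptimizeAfterWarmUp)
{
    // Below the 90% target success rate iff failures exceed 10% of all samples.
    bool belowTargetRate = counts.speculationFailures * 10
        > counts.speculationFailures + counts.speculationSuccesses;
    bool enoughFailures = counts.speculationFailures >= minimumFailureCount;
    if (belowTargetRate && enoughFailures)
        return 0;   // failing often: let recompilation trigger promptly (property (d))
    return counterValueForOptimizeAfterWarmUp;  // otherwise delay reentry (property (c))
}

int main()
{
    ExitCounts failingOften = { 30, 100 };  // ~77% success rate
    ExitCounts mostlyFine = { 3, 100 };     // ~97% success rate
    std::printf("%d %d\n",
        newExecuteCounter(failingOften, 20, -1000),
        newExecuteCounter(mostlyFine, 20, -1000));  // prints "0 -1000"
    return 0;
}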
Example #25
0
void Data::performAssertions(VM& vm)
{
    UNUSED_PARAM(vm);
    
    // Assertions to match LowLevelInterpreter.asm.  If you change any of this code, be
    // prepared to change LowLevelInterpreter.asm as well!!

#ifndef NDEBUG
#if USE(JSVALUE64)
    const ptrdiff_t PtrSize = 8;
    const ptrdiff_t CallFrameHeaderSlots = 5;
#else // USE(JSVALUE64) // i.e. 32-bit version
    const ptrdiff_t PtrSize = 4;
    const ptrdiff_t CallFrameHeaderSlots = 4;
#endif
    const ptrdiff_t SlotSize = 8;
#endif

    ASSERT(sizeof(void*) == PtrSize);
    ASSERT(sizeof(Register) == SlotSize);
    ASSERT(JSStack::CallFrameHeaderSize == CallFrameHeaderSlots);

    ASSERT(!CallFrame::callerFrameOffset());
    ASSERT(JSStack::CallerFrameAndPCSize == (PtrSize * 2) / SlotSize);
    ASSERT(CallFrame::returnPCOffset() == CallFrame::callerFrameOffset() + PtrSize);
    ASSERT(JSStack::CodeBlock * sizeof(Register) == CallFrame::returnPCOffset() + PtrSize);
    ASSERT(JSStack::Callee * sizeof(Register) == JSStack::CodeBlock * sizeof(Register) + SlotSize);
    ASSERT(JSStack::ArgumentCount * sizeof(Register) == JSStack::Callee * sizeof(Register) + SlotSize);
    ASSERT(JSStack::ThisArgument * sizeof(Register) == JSStack::ArgumentCount * sizeof(Register) + SlotSize);
    ASSERT(JSStack::CallFrameHeaderSize == JSStack::ThisArgument);

    ASSERT(CallFrame::argumentOffsetIncludingThis(0) == JSStack::ThisArgument);

#if CPU(BIG_ENDIAN)
    ASSERT(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag) == 0);
    ASSERT(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload) == 4);
#else
    ASSERT(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag) == 4);
    ASSERT(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload) == 0);
#endif
#if USE(JSVALUE32_64)
    ASSERT(JSValue::Int32Tag == static_cast<unsigned>(-1));
    ASSERT(JSValue::BooleanTag == static_cast<unsigned>(-2));
    ASSERT(JSValue::NullTag == static_cast<unsigned>(-3));
    ASSERT(JSValue::UndefinedTag == static_cast<unsigned>(-4));
    ASSERT(JSValue::CellTag == static_cast<unsigned>(-5));
    ASSERT(JSValue::EmptyValueTag == static_cast<unsigned>(-6));
    ASSERT(JSValue::DeletedValueTag == static_cast<unsigned>(-7));
    ASSERT(JSValue::LowestTag == static_cast<unsigned>(-7));
#else
    ASSERT(TagBitTypeOther == 0x2);
    ASSERT(TagBitBool == 0x4);
    ASSERT(TagBitUndefined == 0x8);
    ASSERT(ValueEmpty == 0x0);
    ASSERT(ValueFalse == (TagBitTypeOther | TagBitBool));
    ASSERT(ValueTrue == (TagBitTypeOther | TagBitBool | 1));
    ASSERT(ValueUndefined == (TagBitTypeOther | TagBitUndefined));
    ASSERT(ValueNull == TagBitTypeOther);
#endif
#if (CPU(X86_64) && !OS(WINDOWS)) || CPU(ARM64) || !ENABLE(JIT)
    ASSERT(!maxFrameExtentForSlowPathCall);
#elif CPU(ARM) || CPU(SH4)
    ASSERT(maxFrameExtentForSlowPathCall == 24);
#elif CPU(X86) || CPU(MIPS)
    ASSERT(maxFrameExtentForSlowPathCall == 40);
#elif CPU(X86_64) && OS(WINDOWS)
    ASSERT(maxFrameExtentForSlowPathCall == 64);
#endif
    ASSERT(StringType == 6);
    ASSERT(ObjectType == 18);
    ASSERT(FinalObjectType == 19);
    ASSERT(MasqueradesAsUndefined == 1);
    ASSERT(ImplementsHasInstance == 2);
    ASSERT(ImplementsDefaultHasInstance == 8);
    ASSERT(FirstConstantRegisterIndex == 0x40000000);
    ASSERT(GlobalCode == 0);
    ASSERT(EvalCode == 1);
    ASSERT(FunctionCode == 2);

    static_assert(GlobalProperty == 0, "LLInt assumes GlobalProperty ResolveType is == 0");
    static_assert(GlobalVar == 1, "LLInt assumes GlobalVar ResolveType is == 1");
    static_assert(ClosureVar == 2, "LLInt assumes ClosureVar ResolveType is == 2");
    static_assert(LocalClosureVar == 3, "LLInt assumes LocalClosureVar ResolveType is == 3");
    static_assert(GlobalPropertyWithVarInjectionChecks == 4, "LLInt assumes GlobalPropertyWithVarInjectionChecks ResolveType is == 4");
    static_assert(GlobalVarWithVarInjectionChecks == 5, "LLInt assumes GlobalVarWithVarInjectionChecks ResolveType is == 5");
    static_assert(ClosureVarWithVarInjectionChecks == 6, "LLInt assumes ClosureVarWithVarInjectionChecks ResolveType is == 6");
    static_assert(Dynamic == 7, "LLInt assumes Dynamic ResolveType is == 7");
    
    ASSERT(ResolveModeAndType::mask == 0xffff);

    ASSERT(MarkedBlock::blockMask == ~static_cast<decltype(MarkedBlock::blockMask)>(0xffff));

    // FIXME: make these assertions less horrible.
#if !ASSERT_DISABLED
    Vector<int> testVector;
    testVector.resize(42);
    ASSERT(bitwise_cast<uint32_t*>(&testVector)[sizeof(void*)/sizeof(uint32_t) + 1] == 42);
    ASSERT(bitwise_cast<int**>(&testVector)[0] == testVector.begin());
#endif

    ASSERT(StringImpl::s_hashFlag8BitBuffer == 8);
}
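
The runtime ASSERTs above pin a layout so that hand-written assembly cannot silently drift from the C++ structures. Where the values are compile-time constants, the same pinning can be done statically; the struct below is an illustrative stand-in for EncodedValueDescriptor (not the real type), assuming a little-endian target as in the #else branch above.

// Illustrative stand-in, not JSC's EncodedValueDescriptor; little-endian assumed
// (the tag/payload offsets swap on big-endian, as the #if above shows).
#include <cstddef>
#include <cstdint>

struct ValueDescriptorSketch {
    union {
        int64_t asInt64;
        struct {
            uint32_t payload;
            uint32_t tag;
        } asBits;
    };
};

static_assert(sizeof(ValueDescriptorSketch) == 8,
    "one JS value must fit in one 8-byte Register slot");
static_assert(offsetof(ValueDescriptorSketch, asBits.payload) == 0,
    "payload occupies the low word on little-endian");
static_assert(offsetof(ValueDescriptorSketch, asBits.tag) == 4,
    "tag occupies the high word on little-endian");

int main() { return 0; }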
Example #26
0
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
    int callee = instruction[2].u.operand;

    /* Caller always:
        - Updates callFrameRegister to callee callFrame.
        - Initializes ArgumentCount; CallerFrame; Callee.

       For a JS call:
        - Caller initializes ScopeChain.
        - Callee initializes ReturnPC; CodeBlock.
        - Callee restores callFrameRegister before return.

       For a non-JS call:
        - Caller initializes ScopeChain; ReturnPC; CodeBlock.
        - Caller restores callFrameRegister after return.
    */
    
    if (opcodeID == op_call_varargs)
        compileLoadVarargs(instruction);
    else {
        int argCount = instruction[3].u.operand;
        int registerOffset = -instruction[4].u.operand;
        
        if (opcodeID == op_call && shouldEmitProfiling()) {
            emitLoad(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0, regT1);
            Jump done = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));
            loadPtr(Address(regT1, JSCell::structureOffset()), regT1);
            storePtr(regT1, instruction[6].u.arrayProfile->addressOfLastSeenStructure());
            done.link(this);
        }
    
        addPtr(TrustedImm32(registerOffset * sizeof(Register)), callFrameRegister, regT3);

        store32(TrustedImm32(argCount), payloadFor(JSStack::ArgumentCount, regT3));
    } // regT3 holds newCallFrame with ArgumentCount initialized.
    
    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
    store32(TrustedImm32(locationBits), tagFor(JSStack::ArgumentCount, callFrameRegister));
    emitLoad(callee, regT1, regT0); // regT1, regT0 holds callee.

    storePtr(callFrameRegister, Address(regT3, JSStack::CallerFrame * static_cast<int>(sizeof(Register))));
    emitStore(JSStack::Callee, regT1, regT0, regT3);
    move(regT3, callFrameRegister);

    if (opcodeID == op_call_eval) {
        compileCallEval(instruction);
        return;
    }

    DataLabelPtr addressOfLinkedFunctionCheck;
    Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0));

    addSlowCase(slowCase);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));

    ASSERT(m_callStructureStubCompilationInfo.size() == callLinkInfoIndex);
    m_callStructureStubCompilationInfo.append(StructureStubCompilationInfo());
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
    m_callStructureStubCompilationInfo[callLinkInfoIndex].callType = CallLinkInfo::callTypeFor(opcodeID);
    m_callStructureStubCompilationInfo[callLinkInfoIndex].bytecodeIndex = m_bytecodeOffset;

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT1);
    emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();

    sampleCodeBlock(m_codeBlock);
    emitPutCallResult(instruction);
}
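
On this 32-bit path each stack slot is one 8-byte Register holding a 4-byte payload and a 4-byte tag, which is what tagFor/payloadFor and emitStore address. A self-contained sketch of that addressing follows; it uses a positive slot index for simplicity (real locals sit at negative offsets from the frame pointer) and the CellTag value asserted in Example #25.

// Sketch of 32-bit slot addressing; offsets assume little-endian (payload low).
#include <cstdint>
#include <cstdio>

static const int PayloadOffset = 0;  // swaps with TagOffset on big-endian
static const int TagOffset = 4;

static uint32_t* payloadFor(uint8_t* frame, int slot)
{
    return reinterpret_cast<uint32_t*>(frame + slot * 8 + PayloadOffset);
}

static uint32_t* tagFor(uint8_t* frame, int slot)
{
    return reinterpret_cast<uint32_t*>(frame + slot * 8 + TagOffset);
}

int main()
{
    uint8_t frame[64] = {};
    int slot = 3;                         // arbitrary example slot
    *payloadFor(frame, slot) = 42;        // store the payload word
    *tagFor(frame, slot) = 0xfffffffbu;   // CellTag == static_cast<unsigned>(-5)
    std::printf("slot %d: tag=0x%08x payload=%u\n",
        slot, *tagFor(frame, slot), *payloadFor(frame, slot));
    return 0;
}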
Example #27
0
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
    int callee = instruction[2].u.operand;

    /* Caller always:
        - Updates callFrameRegister to callee callFrame.
        - Initializes ArgumentCount; CallerFrame; Callee.

       For a JS call:
        - Caller initializes ScopeChain.
        - Callee initializes ReturnPC; CodeBlock.
        - Callee restores callFrameRegister before return.

       For a non-JS call:
        - Caller initializes ScopeChain; ReturnPC; CodeBlock.
        - Caller restores callFrameRegister after return.
    */
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct), call_and_construct_opcodes_must_be_same_length);
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_varargs), call_and_call_varargs_opcodes_must_be_same_length);
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct_varargs), call_and_construct_varargs_opcodes_must_be_same_length);
    if (opcodeID == op_call_varargs || opcodeID == op_construct_varargs)
        compileLoadVarargs(instruction);
    else {
        int argCount = instruction[3].u.operand;
        int registerOffset = -instruction[4].u.operand;

        if (opcodeID == op_call && shouldEmitProfiling()) {
            emitGetVirtualRegister(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0);
            Jump done = emitJumpIfNotJSCell(regT0);
            load32(Address(regT0, JSCell::structureIDOffset()), regT0);
            store32(regT0, instruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile->addressOfLastSeenStructureID());
            done.link(this);
        }
    
        addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);
        store32(TrustedImm32(argCount), Address(stackPointerRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
    } // SP holds newCallFrame + sizeof(CallerFrameAndPC), with ArgumentCount initialized.
    
    uint32_t bytecodeOffset = instruction - m_codeBlock->instructions().begin();
    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(bytecodeOffset);
    store32(TrustedImm32(locationBits), Address(callFrameRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + TagOffset));
    emitGetVirtualRegister(callee, regT0); // regT0 holds callee.

    store64(regT0, Address(stackPointerRegister, JSStack::Callee * static_cast<int>(sizeof(Register)) - sizeof(CallerFrameAndPC)));

    if (opcodeID == op_call_eval) {
        compileCallEval(instruction);
        return;
    }

    DataLabelPtr addressOfLinkedFunctionCheck;
    Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0));
    addSlowCase(slowCase);

    ASSERT(m_callCompilationInfo.size() == callLinkInfoIndex);
    CallLinkInfo* info = m_codeBlock->addCallLinkInfo();
    info->callType = CallLinkInfo::callTypeFor(opcodeID);
    info->codeOrigin = CodeOrigin(m_bytecodeOffset);
    info->calleeGPR = regT0;
    m_callCompilationInfo.append(CallCompilationInfo());
    m_callCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
    m_callCompilationInfo[callLinkInfoIndex].callLinkInfo = info;

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT2);
    store64(regT2, Address(MacroAssembler::stackPointerRegister, JSStack::ScopeChain * sizeof(Register) - sizeof(CallerFrameAndPC)));

    m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    sampleCodeBlock(m_codeBlock);
    
    emitPutCallResult(instruction);
}
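
Both versions of compileOpCall lean on the same linking trick: branchPtrWithPatch compares the callee against a patchable pointer that starts out null, so the first call always misses into the slow case, which is then expected to patch the comparand and the call target recorded via hotPathBegin/hotPathOther. Below is a data-only sketch of that inline-cache shape; the names are illustrative, and the real mechanism patches machine code, not a struct.

// Data-only sketch of the monomorphic call cache.
#include <cstdio>

struct CallLinkSketch {
    const void* expectedCallee = nullptr;  // the patchable TrustedImmPtr(0)
    const void* linkedTarget = nullptr;
};

static const void* linkSlowPath(CallLinkSketch& link, const void* callee, const void* target)
{
    link.expectedCallee = callee;  // what repatching the hot path accomplishes
    link.linkedTarget = target;
    return target;
}

static const void* callTarget(CallLinkSketch& link, const void* callee, const void* target)
{
    if (callee == link.expectedCallee)  // the branchPtrWithPatch comparison
        return link.linkedTarget;       // fast path: already linked to this callee
    return linkSlowPath(link, callee, target);  // first call or callee changed
}

int main()
{
    CallLinkSketch link;
    int callee = 0, machineCode = 0;
    callTarget(link, &callee, &machineCode);                      // miss: links
    const void* second = callTarget(link, &callee, &machineCode); // hit
    std::printf("fast-path target: %p\n", second);
    return 0;
}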
Example #28
0
PassOwnPtr<GenericCompressedData> GenericCompressedData::create(const uint8_t* data, size_t dataLength)
{
    enum { MinimumSize = sizeof(GenericCompressedData) * 8 };

    if (!data || dataLength < MinimumSize)
        return nullptr;

    z_stream stream;
    memset(&stream, 0, sizeof(stream));
    stream.zalloc = zAlloc;
    stream.zfree = zFree;
    stream.data_type = Z_BINARY;
    stream.opaque = Z_NULL;
    stream.avail_in = dataLength;
    stream.next_in = const_cast<uint8_t*>(data);

    size_t currentOffset = OBJECT_OFFSETOF(GenericCompressedData, m_data);
    size_t currentCapacity = fastMallocGoodSize(MinimumSize);
    Bytef* compressedData = static_cast<Bytef*>(fastMalloc(currentCapacity));
    memset(compressedData, 0, sizeof(GenericCompressedData));
    stream.next_out = compressedData + currentOffset;
    stream.avail_out = currentCapacity - currentOffset;

    deflateInit(&stream, Z_BEST_COMPRESSION);

    while (true) {
        int deflateResult = deflate(&stream, Z_FINISH);
        if (deflateResult == Z_OK || !stream.avail_out) {
            size_t newCapacity = 0;
            currentCapacity -= stream.avail_out;
            if (!stream.avail_in)
                newCapacity = currentCapacity + 8;
            else {
                // Estimate the capacity the full input will need, based on the
                // compression ratio observed so far.
                size_t compressedContent = stream.next_in - data;
                double expectedSize = static_cast<double>(dataLength) * compressedContent / currentCapacity;

                // Expand capacity by at least 8 bytes so we're always growing, and to
                // compensate for any exaggerated ideas of how effectively we'll compress
                // data in the future.
                newCapacity = std::max(static_cast<size_t>(expectedSize + 8), currentCapacity + 8);
            }
            newCapacity = fastMallocGoodSize(newCapacity);
            // If the compressed form would be no smaller than the input, give up.
            if (newCapacity >= dataLength)
                goto fail;
            compressedData = static_cast<Bytef*>(fastRealloc(compressedData, newCapacity));
            currentOffset = currentCapacity - stream.avail_out;
            stream.next_out = compressedData + currentOffset;
            stream.avail_out = newCapacity - currentCapacity;
            currentCapacity = newCapacity;
            continue;
        }

        if (deflateResult == Z_STREAM_END) {
            ASSERT(!stream.avail_in);
            break;
        }

        ASSERT_NOT_REACHED();
    fail:
        deflateEnd(&stream);
        fastFree(compressedData);
        return nullptr;
    }
    deflateEnd(&stream);
    // Running totals kept as rough diagnostics of overall compression effectiveness.
    static int64_t totalCompressed = 0;
    static int64_t totalInput = 0;

    totalCompressed += currentCapacity;
    totalInput += dataLength;
    GenericCompressedData* result = new (compressedData) GenericCompressedData(dataLength, stream.total_out);
    return adoptPtr(result);
}
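
For contrast with the grow-as-you-go loop above, here is a minimal self-contained use of the same zlib entry points (deflateInit, deflate with Z_FINISH, deflateEnd), sized up front with deflateBound so a single pass always completes. This is a generic usage sketch, not WebKit code, and error handling is abbreviated.

// Usage sketch: one-shot deflate with a worst-case buffer from deflateBound.
#include <zlib.h>
#include <cstdio>
#include <cstring>
#include <vector>

int main()
{
    const char* input = "compress me, compress me, compress me";
    uLong inputLength = static_cast<uLong>(std::strlen(input));

    z_stream stream;
    std::memset(&stream, 0, sizeof(stream));
    if (deflateInit(&stream, Z_BEST_COMPRESSION) != Z_OK)
        return 1;

    // deflateBound is a worst-case output size, so one Z_FINISH pass always
    // completes; GenericCompressedData instead grows lazily and bails out if
    // the "compressed" form would not be smaller than the input.
    std::vector<Bytef> output(deflateBound(&stream, inputLength));
    stream.next_in = reinterpret_cast<Bytef*>(const_cast<char*>(input));
    stream.avail_in = static_cast<uInt>(inputLength);
    stream.next_out = output.data();
    stream.avail_out = static_cast<uInt>(output.size());

    int result = deflate(&stream, Z_FINISH);
    std::printf("deflate: %d, %lu bytes in, %lu bytes out\n",
        result, stream.total_in, stream.total_out);
    deflateEnd(&stream);
    return result == Z_STREAM_END ? 0 : 1;
}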