Example #1
void JIT::compileLoadVarargs(Instruction* instruction)
{
    int thisValue = instruction[2].u.operand;
    int arguments = instruction[3].u.operand;
    int firstFreeRegister = instruction[4].u.operand;

    killLastResultRegister();

    JumpList slowCase;
    JumpList end;
    if (m_codeBlock->usesArguments() && arguments == m_codeBlock->argumentsRegister()) {
        emitGetVirtualRegister(arguments, regT0);
        slowCase.append(branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(JSValue()))));

        emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
        slowCase.append(branch32(Above, regT0, TrustedImm32(Arguments::MaxArguments + 1)));
        // regT0: argumentCountIncludingThis

        move(regT0, regT1);
        add32(TrustedImm32(firstFreeRegister + RegisterFile::CallFrameHeaderSize), regT1);
        lshift32(TrustedImm32(3), regT1);
        addPtr(callFrameRegister, regT1);
        // regT1: newCallFrame
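        //   (= callFrameRegister + 8 * (argumentCountIncludingThis + firstFreeRegister + CallFrameHeaderSize))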

        slowCase.append(branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), regT1));

        // Initialize ArgumentCount.
        emitFastArithReTagImmediate(regT0, regT2);
        storePtr(regT2, Address(regT1, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register))));

        // Initialize 'this'.
        emitGetVirtualRegister(thisValue, regT2);
        storePtr(regT2, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));

        // Copy arguments.
        neg32(regT0);
        signExtend32ToPtr(regT0, regT0);
        end.append(branchAddPtr(Zero, Imm32(1), regT0));
        // regT0: -argumentCount

        Label copyLoop = label();
        loadPtr(BaseIndex(callFrameRegister, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT2);
        storePtr(regT2, BaseIndex(regT1, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
        branchAddPtr(NonZero, Imm32(1), regT0).linkTo(copyLoop, this);

        end.append(jump());
    }

    if (m_codeBlock->usesArguments() && arguments == m_codeBlock->argumentsRegister())
        slowCase.link(this);

    JITStubCall stubCall(this, cti_op_load_varargs);
    stubCall.addArgument(thisValue, regT0);
    stubCall.addArgument(arguments, regT0);
    stubCall.addArgument(Imm32(firstFreeRegister));
    stubCall.call(regT1);

    if (m_codeBlock->usesArguments() && arguments == m_codeBlock->argumentsRegister())
        end.link(this);
}
Example #2
void JIT::compileLoadVarargs(Instruction* instruction)
{
    int thisValue = instruction[3].u.operand;
    int arguments = instruction[4].u.operand;
    int firstFreeRegister = instruction[5].u.operand;

    JumpList slowCase;
    JumpList end;
    bool canOptimize = m_codeBlock->usesArguments()
        && arguments == m_codeBlock->argumentsRegister().offset()
        && !m_codeBlock->symbolTable()->slowArguments();

    if (canOptimize) {
        emitGetVirtualRegister(arguments, regT0);
        slowCase.append(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(JSValue()))));

        emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT0);
        slowCase.append(branch32(Above, regT0, TrustedImm32(Arguments::MaxArguments + 1)));
        // regT0: argumentCountIncludingThis

        move(regT0, regT1);
        neg64(regT1);
        add64(TrustedImm32(firstFreeRegister - JSStack::CallFrameHeaderSize), regT1);
        lshift64(TrustedImm32(3), regT1);
        addPtr(callFrameRegister, regT1);
        // regT1: newCallFrame
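        //   (= callFrameRegister + 8 * (firstFreeRegister - CallFrameHeaderSize - argumentCountIncludingThis))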

        slowCase.append(branchPtr(Above, AbsoluteAddress(m_vm->addressOfJSStackLimit()), regT1));

        // Initialize ArgumentCount.
        store32(regT0, Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));

        // Initialize 'this'.
        emitGetVirtualRegister(thisValue, regT2);
        store64(regT2, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));

        // Copy arguments.
        signExtend32ToPtr(regT0, regT0);
        end.append(branchSub64(Zero, TrustedImm32(1), regT0));
        // regT0: argumentCount

        Label copyLoop = label();
        load64(BaseIndex(callFrameRegister, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT2);
        store64(regT2, BaseIndex(regT1, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
        branchSub64(NonZero, TrustedImm32(1), regT0).linkTo(copyLoop, this);

        end.append(jump());
    }

    if (canOptimize)
        slowCase.link(this);

    emitGetVirtualRegister(thisValue, regT0);
    emitGetVirtualRegister(arguments, regT1);
    callOperation(operationLoadVarargs, regT0, regT1, firstFreeRegister);
    move(returnValueGPR, regT1);

    if (canOptimize)
        end.link(this);
}
Example #3
void JIT::compileLoadVarargs(Instruction* instruction)
{
    int thisValue = instruction[2].u.operand;
    int arguments = instruction[3].u.operand;
    int firstFreeRegister = instruction[4].u.operand;

    JumpList slowCase;
    JumpList end;
    if (m_codeBlock->usesArguments() && arguments == m_codeBlock->argumentsRegister()) {
        emitLoadTag(arguments, regT1);
        slowCase.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::EmptyValueTag)));

        load32(payloadFor(RegisterFile::ArgumentCount), regT2);
        slowCase.append(branch32(Above, regT2, TrustedImm32(Arguments::MaxArguments + 1)));
        // regT2: argumentCountIncludingThis

        move(regT2, regT3);
        add32(TrustedImm32(firstFreeRegister + RegisterFile::CallFrameHeaderSize), regT3);
        lshift32(TrustedImm32(3), regT3);
        addPtr(callFrameRegister, regT3);
        // regT3: newCallFrame

        slowCase.append(branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), regT3));

        // Initialize ArgumentCount.
        store32(regT2, payloadFor(RegisterFile::ArgumentCount, regT3));

        // Initialize 'this'.
        emitLoad(thisValue, regT1, regT0);
        store32(regT0, Address(regT3, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
        store32(regT1, Address(regT3, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));

        // Copy arguments.
        neg32(regT2);
        end.append(branchAdd32(Zero, TrustedImm32(1), regT2));
        // regT2: -argumentCount;

        Label copyLoop = label();
        load32(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))), regT0);
        load32(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))), regT1);
        store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
        store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
        branchAdd32(NonZero, TrustedImm32(1), regT2).linkTo(copyLoop, this);

        end.append(jump());
    }

    if (m_codeBlock->usesArguments() && arguments == m_codeBlock->argumentsRegister())
        slowCase.link(this);

    JITStubCall stubCall(this, cti_op_load_varargs);
    stubCall.addArgument(thisValue);
    stubCall.addArgument(arguments);
    stubCall.addArgument(Imm32(firstFreeRegister));
    stubCall.call(regT3);

    if (m_codeBlock->usesArguments() && arguments == m_codeBlock->argumentsRegister())
        end.link(this);
}
Example #4
void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    if (isOperandConstantImmediateInt(op2)) {
        int shift = getConstantOperand(op2).asInt32();
        // op1 = regT0
        linkSlowCase(iter); // int32 check
        if (supportsFloatingPointTruncate()) {
            JumpList failures;
            failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
            addPtr(tagTypeNumberRegister, regT0);
            movePtrToDouble(regT0, fpRegT0);
            failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
            if (shift)
                urshift32(Imm32(shift & 0x1f), regT0);
            if (shift < 0 || !(shift & 31))
                failures.append(branch32(LessThan, regT0, Imm32(0)));
            emitFastArithReTagImmediate(regT0, regT0);
            emitPutVirtualRegister(dst, regT0);
            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
            failures.link(this);
        }
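        // The hot path emits its "failed to box" slow case only for shifts that can leave the
        // sign bit set, so the same condition must gate the matching linkSlowCase here.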
        if (shift < 0 || !(shift & 31))
            linkSlowCase(iter); // failed to box in hot path
    } else {
        // op1 = regT0
        // op2 = regT1
        if (!isOperandConstantImmediateInt(op1)) {
            linkSlowCase(iter); // int32 check -- op1 is not an int
            if (supportsFloatingPointTruncate()) {
                JumpList failures;
                failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
                addPtr(tagTypeNumberRegister, regT0);
                movePtrToDouble(regT0, fpRegT0);
                failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
                failures.append(emitJumpIfNotImmediateInteger(regT1)); // op2 is not an int
                emitFastArithImmToInt(regT1);
                urshift32(regT1, regT0);
                failures.append(branch32(LessThan, regT0, Imm32(0)));
                emitFastArithReTagImmediate(regT0, regT0);
                emitPutVirtualRegister(dst, regT0);
                emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
                failures.link(this);
            }
        }
        
        linkSlowCase(iter); // int32 check - op2 is not an int
        linkSlowCase(iter); // Can't represent unsigned result as an immediate
    }
    
    JITStubCall stubCall(this, cti_op_urshift);
    stubCall.addArgument(op1, regT0);
    stubCall.addArgument(op2, regT1);
    stubCall.call(dst);
}
Example #5
void JIT::compileSetupVarargsFrame(Instruction* instruction, CallLinkInfo* info)
{
    int thisValue = instruction[3].u.operand;
    int arguments = instruction[4].u.operand;
    int firstFreeRegister = instruction[5].u.operand;
    int firstVarArgOffset = instruction[6].u.operand;

    JumpList slowCase;
    JumpList end;
    bool canOptimize = m_codeBlock->usesArguments()
        && arguments == m_codeBlock->argumentsRegister().offset()
        && !m_codeBlock->symbolTable()->slowArguments();

    if (canOptimize) {
        emitGetVirtualRegister(arguments, regT0);
        slowCase.append(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(JSValue()))));
        
        move(TrustedImm32(-firstFreeRegister), regT1);
        emitSetupVarargsFrameFastCase(*this, regT1, regT0, regT1, regT2, firstVarArgOffset, slowCase);
        end.append(jump());
        slowCase.link(this);
    }

    emitGetVirtualRegister(arguments, regT1);
    callOperation(operationSizeFrameForVarargs, regT1, -firstFreeRegister, firstVarArgOffset);
    move(TrustedImm32(-firstFreeRegister), regT1);
    emitSetVarargsFrame(*this, returnValueGPR, false, regT1, regT1);
    addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(void*)))), regT1, stackPointerRegister);
    emitGetVirtualRegister(arguments, regT2);
    callOperation(operationSetupVarargsFrame, regT1, regT2, firstVarArgOffset, regT0);
    move(returnValueGPR, regT1);

    if (canOptimize)
        end.link(this);
    
    // Profile the argument count.
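    // Saturating maximum: maxNumArguments = max(maxNumArguments, min(argumentCount, 255)),
    // clamped so the value still fits the 8-bit profiling field.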
    load32(Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset), regT2);
    load8(&info->maxNumArguments, regT0);
    Jump notBiggest = branch32(Above, regT0, regT2);
    Jump notSaturated = branch32(BelowOrEqual, regT2, TrustedImm32(255));
    move(TrustedImm32(255), regT2);
    notSaturated.link(this);
    store8(regT2, &info->maxNumArguments);
    notBiggest.link(this);
    
    // Initialize 'this'.
    emitGetVirtualRegister(thisValue, regT0);
    store64(regT0, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));

    addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), regT1, stackPointerRegister);
}
Example #6
void Generator::generateAssertionWordBoundary(JumpList& failures, bool invert)
{
    JumpList wordBoundary;
    JumpList notWordBoundary;

    // (1) Check if the previous value was a word char

    // (1.1) check for beginning of input
    Jump atBegin = je32(index, Imm32(0));
    // (1.2) load the last char, and check if it is a word character
    load16(BaseIndex(input, index, TimesTwo, -2), character);
    JumpList previousIsWord;
    generateCharacterClassInverted(previousIsWord, CharacterClass::wordchar());
    // (1.3) if we get here, previous is not a word char
    atBegin.link(this);

    // (2) Handle situation where previous was NOT a \w

    generateLoadCharacter(notWordBoundary);
    generateCharacterClassInverted(wordBoundary, CharacterClass::wordchar());
    // (2.1) If we get here, neither char is a word char
    notWordBoundary.append(jump());

    // (3) Handle situation where previous was a \w

    // (3.0) link success in first match to here
    previousIsWord.link(this);
    generateLoadCharacter(wordBoundary);
    generateCharacterClassInverted(notWordBoundary, CharacterClass::wordchar());
    // (3.1) If we get here, this is an end of a word, within the input.
    
    // (4) Link everything up
    
    if (invert) {
        // handle the fall through case
        wordBoundary.append(jump());
    
        // looking for non word boundaries, so link boundary fails to here.
        notWordBoundary.link(this);

        failures.append(wordBoundary);
    } else {
        // looking for word boundaries, so link successes here.
        wordBoundary.link(this);
        
        failures.append(notWordBoundary);
    }
}
Example #7
void Generator::terminateAlternative(JumpList& successes, JumpList& failures)
{
    successes.append(jump());
    
    failures.link(this);
    peek(index);
}
Example #8
void Generator::generateAssertionEOL(JumpList& failures)
{
    if (m_parser.multiline()) {
        JumpList nextIsNewline;

        generateLoadCharacter(nextIsNewline); // end of input == success
        generateCharacterClassInverted(nextIsNewline, CharacterClass::newline());
        failures.append(jump());
        nextIsNewline.link(this);
    } else {
        failures.append(jne32(length, index));
    }
}
Example #9
Generator::Jump Generator::generateParenthesesResetTrampoline(JumpList& newFailures, unsigned subpatternIdBefore, unsigned subpatternIdAfter)
{
    Jump skip = jump();
    newFailures.link(this);
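    // On failure, reset every subpattern captured inside the parentheses to "no match" (-1, -1).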
    for (unsigned i = subpatternIdBefore + 1; i <= subpatternIdAfter; ++i) {
        store32(Imm32(-1), Address(output, (2 * i) * sizeof(int)));
        store32(Imm32(-1), Address(output, (2 * i + 1) * sizeof(int)));
    }
    
    Jump newFailJump = jump();
    skip.link(this);
    
    return newFailJump;
}
Example #10
void Generator::generateParenthesesInvertedAssertion(JumpList& failures)
{
    JumpList disjunctionFailed;

    push(index);
    m_parser.parseDisjunction(disjunctionFailed);

    // If the disjunction succeeded, the inverted assertion failed.
    pop(index);
    failures.append(jump());

    // If the disjunction failed, the inverted assertion succeeded.
    disjunctionFailed.link(this);
    pop(index);
}
Example #11
void Generator::generateParenthesesAssertion(JumpList& failures)
{
    JumpList disjunctionFailed;

    push(index);
    m_parser.parseDisjunction(disjunctionFailed);
    Jump success = jump();

    disjunctionFailed.link(this);
    pop(index);
    failures.append(jump());

    success.link(this);
    pop(index);
}
Example #12
void Generator::generateCharacterClass(JumpList& failures, const CharacterClass& charClass, bool invert)
{
    generateLoadCharacter(failures);

    if (invert)
        generateCharacterClassInverted(failures, charClass);
    else {
        JumpList successes;
        generateCharacterClassInverted(successes, charClass);
        failures.append(jump());
        successes.link(this);
    }
    
    add32(Imm32(1), index);
}
Example #13
void Generator::generateGreedyQuantifier(JumpList& failures, GenerateAtomFunctor& functor, unsigned min, unsigned max)
{
    if (!max)
        return;

    JumpList doneReadingAtomsList;
    JumpList alternativeFailedList;

    // (0) Setup: Save, then init repeatCount.
    push(repeatCount);
    move(Imm32(0), repeatCount);

    // (1) Greedily read as many copies of the atom as possible, then jump to (2).
    Label readAtom(this);
    functor.generateAtom(this, doneReadingAtomsList);
    add32(Imm32(1), repeatCount);
    if (max == Quantifier::Infinity)
        jump(readAtom);
    else if (max == 1)
        doneReadingAtomsList.append(jump());
    else {
        jne32(repeatCount, Imm32(max), readAtom);
        doneReadingAtomsList.append(jump());
    }

    // (5) Quantifier failed: No more backtracking possible.
    Label quantifierFailed(this);
    pop(repeatCount);
    failures.append(jump()); 

    // (4) Alternative failed: Backtrack, then fall through to (2) to try again.
    Label alternativeFailed(this);
    pop(index);
    functor.backtrack(this);
    sub32(Imm32(1), repeatCount);

    // (2) Verify that we have enough atoms.
    doneReadingAtomsList.link(this);
    jl32(repeatCount, Imm32(min), quantifierFailed);

    // (3) Test the rest of the alternative.
    push(index);
    m_parser.parseAlternative(alternativeFailedList);
    alternativeFailedList.linkTo(alternativeFailed, this);

    pop();
    pop(repeatCount);
}
Example #14
void JIT::privateCompileClosureCall(CallLinkInfo* callLinkInfo, CodeBlock* calleeCodeBlock, Structure* expectedStructure, ExecutableBase* expectedExecutable, MacroAssemblerCodePtr codePtr)
{
    JumpList slowCases;

    slowCases.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
    slowCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(expectedStructure)));
    slowCases.append(branchPtr(NotEqual, Address(regT0, JSFunction::offsetOfExecutable()), TrustedImmPtr(expectedExecutable)));
    
    loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT1);
    emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
    
    Call call = nearCall();
    Jump done = jump();
    
    slowCases.link(this);
    move(TrustedImmPtr(callLinkInfo->callReturnLocation.executableAddress()), regT2);
    restoreReturnAddressBeforeReturn(regT2);
    Jump slow = jump();
    
    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
    
    patchBuffer.link(call, FunctionPtr(codePtr.executableAddress()));
    patchBuffer.link(done, callLinkInfo->hotPathOther.labelAtOffset(0));
    patchBuffer.link(slow, CodeLocationLabel(m_vm->getCTIStub(virtualCallThunkGenerator).code()));
    
    RefPtr<ClosureCallStubRoutine> stubRoutine = adoptRef(new ClosureCallStubRoutine(
        FINALIZE_CODE(
            patchBuffer,
            ("Baseline closure call stub for %s, return point %p, target %p (%s)",
                toCString(*m_codeBlock).data(),
                callLinkInfo->hotPathOther.labelAtOffset(0).executableAddress(),
                codePtr.executableAddress(),
                toCString(pointerDump(calleeCodeBlock)).data())),
        *m_vm, m_codeBlock->ownerExecutable(), expectedStructure, expectedExecutable,
        callLinkInfo->codeOrigin));
    
    RepatchBuffer repatchBuffer(m_codeBlock);
    
    repatchBuffer.replaceWithJump(
        RepatchBuffer::startOfBranchPtrWithPatchOnRegister(callLinkInfo->hotPathBegin),
        CodeLocationLabel(stubRoutine->code().code()));
    repatchBuffer.relink(callLinkInfo->callReturnLocation, m_vm->getCTIStub(virtualCallThunkGenerator).code());
    
    callLinkInfo->stub = stubRoutine.release();
}
Example #15
void Generator::generateAssertionBOL(JumpList& failures)
{
    if (m_parser.multiline()) {
        JumpList previousIsNewline;

        // beginning of input == success
        previousIsNewline.append(je32(index, Imm32(0)));

        // now check prev char against newline characters.
        load16(BaseIndex(input, index, TimesTwo, -2), character);
        generateCharacterClassInverted(previousIsNewline, CharacterClass::newline());

        failures.append(jump());

        previousIsNewline.link(this);
    } else
        failures.append(jne32(index, Imm32(0)));
}
Example #16
void JIT::compileLoadVarargs(Instruction* instruction)
{
    int thisValue = instruction[3].u.operand;
    int arguments = instruction[4].u.operand;
    int firstFreeRegister = instruction[5].u.operand;

    JumpList slowCase;
    JumpList end;
    bool canOptimize = m_codeBlock->usesArguments()
        && VirtualRegister(arguments) == m_codeBlock->argumentsRegister()
        && !m_codeBlock->symbolTable()->slowArguments();

    if (canOptimize) {
        emitLoadTag(arguments, regT1);
        slowCase.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::EmptyValueTag)));

        load32(payloadFor(JSStack::ArgumentCount), regT2);
        slowCase.append(branch32(Above, regT2, TrustedImm32(Arguments::MaxArguments + 1)));
        // regT2: argumentCountIncludingThis

        move(regT2, regT3);
        neg32(regT3);
        add32(TrustedImm32(firstFreeRegister - JSStack::CallFrameHeaderSize), regT3);
        lshift32(TrustedImm32(3), regT3);
        addPtr(callFrameRegister, regT3);
        // regT3: newCallFrame

        slowCase.append(branchPtr(Above, AbsoluteAddress(m_vm->interpreter->stack().addressOfEnd()), regT3));

        // Initialize ArgumentCount.
        store32(regT2, payloadFor(JSStack::ArgumentCount, regT3));

        // Initialize 'this'.
        emitLoad(thisValue, regT1, regT0);
        store32(regT0, Address(regT3, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
        store32(regT1, Address(regT3, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));

        // Copy arguments.
        end.append(branchSub32(Zero, TrustedImm32(1), regT2));
        // regT2: argumentCount;

        Label copyLoop = label();
        load32(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))), regT0);
        load32(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))), regT1);
        store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
        store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
        branchSub32(NonZero, TrustedImm32(1), regT2).linkTo(copyLoop, this);

        end.append(jump());
    }

    if (canOptimize)
        slowCase.link(this);

    emitLoad(thisValue, regT1, regT0);
    emitLoad(arguments, regT3, regT2);
    callOperation(operationLoadVarargs, regT1, regT0, regT3, regT2, firstFreeRegister);
    move(returnValueRegister, regT3);

    if (canOptimize)
        end.link(this);
}
Example #17
void Generator::generateCharacterClassInverted(JumpList& matchDest, const CharacterClass& charClass)
{
    Jump unicodeFail;
    if (charClass.numMatchesUnicode || charClass.numRangesUnicode) {
        Jump isAscii = jle32(character, Imm32(0x7f));
    
        if (charClass.numMatchesUnicode) {
            for (unsigned i = 0; i < charClass.numMatchesUnicode; ++i) {
                UChar ch = charClass.matchesUnicode[i];
                matchDest.append(je32(character, Imm32(ch)));
            }
        }
        
        if (charClass.numRangesUnicode) {
            for (unsigned i = 0; i < charClass.numRangesUnicode; ++i) {
                UChar lo = charClass.rangesUnicode[i].begin;
                UChar hi = charClass.rangesUnicode[i].end;
                
                Jump below = jl32(character, Imm32(lo));
                matchDest.append(jle32(character, Imm32(hi)));
                below.link(this);
            }
        }

        unicodeFail = jump();
        isAscii.link(this);
    }

    if (charClass.numRanges) {
        unsigned matchIndex = 0;
        JumpList failures; 
        generateCharacterClassInvertedRange(failures, matchDest, charClass.ranges, charClass.numRanges, &matchIndex, charClass.matches, charClass.numMatches);
        while (matchIndex < charClass.numMatches)
            matchDest.append(je32(character, Imm32((unsigned short)charClass.matches[matchIndex++])));

        failures.link(this);
    } else if (charClass.numMatches) {
        // optimization: gather 'a','A' etc back together, can mask & test once.
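        // (OR-ing in 0x20 below folds ASCII uppercase letters to lowercase, so a single compare
        // per letter covers both cases when ignoreCase is set.)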
        Vector<char> matchesAZaz;

        for (unsigned i = 0; i < charClass.numMatches; ++i) {
            char ch = charClass.matches[i];
            if (m_parser.ignoreCase()) {
                if (isASCIILower(ch)) {
                    matchesAZaz.append(ch);
                    continue;
                }
                if (isASCIIUpper(ch))
                    continue;
            }
            matchDest.append(je32(character, Imm32((unsigned short)ch)));
        }

        if (unsigned countAZaz = matchesAZaz.size()) {
            or32(Imm32(32), character);
            for (unsigned i = 0; i < countAZaz; ++i)
                matchDest.append(je32(character, Imm32(matchesAZaz[i])));
        }
    }

    if (charClass.numMatchesUnicode || charClass.numRangesUnicode)
        unicodeFail.link(this);
}
Example #18
void Generator::terminateDisjunction(JumpList& successes)
{
    successes.link(this);
}
Example #19
void JIT::compileLoadVarargs(Instruction* instruction)
{
    int thisValue = instruction[3].u.operand;
    int arguments = instruction[4].u.operand;
    int firstFreeRegister = instruction[5].u.operand;
    int firstVarArgOffset = instruction[6].u.operand;

    JumpList slowCase;
    JumpList end;
    bool canOptimize = m_codeBlock->usesArguments()
        && arguments == m_codeBlock->argumentsRegister().offset()
        && !m_codeBlock->symbolTable()->slowArguments();

    if (canOptimize) {
        emitGetVirtualRegister(arguments, regT0);
        slowCase.append(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(JSValue()))));

        emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT0);
        if (firstVarArgOffset) {
            Jump sufficientArguments = branch32(GreaterThan, regT0, TrustedImm32(firstVarArgOffset + 1));
            move(TrustedImm32(1), regT0);
            Jump endVarArgs = jump();
            sufficientArguments.link(this);
            sub32(TrustedImm32(firstVarArgOffset), regT0);
            endVarArgs.link(this);
        }
        slowCase.append(branch32(Above, regT0, TrustedImm32(Arguments::MaxArguments + 1)));
        // regT0: argumentCountIncludingThis
        move(regT0, regT1);
        add64(TrustedImm32(-firstFreeRegister + JSStack::CallFrameHeaderSize), regT1);
        // regT1 now has the required frame size in Register units
        // Round regT1 to next multiple of stackAlignmentRegisters()
        add64(TrustedImm32(stackAlignmentRegisters() - 1), regT1);
        and64(TrustedImm32(~(stackAlignmentRegisters() - 1)), regT1);
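        // (Standard round-up idiom: x = (x + a - 1) & ~(a - 1); valid because the alignment is a power of two.)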

        neg64(regT1);
        lshift64(TrustedImm32(3), regT1);
        addPtr(callFrameRegister, regT1);
        // regT1: newCallFrame

        slowCase.append(branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), regT1));

        // Initialize ArgumentCount.
        store32(regT0, Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));

        // Initialize 'this'.
        emitGetVirtualRegister(thisValue, regT2);
        store64(regT2, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));

        // Copy arguments.
        signExtend32ToPtr(regT0, regT0);
        end.append(branchSub64(Zero, TrustedImm32(1), regT0));
        // regT0: argumentCount

        Label copyLoop = label();
        load64(BaseIndex(callFrameRegister, regT0, TimesEight, (CallFrame::thisArgumentOffset() + firstVarArgOffset) * static_cast<int>(sizeof(Register))), regT2);
        store64(regT2, BaseIndex(regT1, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
        branchSub64(NonZero, TrustedImm32(1), regT0).linkTo(copyLoop, this);

        end.append(jump());
    }

    if (canOptimize)
        slowCase.link(this);

    emitGetVirtualRegister(arguments, regT1);
    callOperation(operationSizeFrameForVarargs, regT1, firstFreeRegister, firstVarArgOffset);
    move(returnValueGPR, stackPointerRegister);
    emitGetVirtualRegister(thisValue, regT1);
    emitGetVirtualRegister(arguments, regT2);
    callOperation(operationLoadVarargs, returnValueGPR, regT1, regT2, firstVarArgOffset);
    move(returnValueGPR, regT1);

    if (canOptimize)
        end.link(this);
    
    addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), regT1, stackPointerRegister);
}