Example #1
void
FrameState::assertValidRegisterState() const
{
    Registers checkedFreeRegs;

    FrameEntry *tos = tosFe();
    for (uint32 i = 0; i < tracker.nentries; i++) {
        FrameEntry *fe = tracker[i];
        if (fe >= tos)
            continue;

        JS_ASSERT(i == fe->trackerIndex());
        JS_ASSERT_IF(fe->isCopy(),
                     fe->trackerIndex() > fe->copyOf()->trackerIndex());
        JS_ASSERT_IF(fe->isCopy(), !fe->type.inRegister() && !fe->data.inRegister());
        JS_ASSERT_IF(fe->isCopy(), fe->copyOf() < tos);
        JS_ASSERT_IF(fe->isCopy(), fe->copyOf()->isCopied());

        if (fe->isCopy())
            continue;
        if (fe->type.inRegister()) {
            checkedFreeRegs.takeReg(fe->type.reg());
            JS_ASSERT(regstate[fe->type.reg()].fe == fe);
        }
        if (fe->data.inRegister()) {
            checkedFreeRegs.takeReg(fe->data.reg());
            JS_ASSERT(regstate[fe->data.reg()].fe == fe);
        }
    }

    JS_ASSERT(checkedFreeRegs == freeRegs);
}
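
The assertion above re-derives the free set from scratch: every live, non-copy entry that claims a register must also be recorded as that register's owner in regstate, and removing all such registers from a full set must reproduce freeRegs exactly. A minimal standalone sketch of that cross-check (MiniFrame and its fields are hypothetical, not SpiderMonkey API):

#include <cassert>
#include <cstdint>

struct MiniFrame {
    static const int NumRegs = 8;
    const void *owner[NumRegs];   // owner[r] != NULL => register r is in use
    uint32_t freeMask;            // bit r set => register r is free

    void assertValidRegisterState() const {
        uint32_t checkedFree = (1u << NumRegs) - 1;  // start with all regs free
        for (int r = 0; r < NumRegs; ++r) {
            if (owner[r])
                checkedFree &= ~(1u << r);           // takeReg() analogue
        }
        assert(checkedFree == freeMask);             // must match the free set
    }
};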
Example #2
void
ImmutableSync::syncCopy(FrameEntry *fe)
{
    JS_ASSERT(fe >= bottom);

    FrameEntry *backing = fe->copyOf();
    SyncEntry &e = entryFor(backing);

    JS_ASSERT(!backing->isConstant());

    Address addr = frame.addressOf(fe);

    if (fe->isTypeKnown() && !e.learnedType) {
        e.learnedType = true;
        e.type = fe->getKnownType();
    }

    if (!fe->data.synced())
        masm->storePayload(ensureDataReg(backing, e), addr);

    if (!fe->type.synced()) {
        if (e.learnedType)
            masm->storeTypeTag(ImmType(e.type), addr);
        else
            masm->storeTypeTag(ensureTypeReg(backing, e), addr);
    }
}
Example #3
void
FrameState::merge(Assembler &masm, Changes changes) const
{
    FrameEntry *tos = tosFe();
    Registers temp(Registers::TempRegs);

    for (uint32 i = 0; i < tracker.nentries; i++) {
        FrameEntry *fe = tracker[i];
        if (fe >= tos)
            continue;

        /* Copies do not have registers. */
        if (fe->isCopy()) {
            JS_ASSERT(!fe->data.inRegister());
            JS_ASSERT(!fe->type.inRegister());
            continue;
        }

        if (fe->data.inRegister() && fe->type.inRegister())
            masm.loadValueAsComponents(addressOf(fe), fe->type.reg(), fe->data.reg());
        else if (fe->data.inRegister())
            masm.loadPayload(addressOf(fe), fe->data.reg());
        else if (fe->type.inRegister())
            masm.loadTypeTag(addressOf(fe), fe->type.reg());
    }
}
Example #4
void
FrameState::syncAndKill(Registers kill, Uses uses, Uses ignore)
{
    /* Backwards, so we can allocate registers to backing slots better. */
    FrameEntry *tos = tosFe();
    FrameEntry *bottom = tos - uses.nuses;

    tos -= ignore.nuses;

    if (inTryBlock)
        bottom = NULL;

    for (uint32 i = tracker.nentries - 1; i < tracker.nentries; i--) {
        FrameEntry *fe = tracker[i];
        if (fe >= tos)
            continue;

        Address address = addressOf(fe);
        FrameEntry *backing = fe;
        if (fe->isCopy()) {
            if (!inTryBlock && fe < bottom)
                continue;
            backing = fe->copyOf();
        }

        JS_ASSERT_IF(i == 0, !fe->isCopy());

        bool killData = fe->data.inRegister() && kill.hasReg(fe->data.reg());
        if (!fe->data.synced() && (killData || fe >= bottom)) {
            if (backing != fe && backing->data.inMemory())
                tempRegForData(backing);
            syncData(backing, address, masm);
            fe->data.sync();
            if (fe->isConstant() && !fe->type.synced())
                fe->type.sync();
        }
        if (killData) {
            JS_ASSERT(backing == fe);
            JS_ASSERT(fe->data.synced());
            if (regstate[fe->data.reg()].fe)
                forgetReg(fe->data.reg());
            fe->data.setMemory();
        }
        bool killType = fe->type.inRegister() && kill.hasReg(fe->type.reg());
        if (!fe->type.synced() && (killType || fe >= bottom)) {
            if (backing != fe && backing->type.inMemory())
                tempRegForType(backing);
            syncType(backing, address, masm);
            fe->type.sync();
        }
        if (killType) {
            JS_ASSERT(backing == fe);
            JS_ASSERT(fe->type.synced());
            if (regstate[fe->type.reg()].fe)
                forgetReg(fe->type.reg());
            fe->type.setMemory();
        }
    }
}
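
The loop header here (also used in Example #14) walks the tracker backwards with an unsigned counter: decrementing i past zero wraps it to UINT32_MAX, so the single test i < tracker.nentries doubles as the termination condition. A self-contained illustration of the idiom:

#include <cstdint>
#include <cstdio>

int main() {
    uint32_t n = 3;
    // Visits 2, 1, 0; the decrement from 0 wraps to UINT32_MAX, so i < n fails.
    // (If n were 0, n - 1 would wrap too and the body would never run.)
    for (uint32_t i = n - 1; i < n; i--)
        std::printf("%u\n", i);
    return 0;
}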
Example #5
void DWARFDebugFrame::dump(raw_ostream &OS) const {
    OS << "\n";
    for (EntryVector::const_iterator I = Entries.begin(), E = Entries.end();
            I != E; ++I) {
        FrameEntry *Entry = *I;
        Entry->dumpHeader(OS);
        Entry->dumpInstructions(OS);
        OS << "\n";
    }
}
Example #6
CompileStatus
mjit::Compiler::compileArrayWithLength(uint32_t argc)
{
    /* Match Array() or Array(n) for constant n. */
    JS_ASSERT(argc == 0 || argc == 1);

    int32_t length = 0;
    if (argc == 1) {
        FrameEntry *arg = frame.peek(-1);
        if (!arg->isConstant() || !arg->getValue().isInt32())
            return Compile_InlineAbort;
        length = arg->getValue().toInt32();
        if (length < 0)
            return Compile_InlineAbort;
    }

    RootedScript script(cx, script_);
    types::TypeObject *type = types::TypeScript::InitObject(cx, script, PC, JSProto_Array);
    if (!type)
        return Compile_Error;

    JSObject *templateObject = NewDenseUnallocatedArray(cx, length, type->proto);
    if (!templateObject)
        return Compile_Error;
    templateObject->setType(type);

    RegisterID result = frame.allocReg();
    Jump emptyFreeList = getNewObject(cx, result, templateObject);

    stubcc.linkExit(emptyFreeList, Uses(0));
    stubcc.leave();

    stubcc.masm.move(Imm32(argc), Registers::ArgReg1);
    OOL_STUBCALL(stubs::SlowCall, REJOIN_FALLTHROUGH);

    frame.popn(argc + 2);
    frame.pushTypedPayload(JSVAL_TYPE_OBJECT, result);

    stubcc.rejoin(Changes(1));
    return Compile_Okay;
}
Example #7
JSC::MacroAssembler::RegisterID
ImmutableSync::allocReg()
{
    if (!avail.empty())
        return avail.takeAnyReg();

    uint32 lastResort = FrameState::InvalidIndex;
    uint32 evictFromFrame = FrameState::InvalidIndex;

    /* Find something to evict. */
    for (uint32 i = 0; i < JSC::MacroAssembler::TotalRegisters; i++) {
        RegisterID reg = RegisterID(i);
        if (!(Registers::maskReg(reg) & Registers::AvailRegs))
            continue;

        /* Track the most recent allocatable register as a fallback. */
        lastResort = i;

        if (!regs[i]) {
            /* If the frame does not own this register, take it! */
            FrameEntry *fe = frame.regstate[i].usedBy();
            if (!fe)
                return reg;

            evictFromFrame = i;

            /*
             * If not copied, we can sync and not have to load again later.
             * That's about as good as it gets, so just break out now.
             */
            if (!fe->isCopied())
                break;
        }
    }

    if (evictFromFrame != FrameState::InvalidIndex) {
        FrameEntry *fe = frame.regstate[evictFromFrame].usedBy();
        SyncEntry &e = entryFor(fe);
        if (frame.regstate[evictFromFrame].type() == RematInfo::TYPE) {
            JS_ASSERT(!e.typeClobbered);
            e.typeClobbered = true;
        } else {
            JS_ASSERT(!e.dataClobbered);
            e.dataClobbered = true;
        }
        return RegisterID(evictFromFrame);
    }

    JS_ASSERT(lastResort != FrameState::InvalidIndex);
    JS_ASSERT(regs[lastResort]);

    SyncEntry *e = regs[lastResort];
    RegisterID reg = RegisterID(lastResort);
    if (e->hasDataReg && e->dataReg == reg) {
        e->hasDataReg = false;
    } else if (e->hasTypeReg && e->typeReg == reg) {
        e->hasTypeReg = false;
    } else {
        JS_NOT_REACHED("no way");
    }

    return reg;
}
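
Condensed, the search above prefers, in order: a register owned by neither the sync state nor the frame; a frame-owned register whose entry is not copied (it can be synced once and never reloaded); any frame-owned register; and, as a last resort, a register the sync state itself holds. A hedged sketch of that preference order over hypothetical descriptors:

#include <cassert>

struct RegDesc {
    bool available;   // in the allocatable set (AvailRegs analogue)
    bool syncOwned;   // regs[i] != NULL in the example above
    bool frameOwned;  // frame.regstate[i].usedBy() != NULL
    bool feCopied;    // the owning FrameEntry is copied
};

// Returns the index of the register to take, mirroring the loop above.
int pickEvictee(const RegDesc *regs, int nregs)
{
    int lastResort = -1, evictFromFrame = -1;
    for (int i = 0; i < nregs; ++i) {
        if (!regs[i].available)
            continue;
        lastResort = i;
        if (!regs[i].syncOwned) {
            if (!regs[i].frameOwned)
                return i;          // nobody owns it: take it!
            evictFromFrame = i;
            if (!regs[i].feCopied)
                break;             // sync once, never reload: good enough
        }
    }
    if (evictFromFrame != -1)
        return evictFromFrame;
    assert(lastResort != -1);      // otherwise steal from the sync state
    return lastResort;
}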
Example #8
void
FrameEntry::copy(const FrameEntry& other)
{
    U8* dstPixels = data();

    assert(dstPixels);
    if (!dstPixels) {
        return;
    }
    const U8* srcPixels = other.data();
    assert(srcPixels);
    if (!srcPixels) {
        return;
    }
    const TextureRect& srcBounds = other.getKey().getTexRect();
    const TextureRect& dstBounds = _key.getTexRect();
    std::size_t srcRowSize = srcBounds.width();
    unsigned int srcPixelSize = 4;
    if ( (ImageBitDepthEnum)other.getKey().getBitDepth() == eImageBitDepthFloat ) {
        srcPixelSize *= sizeof(float);
    }
    srcRowSize *= srcPixelSize;

    std::size_t dstRowSize = dstBounds.width();
    unsigned int dstPixelSize = 4;
    if ( (ImageBitDepthEnum)_key.getBitDepth() == eImageBitDepthFloat ) {
        dstPixelSize *= sizeof(float);
    }
    dstRowSize *= dstPixelSize;

    // Fill with black and transparent because src might be smaller
    bool filledZero = false;
    if ( !srcBounds.contains(dstBounds) ) {
        std::memset( dstPixels, 0, dstRowSize * dstBounds.height() );
        filledZero = true;
    }
    if ( other.getKey().getBitDepth() != _key.getBitDepth() ) {
        if (!filledZero) {
            std::memset( dstPixels, 0, dstRowSize * dstBounds.height() );
        }

        return;
    }

    // Copy pixels over the intersection
    RectI srcBoundsRect;
    srcBoundsRect.x1 = srcBounds.x1;
    srcBoundsRect.x2 = srcBounds.x2;
    srcBoundsRect.y1 = srcBounds.y1;
    srcBoundsRect.y2 = srcBounds.y2;
    RectI roi;
    if ( !dstBounds.intersect(srcBoundsRect, &roi) ) {
        return;
    }

    dstPixels += (roi.y1 - dstBounds.y1) * dstRowSize + (roi.x1 - dstBounds.x1) * dstPixelSize;
    srcPixels += (roi.y1 - srcBounds.y1) * srcRowSize + (roi.x1 - srcBounds.x1) * srcPixelSize;

    std::size_t roiRowSize = dstPixelSize * roi.width();

    // Copy the intersection row by row, advancing each pointer by its own stride
    for (int y = roi.y1; y < roi.y2; ++y,
         srcPixels += srcRowSize,
         dstPixels += dstRowSize) {
        std::memcpy(dstPixels, srcPixels, roiRowSize);
    }
} // FrameEntry::copy
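
Once both pointers are offset to the top-left of the intersection, the loop is a plain strided row copy: memcpy one row, then advance each pointer by its own stride, since src and dst may have different widths. As a standalone helper (names are illustrative):

#include <cstddef>
#include <cstring>

// Copies `rows` rows of `rowBytes` bytes each. src and dst advance by
// their own strides, so buffers with different bounds interoperate.
void copyRows(unsigned char *dst, std::size_t dstStride,
              const unsigned char *src, std::size_t srcStride,
              std::size_t rowBytes, int rows)
{
    for (int y = 0; y < rows; ++y, dst += dstStride, src += srcStride)
        std::memcpy(dst, src, rowBytes);
}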
Example #9
CompileStatus
mjit::Compiler::inlineNativeFunction(uint32_t argc, bool callingNew)
{
    if (!cx->typeInferenceEnabled())
        return Compile_InlineAbort;

    if (applyTricks == LazyArgsObj)
        return Compile_InlineAbort;

    FrameEntry *origCallee = frame.peek(-((int)argc + 2));
    FrameEntry *thisValue = frame.peek(-((int)argc + 1));
    types::TypeSet *thisTypes = analysis->poppedTypes(PC, argc);

    if (!origCallee->isConstant() || !origCallee->isType(JSVAL_TYPE_OBJECT))
        return Compile_InlineAbort;

    JSObject *callee = &origCallee->getValue().toObject();
    if (!callee->isFunction())
        return Compile_InlineAbort;

    /*
     * The callee must have the same parent as the script's global, otherwise
     * inference may not have accounted for any side effects correctly.
     */
    if (!globalObj || globalObj != &callee->global())
        return Compile_InlineAbort;

    Native native = callee->toFunction()->maybeNative();

    if (!native)
        return Compile_InlineAbort;

    JSValueType type = knownPushedType(0);
    JSValueType thisType = thisValue->isTypeKnown()
                           ? thisValue->getKnownType()
                           : JSVAL_TYPE_UNKNOWN;

    /*
     * Note: when adding new natives which operate on properties, add relevant
     * constraint generation to the behavior of TypeConstraintCall.
     */

    /* Handle natives that can be called either with or without 'new'. */

    if (native == js_Array && type == JSVAL_TYPE_OBJECT && globalObj) {
        if (argc == 0 || argc == 1)
            return compileArrayWithLength(argc);
        return compileArrayWithArgs(argc);
    }

    /* Remaining natives must not be called with 'new'. */
    if (callingNew)
        return Compile_InlineAbort;

    if (native == js::num_parseInt && argc >= 1) {
        FrameEntry *arg = frame.peek(-(int32_t)argc);
        JSValueType argType = arg->isTypeKnown() ? arg->getKnownType() : JSVAL_TYPE_UNKNOWN;

        if ((argType == JSVAL_TYPE_DOUBLE || argType == JSVAL_TYPE_INT32) &&
            type == JSVAL_TYPE_INT32) {
            return compileParseInt(argType, argc);
        }
    }

    if (argc == 0) {
        if ((native == js::array_pop || native == js::array_shift) && thisType == JSVAL_TYPE_OBJECT) {
            /*
             * Only handle pop/shift on dense arrays which have never been used
             * in an iterator --- when popping elements we don't account for
             * suppressing deleted properties in active iterators.
             *
             * Constraints propagating properties directly into the result
             * type set are generated by TypeConstraintCall during inference.
             */
            if (!thisTypes->hasObjectFlags(cx, types::OBJECT_FLAG_NON_DENSE_ARRAY |
                                           types::OBJECT_FLAG_ITERATED) &&
                !types::ArrayPrototypeHasIndexedProperty(cx, outerScript)) {
                bool packed = !thisTypes->hasObjectFlags(cx, types::OBJECT_FLAG_NON_PACKED_ARRAY);
                return compileArrayPopShift(thisValue, packed, native == js::array_pop);
            }
        }
    } else if (argc == 1) {
        FrameEntry *arg = frame.peek(-1);
        types::TypeSet *argTypes = frame.extra(arg).types;
        if (!argTypes)
            return Compile_InlineAbort;
        JSValueType argType = arg->isTypeKnown() ? arg->getKnownType() : JSVAL_TYPE_UNKNOWN;

        if (native == js_math_abs) {
            if (argType == JSVAL_TYPE_INT32 && type == JSVAL_TYPE_INT32)
                return compileMathAbsInt(arg);

            if (argType == JSVAL_TYPE_DOUBLE && type == JSVAL_TYPE_DOUBLE)
                return compileMathAbsDouble(arg);
        }
        if (native == js_math_floor && argType == JSVAL_TYPE_DOUBLE &&
            type == JSVAL_TYPE_INT32) {
            return compileRound(arg, Floor);
        }
        if (native == js_math_round && argType == JSVAL_TYPE_DOUBLE &&
            type == JSVAL_TYPE_INT32) {
            return compileRound(arg, Round);
        }
        if (native == js_math_sqrt && type == JSVAL_TYPE_DOUBLE &&
             masm.supportsFloatingPointSqrt() &&
            (argType == JSVAL_TYPE_INT32 || argType == JSVAL_TYPE_DOUBLE)) {
            return compileMathSqrt(arg);
        }
        if (native == js_str_charCodeAt && argType == JSVAL_TYPE_INT32 &&
            thisType == JSVAL_TYPE_STRING && type == JSVAL_TYPE_INT32) {
            return compileGetChar(thisValue, arg, GetCharCode);
        }
        if (native == js_str_charAt && argType == JSVAL_TYPE_INT32 &&
            thisType == JSVAL_TYPE_STRING && type == JSVAL_TYPE_STRING) {
            return compileGetChar(thisValue, arg, GetChar);
        }
        if (native == js::str_fromCharCode && argType == JSVAL_TYPE_INT32 &&
            type == JSVAL_TYPE_STRING) {
            return compileStringFromCode(arg);
        }
        if (native == js::array_push &&
            thisType == JSVAL_TYPE_OBJECT && type == JSVAL_TYPE_INT32) {
            /*
             * Constraints propagating properties into the 'this' object are
             * generated by TypeConstraintCall during inference.
             */
            if (!thisTypes->hasObjectFlags(cx, types::OBJECT_FLAG_NON_DENSE_ARRAY) &&
                !types::ArrayPrototypeHasIndexedProperty(cx, outerScript)) {
                return compileArrayPush(thisValue, arg);
            }
        }
        if (native == js::array_concat && argType == JSVAL_TYPE_OBJECT &&
            thisType == JSVAL_TYPE_OBJECT && type == JSVAL_TYPE_OBJECT &&
            !thisTypes->hasObjectFlags(cx, types::OBJECT_FLAG_NON_DENSE_ARRAY) &&
            !argTypes->hasObjectFlags(cx, types::OBJECT_FLAG_NON_DENSE_ARRAY)) {
            return compileArrayConcat(thisTypes, argTypes, thisValue, arg);
        }
    } else if (argc == 2) {
        FrameEntry *arg1 = frame.peek(-2);
        FrameEntry *arg2 = frame.peek(-1);

        JSValueType arg1Type = arg1->isTypeKnown() ? arg1->getKnownType() : JSVAL_TYPE_UNKNOWN;
        JSValueType arg2Type = arg2->isTypeKnown() ? arg2->getKnownType() : JSVAL_TYPE_UNKNOWN;

        if (native == js_math_pow && type == JSVAL_TYPE_DOUBLE &&
             masm.supportsFloatingPointSqrt() &&
            (arg1Type == JSVAL_TYPE_DOUBLE || arg1Type == JSVAL_TYPE_INT32) &&
            arg2Type == JSVAL_TYPE_DOUBLE && arg2->isConstant())
        {
            Value arg2Value = arg2->getValue();
            if (arg2Value.toDouble() == -0.5 || arg2Value.toDouble() == 0.5)
                return compileMathPowSimple(arg1, arg2);
        }
        if ((native == js_math_min || native == js_math_max)) {
            if (arg1Type == JSVAL_TYPE_INT32 && arg2Type == JSVAL_TYPE_INT32 &&
                type == JSVAL_TYPE_INT32) {
                return compileMathMinMaxInt(arg1, arg2, 
                        native == js_math_min ? Assembler::LessThan : Assembler::GreaterThan);
            }
            if ((arg1Type == JSVAL_TYPE_INT32 || arg1Type == JSVAL_TYPE_DOUBLE) &&
                (arg2Type == JSVAL_TYPE_INT32 || arg2Type == JSVAL_TYPE_DOUBLE) &&
                type == JSVAL_TYPE_DOUBLE) {
                return compileMathMinMaxDouble(arg1, arg2,
                        (native == js_math_min)
                        ? Assembler::DoubleLessThan
                        : Assembler::DoubleGreaterThan);
            }
        }
    }
    return Compile_InlineAbort;
}
Example #10
CompileStatus
mjit::Compiler::compileParseInt(JSValueType argType, uint32_t argc)
{
    bool needStubCall = false;

    if (argc > 1) {
        FrameEntry *arg = frame.peek(-(int32_t)argc + 1);

        if (!arg->isTypeKnown() || arg->getKnownType() != JSVAL_TYPE_INT32)
            return Compile_InlineAbort;

        if (arg->isConstant()) {
            int32_t base = arg->getValue().toInt32();
            if (base != 0 && base != 10)
                return Compile_InlineAbort;
        } else {
            RegisterID baseReg = frame.tempRegForData(arg);
            needStubCall = true;

            Jump isTen = masm.branch32(Assembler::Equal, baseReg, Imm32(10));
            Jump isNotZero = masm.branch32(Assembler::NotEqual, baseReg, Imm32(0));
            stubcc.linkExit(isNotZero, Uses(2 + argc));

            isTen.linkTo(masm.label(), &masm);
        }
    }

    if (argType == JSVAL_TYPE_INT32) {
        if (needStubCall) {
            stubcc.leave();
            stubcc.masm.move(Imm32(argc), Registers::ArgReg1);
            OOL_STUBCALL(stubs::SlowCall, REJOIN_FALLTHROUGH);
        }

        /* 
         * Stack looks like callee, this, arg1, arg2, argN.
         * First pop all args other than arg1.
         */
        frame.popn(argc - 1);
        /* "Shimmy" arg1 to the callee slot and pop this + arg1. */
        frame.shimmy(2);

        if (needStubCall) {
            stubcc.rejoin(Changes(1));
        }        
    } else {
        FrameEntry *arg = frame.peek(-(int32_t)argc);
        FPRegisterID fpScratchReg = frame.allocFPReg();
        FPRegisterID fpReg;
        bool allocate;

        DebugOnly<MaybeJump> notNumber = loadDouble(arg, &fpReg, &allocate);
        JS_ASSERT(!((MaybeJump)notNumber).isSet());

        masm.slowLoadConstantDouble(1, fpScratchReg);

        /* Slow path for NaN and numbers < 1. */
        Jump lessThanOneOrNan = masm.branchDouble(Assembler::DoubleLessThanOrUnordered, 
                                                  fpReg, fpScratchReg);
        stubcc.linkExit(lessThanOneOrNan, Uses(2 + argc));

        frame.freeReg(fpScratchReg);

        /* Truncate to integer, slow path if this overflows. */
        RegisterID reg = frame.allocReg();
        Jump overflow = masm.branchTruncateDoubleToInt32(fpReg, reg);
        stubcc.linkExit(overflow, Uses(2 + argc));

        if (allocate)
            frame.freeReg(fpReg);

        stubcc.leave();
        stubcc.masm.move(Imm32(argc), Registers::ArgReg1);
        OOL_STUBCALL(stubs::SlowCall, REJOIN_FALLTHROUGH);

        frame.popn(2 + argc);
        frame.pushTypedPayload(JSVAL_TYPE_INT32, reg);

        stubcc.rejoin(Changes(1));
    }

    return Compile_Okay;   
}
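
The double fast path relies on a semantic fact: for a finite double d with 1 <= d < 2^31, parseInt(d) agrees with truncation toward zero, which is what branchTruncateDoubleToInt32 computes. NaN and values below 1 go to the slow call because string conversion can switch to exponential notation there (parseInt(1e-7) === 1, for instance). A scalar restatement of the fast-path contract, under those assumptions:

#include <cassert>
#include <cstdint>

// Assumption: the caller has already branched to the slow path for NaN,
// d < 1, and values that overflow int32 -- the same guards as above.
int32_t parseIntFastPath(double d)
{
    assert(d >= 1.0 && d < 2147483648.0);
    return static_cast<int32_t>(d);   // C++ conversion truncates toward zero
}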
Example #11
void
FrameState::storeLocal(uint32 n, bool popGuaranteed, bool typeChange)
{
    FrameEntry *localFe = getLocal(n);
    bool cacheable = !eval && !escaping[n];

    if (!popGuaranteed && !cacheable) {
        JS_ASSERT_IF(base[localIndex(n)] && (!eval || n < script->nfixed),
                     entries[localIndex(n)].type.inMemory() &&
                     entries[localIndex(n)].data.inMemory());
        Address local(JSFrameReg, sizeof(JSStackFrame) + n * sizeof(Value));
        storeTo(peek(-1), local, false);
        forgetAllRegs(getLocal(n));
        localFe->resetSynced();
        return;
    }

    bool wasSynced = localFe->type.synced();

    /* Detect something like (x = x) which is a no-op. */
    FrameEntry *top = peek(-1);
    if (top->isCopy() && top->copyOf() == localFe) {
        JS_ASSERT(localFe->isCopied());
        return;
    }

    /* Completely invalidate the local variable. */
    if (localFe->isCopied()) {
        uncopy(localFe);
        if (!localFe->isCopied())
            forgetAllRegs(localFe);
    } else {
        forgetAllRegs(localFe);
    }

    localFe->resetUnsynced();

    /* Constants are easy to propagate. */
    if (top->isConstant()) {
        localFe->setCopyOf(NULL);
        localFe->setNotCopied();
        localFe->setConstant(Jsvalify(top->getValue()));
        return;
    }

    /*
     * When dealing with copies, there are two important invariants:
     *
     * 1) The backing store precedes all copies in the tracker.
     * 2) The backing store of a local is never a stack slot, UNLESS the local
     *    variable itself is a stack slot (blocks) that precedes the stack
     *    slot.
     *
     * If the top is a copy, and the second condition holds true, the local
     * can be rewritten as a copy of the original backing slot. If the first
     * condition does not hold, force it to hold by swapping in-place.
     */
    FrameEntry *backing = top;
    if (top->isCopy()) {
        backing = top->copyOf();
        JS_ASSERT(backing->trackerIndex() < top->trackerIndex());

        uint32 backingIndex = indexOfFe(backing);
        uint32 tol = uint32(spBase - base);
        if (backingIndex < tol || backingIndex < localIndex(n)) {
            /* local.idx < backing.idx means local cannot be a copy yet */
            if (localFe->trackerIndex() < backing->trackerIndex())
                swapInTracker(backing, localFe);
            localFe->setNotCopied();
            localFe->setCopyOf(backing);
            if (backing->isTypeKnown())
                localFe->setType(backing->getKnownType());
            else
                localFe->type.invalidate();
            localFe->data.invalidate();
            localFe->isNumber = backing->isNumber;
            return;
        }

        /*
         * If control flow lands here, then there was a bytecode sequence like
         *
         *  ENTERBLOCK 2
         *  GETLOCAL 1
         *  SETLOCAL 0
         *
         * The problem is slot N can't be backed by M if M could be popped
         * before N. We want a guarantee that when we pop M, even if it was
         * copied, it has no outstanding copies.
         * 
         * Because of |let| expressions, it's kind of hard to really know
         * whether a region on the stack will be popped all at once. Bleh!
         *
         * This should be rare except in browser code (and maybe even then),
         * but even so there's a quick workaround. We take all copies of the
         * backing fe, and redirect them to be copies of the destination.
         */
        FrameEntry *tos = tosFe();
        for (uint32 i = backing->trackerIndex() + 1; i < tracker.nentries; i++) {
            FrameEntry *fe = tracker[i];
            if (fe >= tos)
                continue;
            if (fe->isCopy() && fe->copyOf() == backing)
                fe->setCopyOf(localFe);
        }
    }
    backing->setNotCopied();
    
    /*
     * This is valid from the top->isCopy() path because we're guaranteed a
     * consistent ordering - all copies of |backing| are tracked after 
     * |backing|. Transitively, only one swap is needed.
     */
    if (backing->trackerIndex() < localFe->trackerIndex())
        swapInTracker(backing, localFe);

    /*
     * Move the backing store down - we spill registers here, but we could be
     * smarter and re-use the type reg.
     */
    RegisterID reg = tempRegForData(backing);
    localFe->data.setRegister(reg);
    moveOwnership(reg, localFe);

    if (typeChange) {
        if (backing->isTypeKnown()) {
            localFe->setType(backing->getKnownType());
        } else {
            RegisterID reg = tempRegForType(backing);
            localFe->type.setRegister(reg);
            moveOwnership(reg, localFe);
        }
    } else {
        if (!wasSynced)
            masm.storeTypeTag(ImmType(backing->getKnownType()), addressOf(localFe));
        localFe->type.setMemory();
    }

    if (!backing->isTypeKnown())
        backing->type.invalidate();
    backing->data.invalidate();
    backing->setCopyOf(localFe);
    backing->isNumber = localFe->isNumber;
    localFe->setCopied();

    if (!cacheable) {
        /* TODO: x64 optimization */
        if (!localFe->type.synced())
            syncType(localFe, addressOf(localFe), masm);
        if (!localFe->data.synced())
            syncData(localFe, addressOf(localFe), masm);
        forgetAllRegs(localFe);
        localFe->type.setMemory();
        localFe->data.setMemory();
    }

    JS_ASSERT(top->copyOf() == localFe);
}
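
The workaround in the middle of this function is a single redirection pass: every tracked entry still marked as a copy of the old backing slot is rewritten to be a copy of the destination local. In isolation, with hypothetical minimal types:

#include <cstddef>

struct MiniEntry { MiniEntry *copyOf; };  // NULL for a backing entry

void redirectCopies(MiniEntry *const *tracked, size_t n,
                    MiniEntry *backing, MiniEntry *dest)
{
    for (size_t i = 0; i < n; i++) {
        if (tracked[i]->copyOf == backing)
            tracked[i]->copyOf = dest;   // dest is the new backing store
    }
}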
Example #12
FrameEntry *
FrameState::uncopy(FrameEntry *original)
{
    JS_ASSERT(original->isCopied());

    /*
     * Copies have two critical invariants:
     *  1) The backing store precedes all copies in the tracker.
     *  2) The backing store of a copy cannot be popped from the stack
     *     while the copy is still live.
     *
     * Maintaining this invariant iteratively is kind of hard, so we choose
     * the "lowest" copy in the frame up-front.
     *
     * For example, if the stack is:
     *    [A, B, C, D]
     * And the tracker has:
     *    [A, D, C, B]
     *
     * If B, C, and D are copies of A - we will walk the tracker to the end
     * and select D, not B (see bug 583684).
     */
    uint32 firstCopy = InvalidIndex;
    FrameEntry *tos = tosFe();
    FrameEntry *bestFe = NULL;
    uint32 ncopies = 0;
    for (uint32 i = 0; i < tracker.nentries; i++) {
        FrameEntry *fe = tracker[i];
        if (fe >= tos)
            continue;
        if (fe->isCopy() && fe->copyOf() == original) {
            if (firstCopy == InvalidIndex) {
                firstCopy = i;
                bestFe = fe;
            } else if (fe < bestFe) {
                bestFe = fe;
            }
            ncopies++;
        }
    }

    if (!ncopies) {
        JS_ASSERT(firstCopy == InvalidIndex);
        JS_ASSERT(!bestFe);
        original->copied = false;
        return NULL;
    }

    JS_ASSERT(firstCopy != InvalidIndex);
    JS_ASSERT(bestFe);

    /* Mark all extra copies as copies of the new backing index. */
    bestFe->setCopyOf(NULL);
    if (ncopies > 1) {
        bestFe->setCopied();
        for (uint32 i = firstCopy; i < tracker.nentries; i++) {
            FrameEntry *other = tracker[i];
            if (other >= tos || other == bestFe)
                continue;

            /* The original must be tracked before copies. */
            JS_ASSERT(other != original);

            if (!other->isCopy() || other->copyOf() != original)
                continue;

            other->setCopyOf(bestFe);

            /*
             * This is safe even though we're mutating during iteration. There
             * are two cases. The first is that both indexes are <= i, and thus the swap
             * will never be observed. The other case is we're placing the
             * other FE such that it will be observed later. Luckily, copyOf()
             * will return != original, so nothing will happen.
             */
            if (other->trackerIndex() < bestFe->trackerIndex())
                swapInTracker(bestFe, other);
        }
    } else {
        bestFe->setNotCopied();
    }

    FrameEntry *fe = bestFe;

    /*
     * Switch the new backing store to the old backing store. During
     * this process we also necessarily make sure the copy can be
     * synced.
     */
    if (!original->isTypeKnown()) {
        /*
         * If the copy is unsynced, and the original is in memory,
         * give the original a register. We do this below too; it's
         * okay if it's spilled.
         */
        if (original->type.inMemory() && !fe->type.synced())
            tempRegForType(original);
        fe->type.inherit(original->type);
        if (fe->type.inRegister())
            moveOwnership(fe->type.reg(), fe);
    } else {
        JS_ASSERT(fe->isTypeKnown());
        JS_ASSERT(fe->getKnownType() == original->getKnownType());
    }
    if (original->data.inMemory() && !fe->data.synced())
        tempRegForData(original);
    fe->data.inherit(original->data);
    if (fe->data.inRegister())
        moveOwnership(fe->data.reg(), fe);

    return fe;
}
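
The selection loop at the top reduces to: among all live copies of original, keep the one at the lowest frame address, because the stack pops from the top and the lowest copy therefore outlives the rest. A compact restatement (entries are assumed to live in one contiguous frame array, so pointer comparison orders them):

#include <cstddef>

struct MiniEntry { MiniEntry *copyOf; };

// Returns the copy that should become the new backing entry, or NULL
// when `original` has no live copies.
MiniEntry *pickNewBacking(MiniEntry *const *tracked, size_t n,
                          MiniEntry *original)
{
    MiniEntry *best = NULL;
    for (size_t i = 0; i < n; i++) {
        MiniEntry *e = tracked[i];
        if (e->copyOf == original && (!best || e < best))
            best = e;
    }
    return best;
}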
Example #13
void
FrameState::pushCopyOf(uint32 index)
{
    FrameEntry *backing = entryFor(index);
    FrameEntry *fe = rawPush();
    fe->resetUnsynced();
    if (backing->isConstant()) {
        fe->setConstant(Jsvalify(backing->getValue()));
    } else {
        if (backing->isTypeKnown())
            fe->setType(backing->getKnownType());
        else
            fe->type.invalidate();
        fe->isNumber = backing->isNumber;
        fe->data.invalidate();
        if (backing->isCopy()) {
            backing = backing->copyOf();
            fe->setCopyOf(backing);
        } else {
            fe->setCopyOf(backing);
            backing->setCopied();
        }

        /* Maintain tracker ordering guarantees for copies. */
        JS_ASSERT(backing->isCopied());
        if (fe->trackerIndex() < backing->trackerIndex())
            swapInTracker(fe, backing);
    }
}
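
Note the else-branch's flattening rule: a copy of a copy records the root backing entry, never the intermediate, so copy chains stay at depth one and copyOf() never needs to be chased twice. Schematically:

struct MiniEntry { MiniEntry *copyOf; };  // NULL for a backing entry

// The invariant Example #13 maintains: at most one hop to the backing store.
MiniEntry *backingOf(MiniEntry *e)
{
    return e->copyOf ? e->copyOf : e;
}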
Example #14
void
FrameState::sync(Assembler &masm, Uses uses) const
{
    /*
     * Keep track of free registers using a bitmask. If we have to drop into
     * syncFancy(), then this mask will help avoid eviction.
     */
    Registers avail(freeRegs);
    Registers temp(Registers::TempRegs);

    FrameEntry *tos = tosFe();
    FrameEntry *bottom = tos - uses.nuses;

    if (inTryBlock)
        bottom = NULL;

    for (uint32 i = tracker.nentries - 1; i < tracker.nentries; i--) {
        FrameEntry *fe = tracker[i];
        if (fe >= tos)
            continue;

        Address address = addressOf(fe);

        if (!fe->isCopy()) {
            /* Keep track of registers that can be clobbered. */
            if (fe->data.inRegister())
                avail.putReg(fe->data.reg());
            if (fe->type.inRegister())
                avail.putReg(fe->type.reg());

            /* Sync. */
            if (!fe->data.synced() && (fe->data.inRegister() || fe >= bottom)) {
                syncData(fe, address, masm);
                if (fe->isConstant())
                    continue;
            }
            if (!fe->type.synced() && (fe->type.inRegister() || fe >= bottom))
                syncType(fe, addressOf(fe), masm);
        } else if (fe >= bottom) {
            FrameEntry *backing = fe->copyOf();
            JS_ASSERT(backing != fe);
            JS_ASSERT(!backing->isConstant() && !fe->isConstant());

            /*
             * If the copy is backed by something not in a register, fall back
             * to a slower sync algorithm.
             */
            if ((!fe->type.synced() && !backing->type.inRegister()) ||
                (!fe->data.synced() && !backing->data.inRegister())) {
                syncFancy(masm, avail, i, bottom);
                return;
            }

            if (!fe->type.synced()) {
                /* :TODO: we can do better, the type is learned for all copies. */
                if (fe->isTypeKnown()) {
                    //JS_ASSERT(fe->getTypeTag() == backing->getTypeTag());
                    masm.storeTypeTag(ImmType(fe->getKnownType()), address);
                } else {
                    masm.storeTypeTag(backing->type.reg(), address);
                }
            }

            if (!fe->data.synced())
                masm.storePayload(backing->data.reg(), address);
        }
    }
}
Example #15
void
FrameState::allocForBinary(FrameEntry *lhs, FrameEntry *rhs, JSOp op, BinaryAlloc &alloc,
                           bool needsResult)
{
    FrameEntry *backingLeft = lhs;
    FrameEntry *backingRight = rhs;

    if (backingLeft->isCopy())
        backingLeft = backingLeft->copyOf();
    if (backingRight->isCopy())
        backingRight = backingRight->copyOf();

    /*
     * For each remat piece of both FEs, if a register is assigned, get it now
     * and pin it. This is safe - constants and known types will be avoided.
     */
    if (AllocHelper(backingLeft->type, alloc.lhsType))
        pinReg(alloc.lhsType.reg());
    if (AllocHelper(backingLeft->data, alloc.lhsData))
        pinReg(alloc.lhsData.reg());
    if (AllocHelper(backingRight->type, alloc.rhsType))
        pinReg(alloc.rhsType.reg());
    if (AllocHelper(backingRight->data, alloc.rhsData))
        pinReg(alloc.rhsData.reg());

    /* For each type without a register, give it a register if needed. */
    if (!alloc.lhsType.isSet() && backingLeft->type.inMemory()) {
        alloc.lhsType = tempRegForType(lhs);
        pinReg(alloc.lhsType.reg());
    }
    if (!alloc.rhsType.isSet() && backingRight->type.inMemory()) {
        alloc.rhsType = tempRegForType(rhs);
        pinReg(alloc.rhsType.reg());
    }

    bool commu;
    switch (op) {
      case JSOP_EQ:
      case JSOP_GT:
      case JSOP_GE:
      case JSOP_LT:
      case JSOP_LE:
        /* fall through */
      case JSOP_ADD:
      case JSOP_MUL:
      case JSOP_SUB:
        commu = true;
        break;

      case JSOP_DIV:
        commu = false;
        break;

      default:
        JS_NOT_REACHED("unknown op");
        return;
    }

    /*
     * Data is a little more complicated. If the op is MUL, not all CPUs
     * have multiplication on immediates, so a register is needed. Also,
     * if the op is not commutative, the LHS _must_ be in a register.
     */
    JS_ASSERT_IF(lhs->isConstant(), !rhs->isConstant());
    JS_ASSERT_IF(rhs->isConstant(), !lhs->isConstant());

    if (!alloc.lhsData.isSet()) {
        if (backingLeft->data.inMemory()) {
            alloc.lhsData = tempRegForData(lhs);
            pinReg(alloc.lhsData.reg());
        } else if (op == JSOP_MUL || !commu) {
            JS_ASSERT(lhs->isConstant());
            alloc.lhsData = allocReg();
            alloc.extraFree = alloc.lhsData;
            masm.move(Imm32(lhs->getValue().toInt32()), alloc.lhsData.reg());
        }
    }
    if (!alloc.rhsData.isSet()) {
        if (backingRight->data.inMemory()) {
            alloc.rhsData = tempRegForData(rhs);
            pinReg(alloc.rhsData.reg());
        } else if (op == JSOP_MUL) {
            JS_ASSERT(rhs->isConstant());
            alloc.rhsData = allocReg();
            alloc.extraFree = alloc.rhsData;
            masm.move(Imm32(rhs->getValue().toInt32()), alloc.rhsData.reg());
        }
    }

    alloc.lhsNeedsRemat = false;
    alloc.rhsNeedsRemat = false;

    if (!needsResult)
        goto skip;

    /*
     * Now a result register is needed. It must contain a mutable copy of the
     * LHS. For commutative operations, we can opt to use the RHS instead. At
     * this point, if for some reason either must be in a register, that has
     * already been guaranteed at this point.
     */
    if (!freeRegs.empty()) {
        /* Free reg - just grab it. */
        alloc.result = allocReg();
        if (!alloc.lhsData.isSet()) {
            JS_ASSERT(alloc.rhsData.isSet());
            JS_ASSERT(commu);
            masm.move(alloc.rhsData.reg(), alloc.result);
            alloc.resultHasRhs = true;
        } else {
            masm.move(alloc.lhsData.reg(), alloc.result);
            alloc.resultHasRhs = false;
        }
    } else {
        /*
         * No free regs. Find a good candidate to re-use. Best candidates don't
         * require syncs on the inline path.
         */
        bool leftInReg = backingLeft->data.inRegister();
        bool rightInReg = backingRight->data.inRegister();
        bool leftSynced = backingLeft->data.synced();
        bool rightSynced = backingRight->data.synced();
        if (!commu || (leftInReg && (leftSynced || (!rightInReg || !rightSynced)))) {
            JS_ASSERT(backingLeft->data.inRegister() || !commu);
            JS_ASSERT_IF(backingLeft->data.inRegister(),
                         backingLeft->data.reg() == alloc.lhsData.reg());
            if (backingLeft->data.inRegister()) {
                alloc.result = backingLeft->data.reg();
                unpinReg(alloc.result);
                takeReg(alloc.result);
                alloc.lhsNeedsRemat = true;
            } else {
                /* For now, just spill... */
                alloc.result = allocReg();
                masm.move(alloc.lhsData.reg(), alloc.result);
            }
            alloc.resultHasRhs = false;
        } else {
            JS_ASSERT(commu);
            JS_ASSERT(!leftInReg || (rightInReg && rightSynced));
            alloc.result = backingRight->data.reg();
            unpinReg(alloc.result);
            takeReg(alloc.result);
            alloc.resultHasRhs = true;
            alloc.rhsNeedsRemat = true;
        }
    }

  skip:
    /* Unpin everything that was pinned. */
    if (backingLeft->type.inRegister())
        unpinReg(backingLeft->type.reg());
    if (backingRight->type.inRegister())
        unpinReg(backingRight->type.reg());
    if (backingLeft->data.inRegister())
        unpinReg(backingLeft->data.reg());
    if (backingRight->data.inRegister())
        unpinReg(backingRight->data.reg());
}
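
The no-free-registers fallback encodes one decision: the result must be built from the LHS unless the op is commutative and stealing the RHS register avoids a sync on the inline path. The predicate, extracted with the same four booleans computed above:

enum ResultSide { UseLhs, UseRhs };

ResultSide chooseResultSide(bool commu, bool leftInReg, bool leftSynced,
                            bool rightInReg, bool rightSynced)
{
    // Non-commutative ops must produce the LHS; otherwise prefer the side
    // whose register can be taken without forcing a sync.
    if (!commu || (leftInReg && (leftSynced || !rightInReg || !rightSynced)))
        return UseLhs;
    return UseRhs;
}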
Example #16
void DWARFDebugFrame::parse(DataExtractor Data) {
    uint32_t Offset = 0;

    while (Data.isValidOffset(Offset)) {
        uint32_t StartOffset = Offset;

        bool IsDWARF64 = false;
        uint64_t Length = Data.getU32(&Offset);
        uint64_t Id;

        if (Length == UINT32_MAX) {
            // DWARF-64 is distinguished by the first 32 bits of the initial length
            // field being 0xffffffff. Then, the next 64 bits are the actual entry
            // length.
            IsDWARF64 = true;
            Length = Data.getU64(&Offset);
        }

        // At this point, Offset points to the next field after Length.
        // Length is the structure size excluding itself. Compute an offset one
        // past the end of the structure (needed to know how many instructions to
        // read).
        // TODO: For honest DWARF64 support, DataExtractor will have to treat
        //       offset_ptr as uint64_t*
        uint32_t EndStructureOffset = Offset + static_cast<uint32_t>(Length);

        // The Id field's size depends on the DWARF format
        Id = Data.getUnsigned(&Offset, IsDWARF64 ? 8 : 4);
        bool IsCIE = ((IsDWARF64 && Id == DW64_CIE_ID) || Id == DW_CIE_ID);

        FrameEntry *Entry = 0;
        if (IsCIE) {
            // Note: this is specifically DWARFv3 CIE header structure. It was
            // changed in DWARFv4. We currently don't support reading DWARFv4
            // here because LLVM itself does not emit it (and LLDB doesn't
            // support it either).
            uint8_t Version = Data.getU8(&Offset);
            const char *Augmentation = Data.getCStr(&Offset);
            uint64_t CodeAlignmentFactor = Data.getULEB128(&Offset);
            int64_t DataAlignmentFactor = Data.getSLEB128(&Offset);
            uint64_t ReturnAddressRegister = Data.getULEB128(&Offset);

            Entry = new CIE(Data, StartOffset, Length, Version,
                            StringRef(Augmentation), CodeAlignmentFactor,
                            DataAlignmentFactor, ReturnAddressRegister);
        } else {
            // FDE
            uint64_t CIEPointer = Id;
            uint64_t InitialLocation = Data.getAddress(&Offset);
            uint64_t AddressRange = Data.getAddress(&Offset);

            Entry = new FDE(Data, StartOffset, Length, CIEPointer,
                            InitialLocation, AddressRange);
        }

        assert(Entry && "Expected Entry to be populated with CIE or FDE");
        Entry->parseInstructions(&Offset, EndStructureOffset);

        if (Offset == EndStructureOffset) {
            // Entry instructions parsed successfully.
            Entries.push_back(Entry);
        } else {
            std::string Str;
            raw_string_ostream OS(Str);
            OS << format("Parsing entry instructions at %lx failed",
                         Entry->getOffset());
            report_fatal_error(Str);
        }
    }
}
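
The length handling at the top of the loop follows the DWARF initial-length convention: a 32-bit value of 0xffffffff announces the 64-bit format, and the true length follows in the next eight bytes. A freestanding sketch (readU32/readU64 are hypothetical helpers reading host-endian data, not LLVM's DataExtractor API):

#include <cstdint>
#include <cstring>

static uint32_t readU32(const uint8_t *p) { uint32_t v; std::memcpy(&v, p, 4); return v; }
static uint64_t readU64(const uint8_t *p) { uint64_t v; std::memcpy(&v, p, 8); return v; }

// Reads a DWARF initial-length field, advancing *p past it.
uint64_t readInitialLength(const uint8_t **p, bool *isDWARF64)
{
    uint64_t length = readU32(*p);
    *p += 4;
    *isDWARF64 = (length == UINT32_MAX);
    if (*isDWARF64) {
        length = readU64(*p);   // the actual 64-bit entry length
        *p += 8;
    }
    return length;
}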