Code example #1
bool InPlaceAbstractState::merge(BasicBlock* from, BasicBlock* to)
{
    if (verbose)
        dataLog("   Merging from ", pointerDump(from), " to ", pointerDump(to), "\n");
    ASSERT(from->variablesAtTail.numberOfArguments() == to->variablesAtHead.numberOfArguments());
    ASSERT(from->variablesAtTail.numberOfLocals() == to->variablesAtHead.numberOfLocals());

    bool changed = false;

    changed |= checkAndSet(
        to->cfaStructureClobberStateAtHead,
        DFG::merge(from->cfaStructureClobberStateAtTail, to->cfaStructureClobberStateAtHead));

    switch (m_graph.m_form) {
    case ThreadedCPS: {
        for (size_t argument = 0; argument < from->variablesAtTail.numberOfArguments(); ++argument) {
            AbstractValue& destination = to->valuesAtHead.argument(argument);
            changed |= mergeVariableBetweenBlocks(destination, from->valuesAtTail.argument(argument), to->variablesAtHead.argument(argument), from->variablesAtTail.argument(argument));
        }

        for (size_t local = 0; local < from->variablesAtTail.numberOfLocals(); ++local) {
            AbstractValue& destination = to->valuesAtHead.local(local);
            changed |= mergeVariableBetweenBlocks(destination, from->valuesAtTail.local(local), to->variablesAtHead.local(local), from->variablesAtTail.local(local));
        }
        break;
    }

    case SSA: {
        for (size_t i = from->valuesAtTail.size(); i--;)
            changed |= to->valuesAtHead[i].merge(from->valuesAtTail[i]);

        HashSet<Node*>::iterator iter = to->ssa->liveAtHead.begin();
        HashSet<Node*>::iterator end = to->ssa->liveAtHead.end();
        for (; iter != end; ++iter) {
            Node* node = *iter;
            if (verbose)
                dataLog("      Merging for ", node, ": from ", from->ssa->valuesAtTail.find(node)->value, " to ", to->ssa->valuesAtHead.find(node)->value, "\n");
            changed |= to->ssa->valuesAtHead.find(node)->value.merge(
                from->ssa->valuesAtTail.find(node)->value);
            if (verbose)
                dataLog("         Result: ", to->ssa->valuesAtHead.find(node)->value, "\n");
        }
        break;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }

    if (!to->cfaHasVisited)
        changed = true;

    if (verbose)
        dataLog("      Will revisit: ", changed, "\n");
    to->cfaShouldRevisit |= changed;

    return changed;
}
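Every snippet on this page funnels through WTF's pointerDump() helper, so it is worth sketching what it does. The sketch below is an assumption inferred from the call sites, not a copy of WTF/wtf/PrintStream.h: the helper wraps a possibly-null pointer in a small dumpable object, so printing it forwards to the pointee's dump() when the pointer is non-null and prints a null marker otherwise. That is why calls like pointerDump(from) above are safe even for pointers that may be null.

// Sketch only; the real WTF helper may differ in naming and details.
template<typename T>
class PointerDumpSketch {
public:
    explicit PointerDumpSketch(const T* ptr)
        : m_ptr(ptr)
    {
    }

    // PrintStream::print() calls dump() on any dumpable object passed to it.
    void dump(PrintStream& out) const
    {
        if (m_ptr)
            out.print(*m_ptr); // Forwards to T's own dump(PrintStream&).
        else
            out.print("(null)");
    }

private:
    const T* m_ptr;
};

template<typename T>
PointerDumpSketch<T> pointerDumpSketch(const T* ptr)
{
    return PointerDumpSketch<T>(ptr);
}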
Code example #2
void Arg::dump(PrintStream& out) const
{
    switch (m_kind) {
    case Invalid:
        out.print("<invalid>");
        return;
    case Tmp:
        out.print(tmp());
        return;
    case Imm:
        out.print("$", m_offset);
        return;
    case Imm64:
        out.printf("$0x%llx", static_cast<long long unsigned>(m_offset));
        return;
    case Addr:
        if (offset())
            out.print(offset());
        out.print("(", base(), ")");
        return;
    case Index:
        if (offset())
            out.print(offset());
        out.print("(", base(), ",", index());
        if (scale() != 1)
            out.print(",", scale());
        out.print(")");
        return;
    case Stack:
        if (offset())
            out.print(offset());
        out.print("(", pointerDump(stackSlot()), ")");
        return;
    case CallArg:
        if (offset())
            out.print(offset());
        out.print("(callArg)");
        return;
    case RelCond:
        out.print(asRelationalCondition());
        return;
    case ResCond:
        out.print(asResultCondition());
        return;
    case DoubleCond:
        out.print(asDoubleCondition());
        return;
    case Special:
        out.print(pointerDump(special()));
        return;
    }

    RELEASE_ASSERT_NOT_REACHED();
}
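The switch above emits AT&T-style operand text: immediates as $value, addresses as offset(base) or offset(base,index,scale). A hypothetical driver for illustration (the Arg::imm/Arg::addr factory names are assumed from the Air API, and the exact register spelling depends on how Tmp dumps itself):

// Illustration only; the output shapes follow the switch above.
void dumpArgExamples(PrintStream& out)
{
    Arg::imm(42).dump(out);                                  // "$42"
    out.print(" ");
    Arg::addr(Tmp(GPRInfo::callFrameRegister), 8).dump(out); // e.g. "8(%rbp)" on x86-64
    out.print("\n");
}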
Code example #3
File: FTLJITCode.cpp Project: rodrigo-speller/webkit
JITCode::~JITCode()
{
    if (FTL::shouldShowDisassembly()) {
        dataLog("Destroying FTL JIT code at ");
        CommaPrinter comma;
        for (auto& handle : m_handles)
            dataLog(comma, pointerDump(handle.get()));
        dataLog(comma, pointerDump(m_arityCheckEntrypoint.executableMemory()));
        dataLog(comma, pointerDump(m_exitThunks.executableMemory()));
        dataLog("\n");
    }
}
Code example #4
File: DFGExitProfile.cpp Project: eocanha/webkit
bool ExitProfile::add(const ConcurrentJSLocker&, CodeBlock* owner, const FrequentExitSite& site)
{
    ASSERT(site.jitType() != ExitFromAnything);

    CODEBLOCK_LOG_EVENT(owner, "frequentExit", (site));

    if (Options::verboseExitProfile())
        dataLog(pointerDump(owner), ": Adding exit site: ", site, "\n");

    // If we've never seen any frequent exits then create the list and put this site
    // into it.
    if (!m_frequentExitSites) {
        m_frequentExitSites = std::make_unique<Vector<FrequentExitSite>>();
        m_frequentExitSites->append(site);
        return true;
    }

    // Don't add it if it's already there. This is O(n), but that's OK, because we
    // know that the total number of places where code exits tends to not be large,
    // and this code is only used when recompilation is triggered.
    for (unsigned i = 0; i < m_frequentExitSites->size(); ++i) {
        if (m_frequentExitSites->at(i) == site)
            return false;
    }

    m_frequentExitSites->append(site);
    return true;
}
Code example #5
File: CodeBlockSet.cpp Project: caiolima/webkit
void CodeBlockSet::dump(PrintStream& out) const
{
    CommaPrinter comma;
    out.print("{old = [");
    for (CodeBlock* codeBlock : m_oldCodeBlocks)
        out.print(comma, pointerDump(codeBlock));
    out.print("], new = [");
    comma = CommaPrinter();
    for (CodeBlock* codeBlock : m_newCodeBlocks)
        out.print(comma, pointerDump(codeBlock));
    out.print("], currentlyExecuting = [");
    comma = CommaPrinter();
    for (CodeBlock* codeBlock : m_currentlyExecuting)
        out.print(comma, pointerDump(codeBlock));
    out.print("]}");
}
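The comma = CommaPrinter() reassignments above are the idiomatic way to reset separator state between lists: a CommaPrinter prints nothing the first time it is dumped and its separator (", " by default) on every later use. A minimal illustration, assuming that behavior:

void commaPrinterExample(PrintStream& out)
{
    CommaPrinter comma;
    out.print(comma, "a");  // Prints "a".
    out.print(comma, "b");  // Prints ", b".
    comma = CommaPrinter(); // Fresh state for the next list.
    out.print(comma, "c");  // Prints "c", with no leading separator.
}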
Code example #6
File: B3Value.cpp Project: happyyang/webkit
void Value::deepDump(PrintStream& out) const
{
    out.print(m_type, " ", *this, " = ", m_opcode);

    out.print("(");
    CommaPrinter comma;
    for (Value* child : children())
        out.print(comma, pointerDump(child));

    if (m_origin)
        out.print(comma, m_origin);

    {
        StringPrintStream stringOut;
        dumpMeta(stringOut);
        CString string = stringOut.toCString();
        if (string.length())
            out.print(comma, string);
    }

    {
        CString string = toCString(effects());
        if (string.length())
            out.print(comma, string);
    }

    out.print(")");
}
Code example #7
File: FTLJITCode.cpp Project: sailei1/webkit
JITCode::~JITCode()
{
    if (FTL::shouldDumpDisassembly()) {
        dataLog("Destroying FTL JIT code at ");
        CommaPrinter comma;
#if FTL_USES_B3
        dataLog(comma, m_b3Code);
        dataLog(comma, m_arityCheckEntrypoint);
#else
        for (auto& handle : m_handles)
            dataLog(comma, pointerDump(handle.get()));
        dataLog(comma, pointerDump(m_arityCheckEntrypoint.executableMemory()));
        dataLog(comma, pointerDump(m_exitThunks.executableMemory()));
        dataLog("\n");
#endif
    }
}
Code example #8
void Event::dump(PrintStream& out) const
{
    out.print(m_time, ": ", pointerDump(m_bytecodes));
    if (m_compilation)
        out.print(" ", *m_compilation);
    out.print(": ", m_summary);
    if (m_detail.length())
        out.print(" (", m_detail, ")");
}
Code example #9
File: VM.cpp Project: llelectronics/lls-qtwebkit
void logSanitizeStack(VM* vm)
{
    if (Options::verboseSanitizeStack() && vm->topCallFrame) {
        int dummy;
        dataLog(
            "Sanitizing stack with top call frame at ", RawPointer(vm->topCallFrame),
            ", current stack pointer at ", RawPointer(&dummy), ", in ",
            pointerDump(vm->topCallFrame->codeBlock()), " and last code origin = ",
            vm->topCallFrame->codeOrigin(), "\n");
    }
}
Code example #10
File: DFGLazyNode.cpp Project: biddyweb/switch-oss
void LazyNode::dump(PrintStream& out) const
{
    if (!*this)
        out.print("LazyNode:0");
    else if (isNode())
        out.print("LazyNode:@", asNode()->index());
    else
        out.print("LazyNode:FrozenValue(", Graph::opName(op()), ", ", pointerDump(asValue()), ")");
}
Code example #11
void JIT::privateCompileClosureCall(CallLinkInfo* callLinkInfo, CodeBlock* calleeCodeBlock, Structure* expectedStructure, ExecutableBase* expectedExecutable, MacroAssemblerCodePtr codePtr)
{
    JumpList slowCases;

    slowCases.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
    slowCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(expectedStructure)));
    slowCases.append(branchPtr(NotEqual, Address(regT0, JSFunction::offsetOfExecutable()), TrustedImmPtr(expectedExecutable)));
    
    loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT1);
    emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
    
    Call call = nearCall();
    Jump done = jump();
    
    slowCases.link(this);
    move(TrustedImmPtr(callLinkInfo->callReturnLocation.executableAddress()), regT2);
    restoreReturnAddressBeforeReturn(regT2);
    Jump slow = jump();
    
    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
    
    patchBuffer.link(call, FunctionPtr(codePtr.executableAddress()));
    patchBuffer.link(done, callLinkInfo->hotPathOther.labelAtOffset(0));
    patchBuffer.link(slow, CodeLocationLabel(m_vm->getCTIStub(virtualCallThunkGenerator).code()));
    
    RefPtr<ClosureCallStubRoutine> stubRoutine = adoptRef(new ClosureCallStubRoutine(
        FINALIZE_CODE(
            patchBuffer,
            ("Baseline closure call stub for %s, return point %p, target %p (%s)",
                toCString(*m_codeBlock).data(),
                callLinkInfo->hotPathOther.labelAtOffset(0).executableAddress(),
                codePtr.executableAddress(),
                toCString(pointerDump(calleeCodeBlock)).data())),
        *m_vm, m_codeBlock->ownerExecutable(), expectedStructure, expectedExecutable,
        callLinkInfo->codeOrigin));
    
    RepatchBuffer repatchBuffer(m_codeBlock);
    
    repatchBuffer.replaceWithJump(
        RepatchBuffer::startOfBranchPtrWithPatchOnRegister(callLinkInfo->hotPathBegin),
        CodeLocationLabel(stubRoutine->code().code()));
    repatchBuffer.relink(callLinkInfo->callReturnLocation, m_vm->getCTIStub(virtualCallThunkGenerator).code());
    
    callLinkInfo->stub = stubRoutine.release();
}
Code example #12
File: CallLinkInfo.cpp Project: morpheus502/webkit
void CallLinkInfo::unlink(RepatchBuffer& repatchBuffer)
{
    ASSERT(isLinked());
    
    if (Options::showDisassembly())
        dataLog("Unlinking call from ", callReturnLocation, " to ", pointerDump(repatchBuffer.codeBlock()), "\n");

    repatchBuffer.revertJumpReplacementToBranchPtrWithPatch(RepatchBuffer::startOfBranchPtrWithPatchOnRegister(hotPathBegin), static_cast<MacroAssembler::RegisterID>(calleeGPR), 0);
    repatchBuffer.relink(
        callReturnLocation,
        repatchBuffer.codeBlock()->vm()->getCTIStub(linkThunkGeneratorFor(
            (callType == Construct || callType == ConstructVarargs)? CodeForConstruct : CodeForCall,
            isFTL ? MustPreserveRegisters : RegisterPreservationNotRequired)).code());
    hasSeenShouldRepatch = false;
    callee.clear();
    stub.clear();

    // It will be on a list if the callee has a code block.
    if (isOnList())
        remove();
}
Code example #13
File: AirCode.cpp Project: 121Room/webkit
void Code::dump(PrintStream& out) const
{
    for (BasicBlock* block : *this)
        out.print(deepDump(block));
    if (stackSlots().size()) {
        out.print("Stack slots:\n");
        for (StackSlot* slot : stackSlots())
            out.print("    ", pointerDump(slot), ": ", deepDump(slot), "\n");
    }
    if (specials().size()) {
        out.print("Specials:\n");
        for (Special* special : specials())
            out.print("    ", deepDump(special), "\n");
    }
    if (m_frameSize)
        out.print("Frame size: ", m_frameSize, "\n");
    if (m_callArgAreaSize)
        out.print("Call arg area size: ", m_callArgAreaSize, "\n");
    if (m_calleeSaveRegisters.size())
        out.print("Callee saves: ", m_calleeSaveRegisters, "\n");
}
Code example #14
void ExitValue::dumpInContext(PrintStream& out, DumpContext* context) const
{
    switch (kind()) {
    case InvalidExitValue:
        out.print("Invalid");
        return;
    case ExitValueDead:
        out.print("Dead");
        return;
    case ExitValueArgument:
        out.print("Argument(", exitArgument(), ")");
        return;
    case ExitValueConstant:
        out.print("Constant(", inContext(constant(), context), ")");
        return;
    case ExitValueInJSStack:
        out.print("InJSStack:r", virtualRegister());
        return;
    case ExitValueInJSStackAsInt32:
        out.print("InJSStackAsInt32:r", virtualRegister());
        return;
    case ExitValueInJSStackAsInt52:
        out.print("InJSStackAsInt52:r", virtualRegister());
        return;
    case ExitValueInJSStackAsDouble:
        out.print("InJSStackAsDouble:r", virtualRegister());
        return;
    case ExitValueArgumentsObjectThatWasNotCreated:
        out.print("ArgumentsObjectThatWasNotCreated");
        return;
    case ExitValueRecovery:
        out.print("Recovery(", recoveryOpcode(), ", arg", leftRecoveryArgument(), ", arg", rightRecoveryArgument(), ", ", recoveryFormat(), ")");
        return;
    case ExitValueMaterializeNewObject:
        out.print("Materialize(", pointerDump(objectMaterialization()), ")");
        return;
    }
    
    RELEASE_ASSERT_NOT_REACHED();
}
Code example #15
bool simplifyCFG(Code& code)
{
    const bool verbose = false;

    PhaseScope phaseScope(code, "simplifyCFG");

    // We have three easy simplification rules:
    //
    // 1) If a successor is a block that just jumps to another block, then jump directly to
    //    that block.
    //
    // 2) If all successors are the same and the operation has no effects, then use a jump
    //    instead.
    //
    // 3) If you jump to a block that is not you and has one predecessor, then merge.
    //
    // Note that because of the first rule, this phase may introduce critical edges. That's fine.
    // If you need broken critical edges, then you have to break them yourself.

    bool result = false;
    for (;;) {
        if (verbose) {
            dataLog("Air before an iteration of simplifyCFG:\n");
            dataLog(code);
        }

        bool changed = false;
        for (BasicBlock* block : code) {
            // We rely on predecessors being conservatively correct. Verify this here.
            if (shouldValidateIRAtEachPhase()) {
                for (BasicBlock* block : code) {
                    for (BasicBlock* successor : block->successorBlocks())
                        RELEASE_ASSERT(successor->containsPredecessor(block));
                }
            }

            // We don't care about blocks that don't have successors.
            if (!block->numSuccessors())
                continue;

            // First check if any of the successors of this block can be forwarded over.
            for (BasicBlock*& successor : block->successorBlocks()) {
                if (successor != block
                    && successor->size() == 1
                    && successor->last().opcode == Jump) {
                    BasicBlock* newSuccessor = successor->successorBlock(0);
                    if (newSuccessor != successor) {
                        if (verbose) {
                            dataLog(
                                "Replacing ", pointerDump(block), "->", pointerDump(successor),
                                " with ", pointerDump(block), "->", pointerDump(newSuccessor), "\n");
                        }
                        // Note that we do not do replacePredecessor() because the block we're
                        // skipping will still have newSuccessor as its successor.
                        newSuccessor->addPredecessor(block);
                        successor = newSuccessor;
                        changed = true;
                    }
                }
            }

            // Now check if the block's terminal can be replaced with a jump.
            if (block->numSuccessors() > 1) {
                // The terminal must not have weird effects.
                if (!block->last().hasArgEffects()
                    && !block->last().hasNonArgNonControlEffects()) {
                    // All of the successors must be the same.
                    bool allSame = true;
                    BasicBlock* firstSuccessor = block->successorBlock(0);
                    for (unsigned i = 1; i < block->numSuccessors(); ++i) {
                        if (block->successorBlock(i) != firstSuccessor) {
                            allSame = false;
                            break;
                        }
                    }
                    if (allSame) {
                        if (verbose)
                            dataLog("Changing ", pointerDump(block), "'s terminal to a Jump.\n");
                        block->last() = Inst(Jump, block->last().origin);
                        block->successors().resize(1);
                        block->successors()[0].frequency() = FrequencyClass::Normal;
                        changed = true;
                    }
                }
            }

            // Finally handle jumps to a block with one predecessor.
            if (block->numSuccessors() == 1) {
                BasicBlock* successor = block->successorBlock(0);
                if (successor != block && successor->numPredecessors() == 1) {
                    RELEASE_ASSERT(successor->predecessor(0) == block);

                    // We can merge the two blocks because the predecessor only jumps to the successor
                    // and the successor is only reachable from the predecessor.

                    // Remove the terminal.
                    Value* origin = block->insts().takeLast().origin;

                    // Append the full contents of the successor to the predecessor.
                    block->insts().reserveCapacity(block->size() + successor->size());
                    for (Inst& inst : *successor)
                        block->appendInst(WTFMove(inst));

                    // Make sure that our successors are the successor's successors.
                    block->successors() = WTFMove(successor->successors());

                    // Make sure that the successor has nothing left in it except an oops.
                    successor->resize(1);
                    successor->last() = Inst(Oops, origin);
                    successor->successors().clear();

                    // Ensure that the predecessors of block's new successors know what's up.
                    for (BasicBlock* newSuccessor : block->successorBlocks())
                        newSuccessor->replacePredecessor(successor, block);

                    if (verbose)
                        dataLog("Merged ", pointerDump(block), "->", pointerDump(successor), "\n");
                    changed = true;
                }
            }
        }

        if (!changed)
            break;
        result = true;
        code.resetReachability();
    }

    return result;
}
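To make rule 1 (jump forwarding) concrete, here is a hand-drawn before/after on a hypothetical four-block CFG; the block names are invented for illustration:

  Before:                         After:
    #0: Branch -> #1, #2            #0: Branch -> #3, #2
    #1: Jump -> #3                  #1: Jump -> #3   (may become unreachable)
    #2: ...                         #2: ...
    #3: Ret                         #3: Ret          (#0 added as a predecessor)

Note that #1 keeps #3 as its successor, which is why the loop calls addPredecessor() rather than replacePredecessor(); the code.resetReachability() call at the end of each iteration is what eventually drops blocks like #1 once nothing reaches them.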
Code example #16
File: AirAllocateStack.cpp Project: TigerWFH/webkit
void allocateStack(Code& code)
{
    PhaseScope phaseScope(code, "allocateStack");

    // Perform an escape analysis over stack slots. An escaping stack slot is one that is locked or
    // is explicitly escaped in the code.
    IndexSet<StackSlot> escapingStackSlots;
    for (StackSlot* slot : code.stackSlots()) {
        if (slot->isLocked())
            escapingStackSlots.add(slot);
    }
    for (BasicBlock* block : code) {
        for (Inst& inst : *block) {
            inst.forEachArg(
                [&] (Arg& arg, Arg::Role role, Arg::Type, Arg::Width) {
                    if (role == Arg::UseAddr && arg.isStack())
                        escapingStackSlots.add(arg.stackSlot());
                });
        }
    }

    // Allocate all of the escaped slots in order. This is kind of a crazy algorithm to allow for
    // the possibility of stack slots being assigned frame offsets before we even get here.
    ASSERT(!code.frameSize());
    Vector<StackSlot*> assignedEscapedStackSlots;
    Vector<StackSlot*> escapedStackSlotsWorklist;
    for (StackSlot* slot : code.stackSlots()) {
        if (escapingStackSlots.contains(slot)) {
            if (slot->offsetFromFP())
                assignedEscapedStackSlots.append(slot);
            else
                escapedStackSlotsWorklist.append(slot);
        } else {
            // It would be super strange to have an unlocked stack slot that has an offset already.
            ASSERT(!slot->offsetFromFP());
        }
    }
    // This is a fairly expensive loop, but it's OK because we'll usually only have a handful of
    // escaped stack slots.
    while (!escapedStackSlotsWorklist.isEmpty()) {
        StackSlot* slot = escapedStackSlotsWorklist.takeLast();
        assign(slot, assignedEscapedStackSlots);
        assignedEscapedStackSlots.append(slot);
    }

    // Now we handle the anonymous slots.
    StackSlotLiveness liveness(code);
    IndexMap<StackSlot, HashSet<StackSlot*>> interference(code.stackSlots().size());
    Vector<StackSlot*> slots;

    for (BasicBlock* block : code) {
        StackSlotLiveness::LocalCalc localCalc(liveness, block);

        auto interfere = [&] (unsigned instIndex) {
            if (verbose)
                dataLog("Interfering: ", WTF::pointerListDump(localCalc.live()), "\n");

            Inst::forEachDef<Arg>(
                block->get(instIndex), block->get(instIndex + 1),
                [&] (Arg& arg, Arg::Role, Arg::Type, Arg::Width) {
                    if (!arg.isStack())
                        return;
                    StackSlot* slot = arg.stackSlot();
                    if (slot->kind() != StackSlotKind::Anonymous)
                        return;

                    for (StackSlot* otherSlot : localCalc.live()) {
                        interference[slot].add(otherSlot);
                        interference[otherSlot].add(slot);
                    }
                });
        };

        for (unsigned instIndex = block->size(); instIndex--;) {
            if (verbose)
                dataLog("Analyzing: ", block->at(instIndex), "\n");

            // Kill dead stores. For simplicity we say that a store is killable if it has only late
            // defs and those late defs are to things that are dead right now. We only do that
            // because that's the only kind of dead stack store we will see here.
            Inst& inst = block->at(instIndex);
            if (!inst.hasNonArgEffects()) {
                bool ok = true;
                inst.forEachArg(
                    [&] (Arg& arg, Arg::Role role, Arg::Type, Arg::Width) {
                        if (Arg::isEarlyDef(role)) {
                            ok = false;
                            return;
                        }
                        if (!Arg::isLateDef(role))
                            return;
                        if (!arg.isStack()) {
                            ok = false;
                            return;
                        }
                        StackSlot* slot = arg.stackSlot();
                        if (slot->kind() != StackSlotKind::Anonymous) {
                            ok = false;
                            return;
                        }

                        if (localCalc.isLive(slot)) {
                            ok = false;
                            return;
                        }
                    });
                if (ok)
                    inst = Inst();
            }
            
            interfere(instIndex);
            localCalc.execute(instIndex);
        }
        interfere(-1);
        
        block->insts().removeAllMatching(
            [&] (const Inst& inst) -> bool {
                return !inst;
            });
    }

    if (verbose) {
        for (StackSlot* slot : code.stackSlots())
            dataLog("Interference of ", pointerDump(slot), ": ", pointerListDump(interference[slot]), "\n");
    }

    // Now we assign stack locations. At its heart this algorithm is just first-fit. For each
    // StackSlot we just want to find the offsetFromFP that is closest to zero while ensuring no
    // overlap with other StackSlots that this overlaps with.
    Vector<StackSlot*> otherSlots = assignedEscapedStackSlots;
    for (StackSlot* slot : code.stackSlots()) {
        if (slot->offsetFromFP()) {
            // Already assigned an offset.
            continue;
        }

        HashSet<StackSlot*>& interferingSlots = interference[slot];
        otherSlots.resize(assignedEscapedStackSlots.size());
        otherSlots.resize(assignedEscapedStackSlots.size() + interferingSlots.size());
        unsigned nextIndex = assignedEscapedStackSlots.size();
        for (StackSlot* otherSlot : interferingSlots)
            otherSlots[nextIndex++] = otherSlot;

        assign(slot, otherSlots);
    }

    // Figure out how much stack we're using for stack slots.
    unsigned frameSizeForStackSlots = 0;
    for (StackSlot* slot : code.stackSlots()) {
        frameSizeForStackSlots = std::max(
            frameSizeForStackSlots,
            static_cast<unsigned>(-slot->offsetFromFP()));
    }

    frameSizeForStackSlots = WTF::roundUpToMultipleOf(stackAlignmentBytes(), frameSizeForStackSlots);

    // Now we need to deduce how much argument area we need.
    for (BasicBlock* block : code) {
        for (Inst& inst : *block) {
            for (Arg& arg : inst.args) {
                if (arg.isCallArg()) {
                    // For now, we assume that we use 8 bytes of the call arg. But that's not
                    // such an awesome assumption.
                    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=150454
                    ASSERT(arg.offset() >= 0);
                    code.requestCallArgAreaSize(arg.offset() + 8);
                }
            }
        }
    }

    code.setFrameSize(frameSizeForStackSlots + code.callArgAreaSize());

    // Finally, transform the code to use Addr's instead of StackSlot's. This is a lossless
    // transformation since we can search the StackSlots array to figure out which StackSlot any
    // offset-from-FP refers to.

    // FIXME: This may produce addresses that aren't valid if we end up with a ginormous stack frame.
    // We would have to scavenge for temporaries if this happened. Fortunately, this case will be
    // extremely rare so we can do crazy things when it arises.
    // https://bugs.webkit.org/show_bug.cgi?id=152530

    InsertionSet insertionSet(code);
    for (BasicBlock* block : code) {
        for (unsigned instIndex = 0; instIndex < block->size(); ++instIndex) {
            Inst& inst = block->at(instIndex);
            inst.forEachArg(
                [&] (Arg& arg, Arg::Role role, Arg::Type, Arg::Width width) {
                    auto stackAddr = [&] (int32_t offset) -> Arg {
                        return Arg::stackAddr(offset, code.frameSize(), width);
                    };
                    
                    switch (arg.kind()) {
                    case Arg::Stack: {
                        StackSlot* slot = arg.stackSlot();
                        if (Arg::isZDef(role)
                            && slot->kind() == StackSlotKind::Anonymous
                            && slot->byteSize() > Arg::bytes(width)) {
                            // Currently we only handle this simple case because it's the only one
                            // that arises: ZDef's are only 32-bit right now. So, when we hit these
                            // assertions it means that we need to implement those other kinds of
                            // zero fills.
                            RELEASE_ASSERT(slot->byteSize() == 8);
                            RELEASE_ASSERT(width == Arg::Width32);

                            RELEASE_ASSERT(isValidForm(StoreZero32, Arg::Stack));
                            insertionSet.insert(
                                instIndex + 1, StoreZero32, inst.origin,
                                stackAddr(arg.offset() + 4 + slot->offsetFromFP()));
                        }
                        arg = stackAddr(arg.offset() + slot->offsetFromFP());
                        break;
                    }
                    case Arg::CallArg:
                        arg = stackAddr(arg.offset() - code.frameSize());
                        break;
                    default:
                        break;
                    }
                }
            );
        }
        insertionSet.execute(block);
    }
}
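Both allocateStack listings (this one and code example #23 below) lean on an assign() helper that the snippets do not include. The first-fit sketch below is an assumption about its shape, consistent with the comment "find the offsetFromFP that is closest to zero while ensuring no overlap", not the actual AirAllocateStack.cpp helper; in particular, StackSlot::alignment() is assumed here.

// Sketch under stated assumptions: offsets from FP are negative, and a slot
// occupies the byte range [offsetFromFP, offsetFromFP + byteSize).
static void assign(StackSlot* slot, const Vector<StackSlot*>& otherSlots)
{
    intptr_t offset = -static_cast<intptr_t>(slot->byteSize());
    for (;;) {
        // Respect the slot's natural alignment; rounding -offset up pushes
        // the slot further away from FP.
        offset = -static_cast<intptr_t>(
            WTF::roundUpToMultipleOf(slot->alignment(), static_cast<size_t>(-offset)));

        bool overlaps = false;
        for (StackSlot* other : otherSlots) {
            if (!other->offsetFromFP())
                continue; // Not placed yet, so it cannot conflict.
            intptr_t otherBegin = other->offsetFromFP();
            intptr_t otherEnd = otherBegin + static_cast<intptr_t>(other->byteSize());
            if (offset < otherEnd && offset + static_cast<intptr_t>(slot->byteSize()) > otherBegin) {
                overlaps = true;
                // Jump just below the conflicting slot and retry.
                offset = otherBegin - static_cast<intptr_t>(slot->byteSize());
                break;
            }
        }
        if (!overlaps)
            break;
    }
    slot->setOffsetFromFP(offset);
}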
Code example #17
void VariableValue::dumpMeta(CommaPrinter& comma, PrintStream& out) const
{
    out.print(comma, pointerDump(m_variable));
}
Code example #18
File: JITCode.cpp Project: wolfviking0/webcl-webkit
JITCodeWithCodeRef::~JITCodeWithCodeRef()
{
    if ((Options::dumpDisassembly() || (isOptimizingJIT(jitType()) && Options::dumpDFGDisassembly()))
        && m_ref.executableMemory())
        dataLog("Destroying JIT code at ", pointerDump(m_ref.executableMemory()), "\n");
}
Code example #19
File: B3Value.cpp Project: sailei1/webkit
void Value::dumpChildren(CommaPrinter& comma, PrintStream& out) const
{
    for (Value* child : children())
        out.print(comma, pointerDump(child));
}
Code example #20
File: FTLCompile.cpp Project: rodrigo-speller/webkit
static void fixFunctionBasedOnStackMaps(
    State& state, CodeBlock* codeBlock, JITCode* jitCode, GeneratedFunction generatedFunction,
    StackMaps::RecordMap& recordMap)
{
    Graph& graph = state.graph;
    VM& vm = graph.m_vm;
    StackMaps stackmaps = jitCode->stackmaps;
    
    int localsOffset = offsetOfStackRegion(recordMap, state.capturedStackmapID) + graph.m_nextMachineLocal;
    int varargsSpillSlotsOffset = offsetOfStackRegion(recordMap, state.varargsSpillSlotsStackmapID);
    
    for (unsigned i = graph.m_inlineVariableData.size(); i--;) {
        InlineCallFrame* inlineCallFrame = graph.m_inlineVariableData[i].inlineCallFrame;
        
        if (inlineCallFrame->argumentCountRegister.isValid())
            inlineCallFrame->argumentCountRegister += localsOffset;
        
        for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
            inlineCallFrame->arguments[argument] =
                inlineCallFrame->arguments[argument].withLocalsOffset(localsOffset);
        }
        
        if (inlineCallFrame->isClosureCall) {
            inlineCallFrame->calleeRecovery =
                inlineCallFrame->calleeRecovery.withLocalsOffset(localsOffset);
        }

        if (graph.hasDebuggerEnabled())
            codeBlock->setScopeRegister(codeBlock->scopeRegister() + localsOffset);
    }
    
    MacroAssembler::Label stackOverflowException;

    {
        CCallHelpers checkJIT(&vm, codeBlock);
        
        // At this point it's perfectly fair to just blow away all state and restore the
        // JS JIT view of the universe.
        checkJIT.copyCalleeSavesToVMCalleeSavesBuffer();
        checkJIT.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
        checkJIT.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        MacroAssembler::Call callLookupExceptionHandler = checkJIT.call();
        checkJIT.jumpToExceptionHandler();

        stackOverflowException = checkJIT.label();
        checkJIT.copyCalleeSavesToVMCalleeSavesBuffer();
        checkJIT.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
        checkJIT.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        MacroAssembler::Call callLookupExceptionHandlerFromCallerFrame = checkJIT.call();
        checkJIT.jumpToExceptionHandler();

        auto linkBuffer = std::make_unique<LinkBuffer>(
            vm, checkJIT, codeBlock, JITCompilationCanFail);
        if (linkBuffer->didFailToAllocate()) {
            state.allocationFailed = true;
            return;
        }
        linkBuffer->link(callLookupExceptionHandler, FunctionPtr(lookupExceptionHandler));
        linkBuffer->link(callLookupExceptionHandlerFromCallerFrame, FunctionPtr(lookupExceptionHandlerFromCallerFrame));

        state.finalizer->handleExceptionsLinkBuffer = WTF::move(linkBuffer);
    }

    ExitThunkGenerator exitThunkGenerator(state);
    exitThunkGenerator.emitThunks();
    if (exitThunkGenerator.didThings()) {
        RELEASE_ASSERT(state.finalizer->osrExit.size());
        
        auto linkBuffer = std::make_unique<LinkBuffer>(
            vm, exitThunkGenerator, codeBlock, JITCompilationCanFail);
        if (linkBuffer->didFailToAllocate()) {
            state.allocationFailed = true;
            return;
        }
        
        RELEASE_ASSERT(state.finalizer->osrExit.size() == state.jitCode->osrExit.size());
        
        for (unsigned i = 0; i < state.jitCode->osrExit.size(); ++i) {
            OSRExitCompilationInfo& info = state.finalizer->osrExit[i];
            OSRExit& exit = jitCode->osrExit[i];
            
            if (verboseCompilationEnabled())
                dataLog("Handling OSR stackmap #", exit.m_stackmapID, " for ", exit.m_codeOrigin, "\n");

            auto iter = recordMap.find(exit.m_stackmapID);
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            
            info.m_thunkAddress = linkBuffer->locationOf(info.m_thunkLabel);
            exit.m_patchableCodeOffset = linkBuffer->offsetOf(info.m_thunkJump);
            
            for (unsigned j = exit.m_values.size(); j--;)
                exit.m_values[j] = exit.m_values[j].withLocalsOffset(localsOffset);
            for (ExitTimeObjectMaterialization* materialization : exit.m_materializations)
                materialization->accountForLocalsOffset(localsOffset);
            
            if (verboseCompilationEnabled()) {
                DumpContext context;
                dataLog("    Exit values: ", inContext(exit.m_values, &context), "\n");
                if (!exit.m_materializations.isEmpty()) {
                    dataLog("    Materializations: \n");
                    for (ExitTimeObjectMaterialization* materialization : exit.m_materializations)
                        dataLog("        Materialize(", pointerDump(materialization), ")\n");
                }
            }
        }
        
        state.finalizer->exitThunksLinkBuffer = WTF::move(linkBuffer);
    }

    if (!state.getByIds.isEmpty() || !state.putByIds.isEmpty() || !state.checkIns.isEmpty()) {
        CCallHelpers slowPathJIT(&vm, codeBlock);
        
        CCallHelpers::JumpList exceptionTarget;
        
        for (unsigned i = state.getByIds.size(); i--;) {
            GetByIdDescriptor& getById = state.getByIds[i];
            
            if (verboseCompilationEnabled())
                dataLog("Handling GetById stackmap #", getById.stackmapID(), "\n");
            
            auto iter = recordMap.find(getById.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            
            CodeOrigin codeOrigin = state.jitCode->common.codeOrigins[getById.callSiteIndex().bits()];
            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i];
            
                RegisterSet usedRegisters = usedRegistersFor(record);
                
                GPRReg result = record.locations[0].directGPR();
                GPRReg base = record.locations[1].directGPR();
                
                JITGetByIdGenerator gen(
                    codeBlock, codeOrigin, getById.callSiteIndex(), usedRegisters, JSValueRegs(base),
                    JSValueRegs(result), NeedToSpill);
                
                MacroAssembler::Label begin = slowPathJIT.label();

                MacroAssembler::Call call = callOperation(
                    state, usedRegisters, slowPathJIT, codeOrigin, &exceptionTarget,
                    operationGetByIdOptimize, result, gen.stubInfo(), base, getById.uid());

                gen.reportSlowPathCall(begin, call);

                getById.m_slowPathDone.append(slowPathJIT.jump());
                getById.m_generators.append(gen);
            }
        }
        
        for (unsigned i = state.putByIds.size(); i--;) {
            PutByIdDescriptor& putById = state.putByIds[i];
            
            if (verboseCompilationEnabled())
                dataLog("Handling PutById stackmap #", putById.stackmapID(), "\n");
            
            auto iter = recordMap.find(putById.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            
            CodeOrigin codeOrigin = state.jitCode->common.codeOrigins[putById.callSiteIndex().bits()];
            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i];
                
                RegisterSet usedRegisters = usedRegistersFor(record);
                
                GPRReg base = record.locations[0].directGPR();
                GPRReg value = record.locations[1].directGPR();
                
                JITPutByIdGenerator gen(
                    codeBlock, codeOrigin, putById.callSiteIndex(), usedRegisters, JSValueRegs(base),
                    JSValueRegs(value), GPRInfo::patchpointScratchRegister, NeedToSpill,
                    putById.ecmaMode(), putById.putKind());
                
                MacroAssembler::Label begin = slowPathJIT.label();
                
                MacroAssembler::Call call = callOperation(
                    state, usedRegisters, slowPathJIT, codeOrigin, &exceptionTarget,
                    gen.slowPathFunction(), gen.stubInfo(), value, base, putById.uid());
                
                gen.reportSlowPathCall(begin, call);
                
                putById.m_slowPathDone.append(slowPathJIT.jump());
                putById.m_generators.append(gen);
            }
        }

        for (unsigned i = state.checkIns.size(); i--;) {
            CheckInDescriptor& checkIn = state.checkIns[i];
            
            if (verboseCompilationEnabled())
                dataLog("Handling checkIn stackmap #", checkIn.stackmapID(), "\n");
            
            auto iter = recordMap.find(checkIn.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            
            CodeOrigin codeOrigin = state.jitCode->common.codeOrigins[checkIn.callSiteIndex().bits()];
            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i];
                RegisterSet usedRegisters = usedRegistersFor(record);
                GPRReg result = record.locations[0].directGPR();
                GPRReg obj = record.locations[1].directGPR();
                StructureStubInfo* stubInfo = codeBlock->addStubInfo(AccessType::In); 
                stubInfo->codeOrigin = codeOrigin;
                stubInfo->callSiteIndex = checkIn.callSiteIndex();
                stubInfo->patch.baseGPR = static_cast<int8_t>(obj);
                stubInfo->patch.valueGPR = static_cast<int8_t>(result);
                stubInfo->patch.usedRegisters = usedRegisters;
                stubInfo->patch.spillMode = NeedToSpill;

                MacroAssembler::Label begin = slowPathJIT.label();

                MacroAssembler::Call slowCall = callOperation(
                    state, usedRegisters, slowPathJIT, codeOrigin, &exceptionTarget,
                    operationInOptimize, result, stubInfo, obj, checkIn.m_uid);

                checkIn.m_slowPathDone.append(slowPathJIT.jump());
                
                checkIn.m_generators.append(CheckInGenerator(stubInfo, slowCall, begin));
            }
        }
        
        exceptionTarget.link(&slowPathJIT);
        MacroAssembler::Jump exceptionJump = slowPathJIT.jump();
        
        state.finalizer->sideCodeLinkBuffer = std::make_unique<LinkBuffer>(vm, slowPathJIT, codeBlock, JITCompilationCanFail);
        if (state.finalizer->sideCodeLinkBuffer->didFailToAllocate()) {
            state.allocationFailed = true;
            return;
        }
        state.finalizer->sideCodeLinkBuffer->link(
            exceptionJump, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
        
        for (unsigned i = state.getByIds.size(); i--;) {
            generateICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.getByIds[i],
                sizeOfGetById());
        }
        for (unsigned i = state.putByIds.size(); i--;) {
            generateICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.putByIds[i],
                sizeOfPutById());
        }

        for (unsigned i = state.checkIns.size(); i--;) {
            generateCheckInICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.checkIns[i],
                sizeOfIn()); 
        } 
    }
    
    adjustCallICsForStackmaps(state.jsCalls, recordMap);
    
    for (unsigned i = state.jsCalls.size(); i--;) {
        JSCall& call = state.jsCalls[i];

        CCallHelpers fastPathJIT(&vm, codeBlock);
        call.emit(fastPathJIT, state.jitCode->stackmaps.stackSizeForLocals());

        char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;

        generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfCall(), "JSCall inline cache", [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
            call.link(vm, linkBuffer);
        });
    }
    
    adjustCallICsForStackmaps(state.jsCallVarargses, recordMap);
    
    for (unsigned i = state.jsCallVarargses.size(); i--;) {
        JSCallVarargs& call = state.jsCallVarargses[i];
        
        CCallHelpers fastPathJIT(&vm, codeBlock);
        call.emit(fastPathJIT, varargsSpillSlotsOffset);

        char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;
        size_t sizeOfIC = sizeOfICFor(call.node());

        generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfIC, "varargs call inline cache", [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
            call.link(vm, linkBuffer, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
        });
    }

    adjustCallICsForStackmaps(state.jsTailCalls, recordMap);

    for (unsigned i = state.jsTailCalls.size(); i--;) {
        JSTailCall& call = state.jsTailCalls[i];

        CCallHelpers fastPathJIT(&vm, codeBlock);
        call.emit(*state.jitCode.get(), fastPathJIT);

        char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;
        size_t sizeOfIC = call.estimatedSize();

        generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfIC, "tail call inline cache", [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
            call.link(vm, linkBuffer);
        });
    }
    
    auto iter = recordMap.find(state.handleStackOverflowExceptionStackmapID);
    // It's sort of remotely possible that we won't have an in-band exception handling
    // path, for some kinds of functions.
    if (iter != recordMap.end()) {
        for (unsigned i = iter->value.size(); i--;) {
            StackMaps::Record& record = iter->value[i];
            
            CodeLocationLabel source = CodeLocationLabel(
                bitwise_cast<char*>(generatedFunction) + record.instructionOffset);

            RELEASE_ASSERT(stackOverflowException.isSet());

            MacroAssembler::replaceWithJump(source, state.finalizer->handleExceptionsLinkBuffer->locationOf(stackOverflowException));
        }
    }
    
    iter = recordMap.find(state.handleExceptionStackmapID);
    // It's sort of remotely possible that we won't have an in-band exception handling
    // path, for some kinds of functions.
    if (iter != recordMap.end()) {
        for (unsigned i = iter->value.size(); i--;) {
            StackMaps::Record& record = iter->value[i];
            
            CodeLocationLabel source = CodeLocationLabel(
                bitwise_cast<char*>(generatedFunction) + record.instructionOffset);
            
            MacroAssembler::replaceWithJump(source, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
        }
    }
    
    for (unsigned exitIndex = 0; exitIndex < jitCode->osrExit.size(); ++exitIndex) {
        OSRExitCompilationInfo& info = state.finalizer->osrExit[exitIndex];
        OSRExit& exit = jitCode->osrExit[exitIndex];
        iter = recordMap.find(exit.m_stackmapID);
        
        Vector<const void*> codeAddresses;
        
        if (iter != recordMap.end()) {
            for (unsigned i = iter->value.size(); i--;) {
                StackMaps::Record& record = iter->value[i];
                
                CodeLocationLabel source = CodeLocationLabel(
                    bitwise_cast<char*>(generatedFunction) + record.instructionOffset);
                
                codeAddresses.append(bitwise_cast<char*>(generatedFunction) + record.instructionOffset + MacroAssembler::maxJumpReplacementSize());
                
                if (info.m_isInvalidationPoint)
                    jitCode->common.jumpReplacements.append(JumpReplacement(source, info.m_thunkAddress));
                else
                    MacroAssembler::replaceWithJump(source, info.m_thunkAddress);
            }
        }
        
        if (graph.compilation())
            graph.compilation()->addOSRExitSite(codeAddresses);
    }
}
Code example #21
void SlotBaseValue::dumpMeta(CommaPrinter& comma, PrintStream& out) const
{
    out.print(comma, pointerDump(m_slot));
}
Code example #22
File: B3FixSSA.cpp Project: jeff-jenness/webkit
bool fixSSA(Procedure& proc)
{
    PhaseScope phaseScope(proc, "fixSSA");
    
    // Collect the stack "variables". If there aren't any, then we don't have anything to do.
    // That's a fairly common case.
    HashMap<StackSlotValue*, Type> stackVariable;
    for (Value* value : proc.values()) {
        if (StackSlotValue* stack = value->as<StackSlotValue>()) {
            if (stack->kind() == StackSlotKind::Anonymous)
                stackVariable.add(stack, Void);
        }
    }

    if (stackVariable.isEmpty())
        return false;

    // Make sure that we know how to optimize all of these. We only know how to handle Load and
    // Store on anonymous variables.
    for (Value* value : proc.values()) {
        auto reject = [&] (Value* value) {
            if (StackSlotValue* stack = value->as<StackSlotValue>())
                stackVariable.remove(stack);
        };
        
        auto handleAccess = [&] (Value* access, Type type) {
            StackSlotValue* stack = access->lastChild()->as<StackSlotValue>();
            if (!stack)
                return;
            
            if (value->as<MemoryValue>()->offset()) {
                stackVariable.remove(stack);
                return;
            }

            auto result = stackVariable.find(stack);
            if (result == stackVariable.end())
                return;
            if (result->value == Void) {
                result->value = type;
                return;
            }
            if (result->value == type)
                return;
            stackVariable.remove(result);
        };
        
        switch (value->opcode()) {
        case Load:
            // We're OK with loads from stack variables at an offset of zero.
            handleAccess(value, value->type());
            break;
        case Store:
            // We're OK with stores to stack variables, but not storing stack variables.
            reject(value->child(0));
            handleAccess(value, value->child(0)->type());
            break;
        default:
            for (Value* child : value->children())
                reject(child);
            break;
        }
    }

    Vector<StackSlotValue*> deadValues;
    for (auto& entry : stackVariable) {
        if (entry.value == Void)
            deadValues.append(entry.key);
    }

    for (StackSlotValue* deadValue : deadValues) {
        deadValue->replaceWithNop();
        stackVariable.remove(deadValue);
    }

    if (stackVariable.isEmpty())
        return false;

    // We know that we have variables to optimize, so do that now.
    breakCriticalEdges(proc);

    SSACalculator ssa(proc);

    // Create a SSACalculator::Variable for every stack variable.
    Vector<StackSlotValue*> variableToStack;
    HashMap<StackSlotValue*, SSACalculator::Variable*> stackToVariable;

    for (auto& entry : stackVariable) {
        StackSlotValue* stack = entry.key;
        SSACalculator::Variable* variable = ssa.newVariable();
        RELEASE_ASSERT(variable->index() == variableToStack.size());
        variableToStack.append(stack);
        stackToVariable.add(stack, variable);
    }

    // Create Defs for all of the stores to the stack variable.
    for (BasicBlock* block : proc) {
        for (Value* value : *block) {
            if (value->opcode() != Store)
                continue;

            StackSlotValue* stack = value->child(1)->as<StackSlotValue>();
            if (!stack)
                continue;

            if (SSACalculator::Variable* variable = stackToVariable.get(stack))
                ssa.newDef(variable, block, value->child(0));
        }
    }

    // Decide where Phis are to be inserted. This creates them but does not insert them.
    ssa.computePhis(
        [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Value* {
            StackSlotValue* stack = variableToStack[variable->index()];
            Value* phi = proc.add<Value>(Phi, stackVariable.get(stack), stack->origin());
            if (verbose) {
                dataLog(
                    "Adding Phi for ", pointerDump(stack), " at ", *block, ": ",
                    deepDump(proc, phi), "\n");
            }
            return phi;
        });

    // Now perform the conversion.
    InsertionSet insertionSet(proc);
    HashMap<StackSlotValue*, Value*> mapping;
    for (BasicBlock* block : proc.blocksInPreOrder()) {
        mapping.clear();

        for (auto& entry : stackToVariable) {
            StackSlotValue* stack = entry.key;
            SSACalculator::Variable* variable = entry.value;

            SSACalculator::Def* def = ssa.reachingDefAtHead(block, variable);
            if (def)
                mapping.set(stack, def->value());
        }

        for (SSACalculator::Def* phiDef : ssa.phisForBlock(block)) {
            StackSlotValue* stack = variableToStack[phiDef->variable()->index()];

            insertionSet.insertValue(0, phiDef->value());
            mapping.set(stack, phiDef->value());
        }

        for (unsigned valueIndex = 0; valueIndex < block->size(); ++valueIndex) {
            Value* value = block->at(valueIndex);
            value->performSubstitution();

            switch (value->opcode()) {
            case Load: {
                if (StackSlotValue* stack = value->child(0)->as<StackSlotValue>()) {
                    if (Value* replacement = mapping.get(stack))
                        value->replaceWithIdentity(replacement);
                }
                break;
            }
                
            case Store: {
                if (StackSlotValue* stack = value->child(1)->as<StackSlotValue>()) {
                    if (stackToVariable.contains(stack)) {
                        mapping.set(stack, value->child(0));
                        value->replaceWithNop();
                    }
                }
                break;
            }

            default:
                break;
            }
        }

        unsigned upsilonInsertionPoint = block->size() - 1;
        Origin upsilonOrigin = block->last()->origin();
        for (BasicBlock* successorBlock : block->successorBlocks()) {
            for (SSACalculator::Def* phiDef : ssa.phisForBlock(successorBlock)) {
                Value* phi = phiDef->value();
                SSACalculator::Variable* variable = phiDef->variable();
                StackSlotValue* stack = variableToStack[variable->index()];

                Value* mappedValue = mapping.get(stack);
                if (verbose) {
                    dataLog(
                        "Mapped value for ", *stack, " with successor Phi ", *phi, " at end of ",
                        *block, ": ", pointerDump(mappedValue), "\n");
                }
                
                if (!mappedValue)
                    mappedValue = insertionSet.insertBottom(upsilonInsertionPoint, phi);
                
                insertionSet.insert<UpsilonValue>(
                    upsilonInsertionPoint, upsilonOrigin, mappedValue, phi);
            }
        }

        insertionSet.execute(block);
    }

    // Finally, kill the stack slots.
    for (StackSlotValue* stack : variableToStack)
        stack->replaceWithNop();

    if (verbose) {
        dataLog("B3 after SSA conversion:\n");
        dataLog(proc);
    }

    return true;
}
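The Phi/Upsilon pairing this phase builds is how B3 represents SSA joins: a Phi takes no arguments of its own; instead, each predecessor ends with an UpsilonValue that writes the incoming value into the Phi. A hand-written sketch of what the conversion does to one anonymous slot (block and value names invented):

  Before (loads/stores of an anonymous slot):    After SSA conversion:
    #0: Store($1, slot); Jump -> #2                #0: Upsilon($1, ^phi); Jump -> #2
    #1: Store($2, slot); Jump -> #2                #1: Upsilon($2, ^phi); Jump -> #2
    #2: x = Load(slot); Return(x)                  #2: phi = Phi(); Return(phi)

The stores become entries in the per-block mapping and are replaced with nops, the load is replaced with an identity of the mapped value, and the Upsilons are inserted just before each predecessor's terminal, exactly as the loops above do.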
Code example #23
void allocateStack(Code& code)
{
    PhaseScope phaseScope(code, "allocateStack");

    // Perform an escape analysis over stack slots. An escaping stack slot is one that is locked or
    // is explicitly escaped in the code.
    IndexSet<StackSlot> escapingStackSlots;
    for (StackSlot* slot : code.stackSlots()) {
        if (slot->isLocked())
            escapingStackSlots.add(slot);
    }
    for (BasicBlock* block : code) {
        for (Inst& inst : *block) {
            inst.forEachArg(
                [&] (Arg& arg, Arg::Role role, Arg::Type, Arg::Width) {
                    if (role == Arg::UseAddr && arg.isStack())
                        escapingStackSlots.add(arg.stackSlot());
                });
        }
    }

    // Allocate all of the escaped slots in order. This is kind of a crazy algorithm to allow for
    // the possibility of stack slots being assigned frame offsets before we even get here.
    ASSERT(!code.frameSize());
    Vector<StackSlot*> assignedEscapedStackSlots;
    Vector<StackSlot*> escapedStackSlotsWorklist;
    for (StackSlot* slot : code.stackSlots()) {
        if (escapingStackSlots.contains(slot)) {
            if (slot->offsetFromFP())
                assignedEscapedStackSlots.append(slot);
            else
                escapedStackSlotsWorklist.append(slot);
        } else {
            // It would be super strange to have an unlocked stack slot that has an offset already.
            ASSERT(!slot->offsetFromFP());
        }
    }
    // This is a fairly expensive loop, but it's OK because we'll usually only have a handful of
    // escaped stack slots.
    while (!escapedStackSlotsWorklist.isEmpty()) {
        StackSlot* slot = escapedStackSlotsWorklist.takeLast();
        assign(slot, assignedEscapedStackSlots);
        assignedEscapedStackSlots.append(slot);
    }

    // Now we handle the anonymous slots.
    StackSlotLiveness liveness(code);
    IndexMap<StackSlot, HashSet<StackSlot*>> interference(code.stackSlots().size());
    Vector<StackSlot*> slots;

    for (BasicBlock* block : code) {
        StackSlotLiveness::LocalCalc localCalc(liveness, block);

        auto interfere = [&] (Inst& inst) {
            if (verbose)
                dataLog("Interfering: ", WTF::pointerListDump(localCalc.live()), "\n");

            inst.forEachArg(
                [&] (Arg& arg, Arg::Role role, Arg::Type, Arg::Width) {
                    if (!Arg::isDef(role))
                        return;
                    if (!arg.isStack())
                        return;
                    StackSlot* slot = arg.stackSlot();
                    if (slot->kind() != StackSlotKind::Anonymous)
                        return;

                    for (StackSlot* otherSlot : localCalc.live()) {
                        interference[slot].add(otherSlot);
                        interference[otherSlot].add(slot);
                    }
                });
        };

        for (unsigned instIndex = block->size(); instIndex--;) {
            if (verbose)
                dataLog("Analyzing: ", block->at(instIndex), "\n");
            Inst& inst = block->at(instIndex);
            interfere(inst);
            localCalc.execute(instIndex);
        }
        Inst nop;
        interfere(nop);
    }

    if (verbose) {
        for (StackSlot* slot : code.stackSlots())
            dataLog("Interference of ", pointerDump(slot), ": ", pointerListDump(interference[slot]), "\n");
    }

    // Now we assign stack locations. At its heart this algorithm is just first-fit. For each
    // StackSlot we want to find the offsetFromFP that is closest to zero while ensuring it does
    // not overlap any of the StackSlots that interfere with it. (A first-fit sketch of the
    // assign() helper follows this listing.)
    Vector<StackSlot*> otherSlots = assignedEscapedStackSlots;
    for (StackSlot* slot : code.stackSlots()) {
        if (slot->offsetFromFP()) {
            // Already assigned an offset.
            continue;
        }

        HashSet<StackSlot*>& interferingSlots = interference[slot];
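        // Reuse otherSlots across iterations: shrink back to just the escaped
        // slots, then grow to make room for this slot's interference set.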
        otherSlots.resize(assignedEscapedStackSlots.size());
        otherSlots.resize(assignedEscapedStackSlots.size() + interferingSlots.size());
        unsigned nextIndex = assignedEscapedStackSlots.size();
        for (StackSlot* otherSlot : interferingSlots)
            otherSlots[nextIndex++] = otherSlot;

        assign(slot, otherSlots);
    }

    // Figure out how much stack we're using for stack slots.
    unsigned frameSizeForStackSlots = 0;
    for (StackSlot* slot : code.stackSlots()) {
        frameSizeForStackSlots = std::max(
            frameSizeForStackSlots,
            static_cast<unsigned>(-slot->offsetFromFP()));
    }

    frameSizeForStackSlots = WTF::roundUpToMultipleOf(stackAlignmentBytes(), frameSizeForStackSlots);

    // Now we need to deduce how much argument area we need.
    for (BasicBlock* block : code) {
        for (Inst& inst : *block) {
            for (Arg& arg : inst.args) {
                if (arg.isCallArg()) {
                    // For now, we assume that we use 8 bytes of the call arg. But that's not
                    // such an awesome assumption.
                    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=150454
                    ASSERT(arg.offset() >= 0);
                    code.requestCallArgAreaSize(arg.offset() + 8);
                }
            }
        }
    }

    code.setFrameSize(frameSizeForStackSlots + code.callArgAreaSize());

    // Finally, transform the code to use Addr's instead of StackSlot's. This is a lossless
    // transformation since we can search the StackSlots array to figure out which StackSlot any
    // offset-from-FP refers to.
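    // For example, a Stack arg at offset 8 on a slot whose offsetFromFP is -16
    // becomes the address -8(callFrameRegister), and a CallArg at offset 0 in a
    // 64-byte frame becomes -64(callFrameRegister), i.e. the bottom of the frame.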

    for (BasicBlock* block : code) {
        for (Inst& inst : *block) {
            for (Arg& arg : inst.args) {
                switch (arg.kind()) {
                case Arg::Stack:
                    arg = Arg::addr(
                        Tmp(GPRInfo::callFrameRegister),
                        arg.offset() + arg.stackSlot()->offsetFromFP());
                    break;
                case Arg::CallArg:
                    arg = Arg::addr(
                        Tmp(GPRInfo::callFrameRegister),
                        arg.offset() - code.frameSize());
                    break;
                default:
                    break;
                }
            }
        }
    }
}
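
The assign() helper used by allocateStack() is not part of this listing. Below is a minimal first-fit sketch consistent with the comments in the code; the helper overlaps(), the exact scanning strategy, and the accessors StackSlot::alignment() and StackSlot::setOffsetFromFP() are assumptions, not the actual WebKit implementation.

static bool overlaps(StackSlot* slot, intptr_t offset, StackSlot* other)
{
    // Slots grow downward from FP: a slot at offsetFromFP occupies the
    // half-open byte range [offsetFromFP, offsetFromFP + byteSize).
    intptr_t begin = offset;
    intptr_t end = offset + slot->byteSize();
    intptr_t otherBegin = other->offsetFromFP();
    intptr_t otherEnd = otherBegin + other->byteSize();
    return end > otherBegin && otherEnd > begin;
}

static void assign(StackSlot* slot, const Vector<StackSlot*>& otherSlots)
{
    // First-fit: start just below FP, suitably aligned, and walk downward
    // until we find a gap that fits the slot.
    intptr_t size = WTF::roundUpToMultipleOf(slot->alignment(), slot->byteSize());
    for (intptr_t offset = -size; ; offset -= slot->alignment()) {
        bool fits = true;
        for (StackSlot* other : otherSlots) {
            // Unassigned slots (offsetFromFP == 0) cannot collide yet.
            if (other->offsetFromFP() && overlaps(slot, offset, other)) {
                fits = false;
                break;
            }
        }
        if (fits) {
            slot->setOffsetFromFP(offset);
            return;
        }
    }
}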
Code Example #24
bool PropertyCondition::isStillValidAssumingImpurePropertyWatchpoint(
    Structure* structure, JSObject* base) const
{
    if (verbose) {
        dataLog(
            "Determining validity of ", *this, " with structure ", pointerDump(structure), " and base ",
            JSValue(base), " assuming impure property watchpoints are set.\n");
    }
    
    if (!*this) {
        if (verbose)
            dataLog("Invalid because unset.\n");
        return false;
    }
    
    if (!structure->propertyAccessesAreCacheable()) {
        if (verbose)
            dataLog("Invalid because accesses are not cacheable.\n");
        return false;
    }
    
    switch (m_kind) {
    case Presence: {
        unsigned currentAttributes;
        PropertyOffset currentOffset = structure->getConcurrently(uid(), currentAttributes);
        if (currentOffset != offset() || currentAttributes != attributes()) {
            if (verbose) {
                dataLog(
                    "Invalid because we need offset, attributes to be ", offset(), ", ", attributes(),
                    " but they are ", currentOffset, ", ", currentAttributes, "\n");
            }
            return false;
        }
        return true;
    }
        
    case Absence: {
        if (structure->isDictionary()) {
            if (verbose)
                dataLog("Invalid because it's a dictionary.\n");
            return false;
        }

        PropertyOffset currentOffset = structure->getConcurrently(uid());
        if (currentOffset != invalidOffset) {
            if (verbose)
                dataLog("Invalid because the property exists at offset: ", currentOffset, "\n");
            return false;
        }
        
        if (structure->storedPrototypeObject() != prototype()) {
            if (verbose) {
                dataLog(
                    "Invalid because the prototype is ", structure->storedPrototype(), " even though "
                    "it should have been ", JSValue(prototype()), "\n");
            }
            return false;
        }
        
        return true;
    }
    
    case AbsenceOfSetter: {
        if (structure->isDictionary()) {
            if (verbose)
                dataLog("Invalid because it's a dictionary.\n");
            return false;
        }
        
        unsigned currentAttributes;
        PropertyOffset currentOffset = structure->getConcurrently(uid(), currentAttributes);
        if (currentOffset != invalidOffset) {
            if (currentAttributes & (Accessor | CustomAccessor)) {
                if (verbose) {
                    dataLog(
                        "Invalid because we expected not to have a setter, but we have one at offset ",
                        currentOffset, " with attributes ", currentAttributes, "\n");
                }
                return false;
            }
        }
        
        if (structure->storedPrototypeObject() != prototype()) {
            if (verbose) {
                dataLog(
                    "Invalid because the prototype is ", structure->storedPrototype(), " even though "
                    "it should have been ", JSValue(prototype()), "\n");
            }
            return false;
        }
        
        return true;
    }
        
    case Equivalence: {
        if (!base || base->structure() != structure) {
            // Conservatively return false, since we cannot verify this one without having the
            // object.
            if (verbose) {
                dataLog(
                    "Invalid because we don't have a base or the base has the wrong structure: ",
                    RawPointer(base), "\n");
            }
            return false;
        }
        
        // FIXME: This is somewhat racy, and maybe more risky than we want.
        // https://bugs.webkit.org/show_bug.cgi?id=134641
        
        PropertyOffset currentOffset = structure->getConcurrently(uid());
        if (currentOffset == invalidOffset) {
            if (verbose) {
                dataLog(
                    "Invalid because the base no long appears to have ", uid(), " on its structure: ",
                        RawPointer(base), "\n");
            }
            return false;
        }

        JSValue currentValue = base->getDirect(currentOffset);
        if (currentValue != requiredValue()) {
            if (verbose) {
                dataLog(
                    "Invalid because the value is ", currentValue, " but we require ", requiredValue(),
                    "\n");
            }
            return false;
        }
        
        return true;
    } }
    
    RELEASE_ASSERT_NOT_REACHED();
    return false;
}
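
As a closing usage sketch: a caller would typically re-validate a condition before trusting a cached access. The wrapper below is hypothetical; only the call into PropertyCondition mirrors the listing above.

// Hypothetical helper: returns true if a cached access guarded by `condition`
// may still be used for `base`, assuming the impure-property watchpoints that
// the function above presupposes have been installed.
static bool cachedAccessStillValid(
    const PropertyCondition& condition, Structure* structure, JSObject* base)
{
    return condition.isStillValidAssumingImpurePropertyWatchpoint(structure, base);
}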