// Lowers a PutById with a single PutByIdVariant down to a PutByOffset node.
// Emits the base structure check, any property-storage (re)allocation that a
// transition requires, the PutStructure for transitions, and write barriers.
void emitPutByOffset(unsigned indexInBlock, Node* node, const AbstractValue& baseValue, const PutByIdVariant& variant, unsigned identifierNumber)
{
    NodeOrigin origin = node->origin;
    Edge childEdge = node->child1();

    // Ensure the base is a cell whose structure is within the variant's old-structure set.
    addBaseCheck(indexInBlock, node, baseValue, variant.oldStructure());

    childEdge.setUseKind(KnownCellUse);

    Transition* transition = 0;
    if (variant.kind() == PutByIdVariant::Transition) {
        transition = m_graph.m_transitions.add(
            variant.oldStructureForTransition(), variant.newStructure());
    }

    Edge propertyStorage;

    if (isInlineOffset(variant.offset()))
        propertyStorage = childEdge; // Inline properties live inside the cell itself.
    else if (!variant.reallocatesStorage()) {
        // Existing out-of-line storage is big enough; just load the butterfly.
        propertyStorage = Edge(m_insertionSet.insertNode(
            indexInBlock, SpecNone, GetButterfly, origin, childEdge));
    } else if (!variant.oldStructureForTransition()->outOfLineCapacity()) {
        // Transition from no out-of-line storage to some: allocate fresh storage.
        ASSERT(variant.newStructure()->outOfLineCapacity());
        ASSERT(!isInlineOffset(variant.offset()));
        Node* allocatePropertyStorage = m_insertionSet.insertNode(
            indexInBlock, SpecNone, AllocatePropertyStorage,
            origin, OpInfo(transition), childEdge);
        m_insertionSet.insertNode(indexInBlock, SpecNone, StoreBarrier, origin, Edge(node->child1().node(), KnownCellUse));
        propertyStorage = Edge(allocatePropertyStorage);
    } else {
        // Transition that grows existing out-of-line storage: reallocate.
        ASSERT(variant.oldStructureForTransition()->outOfLineCapacity());
        ASSERT(variant.newStructure()->outOfLineCapacity() > variant.oldStructureForTransition()->outOfLineCapacity());
        ASSERT(!isInlineOffset(variant.offset()));

        Node* reallocatePropertyStorage = m_insertionSet.insertNode(
            indexInBlock, SpecNone, ReallocatePropertyStorage, origin,
            OpInfo(transition), childEdge,
            Edge(m_insertionSet.insertNode(
                indexInBlock, SpecNone, GetButterfly, origin, childEdge)));
        m_insertionSet.insertNode(indexInBlock, SpecNone, StoreBarrier, origin, Edge(node->child1().node(), KnownCellUse));
        propertyStorage = Edge(reallocatePropertyStorage);
    }

    if (variant.kind() == PutByIdVariant::Transition) {
        // The barrier is inserted before the PutStructure (same index, earlier
        // call), so it precedes the structure change in the final node order.
        Node* putStructure = m_graph.addNode(SpecNone, PutStructure, origin, OpInfo(transition), childEdge);
        m_insertionSet.insertNode(indexInBlock, SpecNone, StoreBarrier, origin, Edge(node->child1().node(), KnownCellUse));
        m_insertionSet.insert(indexInBlock, putStructure);
    }

    node->convertToPutByOffset(m_graph.m_storageAccessData.size(), propertyStorage);

    // Barrier for storing the value (child2) into the object.
    m_insertionSet.insertNode(
        indexInBlock, SpecNone, StoreBarrier, origin, Edge(node->child2().node(), KnownCellUse));

    StorageAccessData storageAccessData;
    storageAccessData.offset = variant.offset();
    storageAccessData.identifierNumber = identifierNumber;
    m_graph.m_storageAccessData.append(storageAccessData);
}
// Emits IR computing the length of the arguments array produced by |arguments|.
// For non-varargs inlined frames the length is a compile-time constant;
// otherwise it is the runtime argument count minus one (for |this|).
Node* emitCodeToGetArgumentsArrayLength(
    InsertionSet& insertionSet, Node* arguments, unsigned nodeIndex, NodeOrigin origin)
{
    Graph& dfg = insertionSet.graph();

    DFG_ASSERT(
        dfg, arguments,
        arguments->op() == CreateDirectArguments
        || arguments->op() == CreateScopedArguments
        || arguments->op() == CreateClonedArguments
        || arguments->op() == PhantomDirectArguments
        || arguments->op() == PhantomClonedArguments);

    InlineCallFrame* frame = arguments->origin.semantic.inlineCallFrame;

    // Inlined, non-varargs call: the argument count is known statically.
    if (frame && !frame->isVarargs())
        return insertionSet.insertConstant(nodeIndex, origin, jsNumber(frame->arguments.size() - 1));

    // Load the dynamic count (which includes |this|).
    Node* countIncludingThis = nullptr;
    if (frame) {
        VirtualRegister countRegister(frame->stackOffset + JSStack::ArgumentCount);
        countIncludingThis = insertionSet.insertNode(
            nodeIndex, SpecInt32, GetStack, origin,
            OpInfo(dfg.m_stackAccessData.add(countRegister, FlushedInt32)));
    } else
        countIncludingThis = insertionSet.insertNode(nodeIndex, SpecInt32, GetArgumentCount, origin);

    // Subtract one so |this| is not counted.
    return insertionSet.insertNode(
        nodeIndex, SpecInt32, ArithSub, origin, OpInfo(Arith::Unchecked),
        Edge(countIncludingThis, Int32Use),
        insertionSet.insertConstantForUse(nodeIndex, origin, jsNumber(1), Int32Use));
}
// Emits DFG IR computing the length of the arguments (or rest) array produced
// by |arguments|, accounting for skipped leading arguments of rest parameters.
// Returns the node that produces the Int32 length.
Node* emitCodeToGetArgumentsArrayLength(
    InsertionSet& insertionSet, Node* arguments, unsigned nodeIndex, NodeOrigin origin)
{
    Graph& graph = insertionSet.graph();

    DFG_ASSERT(
        graph, arguments,
        arguments->op() == CreateDirectArguments || arguments->op() == CreateScopedArguments
        || arguments->op() == CreateClonedArguments || arguments->op() == CreateRest
        || arguments->op() == PhantomDirectArguments || arguments->op() == PhantomClonedArguments
        || arguments->op() == PhantomCreateRest);

    InlineCallFrame* inlineCallFrame = arguments->origin.semantic.inlineCallFrame;

    // Rest arrays exclude the function's bound leading parameters.
    unsigned numberOfArgumentsToSkip = 0;
    if (arguments->op() == CreateRest || arguments->op() == PhantomCreateRest)
        numberOfArgumentsToSkip = arguments->numberOfArgumentsToSkip();

    // For a non-varargs inlined call the count is a compile-time constant, so
    // the length folds to a constant, clamped at zero after skipping.
    if (inlineCallFrame && !inlineCallFrame->isVarargs()) {
        unsigned argumentsSize = inlineCallFrame->arguments.size() - 1;
        if (argumentsSize >= numberOfArgumentsToSkip)
            argumentsSize -= numberOfArgumentsToSkip;
        else
            argumentsSize = 0;
        return insertionSet.insertConstant(
            nodeIndex, origin, jsNumber(argumentsSize));
    }

    // Otherwise load the dynamic argument count (includes |this|).
    Node* argumentCount;
    if (!inlineCallFrame)
        argumentCount = insertionSet.insertNode(nodeIndex, SpecInt32Only, GetArgumentCountIncludingThis, origin);
    else {
        VirtualRegister argumentCountRegister(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount);
        argumentCount = insertionSet.insertNode(
            nodeIndex, SpecInt32Only, GetStack, origin,
            OpInfo(graph.m_stackAccessData.add(argumentCountRegister, FlushedInt32)));
    }

    // length = argumentCount - (1 for |this| + number of skipped arguments).
    Node* result = insertionSet.insertNode(
        nodeIndex, SpecInt32Only, ArithSub, origin, OpInfo(Arith::Unchecked),
        Edge(argumentCount, Int32Use),
        insertionSet.insertConstantForUse(
            nodeIndex, origin, jsNumber(1 + numberOfArgumentsToSkip), Int32Use));

    if (numberOfArgumentsToSkip) {
        // The above subtraction may produce a negative number if this number is
        // non-zero. We correct that here by clamping to zero.
        result = insertionSet.insertNode(
            nodeIndex, SpecInt32Only, ArithMax, origin,
            Edge(result, Int32Use),
            insertionSet.insertConstantForUse(nodeIndex, origin, jsNumber(0), Int32Use));
        result->setResult(NodeResultInt32);
    }

    return result;
}
// Emits a check that |cell| has |structure|, unless the watchpoint machinery
// can guarantee the cell's current structure will not transition.
void addStructureTransitionCheck(NodeOrigin origin, unsigned indexInBlock, JSCell* cell, Structure* structure)
{
    // If we can watch the cell's structure, no runtime check is needed.
    if (m_graph.watchpoints().consider(cell->structure()))
        return;

    // Otherwise materialize the cell as a frozen constant and check its structure.
    Node* frozenCell = m_insertionSet.insertNode(
        indexInBlock, speculationFromValue(cell), JSConstant, origin,
        OpInfo(m_graph.freeze(cell)));
    m_insertionSet.insertNode(
        indexInBlock, SpecNone, CheckStructure, origin,
        OpInfo(m_graph.addStructureSet(structure)), Edge(frozenCell, CellUse));
}
void handleNode() { switch (m_node->op()) { case GetByVal: case HasIndexedProperty: lowerBoundsCheck(m_node->child1(), m_node->child2(), m_node->child3()); break; case PutByVal: case PutByValDirect: { Edge base = m_graph.varArgChild(m_node, 0); Edge index = m_graph.varArgChild(m_node, 1); Edge storage = m_graph.varArgChild(m_node, 3); if (lowerBoundsCheck(base, index, storage)) break; if (m_node->arrayMode().typedArrayType() != NotTypedArray && m_node->arrayMode().isOutOfBounds()) { Node* length = m_insertionSet.insertNode( m_nodeIndex, SpecInt32, GetArrayLength, m_node->origin, OpInfo(m_node->arrayMode().asWord()), base, storage); m_graph.varArgChild(m_node, 4) = Edge(length, KnownInt32Use); break; } break; } default: break; } }
// Emits a check that |cell| keeps its current structure: a watchpoint when the
// structure's transition watchpoint set is still valid, a CheckStructure otherwise.
void addStructureTransitionCheck(NodeOrigin origin, unsigned indexInBlock, JSCell* cell)
{
    Structure* currentStructure = cell->structure();
    Node* cellConstant = m_insertionSet.insertNode(
        indexInBlock, speculationFromValue(cell), WeakJSConstant, origin, OpInfo(cell));

    // Cheap path: piggy-back on the structure's transition watchpoint.
    if (m_graph.watchpoints().isStillValid(currentStructure->transitionWatchpointSet())) {
        m_insertionSet.insertNode(
            indexInBlock, SpecNone, StructureTransitionWatchpoint, origin,
            OpInfo(currentStructure), Edge(cellConstant, CellUse));
        return;
    }

    // Fallback: explicit structure check.
    m_insertionSet.insertNode(
        indexInBlock, SpecNone, CheckStructure, origin,
        OpInfo(m_graph.addStructureSet(currentStructure)), Edge(cellConstant, CellUse));
}
// Seeds the root block: for every local, insert a SetLocal of jsUndefined() at
// the top of the block, so that any Flush elsewhere in the graph can reach
// backwards to a SetLocal.
void treatRootBlock(BasicBlock* block, InsertionSet& insertionSet)
{
    // Record, per operand, the first node in the block that accesses it.
    Operands<VariableAccessData*> initialAccessData(block->variablesAtTail.numberOfArguments(), block->variablesAtTail.numberOfLocals(), nullptr);
    Operands<Node*> initialAccessNodes(block->variablesAtTail.numberOfArguments(), block->variablesAtTail.numberOfLocals(), nullptr);
    for (unsigned i = 0; i < block->size(); i++) {
        Node* node = block->at(i);
        if (!node->hasVariableAccessData(m_graph))
            continue;
        VirtualRegister operand = node->local();
        if (initialAccessData.operand(operand))
            continue; // Only the first access per operand matters.
        DFG_ASSERT(m_graph, node, node->op() != SetLocal); // We should have inserted a Flush before this!
        initialAccessData.operand(operand) = node->variableAccessData();
        initialAccessNodes.operand(operand) = node;
    }

    // We want every Flush to be able to reach backwards to
    // a SetLocal. Doing this in the root block achieves this goal.
    NodeOrigin origin = block->at(0)->origin;
    Node* undefined = insertionSet.insertConstant(0, origin, jsUndefined());
    for (unsigned i = 0; i < block->variablesAtTail.numberOfLocals(); i++) {
        VirtualRegister operand = virtualRegisterForLocal(i);
        VariableAccessData* accessData;
        // NOTE(review): this assumes every local has at least one access node in
        // the root block — initialAccessNodes.operand(operand) would be null
        // otherwise. Confirm the preceding phase always inserts a Flush per local.
        DFG_ASSERT(m_graph, nullptr, initialAccessNodes.operand(operand)->op() == Flush); // We should have inserted a Flush before any SetLocal/SetArgument for the local that we are analyzing now.
        accessData = initialAccessData.operand(operand);
        DFG_ASSERT(m_graph, nullptr, accessData);
        insertionSet.insertNode(0, SpecNone, SetLocal, origin, OpInfo(accessData), Edge(undefined));
    }
}
// Creates a pre-header for |block|: a new block that jumps to |block| and
// absorbs every predecessor edge that does not come from a block dominated by
// |block| (i.e. every non-back edge). Returns the pre-header.
BasicBlock* createPreHeader(Graph& graph, BlockInsertionSet& insertionSet, BasicBlock* block)
{
    // Don't bother to preserve execution frequencies for now.
    BasicBlock* preHeader = insertionSet.insertBefore(block, PNaN);
    preHeader->appendNode(
        graph, SpecNone, Jump, block->firstOrigin(), OpInfo(block));

    for (unsigned predecessorIndex = 0; predecessorIndex < block->predecessors.size(); predecessorIndex++) {
        BasicBlock* predecessor = block->predecessors[predecessorIndex];
        // Back edges stay attached directly to the header.
        if (graph.m_dominators.dominates(block, predecessor))
            continue;
        // Swap-remove this predecessor; decrement the index so the element just
        // swapped into this slot is also examined.
        block->predecessors[predecessorIndex--] = block->predecessors.last();
        block->predecessors.removeLast();
        // Retarget every predecessor -> block edge through the pre-header.
        for (unsigned successorIndex = predecessor->numSuccessors(); successorIndex--;) {
            BasicBlock*& successor = predecessor->successor(successorIndex);
            if (successor != block)
                continue;
            successor = preHeader;
            preHeader->predecessors.append(predecessor);
        }
    }

    block->predecessors.append(preHeader);
    return preHeader;
}
// Prepares to constant-fold an access on |view|: installs a watchpoint on the
// typed-array view and keeps the node's children alive with a Phantom.
void prepareToFoldTypedArray(JSArrayBufferView* view)
{
    NodeOrigin origin = m_node->origin;
    m_insertionSet.insertNode(
        m_nodeIndex, SpecNone, TypedArrayWatchpoint, origin, OpInfo(view));
    m_insertionSet.insertNode(
        m_nodeIndex, SpecNone, Phantom, origin, m_node->children);
}
// For a non-root block: inserts a Flush before every SetLocal (and every
// non-primordial SetArgument) and flushes every argument and local again at the
// end of the block, so variables stay flushed to the stack throughout.
void treatRegularBlock(BasicBlock* block, InsertionSet& insertionSet)
{
    Operands<VariableAccessData*> currentBlockAccessData(block->variablesAtTail.numberOfArguments(), block->variablesAtTail.numberOfLocals(), nullptr);

    // Insert a Flush before every SetLocal to properly pattern the graph such that
    // any range between SetLocal and Flush has access to the local on the stack.
    {
        for (unsigned i = 0; i < block->size(); i++) {
            Node* node = block->at(i);
            // The primordial SetArguments are the ones registered in
            // m_graph.m_arguments; those must not get a preceding Flush.
            bool isPrimordialSetArgument = node->op() == SetArgument && node->local().isArgument() && node == m_graph.m_arguments[node->local().toArgument()];
            if (node->op() == SetLocal || (node->op() == SetArgument && !isPrimordialSetArgument)) {
                VirtualRegister operand = node->local();
                VariableAccessData* flushAccessData = currentBlockAccessData.operand(operand);
                if (!flushAccessData)
                    flushAccessData = newVariableAccessData(operand);
                insertionSet.insertNode(i, SpecNone, Flush, node->origin, OpInfo(flushAccessData));
            }
            // Track the most recent access data per operand for reuse below.
            if (node->hasVariableAccessData(m_graph))
                currentBlockAccessData.operand(node->local()) = node->variableAccessData();
        }
    }

    // Flush everything at the end of the block.
    {
        NodeOrigin origin = block->at(block->size() - 1)->origin;
        auto insertFlushAtEnd = [&] (VirtualRegister operand) {
            VariableAccessData* accessData = currentBlockAccessData.operand(operand);
            if (!accessData)
                accessData = newVariableAccessData(operand);
            currentBlockAccessData.operand(operand) = accessData;
            insertionSet.insertNode(block->size(), SpecNone, Flush, origin, OpInfo(accessData));
        };

        for (unsigned i = 0; i < block->variablesAtTail.numberOfLocals(); i++)
            insertFlushAtEnd(virtualRegisterForLocal(i));
        for (unsigned i = 0; i < block->variablesAtTail.numberOfArguments(); i++)
            insertFlushAtEnd(virtualRegisterForArgument(i));
    }
}
// Breaks the critical edge predecessor -> *successor by routing it through a
// fresh pad block that only jumps to the original successor. Updates the
// caller's successor slot to point at the pad.
void breakCriticalEdge(BasicBlock* predecessor, BasicBlock** successor)
{
    BasicBlock* target = *successor;
    BasicBlock* pad = m_insertionSet.insertBefore(target);

    // The pad's only job is to jump to the original target.
    pad->appendNode(
        m_graph, SpecNone, Jump, target->at(0)->origin, OpInfo(target));

    // Rewire predecessor lists on both sides of the new edge.
    pad->predecessors.append(predecessor);
    target->replacePredecessor(predecessor, pad);

    *successor = pad;
}
// Appends a PhantomLocal to |block| to keep |operand| alive if it is live at
// the head of the jettisoned block (captured variables need no such help).
void keepOperandAlive(BasicBlock* block, BasicBlock* jettisonedBlock, CodeOrigin codeOrigin, int operand)
{
    Node* headNode = jettisonedBlock->variablesAtHead.operand(operand);
    if (!headNode || headNode->variableAccessData()->isCaptured())
        return;
    block->appendNode(
        m_graph, SpecNone, PhantomLocal, codeOrigin,
        OpInfo(headNode->variableAccessData()));
}
// Inserts an ArithAdd of |source| and the constant |addend|, reusing the
// source node when the addend is zero. The add inherits the source's
// prediction and result type.
Node* insertAdd(
    unsigned nodeIndex, NodeOrigin origin, Edge source, int32_t addend,
    Arith::Mode arithMode = Arith::CheckOverflow)
{
    // Adding zero is the identity; no node needed.
    if (addend == 0)
        return source.node();

    Edge addendEdge = m_insertionSet.insertConstantForUse(
        nodeIndex, origin, jsNumber(addend), source.useKind());
    return m_insertionSet.insertNode(
        nodeIndex, source->prediction(), source->result(), ArithAdd, origin,
        OpInfo(arithMode), source, addendEdge);
}
// Lowers a GetById with a single GetByIdVariant to a GetByOffset (or a constant
// when the variant pins a specific value), inserting whatever structure
// watchpoint / cell check the abstract state says is still needed.
void emitGetByOffset(unsigned indexInBlock, Node* node, Structure* structure, const GetByIdVariant& variant, unsigned identifierNumber)
{
    NodeOrigin origin = node->origin;
    Edge childEdge = node->child1();
    Node* child = childEdge.node();

    // A watchpoint is needed unless the base is pinned to one known structure;
    // a cell check is needed if the base might not be a cell at all.
    bool needsWatchpoint = !m_state.forNode(child).m_currentKnownStructure.hasSingleton();
    bool needsCellCheck = m_state.forNode(child).m_type & ~SpecCell;

    ASSERT(!variant.chain());
    ASSERT(variant.structureSet().contains(structure));

    // Now before we do anything else, push the CFA forward over the GetById
    // and make sure we signal to the loop that it should continue and not
    // do any eliminations.
    m_interpreter.execute(indexInBlock);

    if (needsWatchpoint) {
        m_insertionSet.insertNode(
            indexInBlock, SpecNone, StructureTransitionWatchpoint, origin,
            OpInfo(structure), childEdge);
    } else if (needsCellCheck) {
        m_insertionSet.insertNode(
            indexInBlock, SpecNone, Phantom, origin, childEdge);
    }

    // If the variant knows the property's exact value, fold to a constant.
    if (variant.specificValue()) {
        m_graph.convertToConstant(node, variant.specificValue());
        return;
    }

    childEdge.setUseKind(KnownCellUse);

    Edge propertyStorage;
    if (isInlineOffset(variant.offset()))
        propertyStorage = childEdge; // Inline properties live inside the cell.
    else {
        propertyStorage = Edge(m_insertionSet.insertNode(
            indexInBlock, SpecNone, GetButterfly, origin, childEdge));
    }

    node->convertToGetByOffset(m_graph.m_storageAccessData.size(), propertyStorage);

    StorageAccessData storageAccessData;
    storageAccessData.offset = variant.offset();
    storageAccessData.identifierNumber = identifierNumber;
    m_graph.m_storageAccessData.append(storageAccessData);
}
// Turns |block|'s terminal into an unconditional transfer to |targetBlock|:
// merges the blocks when the target has exactly one predecessor, otherwise
// replaces the terminal with a Jump.
void convertToJump(BasicBlock* block, BasicBlock* targetBlock)
{
    ASSERT(targetBlock);
    ASSERT(targetBlock->isReachable);

    if (targetBlock->predecessors.size() == 1) {
        // Sole predecessor: fold the target into this block.
        m_graph.dethread();
        mergeBlocks(block, targetBlock, noBlocks());
        return;
    }

    Node* terminal = block->terminal();
    ASSERT(terminal->op() == Branch || terminal->op() == Switch);
    block->replaceTerminal(
        m_graph, SpecNone, Jump, terminal->origin, OpInfo(targetBlock));
}
// Lowers the implicit bounds check of the current node to an explicit
// GetArrayLength + CheckInBounds pair. Returns false when the array mode
// does not permit the lowering.
bool lowerBoundsCheck(Edge base, Edge index, Edge storage)
{
    if (!m_node->arrayMode().permitsBoundsCheckLowering())
        return false;

    // Some array modes can compute the length without the storage edge.
    if (!m_node->arrayMode().lengthNeedsStorage())
        storage = Edge();

    Node* lengthNode = m_insertionSet.insertNode(
        m_nodeIndex, SpecInt32, GetArrayLength, m_node->origin,
        OpInfo(m_node->arrayMode().asWord()), base, storage);
    m_insertionSet.insertNode(
        m_nodeIndex, SpecInt32, CheckInBounds, m_node->origin,
        index, Edge(lengthNode, KnownInt32Use));
    return true;
}
void addBaseCheck( unsigned indexInBlock, Node* node, const AbstractValue& baseValue, const StructureSet& set) { if (!baseValue.m_structure.isSubsetOf(set)) { // Arises when we prune MultiGetByOffset. We could have a // MultiGetByOffset with a single variant that checks for structure S, // and the input has structures S and T, for example. m_insertionSet.insertNode( indexInBlock, SpecNone, CheckStructure, node->origin, OpInfo(m_graph.addStructureSet(set)), node->child1()); return; } if (baseValue.m_type & ~SpecCell) { m_insertionSet.insertNode( indexInBlock, SpecNone, Phantom, node->origin, node->child1()); } }
// Keeps |operand| alive in |block| if it is live at the head of the jettisoned
// block: a Flush when the variable is flushed, a PhantomLocal otherwise.
void keepOperandAlive(BasicBlock* block, BasicBlock* jettisonedBlock, NodeOrigin nodeOrigin, VirtualRegister operand)
{
    Node* headNode = jettisonedBlock->variablesAtHead.operand(operand);
    if (!headNode)
        return;

    // This seems like it shouldn't be necessary because we could just rematerialize
    // PhantomLocals or something similar using bytecode liveness. However, in ThreadedCPS,
    // it's worth the sanity to maintain this eagerly. See
    // https://bugs.webkit.org/show_bug.cgi?id=144086
    NodeType keepAliveOp = (headNode->flags() & NodeIsFlushed) ? Flush : PhantomLocal;

    block->appendNode(
        m_graph, SpecNone, keepAliveOp, nodeOrigin,
        OpInfo(headNode->variableAccessData()));
}
bool run() { for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) { BasicBlock* block = m_graph.block(blockIndex); if (!block) continue; // An edge A->B is critical if A has multiple successor and B has multiple // predecessors. Thus we fail early if we don't have multiple successors. if (block->numSuccessors() <= 1) continue; // Break critical edges by inserting a "Jump" pad block in place of each // unique A->B critical edge. HashMap<BasicBlock*, BasicBlock*> successorPads; for (unsigned i = block->numSuccessors(); i--;) { BasicBlock** successor = &block->successor(i); if ((*successor)->predecessors.size() <= 1) continue; BasicBlock* pad = nullptr; auto iter = successorPads.find(*successor); if (iter == successorPads.end()) { pad = m_insertionSet.insertBefore(*successor, (*successor)->executionCount); pad->appendNode( m_graph, SpecNone, Jump, (*successor)->at(0)->origin, OpInfo(*successor)); pad->predecessors.append(block); (*successor)->replacePredecessor(block, pad); successorPads.set(*successor, pad); } else pad = iter->value; *successor = pad; } } return m_insertionSet.execute(); }
// Creates a pre-header for |block|: a new block that jumps to |block| and
// absorbs every predecessor edge that does not come from a block dominated by
// |block| (i.e. every non-back edge). Returns the pre-header.
BasicBlock* createPreHeader(Graph& graph, BlockInsertionSet& insertionSet, BasicBlock* block)
{
    BasicBlock* preHeader = insertionSet.insertBefore(block);
    preHeader->appendNode(
        graph, SpecNone, Jump, block->at(0)->codeOrigin, OpInfo(block));

    for (unsigned predecessorIndex = 0; predecessorIndex < block->predecessors.size(); predecessorIndex++) {
        BasicBlock* predecessor = block->predecessors[predecessorIndex];
        // Back edges stay attached directly to the header.
        if (graph.m_dominators.dominates(block, predecessor))
            continue;
        // Swap-remove this predecessor; decrement the index so the element just
        // swapped into this slot is also examined.
        block->predecessors[predecessorIndex--] = block->predecessors.last();
        block->predecessors.removeLast();
        // Retarget every predecessor -> block edge through the pre-header.
        for (unsigned successorIndex = predecessor->numSuccessors(); successorIndex--;) {
            BasicBlock*& successor = predecessor->successor(successorIndex);
            if (successor != block)
                continue;
            successor = preHeader;
            preHeader->predecessors.append(predecessor);
        }
    }

    block->predecessors.append(preHeader);
    return preHeader;
}
void insertInferredTypeCheck( InsertionSet& insertionSet, unsigned nodeIndex, NodeOrigin origin, Node* baseNode, const InferredType::Descriptor& type) { insertionSet.graph().registerInferredType(type); switch (type.kind()) { case InferredType::Bottom: insertionSet.insertNode(nodeIndex, SpecNone, ForceOSRExit, origin); return; case InferredType::Boolean: insertionSet.insertNode(nodeIndex, SpecNone, Check, origin, Edge(baseNode, BooleanUse)); return; case InferredType::Other: insertionSet.insertNode(nodeIndex, SpecNone, Check, origin, Edge(baseNode, OtherUse)); return; case InferredType::Int32: insertionSet.insertNode(nodeIndex, SpecNone, Check, origin, Edge(baseNode, Int32Use)); return; case InferredType::Number: insertionSet.insertNode(nodeIndex, SpecNone, Check, origin, Edge(baseNode, NumberUse)); return; case InferredType::String: insertionSet.insertNode(nodeIndex, SpecNone, Check, origin, Edge(baseNode, StringUse)); return; case InferredType::Symbol: insertionSet.insertNode(nodeIndex, SpecNone, Check, origin, Edge(baseNode, SymbolUse)); return; case InferredType::ObjectWithStructure: insertionSet.insertNode( nodeIndex, SpecNone, CheckStructure, origin, OpInfo(insertionSet.graph().addStructureSet(type.structure())), Edge(baseNode, CellUse)); return; case InferredType::ObjectWithStructureOrOther: insertionSet.insertNode( nodeIndex, SpecNone, CheckStructure, origin, OpInfo(insertionSet.graph().addStructureSet(type.structure())), Edge(baseNode, CellOrOtherUse)); return; case InferredType::Object: insertionSet.insertNode(nodeIndex, SpecNone, Check, origin, Edge(baseNode, ObjectUse)); return; case InferredType::ObjectOrOther: insertionSet.insertNode(nodeIndex, SpecNone, Check, origin, Edge(baseNode, ObjectOrOtherUse)); return; case InferredType::Top: return; } DFG_CRASH(insertionSet.graph(), baseNode, "Bad inferred type"); }
// CFG simplification: repeatedly merges Jump successors with a single
// predecessor, folds Branch/Switch terminals whose outcome is known, and
// jettisons the untaken targets, until a fixpoint is reached. Returns whether
// anything changed.
bool run()
{
    const bool extremeLogging = false;

    bool outerChanged = false;
    bool innerChanged;

    do {
        innerChanged = false;
        for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
            BasicBlock* block = m_graph.block(blockIndex);
            if (!block)
                continue;
            ASSERT(block->isReachable);

            switch (block->last()->op()) {
            case Jump: {
                // Successor with one predecessor -> merge.
                if (block->successor(0)->predecessors.size() == 1) {
                    ASSERT(block->successor(0)->predecessors[0] == block);
                    if (extremeLogging)
                        m_graph.dump();
                    m_graph.dethread();
                    mergeBlocks(block, block->successor(0), noBlocks());
                    innerChanged = outerChanged = true;
                    break;
                }

                // FIXME: Block only has a jump -> remove. This is tricky though because of
                // liveness. What we really want is to slam in a phantom at the end of the
                // block, after the terminal. But we can't right now. :-(
                // Idea: what if I slam the ghosties into my successor? Nope, that's
                // suboptimal, because if my successor has multiple predecessors then we'll
                // be keeping alive things on other predecessor edges unnecessarily.
                // What we really need is the notion of end-of-block ghosties!
                break;
            }

            case Branch: {
                // Branch on constant -> jettison the not-taken block and merge.
                if (isKnownDirection(block->cfaBranchDirection)) {
                    bool condition = branchCondition(block->cfaBranchDirection);
                    BasicBlock* targetBlock = block->successorForCondition(condition);
                    BasicBlock* jettisonedBlock = block->successorForCondition(!condition);
                    if (targetBlock->predecessors.size() == 1) {
                        if (extremeLogging)
                            m_graph.dump();
                        m_graph.dethread();
                        mergeBlocks(block, targetBlock, oneBlock(jettisonedBlock));
                    } else {
                        if (extremeLogging)
                            m_graph.dump();
                        m_graph.dethread();

                        ASSERT(block->last()->isTerminal());
                        CodeOrigin boundaryCodeOrigin = block->last()->codeOrigin;
                        block->last()->convertToPhantom();
                        ASSERT(block->last()->refCount() == 1);

                        jettisonBlock(block, jettisonedBlock, boundaryCodeOrigin);

                        block->appendNode(
                            m_graph, SpecNone, Jump, boundaryCodeOrigin,
                            OpInfo(targetBlock));
                    }
                    innerChanged = outerChanged = true;
                    break;
                }

                // Branch to same destination -> jump.
                // FIXME: this will currently not be hit because of the lack of jump-only
                // block simplification.
                if (block->successor(0) == block->successor(1)) {
                    convertToJump(block, block->successor(0));
                    innerChanged = outerChanged = true;
                    break;
                }

                break;
            }

            case Switch: {
                SwitchData* data = block->last()->switchData();

                // Prune out cases that end up jumping to default.
                for (unsigned i = 0; i < data->cases.size(); ++i) {
                    if (data->cases[i].target == data->fallThrough)
                        data->cases[i--] = data->cases.takeLast();
                }

                // If there are no cases other than default then this turns
                // into a jump.
                if (data->cases.isEmpty()) {
                    convertToJump(block, data->fallThrough);
                    innerChanged = outerChanged = true;
                    break;
                }

                // Switch on constant -> jettison all other targets and merge.
                if (block->last()->child1()->hasConstant()) {
                    JSValue value = m_graph.valueOfJSConstant(block->last()->child1().node());
                    TriState found = FalseTriState;
                    BasicBlock* targetBlock = 0;
                    for (unsigned i = data->cases.size(); found == FalseTriState && i--;) {
                        found = data->cases[i].value.strictEqual(value);
                        if (found == TrueTriState)
                            targetBlock = data->cases[i].target;
                    }

                    // A Mixed comparison means we can't prove which case is taken.
                    if (found == MixedTriState)
                        break;
                    if (found == FalseTriState)
                        targetBlock = data->fallThrough;
                    ASSERT(targetBlock);

                    Vector<BasicBlock*, 1> jettisonedBlocks;
                    for (unsigned i = block->numSuccessors(); i--;) {
                        BasicBlock* jettisonedBlock = block->successor(i);
                        if (jettisonedBlock != targetBlock)
                            jettisonedBlocks.append(jettisonedBlock);
                    }

                    if (targetBlock->predecessors.size() == 1) {
                        if (extremeLogging)
                            m_graph.dump();
                        m_graph.dethread();
                        mergeBlocks(block, targetBlock, jettisonedBlocks);
                    } else {
                        if (extremeLogging)
                            m_graph.dump();
                        m_graph.dethread();

                        CodeOrigin boundaryCodeOrigin = block->last()->codeOrigin;
                        block->last()->convertToPhantom();
                        for (unsigned i = jettisonedBlocks.size(); i--;)
                            jettisonBlock(block, jettisonedBlocks[i], boundaryCodeOrigin);
                        block->appendNode(
                            m_graph, SpecNone, Jump, boundaryCodeOrigin, OpInfo(targetBlock));
                    }
                    innerChanged = outerChanged = true;
                    break;
                }
                // Note: deliberate fallthrough to default when the switch
                // operand is not constant; default just breaks.
            }

            default:
                break;
            }
        }

        if (innerChanged) {
            // Here's the reason for this pass:
            // Blocks: A, B, C, D, E, F
            // A -> B, C
            // B -> F
            // C -> D, E
            // D -> F
            // E -> F
            //
            // Assume that A's branch is determined to go to B. Then the rest of this phase
            // is smart enough to simplify down to:
            // A -> B
            // B -> F
            // C -> D, E
            // D -> F
            // E -> F
            //
            // We will also merge A and B. But then we don't have any other mechanism to
            // remove D, E as predecessors for F. Worse, the rest of this phase does not
            // know how to fix the Phi functions of F to ensure that they no longer refer
            // to variables in D, E. In general, we need a way to handle Phi simplification
            // upon:
            // 1) Removal of a predecessor due to branch simplification. The branch
            //    simplifier already does that.
            // 2) Invalidation of a predecessor because said predecessor was rendered
            //    unreachable. We do this here.
            //
            // This implies that when a block is unreachable, we must inspect its
            // successors' Phi functions to remove any references from them into the
            // removed block.
            m_graph.invalidateCFG();
            m_graph.resetReachability();
            m_graph.killUnreachableBlocks();
        }

        if (Options::validateGraphAtEachPhase())
            validate(m_graph);
    } while (innerChanged);

    return outerChanged;
}
// Lowers a PutById (StructureTransitionData-based path) to a PutByOffset node:
// emits the needed watchpoint/cell check, prototype-chain transition checks for
// transitions, property-storage (re)allocation, the PutStructure, and barriers.
void emitPutByOffset(unsigned indexInBlock, Node* node, Structure* structure, const PutByIdVariant& variant, unsigned identifierNumber)
{
    NodeOrigin origin = node->origin;
    Edge childEdge = node->child1();
    Node* child = childEdge.node();

    ASSERT(variant.oldStructure() == structure);

    // A watchpoint is needed unless the base is pinned to one known structure;
    // a cell check is needed if the base might not be a cell at all.
    bool needsWatchpoint = !m_state.forNode(child).m_currentKnownStructure.hasSingleton();
    bool needsCellCheck = m_state.forNode(child).m_type & ~SpecCell;

    // Now before we do anything else, push the CFA forward over the PutById
    // and make sure we signal to the loop that it should continue and not
    // do any eliminations.
    m_interpreter.execute(indexInBlock);

    if (needsWatchpoint) {
        m_insertionSet.insertNode(
            indexInBlock, SpecNone, StructureTransitionWatchpoint, origin,
            OpInfo(structure), childEdge);
    } else if (needsCellCheck) {
        m_insertionSet.insertNode(
            indexInBlock, SpecNone, Phantom, origin, childEdge);
    }

    childEdge.setUseKind(KnownCellUse);

    StructureTransitionData* transitionData = 0;
    if (variant.kind() == PutByIdVariant::Transition) {
        transitionData = m_graph.addStructureTransitionData(
            StructureTransitionData(structure, variant.newStructure()));

        if (node->op() == PutById) {
            // A transitioning PutById must verify nothing up the prototype
            // chain intercepts the property: check each prototype cell.
            if (!structure->storedPrototype().isNull()) {
                addStructureTransitionCheck(
                    origin, indexInBlock,
                    structure->storedPrototype().asCell());
            }

            m_graph.chains().addLazily(variant.structureChain());

            for (unsigned i = 0; i < variant.structureChain()->size(); ++i) {
                JSValue prototype = variant.structureChain()->at(i)->storedPrototype();
                if (prototype.isNull())
                    continue;
                ASSERT(prototype.isCell());
                addStructureTransitionCheck(
                    origin, indexInBlock, prototype.asCell());
            }
        }
    }

    Edge propertyStorage;

    if (isInlineOffset(variant.offset()))
        propertyStorage = childEdge; // Inline properties live inside the cell itself.
    else if (
        variant.kind() == PutByIdVariant::Replace
        || structure->outOfLineCapacity() == variant.newStructure()->outOfLineCapacity()) {
        // Existing out-of-line storage is big enough; just load the butterfly.
        propertyStorage = Edge(m_insertionSet.insertNode(
            indexInBlock, SpecNone, GetButterfly, origin, childEdge));
    } else if (!structure->outOfLineCapacity()) {
        // Transition from no out-of-line storage to some: allocate fresh storage.
        ASSERT(variant.newStructure()->outOfLineCapacity());
        ASSERT(!isInlineOffset(variant.offset()));
        Node* allocatePropertyStorage = m_insertionSet.insertNode(
            indexInBlock, SpecNone, AllocatePropertyStorage,
            origin, OpInfo(transitionData), childEdge);
        m_insertionSet.insertNode(indexInBlock, SpecNone, StoreBarrier, origin, Edge(node->child1().node(), KnownCellUse));
        propertyStorage = Edge(allocatePropertyStorage);
    } else {
        // Transition that grows existing out-of-line storage: reallocate.
        ASSERT(structure->outOfLineCapacity());
        ASSERT(variant.newStructure()->outOfLineCapacity() > structure->outOfLineCapacity());
        ASSERT(!isInlineOffset(variant.offset()));

        Node* reallocatePropertyStorage = m_insertionSet.insertNode(
            indexInBlock, SpecNone, ReallocatePropertyStorage, origin,
            OpInfo(transitionData), childEdge,
            Edge(m_insertionSet.insertNode(
                indexInBlock, SpecNone, GetButterfly, origin, childEdge)));
        m_insertionSet.insertNode(indexInBlock, SpecNone, StoreBarrier, origin, Edge(node->child1().node(), KnownCellUse));
        propertyStorage = Edge(reallocatePropertyStorage);
    }

    if (variant.kind() == PutByIdVariant::Transition) {
        // The barrier is inserted before the PutStructure (same index, earlier
        // call), so it precedes the structure change in the final node order.
        Node* putStructure = m_graph.addNode(SpecNone, PutStructure, origin, OpInfo(transitionData), childEdge);
        m_insertionSet.insertNode(indexInBlock, SpecNone, StoreBarrier, origin, Edge(node->child1().node(), KnownCellUse));
        m_insertionSet.insert(indexInBlock, putStructure);
    }

    node->convertToPutByOffset(m_graph.m_storageAccessData.size(), propertyStorage);

    // Barrier for storing the value (child2) into the object.
    m_insertionSet.insertNode(
        indexInBlock, SpecNone, StoreBarrier, origin, Edge(node->child2().node(), KnownCellUse));

    StorageAccessData storageAccessData;
    storageAccessData.offset = variant.offset();
    storageAccessData.identifierNumber = identifierNumber;
    m_graph.m_storageAccessData.append(storageAccessData);
}
bool run() { RELEASE_ASSERT(m_graph.m_form == ThreadedCPS); if (dumpGraph) { dataLog("Graph dump at top of SSA conversion:\n"); m_graph.dump(); } // Eliminate all duplicate or self-pointing Phi edges. This means that // we transform: // // p: Phi(@n1, @n2, @n3) // // into: // // p: Phi(@x) // // if each @ni in {@n1, @n2, @n3} is either equal to @p to is equal // to @x, for exactly one other @x. Additionally, trivial Phis (i.e. // p: Phi(@x)) are forwarded, so that if have an edge to such @p, we // replace it with @x. This loop does this for Phis only; later we do // such forwarding for Phi references found in other nodes. // // See Aycock and Horspool in CC'00 for a better description of what // we're doing here. do { m_changed = false; for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { BasicBlock* block = m_graph.block(blockIndex); if (!block) continue; for (unsigned phiIndex = block->phis.size(); phiIndex--;) { Node* phi = block->phis[phiIndex]; if (phi->variableAccessData()->isCaptured()) continue; forwardPhiChildren(phi); deduplicateChildren(phi); } } } while (m_changed); // For each basic block, for each local live at the head of that block, // figure out what node we should be referring to instead of that local. // If it turns out to be a non-trivial Phi, make sure that we create an // SSA Phi and Upsilons in predecessor blocks. We reuse // BasicBlock::variablesAtHead for tracking which nodes to refer to. 
Operands<bool> nonTrivialPhis(OperandsLike, m_graph.block(0)->variablesAtHead); for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { BasicBlock* block = m_graph.block(blockIndex); if (!block) continue; nonTrivialPhis.fill(false); for (unsigned i = block->phis.size(); i--;) { Node* phi = block->phis[i]; if (!phi->children.justOneChild()) nonTrivialPhis.operand(phi->local()) = true; } for (unsigned i = block->variablesAtHead.size(); i--;) { Node* node = block->variablesAtHead[i]; if (!node) continue; if (verbose) dataLog("At block #", blockIndex, " for operand r", block->variablesAtHead.operandForIndex(i), " have node ", node, "\n"); VariableAccessData* variable = node->variableAccessData(); if (variable->isCaptured()) { // Poison this entry in variablesAtHead because we don't // want anyone to try to refer to it, if the variable is // captured. block->variablesAtHead[i] = 0; continue; } switch (node->op()) { case Phi: case SetArgument: break; case Flush: case GetLocal: case PhantomLocal: node = node->child1().node(); break; default: RELEASE_ASSERT_NOT_REACHED(); } RELEASE_ASSERT(node->op() == Phi || node->op() == SetArgument); bool isFlushed = !!(node->flags() & NodeIsFlushed); if (node->op() == Phi) { if (!nonTrivialPhis.operand(node->local())) { Edge edge = node->children.justOneChild(); ASSERT(edge); if (verbose) dataLog(" One child: ", edge, ", ", RawPointer(edge.node()), "\n"); node = edge.node(); // It's something from a different basic block. } else { if (verbose) dataLog(" Non-trivial.\n"); // It's a non-trivial Phi. 
FlushFormat format = variable->flushFormat(); NodeFlags result = resultFor(format); UseKind useKind = useKindFor(format); node = m_insertionSet.insertNode(0, SpecNone, Phi, NodeOrigin()); if (verbose) dataLog(" Inserted new node: ", node, "\n"); node->mergeFlags(result); RELEASE_ASSERT((node->flags() & NodeResultMask) == result); for (unsigned j = block->predecessors.size(); j--;) { BasicBlock* predecessor = block->predecessors[j]; predecessor->appendNonTerminal( m_graph, SpecNone, Upsilon, predecessor->last()->origin, OpInfo(node), Edge(predecessor->variablesAtTail[i], useKind)); } if (isFlushed) { // Do nothing. For multiple reasons. // Reason #1: If the local is flushed then we don't need to bother // with a MovHint since every path to this point in the code will // have flushed the bytecode variable using a SetLocal and hence // the Availability::flushedAt() will agree, and that will be // sufficient for figuring out how to recover the variable's value. // Reason #2: If we had inserted a MovHint and the Phi function had // died (because the only user of the value was the "flush" - i.e. // some asynchronous runtime thingy) then the MovHint would turn // into a ZombieHint, which would fool us into thinking that the // variable is dead. // Reason #3: If we had inserted a MovHint then even if the Phi // stayed alive, we would still end up generating inefficient code // since we would be telling the OSR exit compiler to use some SSA // value for the bytecode variable rather than just telling it that // the value was already on the stack. 
} else { m_insertionSet.insertNode( 0, SpecNone, MovHint, NodeOrigin(), OpInfo(variable->local().offset()), Edge(node)); } } } block->variablesAtHead[i] = node; } m_insertionSet.execute(block); } if (verbose) { dataLog("Variables at head after SSA Phi insertion:\n"); for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { BasicBlock* block = m_graph.block(blockIndex); if (!block) continue; dataLog(" ", *block, ": ", block->variablesAtHead, "\n"); } } // At this point variablesAtHead in each block refers to either: // // 1) A new SSA phi in the current block. // 2) A SetArgument, which will soon get converted into a GetArgument. // 3) An old CPS phi in a different block. // // We don't have to do anything for (1) and (2), but we do need to // do a replacement for (3). // Clear all replacements, since other phases may have used them. m_graph.clearReplacements(); if (dumpGraph) { dataLog("Graph just before identifying replacements:\n"); m_graph.dump(); } // For all of the old CPS Phis, figure out what they correspond to in SSA. for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { BasicBlock* block = m_graph.block(blockIndex); if (!block) continue; if (verbose) dataLog("Dealing with block #", blockIndex, "\n"); for (unsigned phiIndex = block->phis.size(); phiIndex--;) { Node* phi = block->phis[phiIndex]; if (verbose) { dataLog( "Considering ", phi, " (", RawPointer(phi), "), for r", phi->local(), ", and its replacement in ", *block, ", ", block->variablesAtHead.operand(phi->local()), "\n"); } ASSERT(phi != block->variablesAtHead.operand(phi->local())); phi->misc.replacement = block->variablesAtHead.operand(phi->local()); } } // Now make sure that all variablesAtHead in each block points to the // canonical SSA value. Prior to this, variablesAtHead[local] may point to // an old CPS Phi in a different block. 
for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { BasicBlock* block = m_graph.block(blockIndex); if (!block) continue; for (size_t i = block->variablesAtHead.size(); i--;) { Node* node = block->variablesAtHead[i]; if (!node) continue; while (node->misc.replacement) { ASSERT(node != node->misc.replacement); node = node->misc.replacement; } block->variablesAtHead[i] = node; } } if (verbose) { dataLog("Variables at head after convergence:\n"); for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { BasicBlock* block = m_graph.block(blockIndex); if (!block) continue; dataLog(" ", *block, ": ", block->variablesAtHead, "\n"); } } // Convert operations over locals into operations over SSA nodes. // - GetLocal over captured variables lose their phis. // - GetLocal over uncaptured variables die and get replaced with references // to the node specified by variablesAtHead. // - SetLocal gets NodeMustGenerate if it's flushed, or turns into a // Check otherwise. // - Flush loses its children and turns into a Phantom. // - PhantomLocal becomes Phantom, and its child is whatever is specified // by variablesAtHead. // - SetArgument turns into GetArgument unless it's a captured variable. // - Upsilons get their children fixed to refer to the true value of that local // at the end of the block. Prior to this loop, Upsilons will refer to // variableAtTail[operand], which may be any of Flush, PhantomLocal, GetLocal, // SetLocal, SetArgument, or Phi. We accomplish this by setting the // replacement pointers of all of those nodes to refer to either // variablesAtHead[operand], or the child of the SetLocal. 
for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { BasicBlock* block = m_graph.block(blockIndex); if (!block) continue; for (unsigned phiIndex = block->phis.size(); phiIndex--;) { block->phis[phiIndex]->misc.replacement = block->variablesAtHead.operand(block->phis[phiIndex]->local()); } for (unsigned nodeIndex = block->size(); nodeIndex--;) ASSERT(!block->at(nodeIndex)->misc.replacement); for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) { Node* node = block->at(nodeIndex); m_graph.performSubstitution(node); switch (node->op()) { case SetLocal: { VariableAccessData* variable = node->variableAccessData(); if (variable->isCaptured() || !!(node->flags() & NodeIsFlushed)) node->mergeFlags(NodeMustGenerate); else node->setOpAndDefaultFlags(Check); node->misc.replacement = node->child1().node(); // Only for Upsilons. break; } case GetLocal: { // It seems tempting to just do forwardPhi(GetLocal), except that we // could have created a new (SSA) Phi, and the GetLocal could still be // referring to an old (CPS) Phi. Uses variablesAtHead to tell us what // to refer to. node->children.reset(); VariableAccessData* variable = node->variableAccessData(); if (variable->isCaptured()) break; node->convertToPhantom(); node->misc.replacement = block->variablesAtHead.operand(variable->local()); break; } case Flush: { node->children.reset(); node->convertToPhantom(); // This is only for Upsilons. An Upsilon will only refer to a Flush if // there were no SetLocals or GetLocals in the block. node->misc.replacement = block->variablesAtHead.operand(node->local()); break; } case PhantomLocal: { VariableAccessData* variable = node->variableAccessData(); if (variable->isCaptured()) break; node->child1().setNode(block->variablesAtHead.operand(variable->local())); node->convertToPhantom(); // This is only for Upsilons. An Upsilon will only refer to a // PhantomLocal if there were no SetLocals or GetLocals in the block. 
node->misc.replacement = block->variablesAtHead.operand(variable->local()); break; } case SetArgument: { VariableAccessData* variable = node->variableAccessData(); if (variable->isCaptured()) break; node->setOpAndDefaultFlags(GetArgument); node->mergeFlags(resultFor(node->variableAccessData()->flushFormat())); break; } default: break; } } } // Free all CPS phis and reset variables vectors. for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { BasicBlock* block = m_graph.block(blockIndex); if (!block) continue; for (unsigned phiIndex = block->phis.size(); phiIndex--;) m_graph.m_allocator.free(block->phis[phiIndex]); block->phis.clear(); block->variablesAtHead.clear(); block->variablesAtTail.clear(); block->valuesAtHead.clear(); block->valuesAtHead.clear(); block->ssa = adoptPtr(new BasicBlock::SSAData(block)); } m_graph.m_arguments.clear(); m_graph.m_form = SSA; return true; }
Node* PromotedHeapLocation::createHint(Graph& graph, NodeOrigin origin, Node* value) { return graph.addNode( SpecNone, PutHint, origin, OpInfo(descriptor().imm1()), OpInfo(descriptor().imm2()), base()->defaultEdge(), value->defaultEdge()); }
// Applies per-node strength-reduction rules to m_node: identity folding for
// no-op bit/arithmetic operations, mul-by-2 -> add, sub-constant -> add,
// pow special cases, nested-mod elimination, representation-conversion
// short-circuiting, Flush weakening, OverridesHasInstance folding, and
// string constant folding. Sets m_changed when the graph is mutated.
void handleNode()
{
    switch (m_node->op()) {
    case BitOr:
        handleCommutativity();

        // x | 0 == x (guarded to non-Untyped uses so no conversion is lost).
        if (m_node->child1().useKind() != UntypedUse && m_node->child2()->isInt32Constant() && !m_node->child2()->asInt32()) {
            convertToIdentityOverChild1();
            break;
        }
        break;

    case BitXor:
    case BitAnd:
        handleCommutativity();
        break;

    case BitLShift:
    case BitRShift:
    case BitURShift:
        // Shifting by a multiple of 32 is a no-op (shift amounts are mod 32).
        if (m_node->child1().useKind() != UntypedUse && m_node->child2()->isInt32Constant() && !(m_node->child2()->asInt32() & 0x1f)) {
            convertToIdentityOverChild1();
            break;
        }
        break;

    case UInt32ToNumber:
        // A BitURShift by a nonzero amount always yields a non-negative int32,
        // so the uint32 conversion is redundant unless overflow checking is on.
        if (m_node->child1()->op() == BitURShift
            && m_node->child1()->child2()->isInt32Constant()
            && (m_node->child1()->child2()->asInt32() & 0x1f)
            && m_node->arithMode() != Arith::DoOverflow) {
            m_node->convertToIdentity();
            m_changed = true;
            break;
        }
        break;

    case ArithAdd:
        handleCommutativity();

        // x + int32(0) == x.
        if (m_node->child2()->isInt32Constant() && !m_node->child2()->asInt32()) {
            convertToIdentityOverChild1();
            break;
        }
        break;

    case ArithMul: {
        handleCommutativity();
        Edge& child2 = m_node->child2();
        // x * 2 -> x + x where the arithmetic mode permits it.
        if (child2->isNumberConstant() && child2->asNumber() == 2) {
            switch (m_node->binaryUseKind()) {
            case DoubleRepUse:
                // It is always valuable to get rid of a double multiplication by 2.
                // We won't have half-register dependencies issues on x86 and we won't have to load the constants.
                m_node->setOp(ArithAdd);
                child2.setNode(m_node->child1().node());
                m_changed = true;
                break;
#if USE(JSVALUE64)
            case Int52RepUse:
#endif
            case Int32Use:
                // For integers, we can only convert compatible modes.
                // ArithAdd does handle do negative zero check for example.
                if (m_node->arithMode() == Arith::CheckOverflow || m_node->arithMode() == Arith::Unchecked) {
                    m_node->setOp(ArithAdd);
                    child2.setNode(m_node->child1().node());
                    m_changed = true;
                }
                break;
            default:
                break;
            }
        }
        break;
    }

    case ArithSub:
        // x - c -> x + (-c). The (-value != value) guard excludes 0 and
        // INT32_MIN, whose negation is not representable.
        if (m_node->child2()->isInt32Constant()
            && m_node->isBinaryUseKind(Int32Use)) {
            int32_t value = m_node->child2()->asInt32();
            if (-value != value) {
                m_node->setOp(ArithAdd);
                m_node->child2().setNode(
                    m_insertionSet.insertConstant(
                        m_nodeIndex, m_node->origin, jsNumber(-value)));
                m_changed = true;
                break;
            }
        }
        break;

    case ArithPow:
        // pow(x, 1) == x; pow(x, 0.5) -> sqrt(x) with the original checks kept.
        if (m_node->child2()->isNumberConstant()) {
            double yOperandValue = m_node->child2()->asNumber();
            if (yOperandValue == 1) {
                convertToIdentityOverChild1();
            } else if (yOperandValue == 0.5) {
                m_insertionSet.insertCheck(m_nodeIndex, m_node);
                m_node->convertToArithSqrt();
                m_changed = true;
            }
        }
        break;

    case ArithMod:
        // On Integers
        // In: ArithMod(ArithMod(x, const1), const2)
        // Out: Identity(ArithMod(x, const1))
        // if const1 <= const2.
        if (m_node->binaryUseKind() == Int32Use
            && m_node->child2()->isInt32Constant()
            && m_node->child1()->op() == ArithMod
            && m_node->child1()->binaryUseKind() == Int32Use
            && m_node->child1()->child2()->isInt32Constant()
            && std::abs(m_node->child1()->child2()->asInt32()) <= std::abs(m_node->child2()->asInt32())) {
            convertToIdentityOverChild1();
        }
        break;

    case ValueRep:
    case Int52Rep:
    case DoubleRep: {
        // This short-circuits circuitous conversions, like ValueRep(DoubleRep(value)) or
        // even more complicated things. Like, it can handle a beast like
        // ValueRep(DoubleRep(Int52Rep(value))).

        // The only speculation that we would do beyond validating that we have a type that
        // can be represented a certain way is an Int32 check that would appear on Int52Rep
        // nodes. For now, if we see this and the final type we want is an Int52, we use it
        // as an excuse not to fold. The only thing we would need is a Int52RepInt32Use kind.
        bool hadInt32Check = false;
        if (m_node->op() == Int52Rep) {
            if (m_node->child1().useKind() != Int32Use)
                break;
            hadInt32Check = true;
        }
        // Walk down the chain of conversion nodes looking for a value already
        // in the representation we want.
        for (Node* node = m_node->child1().node(); ; node = node->child1().node()) {
            if (canonicalResultRepresentation(node->result()) == canonicalResultRepresentation(m_node->result())) {
                m_insertionSet.insertCheck(m_nodeIndex, m_node);
                if (hadInt32Check) {
                    // FIXME: Consider adding Int52RepInt32Use or even DoubleRepInt32Use,
                    // which would be super weird. The latter would only arise in some
                    // seriously circuitous conversions.
                    if (canonicalResultRepresentation(node->result()) != NodeResultJS)
                        break;
                    m_insertionSet.insertCheck(
                        m_nodeIndex, m_node->origin, Edge(node, Int32Use));
                }
                m_node->child1() = node->defaultEdge();
                m_node->convertToIdentity();
                m_changed = true;
                break;
            }

            switch (node->op()) {
            case Int52Rep:
                if (node->child1().useKind() != Int32Use)
                    break;
                hadInt32Check = true;
                continue;

            case DoubleRep:
            case ValueRep:
                continue;

            default:
                break;
            }
            break;
        }
        break;
    }

    case Flush: {
        ASSERT(m_graph.m_form != SSA);

        // Scan backwards for a SetLocal of the same local; give up if
        // anything in between may touch that stack slot.
        Node* setLocal = nullptr;
        VirtualRegister local = m_node->local();

        for (unsigned i = m_nodeIndex; i--;) {
            Node* node = m_block->at(i);
            if (node->op() == SetLocal && node->local() == local) {
                setLocal = node;
                break;
            }
            if (accessesOverlap(m_graph, node, AbstractHeap(Stack, local)))
                break;
        }

        if (!setLocal)
            break;

        // The Flush should become a PhantomLocal at this point. This means that we want the
        // local's value during OSR, but we don't care if the value is stored to the stack. CPS
        // rethreading can canonicalize PhantomLocals for us.
        m_node->convertFlushToPhantomLocal();
        m_graph.dethread();
        m_changed = true;
        break;
    }

    // FIXME: we should probably do this in constant folding but this currently relies on an OSR exit rule.
    // https://bugs.webkit.org/show_bug.cgi?id=154832
    case OverridesHasInstance: {
        if (!m_node->child2().node()->isCellConstant())
            break;

        if (m_node->child2().node()->asCell() != m_graph.globalObjectFor(m_node->origin.semantic)->functionProtoHasInstanceSymbolFunction()) {
            m_graph.convertToConstant(m_node, jsBoolean(true));
            m_changed = true;
        } else if (!m_graph.hasExitSite(m_node->origin.semantic, BadTypeInfoFlags)) {
            // We optimistically assume that we will not see a function that has a custom instanceof operation as they should be rare.
            m_insertionSet.insertNode(m_nodeIndex, SpecNone, CheckTypeInfoFlags, m_node->origin, OpInfo(ImplementsDefaultHasInstance), Edge(m_node->child1().node(), CellUse));
            m_graph.convertToConstant(m_node, jsBoolean(false));
            m_changed = true;
        }
        break;
    }

    // FIXME: We have a lot of string constant-folding rules here. It would be great to
    // move these to the abstract interpreter once AbstractValue can support LazyJSValue.
    // https://bugs.webkit.org/show_bug.cgi?id=155204
    case MakeRope:
    case ValueAdd:
    case StrCat: {
        // Fold concatenation when every operand is a known string constant.
        String leftString = m_node->child1()->tryGetString(m_graph);
        if (!leftString)
            break;
        String rightString = m_node->child2()->tryGetString(m_graph);
        if (!rightString)
            break;
        String extraString;
        if (m_node->child3()) {
            extraString = m_node->child3()->tryGetString(m_graph);
            if (!extraString)
                break;
        }

        StringBuilder builder;
        builder.append(leftString);
        builder.append(rightString);
        if (!!extraString)
            builder.append(extraString);

        m_node->convertToLazyJSConstant(
            m_graph, LazyJSValue::newString(m_graph, builder.toString()));
        m_changed = true;
        break;
    }

    case GetArrayLength: {
        // Fold length of a known constant string.
        if (m_node->arrayMode().type() == Array::Generic
            || m_node->arrayMode().type() == Array::String) {
            String string = m_node->child1()->tryGetString(m_graph);
            if (!!string) {
                m_graph.convertToConstant(m_node, jsNumber(string.length()));
                m_changed = true;
            }
        }
        break;
    }

    default:
        break;
    }
}
// Structure-check hoisting: votes on which variables are predominantly used
// under structure checks, filters candidates by vote ratio and by OSR-entry
// value compatibility, then inserts CheckStructure at SetArgument sites and
// ForwardCheckStructure at SetLocal sites. Returns true if the graph changed.
bool run()
{
    ASSERT(m_graph.m_form == ThreadedCPS);

    // Reset vote counters on every root VariableAccessData.
    for (unsigned i = m_graph.m_variableAccessData.size(); i--;) {
        VariableAccessData* variable = &m_graph.m_variableAccessData[i];
        if (!variable->isRoot())
            continue;
        variable->clearVotes();
    }

    // Identify the set of variables that are always subject to the same structure
    // checks. For now, only consider monomorphic structure checks (one structure).
    for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) {
        BasicBlock* block = m_graph.m_blocks[blockIndex].get();
        if (!block)
            continue;
        for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) {
            Node* node = block->at(indexInBlock);
            switch (node->op()) {
            case CheckStructure:
            case StructureTransitionWatchpoint: {
                // A structure check directly on a GetLocal is a vote in favor
                // of hoisting for that variable.
                Node* child = node->child1().node();
                if (child->op() != GetLocal)
                    break;
                VariableAccessData* variable = child->variableAccessData();
                variable->vote(VoteStructureCheck);
                if (!shouldConsiderForHoisting(variable))
                    break;
                noticeStructureCheck(variable, node->structureSet());
                break;
            }

            case ForwardCheckStructure:
            case ForwardStructureTransitionWatchpoint:
                // We currently rely on the fact that we're the only ones who would
                // insert this node.
                RELEASE_ASSERT_NOT_REACHED();
                break;

            case GetByOffset:
            case PutByOffset:
            case PutStructure:
            case AllocatePropertyStorage:
            case ReallocatePropertyStorage:
            case GetButterfly:
            case GetByVal:
            case PutByVal:
            case PutByValAlias:
            case GetArrayLength:
            case CheckArray:
            case GetIndexedPropertyStorage:
            case Phantom:
                // Don't count these uses.
                break;

            case ArrayifyToStructure:
            case Arrayify:
                if (node->arrayMode().conversion() == Array::RageConvert) {
                    // Rage conversion changes structures. We should avoid tying to do
                    // any kind of hoisting when rage conversion is in play.
                    Node* child = node->child1().node();
                    if (child->op() != GetLocal)
                        break;
                    VariableAccessData* variable = child->variableAccessData();
                    variable->vote(VoteOther);
                    if (!shouldConsiderForHoisting(variable))
                        break;
                    // Null structure set = disqualify this variable.
                    noticeStructureCheck(variable, 0);
                }
                break;

            case SetLocal: {
                // Find all uses of the source of the SetLocal. If any of them are a
                // kind of CheckStructure, then we should notice them to ensure that
                // we're not hoisting a check that would contravene checks that are
                // already being performed.
                VariableAccessData* variable = node->variableAccessData();
                if (!shouldConsiderForHoisting(variable))
                    break;
                Node* source = node->child1().node();
                for (unsigned subIndexInBlock = 0; subIndexInBlock < block->size(); ++subIndexInBlock) {
                    Node* subNode = block->at(subIndexInBlock);
                    switch (subNode->op()) {
                    case CheckStructure: {
                        if (subNode->child1() != source)
                            break;
                        noticeStructureCheck(variable, subNode->structureSet());
                        break;
                    }
                    case StructureTransitionWatchpoint: {
                        if (subNode->child1() != source)
                            break;
                        noticeStructureCheck(variable, subNode->structure());
                        break;
                    }
                    default:
                        break;
                    }
                }

                m_graph.voteChildren(node, VoteOther);
                break;
            }

            case GarbageValue:
                break;

            default:
                m_graph.voteChildren(node, VoteOther);
                break;
            }
        }
    }

    // Disable structure hoisting on variables that appear to mostly be used in
    // contexts where it doesn't make sense.
    for (unsigned i = m_graph.m_variableAccessData.size(); i--;) {
        VariableAccessData* variable = &m_graph.m_variableAccessData[i];
        if (!variable->isRoot())
            continue;
        if (variable->voteRatio() >= Options::structureCheckVoteRatioForHoisting())
            continue;
        HashMap<VariableAccessData*, CheckData>::iterator iter = m_map.find(variable);
        if (iter == m_map.end())
            continue;
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
        dataLog(
            "Zeroing the structure to hoist for ", VariableAccessDataDump(m_graph, variable),
            " because the ratio is ", variable->voteRatio(), ".\n");
#endif
        iter->value.m_structure = 0;
    }

    // Disable structure check hoisting for variables that cross the OSR entry that
    // we're currently taking, and where the value currently does not have the
    // structure we want.
    for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) {
        BasicBlock* block = m_graph.m_blocks[blockIndex].get();
        if (!block)
            continue;
        ASSERT(block->isReachable);
        if (!block->isOSRTarget)
            continue;
        if (block->bytecodeBegin != m_graph.m_osrEntryBytecodeIndex)
            continue;
        for (size_t i = 0; i < m_graph.m_mustHandleValues.size(); ++i) {
            int operand = m_graph.m_mustHandleValues.operandForIndex(i);
            Node* node = block->variablesAtHead.operand(operand);
            if (!node)
                continue;
            VariableAccessData* variable = node->variableAccessData();
            HashMap<VariableAccessData*, CheckData>::iterator iter = m_map.find(variable);
            if (iter == m_map.end())
                continue;
            if (!iter->value.m_structure)
                continue;
            JSValue value = m_graph.m_mustHandleValues[i];
            if (!value || !value.isCell()) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
                dataLog(
                    "Zeroing the structure to hoist for ", VariableAccessDataDump(m_graph, variable),
                    " because the OSR entry value is not a cell: ", value, ".\n");
#endif
                iter->value.m_structure = 0;
                continue;
            }
            if (value.asCell()->structure() != iter->value.m_structure) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
                dataLog(
                    "Zeroing the structure to hoist for ", VariableAccessDataDump(m_graph, variable),
                    " because the OSR entry value has structure ",
                    RawPointer(value.asCell()->structure()), " and we wanted ",
                    RawPointer(iter->value.m_structure), ".\n");
#endif
                iter->value.m_structure = 0;
                continue;
            }
        }
    }

    bool changed = false;

#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
    for (HashMap<VariableAccessData*, CheckData>::iterator it = m_map.begin();
        it != m_map.end(); ++it) {
        if (!it->value.m_structure) {
            dataLog(
                "Not hoisting checks for ", VariableAccessDataDump(m_graph, it->key),
                " because of heuristics.\n");
            continue;
        }
        dataLog("Hoisting checks for ", VariableAccessDataDump(m_graph, it->key), "\n");
    }
#endif // DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)

    // Place CheckStructure's at SetLocal sites.
    InsertionSet insertionSet(m_graph);
    for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) {
        BasicBlock* block = m_graph.m_blocks[blockIndex].get();
        if (!block)
            continue;
        for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) {
            Node* node = block->at(indexInBlock);
            // Be careful not to use 'node' after appending to the graph. In those switch
            // cases where we need to append, we first carefully extract everything we need
            // from the node, before doing any appending.
            switch (node->op()) {
            case SetArgument: {
                ASSERT(!blockIndex);
                // Insert a GetLocal and a CheckStructure immediately following this
                // SetArgument, if the variable was a candidate for structure hoisting.
                // If the basic block previously only had the SetArgument as its
                // variable-at-tail, then replace it with this GetLocal.
                VariableAccessData* variable = node->variableAccessData();
                HashMap<VariableAccessData*, CheckData>::iterator iter = m_map.find(variable);
                if (iter == m_map.end())
                    break;
                if (!iter->value.m_structure)
                    break;

                CodeOrigin codeOrigin = node->codeOrigin;

                Node* getLocal = insertionSet.insertNode(
                    indexInBlock + 1, variable->prediction(), GetLocal, codeOrigin,
                    OpInfo(variable), Edge(node));
                insertionSet.insertNode(
                    indexInBlock + 1, SpecNone, CheckStructure, codeOrigin,
                    OpInfo(m_graph.addStructureSet(iter->value.m_structure)),
                    Edge(getLocal, CellUse));

                if (block->variablesAtTail.operand(variable->local()) == node)
                    block->variablesAtTail.operand(variable->local()) = getLocal;

                m_graph.substituteGetLocal(*block, indexInBlock, variable, getLocal);

                changed = true;
                break;
            }

            case SetLocal: {
                VariableAccessData* variable = node->variableAccessData();
                HashMap<VariableAccessData*, CheckData>::iterator iter = m_map.find(variable);
                if (iter == m_map.end())
                    break;
                if (!iter->value.m_structure)
                    break;

                // First insert a dead SetLocal to tell OSR that the child's value should
                // be dropped into this bytecode variable if the CheckStructure decides
                // to exit.
                CodeOrigin codeOrigin = node->codeOrigin;
                Edge child1 = node->child1();

                insertionSet.insertNode(
                    indexInBlock, SpecNone, SetLocal, codeOrigin, OpInfo(variable), child1);

                // Use a ForwardCheckStructure to indicate that we should exit to the
                // next bytecode instruction rather than reexecuting the current one.
                insertionSet.insertNode(
                    indexInBlock, SpecNone, ForwardCheckStructure, codeOrigin,
                    OpInfo(m_graph.addStructureSet(iter->value.m_structure)),
                    Edge(child1.node(), CellUse));
                changed = true;
                break;
            }

            default:
                break;
            }
        }
        insertionSet.execute(block);
    }

    return changed;
}
// FTL OSR-entrypoint creation: finds the basic block whose LoopHint matches
// the OSR entry bytecode index, then builds a new root block that extracts
// OSR-entry locals, re-establishes arguments, stores the locals, and jumps
// into a pre-header for the target loop. Returns false if no matching
// LoopHint block exists (compilation should fail in that case).
bool run()
{
    RELEASE_ASSERT(m_graph.m_plan.mode == FTLForOSREntryMode);
    RELEASE_ASSERT(m_graph.m_form == ThreadedCPS);

    unsigned bytecodeIndex = m_graph.m_plan.osrEntryBytecodeIndex;
    RELEASE_ASSERT(bytecodeIndex);
    RELEASE_ASSERT(bytecodeIndex != UINT_MAX);

    // Needed by createPreHeader().
    m_graph.ensureDominators();

    CodeBlock* baseline = m_graph.m_profiledBlock;

    // Locate the block whose first non-skippable node is the LoopHint for
    // the requested bytecode index.
    BasicBlock* target = 0;
    for (unsigned blockIndex = m_graph.numBlocks(); blockIndex--;) {
        BasicBlock* block = m_graph.block(blockIndex);
        if (!block)
            continue;
        unsigned nodeIndex = 0;
        Node* firstNode = block->at(0);
        while (firstNode->isSemanticallySkippable())
            firstNode = block->at(++nodeIndex);
        if (firstNode->op() == LoopHint
            && firstNode->origin.semantic == CodeOrigin(bytecodeIndex)) {
            target = block;
            break;
        }
    }

    if (!target) {
        // This is a terrible outcome. It shouldn't often happen but it might
        // happen and so we should defend against it. If it happens, then this
        // compilation is a failure.
        return false;
    }

    BlockInsertionSet insertionSet(m_graph);

    // We say that the execution count of the entry block is 1, because we know for sure
    // that this must be the case. Under our definition of executionCount, "1" means "once
    // per invocation". We could have said NaN here, since that would ask any clients of
    // executionCount to use best judgement - but that seems unnecessary since we know for
    // sure what the executionCount should be in this case.
    BasicBlock* newRoot = insertionSet.insert(0, 1);

    // We'd really like to use an unset origin, but ThreadedCPS won't allow that.
    NodeOrigin origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), false);

    // Extract each live-at-target local from the OSR entry scratch area and
    // issue a MovHint for it.
    Vector<Node*> locals(baseline->m_numCalleeLocals);
    for (int local = 0; local < baseline->m_numCalleeLocals; ++local) {
        Node* previousHead = target->variablesAtHead.local(local);
        if (!previousHead)
            continue;
        VariableAccessData* variable = previousHead->variableAccessData();
        locals[local] = newRoot->appendNode(
            m_graph, variable->prediction(), ExtractOSREntryLocal, origin,
            OpInfo(variable->local().offset()));

        newRoot->appendNode(
            m_graph, SpecNone, MovHint, origin, OpInfo(variable->local().offset()),
            Edge(locals[local]));
    }

    // Now use the origin of the target, since it's not OK to exit, and we will probably hoist
    // type checks to here.
    origin = target->at(0)->origin;

    for (int argument = 0; argument < baseline->numParameters(); ++argument) {
        Node* oldNode = target->variablesAtHead.argument(argument);
        if (!oldNode) {
            // Just for sanity, always have a SetArgument even if it's not needed.
            oldNode = m_graph.m_arguments[argument];
        }
        Node* node = newRoot->appendNode(
            m_graph, SpecNone, SetArgument, origin,
            OpInfo(oldNode->variableAccessData()));
        m_graph.m_arguments[argument] = node;
    }

    // Store the previously extracted locals into their stack slots.
    for (int local = 0; local < baseline->m_numCalleeLocals; ++local) {
        Node* previousHead = target->variablesAtHead.local(local);
        if (!previousHead)
            continue;
        VariableAccessData* variable = previousHead->variableAccessData();
        Node* node = locals[local];
        newRoot->appendNode(
            m_graph, SpecNone, SetLocal, origin, OpInfo(variable), Edge(node));
    }

    // Jump into a fresh pre-header in front of the target loop block.
    newRoot->appendNode(
        m_graph, SpecNone, Jump, origin,
        OpInfo(createPreHeader(m_graph, insertionSet, target)));

    insertionSet.execute();
    m_graph.resetReachability();
    m_graph.killUnreachableBlocks();
    return true;
}
// Fixes up a single node after prediction propagation: strengthens generic ops into
// specialized ones (e.g. GetById of "length" → typed length ops), installs int/double
// edge conversions based on speculation, and performs local peephole rewrites
// (Branch-over-LogicalNot inversion, integer-division splitting on non-x86).
// Mutates the node (and possibly the graph/insertion set) in place; ref-count
// bookkeeping (ref/deref) must stay paired exactly as written.
void fixupNode(Node& node)
{
    if (!node.shouldGenerate())
        return;

    NodeType op = node.op();

#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
    dataLog(" %s @%u: ", Graph::opName(op), m_compileIndex);
#endif

    switch (op) {
    case GetById: {
        // Only convert GetById of the "length" property, and only when the result is
        // predicted int32 and the base is one of the known length-bearing types.
        if (!isInt32Speculation(m_graph[m_compileIndex].prediction()))
            break;
        if (codeBlock()->identifier(node.identifierNumber()) != globalData().propertyNames->length)
            break;
        bool isArray = isArraySpeculation(m_graph[node.child1()].prediction());
        bool isArguments = isArgumentsSpeculation(m_graph[node.child1()].prediction());
        bool isString = isStringSpeculation(m_graph[node.child1()].prediction());
        bool isInt8Array = m_graph[node.child1()].shouldSpeculateInt8Array();
        bool isInt16Array = m_graph[node.child1()].shouldSpeculateInt16Array();
        bool isInt32Array = m_graph[node.child1()].shouldSpeculateInt32Array();
        bool isUint8Array = m_graph[node.child1()].shouldSpeculateUint8Array();
        bool isUint8ClampedArray = m_graph[node.child1()].shouldSpeculateUint8ClampedArray();
        bool isUint16Array = m_graph[node.child1()].shouldSpeculateUint16Array();
        bool isUint32Array = m_graph[node.child1()].shouldSpeculateUint32Array();
        bool isFloat32Array = m_graph[node.child1()].shouldSpeculateFloat32Array();
        bool isFloat64Array = m_graph[node.child1()].shouldSpeculateFloat64Array();
        if (!isArray && !isArguments && !isString && !isInt8Array && !isInt16Array && !isInt32Array && !isUint8Array && !isUint8ClampedArray && !isUint16Array && !isUint32Array && !isFloat32Array && !isFloat64Array)
            break;

#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
        dataLog(" @%u -> %s", m_compileIndex, isArray ? "GetArrayLength" : "GetStringLength");
#endif
        if (isArray) {
            node.setOp(GetArrayLength);
            ASSERT(node.flags() & NodeMustGenerate);
            node.clearFlags(NodeMustGenerate);
            m_graph.deref(m_compileIndex);

            // If the baseline array profile gives a definite structure, insert a
            // CheckStructure on the base so the length access can rely on it.
            ArrayProfile* arrayProfile =
                m_graph.baselineCodeBlockFor(node.codeOrigin)->getArrayProfile(
                    node.codeOrigin.bytecodeIndex);
            if (!arrayProfile)
                break;
            arrayProfile->computeUpdatedPrediction();
            if (!arrayProfile->hasDefiniteStructure())
                break;
            m_graph.ref(node.child1());
            Node checkStructure(CheckStructure, node.codeOrigin, OpInfo(m_graph.addStructureSet(arrayProfile->expectedStructure())), node.child1().index());
            checkStructure.ref();
            NodeIndex checkStructureIndex = m_graph.size();
            m_graph.append(checkStructure);
            m_insertionSet.append(m_indexInBlock, checkStructureIndex);
            break;
        }
        if (isArguments)
            node.setOp(GetArgumentsLength);
        else if (isString)
            node.setOp(GetStringLength);
        else if (isInt8Array)
            node.setOp(GetInt8ArrayLength);
        else if (isInt16Array)
            node.setOp(GetInt16ArrayLength);
        else if (isInt32Array)
            node.setOp(GetInt32ArrayLength);
        else if (isUint8Array)
            node.setOp(GetUint8ArrayLength);
        else if (isUint8ClampedArray)
            node.setOp(GetUint8ClampedArrayLength);
        else if (isUint16Array)
            node.setOp(GetUint16ArrayLength);
        else if (isUint32Array)
            node.setOp(GetUint32ArrayLength);
        else if (isFloat32Array)
            node.setOp(GetFloat32ArrayLength);
        else if (isFloat64Array)
            node.setOp(GetFloat64ArrayLength);
        else
            ASSERT_NOT_REACHED();
        // No longer MustGenerate
        ASSERT(node.flags() & NodeMustGenerate);
        node.clearFlags(NodeMustGenerate);
        m_graph.deref(m_compileIndex);
        break;
    }

    case GetIndexedPropertyStorage: {
        // Kill the node (turn into a Nop, drop all children) when there is no base
        // prediction, the index isn't an integer, or the base looks like arguments.
        if (!m_graph[node.child1()].prediction()
            || !m_graph[node.child2()].shouldSpeculateInteger()
            || m_graph[node.child1()].shouldSpeculateArguments()) {
            node.setOpAndDefaultFlags(Nop);
            m_graph.clearAndDerefChild1(node);
            m_graph.clearAndDerefChild2(node);
            m_graph.clearAndDerefChild3(node);
            node.setRefCount(0);
        }
        break;
    }

    case GetByVal:
    case StringCharAt:
    case StringCharCodeAt: {
        // Drop a third child that was nop-ified (by the case above).
        if (!!node.child3() && m_graph[node.child3()].op() == Nop)
            node.children.child3() = Edge();
        break;
    }

    case ValueToInt32: {
        // A number-speculated ValueToInt32 has no observable side effects, so it
        // need not be MustGenerate.
        if (m_graph[node.child1()].shouldSpeculateNumber() && node.mustGenerate()) {
            node.clearFlags(NodeMustGenerate);
            m_graph.deref(m_compileIndex);
        }
        break;
    }

    case BitAnd:
    case BitOr:
    case BitXor:
    case BitRShift:
    case BitLShift:
    case BitURShift: {
        // Bit ops always operate on int32s; coerce both operand edges.
        fixIntEdge(node.children.child1());
        fixIntEdge(node.children.child2());
        break;
    }

    case CompareEq:
    case CompareLess:
    case CompareLessEq:
    case CompareGreater:
    case CompareGreaterEq:
    case CompareStrictEq: {
        // Prefer integer comparison; otherwise, if both sides are numbers, compare
        // as doubles. Anything else is left generic.
        if (Node::shouldSpeculateInteger(m_graph[node.child1()], m_graph[node.child2()]))
            break;
        if (!Node::shouldSpeculateNumber(m_graph[node.child1()], m_graph[node.child2()]))
            break;
        fixDoubleEdge(0);
        fixDoubleEdge(1);
        break;
    }

    case LogicalNot: {
        if (m_graph[node.child1()].shouldSpeculateInteger())
            break;
        if (!m_graph[node.child1()].shouldSpeculateNumber())
            break;
        fixDoubleEdge(0);
        break;
    }

    case Branch: {
        if (!m_graph[node.child1()].shouldSpeculateInteger()
            && m_graph[node.child1()].shouldSpeculateNumber())
            fixDoubleEdge(0);

        Node& myNode = m_graph[m_compileIndex]; // reload because the graph may have changed
        // Peephole: Branch(LogicalNot(x)) with a singly-referenced, boolean-valued x
        // becomes Branch(x) with the taken/notTaken successors swapped.
        Edge logicalNotEdge = myNode.child1();
        Node& logicalNot = m_graph[logicalNotEdge];
        if (logicalNot.op() == LogicalNot && logicalNot.adjustedRefCount() == 1) {
            Edge newChildEdge = logicalNot.child1();
            if (m_graph[newChildEdge].hasBooleanResult()) {
                m_graph.ref(newChildEdge);
                m_graph.deref(logicalNotEdge);
                myNode.children.setChild1(newChildEdge);

                BlockIndex toBeTaken = myNode.notTakenBlockIndex();
                BlockIndex toBeNotTaken = myNode.takenBlockIndex();
                myNode.setTakenBlockIndex(toBeTaken);
                myNode.setNotTakenBlockIndex(toBeNotTaken);
            }
        }
        break;
    }

    case SetLocal: {
        // Captured variables can't use double format; otherwise convert the stored
        // value to a double edge when the variable is double-formatted.
        if (node.variableAccessData()->isCaptured())
            break;
        if (!node.variableAccessData()->shouldUseDoubleFormat())
            break;
        fixDoubleEdge(0);
        break;
    }

    case ArithAdd:
    case ValueAdd: {
        if (m_graph.addShouldSpeculateInteger(node))
            break;
        if (!Node::shouldSpeculateNumber(m_graph[node.child1()], m_graph[node.child2()]))
            break;
        fixDoubleEdge(0);
        fixDoubleEdge(1);
        break;
    }

    case ArithSub: {
        if (m_graph.addShouldSpeculateInteger(node) && node.canSpeculateInteger())
            break;
        fixDoubleEdge(0);
        fixDoubleEdge(1);
        break;
    }

    case ArithNegate: {
        if (m_graph.negateShouldSpeculateInteger(node))
            break;
        fixDoubleEdge(0);
        break;
    }

    case ArithMin:
    case ArithMax:
    case ArithMod: {
        if (Node::shouldSpeculateInteger(m_graph[node.child1()], m_graph[node.child2()])
            && node.canSpeculateInteger())
            break;
        fixDoubleEdge(0);
        fixDoubleEdge(1);
        break;
    }

    case ArithMul: {
        if (m_graph.mulShouldSpeculateInteger(node))
            break;
        fixDoubleEdge(0);
        fixDoubleEdge(1);
        break;
    }

    case ArithDiv: {
        if (Node::shouldSpeculateInteger(m_graph[node.child1()], m_graph[node.child2()])
            && node.canSpeculateInteger()) {
            // x86 has native integer division, so leave it integer there. Elsewhere,
            // split the node into a double division feeding a DoubleAsInt32 so the
            // integer result can still be recovered.
            if (isX86())
                break;
            fixDoubleEdge(0);
            fixDoubleEdge(1);

            Node& oldDivision = m_graph[m_compileIndex];

            Node newDivision = oldDivision;
            newDivision.setRefCount(2);
            newDivision.predict(SpecDouble);
            NodeIndex newDivisionIndex = m_graph.size();

            oldDivision.setOp(DoubleAsInt32);
            oldDivision.children.initialize(Edge(newDivisionIndex, DoubleUse), Edge(), Edge());

            m_graph.append(newDivision);
            m_insertionSet.append(m_indexInBlock, newDivisionIndex);

            break;
        }
        fixDoubleEdge(0);
        fixDoubleEdge(1);
        break;
    }

    case ArithAbs: {
        if (m_graph[node.child1()].shouldSpeculateInteger()
            && node.canSpeculateInteger())
            break;
        fixDoubleEdge(0);
        break;
    }

    case ArithSqrt: {
        // Sqrt is always a double operation.
        fixDoubleEdge(0);
        break;
    }

    case PutByVal:
    case PutByValSafe: {
        // Vararg children: 0 = base, 1 = index, 2 = value.
        Edge child1 = m_graph.varArgChild(node, 0);
        Edge child2 = m_graph.varArgChild(node, 1);
        Edge child3 = m_graph.varArgChild(node, 2);
        if (!m_graph[child1].prediction() || !m_graph[child2].prediction())
            break;
        if (!m_graph[child2].shouldSpeculateInteger())
            break;
        if (isActionableIntMutableArraySpeculation(m_graph[child1].prediction())) {
            // Integer-typed array store: only coerce the value to double if it isn't
            // already constant or integer-speculated.
            if (m_graph[child3].isConstant())
                break;
            if (m_graph[child3].shouldSpeculateInteger())
                break;
            fixDoubleEdge(2);
            break;
        }
        if (isActionableFloatMutableArraySpeculation(m_graph[child1].prediction())) {
            fixDoubleEdge(2);
            break;
        }
        break;
    }

    default:
        break;
    }

#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
    if (!(node.flags() & NodeHasVarArgs)) {
        dataLog("new children: ");
        node.dumpChildren(WTF::dataFile());
    }
    dataLog("\n");
#endif
}
// Structure-check hoisting phase. Runs three passes over the graph:
//   1. Voting: find variables whose uses are dominated by monomorphic CheckStructure
//      nodes (candidates recorded via noticeStructureCheck / m_map).
//   2. Liveness vs. clobbers: determine which candidate variables are live across
//      operations that can clobber structures (per the CFA's notion of clobbering).
//   3. Transformation: hoist checks to the defining SetLocal/SetArgument, and
//      convert redundant CheckStructures into Phantoms or watchpoints.
// Returns true if the graph was changed.
bool run()
{
    // Reset per-variable vote counters before the voting pass.
    for (unsigned i = m_graph.m_variableAccessData.size(); i--;) {
        VariableAccessData* variable = &m_graph.m_variableAccessData[i];
        if (!variable->isRoot())
            continue;
        variable->clearVotes();
    }

    // Identify the set of variables that are always subject to the same structure
    // checks. For now, only consider monomorphic structure checks (one structure).

    for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) {
        BasicBlock* block = m_graph.m_blocks[blockIndex].get();
        if (!block)
            continue;
        for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) {
            NodeIndex nodeIndex = block->at(indexInBlock);
            Node& node = m_graph[nodeIndex];
            if (!node.shouldGenerate())
                continue;
            switch (node.op()) {
            case CheckStructure: {
                // Vote "structure check" for the variable behind a GetLocal, and
                // record it as a hoisting candidate if it's an unclobbered cell.
                Node& child = m_graph[node.child1()];
                if (child.op() != GetLocal)
                    break;
                VariableAccessData* variable = child.variableAccessData();
                variable->vote(VoteStructureCheck);
                if (variable->isCaptured() || variable->structureCheckHoistingFailed())
                    break;
                if (!isCellSpeculation(variable->prediction()))
                    break;
                noticeStructureCheck(variable, node.structureSet());
                break;
            }

            case ForwardCheckStructure:
            case ForwardStructureTransitionWatchpoint:
                // We currently rely on the fact that we're the only ones who would
                // insert this node.
                ASSERT_NOT_REACHED();
                break;

            case GetByOffset:
            case PutByOffset:
            case PutStructure:
            case StructureTransitionWatchpoint:
            case AllocatePropertyStorage:
            case ReallocatePropertyStorage:
            case GetPropertyStorage:
            case GetByVal:
            case PutByVal:
            case PutByValAlias:
            case GetArrayLength:
            case CheckArray:
            case GetIndexedPropertyStorage:
            case Phantom:
                // Don't count these uses.
                break;

            default:
                m_graph.vote(node, VoteOther);
                break;
            }
        }
    }

    // Disable structure hoisting on variables that appear to mostly be used in
    // contexts where it doesn't make sense.

    for (unsigned i = m_graph.m_variableAccessData.size(); i--;) {
        VariableAccessData* variable = &m_graph.m_variableAccessData[i];
        if (!variable->isRoot())
            continue;
        if (variable->voteRatio() >= Options::structureCheckVoteRatioForHoisting())
            continue;
        HashMap<VariableAccessData*, CheckData>::iterator iter = m_map.find(variable);
        if (iter == m_map.end())
            continue;
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
        dataLog("Zeroing the structure to hoist for %s because the ratio is %lf.\n",
                m_graph.nameOfVariableAccessData(variable), variable->voteRatio());
#endif
        // A null m_structure marks the candidate as rejected.
        iter->second.m_structure = 0;
    }

    // Identify the set of variables that are live across a structure clobber.

    Operands<VariableAccessData*> live(
        m_graph.m_blocks[0]->variablesAtTail.numberOfArguments(),
        m_graph.m_blocks[0]->variablesAtTail.numberOfLocals());
    for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) {
        BasicBlock* block = m_graph.m_blocks[blockIndex].get();
        if (!block)
            continue;
        ASSERT(live.numberOfArguments() == block->variablesAtTail.numberOfArguments());
        ASSERT(live.numberOfLocals() == block->variablesAtTail.numberOfLocals());

        // Seed liveness from the variables-at-tail, then walk the block backwards.
        for (unsigned i = live.size(); i--;) {
            NodeIndex indexAtTail = block->variablesAtTail[i];
            VariableAccessData* variable;
            if (indexAtTail == NoNode)
                variable = 0;
            else
                variable = m_graph[indexAtTail].variableAccessData();
            live[i] = variable;
        }

        for (unsigned indexInBlock = block->size(); indexInBlock--;) {
            NodeIndex nodeIndex = block->at(indexInBlock);
            Node& node = m_graph[nodeIndex];
            if (!node.shouldGenerate())
                continue;
            switch (node.op()) {
            case GetLocal:
            case Flush:
                // This is a birth.
                live.operand(node.local()) = node.variableAccessData();
                break;

            case SetLocal:
            case SetArgument:
                ASSERT(live.operand(node.local())); // Must be live.
                ASSERT(live.operand(node.local()) == node.variableAccessData()); // Must have the variable we expected.
                // This is a death.
                live.operand(node.local()) = 0;
                break;

            // Use the CFA's notion of what clobbers the world.
            case ValueAdd:
                // An integer or pure-number add cannot call out to user code.
                if (m_graph.addShouldSpeculateInteger(node))
                    break;
                if (Node::shouldSpeculateNumber(m_graph[node.child1()], m_graph[node.child2()]))
                    break;
                clobber(live);
                break;

            case CompareLess:
            case CompareLessEq:
            case CompareGreater:
            case CompareGreaterEq:
            case CompareEq: {
                // Comparisons only clobber when they might invoke user code; the
                // speculated-type cases enumerated here are known non-clobbering.
                Node& left = m_graph[node.child1()];
                Node& right = m_graph[node.child2()];
                if (Node::shouldSpeculateInteger(left, right))
                    break;
                if (Node::shouldSpeculateNumber(left, right))
                    break;
                if (node.op() == CompareEq) {
                    if ((m_graph.isConstant(node.child1().index())
                         && m_graph.valueOfJSConstant(node.child1().index()).isNull())
                        || (m_graph.isConstant(node.child2().index())
                            && m_graph.valueOfJSConstant(node.child2().index()).isNull()))
                        break;
                    if (Node::shouldSpeculateFinalObject(left, right))
                        break;
                    if (Node::shouldSpeculateArray(left, right))
                        break;
                    if (left.shouldSpeculateFinalObject() && right.shouldSpeculateFinalObjectOrOther())
                        break;
                    if (right.shouldSpeculateFinalObject() && left.shouldSpeculateFinalObjectOrOther())
                        break;
                    if (left.shouldSpeculateArray() && right.shouldSpeculateArrayOrOther())
                        break;
                    if (right.shouldSpeculateArray() && left.shouldSpeculateArrayOrOther())
                        break;
                }
                clobber(live);
                break;
            }

            case GetByVal:
            case PutByVal:
            case PutByValAlias:
                if (m_graph.byValIsPure(node))
                    break;
                clobber(live);
                break;

            case GetMyArgumentsLengthSafe:
            case GetMyArgumentByValSafe:
            case GetById:
            case GetByIdFlush:
            case PutStructure:
            case PhantomPutStructure:
            case PutById:
            case PutByIdDirect:
            case Call:
            case Construct:
            case Resolve:
            case ResolveBase:
            case ResolveBaseStrictPut:
            case ResolveGlobal:
                // These always (potentially) clobber structures.
                clobber(live);
                break;

            default:
                ASSERT(node.op() != Phi);
                break;
            }
        }
    }

    bool changed = false;

#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
    for (HashMap<VariableAccessData*, CheckData>::iterator it = m_map.begin();
         it != m_map.end(); ++it) {
        if (!it->second.m_structure) {
            dataLog("Not hoisting checks for %s because of heuristics.\n", m_graph.nameOfVariableAccessData(it->first));
            continue;
        }
        if (it->second.m_isClobbered && !it->second.m_structure->transitionWatchpointSetIsStillValid()) {
            dataLog("Not hoisting checks for %s because the structure is clobbered and has an invalid watchpoint set.\n", m_graph.nameOfVariableAccessData(it->first));
            continue;
        }
        dataLog("Hoisting checks for %s\n", m_graph.nameOfVariableAccessData(it->first));
    }
#endif // DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)

    // Make changes:
    // 1) If a variable's live range does not span a clobber, then inject structure
    //    checks before the SetLocal.
    // 2) If a variable's live range spans a clobber but is watchpointable, then
    //    inject structure checks before the SetLocal and replace all other structure
    //    checks on that variable with structure transition watchpoints.

    InsertionSet<NodeIndex> insertionSet;
    for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) {
        BasicBlock* block = m_graph.m_blocks[blockIndex].get();
        if (!block)
            continue;
        for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) {
            NodeIndex nodeIndex = block->at(indexInBlock);
            Node& node = m_graph[nodeIndex];
            // Be careful not to use 'node' after appending to the graph. In those switch
            // cases where we need to append, we first carefully extract everything we need
            // from the node, before doing any appending.
            if (!node.shouldGenerate())
                continue;
            switch (node.op()) {
            case SetArgument: {
                ASSERT(!blockIndex);
                // Insert a GetLocal and a CheckStructure immediately following this
                // SetArgument, if the variable was a candidate for structure hoisting.
                // If the basic block previously only had the SetArgument as its
                // variable-at-tail, then replace it with this GetLocal.
                VariableAccessData* variable = node.variableAccessData();
                HashMap<VariableAccessData*, CheckData>::iterator iter = m_map.find(variable);
                if (iter == m_map.end())
                    break;
                if (!iter->second.m_structure)
                    break;
                if (iter->second.m_isClobbered && !iter->second.m_structure->transitionWatchpointSetIsStillValid())
                    break;

                node.ref();

                CodeOrigin codeOrigin = node.codeOrigin;

                Node getLocal(GetLocal, codeOrigin, OpInfo(variable), nodeIndex);
                getLocal.predict(variable->prediction());
                getLocal.ref();
                NodeIndex getLocalIndex = m_graph.size();
                m_graph.append(getLocal);
                insertionSet.append(indexInBlock + 1, getLocalIndex);

                Node checkStructure(CheckStructure, codeOrigin, OpInfo(m_graph.addStructureSet(iter->second.m_structure)), getLocalIndex);
                checkStructure.ref();
                NodeIndex checkStructureIndex = m_graph.size();
                m_graph.append(checkStructure);
                insertionSet.append(indexInBlock + 1, checkStructureIndex);

                if (block->variablesAtTail.operand(variable->local()) == nodeIndex)
                    block->variablesAtTail.operand(variable->local()) = getLocalIndex;

                // Redirect later uses in this block to the new GetLocal.
                m_graph.substituteGetLocal(*block, indexInBlock, variable, getLocalIndex);

                changed = true;
                break;
            }

            case SetLocal: {
                VariableAccessData* variable = node.variableAccessData();
                HashMap<VariableAccessData*, CheckData>::iterator iter = m_map.find(variable);
                if (iter == m_map.end())
                    break;
                if (!iter->second.m_structure)
                    break;
                if (iter->second.m_isClobbered && !iter->second.m_structure->transitionWatchpointSetIsStillValid())
                    break;

                // First insert a dead SetLocal to tell OSR that the child's value should
                // be dropped into this bytecode variable if the CheckStructure decides
                // to exit.

                CodeOrigin codeOrigin = node.codeOrigin;
                NodeIndex child1 = node.child1().index();

                Node setLocal(SetLocal, codeOrigin, OpInfo(variable), child1);
                NodeIndex setLocalIndex = m_graph.size();
                m_graph.append(setLocal);
                insertionSet.append(indexInBlock, setLocalIndex);
                m_graph[child1].ref();
                // Use a ForwardCheckStructure to indicate that we should exit to the
                // next bytecode instruction rather than reexecuting the current one.
                Node checkStructure(ForwardCheckStructure, codeOrigin, OpInfo(m_graph.addStructureSet(iter->second.m_structure)), child1);
                checkStructure.ref();
                NodeIndex checkStructureIndex = m_graph.size();
                m_graph.append(checkStructure);
                insertionSet.append(indexInBlock, checkStructureIndex);

                changed = true;
                break;
            }

            case CheckStructure: {
                // Now-redundant checks: become a Phantom if the variable is never
                // live across a clobber, or a watchpoint if the structure's
                // transition watchpoint set is still valid.
                Node& child = m_graph[node.child1()];
                if (child.op() != GetLocal)
                    break;
                HashMap<VariableAccessData*, CheckData>::iterator iter = m_map.find(child.variableAccessData());
                if (iter == m_map.end())
                    break;
                if (!iter->second.m_structure)
                    break;
                if (!iter->second.m_isClobbered) {
                    node.setOpAndDefaultFlags(Phantom);
                    ASSERT(node.refCount() == 1);
                    break;
                }
                if (!iter->second.m_structure->transitionWatchpointSetIsStillValid())
                    break;
                ASSERT(iter->second.m_structure == node.structureSet().singletonStructure());
                node.convertToStructureTransitionWatchpoint();
                changed = true;
                break;
            }

            default:
                break;
            }
        }
        insertionSet.execute(*block);
    }

    return changed;
}