void propagate(Node& node) { if (!node.shouldGenerate()) return; NodeType op = node.op(); NodeFlags flags = node.flags() & NodeBackPropMask; #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE) dataLog(" %s @%u: %s ", Graph::opName(op), m_compileIndex, nodeFlagsAsString(flags)); #endif bool changed = false; switch (op) { case JSConstant: case WeakJSConstant: { changed |= setPrediction(speculationFromValue(m_graph.valueOfJSConstant(m_compileIndex))); break; } case GetLocal: { VariableAccessData* variableAccessData = node.variableAccessData(); SpeculatedType prediction = variableAccessData->prediction(); if (prediction) changed |= mergePrediction(prediction); changed |= variableAccessData->mergeFlags(flags); break; } case SetLocal: { VariableAccessData* variableAccessData = node.variableAccessData(); changed |= variableAccessData->predict(m_graph[node.child1()].prediction()); changed |= m_graph[node.child1()].mergeFlags(variableAccessData->flags()); break; } case Flush: { // Make sure that the analysis knows that flushed locals escape. VariableAccessData* variableAccessData = node.variableAccessData(); changed |= variableAccessData->mergeFlags(NodeUsedAsValue); break; } case BitAnd: case BitOr: case BitXor: case BitRShift: case BitLShift: case BitURShift: { changed |= setPrediction(SpecInt32); flags |= NodeUsedAsInt; flags &= ~(NodeUsedAsNumber | NodeNeedsNegZero); changed |= m_graph[node.child1()].mergeFlags(flags); changed |= m_graph[node.child2()].mergeFlags(flags); break; } case ValueToInt32: { changed |= setPrediction(SpecInt32); flags |= NodeUsedAsInt; flags &= ~(NodeUsedAsNumber | NodeNeedsNegZero); changed |= m_graph[node.child1()].mergeFlags(flags); break; } case ArrayPop: { changed |= mergePrediction(node.getHeapPrediction()); changed |= mergeDefaultFlags(node); break; } case ArrayPush: { changed |= mergePrediction(node.getHeapPrediction()); changed |= m_graph[node.child1()].mergeFlags(NodeUsedAsValue); changed |= m_graph[node.child2()].mergeFlags(NodeUsedAsValue); break; } case RegExpExec: case RegExpTest: { changed |= mergePrediction(node.getHeapPrediction()); changed |= mergeDefaultFlags(node); break; } case StringCharCodeAt: { changed |= mergePrediction(SpecInt32); changed |= m_graph[node.child1()].mergeFlags(NodeUsedAsValue); changed |= m_graph[node.child2()].mergeFlags(NodeUsedAsNumber | NodeUsedAsInt); break; } case ArithMod: { SpeculatedType left = m_graph[node.child1()].prediction(); SpeculatedType right = m_graph[node.child2()].prediction(); if (left && right) { if (isInt32Speculation(mergeSpeculations(left, right)) && nodeCanSpeculateInteger(node.arithNodeFlags())) changed |= mergePrediction(SpecInt32); else changed |= mergePrediction(SpecDouble); } flags |= NodeUsedAsValue; changed |= m_graph[node.child1()].mergeFlags(flags); changed |= m_graph[node.child2()].mergeFlags(flags); break; } case UInt32ToNumber: { if (nodeCanSpeculateInteger(node.arithNodeFlags())) changed |= mergePrediction(SpecInt32); else changed |= mergePrediction(SpecNumber); changed |= m_graph[node.child1()].mergeFlags(flags); break; } case ValueAdd: { SpeculatedType left = m_graph[node.child1()].prediction(); SpeculatedType right = m_graph[node.child2()].prediction(); if (left && right) { if (isNumberSpeculation(left) && isNumberSpeculation(right)) { if (m_graph.addShouldSpeculateInteger(node)) changed |= mergePrediction(SpecInt32); else changed |= mergePrediction(speculatedDoubleTypeForPredictions(left, right)); } else if (!(left & SpecNumber) || !(right & SpecNumber)) { // left or right is definitely something 
                    // other than a number.
                    changed |= mergePrediction(SpecString);
                } else
                    changed |= mergePrediction(SpecString | SpecInt32 | SpecDouble);
            }
            if (isNotNegZero(node.child1().index()) || isNotNegZero(node.child2().index()))
                flags &= ~NodeNeedsNegZero;
            changed |= m_graph[node.child1()].mergeFlags(flags);
            changed |= m_graph[node.child2()].mergeFlags(flags);
            break;
        }

        case ArithAdd: {
            SpeculatedType left = m_graph[node.child1()].prediction();
            SpeculatedType right = m_graph[node.child2()].prediction();
            if (left && right) {
                if (m_graph.addShouldSpeculateInteger(node))
                    changed |= mergePrediction(SpecInt32);
                else
                    changed |= mergePrediction(speculatedDoubleTypeForPredictions(left, right));
            }
            if (isNotNegZero(node.child1().index()) || isNotNegZero(node.child2().index()))
                flags &= ~NodeNeedsNegZero;
            changed |= m_graph[node.child1()].mergeFlags(flags);
            changed |= m_graph[node.child2()].mergeFlags(flags);
            break;
        }

        case ArithSub: {
            SpeculatedType left = m_graph[node.child1()].prediction();
            SpeculatedType right = m_graph[node.child2()].prediction();
            if (left && right) {
                if (m_graph.addShouldSpeculateInteger(node))
                    changed |= mergePrediction(SpecInt32);
                else
                    changed |= mergePrediction(speculatedDoubleTypeForPredictions(left, right));
            }
            if (isNotZero(node.child1().index()) || isNotZero(node.child2().index()))
                flags &= ~NodeNeedsNegZero;
            changed |= m_graph[node.child1()].mergeFlags(flags);
            changed |= m_graph[node.child2()].mergeFlags(flags);
            break;
        }

        case ArithNegate:
            if (m_graph[node.child1()].prediction()) {
                if (m_graph.negateShouldSpeculateInteger(node))
                    changed |= mergePrediction(SpecInt32);
                else
                    changed |= mergePrediction(speculatedDoubleTypeForPrediction(m_graph[node.child1()].prediction()));
            }
            changed |= m_graph[node.child1()].mergeFlags(flags);
            break;

        case ArithMin:
        case ArithMax: {
            SpeculatedType left = m_graph[node.child1()].prediction();
            SpeculatedType right = m_graph[node.child2()].prediction();
            if (left && right) {
                if (isInt32Speculation(mergeSpeculations(left, right)) && nodeCanSpeculateInteger(node.arithNodeFlags()))
                    changed |= mergePrediction(SpecInt32);
                else
                    changed |= mergePrediction(speculatedDoubleTypeForPredictions(left, right));
            }
            flags |= NodeUsedAsNumber;
            changed |= m_graph[node.child1()].mergeFlags(flags);
            changed |= m_graph[node.child2()].mergeFlags(flags);
            break;
        }

        case ArithMul: {
            SpeculatedType left = m_graph[node.child1()].prediction();
            SpeculatedType right = m_graph[node.child2()].prediction();
            if (left && right) {
                if (m_graph.mulShouldSpeculateInteger(node))
                    changed |= mergePrediction(SpecInt32);
                else
                    changed |= mergePrediction(speculatedDoubleTypeForPredictions(left, right));
            }
            // As soon as a multiply happens, we can easily end up in the part
            // of the double domain where the point at which you do truncation
            // can change the outcome. So, ArithMul always checks for overflow
            // no matter what, and always forces its inputs to check as well.
            flags |= NodeUsedAsNumber | NodeNeedsNegZero;
            changed |= m_graph[node.child1()].mergeFlags(flags);
            changed |= m_graph[node.child2()].mergeFlags(flags);
            break;
        }

        case ArithDiv: {
            SpeculatedType left = m_graph[node.child1()].prediction();
            SpeculatedType right = m_graph[node.child2()].prediction();
            if (left && right) {
                if (isInt32Speculation(mergeSpeculations(left, right)) && nodeCanSpeculateInteger(node.arithNodeFlags()))
                    changed |= mergePrediction(SpecInt32);
                else
                    changed |= mergePrediction(SpecDouble);
            }
            // As with ArithMul, a division can easily end up in the part of the
            // double domain where the point at which you do truncation can change
            // the outcome. So ArithDiv also always checks for overflow no matter
            // what, and always forces its inputs to check as well.
            flags |= NodeUsedAsNumber | NodeNeedsNegZero;
            changed |= m_graph[node.child1()].mergeFlags(flags);
            changed |= m_graph[node.child2()].mergeFlags(flags);
            break;
        }

        case ArithSqrt: {
            changed |= setPrediction(SpecDouble);
            changed |= m_graph[node.child1()].mergeFlags(flags | NodeUsedAsValue);
            break;
        }

        case ArithAbs: {
            SpeculatedType child = m_graph[node.child1()].prediction();
            if (nodeCanSpeculateInteger(node.arithNodeFlags()))
                changed |= mergePrediction(child);
            else
                changed |= setPrediction(speculatedDoubleTypeForPrediction(child));
            flags &= ~NodeNeedsNegZero;
            changed |= m_graph[node.child1()].mergeFlags(flags);
            break;
        }

        case LogicalNot:
        case CompareLess:
        case CompareLessEq:
        case CompareGreater:
        case CompareGreaterEq:
        case CompareEq:
        case CompareStrictEq:
        case InstanceOf:
        case IsUndefined:
        case IsBoolean:
        case IsNumber:
        case IsString:
        case IsObject:
        case IsFunction: {
            changed |= setPrediction(SpecBoolean);
            changed |= mergeDefaultFlags(node);
            break;
        }

        case GetById: {
            changed |= mergePrediction(node.getHeapPrediction());
            changed |= mergeDefaultFlags(node);
            break;
        }

        case GetByIdFlush:
            changed |= mergePrediction(node.getHeapPrediction());
            changed |= mergeDefaultFlags(node);
            break;

        case GetByVal: {
            if (m_graph[node.child1()].shouldSpeculateFloat32Array() || m_graph[node.child1()].shouldSpeculateFloat64Array())
                changed |= mergePrediction(SpecDouble);
            else
                changed |= mergePrediction(node.getHeapPrediction());
            changed |= m_graph[node.child1()].mergeFlags(NodeUsedAsValue);
            changed |= m_graph[node.child2()].mergeFlags(NodeUsedAsNumber | NodeUsedAsInt);
            break;
        }

        case GetMyArgumentByValSafe: {
            changed |= mergePrediction(node.getHeapPrediction());
            changed |= m_graph[node.child1()].mergeFlags(NodeUsedAsNumber | NodeUsedAsInt);
            break;
        }

        case GetMyArgumentsLengthSafe: {
            changed |= setPrediction(SpecInt32);
            break;
        }

        case GetScopeRegisters:
        case GetButterfly:
        case GetIndexedPropertyStorage:
        case AllocatePropertyStorage:
        case ReallocatePropertyStorage: {
            changed |= setPrediction(SpecOther);
            changed |= mergeDefaultFlags(node);
            break;
        }

        case GetByOffset: {
            changed |= mergePrediction(node.getHeapPrediction());
            changed |= mergeDefaultFlags(node);
            break;
        }

        case Call:
        case Construct: {
            changed |= mergePrediction(node.getHeapPrediction());
            for (unsigned childIdx = node.firstChild(); childIdx < node.firstChild() + node.numChildren(); ++childIdx) {
                Edge edge = m_graph.m_varArgChildren[childIdx];
                changed |= m_graph[edge].mergeFlags(NodeUsedAsValue);
            }
            break;
        }

        case ConvertThis: {
            SpeculatedType prediction = m_graph[node.child1()].prediction();
            if (prediction) {
                if (prediction & ~SpecObjectMask) {
                    prediction &= SpecObjectMask;
                    prediction = mergeSpeculations(prediction, SpecObjectOther);
                }
                changed |= mergePrediction(prediction);
            }
            changed |=
                mergeDefaultFlags(node);
            break;
        }

        case GetGlobalVar: {
            changed |= mergePrediction(node.getHeapPrediction());
            break;
        }

        case PutGlobalVar:
        case PutGlobalVarCheck: {
            changed |= m_graph[node.child1()].mergeFlags(NodeUsedAsValue);
            break;
        }

        case GetScopedVar:
        case Resolve:
        case ResolveBase:
        case ResolveBaseStrictPut:
        case ResolveGlobal: {
            SpeculatedType prediction = node.getHeapPrediction();
            changed |= mergePrediction(prediction);
            break;
        }

        case GetScope: {
            changed |= setPrediction(SpecCellOther);
            break;
        }

        case GetCallee: {
            changed |= setPrediction(SpecFunction);
            break;
        }

        case CreateThis:
        case NewObject: {
            changed |= setPrediction(SpecFinalObject);
            changed |= mergeDefaultFlags(node);
            break;
        }

        case NewArray: {
            changed |= setPrediction(SpecArray);
            for (unsigned childIdx = node.firstChild(); childIdx < node.firstChild() + node.numChildren(); ++childIdx) {
                Edge edge = m_graph.m_varArgChildren[childIdx];
                changed |= m_graph[edge].mergeFlags(NodeUsedAsValue);
            }
            break;
        }

        case NewArrayWithSize: {
            changed |= setPrediction(SpecArray);
            changed |= m_graph[node.child1()].mergeFlags(NodeUsedAsNumber | NodeUsedAsInt);
            break;
        }

        case NewArrayBuffer: {
            changed |= setPrediction(SpecArray);
            break;
        }

        case NewRegexp: {
            changed |= setPrediction(SpecObjectOther);
            break;
        }

        case StringCharAt: {
            changed |= setPrediction(SpecString);
            changed |= m_graph[node.child1()].mergeFlags(NodeUsedAsValue);
            changed |= m_graph[node.child2()].mergeFlags(NodeUsedAsNumber | NodeUsedAsInt);
            break;
        }

        case StrCat: {
            changed |= setPrediction(SpecString);
            for (unsigned childIdx = node.firstChild(); childIdx < node.firstChild() + node.numChildren(); ++childIdx)
                changed |= m_graph[m_graph.m_varArgChildren[childIdx]].mergeFlags(NodeUsedAsNumber);
            break;
        }

        case ToPrimitive: {
            SpeculatedType child = m_graph[node.child1()].prediction();
            if (child) {
                if (isObjectSpeculation(child)) {
                    // I'd love to fold this case into the case below, but I can't, because
                    // removing SpecObjectMask from something that only has an object
                    // prediction and nothing else means we have an ill-formed SpeculatedType
                    // (strong predict-none). This should be killed once we remove all traces
                    // of static (aka weak) predictions.
                    changed |= mergePrediction(SpecString);
                } else if (child & SpecObjectMask) {
                    // Objects get turned into strings. So if the input has hints of objectness,
                    // the output will have hints of stringiness.
                    changed |= mergePrediction(
                        mergeSpeculations(child & ~SpecObjectMask, SpecString));
                } else
                    changed |= mergePrediction(child);
            }
            changed |= m_graph[node.child1()].mergeFlags(flags);
            break;
        }

        case CreateActivation: {
            changed |= setPrediction(SpecObjectOther);
            break;
        }

        case CreateArguments: {
            // At this stage we don't try to predict whether the arguments are ours or
            // someone else's. We could, but we don't, yet.
            changed |= setPrediction(SpecArguments);
            break;
        }

        case NewFunction:
        case NewFunctionNoCheck:
        case NewFunctionExpression: {
            changed |= setPrediction(SpecFunction);
            break;
        }

        case PutByValAlias:
        case GetArrayLength:
        case Int32ToDouble:
        case DoubleAsInt32:
        case GetLocalUnlinked:
        case GetMyArgumentsLength:
        case GetMyArgumentByVal:
        case PhantomPutStructure:
        case PhantomArguments:
        case CheckArray:
        case Arrayify: {
            // This node should never be visible at this stage of compilation. It is
            // inserted by fixup(), which follows this phase.
            ASSERT_NOT_REACHED();
            break;
        }

        case PutByVal:
            changed |= m_graph[m_graph.varArgChild(node, 0)].mergeFlags(NodeUsedAsValue);
            changed |= m_graph[m_graph.varArgChild(node, 1)].mergeFlags(NodeUsedAsNumber | NodeUsedAsInt);
            changed |= m_graph[m_graph.varArgChild(node, 2)].mergeFlags(NodeUsedAsValue);
            break;

        case PutScopedVar:
        case Return:
        case Throw:
            changed |= m_graph[node.child1()].mergeFlags(NodeUsedAsValue);
            break;

        case PutById:
        case PutByIdDirect:
            changed |= m_graph[node.child1()].mergeFlags(NodeUsedAsValue);
            changed |= m_graph[node.child2()].mergeFlags(NodeUsedAsValue);
            break;

        case PutByOffset:
            changed |= m_graph[node.child1()].mergeFlags(NodeUsedAsValue);
            changed |= m_graph[node.child3()].mergeFlags(NodeUsedAsValue);
            break;

        case Phi:
            break;

#ifndef NDEBUG
        // These get ignored because they don't return anything.
        case DFG::Jump:
        case Branch:
        case Breakpoint:
        case CheckHasInstance:
        case ThrowReferenceError:
        case ForceOSRExit:
        case SetArgument:
        case CheckStructure:
        case ForwardCheckStructure:
        case StructureTransitionWatchpoint:
        case ForwardStructureTransitionWatchpoint:
        case CheckFunction:
        case PutStructure:
        case TearOffActivation:
        case TearOffArguments:
        case CheckNumber:
        case CheckArgumentsNotCreated:
        case GlobalVarWatchpoint:
        case GarbageValue:
            changed |= mergeDefaultFlags(node);
            break;

        // These get ignored because they don't do anything.
        case Phantom:
        case InlineStart:
        case Nop:
            break;

        case LastNodeType:
            ASSERT_NOT_REACHED();
            break;
#else
        default:
            changed |= mergeDefaultFlags(node);
            break;
#endif
        }

#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
        dataLog("%s\n", speculationToString(m_graph[m_compileIndex].prediction()));
#endif

        m_changed |= changed;
    }
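// Illustrative sketch (not part of the phase above): propagate() only merges lattice bits for a
// single node and records whether anything changed in m_changed. The phase is driven by an
// outer fixpoint loop that keeps sweeping the graph until a full pass leaves every prediction
// and flag untouched. The names below (TinyNode, mergeBits, runToFixpoint) are hypothetical
// stand-ins; the real driver sweeps forward and backward over m_graph, but the shape of the
// loop is the same.
#include <cstdint>
#include <vector>

struct TinyNode {
    uint32_t prediction = 0;    // stand-in for SpeculatedType bits
    std::vector<size_t> inputs; // indices of input nodes
};

// Merge 'bits' into 'target', reporting whether anything new was added.
static bool mergeBits(uint32_t& target, uint32_t bits)
{
    uint32_t old = target;
    target |= bits;
    return target != old;
}

static void runToFixpoint(std::vector<TinyNode>& graph)
{
    bool changed = true;
    while (changed) {
        changed = false;
        for (TinyNode& node : graph) {
            // Stand-in transfer function: a node's prediction is the union of its inputs'.
            for (size_t input : node.inputs)
                changed |= mergeBits(node.prediction, graph[input].prediction);
        }
    }
}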
bool run() { RELEASE_ASSERT(m_graph.m_plan.mode == FTLForOSREntryMode); RELEASE_ASSERT(m_graph.m_form == ThreadedCPS); unsigned bytecodeIndex = m_graph.m_plan.osrEntryBytecodeIndex; RELEASE_ASSERT(bytecodeIndex); RELEASE_ASSERT(bytecodeIndex != UINT_MAX); // Needed by createPreHeader(). m_graph.ensureDominators(); CodeBlock* baseline = m_graph.m_profiledBlock; BasicBlock* target = 0; for (unsigned blockIndex = m_graph.numBlocks(); blockIndex--;) { BasicBlock* block = m_graph.block(blockIndex); if (!block) continue; unsigned nodeIndex = 0; Node* firstNode = block->at(0); while (firstNode->isSemanticallySkippable()) firstNode = block->at(++nodeIndex); if (firstNode->op() == LoopHint && firstNode->origin.semantic == CodeOrigin(bytecodeIndex)) { target = block; break; } } if (!target) { // This is a terrible outcome. It shouldn't often happen but it might // happen and so we should defend against it. If it happens, then this // compilation is a failure. return false; } BlockInsertionSet insertionSet(m_graph); // We say that the execution count of the entry block is 1, because we know for sure // that this must be the case. Under our definition of executionCount, "1" means "once // per invocation". We could have said NaN here, since that would ask any clients of // executionCount to use best judgement - but that seems unnecessary since we know for // sure what the executionCount should be in this case. BasicBlock* newRoot = insertionSet.insert(0, 1); // We'd really like to use an unset origin, but ThreadedCPS won't allow that. NodeOrigin origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), false); Vector<Node*> locals(baseline->m_numCalleeLocals); for (int local = 0; local < baseline->m_numCalleeLocals; ++local) { Node* previousHead = target->variablesAtHead.local(local); if (!previousHead) continue; VariableAccessData* variable = previousHead->variableAccessData(); locals[local] = newRoot->appendNode( m_graph, variable->prediction(), ExtractOSREntryLocal, origin, OpInfo(variable->local().offset())); newRoot->appendNode( m_graph, SpecNone, MovHint, origin, OpInfo(variable->local().offset()), Edge(locals[local])); } // Now use the origin of the target, since it's not OK to exit, and we will probably hoist // type checks to here. origin = target->at(0)->origin; for (int argument = 0; argument < baseline->numParameters(); ++argument) { Node* oldNode = target->variablesAtHead.argument(argument); if (!oldNode) { // Just for sanity, always have a SetArgument even if it's not needed. oldNode = m_graph.m_arguments[argument]; } Node* node = newRoot->appendNode( m_graph, SpecNone, SetArgument, origin, OpInfo(oldNode->variableAccessData())); m_graph.m_arguments[argument] = node; } for (int local = 0; local < baseline->m_numCalleeLocals; ++local) { Node* previousHead = target->variablesAtHead.local(local); if (!previousHead) continue; VariableAccessData* variable = previousHead->variableAccessData(); Node* node = locals[local]; newRoot->appendNode( m_graph, SpecNone, SetLocal, origin, OpInfo(variable), Edge(node)); } newRoot->appendNode( m_graph, SpecNone, Jump, origin, OpInfo(createPreHeader(m_graph, insertionSet, target))); insertionSet.execute(); m_graph.resetReachability(); m_graph.killUnreachableBlocks(); return true; }
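// Sketch of the control flow constructed above (derived from the code; block names are
// informal): the new root rebuilds the baseline frame from the OSR entry values, then jumps
// through a fresh pre-header into the loop header that carried the matching LoopHint.
//
//   newRoot:   ExtractOSREntryLocal + MovHint for every local live at the target's head
//              SetArgument for every argument
//              SetLocal for every live local
//              Jump -> preHeader
//   preHeader: created by createPreHeader(); becomes the non-loop predecessor of 'target'
//   target:    the block whose first non-skippable node is the LoopHint for
//              osrEntryBytecodeIndex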
bool run() { for (unsigned i = m_graph.m_variableAccessData.size(); i--;) { VariableAccessData* variable = &m_graph.m_variableAccessData[i]; if (!variable->isRoot()) continue; variable->clearVotes(); } // Identify the set of variables that are always subject to the same structure // checks. For now, only consider monomorphic structure checks (one structure). for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) { BasicBlock* block = m_graph.m_blocks[blockIndex].get(); if (!block) continue; for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) { NodeIndex nodeIndex = block->at(indexInBlock); Node& node = m_graph[nodeIndex]; if (!node.shouldGenerate()) continue; switch (node.op()) { case CheckStructure: { Node& child = m_graph[node.child1()]; if (child.op() != GetLocal) break; VariableAccessData* variable = child.variableAccessData(); variable->vote(VoteStructureCheck); if (variable->isCaptured() || variable->structureCheckHoistingFailed()) break; if (!isCellSpeculation(variable->prediction())) break; noticeStructureCheck(variable, node.structureSet()); break; } case ForwardCheckStructure: case ForwardStructureTransitionWatchpoint: // We currently rely on the fact that we're the only ones who would // insert this node. ASSERT_NOT_REACHED(); break; case GetByOffset: case PutByOffset: case PutStructure: case StructureTransitionWatchpoint: case AllocatePropertyStorage: case ReallocatePropertyStorage: case GetPropertyStorage: case GetByVal: case PutByVal: case PutByValAlias: case GetArrayLength: case CheckArray: case GetIndexedPropertyStorage: case Phantom: // Don't count these uses. break; default: m_graph.vote(node, VoteOther); break; } } } // Disable structure hoisting on variables that appear to mostly be used in // contexts where it doesn't make sense. for (unsigned i = m_graph.m_variableAccessData.size(); i--;) { VariableAccessData* variable = &m_graph.m_variableAccessData[i]; if (!variable->isRoot()) continue; if (variable->voteRatio() >= Options::structureCheckVoteRatioForHoisting()) continue; HashMap<VariableAccessData*, CheckData>::iterator iter = m_map.find(variable); if (iter == m_map.end()) continue; #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE) dataLog("Zeroing the structure to hoist for %s because the ratio is %lf.\n", m_graph.nameOfVariableAccessData(variable), variable->voteRatio()); #endif iter->second.m_structure = 0; } // Identify the set of variables that are live across a structure clobber. Operands<VariableAccessData*> live( m_graph.m_blocks[0]->variablesAtTail.numberOfArguments(), m_graph.m_blocks[0]->variablesAtTail.numberOfLocals()); for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) { BasicBlock* block = m_graph.m_blocks[blockIndex].get(); if (!block) continue; ASSERT(live.numberOfArguments() == block->variablesAtTail.numberOfArguments()); ASSERT(live.numberOfLocals() == block->variablesAtTail.numberOfLocals()); for (unsigned i = live.size(); i--;) { NodeIndex indexAtTail = block->variablesAtTail[i]; VariableAccessData* variable; if (indexAtTail == NoNode) variable = 0; else variable = m_graph[indexAtTail].variableAccessData(); live[i] = variable; } for (unsigned indexInBlock = block->size(); indexInBlock--;) { NodeIndex nodeIndex = block->at(indexInBlock); Node& node = m_graph[nodeIndex]; if (!node.shouldGenerate()) continue; switch (node.op()) { case GetLocal: case Flush: // This is a birth. 
live.operand(node.local()) = node.variableAccessData(); break; case SetLocal: case SetArgument: ASSERT(live.operand(node.local())); // Must be live. ASSERT(live.operand(node.local()) == node.variableAccessData()); // Must have the variable we expected. // This is a death. live.operand(node.local()) = 0; break; // Use the CFA's notion of what clobbers the world. case ValueAdd: if (m_graph.addShouldSpeculateInteger(node)) break; if (Node::shouldSpeculateNumber(m_graph[node.child1()], m_graph[node.child2()])) break; clobber(live); break; case CompareLess: case CompareLessEq: case CompareGreater: case CompareGreaterEq: case CompareEq: { Node& left = m_graph[node.child1()]; Node& right = m_graph[node.child2()]; if (Node::shouldSpeculateInteger(left, right)) break; if (Node::shouldSpeculateNumber(left, right)) break; if (node.op() == CompareEq) { if ((m_graph.isConstant(node.child1().index()) && m_graph.valueOfJSConstant(node.child1().index()).isNull()) || (m_graph.isConstant(node.child2().index()) && m_graph.valueOfJSConstant(node.child2().index()).isNull())) break; if (Node::shouldSpeculateFinalObject(left, right)) break; if (Node::shouldSpeculateArray(left, right)) break; if (left.shouldSpeculateFinalObject() && right.shouldSpeculateFinalObjectOrOther()) break; if (right.shouldSpeculateFinalObject() && left.shouldSpeculateFinalObjectOrOther()) break; if (left.shouldSpeculateArray() && right.shouldSpeculateArrayOrOther()) break; if (right.shouldSpeculateArray() && left.shouldSpeculateArrayOrOther()) break; } clobber(live); break; } case GetByVal: case PutByVal: case PutByValAlias: if (m_graph.byValIsPure(node)) break; clobber(live); break; case GetMyArgumentsLengthSafe: case GetMyArgumentByValSafe: case GetById: case GetByIdFlush: case PutStructure: case PhantomPutStructure: case PutById: case PutByIdDirect: case Call: case Construct: case Resolve: case ResolveBase: case ResolveBaseStrictPut: case ResolveGlobal: clobber(live); break; default: ASSERT(node.op() != Phi); break; } } } bool changed = false; #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE) for (HashMap<VariableAccessData*, CheckData>::iterator it = m_map.begin(); it != m_map.end(); ++it) { if (!it->second.m_structure) { dataLog("Not hoisting checks for %s because of heuristics.\n", m_graph.nameOfVariableAccessData(it->first)); continue; } if (it->second.m_isClobbered && !it->second.m_structure->transitionWatchpointSetIsStillValid()) { dataLog("Not hoisting checks for %s because the structure is clobbered and has an invalid watchpoint set.\n", m_graph.nameOfVariableAccessData(it->first)); continue; } dataLog("Hoisting checks for %s\n", m_graph.nameOfVariableAccessData(it->first)); } #endif // DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE) // Make changes: // 1) If a variable's live range does not span a clobber, then inject structure // checks before the SetLocal. // 2) If a variable's live range spans a clobber but is watchpointable, then // inject structure checks before the SetLocal and replace all other structure // checks on that variable with structure transition watchpoints. InsertionSet<NodeIndex> insertionSet; for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) { BasicBlock* block = m_graph.m_blocks[blockIndex].get(); if (!block) continue; for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) { NodeIndex nodeIndex = block->at(indexInBlock); Node& node = m_graph[nodeIndex]; // Be careful not to use 'node' after appending to the graph. 
In those switch // cases where we need to append, we first carefully extract everything we need // from the node, before doing any appending. if (!node.shouldGenerate()) continue; switch (node.op()) { case SetArgument: { ASSERT(!blockIndex); // Insert a GetLocal and a CheckStructure immediately following this // SetArgument, if the variable was a candidate for structure hoisting. // If the basic block previously only had the SetArgument as its // variable-at-tail, then replace it with this GetLocal. VariableAccessData* variable = node.variableAccessData(); HashMap<VariableAccessData*, CheckData>::iterator iter = m_map.find(variable); if (iter == m_map.end()) break; if (!iter->second.m_structure) break; if (iter->second.m_isClobbered && !iter->second.m_structure->transitionWatchpointSetIsStillValid()) break; node.ref(); CodeOrigin codeOrigin = node.codeOrigin; Node getLocal(GetLocal, codeOrigin, OpInfo(variable), nodeIndex); getLocal.predict(variable->prediction()); getLocal.ref(); NodeIndex getLocalIndex = m_graph.size(); m_graph.append(getLocal); insertionSet.append(indexInBlock + 1, getLocalIndex); Node checkStructure(CheckStructure, codeOrigin, OpInfo(m_graph.addStructureSet(iter->second.m_structure)), getLocalIndex); checkStructure.ref(); NodeIndex checkStructureIndex = m_graph.size(); m_graph.append(checkStructure); insertionSet.append(indexInBlock + 1, checkStructureIndex); if (block->variablesAtTail.operand(variable->local()) == nodeIndex) block->variablesAtTail.operand(variable->local()) = getLocalIndex; m_graph.substituteGetLocal(*block, indexInBlock, variable, getLocalIndex); changed = true; break; } case SetLocal: { VariableAccessData* variable = node.variableAccessData(); HashMap<VariableAccessData*, CheckData>::iterator iter = m_map.find(variable); if (iter == m_map.end()) break; if (!iter->second.m_structure) break; if (iter->second.m_isClobbered && !iter->second.m_structure->transitionWatchpointSetIsStillValid()) break; // First insert a dead SetLocal to tell OSR that the child's value should // be dropped into this bytecode variable if the CheckStructure decides // to exit. CodeOrigin codeOrigin = node.codeOrigin; NodeIndex child1 = node.child1().index(); Node setLocal(SetLocal, codeOrigin, OpInfo(variable), child1); NodeIndex setLocalIndex = m_graph.size(); m_graph.append(setLocal); insertionSet.append(indexInBlock, setLocalIndex); m_graph[child1].ref(); // Use a ForwardCheckStructure to indicate that we should exit to the // next bytecode instruction rather than reexecuting the current one. Node checkStructure(ForwardCheckStructure, codeOrigin, OpInfo(m_graph.addStructureSet(iter->second.m_structure)), child1); checkStructure.ref(); NodeIndex checkStructureIndex = m_graph.size(); m_graph.append(checkStructure); insertionSet.append(indexInBlock, checkStructureIndex); changed = true; break; } case CheckStructure: { Node& child = m_graph[node.child1()]; if (child.op() != GetLocal) break; HashMap<VariableAccessData*, CheckData>::iterator iter = m_map.find(child.variableAccessData()); if (iter == m_map.end()) break; if (!iter->second.m_structure) break; if (!iter->second.m_isClobbered) { node.setOpAndDefaultFlags(Phantom); ASSERT(node.refCount() == 1); break; } if (!iter->second.m_structure->transitionWatchpointSetIsStillValid()) break; ASSERT(iter->second.m_structure == node.structureSet().singletonStructure()); node.convertToStructureTransitionWatchpoint(); changed = true; break; } default: break; } } insertionSet.execute(*block); } return changed; }
    bool run()
    {
        ASSERT(m_graph.m_form == ThreadedCPS);

        for (unsigned i = m_graph.m_variableAccessData.size(); i--;) {
            VariableAccessData* variable = &m_graph.m_variableAccessData[i];
            if (!variable->isRoot())
                continue;
            variable->clearVotes();
        }

        // Identify the set of variables that are always subject to the same structure
        // checks. For now, only consider monomorphic structure checks (one structure).
        for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) {
            BasicBlock* block = m_graph.m_blocks[blockIndex].get();
            if (!block)
                continue;
            for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) {
                Node* node = block->at(indexInBlock);
                switch (node->op()) {
                case CheckStructure:
                case StructureTransitionWatchpoint: {
                    Node* child = node->child1().node();
                    if (child->op() != GetLocal)
                        break;
                    VariableAccessData* variable = child->variableAccessData();
                    variable->vote(VoteStructureCheck);
                    if (!shouldConsiderForHoisting(variable))
                        break;
                    noticeStructureCheck(variable, node->structureSet());
                    break;
                }

                case ForwardCheckStructure:
                case ForwardStructureTransitionWatchpoint:
                    // We currently rely on the fact that we're the only ones who would
                    // insert this node.
                    RELEASE_ASSERT_NOT_REACHED();
                    break;

                case GetByOffset:
                case PutByOffset:
                case PutStructure:
                case AllocatePropertyStorage:
                case ReallocatePropertyStorage:
                case GetButterfly:
                case GetByVal:
                case PutByVal:
                case PutByValAlias:
                case GetArrayLength:
                case CheckArray:
                case GetIndexedPropertyStorage:
                case Phantom:
                    // Don't count these uses.
                    break;

                case ArrayifyToStructure:
                case Arrayify:
                    if (node->arrayMode().conversion() == Array::RageConvert) {
                        // Rage conversion changes structures. We should avoid trying to do
                        // any kind of hoisting when rage conversion is in play.
                        Node* child = node->child1().node();
                        if (child->op() != GetLocal)
                            break;
                        VariableAccessData* variable = child->variableAccessData();
                        variable->vote(VoteOther);
                        if (!shouldConsiderForHoisting(variable))
                            break;
                        noticeStructureCheck(variable, 0);
                    }
                    break;

                case SetLocal: {
                    // Find all uses of the source of the SetLocal. If any of them are a
                    // kind of CheckStructure, then we should notice them to ensure that
                    // we're not hoisting a check that would contravene checks that are
                    // already being performed.
                    VariableAccessData* variable = node->variableAccessData();
                    if (!shouldConsiderForHoisting(variable))
                        break;
                    Node* source = node->child1().node();
                    for (unsigned subIndexInBlock = 0; subIndexInBlock < block->size(); ++subIndexInBlock) {
                        Node* subNode = block->at(subIndexInBlock);
                        switch (subNode->op()) {
                        case CheckStructure: {
                            if (subNode->child1() != source)
                                break;
                            noticeStructureCheck(variable, subNode->structureSet());
                            break;
                        }
                        case StructureTransitionWatchpoint: {
                            if (subNode->child1() != source)
                                break;
                            noticeStructureCheck(variable, subNode->structure());
                            break;
                        }
                        default:
                            break;
                        }
                    }
                    m_graph.voteChildren(node, VoteOther);
                    break;
                }

                case GarbageValue:
                    break;

                default:
                    m_graph.voteChildren(node, VoteOther);
                    break;
                }
            }
        }

        // Disable structure hoisting on variables that appear to mostly be used in
        // contexts where it doesn't make sense.
for (unsigned i = m_graph.m_variableAccessData.size(); i--;) { VariableAccessData* variable = &m_graph.m_variableAccessData[i]; if (!variable->isRoot()) continue; if (variable->voteRatio() >= Options::structureCheckVoteRatioForHoisting()) continue; HashMap<VariableAccessData*, CheckData>::iterator iter = m_map.find(variable); if (iter == m_map.end()) continue; #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE) dataLog( "Zeroing the structure to hoist for ", VariableAccessDataDump(m_graph, variable), " because the ratio is ", variable->voteRatio(), ".\n"); #endif iter->value.m_structure = 0; } // Disable structure check hoisting for variables that cross the OSR entry that // we're currently taking, and where the value currently does not have the // structure we want. for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) { BasicBlock* block = m_graph.m_blocks[blockIndex].get(); if (!block) continue; ASSERT(block->isReachable); if (!block->isOSRTarget) continue; if (block->bytecodeBegin != m_graph.m_osrEntryBytecodeIndex) continue; for (size_t i = 0; i < m_graph.m_mustHandleValues.size(); ++i) { int operand = m_graph.m_mustHandleValues.operandForIndex(i); Node* node = block->variablesAtHead.operand(operand); if (!node) continue; VariableAccessData* variable = node->variableAccessData(); HashMap<VariableAccessData*, CheckData>::iterator iter = m_map.find(variable); if (iter == m_map.end()) continue; if (!iter->value.m_structure) continue; JSValue value = m_graph.m_mustHandleValues[i]; if (!value || !value.isCell()) { #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE) dataLog( "Zeroing the structure to hoist for ", VariableAccessDataDump(m_graph, variable), " because the OSR entry value is not a cell: ", value, ".\n"); #endif iter->value.m_structure = 0; continue; } if (value.asCell()->structure() != iter->value.m_structure) { #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE) dataLog( "Zeroing the structure to hoist for ", VariableAccessDataDump(m_graph, variable), " because the OSR entry value has structure ", RawPointer(value.asCell()->structure()), " and we wanted ", RawPointer(iter->value.m_structure), ".\n"); #endif iter->value.m_structure = 0; continue; } } } bool changed = false; #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE) for (HashMap<VariableAccessData*, CheckData>::iterator it = m_map.begin(); it != m_map.end(); ++it) { if (!it->value.m_structure) { dataLog( "Not hoisting checks for ", VariableAccessDataDump(m_graph, it->key), " because of heuristics.\n"); continue; } dataLog("Hoisting checks for ", VariableAccessDataDump(m_graph, it->key), "\n"); } #endif // DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE) // Place CheckStructure's at SetLocal sites. InsertionSet insertionSet(m_graph); for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) { BasicBlock* block = m_graph.m_blocks[blockIndex].get(); if (!block) continue; for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) { Node* node = block->at(indexInBlock); // Be careful not to use 'node' after appending to the graph. In those switch // cases where we need to append, we first carefully extract everything we need // from the node, before doing any appending. switch (node->op()) { case SetArgument: { ASSERT(!blockIndex); // Insert a GetLocal and a CheckStructure immediately following this // SetArgument, if the variable was a candidate for structure hoisting. // If the basic block previously only had the SetArgument as its // variable-at-tail, then replace it with this GetLocal. 
VariableAccessData* variable = node->variableAccessData(); HashMap<VariableAccessData*, CheckData>::iterator iter = m_map.find(variable); if (iter == m_map.end()) break; if (!iter->value.m_structure) break; CodeOrigin codeOrigin = node->codeOrigin; Node* getLocal = insertionSet.insertNode( indexInBlock + 1, variable->prediction(), GetLocal, codeOrigin, OpInfo(variable), Edge(node)); insertionSet.insertNode( indexInBlock + 1, SpecNone, CheckStructure, codeOrigin, OpInfo(m_graph.addStructureSet(iter->value.m_structure)), Edge(getLocal, CellUse)); if (block->variablesAtTail.operand(variable->local()) == node) block->variablesAtTail.operand(variable->local()) = getLocal; m_graph.substituteGetLocal(*block, indexInBlock, variable, getLocal); changed = true; break; } case SetLocal: { VariableAccessData* variable = node->variableAccessData(); HashMap<VariableAccessData*, CheckData>::iterator iter = m_map.find(variable); if (iter == m_map.end()) break; if (!iter->value.m_structure) break; // First insert a dead SetLocal to tell OSR that the child's value should // be dropped into this bytecode variable if the CheckStructure decides // to exit. CodeOrigin codeOrigin = node->codeOrigin; Edge child1 = node->child1(); insertionSet.insertNode( indexInBlock, SpecNone, SetLocal, codeOrigin, OpInfo(variable), child1); // Use a ForwardCheckStructure to indicate that we should exit to the // next bytecode instruction rather than reexecuting the current one. insertionSet.insertNode( indexInBlock, SpecNone, ForwardCheckStructure, codeOrigin, OpInfo(m_graph.addStructureSet(iter->value.m_structure)), Edge(child1.node(), CellUse)); changed = true; break; } default: break; } } insertionSet.execute(block); } return changed; }
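// Both hoisting passes above batch their new GetLocal / CheckStructure / SetLocal nodes in an
// insertion set and splice them in with execute() once the walk over each block is done. A
// minimal, self-contained sketch of that pattern follows; SimpleInsertionSet is a hypothetical
// stand-in, not the actual WTF/DFG class.
#include <algorithm>
#include <cstddef>
#include <utility>
#include <vector>

template<typename T>
class SimpleInsertionSet {
public:
    // Record that 'value' should appear at position 'index' of the block.
    void insert(size_t index, T value) { m_insertions.emplace_back(index, std::move(value)); }

    // Splice all recorded insertions in at once. Working from the back keeps the earlier
    // recorded indices valid while the vector is being mutated.
    void execute(std::vector<T>& block)
    {
        std::stable_sort(
            m_insertions.begin(), m_insertions.end(),
            [](const std::pair<size_t, T>& a, const std::pair<size_t, T>& b) { return a.first < b.first; });
        for (size_t i = m_insertions.size(); i--;)
            block.insert(block.begin() + m_insertions[i].first, std::move(m_insertions[i].second));
        m_insertions.clear();
    }

private:
    std::vector<std::pair<size_t, T>> m_insertions;
};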
    bool run()
    {
        RELEASE_ASSERT(m_graph.m_form == ThreadedCPS);

        m_graph.clearReplacements();
        m_graph.m_dominators.computeIfNecessary(m_graph);

        if (verbose) {
            dataLog("Graph before SSA transformation:\n");
            m_graph.dump();
        }

        // Create a SSACalculator::Variable for every root VariableAccessData.
        for (VariableAccessData& variable : m_graph.m_variableAccessData) {
            if (!variable.isRoot() || variable.isCaptured())
                continue;
            SSACalculator::Variable* ssaVariable = m_calculator.newVariable();
            ASSERT(ssaVariable->index() == m_variableForSSAIndex.size());
            m_variableForSSAIndex.append(&variable);
            m_ssaVariableForVariable.add(&variable, ssaVariable);
        }

        // Find all SetLocals and create Defs for them. We handle SetArgument by creating a
        // GetLocal, and recording the flush format.
        for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
            BasicBlock* block = m_graph.block(blockIndex);
            if (!block)
                continue;

            // Must process the block in forward direction because we want to see the last
            // assignment for every local.
            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);
                if (node->op() != SetLocal && node->op() != SetArgument)
                    continue;

                VariableAccessData* variable = node->variableAccessData();
                if (variable->isCaptured())
                    continue;

                Node* childNode;
                if (node->op() == SetLocal)
                    childNode = node->child1().node();
                else {
                    ASSERT(node->op() == SetArgument);
                    childNode = m_insertionSet.insertNode(
                        nodeIndex, node->variableAccessData()->prediction(), GetStack,
                        node->origin,
                        OpInfo(m_graph.m_stackAccessData.add(variable->local(), variable->flushFormat())));
                    if (!ASSERT_DISABLED)
                        m_argumentGetters.add(childNode);
                    m_argumentMapping.add(node, childNode);
                }

                m_calculator.newDef(
                    m_ssaVariableForVariable.get(variable), block, childNode);
            }

            m_insertionSet.execute(block);
        }

        // Decide where Phis are to be inserted. This creates the Phi's but doesn't insert them
        // yet. We will later know where to insert them because SSACalculator is such a bro.
        m_calculator.computePhis(
            [&] (SSACalculator::Variable* ssaVariable, BasicBlock* block) -> Node* {
                VariableAccessData* variable = m_variableForSSAIndex[ssaVariable->index()];

                // Prune by liveness. This doesn't buy us much other than compile times.
                Node* headNode = block->variablesAtHead.operand(variable->local());
                if (!headNode)
                    return nullptr;

                // There is the possibility of "rebirths". The SSA calculator will already prune
                // rebirths for the same VariableAccessData. But it will not be able to prune
                // rebirths that arose from the same local variable number but a different
                // VariableAccessData. We do that pruning here.
                //
                // Here's an example of a rebirth that this would catch:
                //
                //     var x;
                //     if (foo) {
                //         if (bar) {
                //             x = 42;
                //         } else {
                //             x = 43;
                //         }
                //         print(x);
                //         x = 44;
                //     } else {
                //         x = 45;
                //     }
                //     print(x); // Without this check, we'd have a Phi for x = 42|43 here.
                //
                // FIXME: Consider feeding local variable numbers, not VariableAccessData*'s, as
                // the "variables" for SSACalculator. That would allow us to eliminate this
                // special case.
                // https://bugs.webkit.org/show_bug.cgi?id=136641
                if (headNode->variableAccessData() != variable)
                    return nullptr;

                Node* phiNode = m_graph.addNode(
                    variable->prediction(), Phi, NodeOrigin());
                FlushFormat format = variable->flushFormat();
                NodeFlags result = resultFor(format);
                phiNode->mergeFlags(result);
                return phiNode;
            });

        if (verbose) {
            dataLog("Computed Phis, about to transform the graph.\n");
            dataLog("\n");
            dataLog("Graph:\n");
            m_graph.dump();
            dataLog("\n");
            dataLog("Mappings:\n");
            for (unsigned i = 0; i < m_variableForSSAIndex.size(); ++i)
                dataLog(" ", i, ": ", VariableAccessDataDump(m_graph, m_variableForSSAIndex[i]), "\n");
            dataLog("\n");
            dataLog("SSA calculator: ", m_calculator, "\n");
        }

        // Do the bulk of the SSA conversion. For each block, this tracks the operand->Node
        // mapping based on a combination of what the SSACalculator tells us, and us walking over
        // the block in forward order. We use our own data structure, valueForOperand, for
        // determining the local mapping, but we rely on SSACalculator for the non-local mapping.
        //
        // This does three things at once:
        //
        // - Inserts the Phis in all of the places where they need to go. We've already created
        //   them and they are accounted for in the SSACalculator's data structures, but we
        //   haven't inserted them yet, mostly because we want to insert all of a block's Phis in
        //   one go to amortize the cost of node insertion.
        //
        // - Creates and inserts Upsilons.
        //
        // - Converts all of the preexisting nodes (other than the old CPS Phi nodes) into SSA
        //   form by replacing as follows:
        //
        //   - MovHint has KillStack prepended to it.
        //
        //   - GetLocal over captured variables lose their phis and become GetStack.
        //
        //   - GetLocal over uncaptured variables die and get replaced with references to the node
        //     specified by valueForOperand.
        //
        //   - SetLocal turns into PutStack if it's flushed, or turns into a Check otherwise.
        //
        //   - Flush loses its children and turns into a Phantom.
        //
        //   - PhantomLocal becomes Phantom, and its child is whatever is specified by
        //     valueForOperand.
        //
        //   - SetArgument is removed. Note that GetStack nodes have already been inserted.
        Operands<Node*> valueForOperand(OperandsLike, m_graph.block(0)->variablesAtHead);
        for (BasicBlock* block : m_graph.blocksInPreOrder()) {
            valueForOperand.clear();

            // CPS will claim that the root block has all arguments live. But we have already done
            // the first step of SSA conversion: argument locals are no longer live at head;
            // instead we have GetStack nodes for extracting the values of arguments. So, we
            // skip the at-head available value calculation for the root block.
            if (block != m_graph.block(0)) {
                for (size_t i = valueForOperand.size(); i--;) {
                    Node* nodeAtHead = block->variablesAtHead[i];
                    if (!nodeAtHead)
                        continue;

                    VariableAccessData* variable = nodeAtHead->variableAccessData();
                    if (variable->isCaptured())
                        continue;

                    if (verbose)
                        dataLog("Considering live variable ", VariableAccessDataDump(m_graph, variable), " at head of block ", *block, "\n");

                    SSACalculator::Variable* ssaVariable = m_ssaVariableForVariable.get(variable);
                    SSACalculator::Def* def = m_calculator.reachingDefAtHead(block, ssaVariable);
                    if (!def) {
                        // If we are required to insert a Phi, then we won't have a reaching def
                        // at head.
                        continue;
                    }

                    Node* node = def->value();
                    if (node->replacement) {
                        // This will occur when a SetLocal had a GetLocal as its source. The
                        // GetLocal would get replaced with an actual SSA value by the time we get
                        // here.
                        // Note that the SSA value with which the GetLocal got replaced
                        // would not in turn have a replacement.
                        node = node->replacement;
                        ASSERT(!node->replacement);
                    }

                    if (verbose)
                        dataLog("Mapping: ", VirtualRegister(valueForOperand.operandForIndex(i)), " -> ", node, "\n");
                    valueForOperand[i] = node;
                }
            }

            // Insert Phis by asking the calculator what phis there are in this block. Also update
            // valueForOperand with those Phis. For Phis associated with variables that are not
            // flushed, we also insert a MovHint.
            size_t phiInsertionPoint = 0;
            for (SSACalculator::Def* phiDef : m_calculator.phisForBlock(block)) {
                VariableAccessData* variable = m_variableForSSAIndex[phiDef->variable()->index()];

                m_insertionSet.insert(phiInsertionPoint, phiDef->value());
                valueForOperand.operand(variable->local()) = phiDef->value();

                m_insertionSet.insertNode(
                    phiInsertionPoint, SpecNone, MovHint, NodeOrigin(),
                    OpInfo(variable->local().offset()), phiDef->value()->defaultEdge());
            }

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);

                if (verbose) {
                    dataLog("Processing node ", node, ":\n");
                    m_graph.dump(WTF::dataFile(), " ", node);
                }

                m_graph.performSubstitution(node);

                switch (node->op()) {
                case MovHint: {
                    m_insertionSet.insertNode(
                        nodeIndex, SpecNone, KillStack, node->origin,
                        OpInfo(node->unlinkedLocal().offset()));
                    break;
                }

                case SetLocal: {
                    VariableAccessData* variable = node->variableAccessData();

                    if (variable->isCaptured() || !!(node->flags() & NodeIsFlushed)) {
                        node->convertToPutStack(
                            m_graph.m_stackAccessData.add(
                                variable->local(), variable->flushFormat()));
                    } else
                        node->setOpAndDefaultFlags(Check);

                    if (!variable->isCaptured()) {
                        if (verbose)
                            dataLog("Mapping: ", variable->local(), " -> ", node->child1().node(), "\n");
                        valueForOperand.operand(variable->local()) = node->child1().node();
                    }
                    break;
                }

                case GetStack: {
                    ASSERT(m_argumentGetters.contains(node));
                    valueForOperand.operand(node->stackAccessData()->local) = node;
                    break;
                }

                case GetLocal: {
                    VariableAccessData* variable = node->variableAccessData();
                    node->children.reset();

                    if (variable->isCaptured()) {
                        node->convertToGetStack(m_graph.m_stackAccessData.add(variable->local(), variable->flushFormat()));
                        break;
                    }

                    node->convertToPhantom();
                    if (verbose)
                        dataLog("Replacing node ", node, " with ", valueForOperand.operand(variable->local()), "\n");
                    node->replacement = valueForOperand.operand(variable->local());
                    break;
                }

                case Flush: {
                    node->children.reset();
                    node->convertToPhantom();
                    break;
                }

                case PhantomLocal: {
                    ASSERT(node->child1().useKind() == UntypedUse);
                    VariableAccessData* variable = node->variableAccessData();
                    if (variable->isCaptured()) {
                        // This is a fun case. We could have a captured variable that had some
                        // or all of its uses strength reduced to phantoms rather than flushes.
                        // SSA conversion will currently still treat it as flushed, in the sense
                        // that it will just keep the SetLocal. Therefore, there is nothing that
                        // needs to be done here: we don't need to also keep the source value
                        // alive. And even if we did want to keep the source value alive, we
                        // wouldn't be able to, because the variablesAtHead value for a captured
                        // local wouldn't have been computed by the Phi reduction algorithm
                        // above.
                        node->children.reset();
                    } else
                        node->child1() = valueForOperand.operand(variable->local())->defaultEdge();
                    node->convertToPhantom();
                    break;
                }

                case SetArgument: {
                    node->convertToPhantom();
                    break;
                }

                default:
                    break;
                }
            }

            // We want to insert Upsilons just before the end of the block.
            // On the surface this seems dangerous because the Upsilon will have a checking
            // UseKind. But, we will not actually be performing the check at the point of the
            // Upsilon; the check will already have been performed at the point where the
            // original SetLocal was.
            size_t upsilonInsertionPoint = block->size() - 1;
            NodeOrigin upsilonOrigin = block->last()->origin;
            for (unsigned successorIndex = block->numSuccessors(); successorIndex--;) {
                BasicBlock* successorBlock = block->successor(successorIndex);
                for (SSACalculator::Def* phiDef : m_calculator.phisForBlock(successorBlock)) {
                    Node* phiNode = phiDef->value();
                    SSACalculator::Variable* ssaVariable = phiDef->variable();
                    VariableAccessData* variable = m_variableForSSAIndex[ssaVariable->index()];
                    FlushFormat format = variable->flushFormat();
                    UseKind useKind = useKindFor(format);

                    m_insertionSet.insertNode(
                        upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
                        OpInfo(phiNode), Edge(
                            valueForOperand.operand(variable->local()), useKind));
                }
            }

            m_insertionSet.execute(block);
        }

        // Free all CPS phis and reset variables vectors.
        for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
            BasicBlock* block = m_graph.block(blockIndex);
            if (!block)
                continue;

            for (unsigned phiIndex = block->phis.size(); phiIndex--;)
                m_graph.m_allocator.free(block->phis[phiIndex]);
            block->phis.clear();
            block->variablesAtHead.clear();
            block->variablesAtTail.clear();
            block->valuesAtHead.clear();
            block->valuesAtTail.clear();
            block->ssa = std::make_unique<BasicBlock::SSAData>(block);
        }

        m_graph.m_argumentFormats.resize(m_graph.m_arguments.size());
        for (unsigned i = m_graph.m_arguments.size(); i--;) {
            FlushFormat format = FlushedJSValue;

            Node* node = m_argumentMapping.get(m_graph.m_arguments[i]);

            // m_argumentMapping.get could return null for a captured local. That's fine. We only
            // track the argument loads of those arguments for which we speculate type. We don't
            // speculate type for captured arguments.
            if (node)
                format = node->stackAccessData()->format;

            m_graph.m_argumentFormats[i] = format;
            m_graph.m_arguments[i] = node; // Record the load that loads the arguments for the benefit of exit profiling.
        }

        m_graph.m_form = SSA;

        if (verbose) {
            dataLog("Graph after SSA transformation:\n");
            m_graph.dump();
        }

        return true;
    }
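// Sketch of the Phi/Upsilon encoding this pass produces (informal pseudo-IR, not an actual
// graph dump): instead of a Phi listing one operand per predecessor, each predecessor ends
// with an Upsilon that writes the incoming value into the Phi's shadow slot, and the Phi at
// the head of the merge block reads it back. For a simple diamond merging a1 and a2 into a3:
//
//   #left:                    #right:
//     a1: <some value>          a2: <some value>
//     Upsilon(@a1, ^a3)         Upsilon(@a2, ^a3)
//     Jump(#merge)              Jump(#merge)
//
//   #merge:
//     a3: Phi()
//     use(@a3)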
bool run() { RELEASE_ASSERT(m_graph.m_plan.mode == FTLForOSREntryMode); RELEASE_ASSERT(m_graph.m_form == ThreadedCPS); unsigned bytecodeIndex = m_graph.m_plan.osrEntryBytecodeIndex; RELEASE_ASSERT(bytecodeIndex); RELEASE_ASSERT(bytecodeIndex != UINT_MAX); // Needed by createPreHeader(). m_graph.m_dominators.computeIfNecessary(m_graph); CodeBlock* baseline = m_graph.m_profiledBlock; BasicBlock* target = 0; for (unsigned blockIndex = m_graph.numBlocks(); blockIndex--;) { BasicBlock* block = m_graph.block(blockIndex); if (!block) continue; unsigned nodeIndex = 0; Node* firstNode = block->at(0); while (firstNode->isSemanticallySkippable()) firstNode = block->at(++nodeIndex); if (firstNode->op() == LoopHint && firstNode->origin.semantic == CodeOrigin(bytecodeIndex)) { target = block; break; } } if (!target) { // This is a terrible outcome. It shouldn't often happen but it might // happen and so we should defend against it. If it happens, then this // compilation is a failure. return false; } BlockInsertionSet insertionSet(m_graph); BasicBlock* newRoot = insertionSet.insert(0, QNaN); NodeOrigin origin = target->at(0)->origin; Vector<Node*> locals(baseline->m_numCalleeRegisters); for (int local = 0; local < baseline->m_numCalleeRegisters; ++local) { Node* previousHead = target->variablesAtHead.local(local); if (!previousHead) continue; VariableAccessData* variable = previousHead->variableAccessData(); locals[local] = newRoot->appendNode( m_graph, variable->prediction(), ExtractOSREntryLocal, origin, OpInfo(variable->local().offset())); newRoot->appendNode( m_graph, SpecNone, MovHint, origin, OpInfo(variable->local().offset()), Edge(locals[local])); } for (int argument = 0; argument < baseline->numParameters(); ++argument) { Node* oldNode = target->variablesAtHead.argument(argument); if (!oldNode) { // Just for sanity, always have a SetArgument even if it's not needed. oldNode = m_graph.m_arguments[argument]; } Node* node = newRoot->appendNode( m_graph, SpecNone, SetArgument, origin, OpInfo(oldNode->variableAccessData())); m_graph.m_arguments[argument] = node; } for (int local = 0; local < baseline->m_numCalleeRegisters; ++local) { Node* previousHead = target->variablesAtHead.local(local); if (!previousHead) continue; VariableAccessData* variable = previousHead->variableAccessData(); Node* node = locals[local]; newRoot->appendNode( m_graph, SpecNone, SetLocal, origin, OpInfo(variable), Edge(node)); } newRoot->appendNode( m_graph, SpecNone, Jump, origin, OpInfo(createPreHeader(m_graph, insertionSet, target))); insertionSet.execute(); m_graph.resetReachability(); m_graph.killUnreachableBlocks(); return true; }