// Computes, for every bytecode offset, the set of registers killed there:
// a register is killed at an instruction if it is used by that instruction
// but not live after it. Walks each non-entry/non-exit block backward.
void BytecodeLivenessAnalysis::computeKills(BytecodeKills& result)
{
    FastBitVector live;
    result.m_codeBlock = m_codeBlock;
    result.m_killSets = std::make_unique<BytecodeKills::KillSet[]>(m_codeBlock->instructions().size());
    for (unsigned blockIndex = m_basicBlocks.size(); blockIndex--;) {
        BytecodeBasicBlock* block = m_basicBlocks[blockIndex].get();
        if (block->isEntryBlock() || block->isExitBlock())
            continue;
        // Seed with the block's live-out set and step instructions in reverse.
        live = block->out();
        for (unsigned offsetIndex = block->bytecodeOffsets().size(); offsetIndex--;) {
            unsigned bytecodeOffset = block->bytecodeOffsets()[offsetIndex];
            stepOverInstruction(
                m_codeBlock, block, m_basicBlocks, bytecodeOffset,
                [&] (unsigned index) {
                    // Use functor: a use that was not live after this
                    // instruction is killed at this instruction.
                    if (live.get(index))
                        return;
                    result.m_killSets[bytecodeOffset].add(index);
                    live.set(index);
                },
                [&] (unsigned index) {
                    // Def functor: going backward, a def ends the live range.
                    live.clear(index);
                });
        }
    }
}
// Computes, for every bytecode offset, the set of registers killed there:
// a register is killed at an instruction if it is used by that instruction
// but not live after it. Walks each non-entry/non-exit block backward.
void BytecodeLivenessAnalysis::computeKills(BytecodeKills& result)
{
    CodeBlock* codeBlock = m_graph.codeBlock();
    result.m_codeBlock = codeBlock;
    result.m_killSets = std::make_unique<BytecodeKills::KillSet[]>(codeBlock->instructions().size());
    FastBitVector out;
    for (std::unique_ptr<BytecodeBasicBlock>& block : m_graph.basicBlocksInReverseOrder()) {
        if (block->isEntryBlock() || block->isExitBlock())
            continue;
        // Seed with the block's live-out set and step instructions in reverse.
        out = block->out();
        unsigned offsetIndex = block->offsets().size();
        while (offsetIndex--) {
            unsigned bytecodeOffset = block->offsets()[offsetIndex];
            auto useFunctor = [&] (unsigned index) {
                // A use that was not live after this instruction is killed
                // at this instruction.
                if (out.get(index))
                    return;
                result.m_killSets[bytecodeOffset].add(index);
                out.set(index);
            };
            auto defFunctor = [&] (unsigned index) {
                // Going backward, a def ends the live range.
                out.clear(index);
            };
            stepOverInstruction(m_graph, bytecodeOffset, out, useFunctor, defFunctor);
        }
    }
}
// Standard backward live-variable dataflow fixpoint over the bytecode CFG:
// out(block) = union of in(successor); each block's in-set is recomputed by
// computeLocalLivenessForBlock. Iterates until no block's out-set changes.
void BytecodeLivenessAnalysis::runLivenessFixpoint()
{
    UnlinkedCodeBlock* unlinkedCodeBlock = m_codeBlock->unlinkedCodeBlock();
    unsigned numberOfVariables = unlinkedCodeBlock->m_numCalleeLocals;

    // Size every block's in/out bitvectors to the number of callee locals.
    for (unsigned i = 0; i < m_basicBlocks.size(); i++) {
        BytecodeBasicBlock* block = m_basicBlocks[i].get();
        block->in().resize(numberOfVariables);
        block->out().resize(numberOfVariables);
    }

    bool changed;
    // The last block (presumably the exit block — it is the one excluded from
    // the loop below) has nothing live in or out.
    m_basicBlocks.last()->in().clearAll();
    m_basicBlocks.last()->out().clearAll();
    // Scratch vector reused across iterations to avoid reallocating.
    FastBitVector newOut;
    newOut.resize(m_basicBlocks.last()->out().numBits());
    do {
        changed = false;
        // Visit blocks in reverse order (skipping the last block, whose sets
        // were cleared above); liveness flows backward, so reverse order
        // converges in fewer passes.
        for (unsigned i = m_basicBlocks.size() - 1; i--;) {
            BytecodeBasicBlock* block = m_basicBlocks[i].get();
            newOut.clearAll();
            for (unsigned j = 0; j < block->successors().size(); j++)
                newOut.merge(block->successors()[j]->in());
            bool outDidChange = block->out().setAndCheck(newOut);
            computeLocalLivenessForBlock(m_codeBlock, block, m_basicBlocks);
            changed |= outDidChange;
        }
    } while (changed);
}
// Sets the bit corresponding to |operand| in |bits|, mapping the register
// into the compacted index space that excludes captured variables.
static void setForOperand(CodeBlock* codeBlock, FastBitVector& bits, int operand)
{
    ASSERT(isValidRegisterForLiveness(codeBlock, operand));
    VirtualRegister reg(operand);
    if (reg.offset() > captureStart(codeBlock)) {
        // Register precedes the captured region: its local index maps directly.
        bits.set(reg.toLocal());
    } else {
        // Register follows the captured region: shift its index down past
        // the captured variables, which are not tracked in this bitvector.
        bits.set(reg.toLocal() - numberOfCapturedVariables(codeBlock));
    }
}
bool BytecodeLivenessAnalysis::operandIsLiveAtBytecodeOffset(int operand, unsigned bytecodeOffset) { int numCapturedVars = numberOfCapturedVariables(m_codeBlock); if (VirtualRegister(operand).isArgument()) return true; if (operand <= captureStart(m_codeBlock) && operand > captureEnd(m_codeBlock)) return true; FastBitVector result; getLivenessInfoForNonCapturedVarsAtBytecodeOffset(bytecodeOffset, result); return result.get(operand - numCapturedVars); }
// Convenience overload: applies one instruction's effect to a live set by
// setting bits for uses and clearing bits for defs.
static void stepOverInstruction(CodeBlock* codeBlock, BytecodeBasicBlock* block, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks, unsigned bytecodeOffset, FastBitVector& out)
{
    auto markUse = [&] (unsigned bitIndex) {
        // Uses become live when stepping backward over the instruction.
        out.set(bitIndex);
    };
    auto markDef = [&] (unsigned bitIndex) {
        // Defs become dead when stepping backward over the instruction.
        out.clear(bitIndex);
    };
    stepOverInstruction(codeBlock, block, basicBlocks, bytecodeOffset, markUse, markDef);
}
void BytecodeLivenessAnalysis::getLivenessInfoAtBytecodeOffset(unsigned bytecodeOffset, FastBitVector& result) { BytecodeBasicBlock* block = m_graph.findBasicBlockForBytecodeOffset(bytecodeOffset); ASSERT(block); ASSERT(!block->isEntryBlock()); ASSERT(!block->isExitBlock()); result.resize(block->out().numBits()); computeLocalLivenessForBytecodeOffset(m_graph, block, bytecodeOffset, result); }
void BytecodeLivenessAnalysis::getLivenessInfoForNonCapturedVarsAtBytecodeOffset(unsigned bytecodeOffset, FastBitVector& result) { BytecodeBasicBlock* block = findBasicBlockForBytecodeOffset(m_basicBlocks, bytecodeOffset); ASSERT(block); ASSERT(!block->isEntryBlock()); ASSERT(!block->isExitBlock()); result.resize(block->out().numBits()); computeLocalLivenessForBytecodeOffset(m_codeBlock, block, m_basicBlocks, bytecodeOffset, result); }
void Plan::cleanMustHandleValuesIfNecessary() { LockHolder locker(mustHandleValueCleaningLock); if (!mustHandleValuesMayIncludeGarbage) return; mustHandleValuesMayIncludeGarbage = false; if (!codeBlock) return; if (!mustHandleValues.numberOfLocals()) return; FastBitVector liveness = codeBlock->alternative()->livenessAnalysis().getLivenessInfoAtBytecodeOffset(osrEntryBytecodeIndex); for (unsigned local = mustHandleValues.numberOfLocals(); local--;) { if (!liveness.get(local)) mustHandleValues.local(local) = jsUndefined(); } }
// Computes the liveness bits just before the instruction at |targetOffset|
// by starting from |block|'s live-out set and stepping backward over each
// instruction at or after the target. Stores the result in |result|.
static void computeLocalLivenessForBytecodeOffset(CodeBlock* codeBlock, BytecodeBasicBlock* block, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks, unsigned targetOffset, FastBitVector& result)
{
    ASSERT(!block->isExitBlock());
    ASSERT(!block->isEntryBlock());

    FastBitVector out = block->out();

    for (int i = block->bytecodeOffsets().size() - 1; i >= 0; i--) {
        unsigned bytecodeOffset = block->bytecodeOffsets()[i];
        // Stop once we have stepped over every instruction at or after the
        // target; |out| is now the live set entering the target instruction.
        if (targetOffset > bytecodeOffset)
            break;

        // Apply this instruction's uses (set) and defs (clear) to the set.
        stepOverInstruction(codeBlock, block, basicBlocks, bytecodeOffset, out);
    }

    result.set(out);
}
// Debug dump: prints every basic block with its predecessors and successors,
// the live-variable set before each instruction, the disassembled bytecode,
// and the live-out set at the end of each block.
void BytecodeLivenessAnalysis::dumpResults()
{
    Interpreter* interpreter = m_codeBlock->vm()->interpreter;
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();
    for (unsigned i = 0; i < m_basicBlocks.size(); i++) {
        BytecodeBasicBlock* block = m_basicBlocks[i].get();
        dataLogF("\nBytecode basic block %u: %p (offset: %u, length: %u)\n", i, block, block->leaderBytecodeOffset(), block->totalBytecodeLength());
        dataLogF("Predecessors: ");
        for (unsigned j = 0; j < block->predecessors().size(); j++) {
            BytecodeBasicBlock* predecessor = block->predecessors()[j];
            dataLogF("%p ", predecessor);
        }
        dataLogF("\n");
        dataLogF("Successors: ");
        for (unsigned j = 0; j < block->successors().size(); j++) {
            BytecodeBasicBlock* successor = block->successors()[j];
            dataLogF("%p ", successor);
        }
        dataLogF("\n");
        // Entry/exit blocks have no bytecode; just label them.
        if (block->isEntryBlock()) {
            dataLogF("Entry block %p\n", block);
            continue;
        }
        if (block->isExitBlock()) {
            dataLogF("Exit block: %p\n", block);
            continue;
        }
        // Walk the block instruction by instruction, printing the
        // live-before set followed by the disassembled instruction.
        for (unsigned bytecodeOffset = block->leaderBytecodeOffset(); bytecodeOffset < block->leaderBytecodeOffset() + block->totalBytecodeLength();) {
            const Instruction* currentInstruction = &instructionsBegin[bytecodeOffset];

            dataLogF("Live variables: ");
            FastBitVector liveBefore = getLivenessInfoAtBytecodeOffset(bytecodeOffset);
            for (unsigned j = 0; j < liveBefore.numBits(); j++) {
                if (liveBefore.get(j))
                    dataLogF("%u ", j);
            }
            dataLogF("\n");
            m_codeBlock->dumpBytecode(WTF::dataFile(), m_codeBlock->globalObject()->globalExec(), instructionsBegin, currentInstruction);

            // Advance by the decoded opcode's length.
            OpcodeID opcodeID = interpreter->getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode);
            unsigned opcodeLength = opcodeLengths[opcodeID];
            bytecodeOffset += opcodeLength;
        }

        // Finally, the live-out set at the end of the block.
        dataLogF("Live variables: ");
        FastBitVector liveAfter = block->out();
        for (unsigned j = 0; j < liveAfter.numBits(); j++) {
            if (liveAfter.get(j))
                dataLogF("%u ", j);
        }
        dataLogF("\n");
    }
}
void NaturalLoops::compute(Graph& graph)
{
    // Implement the classic dominator-based natural loop finder. The first
    // step is to find all control flow edges A -> B where B dominates A.
    // Then B is a loop header and A is a backward branching block. We will
    // then accumulate, for each loop header, multiple backward branching
    // blocks. Then we backwards graph search from the backward branching
    // blocks to their loop headers, which gives us all of the blocks in the
    // loop body.

    static const bool verbose = false;

    graph.m_dominators.computeIfNecessary(graph);

    if (verbose) {
        dataLog("Dominators:\n");
        graph.m_dominators.dump(graph, WTF::dataFile());
    }

    m_loops.resize(0);

    // Bootstrap: find back edges (edges whose target dominates their source).
    // Each distinct target becomes a loop header; each back-edge source is
    // added to that header's loop.
    for (BlockIndex blockIndex = graph.numBlocks(); blockIndex--;) {
        BasicBlock* block = graph.block(blockIndex);
        if (!block)
            continue;

        for (unsigned i = block->numSuccessors(); i--;) {
            BasicBlock* successor = block->successor(i);
            if (!graph.m_dominators.dominates(successor, block))
                continue;
            // Merge into an existing loop with this header, if any.
            bool found = false;
            for (unsigned j = m_loops.size(); j--;) {
                if (m_loops[j].header() == successor) {
                    m_loops[j].addBlock(block);
                    found = true;
                    break;
                }
            }
            if (found)
                continue;
            NaturalLoop loop(successor, m_loops.size());
            loop.addBlock(block);
            m_loops.append(loop);
        }
    }

    if (verbose)
        dataLog("After bootstrap: ", *this, "\n");

    FastBitVector seenBlocks;
    Vector<BasicBlock*, 4> blockWorklist;
    seenBlocks.resize(graph.numBlocks());

    // For each loop, flood backward from the back-edge sources toward the
    // header, adding every predecessor reached; this collects the full loop
    // body.
    for (unsigned i = m_loops.size(); i--;) {
        NaturalLoop& loop = m_loops[i];

        seenBlocks.clearAll();
        ASSERT(blockWorklist.isEmpty());

        if (verbose)
            dataLog("Dealing with loop ", loop, "\n");

        for (unsigned j = loop.size(); j--;) {
            seenBlocks.set(loop[j]->index);
            blockWorklist.append(loop[j]);
        }

        while (!blockWorklist.isEmpty()) {
            BasicBlock* block = blockWorklist.takeLast();

            if (verbose)
                dataLog(" Dealing with ", *block, "\n");

            // Don't walk past the header; the search stays inside the loop.
            if (block == loop.header())
                continue;

            for (unsigned j = block->predecessors.size(); j--;) {
                BasicBlock* predecessor = block->predecessors[j];
                if (seenBlocks.get(predecessor->index))
                    continue;

                loop.addBlock(predecessor);
                blockWorklist.append(predecessor);
                seenBlocks.set(predecessor->index);
            }
        }
    }

    // Figure out reverse mapping from blocks to loops.
    for (BlockIndex blockIndex = graph.numBlocks(); blockIndex--;) {
        BasicBlock* block = graph.block(blockIndex);
        if (!block)
            continue;
        for (unsigned i = BasicBlock::numberOfInnerMostLoopIndices; i--;)
            block->innerMostLoopIndices[i] = UINT_MAX;
    }
    for (unsigned loopIndex = m_loops.size(); loopIndex--;) {
        NaturalLoop& loop = m_loops[loopIndex];
        for (unsigned blockIndexInLoop = loop.size(); blockIndexInLoop--;) {
            BasicBlock* block = loop[blockIndexInLoop];

            // Keep each block's bounded list of loop indices ordered with the
            // smallest (inner-most) loops first.
            for (unsigned i = 0; i < BasicBlock::numberOfInnerMostLoopIndices; ++i) {
                unsigned thisIndex = block->innerMostLoopIndices[i];
                if (thisIndex == UINT_MAX || loop.size() < m_loops[thisIndex].size()) {
                    insertIntoBoundedVector(
                        block->innerMostLoopIndices, BasicBlock::numberOfInnerMostLoopIndices,
                        loopIndex, i);
                    break;
                }
            }
        }
    }

    // Now each block knows its inner-most loop and its next-to-inner-most loop. Use
    // this to figure out loop parenting.
    for (unsigned i = m_loops.size(); i--;) {
        NaturalLoop& loop = m_loops[i];
        RELEASE_ASSERT(loop.header()->innerMostLoopIndices[0] == i);

        loop.m_outerLoopIndex = loop.header()->innerMostLoopIndices[1];
    }

    if (validationEnabled()) {
        // Do some self-verification that we've done some of this correctly.
        for (BlockIndex blockIndex = graph.numBlocks(); blockIndex--;) {
            BasicBlock* block = graph.block(blockIndex);
            if (!block)
                continue;

            Vector<const NaturalLoop*> simpleLoopsOf;

            for (unsigned i = m_loops.size(); i--;) {
                if (m_loops[i].contains(block))
                    simpleLoopsOf.append(&m_loops[i]);
            }

            Vector<const NaturalLoop*> fancyLoopsOf = loopsOf(block);

            std::sort(simpleLoopsOf.begin(), simpleLoopsOf.end());
            std::sort(fancyLoopsOf.begin(), fancyLoopsOf.end());

            RELEASE_ASSERT(simpleLoopsOf == fancyLoopsOf);
        }
    }

    if (verbose)
        dataLog("Results: ", *this, "\n");
}
// For each node in |block|, finds the exception handler (if any) covering its
// semantic origin and inserts Flush nodes so that values a catch block might
// observe stay flushed to the stack while covered by a handler.
void handleBlockForTryCatch(BasicBlock* block, InsertionSet& insertionSet)
{
    HandlerInfo* currentExceptionHandler = nullptr;
    FastBitVector liveAtCatchHead;
    liveAtCatchHead.resize(m_graph.block(0)->variablesAtTail.numberOfLocals());

    HandlerInfo* cachedHandlerResult;
    CodeOrigin cachedCodeOrigin;
    // Memoized handler lookup: walks up the inline call stack until a
    // handler covering the bytecode index is found (or the stack runs out).
    // On a hit, recomputes liveAtCatchHead for the handler's target.
    auto catchHandler = [&] (CodeOrigin origin) -> HandlerInfo* {
        ASSERT(origin);
        if (origin == cachedCodeOrigin)
            return cachedHandlerResult;

        unsigned bytecodeIndexToCheck = origin.bytecodeIndex;

        cachedCodeOrigin = origin;

        while (1) {
            InlineCallFrame* inlineCallFrame = origin.inlineCallFrame;
            CodeBlock* codeBlock = m_graph.baselineCodeBlockFor(inlineCallFrame);
            if (HandlerInfo* handler = codeBlock->handlerForBytecodeOffset(bytecodeIndexToCheck)) {
                liveAtCatchHead.clearAll();

                unsigned catchBytecodeIndex = handler->target;
                m_graph.forAllLocalsLiveInBytecode(CodeOrigin(catchBytecodeIndex, inlineCallFrame), [&] (VirtualRegister operand) {
                    liveAtCatchHead[operand.toLocal()] = true;
                });

                cachedHandlerResult = handler;
                break;
            }

            if (!inlineCallFrame) {
                cachedHandlerResult = nullptr;
                break;
            }

            // Not handled at this level; retry at the caller's origin.
            bytecodeIndexToCheck = inlineCallFrame->directCaller.bytecodeIndex;
            origin = inlineCallFrame->directCaller;
        }

        return cachedHandlerResult;
    };

    // Tracks the most recent VariableAccessData for each operand within this
    // block, so inserted Flushes reuse it.
    Operands<VariableAccessData*> currentBlockAccessData(block->variablesAtTail.numberOfArguments(), block->variablesAtTail.numberOfLocals(), nullptr);
    HashSet<InlineCallFrame*> seenInlineCallFrames;

    // Flushes every local live at the catch head, every argument, and the
    // 'this' slot for each inline call frame seen so far (plus the machine
    // frame's 'this'). Used when the covering handler changes or at block end.
    auto flushEverything = [&] (NodeOrigin origin, unsigned index) {
        RELEASE_ASSERT(currentExceptionHandler);
        auto flush = [&] (VirtualRegister operand, bool alwaysInsert) {
            if ((operand.isLocal() && liveAtCatchHead[operand.toLocal()]) || operand.isArgument() || alwaysInsert) {
                ASSERT(isValidFlushLocation(block, index, operand));

                VariableAccessData* accessData = currentBlockAccessData.operand(operand);
                if (!accessData)
                    accessData = newVariableAccessData(operand);

                currentBlockAccessData.operand(operand) = accessData;

                insertionSet.insertNode(index, SpecNone, Flush, origin, OpInfo(accessData));
            }
        };

        for (unsigned local = 0; local < block->variablesAtTail.numberOfLocals(); local++)
            flush(virtualRegisterForLocal(local), false);
        for (InlineCallFrame* inlineCallFrame : seenInlineCallFrames)
            flush(VirtualRegister(inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset()), true);
        flush(VirtualRegister(CallFrame::thisArgumentOffset()), true);

        seenInlineCallFrames.clear();
    };

    for (unsigned nodeIndex = 0; nodeIndex < block->size(); nodeIndex++) {
        Node* node = block->at(nodeIndex);

        {
            // When the covering handler changes mid-block, flush everything
            // under the protection of the old handler first.
            HandlerInfo* newHandler = catchHandler(node->origin.semantic);
            if (newHandler != currentExceptionHandler && currentExceptionHandler)
                flushEverything(node->origin, nodeIndex);
            currentExceptionHandler = newHandler;
        }

        if (currentExceptionHandler && (node->op() == SetLocal || node->op() == SetArgument)) {
            InlineCallFrame* inlineCallFrame = node->origin.semantic.inlineCallFrame;
            if (inlineCallFrame)
                seenInlineCallFrames.add(inlineCallFrame);
            VirtualRegister operand = node->local();

            int stackOffset = inlineCallFrame ? inlineCallFrame->stackOffset : 0;
            // Flush a store whose target is live at the catch head, is an
            // argument, or is the 'this' slot of the current frame.
            if ((operand.isLocal() && liveAtCatchHead[operand.toLocal()]) || operand.isArgument() || (operand.offset() == stackOffset + CallFrame::thisArgumentOffset())) {
                ASSERT(isValidFlushLocation(block, nodeIndex, operand));

                VariableAccessData* variableAccessData = currentBlockAccessData.operand(operand);
                if (!variableAccessData)
                    variableAccessData = newVariableAccessData(operand);

                insertionSet.insertNode(nodeIndex, SpecNone, Flush, node->origin, OpInfo(variableAccessData));
            }
        }

        if (node->accessesStack(m_graph))
            currentBlockAccessData.operand(node->local()) = node->variableAccessData();
    }

    // If the block ends still covered by a handler, flush at the block's end.
    if (currentExceptionHandler) {
        NodeOrigin origin = block->at(block->size() - 1)->origin;
        flushEverything(origin, block->size());
    }
}
// Returns the full liveness bitvector (indexed by local number) at
// |bytecodeOffset|: captured variables are marked always-live, and the
// compacted non-captured-variable bits are remapped to local indices.
FastBitVector BytecodeLivenessAnalysis::getLivenessInfoAtBytecodeOffset(unsigned bytecodeOffset)
{
    FastBitVector temp;
    FastBitVector result;
    getLivenessInfoForNonCapturedVarsAtBytecodeOffset(bytecodeOffset, temp);

    unsigned numCapturedVars = numberOfCapturedVariables(m_codeBlock);
    if (numCapturedVars) {
        // Captured locals are conservatively always live: set their whole range.
        int firstCapturedLocal = VirtualRegister(captureStart(m_codeBlock)).toLocal();
        result.resize(temp.numBits() + numCapturedVars);
        for (unsigned i = 0; i < numCapturedVars; ++i)
            result.set(firstCapturedLocal + i);
    } else
        result.resize(temp.numBits());

    int tempLength = temp.numBits();
    ASSERT(tempLength >= 0);
    for (int i = 0; i < tempLength; i++) {
        if (!temp.get(i))
            continue;
        if (!numCapturedVars) {
            // No captured region: compacted index == local index.
            result.set(i);
            continue;
        }
        // Remap from the compacted (captured-vars-excluded) index space back
        // to real local indices: locals before the captured region keep their
        // index; locals after it shift up by numCapturedVars.
        if (virtualRegisterForLocal(i).offset() > captureStart(m_codeBlock))
            result.set(i);
        else
            result.set(numCapturedVars + i);
    }
    return result;
}
// Computes the liveness bits just before the instruction at |targetOffset|:
// starts from |block|'s live-out set and steps backward over each instruction
// at or after the target, applying out = (out - defs) | uses, and merging in
// the live-in of any exception handler covering the instruction.
static void computeLocalLivenessForBytecodeOffset(CodeBlock* codeBlock, BytecodeBasicBlock* block, Vector<RefPtr<BytecodeBasicBlock> >& basicBlocks, unsigned targetOffset, FastBitVector& result)
{
    ASSERT(!block->isExitBlock());
    ASSERT(!block->isEntryBlock());

    FastBitVector out = block->out();

    HandlerInfo* handler = 0;
    // Scratch vectors reused for each instruction's use/def sets.
    FastBitVector uses;
    FastBitVector defs;
    uses.resize(out.numBits());
    defs.resize(out.numBits());

    for (int i = block->bytecodeOffsets().size() - 1; i >= 0; i--) {
        unsigned bytecodeOffset = block->bytecodeOffsets()[i];
        // Stop once every instruction at or after the target has been stepped
        // over; |out| is now the live set entering the target instruction.
        if (targetOffset > bytecodeOffset)
            break;

        uses.clearAll();
        defs.clearAll();
        computeUsesForBytecodeOffset(codeBlock, bytecodeOffset, uses);
        computeDefsForBytecodeOffset(codeBlock, bytecodeOffset, defs);
        out.exclude(defs);
        out.merge(uses);

        // If we have an exception handler, we want the live-in variables of the
        // exception handler block to be included in the live-in of this particular bytecode.
        if ((handler = codeBlock->handlerForBytecodeOffset(bytecodeOffset))) {
            BytecodeBasicBlock* handlerBlock = findBasicBlockWithLeaderOffset(basicBlocks, handler->target);
            ASSERT(handlerBlock);
            out.merge(handlerBlock->in());
        }
    }

    result.set(out);
}
// Sets in |defs| the bit for each register defined (written) by the
// instruction at |bytecodeOffset|. Most opcodes define nothing or define
// exactly the register in operand slot 1; op_enter conservatively defines
// everything. Opcodes not listed fall through and define nothing.
static void computeDefsForBytecodeOffset(CodeBlock* codeBlock, unsigned bytecodeOffset, FastBitVector& defs)
{
    Interpreter* interpreter = codeBlock->vm()->interpreter;
    Instruction* instructionsBegin = codeBlock->instructions().begin();
    Instruction* instruction = &instructionsBegin[bytecodeOffset];
    OpcodeID opcodeID = interpreter->getOpcodeID(instruction->u.opcode);
    switch (opcodeID) {
    // These don't define anything.
    case op_init_global_const:
    case op_init_global_const_nop:
    case op_push_name_scope:
    case op_push_with_scope:
    case op_put_to_scope:
    case op_pop_scope:
    case op_end:
    case op_profile_will_call:
    case op_profile_did_call:
    case op_throw:
    case op_throw_static_error:
    case op_debug:
    case op_ret:
    case op_ret_object_or_this:
    case op_jmp:
    case op_jtrue:
    case op_jfalse:
    case op_jeq_null:
    case op_jneq_null:
    case op_jneq_ptr:
    case op_jless:
    case op_jlesseq:
    case op_jgreater:
    case op_jgreatereq:
    case op_jnless:
    case op_jnlesseq:
    case op_jngreater:
    case op_jngreatereq:
    case op_loop_hint:
    case op_switch_imm:
    case op_switch_char:
    case op_switch_string:
    case op_put_by_id:
    case op_put_by_id_out_of_line:
    case op_put_by_id_replace:
    case op_put_by_id_transition:
    case op_put_by_id_transition_direct:
    case op_put_by_id_transition_direct_out_of_line:
    case op_put_by_id_transition_normal:
    case op_put_by_id_transition_normal_out_of_line:
    case op_put_by_id_generic:
    case op_put_getter_setter:
    case op_put_by_val:
    case op_put_by_val_direct:
    case op_put_by_index:
    case op_del_by_id:
    case op_del_by_val:
// LLInt helper opcodes never define registers either.
#define LLINT_HELPER_OPCODES(opcode, length) case opcode:
        FOR_EACH_LLINT_OPCODE_EXTENSION(LLINT_HELPER_OPCODES);
#undef LLINT_HELPER_OPCODES
        return;
    // These all have a single destination for the first argument.
    case op_next_pname:
    case op_get_pnames:
    case op_resolve_scope:
    case op_strcat:
    case op_tear_off_activation:
    case op_to_primitive:
    case op_catch:
    case op_create_this:
    case op_new_array:
    case op_new_array_buffer:
    case op_new_array_with_size:
    case op_new_regexp:
    case op_new_func:
    case op_new_func_exp:
    case op_call_varargs:
    case op_get_from_scope:
    case op_call:
    case op_call_eval:
    case op_construct:
    case op_get_by_id:
    case op_get_by_id_out_of_line:
    case op_get_by_id_self:
    case op_get_by_id_proto:
    case op_get_by_id_chain:
    case op_get_by_id_getter_self:
    case op_get_by_id_getter_proto:
    case op_get_by_id_getter_chain:
    case op_get_by_id_custom_self:
    case op_get_by_id_custom_proto:
    case op_get_by_id_custom_chain:
    case op_get_by_id_generic:
    case op_get_array_length:
    case op_get_string_length:
    case op_check_has_instance:
    case op_instanceof:
    case op_get_by_val:
    case op_get_argument_by_val:
    case op_get_by_pname:
    case op_get_arguments_length:
    case op_typeof:
    case op_is_undefined:
    case op_is_boolean:
    case op_is_number:
    case op_is_string:
    case op_is_object:
    case op_is_function:
    case op_in:
    case op_to_number:
    case op_negate:
    case op_add:
    case op_mul:
    case op_div:
    case op_mod:
    case op_sub:
    case op_lshift:
    case op_rshift:
    case op_urshift:
    case op_bitand:
    case op_bitxor:
    case op_bitor:
    case op_inc:
    case op_dec:
    case op_eq:
    case op_neq:
    case op_stricteq:
    case op_nstricteq:
    case op_less:
    case op_lesseq:
    case op_greater:
    case op_greatereq:
    case op_neq_null:
    case op_eq_null:
    case op_not:
    case op_mov:
    case op_new_object:
    case op_to_this:
    case op_get_callee:
    case op_init_lazy_reg:
    case op_create_activation:
    case op_create_arguments: {
        // Destination register is operand 1.
        if (isValidRegisterForLiveness(codeBlock, instruction[1].u.operand))
            setForOperand(codeBlock, defs, instruction[1].u.operand);
        return;
    }
    case op_tear_off_arguments: {
        // NOTE(review): operand 1 minus 1 — presumably the register adjacent
        // to the arguments register; confirm against the opcode's encoding.
        if (isValidRegisterForLiveness(codeBlock, instruction[1].u.operand - 1))
            setForOperand(codeBlock, defs, instruction[1].u.operand - 1);
        return;
    }
    case op_enter: {
        // Function entry conservatively defines every register.
        defs.setAll();
        return;
    }
    }
}