void BytecodeLivenessAnalysis::getLivenessInfoAtBytecodeOffset(unsigned bytecodeOffset, FastBitVector& result) { BytecodeBasicBlock* block = m_graph.findBasicBlockForBytecodeOffset(bytecodeOffset); ASSERT(block); ASSERT(!block->isEntryBlock()); ASSERT(!block->isExitBlock()); result.resize(block->out().numBits()); computeLocalLivenessForBytecodeOffset(m_graph, block, bytecodeOffset, result); }
void BytecodeLivenessAnalysis::getLivenessInfoForNonCapturedVarsAtBytecodeOffset(unsigned bytecodeOffset, FastBitVector& result) { BytecodeBasicBlock* block = findBasicBlockForBytecodeOffset(m_basicBlocks, bytecodeOffset); ASSERT(block); ASSERT(!block->isEntryBlock()); ASSERT(!block->isExitBlock()); result.resize(block->out().numBits()); computeLocalLivenessForBytecodeOffset(m_codeBlock, block, m_basicBlocks, bytecodeOffset, result); }
// Debug dump: prints every basic block with its predecessor/successor
// pointers, then (for body blocks) each instruction preceded by the set of
// locals live before it, and finally the locals live at the block's exit.
void BytecodeLivenessAnalysis::dumpResults()
{
    Interpreter* interpreter = m_codeBlock->vm()->interpreter;
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();
    for (unsigned i = 0; i < m_basicBlocks.size(); i++) {
        BytecodeBasicBlock* block = m_basicBlocks[i].get();
        dataLogF("\nBytecode basic block %u: %p (offset: %u, length: %u)\n", i, block, block->leaderBytecodeOffset(), block->totalBytecodeLength());
        dataLogF("Predecessors: ");
        for (unsigned j = 0; j < block->predecessors().size(); j++) {
            BytecodeBasicBlock* predecessor = block->predecessors()[j];
            dataLogF("%p ", predecessor);
        }
        dataLogF("\n");
        dataLogF("Successors: ");
        for (unsigned j = 0; j < block->successors().size(); j++) {
            BytecodeBasicBlock* successor = block->successors()[j];
            dataLogF("%p ", successor);
        }
        dataLogF("\n");
        // Entry/exit blocks have no bytecode; just identify them and move on.
        if (block->isEntryBlock()) {
            dataLogF("Entry block %p\n", block);
            continue;
        }
        if (block->isExitBlock()) {
            dataLogF("Exit block: %p\n", block);
            continue;
        }
        // Walk the block's instructions in program order, printing the
        // live-before set ahead of each instruction's disassembly.
        for (unsigned bytecodeOffset = block->leaderBytecodeOffset(); bytecodeOffset < block->leaderBytecodeOffset() + block->totalBytecodeLength();) {
            const Instruction* currentInstruction = &instructionsBegin[bytecodeOffset];
            dataLogF("Live variables: ");
            FastBitVector liveBefore = getLivenessInfoAtBytecodeOffset(bytecodeOffset);
            for (unsigned j = 0; j < liveBefore.numBits(); j++) {
                if (liveBefore.get(j))
                    dataLogF("%u ", j);
            }
            dataLogF("\n");
            m_codeBlock->dumpBytecode(WTF::dataFile(), m_codeBlock->globalObject()->globalExec(), instructionsBegin, currentInstruction);
            // Advance by the current opcode's length to reach the next instruction.
            OpcodeID opcodeID = interpreter->getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode);
            unsigned opcodeLength = opcodeLengths[opcodeID];
            bytecodeOffset += opcodeLength;
        }
        // After the last instruction, print the block's live-out set.
        dataLogF("Live variables: ");
        FastBitVector liveAfter = block->out();
        for (unsigned j = 0; j < liveAfter.numBits(); j++) {
            if (liveAfter.get(j))
                dataLogF("%u ", j);
        }
        dataLogF("\n");
    }
}
// Abstractly steps backwards over the instruction at |bytecodeOffset|,
// invoking |def| for each local the instruction defines and then |use| for
// each local it uses (defs before uses — see the comment below for why).
// UseFunctor/DefFunctor are callables taking an unsigned local index; their
// template header presumably sits just above this chunk — not visible here.
static void stepOverInstruction(CodeBlock* codeBlock, BytecodeBasicBlock* block, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks, unsigned bytecodeOffset, const UseFunctor& use, const DefFunctor& def)
{
    // This abstractly executes the instruction in reverse. Instructions logically first use operands and
    // then define operands. This logical ordering is necessary for operations that use and def the same
    // operand, like:
    //
    //     op_add loc1, loc1, loc2
    //
    // The use of loc1 happens before the def of loc1. That's a semantic requirement since the add
    // operation cannot travel forward in time to read the value that it will produce after reading that
    // value. Since we are executing in reverse, this means that we must do defs before uses (reverse of
    // uses before defs).
    //
    // Since this is a liveness analysis, this ordering ends up being particularly important: if we did
    // uses before defs, then the add operation above would appear to not have loc1 live, since we'd
    // first add it to the out set (the use), and then we'd remove it (the def).

    computeDefsForBytecodeOffset(
        codeBlock, block, bytecodeOffset,
        [&] (CodeBlock* codeBlock, Instruction*, OpcodeID, int operand) {
            // isValidRegisterForLiveness presumably filters out non-local
            // operands (constants/arguments) — confirm in its definition.
            if (isValidRegisterForLiveness(codeBlock, operand))
                def(VirtualRegister(operand).toLocal());
        });

    computeUsesForBytecodeOffset(
        codeBlock, block, bytecodeOffset,
        [&] (CodeBlock* codeBlock, Instruction*, OpcodeID, int operand) {
            if (isValidRegisterForLiveness(codeBlock, operand))
                use(VirtualRegister(operand).toLocal());
        });

    // If we have an exception handler, we want the live-in variables of the
    // exception handler block to be included in the live-in of this particular bytecode.
    if (HandlerInfo* handler = codeBlock->handlerForBytecodeOffset(bytecodeOffset)) {
        // FIXME: This resume check should not be needed.
        // https://bugs.webkit.org/show_bug.cgi?id=159281
        Interpreter* interpreter = codeBlock->vm()->interpreter;
        Instruction* instructionsBegin = codeBlock->instructions().begin();
        Instruction* instruction = &instructionsBegin[bytecodeOffset];
        OpcodeID opcodeID = interpreter->getOpcodeID(instruction->u.opcode);
        if (opcodeID != op_resume) {
            BytecodeBasicBlock* handlerBlock = findBasicBlockWithLeaderOffset(basicBlocks, handler->target);
            ASSERT(handlerBlock);
            // Every local live on entry to the handler counts as a use here,
            // since this instruction may transfer control into the handler.
            handlerBlock->in().forEachSetBit(use);
        }
    }
}
void BytecodeLivenessAnalysis::runLivenessFixpoint() { UnlinkedCodeBlock* unlinkedCodeBlock = m_codeBlock->unlinkedCodeBlock(); unsigned numberOfVariables = unlinkedCodeBlock->m_numCalleeLocals; for (unsigned i = 0; i < m_basicBlocks.size(); i++) { BytecodeBasicBlock* block = m_basicBlocks[i].get(); block->in().resize(numberOfVariables); block->out().resize(numberOfVariables); } bool changed; m_basicBlocks.last()->in().clearAll(); m_basicBlocks.last()->out().clearAll(); FastBitVector newOut; newOut.resize(m_basicBlocks.last()->out().numBits()); do { changed = false; for (unsigned i = m_basicBlocks.size() - 1; i--;) { BytecodeBasicBlock* block = m_basicBlocks[i].get(); newOut.clearAll(); for (unsigned j = 0; j < block->successors().size(); j++) newOut.merge(block->successors()[j]->in()); bool outDidChange = block->out().setAndCheck(newOut); computeLocalLivenessForBlock(m_codeBlock, block, m_basicBlocks); changed |= outDidChange; } } while (changed); }
// Computes, for every bytecode offset, the set of locals killed there: locals
// that are not live after the instruction but become live when stepping
// backwards over it (i.e. the instruction is their last use on this path).
// Walks each body block backwards from its live-out set.
//
// Fix: the inner loop redeclared `i`, shadowing the outer block index — renamed
// to `instructionIndex` to remove the bugprone shadowing.
void BytecodeLivenessAnalysis::computeKills(BytecodeKills& result)
{
    FastBitVector out;

    result.m_codeBlock = m_codeBlock;
    result.m_killSets = std::make_unique<BytecodeKills::KillSet[]>(m_codeBlock->instructions().size());
    for (unsigned i = m_basicBlocks.size(); i--;) {
        BytecodeBasicBlock* block = m_basicBlocks[i].get();
        // Entry/exit blocks contain no bytecode and therefore kill nothing.
        if (block->isEntryBlock() || block->isExitBlock())
            continue;

        // Start from the block's live-out set and step backwards.
        out = block->out();

        for (unsigned instructionIndex = block->bytecodeOffsets().size(); instructionIndex--;) {
            unsigned bytecodeOffset = block->bytecodeOffsets()[instructionIndex];
            stepOverInstruction(
                m_codeBlock, block, m_basicBlocks, bytecodeOffset,
                [&] (unsigned index) {
                    // Use: if the local is not live below this instruction,
                    // this is its last use — record the kill and mark it live.
                    if (out.get(index))
                        return;
                    result.m_killSets[bytecodeOffset].add(index);
                    out.set(index);
                },
                [&] (unsigned index) {
                    // Def: the local's previous value is dead above this point.
                    out.clear(index);
                });
        }
    }
}
static void computeLocalLivenessForBytecodeOffset(CodeBlock* codeBlock, BytecodeBasicBlock* block, Vector<RefPtr<BytecodeBasicBlock> >& basicBlocks, unsigned targetOffset, FastBitVector& result) { ASSERT(!block->isExitBlock()); ASSERT(!block->isEntryBlock()); FastBitVector out = block->out(); HandlerInfo* handler = 0; FastBitVector uses; FastBitVector defs; uses.resize(out.numBits()); defs.resize(out.numBits()); for (int i = block->bytecodeOffsets().size() - 1; i >= 0; i--) { unsigned bytecodeOffset = block->bytecodeOffsets()[i]; if (targetOffset > bytecodeOffset) break; uses.clearAll(); defs.clearAll(); computeUsesForBytecodeOffset(codeBlock, bytecodeOffset, uses); computeDefsForBytecodeOffset(codeBlock, bytecodeOffset, defs); out.exclude(defs); out.merge(uses); // If we have an exception handler, we want the live-in variables of the // exception handler block to be included in the live-in of this particular bytecode. if ((handler = codeBlock->handlerForBytecodeOffset(bytecodeOffset))) { BytecodeBasicBlock* handlerBlock = findBasicBlockWithLeaderOffset(basicBlocks, handler->target); ASSERT(handlerBlock); out.merge(handlerBlock->in()); } } result.set(out); }
// Records, for every bytecode offset, the full set of locals live before that
// instruction, by stepping backwards over each body block from its live-out
// set and snapshotting the bitvector after every step.
//
// Fix: the inner loop redeclared `i`, shadowing the outer block index — renamed
// to `instructionIndex` to remove the bugprone shadowing.
void BytecodeLivenessAnalysis::computeFullLiveness(FullBytecodeLiveness& result)
{
    FastBitVector out;

    result.m_map.resize(m_codeBlock->instructions().size());

    for (unsigned i = m_basicBlocks.size(); i--;) {
        BytecodeBasicBlock* block = m_basicBlocks[i].get();
        // Entry/exit blocks contain no bytecode; nothing to record for them.
        if (block->isEntryBlock() || block->isExitBlock())
            continue;

        out = block->out();

        for (unsigned instructionIndex = block->bytecodeOffsets().size(); instructionIndex--;) {
            unsigned bytecodeOffset = block->bytecodeOffsets()[instructionIndex];
            stepOverInstruction(m_codeBlock, block, m_basicBlocks, bytecodeOffset, out);
            // After stepping over the instruction, |out| is its live-before set.
            result.m_map[bytecodeOffset] = out;
        }
    }
}
// Partitions |codeBlock|'s bytecode into basic blocks and wires up the
// control-flow edges between them. On return |basicBlocks| holds: the entry
// block first, the body blocks in program order, and the exit block last.
void computeBytecodeBasicBlocks(CodeBlock* codeBlock, Vector<RefPtr<BytecodeBasicBlock> >& basicBlocks)
{
    Vector<unsigned, 32> jumpTargets;
    computePreciseJumpTargets(codeBlock, jumpTargets);

    // Create the entry and exit basic blocks.
    BytecodeBasicBlock* entry = new BytecodeBasicBlock(BytecodeBasicBlock::EntryBlock);
    basicBlocks.append(adoptRef(entry));
    BytecodeBasicBlock* exit = new BytecodeBasicBlock(BytecodeBasicBlock::ExitBlock);

    // Find basic block boundaries. The first block starts at offset 0 with
    // zero length; the scan below grows it instruction by instruction.
    BytecodeBasicBlock* current = new BytecodeBasicBlock(0, 0);
    linkBlocks(entry, current);
    basicBlocks.append(adoptRef(current));

    bool nextInstructionIsLeader = false;

    Interpreter* interpreter = codeBlock->vm()->interpreter;
    Instruction* instructionsBegin = codeBlock->instructions().begin();
    unsigned instructionCount = codeBlock->instructions().size();
    for (unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount;) {
        OpcodeID opcodeID = interpreter->getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode);
        unsigned opcodeLength = opcodeLengths[opcodeID];

        bool createdBlock = false;

        // If the current bytecode is a jump target, then it's the leader of its own basic block.
        // The (bytecodeOffset, opcodeLength) constructor already accounts for
        // the leader instruction, so we advance and skip addBytecodeLength below.
        if (isJumpTarget(opcodeID, jumpTargets, bytecodeOffset) || nextInstructionIsLeader) {
            BytecodeBasicBlock* block = new BytecodeBasicBlock(bytecodeOffset, opcodeLength);
            basicBlocks.append(adoptRef(block));
            current = block;
            createdBlock = true;
            nextInstructionIsLeader = false;
            bytecodeOffset += opcodeLength;
        }

        // If the current bytecode is a branch or a return, then the next instruction is the leader of its own basic block.
        if (isBranch(opcodeID) || isTerminal(opcodeID) || isThrow(opcodeID))
            nextInstructionIsLeader = true;

        if (createdBlock)
            continue;

        // Otherwise, just add to the length of the current block.
        current->addBytecodeLength(opcodeLength);
        bytecodeOffset += opcodeLength;
    }

    // Link basic blocks together.
    for (unsigned i = 0; i < basicBlocks.size(); i++) {
        BytecodeBasicBlock* block = basicBlocks[i].get();

        // Entry/exit blocks carry no bytecode and get no outgoing edges here.
        if (block->isEntryBlock() || block->isExitBlock())
            continue;

        bool fallsThrough = true;
        for (unsigned bytecodeOffset = block->leaderBytecodeOffset(); bytecodeOffset < block->leaderBytecodeOffset() + block->totalBytecodeLength();) {
            const Instruction& currentInstruction = instructionsBegin[bytecodeOffset];
            OpcodeID opcodeID = interpreter->getOpcodeID(currentInstruction.u.opcode);
            unsigned opcodeLength = opcodeLengths[opcodeID];

            // If we found a terminal bytecode, link to the exit block.
            if (isTerminal(opcodeID)) {
                // A terminal can only be the last instruction of its block.
                ASSERT(bytecodeOffset + opcodeLength == block->leaderBytecodeOffset() + block->totalBytecodeLength());
                linkBlocks(block, exit);
                fallsThrough = false;
                break;
            }

            // If we found a throw, get the HandlerInfo for this instruction to see where we will jump.
            // If there isn't one, treat this throw as a terminal. This is true even if we have a finally
            // block because the finally block will create its own catch, which will generate a HandlerInfo.
            if (isThrow(opcodeID)) {
                ASSERT(bytecodeOffset + opcodeLength == block->leaderBytecodeOffset() + block->totalBytecodeLength());
                HandlerInfo* handler = codeBlock->handlerForBytecodeOffset(bytecodeOffset);
                fallsThrough = false;
                if (!handler) {
                    linkBlocks(block, exit);
                    break;
                }
                // Link to the block whose leader is the handler's target.
                for (unsigned i = 0; i < basicBlocks.size(); i++) {
                    BytecodeBasicBlock* otherBlock = basicBlocks[i].get();
                    if (handler->target == otherBlock->leaderBytecodeOffset()) {
                        linkBlocks(block, otherBlock);
                        break;
                    }
                }
                break;
            }

            // If we found a branch, link to the block(s) that we jump to.
            if (isBranch(opcodeID)) {
                ASSERT(bytecodeOffset + opcodeLength == block->leaderBytecodeOffset() + block->totalBytecodeLength());
                Vector<unsigned, 1> bytecodeOffsetsJumpedTo;
                findJumpTargetsForBytecodeOffset(codeBlock, bytecodeOffset, bytecodeOffsetsJumpedTo);

                for (unsigned i = 0; i < basicBlocks.size(); i++) {
                    BytecodeBasicBlock* otherBlock = basicBlocks[i].get();
                    if (bytecodeOffsetsJumpedTo.contains(otherBlock->leaderBytecodeOffset()))
                        linkBlocks(block, otherBlock);
                }

                // A conditional branch can still fall through to the next block.
                if (isUnconditionalBranch(opcodeID))
                    fallsThrough = false;

                break;
            }
            bytecodeOffset += opcodeLength;
        }

        // If we fall through then link to the next block in program order.
        if (fallsThrough) {
            ASSERT(i + 1 < basicBlocks.size());
            BytecodeBasicBlock* nextBlock = basicBlocks[i + 1].get();
            linkBlocks(block, nextBlock);
        }
    }

    // The exit block goes last, after all body blocks.
    basicBlocks.append(adoptRef(exit));
}
// Builds the basic-block graph for |codeBlock| from a raw Instruction array:
// first finds block leaders (jump targets and instructions following a
// branch/terminal/throw), then links blocks via fall-through, branch,
// exception-handler, and exit edges. |Block| is a template parameter whose
// header presumably sits just above this chunk — not visible here.
void BytecodeBasicBlock::computeImpl(Block* codeBlock, Instruction* instructionsBegin, unsigned instructionCount, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks)
{
    Vector<unsigned, 32> jumpTargets;
    computePreciseJumpTargets(codeBlock, instructionsBegin, instructionCount, jumpTargets);

    // Appending assigns each block its index within |basicBlocks|.
    auto appendBlock = [&] (std::unique_ptr<BytecodeBasicBlock>&& block) {
        block->m_index = basicBlocks.size();
        basicBlocks.append(WTFMove(block));
    };

    auto linkBlocks = [&] (BytecodeBasicBlock* from, BytecodeBasicBlock* to) {
        from->addSuccessor(to);
    };

    // Create the entry and exit basic blocks.
    basicBlocks.reserveCapacity(jumpTargets.size() + 2);

    auto entry = std::make_unique<BytecodeBasicBlock>(BytecodeBasicBlock::EntryBlock);
    // The first body block starts at offset 0 with zero length; the scan
    // below grows it instruction by instruction.
    auto firstBlock = std::make_unique<BytecodeBasicBlock>(0, 0);
    linkBlocks(entry.get(), firstBlock.get());

    appendBlock(WTFMove(entry));
    BytecodeBasicBlock* current = firstBlock.get();
    appendBlock(WTFMove(firstBlock));

    auto exit = std::make_unique<BytecodeBasicBlock>(BytecodeBasicBlock::ExitBlock);

    bool nextInstructionIsLeader = false;

    Interpreter* interpreter = codeBlock->vm()->interpreter;
    for (unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount;) {
        OpcodeID opcodeID = interpreter->getOpcodeID(instructionsBegin[bytecodeOffset]);
        unsigned opcodeLength = opcodeLengths[opcodeID];

        bool createdBlock = false;

        // If the current bytecode is a jump target, then it's the leader of its own basic block.
        // The (bytecodeOffset, opcodeLength) constructor accounts for the
        // leader instruction, so we advance and skip addLength below.
        if (isJumpTarget(opcodeID, jumpTargets, bytecodeOffset) || nextInstructionIsLeader) {
            auto newBlock = std::make_unique<BytecodeBasicBlock>(bytecodeOffset, opcodeLength);
            current = newBlock.get();
            appendBlock(WTFMove(newBlock));
            createdBlock = true;
            nextInstructionIsLeader = false;
            bytecodeOffset += opcodeLength;
        }

        // If the current bytecode is a branch or a return, then the next instruction is the leader of its own basic block.
        if (isBranch(opcodeID) || isTerminal(opcodeID) || isThrow(opcodeID))
            nextInstructionIsLeader = true;

        if (createdBlock)
            continue;

        // Otherwise, just add to the length of the current block.
        current->addLength(opcodeLength);
        bytecodeOffset += opcodeLength;
    }

    // Link basic blocks together.
    for (unsigned i = 0; i < basicBlocks.size(); i++) {
        BytecodeBasicBlock* block = basicBlocks[i].get();

        // Entry/exit blocks carry no bytecode and get no outgoing edges here.
        if (block->isEntryBlock() || block->isExitBlock())
            continue;

        bool fallsThrough = true;
        for (unsigned bytecodeOffset = block->leaderOffset(); bytecodeOffset < block->leaderOffset() + block->totalLength();) {
            OpcodeID opcodeID = interpreter->getOpcodeID(instructionsBegin[bytecodeOffset]);
            unsigned opcodeLength = opcodeLengths[opcodeID];

            // If we found a terminal bytecode, link to the exit block.
            if (isTerminal(opcodeID)) {
                // A terminal can only be the last instruction of its block.
                ASSERT(bytecodeOffset + opcodeLength == block->leaderOffset() + block->totalLength());
                linkBlocks(block, exit.get());
                fallsThrough = false;
                break;
            }

            // If we found a throw, get the HandlerInfo for this instruction to see where we will jump.
            // If there isn't one, treat this throw as a terminal. This is true even if we have a finally
            // block because the finally block will create its own catch, which will generate a HandlerInfo.
            if (isThrow(opcodeID)) {
                ASSERT(bytecodeOffset + opcodeLength == block->leaderOffset() + block->totalLength());
                auto* handler = codeBlock->handlerForBytecodeOffset(bytecodeOffset);
                fallsThrough = false;
                if (!handler) {
                    linkBlocks(block, exit.get());
                    break;
                }
                // Link to the block whose leader is the handler's target.
                for (unsigned i = 0; i < basicBlocks.size(); i++) {
                    BytecodeBasicBlock* otherBlock = basicBlocks[i].get();
                    if (handler->target == otherBlock->leaderOffset()) {
                        linkBlocks(block, otherBlock);
                        break;
                    }
                }
                break;
            }

            // If we found a branch, link to the block(s) that we jump to.
            if (isBranch(opcodeID)) {
                ASSERT(bytecodeOffset + opcodeLength == block->leaderOffset() + block->totalLength());
                Vector<unsigned, 1> bytecodeOffsetsJumpedTo;
                findJumpTargetsForBytecodeOffset(codeBlock, instructionsBegin, bytecodeOffset, bytecodeOffsetsJumpedTo);

                for (unsigned i = 0; i < basicBlocks.size(); i++) {
                    BytecodeBasicBlock* otherBlock = basicBlocks[i].get();
                    if (bytecodeOffsetsJumpedTo.contains(otherBlock->leaderOffset()))
                        linkBlocks(block, otherBlock);
                }

                // A conditional branch can still fall through to the next block.
                if (isUnconditionalBranch(opcodeID))
                    fallsThrough = false;

                break;
            }
            bytecodeOffset += opcodeLength;
        }

        // If we fall through then link to the next block in program order.
        if (fallsThrough) {
            ASSERT(i + 1 < basicBlocks.size());
            BytecodeBasicBlock* nextBlock = basicBlocks[i + 1].get();
            linkBlocks(block, nextBlock);
        }
    }

    appendBlock(WTFMove(exit));

    for (auto& basicBlock : basicBlocks)
        basicBlock->shrinkToFit();
}
void BytecodeBasicBlock::computeImpl(Block* codeBlock, const InstructionStream& instructions, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks) { Vector<InstructionStream::Offset, 32> jumpTargets; computePreciseJumpTargets(codeBlock, instructions, jumpTargets); auto appendBlock = [&] (std::unique_ptr<BytecodeBasicBlock>&& block) { block->m_index = basicBlocks.size(); basicBlocks.append(WTFMove(block)); }; auto linkBlocks = [&] (BytecodeBasicBlock* from, BytecodeBasicBlock* to) { from->addSuccessor(to); }; // Create the entry and exit basic blocks. basicBlocks.reserveCapacity(jumpTargets.size() + 2); auto entry = std::make_unique<BytecodeBasicBlock>(BytecodeBasicBlock::EntryBlock); auto firstBlock = std::make_unique<BytecodeBasicBlock>(BytecodeBasicBlock::EntryBlock); linkBlocks(entry.get(), firstBlock.get()); appendBlock(WTFMove(entry)); BytecodeBasicBlock* current = firstBlock.get(); appendBlock(WTFMove(firstBlock)); auto exit = std::make_unique<BytecodeBasicBlock>(BytecodeBasicBlock::ExitBlock); bool nextInstructionIsLeader = false; for (const auto& instruction : instructions) { auto bytecodeOffset = instruction.offset(); OpcodeID opcodeID = instruction->opcodeID(); bool createdBlock = false; // If the current bytecode is a jump target, then it's the leader of its own basic block. if (isJumpTarget(opcodeID, jumpTargets, bytecodeOffset) || nextInstructionIsLeader) { auto newBlock = std::make_unique<BytecodeBasicBlock>(instruction); current = newBlock.get(); appendBlock(WTFMove(newBlock)); createdBlock = true; nextInstructionIsLeader = false; } // If the current bytecode is a branch or a return, then the next instruction is the leader of its own basic block. if (isBranch(opcodeID) || isTerminal(opcodeID) || isThrow(opcodeID)) nextInstructionIsLeader = true; if (createdBlock) continue; // Otherwise, just add to the length of the current block. current->addLength(instruction->size()); } // Link basic blocks together. 
for (unsigned i = 0; i < basicBlocks.size(); i++) { BytecodeBasicBlock* block = basicBlocks[i].get(); if (block->isEntryBlock() || block->isExitBlock()) continue; bool fallsThrough = true; for (auto bytecodeOffset : block->offsets()) { auto instruction = instructions.at(bytecodeOffset); OpcodeID opcodeID = instruction->opcodeID(); // If we found a terminal bytecode, link to the exit block. if (isTerminal(opcodeID)) { ASSERT(bytecodeOffset + instruction->size() == block->leaderOffset() + block->totalLength()); linkBlocks(block, exit.get()); fallsThrough = false; break; } // If we found a throw, get the HandlerInfo for this instruction to see where we will jump. // If there isn't one, treat this throw as a terminal. This is true even if we have a finally // block because the finally block will create its own catch, which will generate a HandlerInfo. if (isThrow(opcodeID)) { ASSERT(bytecodeOffset + instruction->size() == block->leaderOffset() + block->totalLength()); auto* handler = codeBlock->handlerForBytecodeOffset(instruction.offset()); fallsThrough = false; if (!handler) { linkBlocks(block, exit.get()); break; } for (unsigned i = 0; i < basicBlocks.size(); i++) { BytecodeBasicBlock* otherBlock = basicBlocks[i].get(); if (handler->target == otherBlock->leaderOffset()) { linkBlocks(block, otherBlock); break; } } break; } // If we found a branch, link to the block(s) that we jump to. 
if (isBranch(opcodeID)) { ASSERT(bytecodeOffset + instruction->size() == block->leaderOffset() + block->totalLength()); Vector<InstructionStream::Offset, 1> bytecodeOffsetsJumpedTo; findJumpTargetsForInstruction(codeBlock, instruction, bytecodeOffsetsJumpedTo); size_t numberOfJumpTargets = bytecodeOffsetsJumpedTo.size(); ASSERT(numberOfJumpTargets); for (unsigned i = 0; i < basicBlocks.size(); i++) { BytecodeBasicBlock* otherBlock = basicBlocks[i].get(); if (bytecodeOffsetsJumpedTo.contains(otherBlock->leaderOffset())) { linkBlocks(block, otherBlock); --numberOfJumpTargets; if (!numberOfJumpTargets) break; } } // numberOfJumpTargets may not be 0 here if there are multiple jumps targeting the same // basic blocks (e.g. in a switch type opcode). Since we only decrement numberOfJumpTargets // once per basic block, the duplicates are not accounted for. For our purpose here, // that doesn't matter because we only need to link to the target block once regardless // of how many ways this block can jump there. if (isUnconditionalBranch(opcodeID)) fallsThrough = false; break; } } // If we fall through then link to the next block in program order. if (fallsThrough) { ASSERT(i + 1 < basicBlocks.size()); BytecodeBasicBlock* nextBlock = basicBlocks[i + 1].get(); linkBlocks(block, nextBlock); } } appendBlock(WTFMove(exit)); for (auto& basicBlock : basicBlocks) basicBlock->shrinkToFit(); }