// Returns the full liveness bit vector at the given bytecode offset. The
// dataflow pass tracks only non-captured variables in a compact index space;
// this re-expands those bits so indices correspond to local variable numbers,
// and unconditionally marks every captured variable live.
FastBitVector BytecodeLivenessAnalysis::getLivenessInfoAtBytecodeOffset(unsigned bytecodeOffset)
{
    FastBitVector temp;
    FastBitVector result;
    // "temp" is indexed in the compact (captured-variables-skipped) space.
    getLivenessInfoForNonCapturedVarsAtBytecodeOffset(bytecodeOffset, temp);
    unsigned numCapturedVars = numberOfCapturedVariables(m_codeBlock);
    if (numCapturedVars) {
        int firstCapturedLocal = VirtualRegister(captureStart(m_codeBlock)).toLocal();
        result.resize(temp.numBits() + numCapturedVars);
        // Captured variables are conservatively considered live everywhere.
        for (unsigned i = 0; i < numCapturedVars; ++i)
            result.set(firstCapturedLocal + i);
    } else
        result.resize(temp.numBits());
    int tempLength = temp.numBits();
    ASSERT(tempLength >= 0);
    for (int i = 0; i < tempLength; i++) {
        if (!temp.get(i))
            continue;
        if (!numCapturedVars) {
            // No captured variables: compact index == local index.
            result.set(i);
            continue;
        }
        // Locals above captureStart keep their index; locals at or below it
        // were compacted, so shift them past the captured-variable range.
        // NOTE(review): this is the inverse of setForOperand's mapping
        // (local - numCapturedVars for the compacted range) — keep in sync.
        if (virtualRegisterForLocal(i).offset() > captureStart(m_codeBlock))
            result.set(i);
        else
            result.set(numCapturedVars + i);
    }
    return result;
}
// Marks the liveness bit for the given operand. Operands at or below the
// capture region are stored compacted, so their bit index is shifted down by
// the number of captured variables.
static void setForOperand(CodeBlock* codeBlock, FastBitVector& bits, int operand)
{
    ASSERT(isValidRegisterForLiveness(codeBlock, operand));
    VirtualRegister reg(operand);
    if (reg.offset() <= captureStart(codeBlock))
        bits.set(reg.toLocal() - numberOfCapturedVariables(codeBlock));
    else
        bits.set(reg.toLocal());
}
// Computes, for every instruction, the set of variables that die there: a
// use whose bit is not live after this instruction becomes a kill.
void BytecodeLivenessAnalysis::computeKills(BytecodeKills& result)
{
    CodeBlock* codeBlock = m_graph.codeBlock();
    result.m_codeBlock = codeBlock;
    result.m_killSets = std::make_unique<BytecodeKills::KillSet[]>(codeBlock->instructions().size());
    FastBitVector out;
    for (auto& block : m_graph.basicBlocksInReverseOrder()) {
        if (block->isEntryBlock() || block->isExitBlock())
            continue;
        // Walk the block backwards, maintaining the live-out set as we go.
        out = block->out();
        for (unsigned offsetIndex = block->offsets().size(); offsetIndex--;) {
            unsigned bytecodeOffset = block->offsets()[offsetIndex];
            stepOverInstruction(
                m_graph, bytecodeOffset, out,
                [&] (unsigned bitIndex) {
                    // Use functor: a use that was not already live dies here.
                    if (out.get(bitIndex))
                        return;
                    result.m_killSets[bytecodeOffset].add(bitIndex);
                    out.set(bitIndex);
                },
                [&] (unsigned bitIndex) {
                    // Def functor: a definition ends liveness above it.
                    out.clear(bitIndex);
                });
        }
    }
}
// Computes kill sets for each bytecode instruction: a variable is killed at
// the last instruction that uses it before it goes dead.
// Pre-BytecodeGraph variant: iterates m_basicBlocks directly.
// Fix: the inner loop variable was named "i", shadowing the outer loop's "i";
// both are renamed so neither can silently refer to the wrong index.
void BytecodeLivenessAnalysis::computeKills(BytecodeKills& result)
{
    FastBitVector out;
    result.m_codeBlock = m_codeBlock;
    result.m_killSets = std::make_unique<BytecodeKills::KillSet[]>(m_codeBlock->instructions().size());
    for (unsigned blockIndex = m_basicBlocks.size(); blockIndex--;) {
        BytecodeBasicBlock* block = m_basicBlocks[blockIndex].get();
        if (block->isEntryBlock() || block->isExitBlock())
            continue;
        // Walk the block's instructions in reverse, tracking live-out bits.
        out = block->out();
        for (unsigned instructionIndex = block->bytecodeOffsets().size(); instructionIndex--;) {
            unsigned bytecodeOffset = block->bytecodeOffsets()[instructionIndex];
            stepOverInstruction(
                m_codeBlock, block, m_basicBlocks, bytecodeOffset,
                [&] (unsigned index) {
                    // This is for uses: a use not live after this point is a kill.
                    if (out.get(index))
                        return;
                    result.m_killSets[bytecodeOffset].add(index);
                    out.set(index);
                },
                [&] (unsigned index) {
                    // This is for defs.
                    out.clear(index);
                });
        }
    }
}
// Convenience overload: applies one instruction's dataflow effect directly to
// "out" — used operands become live (bit set), defined operands become dead
// (bit cleared).
static void stepOverInstruction(CodeBlock* codeBlock, BytecodeBasicBlock* block, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks, unsigned bytecodeOffset, FastBitVector& out)
{
    // Name the two functors instead of passing them inline.
    auto markUse = [&] (unsigned bitIndex) {
        // A used operand is live before this instruction.
        out.set(bitIndex);
    };
    auto markDef = [&] (unsigned bitIndex) {
        // A defined operand is dead before this instruction.
        out.clear(bitIndex);
    };
    stepOverInstruction(codeBlock, block, basicBlocks, bytecodeOffset, markUse, markDef);
}
// Computes the liveness at targetOffset by starting from the block's live-out
// set and stepping backwards over every instruction at or after the target.
static void computeLocalLivenessForBytecodeOffset(CodeBlock* codeBlock, BytecodeBasicBlock* block, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks, unsigned targetOffset, FastBitVector& result)
{
    ASSERT(!block->isEntryBlock());
    ASSERT(!block->isExitBlock());
    FastBitVector live = block->out();
    for (unsigned i = block->bytecodeOffsets().size(); i--;) {
        unsigned bytecodeOffset = block->bytecodeOffsets()[i];
        // Stop once we have stepped over everything at or after the target.
        if (targetOffset > bytecodeOffset)
            break;
        stepOverInstruction(codeBlock, block, basicBlocks, bytecodeOffset, live);
    }
    result.set(live);
}
static void computeLocalLivenessForBytecodeOffset(CodeBlock* codeBlock, BytecodeBasicBlock* block, Vector<RefPtr<BytecodeBasicBlock> >& basicBlocks, unsigned targetOffset, FastBitVector& result) { ASSERT(!block->isExitBlock()); ASSERT(!block->isEntryBlock()); FastBitVector out = block->out(); HandlerInfo* handler = 0; FastBitVector uses; FastBitVector defs; uses.resize(out.numBits()); defs.resize(out.numBits()); for (int i = block->bytecodeOffsets().size() - 1; i >= 0; i--) { unsigned bytecodeOffset = block->bytecodeOffsets()[i]; if (targetOffset > bytecodeOffset) break; uses.clearAll(); defs.clearAll(); computeUsesForBytecodeOffset(codeBlock, bytecodeOffset, uses); computeDefsForBytecodeOffset(codeBlock, bytecodeOffset, defs); out.exclude(defs); out.merge(uses); // If we have an exception handler, we want the live-in variables of the // exception handler block to be included in the live-in of this particular bytecode. if ((handler = codeBlock->handlerForBytecodeOffset(bytecodeOffset))) { BytecodeBasicBlock* handlerBlock = findBasicBlockWithLeaderOffset(basicBlocks, handler->target); ASSERT(handlerBlock); out.merge(handlerBlock->in()); } } result.set(out); }
// Computes every natural loop in the graph, fills in each block's
// innerMostLoopIndices, and links each loop to its parent loop.
void NaturalLoops::compute(Graph& graph)
{
    // Implement the classic dominator-based natural loop finder. The first
    // step is to find all control flow edges A -> B where B dominates A.
    // Then B is a loop header and A is a backward branching block. We will
    // then accumulate, for each loop header, multiple backward branching
    // blocks. Then we backwards graph search from the backward branching
    // blocks to their loop headers, which gives us all of the blocks in the
    // loop body.

    static const bool verbose = false;

    graph.m_dominators.computeIfNecessary(graph);
    if (verbose) {
        dataLog("Dominators:\n");
        graph.m_dominators.dump(graph, WTF::dataFile());
    }

    m_loops.resize(0);

    // Phase 1: find every back edge (a successor that dominates its
    // predecessor) and create, or extend, the loop keyed by that header.
    for (BlockIndex blockIndex = graph.numBlocks(); blockIndex--;) {
        BasicBlock* block = graph.block(blockIndex);
        if (!block)
            continue;
        for (unsigned i = block->numSuccessors(); i--;) {
            BasicBlock* successor = block->successor(i);
            if (!graph.m_dominators.dominates(successor, block))
                continue;
            // block -> successor is a back edge; successor is a loop header.
            // Reuse an existing loop with this header if we already made one.
            bool found = false;
            for (unsigned j = m_loops.size(); j--;) {
                if (m_loops[j].header() == successor) {
                    m_loops[j].addBlock(block);
                    found = true;
                    break;
                }
            }
            if (found)
                continue;
            NaturalLoop loop(successor, m_loops.size());
            loop.addBlock(block);
            m_loops.append(loop);
        }
    }

    if (verbose)
        dataLog("After bootstrap: ", *this, "\n");

    // Phase 2: for each loop, search backwards from the backward-branching
    // blocks; every block reached before the header belongs to the loop body.
    FastBitVector seenBlocks;
    Vector<BasicBlock*, 4> blockWorklist;
    seenBlocks.resize(graph.numBlocks());

    for (unsigned i = m_loops.size(); i--;) {
        NaturalLoop& loop = m_loops[i];

        seenBlocks.clearAll();
        ASSERT(blockWorklist.isEmpty());

        if (verbose)
            dataLog("Dealing with loop ", loop, "\n");

        // Seed the worklist with the blocks the loop already contains.
        for (unsigned j = loop.size(); j--;) {
            seenBlocks.set(loop[j]->index);
            blockWorklist.append(loop[j]);
        }

        while (!blockWorklist.isEmpty()) {
            BasicBlock* block = blockWorklist.takeLast();

            if (verbose)
                dataLog(" Dealing with ", *block, "\n");

            // Do not walk past the header: the loop body is exactly the set
            // of blocks that can reach a back edge without passing through it.
            if (block == loop.header())
                continue;

            for (unsigned j = block->predecessors.size(); j--;) {
                BasicBlock* predecessor = block->predecessors[j];
                if (seenBlocks.get(predecessor->index))
                    continue;

                loop.addBlock(predecessor);
                blockWorklist.append(predecessor);
                seenBlocks.set(predecessor->index);
            }
        }
    }

    // Figure out reverse mapping from blocks to loops.
    for (BlockIndex blockIndex = graph.numBlocks(); blockIndex--;) {
        BasicBlock* block = graph.block(blockIndex);
        if (!block)
            continue;
        // UINT_MAX marks "no loop" slots.
        for (unsigned i = BasicBlock::numberOfInnerMostLoopIndices; i--;)
            block->innerMostLoopIndices[i] = UINT_MAX;
    }
    // Keep each block's innerMostLoopIndices ordered by increasing loop size:
    // a loop is inserted before the first recorded loop that is larger.
    for (unsigned loopIndex = m_loops.size(); loopIndex--;) {
        NaturalLoop& loop = m_loops[loopIndex];
        for (unsigned blockIndexInLoop = loop.size(); blockIndexInLoop--;) {
            BasicBlock* block = loop[blockIndexInLoop];

            for (unsigned i = 0; i < BasicBlock::numberOfInnerMostLoopIndices; ++i) {
                unsigned thisIndex = block->innerMostLoopIndices[i];
                if (thisIndex == UINT_MAX || loop.size() < m_loops[thisIndex].size()) {
                    insertIntoBoundedVector(
                        block->innerMostLoopIndices,
                        BasicBlock::numberOfInnerMostLoopIndices,
                        loopIndex, i);
                    break;
                }
            }
        }
    }

    // Now each block knows its inner-most loop and its next-to-inner-most loop. Use
    // this to figure out loop parenting.
    for (unsigned i = m_loops.size(); i--;) {
        NaturalLoop& loop = m_loops[i];
        // The header's inner-most loop must be the loop it heads.
        RELEASE_ASSERT(loop.header()->innerMostLoopIndices[0] == i);

        loop.m_outerLoopIndex = loop.header()->innerMostLoopIndices[1];
    }

    if (validationEnabled()) {
        // Do some self-verification that we've done some of this correctly.
        for (BlockIndex blockIndex = graph.numBlocks(); blockIndex--;) {
            BasicBlock* block = graph.block(blockIndex);
            if (!block)
                continue;

            // Brute-force membership scan, compared against loopsOf()'s
            // answer derived from innerMostLoopIndices/parent links.
            Vector<const NaturalLoop*> simpleLoopsOf;

            for (unsigned i = m_loops.size(); i--;) {
                if (m_loops[i].contains(block))
                    simpleLoopsOf.append(&m_loops[i]);
            }

            Vector<const NaturalLoop*> fancyLoopsOf = loopsOf(block);

            std::sort(simpleLoopsOf.begin(), simpleLoopsOf.end());
            std::sort(fancyLoopsOf.begin(), fancyLoopsOf.end());

            RELEASE_ASSERT(simpleLoopsOf == fancyLoopsOf);
        }
    }

    if (verbose)
        dataLog("Results: ", *this, "\n");
}