void DeadCodeEliminationPass::runOnKernel(ir::IRKernel& k)
{
    report("Running dead code elimination on kernel " << k.name);
    reportE(REPORT_PTX, k);

    Analysis* dfgAnalysis = getAnalysis(Analysis::DataflowGraphAnalysis);
    assert(dfgAnalysis != 0);

    analysis::DataflowGraph& dfg =
        *static_cast<analysis::DataflowGraph*>(dfgAnalysis);

    assert(dfg.ssa() != analysis::DataflowGraph::SsaType::None);

    BlockSet blocks;

    report(" Starting by scanning all basic blocks");

    for(iterator block = dfg.begin(); block != dfg.end(); ++block)
    {
        report(" Queueing up BB_" << block->id());
        blocks.insert(block);
    }

    while(!blocks.empty())
    {
        iterator block = *blocks.begin();
        blocks.erase(blocks.begin());

        eliminateDeadInstructions(dfg, blocks, block);
    }

    report("Finished running dead code elimination on kernel " << k.name);
    reportE(REPORT_PTX, k);
}
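// Illustrative sketch (not Ocelot code): the dead-code-elimination pass above and the
// constant-propagation pass later in this collection follow the same worklist idiom --
// seed a set with every basic block, then pop and process blocks until the set is empty,
// letting the per-block step re-queue any blocks it dirties. The int block ids and the
// processBlock() callback below are hypothetical stand-ins for that pattern.
#include <set>
#include <cstdio>

typedef std::set<int> BlockSet; // stand-in for a set of dataflow-graph block iterators

// Hypothetical per-block step: returns blocks whose results may have changed and
// therefore need to be revisited (e.g. predecessors whose live-out sets changed).
static BlockSet processBlock(int block)
{
    std::printf("processing BB_%d\n", block);
    return block == 3 ? BlockSet{1} : BlockSet{};
}

int main()
{
    BlockSet worklist = {1, 2, 3, 4}; // start with every block queued

    while(!worklist.empty())
    {
        int block = *worklist.begin();
        worklist.erase(worklist.begin());

        for(int affected : processBlock(block))
        {
            worklist.insert(affected); // re-queue blocks invalidated by this step
        }
    }

    return 0;
}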
Shape *MakeLoop(BlockSet &Blocks, BlockSet& Entries, BlockSet &NextEntries) {
  // Find the inner blocks in this loop. Proceed backwards from the entries until
  // you reach a seen block, collecting as you go.
  BlockSet InnerBlocks;
  BlockSet Queue = Entries;
  while (Queue.size() > 0) {
    Block *Curr = *(Queue.begin());
    Queue.erase(Queue.begin());
    if (InnerBlocks.find(Curr) == InnerBlocks.end()) {
      // This element is new, mark it as inner and remove from outer
      InnerBlocks.insert(Curr);
      Blocks.erase(Curr);
      // Add the elements prior to it
      for (BlockBranchMap::iterator iter = Curr->BranchesIn.begin(); iter != Curr->BranchesIn.end(); iter++) {
        Queue.insert(iter->first);
      }
    }
  }
  assert(InnerBlocks.size() > 0);

  for (BlockSet::iterator iter = InnerBlocks.begin(); iter != InnerBlocks.end(); iter++) {
    Block *Curr = *iter;
    for (BlockBranchMap::iterator iter = Curr->BranchesOut.begin(); iter != Curr->BranchesOut.end(); iter++) {
      Block *Possible = iter->first;
      if (InnerBlocks.find(Possible) == InnerBlocks.end() &&
          NextEntries.find(Possible) == NextEntries.end()) {
        NextEntries.insert(Possible);
      }
    }
  }

  PrintDebug("creating loop block:\n");
  DebugDump(InnerBlocks, " inner blocks:");
  DebugDump(Entries, " inner entries:");
  DebugDump(Blocks, " outer blocks:");
  DebugDump(NextEntries, " outer entries:");

  // TODO: Optionally hoist additional blocks into the loop

  LoopShape *Loop = new LoopShape();
  Notice(Loop);

  // Solipsize the loop, replacing with break/continue and marking branches as Processed (will not affect later calculations)
  // A. Branches to the loop entries become a continue to this shape
  for (BlockSet::iterator iter = Entries.begin(); iter != Entries.end(); iter++) {
    Solipsize(*iter, Branch::Continue, Loop, InnerBlocks);
  }
  // B. Branches to outside the loop (a next entry) become breaks on this shape
  for (BlockSet::iterator iter = NextEntries.begin(); iter != NextEntries.end(); iter++) {
    Solipsize(*iter, Branch::Break, Loop, InnerBlocks);
  }
  // Finish up
  Shape *Inner = Process(InnerBlocks, Entries, NULL);
  Loop->Inner = Inner;
  return Loop;
}
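// Rough sketch of the types the Relooper-style snippets in this collection assume
// (Block, Branch, BlockSet, BlockBranchMap, Shape and its Simple/Multiple/Loop kinds).
// This is a paraphrase for orientation only, not the actual Relooper.h declarations:
// the real header's field sets and signatures differ in detail, older snippets here use
// a map for BranchesIn, and helpers such as Solipsize, Notice and Process are omitted.
#include <map>
#include <set>
#include <list>

struct Block;
struct Shape;

struct Branch {
  enum FlowType { Direct, Break, Continue }; // how the branch is rendered
  Shape *Ancestor;                           // assumed: the loop/multiple broken out of
  FlowType Type;
  const char *Condition;                     // NULL means unconditional
  const char *Code;                          // code emitted when the branch is taken
  Branch(const char *ConditionInit, const char *CodeInit = 0)
    : Ancestor(0), Type(Direct), Condition(ConditionInit), Code(CodeInit) {}
};

typedef std::set<Block*> BlockSet;
typedef std::map<Block*, Branch*> BlockBranchMap;
typedef std::list<Block*> BlockList;

struct Block {
  int Id;
  const char *Code;
  const char *BranchVar;        // variable switched on for multi-way branches
  BlockBranchMap BranchesOut;   // successors and the branches leading to them
  BlockSet BranchesIn;          // predecessors
  bool IsCheckedMultipleEntry;  // entry of a Multiple that must be label-checked
  Block(const char *CodeInit, const char *BranchVarInit = 0)
    : Id(-1), Code(CodeInit), BranchVar(BranchVarInit), IsCheckedMultipleEntry(false) {}
};

struct Shape {
  int Id;
  Shape *Next; // shape that runs after this one, if any
  enum ShapeType { Simple, Multiple, Loop } Type;
  Shape(ShapeType TypeInit) : Id(-1), Next(0), Type(TypeInit) {}
  static bool IsSimple(Shape *S) { return S && S->Type == Simple; }
};

struct SimpleShape   : public Shape { Block *Inner; SimpleShape() : Shape(Simple), Inner(0) {} };
struct MultipleShape : public Shape { std::map<Block*, Shape*> InnerMap; MultipleShape() : Shape(Multiple) {} };
struct LoopShape     : public Shape { Shape *Inner; LoopShape() : Shape(Loop), Inner(0) {} };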
// If a block has multiple entries but no exits, and it is small enough, it is useful to split it.
// A common example is a C++ function where everything ends up at a final exit block and does some
// RAII cleanup. Without splitting, we will be forced to introduce labelled loops to allow
// reaching the final block
void SplitDeadEnds() {
  unsigned TotalCodeSize = 0;
  for (BlockSet::iterator iter = Live.begin(); iter != Live.end(); iter++) {
    Block *Curr = *iter;
    TotalCodeSize += strlen(Curr->Code);
  }
  BlockSet Splits;
  BlockSet Removed;
  //DebugDump(Live, "before");
  for (BlockSet::iterator iter = Live.begin(); iter != Live.end(); iter++) {
    Block *Original = *iter;
    if (Original->BranchesIn.size() <= 1 || Original->BranchesOut.size() > 0) continue; // only dead ends, for now
    if (contains(Original->BranchesOut, Original)) continue; // cannot split a looping node
    if (strlen(Original->Code)*(Original->BranchesIn.size()-1) > TotalCodeSize/5) continue; // if splitting increases raw code size by a significant amount, abort
    // Split the node (for simplicity, we replace all the blocks, even though we could have reused the original)
    PrintDebug("Splitting block %d\n", Original->Id);
    for (BlockSet::iterator iter = Original->BranchesIn.begin(); iter != Original->BranchesIn.end(); iter++) {
      Block *Prior = *iter;
      Block *Split = new Block(Original->Code, Original->BranchVar);
      Parent->AddBlock(Split);
      PrintDebug(" to %d\n", Split->Id);
      Split->BranchesIn.insert(Prior);
      Branch *Details = Prior->BranchesOut[Original];
      Prior->BranchesOut[Split] = new Branch(Details->Condition, Details->Code);
      Prior->BranchesOut.erase(Original);
      for (BlockBranchMap::iterator iter = Original->BranchesOut.begin(); iter != Original->BranchesOut.end(); iter++) {
        Block *Post = iter->first;
        Branch *Details = iter->second;
        Split->BranchesOut[Post] = new Branch(Details->Condition, Details->Code);
        Post->BranchesIn.insert(Split);
      }
      Splits.insert(Split);
      Removed.insert(Original);
    }
    for (BlockBranchMap::iterator iter = Original->BranchesOut.begin(); iter != Original->BranchesOut.end(); iter++) {
      Block *Post = iter->first;
      Post->BranchesIn.erase(Original);
    }
    //DebugDump(Live, "mid");
  }
  for (BlockSet::iterator iter = Splits.begin(); iter != Splits.end(); iter++) {
    Live.insert(*iter);
  }
  for (BlockSet::iterator iter = Removed.begin(); iter != Removed.end(); iter++) {
    Live.erase(*iter);
  }
  //DebugDump(Live, "after");
}
Udm::Object MatLabUdmChart::distinguish( Udm::Object udmParent ) {

    SLSF::State state;

    static boost::regex stateflowRegex( "stateflow", boost::regex_constants::perl | boost::regex_constants::icase );
    boost::match_results<std::string::const_iterator> results;

    SLSF::Subsystem subsystemParent = SLSF::Subsystem::Cast( udmParent );
    BlockSet blockSet = subsystemParent.Block_kind_children();
    Udm::Object chartParent = udmParent;
    for( BlockSet::iterator blsItr = blockSet.begin() ; blsItr != blockSet.end() ; ++blsItr ) {
        Block block = *blsItr;
        std::string tag( block.Tag() );
        if ( regex_search( tag, results, stateflowRegex ) ) {
            chartParent = block;
            break;
        }
    }

#if PARADIGM == CyberComposition_PARADIGM
    state = SLSF::State::Create( chartParent );
#else
    state = SLSF::State::Create( UdmEngine::get_singleton().getTopLevelState() );

    SLSF::ConnectorRef connectorRef = SLSF::ConnectorRef::Create( chartParent );
    connectorRef.ref() = state;
#endif

    return state;
}
Layer Layer::getSubgraphConnectedToTheseOutputs(
    const NeuronSet& outputs) const
{
    typedef std::set<size_t> BlockSet;

    BlockSet blocks;

    // TODO: eliminate the redundant inserts
    for(auto& output : outputs)
    {
        size_t block = (output / getOutputBlockingFactor()) % this->blocks();

        blocks.insert(block);
    }

    Layer layer(blocks.size(), getInputBlockingFactor(),
        getOutputBlockingFactor(), blockStep());

    for(auto& block : blocks)
    {
        size_t blockIndex = block - *blocks.begin();

        layer[blockIndex]         = (*this)[block];
        layer.at_bias(blockIndex) = at_bias(block);
    }

    return layer;
}
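// Worked example of the block-index computation used above:
//   block = (output / outputBlockingFactor) % blockCount
// The factor and block count here are made-up example values, chosen only to show how
// several output neurons can map to the same block (hence the TODO about redundant
// inserts); they are not taken from the Layer implementation.
#include <cstdio>
#include <set>

int main()
{
    const size_t outputBlockingFactor = 4; // assumed: 4 outputs per block
    const size_t blockCount           = 3; // assumed: the layer has 3 blocks

    const size_t outputs[] = {0, 1, 5, 9, 11};

    std::set<size_t> blocks;
    for(size_t output : outputs)
    {
        size_t block = (output / outputBlockingFactor) % blockCount;
        std::printf("output %zu -> block %zu\n", output, block);
        blocks.insert(block); // duplicates collapse, e.g. outputs 0 and 1
    }

    std::printf("%zu distinct blocks selected\n", blocks.size());
    return 0;
}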
// If a block has multiple entries but no exits, and it is small enough, it is useful to split it.
// A common example is a C++ function where everything ends up at a final exit block and does some
// RAII cleanup. Without splitting, we will be forced to introduce labelled loops to allow
// reaching the final block
void SplitDeadEnds() {
  int TotalCodeSize = 0;
  for (BlockSet::iterator iter = Live.begin(); iter != Live.end(); iter++) {
    Block *Curr = *iter;
    TotalCodeSize += strlen(Curr->Code);
  }
  for (BlockSet::iterator iter = Live.begin(); iter != Live.end(); iter++) {
    Block *Original = *iter;
    if (Original->BranchesIn.size() <= 1 || Original->BranchesOut.size() > 0) continue;
    if (strlen(Original->Code)*(Original->BranchesIn.size()-1) > TotalCodeSize/5) continue; // if splitting increases raw code size by a significant amount, abort
    // Split the node (for simplicity, we replace all the blocks, even though we could have reused the original)
    for (BlockBranchMap::iterator iter = Original->BranchesIn.begin(); iter != Original->BranchesIn.end(); iter++) {
      Block *Prior = iter->first;
      Block *Split = new Block(Original->Code);
      Split->BranchesIn[Prior] = new Branch(NULL);
      Prior->BranchesOut[Split] = new Branch(Prior->BranchesOut[Original]->Condition, Prior->BranchesOut[Original]->Code);
      Prior->BranchesOut.erase(Original);
      Parent->AddBlock(Split);
      Live.insert(Split);
    }
  }
}
void TextAutosizer::FingerprintMapper::assertMapsAreConsistent()
{
    // For each fingerprint -> block mapping in m_blocksForFingerprint we should have an associated
    // map from block -> fingerprint in m_fingerprints.
    ReverseFingerprintMap::iterator end = m_blocksForFingerprint.end();
    for (ReverseFingerprintMap::iterator fingerprintIt = m_blocksForFingerprint.begin(); fingerprintIt != end; ++fingerprintIt) {
        Fingerprint fingerprint = fingerprintIt->key;
        BlockSet* blocks = fingerprintIt->value.get();
        for (BlockSet::iterator blockIt = blocks->begin(); blockIt != blocks->end(); ++blockIt) {
            const LayoutBlock* block = (*blockIt);
            ASSERT(m_fingerprints.get(block) == fingerprint);
        }
    }
}
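// Illustrative sketch of the invariant the assertion above checks: a forward map
// (block -> fingerprint) and a reverse map (fingerprint -> set of blocks) must stay in
// sync. Plain std:: containers and int keys stand in for Blink's LayoutBlock,
// Fingerprint and the WTF hash types; none of this is the TextAutosizer API.
#include <cassert>
#include <map>
#include <set>

typedef int Block;       // stand-in for const LayoutBlock*
typedef unsigned Fingerprint;

std::map<Block, Fingerprint> fingerprints;                    // forward map
std::map<Fingerprint, std::set<Block> > blocksForFingerprint; // reverse map

void add(Block block, Fingerprint fingerprint)
{
    fingerprints[block] = fingerprint;
    blocksForFingerprint[fingerprint].insert(block); // update both maps together
}

void assertMapsAreConsistent()
{
    for (const auto& entry : blocksForFingerprint)
        for (Block block : entry.second)
            assert(fingerprints.at(block) == entry.first);
}

int main()
{
    add(1, 0xABCD);
    add(2, 0xABCD);
    add(3, 0x1234);
    assertMapsAreConsistent(); // holds as long as every insert went through add()
    return 0;
}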
void ConstantPropagationPass::runOnKernel(ir::IRKernel& k)
{
    report("Running constant propagation on kernel " << k.name);

    Analysis* dfgAnalysis = getAnalysis("DataflowGraphAnalysis");
    assert(dfgAnalysis != 0);

    analysis::DataflowGraph& dfg =
        *static_cast<analysis::DataflowGraph*>(dfgAnalysis);

    dfg.convertToSSAType(analysis::DataflowGraph::Minimal);

    assert(dfg.ssa() == analysis::DataflowGraph::Minimal);

    BlockSet blocks;

    report(" Starting by scanning all basic blocks");

    for(iterator block = dfg.begin(); block != dfg.end(); ++block)
    {
        report(" Queueing up BB_" << block->id());
        blocks.insert(block);
    }

    while(!blocks.empty())
    {
        iterator block = *blocks.begin();
        blocks.erase(blocks.begin());

        eliminateRedundantInstructions(dfg, blocks, block);
    }

    report("Finished running constant propagation on kernel " << k.name);
    reportE(REPORT_PTX, k);
}
Shape *MakeMultiple(BlockSet &Blocks, BlockSet& Entries, BlockBlockSetMap& IndependentGroups, Shape *Prev, BlockSet &NextEntries) {
  PrintDebug("creating multiple block with %d inner groups\n", IndependentGroups.size());
  bool Fused = !!(Shape::IsSimple(Prev));
  MultipleShape *Multiple = new MultipleShape();
  Notice(Multiple);
  BlockSet CurrEntries;
  for (BlockBlockSetMap::iterator iter = IndependentGroups.begin(); iter != IndependentGroups.end(); iter++) {
    Block *CurrEntry = iter->first;
    BlockSet &CurrBlocks = iter->second;
    PrintDebug(" multiple group with entry %d:\n", CurrEntry->Id);
    DebugDump(CurrBlocks, "  ");
    // Create inner block
    CurrEntries.clear();
    CurrEntries.insert(CurrEntry);
    for (BlockSet::iterator iter = CurrBlocks.begin(); iter != CurrBlocks.end(); iter++) {
      Block *CurrInner = *iter;
      // Remove the block from the remaining blocks
      Blocks.erase(CurrInner);
      // Find new next entries and fix branches to them
      for (BlockBranchMap::iterator iter = CurrInner->BranchesOut.begin(); iter != CurrInner->BranchesOut.end();) {
        Block *CurrTarget = iter->first;
        BlockBranchMap::iterator Next = iter;
        Next++;
        if (CurrBlocks.find(CurrTarget) == CurrBlocks.end()) {
          NextEntries.insert(CurrTarget);
          Solipsize(CurrTarget, Branch::Break, Multiple, CurrBlocks);
        }
        iter = Next; // increment carefully because Solipsize can remove us
      }
    }
    Multiple->InnerMap[CurrEntry] = Process(CurrBlocks, CurrEntries, NULL);
    // If we are not fused, then our entries will actually be checked
    if (!Fused) {
      CurrEntry->IsCheckedMultipleEntry = true;
    }
  }
  DebugDump(Blocks, " remaining blocks after multiple:");
  // Add entries not handled as next entries, they are deferred
  for (BlockSet::iterator iter = Entries.begin(); iter != Entries.end(); iter++) {
    Block *Entry = *iter;
    if (IndependentGroups.find(Entry) == IndependentGroups.end()) {
      NextEntries.insert(Entry);
    }
  }
  return Multiple;
}
ControlFlowGraph::ConstBlockPointerVector ControlFlowGraph::executable_sequence() const
{
    typedef std::unordered_set<const_iterator> BlockSet;
    ConstBlockPointerVector sequence;
    BlockSet unscheduled;

    for(const_iterator i = begin(); i != end(); ++i)
    {
        unscheduled.insert(i);
    }

    report("Getting executable sequence.");

    sequence.push_back(get_entry_block());
    unscheduled.erase(get_entry_block());
    report(" added " << get_entry_block()->label());

    while (!unscheduled.empty())
    {
        if (sequence.back()->has_fallthrough_edge())
        {
            const_edge_iterator fallthroughEdge
                = sequence.back()->get_fallthrough_edge();
            sequence.push_back(fallthroughEdge->tail);
            unscheduled.erase(fallthroughEdge->tail);
        }
        else
        {
            // find a new block, favor branch targets over random blocks
            const_iterator next = *unscheduled.begin();

            for(const_edge_pointer_iterator edge = sequence.back()->out_edges.begin();
                edge != sequence.back()->out_edges.end(); ++edge)
            {
                if(unscheduled.count((*edge)->tail) != 0)
                {
                    next = (*edge)->tail;
                }
            }

            // rewind through fallthrough edges to find the beginning of the
            // next chain of fall throughs
            report(" restarting at " << next->label());
            bool rewinding = true;
            while (rewinding)
            {
                rewinding = false;
                for (const_edge_pointer_iterator edge = next->in_edges.begin();
                    edge != next->in_edges.end(); ++edge)
                {
                    if ((*edge)->type == Edge::FallThrough)
                    {
                        assertM(unscheduled.count((*edge)->head) != 0,
                            (*edge)->head->label()
                            << " has multiple fallthrough branches.");
                        next = (*edge)->head;
                        report(" rewinding to " << next->label());
                        rewinding = true;
                        break;
                    }
                }
            }

            sequence.push_back(next);
            unscheduled.erase(next);
        }

        report(" added " << sequence.back()->label());
    }

    return sequence;
}
// Main function.
// Process a set of blocks with specified entries, returns a shape
// The Make* functions receive a NextEntries. If they fill it with data, those are the entries for the
// ->Next block on them, and the blocks are what remains in Blocks (which Make* modify). In this way
// we avoid recursing on Next (imagine a long chain of Simples, if we recursed we could blow the stack).
Shape *Process(BlockSet &Blocks, BlockSet& InitialEntries, Shape *Prev) {
  PrintDebug("Process() called\n");
  BlockSet *Entries = &InitialEntries;
  BlockSet TempEntries[2];
  int CurrTempIndex = 0;
  BlockSet *NextEntries;
  Shape *Ret = NULL;

  #define Make(call) \
    Shape *Temp = call; \
    if (Prev) Prev->Next = Temp; \
    if (!Ret) Ret = Temp; \
    if (!NextEntries->size()) { PrintDebug("Process() returning\n"); return Ret; } \
    Prev = Temp; \
    Entries = NextEntries; \
    continue;

  while (1) {
    PrintDebug("Process() running\n");
    DebugDump(Blocks, " blocks : ");
    DebugDump(*Entries, " entries: ");

    CurrTempIndex = 1-CurrTempIndex;
    NextEntries = &TempEntries[CurrTempIndex];
    NextEntries->clear();

    if (Entries->size() == 0) return Ret;
    if (Entries->size() == 1) {
      Block *Curr = *(Entries->begin());
      if (Curr->BranchesIn.size() == 0) {
        // One entry, no looping ==> Simple
        Make(MakeSimple(Blocks, Curr, *NextEntries));
      }
      // One entry, looping ==> Loop
      Make(MakeLoop(Blocks, *Entries, *NextEntries));
    }

    // More than one entry: try to eliminate entries by forming a Multiple out of groups of
    // independent blocks. It is important to remove entries through Multiples as opposed to
    // looping, since the former is more performant.
    BlockBlockSetMap IndependentGroups;
    FindIndependentGroups(Blocks, *Entries, IndependentGroups);

    PrintDebug("Independent groups: %d\n", IndependentGroups.size());

    if (IndependentGroups.size() > 0) {
      // We can handle a group in a multiple if its entry cannot be reached by another group.
      // Note that it might be reachable by itself - a loop. But that is fine, we will create
      // a loop inside the multiple block (which is the performant order to do it).
      for (BlockBlockSetMap::iterator iter = IndependentGroups.begin(); iter != IndependentGroups.end();) {
        Block *Entry = iter->first;
        BlockSet &Group = iter->second;
        BlockBlockSetMap::iterator curr = iter++; // iterate carefully, we may delete
        for (BlockBranchMap::iterator iterBranch = Entry->BranchesIn.begin(); iterBranch != Entry->BranchesIn.end(); iterBranch++) {
          Block *Origin = iterBranch->first;
          if (Group.find(Origin) == Group.end()) {
            // Reached from outside the group, so we cannot handle this
            PrintDebug("Cannot handle group with entry %d because of incoming branch from %d\n", Entry->Id, Origin->Id);
            IndependentGroups.erase(curr);
            break;
          }
        }
      }

      // As an optimization, if we have 2 independent groups, and one is a small dead end, we can handle only that dead end.
      // The other then becomes a Next - without nesting in the code and recursion in the analysis.
      // TODO: if the larger is the only dead end, handle that too
      // TODO: handle >2 groups
      // TODO: handle groups that are not dead ends but also do not branch to the NextEntries. However, we must be
      //       careful there since we create a Next, and that Next can prevent eliminating a break (since we no longer
      //       naturally reach the same place), which may necessitate a one-time loop, which makes the unnesting
      //       pointless.
      if (IndependentGroups.size() == 2) {
        // Find the smaller one
        BlockBlockSetMap::iterator iter = IndependentGroups.begin();
        Block *SmallEntry = iter->first;
        int SmallSize = iter->second.size();
        iter++;
        Block *LargeEntry = iter->first;
        int LargeSize = iter->second.size();
        if (SmallSize != LargeSize) { // ignore the case where they are identical - keep things symmetrical there
          if (SmallSize > LargeSize) {
            Block *Temp = SmallEntry;
            SmallEntry = LargeEntry;
            LargeEntry = Temp; // Note: we did not flip the Sizes too, they are now invalid. TODO: use the smaller size as a limit?
          }
          // Check if dead end
          bool DeadEnd = true;
          BlockSet &SmallGroup = IndependentGroups[SmallEntry];
          for (BlockSet::iterator iter = SmallGroup.begin(); iter != SmallGroup.end(); iter++) {
            Block *Curr = *iter;
            for (BlockBranchMap::iterator iter = Curr->BranchesOut.begin(); iter != Curr->BranchesOut.end(); iter++) {
              Block *Target = iter->first;
              if (SmallGroup.find(Target) == SmallGroup.end()) {
                DeadEnd = false;
                break;
              }
            }
            if (!DeadEnd) break;
          }
          if (DeadEnd) {
            PrintDebug("Removing nesting by not handling large group because small group is dead end\n");
            IndependentGroups.erase(LargeEntry);
          }
        }
      }

      PrintDebug("Handleable independent groups: %d\n", IndependentGroups.size());

      if (IndependentGroups.size() > 0) {
        // Some groups removable ==> Multiple
        Make(MakeMultiple(Blocks, *Entries, IndependentGroups, Prev, *NextEntries));
      }
    }
    // No independent groups, must be loopable ==> Loop
    Make(MakeLoop(Blocks, *Entries, *NextEntries));
  }
}
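// Minimal, self-contained illustration (not Relooper code) of the double-buffered entry
// sets used in Process() above: TempEntries[2] together with CurrTempIndex = 1 - CurrTempIndex
// lets each iteration read the current entries from one buffer while writing the next
// entries into the other, without allocating a fresh set per iteration. The int "entries"
// below are hypothetical stand-ins for Block pointers.
#include <cstdio>
#include <set>

int main() {
  std::set<int> TempEntries[2];
  int CurrTempIndex = 0;
  std::set<int> *Entries = &TempEntries[0];
  Entries->insert(1); // seed with the initial entry

  while (Entries->size() > 0) {
    // Flip to the other buffer and clear it before filling in the next entries.
    CurrTempIndex = 1 - CurrTempIndex;
    std::set<int> *NextEntries = &TempEntries[CurrTempIndex];
    NextEntries->clear();

    for (int Entry : *Entries) {
      std::printf("processing entry %d\n", Entry);
      if (Entry < 4) NextEntries->insert(Entry + 1); // pretend this entry leads onward
    }

    Entries = NextEntries; // what we just produced becomes the next working set
  }
  return 0;
}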
// For each entry, find the independent group reachable by it. The independent group is
// the entry itself, plus all the blocks it can reach that cannot be directly reached by another entry. Note that we
// ignore directly reaching the entry itself by another entry.
void FindIndependentGroups(BlockSet &Blocks, BlockSet &Entries, BlockBlockSetMap& IndependentGroups) {
  typedef std::map<Block*, Block*> BlockBlockMap;

  struct HelperClass {
    BlockBlockSetMap& IndependentGroups;
    BlockBlockMap Ownership; // For each block, which entry it belongs to. We have reached it from there.

    HelperClass(BlockBlockSetMap& IndependentGroupsInit) : IndependentGroups(IndependentGroupsInit) {}

    void InvalidateWithChildren(Block *New) { // TODO: rename New
      BlockList ToInvalidate; // Being in the list means you need to be invalidated
      ToInvalidate.push_back(New);
      while (ToInvalidate.size() > 0) {
        Block *Invalidatee = ToInvalidate.front();
        ToInvalidate.pop_front();
        Block *Owner = Ownership[Invalidatee];
        if (IndependentGroups.find(Owner) != IndependentGroups.end()) { // Owner may have been invalidated, do not add to IndependentGroups!
          IndependentGroups[Owner].erase(Invalidatee);
        }
        if (Ownership[Invalidatee]) { // may have been seen before and invalidated already
          Ownership[Invalidatee] = NULL;
          for (BlockBranchMap::iterator iter = Invalidatee->BranchesOut.begin(); iter != Invalidatee->BranchesOut.end(); iter++) {
            Block *Target = iter->first;
            BlockBlockMap::iterator Known = Ownership.find(Target);
            if (Known != Ownership.end()) {
              Block *TargetOwner = Known->second;
              if (TargetOwner) {
                ToInvalidate.push_back(Target);
              }
            }
          }
        }
      }
    }
  };
  HelperClass Helper(IndependentGroups);

  // We flow out from each of the entries, simultaneously.
  // When we reach a new block, we add it as belonging to the one we got to it from.
  // If we reach a new block that is already marked as belonging to someone, it is reachable by
  // two entries and is not valid for any of them. Remove it and all it can reach that have been
  // visited.

  BlockList Queue; // Being in the queue means we just added this item, and we need to add its children
  for (BlockSet::iterator iter = Entries.begin(); iter != Entries.end(); iter++) {
    Block *Entry = *iter;
    Helper.Ownership[Entry] = Entry;
    IndependentGroups[Entry].insert(Entry);
    Queue.push_back(Entry);
  }
  while (Queue.size() > 0) {
    Block *Curr = Queue.front();
    Queue.pop_front();
    Block *Owner = Helper.Ownership[Curr]; // Curr must be in the ownership map if we are in the queue
    if (!Owner) continue; // we have been invalidated meanwhile after being reached from two entries
    // Add all children
    for (BlockBranchMap::iterator iter = Curr->BranchesOut.begin(); iter != Curr->BranchesOut.end(); iter++) {
      Block *New = iter->first;
      BlockBlockMap::iterator Known = Helper.Ownership.find(New);
      if (Known == Helper.Ownership.end()) {
        // New node. Add it, and put it in the queue
        Helper.Ownership[New] = Owner;
        IndependentGroups[Owner].insert(New);
        Queue.push_back(New);
        continue;
      }
      Block *NewOwner = Known->second;
      if (!NewOwner) continue; // We reached an invalidated node
      if (NewOwner != Owner) {
        // Invalidate this and all reachable that we have seen - we reached this from two locations
        Helper.InvalidateWithChildren(New);
      }
      // otherwise, we have the same owner, so do nothing
    }
  }

  // Having processed all the interesting blocks, we remain with just one potential issue:
  // If a->b, and a was invalidated, but then b was later reached by someone else, we must
  // invalidate b. To check for this, we go over all elements in the independent groups,
  // if an element has a parent which does *not* have the same owner, we must remove it
  // and all its children.
  for (BlockSet::iterator iter = Entries.begin(); iter != Entries.end(); iter++) {
    BlockSet &CurrGroup = IndependentGroups[*iter];
    BlockList ToInvalidate;
    for (BlockSet::iterator iter = CurrGroup.begin(); iter != CurrGroup.end(); iter++) {
      Block *Child = *iter;
      for (BlockBranchMap::iterator iter = Child->BranchesIn.begin(); iter != Child->BranchesIn.end(); iter++) {
        Block *Parent = iter->first;
        if (Helper.Ownership[Parent] != Helper.Ownership[Child]) {
          ToInvalidate.push_back(Child);
        }
      }
    }
    while (ToInvalidate.size() > 0) {
      Block *Invalidatee = ToInvalidate.front();
      ToInvalidate.pop_front();
      Helper.InvalidateWithChildren(Invalidatee);
    }
  }

  // Remove empty groups
  for (BlockSet::iterator iter = Entries.begin(); iter != Entries.end(); iter++) {
    if (IndependentGroups[*iter].size() == 0) {
      IndependentGroups.erase(*iter);
    }
  }

#if DEBUG
  PrintDebug("Investigated independent groups:\n");
  for (BlockBlockSetMap::iterator iter = IndependentGroups.begin(); iter != IndependentGroups.end(); iter++) {
    DebugDump(iter->second, " group: ");
  }
#endif
}
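// Simplified, self-contained illustration of what FindIndependentGroups computes, per its
// header comment: each entry owns itself plus the blocks only it can reach; blocks
// reachable from more than one entry belong to no group. This is a from-scratch toy over a
// char-labelled graph, not the incremental flood-fill-with-invalidation used above.
#include <cstdio>
#include <map>
#include <set>
#include <vector>

typedef std::map<char, std::vector<char> > Graph;

static void reach(const Graph& g, char from, std::set<char>& seen) {
  if (!seen.insert(from).second) return;
  Graph::const_iterator it = g.find(from);
  if (it == g.end()) return;
  for (char next : it->second) reach(g, next, seen);
}

int main() {
  // Entries A and B; C is only reachable from A, D only from B, E from both.
  Graph g = { {'A', {'C'}}, {'B', {'D', 'E'}}, {'C', {'E'}}, {'D', {}}, {'E', {}} };
  std::vector<char> entries = {'A', 'B'};

  std::map<char, std::set<char> > reachable;
  for (char e : entries) reach(g, e, reachable[e]);

  for (char e : entries) {
    std::printf("group %c:", e);
    for (char block : reachable[e]) {
      // Keep the block only if no other entry reaches it (entries always keep themselves).
      bool shared = false;
      for (char other : entries)
        if (other != e && block != e && reachable[other].count(block)) shared = true;
      if (!shared) std::printf(" %c", block);
    }
    std::printf("\n");
  }
  // Prints "group A: A C" and "group B: B D"; E is reachable from both entries, so it is dropped.
  return 0;
}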
Shape *MakeLoop(BlockSet &Blocks, BlockSet& Entries, BlockSet &NextEntries) {
  // Find the inner blocks in this loop. Proceed backwards from the entries until
  // you reach a seen block, collecting as you go.
  BlockSet InnerBlocks;
  BlockSet Queue = Entries;
  while (Queue.size() > 0) {
    Block *Curr = *(Queue.begin());
    Queue.erase(Queue.begin());
    if (!contains(InnerBlocks, Curr)) {
      // This element is new, mark it as inner and remove from outer
      InnerBlocks.insert(Curr);
      Blocks.erase(Curr);
      // Add the elements prior to it
      for (BlockSet::iterator iter = Curr->BranchesIn.begin(); iter != Curr->BranchesIn.end(); iter++) {
        Queue.insert(*iter);
      }
#if 0
      // Add elements it leads to, if they are dead ends. There is no reason not to hoist dead ends
      // into loops, as it can avoid multiple entries after the loop
      for (BlockBranchMap::iterator iter = Curr->BranchesOut.begin(); iter != Curr->BranchesOut.end(); iter++) {
        Block *Target = iter->first;
        if (Target->BranchesIn.size() <= 1 && Target->BranchesOut.size() == 0) {
          Queue.insert(Target);
        }
      }
#endif
    }
  }
  assert(InnerBlocks.size() > 0);

  for (BlockSet::iterator iter = InnerBlocks.begin(); iter != InnerBlocks.end(); iter++) {
    Block *Curr = *iter;
    for (BlockBranchMap::iterator iter = Curr->BranchesOut.begin(); iter != Curr->BranchesOut.end(); iter++) {
      Block *Possible = iter->first;
      if (!contains(InnerBlocks, Possible)) {
        NextEntries.insert(Possible);
      }
    }
  }

#if 0
  // We can avoid multiple next entries by hoisting them into the loop.
  if (NextEntries.size() > 1) {
    BlockBlockSetMap IndependentGroups;
    FindIndependentGroups(NextEntries, IndependentGroups, &InnerBlocks);

    while (IndependentGroups.size() > 0 && NextEntries.size() > 1) {
      Block *Min = NULL;
      int MinSize = 0;
      for (BlockBlockSetMap::iterator iter = IndependentGroups.begin(); iter != IndependentGroups.end(); iter++) {
        Block *Entry = iter->first;
        BlockSet &Blocks = iter->second;
        if (!Min || Blocks.size() < MinSize) { // TODO: code size, not # of blocks
          Min = Entry;
          MinSize = Blocks.size();
        }
      }
      // check how many new entries this would cause
      BlockSet &Hoisted = IndependentGroups[Min];
      bool abort = false;
      for (BlockSet::iterator iter = Hoisted.begin(); iter != Hoisted.end() && !abort; iter++) {
        Block *Curr = *iter;
        for (BlockBranchMap::iterator iter = Curr->BranchesOut.begin(); iter != Curr->BranchesOut.end(); iter++) {
          Block *Target = iter->first;
          if (Hoisted.find(Target) == Hoisted.end() && NextEntries.find(Target) == NextEntries.end()) {
            // abort this hoisting
            abort = true;
            break;
          }
        }
      }
      if (abort) {
        IndependentGroups.erase(Min);
        continue;
      }
      // hoist this entry
      PrintDebug("hoisting %d into loop\n", Min->Id);
      NextEntries.erase(Min);
      for (BlockSet::iterator iter = Hoisted.begin(); iter != Hoisted.end(); iter++) {
        Block *Curr = *iter;
        InnerBlocks.insert(Curr);
        Blocks.erase(Curr);
      }
      IndependentGroups.erase(Min);
    }
  }
#endif

  PrintDebug("creating loop block:\n");
  DebugDump(InnerBlocks, " inner blocks:");
  DebugDump(Entries, " inner entries:");
  DebugDump(Blocks, " outer blocks:");
  DebugDump(NextEntries, " outer entries:");

  LoopShape *Loop = new LoopShape();
  Notice(Loop);

  // Solipsize the loop, replacing with break/continue and marking branches as Processed (will not affect later calculations)
  // A. Branches to the loop entries become a continue to this shape
  for (BlockSet::iterator iter = Entries.begin(); iter != Entries.end(); iter++) {
    Solipsize(*iter, Branch::Continue, Loop, InnerBlocks);
  }
  // B. Branches to outside the loop (a next entry) become breaks on this shape
  for (BlockSet::iterator iter = NextEntries.begin(); iter != NextEntries.end(); iter++) {
    Solipsize(*iter, Branch::Break, Loop, InnerBlocks);
  }
  // Finish up
  Shape *Inner = Process(InnerBlocks, Entries, NULL);
  Loop->Inner = Inner;
  return Loop;
}