// VI is an in-loop instruction that uses Visitor.V, a loop invariant. Check whether any
// iteration will require the value. Assume it is used if we're synthesising an unspecialised
// copy of the loop for any reason.
void PeelIteration::visitVariant(ShadowInstructionInvar* VI, DIVisitor& Visitor) {

  const ShadowLoopInvar* immediateChild = immediateChildLoop(L, VI->parent->outerScope);

  PeelAttempt* LPA = getPeelAttempt(immediateChild);
  if(LPA && LPA->isEnabled())
    LPA->visitVariant(VI, Visitor);
  else
    Visitor.notifyUsersMissed();

}
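// For orientation, a minimal sketch of the contract this code assumes of
// immediateChildLoop (the real helper is defined elsewhere in the tree, and the
// 'parent' field name below is illustrative only): given an enclosing loop L and a
// descendant scope SubL, return the unique direct child of L on the path down to SubL.
//
//   const ShadowLoopInvar* immediateChildLoop(const ShadowLoopInvar* L,
//                                             const ShadowLoopInvar* SubL) {
//     while(SubL->parent != L)
//       SubL = SubL->parent;
//     return SubL;
//   }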
void IntegrationAttempt::visitUser(ShadowInstIdx& User, DIVisitor& Visitor) {

  // Figure out which context cares about this value. The only possibilities are: this loop
  // iteration, the next iteration of this loop (the latch edge of a header PHI), a child loop
  // (defer to it to decide what to do), or a parent loop (again defer).
  // Note that nested cases (e.g. an invariant two children deep) are taken care of by the
  // immediate child's or parent's logic.

  if(User.blockIdx == INVALID_BLOCK_IDX || User.instIdx == INVALID_INSTRUCTION_IDX)
    return;

  ShadowInstructionInvar* SII = getInstInvar(User.blockIdx, User.instIdx);
  const ShadowLoopInvar* UserL = SII->parent->outerScope;

  if(UserL == L) {

    if(!visitNextIterationPHI(SII, Visitor)) {

      // Just an ordinary user in the same iteration (or not in any loop at all!).
      Visitor.visit(getInst(User.blockIdx, User.instIdx), this, User.blockIdx, User.instIdx);

    }

  }
  else {

    if((!L) || L->contains(UserL)) {

      const ShadowLoopInvar* outermostChildLoop = immediateChildLoop(L, UserL);

      // Used in a child loop. Check whether that child exists at all and defer to it.
      PeelAttempt* LPA = getPeelAttempt(outermostChildLoop);
      if(LPA && LPA->isEnabled())
        LPA->visitVariant(SII, Visitor);
      else if(!getBB(outermostChildLoop->headerIdx))
        Visitor.visit(0, this, User.blockIdx, User.instIdx); // Loop not explored, but a failed version may exist
      else
        Visitor.notifyUsersMissed();

    }
    else {

      visitExitPHI(SII, Visitor);

    }

  }

}
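// Dispatch summary for visitUser (restating the branches above):
//
//   UserL == L                     -> same iteration, unless the user is a header PHI
//                                     fed by the latch, in which case the next iteration
//   L is null or L contains UserL  -> a child loop: defer to its PeelAttempt if enabled,
//                                     visit a null instruction if the loop was never
//                                     explored (a failed version may exist), otherwise
//                                     report the users as missed
//   otherwise                      -> a parent loop: handled via visitExitPHI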
bool IntegrationAttempt::analyseBlock(uint32_t& blockIdx, bool inLoopAnalyser, bool inAnyLoop,
                                      bool skipStoreMerge, const ShadowLoopInvar* MyL) {

  ShadowBB* BB = getBB(blockIdx);
  if(!BB)
    return false;

  bool anyChange = false;

  // Use the natural scope rather than the current scope, because even if a loop is
  // ignored we want to notice that it exists so we can call analyseLoop.
  ShadowBBInvar* BBI = BB->invar;
  const ShadowLoopInvar* BBL = BBI->naturalScope;

  if(BBL != MyL) {

    BB->inAnyLoop = true;
    inAnyLoop = true;

    // By construction of our topological ordering, this must be a loop entry block.
    release_assert(BBL && "Walked into root context?");

    // Now explore the loop, if possible.
    // At the moment this can't ever happen inside the loop analyser.
    PeelAttempt* LPA = 0;
    if((!inLoopAnalyser) && (LPA = getOrCreatePeelAttempt(BBL))) {

      // Give the preheader an extra reference in case we need that store
      // to calculate a general version of the loop body if it doesn't terminate.
      ShadowBB* PHBB = getBB(LPA->L->preheaderIdx);
      PHBB->refStores();

      bool loopReadsTentativeData, loopContainsCheckedReads;
      LPA->analyse(stack_depth, loopReadsTentativeData, loopContainsCheckedReads);
      readsTentativeData |= loopReadsTentativeData;
      containsCheckedReads |= loopContainsCheckedReads;

      // We're certainly not in the loop analyser, so pick whether to keep a terminated
      // version of the loop now.
      if(LPA->isTerminated()) {

        LPA->findProfitableIntegration();
        if(!LPA->isEnabled()) {

          // The preheader already has a copy of the TL and DSE stores
          // in case the loop didn't terminate -- give it to each exiting block.
          TLLocalStore* backupTlStore;
          bool dropTlRef;
          if(readsTentativeData) {
            backupTlStore = new TLLocalStore(stack_depth);
            backupTlStore->allOthersClobbered = true;
            dropTlRef = true;
          }
          else {
            backupTlStore = PHBB->tlStore;
            dropTlRef = false;
          }

          LPA->Iterations.back()->setExitingStores(backupTlStore, StoreKindTL);

          if(dropTlRef)
            backupTlStore->dropReference();

          DSELocalStore* backupDSEStore = PHBB->dseStore;
          setAllNeededTop(backupDSEStore);
          DSELocalStore* emptyStore = new DSELocalStore(stack_depth);
          emptyStore->allOthersClobbered = true;
          LPA->Iterations.back()->setExitingStores(emptyStore, StoreKindDSE);
          emptyStore->dropReference();

        }

      }

      if(LPA->isTerminated() && LPA->isEnabled()) {

        // Committed blocks in the iterations will be used;
        // the next parent inherits them.
        inheritCommitBlocksAndFunctions(LPA->CommitBlocks, LPA->CommitFailedBlocks, LPA->CommitFunctions);

      }
      else {
        LPA->releaseCommittedChildren();
      }

    }

    // Analyse for invariants if we didn't establish that the loop terminates.
    if((!LPA) || !LPA->isTerminated()) {

      anyChange |= analyseLoop(BBL, inLoopAnalyser);

      if(!inLoopAnalyser) {

        // Run other passes over the whole loop.
        gatherIndirectUsersInLoop(BBL);
        findTentativeLoadsInUnboundedLoop(BBL, /* commit disabled here = */ false, /* second pass = */ false);
        tryKillStoresInUnboundedLoop(BBL, /* commit disabled here = */ false, /* disable writes = */ false);

      }

    }
    else {

      // The loop preheader's local store was copied by the loop analysis on the assumption
      // we'd need it to analyse the loop body, but we've found the loop terminates; drop
      // the extra reference. For the common case where the loop has a single known exit
      // point, perform store simplifications. These apply because the store was forked
      // anticipating failure to establish an iteration count.
      ShadowBB* ExitingBlock = LPA->Iterations.back()->getUniqueExitingBlock();
      std::vector<ShadowValue> simplifyStores;
      getBB(BBL->preheaderIdx)->derefStores(ExitingBlock ? &simplifyStores : 0);
      for(std::vector<ShadowValue>::iterator it = simplifyStores.begin(),
            itend = simplifyStores.end(); it != itend; ++it) {

        if(LocStore* LS = ExitingBlock->getReadableStoreFor(*it))
          LocStore::simplifyStore(LS);

      }

      // Copy edges found always dead to local scope, to accelerate edgeIsDead queries
      // without checking every iteration every time.
      copyLoopExitingDeadEdges(LPA);

      // Take account of the number of live edges leaving the last iteration
      // when deciding which blocks are certain:
      // the -1 accounts for the header's incoming edge.
      pendingEdges += (LPA->Iterations.back()->pendingEdges - 1);
      LPA->Iterations.back()->pendingEdges = 0;

    }

    // Advance the main loop past this loop. Loop blocks are always contiguous in the
    // topological ordering.
    while(blockIdx < invarInfo->BBs.size() && BBL->contains(getBBInvar(blockIdx)->naturalScope))
      ++blockIdx;
    --blockIdx;

    return anyChange;

  }

  BB->inAnyLoop = inAnyLoop;

  if(!skipStoreMerge) {

    // Check whether the block becomes a certainty (only applicable when not in a loop!)
    checkBlockStatus(BB, inLoopAnalyser);

    // Loop headers and entry blocks are given their stores in other ways.
    // If doBlockStoreMerge returned false this block isn't currently reachable.
    // See comments in that function for reasons why that can happen.
    if(!doBlockStoreMerge(BB))
      return false;

    if(!inLoopAnalyser) {
      doTLStoreMerge(BB);
      doDSEStoreMerge(BB);
    }

  }

  // As-expected checks may also be noted during analyseBlockInstructions:
  // they are cleared each time around because the flag might not make sense anymore if
  // the instruction's operands have degraded to the point that the instruction will no
  // longer be resolved. The noteAsExpectedChecks call here only tags those which are
  // mentioned in path conditions.
  applyMemoryPathConditions(BB, inLoopAnalyser, inAnyLoop);
  clearAsExpectedChecks(BB);
  noteAsExpectedChecks(BB);

  if(!inLoopAnalyser) {

    //TLWalkPathConditions(BB, true, false);
    if(pass->countPathConditionsAtBlockStart(BB->invar, BB->IA)) {
      setAllNeededTop(BB->dseStore);
      BB->dseStore = BB->dseStore->getEmptyMap();
    }

  }

  LFV3(errs() << nestingIndent() << "Start block " << BB->invar->BB->getName()
       << " store " << BB->localStore << " refcount " << BB->localStore->refCount << "\n");

  // Otherwise we should just analyse this block here.
  anyChange |= analyseBlockInstructions(BB, inLoopAnalyser, inAnyLoop);

  LFV3(errs() << nestingIndent() << "End block " << BB->invar->BB->getName()
       << " store " << BB->localStore << " refcount " << BB->localStore->refCount << "\n");

  return anyChange;

}
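// Illustration of the block-skipping step near the top of analyseBlock (the indices
// are made up for the example). Because loop blocks are contiguous in the topological
// ordering, a whole child loop can be stepped over in a single scan:
//
//   idx:  10         11      12..14         15
//         preheader  header  loop body      first block after the loop
//
// After the loop rooted at index 11 has been analysed, the while loop advances
// blockIdx to 15, and the trailing --blockIdx compensates for the increment
// presumably applied by the caller's own iteration over blocks.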