void PeelIteration::setExitingStore(void* S, ShadowBBInvar* BBI, const ShadowLoopInvar* exitLoop, StoreKind kind) {

  PeelAttempt* LPA;

  // Defer to child loop iterations?
  if(BBI->naturalScope != L &&
     (LPA = getPeelAttempt(immediateChildLoop(L, BBI->naturalScope))) &&
     LPA->isTerminated()) {

    for(uint32_t i = 0, ilim = LPA->Iterations.size(); i != ilim; ++i)
      LPA->Iterations[i]->setExitingStore(S, BBI, exitLoop, kind);
    return;

  }

  // For each live edge leaving the loop, replace the exiting block's store with S.
  ShadowBB* ExitingBB = getBB(*BBI);
  if(!ExitingBB)
    return;

  uint32_t exitingEdges = 0;
  for(uint32_t i = 0, ilim = BBI->succIdxs.size(); i != ilim; ++i) {

    ShadowBBInvar* ExitedBBI = getBBInvar(BBI->succIdxs[i]);
    if(ExitingBB->succsAlive[i] &&
       ((!ExitedBBI->naturalScope) || !exitLoop->contains(ExitedBBI->naturalScope))) {

      ++exitingEdges;

    }

  }

  if(kind == StoreKindTL) {

    for(uint32_t i = 0; i != exitingEdges; ++i) {
      SAFE_DROP_REF(ExitingBB->tlStore);
      ((TLLocalStore*)S)->refCount++;
    }
    ExitingBB->tlStore = (TLLocalStore*)S;

  }
  else if(kind == StoreKindDSE) {

    for(uint32_t i = 0; i != exitingEdges; ++i) {
      SAFE_DROP_REF(ExitingBB->dseStore);
      ((DSELocalStore*)S)->refCount++;
    }
    ExitingBB->dseStore = (DSELocalStore*)S;

  }

}
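// A minimal standalone sketch of the reference-count discipline setExitingStore relies
// on, using a hypothetical RCStore rather than LLPE's TLLocalStore/DSELocalStore: a
// block holds one reference on its store per live exiting edge, so replacing the store
// means dropping that many references on the old store and taking the same number on
// the replacement.
namespace sketch {

struct RCStore {
  unsigned refCount;
  explicit RCStore(unsigned initialRefs) : refCount(initialRefs) {}
  // Assumes stores are heap-allocated, as the new TLLocalStore(...) calls below suggest.
  void dropReference() { if(--refCount == 0) delete this; }
};

inline void replaceStore(RCStore*& slot, RCStore* S, unsigned liveExitingEdges) {
  // Precondition (implicit in the original too): slot holds at least liveExitingEdges
  // references, so it can be destroyed at most by the final drop in this loop.
  for(unsigned i = 0; i != liveExitingEdges; ++i) {
    slot->dropReference();   // plays the role of SAFE_DROP_REF(ExitingBB->tlStore)
    ++S->refCount;           // matches ((TLLocalStore*)S)->refCount++
  }
  slot = S;                  // slot may dangle briefly after the last drop, exactly as
                             // in the original; it is reassigned without being read.
}

}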
// Like analyse(), but used from sharing pathways when we're sure none of the functions need re-evaluating.
// We really only want to recreate its effects on the store.
void IntegrationAttempt::execute(uint32_t new_stack_depth) {

  stack_depth = new_stack_depth;

  getInitialStore(false);
  for(uint32_t i = 0; i < nBBs; ++i) {

    if(!BBs[i])
      continue;

    ShadowBB* BB = BBs[i];
    ShadowBBInvar* BBI = BB->invar;

    if(BBI->naturalScope != L) {

      PeelAttempt* LPA = getPeelAttempt(BBI->naturalScope);
      if(LPA && LPA->isTerminated()) {

        // Run each individual iteration
        for(std::vector<PeelIteration*>::iterator it = LPA->Iterations.begin(),
              itend = LPA->Iterations.end(); it != itend; ++it) {

          (*it)->execute(stack_depth);

        }

      }
      else {

        executeLoop(BBI->naturalScope);

      }

      // Skip blocks in this scope
      while(i < nBBs && BBI->naturalScope->contains(getBBInvar(i + BBsOffset)->naturalScope))
        ++i;
      --i;

    }
    else {

      if(i != 0) {
        if(!doBlockStoreMerge(BB))
          return;
      }

      executeBlock(BB);

    }

  }

}
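// A small sketch, with hypothetical data rather than LLPE's block tables, of the
// index-skipping idiom used in execute() above and in analyseBlock() below. It depends
// on an invariant of the block ordering: all blocks belonging to a loop are contiguous,
// so after handing a whole loop to a subroutine the outer for-loop can advance past its
// blocks and step back one, letting the loop's own ++i land on the first block after
// the loop.
#include <vector>

namespace sketch {

// scopeIds[i] == s models "block i's naturalScope is contained in scope s".
inline void walkBlocks(const std::vector<int>& scopeIds, int outerScope) {
  for(unsigned i = 0; i < scopeIds.size(); ++i) {
    if(scopeIds[i] != outerScope) {
      int loopScope = scopeIds[i];
      // ...process the whole nested loop here, as executeLoop() does...
      while(i < scopeIds.size() && scopeIds[i] == loopScope)
        ++i;
      --i; // compensate for the for-loop's ++i
      continue;
    }
    // ...process a single block of the current scope here...
  }
}

}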
// Find the unique exiting edge from this loop iteration if one exists: return the
// exiting block when exactly one live edge leaves the loop, return null when there
// are none, and set bail (and return null) when there are several.
ShadowBB* PeelIteration::getUniqueExitingBlock2(ShadowBBInvar* BBI, const ShadowLoopInvar* exitLoop, bool& bail) {

  PeelAttempt* LPA;

  // Defer to child loop iteration?
  if(BBI->naturalScope != L &&
     (LPA = getPeelAttempt(immediateChildLoop(L, BBI->naturalScope))) &&
     LPA->isTerminated()) {

    return LPA->Iterations.back()->getUniqueExitingBlock2(BBI, exitLoop, bail);

  }

  // Find a unique exiting edge if there is one.
  ShadowBB* ExitingBB = getBB(*BBI);
  if(!ExitingBB)
    return 0;

  uint32_t exitingEdges = 0;
  for(uint32_t i = 0, ilim = BBI->succIdxs.size(); i != ilim && exitingEdges < 2; ++i) {

    ShadowBBInvar* ExitedBBI = getBBInvar(BBI->succIdxs[i]);
    if(ExitingBB->succsAlive[i] &&
       ((!ExitedBBI->naturalScope) || !exitLoop->contains(ExitedBBI->naturalScope))) {

      ++exitingEdges;

    }

  }

  if(exitingEdges == 0)
    return 0;
  else if(exitingEdges == 1)
    return ExitingBB;
  else {

    bail = true;
    return 0;

  }

}
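// A plausible caller-side sketch of how the per-block query above composes into the
// getUniqueExitingBlock() used by analyseBlock() below. The real wrapper is not shown
// in this file, so the types and the aggregation loop here are assumptions: scan the
// loop's candidate blocks, remember the single one that reports a unique exiting edge,
// and give up as soon as bail is set or a second exiting block appears.
#include <cstddef>
#include <vector>

namespace sketch {

struct Block;  // stand-in for ShadowBB; only pointers are used

// Stand-in for the shape of PeelIteration::getUniqueExitingBlock2.
typedef Block* (*UniqueExitQuery)(Block* candidate, bool& bail);

inline Block* uniqueExitingBlock(const std::vector<Block*>& loopBlocks, UniqueExitQuery query) {
  Block* Unique = 0;
  bool bail = false;
  for(size_t i = 0; i != loopBlocks.size(); ++i) {
    Block* Exiting = query(loopBlocks[i], bail);
    if(bail)
      return 0;        // a single block already has several exiting edges
    if(Exiting) {
      if(Unique)
        return 0;      // two distinct blocks exit the loop
      Unique = Exiting;
    }
  }
  return Unique;       // null if no live edge leaves the loop at all
}

}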
bool IntegrationAttempt::analyseBlock(uint32_t& blockIdx, bool inLoopAnalyser, bool inAnyLoop, bool skipStoreMerge, const ShadowLoopInvar* MyL) {

  ShadowBB* BB = getBB(blockIdx);
  if(!BB)
    return false;

  bool anyChange = false;

  // Use natural scope rather than scope because even if a loop is
  // ignored we want to notice that it exists so we can call analyseLoop
  ShadowBBInvar* BBI = BB->invar;
  const ShadowLoopInvar* BBL = BBI->naturalScope;

  if(BBL != MyL) {

    BB->inAnyLoop = true;
    inAnyLoop = true;

    // By construction of our top-ordering, must be a loop entry block.
    release_assert(BBL && "Walked into root context?");

    // Now explore the loop, if possible.
    // At the moment can't ever happen inside the loop analyser.
    PeelAttempt* LPA = 0;
    if((!inLoopAnalyser) && (LPA = getOrCreatePeelAttempt(BBL))) {

      // Give the preheader an extra reference in case we need that store
      // to calculate a general version of the loop body if it doesn't terminate.
      ShadowBB* PHBB = getBB(LPA->L->preheaderIdx);
      PHBB->refStores();

      bool loopReadsTentativeData, loopContainsCheckedReads;
      LPA->analyse(stack_depth, loopReadsTentativeData, loopContainsCheckedReads);
      readsTentativeData |= loopReadsTentativeData;
      containsCheckedReads |= loopContainsCheckedReads;

      // We're certainly not in the loop analyser, so pick whether to keep a terminated
      // version of the loop now.
      if(LPA->isTerminated()) {

        LPA->findProfitableIntegration();
        if(!LPA->isEnabled()) {

          // The preheader already has a copy of the TL and DSE stores
          // in case the loop didn't terminate -- give them to each exiting block.

          TLLocalStore* backupTlStore;
          bool dropTlRef;
          if(readsTentativeData) {
            backupTlStore = new TLLocalStore(stack_depth);
            backupTlStore->allOthersClobbered = true;
            dropTlRef = true;
          }
          else {
            backupTlStore = PHBB->tlStore;
            dropTlRef = false;
          }

          LPA->Iterations.back()->setExitingStores(backupTlStore, StoreKindTL);

          if(dropTlRef)
            backupTlStore->dropReference();

          DSELocalStore* backupDSEStore = PHBB->dseStore;
          setAllNeededTop(backupDSEStore);
          DSELocalStore* emptyStore = new DSELocalStore(stack_depth);
          emptyStore->allOthersClobbered = true;
          LPA->Iterations.back()->setExitingStores(emptyStore, StoreKindDSE);
          emptyStore->dropReference();

        }

      }

      if(LPA->isTerminated() && LPA->isEnabled()) {

        // Committed blocks in the iterations will be used;
        // next parent inherits them.
        inheritCommitBlocksAndFunctions(LPA->CommitBlocks, LPA->CommitFailedBlocks, LPA->CommitFunctions);

      }
      else {

        LPA->releaseCommittedChildren();

      }

    }

    // Analyse for invariants if we didn't establish that the loop terminates.
    if((!LPA) || !LPA->isTerminated()) {

      anyChange |= analyseLoop(BBL, inLoopAnalyser);
      if(!inLoopAnalyser) {

        // Run other passes over the whole loop
        gatherIndirectUsersInLoop(BBL);
        findTentativeLoadsInUnboundedLoop(BBL, /* commit disabled here = */ false, /* second pass = */ false);
        tryKillStoresInUnboundedLoop(BBL, /* commit disabled here = */ false, /* disable writes = */ false);

      }

    }
    else {

      // The loop preheader's local store was copied by the loop analysis assuming we'd
      // need it to analyse the loop body, but we've found the loop terminates; drop the extra ref.
      // For the common case where the loop has a single known exit point, perform store simplifications.
      // These apply because the store was forked anticipating failure to establish an iteration count.
      ShadowBB* ExitingBlock = LPA->Iterations.back()->getUniqueExitingBlock();

      std::vector<ShadowValue> simplifyStores;
      getBB(BBL->preheaderIdx)->derefStores(ExitingBlock ? &simplifyStores : 0);

      for(std::vector<ShadowValue>::iterator it = simplifyStores.begin(),
            itend = simplifyStores.end(); it != itend; ++it) {

        if(LocStore* LS = ExitingBlock->getReadableStoreFor(*it))
          LocStore::simplifyStore(LS);

      }

      // Copy edges found always dead to local scope, to accelerate edgeIsDead queries without
      // checking every iteration every time.
      copyLoopExitingDeadEdges(LPA);

      // Take account of the number of live edges leaving the last iteration
      // when deciding which blocks are certain:
      // The -1 accounts for the header's incoming edge.
      pendingEdges += (LPA->Iterations.back()->pendingEdges - 1);
      LPA->Iterations.back()->pendingEdges = 0;

    }

    // Advance the main loop past this loop. Loop blocks are always contiguous in the topo ordering.
    while(blockIdx < invarInfo->BBs.size() && BBL->contains(getBBInvar(blockIdx)->naturalScope))
      ++blockIdx;
    --blockIdx;

    return anyChange;

  }

  BB->inAnyLoop = inAnyLoop;

  if(!skipStoreMerge) {

    // Check if the block becomes a certainty (only applicable when not in a loop!)
    checkBlockStatus(BB, inLoopAnalyser);

    // Loop headers and entry blocks are given their stores in other ways.
    // If doBlockStoreMerge returned false this block isn't currently reachable.
    // See comments in that function for reasons why that can happen.
    if(!doBlockStoreMerge(BB))
      return false;

    if(!inLoopAnalyser) {

      doTLStoreMerge(BB);
      doDSEStoreMerge(BB);

    }

  }

  // As-expected checks may also be noted during analyseBlockInstructions:
  // they are cleared each time around because the flag might not make sense anymore if the instruction's
  // operands have degraded to the point that the instruction will no longer be resolved.
  // The noteAsExpectedChecks call here only tags those mentioned in path conditions.
  applyMemoryPathConditions(BB, inLoopAnalyser, inAnyLoop);
  clearAsExpectedChecks(BB);
  noteAsExpectedChecks(BB);

  if(!inLoopAnalyser) {

    //TLWalkPathConditions(BB, true, false);
    if(pass->countPathConditionsAtBlockStart(BB->invar, BB->IA)) {
      setAllNeededTop(BB->dseStore);
      BB->dseStore = BB->dseStore->getEmptyMap();
    }

  }

  LFV3(errs() << nestingIndent() << "Start block " << BB->invar->BB->getName() << " store " << BB->localStore << " refcount " << BB->localStore->refCount << "\n");

  // Else we should just analyse this block here.
  anyChange |= analyseBlockInstructions(BB, inLoopAnalyser, inAnyLoop);

  LFV3(errs() << nestingIndent() << "End block " << BB->invar->BB->getName() << " store " << BB->localStore << " refcount " << BB->localStore->refCount << "\n");

  return anyChange;

}
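// The path-condition handling near the end of analyseBlock illustrates a common
// dead-store-elimination safety pattern: when an opaque reader (here, a runtime
// path-condition check at block entry) may inspect memory, every store tracked so far
// must be considered needed, and tracking restarts from an empty map -- the role played
// by setAllNeededTop(BB->dseStore) followed by getEmptyMap(). A standalone sketch of
// that pattern with hypothetical types, not LLPE's DSELocalStore:
#include <map>
#include <set>

namespace sketch {

struct DSETracker {
  std::map<int, int> pendingStores;   // location -> instruction id of the latest store
  std::set<int> neededInsts;          // stores that must not be eliminated

  void recordStore(int loc, int instId) { pendingStores[loc] = instId; }

  void recordLoad(int loc) {
    std::map<int, int>::iterator it = pendingStores.find(loc);
    if(it != pendingStores.end())
      neededInsts.insert(it->second);   // the pending store to loc is observed
  }

  // An opaque reader may observe any location: keep every pending store and
  // begin again with no tracked state.
  void opaqueReader() {
    for(std::map<int, int>::iterator it = pendingStores.begin(),
          itend = pendingStores.end(); it != itend; ++it)
      neededInsts.insert(it->second);
    pendingStores.clear();
  }
};

}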