// Instrumenting some of the accesses may be proven redundant.
// Currently handled:
//  - read-before-write (within same BB, no calls between)
//
// We do not handle some of the patterns that should not survive
// after the classic compiler optimizations.
// E.g. two reads from the same temp should be eliminated by CSE,
// two writes should be eliminated by DSE, etc.
//
// 'Local' is a vector of insns within the same BB (no calls between).
// 'All' is a vector of insns that will be instrumented.
void ThreadSanitizer::chooseInstructionsToInstrument(
    SmallVectorImpl<Instruction*> &Local,
    SmallVectorImpl<Instruction*> &All) {
  SmallSet<Value*, 8> WriteTargets;
  // Iterate from the end.
  for (SmallVectorImpl<Instruction*>::reverse_iterator It = Local.rbegin(),
       E = Local.rend(); It != E; ++It) {
    Instruction *I = *It;
    if (StoreInst *Store = dyn_cast<StoreInst>(I)) {
      WriteTargets.insert(Store->getPointerOperand());
    } else {
      LoadInst *Load = cast<LoadInst>(I);
      Value *Addr = Load->getPointerOperand();
      if (WriteTargets.count(Addr)) {
        // We will write to this temp, so no reason to analyze the read.
        NumOmittedReadsBeforeWrite++;
        continue;
      }
      if (addrPointsToConstantData(Addr)) {
        // Addr points to some constant data -- it can not race with any writes.
        continue;
      }
    }
    All.push_back(I);
  }
  Local.clear();
}
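// A minimal standalone sketch, not part of the function above: in newer LLVM
// trees the explicit reverse_iterator loop used in
// chooseInstructionsToInstrument can be written with llvm::reverse() from
// llvm/ADT/STLExtras.h, which adapts any container providing rbegin()/rend()
// to a range-based for loop. The helper function and its contents here are
// placeholders for illustration only.
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"

static int sumBackwards(const llvm::SmallVectorImpl<int> &Values) {
  int Sum = 0;
  // Visits elements from the last one to the first, like rbegin()/rend().
  for (int V : llvm::reverse(Values))
    Sum += V;
  return Sum;
}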
static void filterForDiscriminator(SmallVectorImpl<Result> &results,
                                   DebuggerClient *debugClient) {
  Identifier discriminator = debugClient->getPreferredPrivateDiscriminator();
  if (discriminator.empty())
    return;

  auto doesNotMatch = [discriminator](Result next) -> bool {
    return !matchesDiscriminator(discriminator, next);
  };

  auto lastMatchIter = std::find_if_not(results.rbegin(), results.rend(),
                                        doesNotMatch);
  if (lastMatchIter == results.rend())
    return;

  Result lastMatch = *lastMatchIter;

  auto newEnd = std::remove_if(results.begin(), lastMatchIter.base()-1,
                               doesNotMatch);
  results.erase(newEnd, results.end());
  results.push_back(lastMatch);
}
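// A minimal sketch (standard C++, separate from the function above) of the
// reverse_iterator/base() relationship that "lastMatchIter.base() - 1" relies
// on: for any reverse_iterator rit, rit.base() is a forward iterator to the
// element *after* the one rit refers to, so rit.base() - 1 names the same
// element as *rit. The vector contents are placeholders.
#include <cassert>
#include <vector>

int main() {
  std::vector<int> v{1, 2, 3, 4};
  auto rit = v.rbegin() + 1;       // refers to 3
  assert(*rit == 3);
  assert(*rit.base() == 4);        // base() is one element further forward
  assert(*(rit.base() - 1) == 3);  // base() - 1 names the same element as *rit
  return 0;
}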
// Instrumenting some of the accesses may be proven redundant.
// Currently handled:
//  - read-before-write (within same BB, no calls between)
//  - not captured variables
//
// We do not handle some of the patterns that should not survive
// after the classic compiler optimizations.
// E.g. two reads from the same temp should be eliminated by CSE,
// two writes should be eliminated by DSE, etc.
//
// 'Local' is a vector of insns within the same BB (no calls between).
// 'All' is a vector of insns that will be instrumented.
void ThreadSanitizer::chooseInstructionsToInstrument(
    SmallVectorImpl<Instruction *> &Local, SmallVectorImpl<Instruction *> &All,
    const DataLayout &DL) {
  SmallSet<Value*, 8> WriteTargets;
  // Iterate from the end.
  for (SmallVectorImpl<Instruction*>::reverse_iterator It = Local.rbegin(),
       E = Local.rend(); It != E; ++It) {
    Instruction *I = *It;
    if (StoreInst *Store = dyn_cast<StoreInst>(I)) {
      Value *Addr = Store->getPointerOperand();
      if (!shouldInstrumentReadWriteFromAddress(Addr))
        continue;
      WriteTargets.insert(Addr);
    } else {
      LoadInst *Load = cast<LoadInst>(I);
      Value *Addr = Load->getPointerOperand();
      if (!shouldInstrumentReadWriteFromAddress(Addr))
        continue;
      if (WriteTargets.count(Addr)) {
        // We will write to this temp, so no reason to analyze the read.
        NumOmittedReadsBeforeWrite++;
        continue;
      }
      if (addrPointsToConstantData(Addr)) {
        // Addr points to some constant data -- it can not race with any writes.
        continue;
      }
    }
    Value *Addr = isa<StoreInst>(*I)
        ? cast<StoreInst>(I)->getPointerOperand()
        : cast<LoadInst>(I)->getPointerOperand();
    if (isa<AllocaInst>(GetUnderlyingObject(Addr, DL)) &&
        !PointerMayBeCaptured(Addr, true, true)) {
      // The variable is addressable but not captured, so it cannot be
      // referenced from a different thread and participate in a data race
      // (see llvm/Analysis/CaptureTracking.h for details).
      NumOmittedNonCaptured++;
      continue;
    }
    All.push_back(I);
  }
  Local.clear();
}
/// iterate - Repeatedly update the Hopfield nodes until stability or the
/// maximum number of iterations is reached.
/// @param Linked - Numbers of linked nodes that need updating.
void SpillPlacement::iterate(const SmallVectorImpl<unsigned> &Linked) {
  DEBUG(dbgs() << "Iterating over " << Linked.size() << " linked nodes:\n");
  if (Linked.empty())
    return;

  // Run up to 10 iterations. The edge bundle numbering is closely related to
  // basic block numbering, so there is a strong tendency towards chains of
  // linked nodes with sequential numbers. By scanning the linked nodes
  // backwards and forwards, we make it very likely that a single node can
  // affect the entire network in a single iteration. That means very fast
  // convergence, usually in a single iteration.
  for (unsigned iteration = 0; iteration != 10; ++iteration) {
    // Scan backwards, skipping the last node which was just updated.
    bool Changed = false;
    for (SmallVectorImpl<unsigned>::const_reverse_iterator I =
           llvm::next(Linked.rbegin()), E = Linked.rend(); I != E; ++I) {
      unsigned n = *I;
      bool C = nodes[n].update(nodes);
      Changed |= C;
      DEBUG(dbgs() << " \\EB#" << n << format(" = %+2.0f", nodes[n].Value)
                   << (C ? " *\n" : "\n"));
    }
    if (!Changed)
      return;

    // Scan forwards, skipping the first node which was just updated.
    Changed = false;
    for (SmallVectorImpl<unsigned>::const_iterator I =
           llvm::next(Linked.begin()), E = Linked.end(); I != E; ++I) {
      unsigned n = *I;
      bool C = nodes[n].update(nodes);
      Changed |= C;
      DEBUG(dbgs() << " /EB#" << n << format(" = %+2.0f", nodes[n].Value)
                   << (C ? " *\n" : "\n"));
    }
    if (!Changed)
      return;
  }
}
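// A minimal standalone sketch (standard C++; llvm::next in the function above
// is the pre-C++11 LLVM stand-in for std::next): skipping the first element of
// a forward or reverse range, as iterate() does to avoid immediately
// re-updating the node that was just touched. The vector contents are
// placeholders for illustration only.
#include <iostream>
#include <iterator>
#include <vector>

int main() {
  std::vector<unsigned> linked{10, 11, 12, 13};
  // Backwards, skipping the last element (13).
  for (auto I = std::next(linked.rbegin()), E = linked.rend(); I != E; ++I)
    std::cout << " \\EB#" << *I;
  std::cout << '\n';
  // Forwards, skipping the first element (10).
  for (auto I = std::next(linked.begin()), E = linked.end(); I != E; ++I)
    std::cout << " /EB#" << *I;
  std::cout << '\n';
  return 0;
}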