// Entry point of the IndependentBlocks pass: rewrite each SCoP candidate
// region reported by ScopDetection so that its basic blocks become
// "independent", then (optionally) translate cross-block scalars to arrays.
// Returns true iff the IR was modified.
bool IndependentBlocks::runOnFunction(llvm::Function &F) {
  bool Changed = false;

  // Cache the analyses this pass consumes.
  RI = &getAnalysis<RegionInfoPass>().getRegionInfo();
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  SD = &getAnalysis<ScopDetection>();
  SE = &getAnalysis<ScalarEvolution>();

  // Allocas introduced by this pass are placed in the entry block.
  AllocaBlock = &F.getEntryBlock();

  DEBUG(dbgs() << "Run IndepBlock on " << F.getName() << '\n');

  // Phase 1: per-region restructuring and dead-code cleanup.
  for (const Region *R : *SD) {
    Changed |= createIndependentBlocks(R);
    Changed |= eliminateDeadCode(R);
    // This may change the RegionTree.
    if (!DisableIntraScopScalarToArray || !PollyModelPHINodes)
      Changed |= splitExitBlock(const_cast<Region *>(R));
  }

  DEBUG(dbgs() << "Before Scalar to Array------->\n");
  DEBUG(F.dump());

  // Phase 2: demote scalars that cross block boundaries (scalar-to-array),
  // unless both scalar modelling options disable it.
  if (!DisableIntraScopScalarToArray || !PollyModelPHINodes)
    for (const Region *R : *SD)
      Changed |= translateScalarToArray(R);

  DEBUG(dbgs() << "After Independent Blocks------------->\n");
  DEBUG(F.dump());

  verifyAnalysis();

  return Changed;
}
// Bump the function's stored version counter by one and write the new value
// back as the "fcd.funver" metadata node.
void md::incrementFunctionVersion(llvm::Function &fn)
{
	auto& context = fn.getContext();
	const unsigned bumpedVersion = getFunctionVersion(fn) + 1;
	ConstantInt* versionConstant = ConstantInt::get(Type::getInt32Ty(context), bumpedVersion);
	MDNode* versionMetadata = MDNode::get(context, ConstantAsMetadata::get(versionConstant));
	fn.setMetadata("fcd.funver", versionMetadata);
}
/// Decide whether a function belongs to the DINO runtime and must be skipped
/// by this pass.
///
/// @param F  Function to test; only its name is inspected.
/// @return   true when the name begins with DINOPrefix, false otherwise.
bool DINOGlobal::shouldIgnoreFunction (const llvm::Function &F) {
    // StringRef::startswith states the prefix intent directly, instead of the
    // roundabout find() == 0 test (which can also scan past the prefix on
    // partial matches before failing).
    if (F.getName().startswith(DINOPrefix)) {
#ifdef DINO_VERBOSE
        llvm::outs() << "Skipping DINO function " << F.getName() << "\n";
#endif //DINO_VERBOSE
        return true;
    }
    return false;
}
//------------------------------------------------------------------ /// Scan a function to see if any instructions are interesting /// /// @param[in] f /// The function to be inspected. /// /// @return /// False if there was an error scanning; true otherwise. //------------------------------------------------------------------ virtual bool InspectFunction(llvm::Function &f) { for (llvm::Function::iterator bbi = f.begin(), last_bbi = f.end(); bbi != last_bbi; ++bbi) { if (!InspectBasicBlock(*bbi)) return false; } return true; }
// Build the AST for one function: create a statement per basic block, then
// fold single-entry/single-exit regions and loops into structured statements
// until the entry node represents the whole function body.
void AstBackEnd::runOnFunction(llvm::Function& fn)
{
	grapher.reset(new AstGrapher);
	
	// Before doing anything, create statements for blocks in reverse post-order. This ensures that values exist
	// before they are used. (Post-order would try to use statements before they were created.)
	for (BasicBlock* block : ReversePostOrderTraversal<BasicBlock*>(&fn.getEntryBlock()))
	{
		grapher->createRegion(*block, *output->basicBlockToStatement(*block));
	}
	
	// Identify loops, then visit basic blocks in post-order. If the basic block is the head
	// of a cyclic region, process the loop. Otherwise, if the basic block is the start of a single-entry-single-exit
	// region, process that region.
	auto& domTreeWrapper = getAnalysis<DominatorTreeWrapperPass>(fn);
	domTree = &domTreeWrapper.getDomTree();
	// NOTE(review): postDomTree looks like a member initialized elsewhere; it
	// is recomputed here, then patched for functions with multiple/no exits.
	postDomTree->recalculate(fn);
	RootedPostDominatorTree::treeFromIncompleteTree(fn, postDomTree);
	
	// Traverse graph in post-order. Try to detect regions with the post-dominator tree.
	// Cycles are only considered once.
	for (BasicBlock* entry : post_order(&fn.getEntryBlock()))
	{
		BasicBlock* postDominator = entry;
		while (postDominator != nullptr)
		{
			AstGraphNode* graphNode = grapher->getGraphNodeFromEntry(postDominator);
			// Prefer the region's recorded exit; otherwise fall back to the
			// post-dominator of the current block.
			BasicBlock* exit = graphNode->hasExit() ? graphNode->getExit() : postDominatorOf(*postDomTree, *postDominator);
			RegionType region = isRegion(*entry, exit);
			if (region == Acyclic)
			{
				runOnRegion(fn, *entry, exit);
			}
			else if (region == Cyclic)
			{
				runOnLoop(fn, *entry, exit);
			}
			// Stop growing the candidate region once 'exit' escapes the
			// subtree dominated by 'entry'.
			if (!domTree->dominates(entry, exit))
			{
				break;
			}
			postDominator = exit;
		}
	}
	
	// The entry block's node now stands for the entire function body.
	Statement* bodyStatement = grapher->getGraphNodeFromEntry(&fn.getEntryBlock())->node;
	output->setBody(bodyStatement);
}
// Replace indirect calls of the shape "call (bitcast @f)(args...)" with a
// direct call to @f, whenever the return type matches and argsMatch() accepts
// the argument types. Returns true iff any call was rewritten.
bool BitcastCallEliminator::runOnFunction(llvm::Function &function)
{
    bool res = false;
    bool doLoop = true;
    // Rewriting erases the old instruction, which invalidates the block's
    // instruction iterator; so each rewrite aborts the current block scan and
    // schedules another full sweep until a sweep makes no change.
    while (doLoop) {
        doLoop = false;
        for (llvm::Function::iterator i = function.begin(), e = function.end(); i != e; ++i) {
            llvm::BasicBlock *bb = &*i;
            bool bbchanged = false;
            for (llvm::BasicBlock::iterator ibb = bb->begin(), ebb = bb->end(); ibb != ebb; ++ibb) {
                llvm::Instruction *inst = &*ibb;
                // Only calls without a statically-known callee are candidates.
                if (llvm::isa<llvm::CallInst>(inst) && llvm::cast<llvm::CallInst>(inst)->getCalledFunction() == NULL) {
                    llvm::CallInst *callInst = llvm::cast<llvm::CallInst>(inst);
                    llvm::Value *calledValue = callInst->getCalledValue();
                    llvm::Value *bareCalledValue = calledValue->stripPointerCasts();
                    if (llvm::isa<llvm::Function>(bareCalledValue)) {
                        // Function type as the call site sees it (through the cast)...
                        const llvm::FunctionType *calledType = llvm::cast<llvm::FunctionType>(llvm::cast<llvm::PointerType>(calledValue->getType())->getContainedType(0));
                        // ...and the actual type of the underlying callee.
                        const llvm::FunctionType *calleeType = llvm::cast<llvm::Function>(bareCalledValue)->getFunctionType();
                        if (calledType->getReturnType() == calleeType->getReturnType()) {
                            if (argsMatch(calleeType, callInst)) {
                                // Rebuild the call as a direct call with the same arguments.
                                std::vector<llvm::Value*> args;
                                unsigned int numArgs = callInst->getNumArgOperands();
                                for (unsigned int k = 0; k < numArgs; ++k) {
                                    args.push_back(callInst->getArgOperand(k));
                                }
#if LLVM_VERSION < VERSION(3, 0)
                                llvm::CallInst *newCall = llvm::CallInst::Create(bareCalledValue, args.begin(), args.end(), "", inst);
#else
                                llvm::CallInst *newCall = llvm::CallInst::Create(bareCalledValue, args, "", inst);
#endif
                                inst->replaceAllUsesWith(newCall);
                                // Take the name before erasing; the old instruction owns it
                                // until eraseFromParent() frees it.
                                llvm::StringRef name = inst->getName();
                                inst->eraseFromParent();
                                newCall->setName(name);
                                res = true;
                                doLoop = true;
                                bbchanged = true;
                            }
                        }
                    }
                }
                // 'ibb' is invalid after the erase above — abandon this block.
                if (bbchanged) {
                    break;
                }
            }
        }
    }
    return res;
}
// Count the instructions of 'Fun' into the ICount statistic. A basic block is
// a list of instructions, so summing every block's size() yields the total.
// Pure analysis: the IR is never modified, hence the unconditional 'false'.
bool InstructionCount::runOnFunction(llvm::Function &Fun) {
  ICount = 0;

  for (const llvm::BasicBlock &Block : Fun)
    ICount += Block.size();

  return false;
}
// Replace calls to generic Rift runtime helpers (genericAdd, genericLt, ...)
// with unboxed scalar operations, using the results of TypeAnalysis.
// Rewritten call instructions are erased from both the analysis state and the
// IR. Always returns false (the pass does not invalidate other analyses'
// bookkeeping via the return value).
bool Unboxing::runOnFunction(llvm::Function & f) {
    //std::cout << "running Unboxing optimization..." << std::endl;
    m = reinterpret_cast<RiftModule*>(f.getParent());
    ta = &getAnalysis<TypeAnalysis>();
    for (auto & b : f) {
        auto i = b.begin();
        while (i != b.end()) {
            ins = i;
            bool erase = false;
            if (CallInst * ci = dyn_cast<CallInst>(ins)) {
                // getCalledFunction() returns nullptr for indirect calls;
                // guard before reading the name to avoid a null dereference.
                if (llvm::Function * callee = ci->getCalledFunction()) {
                    StringRef s = callee->getName();
                    if (s == "genericAdd") {
                        erase = genericArithmetic(Instruction::FAdd);
                    } else if (s == "genericSub") {
                        erase = genericArithmetic(Instruction::FSub);
                    } else if (s == "genericMul") {
                        erase = genericArithmetic(Instruction::FMul);
                    } else if (s == "genericDiv") {
                        erase = genericArithmetic(Instruction::FDiv);
                    } else if (s == "genericLt") {
                        erase = genericRelational(FCmpInst::FCMP_OLT);
                    } else if (s == "genericGt") {
                        erase = genericRelational(FCmpInst::FCMP_OGT);
                    } else if (s == "genericEq") {
                        erase = genericRelational(FCmpInst::FCMP_OEQ);
                    } else if (s == "genericNeq") {
                        erase = genericRelational(FCmpInst::FCMP_ONE);
                    } else if (s == "genericGetElement") {
                        erase = genericGetElement();
                    }
                }
            }
            if (erase) {
                // Advance the iterator before erasing the instruction it
                // points at, then drop the instruction from state and IR.
                llvm::Instruction * v = i;
                ++i;
                state().erase(v);
                v->eraseFromParent();
            } else {
                ++i;
            }
        }
    }
    if (DEBUG) {
        cout << "After unboxing optimization: --------------------------------" << endl;
        f.dump();
        state().print(cout);
    }
    return false;
}
// Run the CLIPS expert system over one function: convert the function (plus
// any loop/region analyses the pass header requests) into CLIPS instances,
// assert the configured pass list as a fact, and let the rule base run.
// Declarations are skipped; returns true for defined functions because the
// expert system may have modified the IR.
bool CLIPSFunctionPass::runOnFunction(llvm::Function& function) {
  if(!function.isDeclaration()) {
    void* env = getEnvironment();
    // NOTE(review): clEnv is heap-allocated and never deleted in this
    // function — presumably ownership is taken elsewhere; verify, otherwise
    // this leaks one wrapper per function.
    CLIPSEnvironment* clEnv = new CLIPSEnvironment(env);
    EnvReset(env);
    CLIPSPassHeader* header = (CLIPSPassHeader*)getIndirectPassHeader();
    // Build the "(passes <list>)" fact; the +64 leaves room for the wrapper
    // text around the pass list.
    char* passes = CharBuffer(strlen(header->getPasses()) + 64);
    sprintf(passes,"(passes %s)", header->getPasses());
    EnvAssertString(env, passes);
    free(passes);
    KnowledgeConstructor tmp;
    // Route the function together with whichever analyses the header needs.
    if(header->needsLoops() && header->needsRegions()) {
      llvm::LoopInfo& li = getAnalysis<LoopInfo>();
      llvm::RegionInfo& ri = getAnalysis<RegionInfo>();
      tmp.route(function, li, ri);
    } else if(header->needsLoops() && !header->needsRegions()) {
      llvm::LoopInfo& li = getAnalysis<LoopInfo>();
      tmp.route(function, li);
    } else if(header->needsRegions() && !header->needsLoops()) {
      llvm::RegionInfo& ri = getAnalysis<RegionInfo>();
      tmp.route(function, ri);
    } else {
      tmp.route(function);
    }
    clEnv->makeInstances((char*)tmp.getInstancesAsString().c_str());
    //TODO: put in the line to build the actual knowledge
    EnvRun(env, -1L);
    //it's up to the code in the expert system to make changes
    EnvReset(env);
    return true;
  } else {
    return false;
  }
}
// Prefix every formal argument's name with "arg." so arguments are easy to
// spot in dumped IR. Always reports the function as modified.
bool ArgumentRenamePass::runOnFunction(llvm::Function &F) {
  for (llvm::Argument &Arg : F.args())
    Arg.setName("arg." + Arg.getName());
  return true;
}
// Apply the redundant-set-visibility cleanup to every basic block of the
// function, reporting whether any block was actually rewritten. The optimizer
// runs on all blocks even after the first change.
bool BasicFunctionPass::runOnFunction(llvm::Function& function) {
  RemoveRedundantCallsToSetVisibility optimize_set_visibility;
  bool changed = false;
  for (auto& block : function.getBasicBlockList()) {
    if (optimize_set_visibility.runOnBasicBlock(block)) {
      changed = true;
    }
  }
  return changed;
}
// Emit a pair of optimization remarks marking where a validated Scop region
// begins and ends, using the region's debug locations.
void emitValidRemarks(const llvm::Function &F, const Region *R) {
  LLVMContext &Context = F.getContext();

  DebugLoc Start, Stop;
  getDebugLocations(R, Start, Stop);

  emitOptimizationRemark(Context, DEBUG_TYPE, F, Start,
                         "A valid Scop begins here.");
  emitOptimizationRemark(Context, DEBUG_TYPE, F, Stop,
                         "A valid Scop ends here.");
}
// Produce an ordering of the function's basic blocks by walking from the
// entry block via visitBasicBlock; 'seen' prevents revisiting a block, and
// 'ordered' accumulates the resulting sequence.
std::vector<llvm::BasicBlock*> BasicBlockSorter::sortBasicBlocks(llvm::Function &function)
{
	std::vector<llvm::BasicBlock*> ordered;
	std::set<llvm::BasicBlock*> seen;
	visitBasicBlock(ordered, seen, &function.getEntryBlock());
	return ordered;
}
// Fill in the body of a primitive type's destructor function: set up a code
// generator for the declaration, attach debug information, emit the
// destructor call for the primitive's own type, and finish with 'ret void'.
void createPrimitiveDestructor(Module& module, const SEM::TypeInstance* const typeInstance, llvm::Function& llvmFunction) {
	// Only an empty declaration may be filled in.
	assert(llvmFunction.isDeclaration());
	Function functionGenerator(module, llvmFunction, destructorArgInfo(module, *typeInstance), &(module.templateBuilder(TemplatedObject::TypeInstance(typeInstance))));
	// Attach debug metadata and a source position before emitting any code.
	const auto debugInfo = genDebugDestructorFunction(module, *typeInstance, &llvmFunction);
	functionGenerator.attachDebugInfo(debugInfo);
	functionGenerator.setDebugPosition(getDebugDestructorPosition(module, *typeInstance));
	// Emit the destructor call for this primitive's own type.
	genPrimitiveDestructorCall(functionGenerator, typeInstance->selfType(), functionGenerator.getRawContextValue());
	functionGenerator.getBuilder().CreateRetVoid();
	// Sanity-check the generated function.
	functionGenerator.verify();
}
// Construct the per-function code generation state: IR builders, the unwind
// stack, the initial basic block, and the ABI-decoded argument values.
// 'function' must be a declaration whose parameter count matches 'argInfo'.
Function::Function(Module& pModule, llvm::Function& function, const ArgInfo& argInfo, TemplateBuilder* pTemplateBuilder)
	: module_(pModule), function_(function),
	  entryBuilder_(pModule.getLLVMContext()),
	  builder_(pModule.getLLVMContext()),
	  createdEntryBlock_(false),
	  useEntryBuilder_(false),
	  argInfo_(argInfo),
	  templateBuilder_(pTemplateBuilder),
#if LOCIC_LLVM_VERSION < 307
	  debugInfo_(nullptr),
#endif
	  exceptionInfo_(nullptr),
	  returnValuePtr_(nullptr),
	  templateArgs_(nullptr),
	  unwindState_(nullptr) {
	assert(function.isDeclaration());
	assert(argInfo_.numArguments() == function_.getFunctionType()->getNumParams());
	
	// Add a bottom level unwind stack.
	unwindStackStack_.push(UnwindStack());
	
	// Add bottom level action for this function.
	unwindStack().push_back(UnwindAction::FunctionMarker());
	
	// Create the first basic block and point the main builder at it.
	const auto startBB = createBasicBlock("");
	builder_.SetInsertPoint(startBB);
	
	// Collect the raw (still ABI-encoded) argument values.
	argValues_.reserve(function_.arg_size());
	
	for (auto arg = function_.arg_begin(); arg != function_.arg_end(); ++arg) {
		argValues_.push_back(arg);
	}
	
	// Split the (ABI type, LLVM type) pairs into two parallel arrays for the
	// decoding call below.
	std::vector<llvm_abi::Type*> argABITypes;
	argABITypes.reserve(argInfo.argumentTypes().size());
	
	std::vector<llvm::Type*> argLLVMTypes;
	argLLVMTypes.reserve(argInfo.argumentTypes().size());
	
	for (const auto& typePair : argInfo.argumentTypes()) {
		argABITypes.push_back(typePair.first);
		argLLVMTypes.push_back(typePair.second);
	}
	
	// Emit the decoding instructions into the entry block.
	SetUseEntryBuilder useEntryBuilder(*this);
	
	// Decode arguments according to ABI.
	decodeABIValues(argValues_, argABITypes, argLLVMTypes);
}
// Delete the body of any function whose instruction count reaches the
// configured threshold, leaving only a declaration behind.
bool FunctionEraser::runOnFunction(llvm::Function &Fun) {
  // Request the per-function instruction count. 'getAnalysis' takes no
  // argument here: that is the correct form inside a 'llvm::FunctionPass'
  // (a 'llvm::ModulePass' would instead pass the Function to be analyzed).
  InstructionCount &Counter = getAnalysis<InstructionCount>();

  // Functions under the threshold are left untouched.
  if (Counter.GetInstructionCount() < EraseThreshold)
    return false;

  // Erase the body and record the event in the pass statistics.
  Fun.deleteBody();
  ++ErasedFunctions;

  return true;
}
// Reset per-function memory facts, record every module-level global whose
// pointee type is an integer, grab an alias-analysis handle, and visit the
// function's instructions. Pure analysis: never modifies the IR.
bool MemoryAnalyzer::runOnFunction(llvm::Function &function) {
    m_globals.clear();
    m_map.clear();
    m_mayZap.clear();
    llvm::Module *module = function.getParent();
    // Remember integer-typed globals; a global's type is a pointer, so look
    // through it to the contained (pointee) type.
    for (llvm::Module::global_iterator global = module->global_begin(), globale = module->global_end(); global != globale; ++global) {
        const llvm::Type *globalType = llvm::cast<llvm::PointerType>(global->getType())->getContainedType(0);
        if (llvm::isa<llvm::IntegerType>(globalType)) {
            m_globals.insert(&*global);
        }
    }
    // The alias-analysis pass interface changed in LLVM 3.8.
#if LLVM_VERSION < VERSION(3, 8)
    m_aa = &getAnalysis<llvm::AliasAnalysis>();
#else
    m_aa = &getAnalysis<llvm::AAResultsWrapperPass>().getAAResults();
#endif
    visit(function);
    return false;
}
// Emit missed-optimization remarks for a region rejected as a Scop: an
// opening remark at the region start, one remark per rejection reason that
// carries a usable debug location, and a closing remark at the region end.
void emitRejectionRemarks(const llvm::Function &F, const RejectLog &Log) {
  LLVMContext &Context = F.getContext();
  const Region *R = Log.region();

  DebugLoc Start, Stop;
  getDebugLocations(R, Start, Stop);

  emitOptimizationRemarkMissed(
      Context, DEBUG_TYPE, F, Start,
      "The following errors keep this region from being a Scop.");

  for (RejectReasonPtr Reason : Log) {
    if (const DebugLoc &Loc = Reason->getDebugLoc())
      emitOptimizationRemarkMissed(Context, DEBUG_TYPE, F, Loc,
                                   Reason->getEndUserMessage());
  }

  emitOptimizationRemarkMissed(Context, DEBUG_TYPE, F, Stop,
                               "Invalid Scop candidate ends here.");
}
// Run the function pass pipeline over 'f', displaying the CFG before and
// after.
// NOTE(review): viewCFG() launches an external Graphviz viewer and blocks —
// these two calls look like debugging aids; confirm they should stay enabled
// in normal builds.
void JITEngine::optimizeFunction(llvm::Function &f) { f.viewCFG(); m_llvmFuncPassManager->run(f); f.viewCFG(); }
bool CallingConvention_AnyArch_AnyCC::analyzeFunction(ParameterRegistry ®istry, CallInformation &fillOut, llvm::Function &func) { if (!isFullDisassembly() || md::isPrototype(func)) { return false; } auto regs = &*func.arg_begin(); unordered_map<const TargetRegisterInfo*, ModRefInfo> resultMap; // Find all GEPs const auto& target = registry.getTargetInfo(); unordered_multimap<const TargetRegisterInfo*, User*> registerUsers; for (User* user : regs->users()) { if (const TargetRegisterInfo* maybeRegister = target.registerInfo(*user)) { const TargetRegisterInfo& registerInfo = target.largestOverlappingRegister(*maybeRegister); registerUsers.insert({®isterInfo, user}); } } // Find all users of these GEPs DominatorsPerRegister gepUsers; for (auto iter = registerUsers.begin(); iter != registerUsers.end(); iter++) { addAllUsers(*iter->second, iter->first, gepUsers); } DominatorTree& preDom = registry.getAnalysis<DominatorTreeWrapperPass>(func).getDomTree(); PostDominatorTree& postDom = registry.getAnalysis<PostDominatorTreeWrapperPass>(func).getPostDomTree(); // Add calls SmallVector<CallInst*, 8> calls; CallGraph& cg = registry.getAnalysis<CallGraphWrapperPass>().getCallGraph(); CallGraphNode* thisFunc = cg[&func]; for (const auto& pair : *thisFunc) { Function* callee = pair.second->getFunction(); if (const CallInformation* callInfo = registry.getCallInfo(*callee)) if (callInfo->getStage() == CallInformation::Completed) { // pair.first is a weak value handle and has a cast operator to get the pointee CallInst* caller = cast<CallInst>((Value*)pair.first); calls.push_back(caller); for (const auto& vi : *callInfo) { if (vi.type == ValueInformation::IntegerRegister) { gepUsers[vi.registerInfo].insert(caller); } } } } // Start out resultMap based on call dominance. Weed out calls until dominant call set has been established. // This map will be refined by results from mod/ref instruction analysis. 
The purpose is mainly to define // mod/ref behavior for registers that are used in callees of this function, but not in this function // directly. while (calls.size() > 0) { unordered_map<const TargetRegisterInfo*, unsigned> callResult; auto dominant = findDominantValues(preDom, calls); for (CallInst* call : dominant) { Function* callee = call->getCalledFunction(); for (const auto& pair : translateToModRef(*registry.getCallInfo(*callee))) { callResult[pair.first] |= pair.second; } calls.erase(find(calls.begin(), calls.end(), call)); } for (const auto& pair : callResult) { resultMap[pair.first] = static_cast<ModRefInfo>(pair.second); } } // Find the dominant use(s) auto preDominatingUses = gepUsers; for (auto& pair : preDominatingUses) { pair.second = findDominantValues(preDom, pair.second); } // Fill out ModRef use dictionary // (Ref info is incomplete) for (auto& pair : preDominatingUses) { ModRefInfo& r = resultMap[pair.first]; r = IncompleteRef; for (auto inst : pair.second) { if (isa<StoreInst>(inst)) { // If we see a dominant store, then the register is modified. r = MRI_Mod; break; } if (CallInst* call = dyn_cast<CallInst>(inst)) { // If the first user is a call, propagate its ModRef value. 
r = registry.getCallInfo(*call->getCalledFunction())->getRegisterModRef(*pair.first); break; } } } // Find post-dominating stores auto postDominatingUses = gepUsers; for (auto& pair : postDominatingUses) { const TargetRegisterInfo* key = pair.first; auto& set = pair.second; // remove non-Mod instructions for (auto iter = set.begin(); iter != set.end(); ) { if (isa<StoreInst>(*iter)) { iter++; continue; } else if (CallInst* call = dyn_cast<CallInst>(*iter)) { auto callee = call->getCalledFunction(); const auto& info = *registry.getCallInfo(*callee); if ((info.getRegisterModRef(*key) & MRI_Mod) == MRI_Mod) { iter++; continue; } } iter = set.erase(iter); } set = findDominantValues(postDom, set); } MemorySSA& mssa = *registry.getMemorySSA(func); // Walk up post-dominating uses until we get to liveOnEntry. for (auto& pair : postDominatingUses) { walkUpPostDominatingUse(target, mssa, preDominatingUses, postDominatingUses, resultMap, pair.first); } // Use resultMap to build call information. First, sort registers by their pointer order; this ensures stable // parameter order. // We have authoritative information on used parameters, but not on return values. Only register parameters in this // step. SmallVector<pair<const TargetRegisterInfo*, ModRefInfo>, 16> registers; copy(resultMap.begin(), resultMap.end(), registers.begin()); sort(registers.begin(), registers.end()); vector<const TargetRegisterInfo*> returns; for (const auto& pair : resultMap) { if (pair.second & MRI_Ref) { fillOut.addParameter(ValueInformation::IntegerRegister, pair.first); } if (pair.second & MRI_Mod) { returns.push_back(pair.first); } } // Check for used returns. for (const TargetRegisterInfo* reg : ipaFindUsedReturns(registry, func, returns)) { fillOut.addReturn(ValueInformation::IntegerRegister, reg); } return true; }
// Inline registered stdlib functions into 'f'. Call sites that cast a raw
// address constant to a function pointer are resolved via the function-address
// registry, re-pointed at the real llvm::Function, and then inlined when the
// cost analysis approves. Afterwards every instruction is remapped so that
// referenced globals are materialized into the current module, and unused
// globals/declarations are cleaned up. Returns true iff anything was inlined.
bool _runOnFunction(llvm::Function& f) {
    Timer _t2("(sum)");
    Timer _t("initializing");
    initialize();
    _t.split("overhead");

    // f.dump();

    llvm::Module* cur_module = f.getParent();

    // The legacy pass manager moved namespaces at LLVM r217548.
#if LLVMREV < 217548
    llvm::PassManager fake_pm;
#else
    llvm::legacy::PassManager fake_pm;
#endif
    // The pass manager owns cost_analysis; the "fake run" on fake_module just
    // initializes the analysis so getInlineCost can be queried below.
    llvm::InlineCostAnalysis* cost_analysis = new llvm::InlineCostAnalysis();
    fake_pm.add(cost_analysis);
    // llvm::errs() << "doing fake run\n";
    fake_pm.run(*fake_module);
    // llvm::errs() << "done with fake run\n";
    bool did_any_inlining = false;

    // TODO I haven't gotten the callgraph-updating part of the inliner to work,
    // so it's not easy to tell what callsites have been inlined into (ie added to)
    // the function.
    // One simple-but-not-great way to handle it is to just iterate over the entire function
    // multiple times and re-inline things until we don't want to inline any more;
    // NPASSES controls the maximum number of times to attempt that.
    // Right now we actually don't need that, since we only inline fully-optimized
    // functions (from the stdlib), and those will already have had inlining
    // applied recursively.
    const int NPASSES = 1;
    for (int passnum = 0; passnum < NPASSES; passnum++) {
        _t.split("collecting calls");

        // Collect the call sites that go through an address-constant cast and
        // can be resolved to a known LLVM function.
        std::vector<llvm::CallSite> calls;
        for (llvm::inst_iterator I = llvm::inst_begin(f), E = llvm::inst_end(f); I != E; ++I) {
            llvm::CallInst* call = llvm::dyn_cast<llvm::CallInst>(&(*I));
            // From Inliner.cpp:
            if (!call || llvm::isa<llvm::IntrinsicInst>(call))
                continue;
            // I->dump();
            llvm::CallSite CS(call);
            llvm::Value* v = CS.getCalledValue();
            llvm::ConstantExpr* ce = llvm::dyn_cast<llvm::ConstantExpr>(v);
            if (!ce)
                continue;
            assert(ce->isCast());
            llvm::ConstantInt* l_addr = llvm::cast<llvm::ConstantInt>(ce->getOperand(0));
            int64_t addr = l_addr->getSExtValue();
            // printf is deliberately never inlined.
            if (addr == (int64_t)printf)
                continue;
            // NOTE: this inner 'f' shadows the outer function parameter.
            llvm::Function* f = g.func_addr_registry.getLLVMFuncAtAddress((void*)addr);
            if (f == NULL) {
                if (VERBOSITY()) {
                    printf("Giving up on inlining %s:\n", g.func_addr_registry.getFuncNameAtAddress((void*)addr, true).c_str());
                    call->dump();
                }
                continue;
            }

            // We load the bitcode lazily, so check if we haven't yet fully loaded the function:
            if (f->isMaterializable()) {
                // Materialize() was renamed at LLVM r220600.
#if LLVMREV < 220600
                f->Materialize();
#else
                f->materialize();
#endif
            }

            // It could still be a declaration, though I think the code won't generate this case any more:
            if (f->isDeclaration())
                continue;

            // Keep this section as a release_assert since the code-to-be-inlined, as well as the inlining
            // decisions, can be different in release mode:
            int op_idx = -1;
            for (llvm::Argument& arg : f->args()) {
                ++op_idx;
                llvm::Type* op_type = call->getOperand(op_idx)->getType();
                if (arg.getType() != op_type) {
                    llvm::errs() << f->getName() << " has arg " << op_idx << " mismatched!\n";
                    llvm::errs() << "Given ";
                    op_type->dump();
                    llvm::errs() << " but underlying function expected ";
                    arg.getType()->dump();
                    llvm::errs() << '\n';
                }
                RELEASE_ASSERT(arg.getType() == call->getOperand(op_idx)->getType(), "");
            }

            assert(!f->isDeclaration());
            // Re-point the call site at the resolved function.
            CS.setCalledFunction(f);
            calls.push_back(CS);
        }
        // assert(0 && "TODO");
        // printf("%ld\n", calls.size());

        bool did_inline = false;
        _t.split("doing inlining");
        while (calls.size()) {
            llvm::CallSite cs = calls.back();
            calls.pop_back();

            // if (VERBOSITY("irgen.inlining") >= 1) {
            // llvm::errs() << "Evaluating callsite ";
            // cs->dump();
            //}
            llvm::InlineCost IC = cost_analysis->getInlineCost(cs, threshold);
            bool do_inline = false;
            if (IC.isAlways()) {
                if (VERBOSITY("irgen.inlining") >= 2)
                    llvm::errs() << "always inline\n";
                do_inline = true;
            } else if (IC.isNever()) {
                if (VERBOSITY("irgen.inlining") >= 2)
                    llvm::errs() << "never inline\n";
                do_inline = false;
            } else {
                if (VERBOSITY("irgen.inlining") >= 2)
                    llvm::errs() << "Inline cost: " << IC.getCost() << '\n';
                // InlineCost's bool conversion says whether the cost is under
                // the threshold.
                do_inline = (bool)IC;
            }

            if (VERBOSITY("irgen.inlining") >= 1) {
                if (!do_inline)
                    llvm::outs() << "not ";
                llvm::outs() << "inlining ";
                cs->dump();
            }

            if (do_inline) {
                static StatCounter num_inlines("num_inlines");
                num_inlines.log();

                // llvm::CallGraph cg(*f.getParent());
                ////cg.addToCallGraph(cs->getCalledFunction());
                // llvm::InlineFunctionInfo InlineInfo(&cg);
                llvm::InlineFunctionInfo InlineInfo;
                bool inlined = llvm::InlineFunction(cs, InlineInfo, false);
                did_inline = did_inline || inlined;
                did_any_inlining = did_any_inlining || inlined;

                // if (inlined)
                // f.dump();
            }
        }

        if (!did_inline) {
            if (passnum >= NPASSES - 1 && VERBOSITY("irgen.inlining"))
                printf("quitting after %d passes\n", passnum + 1);
            break;
        }
    }

    // TODO would be nice to break out here and not have to rematerialize the function;
    // I think I have to do that even if no inlining happened from the "setCalledFunction" call above.
    // I thought that'd just change the CS object, but maybe it changes the underlying instruction as well?
    // if (!did_any_inlining)
    // return false;

    _t.split("remapping");

    // Identity-map the function's blocks, then remap every instruction so the
    // materializer can pull in any newly-referenced globals.
    llvm::ValueToValueMapTy VMap;
    for (llvm::Function::iterator I = f.begin(), E = f.end(); I != E; ++I) {
        VMap[I] = I;
    }
    MyMaterializer materializer(cur_module);
    for (llvm::inst_iterator I = llvm::inst_begin(f), E = llvm::inst_end(f); I != E; ++I) {
        RemapInstruction(&(*I), VMap, llvm::RF_None, NULL, &materializer);
    }

    _t.split("cleaning up");

    // Drop module-level globals that ended up unused.
    std::vector<llvm::GlobalValue*> to_remove;
    for (llvm::Module::global_iterator I = cur_module->global_begin(), E = cur_module->global_end(); I != E; ++I) {
        if (I->use_empty()) {
            to_remove.push_back(I);
            continue;
        }
    }
    for (int i = 0; i < to_remove.size(); i++) {
        to_remove[i]->eraseFromParent();
    }

    // Drop unused function declarations left behind by inlining.
    for (llvm::Module::iterator I = cur_module->begin(), E = cur_module->end(); I != E;) {
        if (!I->isDeclaration()) {
            ++I;
            continue;
        }
        if (I->use_empty()) {
            I = cur_module->getFunctionList().erase(I);
        } else {
            ++I;
        }
    }

    return did_any_inlining;
}
// Instrument every place where the result of a load is dereferenced (as the
// pointer of another load, the destination of a store, the base of a GEP, or
// the first argument of a call) so that null pointers are caught at runtime.
bool NullDerefProtectionTransformer::runOnFunction(llvm::Function &F) {
  llvm::IRBuilder<> TheBuilder(F.getContext());
  Builder = &TheBuilder;

  // First collect all loads; instrumenting while walking inst_iterator would
  // invalidate the traversal.
  std::vector<llvm::Instruction*> WorkList;
  for (llvm::inst_iterator i = inst_begin(F), e = inst_end(F); i != e; ++i) {
    llvm::Instruction* I = &*i;
    if (llvm::isa<llvm::LoadInst>(I))
      WorkList.push_back(I);
  }

  for (std::vector<llvm::Instruction*>::iterator i = WorkList.begin(), e = WorkList.end(); i != e; ++i) {
    Inst = *i;
    llvm::LoadInst* I = llvm::cast<llvm::LoadInst>(*i);

    // Find all the instructions that use the instruction I.
    for (llvm::Value::use_iterator UI = I->use_begin(), UE = I->use_end(); UI != UE; ++UI) {
      // Check whether I is used as the first argument for a load instruction.
      // If it is, then instrument the load instruction.
      if (llvm::LoadInst* LI = llvm::dyn_cast<llvm::LoadInst>(*UI)) {
        llvm::Value* Arg = LI->getOperand(0);
        if (Arg == I)
          instrumentInst(LI, Arg);
      }
      // Check whether I is used as the second argument for a store
      // instruction. If it is, then instrument the store instruction.
      else if (llvm::StoreInst* SI = llvm::dyn_cast<llvm::StoreInst>(*UI)) {
        llvm::Value* Arg = SI->getOperand(1);
        if (Arg == I)
          instrumentInst(SI, Arg);
      }
      // Check whether I is used as the first argument for a GEP instruction.
      // If it is, then instrument the GEP instruction.
      else if (llvm::GetElementPtrInst* GEP = llvm::dyn_cast<llvm::GetElementPtrInst>(*UI)) {
        llvm::Value* Arg = GEP->getOperand(0);
        if (Arg == I)
          instrumentInst(GEP, Arg);
      }
      else {
        // Check whether I is used as the first argument for a call instruction.
        // If it is, then instrument the call instruction.
        llvm::CallSite CS(*UI);
        if (CS) {
          llvm::CallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
          if (i != e) {
            llvm::Value *Arg = *i;
            if (Arg == I)
              instrumentInst(CS.getInstruction(), Arg);
          }
        }
      }
    }
  }
  return true;
}
// Abstract interpretation over the Rift runtime calls: iterate a transfer
// function over every instruction until the type state reaches a fixpoint.
// Each runtime helper's result type is recorded in 'state'; PHI nodes merge
// the types of their incoming values. Pure analysis — returns false.
bool TypeAnalysis::runOnFunction(llvm::Function & f) {
    state.clear();
    if (DEBUG)
        std::cout << "runnning TypeAnalysis..." << std::endl;
    // for all basic blocks, for all instructions
    do {
        // cout << ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>" << endl;
        state.iterationStart();
        for (auto & b : f) {
            for (auto & i : b) {
                if (CallInst * ci = dyn_cast<CallInst>(&i)) {
                    // NOTE(review): getCalledFunction() is nullptr for
                    // indirect calls; this assumes every call here is direct
                    // — confirm the IR generator guarantees that.
                    StringRef s = ci->getCalledFunction()->getName();
                    if (s == "doubleVectorLiteral") {
                        // when creating literal from a double, it is
                        // always double scalar
                        llvm::Value * op = ci->getOperand(0);
                        AType * t = new AType(AType::Kind::D);
                        state.update(op, t);
                        state.update(ci, new AType(AType::Kind::DV, t));
                    } else if (s == "characterVectorLiteral") {
                        state.update(ci, new AType(AType::Kind::CV));
                    } else if (s == "fromDoubleVector") {
                        state.update(ci, new AType(AType::Kind::R, state.get(ci->getOperand(0))));
                    } else if (s == "fromCharacterVector") {
                        state.update(ci, new AType(AType::Kind::R, state.get(ci->getOperand(0))));
                    } else if (s == "fromFunction") {
                        state.update(ci, new AType(AType::Kind::R, state.get(ci->getOperand(0))));
                    } else if (s == "genericGetElement") {
                        genericGetElement(ci);
                    } else if (s == "genericSetElement") {
                        // don't do anything for set element as it does not
                        // produce any new value
                    } else if (s == "genericAdd") {
                        genericArithmetic(ci);
                    } else if (s == "genericSub") {
                        genericArithmetic(ci);
                    } else if (s == "genericMul") {
                        genericArithmetic(ci);
                    } else if (s == "genericDiv") {
                        genericArithmetic(ci);
                    } else if (s == "genericEq") {
                        genericRelational(ci);
                    } else if (s == "genericNeq") {
                        genericRelational(ci);
                    } else if (s == "genericLt") {
                        genericRelational(ci);
                    } else if (s == "genericGt") {
                        genericRelational(ci);
                    } else if (s == "length") {
                        // result of length operation is always
                        // double scalar
                        state.update(ci, new AType(AType::Kind::D));
                    } else if (s == "type") {
                        // result of type operation is always
                        // character vector
                        state.update(ci, new AType(AType::Kind::CV));
                    } else if (s == "c") {
                        // make sure the types to c are correct
                        AType * t1 = state.get(ci->getArgOperand(1));
                        for (unsigned i = 2; i < ci->getNumArgOperands(); ++i)
                            t1 = t1->merge(state.get(ci->getArgOperand(i)));
                        if (t1->isScalar())
                            // concatenation of scalars is a vector
                            t1 = new AType(AType::Kind::R, AType::Kind::DV);
                        state.update(ci, t1);
                    } else if (s == "genericEval") {
                        state.update(ci, new AType(AType::Kind::R));
                    } else if (s == "envGet") {
                        state.update(ci, new AType(AType::Kind::R));
                    }
                } else if (PHINode * phi = dyn_cast<PHINode>(&i)) {
                    // The type of a PHI is the merge of its incoming types.
                    AType * first = state.get(phi->getOperand(0));
                    AType * second = state.get(phi->getOperand(1));
                    AType * result = first->merge(second);
                    state.update(phi, result);
                }
            }
        }
    } while (!state.hasReachedFixpoint());
    if (DEBUG) {
        f.dump();
        cout << state << endl;
    }
    return false;
}
// Propagate conditions.
// For each basic block, compute the sets of conditions known to be true and
// known to be false on entry: intersect the sets of all non-backedge
// predecessors, and when there is exactly one predecessor additionally add
// its branch condition (true on the taken successor, false on the other) and
// any __kittel_assume arguments. Pure analysis — returns false.
bool ConditionPropagator::runOnFunction(llvm::Function &F) {
    m_map.clear();
    // Ignore backedges so the per-block propagation stays acyclic.
    llvm::SmallVector<std::pair<const llvm::BasicBlock*, const llvm::BasicBlock*>, 32> backedgesVector;
    llvm::FindFunctionBackedges(F, backedgesVector);
    std::set<std::pair<const llvm::BasicBlock*, const llvm::BasicBlock*> > backedges;
    backedges.insert(backedgesVector.begin(), backedgesVector.end());
    if (m_debug) {
        std::cout << "========================================" << std::endl;
    }
    for (llvm::Function::iterator bbi = F.begin(), bbe = F.end(); bbi != bbe; ++bbi) {
        llvm::BasicBlock *bb = &*bbi;
        // Collect bb's non-backedge predecessors.
        std::set<llvm::BasicBlock*> preds;
        for (llvm::Function::iterator tmpi = F.begin(), tmpe = F.end(); tmpi != tmpe; ++tmpi) {
            if (isPred(&*tmpi, bb) && backedges.find(std::make_pair(&*tmpi, bb)) == backedges.end()) {
                if (m_debug) {
                    std::cout << bb->getName().str() << " has non-backedge predecessor " << tmpi->getName().str() << std::endl;
                }
                preds.insert(&*tmpi);
            }
        }
        // Conditions valid on entry = intersection over all predecessors'
        // condition sets.
        std::set<llvm::Value*> trueSet;
        std::set<llvm::Value*> falseSet;
        bool haveStarted = false;
        for (std::set<llvm::BasicBlock*>::iterator i = preds.begin(), e = preds.end(); i != e; ++i) {
            TrueFalseMap::iterator it = m_map.find(*i);
            if (it == m_map.end()) {
                // Predecessors should have been processed already (no
                // backedges reach here); bail out hard if not.
                std::cerr << "Did not find condition information for predecessor " << (*i)->getName().str() << "!" << std::endl;
                exit(99999);
            }
            if (!haveStarted) {
                trueSet = it->second.first;
                falseSet = it->second.second;
                haveStarted = true;
            } else {
                // intersect
                trueSet = intersect(trueSet, it->second.first);
                falseSet = intersect(falseSet, it->second.second);
            }
        }
        if (preds.size() == 1) {
            llvm::BasicBlock *pred = *(preds.begin());
            // branch condition!
            if (!m_onlyLoopConditions || m_lcbs.find(pred) != m_lcbs.end()) {
                llvm::TerminatorInst *termi = pred->getTerminator();
                if (llvm::isa<llvm::BranchInst>(termi)) {
                    llvm::BranchInst *br = llvm::cast<llvm::BranchInst>(termi);
                    if (br->isConditional()) {
                        if (br->getSuccessor(0) == bb) {
                            // branch on true
                            trueSet.insert(br->getCondition());
                        } else {
                            // branch on false
                            falseSet.insert(br->getCondition());
                        }
                    }
                }
            }
            // assumes!
            if (!m_onlyLoopConditions) {
                for (llvm::BasicBlock::iterator insti = pred->begin(), inste = pred->end(); insti != inste; ++insti) {
                    if (llvm::isa<llvm::CallInst>(insti)) {
                        llvm::CallInst *ci = llvm::cast<llvm::CallInst>(insti);
                        llvm::Function *calledFunction = ci->getCalledFunction();
                        if (calledFunction != NULL) {
                            std::string functionName = calledFunction->getName().str();
                            if (functionName == "__kittel_assume") {
                                llvm::CallSite callSite(ci);
                                trueSet.insert(callSite.getArgument(0));
                            }
                        }
                    }
                }
            }
        }
        if (m_debug) {
            std::cout << "In " << bb->getName().str() << ":" << std::endl;
            std::cout << "TRUE: ";
            printSet(trueSet);
            std::cout << std::endl;
            std::cout << "FALSE: ";
            printSet(falseSet);
            std::cout << std::endl;
            // Peek ahead to decide whether to print a separating blank line;
            // the iterator is restored immediately afterwards.
            if (++bbi != bbe) {
                std::cout << std::endl;
            }
            --bbi;
        }
        m_map.insert(std::make_pair(bb, std::make_pair(trueSet, falseSet)));
    }
    return false;
}
// JIT hook: called whenever machine code for a function has been emitted.
// Interactive cling input is wrapped in functions whose names contain
// "__cling_Un1Qu3"; only those fire the Qt signal announcing that a wrapped
// function is about to execute.
void JitEventListener::NotifyFunctionEmitted(const llvm::Function &f, void *, size_t, const llvm::JITEventListener::EmittedFunctionDetails &) {
   if(f.getName().find("__cling_Un1Qu3")!=llvm::StringRef::npos)
      emit aboutToExecWrappedFunction();
}