/// GetExceptionObject - Return the exception object from the value passed into
/// the 'resume' instruction (typically an aggregate). Clean up any dead
/// instructions, including the 'resume' instruction.
Value *DwarfEHPrepare::GetExceptionObject(ResumeInst *RI) {
  Value *V = RI->getOperand(0);
  Value *ExnObj = nullptr;
  InsertValueInst *SelIVI = dyn_cast<InsertValueInst>(V);
  LoadInst *SelLoad = nullptr;
  InsertValueInst *ExcIVI = nullptr;
  bool EraseIVIs = false;

  if (SelIVI) {
    if (SelIVI->getNumIndices() == 1 && *SelIVI->idx_begin() == 1) {
      ExcIVI = dyn_cast<InsertValueInst>(SelIVI->getOperand(0));
      if (ExcIVI && isa<UndefValue>(ExcIVI->getOperand(0)) &&
          ExcIVI->getNumIndices() == 1 && *ExcIVI->idx_begin() == 0) {
        ExnObj = ExcIVI->getOperand(1);
        SelLoad = dyn_cast<LoadInst>(SelIVI->getOperand(1));
        EraseIVIs = true;
      }
    }
  }

  if (!ExnObj)
    ExnObj = ExtractValueInst::Create(RI->getOperand(0), 0, "exn.obj", RI);

  RI->eraseFromParent();

  if (EraseIVIs) {
    if (SelIVI->use_empty())
      SelIVI->eraseFromParent();
    if (ExcIVI->use_empty())
      ExcIVI->eraseFromParent();
    if (SelLoad && SelLoad->use_empty())
      SelLoad->eraseFromParent();
  }

  return ExnObj;
}
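// Illustrative sketch (hypothetical IR, not taken from the original source):
// the fast path above recognizes the common lowering where the landing pad
// rebuilds the {exception, selector} aggregate right before the resume:
//
//   %0 = insertvalue { i8*, i32 } undef, i8* %exn, 0
//   %1 = insertvalue { i8*, i32 } %0, i32 %sel, 1
//   resume { i8*, i32 } %1
//
// In that case %exn is returned directly and the now-dead insertvalues (and
// any selector load feeding them) are erased; any other shape falls back to
// creating an extractvalue of field 0.
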
/// PromoteAliasSet - Try to promote memory values to scalars by sinking
/// stores out of the loop and moving loads to before the loop. We do this by
/// looping over the stores in the loop, looking for stores to Must pointers
/// which are loop invariant.
///
void LICM::PromoteAliasSet(AliasSet &AS) {
  // We can promote this alias set if it has a store, if it is a "Must" alias
  // set, if the pointer is loop invariant, and if we are not eliminating any
  // volatile loads or stores.
  if (AS.isForwardingAliasSet() || !AS.isMod() || !AS.isMustAlias() ||
      AS.isVolatile() || !CurLoop->isLoopInvariant(AS.begin()->getValue()))
    return;

  assert(!AS.empty() &&
         "Must alias set should have at least one pointer element in it!");
  Value *SomePtr = AS.begin()->getValue();

  // It isn't safe to promote a load/store from the loop if the load/store is
  // conditional. For example, turning:
  //
  //    for () { if (c) *P += 1; }
  //
  // into:
  //
  //    tmp = *P;  for () { if (c) tmp += 1; } *P = tmp;
  //
  // is not safe, because *P may only be valid to access if 'c' is true.
  //
  // It is safe to promote P if all uses are direct load/stores and if at
  // least one is guaranteed to be executed.
  bool GuaranteedToExecute = false;

  SmallVector<Instruction*, 64> LoopUses;
  SmallPtrSet<Value*, 4> PointerMustAliases;

  // We start with an alignment of one and try to find instructions that allow
  // us to prove better alignment.
  unsigned Alignment = 1;

  // Check that all of the pointers in the alias set have the same type. We
  // cannot (yet) promote a memory location that is loaded and stored in
  // different sizes.
  for (AliasSet::iterator ASI = AS.begin(), E = AS.end(); ASI != E; ++ASI) {
    Value *ASIV = ASI->getValue();
    PointerMustAliases.insert(ASIV);

    if (SomePtr->getType() != ASIV->getType())
      return;

    for (Value::use_iterator UI = ASIV->use_begin(), UE = ASIV->use_end();
         UI != UE; ++UI) {
      // Ignore instructions that are outside the loop.
      Instruction *Use = dyn_cast<Instruction>(*UI);
      if (!Use || !CurLoop->contains(Use))
        continue;

      // If there is a non-load/store instruction in the loop, we can't
      // promote it.
      if (LoadInst *load = dyn_cast<LoadInst>(Use)) {
        assert(!load->isVolatile() && "AST broken");
        if (!load->isSimple())
          return;
      } else if (StoreInst *store = dyn_cast<StoreInst>(Use)) {
        // Stores *of* the pointer are not interesting, only stores *to* the
        // pointer.
        if (Use->getOperand(1) != ASIV)
          continue;
        assert(!store->isVolatile() && "AST broken");
        if (!store->isSimple())
          return;

        // Note that we only check GuaranteedToExecute inside the store case
        // so that we do not introduce stores where they did not exist before
        // (which would break the LLVM concurrency model).

        // If the alignment of this instruction allows us to specify a more
        // restrictive (and performant) alignment and if we are sure this
        // instruction will be executed, update the alignment.
        // Larger is better, with the exception of 0 being the best alignment.
        unsigned InstAlignment = store->getAlignment();
        if ((InstAlignment > Alignment || InstAlignment == 0) &&
            Alignment != 0)
          if (isGuaranteedToExecute(*Use)) {
            GuaranteedToExecute = true;
            Alignment = InstAlignment;
          }

        if (!GuaranteedToExecute)
          GuaranteedToExecute = isGuaranteedToExecute(*Use);

      } else
        return; // Not a load or store.

      LoopUses.push_back(Use);
    }
  }

  // If there isn't a guaranteed-to-execute instruction, we can't promote.
  if (!GuaranteedToExecute)
    return;

  // Otherwise, this is safe to promote, let's do it!
  DEBUG(dbgs() << "LICM: Promoting value stored to in loop: "
               << *SomePtr << '\n');
  Changed = true;
  ++NumPromoted;

  // Grab a debug location for the inserted loads/stores; given that the
  // inserted loads/stores have little relation to the original loads/stores,
  // this code just arbitrarily picks a location from one, since any debug
  // location is better than none.
  DebugLoc DL = LoopUses[0]->getDebugLoc();

  SmallVector<BasicBlock*, 8> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);

  // We use the SSAUpdater interface to insert phi nodes as required.
  SmallVector<PHINode*, 16> NewPHIs;
  SSAUpdater SSA(&NewPHIs);
  LoopPromoter Promoter(SomePtr, LoopUses, SSA, PointerMustAliases, ExitBlocks,
                        *CurAST, DL, Alignment);

  // Set up the preheader to have a definition of the value. It is the
  // live-out value from the preheader that uses in the loop will use.
  LoadInst *PreheaderLoad =
    new LoadInst(SomePtr, SomePtr->getName()+".promoted",
                 Preheader->getTerminator());
  PreheaderLoad->setAlignment(Alignment);
  PreheaderLoad->setDebugLoc(DL);
  SSA.AddAvailableValue(Preheader, PreheaderLoad);

  // Rewrite all the loads in the loop and remember all the definitions from
  // stores in the loop.
  Promoter.run(LoopUses);

  // If the SSAUpdater didn't use the load in the preheader, just zap it now.
  if (PreheaderLoad->use_empty())
    PreheaderLoad->eraseFromParent();
}
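// Illustrative sketch (hypothetical IR, not taken from the original source):
// for a loop-invariant pointer %P whose only in-loop uses are simple loads and
// a guaranteed-to-execute store, promotion rewrites
//
//   loop:
//     %v = load i32* %P
//     %a = add i32 %v, 1
//     store i32 %a, i32* %P
//     br i1 %cond, label %loop, label %exit
//
// into a scalar carried by SSAUpdater-inserted phis, with the store sunk to
// the exit:
//
//   preheader:
//     %P.promoted = load i32* %P
//     br label %loop
//   loop:
//     %v = phi i32 [ %P.promoted, %preheader ], [ %a, %loop ]
//     %a = add i32 %v, 1
//     br i1 %cond, label %loop, label %exit
//   exit:
//     store i32 %a, i32* %P
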
/// Attempt to merge an objc_release with a store, load, and objc_retain to
/// form an objc_storeStrong. This can be a little tricky because the
/// instructions don't always appear in order, and there may be unrelated
/// intervening instructions.
void ObjCARCContract::ContractRelease(Instruction *Release,
                                      inst_iterator &Iter) {
  LoadInst *Load = dyn_cast<LoadInst>(GetObjCArg(Release));
  if (!Load || !Load->isSimple()) return;

  // For now, require everything to be in one basic block.
  BasicBlock *BB = Release->getParent();
  if (Load->getParent() != BB) return;

  // Walk down to find the store and the release, which may be in either order.
  BasicBlock::iterator I = Load, End = BB->end();
  ++I;
  AliasAnalysis::Location Loc = AA->getLocation(Load);
  StoreInst *Store = nullptr;
  bool SawRelease = false;
  for (; !Store || !SawRelease; ++I) {
    if (I == End)
      return;

    Instruction *Inst = I;
    if (Inst == Release) {
      SawRelease = true;
      continue;
    }

    InstructionClass Class = GetBasicInstructionClass(Inst);

    // Unrelated retains are harmless.
    if (IsRetain(Class))
      continue;

    if (Store) {
      // The store is the point where we're going to put the objc_storeStrong,
      // so make sure there are no uses after it.
      if (CanUse(Inst, Load, PA, Class))
        return;
    } else if (AA->getModRefInfo(Inst, Loc) & AliasAnalysis::Mod) {
      // We are moving the load down to the store, so check for anything
      // else which writes to the memory between the load and the store.
      Store = dyn_cast<StoreInst>(Inst);
      if (!Store || !Store->isSimple()) return;
      if (Store->getPointerOperand() != Loc.Ptr) return;
    }
  }

  Value *New = StripPointerCastsAndObjCCalls(Store->getValueOperand());

  // Walk up to find the retain.
  I = Store;
  BasicBlock::iterator Begin = BB->begin();
  while (I != Begin && GetBasicInstructionClass(I) != IC_Retain)
    --I;
  Instruction *Retain = I;
  if (GetBasicInstructionClass(Retain) != IC_Retain) return;
  if (GetObjCArg(Retain) != New) return;

  Changed = true;
  ++NumStoreStrongs;

  LLVMContext &C = Release->getContext();
  Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
  Type *I8XX = PointerType::getUnqual(I8X);

  Value *Args[] = { Load->getPointerOperand(), New };
  if (Args[0]->getType() != I8XX)
    Args[0] = new BitCastInst(Args[0], I8XX, "", Store);
  if (Args[1]->getType() != I8X)
    Args[1] = new BitCastInst(Args[1], I8X, "", Store);
  CallInst *StoreStrong =
    CallInst::Create(getStoreStrongCallee(BB->getParent()->getParent()),
                     Args, "", Store);
  StoreStrong->setDoesNotThrow();
  StoreStrong->setDebugLoc(Store->getDebugLoc());

  // We can't set the tail flag yet, because we haven't yet determined
  // whether there are any escaping allocas. Remember this call, so that
  // we can set the tail flag once we know it's safe.
  StoreStrongCalls.insert(StoreStrong);

  if (&*Iter == Store) ++Iter;
  Store->eraseFromParent();
  Release->eraseFromParent();
  EraseInstruction(Retain);
  if (Load->use_empty())
    Load->eraseFromParent();
}
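// Illustrative sketch (hypothetical IR, not taken from the original source):
// modulo ordering and unrelated intervening instructions, the sequence being
// contracted is
//
//   %old = load i8** %ptr
//   %new.r = call i8* @objc_retain(i8* %new)
//   store i8* %new, i8** %ptr
//   call void @objc_release(i8* %old)
//
// which collapses into a single runtime call that retains the new value,
// stores it, and releases the old one:
//
//   call void @objc_storeStrong(i8** %ptr, i8* %new)
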
Function * futamurize( const Function * orig_func,
                       DenseMap<const Value*, Value*> &argmap,
                       std::set<const unsigned char *> &constant_addresses_set )
{
    LLVMContext &context = getGlobalContext();

    // Make a copy of the function, removing constant arguments
    Function * specialized_func = CloneFunction( orig_func, argmap );
    specialized_func->setName( orig_func->getNameStr() + "_1" );

    // add it to our module
    LLVM_Module->getFunctionList().push_back( specialized_func );
    printf( "\nspecialized_func = %p <%s>\n",
            (void *) specialized_func, specialized_func->getName().data() );
    //~ specialized_func->dump();

    // Optimize it
    FunctionPassManager PM( LLVM_Module );
    createStandardFunctionPasses( &PM, 3 );
    PM.add(createScalarReplAggregatesPass());   // Break up aggregate allocas
    PM.add(createInstructionCombiningPass());   // Cleanup for scalarrepl.
    PM.add(createJumpThreadingPass());          // Thread jumps.
    PM.add(createCFGSimplificationPass());      // Merge & remove BBs
    PM.add(createInstructionCombiningPass());   // Combine silly seq's
    PM.add(createTailCallEliminationPass());    // Eliminate tail calls
    PM.add(createCFGSimplificationPass());      // Merge & remove BBs
    PM.add(createReassociatePass());            // Reassociate expressions
    PM.add(createLoopRotatePass());             // Rotate loops
    PM.add(createLICMPass());                   // Hoist loop invariants
    PM.add(createLoopUnswitchPass( false ));
    PM.add(createInstructionCombiningPass());
    PM.add(createIndVarSimplifyPass());         // Canonicalize indvars
    PM.add(createLoopDeletionPass());           // Delete dead loops
    PM.add(createLoopUnroll2Pass());            // Unroll small loops
    PM.add(createInstructionCombiningPass());   // Clean up after the unroller
    PM.add(createGVNPass());                    // Remove redundancies
    PM.add(createMemCpyOptPass());              // Remove memcpy / form memset
    PM.add(createSCCPPass());                   // Constant prop with SCCP
    PM.add(createPromoteMemoryToRegisterPass());
    PM.add(createConstantPropagationPass());
    PM.add(createDeadStoreEliminationPass());
    PM.add(createAggressiveDCEPass());
    PM.add(new MemoryDependenceAnalysis());
    //~ PM.add(createAAEvalPass());

    const PassInfo * pinfo = Pass::lookupPassInfo( "print-alias-sets" );
    if( !pinfo ) { printf( "print-alias-sets not found\n" ); exit(-1); }
    PM.add( pinfo->createPass() );

    FunctionPassManager PM_Inline( LLVM_Module );
    PM_Inline.add(createSingleFunctionInliningPass());

    bool Changed = false;
    int iterations = 2;
    int inline_iterations = 6;

    do
    {
        Changed = false;

        // first do some optimizations
        PM.doInitialization();
        PM.run( *specialized_func );
        PM.doFinalization();

        // Load from Constant Memory detection
        const TargetData *TD = LLVM_EE->getTargetData();

        for (inst_iterator I = inst_begin(specialized_func),
                           E = inst_end(specialized_func); I != E; ++I)
        {
            Instruction * inst = (Instruction *) &*I;

            // get all Load instructions
            LoadInst * load = dyn_cast<LoadInst>( inst );
            if( !load ) continue;
            if( load->isVolatile() ) continue;
            if( load->use_empty() ) continue;   // Don't muck with dead instructions...

            // get the address loaded by the load instruction
            Value *ptr_value = load->getPointerOperand();

            // we're only interested in constant addresses
            ConstantExpr * ptr_constant_expr = dyn_cast<ConstantExpr>( ptr_value );
            if( !ptr_constant_expr ) continue;
            ptr_constant_expr->dump();

            // compute real address of constant pointer expression
            Constant * ptr_constant = ConstantFoldConstantExpression( ptr_constant_expr, TD );
            if( !ptr_constant ) continue;
            ptr_constant->dump();

            // convert to int constant
            ConstantInt *int_constant = dyn_cast<ConstantInt>(
                ConstantExpr::getPtrToInt( ptr_constant, Type::getInt64Ty( context )));
            if( !int_constant ) continue;
            int_constant->dump();

            // get data size
            int data_length = TD->getTypeAllocSize( load->getType() );
            ptr_value->getType()->dump();

            // get real address (at last !)
            const unsigned char * c_ptr = (const unsigned char *) int_constant->getLimitedValue();
            printf( "%p %d %d\n", (const void *) c_ptr,
                    (int) constant_addresses_set.count( c_ptr ), data_length );

            // check that every byte of the loaded range is known-constant
            bool isconst = true;
            for( int offset = 0; offset < data_length; offset++ )
                isconst = isconst && constant_addresses_set.count( c_ptr + offset );
            if( !isconst ) continue;
            printf( "It is constant.\n" );

            // make an LLVM constant with the data
            Constant *new_constant = NULL;
            switch( data_length )
            {
                case 1: new_constant = ConstantInt::get( Type::getInt8Ty( context ),  *(uint8_t*)c_ptr,  false /* signed */ ); break;
                case 2: new_constant = ConstantInt::get( Type::getInt16Ty( context ), *(uint16_t*)c_ptr, false /* signed */ ); break;
                case 4: new_constant = ConstantInt::get( Type::getInt32Ty( context ), *(uint32_t*)c_ptr, false /* signed */ ); break;
                case 8: new_constant = ConstantInt::get( Type::getInt64Ty( context ), *(uint64_t*)c_ptr, false /* signed */ ); break;
                default:
                {
                    StringRef const_data( (const char *) c_ptr, data_length );
                    new_constant = ConstantArray::get( context, const_data, false /* don't add terminating null */ );
                }
            }
            if( !new_constant ) continue;
            new_constant->dump();

            // get the type that is loaded
            const Type *Ty = load->getType();

            // do we need a cast ?
            if( load->getType() != new_constant->getType() )
            {
                new_constant = ConstantExpr::getBitCast( new_constant, Ty );
                new_constant->dump();
            }

            // replace all uses of the load with the constant; the dead load
            // itself is left for the DCE passes on the next iteration
            load->replaceAllUsesWith( new_constant );
            printf( "\nREPLACED:\n" );
            load->dump();
            new_constant->dump();
            Changed = true;
        }

        if( Changed ) continue; // re-optimize and do another pass of constant load elimination

        // if we can't do anything else, do an inlining pass
        if( inline_iterations > 0 )
        {
            inline_iterations--;

            PM_Inline.doInitialization();
            Changed |= PM_Inline.run( *specialized_func );
            PM_Inline.doFinalization();

            //~ for( int i=0; i<3; i++ )
            {
                PM.doInitialization();
                Changed |= PM.run( *specialized_func );
                PM.doFinalization();
            }
        }

        if( iterations > 0 && !Changed ) iterations--;
    } while( Changed || iterations > 0 );

    return specialized_func;
}
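// Illustrative usage sketch (hypothetical driver code; the variable names and
// the way the two inputs are populated are assumptions, not part of the
// original source):
//
//   DenseMap<const Value*, Value*> argmap;
//   std::set<const unsigned char *> const_bytes;
//   // Map each argument being specialized away to its fixed value, and
//   // record every byte address of the frozen input data as known-constant.
//   argmap[orig_func->arg_begin()] = fixed_arg_value;
//   for( size_t i = 0; i < input_size; i++ )
//       const_bytes.insert( input_data + i );
//   Function *residual = futamurize( orig_func, argmap, const_bytes );
//   // 'residual' is the first Futamura projection: orig_func partially
//   // evaluated with respect to the constant inputs.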