//
// Method: visitPtrToIntInst()
//
// Description:
//  Handle a cast from pointer to integer.  If the resulting integer is only
//  used in a "boring" way — a single comparison, or a single-use chain that
//  reaches a branch without ever being loaded, stored, or passed to a call —
//  the pointer does not really escape into integer land, so no marker is
//  needed.  Otherwise, conservatively set the PtrToInt marker on the node
//  that the cast pointer points to.
//
void GraphBuilder::visitPtrToIntInst(PtrToIntInst& I) {
  DSNode* N = getValueDest(I.getOperand(0)).getNode();

  // A ptrtoint whose only use is an integer comparison never leaks the
  // pointer value; count it and ignore it.
  if (I.hasOneUse()) {
    if (isa<ICmpInst>(*(I.use_begin()))) {
      NumBoringIntToPtr++;
      return;
    }
  }

  // Follow the single-use chain starting at the cast.  If the value flows
  // into a branch without passing through a load, store, or call, it is used
  // only for control flow and is also boring.  The Seen set guards against
  // single-use cycles so the walk always terminates.
  if (I.hasOneUse()) {
    Value *V = dyn_cast<Value>(*(I.use_begin()));
    DenseSet<Value *> Seen;
    while (V && V->hasOneUse() && Seen.insert(V).second) {
      if (isa<LoadInst>(V))
        break;
      if (isa<StoreInst>(V))
        break;
      if (isa<CallInst>(V))
        break;
      V = dyn_cast<Value>(*(V->use_begin()));
    }
    // Guard against a null V before classifying: the loop condition above
    // already admits the possibility that V is null, so the final check
    // must not dereference it unconditionally.
    if (V && isa<BranchInst>(V)) {
      NumBoringIntToPtr++;
      return;
    }
  }

  // The integer may escape into arbitrary arithmetic; mark the node.
  if (N)
    N->setPtrToIntMarker();
}
// // Method: getUnsafeAllocsFromABC() // // Description: // Find all memory objects that are both allocated on the stack and are not // proven to be indexed in a type-safe manner according to the static array // bounds checking pass. // // Notes: // This method saves its results be remembering the set of DSNodes which are // both on the stack and potentially indexed in a type-unsafe manner. // // FIXME: // This method only considers unsafe GEP instructions; it does not consider // unsafe call instructions or other instructions deemed unsafe by the array // bounds checking pass. // void ConvertUnsafeAllocas::getUnsafeAllocsFromABC(Module & M) { UnsafeAllocaNodeListBuilder Builder(budsPass, unsafeAllocaNodes); Builder.visit(M); #if 0 // Haohui: Disable it right now since nobody using the code std::map<BasicBlock *,std::set<Instruction*>*> UnsafeGEPMap= abcPass->UnsafeGetElemPtrs; std::map<BasicBlock *,std::set<Instruction*>*>::const_iterator bCurrent = UnsafeGEPMap.begin(), bEnd = UnsafeGEPMap.end(); for (; bCurrent != bEnd; ++bCurrent) { std::set<Instruction *> * UnsafeGetElemPtrs = bCurrent->second; std::set<Instruction *>::const_iterator iCurrent = UnsafeGetElemPtrs->begin(), iEnd = UnsafeGetElemPtrs->end(); for (; iCurrent != iEnd; ++iCurrent) { if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(*iCurrent)) { Value *pointerOperand = GEP->getPointerOperand(); DSGraph * TDG = budsPass->getDSGraph(*(GEP->getParent()->getParent())); DSNode *DSN = TDG->getNodeForValue(pointerOperand).getNode(); //FIXME DO we really need this ? markReachableAllocas(DSN); if (DSN && DSN->isAllocaNode() && !DSN->isNodeCompletelyFolded()) { unsafeAllocaNodes.push_back(DSN); } } else { //call instruction add the corresponding *iCurrent->dump(); //FIXME abort(); } } } #endif }
//
// Method: ProcessFunctionBody()
//
// Description:
//  Assign a pool descriptor to every DSNode in F's DSGraph (a global pool
//  descriptor for nodes mapped into the globals graph, a freshly created
//  local pool otherwise) and then transform the body of NewF.
//
void RTAssociate::ProcessFunctionBody(Function &F, Function &NewF,
                                      DSGraph* G, DataStructures* DS) {
  if (G->node_begin() == G->node_end())
    return;  // Quick exit if nothing to do.

  FuncInfo &FI = *getFuncInfo(&F);

  // Calculate which DSNodes are reachable from globals. If a node is reachable
  // from a global, we will create a global pool for it, so no argument passage
  // is required.
  G->getGlobalsGraph();

  // Map all node reachable from this global to the corresponding nodes in
  // the globals graph.
  DSGraph::NodeMapTy GlobalsGraphNodeMapping;
  G->computeGToGGMapping(GlobalsGraphNodeMapping);

  // Loop over all of the nodes which are non-escaping, adding pool-allocatable
  // ones to the NodesToPA vector.
  for (DSGraph::node_iterator I = G->node_begin(), E = G->node_end();
       I != E; ++I) {
    DSNode *N = I;
    if (GlobalsGraphNodeMapping.count(N)) {
      // If it is a global pool, set up the pool descriptor appropriately.
      // getFuncInfo(0) holds the global (function-independent) pool table.
      DSNode *GGN = GlobalsGraphNodeMapping[N].getNode();
      assert(getFuncInfo(0)->PoolDescriptors[GGN] && "Should be in global mapping!");
      FI.PoolDescriptors[N] = getFuncInfo(0)->PoolDescriptors[GGN];
    } else if (!FI.PoolDescriptors[N]) {
      // Otherwise, if it was not passed in from outside the function, it must
      // be a local pool!
      assert(!N->isGlobalNode() && "Should be in global mapping!");
      FI.PoolDescriptors[N] = CreateLocalPool(N, NewF);
    }
  }

  // Rewrite the body of the cloned function using the pool assignment.
  TransformBody(NewF, FI, DS);
}
//
// Method: visitVAStartNode()
//
// Description:
//  Model the effect of va_start on DSNode N (the node for the va_list
//  argument).  Builds a collapsed array node representing the pointers to
//  the variable arguments, links it to the function's VA node, and wires it
//  into N according to the target architecture's va_list layout.
//
void GraphBuilder::visitVAStartNode(DSNode* N) {
  assert(N && "Null node as argument");
  assert(FB && "No function for this graph?");
  Module *M = FB->getParent();
  assert(M && "No module for function");
  Triple TargetTriple(M->getTargetTriple());
  Triple::ArchType Arch = TargetTriple.getArch();

  // Fetch the VANode associated with the func containing the call to va_start
  DSNodeHandle & VANH = G.getVANodeFor(*FB);

  // Make sure this NodeHandle has a node to go with it
  if (VANH.isNull())
    VANH.mergeWith(createNode());

  // Create a dsnode for an array of pointers to the VAInfo for this func
  DSNode * VAArray = createNode();
  VAArray->setArrayMarker();
  VAArray->foldNodeCompletely();
  VAArray->setLink(0,VANH);

  //VAStart modifies its argument
  N->setModifiedMarker();

  // For the architectures we support, build dsnodes that match
  // how we know va_list is used.
  switch (Arch) {
  case Triple::x86:
    // On x86, we have:
    // va_list as a pointer to an array of pointers to the variable arguments
    if (N->getSize() < 1)
      N->growSize(1);
    N->setLink(0, VAArray);
    break;
  case Triple::x86_64:
    // On x86_64, we have va_list as a struct {i32, i32, i8*, i8* }
    // The first i8* is where arguments generally go, but the second i8* can
    // be used also to pass arguments by register.
    // We model this by having both the i8*'s point to an array of pointers
    // to the arguments.
    if (N->getSize() < 24)
      N->growSize(24); //sizeof the va_list struct mentioned above
    N->setLink(8,VAArray);  //first i8*
    N->setLink(16,VAArray); //second i8*
    break;
  default:
    // FIXME: For now we abort if we don't know how to handle this arch
    // Either add support for other architectures, or at least mark the
    // nodes unknown/incomplete or whichever results in the correct
    // conservative behavior in the general case
    assert(0 && "VAstart not supported on this architecture!");
    //XXX: This might be good enough in those cases that we don't know
    //what the arch does
    N->setIncompleteMarker()->setUnknownMarker()->foldNodeCompletely();
  }

  // XXX: We used to set the alloca marker for the DSNode passed to va_start.
  // Seems to me that you could allocate the va_list on the heap, so ignoring
  // for now.
  N->setModifiedMarker()->setVAStartMarker();
}
//
// Record the DSNode of this GEP's pointer operand as unsafe when it is a
// stack-allocated node that has not been completely folded.
//
void visitGetElementPtrInst(GetElementPtrInst &GEP) {
  // Look up the node for the indexed pointer in the enclosing function's
  // DSGraph.
  Function *Parent = GEP.getParent()->getParent();
  DSGraph *Graph = budsPass->getDSGraph(*Parent);
  DSNode *Node = Graph->getNodeForValue(GEP.getPointerOperand()).getNode();

  //FIXME DO we really need this ?
  markReachableAllocas(Node);

  if (!Node)
    return;
  if (Node->isAllocaNode() && !Node->isNodeCompletelyFolded())
    unsafeAllocaNodes.push_back(Node);
}
void PoolRegisterElimination::removeTypeSafeRegistrations (const char * name) { // // Scan through all uses of the registration function and see if it can be // safely removed. If so, schedule it for removal. // std::vector<CallInst*> toBeRemoved; Function * F = intrinsic->getIntrinsic(name).F; // // Look for and record all registrations that can be deleted. // for (Value::use_iterator UI=F->use_begin(), UE=F->use_end(); UI != UE; ++UI) { // // Get the pointer to the registered object. // CallInst * CI = cast<CallInst>(*UI); Value * Ptr = intrinsic->getValuePointer(CI); // Lookup the DSNode for the value in the function's DSGraph. // DSGraph * TDG = dsaPass->getDSGraph(*(CI->getParent()->getParent())); DSNodeHandle DSH = TDG->getNodeForValue(Ptr); assert ((!(DSH.isNull())) && "No DSNode for Value!\n"); // // If the DSNode is type-safe and is never used as an array, then there // will never be a need to look it up in a splay tree, so remove its // registration. // DSNode * N = DSH.getNode(); if(!N->isArrayNode() && TS->isTypeSafe(Ptr, F)){ toBeRemoved.push_back(CI); } } // // Update the statistics. // if (toBeRemoved.size()) { RemovedRegistration += toBeRemoved.size(); TypeSafeRegistrations += toBeRemoved.size(); } // // Remove the unnecesary registrations. // std::vector<CallInst*>::iterator it, end; for (it = toBeRemoved.begin(), end = toBeRemoved.end(); it != end; ++it) { (*it)->eraseFromParent(); } }
// // Method: insertHardDanglingPointers() // // Description: // Insert dangling pointer dereferences into the code. This is done by // finding instructions that store pointers to memory and free'ing those // pointers before the store. Subsequent loads and uses of the pointer will // cause a dangling pointer dereference. // // Return value: // true - The module was modified. // false - The module was left unmodified. // // Notes: // This code utilizes DSA to ensure that the pointer can point to heap // memory (although the pointer is allowed to alias global and stack memory). // bool FaultInjector::insertHardDanglingPointers (Function & F) { // // Ensure that we can get analysis information for this function. // if (!(TDPass->hasDSGraph(F))) return false; // // Scan through each instruction of the function looking for store // instructions that store a pointer to memory. Free the pointer right // before the store instruction. // DSGraph * DSG = TDPass->getDSGraph(F); for (Function::iterator fI = F.begin(), fE = F.end(); fI != fE; ++fI) { BasicBlock & BB = *fI; for (BasicBlock::iterator bI = BB.begin(), bE = BB.end(); bI != bE; ++bI) { Instruction * I = bI; // // Look to see if there is an instruction that stores a pointer to // memory. If so, then free the pointer before the store. // if (StoreInst * SI = dyn_cast<StoreInst>(I)) { if (isa<PointerType>(SI->getOperand(0)->getType())) { Value * Pointer = SI->getOperand(0); // // Check to ensure that the pointer aliases with the heap. If so, go // ahead and add the free. Note that we may introduce an invalid // free, but we're injecting errors, so I think that's okay. // DSNode * Node = DSG->getNodeForValue(Pointer).getNode(); if (Node && (Node->isHeapNode())) { // Skip if we should not insert a fault. if (!doFault()) continue; // // Print information about where the fault is being inserted. 
// printSourceInfo ("Hard dangling pointer", I); CallInst::Create (Free, Pointer, "", I); ++DPFaults; } } } } } return (DPFaults > 0); }
//
// Determine whether the memory access through V (at the given extra offset)
// touches memory that DSA could not fully type.  Returns true for untyped
// accesses; each outcome bumps the matching statistics counter.
//
bool DSGraphStats::isNodeForValueUntyped(Value *V, unsigned Offset, const Function *F) {
  DSNodeHandle NH = getNodeHandleForValue(V);
  DSNode *N = NH.getNode();

  // No node at all: nothing is known, so count it as untyped.
  if (!N)
    return true;

  // Any of these node flags means DSA gave up on precise typing.
  if (N->isNodeCompletelyFolded()) {
    ++NumFoldedAccess;
    return true;
  }
  if (N->isExternalNode()) {
    ++NumExternalAccesses;
    return true;
  }
  if (N->isIncompleteNode()) {
    ++NumIncompleteAccesses;
    return true;
  }
  if (N->isUnknownNode()) {
    ++NumUnknownAccesses;
    return true;
  }
  if (N->isIntToPtrNode()) {
    ++NumI2PAccesses;
    return true;
  }

  // Complete node: count how many distinct types DSA recorded at the
  // accessed offset.
  unsigned offset = NH.getOffset() + Offset;
  int count = 0;
  for (DSNode::TyMapTy::const_iterator ii = N->type_begin(),
                                       ee = N->type_end();
       ii != ee; ++ii) {
    if (ii->first == offset)
      count += ii->second->size();
  }

  // Classify the access by how ambiguous its type is.
  switch (count) {
    case 0:  ++NumTypeCount0Accesses; break;
    case 1:  ++NumTypeCount1Accesses; break;
    case 2:  ++NumTypeCount2Accesses; break;
    case 3:  ++NumTypeCount3Accesses; break;
    default: ++NumTypeCount4Accesses; break;
  }
  DEBUG(assert(TS->isTypeSafe(V,F)));
  return false;
}
//
// Method: visitIntToPtrInst()
//
// Description:
//  Create a DSNode for the pointer produced by an inttoptr instruction and
//  record it as the instruction's destination.
//
void GraphBuilder::visitIntToPtrInst(IntToPtrInst &I) {
  DSNode *N = createNode();
  if(I.hasOneUse()) {
    if(isa<ICmpInst>(*(I.use_begin()))) {
      // Sole use is a comparison: the pointer is never dereferenced, so do
      // not record a destination node at all.
      NumBoringIntToPtr++;
      return;
    }
  }
  else {
    // NOTE(review): the IntToPtr/Unknown markers are applied only on the
    // multi-use path; a single non-ICmp use falls through to setDestTo()
    // with an unmarked node.  This mirrors upstream DSA, but the asymmetry
    // looks suspicious — confirm it is intentional.
    N->setIntToPtrMarker();
    N->setUnknownMarker();
  }
  setDestTo(I, N);
}
//
// Method: getCallees()
//
// Description:
//  Collect the set of functions that may be invoked by the given call site.
//  Direct calls (including calls through a constant bitcast of a function)
//  yield the single callee; indirect calls are resolved through the
//  DSCallGraph, falling back to the caller's DSGraph node for the called
//  value when the call graph has nothing to offer.
//
FunctionList DSNodeEquivs::getCallees(CallSite &CS) {
  const Function *CalledFunc = CS.getCalledFunction();

  // If the called function is casted from one function type to another, peer
  // into the cast instruction and pull out the actual function being called.
  if (ConstantExpr *CExpr = dyn_cast<ConstantExpr>(CS.getCalledValue())) {
    if (CExpr->getOpcode() == Instruction::BitCast &&
        isa<Function>(CExpr->getOperand(0)))
      CalledFunc = cast<Function>(CExpr->getOperand(0));
  }

  FunctionList Callees;

  // Direct calls are simple.
  if (CalledFunc) {
    Callees.push_back(CalledFunc);
    return Callees;
  }

  // Okay, indirect call.
  // Ask the DSCallGraph what this calls...
  TDDataStructures &TDDS = getAnalysis<TDDataStructures>();
  const DSCallGraph &DSCG = TDDS.getCallGraph();

  DSCallGraph::callee_iterator CalleeIt = DSCG.callee_begin(CS);
  DSCallGraph::callee_iterator CalleeItEnd = DSCG.callee_end(CS);
  for (; CalleeIt != CalleeItEnd; ++CalleeIt)
    Callees.push_back(*CalleeIt);

  // If the callgraph doesn't give us what we want, query the DSGraph
  // ourselves.
  if (Callees.empty()) {
    Instruction *Inst = CS.getInstruction();
    Function *Parent = Inst->getParent()->getParent();
    Value *CalledValue = CS.getCalledValue();
    DSNodeHandle &NH = TDDS.getDSGraph(*Parent)->getNodeForValue(CalledValue);

    if (!NH.isNull()) {
      // Pull the full list of functions folded into the called value's node.
      DSNode *Node = NH.getNode();
      Node->addFullFunctionList(Callees);
    }
  }

  // For debugging, dump out the callsites we are unable to get callees for.
  DEBUG(
  if (Callees.empty()) {
    errs() << "Failed to get callees for callsite:\n";
    CS.getInstruction()->dump();
  });
//
// Function: dslink_response_sub()
//
// Description:
//  Handle a subscribe request.  For each {path, sid} entry in `paths`, look
//  up the node, register the subscription in both the path->sid and the
//  sid->path maps, send the node's current value, and invoke the node's
//  on_subscribe callback.
//
// Return value:
//  0 on success; DSLINK_ALLOC_ERR on allocation failure; 1 if a map insert
//  fails.
//
int dslink_response_sub(DSLink *link, json_t *paths, json_t *rid) {
    // Acknowledge and close the request stream up front.
    if (dslink_response_send_closed(link, rid) != 0) {
        return DSLINK_ALLOC_ERR;
    }

    DSNode *root = link->responder->super_root;
    size_t index;
    json_t *value;
    json_array_foreach(paths, index, value) {
        const char *path = json_string_value(json_object_get(value, "path"));
        DSNode *node = dslink_node_get_path(root, path);
        if (!node) {
            // Unknown path: skip this entry silently.
            continue;
        }

        // Heap-allocate the sid so the map can keep it after this iteration.
        uint32_t *sid = malloc(sizeof(uint32_t));
        if (!sid) {
            return DSLINK_ALLOC_ERR;
        }
        *sid = (uint32_t) json_integer_value(json_object_get(value, "sid"));

        // Insert path -> sid.  On return, `tmp` holds any sid that was
        // previously stored under this path.
        void *tmp = sid;
        if (dslink_map_set(link->responder->value_path_subs,
                           (void *) node->path, &tmp) != 0) {
            free(sid);
            return 1;
        }
        if (tmp) {
            // A previous subscription existed: remove its sid -> path entry
            // and free the displaced sid.
            void *p = tmp;
            dslink_map_remove(link->responder->value_sid_subs, &p);
            free(tmp);
        }

        // Insert sid -> path; on failure, roll back the path map entry so
        // the two maps stay consistent.
        tmp = (void *) node->path;
        if (dslink_map_set(link->responder->value_sid_subs,
                           sid, &tmp) != 0) {
            tmp = (void *) node->path;
            dslink_map_remove(link->responder->value_path_subs, &tmp);
            free(sid);
            return 1;
        }

        // Push the current value to the new subscriber and notify the node.
        dslink_response_send_val(link, node, *sid);
        if (node->on_subscribe) {
            node->on_subscribe(link, node);
        }
    }
    return 0;
}
// // Method: getLocalPoolNodes() // // Description: // For a given function, determine which DSNodes for that function should have // local pools created for them. // void AllNodesHeuristic::getLocalPoolNodes (const Function & F, DSNodeList_t & Nodes) { // // Get the DSGraph of the specified function. If the DSGraph has no nodes, // then there is nothing we need to do. // DSGraph* G = Graphs->getDSGraph(F); if (G->node_begin() == G->node_end()) return; // // Calculate which DSNodes are reachable from globals. If a node is reachable // from a global, we will create a global pool for it, so no argument passage // is required. Graphs->getGlobalsGraph(); // Map all node reachable from this global to the corresponding nodes in // the globals graph. DSGraph::NodeMapTy GlobalsGraphNodeMapping; G->computeGToGGMapping(GlobalsGraphNodeMapping); // // Loop over all of the nodes which are non-escaping, adding pool-allocatable // ones to the NodesToPA vector. In other words, scan over the DSGraph and // find nodes for which a new pool must be created within this function. // for (DSGraph::node_iterator I = G->node_begin(), E = G->node_end(); I != E; ++I) { // Get the DSNode and, if applicable, its mirror in the globals graph DSNode * N = I; DSNode * GGN = GlobalsGraphNodeMapping[N].getNode(); // // We pool allocate all nodes. Here, we just want to make sure that this // DSNode hasn't already been assigned to a global pool. // if (!((GGN && GlobalPoolNodes.count (GGN)))) { // Otherwise, if it was not passed in from outside the function, it must // be a local pool! assert(!N->isGlobalNode() && "Should be in global mapping!"); Nodes.push_back (N); } } return; }
// printTypesForNode --prints all the types for the given NodeValue, without a newline // (meant to be called as a helper) static void printTypesForNode(llvm::raw_ostream &O, NodeValue &NV) { DSNode *N = NV.getNode(); if (N->isNodeCompletelyFolded()) { O << "Folded"; } // Go through all the types, and just dump them. // FIXME: Lifted from Printer.cpp, probably should be shared bool firstType = true; if (N->type_begin() != N->type_end()) for (DSNode::TyMapTy::const_iterator ii = N->type_begin(), ee = N->type_end(); ii != ee; ++ii) { if (!firstType) O << "::"; firstType = false; O << ii->first << ":"; if (ii->second) { bool first = true; for (svset<Type*>::const_iterator ni = ii->second->begin(), ne = ii->second->end(); ni != ne; ++ni) { if (!first) O << "|"; Type * t = *ni; t->print (O); first = false; } } else O << "VOID"; } else O << "VOID"; if (N->isArrayNode()) O << "Array"; }
/// OptimizeGlobals - This method uses information taken from DSA to optimize /// global variables. /// bool DSOpt::OptimizeGlobals(Module &M) { DSGraph &GG = TD->getGlobalsGraph(); const DSGraph::ScalarMapTy &SM = GG.getScalarMap(); bool Changed = false; for (Module::giterator I = M.gbegin(), E = M.gend(); I != E; ++I) if (!I->isExternal()) { // Loop over all of the non-external globals... // Look up the node corresponding to this global, if it exists. DSNode *GNode = 0; DSGraph::ScalarMapTy::const_iterator SMI = SM.find(I); if (SMI != SM.end()) GNode = SMI->second.getNode(); if (GNode == 0 && I->hasInternalLinkage()) { // If there is no entry in the scalar map for this global, it was never // referenced in the program. If it has internal linkage, that means we // can delete it. We don't ACTUALLY want to delete the global, just // remove anything that references the global: later passes will take // care of nuking it. if (!I->use_empty()) { I->replaceAllUsesWith(Constant::getNullValue((Type*)I->getType())); ++NumGlobalsIsolated; } } else if (GNode && GNode->isComplete()) { // If the node has not been read or written, and it is not externally // visible, kill any references to it so it can be DCE'd. if (!GNode->isModified() && !GNode->isRead() &&I->hasInternalLinkage()){ if (!I->use_empty()) { I->replaceAllUsesWith(Constant::getNullValue((Type*)I->getType())); ++NumGlobalsIsolated; } } // We expect that there will almost always be a node for this global. // If there is, and the node doesn't have the M bit set, we can set the // 'constant' bit on the global. if (!GNode->isModified() && !I->isConstant()) { I->setConstant(true); ++NumGlobalsConstanted; Changed = true; } } } return Changed; }
//
// Method: visitVAArgInst()
//
// Description:
//  Model a va_arg instruction.  On x86_64 the function's collapsed VA node
//  represents the argument area, so a pointer-typed result is merged with
//  it.  Other architectures fall into a conservative default that asserts.
//
void GraphBuilder::visitVAArgInst(VAArgInst &I) {
  Module *M = FB->getParent();
  Triple TargetTriple(M->getTargetTriple());
  Triple::ArchType Arch = TargetTriple.getArch();

  switch(Arch) {
  case Triple::x86_64: {
    // On x86_64, we have va_list as a struct {i32, i32, i8*, i8* }
    // The first i8* is where arguments generally go, but the second i8* can
    // be used also to pass arguments by register.
    // We model this by having both the i8*'s point to an array of pointers
    // to the arguments.
    DSNodeHandle Ptr = G.getVANodeFor(*FB);
    DSNodeHandle Dest = getValueDest(&I);
    if (Ptr.isNull()) return;

    // Make that the node is read and written
    Ptr.getNode()->setReadMarker()->setModifiedMarker();

    // Not updating type info, as it is already a collapsed node
    if (isa<PointerType>(I.getType()))
      Dest.mergeWith(Ptr);
    return;
  }
  default: {
    assert(0 && "What frontend generates this?");
    DSNodeHandle Ptr = getValueDest(I.getOperand(0));

    //FIXME: also updates the argument
    if (Ptr.isNull()) return;

    // Make that the node is read and written
    Ptr.getNode()->setReadMarker()->setModifiedMarker();

    // Ensure a type record exists.
    DSNode *PtrN = Ptr.getNode();
    PtrN->mergeTypeInfo(I.getType(), Ptr.getOffset());

    if (isa<PointerType>(I.getType()))
      setDestTo(I, getLink(Ptr));
  }
  }
}
//
// Function: dslink_response_unsub()
//
// Description:
//  Handle an unsubscribe request.  For each sid in `sids`, remove the
//  subscription from both the sid->path and path->sid maps and invoke the
//  node's on_unsubscribe callback.  `rid` is unused here.
//
int dslink_response_unsub(DSLink *link, json_t *sids, json_t *rid) {
    size_t index;
    json_t *value;
    json_array_foreach(sids, index, value) {
        uint32_t sid = (uint32_t) json_integer_value(value);
        // NOTE(review): dslink_map_remove appears to overwrite *p with the
        // key pointer that was stored in the map (the heap-allocated sid
        // from subscribe), which is why free(p) below is safe — confirm
        // against the map API.
        void *p = &sid;
        char *path = dslink_map_remove(link->responder->value_sid_subs, &p);
        if (path) {
            DSNode *node = dslink_node_get_path(link->responder->super_root, path);
            if (node && node->on_unsubscribe) {
                node->on_unsubscribe(link, node);
            }
            // Drop the companion path -> sid entry as well.
            void *tmp = path;
            dslink_map_remove(link->responder->value_path_subs, &tmp);
            free(p);
        }
    }
//
// Mark every function reachable from node N as externally accessible by
// adding it to the ExternallyCallable set, recursing across all out-edges.
// The Visited set keeps the recursion from looping on cyclic graphs.
//
void TDDataStructures::markReachableFunctionsExternallyAccessible(DSNode *N,
                                                   DenseSet<DSNode*> &Visited) {
  if (!N)
    return;
  if (Visited.count(N))
    return;
  Visited.insert(N);

  // Functions folded into this node are callable from outside the module.
  N->addFullFunctionSet(ExternallyCallable);

  // Walk each non-null outgoing edge: mark the target's functions and
  // recurse into the target node.
  for (DSNode::edge_iterator ii = N->edge_begin(), ee = N->edge_end();
       ii != ee; ++ii) {
    if (ii->second.isNull())
      continue;
    DSNodeHandle &NH = ii->second;
    DSNode * NN = NH.getNode();
    NN->addFullFunctionSet(ExternallyCallable);
    markReachableFunctionsExternallyAccessible(NN, Visited);
  }
}
//
// Method: getLatticeValueForField()
//
// Description:
//  Return the lattice value tracked for the structure field that Ptr points
//  to, or null when Ptr is not a pointer, is a null constant, or its node or
//  field is not being tracked.
//
LatticeValue *SFVInstVisitor::getLatticeValueForField(Value *Ptr) {
  if (!isa<PointerType>(Ptr->getType()) || isa<ConstantPointerNull>(Ptr))
    return 0;

  const DSNodeHandle *NH = &DSG.getNodeForValue(Ptr);
  DSNode *Node = NH->getNode();
  assert(Node && "Pointer doesn't have node??");

  std::multimap<DSNode*, LatticeValue*>::iterator I = NodeLVs.find(Node);
  if (I == NodeLVs.end()) return 0;  // Not a node we are still tracking.

  // Okay, next convert the node offset to a field index expression.
  std::vector<unsigned> Idxs;
  ComputeStructureFieldIndices(Node->getType(), NH->getOffset(), Idxs,
                               Node->getParentGraph()->getTargetData());

  // Scan the (contiguous) multimap range for this node and return the
  // lattice value whose index path matches the field computed above.
  for (; I != NodeLVs.end() && I->first == Node; ++I)
    if (I->second->getIndices() == Idxs)
      return I->second;
  return 0;
}
// // Function: FoldNodesInDSGraph() // // Description: // This function will take the specified DSGraph and fold all DSNodes within // it that are marked with the heap flag. // static void FoldNodesInDSGraph (DSGraph & Graph) { // Worklist of heap nodes to process std::vector<DSNodeHandle> HeapNodes; // // Go find all of the heap nodes. // DSGraph::node_iterator i; DSGraph::node_iterator e = Graph.node_end(); for (i = Graph.node_begin(); i != e; ++i) { DSNode * Node = i; if (Node->isHeapNode()) HeapNodes.push_back (DSNodeHandle(Node)); } // // Fold all of the heap nodes; this makes them type-unknown. // for (unsigned i = 0; i < HeapNodes.size(); ++i) HeapNodes[i].getNode()->foldNodeCompletely(); return; }
// // Function: makeFSParameterCallsComplete() // // Description: // Finds calls to sc.fsparameter and fills in the completeness byte which // is the last argument to such call. The second argument to the function // is the one which is analyzed for completeness. // // Inputs: // M - Reference to the the module to analyze // void CompleteChecks::makeFSParameterCallsComplete(Module &M) { Function *sc_fsparameter = M.getFunction("sc.fsparameter"); if (sc_fsparameter == NULL) return; std::set<CallInst *> toComplete; // // Iterate over all uses of sc.fsparameter and discover which have a complete // pointer argument. // for (Function::use_iterator i = sc_fsparameter->use_begin(); i != sc_fsparameter->use_end(); ++i) { CallInst *CI; CI = dyn_cast<CallInst>(*i); if (CI == 0 || CI->getCalledFunction() != sc_fsparameter) continue; // // Get the parent function to which this call belongs. // Function *P = CI->getParent()->getParent(); Value *PtrOperand = CI->getOperand(2); DSNode *N = getDSNodeHandle(PtrOperand, P).getNode(); if (N == 0 || N->isExternalNode() || N->isIncompleteNode() || N->isUnknownNode() || N->isPtrToIntNode() || N->isIntToPtrNode()) { continue; } toComplete.insert(CI); } // // Fill in a 1 for each call instruction that has a complete pointer // argument. // Type *int8 = Type::getInt8Ty(M.getContext()); Constant *complete = ConstantInt::get(int8, 1); for (std::set<CallInst *>::iterator i = toComplete.begin(); i != toComplete.end(); ++i) { CallInst *CI = *i; CI->setOperand(4, complete); } return; }
//
// Method: AssignToPools()
//
// Description:
//  Of the candidate nodes in NodesToPA, pool-allocate only those that are
//  reachable from some memory object in the graph (a node counts as
//  reachable from itself).  Each selected node becomes its own pool.
//
void AllButUnreachableFromMemoryHeuristic::AssignToPools (
                                          const DSNodeList_t &NodesToPA,
                                          Function *F, DSGraph* G,
                                          std::vector<OnePool> &ResultPools) {
  // Build a set of all nodes that are reachable from another node in the
  // graph.  Here we ignore scalar nodes that are only globals as they are
  // often global pointers to big arrays.
  std::set<const DSNode*> ReachableFromMemory;
  for (DSGraph::node_iterator I = G->node_begin(), E = G->node_end();
       I != E; ++I) {
    DSNode *N = I;
#if 0
    //
    // Ignore nodes that are just globals and not arrays.
    //
    if (N->isArray() || N->isHeapNode() || N->isAllocaNode() ||
        N->isUnknownNode())
#endif
    // If a node is marked, all children are too.
    if (!ReachableFromMemory.count(N)) {
      for (DSNode::iterator NI = N->begin(), E = N->end(); NI != E; ++NI) {
        //
        // Sometimes this results in a NULL DSNode.  Skip it if that is the
        // case.
        //
        if (!(*NI)) continue;

        //
        // Do a depth-first iteration over the DSGraph starting with this
        // child node.  The df_ext_* iterators record every node they visit
        // directly into ReachableFromMemory, so the loop body is empty.
        //
        for (df_ext_iterator<const DSNode*>
             DI = df_ext_begin(*NI, ReachableFromMemory),
             E = df_ext_end(*NI, ReachableFromMemory); DI != E; ++DI)
          /*empty*/;
      }
    }
  }

  // Only pool allocate a node if it is reachable from a memory object (itself
  // included).
  for (unsigned i = 0, e = NodesToPA.size(); i != e; ++i)
    if (ReachableFromMemory.count(NodesToPA[i]))
      ResultPools.push_back(OnePool(NodesToPA[i]));
}
// // TODO // template<class dsa> bool TypeSafety<dsa>::isFieldDisjoint (const GlobalValue * V, unsigned offset) { // // Get the DSNode for the specified value. // DSNodeHandle DH = getDSNodeHandle (V); DSNode *node = DH.getNode(); //unsigned offset = DH.getOffset(); DEBUG(errs() << " check fields overlap at: " << offset << "\n"); // // If there is no DSNode, claim that it is not type safe. // if (DH.isNull()) { return false; } // // If the DSNode is completely folded, then we know for sure that it is not // type-safe. // if (node->isNodeCompletelyFolded()) return false; // // If the memory object represented by this DSNode can be manipulated by // external code or DSA has otherwise not finished analyzing all operations // on it, declare it type-unsafe. // if (node->isExternalNode() || node->isIncompleteNode()) return false; // // If the pointer to the memory object came from some source not understood // by DSA or somehow came from/escapes to the realm of integers, declare it // type-unsafe. // if (node->isUnknownNode() || node->isIntToPtrNode() || node->isPtrToIntNode()) { return false; } return !((NodeInfo[node])[offset]); }
// run - Calculate the top down data structure graphs for each function in the
// program.
//
bool TDDataStructures::runOnModule(Module &M) {
  // Seed this pass from the bottom-up (or equivalence-class bottom-up) DSA
  // results.
  init(useEQBU ? &getAnalysis<EquivBUDataStructures>()
               : &getAnalysis<BUDataStructures>(), true, true, true, false);

  // Figure out which functions must not mark their arguments complete because
  // they are accessible outside this compilation unit.  Currently, these
  // arguments are functions which are reachable by incomplete or external
  // nodes in the globals graph.
  const DSScalarMap &GGSM = GlobalsGraph->getScalarMap();
  DenseSet<DSNode*> Visited;
  for (DSScalarMap::global_iterator I=GGSM.global_begin(), E=GGSM.global_end();
       I != E; ++I) {
    DSNode *N = GGSM.find(*I)->second.getNode();
    if (N->isIncompleteNode() || N->isExternalNode())
      markReachableFunctionsExternallyAccessible(N, Visited);
  }

  // Loop over unresolved call nodes.  Any functions passed into (but not
  // returned!) from unresolvable call nodes may be invoked outside of the
  // current module.
  for (DSGraph::afc_iterator I = GlobalsGraph->afc_begin(),
         E = GlobalsGraph->afc_end(); I != E; ++I)
    for (unsigned arg = 0, e = I->getNumPtrArgs(); arg != e; ++arg)
      markReachableFunctionsExternallyAccessible(I->getPtrArg(arg).getNode(),
                                                 Visited);
  Visited.clear();

  // Clear Aux of Globals Graph to be refilled in later by post-TD unresolved
  // functions
  GlobalsGraph->getAuxFunctionCalls().clear();

  // Functions without internal linkage are definitely externally callable!
  for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
    if (!I->isDeclaration() && !I->hasInternalLinkage() &&
        !I->hasPrivateLinkage())
      ExternallyCallable.insert(I);

  // Debug code to print the functions that are externally callable
#if 0
  for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
    if (ExternallyCallable.count(I)) {
      errs() << "ExternallyCallable: " << I->getNameStr() << "\n";
    }
#endif

  // We want to traverse the call graph in reverse post-order.  To do this, we
  // calculate a post-order traversal, then reverse it.
  DenseSet<DSGraph*> VisitedGraph;
  std::vector<DSGraph*> PostOrder;

  {TIME_REGION(XXX, "td:Compute postorder");

  // Calculate top-down from main...
  if (Function *F = M.getFunction("main"))
    ComputePostOrder(*F, VisitedGraph, PostOrder);

  // Next calculate the graphs for each unreachable function...
  for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
    if (!I->isDeclaration())
      ComputePostOrder(*I, VisitedGraph, PostOrder);

  VisitedGraph.clear();  // Release memory!
  }

  {TIME_REGION(XXX, "td:Inline stuff");

  // Visit each of the graphs in reverse post-order now!
  while (!PostOrder.empty()) {
    InlineCallersIntoGraph(PostOrder.back());
    PostOrder.pop_back();
  }
  }

  // Free the IndCallMap.
  while (!IndCallMap.empty()) {
    delete IndCallMap.begin()->second;
    IndCallMap.erase(IndCallMap.begin());
  }

  // Recompute the global value equivalence classes after inlining.
  formGlobalECs();

  ExternallyCallable.clear();
  GlobalsGraph->removeTriviallyDeadNodes();
  GlobalsGraph->computeExternalFlags(DSGraph::DontMarkFormalsExternal);
  GlobalsGraph->computeIntPtrFlags();

  // Make sure each graph has updated external information about globals
  // in the globals graph.
  VisitedGraph.clear();
  for (Module::iterator F = M.begin(); F != M.end(); ++F) {
    if (!(F->isDeclaration())){
      DSGraph *Graph = getOrCreateGraph(F);
      if (!VisitedGraph.insert(Graph).second) continue;

      cloneGlobalsInto(Graph, DSGraph::DontCloneCallNodes |
                       DSGraph::DontCloneAuxCallNodes);

      Graph->computeExternalFlags(DSGraph::DontMarkFormalsExternal);
      Graph->computeIntPtrFlags();
      // Clean up uninteresting nodes
      Graph->removeDeadNodes(0);
    }
  }

  // CBU contains the correct call graph.
  // Restore it, so that subsequent passes and clients can get it.
  restoreCorrectCallGraph();

  /// Added by Zhiyuan: print out the DSGraph.
  if (llvm::DebugFlag) {
    print(errs(), &M);
  }
  return false;
}
//
// Method: findIndTargets()
//
// Description:
//  Populate AllSites, IndMap, and CompleteSites for every call site in the
//  module.  Direct calls map straight to their callee; indirect calls are
//  resolved through the DSCallGraph, with each reported callee expanded to
//  the members of its SCC that appear in the globals graph.
//
void CallTargetFinder<dsa>::findIndTargets(Module &M)
{
  dsa* T = &getAnalysis<dsa>();
  const DSCallGraph & callgraph = T->getCallGraph();
  DSGraph* G = T->getGlobalsGraph();
  DSGraph::ScalarMapTy& SM = G->getScalarMap();
  for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
    if (!I->isDeclaration())
      for (Function::iterator F = I->begin(), FE = I->end(); F != FE; ++F)
        for (BasicBlock::iterator B = F->begin(), BE = F->end(); B != BE; ++B)
          if (isa<CallInst>(B) || isa<InvokeInst>(B)) {
            CallSite cs(B);
            AllSites.push_back(cs);
            Function* CF = cs.getCalledFunction();

            // Ignore calls through undef or inline assembly entirely.
            if (isa<UndefValue>(cs.getCalledValue()))
              continue;
            if (isa<InlineAsm>(cs.getCalledValue()))
              continue;

            //
            // If the called function is casted from one function type to
            // another, peer into the cast instruction and pull out the actual
            // function being called.
            //
            if (!CF)
              CF = dyn_cast<Function>(cs.getCalledValue()->stripPointerCasts());

            if (!CF) {
              Value * calledValue = cs.getCalledValue()->stripPointerCasts();
              if (isa<ConstantPointerNull>(calledValue)) {
                // A call through a literal null pointer: counted as direct
                // and complete (it has no real targets).
                ++DirCall;
                CompleteSites.insert(cs);
              } else {
                IndCall++;

                // Expand every callee reported by the call graph to all the
                // members of its SCC that the globals graph knows about.
                DSCallGraph::callee_iterator csi = callgraph.callee_begin(cs),
                                   cse = callgraph.callee_end(cs);
                while(csi != cse) {
                  const Function *F = *csi;
                  DSCallGraph::scc_iterator sccii = callgraph.scc_begin(F),
                    sccee = callgraph.scc_end(F);
                  for(;sccii != sccee; ++sccii) {
                    DSGraph::ScalarMapTy::const_iterator I =
                      SM.find(SM.getLeaderForGlobal(*sccii));
                    if (I != SM.end()) {
                      IndMap[cs].push_back (*sccii);
                    }
                  }
                  ++csi;
                }

                // Also include the SCC of the calling function itself.
                const Function *F1 =
                  (cs).getInstruction()->getParent()->getParent();
                F1 = callgraph.sccLeader(&*F1);
                DSCallGraph::scc_iterator sccii = callgraph.scc_begin(F1),
                  sccee = callgraph.scc_end(F1);
                for(;sccii != sccee; ++sccii) {
                  DSGraph::ScalarMapTy::const_iterator I =
                    SM.find(SM.getLeaderForGlobal(*sccii));
                  if (I != SM.end()) {
                    IndMap[cs].push_back (*sccii);
                  }
                }

                DSNode* N = T->getDSGraph(*cs.getCaller())
                  ->getNodeForValue(cs.getCalledValue()).getNode();
                assert (N && "CallTarget: findIndTargets: No DSNode!");

                // A complete, non-external node with recorded targets means
                // we found the full callee set for this site.
                if (!N->isIncompleteNode() && !N->isExternalNode() &&
                    IndMap[cs].size()) {
                  CompleteSites.insert(cs);
                  ++CompleteInd;
                }
                if (!N->isIncompleteNode() && !N->isExternalNode() &&
                    !IndMap[cs].size()) {
                  ++CompleteEmpty;
                  DEBUG(errs() << "Call site empty: '"
                        << cs.getInstruction()->getName()
                        << "' In '"
                        << cs.getInstruction()->getParent()->getParent()->getName()
                        << "'\n");
                }
              }
            } else {
              // Direct call: the callee is statically known.
              ++DirCall;
              IndMap[cs].push_back(CF);
              CompleteSites.insert(cs);
            }
          }

  //Print the indirect call Map:
  for(std::map<CallSite, std::vector<const Function*> >::iterator
        indMapIt = IndMap.begin(); indMapIt != IndMap.end(); ++indMapIt ) {
    CallSite CS = indMapIt->first;
    Instruction* Inst = CS.getInstruction();
    Inst->dump();
  }
}
/// /// getValueDest - Return the DSNode that the actual value points to. /// DSNodeHandle GraphBuilder::getValueDest(Value* V) { if (isa<Constant>(V) && cast<Constant>(V)->isNullValue()) return 0; // Null doesn't point to anything, don't add to ScalarMap! DSNodeHandle &NH = G.getNodeForValue(V); if (!NH.isNull()) return NH; // Already have a node? Just return it... // Otherwise we need to create a new node to point to. // Check first for constant expressions that must be traversed to // extract the actual value. DSNode* N; if (Function * F = dyn_cast<Function > (V)) { // Create a new global node for this function. N = createNode(); N->addFunction(F); if (F->isDeclaration()) N->setExternFuncMarker(); } else if (GlobalValue * GV = dyn_cast<GlobalValue > (V)) { // Create a new global node for this global variable. N = createNode(); N->addGlobal(GV); if (GV->isDeclaration()) N->setExternGlobalMarker(); } else if (Constant *C = dyn_cast<Constant>(V)) { if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) { if (CE->isCast()) { if (isa<PointerType>(CE->getOperand(0)->getType())) NH = getValueDest(CE->getOperand(0)); else NH = createNode()->setUnknownMarker(); } else if (CE->getOpcode() == Instruction::GetElementPtr) { visitGetElementPtrInst(*CE); assert(G.hasNodeForValue(CE) && "GEP didn't get processed right?"); NH = G.getNodeForValue(CE); } else { // This returns a conservative unknown node for any unhandled ConstExpr NH = createNode()->setUnknownMarker(); } if (NH.isNull()) { // (getelementptr null, X) returns null G.eraseNodeForValue(V); return 0; } return NH; } else if (isa<UndefValue>(C)) { G.eraseNodeForValue(V); return 0; } else if (isa<GlobalAlias>(C)) { // XXX: Need more investigation // According to Andrew, DSA is broken on global aliasing, since it does // not handle the aliases of parameters correctly. Here is only a quick // fix for some special cases. 
NH = getValueDest(cast<GlobalAlias>(C)->getAliasee()); return NH; } else if (isa<BlockAddress>(C)) { // // FIXME: This may not be quite right; we should probably add a // BlockAddress flag to the DSNode instead of using the unknown flag. // N = createNode(); N->setUnknownMarker(); } else { errs() << "Unknown constant: " << *C << "\n"; assert(0 && "Unknown constant type!"); } N = createNode(); // just create a shadow node } else { // Otherwise just create a shadow node N = createNode(); } NH.setTo(N, 0); // Remember that we are pointing to it... return NH; }
// // Function: makeCStdLibCallsComplete() // // Description: // Fills in completeness information for all calls of a given CStdLib function // assumed to be of the form: // // pool_X(POOL *p1, ..., POOL *pN, void *a1, ..., void *aN, ..., uint8_t c); // // Specifically, this function assumes that there are as many pointer arguments // to check as there are initial pool arguments, and the pointer arguments // follow the pool arguments in corresponding order. Also, it is assumed that // the final argument to the function is a byte sized bit vector. // // This function fills in this final byte with a constant value whose ith // bit is set exactly when the ith pointer argument is complete. // // Inputs: // // F - A pointer to the CStdLib function appearing in the module // (non-null). // PoolArgs - The number of initial pool arguments for which a // corresponding pointer value requires a completeness check // (required to be at most 8). // void CompleteChecks::makeCStdLibCallsComplete(Function *F, unsigned PoolArgs) { assert(F != 0 && "Null function argument!"); assert(PoolArgs <= 8 && \ "Only up to 8 arguments are supported by CStdLib completeness checks!"); Value::use_iterator U = F->use_begin(); Value::use_iterator E = F->use_end(); // // Hold the call instructions that need changing. // typedef std::pair<CallInst *, uint8_t> VectorReplacement; std::set<VectorReplacement> callsToChange; Type *int8ty = Type::getInt8Ty(F->getContext()); FunctionType *F_type = F->getFunctionType(); // // Verify the type of the function is as expected. // // There should be as many pointer parameters to check for completeness // as there are pool parameters. The last parameter should be a byte. 
// assert(F_type->getNumParams() >= PoolArgs * 2 && \ "Not enough arguments to transformed CStdLib function call!"); for (unsigned arg = PoolArgs; arg < PoolArgs * 2; ++arg) assert(isa<PointerType>(F_type->getParamType(arg)) && \ "Expected pointer argument to function!"); // // This is the position of the vector operand in the call. // unsigned vect_position = F_type->getNumParams(); assert(F_type->getParamType(vect_position - 1) == int8ty && \ "Last parameter to the function should be a byte!"); // // Iterate over all calls of the function in the module, computing the // vectors for each call as it is found. // for (; U != E; ++U) { CallInst *CI; if ((CI = dyn_cast<CallInst>(*U)) && \ CI->getCalledValue()->stripPointerCasts() == F) { uint8_t vector = 0x0; // // Get the parent function to which this instruction belongs. // Function *P = CI->getParent()->getParent(); // // Iterate over the pointer arguments that need completeness checking // and build the completeness vector. // for (unsigned arg = 0; arg < PoolArgs; ++arg) { bool complete = true; // // Go past all the pool arguments to get the pointer to check. // Value *V = CI->getOperand(1 + PoolArgs + arg); // // Check for completeness of the pointer using DSA and // set the bit in the vector accordingly. // DSNode *N; if ((N = getDSNodeHandle(V, P).getNode()) && (N->isExternalNode() || N->isIncompleteNode() || N->isUnknownNode() || N->isIntToPtrNode() || N->isPtrToIntNode()) ) { complete = false; } if (complete) vector |= (1 << arg); } // // Add the instruction and vector to the set of instructions to change. // callsToChange.insert(VectorReplacement(CI, vector)); } } // // Iterate over all call instructions that need changing, modifying the // final operand of the call to hold the bit vector value. 
// std::set<VectorReplacement>::iterator change = callsToChange.begin(); std::set<VectorReplacement>::iterator change_end = callsToChange.end(); while (change != change_end) { Constant *vect_value = ConstantInt::get(int8ty, change->second); change->first->setOperand(vect_position, vect_value); ++change; } return; }
//
// Method: visitGetElementPtrInst()
//
// Description:
//  Compute the DSNodeHandle (node + byte offset) for the result of a GEP
//  instruction or GEP constant expression and record it in the scalar map.
//  Struct indices advance the byte offset; array and non-zero pointer
//  indices may mark the node as an array or fold it completely
//  (type-unknown), at which point offset tracking stops.
//
void GraphBuilder::visitGetElementPtrInst(User &GEP) {
  //
  // Ensure that the indexed pointer has a DSNode.
  //
  DSNodeHandle Value = getValueDest(GEP.getOperand(0));
  if (Value.isNull())
    Value = createNode();

  //
  // There are a few quick and easy cases to handle.  If the DSNode of the
  // indexed pointer is already folded, then we know that the result of the
  // GEP will have the same offset into the same DSNode
  // as the indexed pointer.
  //
  if (!Value.isNull() &&
      Value.getNode()->isNodeCompletelyFolded()) {
    setDestTo(GEP, Value);
    return;
  }

  //
  // Okay, no easy way out.  Calculate the offset into the object being
  // indexed.
  //
  int Offset = 0;

  // FIXME: I am not sure if the code below is completely correct (especially
  //        if we start doing fancy analysis on non-constant array indices).
  //        What if the array is indexed using a larger index than its
  //        declared size?  Does the LLVM verifier catch such issues?
  //

  //
  // Determine the offset (in bytes) between the result of the GEP and the
  // GEP's pointer operand.
  //
  // Note: All of these subscripts are indexing INTO the elements we have...
  //
  // FIXME: We can do better for array indexing.  First, if the array index is
  //        constant, we can determine how much farther we're moving the
  //        pointer.  Second, we can try to use the results of other analysis
  //        passes (e.g., ScalarEvolution) to find min/max values to do less
  //        conservative type-folding.
  //
  for (gep_type_iterator I = gep_type_begin(GEP), E = gep_type_end(GEP);
       I != E; ++I)
    if (StructType *STy = dyn_cast<StructType>(*I)) {
      // indexing into a structure
      // next index must be a constant
      const ConstantInt* CUI = cast<ConstantInt>(I.getOperand());
      int FieldNo = CUI->getSExtValue();

      //
      // Grow the node so that it can hold this struct at the current offset,
      // unless it is a (non-empty) array node whose size is managed by the
      // array-indexing cases below.
      //
      unsigned requiredSize =
        TD.getTypeAllocSize(STy) + Value.getOffset() + Offset;
      if(!Value.getNode()->isArrayNode() || Value.getNode()->getSize() <= 0){
        if (requiredSize > Value.getNode()->getSize())
          Value.getNode()->growSize(requiredSize);
      }

      // increment the offset by the actual byte offset being accessed
      Offset += (unsigned)TD.getStructLayout(STy)->getElementOffset(FieldNo);

      if(TypeInferenceOptimize) {
        if(ArrayType* AT = dyn_cast<ArrayType>(STy->getTypeAtIndex(FieldNo))) {
          // Record the array type of this field for type inference.
          Value.getNode()->mergeTypeInfo(AT, Value.getOffset() + Offset);
          if((++I) == E) {
            break;
          }
          // Check if we are still indexing into an array.
          // We only record the topmost array type of any nested array.
          // Keep skipping indexes till we reach a non-array type.
          // J is the type of the next index.
          // Uncomment the line below to get all the nested types.
          gep_type_iterator J = I;
          while(isa<ArrayType>(*(++J))) {
            // Value.getNode()->mergeTypeInfo(AT1, Value.getOffset() + Offset);
            if((++I) == E) {
              break;
            }
            J = I;
          }
          if((I) == E) {
            break;
          }
        }
      }
    } else if(ArrayType *ATy = dyn_cast<ArrayType>(*I)) {
      // indexing into an array.
      Value.getNode()->setArrayMarker();
      Type *CurTy = ATy->getElementType();

      //
      // Ensure the node is at least as large as one element of the array;
      // for nested arrays, use the innermost non-array element type.
      //
      if(!isa<ArrayType>(CurTy) && Value.getNode()->getSize() <= 0) {
        Value.getNode()->growSize(TD.getTypeAllocSize(CurTy));
      } else if(isa<ArrayType>(CurTy) && Value.getNode()->getSize() <= 0){
        Type *ETy = (cast<ArrayType>(CurTy))->getElementType();
        while(isa<ArrayType>(ETy)) {
          ETy = (cast<ArrayType>(ETy))->getElementType();
        }
        Value.getNode()->growSize(TD.getTypeAllocSize(ETy));
      }

      // Find if the DSNode belongs to the array
      // If not fold.
      if((Value.getOffset() || Offset != 0)
         || (!isa<ArrayType>(CurTy)
             && (Value.getNode()->getSize() != TD.getTypeAllocSize(CurTy)))) {
        Value.getNode()->foldNodeCompletely();
        Value.getNode();   // NOTE(review): result unused; presumably this
                           // collapses forwarding left by the fold -- confirm.
        Offset = 0;
        break;
      }
    } else if (const PointerType *PtrTy = dyn_cast<PointerType>(*I)) {
      Type *CurTy = PtrTy->getElementType();

      //
      // Unless we're advancing the pointer by zero bytes via array indexing,
      // fold the node (i.e., mark it type-unknown) and indicate that we're
      // indexing zero bytes into the object.
      //
      // Note that we break out of the loop if we fold the node.  Once
      // something is folded, all values within it are considered to alias.
      //
      if (!isa<Constant>(I.getOperand()) ||
          !cast<Constant>(I.getOperand())->isNullValue()) {
        Value.getNode()->setArrayMarker();

        // Same element-size bookkeeping as the array case above.
        if(!isa<ArrayType>(CurTy) && Value.getNode()->getSize() <= 0){
          Value.getNode()->growSize(TD.getTypeAllocSize(CurTy));
        } else if(isa<ArrayType>(CurTy) && Value.getNode()->getSize() <= 0){
          Type *ETy = (cast<ArrayType>(CurTy))->getElementType();
          while(isa<ArrayType>(ETy)) {
            ETy = (cast<ArrayType>(ETy))->getElementType();
          }
          Value.getNode()->growSize(TD.getTypeAllocSize(ETy));
        }

        if(Value.getOffset() || Offset != 0
           || (!isa<ArrayType>(CurTy)
               && (Value.getNode()->getSize() != TD.getTypeAllocSize(CurTy)))) {
          Value.getNode()->foldNodeCompletely();
          Value.getNode();   // NOTE(review): result unused -- see note above.
          Offset = 0;
          break;
        }
      }
    }

  // Add in the offset calculated...
  Value.setOffset(Value.getOffset()+Offset);

  // Check the offset
  DSNode *N = Value.getNode();
  if (N) N->checkOffsetFoldIfNeeded(Value.getOffset());

  // Value is now the pointer we want to GEP to be...
  setDestTo(GEP, Value);
}
/// /// Method: visitIntrinsic() /// /// Description: /// Generate correct DSNodes for calls to LLVM intrinsic functions. /// /// Inputs: /// CS - The CallSite representing the call or invoke to the intrinsic. /// F - A pointer to the function called by the call site. /// /// Return value: /// true - This intrinsic is properly handled by this method. /// false - This intrinsic is not recognized by DSA. /// bool GraphBuilder::visitIntrinsic(CallSite CS, Function *F) { ++NumIntrinsicCall; // // If this is a debug intrinsic, then don't do any special processing. // if (isa<DbgInfoIntrinsic>(CS.getInstruction())) return true; switch (F->getIntrinsicID()) { case Intrinsic::vastart: { visitVAStartInst(CS); return true; } case Intrinsic::vacopy: { // Simply merge the two arguments to va_copy. // This results in loss of precision on the temporaries used to manipulate // the va_list, and so isn't a big deal. In theory we would build a // separate graph for this (like the one created in visitVAStartNode) // and only merge the node containing the variable arguments themselves. DSNodeHandle destNH = getValueDest(CS.getArgument(0)); DSNodeHandle srcNH = getValueDest(CS.getArgument(1)); destNH.mergeWith(srcNH); return true; } case Intrinsic::stacksave: { DSNode * Node = createNode(); Node->setAllocaMarker()->setIncompleteMarker()->setUnknownMarker(); Node->foldNodeCompletely(); setDestTo (*(CS.getInstruction()), Node); return true; } case Intrinsic::stackrestore: getValueDest(CS.getInstruction()).getNode()->setAllocaMarker() ->setIncompleteMarker() ->setUnknownMarker() ->foldNodeCompletely(); return true; case Intrinsic::vaend: case Intrinsic::memcpy: case Intrinsic::memmove: { // Merge the first & second arguments, and mark the memory read and // modified. 
DSNodeHandle RetNH = getValueDest(*CS.arg_begin()); RetNH.mergeWith(getValueDest(*(CS.arg_begin()+1))); if (DSNode *N = RetNH.getNode()) N->setModifiedMarker()->setReadMarker(); return true; } case Intrinsic::memset: // Mark the memory modified. if (DSNode *N = getValueDest(*CS.arg_begin()).getNode()) N->setModifiedMarker(); return true; case Intrinsic::eh_exception: { DSNode * Node = createNode(); Node->setIncompleteMarker(); Node->foldNodeCompletely(); setDestTo (*(CS.getInstruction()), Node); return true; } case Intrinsic::eh_selector: { for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end(); I != E; ++I) { if (isa<PointerType>((*I)->getType())) { DSNodeHandle Ptr = getValueDest(*I); if(Ptr.getNode()) { Ptr.getNode()->setReadMarker(); Ptr.getNode()->setIncompleteMarker(); } } } return true; } case Intrinsic::eh_typeid_for: { DSNodeHandle Ptr = getValueDest(*CS.arg_begin()); Ptr.getNode()->setReadMarker(); Ptr.getNode()->setIncompleteMarker(); return true; } case Intrinsic::prefetch: return true; case Intrinsic::objectsize: return true; // // The return address/frame address aliases with the stack, // is type-unknown, and should // have the unknown flag set since we don't know where it goes. 
// case Intrinsic::returnaddress: case Intrinsic::frameaddress: { DSNode * Node = createNode(); Node->setAllocaMarker()->setIncompleteMarker()->setUnknownMarker(); Node->foldNodeCompletely(); setDestTo (*(CS.getInstruction()), Node); return true; } // Process lifetime intrinsics case Intrinsic::lifetime_start: case Intrinsic::lifetime_end: case Intrinsic::invariant_start: case Intrinsic::invariant_end: return true; default: { //ignore pointer free intrinsics if (!isa<PointerType>(F->getReturnType())) { bool hasPtr = false; for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E && !hasPtr; ++I) if (isa<PointerType>(I->getType())) hasPtr = true; if (!hasPtr) return true; } DEBUG(errs() << "[dsa:local] Unhandled intrinsic: " << F->getName() << "\n"); assert(0 && "Unhandled intrinsic"); return false; } } }
void PoolRegisterElimination::removeSingletonRegistrations (const char * name) { // // Scan through all uses of the registration function and see if it can be // safely removed. If so, schedule it for removal. // std::vector<CallInst*> toBeRemoved; Function * F = intrinsic->getIntrinsic(name).F; // // Look for and record all registrations that can be deleted. // for (Value::use_iterator UI=F->use_begin(), UE=F->use_end(); UI != UE; ++UI) { // // Get the pointer to the registered object. // CallInst * CI = cast<CallInst>(*UI); Value * Ptr = intrinsic->getValuePointer(CI); // // Lookup the DSNode for the value in the function's DSGraph. // DSGraph * TDG = dsaPass->getDSGraph(*(CI->getParent()->getParent())); DSNodeHandle DSH = TDG->getNodeForValue(Ptr); assert ((!(DSH.isNull())) && "No DSNode for Value!\n"); // // If the object being registered is the same size as that found in the // DSNode, then we know it's a singleton object. The run-time doesn't need // such objects registered in the splay trees, so we can remove the // registration function. // DSNode * N = DSH.getNode(); Value * Size = intrinsic->getObjectSize (Ptr->stripPointerCasts()); if (Size) { if (ConstantInt * C = dyn_cast<ConstantInt>(Size)) { unsigned long size = C->getZExtValue(); if (size == N->getSize()) { toBeRemoved.push_back(CI); continue; } } } } // // Update the statistics. // if (toBeRemoved.size()) { RemovedRegistration += toBeRemoved.size(); SingletonRegistrations += toBeRemoved.size(); } // // Remove the unnecesary registrations. // std::vector<CallInst*>::iterator it, end; for (it = toBeRemoved.begin(), end = toBeRemoved.end(); it != end; ++it) { (*it)->eraseFromParent(); } }
// // Function: MergeConstantInitIntoNode() // // Description: // Merge the specified constant into the specified DSNode. // void GraphBuilder::MergeConstantInitIntoNode(DSNodeHandle &NH, Type* Ty, Constant *C) { // // Ensure a type-record exists... // DSNode *NHN = NH.getNode(); //NHN->mergeTypeInfo(Ty, NH.getOffset()); // // If we've found something of pointer type, create or find its DSNode and // make a link from the specified DSNode to the new DSNode describing the // pointer we've just found. // if (isa<PointerType>(Ty)) { NHN->mergeTypeInfo(Ty, NH.getOffset()); NH.addEdgeTo(getValueDest(C)); return; } // // If the type of the object (array element, structure field, etc.) is an // integer or floating point type, then just ignore it. It has no DSNode. // if (Ty->isIntOrIntVectorTy() || Ty->isFPOrFPVectorTy()) return; // // Handle aggregate constants. // if (ConstantArray *CA = dyn_cast<ConstantArray>(C)) { // // For an array, we don't worry about different elements pointing to // different objects; we essentially pretend that all array elements alias. // Type * ElementType = cast<ArrayType>(Ty)->getElementType(); for (unsigned i = 0, e = CA->getNumOperands(); i != e; ++i) { Constant * ConstElement = cast<Constant>(CA->getOperand(i)); MergeConstantInitIntoNode(NH, ElementType, ConstElement); } } else if (ConstantStruct *CS = dyn_cast<ConstantStruct>(C)) { // // For a structure, we need to merge each element of the constant structure // into the specified DSNode. However, we must also handle structures that // end with a zero-length array ([0 x sbyte]); this is a common C idiom // that continues to plague the world. // //NHN->mergeTypeInfo(Ty, NH.getOffset()); const StructLayout *SL = TD.getStructLayout(cast<StructType>(Ty)); for (unsigned i = 0, e = CS->getNumOperands(); i != e; ++i) { DSNode *NHN = NH.getNode(); if (SL->getElementOffset(i) < SL->getSizeInBytes()) { // // Get the type and constant value of this particular element of the // constant structure. 
// Type * ElementType = cast<StructType>(Ty)->getElementType(i); Constant * ConstElement = cast<Constant>(CS->getOperand(i)); // // Get the offset (in bytes) into the memory object that we're // analyzing. // unsigned offset = NH.getOffset()+(unsigned)SL->getElementOffset(i); NHN->mergeTypeInfo(ElementType, offset); // // Create a new DSNodeHandle. This DSNodeHandle will point to the same // DSNode as the one we're constructing for our caller; however, it // will point into a different offset into that DSNode. // DSNodeHandle NewNH (NHN, offset); assert ((NHN->isNodeCompletelyFolded() || (NewNH.getOffset() == offset)) && "Need to resize DSNode!"); // // Recursively merge in this element of the constant struture into the // DSNode. // MergeConstantInitIntoNode(NewNH, ElementType, ConstElement); } else if (SL->getElementOffset(i) == SL->getSizeInBytes()) { // // If this is one of those cute structures that ends with a zero-length // array, just fold the DSNode now and get it over with. // DEBUG(errs() << "Zero size element at end of struct\n" ); NHN->foldNodeCompletely(); } else { assert(0 && "type was smaller than offsets of struct layout indicate"); } } } else if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) { // // Undefined values and NULL pointers have no DSNodes, so they do nothing. // } else { assert(0 && "Unknown constant type!"); } }