/// fence memory_order /// becomes: /// call void @llvm.nacl.atomic.fence(memory_order) /// and /// call void asm sideeffect "", "~{memory}"() /// fence seq_cst /// call void asm sideeffect "", "~{memory}"() /// becomes: /// call void asm sideeffect "", "~{memory}"() /// call void @llvm.nacl.atomic.fence.all() /// call void asm sideeffect "", "~{memory}"() /// Note that the assembly gets eliminated by the -remove-asm-memory pass. void AtomicVisitor::visitFenceInst(FenceInst &I) { return; // XXX EMSCRIPTEN Type *T = Type::getInt32Ty(C); // Fences aren't overloaded on type. BasicBlock::InstListType &IL(I.getParent()->getInstList()); bool isFirst = IL.empty() || &*I.getParent()->getInstList().begin() == &I; bool isLast = IL.empty() || &*I.getParent()->getInstList().rbegin() == &I; CallInst *PrevC = isFirst ? 0 : dyn_cast<CallInst>(I.getPrevNode()); CallInst *NextC = isLast ? 0 : dyn_cast<CallInst>(I.getNextNode()); if ((PrevC && PrevC->isInlineAsm() && cast<InlineAsm>(PrevC->getCalledValue())->isAsmMemory()) && (NextC && NextC->isInlineAsm() && cast<InlineAsm>(NextC->getCalledValue())->isAsmMemory()) && I.getOrdering() == SequentiallyConsistent) { const NaCl::AtomicIntrinsics::AtomicIntrinsic *Intrinsic = findAtomicIntrinsic(I, Intrinsic::nacl_atomic_fence_all, T); replaceInstructionWithIntrinsicCall(I, Intrinsic, T, T, ArrayRef<Value *>()); } else { const NaCl::AtomicIntrinsics::AtomicIntrinsic *Intrinsic = findAtomicIntrinsic(I, Intrinsic::nacl_atomic_fence, T); Value *Args[] = {freezeMemoryOrder(I, I.getOrdering())}; replaceInstructionWithIntrinsicCall(I, Intrinsic, T, T, Args); } }
// Determine if the given call instruction should be registered. void RegisterVarargCallSites::visitCallInst(CallInst &I) { // // Do not register inline assembly instructions. // if (I.isInlineAsm()) return; CallSite CS(&I); Function *f = CS.getCalledFunction(); // If this is an indirect call, conservatively register it. if (f == 0) { toRegister.push_back(CS); return; } // Check whether we know to register this function. map<Function *, bool>::iterator found = shouldRegister.find(f); // If we've found the function, register the call site if we know that this // function should be registered. if (found != shouldRegister.end()) { if (shouldRegister[f]) toRegister.push_back(CS); } // The function has not been encountered yet. // Determine if calls to this function should be registered. else { if (f->isVarArg() && !isExternalVarargFunction(f->getName().str())) { shouldRegister[f] = true; toRegister.push_back(CS); } else shouldRegister[f] = false; } }
// Decide whether this call site requires a statepoint.
// GC leaf functions, inline assembly, and calls that already participate in
// the statepoint machinery (statepoint/gc.relocate/gc.result) do not.
static bool needsStatepoint(const CallSite &CS, const TargetLibraryInfo &TLI) {
  if (callsGCLeafFunction(CS, TLI))
    return false;

  if (CS.isCall() && cast<CallInst>(CS.getInstruction())->isInlineAsm())
    return false;

  return !(isStatepoint(CS) || isGCRelocate(CS) || isGCResult(CS));
}
// Erase NaCl memory-barrier inline assembly.
void AsmDirectivesVisitor::visitCallInst(CallInst &CI) {
  // In NaCl ``asm("":::"memory")`` always comes in pairs, straddling a
  // sequentially consistent fence. Other passes rewrite this fence to
  // an equivalent stable NaCl intrinsic, meaning that this assembly can
  // be removed.
  if (CI.isInlineAsm() &&
      cast<InlineAsm>(CI.getCalledValue())->isAsmMemory()) {
    CI.eraseFromParent();
    ModifiedFunction = true;
  }
}
// Decide whether this call site requires a statepoint.
//
// Returns false for GC leaf functions, inline assembly, and calls that are
// already part of the statepoint machinery (statepoint / gc.relocate /
// gc.result); returns true otherwise.
//
// The final check is collapsed into a single negated-disjunction return so
// this overload reads identically to the (CS, TLI) variant of the function.
static bool needsStatepoint(const CallSite &CS) {
  if (callsGCLeafFunction(CS))
    return false;
  if (CS.isCall()) {
    CallInst *call = cast<CallInst>(CS.getInstruction());
    if (call->isInlineAsm())
      return false;
  }
  return !(isStatepoint(CS) || isGCRelocate(CS) || isGCResult(CS));
}
// Record the interface effect of a direct call, or mark the analysis as
// imprecise when the callee cannot be determined.
void visitCallInst(CallInst &I) {
  // Inline assembly is not a real call target; skip it entirely.
  if (I.isInlineAsm())
    return;

  Function *callee = I.getCalledFunction();
  if (callee == NULL) {
    // Indirect call: we cannot tell what it reaches.
    anyUnknown = true;
    return;
  }

  if (isInternal(callee)) {
    // Internal callee: queue it for later processing if a worklist exists.
    if (used != NULL)
      used->push(callee);
  } else {
    // External callee: record the call on the interface with its arguments.
    interface->call(callee->getName(), arg_begin(I), arg_end(I));
  }

  this->visitInstruction(I);
}
// Add def-use edges for a call node: dispatch inline asm and intrinsics to
// their dedicated handlers, give undefined functions special treatment
// (realloc data-dependence, generic undefined-call handling), and connect
// defined callees' return nodes back to the call.
void LLVMDefUseAnalysis::handleCallInst(LLVMNode *node) {
    CallInst *CI = cast<CallInst>(node->getKey());

    if (CI->isInlineAsm()) {
        handleInlineAsm(node);
        return;
    }

    // Look through pointer casts to find a directly-called function, if any.
    Function *func
        = dyn_cast<Function>(CI->getCalledValue()->stripPointerCasts());
    if (func) {
        // Debug intrinsics are deliberately not routed through the
        // intrinsic handler.
        if (func->isIntrinsic() && !isa<DbgInfoIntrinsic>(CI)) {
            handleIntrinsicCall(node, CI);
            return;
        }

        // for realloc, we need to make it data dependent on the
        // memory it reallocates, since that is the memory it copies
        if (func->size() == 0) {
            // func->size() == 0 means the function has no body, i.e. it is
            // undefined in this module.
            using analysis::AllocationFunction;
            auto type = _options.getAllocationFunction(func->getName());
            if (type == AllocationFunction::REALLOC) {
                addDataDependence(node, CI, CI->getOperand(0), Offset::UNKNOWN /* FIXME */);
            } else if (type == AllocationFunction::NONE) {
                handleUndefinedCall(node, CI);
            }// else {
             // we do not want to do anything for the memory
             // allocation functions
             // }

            // the function is undefined, so do not even try to
            // add the edges from return statements
            return;
        }
    }

    // add edges from the return nodes of subprocedure
    // to the call (if the call returns something)
    for (LLVMDependenceGraph *subgraph : node->getSubgraphs())
        addReturnEdge(node, subgraph);
}
// Translate one LLVM instruction into Dyck-graph alias/points-to constraints.
//
// For each opcode this records the relevant relation on wrapped values:
// makeAlias(a, b) for value equivalences, addPtrTo(ptr, val) for memory
// reads/writes, and addField for struct member access (fields are encoded
// as negative indices: -2 - fieldIndex). Calls and invokes are forwarded to
// handle_invoke_call_inst; return/resume/landingpad/vaarg facts are recorded
// on parent_func.
void AAAnalyzer::handle_inst(Instruction *inst, FunctionWrapper * parent_func) {
    //outs()<<*inst<<"\n"; outs().flush();
    switch (inst->getOpcode()) {
            // common/bitwise binary operations
            // Terminator instructions
        case Instruction::Ret:
        {
            ReturnInst* retInst = ((ReturnInst*) inst);
            // Only record returns that actually carry a non-void value.
            if (retInst->getNumOperands() > 0 && !retInst->getOperandUse(0)->getType()->isVoidTy()) {
                parent_func->addRet(retInst->getOperandUse(0));
            }
        }
            break;
        case Instruction::Resume:
        {
            Value* resume = ((ResumeInst*) inst)->getOperand(0);
            parent_func->addResume(resume);
        }
            break;
        case Instruction::Switch:
        case Instruction::Br:
        case Instruction::IndirectBr:
        case Instruction::Unreachable:
            // Pure control flow: no alias information.
            break;
            // vector operations
        case Instruction::ExtractElement:
        {
        }
            break;
        case Instruction::InsertElement:
        {
        }
            break;
        case Instruction::ShuffleVector:
        {
        }
            break;
            // aggregate operations
        case Instruction::ExtractValue:
        {
            Value * agg = ((ExtractValueInst*) inst)->getAggregateOperand();
            DyckVertex* aggV = wrapValue(agg);
            Type* aggTy = agg->getType();
            ArrayRef<unsigned> indices = ((ExtractValueInst*) inst)->getIndices();
            DyckVertex* currentStruct = aggV;
            // Walk the index list, descending one aggregate level per step.
            for (unsigned int i = 0; i < indices.size(); i++) {
                if (isa<CompositeType>(aggTy) && aggTy->isSized()) {
                    if (!aggTy->isStructTy()) {
                        // Non-struct composite (e.g. array): the extracted
                        // value is simply treated as an alias of the
                        // aggregate at the final index.
                        aggTy = ((CompositeType*) aggTy)->getTypeAtIndex(indices[i]);
#ifndef ARRAY_SIMPLIFIED
                        current = addPtrOffset(current, (int) indices[i] * dl.getTypeAllocSize(aggTy), dgraph);
#endif
                        if (i == indices.size() - 1) {
                            this->makeAlias(currentStruct, wrapValue(inst));
                        }
                    } else {
                        // Struct member: model with a field edge; field
                        // indices are encoded as -2 - index.
                        aggTy = ((CompositeType*) aggTy)->getTypeAtIndex(indices[i]);
                        if (i != indices.size() - 1) {
                            currentStruct = this->addField(currentStruct, -2 - (int) indices[i], NULL);
                        } else {
                            currentStruct = this->addField(currentStruct, -2 - (int) indices[i], wrapValue(inst));
                        }
                    }
                } else {
                    break;
                }
            }
        }
            break;
        case Instruction::InsertValue:
        {
            DyckVertex* resultV = wrapValue(inst);
            Value * agg = ((InsertValueInst*) inst)->getAggregateOperand();
            // The result aliases the source aggregate unless that source is
            // undef (nothing meaningful to alias with).
            if (!isa<UndefValue>(agg)) {
                makeAlias(resultV, wrapValue(agg));
            }
            Value * val = ((InsertValueInst*) inst)->getInsertedValueOperand();
            DyckVertex* insertedVal = wrapValue(val);
            Type *aggTy = inst->getType();
            ArrayRef<unsigned> indices = ((InsertValueInst*) inst)->getIndices();
            DyckVertex* currentStruct = resultV;
            // Mirror of the ExtractValue walk, but connecting the inserted
            // value into the destination slot instead of extracting from it.
            for (unsigned int i = 0; i < indices.size(); i++) {
                if (isa<CompositeType>(aggTy) && aggTy->isSized()) {
                    if (!aggTy->isStructTy()) {
                        aggTy = ((CompositeType*) aggTy)->getTypeAtIndex(indices[i]);
#ifndef ARRAY_SIMPLIFIED
                        current = addPtrOffset(current, (int) indices[i] * dl.getTypeAllocSize(aggTy), dgraph);
#endif
                        if (i == indices.size() - 1) {
                            this->makeAlias(currentStruct, insertedVal);
                        }
                    } else {
                        // Same -2 - index field encoding as ExtractValue.
                        aggTy = ((CompositeType*) aggTy)->getTypeAtIndex(indices[i]);
                        if (i != indices.size() - 1) {
                            currentStruct = this->addField(currentStruct, -2 - (int) indices[i], NULL);
                        } else {
                            currentStruct = this->addField(currentStruct, -2 - (int) indices[i], insertedVal);
                        }
                    }
                } else {
                    break;
                }
            }
        }
            break;
            // memory accessing and addressing operations
        case Instruction::Alloca:
        {
        }
            break;
        case Instruction::Fence:
        {
        }
            break;
        case Instruction::AtomicCmpXchg:
        {
            // cmpxchg both reads (result) and may write (new value) through
            // the pointer operand.
            Value * retXchg = inst;
            Value * ptrXchg = inst->getOperand(0);
            Value * newXchg = inst->getOperand(2);
            addPtrTo(wrapValue(ptrXchg), wrapValue(retXchg));
            addPtrTo(wrapValue(ptrXchg), wrapValue(newXchg));
        }
            break;
        case Instruction::AtomicRMW:
        {
            Value * retRmw = inst;
            Value * ptrRmw = ((AtomicRMWInst*) inst)->getPointerOperand();
            addPtrTo(wrapValue(ptrRmw), wrapValue(retRmw));
            switch (((AtomicRMWInst*) inst)->getOperation()) {
                    // For operations that may store the operand itself
                    // (min/max/exchange), the written value also flows
                    // through the pointer.
                case AtomicRMWInst::Max:
                case AtomicRMWInst::Min:
                case AtomicRMWInst::UMax:
                case AtomicRMWInst::UMin:
                case AtomicRMWInst::Xchg:
                {
                    Value * newRmw = ((AtomicRMWInst*) inst)->getValOperand();
                    addPtrTo(wrapValue(ptrRmw), wrapValue(newRmw));
                }
                    break;
                default:
                    //others are binary ops like add/sub/...
                    ///@TODO
                    break;
            }
        }
            break;
        case Instruction::Load:
        {
            // Loaded value is pointed-to by the address operand.
            Value *lval = inst;
            Value *ladd = inst->getOperand(0);
            addPtrTo(wrapValue(ladd), wrapValue(lval));
        }
            break;
        case Instruction::Store:
        {
            // Stored value becomes pointed-to by the address operand.
            Value * sval = inst->getOperand(0);
            Value * sadd = inst->getOperand(1);
            addPtrTo(wrapValue(sadd), wrapValue(sval));
        }
            break;
        case Instruction::GetElementPtr:
        {
            // GEP result aliases whatever handle_gep computes for the
            // base + offsets.
            makeAlias(wrapValue(inst), handle_gep((GEPOperator*) inst));
        }
            break;
            // conversion operations
        case Instruction::Trunc:
        case Instruction::ZExt:
        case Instruction::SExt:
        case Instruction::FPTrunc:
        case Instruction::FPExt:
        case Instruction::FPToUI:
        case Instruction::FPToSI:
        case Instruction::UIToFP:
        case Instruction::SIToFP:
        case Instruction::BitCast:
        case Instruction::PtrToInt:
        case Instruction::IntToPtr:
        {
            // All casts preserve the underlying value: result aliases source.
            Value * itpv = inst->getOperand(0);
            makeAlias(wrapValue(inst), wrapValue(itpv));
        }
            break;
            // other operations
        case Instruction::Invoke: // invoke is a terminal operation
        {
            InvokeInst * invoke = (InvokeInst*) inst;
            LandingPadInst* lpd = invoke->getLandingPadInst();
            parent_func->addLandingPad(invoke, lpd);

            Value * cv = invoke->getCalledValue();
            vector<Value*> args;
            for (unsigned i = 0; i < invoke->getNumArgOperands(); i++) {
                args.push_back(invoke->getArgOperand(i));
            }

            this->handle_invoke_call_inst(invoke, cv, &args, parent_func);
        }
            break;
        case Instruction::Call:
        {
            CallInst * callinst = (CallInst*) inst;

            // Inline assembly carries no callee we can analyze.
            if (callinst->isInlineAsm()) {
                break;
            }

            Value * cv = callinst->getCalledValue();
            vector<Value*> args;
            for (unsigned i = 0; i < callinst->getNumArgOperands(); i++) {
                args.push_back(callinst->getArgOperand(i));
            }

            this->handle_invoke_call_inst(callinst, cv, &args, parent_func);
        }
            break;
        case Instruction::PHI:
        {
            // A phi aliases every one of its incoming values.
            PHINode *phi = (PHINode *) inst;
            int nums = phi->getNumIncomingValues();
            for (int i = 0; i < nums; i++) {
                Value * p = phi->getIncomingValue(i);
                makeAlias(wrapValue(inst), wrapValue(p));
            }
        }
            break;
        case Instruction::Select:
        {
            // A select may yield either arm, so it aliases both.
            Value *first = ((SelectInst*) inst)->getTrueValue();
            Value *second = ((SelectInst*) inst)->getFalseValue();
            makeAlias(wrapValue(inst), wrapValue(first));
            makeAlias(wrapValue(inst), wrapValue(second));
        }
            break;
        case Instruction::VAArg:
        {
            parent_func->addVAArg(inst);

            DyckVertex* vaarg = wrapValue(inst);
            Value * ptrVaarg = inst->getOperand(0);
            addPtrTo(wrapValue(ptrVaarg), vaarg);
        }
            break;
        case Instruction::LandingPad: // handled with invoke inst
        case Instruction::ICmp:
        case Instruction::FCmp:
        default:
            break;
    }
}