/// Instrument an AtomicCmpXchg instruction with a store check on the memory
/// it may write.
///
/// The width of the access is the store size of the value being
/// compared/exchanged, i.e. the compare operand's type.  Using I.getType()
/// here would be wrong on LLVM versions where cmpxchg yields a { T, i1 }
/// result pair: the pair's store size overstates the actual access.  (On
/// older LLVM, where cmpxchg returned T directly, the two are identical.)
void InstrumentMemoryAccesses::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
  Value *AccessSize = ConstantInt::get(
      SizeTy, TD->getTypeStoreSize(I.getCompareOperand()->getType()));
  // A cmpxchg may write to memory, so it gets the store check.
  instrument(I.getPointerOperand(), AccessSize, StoreCheckFunction, I);
  ++AtomicsInstrumented;
}
// FFI entry point for rustc: build an atomic compare-and-exchange through
// the wrapped IRBuilder.  Orderings arrive as Rust-side enum values and are
// translated with fromRust() before use.
extern "C" LLVMValueRef LLVMRustBuildAtomicCmpXchg(
    LLVMBuilderRef B, LLVMValueRef Target, LLVMValueRef Old,
    LLVMValueRef Source, LLVMAtomicOrdering Order,
    LLVMAtomicOrdering FailureOrder, LLVMBool Weak) {
  // Convert both orderings up front so the builder call stays readable.
  auto SuccessOrdering = fromRust(Order);
  auto FailureOrdering = fromRust(FailureOrder);

  AtomicCmpXchgInst *CmpXchg = unwrap(B)->CreateAtomicCmpXchg(
      unwrap(Target), unwrap(Old), unwrap(Source), SuccessOrdering,
      FailureOrdering);

  // The builder emits a strong cmpxchg by default; honour the caller's flag.
  CmpXchg->setWeak(Weak);
  return wrap(CmpXchg);
}
// C-ABI wrapper: emit an atomic cmpxchg with the given success/failure
// orderings and mark it weak or strong per the caller's flag.
extern "C" LLVMValueRef LLVMBuildAtomicCmpXchg(LLVMBuilderRef B,
                                               LLVMValueRef target,
                                               LLVMValueRef old,
                                               LLVMValueRef source,
                                               AtomicOrdering order,
                                               AtomicOrdering failure_order,
                                               LLVMBool weak) {
  AtomicCmpXchgInst *inst = unwrap(B)->CreateAtomicCmpXchg(
      unwrap(target), unwrap(old), unwrap(source), order, failure_order);
  inst->setWeak(weak);
  return wrap(inst);
}
// llvm-general binding: build a (possibly volatile, named) atomic cmpxchg.
// This binding takes a single ordering plus a synchronization scope, both of
// which are unwrapped from their FFI representations before use.
LLVMValueRef LLVM_General_BuildAtomicCmpXchg(
    LLVMBuilderRef b, LLVMValueRef ptr, LLVMValueRef cmp, LLVMValueRef n,
    LLVMBool v, LLVMAtomicOrdering lao, LLVMSynchronizationScope lss,
    const char *name) {
  AtomicCmpXchgInst *inst = unwrap(b)->CreateAtomicCmpXchg(
      unwrap(ptr), unwrap(cmp), unwrap(n), unwrap(lao), unwrap(lss));

  // Apply the per-instruction attributes the builder call doesn't cover.
  inst->setVolatile(v);
  inst->setName(name);
  return wrap(inst);
}
void GraphBuilder::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
  //
  // Pointer-typed results are handled conservatively by the generic
  // instruction visitor.
  //
  if (isa<PointerType>(I.getType())) {
    visitInstruction (I);
    return;
  }

  //
  // Create a DSNode for the dereferenced pointer.  If the DSNode is NULL, do
  // nothing more (this can occur if the pointer is a NULL constant; bugpoint
  // can generate such code).
  //
  DSNodeHandle Ptr = getValueDest(I.getPointerOperand());
  if (Ptr.isNull())
    return;

  //
  // Make sure that the memory object is marked both read and written.
  //
  Ptr.getNode()->setReadMarker();
  Ptr.getNode()->setModifiedMarker();

  //
  // NOTE(review): the original code re-tested isa<PointerType>(I.getType())
  // here to merge the compare/swap operands with the pointed-to field's
  // DSNode, but that branch was unreachable: the early return at the top of
  // this function already diverts every pointer-typed cmpxchg to
  // visitInstruction().  The dead branch has been removed; behavior is
  // unchanged.
  //

  //
  // Modify the DSNode so that it has the loaded/written type at the
  // appropriate offset.
  //
  Ptr.getNode()->growSizeForType(I.getType(), Ptr.getOffset());
  Ptr.getNode()->mergeTypeInfo(I.getType(), Ptr.getOffset());
  return;
}
/// %res = cmpxchg [weak] T* %ptr, T %old, T %new, memory_order_success /// memory_order_failure /// %val = extractvalue { T, i1 } %res, 0 /// %success = extractvalue { T, i1 } %res, 1 /// becomes: /// %val = call T @llvm.nacl.atomic.cmpxchg.i<size>( /// %object, %expected, %desired, memory_order_success, /// memory_order_failure) /// %success = icmp eq %old, %val /// Note: weak is currently dropped if present, the cmpxchg is always strong. void AtomicVisitor::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) { PointerHelper<AtomicCmpXchgInst> PH(*this, I); const NaCl::AtomicIntrinsics::AtomicIntrinsic *Intrinsic = findAtomicIntrinsic(I, Intrinsic::nacl_atomic_cmpxchg, PH.PET); checkSizeMatchesType(I, PH.BitSize, I.getCompareOperand()->getType()); checkSizeMatchesType(I, PH.BitSize, I.getNewValOperand()->getType()); auto Order = freezeMemoryOrder(I, I.getSuccessOrdering(), I.getFailureOrdering()); Value *Args[] = {PH.P, I.getCompareOperand(), I.getNewValOperand(), Order.first, Order.second}; replaceInstructionWithIntrinsicCall(I, Intrinsic, PH.OriginalPET, PH.PET, Args); }
// Translate an x86 CMPXCHG r/m, r (memory-destination form) into LLVM IR.
//
// Semantics: the accumulator (AL/AX/EAX, chosen by `width` -- presumably a
// template parameter declared on an enclosing header line not visible here;
// TODO confirm) is compared with the value at [dstAddr].  If equal, srcReg
// is stored to memory and the accumulator is unchanged; otherwise the memory
// value is loaded into the accumulator.  ZF reflects the comparison result.
//
// ip      - decoded instruction; supplies the address space for the access.
// b       - current basic block; all emitted instructions are appended here.
// dstAddr - memory operand, either a raw integer address or a pointer of a
//           possibly different pointee type; coerced to iN* below.
// srcReg  - source register operand (must be a register).
static InstTransResult doCmpxchgRM(InstPtr ip, BasicBlock *&b, Value *dstAddr, const MCOperand &srcReg) {
  NASSERT(dstAddr != NULL);
  NASSERT(srcReg.isReg());

  // Read the accumulator register appropriate for the operand width.
  Value *acc;
  switch(width) {
    case 8:
      acc = R_READ<width>(b, X86::AL);
      break;
    case 16:
      acc = R_READ<width>(b, X86::AX);
      break;
    case 32:
      acc = R_READ<width>(b, X86::EAX);
      break;
    default:
      throw TErr(__LINE__, __FILE__, "Width not supported");
  }

  //Value *mem_v = M_READ<width>(ip, b, dstAddr);

  // Coerce dstAddr into a pointer to iN in the instruction's address space.
  Value *m_addr = NULL;
  unsigned addrspace = ip->get_addr_space();
  if( dstAddr->getType()->isPointerTy() == false ) {
    // Raw integer address: turn it into a pointer.
    llvm::Type *ptrTy = Type::getIntNPtrTy(b->getContext(), width, addrspace);
    m_addr = new llvm::IntToPtrInst(dstAddr, ptrTy, "", b);
  } else if( dstAddr->getType() != Type::getIntNPtrTy( b->getContext(), width, addrspace) ) {
    //we need to bitcast the pointer value to a pointer type of the appropriate width
    m_addr = CastInst::CreatePointerCast(dstAddr,
        Type::getIntNPtrTy(b->getContext(), width, addrspace), "", b);
  } else {
    // Already the right pointer type.
    m_addr = dstAddr;
  }

  Value *srcReg_v = R_READ<width>(b, srcReg.getReg());

  // Emit the atomic compare-and-exchange.  The result is a { iN, i1 } pair:
  // element 0 is the value found in memory, element 1 is the success bit.
  AtomicCmpXchgInst *cmpx = new AtomicCmpXchgInst(
      m_addr,
      acc,
      srcReg_v,
      llvm::SequentiallyConsistent,
      llvm::SequentiallyConsistent,
      llvm::CrossThread,
      b);
  cmpx->setVolatile(true);

  Value *cmpx_val = ExtractValueInst::Create(cmpx, 0, "cmpxchg_cmpx_val", b);
  Value *was_eq = ExtractValueInst::Create(cmpx, 1, "cmpxchg_was_eq", b);

  // doCmpVV presumably models CMP's flag effects on (acc, loaded value),
  // matching CMPXCHG's documented flag behavior -- verify against its
  // definition.  ZF is then overwritten with the actual success bit.
  doCmpVV<width>(ip, b, acc, cmpx_val);

  F_WRITE(b, ZF, was_eq);

  // On success the accumulator keeps its old value; on failure it receives
  // the value that was found in memory.
  Value *new_acc = SelectInst::Create(was_eq, acc, cmpx_val, "", b);

  // Write the (possibly updated) accumulator back.
  switch(width) {
    case 8:
      R_WRITE<width>(b, X86::AL, new_acc);
      break;
    case 16:
      R_WRITE<width>(b, X86::AX, new_acc);
      break;
    case 32:
      R_WRITE<width>(b, X86::EAX, new_acc);
      break;
    default:
      throw TErr(__LINE__, __FILE__, "Width not supported");
  }

  return ContinueBlock;
}
// Variant of the CMPXCHG r/m, r translation written against an older LLVM
// API: the AtomicCmpXchgInst constructor takes a single ordering, and the
// instruction's result is used directly as the loaded value (NOTE(review):
// this matches the pre-3.5 API where cmpxchg returned T rather than
// { T, i1 } -- confirm against the LLVM headers this file builds with).
// Success/failure are modelled with explicit basic blocks instead of
// extractvalue/select, and `b` is advanced to the join block on return.
// `width` is presumably a template parameter declared outside this span;
// TODO confirm.
static InstTransResult doCmpxchgRM(InstPtr ip, BasicBlock *&b, Value *dstAddr, const MCOperand &srcReg) {
  NASSERT(dstAddr != NULL);
  NASSERT(srcReg.isReg());

  Function *F = b->getParent();
  // Successor blocks: accumulator == memory, accumulator != memory, and the
  // common join point.
  BasicBlock *AccEQDest = BasicBlock::Create(b->getContext(), "AccEQDest", F);
  BasicBlock *AccNEDest = BasicBlock::Create(b->getContext(), "AccNEDest", F);
  BasicBlock *done = BasicBlock::Create(b->getContext(), "done", F);

  // Read the accumulator register appropriate for the operand width.
  Value *acc;
  switch(width) {
    case 8:
      acc = R_READ<width>(b, X86::AL);
      break;
    case 16:
      acc = R_READ<width>(b, X86::AX);
      break;
    case 32:
      acc = R_READ<width>(b, X86::EAX);
      break;
    default:
      throw TErr(__LINE__, __FILE__, "Width not supported");
  }

  //Value *mem_v = M_READ<width>(ip, b, dstAddr);

  // Coerce dstAddr into a pointer to iN in the instruction's address space.
  Value *m_addr = NULL;
  unsigned addrspace = ip->get_addr_space();
  if( dstAddr->getType()->isPointerTy() == false ) {
    // Raw integer address: turn it into a pointer.
    llvm::Type *ptrTy = Type::getIntNPtrTy(b->getContext(), width, addrspace);
    m_addr = new llvm::IntToPtrInst(dstAddr, ptrTy, "", b);
  } else if( dstAddr->getType() != Type::getIntNPtrTy( b->getContext(), width, addrspace) ) {
    //we need to bitcast the pointer value to a pointer type of the appropriate width
    m_addr = CastInst::CreatePointerCast(dstAddr,
        Type::getIntNPtrTy(b->getContext(), width, addrspace), "", b);
  } else {
    // Already the right pointer type.
    m_addr = dstAddr;
  }

  Value *srcReg_v = R_READ<width>(b, srcReg.getReg());

  // Old-API constructor: one ordering argument, result is the loaded value.
  AtomicCmpXchgInst *cmpx = new AtomicCmpXchgInst(
      m_addr,
      acc,
      srcReg_v,
      llvm::SequentiallyConsistent,
      llvm::CrossThread,
      b);
  cmpx->setVolatile(true);

  // needed for flags settings
  doCmpVV<width>(ip, b, acc, cmpx);

  // Branch on whether the accumulator matched the memory value.
  Value *Cmp = new ICmpInst(*b, CmpInst::ICMP_EQ, cmpx, acc);
  BranchInst::Create(AccEQDest, AccNEDest, Cmp, b);

  // Acc == Dst
  F_SET(AccEQDest, "ZF");
  //M_WRITE<width>(ip, AccEQDest, dstAddr, srcReg_v);
  BranchInst::Create(done, AccEQDest);

  // Acc != Dst: clear ZF and load the memory value into the accumulator.
  F_CLEAR(AccNEDest, "ZF");
  switch(width) {
    case 8:
      R_WRITE<width>(AccNEDest, X86::AL, cmpx);
      break;
    case 16:
      R_WRITE<width>(AccNEDest, X86::AX, cmpx);
      break;
    case 32:
      R_WRITE<width>(AccNEDest, X86::EAX, cmpx);
      break;
    default:
      throw TErr(__LINE__, __FILE__, "Width not supported");
  }
  BranchInst::Create(done, AccNEDest);

  // Continue translation from the join block.
  b = done;
  return ContinueBlock;
}