Example #1
/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddr()->getType()->getPointerElementType()
           == dest.getAddress()->getType()->getPointerElementType());

  AtomicInfo atomics(*this, dest);

  // If this is an initialization, just put the value there normally.
  if (isInit) {
    atomics.emitCopyIntoMemory(rvalue, dest);
    return;
  }

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    // Produce a source address.
    llvm::Value *srcAddr = atomics.materializeRValue(rvalue);

    // void __atomic_store(size_t size, void *mem, void *val, int order)
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(dest.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(srcAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
    return;
  }

  // Okay, we're doing this natively.
  llvm::Value *intValue = atomics.convertRValueToInt(rvalue);

  // Do the atomic store.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(dest.getAddress());
  llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

  // Initializations don't need to be atomic.
  if (!isInit) store->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  store->setAlignment(dest.getAlignment().getQuantity());
  if (dest.isVolatileQualified())
    store->setVolatile(true);
  if (dest.getTBAAInfo())
    CGM.DecorateInstruction(store, dest.getTBAAInfo());
}
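A minimal sketch of the source-level operation the libcall branch services: storing an atomic value that is too large for a lock-free store through the GNU __atomic_store builtin, which is expected to lower to the four-argument runtime call whose signature appears in the comment above. The Payload struct and its size are illustrative assumptions.

struct Payload { char bytes[32]; };   // hypothetical type with no lock-free store

void store_payload(Payload *dest, Payload *src) {
  // Sequentially consistent store; for a type of this size the compiler
  // emits the __atomic_store(size, mem, val, order) library call.
  __atomic_store(dest, src, __ATOMIC_SEQ_CST);
}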
Example #2
/// Emit a load from an l-value of atomic type.  Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       AggValueSlot resultSlot) {
  AtomicInfo atomics(*this, src);

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    llvm::Value *tempAddr;
    if (!resultSlot.isIgnored()) {
      assert(atomics.getEvaluationKind() == TEK_Aggregate);
      tempAddr = resultSlot.getAddr();
    } else {
      tempAddr = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    }

    // void __atomic_load(size_t size, void *mem, void *return, int order);
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(src.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(tempAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_load", getContext().VoidTy, args);

    // Produce the r-value.
    return atomics.convertTempToRValue(tempAddr, resultSlot, loc);
  }

  // Okay, we're doing this natively.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(src.getAddress());
  llvm::LoadInst *load = Builder.CreateLoad(addr, "atomic-load");
  load->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  load->setAlignment(src.getAlignment().getQuantity());
  if (src.isVolatileQualified())
    load->setVolatile(true);
  if (src.getTBAAInfo())
    CGM.DecorateInstruction(load, src.getTBAAInfo());

  // If we're ignoring an aggregate return, don't do anything.
  if (atomics.getEvaluationKind() == TEK_Aggregate && resultSlot.isIgnored())
    return RValue::getAggregate(nullptr, false);

  // Okay, turn that back into the original value type.
  return atomics.convertIntToValue(load, resultSlot, loc);
}
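As with the store, a minimal sketch of the load handled by the libcall branch: the GNU __atomic_load builtin applied to a type with no lock-free path, which is expected to lower to the __atomic_load(size, mem, return, order) runtime call from the comment above. The Payload struct is an illustrative assumption.

struct Payload { char bytes[32]; };   // hypothetical type with no lock-free load

Payload load_payload(Payload *src) {
  Payload result;   // plays the role of the result slot / "atomic-load-temp"
  __atomic_load(src, &result, __ATOMIC_SEQ_CST);
  return result;
}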
Example #3
/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddr()->getType()->getPointerElementType()
           == dest.getAddress()->getType()->getPointerElementType());

  AtomicInfo atomics(*this, dest);

  // If this is an initialization, just put the value there normally.
  if (isInit) {
    atomics.emitCopyIntoMemory(rvalue, dest);
    return;
  }

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    // Produce a source address.
    llvm::Value *srcAddr = atomics.materializeRValue(rvalue);

    // void __atomic_store(size_t size, void *mem, void *val, int order)
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(dest.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(srcAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
    return;
  }

  // Okay, we're doing this natively.
  llvm::Value *intValue;

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  if (rvalue.isScalar() && !atomics.hasPadding()) {
    llvm::Value *value = rvalue.getScalarVal();
    if (isa<llvm::IntegerType>(value->getType())) {
      intValue = value;
    } else {
      llvm::IntegerType *inputIntTy =
        llvm::IntegerType::get(getLLVMContext(), atomics.getValueSizeInBits());
      if (isa<llvm::PointerType>(value->getType())) {
        intValue = Builder.CreatePtrToInt(value, inputIntTy);
      } else {
        intValue = Builder.CreateBitCast(value, inputIntTy);
      }
    }

  // Otherwise, we need to go through memory.
  } else {
    // Put the r-value in memory.
    llvm::Value *addr = atomics.materializeRValue(rvalue);

    // Cast the temporary to the atomic int type and pull a value out.
    addr = atomics.emitCastToAtomicIntPointer(addr);
    intValue = Builder.CreateAlignedLoad(addr,
                                 atomics.getAtomicAlignment().getQuantity());
  }

  // Do the atomic store.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(dest.getAddress());
  llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

  // Initializations don't need to be atomic.
  if (!isInit) store->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  store->setAlignment(dest.getAlignment().getQuantity());
  if (dest.isVolatileQualified())
    store->setVolatile(true);
  if (dest.getTBAAInfo())
    CGM.DecorateInstruction(store, dest.getTBAAInfo());
}
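This expanded version adds a fast path for scalar r-values without padding: instead of spilling the value to a temporary and reloading it, the scalar is converted directly to the atomic integer type, using CreatePtrToInt for pointers and CreateBitCast for other scalars. The snippet below is a minimal host-C++ analogue of those two conversions; the function names and the 32-bit float case are illustrative assumptions.

#include <cstdint>
#include <cstring>

uintptr_t pointer_bits(void *p) {
  return reinterpret_cast<uintptr_t>(p);        // analogue of CreatePtrToInt
}

uint32_t float_bits(float value) {
  static_assert(sizeof(uint32_t) == sizeof(float), "widths must match");
  uint32_t bits;
  std::memcpy(&bits, &value, sizeof bits);      // analogue of CreateBitCast: same bits, integer type
  return bits;
}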
Example #4
/// Emit a load from an l-value of atomic type.  Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       AggValueSlot resultSlot) {
  AtomicInfo atomics(*this, src);

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    llvm::Value *tempAddr;
    if (!resultSlot.isIgnored()) {
      assert(atomics.getEvaluationKind() == TEK_Aggregate);
      tempAddr = resultSlot.getAddr();
    } else {
      tempAddr = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    }

    // void __atomic_load(size_t size, void *mem, void *return, int order);
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(src.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(tempAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_load", getContext().VoidTy, args);

    // Produce the r-value.
    return atomics.convertTempToRValue(tempAddr, resultSlot, loc);
  }

  // Okay, we're doing this natively.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(src.getAddress());
  llvm::LoadInst *load = Builder.CreateLoad(addr, "atomic-load");
  load->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  load->setAlignment(src.getAlignment().getQuantity());
  if (src.isVolatileQualified())
    load->setVolatile(true);
  if (src.getTBAAInfo())
    CGM.DecorateInstruction(load, src.getTBAAInfo());

  // Okay, turn that back into the original value type.
  QualType valueType = atomics.getValueType();
  llvm::Value *result = load;

  // If we're ignoring an aggregate return, don't do anything.
  if (atomics.getEvaluationKind() == TEK_Aggregate && resultSlot.isIgnored())
    return RValue::getAggregate(nullptr, false);

  // The easiest way to do this is to go through memory, but we
  // try not to in some easy cases.
  if (atomics.getEvaluationKind() == TEK_Scalar && !atomics.hasPadding()) {
    llvm::Type *resultTy = CGM.getTypes().ConvertTypeForMem(valueType);
    if (isa<llvm::IntegerType>(resultTy)) {
      assert(result->getType() == resultTy);
      result = EmitFromMemory(result, valueType);
    } else if (isa<llvm::PointerType>(resultTy)) {
      result = Builder.CreateIntToPtr(result, resultTy);
    } else {
      result = Builder.CreateBitCast(result, resultTy);
    }
    return RValue::get(result);
  }

  // Create a temporary.  This needs to be big enough to hold the
  // atomic integer.
  llvm::Value *temp;
  bool tempIsVolatile = false;
  CharUnits tempAlignment;
  if (atomics.getEvaluationKind() == TEK_Aggregate) {
    assert(!resultSlot.isIgnored());
    temp = resultSlot.getAddr();
    tempAlignment = atomics.getValueAlignment();
    tempIsVolatile = resultSlot.isVolatile();
  } else {
    temp = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    tempAlignment = atomics.getAtomicAlignment();
  }

  // Slam the integer into the temporary.
  llvm::Value *castTemp = atomics.emitCastToAtomicIntPointer(temp);
  Builder.CreateAlignedStore(result, castTemp, tempAlignment.getQuantity())
    ->setVolatile(tempIsVolatile);

  return atomics.convertTempToRValue(temp, resultSlot, loc);
}
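When the result is an aggregate, or a scalar that carries padding, the code above goes through memory instead: the loaded atomic integer is stored into a temporary at least as large as the atomic representation, and the value is then read back out of it. The sketch below mirrors that round trip in plain C++; the Pair struct and the 64-bit atomic width are illustrative assumptions.

#include <cstdint>
#include <cstring>

struct Pair { short a; int b; };                // hypothetical value type with internal padding

Pair value_from_atomic_bits(uint64_t atomicBits) {
  static_assert(sizeof(Pair) <= sizeof(uint64_t), "value must fit the atomic representation");
  unsigned char temp[sizeof(atomicBits)];       // stands in for the "atomic-load-temp"
  std::memcpy(temp, &atomicBits, sizeof(atomicBits));   // slam the integer into the temporary
  Pair result;
  std::memcpy(&result, temp, sizeof(result));   // reinterpret the bytes as the value type
  return result;
}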