Example #1
/// \brief Emit a libcall for a binary operation on complex types.
ComplexPairTy ComplexExprEmitter::EmitComplexBinOpLibCall(StringRef LibCallName,
                                                          const BinOpInfo &Op) {
  CallArgList Args;
  Args.add(RValue::get(Op.LHS.first),
           Op.Ty->castAs<ComplexType>()->getElementType());
  Args.add(RValue::get(Op.LHS.second),
           Op.Ty->castAs<ComplexType>()->getElementType());
  Args.add(RValue::get(Op.RHS.first),
           Op.Ty->castAs<ComplexType>()->getElementType());
  Args.add(RValue::get(Op.RHS.second),
           Op.Ty->castAs<ComplexType>()->getElementType());

  // We *must* use the full CG function call building logic here because the
  // complex type has special ABI handling. We must also not forget the
  // special calling convention that may be used for compiler builtins.
  const CGFunctionInfo &FuncInfo =
    CGF.CGM.getTypes().arrangeFreeFunctionCall(
      Op.Ty, Args, FunctionType::ExtInfo(/* No CC here - will be added later */),
      RequiredArgs::All);
  llvm::FunctionType *FTy = CGF.CGM.getTypes().GetFunctionType(FuncInfo);
  llvm::Constant *Func = CGF.CGM.CreateBuiltinFunction(FTy, LibCallName);
  llvm::Instruction *Call;

  RValue Res = CGF.EmitCall(FuncInfo, Func, ReturnValueSlot(), Args,
                            nullptr, &Call);
  cast<llvm::CallInst>(Call)->setCallingConv(CGF.CGM.getBuiltinCC());
  cast<llvm::CallInst>(Call)->setDoesNotThrow();

  return Res.getComplexVal();
}
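For context, the LibCallName values passed into this function are the standard compiler-rt complex helpers; below is a hedged sketch of how a caller might select one. The selection helper itself is hypothetical, but the quoted names are the real libcall names.

#include "llvm/ADT/StringRef.h"
// Sketch, not the actual Clang call site: map operator and element type to
// the compiler-rt complex helpers.
static llvm::StringRef pickComplexLibCall(bool IsMul, bool IsFloat) {
  if (IsMul)
    return IsFloat ? "__mulsc3" : "__muldc3"; // complex multiply (float/double)
  return IsFloat ? "__divsc3" : "__divdc3";   // complex divide (float/double)
}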
Example #2
void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
                                     llvm::Value *Ptr,
                                     QualType DeleteTy) {
  const FunctionProtoType *DeleteFTy =
    DeleteFD->getType()->getAs<FunctionProtoType>();

  CallArgList DeleteArgs;

  QualType ArgTy = DeleteFTy->getArgType(0);
  llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
  DeleteArgs.push_back(std::make_pair(RValue::get(DeletePtr), ArgTy));

  if (DeleteFTy->getNumArgs() == 2) {
    QualType SizeTy = DeleteFTy->getArgType(1);
    uint64_t SizeVal = getContext().getTypeSize(DeleteTy) / 8;
    llvm::Constant *Size = llvm::ConstantInt::get(ConvertType(SizeTy),
                                                  SizeVal);
    DeleteArgs.push_back(std::make_pair(RValue::get(Size), SizeTy));
  }

  // Emit the call to delete.
  EmitCall(CGM.getTypes().getFunctionInfo(DeleteFTy->getResultType(),
                                          DeleteArgs),
           CGM.GetAddrOfFunction(DeleteFD),
           DeleteArgs, DeleteFD);
}
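The two-argument branch above corresponds to the sized usual deallocation function; a minimal C++ illustration of the two operator delete signatures this code must be prepared to call:

#include <cstddef>
// One-parameter form: only the pointer is passed.
void operator delete(void *p) noexcept;
// Two-parameter form: getNumArgs() == 2 above, so the object's size
// (getTypeSize(DeleteTy) in bytes) is passed as the second argument.
void operator delete(void *p, std::size_t size) noexcept;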
Example #3
// CopyObject - Utility to copy an object.  Calls the copy constructor as
// necessary.  DestPtr is cast to the right type.
static void CopyObject(CodeGenFunction &CGF, const Expr *E, 
                       llvm::Value *DestPtr, llvm::Value *ExceptionPtrPtr) {
  QualType ObjectType = E->getType();

  // Store the throw exception in the exception object.
  if (!CGF.hasAggregateLLVMType(ObjectType)) {
    llvm::Value *Value = CGF.EmitScalarExpr(E);
    const llvm::Type *ValuePtrTy = Value->getType()->getPointerTo();

    CGF.Builder.CreateStore(Value, 
                            CGF.Builder.CreateBitCast(DestPtr, ValuePtrTy));
  } else {
    const llvm::Type *Ty = CGF.ConvertType(ObjectType)->getPointerTo();
    const CXXRecordDecl *RD =
      cast<CXXRecordDecl>(ObjectType->getAs<RecordType>()->getDecl());
    
    llvm::Value *This = CGF.Builder.CreateBitCast(DestPtr, Ty);
    if (RD->hasTrivialCopyConstructor()) {
      CGF.EmitAggExpr(E, This, false);
    } else if (CXXConstructorDecl *CopyCtor
               = RD->getCopyConstructor(CGF.getContext(), 0)) {
      llvm::BasicBlock *PrevLandingPad = CGF.getInvokeDest();
      if (CGF.Exceptions) {
        CodeGenFunction::EHCleanupBlock Cleanup(CGF);
        llvm::Constant *FreeExceptionFn = getFreeExceptionFn(CGF);
        
        // Load the exception pointer.
        llvm::Value *ExceptionPtr = CGF.Builder.CreateLoad(ExceptionPtrPtr);
        CGF.Builder.CreateCall(FreeExceptionFn, ExceptionPtr);
      }

      llvm::Value *Src = CGF.EmitLValue(E).getAddress();
      CGF.setInvokeDest(PrevLandingPad);

      llvm::BasicBlock *TerminateHandler = CGF.getTerminateHandler();
      PrevLandingPad = CGF.getInvokeDest();
      CGF.setInvokeDest(TerminateHandler);

      // Stolen from EmitClassAggrMemberwiseCopy
      llvm::Value *Callee = CGF.CGM.GetAddrOfCXXConstructor(CopyCtor,
                                                            Ctor_Complete);
      CallArgList CallArgs;
      CallArgs.push_back(std::make_pair(RValue::get(This),
                                      CopyCtor->getThisType(CGF.getContext())));

      // Push the Src ptr.
      CallArgs.push_back(std::make_pair(RValue::get(Src),
                                        CopyCtor->getParamDecl(0)->getType()));
      QualType ResultType =
        CopyCtor->getType()->getAs<FunctionType>()->getResultType();
      CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(ResultType, CallArgs),
                   Callee, CallArgs, CopyCtor);
      CGF.setInvokeDest(PrevLandingPad);
    } else
      llvm_unreachable("uncopyable object");
  }
}
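This helper backs C++ throw expressions; a small example of source code that exercises both branches (scalar value vs. class type with a non-trivial copy constructor):

struct Widget {
  Widget(const Widget &); // non-trivial copy ctor: taken by the EmitCall branch
};
void f(const Widget &w, int n, bool b) {
  if (b)
    throw n; // scalar: stored directly into the exception object
  throw w;   // class: copy-constructed into the exception object
}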
Example #4
/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddr()->getType()->getPointerElementType()
           == dest.getAddress()->getType()->getPointerElementType());

  AtomicInfo atomics(*this, dest);

  // If this is an initialization, just put the value there normally.
  if (isInit) {
    atomics.emitCopyIntoMemory(rvalue, dest);
    return;
  }

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    // Produce a source address.
    llvm::Value *srcAddr = atomics.materializeRValue(rvalue);

    // void __atomic_store(size_t size, void *mem, void *val, int order)
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(dest.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(srcAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
    return;
  }

  // Okay, we're doing this natively.
  llvm::Value *intValue = atomics.convertRValueToInt(rvalue);

  // Do the atomic store.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(dest.getAddress());
  llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

  // Initializations don't need to be atomic.
  if (!isInit) store->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  store->setAlignment(dest.getAlignment().getQuantity());
  if (dest.isVolatileQualified())
    store->setVolatile(true);
  if (dest.getTBAAInfo())
    CGM.DecorateInstruction(store, dest.getTBAAInfo());
}
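shouldUseLibcall() is typically true when the atomic's size or alignment cannot be handled by an inline instruction; a hedged illustration of a type that would take the __atomic_store libcall path on most targets:

#include <atomic>
// An oversized trivially copyable payload: on most targets this exceeds the
// maximum inline atomic width, so the store is lowered to the generic
// __atomic_store(size, mem, val, order) call built above.
struct Big { char bytes[64]; };
std::atomic<Big> shared_box;
void publish(const Big &b) { shared_box.store(b); }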
Example #5
/// Emit a load from an l-value of atomic type.  Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       AggValueSlot resultSlot) {
  AtomicInfo atomics(*this, src);

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    llvm::Value *tempAddr;
    if (!resultSlot.isIgnored()) {
      assert(atomics.getEvaluationKind() == TEK_Aggregate);
      tempAddr = resultSlot.getAddr();
    } else {
      tempAddr = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    }

    // void __atomic_load(size_t size, void *mem, void *return, int order);
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(src.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(tempAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_load", getContext().VoidTy, args);

    // Produce the r-value.
    return atomics.convertTempToRValue(tempAddr, resultSlot, loc);
  }

  // Okay, we're doing this natively.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(src.getAddress());
  llvm::LoadInst *load = Builder.CreateLoad(addr, "atomic-load");
  load->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  load->setAlignment(src.getAlignment().getQuantity());
  if (src.isVolatileQualified())
    load->setVolatile(true);
  if (src.getTBAAInfo())
    CGM.DecorateInstruction(load, src.getTBAAInfo());

  // If we're ignoring an aggregate return, don't do anything.
  if (atomics.getEvaluationKind() == TEK_Aggregate && resultSlot.isIgnored())
    return RValue::getAggregate(nullptr, false);

  // Okay, turn that back into the original value type.
  return atomics.convertIntToValue(load, resultSlot, loc);
}
Example #6
static void
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy) {
  if (UseOptimizedLibcall) {
    // Load value and pass it to the function directly.
    unsigned Align = CGF.getContext().getTypeAlignInChars(ValTy).getQuantity();
    Val = CGF.EmitLoadOfScalar(Val, false, Align, ValTy);
    Args.add(RValue::get(Val), ValTy);
  } else {
    // Non-optimized functions always take a reference.
    Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
                         CGF.getContext().VoidPtrTy);
  }
}
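The UseOptimizedLibcall flag selects between libatomic's two calling conventions; for reference, a sketch of the two shapes as plain declarations (standard libatomic entry points, shown here under the assumption of a 4-byte operand):

#include <cstddef>
// Size-specialized ("optimized") form: the value is passed directly.
extern "C" unsigned __atomic_fetch_add_4(void *mem, unsigned val, int order);
// Generic form: operands are passed by reference, with an explicit size --
// the "always take a reference" branch above.
extern "C" void __atomic_exchange(std::size_t size, void *mem, void *val,
                                  void *ret, int order);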
Example #7
CharacterValueTy CharacterExprEmitter::VisitCallExpr(const CallExpr *E) {
  CharacterValueTy Dest;
  if(hasDestination())
    Dest = takeDestination();
  else {
    // FIXME function returning CHARACTER*(*)
    auto RetType = E->getFunction()->getType();
    Dest = CGF.GetCharacterValueFromPtr(
             CGF.CreateTempAlloca(CGF.ConvertTypeForMem(RetType), "characters"),
             RetType);
  }

  CallArgList ArgList;
  ArgList.addReturnValueArg(Dest);
  return CGF.EmitCall(E->getFunction(), ArgList, E->getArguments()).asCharacter();
}
Example #8
// CopyObject - Utility to copy an object.  Calls the copy constructor as
// necessary.  N is cast to the right type.
static void CopyObject(CodeGenFunction &CGF, QualType ObjectType,
                       bool WasPointer, bool WasPointerReference,
                       llvm::Value *E, llvm::Value *N) {
  // Store the throw exception in the exception object.
  if (WasPointer || !CGF.hasAggregateLLVMType(ObjectType)) {
    llvm::Value *Value = E;
    if (!WasPointer)
      Value = CGF.Builder.CreateLoad(Value);
    const llvm::Type *ValuePtrTy = Value->getType()->getPointerTo(0);
    if (WasPointerReference) {
      llvm::Value *Tmp = CGF.CreateTempAlloca(Value->getType(), "catch.param");
      CGF.Builder.CreateStore(Value, Tmp);
      Value = Tmp;
      ValuePtrTy = Value->getType()->getPointerTo(0);
    }
    N = CGF.Builder.CreateBitCast(N, ValuePtrTy);
    CGF.Builder.CreateStore(Value, N);
  } else {
    const llvm::Type *Ty = CGF.ConvertType(ObjectType)->getPointerTo(0);
    const CXXRecordDecl *RD;
    RD = cast<CXXRecordDecl>(ObjectType->getAs<RecordType>()->getDecl());
    llvm::Value *This = CGF.Builder.CreateBitCast(N, Ty);
    if (RD->hasTrivialCopyConstructor()) {
      CGF.EmitAggregateCopy(This, E, ObjectType);
    } else if (CXXConstructorDecl *CopyCtor
               = RD->getCopyConstructor(CGF.getContext(), 0)) {
      llvm::Value *Src = E;

      // Stolen from EmitClassAggrMemberwiseCopy
      llvm::Value *Callee = CGF.CGM.GetAddrOfCXXConstructor(CopyCtor,
                                                            Ctor_Complete);
      CallArgList CallArgs;
      CallArgs.push_back(std::make_pair(RValue::get(This),
                                      CopyCtor->getThisType(CGF.getContext())));

      // Push the Src ptr.
      CallArgs.push_back(std::make_pair(RValue::get(Src),
                                        CopyCtor->getParamDecl(0)->getType()));

      const FunctionProtoType *FPT
        = CopyCtor->getType()->getAs<FunctionProtoType>();
      CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(CallArgs, FPT),
                   Callee, ReturnValueSlot(), CallArgs, CopyCtor);
    } else
      llvm_unreachable("uncopyable object");
  }
}
Example #9
void CodeGenFunction::EmitUPCAggregateCopy(llvm::Value *Dest, llvm::Value *Src,
                                           QualType DestTy, QualType SrcTy,
                                           SourceLocation Loc) {
  const ASTContext& Context = getContext();
  QualType ArgTy = Context.getPointerType(Context.getSharedType(Context.VoidTy));
  QualType SizeType = Context.getSizeType();
  assert(DestTy->getCanonicalTypeUnqualified() == SrcTy->getCanonicalTypeUnqualified());
  llvm::Constant *Len =
    llvm::ConstantInt::get(ConvertType(SizeType),
                           Context.getTypeSizeInChars(DestTy).getQuantity());
  llvm::SmallString<16> Name;
  const char *OpName;
  QualType DestArgTy, SrcArgTy;
  if (DestTy.getQualifiers().hasShared() && SrcTy.getQualifiers().hasShared()) {
    // both shared
    OpName = "copy";
    DestArgTy = SrcArgTy = ArgTy;
  } else if (DestTy.getQualifiers().hasShared()) {
    OpName = "put";
    DestArgTy = ArgTy;
    SrcArgTy = Context.VoidPtrTy;
  } else if (SrcTy.getQualifiers().hasShared()) {
    OpName = "get";
    DestArgTy = Context.VoidPtrTy;
    SrcArgTy = ArgTy;
  } else {
    llvm_unreachable("expected at least one shared argument");
  }

  Name += "__";
  Name += OpName;
  if (DestTy.getQualifiers().hasStrict() || SrcTy.getQualifiers().hasStrict())
    Name += 's';
  if (CGM.getCodeGenOpts().UPCDebug) Name += "g";
  Name += "blk";
  CallArgList Args;
  Args.add(RValue::get(Dest), DestArgTy);
  Args.add(RValue::get(Src), SrcArgTy);
  Args.add(RValue::get(Len), SizeType);
  if (CGM.getCodeGenOpts().UPCDebug) {
    getFileAndLine(*this, Loc, &Args);
    Name += '5';
  } else {
    Name += '3';
  }
  EmitUPCCall(*this, Name, Context.VoidTy, Args);
}
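Putting the name-building logic together: the runtime entry point is assembled as "__" + {copy|put|get} + optional 's' (strict) + optional 'g' (UPC debug) + "blk" + the argument count ('3' normally, '5' when the file/line debug arguments are appended). A few representative names, derived directly from the code above (illustrative, not exhaustive):

// shared -> shared, relaxed, no debug : __copyblk3
// local  -> shared, strict,  no debug : __putsblk3
// shared -> local,  relaxed, debug    : __getgblk5  (file/line args appended)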
Example #10
static void EmitUPCBarrier(CodeGenFunction& CGF, StringRef Name,
                           const Expr *E, SourceLocation Loc) {
  ASTContext &Context = CGF.CGM.getContext();
  llvm::SmallString<16> N(Name);
  CallArgList Args;

  llvm::Value *Id = E ? CGF.EmitScalarExpr(E) :
    llvm::ConstantInt::get(CGF.IntTy, 0x80000000);
  Args.add(RValue::get(Id), Context.IntTy);

  if (CGF.CGM.getCodeGenOpts().UPCDebug) {
    getFileAndLine(CGF, Loc, &Args);
    N += 'g';
  }

  CGF.EmitUPCCall(N, Context.VoidTy, Args);
}
Example #11
CodeGen::RValue
CGObjCJit::EmitMessageSend(CodeGen::CodeGenFunction &CGF,
                           ReturnValueSlot Return,
                           QualType ResultType,
                           Selector Sel,
                           llvm::Value *Arg0,
                           llvm::Value *Arg0Class,
                           const CallArgList &CallArgs,
                           const ObjCMethodDecl *Method) {

  llvm::Value *Arg1 = GetSelector(CGF, Sel);

  CallArgList ActualArgs;
  ActualArgs.add(RValue::get(Arg0), CGF.getContext().getObjCIdType());
  ActualArgs.add(RValue::get(Arg1), CGF.getContext().getObjCSelType());
  ActualArgs.addFrom(CallArgs);

  if (Method)
    assert(CGM.getContext().getCanonicalType(Method->getResultType()) ==
           CGM.getContext().getCanonicalType(ResultType) &&
           "Result type mismatch!");

  // Perform the following:
  //   imp = class_getMethodImplementation( Arg0Class, Arg1 );
  //   (*imp)( Arg0, Arg1, CallArgs );
  llvm::CallInst *getImp;

  // Unfortunately, using the GNU runtime version of
  // class_getMethodImplementation and then calling the resulting
  // IMP doesn't work unless objc_msg_lookup was already
  // called first. TODO: avoid doing this every time
  //
  if (fn_objc_msg_lookup.isValid()) {
    getImp = CGF.Builder.CreateCall2(fn_objc_msg_lookup,
                                     Arg0,
                                     Arg1);
  } else { // use the universal way
    getImp = CGF.Builder.CreateCall2(fn_class_getMethodImplementation,
                                     Arg0Class,
                                     Arg1);
  }

  MessageSendInfo MSI = getMessageSendInfo(Method, ResultType, ActualArgs);
  llvm::Value *theImp = CGF.Builder.CreateBitCast(getImp, MSI.MessengerType);
  return CGF.EmitCall(MSI.CallInfo, theImp, Return, ActualArgs);
}
Example #12
// Transforms a call to printf into a call to the NVPTX vprintf syscall (which
// isn't particularly special; it's invoked just like a regular function).
// vprintf takes two args: A format string, and a pointer to a buffer containing
// the varargs.
//
// For example, the call
//
//   printf("format string", arg1, arg2, arg3);
//
// is converted into something resembling
//
//   struct Tmp {
//     Arg1 a1;
//     Arg2 a2;
//     Arg3 a3;
//   };
//   char* buf = alloca(sizeof(Tmp));
//   *(Tmp*)buf = {a1, a2, a3};
//   vprintf("format string", buf);
//
// buf is aligned to the max of {alignof(Arg1), ...}.  Furthermore, each of the
// args is itself aligned to its preferred alignment.
//
// Note that by the time this function runs, E's args have already undergone the
// standard C vararg promotion (short -> int, float -> double, etc.).
RValue
CodeGenFunction::EmitCUDADevicePrintfCallExpr(const CallExpr *E,
                                              ReturnValueSlot ReturnValue) {
  assert(getLangOpts().CUDA);
  assert(getLangOpts().CUDAIsDevice);
  assert(E->getBuiltinCallee() == Builtin::BIprintf);
  assert(E->getNumArgs() >= 1); // printf always has at least one arg.

  const llvm::DataLayout &DL = CGM.getDataLayout();
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();

  CallArgList Args;
  EmitCallArgs(Args,
               E->getDirectCallee()->getType()->getAs<FunctionProtoType>(),
               E->arguments(), E->getDirectCallee(),
               /* ParamsToSkip = */ 0);

  // Construct and fill the args buffer that we'll pass to vprintf.
  llvm::Value *BufferPtr;
  if (Args.size() <= 1) {
    // If there are no args, pass a null pointer to vprintf.
    BufferPtr = llvm::ConstantPointerNull::get(llvm::Type::getInt8PtrTy(Ctx));
  } else {
    llvm::SmallVector<llvm::Type *, 8> ArgTypes;
    for (unsigned I = 1, NumArgs = Args.size(); I < NumArgs; ++I)
      ArgTypes.push_back(Args[I].RV.getScalarVal()->getType());
    llvm::Type *AllocaTy = llvm::StructType::create(ArgTypes, "printf_args");
    llvm::Value *Alloca = CreateTempAlloca(AllocaTy);

    for (unsigned I = 1, NumArgs = Args.size(); I < NumArgs; ++I) {
      llvm::Value *P = Builder.CreateStructGEP(AllocaTy, Alloca, I - 1);
      llvm::Value *Arg = Args[I].RV.getScalarVal();
      Builder.CreateAlignedStore(Arg, P, DL.getPrefTypeAlignment(Arg->getType()));
    }
    BufferPtr = Builder.CreatePointerCast(Alloca, llvm::Type::getInt8PtrTy(Ctx));
  }

  // Invoke vprintf and return.
  llvm::Function* VprintfFunc = GetVprintfDeclaration(CGM.getModule());
  return RValue::get(
      Builder.CreateCall(VprintfFunc, {Args[0].RV.getScalarVal(), BufferPtr}));
}
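GetVprintfDeclaration is not shown in this excerpt; a minimal sketch of what it plausibly looks like, assuming the documented NVPTX signature int vprintf(const char *format, void *valist):

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
// Sketch: declare (or reuse) vprintf with type i32 (i8*, i8*) -- the format
// string plus the packed-varargs buffer built above.
static llvm::Function *GetVprintfDeclaration(llvm::Module &M) {
  llvm::Type *ArgTypes[] = {llvm::Type::getInt8PtrTy(M.getContext()),
                            llvm::Type::getInt8PtrTy(M.getContext())};
  llvm::FunctionType *VprintfFuncType = llvm::FunctionType::get(
      llvm::Type::getInt32Ty(M.getContext()), ArgTypes, /*isVarArg=*/false);
  if (llvm::Function *F = M.getFunction("vprintf"))
    return F;
  return llvm::Function::Create(VprintfFuncType,
                                llvm::GlobalValue::ExternalLinkage, "vprintf",
                                &M);
}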
Example #13
llvm::Value *CodeGenFunction::EmitUPCCastSharedToLocal(llvm::Value *Value,
                                                       QualType DestTy,
                                                       SourceLocation Loc) {
  const ASTContext& Context = getContext();
  QualType ArgTy = Context.getPointerType(Context.getSharedType(Context.VoidTy));
  QualType ResultTy = Context.VoidPtrTy;

  const char *FnName = "__getaddr";

  CallArgList Args;
  Args.add(RValue::get(Value), ArgTy);
  if (CGM.getCodeGenOpts().UPCDebug) {
    getFileAndLine(*this, Loc, &Args);
    FnName = "__getaddrg";
  }

  RValue Result = EmitUPCCall(*this, FnName, ResultTy, Args);

  return Builder.CreateBitCast(Result.getScalarVal(), ConvertType(DestTy));
}
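At the source level this backs UPC's pointer-to-shared to local-pointer conversion; a hedged sketch (UPC dialect, shown as comments) of the construct that reaches this path:

// UPC source (sketch):
//   shared int *p;            // pointer-to-shared
//   int *lp = (int *) p;      // cast to a local pointer
// which, per the code above, lowers to roughly:
//   lp = (int *) __getaddr(p);                 // normal build
//   lp = (int *) __getaddrg(p, file, line);    // with UPC debug enabled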
Example #14
void CodeGenFunction::EmitUPCStore(llvm::Value *Value,
                                   llvm::Value *Addr,
                                   bool isStrict,
                                   CharUnits Align,
                                   SourceLocation Loc) {

  const ASTContext& Context = getContext();
  const llvm::DataLayout &Target = CGM.getDataLayout();
  uint64_t Size = Target.getTypeSizeInBits(Value->getType());
  QualType AddrTy = Context.getPointerType(Context.getSharedType(Context.VoidTy));
  QualType ValTy;
  llvm::SmallString<16> Name("__put");
  if (isStrict) Name += 's';
  if (CGM.getCodeGenOpts().UPCDebug) Name += "g";

  if (const char * ID = getUPCTypeID(*this, &ValTy, Value->getType(), Size, Context.toBits(Align))) {
    Name += ID;

    llvm::Type *ValLTy = ConvertTypeForMem(ValTy);

    if (Value->getType()->isPointerTy())
      Value = Builder.CreatePtrToInt(Value, ValLTy);
    else
      Value = Builder.CreateBitCast(Value, ValLTy);

    CallArgList Args;
    Args.add(RValue::get(Addr), AddrTy);
    Args.add(RValue::get(Value), ValTy);

    if (CGM.getCodeGenOpts().UPCDebug) {
      getFileAndLine(*this, Loc, &Args);
      Name += '4';
    } else {
      Name += '2';
    }

    EmitUPCCall(*this, Name, Context.VoidTy, Args);
  } else {
    Name += "blk";

    llvm::AllocaInst *Mem = CreateTempAlloca(Value->getType());
    Mem->setAlignment(Target.getABITypeAlignment(Value->getType()));
    Builder.CreateStore(Value, Mem);
    llvm::Value *Tmp = Builder.CreateBitCast(Mem, VoidPtrTy);
    llvm::Value *SizeArg = llvm::ConstantInt::get(SizeTy, Size/Context.getCharWidth());
    
    CallArgList Args;
    Args.add(RValue::get(Addr), AddrTy);
    Args.add(RValue::get(Tmp), Context.VoidPtrTy);
    Args.add(RValue::get(SizeArg), Context.getSizeType());
    if (CGM.getCodeGenOpts().UPCDebug) {
      getFileAndLine(*this, Loc, &Args);
      Name += '5';
    } else {
      Name += '3';
    }
    EmitUPCCall(*this, Name, getContext().VoidTy, Args);
  }
}
Example #15
static void
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
                  SourceLocation Loc, CharUnits SizeInChars) {
  if (UseOptimizedLibcall) {
    // Load value and pass it to the function directly.
    unsigned Align = CGF.getContext().getTypeAlignInChars(ValTy).getQuantity();
    int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
    ValTy =
        CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
    llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
                                                SizeInBits)->getPointerTo();
    Val = CGF.EmitLoadOfScalar(CGF.Builder.CreateBitCast(Val, IPtrTy), false,
                               Align, CGF.getContext().getPointerType(ValTy),
                               Loc);
    // Coerce the value into an appropriately sized integer type.
    Args.add(RValue::get(Val), ValTy);
  } else {
    // Non-optimized functions always take a reference.
    Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
                         CGF.getContext().VoidPtrTy);
  }
}
Example #16
/// \brief Emit a libcall for a binary operation on complex types.
ComplexPairTy ComplexExprEmitter::EmitComplexBinOpLibCall(StringRef LibCallName,
                                                          const BinOpInfo &Op) {
  CallArgList Args;
  Args.add(RValue::get(Op.LHS.first),
           Op.Ty->castAs<ComplexType>()->getElementType());
  Args.add(RValue::get(Op.LHS.second),
           Op.Ty->castAs<ComplexType>()->getElementType());
  Args.add(RValue::get(Op.RHS.first),
           Op.Ty->castAs<ComplexType>()->getElementType());
  Args.add(RValue::get(Op.RHS.second),
           Op.Ty->castAs<ComplexType>()->getElementType());

  // We *must* use the full CG function call building logic here because the
  // complex type has special ABI handling. We must also not forget the
  // special calling convention that may be used for compiler builtins.

  // We create a function qualified type to state that this call does not have
  // any exceptions.
  FunctionProtoType::ExtProtoInfo EPI;
  EPI = EPI.withExceptionSpec(
      FunctionProtoType::ExceptionSpecInfo(EST_BasicNoexcept));
  SmallVector<QualType, 4> ArgsQTys(
      4, Op.Ty->castAs<ComplexType>()->getElementType());
  QualType FQTy = CGF.getContext().getFunctionType(Op.Ty, ArgsQTys, EPI);
  const CGFunctionInfo &FuncInfo = CGF.CGM.getTypes().arrangeFreeFunctionCall(
      Args, cast<FunctionType>(FQTy.getTypePtr()), false);

  llvm::FunctionType *FTy = CGF.CGM.getTypes().GetFunctionType(FuncInfo);
  llvm::Constant *Func = CGF.CGM.CreateBuiltinFunction(FTy, LibCallName);
  CGCallee Callee = CGCallee::forDirect(Func, FQTy->getAs<FunctionProtoType>());

  llvm::Instruction *Call;
  RValue Res = CGF.EmitCall(FuncInfo, Callee, ReturnValueSlot(), Args, &Call);
  cast<llvm::CallInst>(Call)->setCallingConv(CGF.CGM.getBuiltinCC());
  return Res.getComplexVal();
}
Example #17
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE) {
  const MemberExpr *ME = cast<MemberExpr>(CE->getCallee());
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
  assert(MD->isInstance() && 
         "Trying to emit a member call expr on a static method!");
  
  const FunctionProtoType *FPT = MD->getType()->getAsFunctionProtoType();
  const llvm::Type *Ty = 
    CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD), 
                                   FPT->isVariadic());
  llvm::Constant *Callee = CGM.GetAddrOfFunction(MD, Ty);
  
  llvm::Value *BaseValue = 0;
  
  // There's a deref operator node added in Sema::BuildCallToMemberFunction
  // that gives the wrong type for '->' call expressions, so we just reject
  // them as unsupported for now.
  if (ME->isArrow())
    return EmitUnsupportedRValue(CE, "C++ member call expr");
  else {
    LValue BaseLV = EmitLValue(ME->getBase());
    BaseValue = BaseLV.getAddress();
  }
  
  CallArgList Args;
  
  // Push the 'this' pointer.
  Args.push_back(std::make_pair(RValue::get(BaseValue), 
                                MD->getThisType(getContext())));
  
  EmitCallArgs(Args, FPT, CE->arg_begin(), CE->arg_end());
  
  QualType ResultType = MD->getType()->getAsFunctionType()->getResultType();
  return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args), 
                  Callee, Args, MD);
}
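A note on scope: this early version only handles calls through '.', bailing out on '->'; a minimal example of each case:

struct S { int f(int); };
void g(S s, S *p) {
  s.f(1);  // handled: the base lvalue is taken with EmitLValue
  p->f(1); // rejected above via EmitUnsupportedRValue
}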
Example #18
RValue EmitUPCCall(CodeGenFunction &CGF,
                   llvm::StringRef Name,
                   QualType ResultTy,
                   const CallArgList& Args) {
  ASTContext &Context = CGF.CGM.getContext();
  llvm::SmallVector<QualType, 5> ArgTypes;

  for (CallArgList::const_iterator iter = Args.begin(),
         end = Args.end(); iter != end; ++iter) {
    ArgTypes.push_back(iter->Ty);
  }

  QualType FuncType =
    Context.getFunctionType(ResultTy,
                            ArgTypes,
                            FunctionProtoType::ExtProtoInfo());
    const CGFunctionInfo &Info =
      CGF.getTypes().arrangeFreeFunctionCall(Args, FuncType->castAs<FunctionType>());
    llvm::FunctionType * FTy =
      cast<llvm::FunctionType>(CGF.ConvertType(FuncType));
    llvm::Value * Fn = CGF.CGM.CreateRuntimeFunction(FTy, Name);

    return CGF.EmitCall(Info, Fn, ReturnValueSlot(), Args);
}
Example #19
llvm::Value *CodeGenFunction::EmitUPCLoad(llvm::Value *Addr,
                                          bool isStrict,
                                          llvm::Type *LTy,
                                          CharUnits Align,
                                          SourceLocation Loc) {
  const ASTContext& Context = getContext();
  const llvm::DataLayout &Target = CGM.getDataLayout();
  uint64_t Size = Target.getTypeSizeInBits(LTy);
  QualType ArgTy = Context.getPointerType(Context.getSharedType(Context.VoidTy));
  QualType ResultTy;
  llvm::SmallString<16> Name("__get");
  if (isStrict) Name += 's';
  if (CGM.getCodeGenOpts().UPCDebug) Name += "g";

  if (const char * ID = getUPCTypeID(*this, &ResultTy, LTy, Size, Context.toBits(Align))) {
    Name += ID;

    CallArgList Args;
    Args.add(RValue::get(Addr), ArgTy);
    if (CGM.getCodeGenOpts().UPCDebug) {
      getFileAndLine(*this, Loc, &Args);
      Name += '3';
    } else {
      Name += '2';
    }
    RValue Result = EmitUPCCall(*this, Name, ResultTy, Args);
    llvm::Value *Value = Result.getScalarVal();
    if (LTy->isPointerTy())
      Value = Builder.CreateIntToPtr(Value, LTy);
    else
      Value = Builder.CreateBitCast(Value, LTy);
    return Value;
  } else {
    Name += "blk";

    llvm::AllocaInst *Mem = CreateTempAlloca(LTy);
    Mem->setAlignment(Target.getABITypeAlignment(LTy));
    llvm::Value *Tmp = Builder.CreateBitCast(Mem, VoidPtrTy);
    llvm::Value *SizeArg = llvm::ConstantInt::get(SizeTy, Size/Context.getCharWidth());
    
    CallArgList Args;
    Args.add(RValue::get(Tmp), Context.VoidPtrTy);
    Args.add(RValue::get(Addr), ArgTy);
    Args.add(RValue::get(SizeArg), Context.getSizeType());
    if (CGM.getCodeGenOpts().UPCDebug) {
      getFileAndLine(*this, Loc, &Args);
      Name += '5';
    } else {
      Name += '3';
    }
    EmitUPCCall(*this, Name, getContext().VoidTy, Args);

    return Builder.CreateLoad(Mem);
  }
}
Example #20
// Transforms a call to printf into a call to the NVPTX vprintf syscall (which
// isn't particularly special; it's invoked just like a regular function).
// vprintf takes two args: A format string, and a pointer to a buffer containing
// the varargs.
//
// For example, the call
//
//   printf("format string", arg1, arg2, arg3);
//
// is converted into something resembling
//
//   struct Tmp {
//     Arg1 a1;
//     Arg2 a2;
//     Arg3 a3;
//   };
//   char* buf = alloca(sizeof(Tmp));
//   *(Tmp*)buf = {a1, a2, a3};
//   vprintf("format string", buf);
//
// buf is aligned to the max of {alignof(Arg1), ...}.  Furthermore, each of the
// args is itself aligned to its preferred alignment.
//
// Note that by the time this function runs, E's args have already undergone the
// standard C vararg promotion (short -> int, float -> double, etc.).
RValue
CodeGenFunction::EmitNVPTXDevicePrintfCallExpr(const CallExpr *E,
                                               ReturnValueSlot ReturnValue) {
  assert(getTarget().getTriple().isNVPTX());
  assert(E->getBuiltinCallee() == Builtin::BIprintf);
  assert(E->getNumArgs() >= 1); // printf always has at least one arg.

  const llvm::DataLayout &DL = CGM.getDataLayout();
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();

  CallArgList Args;
  EmitCallArgs(Args,
               E->getDirectCallee()->getType()->getAs<FunctionProtoType>(),
               E->arguments(), E->getDirectCallee(),
               /* ParamsToSkip = */ 0);

  // We don't know how to emit non-scalar varargs.
  if (std::any_of(Args.begin() + 1, Args.end(), [&](const CallArg &A) {
        return !A.getRValue(*this).isScalar();
      })) {
    CGM.ErrorUnsupported(E, "non-scalar arg to printf");
    return RValue::get(llvm::ConstantInt::get(IntTy, 0));
  }

  // Construct and fill the args buffer that we'll pass to vprintf.
  llvm::Value *BufferPtr;
  if (Args.size() <= 1) {
    // If there are no args, pass a null pointer to vprintf.
    BufferPtr = llvm::ConstantPointerNull::get(llvm::Type::getInt8PtrTy(Ctx));
  } else {
    llvm::SmallVector<llvm::Type *, 8> ArgTypes;
    for (unsigned I = 1, NumArgs = Args.size(); I < NumArgs; ++I)
      ArgTypes.push_back(Args[I].getRValue(*this).getScalarVal()->getType());

    // Using llvm::StructType is correct only because printf doesn't accept
    // aggregates.  If we had to handle aggregates here, we'd have to manually
    // compute the offsets within the alloca -- we wouldn't be able to assume
    // that the alignment of the llvm type was the same as the alignment of the
    // clang type.
    llvm::Type *AllocaTy = llvm::StructType::create(ArgTypes, "printf_args");
    llvm::Value *Alloca = CreateTempAlloca(AllocaTy);

    for (unsigned I = 1, NumArgs = Args.size(); I < NumArgs; ++I) {
      llvm::Value *P = Builder.CreateStructGEP(AllocaTy, Alloca, I - 1);
      llvm::Value *Arg = Args[I].getRValue(*this).getScalarVal();
      Builder.CreateAlignedStore(Arg, P, DL.getPrefTypeAlignment(Arg->getType()));
    }
    BufferPtr = Builder.CreatePointerCast(Alloca, llvm::Type::getInt8PtrTy(Ctx));
  }

  // Invoke vprintf and return.
  llvm::Function* VprintfFunc = GetVprintfDeclaration(CGM.getModule());
  return RValue::get(Builder.CreateCall(
      VprintfFunc, {Args[0].getRValue(*this).getScalarVal(), BufferPtr}));
}
Example #21
llvm::Constant *
CodeGenFunction::GenerateCovariantThunk(llvm::Function *Fn,
                                   GlobalDecl GD, bool Extern,
                                   const CovariantThunkAdjustment &Adjustment) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  QualType ResultType = FPT->getResultType();

  FunctionArgList Args;
  ImplicitParamDecl *ThisDecl =
    ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0,
                              MD->getThisType(getContext()));
  Args.push_back(std::make_pair(ThisDecl, ThisDecl->getType()));
  for (FunctionDecl::param_const_iterator i = MD->param_begin(),
         e = MD->param_end();
       i != e; ++i) {
    ParmVarDecl *D = *i;
    Args.push_back(std::make_pair(D, D->getType()));
  }
  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__thunk_named_foo_");
  FunctionDecl *FD = FunctionDecl::Create(getContext(),
                                          getContext().getTranslationUnitDecl(),
                                          SourceLocation(), II, ResultType, 0,
                                          Extern
                                            ? FunctionDecl::Extern
                                            : FunctionDecl::Static,
                                          false, true);
  StartFunction(FD, ResultType, Fn, Args, SourceLocation());

  // generate body
  const llvm::Type *Ty =
    CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
                                   FPT->isVariadic());
  llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty);

  CallArgList CallArgs;

  bool ShouldAdjustReturnPointer = true;
  QualType ArgType = MD->getThisType(getContext());
  llvm::Value *Arg = Builder.CreateLoad(LocalDeclMap[ThisDecl], "this");
  if (!Adjustment.ThisAdjustment.isEmpty()) {
    // Do the this adjustment.
    const llvm::Type *OrigTy = Callee->getType();
    Arg = DynamicTypeAdjust(Arg, Adjustment.ThisAdjustment);
    
    if (!Adjustment.ReturnAdjustment.isEmpty()) {
      const CovariantThunkAdjustment &ReturnAdjustment = 
        CovariantThunkAdjustment(ThunkAdjustment(),
                                 Adjustment.ReturnAdjustment);
      
      Callee = CGM.BuildCovariantThunk(GD, Extern, ReturnAdjustment);
      
      Callee = Builder.CreateBitCast(Callee, OrigTy);
      ShouldAdjustReturnPointer = false;
    }
  }    

  CallArgs.push_back(std::make_pair(RValue::get(Arg), ArgType));

  for (FunctionDecl::param_const_iterator i = MD->param_begin(),
         e = MD->param_end();
       i != e; ++i) {
    ParmVarDecl *D = *i;
    QualType ArgType = D->getType();

    // llvm::Value *Arg = CGF.GetAddrOfLocalVar(Dst);
    Expr *Arg = new (getContext()) DeclRefExpr(D, ArgType.getNonReferenceType(),
                                               SourceLocation());
    CallArgs.push_back(std::make_pair(EmitCallArg(Arg, ArgType), ArgType));
  }

  RValue RV = EmitCall(CGM.getTypes().getFunctionInfo(ResultType, CallArgs,
                                                      FPT->getCallConv(),
                                                      FPT->getNoReturnAttr()),
                       Callee, ReturnValueSlot(), CallArgs, MD);
  if (ShouldAdjustReturnPointer && !Adjustment.ReturnAdjustment.isEmpty()) {
    bool CanBeZero = !(ResultType->isReferenceType()
    // FIXME: attr nonnull can't be zero either
                       /* || ResultType->hasAttr<NonNullAttr>() */ );
    // Do the return result adjustment.
    if (CanBeZero) {
      llvm::BasicBlock *NonZeroBlock = createBasicBlock();
      llvm::BasicBlock *ZeroBlock = createBasicBlock();
      llvm::BasicBlock *ContBlock = createBasicBlock();

      const llvm::Type *Ty = RV.getScalarVal()->getType();
      llvm::Value *Zero = llvm::Constant::getNullValue(Ty);
      Builder.CreateCondBr(Builder.CreateICmpNE(RV.getScalarVal(), Zero),
                           NonZeroBlock, ZeroBlock);
      EmitBlock(NonZeroBlock);
      llvm::Value *NZ = 
        DynamicTypeAdjust(RV.getScalarVal(), Adjustment.ReturnAdjustment);
      EmitBranch(ContBlock);
      EmitBlock(ZeroBlock);
      llvm::Value *Z = RV.getScalarVal();
      EmitBlock(ContBlock);
      llvm::PHINode *RVOrZero = Builder.CreatePHI(Ty);
      RVOrZero->reserveOperandSpace(2);
      RVOrZero->addIncoming(NZ, NonZeroBlock);
      RVOrZero->addIncoming(Z, ZeroBlock);
      RV = RValue::get(RVOrZero);
    } else
      RV = RValue::get(DynamicTypeAdjust(RV.getScalarVal(), 
                                         Adjustment.ReturnAdjustment));
  }

  if (!ResultType->isVoidType())
    EmitReturnOfRValue(RV, ResultType);

  FinishFunction();
  return Fn;
}
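For orientation, a covariant thunk is needed when the overrider's return type differs from the base's return type by a non-trivial adjustment; a standard C++ shape that triggers it:

struct A { virtual A *clone(); };
struct Pad { virtual ~Pad(); int data; };
struct B : Pad, A {    // the A subobject sits at a nonzero offset inside B
  B *clone() override; // covariant return: B* must be adjusted to A*
};
// A caller through A::clone reaches a thunk that invokes B::clone and, when
// the result is non-null, applies the B-to-A offset -- the
// NonZeroBlock/ZeroBlock/ContBlock branch emitted above.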
Example #22
/// EmitLocalBlockVarDecl - Emit code and set up an entry in LocalDeclMap for a
/// variable declaration with auto, register, or no storage class specifier.
/// These turn into simple stack objects, or GlobalValues depending on target.
void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
  QualType Ty = D.getType();
  bool isByRef = D.hasAttr<BlocksAttr>();
  bool needsDispose = false;
  unsigned Align = 0;
  bool IsSimpleConstantInitializer = false;

  llvm::Value *DeclPtr;
  if (Ty->isConstantSizeType()) {
    if (!Target.useGlobalsForAutomaticVariables()) {
      
      // If this value is an array or struct, is POD, and the initializer is
      // a statically determinable constant, try to optimize it.
      if (D.getInit() && !isByRef &&
          (Ty->isArrayType() || Ty->isRecordType()) &&
          Ty->isPODType() &&
          D.getInit()->isConstantInitializer(getContext())) {
        // If this variable is marked 'const', emit the value as a global.
        if (CGM.getCodeGenOpts().MergeAllConstants &&
            Ty.isConstant(getContext())) {
          EmitStaticBlockVarDecl(D);
          return;
        }
        
        IsSimpleConstantInitializer = true;
      }
      
      // A normal fixed sized variable becomes an alloca in the entry block.
      const llvm::Type *LTy = ConvertTypeForMem(Ty);
      if (isByRef)
        LTy = BuildByRefType(&D);
      llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
      Alloc->setName(D.getNameAsString());

      Align = getContext().getDeclAlignInBytes(&D);
      if (isByRef)
        Align = std::max(Align, unsigned(Target.getPointerAlign(0) / 8));
      Alloc->setAlignment(Align);
      DeclPtr = Alloc;
    } else {
      // Targets that don't support recursion emit locals as globals.
      const char *Class =
        D.getStorageClass() == VarDecl::Register ? ".reg." : ".auto.";
      DeclPtr = CreateStaticBlockVarDecl(D, Class,
                                         llvm::GlobalValue
                                         ::InternalLinkage);
    }

    // FIXME: Can this happen?
    if (Ty->isVariablyModifiedType())
      EmitVLASize(Ty);
  } else {
    EnsureInsertPoint();

    if (!DidCallStackSave) {
      // Save the stack.
      const llvm::Type *LTy = llvm::Type::getInt8PtrTy(VMContext);
      llvm::Value *Stack = CreateTempAlloca(LTy, "saved_stack");

      llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
      llvm::Value *V = Builder.CreateCall(F);

      Builder.CreateStore(V, Stack);

      DidCallStackSave = true;

      {
        // Push a cleanup block and restore the stack there.
        DelayedCleanupBlock scope(*this);

        V = Builder.CreateLoad(Stack, "tmp");
        llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
        Builder.CreateCall(F, V);
      }
    }

    // Get the element type.
    const llvm::Type *LElemTy = ConvertTypeForMem(Ty);
    const llvm::Type *LElemPtrTy =
      llvm::PointerType::get(LElemTy, D.getType().getAddressSpace());

    llvm::Value *VLASize = EmitVLASize(Ty);

    // Downcast the VLA size expression
    VLASize = Builder.CreateIntCast(VLASize, llvm::Type::getInt32Ty(VMContext),
                                    false, "tmp");

    // Allocate memory for the array.
    llvm::AllocaInst *VLA = 
      Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext), VLASize, "vla");
    VLA->setAlignment(getContext().getDeclAlignInBytes(&D));

    DeclPtr = Builder.CreateBitCast(VLA, LElemPtrTy, "tmp");
  }

  llvm::Value *&DMEntry = LocalDeclMap[&D];
  assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
  DMEntry = DeclPtr;

  // Emit debug info for local var declaration.
  if (CGDebugInfo *DI = getDebugInfo()) {
    assert(HaveInsertPoint() && "Unexpected unreachable point!");

    DI->setLocation(D.getLocation());
    if (Target.useGlobalsForAutomaticVariables()) {
      DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(DeclPtr), &D);
    } else
      DI->EmitDeclareOfAutoVariable(&D, DeclPtr, Builder);
  }

  // If this local has an initializer, emit it now.
  const Expr *Init = D.getInit();

  // If we are at an unreachable point, we don't need to emit the initializer
  // unless it contains a label.
  if (!HaveInsertPoint()) {
    if (!ContainsLabel(Init))
      Init = 0;
    else
      EnsureInsertPoint();
  }

  if (Init) {
    llvm::Value *Loc = DeclPtr;
    if (isByRef)
      Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D), 
                                    D.getNameAsString());

    bool isVolatile =
      getContext().getCanonicalType(D.getType()).isVolatileQualified();
    
    // If the initializer was a simple constant initializer, we can optimize it
    // in various ways.
    if (IsSimpleConstantInitializer) {
      llvm::Constant *Init = CGM.EmitConstantExpr(D.getInit(),D.getType(),this);
      assert(Init != 0 && "Wasn't a simple constant init?");
      
      llvm::Value *AlignVal = 
        llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Align);
      const llvm::Type *IntPtr =
        llvm::IntegerType::get(VMContext, LLVMPointerWidth);
      llvm::Value *SizeVal =
        llvm::ConstantInt::get(IntPtr, getContext().getTypeSizeInBytes(Ty));

      const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
      if (Loc->getType() != BP)
        Loc = Builder.CreateBitCast(Loc, BP, "tmp");
      
      // If the initializer is all zeros, codegen with memset.
      if (isa<llvm::ConstantAggregateZero>(Init)) {
        llvm::Value *Zero =
          llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 0);
        Builder.CreateCall4(CGM.getMemSetFn(), Loc, Zero, SizeVal, AlignVal);
      } else {
        // Otherwise, create a temporary global with the initializer then 
        // memcpy from the global to the alloca.
        std::string Name = GetStaticDeclName(*this, D, ".");
        llvm::GlobalVariable *GV =
          new llvm::GlobalVariable(CGM.getModule(), Init->getType(), true,
                                   llvm::GlobalValue::InternalLinkage,
                                   Init, Name, 0, false, 0);
        GV->setAlignment(Align);

        llvm::Value *SrcPtr = GV;
        if (SrcPtr->getType() != BP)
          SrcPtr = Builder.CreateBitCast(SrcPtr, BP, "tmp");
        
        Builder.CreateCall4(CGM.getMemCpyFn(), Loc, SrcPtr, SizeVal, AlignVal);
      }
    } else if (Ty->isReferenceType()) {
      RValue RV = EmitReferenceBindingToExpr(Init, Ty, /*IsInitializer=*/true);
      EmitStoreOfScalar(RV.getScalarVal(), Loc, false, Ty);
    } else if (!hasAggregateLLVMType(Init->getType())) {
      llvm::Value *V = EmitScalarExpr(Init);
      EmitStoreOfScalar(V, Loc, isVolatile, D.getType());
    } else if (Init->getType()->isAnyComplexType()) {
      EmitComplexExprIntoAddr(Init, Loc, isVolatile);
    } else {
      EmitAggExpr(Init, Loc, isVolatile);
    }
  }

  if (isByRef) {
    const llvm::PointerType *PtrToInt8Ty = llvm::Type::getInt8PtrTy(VMContext);

    EnsureInsertPoint();
    llvm::Value *isa_field = Builder.CreateStructGEP(DeclPtr, 0);
    llvm::Value *forwarding_field = Builder.CreateStructGEP(DeclPtr, 1);
    llvm::Value *flags_field = Builder.CreateStructGEP(DeclPtr, 2);
    llvm::Value *size_field = Builder.CreateStructGEP(DeclPtr, 3);
    llvm::Value *V;
    int flag = 0;
    int flags = 0;

    needsDispose = true;

    if (Ty->isBlockPointerType()) {
      flag |= BLOCK_FIELD_IS_BLOCK;
      flags |= BLOCK_HAS_COPY_DISPOSE;
    } else if (BlockRequiresCopying(Ty)) {
      flag |= BLOCK_FIELD_IS_OBJECT;
      flags |= BLOCK_HAS_COPY_DISPOSE;
    }

    // FIXME: Someone double check this.
    if (Ty.isObjCGCWeak())
      flag |= BLOCK_FIELD_IS_WEAK;

    int isa = 0;
    if (flag&BLOCK_FIELD_IS_WEAK)
      isa = 1;
    V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), isa);
    V = Builder.CreateIntToPtr(V, PtrToInt8Ty, "isa");
    Builder.CreateStore(V, isa_field);

    Builder.CreateStore(DeclPtr, forwarding_field);

    V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), flags);
    Builder.CreateStore(V, flags_field);

    const llvm::Type *V1;
    V1 = cast<llvm::PointerType>(DeclPtr->getType())->getElementType();
    V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
                               (CGM.getTargetData().getTypeStoreSizeInBits(V1)
                                / 8));
    Builder.CreateStore(V, size_field);

    if (flags & BLOCK_HAS_COPY_DISPOSE) {
      BlockHasCopyDispose = true;
      llvm::Value *copy_helper = Builder.CreateStructGEP(DeclPtr, 4);
      Builder.CreateStore(BuildbyrefCopyHelper(DeclPtr->getType(), flag, Align),
                          copy_helper);

      llvm::Value *destroy_helper = Builder.CreateStructGEP(DeclPtr, 5);
      Builder.CreateStore(BuildbyrefDestroyHelper(DeclPtr->getType(), flag,
                                                  Align),
                          destroy_helper);
    }
  }

  // Handle CXX destruction of variables.
  QualType DtorTy(Ty);
  while (const ArrayType *Array = getContext().getAsArrayType(DtorTy))
    DtorTy = getContext().getBaseElementType(Array);
  if (const RecordType *RT = DtorTy->getAs<RecordType>())
    if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
      if (!ClassDecl->hasTrivialDestructor()) {
        const CXXDestructorDecl *D = ClassDecl->getDestructor(getContext());
        assert(D && "EmitLocalBlockVarDecl - destructor is null");
        
        if (const ConstantArrayType *Array = 
              getContext().getAsConstantArrayType(Ty)) {
          {
            DelayedCleanupBlock Scope(*this);
            QualType BaseElementTy = getContext().getBaseElementType(Array);
            const llvm::Type *BasePtr = ConvertType(BaseElementTy);
            BasePtr = llvm::PointerType::getUnqual(BasePtr);
            llvm::Value *BaseAddrPtr =
              Builder.CreateBitCast(DeclPtr, BasePtr);
            EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
          
            // Make sure to jump to the exit block.
            EmitBranch(Scope.getCleanupExitBlock());
          }
          if (Exceptions) {
            EHCleanupBlock Cleanup(*this);
            QualType BaseElementTy = getContext().getBaseElementType(Array);
            const llvm::Type *BasePtr = ConvertType(BaseElementTy);
            BasePtr = llvm::PointerType::getUnqual(BasePtr);
            llvm::Value *BaseAddrPtr =
              Builder.CreateBitCast(DeclPtr, BasePtr);
            EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
          }
        } else {
          {
            DelayedCleanupBlock Scope(*this);
            EmitCXXDestructorCall(D, Dtor_Complete, DeclPtr);

            // Make sure to jump to the exit block.
            EmitBranch(Scope.getCleanupExitBlock());
          }
          if (Exceptions) {
            EHCleanupBlock Cleanup(*this);
            EmitCXXDestructorCall(D, Dtor_Complete, DeclPtr);
          }
        }
      }
  }

  // Handle the cleanup attribute
  if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
    const FunctionDecl *FD = CA->getFunctionDecl();

    llvm::Constant* F = CGM.GetAddrOfFunction(FD);
    assert(F && "Could not find function!");

    const CGFunctionInfo &Info = CGM.getTypes().getFunctionInfo(FD);

    // In some cases, the type of the function argument will be different from
    // the type of the pointer. An example of this is
    // void f(void* arg);
    // __attribute__((cleanup(f))) void *g;
    //
    // To fix this we insert a bitcast here.
    QualType ArgTy = Info.arg_begin()->type;
    {
      DelayedCleanupBlock scope(*this);

      CallArgList Args;
      Args.push_back(std::make_pair(RValue::get(Builder.CreateBitCast(DeclPtr,
                                                           ConvertType(ArgTy))),
                                    getContext().getPointerType(D.getType())));
      EmitCall(Info, F, Args);
    }
    if (Exceptions) {
      EHCleanupBlock Cleanup(*this);

      CallArgList Args;
      Args.push_back(std::make_pair(RValue::get(Builder.CreateBitCast(DeclPtr,
                                                           ConvertType(ArgTy))),
                                    getContext().getPointerType(D.getType())));
      EmitCall(Info, F, Args);
    }
  }

  if (needsDispose && CGM.getLangOptions().getGCMode() != LangOptions::GCOnly) {
    {
      DelayedCleanupBlock scope(*this);
      llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
      V = Builder.CreateLoad(V);
      BuildBlockRelease(V);
    }
    // FIXME: Turn this on and audit the codegen
    if (0 && Exceptions) {
      EHCleanupBlock Cleanup(*this);
      llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
      V = Builder.CreateLoad(V);
      BuildBlockRelease(V);
    }
  }
}
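This function covers several declaration shapes at once; a compact sketch of the source constructs that drive its major branches (a hedged example, assuming blocks support is enabled for the __block case):

void demo(int n) {
  const int table[3] = {1, 2, 3}; // POD + constant init: memset/memcpy path
  __block int counter = 0;        // hasAttr<BlocksAttr>: byref struct layout
  int vla[n];                     // variably modified: stacksave + i8 alloca
  (void)table; (void)counter; (void)vla;
}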
Example #23
void CodeGenFunction::EmitCallAndReturnForThunk(llvm::Constant *CalleePtr,
                                                const ThunkInfo *Thunk) {
  assert(isa<CXXMethodDecl>(CurGD.getDecl()) &&
         "Please use a new CGF for this thunk");
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(CurGD.getDecl());

  // Adjust the 'this' pointer if necessary
  llvm::Value *AdjustedThisPtr =
    Thunk ? CGM.getCXXABI().performThisAdjustment(
                          *this, LoadCXXThisAddress(), Thunk->This)
          : LoadCXXThis();

  if (CurFnInfo->usesInAlloca()) {
    // We don't handle return adjusting thunks, because they require us to call
    // the copy constructor.  For now, fall through and pretend the return
    // adjustment was empty so we don't crash.
    if (Thunk && !Thunk->Return.isEmpty()) {
      CGM.ErrorUnsupported(
          MD, "non-trivial argument copy for return-adjusting thunk");
    }
    EmitMustTailThunk(MD, AdjustedThisPtr, CalleePtr);
    return;
  }

  // Start building CallArgs.
  CallArgList CallArgs;
  QualType ThisType = MD->getThisType(getContext());
  CallArgs.add(RValue::get(AdjustedThisPtr), ThisType);

  if (isa<CXXDestructorDecl>(MD))
    CGM.getCXXABI().adjustCallArgsForDestructorThunk(*this, CurGD, CallArgs);

#ifndef NDEBUG
  unsigned PrefixArgs = CallArgs.size() - 1;
#endif
  // Add the rest of the arguments.
  for (const ParmVarDecl *PD : MD->parameters())
    EmitDelegateCallArg(CallArgs, PD, SourceLocation());

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

#ifndef NDEBUG
  const CGFunctionInfo &CallFnInfo = CGM.getTypes().arrangeCXXMethodCall(
      CallArgs, FPT, RequiredArgs::forPrototypePlus(FPT, 1, MD), PrefixArgs);
  assert(CallFnInfo.getRegParm() == CurFnInfo->getRegParm() &&
         CallFnInfo.isNoReturn() == CurFnInfo->isNoReturn() &&
         CallFnInfo.getCallingConvention() == CurFnInfo->getCallingConvention());
  assert(isa<CXXDestructorDecl>(MD) || // ignore dtor return types
         similar(CallFnInfo.getReturnInfo(), CallFnInfo.getReturnType(),
                 CurFnInfo->getReturnInfo(), CurFnInfo->getReturnType()));
  assert(CallFnInfo.arg_size() == CurFnInfo->arg_size());
  for (unsigned i = 0, e = CurFnInfo->arg_size(); i != e; ++i)
    assert(similar(CallFnInfo.arg_begin()[i].info,
                   CallFnInfo.arg_begin()[i].type,
                   CurFnInfo->arg_begin()[i].info,
                   CurFnInfo->arg_begin()[i].type));
#endif

  // Determine whether we have a return value slot to use.
  QualType ResultType = CGM.getCXXABI().HasThisReturn(CurGD)
                            ? ThisType
                            : CGM.getCXXABI().hasMostDerivedReturn(CurGD)
                                  ? CGM.getContext().VoidPtrTy
                                  : FPT->getReturnType();
  ReturnValueSlot Slot;
  if (!ResultType->isVoidType() &&
      CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
      !hasScalarEvaluationKind(CurFnInfo->getReturnType()))
    Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified());

  // Now emit our call.
  llvm::Instruction *CallOrInvoke;
  CGCallee Callee = CGCallee::forDirect(CalleePtr, MD);
  RValue RV = EmitCall(*CurFnInfo, Callee, Slot, CallArgs, &CallOrInvoke);

  // Consider return adjustment if we have ThunkInfo.
  if (Thunk && !Thunk->Return.isEmpty())
    RV = PerformReturnAdjustment(*this, ResultType, RV, *Thunk);
  else if (llvm::CallInst* Call = dyn_cast<llvm::CallInst>(CallOrInvoke))
    Call->setTailCallKind(llvm::CallInst::TCK_Tail);

  // Emit return.
  if (!ResultType->isVoidType() && Slot.isNull())
    CGM.getCXXABI().EmitReturnFromThunk(*this, RV, ResultType);

  // Disable the final ARC autorelease.
  AutoreleaseResult = false;

  FinishThunk();
}
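As a reminder of what drives the 'this' adjustment above, the classic multiple-inheritance shape:

struct X { virtual void f(); };
struct Y { virtual void g(); };
struct Z : X, Y {
  void g() override; // the entry in Y's vtable is a this-adjusting thunk:
                     // it shifts 'this' from the Y subobject back to Z,
                     // then calls the real Z::g.
};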
Example #24
/// Emit a load from an l-value of atomic type.  Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       AggValueSlot resultSlot) {
  AtomicInfo atomics(*this, src);

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    llvm::Value *tempAddr;
    if (!resultSlot.isIgnored()) {
      assert(atomics.getEvaluationKind() == TEK_Aggregate);
      tempAddr = resultSlot.getAddr();
    } else {
      tempAddr = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    }

    // void __atomic_load(size_t size, void *mem, void *return, int order);
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(src.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(tempAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_load", getContext().VoidTy, args);

    // Produce the r-value.
    return atomics.convertTempToRValue(tempAddr, resultSlot, loc);
  }

  // Okay, we're doing this natively.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(src.getAddress());
  llvm::LoadInst *load = Builder.CreateLoad(addr, "atomic-load");
  load->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  load->setAlignment(src.getAlignment().getQuantity());
  if (src.isVolatileQualified())
    load->setVolatile(true);
  if (src.getTBAAInfo())
    CGM.DecorateInstruction(load, src.getTBAAInfo());

  // Okay, turn that back into the original value type.
  QualType valueType = atomics.getValueType();
  llvm::Value *result = load;

  // If we're ignoring an aggregate return, don't do anything.
  if (atomics.getEvaluationKind() == TEK_Aggregate && resultSlot.isIgnored())
    return RValue::getAggregate(0, false);

  // The easiest way to do this is to go through memory, but we
  // try not to in some easy cases.
  if (atomics.getEvaluationKind() == TEK_Scalar && !atomics.hasPadding()) {
    llvm::Type *resultTy = CGM.getTypes().ConvertTypeForMem(valueType);
    if (isa<llvm::IntegerType>(resultTy)) {
      assert(result->getType() == resultTy);
      result = EmitFromMemory(result, valueType);
    } else if (isa<llvm::PointerType>(resultTy)) {
      result = Builder.CreateIntToPtr(result, resultTy);
    } else {
      result = Builder.CreateBitCast(result, resultTy);
    }
    return RValue::get(result);
  }

  // Create a temporary.  This needs to be big enough to hold the
  // atomic integer.
  llvm::Value *temp;
  bool tempIsVolatile = false;
  CharUnits tempAlignment;
  if (atomics.getEvaluationKind() == TEK_Aggregate) {
    assert(!resultSlot.isIgnored());
    temp = resultSlot.getAddr();
    tempAlignment = atomics.getValueAlignment();
    tempIsVolatile = resultSlot.isVolatile();
  } else {
    temp = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    tempAlignment = atomics.getAtomicAlignment();
  }

  // Slam the integer into the temporary.
  llvm::Value *castTemp = atomics.emitCastToAtomicIntPointer(temp);
  Builder.CreateAlignedStore(result, castTemp, tempAlignment.getQuantity())
    ->setVolatile(tempIsVolatile);

  return atomics.convertTempToRValue(temp, resultSlot, loc);
}
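
To make the branch above concrete, here is a hedged C11 sketch (names are illustrative); whether shouldUseLibcall() fires depends on the target's maximum inline atomic width and the value's alignment, so the split below is only the typical outcome:

#include <stdatomic.h>

struct Big { char bytes[64]; };  /* wider than any inline atomic on most targets */
_Atomic int counter;
_Atomic struct Big blob;

int read_counter(void) { return atomic_load(&counter); }   /* native atomic load */
struct Big read_blob(void) { return atomic_load(&blob); }  /* __atomic_load call */
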
Example #25
void CodeGenFunction::EmitCallAndReturnForThunk(GlobalDecl GD,
                                                llvm::Value *Callee,
                                                const ThunkInfo *Thunk) {
  assert(isa<CXXMethodDecl>(CurGD.getDecl()) &&
         "Please use a new CGF for this thunk");
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());

  // Adjust the 'this' pointer if necessary.
  llvm::Value *AdjustedThisPtr = Thunk ? CGM.getCXXABI().performThisAdjustment(
                                             *this, LoadCXXThis(), Thunk->This)
                                       : LoadCXXThis();

  // Start building CallArgs.
  CallArgList CallArgs;
  QualType ThisType = MD->getThisType(getContext());
  CallArgs.add(RValue::get(AdjustedThisPtr), ThisType);

  if (isa<CXXDestructorDecl>(MD))
    CGM.getCXXABI().adjustCallArgsForDestructorThunk(*this, GD, CallArgs);

  // Add the rest of the arguments.
  for (FunctionDecl::param_const_iterator I = MD->param_begin(),
       E = MD->param_end(); I != E; ++I)
    EmitDelegateCallArg(CallArgs, *I, (*I)->getLocStart());

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

#ifndef NDEBUG
  const CGFunctionInfo &CallFnInfo =
    CGM.getTypes().arrangeCXXMethodCall(CallArgs, FPT,
                                       RequiredArgs::forPrototypePlus(FPT, 1));
  assert(CallFnInfo.getRegParm() == CurFnInfo->getRegParm() &&
         CallFnInfo.isNoReturn() == CurFnInfo->isNoReturn() &&
         CallFnInfo.getCallingConvention() == CurFnInfo->getCallingConvention());
  assert(isa<CXXDestructorDecl>(MD) || // ignore dtor return types
         similar(CallFnInfo.getReturnInfo(), CallFnInfo.getReturnType(),
                 CurFnInfo->getReturnInfo(), CurFnInfo->getReturnType()));
  assert(CallFnInfo.arg_size() == CurFnInfo->arg_size());
  for (unsigned i = 0, e = CurFnInfo->arg_size(); i != e; ++i)
    assert(similar(CallFnInfo.arg_begin()[i].info,
                   CallFnInfo.arg_begin()[i].type,
                   CurFnInfo->arg_begin()[i].info,
                   CurFnInfo->arg_begin()[i].type));
#endif

  // Determine whether we have a return value slot to use.
  QualType ResultType =
      CGM.getCXXABI().HasThisReturn(GD) ? ThisType : FPT->getReturnType();
  ReturnValueSlot Slot;
  if (!ResultType->isVoidType() &&
      CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
      !hasScalarEvaluationKind(CurFnInfo->getReturnType()))
    Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified());
  
  // Now emit our call.
  RValue RV = EmitCall(*CurFnInfo, Callee, Slot, CallArgs, MD);
  
  // Consider return adjustment if we have ThunkInfo.
  if (Thunk && !Thunk->Return.isEmpty())
    RV = PerformReturnAdjustment(*this, ResultType, RV, *Thunk);

  // Emit return.
  if (!ResultType->isVoidType() && Slot.isNull())
    CGM.getCXXABI().EmitReturnFromThunk(*this, RV, ResultType);

  // Disable the final ARC autorelease.
  AutoreleaseResult = false;

  FinishFunction();
}
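
The adjustCallArgsForDestructorThunk() hook above exists because some ABIs give destructor thunks extra implicit arguments. A hedged sketch (hypothetical types): under the Microsoft C++ ABI, the deleting destructor takes an implicit integer flag, and a destructor reached through a non-primary base is emitted as a thunk that must forward that flag:

struct A { virtual ~A(); };
struct B { virtual ~B(); };
// B's slot in C's vtable holds a destructor thunk; the hook re-adds the
// implicit "should delete" argument that C's deleting destructor expects.
struct C : A, B { ~C(); };
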
Example #26
/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddr()->getType()->getPointerElementType()
           == dest.getAddress()->getType()->getPointerElementType());

  AtomicInfo atomics(*this, dest);

  // If this is an initialization, just put the value there normally.
  if (isInit) {
    atomics.emitCopyIntoMemory(rvalue, dest);
    return;
  }

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    // Produce a source address.
    llvm::Value *srcAddr = atomics.materializeRValue(rvalue);

    // void __atomic_store(size_t size, void *mem, void *val, int order)
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(dest.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(srcAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
    return;
  }

  // Okay, we're doing this natively.
  llvm::Value *intValue;

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  if (rvalue.isScalar() && !atomics.hasPadding()) {
    llvm::Value *value = rvalue.getScalarVal();
    if (isa<llvm::IntegerType>(value->getType())) {
      intValue = value;
    } else {
      llvm::IntegerType *inputIntTy =
        llvm::IntegerType::get(getLLVMContext(), atomics.getValueSizeInBits());
      if (isa<llvm::PointerType>(value->getType())) {
        intValue = Builder.CreatePtrToInt(value, inputIntTy);
      } else {
        intValue = Builder.CreateBitCast(value, inputIntTy);
      }
    }

  // Otherwise, we need to go through memory.
  } else {
    // Put the r-value in memory.
    llvm::Value *addr = atomics.materializeRValue(rvalue);

    // Cast the temporary to the atomic int type and pull a value out.
    addr = atomics.emitCastToAtomicIntPointer(addr);
    intValue = Builder.CreateAlignedLoad(addr,
                                 atomics.getAtomicAlignment().getQuantity());
  }

  // Do the atomic store.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(dest.getAddress());
  llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

  // Initializations don't need to be atomic.
  if (!isInit) store->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  store->setAlignment(dest.getAlignment().getQuantity());
  if (dest.isVolatileQualified())
    store->setVolatile(true);
  if (dest.getTBAAInfo())
    CGM.DecorateInstruction(store, dest.getTBAAInfo());
}
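
A hedged companion sketch for the store side (illustrative names): a scalar of exactly the atomic width stays in registers via CreatePtrToInt/CreateBitCast, while padded or aggregate values take the materializeRValue() detour through memory:

#include <stdatomic.h>

_Atomic(int *) gp;

void publish(int *p) {
  atomic_store(&gp, p);  /* scalar fast path: ptrtoint, then an atomic store */
}
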
Example #27
llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
  QualType AllocType = E->getAllocatedType();
  FunctionDecl *NewFD = E->getOperatorNew();
  const FunctionProtoType *NewFTy = NewFD->getType()->getAs<FunctionProtoType>();

  CallArgList NewArgs;

  // The allocation size is the first argument.
  QualType SizeTy = getContext().getSizeType();

  llvm::Value *NumElements = 0;
  llvm::Value *AllocSize = EmitCXXNewAllocSize(*this, E, NumElements);
  
  NewArgs.push_back(std::make_pair(RValue::get(AllocSize), SizeTy));

  // Emit the rest of the arguments.
  // FIXME: Ideally, this should just use EmitCallArgs.
  CXXNewExpr::const_arg_iterator NewArg = E->placement_arg_begin();

  // First, use the types from the function type.
  // We start at 1 here because the first argument (the allocation size)
  // has already been emitted.
  for (unsigned i = 1, e = NewFTy->getNumArgs(); i != e; ++i, ++NewArg) {
    QualType ArgType = NewFTy->getArgType(i);

    assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
           getTypePtr() ==
           getContext().getCanonicalType(NewArg->getType()).getTypePtr() &&
           "type mismatch in call argument!");

    NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
                                     ArgType));

  }

  // Either we've emitted all the call args, or we have a call to a
  // variadic function.
  assert((NewArg == E->placement_arg_end() || NewFTy->isVariadic()) &&
         "Extra arguments in non-variadic function!");

  // If we still have any arguments, emit them using the type of the argument.
  for (CXXNewExpr::const_arg_iterator NewArgEnd = E->placement_arg_end();
       NewArg != NewArgEnd; ++NewArg) {
    QualType ArgType = NewArg->getType();
    NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
                                     ArgType));
  }

  // Emit the call to new.
  RValue RV =
    EmitCall(CGM.getTypes().getFunctionInfo(NewFTy->getResultType(), NewArgs),
             CGM.GetAddrOfFunction(NewFD), NewArgs, NewFD);

  // If an allocation function is declared with an empty exception
  // specification, it returns null to indicate failure to allocate storage
  // ([expr.new]p13). (We don't need to check for null when there's no
  // new-initializer and we're allocating a POD type.)
  bool NullCheckResult = NewFTy->hasEmptyExceptionSpec() &&
    !(AllocType->isPODType() && !E->hasInitializer());

  llvm::BasicBlock *NewNull = 0;
  llvm::BasicBlock *NewNotNull = 0;
  llvm::BasicBlock *NewEnd = 0;

  llvm::Value *NewPtr = RV.getScalarVal();

  if (NullCheckResult) {
    NewNull = createBasicBlock("new.null");
    NewNotNull = createBasicBlock("new.notnull");
    NewEnd = createBasicBlock("new.end");

    llvm::Value *IsNull =
      Builder.CreateICmpEQ(NewPtr,
                           llvm::Constant::getNullValue(NewPtr->getType()),
                           "isnull");

    Builder.CreateCondBr(IsNull, NewNull, NewNotNull);
    EmitBlock(NewNotNull);
  }

  if (uint64_t CookiePadding = CalculateCookiePadding(getContext(), E)) {
    uint64_t CookieOffset = 
      CookiePadding - getContext().getTypeSize(SizeTy) / 8;
    
    llvm::Value *NumElementsPtr = 
      Builder.CreateConstInBoundsGEP1_64(NewPtr, CookieOffset);
    
    NumElementsPtr = Builder.CreateBitCast(NumElementsPtr, 
                                           ConvertType(SizeTy)->getPointerTo());
    Builder.CreateStore(NumElements, NumElementsPtr);

    // Now add the padding to the new ptr.
    NewPtr = Builder.CreateConstInBoundsGEP1_64(NewPtr, CookiePadding);
  }
  
  NewPtr = Builder.CreateBitCast(NewPtr, ConvertType(E->getType()));

  EmitNewInitializer(*this, E, NewPtr, NumElements);

  if (NullCheckResult) {
    Builder.CreateBr(NewEnd);
    NewNotNull = Builder.GetInsertBlock();
    EmitBlock(NewNull);
    Builder.CreateBr(NewEnd);
    EmitBlock(NewEnd);

    llvm::PHINode *PHI = Builder.CreatePHI(NewPtr->getType());
    PHI->reserveOperandSpace(2);
    PHI->addIncoming(NewPtr, NewNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(NewPtr->getType()), NewNull);

    NewPtr = PHI;
  }

  return NewPtr;
}
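
A hedged C++ sketch of what enables the extra machinery above (the overload resolution here is only illustrative): the nothrow allocation function is declared with an empty exception specification, which turns on the new.null/new.notnull branch, and array-new of a class with a non-trivial destructor also reserves cookie padding to record the element count:

#include <cstddef>  // std::size_t
#include <new>      // std::nothrow

struct S { ~S(); };

S *make(std::size_t n) {
  // NullCheckResult is true (operator new[] may return null, and S is not
  // POD), and a cookie slot before the elements stores 'n' for delete[].
  return new (std::nothrow) S[n];
}
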
Example #28
void CodeGenFunction::GenerateThunk(llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    GlobalDecl GD, const ThunkInfo &Thunk) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  QualType ResultType = FPT->getResultType();
  QualType ThisType = MD->getThisType(getContext());

  FunctionArgList FunctionArgs;

  // FIXME: It would be nice if more of this code could be shared with 
  // CodeGenFunction::GenerateCode.

  // Create the implicit 'this' parameter declaration.
  CurGD = GD;
  CGM.getCXXABI().BuildInstanceFunctionParams(*this, ResultType, FunctionArgs);

  // Add the rest of the parameters.
  for (FunctionDecl::param_const_iterator I = MD->param_begin(),
       E = MD->param_end(); I != E; ++I) {
    ParmVarDecl *Param = *I;
    
    FunctionArgs.push_back(Param);
  }
  
  StartFunction(GlobalDecl(), ResultType, Fn, FnInfo, FunctionArgs,
                SourceLocation());

  CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
  CXXThisValue = CXXABIThisValue;

  // Adjust the 'this' pointer if necessary.
  llvm::Value *AdjustedThisPtr = 
    PerformTypeAdjustment(*this, LoadCXXThis(), 
                          Thunk.This.NonVirtual, 
                          Thunk.This.VCallOffsetOffset,
                          /*IsReturnAdjustment*/false);
  
  CallArgList CallArgs;
  
  // Add our adjusted 'this' pointer.
  CallArgs.add(RValue::get(AdjustedThisPtr), ThisType);

  // Add the rest of the parameters.
  for (FunctionDecl::param_const_iterator I = MD->param_begin(),
       E = MD->param_end(); I != E; ++I) {
    ParmVarDecl *param = *I;
    EmitDelegateCallArg(CallArgs, param);
  }

  // Get our callee.
  llvm::Type *Ty =
    CGM.getTypes().GetFunctionType(CGM.getTypes().arrangeGlobalDeclaration(GD));
  llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);

#ifndef NDEBUG
  const CGFunctionInfo &CallFnInfo =
    CGM.getTypes().arrangeCXXMethodCall(CallArgs, FPT,
                                       RequiredArgs::forPrototypePlus(FPT, 1));
  assert(CallFnInfo.getRegParm() == FnInfo.getRegParm() &&
         CallFnInfo.isNoReturn() == FnInfo.isNoReturn() &&
         CallFnInfo.getCallingConvention() == FnInfo.getCallingConvention());
  assert(isa<CXXDestructorDecl>(MD) || // ignore dtor return types
         similar(CallFnInfo.getReturnInfo(), CallFnInfo.getReturnType(),
                 FnInfo.getReturnInfo(), FnInfo.getReturnType()));
  assert(CallFnInfo.arg_size() == FnInfo.arg_size());
  for (unsigned i = 0, e = FnInfo.arg_size(); i != e; ++i)
    assert(similar(CallFnInfo.arg_begin()[i].info,
                   CallFnInfo.arg_begin()[i].type,
                   FnInfo.arg_begin()[i].info, FnInfo.arg_begin()[i].type));
#endif
  
  // Determine whether we have a return value slot to use.
  ReturnValueSlot Slot;
  if (!ResultType->isVoidType() &&
      FnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect &&
      hasAggregateLLVMType(CurFnInfo->getReturnType()))
    Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified());
  
  // Now emit our call.
  RValue RV = EmitCall(FnInfo, Callee, Slot, CallArgs, MD);
  
  if (!Thunk.Return.isEmpty())
    RV = PerformReturnAdjustment(*this, ResultType, RV, Thunk);

  if (!ResultType->isVoidType() && Slot.isNull())
    CGM.getCXXABI().EmitReturnFromThunk(*this, RV, ResultType);

  // Disable the final ARC autorelease.
  AutoreleaseResult = false;

  FinishFunction();

  // Set the right linkage.
  CGM.setFunctionLinkage(MD, Fn);
  
  // Set the right visibility.
  setThunkVisibility(CGM, MD, Thunk, Fn);
}
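
Besides the 'this' adjustment, the PerformReturnAdjustment() branch above is exercised by covariant return types. A hedged sketch (hypothetical classes): when the overrider returns a pointer to a class sitting at a non-zero offset, the thunk must also shift the returned pointer:

struct Mixin { virtual void g(); };
struct Base  { virtual Base *clone(); };
struct Derived : Mixin, Base {
  // Covariant override: the thunk for Base's slot adjusts 'this' on entry
  // and converts the returned Derived* back to a Base* on exit.
  Derived *clone() override;
};
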
Example #29
/// Emit a compare-and-exchange op for atomic type.
///
std::pair<RValue, RValue> CodeGenFunction::EmitAtomicCompareExchange(
    LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
    AggValueSlot Slot) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!Expected.isAggregate() ||
         Expected.getAggregateAddr()->getType()->getPointerElementType() ==
             Obj.getAddress()->getType()->getPointerElementType());
  assert(!Desired.isAggregate() ||
         Desired.getAggregateAddr()->getType()->getPointerElementType() ==
             Obj.getAddress()->getType()->getPointerElementType());
  AtomicInfo Atomics(*this, Obj);

  if (Failure >= Success)
    // Don't assert on undefined behavior.
    Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);

  auto Alignment = Atomics.getValueAlignment();
  // Check whether we should use a library call.
  if (Atomics.shouldUseLibcall()) {
    // Materialize both operands in memory so we can pass their addresses.
    auto *ExpectedAddr = Atomics.materializeRValue(Expected);
    auto *DesiredAddr = Atomics.materializeRValue(Desired);
    // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
    // void *desired, int success, int failure);
    CallArgList Args;
    Args.add(RValue::get(Atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    Args.add(RValue::get(EmitCastToVoidPtr(Obj.getAddress())),
             getContext().VoidPtrTy);
    Args.add(RValue::get(EmitCastToVoidPtr(ExpectedAddr)),
             getContext().VoidPtrTy);
    Args.add(RValue::get(EmitCastToVoidPtr(DesiredAddr)),
             getContext().VoidPtrTy);
    Args.add(RValue::get(llvm::ConstantInt::get(IntTy, Success)),
             getContext().IntTy);
    Args.add(RValue::get(llvm::ConstantInt::get(IntTy, Failure)),
             getContext().IntTy);
    auto SuccessFailureRVal = emitAtomicLibcall(
        *this, "__atomic_compare_exchange", getContext().BoolTy, Args);
    auto *PreviousVal =
        Builder.CreateAlignedLoad(ExpectedAddr, Alignment.getQuantity());
    return std::make_pair(RValue::get(PreviousVal), SuccessFailureRVal);
  }

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  auto *ExpectedIntVal = Atomics.convertRValueToInt(Expected);
  auto *DesiredIntVal = Atomics.convertRValueToInt(Desired);

  // Do the atomic store.
  auto *Addr = Atomics.emitCastToAtomicIntPointer(Obj.getAddress());
  auto *Inst = Builder.CreateAtomicCmpXchg(Addr, ExpectedIntVal, DesiredIntVal,
                                          Success, Failure);
  // Other decoration.
  Inst->setVolatile(Obj.isVolatileQualified());
  Inst->setWeak(IsWeak);

  // Okay, turn that back into the original value type.
  auto *PreviousVal = Builder.CreateExtractValue(Inst, /*Idxs=*/0);
  auto *SuccessFailureVal = Builder.CreateExtractValue(Inst, /*Idxs=*/1);
  return std::make_pair(Atomics.convertIntToValue(PreviousVal, Slot, Loc),
                        RValue::get(SuccessFailureVal));
}
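
As a hedged C11 driver (illustrative), this is the kind of source that reaches the function above; a lock-free size lowers to a cmpxchg instruction, larger types go through the generic __atomic_compare_exchange libcall, and in both cases the failure ordering is clamped as shown:

#include <stdatomic.h>
#include <stdbool.h>

_Atomic int v;

bool try_bump(int expected) {
  /* a strong CAS, so IsWeak is false on the instruction path */
  return atomic_compare_exchange_strong(&v, &expected, expected + 1);
}
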
Example #30
RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits =
    getTarget().getMaxAtomicInlineWidth();
  bool UseLibcall = (Size != Align ||
                     getContext().toBits(sizeChars) > MaxInlineWidthInBits);

  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr, *Val1 = nullptr,
              *Val2 = nullptr;
  llvm::Value *Ptr = EmitScalarExpr(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    assert(!Dest && "Init does not return a value");
    LValue lvalue = LValue::MakeAddr(Ptr, AtomicTy, alignChars, getContext());
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  llvm::Value *Order = EmitScalarExpr(E->getOrder());

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    Dest = EmitScalarExpr(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitScalarExpr(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    if (E->getNumSubExprs() == 6)
      IsWeak = EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
      break;
    }
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  QualType RValTy = E->getType().getUnqualifiedType();

  auto GetDest = [&] {
    if (!RValTy->isVoidType() && !Dest) {
      Dest = CreateMemTemp(RValTy, ".atomicdst");
    }
    return Dest;
  };

  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {
    bool UseOptimizedLibcall = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      // For these, only library calls for certain sizes exist.
      UseOptimizedLibcall = true;
      break;
    default:
      // Only use optimized library calls for sizes for which they exist.
      if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
        UseOptimizedLibcall = true;
      break;
    }

    CallArgList Args;
    if (!UseOptimizedLibcall) {
      // For non-optimized library calls, the size is the first parameter
      Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
               getContext().getSizeType());
    }
    // Atomic address is the first or second parameter
    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), getContext().VoidPtrTy);

    std::string LibCallName;
    QualType LoweredMemTy =
      MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
    QualType RetTy;
    bool HaveRetTy = false;
    switch (E->getOp()) {
    // There is only one libcall for compare and exchange, because there is no
    // optimization benefit possible from a libcall version of a weak compare
    // and exchange.
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
    //                                  int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)), getContext().VoidPtrTy);
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2, MemTy,
                        E->getExprLoc(), sizeChars);
      Args.add(RValue::get(Order), getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    // T __atomic_exchange_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    // void __atomic_store_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    // T __atomic_load_N(T *mem, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    // T __atomic_fetch_add_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
      LibCallName = "__atomic_fetch_add";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_and_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
      LibCallName = "__atomic_fetch_and";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_or_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
      LibCallName = "__atomic_fetch_or";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_sub_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
      LibCallName = "__atomic_fetch_sub";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_xor_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      LibCallName = "__atomic_fetch_xor";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    default: return EmitUnsupportedRValue(E, "atomic library call");
    }

    // Optimized functions have the size in their name.
    if (UseOptimizedLibcall)
      LibCallName += "_" + llvm::utostr(Size);
    // By default, assume we return a value of the atomic type.
    if (!HaveRetTy) {
      if (UseOptimizedLibcall) {
        // Value is returned directly.
        // The function returns an appropriately sized integer type.
        RetTy = getContext().getIntTypeForBitwidth(
            getContext().toBits(sizeChars), /*Signed=*/false);
      } else {
        // Value is returned through an out-parameter placed before the order.
        RetTy = getContext().VoidTy;
        Args.add(RValue::get(EmitCastToVoidPtr(Dest)), getContext().VoidPtrTy);
      }
    }
    // The order is always the last parameter.
    Args.add(RValue::get(Order), getContext().IntTy);

    RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
    // The value is returned directly from the libcall.
    if (HaveRetTy && !RetTy->isVoidType())
      return Res;
    // The value is returned via an explicit out param.
    if (RetTy->isVoidType())
      return RValue::get(nullptr);
    // The value is returned directly for optimized libcalls, but the caller
    // expects an out-param.
    if (UseOptimizedLibcall) {
      llvm::Value *ResVal = Res.getScalarVal();
      llvm::StoreInst *StoreDest = Builder.CreateStore(
          ResVal,
          Builder.CreateBitCast(GetDest(), ResVal->getType()->getPointerTo()));
      StoreDest->setAlignment(Align);
    }
    return convertTempToRValue(Dest, RValTy, E->getExprLoc());
  }

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

  llvm::Type *ITy =
      llvm::IntegerType::get(getLLVMContext(), Size * 8);
  llvm::Value *OrigDest = GetDest();
  Ptr = Builder.CreateBitCast(
      Ptr, ITy->getPointerTo(Ptr->getType()->getPointerAddressSpace()));
  if (Val1) Val1 = Builder.CreateBitCast(Val1, ITy->getPointerTo());
  if (Val2) Val2 = Builder.CreateBitCast(Val2, ITy->getPointerTo());
  if (Dest && !E->isCmpXChg())
    Dest = Builder.CreateBitCast(Dest, ITy->getPointerTo());

  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case AtomicExpr::AO_ABI_memory_order_relaxed:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Monotonic);
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      if (IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Acquire);
      break;
    case AtomicExpr::AO_ABI_memory_order_release:
      if (IsLoad)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Release);
      break;
    case AtomicExpr::AO_ABI_memory_order_acq_rel:
      if (IsLoad || IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::AcquireRelease);
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should not ever get here normally, but it's hard to
      // enforce that in general.
      break;
    }
    if (RValTy->isVoidType())
      return RValue::get(nullptr);
    return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
  }

  // Long case, when Order isn't obviously constant.

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
               Size, Align, llvm::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::Acquire);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_release),
                ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acq_rel),
                AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
               Size, Align, llvm::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
              SeqCstBB);

  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
  if (RValTy->isVoidType())
    return RValue::get(nullptr);
  return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
}
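
Finally, a hedged C11 example of the "long case" at the bottom: when the ordering operand is not a compile-time constant, no single instruction can be picked, so the emitter builds the monotonic/acquire/release/acqrel/seqcst switch shown above:

#include <stdatomic.h>

int load_with(_Atomic int *p, memory_order order) {
  return atomic_load_explicit(p, order);  /* non-constant order: emits switch */
}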