Example #1
Decl *Sema::ActOnEntityDecl(ASTContext &C, const QualType &T,
                            SourceLocation IDLoc, const IdentifierInfo *IDInfo) {
  auto Quals = T.getQualifiers();

  if(Quals.hasAttributeSpec(Qualifiers::AS_external))
    return ActOnExternalEntityDecl(C, T, IDLoc, IDInfo);
  else if(Quals.hasAttributeSpec(Qualifiers::AS_intrinsic))
    return ActOnIntrinsicEntityDecl(C, T, IDLoc, IDInfo);

  if (auto Prev = LookupIdentifier(IDInfo)) {
    FunctionDecl *FD = dyn_cast<FunctionDecl>(Prev);
    if(auto VD = dyn_cast<VarDecl>(Prev)) {
      if(VD->isArgument() && VD->getType().isNull()) {
        VD->setType(T);
        return VD;
      } else if(VD->isFunctionResult())
         FD = CurrentContextAsFunction();
    }

    if(FD && (FD->isNormalFunction() || FD->isExternal())) {
      if(FD->getType().isNull() || FD->getType()->isVoidType()) {
        SetFunctionType(FD, T, IDLoc, SourceRange()); // FIXME: proper loc and range
        return FD;
      } else {
        Diags.Report(IDLoc, diag::err_func_return_type_already_specified) << IDInfo;
        return nullptr;
      }
    }
    Diags.Report(IDLoc, diag::err_redefinition) << IDInfo;
    Diags.Report(Prev->getLocation(), diag::note_previous_definition);
    return nullptr;
  }

  VarDecl *VD = VarDecl::Create(C, CurContext, IDLoc, IDInfo, T);
  CurContext->addDecl(VD);

  if(!T.isNull()) {
    auto SubT = T;
    if(T->isArrayType()) {
      CheckArrayTypeDeclarationCompability(T->asArrayType(), VD);
      SubT = T->asArrayType()->getElementType();
      VD->MarkUsedAsVariable(IDLoc);
    }
    else if(SubT->isCharacterType())
      CheckCharacterLengthDeclarationCompability(SubT, VD);
  }

  return VD;
}
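A minimal, self-contained sketch of the lookup-then-declare flow above: find a previous declaration, complete it if it is still typeless (like the untyped dummy-argument case), diagnose a redefinition otherwise, or create a fresh entity. All names below are hypothetical; nothing here is the Sema/Decl API.

#include <map>
#include <optional>
#include <stdexcept>
#include <string>
#include <utility>

struct Entity { std::optional<std::string> Type; };

struct SymbolTable {
  std::map<std::string, Entity> Table;
  Entity *declare(const std::string &Name, std::string Type) {
    if (auto It = Table.find(Name); It != Table.end()) {
      // An earlier entry with no type yet can simply be given the new type.
      if (!It->second.Type) {
        It->second.Type = std::move(Type);
        return &It->second;
      }
      throw std::runtime_error("redefinition of " + Name); // err_redefinition
    }
    // No previous declaration: create and register a fresh entity.
    return &Table.emplace(Name, Entity{std::move(Type)}).first->second;
  }
};

int main() {
  SymbolTable S;
  S.declare("x", "integer");
  // S.declare("x", "real"); // would throw: x already has a type
}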
Example #2
static CallExpr *create_call_once_lambda_call(ASTContext &C, ASTMaker M,
                                              const ParmVarDecl *Callback,
                                              CXXRecordDecl *CallbackDecl,
                                              ArrayRef<Expr *> CallArgs) {
  assert(CallbackDecl != nullptr);
  assert(CallbackDecl->isLambda());
  FunctionDecl *callOperatorDecl = CallbackDecl->getLambdaCallOperator();
  assert(callOperatorDecl != nullptr);

  DeclRefExpr *callOperatorDeclRef =
      DeclRefExpr::Create(/*Ctx=*/C,
                          /*QualifierLoc=*/NestedNameSpecifierLoc(),
                          /*TemplateKWLoc=*/SourceLocation(),
                          const_cast<FunctionDecl *>(callOperatorDecl),
                          /*RefersToEnclosingVariableOrCapture=*/false,
                          /*NameLoc=*/SourceLocation(),
                          /*T=*/callOperatorDecl->getType(),
                          /*VK=*/VK_LValue);

  return new (C)
      CXXOperatorCallExpr(/*AstContext=*/C, OO_Call, callOperatorDeclRef,
                          /*args=*/CallArgs,
                          /*QualType=*/C.VoidTy,
                          /*ExprValueType=*/VK_RValue,
                          /*SourceLocation=*/SourceLocation(), FPOptions());
}
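In source form, the AST synthesized above models an explicit call to the lambda's call operator through a reference to the closure object. A stand-alone illustration in plain C++ (no Clang APIs):

#include <cstdio>

int main() {
  auto callback = [](int x) { return x + 1; };
  // The DeclRefExpr/CXXOperatorCallExpr pair built above corresponds to:
  int r = callback.operator()(41);
  std::printf("%d\n", r);
}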
Example #3
  // Builds the CallExpr that calls the Eval function specialization created
  // for the given instantiation type.
  CallExpr*
  EvaluateTSynthesizer::BuildEvalCallExpr(const QualType InstTy,
                                            Expr* SubTree,
                                            ASTOwningVector<Expr*>& CallArgs) {
    // Set up new context for the new FunctionDecl
    DeclContext* PrevContext = m_Sema->CurContext;

    m_Sema->CurContext = m_EvalDecl->getDeclContext();

    // Create template arguments
    Sema::InstantiatingTemplate Inst(*m_Sema, m_NoSLoc, m_EvalDecl);
    TemplateArgument Arg(InstTy);
    TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack, &Arg, 1U);

    // Substitute the declaration of the templated function, with the
    // specified template argument
    Decl* D = m_Sema->SubstDecl(m_EvalDecl,
                                m_EvalDecl->getDeclContext(),
                                MultiLevelTemplateArgumentList(TemplateArgs));

    FunctionDecl* Fn = dyn_cast<FunctionDecl>(D);
    assert(Fn && "Substituted declaration must be a FunctionDecl");
    // Create the new body of the substituted declaration
    m_Sema->InstantiateFunctionDefinition(Fn->getLocation(), Fn, true, true);

    m_Sema->CurContext = PrevContext;

    const FunctionProtoType* FPT = Fn->getType()->getAs<FunctionProtoType>();
    FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
    QualType FnTy = m_Context->getFunctionType(Fn->getResultType(),
                                               FPT->arg_type_begin(),
                                               FPT->getNumArgs(),
                                               EPI);
    DeclRefExpr* DRE = m_Sema->BuildDeclRefExpr(Fn,
                                                FnTy,
                                                VK_RValue,
                                                m_NoSLoc
                                                ).takeAs<DeclRefExpr>();

    // TODO: Figure out a way to avoid passing in wrong source locations
    // of the symbol being replaced. This is important when we calculate the
    // size of the memory buffers and may lead to creation of wrong wrappers.
    Scope* S = m_Sema->getScopeForContext(m_Sema->CurContext);
    CallExpr* EvalCall = m_Sema->ActOnCallExpr(S,
                                               DRE,
                                               SubTree->getLocStart(),
                                               move_arg(CallArgs),
                                               SubTree->getLocEnd()
                                               ).takeAs<CallExpr>();
    assert(EvalCall && "Cannot create call to Eval");

    return EvalCall;
  }
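The PrevContext bookkeeping around SubstDecl is the usual save/set/restore idiom (Clang code often spells it with llvm::SaveAndRestore). A minimal stand-alone sketch of the same idiom, not the LLVM class:

#include <cassert>
#include <utility>

template <typename T> struct ScopedRestore {
  ScopedRestore(T &Var, T NewValue) : Ref(Var), Saved(std::move(Var)) {
    Ref = std::move(NewValue);
  }
  ~ScopedRestore() { Ref = std::move(Saved); }
  T &Ref;
  T Saved;
};

int main() {
  int CurContext = 1;
  {
    ScopedRestore<int> Guard(CurContext, 2); // m_Sema->CurContext = ...;
    assert(CurContext == 2);
  }
  assert(CurContext == 1); // m_Sema->CurContext = PrevContext;
}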
Example #4
ExprResult Sema::ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
                                         MultiExprArg ExecConfig,
                                         SourceLocation GGGLoc) {
  FunctionDecl *ConfigDecl = Context.getcudaConfigureCallDecl();
  if (!ConfigDecl)
    return ExprError(Diag(LLLLoc, diag::err_undeclared_var_use)
                     << "cudaConfigureCall");
  QualType ConfigQTy = ConfigDecl->getType();

  DeclRefExpr *ConfigDR = new (Context)
      DeclRefExpr(ConfigDecl, false, ConfigQTy, VK_LValue, LLLLoc);
  MarkFunctionReferenced(LLLLoc, ConfigDecl);

  return ActOnCallExpr(S, ConfigDR, LLLLoc, ExecConfig, GGGLoc, nullptr,
                       /*IsExecConfig=*/true);
}
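This routine builds the call representing the <<<grid, block, shmem, stream>>> execution configuration (LLLLoc and GGGLoc are the locations of '<<<' and '>>>'). A runnable mock of the resulting call order, with simplified stand-ins for dim3 and cudaConfigureCall; the real ones come from the CUDA runtime:

#include <cstddef>
#include <cstdio>

struct dim3 { unsigned x = 1, y = 1, z = 1; };

int cudaConfigureCall(dim3 grid, dim3 block, size_t shmem = 0,
                      void *stream = nullptr) {
  std::printf("config: grid=%u block=%u shmem=%zu\n", grid.x, block.x, shmem);
  return 0;
}

void kernelStub(int arg) { std::printf("launch with arg=%d\n", arg); }

int main() {
  // kernel<<<{2,1,1}, {64,1,1}>>>(42); corresponds roughly to:
  cudaConfigureCall({2, 1, 1}, {64, 1, 1});
  kernelStub(42);
}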
Example #5
void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
                                 ExplodedNodeSet &Dst) {
  // FIXME: Much of this should eventually migrate to CXXAllocatorCall.
  // Also, we need to decide how allocators actually work -- they're not
  // really part of the CXXNewExpr because they happen BEFORE the
  // CXXConstructExpr subexpression. See PR12014 for some discussion.
  StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
  
  unsigned blockCount = currBldrCtx->blockCount();
  const LocationContext *LCtx = Pred->getLocationContext();
  DefinedOrUnknownSVal symVal = svalBuilder.conjureSymbolVal(0, CNE, LCtx,
                                                             CNE->getType(),
                                                             blockCount);
  ProgramStateRef State = Pred->getState();

  CallEventManager &CEMgr = getStateManager().getCallEventManager();
  CallEventRef<CXXAllocatorCall> Call =
    CEMgr.getCXXAllocatorCall(CNE, State, LCtx);

  // Invalidate placement args.
  // FIXME: Once we figure out how we want allocators to work,
  // we should be using the usual pre-/(default-)eval-/post-call checks here.
  State = Call->invalidateRegions(blockCount);

  // If we're compiling with exceptions enabled, and this allocation function
  // is not declared as non-throwing, failures /must/ be signalled by
  // exceptions, and thus the return value will never be NULL.
  // C++11 [basic.stc.dynamic.allocation]p3.
  FunctionDecl *FD = CNE->getOperatorNew();
  if (FD && getContext().getLangOpts().CXXExceptions) {
    QualType Ty = FD->getType();
    if (const FunctionProtoType *ProtoType = Ty->getAs<FunctionProtoType>())
      if (!ProtoType->isNothrow(getContext()))
        State = State->assume(symVal, true);
  }

  if (CNE->isArray()) {
    // FIXME: allocating an array requires simulating the constructors.
    // For now, just return a symbolicated region.
    const MemRegion *NewReg = cast<loc::MemRegionVal>(symVal).getRegion();
    QualType ObjTy = CNE->getType()->getAs<PointerType>()->getPointeeType();
    const ElementRegion *EleReg =
      getStoreManager().GetElementZeroRegion(NewReg, ObjTy);
    State = State->BindExpr(CNE, Pred->getLocationContext(),
                            loc::MemRegionVal(EleReg));
    Bldr.generateNode(CNE, Pred, State);
    return;
  }

  // FIXME: Once we have proper support for CXXConstructExprs inside
  // CXXNewExpr, we need to make sure that the constructed object is not
  // immediately invalidated here. (The placement call should happen before
  // the constructor call anyway.)
  if (FD && FD->isReservedGlobalPlacementOperator()) {
    // Non-array placement new should always return the placement location.
    SVal PlacementLoc = State->getSVal(CNE->getPlacementArg(0), LCtx);
    SVal Result = svalBuilder.evalCast(PlacementLoc, CNE->getType(),
                                       CNE->getPlacementArg(0)->getType());
    State = State->BindExpr(CNE, LCtx, Result);
  } else {
    State = State->BindExpr(CNE, LCtx, symVal);
  }

  // If the type is not a record, we won't have a CXXConstructExpr as an
  // initializer. Copy the value over.
  if (const Expr *Init = CNE->getInitializer()) {
    if (!isa<CXXConstructExpr>(Init)) {
      QualType ObjTy = CNE->getType()->getAs<PointerType>()->getPointeeType();
      (void)ObjTy;
      assert(!ObjTy->isRecordType());
      SVal Location = State->getSVal(CNE, LCtx);
      if (isa<Loc>(Location))
        State = State->bindLoc(cast<Loc>(Location), State->getSVal(Init, LCtx));
    }
  }

  Bldr.generateNode(CNE, Pred, State);
}
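The array branch binds the expression to element zero of the allocated region, mirroring the source-level rule that the value of a new[] expression is the address of its first element. In plain C++:

#include <cstdio>

int main() {
  int *p = new int[4]{0, 1, 2, 3};
  std::printf("%d\n", p[0]); // p points at element zero (cf. GetElementZeroRegion)
  delete[] p;
}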
Example #6
void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
                                 ExplodedNodeSet &Dst) {
  // FIXME: Much of this should eventually migrate to CXXAllocatorCall.
  // Also, we need to decide how allocators actually work -- they're not
  // really part of the CXXNewExpr because they happen BEFORE the
  // CXXConstructExpr subexpression. See PR12014 for some discussion.

  unsigned blockCount = currBldrCtx->blockCount();
  const LocationContext *LCtx = Pred->getLocationContext();
  DefinedOrUnknownSVal symVal = UnknownVal();
  FunctionDecl *FD = CNE->getOperatorNew();

  bool IsStandardGlobalOpNewFunction = false;
  if (FD && !isa<CXXMethodDecl>(FD) && !FD->isVariadic()) {
    if (FD->getNumParams() == 2) {
      QualType T = FD->getParamDecl(1)->getType();
      if (const IdentifierInfo *II = T.getBaseTypeIdentifier())
        // NoThrow placement new behaves as a standard new.
        IsStandardGlobalOpNewFunction = II->getName().equals("nothrow_t");
    }
    else
      // Placement forms are considered non-standard.
      IsStandardGlobalOpNewFunction = (FD->getNumParams() == 1);
  }

  // We assume all standard global 'operator new' functions allocate memory in
  // heap. We realize this is an approximation that might not correctly model
  // a custom global allocator.
  if (IsStandardGlobalOpNewFunction)
    symVal = svalBuilder.getConjuredHeapSymbolVal(CNE, LCtx, blockCount);
  else
    symVal = svalBuilder.conjureSymbolVal(nullptr, CNE, LCtx, CNE->getType(),
                                          blockCount);

  ProgramStateRef State = Pred->getState();
  CallEventManager &CEMgr = getStateManager().getCallEventManager();
  CallEventRef<CXXAllocatorCall> Call =
    CEMgr.getCXXAllocatorCall(CNE, State, LCtx);

  // Invalidate placement args.
  // FIXME: Once we figure out how we want allocators to work,
  // we should be using the usual pre-/(default-)eval-/post-call checks here.
  State = Call->invalidateRegions(blockCount);
  if (!State)
    return;

  // If this allocation function is not declared as non-throwing, failures
  // /must/ be signalled by exceptions, and thus the return value will never be
  // NULL. -fno-exceptions does not influence this semantics.
  // FIXME: GCC has a -fcheck-new option, which forces it to consider the case
  // where new can return NULL. If we end up supporting that option, we can
  // consider adding a check for it here.
  // C++11 [basic.stc.dynamic.allocation]p3.
  if (FD) {
    QualType Ty = FD->getType();
    if (const FunctionProtoType *ProtoType = Ty->getAs<FunctionProtoType>())
      if (!ProtoType->isNothrow(getContext()))
        State = State->assume(symVal, true);
  }

  StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);

  if (CNE->isArray()) {
    // FIXME: allocating an array requires simulating the constructors.
    // For now, just return a symbolicated region.
    const MemRegion *NewReg = symVal.castAs<loc::MemRegionVal>().getRegion();
    QualType ObjTy = CNE->getType()->getAs<PointerType>()->getPointeeType();
    const ElementRegion *EleReg =
      getStoreManager().GetElementZeroRegion(NewReg, ObjTy);
    State = State->BindExpr(CNE, Pred->getLocationContext(),
                            loc::MemRegionVal(EleReg));
    Bldr.generateNode(CNE, Pred, State);
    return;
  }

  // FIXME: Once we have proper support for CXXConstructExprs inside
  // CXXNewExpr, we need to make sure that the constructed object is not
  // immediately invalidated here. (The placement call should happen before
  // the constructor call anyway.)
  SVal Result = symVal;
  if (FD && FD->isReservedGlobalPlacementOperator()) {
    // Non-array placement new should always return the placement location.
    SVal PlacementLoc = State->getSVal(CNE->getPlacementArg(0), LCtx);
    Result = svalBuilder.evalCast(PlacementLoc, CNE->getType(),
                                  CNE->getPlacementArg(0)->getType());
  }

  // Bind the address of the object, then check to see if we cached out.
  State = State->BindExpr(CNE, LCtx, Result);
  ExplodedNode *NewN = Bldr.generateNode(CNE, Pred, State);
  if (!NewN)
    return;

  // If the type is not a record, we won't have a CXXConstructExpr as an
  // initializer. Copy the value over.
  if (const Expr *Init = CNE->getInitializer()) {
    if (!isa<CXXConstructExpr>(Init)) {
      assert(Bldr.getResults().size() == 1);
      Bldr.takeNodes(NewN);
      evalBind(Dst, CNE, NewN, Result, State->getSVal(Init, LCtx),
               /*FirstInit=*/IsStandardGlobalOpNewFunction);
    }
  }
}
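A minimal illustration of the C++11 [basic.stc.dynamic.allocation]p3 semantics behind State->assume(symVal, true): a throwing allocation function never returns null, so only the nothrow forms need a null check:

#include <cstdio>
#include <new>

int main() {
  int *p = new int(1);                // may throw std::bad_alloc, never null
  int *q = new (std::nothrow) int(2); // nothrow form may yield nullptr instead
  if (q)
    std::printf("%d %d\n", *p, *q);
  delete q;
  delete p;
}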
Example #7
llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
  QualType AllocType = E->getAllocatedType();
  FunctionDecl *NewFD = E->getOperatorNew();
  const FunctionProtoType *NewFTy = NewFD->getType()->getAs<FunctionProtoType>();

  CallArgList NewArgs;

  // The allocation size is the first argument.
  QualType SizeTy = getContext().getSizeType();

  llvm::Value *NumElements = 0;
  llvm::Value *AllocSize = EmitCXXNewAllocSize(*this, E, NumElements);
  
  NewArgs.push_back(std::make_pair(RValue::get(AllocSize), SizeTy));

  // Emit the rest of the arguments.
  // FIXME: Ideally, this should just use EmitCallArgs.
  CXXNewExpr::const_arg_iterator NewArg = E->placement_arg_begin();

  // First, use the types from the function type.
  // We start at 1 here because the first argument (the allocation size)
  // has already been emitted.
  for (unsigned i = 1, e = NewFTy->getNumArgs(); i != e; ++i, ++NewArg) {
    QualType ArgType = NewFTy->getArgType(i);

    assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
           getTypePtr() ==
           getContext().getCanonicalType(NewArg->getType()).getTypePtr() &&
           "type mismatch in call argument!");

    NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
                                     ArgType));
  }

  // Either we've emitted all the call args, or we have a call to a
  // variadic function.
  assert((NewArg == E->placement_arg_end() || NewFTy->isVariadic()) &&
         "Extra arguments in non-variadic function!");

  // If we still have any arguments, emit them using the type of the argument.
  for (CXXNewExpr::const_arg_iterator NewArgEnd = E->placement_arg_end();
       NewArg != NewArgEnd; ++NewArg) {
    QualType ArgType = NewArg->getType();
    NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
                                     ArgType));
  }

  // Emit the call to new.
  RValue RV =
    EmitCall(CGM.getTypes().getFunctionInfo(NewFTy->getResultType(), NewArgs),
             CGM.GetAddrOfFunction(NewFD), NewArgs, NewFD);

  // If an allocation function is declared with an empty exception specification
  // it returns null to indicate failure to allocate storage. [expr.new]p13.
  // (We don't need to check for null when there's no new initializer and
  // we're allocating a POD type).
  bool NullCheckResult = NewFTy->hasEmptyExceptionSpec() &&
    !(AllocType->isPODType() && !E->hasInitializer());

  llvm::BasicBlock *NewNull = 0;
  llvm::BasicBlock *NewNotNull = 0;
  llvm::BasicBlock *NewEnd = 0;

  llvm::Value *NewPtr = RV.getScalarVal();

  if (NullCheckResult) {
    NewNull = createBasicBlock("new.null");
    NewNotNull = createBasicBlock("new.notnull");
    NewEnd = createBasicBlock("new.end");

    llvm::Value *IsNull =
      Builder.CreateICmpEQ(NewPtr,
                           llvm::Constant::getNullValue(NewPtr->getType()),
                           "isnull");

    Builder.CreateCondBr(IsNull, NewNull, NewNotNull);
    EmitBlock(NewNotNull);
  }

  if (uint64_t CookiePadding = CalculateCookiePadding(getContext(), E)) {
    uint64_t CookieOffset = 
      CookiePadding - getContext().getTypeSize(SizeTy) / 8;
    
    llvm::Value *NumElementsPtr = 
      Builder.CreateConstInBoundsGEP1_64(NewPtr, CookieOffset);
    
    NumElementsPtr = Builder.CreateBitCast(NumElementsPtr, 
                                           ConvertType(SizeTy)->getPointerTo());
    Builder.CreateStore(NumElements, NumElementsPtr);

    // Now add the padding to the new ptr.
    NewPtr = Builder.CreateConstInBoundsGEP1_64(NewPtr, CookiePadding);
  }
  
  NewPtr = Builder.CreateBitCast(NewPtr, ConvertType(E->getType()));

  EmitNewInitializer(*this, E, NewPtr, NumElements);

  if (NullCheckResult) {
    Builder.CreateBr(NewEnd);
    NewNotNull = Builder.GetInsertBlock();
    EmitBlock(NewNull);
    Builder.CreateBr(NewEnd);
    EmitBlock(NewEnd);

    llvm::PHINode *PHI = Builder.CreatePHI(NewPtr->getType());
    PHI->reserveOperandSpace(2);
    PHI->addIncoming(NewPtr, NewNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(NewPtr->getType()), NewNull);

    NewPtr = PHI;
  }

  return NewPtr;
}
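The cookie-padding block above stores the element count just in front of the pointer handed back to the program. A self-contained sketch of that layout with a hypothetical helper (error handling omitted; not the Clang/ABI code):

#include <cstdio>
#include <cstdlib>
#include <cstring>

void *arrayNewWithCookie(size_t Count, size_t ElemSize, size_t CookiePadding) {
  char *Raw = static_cast<char *>(std::malloc(CookiePadding + Count * ElemSize));
  size_t CookieOffset = CookiePadding - sizeof(size_t);
  std::memcpy(Raw + CookieOffset, &Count, sizeof(size_t)); // store NumElements
  return Raw + CookiePadding; // hand back memory past the cookie
}

int main() {
  const size_t Pad = sizeof(size_t);
  void *P = arrayNewWithCookie(8, sizeof(int), Pad);
  size_t N;
  std::memcpy(&N, static_cast<char *>(P) - sizeof(size_t), sizeof N);
  std::printf("cookie says %zu elements\n", N);
  std::free(static_cast<char *>(P) - Pad);
}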
Example #8
bool CoroutineStmtBuilder::makeNewAndDeleteExpr() {
  // Form and check allocation and deallocation calls.
  assert(!IsPromiseDependentType &&
         "cannot make statement while the promise type is dependent");
  QualType PromiseType = Fn.CoroutinePromise->getType();

  if (S.RequireCompleteType(Loc, PromiseType, diag::err_incomplete_type))
    return false;

  const bool RequiresNoThrowAlloc = ReturnStmtOnAllocFailure != nullptr;

  // FIXME: Add support for stateful allocators.

  FunctionDecl *OperatorNew = nullptr;
  FunctionDecl *OperatorDelete = nullptr;
  FunctionDecl *UnusedResult = nullptr;
  bool PassAlignment = false;
  SmallVector<Expr *, 1> PlacementArgs;

  S.FindAllocationFunctions(Loc, SourceRange(),
                            /*UseGlobal*/ false, PromiseType,
                            /*isArray*/ false, PassAlignment, PlacementArgs,
                            OperatorNew, UnusedResult);

  bool IsGlobalOverload =
      OperatorNew && !isa<CXXRecordDecl>(OperatorNew->getDeclContext());
  // If we didn't find a class-local operator new declaration and a
  // non-throwing allocation is required, then we need to look up the
  // non-throwing global operator instead.
  if (RequiresNoThrowAlloc && (!OperatorNew || IsGlobalOverload)) {
    auto *StdNoThrow = buildStdNoThrowDeclRef(S, Loc);
    if (!StdNoThrow)
      return false;
    PlacementArgs = {StdNoThrow};
    OperatorNew = nullptr;
    S.FindAllocationFunctions(Loc, SourceRange(),
                              /*UseGlobal*/ true, PromiseType,
                              /*isArray*/ false, PassAlignment, PlacementArgs,
                              OperatorNew, UnusedResult);
  }

  assert(OperatorNew && "expected definition of operator new to be found");

  if (RequiresNoThrowAlloc) {
    const auto *FT = OperatorNew->getType()->getAs<FunctionProtoType>();
    if (!FT->isNothrow(S.Context, /*ResultIfDependent*/ false)) {
      S.Diag(OperatorNew->getLocation(),
             diag::err_coroutine_promise_new_requires_nothrow)
          << OperatorNew;
      S.Diag(Loc, diag::note_coroutine_promise_call_implicitly_required)
          << OperatorNew;
      return false;
    }
  }

  if ((OperatorDelete = findDeleteForPromise(S, Loc, PromiseType)) == nullptr)
    return false;

  Expr *FramePtr =
      buildBuiltinCall(S, Loc, Builtin::BI__builtin_coro_frame, {});

  Expr *FrameSize =
      buildBuiltinCall(S, Loc, Builtin::BI__builtin_coro_size, {});

  // Make new call.

  ExprResult NewRef =
      S.BuildDeclRefExpr(OperatorNew, OperatorNew->getType(), VK_LValue, Loc);
  if (NewRef.isInvalid())
    return false;

  SmallVector<Expr *, 2> NewArgs(1, FrameSize);
  for (auto Arg : PlacementArgs)
    NewArgs.push_back(Arg);

  ExprResult NewExpr =
      S.ActOnCallExpr(S.getCurScope(), NewRef.get(), Loc, NewArgs, Loc);
  NewExpr = S.ActOnFinishFullExpr(NewExpr.get());
  if (NewExpr.isInvalid())
    return false;

  // Make delete call.

  QualType OpDeleteQualType = OperatorDelete->getType();

  ExprResult DeleteRef =
      S.BuildDeclRefExpr(OperatorDelete, OpDeleteQualType, VK_LValue, Loc);
  if (DeleteRef.isInvalid())
    return false;

  Expr *CoroFree =
      buildBuiltinCall(S, Loc, Builtin::BI__builtin_coro_free, {FramePtr});

  SmallVector<Expr *, 2> DeleteArgs{CoroFree};

  // Check if we need to pass the size.
  const auto *OpDeleteType =
      OpDeleteQualType.getTypePtr()->getAs<FunctionProtoType>();
  if (OpDeleteType->getNumParams() > 1)
    DeleteArgs.push_back(FrameSize);

  ExprResult DeleteExpr =
      S.ActOnCallExpr(S.getCurScope(), DeleteRef.get(), Loc, DeleteArgs, Loc);
  DeleteExpr = S.ActOnFinishFullExpr(DeleteExpr.get());
  if (DeleteExpr.isInvalid())
    return false;

  this->Allocate = NewExpr.get();
  this->Deallocate = DeleteExpr.get();

  return true;
}
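For context, RequiresNoThrowAlloc (ReturnStmtOnAllocFailure != nullptr) corresponds to a coroutine promise type declaring get_return_object_on_allocation_failure. A minimal C++20 example of that trigger, using the modern <coroutine> spelling (the snippet above predates it):

#include <coroutine>

struct task {
  struct promise_type {
    // Declaring this static member is what makes ReturnStmtOnAllocFailure
    // non-null above, which in turn forces the nothrow operator new lookup.
    static task get_return_object_on_allocation_failure() { return {}; }
    task get_return_object() { return {}; }
    std::suspend_never initial_suspend() { return {}; }
    std::suspend_never final_suspend() noexcept { return {}; }
    void return_void() {}
    void unhandled_exception() {}
  };
};

task demo() { co_return; }

int main() { demo(); }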
Example #9
// CUDA 9.0+ uses a new way to launch kernels: parameters are packed into a
// local array and the kernel is launched via cudaLaunchKernel().
void CGNVCUDARuntime::emitDeviceStubBodyNew(CodeGenFunction &CGF,
                                            FunctionArgList &Args) {
  // Calculate amount of space we will need for all arguments.  If we have no
  // args, allocate a single pointer so we still have a valid pointer to the
  // argument array that we can pass to runtime, even if it will be unused.
  Address KernelArgs = CGF.CreateTempAlloca(
      VoidPtrTy, CharUnits::fromQuantity(16), "kernel_args",
      llvm::ConstantInt::get(SizeTy, std::max<size_t>(1, Args.size())));
  // Store pointers to the arguments in a locally allocated launch_args.
  for (unsigned i = 0; i < Args.size(); ++i) {
    llvm::Value* VarPtr = CGF.GetAddrOfLocalVar(Args[i]).getPointer();
    llvm::Value *VoidVarPtr = CGF.Builder.CreatePointerCast(VarPtr, VoidPtrTy);
    CGF.Builder.CreateDefaultAlignedStore(
        VoidVarPtr, CGF.Builder.CreateConstGEP1_32(KernelArgs.getPointer(), i));
  }

  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("setup.end");

  // Lookup cudaLaunchKernel function.
  // cudaError_t cudaLaunchKernel(const void *func, dim3 gridDim, dim3 blockDim,
  //                              void **args, size_t sharedMem,
  //                              cudaStream_t stream);
  TranslationUnitDecl *TUDecl = CGM.getContext().getTranslationUnitDecl();
  DeclContext *DC = TranslationUnitDecl::castToDeclContext(TUDecl);
  IdentifierInfo &cudaLaunchKernelII =
      CGM.getContext().Idents.get("cudaLaunchKernel");
  FunctionDecl *cudaLaunchKernelFD = nullptr;
  for (const auto &Result : DC->lookup(&cudaLaunchKernelII)) {
    if (FunctionDecl *FD = dyn_cast<FunctionDecl>(Result))
      cudaLaunchKernelFD = FD;
  }

  if (cudaLaunchKernelFD == nullptr) {
    CGM.Error(CGF.CurFuncDecl->getLocation(),
              "Can't find declaration for cudaLaunchKernel()");
    return;
  }
  // Create temporary dim3 grid_dim, block_dim.
  ParmVarDecl *GridDimParam = cudaLaunchKernelFD->getParamDecl(1);
  QualType Dim3Ty = GridDimParam->getType();
  Address GridDim =
      CGF.CreateMemTemp(Dim3Ty, CharUnits::fromQuantity(8), "grid_dim");
  Address BlockDim =
      CGF.CreateMemTemp(Dim3Ty, CharUnits::fromQuantity(8), "block_dim");
  Address ShmemSize =
      CGF.CreateTempAlloca(SizeTy, CGM.getSizeAlign(), "shmem_size");
  Address Stream =
      CGF.CreateTempAlloca(VoidPtrTy, CGM.getPointerAlign(), "stream");
  llvm::FunctionCallee cudaPopConfigFn = CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(IntTy,
                              {/*gridDim=*/GridDim.getType(),
                               /*blockDim=*/BlockDim.getType(),
                               /*ShmemSize=*/ShmemSize.getType(),
                               /*Stream=*/Stream.getType()},
                              /*isVarArg=*/false),
      "__cudaPopCallConfiguration");

  CGF.EmitRuntimeCallOrInvoke(cudaPopConfigFn,
                              {GridDim.getPointer(), BlockDim.getPointer(),
                               ShmemSize.getPointer(), Stream.getPointer()});

  // Emit the call to cudaLaunch
  llvm::Value *Kernel = CGF.Builder.CreatePointerCast(CGF.CurFn, VoidPtrTy);
  CallArgList LaunchKernelArgs;
  LaunchKernelArgs.add(RValue::get(Kernel),
                       cudaLaunchKernelFD->getParamDecl(0)->getType());
  LaunchKernelArgs.add(RValue::getAggregate(GridDim), Dim3Ty);
  LaunchKernelArgs.add(RValue::getAggregate(BlockDim), Dim3Ty);
  LaunchKernelArgs.add(RValue::get(KernelArgs.getPointer()),
                       cudaLaunchKernelFD->getParamDecl(3)->getType());
  LaunchKernelArgs.add(RValue::get(CGF.Builder.CreateLoad(ShmemSize)),
                       cudaLaunchKernelFD->getParamDecl(4)->getType());
  LaunchKernelArgs.add(RValue::get(CGF.Builder.CreateLoad(Stream)),
                       cudaLaunchKernelFD->getParamDecl(5)->getType());

  QualType QT = cudaLaunchKernelFD->getType();
  QualType CQT = QT.getCanonicalType();
  llvm::Type *Ty = CGM.getTypes().ConvertType(CQT);
  llvm::FunctionType *FTy = dyn_cast<llvm::FunctionType>(Ty);

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeFunctionDeclaration(cudaLaunchKernelFD);
  llvm::FunctionCallee cudaLaunchKernelFn =
      CGM.CreateRuntimeFunction(FTy, "cudaLaunchKernel");
  CGF.EmitCall(FI, CGCallee::forDirect(cudaLaunchKernelFn), ReturnValueSlot(),
               LaunchKernelArgs);
  CGF.EmitBranch(EndBlock);

  CGF.EmitBlock(EndBlock);
}
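Roughly, the stub emitted above packs pointers to its arguments, pops the call configuration, and launches. The runnable mock below uses stand-in types and renamed entry points, popCallConfiguration and launchKernel, for the real __cudaPopCallConfiguration and cudaLaunchKernel:

#include <cstddef>
#include <cstdio>

struct dim3 { unsigned x = 1, y = 1, z = 1; };
using cudaStream_t = void *;

static int popCallConfiguration(dim3 *Grid, dim3 *Block, size_t *Shmem,
                                cudaStream_t *Stream) {
  *Grid = {2, 1, 1}; *Block = {64, 1, 1}; *Shmem = 0; *Stream = nullptr;
  return 0;
}

static int launchKernel(const void *Fn, dim3 Grid, dim3 Block, void **Args,
                        size_t Shmem, cudaStream_t) {
  (void)Fn;
  std::printf("grid=%u block=%u arg0=%d shmem=%zu\n", Grid.x, Block.x,
              *static_cast<int *>(Args[0]), Shmem);
  return 0;
}

// Roughly what the emitted stub for 'void kern(int x)' does.
void kernStub(int x) {
  void *KernelArgs[] = {&x}; // pointers to every argument, in order
  dim3 GridDim, BlockDim;
  size_t ShmemSize;
  cudaStream_t Stream;
  popCallConfiguration(&GridDim, &BlockDim, &ShmemSize, &Stream);
  launchKernel(reinterpret_cast<void *>(&kernStub), GridDim, BlockDim,
               KernelArgs, ShmemSize, Stream);
}

int main() { kernStub(42); }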