Example #1
/// DeclareGlobalAllocationFunction - Declares a single implicit global
/// allocation function if it doesn't already exist.
void Sema::DeclareGlobalAllocationFunction(DeclarationName Name,
                                           QualType Return, QualType Argument)
{
  DeclContext *GlobalCtx = Context.getTranslationUnitDecl();

  // Check if this function is already declared.
  {
    DeclContext::lookup_iterator Alloc, AllocEnd;
    for (llvm::tie(Alloc, AllocEnd) = GlobalCtx->lookup(Context, Name);
         Alloc != AllocEnd; ++Alloc) {
      // FIXME: Do we need to check for default arguments here?
      FunctionDecl *Func = cast<FunctionDecl>(*Alloc);
      if (Func->getNumParams() == 1 &&
          Context.getCanonicalType(Func->getParamDecl(0)->getType())==Argument)
        return;
    }
  }

  QualType FnType = Context.getFunctionType(Return, &Argument, 1, false, 0);
  FunctionDecl *Alloc =
    FunctionDecl::Create(Context, GlobalCtx, SourceLocation(), Name,
                         FnType, FunctionDecl::None, false, true,
                         SourceLocation());
  Alloc->setImplicit();
  ParmVarDecl *Param = ParmVarDecl::Create(Context, Alloc, SourceLocation(),
                                           0, Argument, VarDecl::None, 0);
  Alloc->setParams(Context, &Param, 1);

  // FIXME: Also add this declaration to the IdentifierResolver, but
  // make sure it is at the end of the chain to coincide with the
  // global scope.
  ((DeclContext *)TUScope->getEntity())->addDecl(Context, Alloc);
}
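The deduplication check above reduces to a single-parameter signature match. A minimal sketch of that predicate, assuming a clang::ASTContext and a candidate FunctionDecl (the helper name matchesUnaryAllocSignature is ours, not a Clang API):

// Sketch: an allocation function "already exists" if it declares exactly one
// parameter whose canonical type equals the expected argument type.
static bool matchesUnaryAllocSignature(clang::ASTContext &Ctx,
                                       const clang::FunctionDecl *FD,
                                       clang::QualType Argument) {
  return FD->getNumParams() == 1 &&
         Ctx.getCanonicalType(FD->getParamDecl(0)->getType()) ==
             Ctx.getCanonicalType(Argument);
}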
Example #2
void SimpleInliner::generateParamStrings(void)
{
  unsigned int ArgNum = TheCallExpr->getNumArgs();
  FunctionDecl *FD = TheCallExpr->getDirectCallee();
  unsigned int Idx;

  for(Idx = 0; Idx < FD->getNumParams(); ++Idx) {
    const ParmVarDecl *PD = FD->getParamDecl(Idx);
    std::string ParmStr = PD->getNameAsString();
    PD->getType().getAsStringInternal(ParmStr, 
                                      Context->getPrintingPolicy());
    if (Idx < ArgNum) {
      const Expr *Arg = TheCallExpr->getArg(Idx);
      ParmStr += " = ";
      std::string ArgStr("");
      RewriteHelper->getExprString(Arg, ArgStr);
      ParmStr += ArgStr;
    }
    ParmStr += ";\n";
    ParmStrings.push_back(ParmStr);
  }
}
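The loop above is the canonical parameter walk: iterate up to getNumParams() and fetch each ParmVarDecl with getParamDecl(). A minimal, self-contained sketch of the same pattern, assuming a Clang tool environment and a valid FunctionDecl (the helper name dumpParams is ours):

#include "clang/AST/Decl.h"
#include "llvm/Support/raw_ostream.h"

// Sketch: print "type name" for every declared parameter.
static void dumpParams(const clang::FunctionDecl *FD) {
  for (unsigned I = 0, N = FD->getNumParams(); I != N; ++I) {
    const clang::ParmVarDecl *PVD = FD->getParamDecl(I);
    llvm::outs() << PVD->getType().getAsString() << " "
                 << PVD->getNameAsString() << "\n";
  }
}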
Example #3
  Expr* ValueExtractionSynthesizer::SynthesizeSVRInit(Expr* E) {
    if (!m_gClingVD)
      FindAndCacheRuntimeDecls();

    // Build a reference to gCling
    ExprResult gClingDRE
      = m_Sema->BuildDeclRefExpr(m_gClingVD, m_Context->VoidPtrTy,
                                 VK_RValue, SourceLocation());
    // We have the wrapper as Sema's CurContext
    FunctionDecl* FD = cast<FunctionDecl>(m_Sema->CurContext);

    ExprWithCleanups* Cleanups = 0;
    // In case of ExprWithCleanups we need to extend its 'scope' to the call.
    if (E && isa<ExprWithCleanups>(E)) {
      Cleanups = cast<ExprWithCleanups>(E);
      E = Cleanups->getSubExpr();
    }

    // Build a reference to the Value* in the wrapper; it should be the
    // only argument of the wrapper.
    SourceLocation locStart = (E) ? E->getLocStart() : FD->getLocStart();
    SourceLocation locEnd = (E) ? E->getLocEnd() : FD->getLocEnd();
    ExprResult wrapperSVRDRE
      = m_Sema->BuildDeclRefExpr(FD->getParamDecl(0), m_Context->VoidPtrTy,
                                 VK_RValue, locStart);
    QualType ETy = (E) ? E->getType() : m_Context->VoidTy;
    QualType desugaredTy = ETy.getDesugaredType(*m_Context);

    // The expr result is transported as a reference, pointer, array, float,
    // etc. based on the desugared type. We should still expose the typedef'ed
    // (sugared) type to the cling::Value.
    if (desugaredTy->isRecordType() && E->getValueKind() == VK_LValue) {
      // returning a lvalue (not a temporary): the value should contain
      // a reference to the lvalue instead of copying it.
      desugaredTy = m_Context->getLValueReferenceType(desugaredTy);
      ETy = m_Context->getLValueReferenceType(ETy);
    }
    Expr* ETyVP
      = utils::Synthesize::CStyleCastPtrExpr(m_Sema, m_Context->VoidPtrTy,
                                             (uint64_t)ETy.getAsOpaquePtr());
    Expr* ETransaction
      = utils::Synthesize::CStyleCastPtrExpr(m_Sema, m_Context->VoidPtrTy,
                                             (uint64_t)getTransaction());

    llvm::SmallVector<Expr*, 6> CallArgs;
    CallArgs.push_back(gClingDRE.take());
    CallArgs.push_back(wrapperSVRDRE.take());
    CallArgs.push_back(ETyVP);
    CallArgs.push_back(ETransaction);

    ExprResult Call;
    SourceLocation noLoc;
    if (desugaredTy->isVoidType()) {
      // In cases where the cling::Value gets reused we need to reset the
      // previous settings to void.
      // We synthesize 'setValueNoAlloc(...), E' because E still needs
      // to be evaluated.

      // FIXME: Suboptimal: this discards the already created AST nodes.
      QualType vpQT = m_Context->VoidPtrTy;
      QualType vQT = m_Context->VoidTy;
      Expr* vpQTVP
        = utils::Synthesize::CStyleCastPtrExpr(m_Sema, vpQT,
                                               (uint64_t)vQT.getAsOpaquePtr());
      CallArgs[2] = vpQTVP;


      Call = m_Sema->ActOnCallExpr(/*Scope*/0, m_UnresolvedNoAlloc,
                                   locStart, CallArgs, locEnd);

      if (E)
        Call = m_Sema->CreateBuiltinBinOp(locStart, BO_Comma, Call.take(), E);

    }
    else if (desugaredTy->isRecordType() || desugaredTy->isConstantArrayType()){
      // 2) object types:
      // check existence of a copy constructor before the call
      if (!availableCopyConstructor(desugaredTy, m_Sema))
        return E;
      // call new (setValueWithAlloc(gCling, &SVR, ETy)) (E)
      Call = m_Sema->ActOnCallExpr(/*Scope*/0, m_UnresolvedWithAlloc,
                                   locStart, CallArgs, locEnd);
      Expr* placement = Call.take();
      if (const ConstantArrayType* constArray
          = dyn_cast<ConstantArrayType>(desugaredTy.getTypePtr())) {
        CallArgs.clear();
        CallArgs.push_back(E);
        CallArgs.push_back(placement);
        uint64_t arrSize
          = m_Context->getConstantArrayElementCount(constArray);
        Expr* arrSizeExpr
          = utils::Synthesize::IntegerLiteralExpr(*m_Context, arrSize);

        CallArgs.push_back(arrSizeExpr);
        // 2.1) arrays:
        // call copyArray(T* src, void* placement, int size)
        Call = m_Sema->ActOnCallExpr(/*Scope*/0, m_UnresolvedCopyArray,
                                     locStart, CallArgs, locEnd);

      }
      else {
        TypeSourceInfo* ETSI
          = m_Context->getTrivialTypeSourceInfo(ETy, noLoc);

        Call = m_Sema->BuildCXXNew(E->getSourceRange(),
                                   /*useGlobal ::*/true,
                                   /*placementLParen*/ noLoc,
                                   MultiExprArg(placement),
                                   /*placementRParen*/ noLoc,
                                   /*TypeIdParens*/ SourceRange(),
                                   /*allocType*/ ETSI->getType(),
                                   /*allocTypeInfo*/ETSI,
                                   /*arraySize*/0,
                                   /*directInitRange*/E->getSourceRange(),
                                   /*initializer*/E,
                                   /*mayContainAuto*/false
                                   );
      }
    }
    else if (desugaredTy->isIntegralOrEnumerationType()
             || desugaredTy->isReferenceType()
             || desugaredTy->isPointerType()
             || desugaredTy->isFloatingType()) {
      if (desugaredTy->isIntegralOrEnumerationType()) {
        // 1) enum, integral, float, double, reference, pointer types:
        //    call to cling::internal::setValueNoAlloc(...);

        // If the type is enum or integral we need to force-cast it into
        // uint64 in order to pick up the correct overload.
        QualType UInt64Ty = m_Context->UnsignedLongLongTy;
        TypeSourceInfo* TSI
          = m_Context->getTrivialTypeSourceInfo(UInt64Ty, noLoc);
        Expr* castedE
          = m_Sema->BuildCStyleCastExpr(noLoc, TSI, noLoc, E).take();
        CallArgs.push_back(castedE);
      }
      else if (desugaredTy->isReferenceType()) {
        // References are transported as pointers; take the address.
        Expr* AddrOfE = m_Sema->BuildUnaryOp(/*Scope*/0, noLoc, UO_AddrOf,
                                             E).take();
        CallArgs.push_back(AddrOfE);
      }
      else if (desugaredTy->isPointerType()) {
        // function pointers need explicit void* cast.
        QualType VoidPtrTy = m_Context->VoidPtrTy;
        TypeSourceInfo* TSI
          = m_Context->getTrivialTypeSourceInfo(VoidPtrTy, noLoc);
        Expr* castedE
          = m_Sema->BuildCStyleCastExpr(noLoc, TSI, noLoc, E).take();
        CallArgs.push_back(castedE);
      }
      else if (desugaredTy->isFloatingType()) {
        // floats and doubles fall naturally into the correct overload,
        // so no cast is needed.
        CallArgs.push_back(E);
      }
      Call = m_Sema->ActOnCallExpr(/*Scope*/0, m_UnresolvedNoAlloc,
                                   locStart, CallArgs, locEnd);
    }
    else
      assert(0 && "Unhandled code path?");

    assert(!Call.isInvalid() && "Invalid Call");

    // Extend the scope of the temporary cleaner if applicable.
    if (Cleanups) {
      Cleanups->setSubExpr(Call.take());
      Cleanups->setValueKind(Call.take()->getValueKind());
      Cleanups->setType(Call.take()->getType());
      return Cleanups;
    }
    return Call.take();
  }
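One step in the middle of this function is worth isolating: a record-typed lvalue result is reported as a reference so the consumer binds to the original object instead of copying it. A minimal sketch, assuming a clang::ASTContext and a non-null Expr (the helper name is ours):

// Sketch: adjust the reported type for lvalue record results.
static clang::QualType resultTypeForValue(clang::ASTContext &Ctx,
                                          const clang::Expr *E) {
  clang::QualType Ty = E->getType();
  if (Ty->isRecordType() && E->getValueKind() == clang::VK_LValue)
    return Ctx.getLValueReferenceType(Ty);
  return Ty;
}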
Example #4
void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
                                   ExplodedNodeSet &Dst) {
  // FIXME: Much of this should eventually migrate to CXXAllocatorCall.
  // Also, we need to decide how allocators actually work -- they're not
  // really part of the CXXNewExpr because they happen BEFORE the
  // CXXConstructExpr subexpression. See PR12014 for some discussion.

  unsigned blockCount = currBldrCtx->blockCount();
  const LocationContext *LCtx = Pred->getLocationContext();
  DefinedOrUnknownSVal symVal = UnknownVal();
  FunctionDecl *FD = CNE->getOperatorNew();

  bool IsStandardGlobalOpNewFunction = false;
  if (FD && !isa<CXXMethodDecl>(FD) && !FD->isVariadic()) {
    if (FD->getNumParams() == 2) {
      QualType T = FD->getParamDecl(1)->getType();
      if (const IdentifierInfo *II = T.getBaseTypeIdentifier())
        // NoThrow placement new behaves as a standard new.
        IsStandardGlobalOpNewFunction = II->getName().equals("nothrow_t");
    }
    else
      // Placement forms are considered non-standard.
      IsStandardGlobalOpNewFunction = (FD->getNumParams() == 1);
  }

  // We assume all standard global 'operator new' functions allocate memory on
  // the heap. We realize this is an approximation that might not correctly
  // model a custom global allocator.
  if (IsStandardGlobalOpNewFunction)
    symVal = svalBuilder.getConjuredHeapSymbolVal(CNE, LCtx, blockCount);
  else
    symVal = svalBuilder.conjureSymbolVal(nullptr, CNE, LCtx, CNE->getType(),
                                          blockCount);

  ProgramStateRef State = Pred->getState();
  CallEventManager &CEMgr = getStateManager().getCallEventManager();
  CallEventRef<CXXAllocatorCall> Call =
    CEMgr.getCXXAllocatorCall(CNE, State, LCtx);

  // Invalidate placement args.
  // FIXME: Once we figure out how we want allocators to work,
  // we should be using the usual pre-/(default-)eval-/post-call checks here.
  State = Call->invalidateRegions(blockCount);
  if (!State)
    return;

  // If this allocation function is not declared as non-throwing, failures
  // /must/ be signalled by exceptions, and thus the return value will never
  // be NULL. -fno-exceptions does not change these semantics.
  // FIXME: GCC has a -fcheck-new option, which forces it to consider the case
  // where new can return NULL. If we end up supporting that option, we can
  // consider adding a check for it here.
  // C++11 [basic.stc.dynamic.allocation]p3.
  if (FD) {
    QualType Ty = FD->getType();
    if (const FunctionProtoType *ProtoType = Ty->getAs<FunctionProtoType>())
      if (!ProtoType->isNothrow(getContext()))
        State = State->assume(symVal, true);
  }

  StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);

  if (CNE->isArray()) {
    // FIXME: allocating an array requires simulating the constructors.
    // For now, just return a symbolicated region.
    const MemRegion *NewReg = symVal.castAs<loc::MemRegionVal>().getRegion();
    QualType ObjTy = CNE->getType()->getAs<PointerType>()->getPointeeType();
    const ElementRegion *EleReg =
      getStoreManager().GetElementZeroRegion(NewReg, ObjTy);
    State = State->BindExpr(CNE, Pred->getLocationContext(),
                            loc::MemRegionVal(EleReg));
    Bldr.generateNode(CNE, Pred, State);
    return;
  }

  // FIXME: Once we have proper support for CXXConstructExprs inside
  // CXXNewExpr, we need to make sure that the constructed object is not
  // immediately invalidated here. (The placement call should happen before
  // the constructor call anyway.)
  SVal Result = symVal;
  if (FD && FD->isReservedGlobalPlacementOperator()) {
    // Non-array placement new should always return the placement location.
    SVal PlacementLoc = State->getSVal(CNE->getPlacementArg(0), LCtx);
    Result = svalBuilder.evalCast(PlacementLoc, CNE->getType(),
                                  CNE->getPlacementArg(0)->getType());
  }

  // Bind the address of the object, then check to see if we cached out.
  State = State->BindExpr(CNE, LCtx, Result);
  ExplodedNode *NewN = Bldr.generateNode(CNE, Pred, State);
  if (!NewN)
    return;

  // If the type is not a record, we won't have a CXXConstructExpr as an
  // initializer. Copy the value over.
  if (const Expr *Init = CNE->getInitializer()) {
    if (!isa<CXXConstructExpr>(Init)) {
      assert(Bldr.getResults().size() == 1);
      Bldr.takeNodes(NewN);
      evalBind(Dst, CNE, NewN, Result, State->getSVal(Init, LCtx),
               /*FirstInit=*/IsStandardGlobalOpNewFunction);
    }
  }
}
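The classification at the top of VisitCXXNewExpr reads as a small predicate over the declared parameters. A sketch under the same preconditions the example states (FD is a non-member, non-variadic operator new; the helper name is ours):

// Sketch: one parameter (size_t) is the standard form; two parameters
// qualify only when the second is std::nothrow_t. Other placement forms
// are considered non-standard.
static bool isStandardGlobalOpNew(const clang::FunctionDecl *FD) {
  if (FD->getNumParams() == 2) {
    clang::QualType T = FD->getParamDecl(1)->getType();
    if (const clang::IdentifierInfo *II = T.getBaseTypeIdentifier())
      return II->getName() == "nothrow_t";
    return false;
  }
  return FD->getNumParams() == 1;
}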
Example #5
/// FindAllocationOverload - Find a fitting overload for the allocation
/// function in the specified scope.
bool Sema::FindAllocationOverload(SourceLocation StartLoc, SourceRange Range,
                                  DeclarationName Name, Expr** Args,
                                  unsigned NumArgs, DeclContext *Ctx,
                                  bool AllowMissing, FunctionDecl *&Operator)
{
  DeclContext::lookup_iterator Alloc, AllocEnd;
  llvm::tie(Alloc, AllocEnd) = Ctx->lookup(Context, Name);
  if (Alloc == AllocEnd) {
    if (AllowMissing)
      return false;
    return Diag(StartLoc, diag::err_ovl_no_viable_function_in_call)
      << Name << Range;
  }

  OverloadCandidateSet Candidates;
  for (; Alloc != AllocEnd; ++Alloc) {
    // Even member operator new/delete are implicitly treated as
    // static, so don't use AddMemberCandidate.
    if (FunctionDecl *Fn = dyn_cast<FunctionDecl>(*Alloc))
      AddOverloadCandidate(Fn, Args, NumArgs, Candidates,
                           /*SuppressUserConversions=*/false);
  }

  // Do the resolution.
  OverloadCandidateSet::iterator Best;
  switch(BestViableFunction(Candidates, Best)) {
  case OR_Success: {
    // Got one!
    FunctionDecl *FnDecl = Best->Function;
    // The first argument is size_t, and the first parameter must be size_t,
    // too. This is checked on declaration and can be assumed. (It can't be
    // asserted on, though, since invalid decls are left in there.)
    for (unsigned i = 1; i < NumArgs; ++i) {
      // FIXME: Passing word to diagnostic.
      if (PerformCopyInitialization(Args[i-1],
                                    FnDecl->getParamDecl(i)->getType(),
                                    "passing"))
        return true;
    }
    Operator = FnDecl;
    return false;
  }

  case OR_No_Viable_Function:
    if (AllowMissing)
      return false;
    Diag(StartLoc, diag::err_ovl_no_viable_function_in_call)
      << Name << Range;
    PrintOverloadCandidates(Candidates, /*OnlyViable=*/false);
    return true;

  case OR_Ambiguous:
    Diag(StartLoc, diag::err_ovl_ambiguous_call)
      << Name << Range;
    PrintOverloadCandidates(Candidates, /*OnlyViable=*/true);
    return true;

  case OR_Deleted:
    Diag(StartLoc, diag::err_ovl_deleted_call)
      << Best->Function->isDeleted()
      << Name << Range;
    PrintOverloadCandidates(Candidates, /*OnlyViable=*/true);
    return true;
  }
  assert(false && "Unreachable, bad result from BestViableFunction");
  return true;
}
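One invariant this function leans on, per the comment in the OR_Success case: parameter 0 of any allocation function is size_t, checked at declaration time. A minimal illustration of that invariant (not the resolution logic itself; the helper name is ours), assuming a clang::ASTContext:

// Sketch: verify the leading size_t parameter of an allocation function.
static bool firstParamIsSizeT(clang::ASTContext &Ctx,
                              const clang::FunctionDecl *FD) {
  return FD->getNumParams() >= 1 &&
         Ctx.hasSameUnqualifiedType(FD->getParamDecl(0)->getType(),
                                    Ctx.getSizeType());
}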
Example #6
void Parser::ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM) {
  // If this is a member template, introduce the template parameter scope.
  ParseScope TemplateScope(this, Scope::TemplateParamScope, LM.TemplateScope);
  TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
  if (LM.TemplateScope) {
    Actions.ActOnReenterTemplateScope(getCurScope(), LM.Method);
    ++CurTemplateDepthTracker;
  }
  // Start the delayed C++ method declaration
  Actions.ActOnStartDelayedCXXMethodDeclaration(getCurScope(), LM.Method);

  // Introduce the parameters into scope and parse their default
  // arguments.
  ParseScope PrototypeScope(this, Scope::FunctionPrototypeScope |
                            Scope::FunctionDeclarationScope | Scope::DeclScope);
  for (unsigned I = 0, N = LM.DefaultArgs.size(); I != N; ++I) {
    auto Param = cast<ParmVarDecl>(LM.DefaultArgs[I].Param);
    // Introduce the parameter into scope.
    bool HasUnparsed = Param->hasUnparsedDefaultArg();
    Actions.ActOnDelayedCXXMethodParameter(getCurScope(), Param);
    if (CachedTokens *Toks = LM.DefaultArgs[I].Toks) {
      // Mark the end of the default argument so that we know when to stop when
      // we parse it later on.
      Token LastDefaultArgToken = Toks->back();
      Token DefArgEnd;
      DefArgEnd.startToken();
      DefArgEnd.setKind(tok::eof);
      DefArgEnd.setLocation(LastDefaultArgToken.getEndLoc());
      DefArgEnd.setEofData(Param);
      Toks->push_back(DefArgEnd);

      // Parse the default argument from its saved token stream.
      Toks->push_back(Tok); // So that the current token doesn't get lost
      PP.EnterTokenStream(&Toks->front(), Toks->size(), true, false);

      // Consume the previously-pushed token.
      ConsumeAnyToken();

      // Consume the '='.
      assert(Tok.is(tok::equal) && "Default argument not starting with '='");
      SourceLocation EqualLoc = ConsumeToken();

      // The argument isn't actually potentially evaluated unless it is
      // used.
      EnterExpressionEvaluationContext Eval(Actions,
                                            Sema::PotentiallyEvaluatedIfUsed,
                                            Param);

      ExprResult DefArgResult;
      if (getLangOpts().CPlusPlus11 && Tok.is(tok::l_brace)) {
        Diag(Tok, diag::warn_cxx98_compat_generalized_initializer_lists);
        DefArgResult = ParseBraceInitializer();
      } else
        DefArgResult = ParseAssignmentExpression();
      DefArgResult = Actions.CorrectDelayedTyposInExpr(DefArgResult);
      if (DefArgResult.isInvalid()) {
        Actions.ActOnParamDefaultArgumentError(Param, EqualLoc);
      } else {
        if (Tok.isNot(tok::eof) || Tok.getEofData() != Param) {
          // The last two tokens are the terminator and the saved value of
          // Tok; the last token in the default argument is the one before
          // those.
          assert(Toks->size() >= 3 && "expected a token in default arg");
          Diag(Tok.getLocation(), diag::err_default_arg_unparsed)
            << SourceRange(Tok.getLocation(),
                           (*Toks)[Toks->size() - 3].getLocation());
        }
        Actions.ActOnParamDefaultArgument(Param, EqualLoc,
                                          DefArgResult.get());
      }

      // There could be leftover tokens (e.g. because of an error).
      // Skip through until we reach the 'end of default argument' token.
      while (Tok.isNot(tok::eof))
        ConsumeAnyToken();

      if (Tok.is(tok::eof) && Tok.getEofData() == Param)
        ConsumeAnyToken();

      delete Toks;
      LM.DefaultArgs[I].Toks = nullptr;
    } else if (HasUnparsed) {
      assert(Param->hasInheritedDefaultArg());
      FunctionDecl *Old = cast<FunctionDecl>(LM.Method)->getPreviousDecl();
      ParmVarDecl *OldParam = Old->getParamDecl(I);
      assert (!OldParam->hasUnparsedDefaultArg());
      if (OldParam->hasUninstantiatedDefaultArg())
        Param->setUninstantiatedDefaultArg(
                                    OldParam->getUninstantiatedDefaultArg());
      else
        Param->setDefaultArg(OldParam->getInit());
    }
  }

  // Parse a delayed exception-specification, if there is one.
  if (CachedTokens *Toks = LM.ExceptionSpecTokens) {
    // Add the 'stop' token.
    Token LastExceptionSpecToken = Toks->back();
    Token ExceptionSpecEnd;
    ExceptionSpecEnd.startToken();
    ExceptionSpecEnd.setKind(tok::eof);
    ExceptionSpecEnd.setLocation(LastExceptionSpecToken.getEndLoc());
    ExceptionSpecEnd.setEofData(LM.Method);
    Toks->push_back(ExceptionSpecEnd);

    // Parse the exception-specification from its saved token stream.
    Toks->push_back(Tok); // So that the current token doesn't get lost
    PP.EnterTokenStream(&Toks->front(), Toks->size(), true, false);

    // Consume the previously-pushed token.
    ConsumeAnyToken();

    // C++11 [expr.prim.general]p3:
    //   If a declaration declares a member function or member function
    //   template of a class X, the expression this is a prvalue of type
    //   "pointer to cv-qualifier-seq X" between the optional cv-qualifer-seq
    //   and the end of the function-definition, member-declarator, or
    //   declarator.
    CXXMethodDecl *Method;
    if (FunctionTemplateDecl *FunTmpl
          = dyn_cast<FunctionTemplateDecl>(LM.Method))
      Method = cast<CXXMethodDecl>(FunTmpl->getTemplatedDecl());
    else
      Method = cast<CXXMethodDecl>(LM.Method);

    Sema::CXXThisScopeRAII ThisScope(Actions, Method->getParent(),
                                     Method->getTypeQualifiers(),
                                     getLangOpts().CPlusPlus11);

    // Parse the exception-specification.
    SourceRange SpecificationRange;
    SmallVector<ParsedType, 4> DynamicExceptions;
    SmallVector<SourceRange, 4> DynamicExceptionRanges;
    ExprResult NoexceptExpr;
    CachedTokens *ExceptionSpecTokens;

    ExceptionSpecificationType EST
      = tryParseExceptionSpecification(/*Delayed=*/false, SpecificationRange,
                                       DynamicExceptions,
                                       DynamicExceptionRanges, NoexceptExpr,
                                       ExceptionSpecTokens);

    if (Tok.isNot(tok::eof) || Tok.getEofData() != LM.Method)
      Diag(Tok.getLocation(), diag::err_except_spec_unparsed);

    // Attach the exception-specification to the method.
    Actions.actOnDelayedExceptionSpecification(LM.Method, EST,
                                               SpecificationRange,
                                               DynamicExceptions,
                                               DynamicExceptionRanges,
                                               NoexceptExpr.isUsable()?
                                                 NoexceptExpr.get() : nullptr);

    // There could be leftover tokens (e.g. because of an error).
    // Skip through until we reach the original token position.
    while (Tok.isNot(tok::eof))
      ConsumeAnyToken();

    // Clean up the remaining EOF token.
    if (Tok.is(tok::eof) && Tok.getEofData() == LM.Method)
      ConsumeAnyToken();

    delete Toks;
    LM.ExceptionSpecTokens = nullptr;
  }

  PrototypeScope.Exit();

  // Finish the delayed C++ method declaration.
  Actions.ActOnFinishDelayedCXXMethodDeclaration(getCurScope(), LM.Method);
}
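Both token-replay blocks above use the same sentinel trick: cap the cached stream with a synthetic eof token whose EofData identifies its owner, so the parser can detect exactly where the cached range ends. A minimal sketch of just that step, assuming a populated CachedTokens *Toks and an owner pointer OwnerDecl (variable names are ours):

// Sketch: append a 'stop' token to a saved token stream.
clang::Token Sentinel;
Sentinel.startToken();
Sentinel.setKind(clang::tok::eof);
Sentinel.setLocation(Toks->back().getEndLoc());
Sentinel.setEofData(OwnerDecl); // e.g. the ParmVarDecl or the method
Toks->push_back(Sentinel);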
Example #7
// CUDA 9.0+ uses a new way to launch kernels. Parameters are packed into a
// local array and kernels are launched via cudaLaunchKernel().
void CGNVCUDARuntime::emitDeviceStubBodyNew(CodeGenFunction &CGF,
                                            FunctionArgList &Args) {
  // Build the shadow stack entry at the very start of the function.

  // Calculate amount of space we will need for all arguments.  If we have no
  // args, allocate a single pointer so we still have a valid pointer to the
  // argument array that we can pass to runtime, even if it will be unused.
  Address KernelArgs = CGF.CreateTempAlloca(
      VoidPtrTy, CharUnits::fromQuantity(16), "kernel_args",
      llvm::ConstantInt::get(SizeTy, std::max<size_t>(1, Args.size())));
  // Store pointers to the arguments in a locally allocated launch_args.
  for (unsigned i = 0; i < Args.size(); ++i) {
    llvm::Value* VarPtr = CGF.GetAddrOfLocalVar(Args[i]).getPointer();
    llvm::Value *VoidVarPtr = CGF.Builder.CreatePointerCast(VarPtr, VoidPtrTy);
    CGF.Builder.CreateDefaultAlignedStore(
        VoidVarPtr, CGF.Builder.CreateConstGEP1_32(KernelArgs.getPointer(), i));
  }

  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("setup.end");

  // Lookup cudaLaunchKernel function.
  // cudaError_t cudaLaunchKernel(const void *func, dim3 gridDim, dim3 blockDim,
  //                              void **args, size_t sharedMem,
  //                              cudaStream_t stream);
  TranslationUnitDecl *TUDecl = CGM.getContext().getTranslationUnitDecl();
  DeclContext *DC = TranslationUnitDecl::castToDeclContext(TUDecl);
  IdentifierInfo &cudaLaunchKernelII =
      CGM.getContext().Idents.get("cudaLaunchKernel");
  FunctionDecl *cudaLaunchKernelFD = nullptr;
  for (const auto &Result : DC->lookup(&cudaLaunchKernelII)) {
    if (FunctionDecl *FD = dyn_cast<FunctionDecl>(Result))
      cudaLaunchKernelFD = FD;
  }

  if (cudaLaunchKernelFD == nullptr) {
    CGM.Error(CGF.CurFuncDecl->getLocation(),
              "Can't find declaration for cudaLaunchKernel()");
    return;
  }
  // Create temporary dim3 grid_dim, block_dim.
  ParmVarDecl *GridDimParam = cudaLaunchKernelFD->getParamDecl(1);
  QualType Dim3Ty = GridDimParam->getType();
  Address GridDim =
      CGF.CreateMemTemp(Dim3Ty, CharUnits::fromQuantity(8), "grid_dim");
  Address BlockDim =
      CGF.CreateMemTemp(Dim3Ty, CharUnits::fromQuantity(8), "block_dim");
  Address ShmemSize =
      CGF.CreateTempAlloca(SizeTy, CGM.getSizeAlign(), "shmem_size");
  Address Stream =
      CGF.CreateTempAlloca(VoidPtrTy, CGM.getPointerAlign(), "stream");
  llvm::FunctionCallee cudaPopConfigFn = CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(IntTy,
                              {/*gridDim=*/GridDim.getType(),
                               /*blockDim=*/BlockDim.getType(),
                               /*ShmemSize=*/ShmemSize.getType(),
                               /*Stream=*/Stream.getType()},
                              /*isVarArg=*/false),
      "__cudaPopCallConfiguration");

  CGF.EmitRuntimeCallOrInvoke(cudaPopConfigFn,
                              {GridDim.getPointer(), BlockDim.getPointer(),
                               ShmemSize.getPointer(), Stream.getPointer()});

  // Emit the call to cudaLaunch
  llvm::Value *Kernel = CGF.Builder.CreatePointerCast(CGF.CurFn, VoidPtrTy);
  CallArgList LaunchKernelArgs;
  LaunchKernelArgs.add(RValue::get(Kernel),
                       cudaLaunchKernelFD->getParamDecl(0)->getType());
  LaunchKernelArgs.add(RValue::getAggregate(GridDim), Dim3Ty);
  LaunchKernelArgs.add(RValue::getAggregate(BlockDim), Dim3Ty);
  LaunchKernelArgs.add(RValue::get(KernelArgs.getPointer()),
                       cudaLaunchKernelFD->getParamDecl(3)->getType());
  LaunchKernelArgs.add(RValue::get(CGF.Builder.CreateLoad(ShmemSize)),
                       cudaLaunchKernelFD->getParamDecl(4)->getType());
  LaunchKernelArgs.add(RValue::get(CGF.Builder.CreateLoad(Stream)),
                       cudaLaunchKernelFD->getParamDecl(5)->getType());

  QualType QT = cudaLaunchKernelFD->getType();
  QualType CQT = QT.getCanonicalType();
  llvm::Type *Ty = CGM.getTypes().ConvertType(CQT);
  llvm::FunctionType *FTy = dyn_cast<llvm::FunctionType>(Ty);

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeFunctionDeclaration(cudaLaunchKernelFD);
  llvm::FunctionCallee cudaLaunchKernelFn =
      CGM.CreateRuntimeFunction(FTy, "cudaLaunchKernel");
  CGF.EmitCall(FI, CGCallee::forDirect(cudaLaunchKernelFn), ReturnValueSlot(),
               LaunchKernelArgs);
  CGF.EmitBranch(EndBlock);

  CGF.EmitBlock(EndBlock);
}
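A pattern worth noting in this stub: the QualTypes for the runtime call's arguments come straight from the looked-up declaration via getParamDecl(i)->getType() instead of being re-spelled by hand. A minimal sketch, assuming a resolved FunctionDecl *LaunchFD for cudaLaunchKernel:

// cudaError_t cudaLaunchKernel(const void* func, dim3 gridDim, dim3 blockDim,
//                              void** args, size_t sharedMem,
//                              cudaStream_t stream);
clang::QualType FuncTy   = LaunchFD->getParamDecl(0)->getType(); // const void*
clang::QualType Dim3Ty   = LaunchFD->getParamDecl(1)->getType(); // dim3
clang::QualType ArgsTy   = LaunchFD->getParamDecl(3)->getType(); // void**
clang::QualType ShmemTy  = LaunchFD->getParamDecl(4)->getType(); // size_t
clang::QualType StreamTy = LaunchFD->getParamDecl(5)->getType(); // cudaStream_t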