Example #1
static bool isTrackedVar(const VarDecl *vd, const DeclContext *dc) {
  if (vd->isLocalVarDecl() && !vd->hasGlobalStorage() &&
      !vd->isExceptionVariable() && !vd->isInitCapture() &&
      !vd->isImplicit() && vd->getDeclContext() == dc) {
    QualType ty = vd->getType();
    return ty->isScalarType() || ty->isVectorType() || ty->isRecordType();
  }
  return false;
}
Example #2
/// Classify an expression which creates a temporary, based on its type.
static Cl::Kinds ClassifyTemporary(QualType T) {
  if (T->isRecordType())
    return Cl::CL_ClassTemporary;
  if (T->isArrayType())
    return Cl::CL_ArrayTemporary;

  // No special classification: these don't behave differently from normal
  // prvalues.
  return Cl::CL_PRValue;
}
Example #3
/// FindAllocationFunctions - Finds the overloads of operator new and delete
/// that are appropriate for the allocation.
bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
                                   bool UseGlobal, QualType AllocType,
                                   bool IsArray, Expr **PlaceArgs,
                                   unsigned NumPlaceArgs,
                                   FunctionDecl *&OperatorNew,
                                   FunctionDecl *&OperatorDelete)
{
  // --- Choosing an allocation function ---
  // C++ 5.3.4p8 - 14 & 18
  // 1) If UseGlobal is true, only look in the global scope. Else, also look
  //   in the scope of the allocated class.
  // 2) If an array size is given, look for operator new[], else look for
  //   operator new.
  // 3) The first argument is always size_t. Append the arguments from the
  //   placement form.
  // FIXME: Also find the appropriate delete operator.

  llvm::SmallVector<Expr*, 8> AllocArgs(1 + NumPlaceArgs);
  // We don't care about the actual value of this argument.
  // FIXME: Should the Sema create the expression and embed it in the syntax
  // tree? Or should the consumer just recalculate the value?
  AllocArgs[0] = new (Context) IntegerLiteral(llvm::APInt::getNullValue(
                                        Context.Target.getPointerWidth(0)),
                                    Context.getSizeType(),
                                    SourceLocation());
  std::copy(PlaceArgs, PlaceArgs + NumPlaceArgs, AllocArgs.begin() + 1);

  DeclarationName NewName = Context.DeclarationNames.getCXXOperatorName(
                                        IsArray ? OO_Array_New : OO_New);
  if (AllocType->isRecordType() && !UseGlobal) {
    CXXRecordDecl *Record 
      = cast<CXXRecordDecl>(AllocType->getAsRecordType()->getDecl());
    // FIXME: We fail to find inherited overloads.
    if (FindAllocationOverload(StartLoc, Range, NewName, &AllocArgs[0],
                          AllocArgs.size(), Record, /*AllowMissing=*/true,
                          OperatorNew))
      return true;
  }
  if (!OperatorNew) {
    // Didn't find a member overload. Look for a global one.
    DeclareGlobalNewDelete();
    DeclContext *TUDecl = Context.getTranslationUnitDecl();
    if (FindAllocationOverload(StartLoc, Range, NewName, &AllocArgs[0],
                          AllocArgs.size(), TUDecl, /*AllowMissing=*/false,
                          OperatorNew))
      return true;
  }

  // FIXME: This is leaked on error. But so much is currently in Sema that it's
  // easier to clean it in one go.
  AllocArgs[0]->Destroy(Context);
  return false;
}
Example #4
bool SymbolManager::canSymbolicate(QualType T) {
  if (Loc::IsLocType(T))
    return true;

  if (T->isIntegerType())
    return T->isScalarType();

  if (T->isRecordType())
    return true;

  return false;
}
Example #5
bool IsUninitializedPodVariable(const VarDecl* variableDeclaration, ASTContext* context)
{
    QualType type = variableDeclaration->getType();
    if (! type.isPODType(*context))
        return false;

    if (IsRecordTypeWithoutDataMembers(type))
        return false;

    return (!variableDeclaration->hasInit()) ||
           (type->isRecordType() && HasImplicitInitialization(variableDeclaration));
}
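Example #6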
/// CheckSpecifiedExceptionType - Check if the given type is valid in an
/// exception specification. Incomplete types, or pointers to incomplete types
/// other than void are not allowed.
bool Sema::CheckSpecifiedExceptionType(QualType T, const SourceRange &Range) {

  // This check (and the similar one below) deals with issue 437, which changes
  // C++ 9.2p2 this way:
  // Within the class member-specification, the class is regarded as complete
  // within function bodies, default arguments, exception-specifications, and
  // constructor ctor-initializers (including such things in nested classes).
  if (T->isRecordType() && T->getAs<RecordType>()->isBeingDefined())
    return false;
    
  // C++ 15.4p2: A type denoted in an exception-specification shall not denote
  //   an incomplete type.
  if (RequireCompleteType(Range.getBegin(), T,
      PDiag(diag::err_incomplete_in_exception_spec) << /*direct*/0 << Range))
    return true;

  // C++ 15.4p2: A type denoted in an exception-specification shall not denote
  //   an incomplete type, or a pointer or reference to an incomplete type, other
  //   than (cv) void*.
  int kind;
  if (const PointerType* IT = T->getAs<PointerType>()) {
    T = IT->getPointeeType();
    kind = 1;
  } else if (const ReferenceType* IT = T->getAs<ReferenceType>()) {
    T = IT->getPointeeType();
    kind = 2;
  } else
    return false;

  // Again as before
  if (T->isRecordType() && T->getAs<RecordType>()->isBeingDefined())
    return false;
    
  if (!T->isVoidType() && RequireCompleteType(Range.getBegin(), T,
      PDiag(diag::err_incomplete_in_exception_spec) << kind << Range))
    return true;

  return false;
}
Example #7
DefinedOrUnknownSVal SValBuilder::makeZeroVal(QualType type) {
  if (Loc::isLocType(type))
    return makeNull();

  if (type->isIntegralOrEnumerationType())
    return makeIntVal(0, type);

  if (type->isArrayType() || type->isRecordType() || type->isVectorType() ||
      type->isAnyComplexType())
    return makeCompoundVal(type, BasicVals.getEmptySValList());

  // FIXME: Handle floats.
  return UnknownVal();
}
Example #8
bool SymbolManager::canSymbolicate(QualType T) {
  T = T.getCanonicalType();

  if (Loc::isLocType(T))
    return true;

  if (T->isIntegralOrEnumerationType())
    return true;

  if (T->isRecordType() && !T->isUnionType())
    return true;

  return false;
}
Example #9
void ExprEngine::VisitInitListExpr(const InitListExpr *IE,
                                   ExplodedNode *Pred,
                                   ExplodedNodeSet &Dst) {
  StmtNodeBuilder B(Pred, Dst, *currBldrCtx);

  ProgramStateRef state = Pred->getState();
  const LocationContext *LCtx = Pred->getLocationContext();
  QualType T = getContext().getCanonicalType(IE->getType());
  unsigned NumInitElements = IE->getNumInits();

  if (!IE->isGLValue() &&
      (T->isArrayType() || T->isRecordType() || T->isVectorType() ||
       T->isAnyComplexType())) {
    llvm::ImmutableList<SVal> vals = getBasicVals().getEmptySValList();

    // Handle base case where the initializer has no elements.
    // e.g.: static int* myArray[] = {};
    if (NumInitElements == 0) {
      SVal V = svalBuilder.makeCompoundVal(T, vals);
      B.generateNode(IE, Pred, state->BindExpr(IE, LCtx, V));
      return;
    }

    for (InitListExpr::const_reverse_iterator it = IE->rbegin(),
         ei = IE->rend(); it != ei; ++it) {
      SVal V = state->getSVal(cast<Expr>(*it), LCtx);
      vals = getBasicVals().prependSVal(V, vals);
    }

    B.generateNode(IE, Pred,
                   state->BindExpr(IE, LCtx,
                                   svalBuilder.makeCompoundVal(T, vals)));
    return;
  }

  // Handle scalars: int{5} and int{} and GLvalues.
  // Note, if the InitListExpr is a GLvalue, it means that there is an address
  // representing it, so it must have a single init element.
  assert(NumInitElements <= 1);

  SVal V;
  if (NumInitElements == 0)
    V = getSValBuilder().makeZeroVal(T);
  else
    V = state->getSVal(IE->getInit(0), LCtx);

  B.generateNode(IE, Pred, state->BindExpr(IE, LCtx, V));
}
Example #10
/// ClassifyUnnamed - Return the classification of an expression yielding an
/// unnamed value of the given type. This applies in particular to function
/// calls and casts.
static Cl::Kinds ClassifyUnnamed(ASTContext &Ctx, QualType T) {
  // In C, function calls are always rvalues.
  if (!Ctx.getLangOptions().CPlusPlus) return Cl::CL_PRValue;

  // C++ [expr.call]p10: A function call is an lvalue if the result type is an
  //   lvalue reference type or an rvalue reference to function type, an xvalue
  //   if the result type is an rvalue reference to object type, and a prvalue
  //   otherwise.
  if (T->isLValueReferenceType())
    return Cl::CL_LValue;
  const RValueReferenceType *RV = T->getAs<RValueReferenceType>();
  if (!RV) // Could still be a class temporary, though.
    return T->isRecordType() ? Cl::CL_ClassTemporary : Cl::CL_PRValue;

  return RV->getPointeeType()->isFunctionType() ? Cl::CL_LValue : Cl::CL_XValue;
}
Example #11
void GRExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
                                   ExplodedNodeSet &Dst) {
  if (CNE->isArray()) {
    // FIXME: allocating an array has not been handled.
    return;
  }

  unsigned Count = Builder->getCurrentBlockCount();
  DefinedOrUnknownSVal SymVal = getValueManager().getConjuredSymbolVal(NULL,CNE,
                                                         CNE->getType(), Count);
  const MemRegion *NewReg = cast<loc::MemRegionVal>(SymVal).getRegion();

  QualType ObjTy = CNE->getType()->getAs<PointerType>()->getPointeeType();

  const ElementRegion *EleReg = 
                         getStoreManager().GetElementZeroRegion(NewReg, ObjTy);

  // Evaluate constructor arguments.
  const FunctionProtoType *FnType = NULL;
  const CXXConstructorDecl *CD = CNE->getConstructor();
  if (CD)
    FnType = CD->getType()->getAs<FunctionProtoType>();
  ExplodedNodeSet ArgsEvaluated;
  EvalArguments(CNE->constructor_arg_begin(), CNE->constructor_arg_end(),
                FnType, Pred, ArgsEvaluated);

  // Initialize the object region and bind the 'new' expression.
  for (ExplodedNodeSet::iterator I = ArgsEvaluated.begin(), 
                                 E = ArgsEvaluated.end(); I != E; ++I) {
    const GRState *state = GetState(*I);

    if (ObjTy->isRecordType()) {
      state = state->InvalidateRegion(EleReg, CNE, Count);
    } else {
      if (CNE->hasInitializer()) {
        SVal V = state->getSVal(*CNE->constructor_arg_begin());
        state = state->bindLoc(loc::MemRegionVal(EleReg), V);
      } else {
        // Explicitly set to undefined, because currently we retrieve symbolic
        // value from symbolic region.
        state = state->bindLoc(loc::MemRegionVal(EleReg), UndefinedVal());
      }
    }
    state = state->BindExpr(CNE, loc::MemRegionVal(EleReg));
    MakeNode(Dst, CNE, *I, state);
  }
}
Example #12
void ExprEngine::VisitInitListExpr(const InitListExpr *IE,
                                   ExplodedNode *Pred,
                                   ExplodedNodeSet &Dst) {
  StmtNodeBuilder B(Pred, Dst, *currBldrCtx);

  ProgramStateRef state = Pred->getState();
  const LocationContext *LCtx = Pred->getLocationContext();
  QualType T = getContext().getCanonicalType(IE->getType());
  unsigned NumInitElements = IE->getNumInits();
  
  if (T->isArrayType() || T->isRecordType() || T->isVectorType() ||
      T->isAnyComplexType()) {
    llvm::ImmutableList<SVal> vals = getBasicVals().getEmptySValList();
    
    // Handle base case where the initializer has no elements.
    // e.g.: static int* myArray[] = {};
    if (NumInitElements == 0) {
      SVal V = svalBuilder.makeCompoundVal(T, vals);
      B.generateNode(IE, Pred, state->BindExpr(IE, LCtx, V));
      return;
    }
    
    for (InitListExpr::const_reverse_iterator it = IE->rbegin(),
         ei = IE->rend(); it != ei; ++it) {
      SVal V = state->getSVal(cast<Expr>(*it), LCtx);
      if (dyn_cast_or_null<CXXTempObjectRegion>(V.getAsRegion()))
        V = UnknownVal();
      vals = getBasicVals().consVals(V, vals);
    }
    
    B.generateNode(IE, Pred,
                   state->BindExpr(IE, LCtx,
                                   svalBuilder.makeCompoundVal(T, vals)));
    return;
  }

  // Handle scalars: int{5} and int{}.
  assert(NumInitElements <= 1);

  SVal V;
  if (NumInitElements == 0)
    V = getSValBuilder().makeZeroVal(T);
  else
    V = state->getSVal(IE->getInit(0), LCtx);

  B.generateNode(IE, Pred, state->BindExpr(IE, LCtx, V));
}
Example #13
void ExprEngine::VisitInitListExpr(const InitListExpr *IE,
                                   ExplodedNode *Pred,
                                   ExplodedNodeSet &Dst) {
  StmtNodeBuilder B(Pred, Dst, *currentBuilderContext);

  const ProgramState *state = Pred->getState();
  const LocationContext *LCtx = Pred->getLocationContext();
  QualType T = getContext().getCanonicalType(IE->getType());
  unsigned NumInitElements = IE->getNumInits();
  
  if (T->isArrayType() || T->isRecordType() || T->isVectorType()) {
    llvm::ImmutableList<SVal> vals = getBasicVals().getEmptySValList();
    
    // Handle base case where the initializer has no elements.
    // e.g.: static int* myArray[] = {};
    if (NumInitElements == 0) {
      SVal V = svalBuilder.makeCompoundVal(T, vals);
      B.generateNode(IE, Pred, state->BindExpr(IE, LCtx, V));
      return;
    }
    
    for (InitListExpr::const_reverse_iterator it = IE->rbegin(),
         ei = IE->rend(); it != ei; ++it) {
      vals = getBasicVals().consVals(state->getSVal(cast<Expr>(*it), LCtx),
                                     vals);
    }
    
    B.generateNode(IE, Pred,
                   state->BindExpr(IE, LCtx,
                                   svalBuilder.makeCompoundVal(T, vals)));
    return;
  }
  
  if (Loc::isLocType(T) || T->isIntegerType()) {
    assert(IE->getNumInits() == 1);
    const Expr *initEx = IE->getInit(0);
    B.generateNode(IE, Pred, state->BindExpr(IE, LCtx,
                                             state->getSVal(initEx, LCtx)));
    return;
  }
  
  llvm_unreachable("unprocessed InitListExpr type");
}
Example #14
/// TryStaticImplicitCast - Tests whether a conversion according to C++ 5.2.9p2
/// is valid:
///
///   An expression e can be explicitly converted to a type T using a
///   @c static_cast if the declaration "T t(e);" is well-formed [...].
TryCastResult
TryStaticImplicitCast(Sema &Self, Expr *SrcExpr, QualType DestType,
                      bool CStyle, const SourceRange &OpRange, unsigned &msg)
{
  if (DestType->isReferenceType()) {
    // At this point of CheckStaticCast, if the destination is a reference,
    // this has to work. There is no other way that works.
    // On the other hand, if we're checking a C-style cast, we've still got
    // the reinterpret_cast way. In that case, we pass an ICS so we don't
    // get error messages.
    ImplicitConversionSequence ICS;
    bool failed = Self.CheckReferenceInit(SrcExpr, DestType, CStyle ? &ICS : 0);
    if (!failed)
      return TC_Success;
    if (CStyle)
      return TC_NotApplicable;
    // If we didn't pass the ICS, we already got an error message.
    msg = 0;
    return TC_Failed;
  }
  if (DestType->isRecordType()) {
    // There are no further possibilities for the target type being a class,
    // neither in static_cast nor in a C-style cast. So we can fail here.
    // FIXME: We need to store this constructor in the AST.
    if (Self.PerformInitializationByConstructor(DestType, &SrcExpr, 1,
          OpRange.getBegin(), OpRange, DeclarationName(), Sema::IK_Direct))
      return TC_Success;
    // The function already emitted an error.
    msg = 0;
    return TC_Failed;
  }

  // FIXME: To get a proper error from invalid conversions here, we need to
  // reimplement more of this.
  // FIXME: This does not actually perform the conversion, and thus does not
  // check for ambiguity or access.
  ImplicitConversionSequence ICS = Self.TryImplicitConversion(
    SrcExpr, DestType);
  return ICS.ConversionKind == ImplicitConversionSequence::BadConversion ?
    TC_NotApplicable : TC_Success;
}
Example #15
/// ActOnCXXDelete - Parsed a C++ 'delete' expression (C++ 5.3.5), as in:
/// @code ::delete ptr; @endcode
/// or
/// @code delete [] ptr; @endcode
Action::OwningExprResult
Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
                     bool ArrayForm, ExprArg Operand)
{
  // C++ 5.3.5p1: "The operand shall have a pointer type, or a class type
  //   having a single conversion function to a pointer type. The result has
  //   type void."
  // DR599 amends "pointer type" to "pointer to object type" in both cases.

  Expr *Ex = (Expr *)Operand.get();
  if (!Ex->isTypeDependent()) {
    QualType Type = Ex->getType();

    if (Type->isRecordType()) {
      // FIXME: Find that one conversion function and amend the type.
    }

    if (!Type->isPointerType())
      return ExprError(Diag(StartLoc, diag::err_delete_operand)
        << Type << Ex->getSourceRange());

    QualType Pointee = Type->getAsPointerType()->getPointeeType();
    if (Pointee->isFunctionType() || Pointee->isVoidType())
      return ExprError(Diag(StartLoc, diag::err_delete_operand)
        << Type << Ex->getSourceRange());
    else if (!Pointee->isDependentType() &&
             RequireCompleteType(StartLoc, Pointee, 
                                 diag::warn_delete_incomplete,
                                 Ex->getSourceRange()))
      return ExprError();

    // FIXME: Look up the correct operator delete overload and pass a pointer
    // along.
    // FIXME: Check access and ambiguity of operator delete and destructor.
  }

  Operand.release();
  return Owned(new (Context) CXXDeleteExpr(Context.VoidTy, UseGlobal, ArrayForm,
                                           0, Ex, StartLoc));
}
Example #16
/// TryStaticImplicitCast - Tests whether a conversion according to C++ 5.2.9p2
/// is valid:
///
///   An expression e can be explicitly converted to a type T using a
///   @c static_cast if the declaration "T t(e);" is well-formed [...].
TryStaticCastResult
TryStaticImplicitCast(Sema &Self, Expr *SrcExpr, QualType DestType,
                      const SourceRange &OpRange)
{
  if (DestType->isReferenceType()) {
    // At this point of CheckStaticCast, if the destination is a reference,
    // this has to work. There is no other way that works.
    return Self.CheckReferenceInit(SrcExpr, DestType) ?
      TSC_Failed : TSC_Success;
  }
  if (DestType->isRecordType()) {
    // FIXME: Use an implementation of C++ [over.match.ctor] for this.
    return TSC_NotApplicable;
  }

  // FIXME: To get a proper error from invalid conversions here, we need to
  // reimplement more of this.
  ImplicitConversionSequence ICS = Self.TryImplicitConversion(
    SrcExpr, DestType);
  return ICS.ConversionKind == ImplicitConversionSequence::BadConversion ?
    TSC_NotApplicable : TSC_Success;
}
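Example #17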
void CastToStructChecker::checkPreStmt(const CastExpr *CE,
                                       CheckerContext &C) const {
    const Expr *E = CE->getSubExpr();
    ASTContext &Ctx = C.getASTContext();
    QualType OrigTy = Ctx.getCanonicalType(E->getType());
    QualType ToTy = Ctx.getCanonicalType(CE->getType());

    const PointerType *OrigPTy = dyn_cast<PointerType>(OrigTy.getTypePtr());
    const PointerType *ToPTy = dyn_cast<PointerType>(ToTy.getTypePtr());

    if (!ToPTy || !OrigPTy)
        return;

    QualType OrigPointeeTy = OrigPTy->getPointeeType();
    QualType ToPointeeTy = ToPTy->getPointeeType();

    if (!ToPointeeTy->isStructureOrClassType())
        return;

    // We allow cast from void*.
    if (OrigPointeeTy->isVoidType())
        return;

    // Now the cast-to-type is struct pointer, the original type is not void*.
    if (!OrigPointeeTy->isRecordType()) {
        if (ExplodedNode *N = C.addTransition()) {
            if (!BT)
                BT.reset(
                    new BuiltinBug(this, "Cast from non-struct type to struct type",
                                   "Casting a non-structure type to a structure type "
                                   "and accessing a field can lead to memory access "
                                   "errors or data corruption."));
            BugReport *R = new BugReport(*BT,BT->getDescription(), N);
            R->addRange(CE->getSourceRange());
            C.emitReport(R);
        }
    }
}
Example #18
  ASTNodeInfo EvaluateTSynthesizer::VisitDeclStmt(DeclStmt* Node) {
    // Visit all the children, which are the contents of the DeclGroupRef
    for (Stmt::child_iterator
           I = Node->child_begin(), E = Node->child_end(); I != E; ++I) {
      if (*I) {
        Expr* E = cast_or_null<Expr>(*I);
        if (!E || !IsArtificiallyDependent(E))
          continue;
        //FIXME: don't assume there is only one decl.
        assert(Node->isSingleDecl() && "There is more than one decl in stmt");
        VarDecl* CuredDecl = cast_or_null<VarDecl>(Node->getSingleDecl());
        assert(CuredDecl && "Not a variable declaration!");
        QualType CuredDeclTy = CuredDecl->getType();
        // check if the case is sometype * somevar = init;
        // or some_builtin_type somevar = init;
        if (CuredDecl->hasInit() && (CuredDeclTy->isAnyPointerType()
                                     || !CuredDeclTy->isRecordType())) {
          *I = SubstituteUnknownSymbol(CuredDeclTy, CuredDecl->getInit());
          continue;
        }

        // 1. Check whether this is the case of MyClass A(dep->symbol())
        // 2. Insert the RuntimeUniverse's LifetimeHandler instance
        // 3. Change the A's initializer to *(MyClass*)instance.getMemory()
        // 4. Make A reference (&A)
        // 5. Set the new initializer of A
        if (CuredDeclTy->isLValueReferenceType())
          continue;

        // Set Sema's Current DeclContext to the one we need
        DeclContext* OldDC = m_Sema->CurContext;
        m_Sema->CurContext = CuredDecl->getDeclContext();

        ASTNodeInfo NewNode;
        // 2.1 Get unique name for the LifetimeHandler instance and
        // initialize it
        std::string UniqueName;
        createUniqueName(UniqueName);
        IdentifierInfo& II = m_Context->Idents.get(UniqueName);

        // Prepare the initialization Exprs.
        // We want to call LifetimeHandler(DynamicExprInfo* ExprInfo,
        //                                 DeclContext* DC,
        //                                 const char* type,
        //                                 Interpreter* interp)
        llvm::SmallVector<Expr*, 4> Inits;
        // Add MyClass in LifetimeHandler unique(DynamicExprInfo* ExprInfo,
        //                                       DC,
        //                                       "MyClass",
        //                                       Interpreter* Interp)
        // Build Arg0 DynamicExprInfo
        Inits.push_back(BuildDynamicExprInfo(E));
        // Build Arg1 DeclContext* DC
        QualType DCTy = m_Context->getTypeDeclType(m_DeclContextDecl);
        Inits.push_back(utils::Synthesize::CStyleCastPtrExpr(m_Sema, DCTy,
                                                     (uint64_t)m_CurDeclContext)
                        );
        // Build Arg2 llvm::StringRef
        // Get the type of the type without specifiers
        PrintingPolicy Policy(m_Context->getLangOpts());
        Policy.SuppressTagKeyword = 1;
        std::string Res;
        CuredDeclTy.getAsStringInternal(Res, Policy);
        Inits.push_back(ConstructConstCharPtrExpr(Res.c_str()));

        // Build Arg3 cling::Interpreter
        CXXScopeSpec CXXSS;
        DeclarationNameInfo NameInfo(m_gCling->getDeclName(), 
                                     m_gCling->getLocStart());
        Expr* gClingDRE 
          = m_Sema->BuildDeclarationNameExpr(CXXSS, NameInfo ,m_gCling).take();
        Inits.push_back(gClingDRE);
        
        // 2.3 Create a variable from LifetimeHandler.
        QualType HandlerTy = m_Context->getTypeDeclType(m_LifetimeHandlerDecl);
        TypeSourceInfo* TSI = m_Context->getTrivialTypeSourceInfo(HandlerTy,
                                                                  m_NoSLoc);
        VarDecl* HandlerInstance = VarDecl::Create(*m_Context,
                                                   CuredDecl->getDeclContext(),
                                                   m_NoSLoc,
                                                   m_NoSLoc,
                                                   &II,
                                                   HandlerTy,
                                                   TSI,
                                                   SC_None);
        
        // 2.4 Call the best-match constructor. The method does overload
        // resolution of the constructors and then initializes the new
        // variable with it
        ExprResult InitExprResult
          = m_Sema->ActOnParenListExpr(m_NoSLoc,
                                       m_NoELoc,
                                       Inits);
        m_Sema->AddInitializerToDecl(HandlerInstance,
                                     InitExprResult.take(),
                                     /*DirectInit*/ true,
                                     /*TypeMayContainAuto*/ false);
        
        // 2.5 Register the instance in the enclosing context
        CuredDecl->getDeclContext()->addDecl(HandlerInstance);
        NewNode.addNode(new (m_Context)
                        DeclStmt(DeclGroupRef(HandlerInstance),
                                 m_NoSLoc,
                                 m_NoELoc)
                        );
        
        // 3.1 Build a DeclRefExpr, which holds the object
        DeclRefExpr* MemberExprBase
          = m_Sema->BuildDeclRefExpr(HandlerInstance,
                                     HandlerTy,
                                     VK_LValue,
                                     m_NoSLoc
                                     ).takeAs<DeclRefExpr>();
        // 3.2 Create a MemberExpr to getMemory from its declaration.
        CXXScopeSpec SS;
        LookupResult MemberLookup(*m_Sema, m_LHgetMemoryDecl->getDeclName(),
                                  m_NoSLoc, Sema::LookupMemberName);
        // Add the declaration as if it doesn't exist.
        // TODO: Check whether this is the most appropriate variant
        MemberLookup.addDecl(m_LHgetMemoryDecl, AS_public);
        MemberLookup.resolveKind();
        Expr* MemberExpr = m_Sema->BuildMemberReferenceExpr(MemberExprBase,
                                                            HandlerTy,
                                                            m_NoSLoc,
                                                            /*IsArrow=*/false,
                                                            SS,
                                                            m_NoSLoc,
                                                     /*FirstQualifierInScope=*/0,
                                                            MemberLookup,
                                                            /*TemplateArgs=*/0
                                                            ).take();
        // 3.3 Build the actual call
        Scope* S = m_Sema->getScopeForContext(m_Sema->CurContext);
        Expr* theCall = m_Sema->ActOnCallExpr(S,
                                              MemberExpr,
                                              m_NoSLoc,
                                              MultiExprArg(),
                                              m_NoELoc).take();
        // Cast to the LHS type
        Expr* Result 
          = utils::Synthesize::CStyleCastPtrExpr(m_Sema, CuredDeclTy, theCall);
        // Cast once more (dereference the cstyle cast)
        Result = m_Sema->BuildUnaryOp(S, m_NoSLoc, UO_Deref, Result).take();
        // 4.
        CuredDecl->setType(m_Context->getLValueReferenceType(CuredDeclTy));
        // 5.
        CuredDecl->setInit(Result);

        NewNode.addNode(Node);

        // Restore Sema's original DeclContext
        m_Sema->CurContext = OldDC;
        return NewNode;
      }
    }
    return ASTNodeInfo(Node, 0);
  }
Example #19
/// EmitLocalBlockVarDecl - Emit code and set up an entry in LocalDeclMap for a
/// variable declaration with auto, register, or no storage class specifier.
/// These turn into simple stack objects, or GlobalValues depending on target.
void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
  QualType Ty = D.getType();
  bool isByRef = D.hasAttr<BlocksAttr>();
  bool needsDispose = false;
  unsigned Align = 0;
  bool IsSimpleConstantInitializer = false;

  llvm::Value *DeclPtr;
  if (Ty->isConstantSizeType()) {
    if (!Target.useGlobalsForAutomaticVariables()) {
      
      // If this value is an array or struct, is POD, and if the initializer is
      // a statically determinable constant, try to optimize it.
      if (D.getInit() && !isByRef &&
          (Ty->isArrayType() || Ty->isRecordType()) &&
          Ty->isPODType() &&
          D.getInit()->isConstantInitializer(getContext())) {
        // If this variable is marked 'const', emit the value as a global.
        if (CGM.getCodeGenOpts().MergeAllConstants &&
            Ty.isConstant(getContext())) {
          EmitStaticBlockVarDecl(D);
          return;
        }
        
        IsSimpleConstantInitializer = true;
      }
      
      // A normal fixed sized variable becomes an alloca in the entry block.
      const llvm::Type *LTy = ConvertTypeForMem(Ty);
      if (isByRef)
        LTy = BuildByRefType(&D);
      llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
      Alloc->setName(D.getNameAsString());

      Align = getContext().getDeclAlignInBytes(&D);
      if (isByRef)
        Align = std::max(Align, unsigned(Target.getPointerAlign(0) / 8));
      Alloc->setAlignment(Align);
      DeclPtr = Alloc;
    } else {
      // Targets that don't support recursion emit locals as globals.
      const char *Class =
        D.getStorageClass() == VarDecl::Register ? ".reg." : ".auto.";
      DeclPtr = CreateStaticBlockVarDecl(D, Class,
                                         llvm::GlobalValue
                                         ::InternalLinkage);
    }

    // FIXME: Can this happen?
    if (Ty->isVariablyModifiedType())
      EmitVLASize(Ty);
  } else {
    EnsureInsertPoint();

    if (!DidCallStackSave) {
      // Save the stack.
      const llvm::Type *LTy = llvm::Type::getInt8PtrTy(VMContext);
      llvm::Value *Stack = CreateTempAlloca(LTy, "saved_stack");

      llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
      llvm::Value *V = Builder.CreateCall(F);

      Builder.CreateStore(V, Stack);

      DidCallStackSave = true;

      {
        // Push a cleanup block and restore the stack there.
        DelayedCleanupBlock scope(*this);

        V = Builder.CreateLoad(Stack, "tmp");
        llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
        Builder.CreateCall(F, V);
      }
    }

    // Get the element type.
    const llvm::Type *LElemTy = ConvertTypeForMem(Ty);
    const llvm::Type *LElemPtrTy =
      llvm::PointerType::get(LElemTy, D.getType().getAddressSpace());

    llvm::Value *VLASize = EmitVLASize(Ty);

    // Downcast the VLA size expression
    VLASize = Builder.CreateIntCast(VLASize, llvm::Type::getInt32Ty(VMContext),
                                    false, "tmp");

    // Allocate memory for the array.
    llvm::AllocaInst *VLA = 
      Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext), VLASize, "vla");
    VLA->setAlignment(getContext().getDeclAlignInBytes(&D));

    DeclPtr = Builder.CreateBitCast(VLA, LElemPtrTy, "tmp");
  }

  llvm::Value *&DMEntry = LocalDeclMap[&D];
  assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
  DMEntry = DeclPtr;

  // Emit debug info for local var declaration.
  if (CGDebugInfo *DI = getDebugInfo()) {
    assert(HaveInsertPoint() && "Unexpected unreachable point!");

    DI->setLocation(D.getLocation());
    if (Target.useGlobalsForAutomaticVariables()) {
      DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(DeclPtr), &D);
    } else
      DI->EmitDeclareOfAutoVariable(&D, DeclPtr, Builder);
  }

  // If this local has an initializer, emit it now.
  const Expr *Init = D.getInit();

  // If we are at an unreachable point, we don't need to emit the initializer
  // unless it contains a label.
  if (!HaveInsertPoint()) {
    if (!ContainsLabel(Init))
      Init = 0;
    else
      EnsureInsertPoint();
  }

  if (Init) {
    llvm::Value *Loc = DeclPtr;
    if (isByRef)
      Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D), 
                                    D.getNameAsString());

    bool isVolatile =
      getContext().getCanonicalType(D.getType()).isVolatileQualified();
    
    // If the initializer was a simple constant initializer, we can optimize it
    // in various ways.
    if (IsSimpleConstantInitializer) {
      llvm::Constant *Init = CGM.EmitConstantExpr(D.getInit(),D.getType(),this);
      assert(Init != 0 && "Wasn't a simple constant init?");
      
      llvm::Value *AlignVal = 
        llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Align);
      const llvm::Type *IntPtr =
        llvm::IntegerType::get(VMContext, LLVMPointerWidth);
      llvm::Value *SizeVal =
        llvm::ConstantInt::get(IntPtr, getContext().getTypeSizeInBytes(Ty));

      const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
      if (Loc->getType() != BP)
        Loc = Builder.CreateBitCast(Loc, BP, "tmp");
      
      // If the initializer is all zeros, codegen with memset.
      if (isa<llvm::ConstantAggregateZero>(Init)) {
        llvm::Value *Zero =
          llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 0);
        Builder.CreateCall4(CGM.getMemSetFn(), Loc, Zero, SizeVal, AlignVal);
      } else {
        // Otherwise, create a temporary global with the initializer then 
        // memcpy from the global to the alloca.
        std::string Name = GetStaticDeclName(*this, D, ".");
        llvm::GlobalVariable *GV =
          new llvm::GlobalVariable(CGM.getModule(), Init->getType(), true,
                                   llvm::GlobalValue::InternalLinkage,
                                   Init, Name, 0, false, 0);
        GV->setAlignment(Align);

        llvm::Value *SrcPtr = GV;
        if (SrcPtr->getType() != BP)
          SrcPtr = Builder.CreateBitCast(SrcPtr, BP, "tmp");
        
        Builder.CreateCall4(CGM.getMemCpyFn(), Loc, SrcPtr, SizeVal, AlignVal);
      }
    } else if (Ty->isReferenceType()) {
      RValue RV = EmitReferenceBindingToExpr(Init, Ty, /*IsInitializer=*/true);
      EmitStoreOfScalar(RV.getScalarVal(), Loc, false, Ty);
    } else if (!hasAggregateLLVMType(Init->getType())) {
      llvm::Value *V = EmitScalarExpr(Init);
      EmitStoreOfScalar(V, Loc, isVolatile, D.getType());
    } else if (Init->getType()->isAnyComplexType()) {
      EmitComplexExprIntoAddr(Init, Loc, isVolatile);
    } else {
      EmitAggExpr(Init, Loc, isVolatile);
    }
  }

  if (isByRef) {
    const llvm::PointerType *PtrToInt8Ty = llvm::Type::getInt8PtrTy(VMContext);

    EnsureInsertPoint();
    llvm::Value *isa_field = Builder.CreateStructGEP(DeclPtr, 0);
    llvm::Value *forwarding_field = Builder.CreateStructGEP(DeclPtr, 1);
    llvm::Value *flags_field = Builder.CreateStructGEP(DeclPtr, 2);
    llvm::Value *size_field = Builder.CreateStructGEP(DeclPtr, 3);
    llvm::Value *V;
    int flag = 0;
    int flags = 0;

    needsDispose = true;

    if (Ty->isBlockPointerType()) {
      flag |= BLOCK_FIELD_IS_BLOCK;
      flags |= BLOCK_HAS_COPY_DISPOSE;
    } else if (BlockRequiresCopying(Ty)) {
      flag |= BLOCK_FIELD_IS_OBJECT;
      flags |= BLOCK_HAS_COPY_DISPOSE;
    }

    // FIXME: Someone double check this.
    if (Ty.isObjCGCWeak())
      flag |= BLOCK_FIELD_IS_WEAK;

    int isa = 0;
    if (flag&BLOCK_FIELD_IS_WEAK)
      isa = 1;
    V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), isa);
    V = Builder.CreateIntToPtr(V, PtrToInt8Ty, "isa");
    Builder.CreateStore(V, isa_field);

    Builder.CreateStore(DeclPtr, forwarding_field);

    V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), flags);
    Builder.CreateStore(V, flags_field);

    const llvm::Type *V1;
    V1 = cast<llvm::PointerType>(DeclPtr->getType())->getElementType();
    V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
                               (CGM.getTargetData().getTypeStoreSizeInBits(V1)
                                / 8));
    Builder.CreateStore(V, size_field);

    if (flags & BLOCK_HAS_COPY_DISPOSE) {
      BlockHasCopyDispose = true;
      llvm::Value *copy_helper = Builder.CreateStructGEP(DeclPtr, 4);
      Builder.CreateStore(BuildbyrefCopyHelper(DeclPtr->getType(), flag, Align),
                          copy_helper);

      llvm::Value *destroy_helper = Builder.CreateStructGEP(DeclPtr, 5);
      Builder.CreateStore(BuildbyrefDestroyHelper(DeclPtr->getType(), flag,
                                                  Align),
                          destroy_helper);
    }
  }

  // Handle CXX destruction of variables.
  QualType DtorTy(Ty);
  while (const ArrayType *Array = getContext().getAsArrayType(DtorTy))
    DtorTy = getContext().getBaseElementType(Array);
  if (const RecordType *RT = DtorTy->getAs<RecordType>())
    if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
      if (!ClassDecl->hasTrivialDestructor()) {
        const CXXDestructorDecl *D = ClassDecl->getDestructor(getContext());
        assert(D && "EmitLocalBlockVarDecl - destructor is null");
        
        if (const ConstantArrayType *Array = 
              getContext().getAsConstantArrayType(Ty)) {
          {
            DelayedCleanupBlock Scope(*this);
            QualType BaseElementTy = getContext().getBaseElementType(Array);
            const llvm::Type *BasePtr = ConvertType(BaseElementTy);
            BasePtr = llvm::PointerType::getUnqual(BasePtr);
            llvm::Value *BaseAddrPtr =
              Builder.CreateBitCast(DeclPtr, BasePtr);
            EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
          
            // Make sure to jump to the exit block.
            EmitBranch(Scope.getCleanupExitBlock());
          }
          if (Exceptions) {
            EHCleanupBlock Cleanup(*this);
            QualType BaseElementTy = getContext().getBaseElementType(Array);
            const llvm::Type *BasePtr = ConvertType(BaseElementTy);
            BasePtr = llvm::PointerType::getUnqual(BasePtr);
            llvm::Value *BaseAddrPtr =
              Builder.CreateBitCast(DeclPtr, BasePtr);
            EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
          }
        } else {
          {
            DelayedCleanupBlock Scope(*this);
            EmitCXXDestructorCall(D, Dtor_Complete, DeclPtr);

            // Make sure to jump to the exit block.
            EmitBranch(Scope.getCleanupExitBlock());
          }
          if (Exceptions) {
            EHCleanupBlock Cleanup(*this);
            EmitCXXDestructorCall(D, Dtor_Complete, DeclPtr);
          }
        }
      }
  }

  // Handle the cleanup attribute
  if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
    const FunctionDecl *FD = CA->getFunctionDecl();

    llvm::Constant* F = CGM.GetAddrOfFunction(FD);
    assert(F && "Could not find function!");

    const CGFunctionInfo &Info = CGM.getTypes().getFunctionInfo(FD);

    // In some cases, the type of the function argument will be different from
    // the type of the pointer. An example of this is
    // void f(void* arg);
    // __attribute__((cleanup(f))) void *g;
    //
    // To fix this we insert a bitcast here.
    QualType ArgTy = Info.arg_begin()->type;
    {
      DelayedCleanupBlock scope(*this);

      CallArgList Args;
      Args.push_back(std::make_pair(RValue::get(Builder.CreateBitCast(DeclPtr,
                                                           ConvertType(ArgTy))),
                                    getContext().getPointerType(D.getType())));
      EmitCall(Info, F, Args);
    }
    if (Exceptions) {
      EHCleanupBlock Cleanup(*this);

      CallArgList Args;
      Args.push_back(std::make_pair(RValue::get(Builder.CreateBitCast(DeclPtr,
                                                           ConvertType(ArgTy))),
                                    getContext().getPointerType(D.getType())));
      EmitCall(Info, F, Args);
    }
  }

  if (needsDispose && CGM.getLangOptions().getGCMode() != LangOptions::GCOnly) {
    {
      DelayedCleanupBlock scope(*this);
      llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
      V = Builder.CreateLoad(V);
      BuildBlockRelease(V);
    }
    // FIXME: Turn this on and audit the codegen
    if (0 && Exceptions) {
      EHCleanupBlock Cleanup(*this);
      llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
      V = Builder.CreateLoad(V);
      BuildBlockRelease(V);
    }
  }
}
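Example #20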
void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
                                   ExplodedNodeSet &Dst) {
  // FIXME: Much of this should eventually migrate to CXXAllocatorCall.
  // Also, we need to decide how allocators actually work -- they're not
  // really part of the CXXNewExpr because they happen BEFORE the
  // CXXConstructExpr subexpression. See PR12014 for some discussion.
  StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
  
  unsigned blockCount = currBldrCtx->blockCount();
  const LocationContext *LCtx = Pred->getLocationContext();
  DefinedOrUnknownSVal symVal = svalBuilder.conjureSymbolVal(0, CNE, LCtx,
                                                             CNE->getType(),
                                                             blockCount);
  ProgramStateRef State = Pred->getState();

  CallEventManager &CEMgr = getStateManager().getCallEventManager();
  CallEventRef<CXXAllocatorCall> Call =
    CEMgr.getCXXAllocatorCall(CNE, State, LCtx);

  // Invalidate placement args.
  // FIXME: Once we figure out how we want allocators to work,
  // we should be using the usual pre-/(default-)eval-/post-call checks here.
  State = Call->invalidateRegions(blockCount);

  if (CNE->isArray()) {
    // FIXME: allocating an array requires simulating the constructors.
    // For now, just return a symbolicated region.
    const MemRegion *NewReg = cast<loc::MemRegionVal>(symVal).getRegion();
    QualType ObjTy = CNE->getType()->getAs<PointerType>()->getPointeeType();
    const ElementRegion *EleReg =
      getStoreManager().GetElementZeroRegion(NewReg, ObjTy);
    State = State->BindExpr(CNE, Pred->getLocationContext(),
                            loc::MemRegionVal(EleReg));
    Bldr.generateNode(CNE, Pred, State);
    return;
  }

  // FIXME: Once we have proper support for CXXConstructExprs inside
  // CXXNewExpr, we need to make sure that the constructed object is not
  // immediately invalidated here. (The placement call should happen before
  // the constructor call anyway.)
  FunctionDecl *FD = CNE->getOperatorNew();
  if (FD && FD->isReservedGlobalPlacementOperator()) {
    // Non-array placement new should always return the placement location.
    SVal PlacementLoc = State->getSVal(CNE->getPlacementArg(0), LCtx);
    State = State->BindExpr(CNE, LCtx, PlacementLoc);
  } else {
    State = State->BindExpr(CNE, LCtx, symVal);
  }

  // If the type is not a record, we won't have a CXXConstructExpr as an
  // initializer. Copy the value over.
  if (const Expr *Init = CNE->getInitializer()) {
    if (!isa<CXXConstructExpr>(Init)) {
      QualType ObjTy = CNE->getType()->getAs<PointerType>()->getPointeeType();
      (void)ObjTy;
      assert(!ObjTy->isRecordType());
      SVal Location = State->getSVal(CNE, LCtx);
      if (isa<Loc>(Location))
        State = State->bindLoc(cast<Loc>(Location), State->getSVal(Init, LCtx));
    }
  }

  Bldr.generateNode(CNE, Pred, State);
}
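Example #21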
/// CheckExceptionSpecSubset - Check whether the second function type's
/// exception specification is a subset (or equivalent) of the first function
/// type. This is used by override and pointer assignment checks.
bool Sema::CheckExceptionSpecSubset(
    const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
    const FunctionProtoType *Superset, SourceLocation SuperLoc,
    const FunctionProtoType *Subset, SourceLocation SubLoc) {
  // FIXME: As usual, we could be more specific in our error messages, but
  // that better waits until we've got types with source locations.

  if (!SubLoc.isValid())
    SubLoc = SuperLoc;

  // If superset contains everything, we're done.
  if (!Superset->hasExceptionSpec() || Superset->hasAnyExceptionSpec())
    return CheckParamExceptionSpec(NoteID, Superset, SuperLoc, Subset, SubLoc);

  // It does not. If the subset contains everything, we've failed.
  if (!Subset->hasExceptionSpec() || Subset->hasAnyExceptionSpec()) {
    Diag(SubLoc, DiagID);
    if (NoteID.getDiagID() != 0)
      Diag(SuperLoc, NoteID);
    return true;
  }

  // Neither contains everything. Do a proper comparison.
  for (FunctionProtoType::exception_iterator SubI = Subset->exception_begin(),
       SubE = Subset->exception_end(); SubI != SubE; ++SubI) {
    // Take one type from the subset.
    QualType CanonicalSubT = Context.getCanonicalType(*SubI);
    // Unwrap pointers and references so that we can do checks within a class
    // hierarchy. Don't unwrap member pointers; they don't have hierarchy
    // conversions on the pointee.
    bool SubIsPointer = false;
    if (const ReferenceType *RefTy = CanonicalSubT->getAs<ReferenceType>())
      CanonicalSubT = RefTy->getPointeeType();
    if (const PointerType *PtrTy = CanonicalSubT->getAs<PointerType>()) {
      CanonicalSubT = PtrTy->getPointeeType();
      SubIsPointer = true;
    }
    bool SubIsClass = CanonicalSubT->isRecordType();
    CanonicalSubT = CanonicalSubT.getLocalUnqualifiedType();

    CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                       /*DetectVirtual=*/false);

    bool Contained = false;
    // Make sure it's in the superset.
    for (FunctionProtoType::exception_iterator SuperI =
           Superset->exception_begin(), SuperE = Superset->exception_end();
         SuperI != SuperE; ++SuperI) {
      QualType CanonicalSuperT = Context.getCanonicalType(*SuperI);
      // SubT must be SuperT or derived from it, or pointer or reference to
      // such types.
      if (const ReferenceType *RefTy = CanonicalSuperT->getAs<ReferenceType>())
        CanonicalSuperT = RefTy->getPointeeType();
      if (SubIsPointer) {
        if (const PointerType *PtrTy = CanonicalSuperT->getAs<PointerType>())
          CanonicalSuperT = PtrTy->getPointeeType();
        else {
          continue;
        }
      }
      CanonicalSuperT = CanonicalSuperT.getLocalUnqualifiedType();
      // If the types are the same, move on to the next type in the subset.
      if (CanonicalSubT == CanonicalSuperT) {
        Contained = true;
        break;
      }

      // Otherwise we need to check the inheritance.
      if (!SubIsClass || !CanonicalSuperT->isRecordType())
        continue;

      Paths.clear();
      if (!IsDerivedFrom(CanonicalSubT, CanonicalSuperT, Paths))
        continue;

      if (Paths.isAmbiguous(CanonicalSuperT))
        continue;

      // Do this check from a context without privileges.
      switch (CheckBaseClassAccess(SourceLocation(), false,
                                   CanonicalSuperT, CanonicalSubT,
                                   Paths.front(),
                                   /*ForceCheck*/ true,
                                   /*ForceUnprivileged*/ true,
                                   ADK_quiet)) {
      case AR_accessible: break;
      case AR_inaccessible: continue;
      case AR_dependent:
        llvm_unreachable("access check dependent for unprivileged context");
        break;
      case AR_delayed:
        llvm_unreachable("access check delayed in non-declaration");
        break;
      }

      Contained = true;
      break;
    }
    if (!Contained) {
      Diag(SubLoc, DiagID);
      if (NoteID.getDiagID() != 0)
        Diag(SuperLoc, NoteID);
      return true;
    }
  }
  // We've run half the gauntlet.
  return CheckParamExceptionSpec(NoteID, Superset, SuperLoc, Subset, SubLoc);
}
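Example #22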
  Expr* ValueExtractionSynthesizer::SynthesizeSVRInit(Expr* E) {
    if (!m_gClingVD)
      FindAndCacheRuntimeDecls();

    // Build a reference to gCling
    ExprResult gClingDRE
      = m_Sema->BuildDeclRefExpr(m_gClingVD, m_Context->VoidPtrTy,
                                 VK_RValue, SourceLocation());
    // We have the wrapper as Sema's CurContext
    FunctionDecl* FD = cast<FunctionDecl>(m_Sema->CurContext);

    ExprWithCleanups* Cleanups = 0;
    // In case of ExprWithCleanups we need to extend its 'scope' to the call.
    if (E && isa<ExprWithCleanups>(E)) {
      Cleanups = cast<ExprWithCleanups>(E);
      E = Cleanups->getSubExpr();
    }

    // Build a reference to Value* in the wrapper, should be
    // the only argument of the wrapper.
    SourceLocation locStart = (E) ? E->getLocStart() : FD->getLocStart();
    SourceLocation locEnd = (E) ? E->getLocEnd() : FD->getLocEnd();
    ExprResult wrapperSVRDRE
      = m_Sema->BuildDeclRefExpr(FD->getParamDecl(0), m_Context->VoidPtrTy,
                                 VK_RValue, locStart);
    QualType ETy = (E) ? E->getType() : m_Context->VoidTy;
    QualType desugaredTy = ETy.getDesugaredType(*m_Context);

    // The expr result is transported as reference, pointer, array, float etc
    // based on the desugared type. We should still expose the typedef'ed
    // (sugared) type to the cling::Value.
    if (desugaredTy->isRecordType() && E->getValueKind() == VK_LValue) {
      // returning a lvalue (not a temporary): the value should contain
      // a reference to the lvalue instead of copying it.
      desugaredTy = m_Context->getLValueReferenceType(desugaredTy);
      ETy = m_Context->getLValueReferenceType(ETy);
    }
    Expr* ETyVP
      = utils::Synthesize::CStyleCastPtrExpr(m_Sema, m_Context->VoidPtrTy,
                                             (uint64_t)ETy.getAsOpaquePtr());
    Expr* ETransaction
      = utils::Synthesize::CStyleCastPtrExpr(m_Sema, m_Context->VoidPtrTy,
                                             (uint64_t)getTransaction());

    llvm::SmallVector<Expr*, 6> CallArgs;
    CallArgs.push_back(gClingDRE.take());
    CallArgs.push_back(wrapperSVRDRE.take());
    CallArgs.push_back(ETyVP);
    CallArgs.push_back(ETransaction);

    ExprResult Call;
    SourceLocation noLoc;
    if (desugaredTy->isVoidType()) {
      // In cases where the cling::Value gets reused we need to reset the
      // previous settings to void.
      // We need to synthesize setValueNoAlloc(...), E, because we still need
      // to run E.

      // FIXME: Suboptimal: this discards the already created AST nodes.
      QualType vpQT = m_Context->VoidPtrTy;
      QualType vQT = m_Context->VoidTy;
      Expr* vpQTVP
        = utils::Synthesize::CStyleCastPtrExpr(m_Sema, vpQT,
                                               (uint64_t)vQT.getAsOpaquePtr());
      CallArgs[2] = vpQTVP;


      Call = m_Sema->ActOnCallExpr(/*Scope*/0, m_UnresolvedNoAlloc,
                                   locStart, CallArgs, locEnd);

      if (E)
        Call = m_Sema->CreateBuiltinBinOp(locStart, BO_Comma, Call.take(), E);

    }
    else if (desugaredTy->isRecordType() || desugaredTy->isConstantArrayType()){
      // 2) object types:
      // check existence of a copy constructor before the call
      if (!availableCopyConstructor(desugaredTy, m_Sema))
        return E;
      // call new (setValueWithAlloc(gCling, &SVR, ETy)) (E)
      Call = m_Sema->ActOnCallExpr(/*Scope*/0, m_UnresolvedWithAlloc,
                                   locStart, CallArgs, locEnd);
      Expr* placement = Call.take();
      if (const ConstantArrayType* constArray
          = dyn_cast<ConstantArrayType>(desugaredTy.getTypePtr())) {
        CallArgs.clear();
        CallArgs.push_back(E);
        CallArgs.push_back(placement);
        uint64_t arrSize
          = m_Context->getConstantArrayElementCount(constArray);
        Expr* arrSizeExpr
          = utils::Synthesize::IntegerLiteralExpr(*m_Context, arrSize);

        CallArgs.push_back(arrSizeExpr);
        // 2.1) arrays:
        // call copyArray(T* src, void* placement, int size)
        Call = m_Sema->ActOnCallExpr(/*Scope*/0, m_UnresolvedCopyArray,
                                     locStart, CallArgs, locEnd);

      }
      else {
        TypeSourceInfo* ETSI
          = m_Context->getTrivialTypeSourceInfo(ETy, noLoc);

        Call = m_Sema->BuildCXXNew(E->getSourceRange(),
                                   /*useGlobal ::*/true,
                                   /*placementLParen*/ noLoc,
                                   MultiExprArg(placement),
                                   /*placementRParen*/ noLoc,
                                   /*TypeIdParens*/ SourceRange(),
                                   /*allocType*/ ETSI->getType(),
                                   /*allocTypeInfo*/ETSI,
                                   /*arraySize*/0,
                                   /*directInitRange*/E->getSourceRange(),
                                   /*initializer*/E,
                                   /*mayContainAuto*/false
                                   );
      }
    }
    else if (desugaredTy->isIntegralOrEnumerationType()
             || desugaredTy->isReferenceType()
             || desugaredTy->isPointerType()
             || desugaredTy->isFloatingType()) {
      if (desugaredTy->isIntegralOrEnumerationType()) {
        // 1) enum, integral, float, double, reference, pointer types:
        //      call to cling::internal::setValueNoAlloc(...);

        // If the type is enum or integral we need to force-cast it into
        // uint64 in order to pick up the correct overload.
        if (desugaredTy->isIntegralOrEnumerationType()) {
          QualType UInt64Ty = m_Context->UnsignedLongLongTy;
          TypeSourceInfo* TSI
            = m_Context->getTrivialTypeSourceInfo(UInt64Ty, noLoc);
          Expr* castedE
            = m_Sema->BuildCStyleCastExpr(noLoc, TSI, noLoc, E).take();
          CallArgs.push_back(castedE);
        }
      }
      else if (desugaredTy->isReferenceType()) {
        // we need to get the address of the references
        Expr* AddrOfE = m_Sema->BuildUnaryOp(/*Scope*/0, noLoc, UO_AddrOf,
                                             E).take();
        CallArgs.push_back(AddrOfE);
      }
      else if (desugaredTy->isPointerType()) {
        // function pointers need explicit void* cast.
        QualType VoidPtrTy = m_Context->VoidPtrTy;
        TypeSourceInfo* TSI
          = m_Context->getTrivialTypeSourceInfo(VoidPtrTy, noLoc);
        Expr* castedE
          = m_Sema->BuildCStyleCastExpr(noLoc, TSI, noLoc, E).take();
        CallArgs.push_back(castedE);
      }
      else if (desugaredTy->isFloatingType()) {
        // floats and doubles will fall naturally in the correct
        // case, because of the overload resolution.
        CallArgs.push_back(E);
      }
      Call = m_Sema->ActOnCallExpr(/*Scope*/0, m_UnresolvedNoAlloc,
                                   locStart, CallArgs, locEnd);
    }
    else
      assert(0 && "Unhandled code path?");

    assert(!Call.isInvalid() && "Invalid Call");

    // Extend the scope of the temporary cleaner if applicable.
    if (Cleanups) {
      Cleanups->setSubExpr(Call.take());
      Cleanups->setValueKind(Call.take()->getValueKind());
      Cleanups->setType(Call.take()->getType());
      return Cleanups;
    }
    return Call.take();
  }
Example #23
bool CodeGenFunction::hasAggregateLLVMType(QualType T) {
  return T->isRecordType() || T->isArrayType() || T->isAnyComplexType() ||
    T->isObjCObjectType();
}
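Example #24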
void AvoidCStyleCastsCheck::check(const MatchFinder::MatchResult &Result) {
  const auto *CastExpr = Result.Nodes.getNodeAs<CStyleCastExpr>("cast");

  // Ignore casts in macros.
  if (CastExpr->getExprLoc().isMacroID())
    return;

  // Casting to void is an idiomatic way to mute "unused variable" and similar
  // warnings.
  if (CastExpr->getCastKind() == CK_ToVoid)
    return;

  auto isFunction = [](QualType T) {
    T = T.getCanonicalType().getNonReferenceType();
    return T->isFunctionType() || T->isFunctionPointerType() ||
           T->isMemberFunctionPointerType();
  };

  const QualType DestTypeAsWritten =
      CastExpr->getTypeAsWritten().getUnqualifiedType();
  const QualType SourceTypeAsWritten =
      CastExpr->getSubExprAsWritten()->getType().getUnqualifiedType();
  const QualType SourceType = SourceTypeAsWritten.getCanonicalType();
  const QualType DestType = DestTypeAsWritten.getCanonicalType();

  auto ReplaceRange = CharSourceRange::getCharRange(
      CastExpr->getLParenLoc(), CastExpr->getSubExprAsWritten()->getBeginLoc());

  bool FnToFnCast =
      isFunction(SourceTypeAsWritten) && isFunction(DestTypeAsWritten);

  if (CastExpr->getCastKind() == CK_NoOp && !FnToFnCast) {
    // Function pointer/reference casts may be needed to resolve ambiguities in
    // case of overloaded functions, so detection of redundant casts is trickier
    // in this case. Don't emit "redundant cast" warnings for function
    // pointer/reference types.
    if (SourceTypeAsWritten == DestTypeAsWritten) {
      diag(CastExpr->getBeginLoc(), "redundant cast to the same type")
          << FixItHint::CreateRemoval(ReplaceRange);
      return;
    }
  }

  // The rest of this check is only relevant to C++.
  // We also disable it for Objective-C++.
  if (!getLangOpts().CPlusPlus || getLangOpts().ObjC1 || getLangOpts().ObjC2)
    return;
  // Ignore code inside extern "C" {} blocks.
  if (!match(expr(hasAncestor(linkageSpecDecl())), *CastExpr, *Result.Context)
           .empty())
    return;
  // Ignore code in .c files and headers included from them, even if they are
  // compiled as C++.
  if (getCurrentMainFile().endswith(".c"))
    return;

  SourceManager &SM = *Result.SourceManager;

  // Ignore code in .c files #included in other files (which shouldn't be done,
  // but people still do this for test and other purposes).
  if (SM.getFilename(SM.getSpellingLoc(CastExpr->getBeginLoc())).endswith(".c"))
    return;

  // Leave type spelling exactly as it was (unlike
  // getTypeAsWritten().getAsString() which would spell enum types 'enum X').
  StringRef DestTypeString =
      Lexer::getSourceText(CharSourceRange::getTokenRange(
                               CastExpr->getLParenLoc().getLocWithOffset(1),
                               CastExpr->getRParenLoc().getLocWithOffset(-1)),
                           SM, getLangOpts());

  auto Diag =
      diag(CastExpr->getBeginLoc(), "C-style casts are discouraged; use %0");

  auto ReplaceWithCast = [&](std::string CastText) {
    const Expr *SubExpr = CastExpr->getSubExprAsWritten()->IgnoreImpCasts();
    if (!isa<ParenExpr>(SubExpr)) {
      CastText.push_back('(');
      Diag << FixItHint::CreateInsertion(
          Lexer::getLocForEndOfToken(SubExpr->getEndLoc(), 0, SM,
                                     getLangOpts()),
          ")");
    }
    Diag << FixItHint::CreateReplacement(ReplaceRange, CastText);
  };
  auto ReplaceWithNamedCast = [&](StringRef CastType) {
    Diag << CastType;
    ReplaceWithCast((CastType + "<" + DestTypeString + ">").str());
  };

  // Suggest appropriate C++ cast. See [expr.cast] for cast notation semantics.
  switch (CastExpr->getCastKind()) {
  case CK_FunctionToPointerDecay:
    ReplaceWithNamedCast("static_cast");
    return;
  case CK_ConstructorConversion:
    if (!CastExpr->getTypeAsWritten().hasQualifiers() &&
        DestTypeAsWritten->isRecordType() &&
        !DestTypeAsWritten->isElaboratedTypeSpecifier()) {
      Diag << "constructor call syntax";
      // FIXME: Validate DestTypeString, maybe.
      ReplaceWithCast(DestTypeString.str());
    } else {
      ReplaceWithNamedCast("static_cast");
    }
    return;
  case CK_NoOp:
    if (FnToFnCast) {
      ReplaceWithNamedCast("static_cast");
      return;
    }
    if (SourceType == DestType) {
      Diag << "static_cast (if needed, the cast may be redundant)";
      ReplaceWithCast(("static_cast<" + DestTypeString + ">").str());
      return;
    }
    if (needsConstCast(SourceType, DestType) &&
        pointedUnqualifiedTypesAreEqual(SourceType, DestType)) {
      ReplaceWithNamedCast("const_cast");
      return;
    }
    if (DestType->isReferenceType()) {
      QualType Dest = DestType.getNonReferenceType();
      QualType Source = SourceType.getNonReferenceType();
      if (Source == Dest.withConst() ||
          SourceType.getNonReferenceType() == DestType.getNonReferenceType()) {
        ReplaceWithNamedCast("const_cast");
        return;
      }
      break;
    }
  // FALLTHROUGH
  case clang::CK_IntegralCast:
    // Convert integral and no-op casts between builtin types and enums to
    // static_cast. A cast from enum to integer may be unnecessary, but it's
    // still retained.
    if ((SourceType->isBuiltinType() || SourceType->isEnumeralType()) &&
        (DestType->isBuiltinType() || DestType->isEnumeralType())) {
      ReplaceWithNamedCast("static_cast");
      return;
    }
    break;
  case CK_BitCast:
    // FIXME: Suggest const_cast<...>(reinterpret_cast<...>(...)) replacement.
    if (!needsConstCast(SourceType, DestType)) {
      if (SourceType->isVoidPointerType())
        ReplaceWithNamedCast("static_cast");
      else
        ReplaceWithNamedCast("reinterpret_cast");
      return;
    }
    break;
  default:
    break;
  }

  Diag << "static_cast/const_cast/reinterpret_cast";
}
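
To make the branches above concrete, here is a small hypothetical translation unit showing casts this check flags and the fix-its the paths above produce (the variable names are invented for the example):

void castExamples(long l, const char* s, void* buffer) {
  int n = (int)l;              // CK_IntegralCast between builtins: suggests static_cast<int>(l)
  int m = (int)n;              // same written type: "redundant cast to the same type", removal fix-it
  char* w = (char*)s;          // only casts away constness: suggests const_cast<char*>(s)
  double* d = (double*)buffer; // bitcast from void*: suggests static_cast<double*>(buffer)
  (void)n; (void)m; (void)w; (void)d;
}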
Example #25
/// TryStaticDowncast - Common functionality of TryStaticReferenceDowncast and
/// TryStaticPointerDowncast. Tests whether a static downcast from SrcType to
/// DestType, both of which must be canonical, is possible and allowed.
TryStaticCastResult
TryStaticDowncast(Sema &Self, QualType SrcType, QualType DestType,
                  const SourceRange &OpRange, QualType OrigSrcType,
                  QualType OrigDestType)
{
  // Downcast can only happen in class hierarchies, so we need classes.
  if (!DestType->isRecordType() || !SrcType->isRecordType()) {
    return TSC_NotApplicable;
  }

  BasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/false,
                  /*DetectVirtual=*/true);
  if (!Self.IsDerivedFrom(DestType, SrcType, Paths)) {
    return TSC_NotApplicable;
  }

  // Target type does derive from source type. Now we're serious. If an error
  // appears now, it's not ignored.
  // This may not be entirely in line with the standard. Take for example:
  // struct A {};
  // struct B : virtual A {
  //   B(A&);
  // };
  // 
  // void f()
  // {
  //   (void)static_cast<const B&>(*((A*)0));
  // }
  // As far as the standard is concerned, p5 does not apply (A is virtual), so
  // p2 should be used instead - "const B& t(*((A*)0));" is perfectly valid.
  // However, both GCC and Comeau reject this example, and accepting it would
  // mean more complex code if we're to preserve the nice error message.
  // FIXME: Being 100% compliant here would be nice to have.

  // Must preserve cv, as always.
  if (!DestType.isAtLeastAsQualifiedAs(SrcType)) {
    Self.Diag(OpRange.getBegin(), diag::err_bad_cxx_cast_const_away)
      << "static_cast" << OrigDestType << OrigSrcType << OpRange;
    return TSC_Failed;
  }

  if (Paths.isAmbiguous(SrcType.getUnqualifiedType())) {
    // This code is analogous to that in CheckDerivedToBaseConversion, except
    // that it builds the paths in reverse order.
    // To sum up: record all paths to the base and build a nice string from
    // them. Use it to spice up the error message.
    Paths.clear();
    Paths.setRecordingPaths(true);
    Self.IsDerivedFrom(DestType, SrcType, Paths);
    std::string PathDisplayStr;
    std::set<unsigned> DisplayedPaths;
    for (BasePaths::paths_iterator Path = Paths.begin();
         Path != Paths.end(); ++Path) {
      if (DisplayedPaths.insert(Path->back().SubobjectNumber).second) {
        // We haven't displayed a path to this particular base
        // class subobject yet.
        PathDisplayStr += "\n    ";
        for (BasePath::const_reverse_iterator Element = Path->rbegin();
             Element != Path->rend(); ++Element)
          PathDisplayStr += Element->Base->getType().getAsString() + " -> ";
        PathDisplayStr += DestType.getAsString();
      }
    }

    Self.Diag(OpRange.getBegin(), diag::err_ambiguous_base_to_derived_cast)
      << SrcType.getUnqualifiedType() << DestType.getUnqualifiedType()
      << PathDisplayStr << OpRange;
    return TSC_Failed;
  }

  if (Paths.getDetectedVirtual() != 0) {
    QualType VirtualBase(Paths.getDetectedVirtual(), 0);
    Self.Diag(OpRange.getBegin(), diag::err_static_downcast_via_virtual)
      << OrigSrcType << OrigDestType << VirtualBase << OpRange;
    return TSC_Failed;
  }

  // FIXME: Test accessibility.

  return TSC_Success;
}
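
The classifications above correspond to familiar source-level situations; a minimal sketch in plain C++ (no Clang APIs) of the cases this routine accepts and rejects:

struct A { virtual ~A() {} };
struct B : A {};                  // ordinary base: downcast is allowed
struct V : virtual A {};          // virtual base: static downcast must be rejected
struct C : A {};
struct D : B, C {};               // two A subobjects: downcast from A is ambiguous

void downcastCases(A& a, const A& ca) {
  (void)static_cast<B&>(a);       // TSC_Success: DestType derives from SrcType
  // static_cast<B&>(ca);         // TSC_Failed: would cast away constness
  // static_cast<V&>(a);          // TSC_Failed: path runs through a virtual base
  // static_cast<D&>(a);          // TSC_Failed: base class A is ambiguous in D
  (void)ca;
}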
Example #26
void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) {
  // FIXME: Handle rethrows.
  if (!E->getSubExpr()) {
    ErrorUnsupported(E, "rethrow expression");
    return;
  }
  
  QualType ThrowType = E->getSubExpr()->getType();
  // FIXME: We only handle non-class types for now.
  if (ThrowType->isRecordType()) {
    ErrorUnsupported(E, "throw expression");
    return;
  }

  // FIXME: Handle cleanup.
  if (!CleanupEntries.empty()){
    ErrorUnsupported(E, "throw expression");
    return;
  }
  
  // Now allocate the exception object.
  const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
  uint64_t TypeSize = getContext().getTypeSize(ThrowType) / 8;
  
  llvm::Constant *AllocExceptionFn = getAllocateExceptionFn(*this);
  llvm::Value *ExceptionPtr = 
    Builder.CreateCall(AllocExceptionFn, 
                       llvm::ConstantInt::get(SizeTy, TypeSize),
                       "exception");

  // Store the throw exception in the exception object.
  if (!hasAggregateLLVMType(ThrowType)) {
    llvm::Value *Value = EmitScalarExpr(E->getSubExpr());
    const llvm::Type *ValuePtrTy = Value->getType()->getPointerTo(0);
    
    Builder.CreateStore(Value, Builder.CreateBitCast(ExceptionPtr, ValuePtrTy));
  } else {
    // FIXME: Handle complex and aggregate expressions.
    ErrorUnsupported(E, "throw expression");
  }
  
  // Now throw the exception.
  const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(getLLVMContext());
  
  llvm::SmallString<256> OutName;
  llvm::raw_svector_ostream Out(OutName);
  mangleCXXRtti(CGM.getMangleContext(), ThrowType, Out);
  
  // FIXME: Is it OK to use CreateRuntimeVariable for this?
  llvm::Constant *TypeInfo = 
    CGM.CreateRuntimeVariable(llvm::Type::getInt8Ty(getLLVMContext()),
                              OutName.c_str());
  llvm::Constant *Dtor = llvm::Constant::getNullValue(Int8PtrTy);
  
  llvm::CallInst *ThrowCall = 
    Builder.CreateCall3(getThrowFn(*this), ExceptionPtr, TypeInfo, Dtor);
  ThrowCall->setDoesNotReturn();
  Builder.CreateUnreachable();
  
  // Clear the insertion point to indicate we are in unreachable code.
  Builder.ClearInsertionPoint();
}
bool CodeGenFunction::hasAggregateLLVMType(QualType T) {
  return T->isRecordType() || T->isArrayType() || T->isAnyComplexType() ||
    T->isMemberFunctionPointerType();
}
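
For reference, the scalar-throw path handled by EmitCXXThrowExpr above corresponds, on the Itanium C++ ABI, to roughly the following lowering. This is a source-level sketch with the emitted runtime calls noted in comments; the call names come from the ABI, not from the snippet itself.

// Only non-class throws with no pending cleanups are handled by the code above.
int mayThrow(int n) {
  if (n < 0)
    throw n;   // roughly lowered to:
               //   1. ptr = __cxa_allocate_exception(sizeof(int))
               //   2. store the scalar value of 'n' into ptr
               //   3. __cxa_throw(ptr, &typeid(int), /*destructor*/ nullptr)
  return n * 2;
}
// Throwing a class type, or throwing while cleanups are pending, still falls
// into the ErrorUnsupported branches shown above.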
Example #28
void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
                                   ExplodedNodeSet &Dst) {
  
  unsigned blockCount = Builder->getCurrentBlockCount();
  DefinedOrUnknownSVal symVal =
    svalBuilder.getConjuredSymbolVal(NULL, CNE, CNE->getType(), blockCount);
  const MemRegion *NewReg = cast<loc::MemRegionVal>(symVal).getRegion();  
  QualType ObjTy = CNE->getType()->getAs<PointerType>()->getPointeeType();
  const ElementRegion *EleReg = 
    getStoreManager().GetElementZeroRegion(NewReg, ObjTy);

  if (CNE->isArray()) {
    // FIXME: allocating an array requires simulating the constructors.
    // For now, just return a symbolicated region.
    const GRState *state = GetState(Pred);
    state = state->BindExpr(CNE, loc::MemRegionVal(EleReg));
    MakeNode(Dst, CNE, Pred, state);
    return;
  }

  // Evaluate constructor arguments.
  const FunctionProtoType *FnType = NULL;
  const CXXConstructorDecl *CD = CNE->getConstructor();
  if (CD)
    FnType = CD->getType()->getAs<FunctionProtoType>();
  ExplodedNodeSet argsEvaluated;
  evalArguments(CNE->constructor_arg_begin(), CNE->constructor_arg_end(),
                FnType, Pred, argsEvaluated);

  // Initialize the object region and bind the 'new' expression.
  for (ExplodedNodeSet::iterator I = argsEvaluated.begin(), 
                                 E = argsEvaluated.end(); I != E; ++I) {

    const GRState *state = GetState(*I);
    
    // Accumulate list of regions that are invalidated.
    // FIXME: Eventually we should unify the logic for constructor
    // processing in one place.
    llvm::SmallVector<const MemRegion*, 10> regionsToInvalidate;
    for (CXXNewExpr::const_arg_iterator
          ai = CNE->constructor_arg_begin(), ae = CNE->constructor_arg_end();
          ai != ae; ++ai)
    {
      SVal val = state->getSVal(*ai);
      if (const MemRegion *region = val.getAsRegion())
        regionsToInvalidate.push_back(region);
    }

    if (ObjTy->isRecordType()) {
      regionsToInvalidate.push_back(EleReg);
      // Invalidate the regions.
      state = state->invalidateRegions(regionsToInvalidate.data(),
                                       regionsToInvalidate.data() +
                                       regionsToInvalidate.size(),
                                       CNE, blockCount, 0,
                                       /* invalidateGlobals = */ true);
      
    } else {
      // Invalidate the regions.
      state = state->invalidateRegions(regionsToInvalidate.data(),
                                       regionsToInvalidate.data() +
                                       regionsToInvalidate.size(),
                                       CNE, blockCount, 0,
                                       /* invalidateGlobals = */ true);

      if (CNE->hasInitializer()) {
        SVal V = state->getSVal(*CNE->constructor_arg_begin());
        state = state->bindLoc(loc::MemRegionVal(EleReg), V);
      } else {
        // Explicitly set to undefined, because currently we retrieve symbolic
        // value from symbolic region.
        state = state->bindLoc(loc::MemRegionVal(EleReg), UndefinedVal());
      }
    }
    state = state->BindExpr(CNE, loc::MemRegionVal(EleReg));
    MakeNode(Dst, CNE, *I, state);
  }
}
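
At the source level, the branches taken by VisitCXXNewExpr above map onto the following kinds of new-expressions (a hypothetical snippet, names invented):

struct S { S(int x) : f(x) {} int f; };

void newCases() {
  int* a = new int[4];   // array new: result is just a symbolicated region,
                         // constructors are not simulated
  S*   r = new S(1);     // record type: the allocated region and any argument
                         // regions get invalidated
  int* i = new int(42);  // non-record with initializer: region bound to 42
  int* u = new int;      // non-record, no initializer: region bound to UndefinedVal
  delete[] a; delete r; delete i; delete u;
}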
Example #29
static bool isRecordType(QualType T) {
  return T->isRecordType();
}