Example #1
bool Declarator::isDeclarationOfFunction() const {
  for (unsigned i = 0, i_end = DeclTypeInfo.size(); i < i_end; ++i) {
    switch (DeclTypeInfo[i].Kind) {
    case DeclaratorChunk::Function:
      return true;
    case DeclaratorChunk::Paren:
      continue;
    case DeclaratorChunk::Pointer:
    case DeclaratorChunk::Reference:
    case DeclaratorChunk::Array:
    case DeclaratorChunk::BlockPointer:
    case DeclaratorChunk::MemberPointer:
      return false;
    }
    llvm_unreachable("Invalid type chunk");
  }
  
  switch (DS.getTypeSpecType()) {
    case TST_atomic:
    case TST_auto:
    case TST_bool:
    case TST_char:
    case TST_char16:
    case TST_char32:
    case TST_class:
    case TST_decimal128:
    case TST_decimal32:
    case TST_decimal64:
    case TST_double:
    case TST_enum:
    case TST_error:
    case TST_float:
    case TST_half:
    case TST_int:
    case TST_int128:
    case TST_struct:
    case TST_interface:
    case TST_union:
    case TST_unknown_anytype:
    case TST_unspecified:
    case TST_void:
    case TST_wchar:
      return false;

    case TST_decltype:
    case TST_typeofExpr:
      if (Expr *E = DS.getRepAsExpr())
        return E->getType()->isFunctionType();
      return false;
     
    case TST_underlyingType:
    case TST_typename:
    case TST_typeofType: {
      QualType QT = DS.getRepAsType().get();
      if (QT.isNull())
        return false;
      
      if (const LocInfoType *LIT = dyn_cast<LocInfoType>(QT))
        QT = LIT->getType();

      if (QT.isNull())
        return false;
        
      return QT->isFunctionType();
    }
  }

  llvm_unreachable("Invalid TypeSpecType!");
}
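
// Illustrative sketch (not part of the original): the chunk-scanning idea
// above, restated with stand-in types instead of Clang's Declarator API.
// The outermost non-paren chunk decides the answer; grouping parens are
// simply skipped.
#include <vector>

enum class ChunkKind { Function, Paren, Pointer, Reference, Array };

bool isFunctionDeclarator(const std::vector<ChunkKind> &Chunks) {
  for (ChunkKind K : Chunks) {
    switch (K) {
    case ChunkKind::Function:
      return true;   // e.g. "int f()": the first chunk is a function
    case ChunkKind::Paren:
      continue;      // "(f)" only groups; keep scanning inward
    default:
      return false;  // a pointer/reference/array chunk hides any function
    }
  }
  return false;      // no chunks: the type specifier alone must decide
}
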
Example #2
bool RetainSummaryManager::applyParamAnnotationEffect(
    const ParmVarDecl *pd, unsigned parm_idx, const NamedDecl *FD,
    RetainSummaryTemplate &Template) {
  QualType QT = pd->getType();
  if (auto K =
          hasAnyEnabledAttrOf<NSConsumedAttr, CFConsumedAttr, OSConsumedAttr,
                              GeneralizedConsumedAttr>(pd, QT)) {
    Template->addArg(AF, parm_idx, ArgEffect(DecRef, *K));
    return true;
  } else if (auto K = hasAnyEnabledAttrOf<
                 CFReturnsRetainedAttr, OSReturnsRetainedAttr,
                 OSReturnsRetainedOnNonZeroAttr, OSReturnsRetainedOnZeroAttr,
                 GeneralizedReturnsRetainedAttr>(pd, QT)) {

    // For OSObjects, we try to guess whether the object is created based
    // on the return value.
    if (K == ObjKind::OS) {
      QualType QT = getCallableReturnType(FD);

      bool HasRetainedOnZero = pd->hasAttr<OSReturnsRetainedOnZeroAttr>();
      bool HasRetainedOnNonZero = pd->hasAttr<OSReturnsRetainedOnNonZeroAttr>();

      // The usual convention is to create an object on non-zero return, but
      // it's reversed if the typedef chain contains kern_return_t, whose
      // success constant (KERN_SUCCESS) is defined as zero.
      // The convention can be overridden by explicit attributes.
      bool SuccessOnZero =
          HasRetainedOnZero ||
          (hasTypedefNamed(QT, "kern_return_t") && !HasRetainedOnNonZero);
      bool ShouldSplit = !QT.isNull() && !QT->isVoidType();
      ArgEffectKind AK = RetainedOutParameter;
      if (ShouldSplit && SuccessOnZero) {
        AK = RetainedOutParameterOnZero;
      } else if (ShouldSplit && (!SuccessOnZero || HasRetainedOnNonZero)) {
        AK = RetainedOutParameterOnNonZero;
      }
      Template->addArg(AF, parm_idx, ArgEffect(AK, ObjKind::OS));
    }

    // For all other object kinds, do nothing: retained out parameters will
    // either point to a +1 reference or NULL, but the way you check for
    // failure differs depending on the API. Consequently, we don't have a
    // good way to track them yet.
    return true;
  } else if (auto K = hasAnyEnabledAttrOf<CFReturnsNotRetainedAttr,
                                          OSReturnsNotRetainedAttr,
                                          GeneralizedReturnsNotRetainedAttr>(
                 pd, QT)) {
    Template->addArg(AF, parm_idx, ArgEffect(UnretainedOutParameter, *K));
    return true;
  }

  if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
    for (const auto *OD : MD->overridden_methods()) {
      const ParmVarDecl *OP = OD->parameters()[parm_idx];
      if (applyParamAnnotationEffect(OP, parm_idx, OD, Template))
        return true;
    }
  }

  return false;
}
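
// Illustrative sketch (not part of the original): the override-chain
// fallback above, reduced to a toy Method type. If the declaration itself
// carries no annotation, each overridden method is consulted recursively.
#include <vector>

struct Method {
  bool Annotated = false;
  std::vector<const Method *> Overridden;  // methods this one overrides
};

bool hasInheritedAnnotation(const Method &M) {
  if (M.Annotated)
    return true;
  for (const Method *Base : M.Overridden)
    if (hasInheritedAnnotation(*Base))
      return true;
  return false;
}
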
Example #3
/// CheckConstCast - Check that a const_cast\<DestType\>(SrcExpr) is valid.
/// Refer to C++ 5.2.11 for details. const_cast is typically used in code
/// like this:
/// const char *str = "literal";
/// legacy_function(const_cast\<char*\>(str));
void
CheckConstCast(Sema &Self, Expr *&SrcExpr, QualType DestType,
               const SourceRange &OpRange, const SourceRange &DestRange)
{
  QualType OrigDestType = DestType, OrigSrcType = SrcExpr->getType();

  DestType = Self.Context.getCanonicalType(DestType);
  QualType SrcType = SrcExpr->getType();
  if (const LValueReferenceType *DestTypeTmp =
        DestType->getAsLValueReferenceType()) {
    if (SrcExpr->isLvalue(Self.Context) != Expr::LV_Valid) {
      // Cannot cast non-lvalue to lvalue reference type.
      Self.Diag(OpRange.getBegin(), diag::err_bad_cxx_cast_rvalue)
        << "const_cast" << OrigDestType << SrcExpr->getSourceRange();
      return;
    }

    // C++ 5.2.11p4: An lvalue of type T1 can be [cast] to an lvalue of type T2
    //   [...] if a pointer to T1 can be [cast] to the type pointer to T2.
    DestType = Self.Context.getPointerType(DestTypeTmp->getPointeeType());
    SrcType = Self.Context.getPointerType(SrcType);
  } else {
    // C++ 5.2.11p1: Otherwise, the result is an rvalue and the
    //   lvalue-to-rvalue, array-to-pointer, and function-to-pointer standard
    //   conversions are performed on the expression.
    Self.DefaultFunctionArrayConversion(SrcExpr);
    SrcType = SrcExpr->getType();
  }

  // C++ 5.2.11p5: For a const_cast involving pointers to data members [...]
  //   the rules for const_cast are the same as those used for pointers.

  if (!DestType->isPointerType() && !DestType->isMemberPointerType()) {
    // Cannot cast to non-pointer, non-reference type. Note that, if DestType
    // was a reference type, we converted it to a pointer above.
    // The status of rvalue references isn't entirely clear, but it looks like
    // conversion to them is simply invalid.
    // C++ 5.2.11p3: For two pointer types [...]
    Self.Diag(OpRange.getBegin(), diag::err_bad_const_cast_dest)
      << OrigDestType << DestRange;
    return;
  }
  if (DestType->isFunctionPointerType() ||
      DestType->isMemberFunctionPointerType()) {
    // Cannot cast direct function pointers.
    // C++ 5.2.11p2: [...] where T is any object type or the void type [...]
    // T is the ultimate pointee of source and target type.
    Self.Diag(OpRange.getBegin(), diag::err_bad_const_cast_dest)
      << OrigDestType << DestRange;
    return;
  }
  SrcType = Self.Context.getCanonicalType(SrcType);

  // Unwrap the pointers. Ignore qualifiers. Terminate early if the types are
  // completely equal.
  // FIXME: const_cast should probably not be able to convert between pointers
  // to different address spaces.
  // C++ 5.2.11p3 describes the core semantics of const_cast. All cv specifiers
  // in multi-level pointers may change, but the level count must be the same,
  // as must be the final pointee type.
  while (SrcType != DestType &&
         Self.UnwrapSimilarPointerTypes(SrcType, DestType)) {
    SrcType = SrcType.getUnqualifiedType();
    DestType = DestType.getUnqualifiedType();
  }

  // Doug Gregor said to disallow this until users complain.
#if 0
  // If we end up with constant arrays of equal size, unwrap those too. A cast
  // from const int [N] to int (&)[N] is invalid by my reading of the
  // standard, but g++ accepts it even with -ansi -pedantic.
  // No more than one level, though, so don't embed this in the unwrap loop
  // above.
  const ConstantArrayType *SrcTypeArr, *DestTypeArr;
  if ((SrcTypeArr = Self.Context.getAsConstantArrayType(SrcType)) &&
     (DestTypeArr = Self.Context.getAsConstantArrayType(DestType)))
  {
    if (SrcTypeArr->getSize() != DestTypeArr->getSize()) {
      // Different array sizes.
      Self.Diag(OpRange.getBegin(), diag::err_bad_cxx_cast_generic)
        << "const_cast" << OrigDestType << OrigSrcType << OpRange;
      return;
    }
    SrcType = SrcTypeArr->getElementType().getUnqualifiedType();
    DestType = DestTypeArr->getElementType().getUnqualifiedType();
  }
#endif

  // Since we're dealing in canonical types, the remainder must be the same.
  if (SrcType != DestType) {
    // Cast between unrelated types.
    Self.Diag(OpRange.getBegin(), diag::err_bad_cxx_cast_generic)
      << "const_cast" << OrigDestType << OrigSrcType << OpRange;
    return;
  }
}
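
// Illustrative sketch (not part of the original): the unwrap-and-compare
// loop above with a toy type model. const_cast may change cv-qualifiers at
// every pointer level, but the pointer depth and the ultimate pointee must
// be identical.
struct ToyType {
  int Id;                  // identity of the underlying type
  const ToyType *Pointee;  // null when this is not a pointer
};

bool constCastDepthAndPointeeMatch(const ToyType *Src, const ToyType *Dst) {
  while (Src->Pointee && Dst->Pointee) {  // unwrap matching pointer levels
    Src = Src->Pointee;                   // (qualifiers would be ignored here)
    Dst = Dst->Pointee;
  }
  // Same depth and the same ultimate pointee => the cast can be legal.
  return !Src->Pointee && !Dst->Pointee && Src->Id == Dst->Id;
}
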
Example #4
void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex, 
                           ExplodedNode *Pred, ExplodedNodeSet &Dst) {
  
  ExplodedNodeSet dstPreStmt;
  getCheckerManager().runCheckersForPreStmt(dstPreStmt, Pred, CastE, *this);
  
  if (CastE->getCastKind() == CK_LValueToRValue) {
    for (ExplodedNodeSet::iterator I = dstPreStmt.begin(), E = dstPreStmt.end();
         I!=E; ++I) {
      ExplodedNode *subExprNode = *I;
      ProgramStateRef state = subExprNode->getState();
      const LocationContext *LCtx = subExprNode->getLocationContext();
      evalLoad(Dst, CastE, CastE, subExprNode, state, state->getSVal(Ex, LCtx));
    }
    return;
  }
  
  // All other casts.  
  QualType T = CastE->getType();
  QualType ExTy = Ex->getType();
  
  if (const ExplicitCastExpr *ExCast=dyn_cast_or_null<ExplicitCastExpr>(CastE))
    T = ExCast->getTypeAsWritten();
  
  StmtNodeBuilder Bldr(dstPreStmt, Dst, *currBldrCtx);
  for (ExplodedNodeSet::iterator I = dstPreStmt.begin(), E = dstPreStmt.end();
       I != E; ++I) {
    
    Pred = *I;
    ProgramStateRef state = Pred->getState();
    const LocationContext *LCtx = Pred->getLocationContext();

    switch (CastE->getCastKind()) {
      case CK_LValueToRValue:
        llvm_unreachable("LValueToRValue casts handled earlier.");
      case CK_ToVoid:
        continue;
        // The analyzer doesn't do anything special with these casts,
        // since it understands retain/release semantics already.
      case CK_ARCProduceObject:
      case CK_ARCConsumeObject:
      case CK_ARCReclaimReturnedObject:
      case CK_ARCExtendBlockObject: // Fall-through.
      case CK_CopyAndAutoreleaseBlockObject:
        // The analyzer can ignore atomic casts for now, although some future
        // checkers may want to make certain that you're not modifying the same
        // value through atomic and nonatomic pointers.
      case CK_AtomicToNonAtomic:
      case CK_NonAtomicToAtomic:
        // True no-ops.
      case CK_NoOp:
      case CK_ConstructorConversion:
      case CK_UserDefinedConversion:
      case CK_FunctionToPointerDecay:
      case CK_BuiltinFnToFnPtr: {
        // Copy the SVal of Ex to CastE.
        ProgramStateRef state = Pred->getState();
        const LocationContext *LCtx = Pred->getLocationContext();
        SVal V = state->getSVal(Ex, LCtx);
        state = state->BindExpr(CastE, LCtx, V);
        Bldr.generateNode(CastE, Pred, state);
        continue;
      }
      case CK_MemberPointerToBoolean:
        // FIXME: For now, member pointers are represented by void *.
        // FALLTHROUGH
      case CK_Dependent:
      case CK_ArrayToPointerDecay:
      case CK_BitCast:
      case CK_IntegralCast:
      case CK_NullToPointer:
      case CK_IntegralToPointer:
      case CK_PointerToIntegral:
      case CK_PointerToBoolean:
      case CK_IntegralToBoolean:
      case CK_IntegralToFloating:
      case CK_FloatingToIntegral:
      case CK_FloatingToBoolean:
      case CK_FloatingCast:
      case CK_FloatingRealToComplex:
      case CK_FloatingComplexToReal:
      case CK_FloatingComplexToBoolean:
      case CK_FloatingComplexCast:
      case CK_FloatingComplexToIntegralComplex:
      case CK_IntegralRealToComplex:
      case CK_IntegralComplexToReal:
      case CK_IntegralComplexToBoolean:
      case CK_IntegralComplexCast:
      case CK_IntegralComplexToFloatingComplex:
      case CK_CPointerToObjCPointerCast:
      case CK_BlockPointerToObjCPointerCast:
      case CK_AnyPointerToBlockPointerCast:  
      case CK_ObjCObjectLValueCast: 
      case CK_ZeroToOCLEvent: {
        // Delegate to SValBuilder to process.
        SVal V = state->getSVal(Ex, LCtx);
        V = svalBuilder.evalCast(V, T, ExTy);
        state = state->BindExpr(CastE, LCtx, V);
        Bldr.generateNode(CastE, Pred, state);
        continue;
      }
      case CK_DerivedToBase:
      case CK_UncheckedDerivedToBase: {
        // For DerivedToBase cast, delegate to the store manager.
        SVal val = state->getSVal(Ex, LCtx);
        val = getStoreManager().evalDerivedToBase(val, CastE);
        state = state->BindExpr(CastE, LCtx, val);
        Bldr.generateNode(CastE, Pred, state);
        continue;
      }
      // Handle C++ dyn_cast.
      case CK_Dynamic: {
        SVal val = state->getSVal(Ex, LCtx);

        // Compute the type of the result.
        QualType resultType = CastE->getType();
        if (CastE->isGLValue())
          resultType = getContext().getPointerType(resultType);

        bool Failed = false;

        // Check if the value being cast evaluates to 0.
        if (val.isZeroConstant())
          Failed = true;
        // Else, evaluate the cast.
        else
          val = getStoreManager().evalDynamicCast(val, T, Failed);

        if (Failed) {
          if (T->isReferenceType()) {
            // A bad_cast exception is thrown if the input value is a
            // reference. Currently we model this by generating a sink.
            Bldr.generateSink(CastE, Pred, state);
            continue;
          } else {
            // If the cast fails on a pointer, bind to 0.
            state = state->BindExpr(CastE, LCtx, svalBuilder.makeNull());
          }
        } else {
          // If we don't know if the cast succeeded, conjure a new symbol.
          if (val.isUnknown()) {
            DefinedOrUnknownSVal NewSym =
              svalBuilder.conjureSymbolVal(0, CastE, LCtx, resultType,
                                           currBldrCtx->blockCount());
            state = state->BindExpr(CastE, LCtx, NewSym);
          } else 
            // Else, bind to the derived region value.
            state = state->BindExpr(CastE, LCtx, val);
        }
        Bldr.generateNode(CastE, Pred, state);
        continue;
      }
      case CK_NullToMemberPointer: {
        // FIXME: For now, member pointers are represented by void *.
        SVal V = svalBuilder.makeIntValWithPtrWidth(0, true);
        state = state->BindExpr(CastE, LCtx, V);
        Bldr.generateNode(CastE, Pred, state);
        continue;
      }
      // Various C++ casts that are not handled yet.
      case CK_ToUnion:
      case CK_BaseToDerived:
      case CK_BaseToDerivedMemberPointer:
      case CK_DerivedToBaseMemberPointer:
      case CK_ReinterpretMemberPointer:
      case CK_VectorSplat:
      case CK_LValueBitCast: {
        // Recover some path-sensitivity by conjuring a new value.
        QualType resultType = CastE->getType();
        if (CastE->isGLValue())
          resultType = getContext().getPointerType(resultType);
        SVal result = svalBuilder.conjureSymbolVal(0, CastE, LCtx,
                                                   resultType,
                                                   currBldrCtx->blockCount());
        state = state->BindExpr(CastE, LCtx, result);
        Bldr.generateNode(CastE, Pred, state);
        continue;
      }
    }
  }
}
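
// Illustrative sketch (not part of the original): the CK_Dynamic decision
// logic above, in isolation. A failed dynamic_cast sinks for references
// (bad_cast would be thrown) and binds null for pointers; an unknown input
// conjures a fresh symbol.
enum class DynCastOutcome { Sink, BindNull, ConjureSymbol, BindValue };

DynCastOutcome dynCastOutcome(bool Failed, bool IsReference, bool ValueKnown) {
  if (Failed)
    return IsReference ? DynCastOutcome::Sink : DynCastOutcome::BindNull;
  return ValueKnown ? DynCastOutcome::BindValue
                    : DynCastOutcome::ConjureSymbol;
}
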
Example #5
static bool isISLObjectRef(QualType Ty) {
  return StringRef(Ty.getAsString()).startswith("isl_");
}
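
// Illustrative sketch (not part of the original): the same prefix test with
// the standard library instead of llvm::StringRef. Any type whose printed
// name begins with "isl_" is treated as an ISL object reference.
#include <string>

bool looksLikeISLObjectName(const std::string &TypeName) {
  return TypeName.compare(0, 4, "isl_") == 0;
}
// looksLikeISLObjectName("isl_set *") == true
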
Example #6
RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
  llvm::SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();


  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result.
  if (CGM.ReturnTypeUsesSret(CallInfo))
    Args.push_back(CreateTempAlloca(ConvertTypeForMem(RetTy)));

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->first;

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Indirect:
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        Args.push_back(CreateTempAlloca(ConvertTypeForMem(I->second)));
        if (RV.isScalar())
          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false, I->second);
        else
          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
      } else {
        Args.push_back(RV.getAggregateAddr());
      }
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct:
      if (RV.isScalar()) {
        Args.push_back(RV.getScalarVal());
      } else if (RV.isComplex()) {
        llvm::Value *Tmp = llvm::UndefValue::get(ConvertType(I->second));
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().first, 0);
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().second, 1);
        Args.push_back(Tmp);
      } else {
        Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
      }
      break;

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce: {
      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar()) {
        SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, I->second);
      } else if (RV.isComplex()) {
        SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
      } else
        SrcPtr = RV.getAggregateAddr();
      Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                       *this));
      break;
    }

    case ABIArgInfo::Expand:
      ExpandTypeToArgs(I->second, RV, Args);
      break;
    }
  }

  // If the callee is a bitcast of a function to a varargs pointer to function
  // type, check to see if we can remove the bitcast.  This handles some cases
  // with unprototyped functions.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      const llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType());
      const llvm::FunctionType *CurFT =
        cast<llvm::FunctionType>(CurPT->getElementType());
      const llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == Args.size()) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it.  This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }


  llvm::BasicBlock *InvokeDest = getInvokeDest();
  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
                                                   AttributeList.end());

  llvm::CallSite CS;
  if (!InvokeDest || (Attrs.getFnAttributes() & llvm::Attribute::NoUnwind)) {
    CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size());
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
                              Args.data(), Args.data()+Args.size());
    EmitBlock(Cont);
  }

  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // generally are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
    CI->setName("call");

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect:
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(Args[0]);
    return RValue::get(EmitLoadOfScalar(Args[0], false, RetTy));

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetTy->isAnyComplexType()) {
      llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
      llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
      return RValue::getComplex(std::make_pair(Real, Imag));
    }
    if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "agg.tmp");
      Builder.CreateStore(CI, V);
      return RValue::getAggregate(V);
    }
    return RValue::get(CI);

  case ABIArgInfo::Ignore:
    // If we are ignoring an argument that had a result, make sure to
    // construct the appropriate return value for our caller.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Coerce: {
    // FIXME: Avoid the conversion through memory if possible.
    llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "coerce");
    CreateCoercedStore(CI, V, *this);
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(V, false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(V);
    return RValue::get(EmitLoadOfScalar(V, false, RetTy));
  }

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  assert(0 && "Unhandled ABIArgInfo::Kind");
  return RValue::get(0);
}
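
// Illustrative sketch (not part of the original): the sret convention the
// prologue above implements, shown from the caller's side in plain C++.
// When the ABI returns a large struct indirectly, the caller allocates the
// result slot and passes its address as a hidden leading argument.
struct Big { long A, B, C, D; };

void calleeWithSret(Big *Result, int X) {  // hidden sret parameter first
  *Result = Big{X, X, X, X};
}

Big callThroughSret(int X) {
  Big Tmp;                   // the temporary alloca created by the caller
  calleeWithSret(&Tmp, X);   // its address becomes argument 0
  return Tmp;
}
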
Example #7
ExprResult
Sema::BuildAnonymousStructUnionMemberReference(SourceLocation loc,
                                               IndirectFieldDecl *indirectField,
                                               Expr *baseObjectExpr,
                                               SourceLocation opLoc) {
  // First, build the expression that refers to the base object.
  
  bool baseObjectIsPointer = false;
  Qualifiers baseQuals;
  
  // Case 1:  the base of the indirect field is not a field.
  VarDecl *baseVariable = indirectField->getVarDecl();
  CXXScopeSpec EmptySS;
  if (baseVariable) {
    assert(baseVariable->getType()->isRecordType());
    
    // In principle we could have a member access expression that
    // accesses an anonymous struct/union that's a static member of
    // the base object's class.  However, under the current standard,
    // static data members cannot be anonymous structs or unions.
    // Supporting this is as easy as building a MemberExpr here.
    assert(!baseObjectExpr && "anonymous struct/union is static data member?");
    
    DeclarationNameInfo baseNameInfo(DeclarationName(), loc);
    
    ExprResult result 
      = BuildDeclarationNameExpr(EmptySS, baseNameInfo, baseVariable);
    if (result.isInvalid()) return ExprError();
    
    baseObjectExpr = result.take();    
    baseObjectIsPointer = false;
    baseQuals = baseObjectExpr->getType().getQualifiers();
    
    // Case 2: the base of the indirect field is a field and the user
    // wrote a member expression.
  } else if (baseObjectExpr) {
    // The caller provided the base object expression. Determine
    // whether its a pointer and whether it adds any qualifiers to the
    // anonymous struct/union fields we're looking into.
    QualType objectType = baseObjectExpr->getType();
    
    if (const PointerType *ptr = objectType->getAs<PointerType>()) {
      baseObjectIsPointer = true;
      objectType = ptr->getPointeeType();
    } else {
      baseObjectIsPointer = false;
    }
    baseQuals = objectType.getQualifiers();
  }  

  // Build the implicit member references to the field of the
  // anonymous struct/union.
  Expr *result = baseObjectExpr;
  IndirectFieldDecl::chain_iterator
  FI = indirectField->chain_begin(), FEnd = indirectField->chain_end();
  
  // Build the first member access in the chain with full information.
  if (!baseVariable) {
    FieldDecl *field = cast<FieldDecl>(*FI);
    
    // FIXME: use the real found-decl info!
    DeclAccessPair foundDecl = DeclAccessPair::make(field, field->getAccess());
    
    // Make a nameInfo that properly uses the anonymous name.
    DeclarationNameInfo memberNameInfo(field->getDeclName(), loc);
    
    result = BuildFieldReferenceExpr(*this, result, baseObjectIsPointer,
                                     EmptySS, field, foundDecl,
                                     memberNameInfo).take();
    baseObjectIsPointer = false;
    
    // FIXME: check qualified member access
  }
  
  // In all cases, we should now skip the first declaration in the chain.
  ++FI;
  
  while (FI != FEnd) {
    FieldDecl *field = cast<FieldDecl>(*FI++);
    
    // FIXME: these are somewhat meaningless
    DeclarationNameInfo memberNameInfo(field->getDeclName(), loc);
    DeclAccessPair foundDecl = DeclAccessPair::make(field, field->getAccess());
    
    result = BuildFieldReferenceExpr(*this, result, /*isarrow*/ false,
                                     EmptySS, field, 
                                     foundDecl, memberNameInfo).take();
  }
  
  return Owned(result);
}
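
// Illustrative sketch (not part of the original): the chain walk above with
// plain strings. An indirect field such as "s.x", where x lives in an
// anonymous union inside s, is resolved by folding one member access per
// link of the chain.
#include <string>
#include <vector>

std::string buildMemberAccessPath(std::string Base,
                                  const std::vector<std::string> &Chain) {
  std::string Result = std::move(Base);
  for (const std::string &Field : Chain)
    Result += "." + Field;  // one implicit member reference per element
  return Result;
}
// buildMemberAccessPath("s", {"<anon union>", "x"}) == "s.<anon union>.x"
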
Example #8
/// CheckFallThroughForFunctionDef - Check that we don't fall off the end of a
/// function that should return a value.  Check that we don't fall off the end
/// of a noreturn function.  We assume that functions and blocks not marked
/// noreturn will return.
static void CheckFallThroughForBody(Sema &S, const Decl *D, const Stmt *Body,
                                    const BlockExpr *blkExpr,
                                    const CheckFallThroughDiagnostics& CD,
                                    AnalysisContext &AC) {

  bool ReturnsVoid = false;
  bool HasNoReturn = false;

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    ReturnsVoid = FD->getResultType()->isVoidType();
    HasNoReturn = FD->hasAttr<NoReturnAttr>() ||
       FD->getType()->getAs<FunctionType>()->getNoReturnAttr();
  }
  else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
    ReturnsVoid = MD->getResultType()->isVoidType();
    HasNoReturn = MD->hasAttr<NoReturnAttr>();
  }
  else if (isa<BlockDecl>(D)) {
    QualType BlockTy = blkExpr->getType();
    if (const FunctionType *FT =
          BlockTy->getPointeeType()->getAs<FunctionType>()) {
      if (FT->getResultType()->isVoidType())
        ReturnsVoid = true;
      if (FT->getNoReturnAttr())
        HasNoReturn = true;
    }
  }

  Diagnostic &Diags = S.getDiagnostics();

  // Short circuit for compilation speed.
  if (CD.checkDiagnostics(Diags, ReturnsVoid, HasNoReturn))
      return;

  // FIXME: Function try block
  if (const CompoundStmt *Compound = dyn_cast<CompoundStmt>(Body)) {
    switch (CheckFallThrough(AC)) {
      case UnknownFallThrough:
        break;

      case MaybeFallThrough:
        if (HasNoReturn)
          S.Diag(Compound->getRBracLoc(),
                 CD.diag_MaybeFallThrough_HasNoReturn);
        else if (!ReturnsVoid)
          S.Diag(Compound->getRBracLoc(),
                 CD.diag_MaybeFallThrough_ReturnsNonVoid);
        break;
      case AlwaysFallThrough:
        if (HasNoReturn)
          S.Diag(Compound->getRBracLoc(),
                 CD.diag_AlwaysFallThrough_HasNoReturn);
        else if (!ReturnsVoid)
          S.Diag(Compound->getRBracLoc(),
                 CD.diag_AlwaysFallThrough_ReturnsNonVoid);
        break;
      case NeverFallThroughOrReturn:
        if (ReturnsVoid && !HasNoReturn && CD.diag_NeverFallThroughOrReturn)
          S.Diag(Compound->getLBracLoc(),
                 CD.diag_NeverFallThroughOrReturn);
        break;
      case NeverFallThrough:
        break;
    }
  }
}
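
// Illustrative sketch (not part of the original): the diagnostic policy
// above condensed into one function. Warn when a noreturn function can fall
// off the end, or when a value-returning function can.
#include <cstdio>

enum class FallThroughKind { Never, Maybe, Always };

void diagnoseFallThrough(FallThroughKind FT, bool ReturnsVoid,
                         bool HasNoReturn) {
  if (FT == FallThroughKind::Never)
    return;
  if (HasNoReturn)
    std::printf("warning: function declared 'noreturn' %s return\n",
                FT == FallThroughKind::Always ? "will" : "could");
  else if (!ReturnsVoid)
    std::printf("warning: control %s reach end of non-void function\n",
                FT == FallThroughKind::Always ? "reaches" : "may reach");
}
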
Example #9
/// CheckFallThrough - Check that we don't fall off the end of a
/// Statement that should return a value.
///
/// \returns AlwaysFallThrough iff we always fall off the end of the statement,
/// MaybeFallThrough iff we might or might not fall off the end,
/// NeverFallThroughOrReturn iff we never fall off the end of the statement or
/// return.  We assume NeverFallThrough iff we never fall off the end of the
/// statement but we may return.  We assume that functions not marked noreturn
/// will return.
static ControlFlowKind CheckFallThrough(AnalysisContext &AC) {
  CFG *cfg = AC.getCFG();
  if (cfg == 0) return UnknownFallThrough;

  // The CFG leaves in dead things, and we don't want the dead code paths to
  // confuse us, so we mark all live things first.
  llvm::BitVector live(cfg->getNumBlockIDs());
  unsigned count = reachable_code::ScanReachableFromBlock(cfg->getEntry(),
                                                          live);

  bool AddEHEdges = AC.getAddEHEdges();
  if (!AddEHEdges && count != cfg->getNumBlockIDs())
    // When there are things remaining dead, and we didn't add EH edges
    // from CallExprs to the catch clauses, we have to go back and
    // mark them as live.
    for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) {
      CFGBlock &b = **I;
      if (!live[b.getBlockID()]) {
        if (b.pred_begin() == b.pred_end()) {
          if (b.getTerminator() && isa<CXXTryStmt>(b.getTerminator()))
            // When not adding EH edges from calls, catch clauses
            // can otherwise seem dead.  Avoid noting them as dead.
            count += reachable_code::ScanReachableFromBlock(b, live);
          continue;
        }
      }
    }

  // Now that we know what is live, check the live predecessors of the exit
  // block and look for fall-through paths, being careful to ignore normal
  // returns and exceptional paths.
  bool HasLiveReturn = false;
  bool HasFakeEdge = false;
  bool HasPlainEdge = false;
  bool HasAbnormalEdge = false;

  // Ignore default cases that aren't likely to be reachable because all
  // enums in a switch(X) have explicit case statements.
  CFGBlock::FilterOptions FO;
  FO.IgnoreDefaultsWithCoveredEnums = 1;

  for (CFGBlock::filtered_pred_iterator
         I = cfg->getExit().filtered_pred_start_end(FO); I.hasMore(); ++I) {
    const CFGBlock& B = **I;
    if (!live[B.getBlockID()])
      continue;

    // Destructors can appear after the 'return' in the CFG.  This is
    // normal.  We need to look past the destructors for the return
    // statement (if it exists).
    CFGBlock::const_reverse_iterator ri = B.rbegin(), re = B.rend();
    bool hasNoReturnDtor = false;
    
    for ( ; ri != re ; ++ri) {
      CFGElement CE = *ri;

      // FIXME: The right solution is to just sever the edges in the
      // CFG itself.
      if (const CFGImplicitDtor *iDtor = ri->getAs<CFGImplicitDtor>())
        if (iDtor->isNoReturn(AC.getASTContext())) {
          hasNoReturnDtor = true;
          HasFakeEdge = true;
          break;
        }
      
      if (isa<CFGStmt>(CE))
        break;
    }
    
    if (hasNoReturnDtor)
      continue;
    
    // No more CFGElements in the block?
    if (ri == re) {
      if (B.getTerminator() && isa<CXXTryStmt>(B.getTerminator())) {
        HasAbnormalEdge = true;
        continue;
      }
      // A labeled empty statement, or the entry block...
      HasPlainEdge = true;
      continue;
    }

    CFGStmt CS = cast<CFGStmt>(*ri);
    Stmt *S = CS.getStmt();
    if (isa<ReturnStmt>(S)) {
      HasLiveReturn = true;
      continue;
    }
    if (isa<ObjCAtThrowStmt>(S)) {
      HasFakeEdge = true;
      continue;
    }
    if (isa<CXXThrowExpr>(S)) {
      HasFakeEdge = true;
      continue;
    }
    if (const AsmStmt *AS = dyn_cast<AsmStmt>(S)) {
      if (AS->isMSAsm()) {
        HasFakeEdge = true;
        HasLiveReturn = true;
        continue;
      }
    }
    if (isa<CXXTryStmt>(S)) {
      HasAbnormalEdge = true;
      continue;
    }

    bool NoReturnEdge = false;
    if (CallExpr *C = dyn_cast<CallExpr>(S)) {
      if (std::find(B.succ_begin(), B.succ_end(), &cfg->getExit())
            == B.succ_end()) {
        HasAbnormalEdge = true;
        continue;
      }
      Expr *CEE = C->getCallee()->IgnoreParenCasts();
      QualType calleeType = CEE->getType();
      if (calleeType == AC.getASTContext().BoundMemberTy) {
        calleeType = Expr::findBoundMemberType(CEE);
        assert(!calleeType.isNull() && "analyzing unresolved call?");
      }
      if (getFunctionExtInfo(calleeType).getNoReturn()) {
        NoReturnEdge = true;
        HasFakeEdge = true;
      } else if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CEE)) {
        ValueDecl *VD = DRE->getDecl();
        if (VD->hasAttr<NoReturnAttr>()) {
          NoReturnEdge = true;
          HasFakeEdge = true;
        }
      }
    }
    // FIXME: Add noreturn message sends.
    if (NoReturnEdge == false)
      HasPlainEdge = true;
  }
  if (!HasPlainEdge) {
    if (HasLiveReturn)
      return NeverFallThrough;
    return NeverFallThroughOrReturn;
  }
  if (HasAbnormalEdge || HasFakeEdge || HasLiveReturn)
    return MaybeFallThrough;
  // This says AlwaysFallThrough for calls to functions that are not marked
  // noreturn, that don't return.  If people would like this warning to be more
  // accurate, such functions should be marked as noreturn.
  return AlwaysFallThrough;
}
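
// Illustrative sketch (not part of the original): the flag-to-verdict
// mapping from the end of the function above, isolated. "Plain" edges are
// ordinary fall-through paths into the exit block; "fake" and "abnormal"
// edges come from throws, noreturn calls, MS asm, and try statements.
enum class FallThroughVerdict { NeverFallThrough, NeverFallThroughOrReturn,
                                MaybeFallThrough, AlwaysFallThrough };

FallThroughVerdict classifyExitEdges(bool HasPlainEdge, bool HasAbnormalEdge,
                                     bool HasFakeEdge, bool HasLiveReturn) {
  if (!HasPlainEdge)
    return HasLiveReturn ? FallThroughVerdict::NeverFallThrough
                         : FallThroughVerdict::NeverFallThroughOrReturn;
  if (HasAbnormalEdge || HasFakeEdge || HasLiveReturn)
    return FallThroughVerdict::MaybeFallThrough;
  return FallThroughVerdict::AlwaysFallThrough;
}
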
Example #10
void CFNumberCreateChecker::checkPreStmt(const CallExpr *CE,
        CheckerContext &C) const {
    const ProgramState *state = C.getState();
    const FunctionDecl *FD = C.getCalleeDecl(CE);
    if (!FD)
        return;

    ASTContext &Ctx = C.getASTContext();
    if (!II)
        II = &Ctx.Idents.get("CFNumberCreate");

    if (FD->getIdentifier() != II || CE->getNumArgs() != 3)
        return;

    // Get the value of the "theType" argument.
    SVal TheTypeVal = state->getSVal(CE->getArg(1));

    // FIXME: We really should allow ranges of valid theType values, and
    //   bifurcate the state appropriately.
    nonloc::ConcreteInt* V = dyn_cast<nonloc::ConcreteInt>(&TheTypeVal);
    if (!V)
        return;

    uint64_t NumberKind = V->getValue().getLimitedValue();
    Optional<uint64_t> TargetSize = GetCFNumberSize(Ctx, NumberKind);

    // FIXME: In some cases we can emit an error.
    if (!TargetSize.isKnown())
        return;

    // Look at the value of the integer being passed by reference.  Essentially
    // we want to catch cases where the value passed in is not equal to the
    // size of the type being created.
    SVal TheValueExpr = state->getSVal(CE->getArg(2));

    // FIXME: Eventually we should handle arbitrary locations.  We can do this
    //  by having an enhanced memory model that does low-level typing.
    loc::MemRegionVal* LV = dyn_cast<loc::MemRegionVal>(&TheValueExpr);
    if (!LV)
        return;

    const TypedValueRegion* R = dyn_cast<TypedValueRegion>(LV->stripCasts());
    if (!R)
        return;

    QualType T = Ctx.getCanonicalType(R->getValueType());

    // FIXME: If the pointee isn't an integer type, should we flag a warning?
    //  People can do weird stuff with pointers.

    if (!T->isIntegerType())
        return;

    uint64_t SourceSize = Ctx.getTypeSize(T);

    // Check whether SourceSize == TargetSize.
    if (SourceSize == TargetSize)
        return;

    // Generate an error.  Only generate a sink if 'SourceSize < TargetSize';
    // otherwise generate a regular node.
    //
    // FIXME: We can actually create an abstract "CFNumber" object that has
    //  the bits initialized to the provided values.
    //
    if (ExplodedNode *N = SourceSize < TargetSize ? C.generateSink()
                          : C.addTransition()) {
        llvm::SmallString<128> sbuf;
        llvm::raw_svector_ostream os(sbuf);

        os << (SourceSize == 8 ? "An " : "A ")
           << SourceSize << " bit integer is used to initialize a CFNumber "
           "object that represents "
           << (TargetSize == 8 ? "an " : "a ")
           << TargetSize << " bit integer. ";

        if (SourceSize < TargetSize)
            os << (TargetSize - SourceSize)
               << " bits of the CFNumber value will be garbage." ;
        else
            os << (SourceSize - TargetSize)
               << " bits of the input integer will be lost.";

        if (!BT)
            BT.reset(new APIMisuse("Bad use of CFNumberCreate"));

        BugReport *report = new BugReport(*BT, os.str(), N);
        report->addRange(CE->getArg(2)->getSourceRange());
        C.EmitReport(report);
    }
}
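
// Illustrative sketch (not part of the original): the width check above
// with hard-coded CFNumberType values (kCFNumberSInt8Type == 1 and so on).
// Passing an integer of the wrong width by reference either leaves garbage
// bits or silently drops bits.
#include <cstdint>
#include <optional>

std::optional<uint64_t> cfNumberKindToBits(uint64_t Kind) {
  switch (Kind) {
  case 1: return 8;   // kCFNumberSInt8Type
  case 2: return 16;  // kCFNumberSInt16Type
  case 3: return 32;  // kCFNumberSInt32Type
  case 4: return 64;  // kCFNumberSInt64Type
  default: return std::nullopt;
  }
}

bool cfNumberWidthMismatch(uint64_t Kind, uint64_t SourceBits) {
  std::optional<uint64_t> Target = cfNumberKindToBits(Kind);
  return Target && *Target != SourceBits;
}
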
Example #11
void VariadicMethodTypeChecker::checkPreObjCMessage(ObjCMessage msg,
        CheckerContext &C) const {
    if (!BT) {
        BT.reset(new APIMisuse("Arguments passed to variadic method aren't all "
                               "Objective-C pointer types"));

        ASTContext &Ctx = C.getASTContext();
        arrayWithObjectsS = GetUnarySelector("arrayWithObjects", Ctx);
        dictionaryWithObjectsAndKeysS =
            GetUnarySelector("dictionaryWithObjectsAndKeys", Ctx);
        setWithObjectsS = GetUnarySelector("setWithObjects", Ctx);

        initWithObjectsS = GetUnarySelector("initWithObjects", Ctx);
        initWithObjectsAndKeysS = GetUnarySelector("initWithObjectsAndKeys", Ctx);
    }

    if (!isVariadicMessage(msg))
        return;

    // We are not interested in the selector arguments since they have
    // well-defined types, so the compiler will issue a warning for them.
    unsigned variadicArgsBegin = msg.getSelector().getNumArgs();

    // We're not interested in the last argument since it has to be nil or the
    // compiler would have issued a warning for it elsewhere.
    unsigned variadicArgsEnd = msg.getNumArgs() - 1;

    if (variadicArgsEnd <= variadicArgsBegin)
        return;

    // Verify that all arguments have Objective-C types.
    llvm::Optional<ExplodedNode*> errorNode;
    const ProgramState *state = C.getState();

    for (unsigned I = variadicArgsBegin; I != variadicArgsEnd; ++I) {
        QualType ArgTy = msg.getArgType(I);
        if (ArgTy->isObjCObjectPointerType())
            continue;

        // Block pointers are treated as Objective-C pointers.
        if (ArgTy->isBlockPointerType())
            continue;

        // Ignore pointer constants.
        if (isa<loc::ConcreteInt>(msg.getArgSVal(I, state)))
            continue;

        // Ignore pointer types annotated with 'NSObject' attribute.
        if (C.getASTContext().isObjCNSObjectType(ArgTy))
            continue;

        // Ignore CF references, which can be toll-free bridged.
        if (coreFoundation::isCFObjectRef(ArgTy))
            continue;

        // Generate only one error node to use for all bug reports.
        if (!errorNode.hasValue()) {
            errorNode = C.addTransition();
        }

        if (!errorNode.getValue())
            continue;

        llvm::SmallString<128> sbuf;
        llvm::raw_svector_ostream os(sbuf);

        if (const char *TypeName = GetReceiverNameType(msg))
            os << "Argument to '" << TypeName << "' method '";
        else
            os << "Argument to method '";

        os << msg.getSelector().getAsString()
           << "' should be an Objective-C pointer type, not '"
           << ArgTy.getAsString() << "'";

        BugReport *R = new BugReport(*BT, os.str(),
                                     errorNode.getValue());
        R->addRange(msg.getArgSourceRange(I));
        C.EmitReport(R);
    }
}
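
// Illustrative sketch (not part of the original): the variadic slice
// computed above, in isolation. For [NSArray arrayWithObjects:a, b, nil]
// the selector consumes one formal argument and the trailing nil sentinel
// is excluded, so only the arguments in [Begin, End) are checked. Callers
// guard against an empty argument list, as the code above does.
struct VariadicRange { unsigned Begin, End; };  // half-open interval

VariadicRange variadicArgRange(unsigned SelectorArgs, unsigned TotalArgs) {
  return {SelectorArgs, TotalArgs - 1};  // drop the nil terminator
}
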
Example #12
ObjCMethodFamily ObjCMethodDecl::getMethodFamily() const {
  ObjCMethodFamily family = static_cast<ObjCMethodFamily>(Family);
  if (family != static_cast<unsigned>(InvalidObjCMethodFamily))
    return family;

  // Check for an explicit attribute.
  if (const ObjCMethodFamilyAttr *attr = getAttr<ObjCMethodFamilyAttr>()) {
    // The unfortunate necessity of mapping between enums here is due
    // to the attributes framework.
    switch (attr->getFamily()) {
    case ObjCMethodFamilyAttr::OMF_None: family = OMF_None; break;
    case ObjCMethodFamilyAttr::OMF_alloc: family = OMF_alloc; break;
    case ObjCMethodFamilyAttr::OMF_copy: family = OMF_copy; break;
    case ObjCMethodFamilyAttr::OMF_init: family = OMF_init; break;
    case ObjCMethodFamilyAttr::OMF_mutableCopy: family = OMF_mutableCopy; break;
    case ObjCMethodFamilyAttr::OMF_new: family = OMF_new; break;
    }
    Family = static_cast<unsigned>(family);
    return family;
  }

  family = getSelector().getMethodFamily();
  switch (family) {
  case OMF_None: break;

  // init only has a conventional meaning for an instance method, and
  // it has to return an object.
  case OMF_init:
    if (!isInstanceMethod() || !getResultType()->isObjCObjectPointerType())
      family = OMF_None;
    break;

  // alloc/copy/new have a conventional meaning for both class and
  // instance methods, but they require an object return.
  case OMF_alloc:
  case OMF_copy:
  case OMF_mutableCopy:
  case OMF_new:
    if (!getResultType()->isObjCObjectPointerType())
      family = OMF_None;
    break;

  // These selectors have a conventional meaning only for instance methods.
  case OMF_dealloc:
  case OMF_finalize:
  case OMF_retain:
  case OMF_release:
  case OMF_autorelease:
  case OMF_retainCount:
  case OMF_self:
    if (!isInstanceMethod())
      family = OMF_None;
    break;
      
  case OMF_performSelector:
    if (!isInstanceMethod() ||
        !getResultType()->isObjCIdType())
      family = OMF_None;
    else {
      unsigned noParams = param_size();
      if (noParams < 1 || noParams > 3)
        family = OMF_None;
      else {
        ObjCMethodDecl::arg_type_iterator it = arg_type_begin();
        QualType ArgT = (*it);
        if (!ArgT->isObjCSelType()) {
          family = OMF_None;
          break;
        }
        while (--noParams) {
          it++;
          ArgT = (*it);
          if (!ArgT->isObjCIdType()) {
            family = OMF_None;
            break;
          }
        }
      }
    }
    break;
      
  }

  // Cache the result.
  Family = static_cast<unsigned>(family);
  return family;
}
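
// Illustrative sketch (not part of the original): the selector-prefix
// heuristic behind getSelector().getMethodFamily(), simplified to plain
// strings. Callers, as above, then reject guesses whose return type or
// method kind does not fit the convention.
#include <string>

enum class Family { None, Alloc, Copy, Init, MutableCopy, New };

Family guessFamily(const std::string &Selector) {
  auto StartsWith = [&](const char *P) { return Selector.rfind(P, 0) == 0; };
  if (StartsWith("alloc"))       return Family::Alloc;
  if (StartsWith("mutableCopy")) return Family::MutableCopy;
  if (StartsWith("copy"))        return Family::Copy;
  if (StartsWith("init"))        return Family::Init;
  if (StartsWith("new"))         return Family::New;
  return Family::None;
}
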
Example #13
bool ArgType::matchesType(ASTContext &C, QualType argTy) const {
  if (Ptr) {
    // It has to be a pointer.
    const PointerType *PT = argTy->getAs<PointerType>();
    if (!PT)
      return false;

    // We cannot write through a const qualified pointer.
    if (PT->getPointeeType().isConstQualified())
      return false;

    argTy = PT->getPointeeType();
  }

  switch (K) {
    case InvalidTy:
      llvm_unreachable("ArgType must be valid");

    case UnknownTy:
      return true;
      
    case AnyCharTy: {
      if (const EnumType *ETy = argTy->getAs<EnumType>())
        argTy = ETy->getDecl()->getIntegerType();

      if (const BuiltinType *BT = argTy->getAs<BuiltinType>())
        switch (BT->getKind()) {
          default:
            break;
          case BuiltinType::Char_S:
          case BuiltinType::SChar:
          case BuiltinType::UChar:
          case BuiltinType::Char_U:
            return true;            
        }
      return false;
    }
      
    case SpecificTy: {
      if (const EnumType *ETy = argTy->getAs<EnumType>())
        argTy = ETy->getDecl()->getIntegerType();
      argTy = C.getCanonicalType(argTy).getUnqualifiedType();

      if (T == argTy)
        return true;
      // Check for "compatible types".
      if (const BuiltinType *BT = argTy->getAs<BuiltinType>())
        switch (BT->getKind()) {
          default:
            break;
          case BuiltinType::Char_S:
          case BuiltinType::SChar:
          case BuiltinType::Char_U:
          case BuiltinType::UChar:                    
            return T == C.UnsignedCharTy || T == C.SignedCharTy;
          case BuiltinType::Short:
            return T == C.UnsignedShortTy;
          case BuiltinType::UShort:
            return T == C.ShortTy;
          case BuiltinType::Int:
            return T == C.UnsignedIntTy;
          case BuiltinType::UInt:
            return T == C.IntTy;
          case BuiltinType::Long:
            return T == C.UnsignedLongTy;
          case BuiltinType::ULong:
            return T == C.LongTy;
          case BuiltinType::LongLong:
            return T == C.UnsignedLongLongTy;
          case BuiltinType::ULongLong:
            return T == C.LongLongTy;
        }
      return false;
    }

    case CStrTy: {
      const PointerType *PT = argTy->getAs<PointerType>();
      if (!PT)
        return false;
      QualType pointeeTy = PT->getPointeeType();
      if (const BuiltinType *BT = pointeeTy->getAs<BuiltinType>())
        switch (BT->getKind()) {
          case BuiltinType::Void:
          case BuiltinType::Char_U:
          case BuiltinType::UChar:
          case BuiltinType::Char_S:
          case BuiltinType::SChar:
            return true;
          default:
            break;
        }

      return false;
    }

    case WCStrTy: {
      const PointerType *PT = argTy->getAs<PointerType>();
      if (!PT)
        return false;
      QualType pointeeTy =
        C.getCanonicalType(PT->getPointeeType()).getUnqualifiedType();
      return pointeeTy == C.getWCharType();
    }
    
    case WIntTy: {
      
      QualType PromoArg = 
        argTy->isPromotableIntegerType()
          ? C.getPromotedIntegerType(argTy) : argTy;
      
      QualType WInt = C.getCanonicalType(C.getWIntType()).getUnqualifiedType();
      PromoArg = C.getCanonicalType(PromoArg).getUnqualifiedType();
      
      // If the promoted argument is the corresponding signed type of the
      // wint_t type, then it should match.
      if (PromoArg->hasSignedIntegerRepresentation() &&
          C.getCorrespondingUnsignedType(PromoArg) == WInt)
        return true;

      return WInt == PromoArg;
    }

    case CPointerTy:
      return argTy->isPointerType() || argTy->isObjCObjectPointerType() ||
             argTy->isBlockPointerType() || argTy->isNullPtrType();

    case ObjCPointerTy: {
      if (argTy->getAs<ObjCObjectPointerType>() ||
          argTy->getAs<BlockPointerType>())
        return true;
      
      // Handle implicit toll-free bridging.
      if (const PointerType *PT = argTy->getAs<PointerType>()) {
        // Things such as CFTypeRef are really just opaque pointers
        // to C structs representing CF types that can often be bridged
        // to Objective-C objects.  Since the compiler doesn't know which
        // structs can be toll-free bridged, we just accept them all.
        QualType pointee = PT->getPointeeType();
        if (pointee->getAsStructureType() || pointee->isVoidType())
          return true;
      }
      return false;      
    }
  }

  llvm_unreachable("Invalid ArgType Kind!");
}
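
// Illustrative sketch (not part of the original): the "compatible types"
// rule from the SpecificTy case, reduced to (width, signedness) pairs. A
// conversion such as %d accepts both the exact type and its differently
// signed counterpart of the same width.
struct IntShape { unsigned Width; bool IsSigned; };

bool formatArgCompatible(IntShape Expected, IntShape Actual) {
  // An exact match is fine; so is a mismatch in signedness alone.
  return Expected.Width == Actual.Width;
}
// formatArgCompatible({32, true}, {32, false}) == true, like %d vs unsigned
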
Example #14
SVal Environment::GetSVal(const Stmt *E, ValueManager& ValMgr) const {

  for (;;) {

    switch (E->getStmtClass()) {

      case Stmt::AddrLabelExprClass:
        return ValMgr.makeLoc(cast<AddrLabelExpr>(E));

        // ParenExprs are no-ops.

      case Stmt::ParenExprClass:
        E = cast<ParenExpr>(E)->getSubExpr();
        continue;

      case Stmt::CharacterLiteralClass: {
        const CharacterLiteral* C = cast<CharacterLiteral>(E);
        return ValMgr.makeIntVal(C->getValue(), C->getType());
      }

      case Stmt::CXXBoolLiteralExprClass: {
        const SVal *X = ExprBindings.lookup(E);
        if (X) 
          return *X;
        else 
          return ValMgr.makeIntVal(cast<CXXBoolLiteralExpr>(E));
      }
      case Stmt::IntegerLiteralClass: {
        // In C++, this expression may have been bound to a temporary object.
        SVal const *X = ExprBindings.lookup(E);
        if (X)
          return *X;
        else
          return ValMgr.makeIntVal(cast<IntegerLiteral>(E));
      }

      // Casts where the source and target type are the same
      // are no-ops.  We blast through these to get the descendant
      // subexpression that has a value.

      case Stmt::ImplicitCastExprClass:
      case Stmt::CStyleCastExprClass: {
        const CastExpr* C = cast<CastExpr>(E);
        QualType CT = C->getType();

        if (CT->isVoidType())
          return UnknownVal();

        break;
      }

        // Handle all other Stmt* using a lookup.

      default:
        break;
    };

    break;
  }

  return LookupExpr(E);
}
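
// Illustrative sketch (not part of the original): the peel-then-look-up
// loop above with a toy node type. Syntactic wrappers (parens) carry no
// value of their own, so they are stripped iteratively before the binding
// map is consulted.
#include <map>

struct ToyExpr { const ToyExpr *Wrapped; };  // non-null => a no-op wrapper

int lookupBinding(const ToyExpr *E,
                  const std::map<const ToyExpr *, int> &Bindings) {
  while (E->Wrapped)  // blast through the no-op wrappers
    E = E->Wrapped;
  auto It = Bindings.find(E);
  return It != Bindings.end() ? It->second : -1;  // -1 stands in for Unknown
}
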
Example #15
void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL, 
                                           unsigned &CallingConv) {
  unsigned FuncAttrs = 0;
  unsigned RetAttrs = 0;

  CallingConv = FI.getEffectiveCallingConvention();

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs |= llvm::Attribute::NoUnwind;
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs |= llvm::Attribute::NoReturn;
    if (TargetDecl->hasAttr<ConstAttr>())
      FuncAttrs |= llvm::Attribute::ReadNone;
    else if (TargetDecl->hasAttr<PureAttr>())
      FuncAttrs |= llvm::Attribute::ReadOnly;
    if (TargetDecl->hasAttr<MallocAttr>())
      RetAttrs |= llvm::Attribute::NoAlias;
  }

  if (CodeGenOpts.OptimizeSize)
    FuncAttrs |= llvm::Attribute::OptimizeForSize;
  if (CodeGenOpts.DisableRedZone)
    FuncAttrs |= llvm::Attribute::NoRedZone;
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs |= llvm::Attribute::NoImplicitFloat;

  QualType RetTy = FI.getReturnType();
  unsigned Index = 1;
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
   if (RetTy->isSignedIntegerType()) {
     RetAttrs |= llvm::Attribute::SExt;
   } else if (RetTy->isUnsignedIntegerType()) {
     RetAttrs |= llvm::Attribute::ZExt;
   }
   // FALLTHROUGH
  case ABIArgInfo::Direct:
    break;

  case ABIArgInfo::Indirect:
    PAL.push_back(llvm::AttributeWithIndex::get(Index,
                                                llvm::Attribute::StructRet |
                                                llvm::Attribute::NoAlias));
    ++Index;
    // sret disables readnone and readonly
    FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                   llvm::Attribute::ReadNone);
    break;

  case ABIArgInfo::Ignore:
  case ABIArgInfo::Coerce:
    break;

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  if (RetAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));

  // FIXME: we need to honour command line settings also...
  // FIXME: RegParm should be reduced in case of nested functions and/or global
  // register variable.
  signed RegParm = 0;
  if (TargetDecl)
    if (const RegparmAttr *RegParmAttr
          = TargetDecl->getAttr<RegparmAttr>())
      RegParm = RegParmAttr->getNumParams();

  unsigned PointerWidth = getContext().Target.getPointerWidth(0);
  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    QualType ParamType = it->type;
    const ABIArgInfo &AI = it->info;
    unsigned Attributes = 0;

    switch (AI.getKind()) {
    case ABIArgInfo::Coerce:
      break;

    case ABIArgInfo::Indirect:
      if (AI.getIndirectByVal())
        Attributes |= llvm::Attribute::ByVal;

      Attributes |=
        llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
      // byval disables readnone and readonly.
      FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                     llvm::Attribute::ReadNone);
      break;

    case ABIArgInfo::Extend:
     if (ParamType->isSignedIntegerType()) {
       Attributes |= llvm::Attribute::SExt;
     } else if (ParamType->isUnsignedIntegerType()) {
       Attributes |= llvm::Attribute::ZExt;
     }
     // FALLS THROUGH
    case ABIArgInfo::Direct:
      if (RegParm > 0 &&
          (ParamType->isIntegerType() || ParamType->isPointerType())) {
        RegParm -=
          (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
        if (RegParm >= 0)
          Attributes |= llvm::Attribute::InReg;
      }
      // FIXME: handle sseregparm someday...
      break;

    case ABIArgInfo::Ignore:
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      std::vector<const llvm::Type*> Tys;
      // FIXME: This is rather inefficient. Do we ever actually need to do
      // anything here? The result should be just reconstructed on the other
      // side, so extension should be a non-issue.
      getTypes().GetExpandedTypes(ParamType, Tys);
      Index += Tys.size();
      continue;
    }
    }

    if (Attributes)
      PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
    ++Index;
  }
  if (FuncAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
}
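
// Illustrative sketch (not part of the original): the attribute
// accumulation pattern above with plain bit flags. Declaration-level facts
// are OR-ed into a mask, and a later ABI decision (such as sret) may strip
// bits back out.
enum : unsigned {
  FlagNoUnwind = 1u << 0, FlagNoReturn = 1u << 1,
  FlagReadNone = 1u << 2, FlagReadOnly = 1u << 3
};

unsigned collectFuncFlags(bool NoThrow, bool NoRet, bool IsConst,
                          bool IsPure, bool UsesSret) {
  unsigned Flags = 0;
  if (NoThrow) Flags |= FlagNoUnwind;
  if (NoRet)   Flags |= FlagNoReturn;
  if (IsConst) Flags |= FlagReadNone;
  else if (IsPure) Flags |= FlagReadOnly;
  if (UsesSret)  // sret writes through its pointer: not readnone/readonly
    Flags &= ~(FlagReadNone | FlagReadOnly);
  return Flags;
}
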
Example #16
const ProgramState *SimpleConstraintManager::assumeSymRel(const ProgramState *state,
                                                     const SymExpr *LHS,
                                                     BinaryOperator::Opcode op,
                                                     const llvm::APSInt& Int) {
  assert(BinaryOperator::isComparisonOp(op) &&
         "Non-comparison ops should be rewritten as comparisons to zero.");

  // We only handle simple comparisons of the form "$sym == constant"
  // or "($sym+constant1) == constant2".
  // The adjustment is "constant1" in the above expression. It's used to
  // "slide" the solution range around for modular arithmetic. For example,
  // x < 4 has the solution [0, 3]. x+2 < 4 has the solution [0-2, 3-2], which
  // in modular arithmetic is [0, 1] U [UINT_MAX-1, UINT_MAX]. It's up to
  // the subclasses of SimpleConstraintManager to handle the adjustment.
  llvm::APSInt Adjustment;

  // First check if the LHS is a simple symbol reference.
  SymbolRef Sym = dyn_cast<SymbolData>(LHS);
  if (Sym) {
    Adjustment = 0;
  } else {
    // Next, see if it's a "($sym+constant1)" expression.
    const SymIntExpr *SE = dyn_cast<SymIntExpr>(LHS);

    // We don't handle "($sym1+$sym2)".
    // Give up and assume the constraint is feasible.
    if (!SE)
      return state;

    // We don't handle "(<expr>+constant1)".
    // Give up and assume the constraint is feasible.
    Sym = dyn_cast<SymbolData>(SE->getLHS());
    if (!Sym)
      return state;

    // Get the constant out of the expression "($sym+constant1)".
    switch (SE->getOpcode()) {
    case BO_Add:
      Adjustment = SE->getRHS();
      break;
    case BO_Sub:
      Adjustment = -SE->getRHS();
      break;
    default:
      // We don't handle non-additive operators.
      // Give up and assume the constraint is feasible.
      return state;
    }
  }

  // FIXME: This next section is a hack. It silently converts the integers to
  // be of the same type as the symbol, which is not always correct. Really the
  // comparisons should be performed using the Int's type, then mapped back to
  // the symbol's range of values.
  ProgramStateManager &StateMgr = state->getStateManager();
  ASTContext &Ctx = StateMgr.getContext();

  QualType T = Sym->getType(Ctx);
  assert(T->isIntegerType() || Loc::isLocType(T));
  unsigned bitwidth = Ctx.getTypeSize(T);
  bool isSymUnsigned 
    = T->isUnsignedIntegerOrEnumerationType() || Loc::isLocType(T);

  // Convert the adjustment.
  Adjustment.setIsUnsigned(isSymUnsigned);
  Adjustment = Adjustment.extOrTrunc(bitwidth);

  // Convert the right-hand side integer.
  llvm::APSInt ConvertedInt(Int, isSymUnsigned);
  ConvertedInt = ConvertedInt.extOrTrunc(bitwidth);

  switch (op) {
  default:
    // No logic yet for other operators.  Assume the constraint is feasible.
    return state;

  case BO_EQ:
    return assumeSymEQ(state, Sym, ConvertedInt, Adjustment);

  case BO_NE:
    return assumeSymNE(state, Sym, ConvertedInt, Adjustment);

  case BO_GT:
    return assumeSymGT(state, Sym, ConvertedInt, Adjustment);

  case BO_GE:
    return assumeSymGE(state, Sym, ConvertedInt, Adjustment);

  case BO_LT:
    return assumeSymLT(state, Sym, ConvertedInt, Adjustment);

  case BO_LE:
    return assumeSymLE(state, Sym, ConvertedInt, Adjustment);
  } // end switch
}
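
To make the "adjustment" comment in assumeSymRel concrete, here is a minimal standalone sketch (plain unsigned arithmetic, not analyzer code) of why "x + 2 < 4" is satisfied by two disjoint ranges once wraparound is considered:

#include <cassert>
#include <cstdint>

int main() {
  // x + 2 < 4 holds for x in [0, 1] ...
  std::uint32_t x = 1;
  assert(x + 2 < 4);
  // ... and, because unsigned addition wraps, also for
  // x in [UINT32_MAX - 1, UINT32_MAX]:
  x = UINT32_MAX - 1;
  x += 2;            // wraps around to 0
  assert(x < 4);
  // The solver "slides" the solution range [0, 3] by the adjustment (2)
  // instead of reasoning about the sum directly; that sliding is what the
  // subclasses of SimpleConstraintManager implement.
  return 0;
}
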
Example #17
RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) {
  if (ArgType->isReferenceType())
    return EmitReferenceBindingToExpr(E, ArgType);

  return EmitAnyExprToTemp(E);
}
Example #18
void NonNullParamChecker::checkPreCall(const CallEvent &Call,
                                      CheckerContext &C) const {
  const Decl *FD = Call.getDecl();
  if (!FD)
    return;

  const NonNullAttr *Att = FD->getAttr<NonNullAttr>();

  ProgramStateRef state = C.getState();

  CallEvent::param_type_iterator TyI = Call.param_type_begin(),
                                 TyE = Call.param_type_end();

  for (unsigned idx = 0, count = Call.getNumArgs(); idx != count; ++idx){

    // Check if the parameter is a reference. We want to report when a
    // reference to a null pointer is passed as a parameter.
    bool haveRefTypeParam = false;
    if (TyI != TyE) {
      haveRefTypeParam = (*TyI)->isReferenceType();
      TyI++;
    }

    bool haveAttrNonNull = Att && Att->isNonNull(idx);

    if (!haveRefTypeParam && !haveAttrNonNull)
      continue;

    // If the value is unknown or undefined, we can't perform this check.
    const Expr *ArgE = Call.getArgExpr(idx);
    SVal V = Call.getArgSVal(idx);
    Optional<DefinedSVal> DV = V.getAs<DefinedSVal>();
    if (!DV)
      continue;

    // Process the case when the argument is not a location.
    assert(!haveRefTypeParam || DV->getAs<Loc>());

    if (haveAttrNonNull && !DV->getAs<Loc>()) {
      // If the argument is a union type, we want to handle a potential
      // transparent_union GCC extension.
      if (!ArgE)
        continue;

      QualType T = ArgE->getType();
      const RecordType *UT = T->getAsUnionType();
      if (!UT || !UT->getDecl()->hasAttr<TransparentUnionAttr>())
        continue;

      if (Optional<nonloc::CompoundVal> CSV =
              DV->getAs<nonloc::CompoundVal>()) {
        nonloc::CompoundVal::iterator CSV_I = CSV->begin();
        assert(CSV_I != CSV->end());
        V = *CSV_I;
        DV = V.getAs<DefinedSVal>();
        assert(++CSV_I == CSV->end());
        if (!DV)
          continue;
        // Retrieve the corresponding expression.
        if (const CompoundLiteralExpr *CE = dyn_cast<CompoundLiteralExpr>(ArgE))
          if (const InitListExpr *IE =
                dyn_cast<InitListExpr>(CE->getInitializer()))
             ArgE = dyn_cast<Expr>(*(IE->begin()));

      } else {
        // FIXME: Handle LazyCompoundVals?
        continue;
      }
    }

    ConstraintManager &CM = C.getConstraintManager();
    ProgramStateRef stateNotNull, stateNull;
    llvm::tie(stateNotNull, stateNull) = CM.assumeDual(state, *DV);

    if (stateNull && !stateNotNull) {
      // Generate an error node.  Check for a null node in case
      // we cache out.
      if (ExplodedNode *errorNode = C.generateSink(stateNull)) {

        BugReport *R = 0;
        if (haveAttrNonNull)
          R = genReportNullAttrNonNull(errorNode, ArgE);
        else if (haveRefTypeParam)
          R = genReportReferenceToNullPointer(errorNode, ArgE);

        // Highlight the range of the argument that was null.
        R->addRange(Call.getArgSourceRange(idx));

        // Emit the bug report.
        C.emitReport(R);
      }

      // Always return.  Either we cached out or we just emitted an error.
      return;
    }

    // If a pointer value passed the check we should assume that it is
    // indeed not null from this point forward.
    assert(stateNotNull);
    state = stateNotNull;
  }

  // If we reach here all of the arguments passed the nonnull check.
  // If 'state' has been updated, generate a new node.
  C.addTransition(state);
}
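
For reference, a hedged sketch (GNU C; the declarations are illustrative, not taken from a test suite) of the transparent_union case the checker unwraps above:

typedef union {
  int *ip;
  float *fp;
} int_or_float __attribute__((transparent_union));

extern void take(int_or_float arg) __attribute__((nonnull(1)));

void caller(void) {
  int *p = 0;
  take(p);  /* the argument's SVal is a nonloc::CompoundVal wrapping the
               null pointer; the checker extracts the single member and
               reports a null passed to the 'nonnull' parameter */
}
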
Example #19
static bool isRecordType(QualType T) {
  return T->isRecordType();
}
void AttrNonNullChecker::checkPreCall(const CallEvent &Call,
                                      CheckerContext &C) const {
  const Decl *FD = Call.getDecl();
  if (!FD)
    return;

  const NonNullAttr *Att = FD->getAttr<NonNullAttr>();
  if (!Att)
    return;

  ProgramStateRef state = C.getState();

  // Iterate through the arguments of CE and check them for null.
  for (unsigned idx = 0, count = Call.getNumArgs(); idx != count; ++idx) {
    if (!Att->isNonNull(idx))
      continue;

    SVal V = Call.getArgSVal(idx);
    Optional<DefinedSVal> DV = V.getAs<DefinedSVal>();

    // If the value is unknown or undefined, we can't perform this check.
    if (!DV)
      continue;

    if (!DV->getAs<Loc>()) {
      // If the argument is a union type, we want to handle a potential
      // transparent_union GCC extension.
      const Expr *ArgE = Call.getArgExpr(idx);
      if (!ArgE)
        continue;

      QualType T = ArgE->getType();
      const RecordType *UT = T->getAsUnionType();
      if (!UT || !UT->getDecl()->hasAttr<TransparentUnionAttr>())
        continue;

      if (Optional<nonloc::CompoundVal> CSV =
              DV->getAs<nonloc::CompoundVal>()) {
        nonloc::CompoundVal::iterator CSV_I = CSV->begin();
        assert(CSV_I != CSV->end());
        V = *CSV_I;
        DV = V.getAs<DefinedSVal>();
        assert(++CSV_I == CSV->end());
        if (!DV)
          continue;        
      } else {
        // FIXME: Handle LazyCompoundVals?
        continue;
      }
    }

    ConstraintManager &CM = C.getConstraintManager();
    ProgramStateRef stateNotNull, stateNull;
    llvm::tie(stateNotNull, stateNull) = CM.assumeDual(state, *DV);

    if (stateNull && !stateNotNull) {
      // Generate an error node.  Check for a null node in case
      // we cache out.
      if (ExplodedNode *errorNode = C.generateSink(stateNull)) {

        // Lazily allocate the BugType object if it hasn't already been
        // created. Ownership is transferred to the BugReporter object once
        // the BugReport is passed to 'EmitWarning'.
        if (!BT)
          BT.reset(new BugType("Argument with 'nonnull' attribute passed null",
                               "API"));

        BugReport *R =
          new BugReport(*BT, "Null pointer passed as an argument to a "
                             "'nonnull' parameter", errorNode);

        // Highlight the range of the argument that was null.
        R->addRange(Call.getArgSourceRange(idx));
        if (const Expr *ArgE = Call.getArgExpr(idx))
          bugreporter::trackNullOrUndefValue(errorNode, ArgE, *R);
        // Emit the bug report.
        C.emitReport(R);
      }

      // Always return.  Either we cached out or we just emitted an error.
      return;
    }

    // If a pointer value passed the check we should assume that it is
    // indeed not null from this point forward.
    assert(stateNotNull);
    state = stateNotNull;
  }

  // If we reach here all of the arguments passed the nonnull check.
  // If 'state' has been updated, generate a new node.
  C.addTransition(state);
}
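
A minimal sketch of the pattern this checker flags (function names are illustrative):

extern void use_buffer(char *buf) __attribute__((nonnull(1)));

void bad_call(void) {
  char *p = 0;
  use_buffer(p);  /* expected: "Null pointer passed as an argument to a
                     'nonnull' parameter" */
}
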
Example #21
/// \brief Returns the alignment of the type source info data block.
unsigned TypeLoc::getLocalAlignmentForType(QualType Ty) {
  if (Ty.isNull()) return 1;
  return TypeAligner().Visit(TypeLoc(Ty, nullptr));
}
Example #22
/// Create a fake body for dispatch_once.
static Stmt *create_dispatch_once(ASTContext &C, const FunctionDecl *D) {
  // Check if we have at least two parameters.
  if (D->param_size() != 2)
    return 0;

  // Check if the first parameter is a pointer to integer type.
  const ParmVarDecl *Predicate = D->getParamDecl(0);
  QualType PredicateQPtrTy = Predicate->getType();
  const PointerType *PredicatePtrTy = PredicateQPtrTy->getAs<PointerType>();
  if (!PredicatePtrTy)
    return 0;
  QualType PredicateTy = PredicatePtrTy->getPointeeType();
  if (!PredicateTy->isIntegerType())
    return 0;
  
  // Check if the second parameter is the proper block type.
  const ParmVarDecl *Block = D->getParamDecl(1);
  QualType Ty = Block->getType();
  if (!isDispatchBlock(Ty))
    return 0;
  
  // Everything checks out.  Create a fake body that checks the predicate,
  // sets it, and calls the block.  Basically, an AST dump of:
  //
  // void dispatch_once(dispatch_once_t *predicate, dispatch_block_t block) {
  //  if (!*predicate) {
  //    *predicate = 1;
  //    block();
  //  }
  // }
  
  ASTMaker M(C);
  
  // (1) Create the call.
  DeclRefExpr *DR = M.makeDeclRefExpr(Block);
  ImplicitCastExpr *ICE = M.makeLvalueToRvalue(DR, Ty);
  CallExpr *CE = new (C) CallExpr(C, ICE, ArrayRef<Expr*>(), C.VoidTy,
                                  VK_RValue, SourceLocation());

  // (2) Create the assignment to the predicate.
  IntegerLiteral *IL =
    IntegerLiteral::Create(C, llvm::APInt(C.getTypeSize(C.IntTy), (uint64_t) 1),
                           C.IntTy, SourceLocation());
  BinaryOperator *B =
    M.makeAssignment(
       M.makeDereference(
          M.makeLvalueToRvalue(
            M.makeDeclRefExpr(Predicate), PredicateQPtrTy),
            PredicateTy),
       M.makeIntegralCast(IL, PredicateTy),
       PredicateTy);
  
  // (3) Create the compound statement.
  Stmt *Stmts[2];
  Stmts[0] = B;
  Stmts[1] = CE;  
  CompoundStmt *CS = new (C) CompoundStmt(C, Stmts, 2, SourceLocation(),
                                          SourceLocation());
  
  // (4) Create the 'if' condition.
  ImplicitCastExpr *LValToRval =
    M.makeLvalueToRvalue(
      M.makeDereference(
        M.makeLvalueToRvalue(
          M.makeDeclRefExpr(Predicate),
          PredicateQPtrTy),
        PredicateTy),
    PredicateTy);
  
  UnaryOperator *UO = new (C) UnaryOperator(LValToRval, UO_LNot, C.IntTy,
                                           VK_RValue, OK_Ordinary,
                                           SourceLocation());
  
  // (5) Create the 'if' statement.
  IfStmt *If = new (C) IfStmt(C, SourceLocation(), 0, UO, CS);
  return If;
}
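
For context, a hedged sketch of the call site the fake body models (Darwin blocks syntax, built with -fblocks; init_shared_state is an illustrative name):

#include <dispatch/dispatch.h>

extern void init_shared_state(void);

static dispatch_once_t pred;

void get_shared(void) {
  dispatch_once(&pred, ^{
    init_shared_state();  // the synthesized body lets the analyzer see
                          // the predicate write and this call directly
  });
}
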
static bool isOSObjectPtr(QualType QT) {
  return isOSObjectSubclass(QT->getPointeeCXXRecordDecl());
}
Example #24
RegionOffset MemRegion::getAsOffset() const {
  const MemRegion *R = this;
  int64_t Offset = 0;

  while (1) {
    switch (R->getKind()) {
    default:
      return RegionOffset();
    case SymbolicRegionKind:
    case AllocaRegionKind:
    case CompoundLiteralRegionKind:
    case CXXThisRegionKind:
    case StringRegionKind:
    case VarRegionKind:
    case ObjCIvarRegionKind:
    case CXXTempObjectRegionKind:
      goto Finish;
    case CXXBaseObjectRegionKind: {
      const CXXBaseObjectRegion *BOR = cast<CXXBaseObjectRegion>(R);
      R = BOR->getSuperRegion();

      QualType Ty;
      if (const TypedValueRegion *TVR = dyn_cast<TypedValueRegion>(R)) {
        Ty = TVR->getDesugaredValueType(getContext());
      } else if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R)) {
        // If our base region is symbolic, we don't know what type it really is.
        // Pretend the type of the symbol is the true dynamic type.
        // (This will at least be self-consistent for the life of the symbol.)
        Ty = SR->getSymbol()->getType(getContext())->getPointeeType();
      }
      
      const CXXRecordDecl *Child = Ty->getAsCXXRecordDecl();
      if (!Child) {
        // We cannot compute the offset of the base class.
        return RegionOffset();
      }
      const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Child);

      CharUnits BaseOffset;
      const CXXRecordDecl *Base = BOR->getDecl();
      if (Child->isVirtuallyDerivedFrom(Base))
        BaseOffset = Layout.getVBaseClassOffset(Base);
      else
        BaseOffset = Layout.getBaseClassOffset(Base);

      // The base offset is in chars, not in bits.
      Offset += BaseOffset.getQuantity() * getContext().getCharWidth();
      break;
    }
    case ElementRegionKind: {
      const ElementRegion *ER = cast<ElementRegion>(R);
      QualType EleTy = ER->getValueType();

      if (!IsCompleteType(getContext(), EleTy))
        return RegionOffset();

      SVal Index = ER->getIndex();
      if (const nonloc::ConcreteInt *CI=dyn_cast<nonloc::ConcreteInt>(&Index)) {
        int64_t i = CI->getValue().getSExtValue();
        // This type size is in bits.
        Offset += i * getContext().getTypeSize(EleTy);
      } else {
        // We cannot compute offset for non-concrete index.
        return RegionOffset();
      }
      R = ER->getSuperRegion();
      break;
    }
    case FieldRegionKind: {
      const FieldRegion *FR = cast<FieldRegion>(R);
      const RecordDecl *RD = FR->getDecl()->getParent();
      if (!RD->isCompleteDefinition())
        // We cannot compute offset for incomplete type.
        return RegionOffset();
      // Get the field number.
      unsigned idx = 0;
      for (RecordDecl::field_iterator FI = RD->field_begin(), 
             FE = RD->field_end(); FI != FE; ++FI, ++idx)
        if (FR->getDecl() == *FI)
          break;

      const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
      // This is offset in bits.
      Offset += Layout.getFieldOffset(idx);
      R = FR->getSuperRegion();
      break;
    }
    }
  }

 Finish:
  return RegionOffset(R, Offset);
}
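
As a sanity check on the bit arithmetic above, a small sketch (ordinary C++, not analyzer code) that computes the same field and element offsets with offsetof:

#include <cstddef>
#include <cstdio>

struct S {
  char c;
  int  b[4];
};

int main() {
  // FieldRegion: the field's offset comes from the record layout, in bits.
  std::printf("S::b    at bit %zu\n", offsetof(S, b) * 8);
  // ElementRegion: index * element size, also in bits (b[2] here).
  std::printf("S::b[2] at bit %zu\n",
              offsetof(S, b) * 8 + 2 * sizeof(int) * 8);
  return 0;
}
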
Optional<RetainSummaryManager::BehaviorSummary>
RetainSummaryManager::canEval(const CallExpr *CE, const FunctionDecl *FD,
                              bool &hasTrustedImplementationAnnotation) {

  IdentifierInfo *II = FD->getIdentifier();
  if (!II)
    return None;

  StringRef FName = II->getName();
  FName = FName.substr(FName.find_first_not_of('_'));

  QualType ResultTy = CE->getCallReturnType(Ctx);
  if (ResultTy->isObjCIdType()) {
    if (II->isStr("NSMakeCollectable"))
      return BehaviorSummary::Identity;
  } else if (ResultTy->isPointerType()) {
    // Handle: (CF|CG|CV)Retain
    //         CFAutorelease
    // It's okay to be a little sloppy here.
    if (FName == "CMBufferQueueDequeueAndRetain" ||
        FName == "CMBufferQueueDequeueIfDataReadyAndRetain") {
      // Part of: <rdar://problem/39390714>.
      // These functions are not retains: they return a new object and
      // retain it before returning.
      return None;
    }
    if (CE->getNumArgs() == 1 &&
        (cocoa::isRefType(ResultTy, "CF", FName) ||
         cocoa::isRefType(ResultTy, "CG", FName) ||
         cocoa::isRefType(ResultTy, "CV", FName)) &&
        (isRetain(FD, FName) || isAutorelease(FD, FName) ||
         isMakeCollectable(FName)))
      return BehaviorSummary::Identity;

    // safeMetaCast is called by OSDynamicCast.
    // We assume that OSDynamicCast is either an identity (cast is OK,
    // the input was non-zero),
    // or that it returns zero (when the cast failed, or the input
    // was zero).
    if (TrackOSObjects) {
      if (isOSObjectDynamicCast(FName) && FD->param_size() >= 1) {
        return BehaviorSummary::IdentityOrZero;
      } else if (isOSObjectThisCast(FName) && isa<CXXMethodDecl>(FD) &&
                 !cast<CXXMethodDecl>(FD)->isStatic()) {
        return BehaviorSummary::IdentityThis;
      }
    }

    const FunctionDecl* FDD = FD->getDefinition();
    if (FDD && isTrustedReferenceCountImplementation(FDD)) {
      hasTrustedImplementationAnnotation = true;
      return BehaviorSummary::Identity;
    }
  }

  if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
    const CXXRecordDecl *Parent = MD->getParent();
    if (TrackOSObjects && Parent && isOSObjectSubclass(Parent))
      if (FName == "release" || FName == "retain")
        return BehaviorSummary::NoOp;
  }

  return None;
}
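
A hedged sketch of the naming-convention match for the Identity case: a one-argument function whose name contains "Retain" and whose result is a CF*Ref-style typedef is modeled as returning its own argument (declarations shown standalone for the sketch):

extern "C" {
typedef const void *CFTypeRef;  // a "CF...Ref" typedef, as in CoreFoundation
CFTypeRef CFRetain(CFTypeRef cf);
}

CFTypeRef keep(CFTypeRef obj) {
  // canEval returns BehaviorSummary::Identity here, so the analyzer binds
  // the return value to the same symbol as 'obj' rather than conjuring a
  // fresh one; the +1 effect is applied by the retain-count summary.
  return CFRetain(obj);
}
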
Example #26
const VarRegion* MemRegionManager::getVarRegion(const VarDecl *D,
                                                const LocationContext *LC) {
  const MemRegion *sReg = 0;

  if (D->hasGlobalStorage() && !D->isStaticLocal()) {

    // First handle the globals defined in system headers.
    if (C.getSourceManager().isInSystemHeader(D->getLocation())) {
      // Whitelist the system globals which often DO GET modified; assume
      // the rest are immutable.
      if (D->getName().find("errno") != StringRef::npos)
        sReg = getGlobalsRegion(MemRegion::GlobalSystemSpaceRegionKind);
      else
        sReg = getGlobalsRegion(MemRegion::GlobalImmutableSpaceRegionKind);

    // Treat other globals as GlobalInternal unless they are constants.
    } else {
      QualType GQT = D->getType();
      const Type *GT = GQT.getTypePtrOrNull();
      // TODO: We could walk the complex types here and see if everything is
      // constified.
      if (GT && GQT.isConstQualified() && GT->isArithmeticType())
        sReg = getGlobalsRegion(MemRegion::GlobalImmutableSpaceRegionKind);
      else
        sReg = getGlobalsRegion();
    }
  
  // Finally handle static locals.  
  } else {
    // FIXME: Once we implement scope handling, we will need to properly lookup
    // 'D' to the proper LocationContext.
    const DeclContext *DC = D->getDeclContext();
    llvm::PointerUnion<const StackFrameContext *, const VarRegion *> V =
      getStackOrCaptureRegionForDeclContext(LC, DC, D);
    
    if (V.is<const VarRegion*>())
      return V.get<const VarRegion*>();
    
    const StackFrameContext *STC = V.get<const StackFrameContext*>();

    if (!STC)
      sReg = getUnknownRegion();
    else {
      if (D->hasLocalStorage()) {
        sReg = isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)
               ? static_cast<const MemRegion*>(getStackArgumentsRegion(STC))
               : static_cast<const MemRegion*>(getStackLocalsRegion(STC));
      }
      else {
        assert(D->isStaticLocal());
        const Decl *D = STC->getDecl();
        if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
          sReg = getGlobalsRegion(MemRegion::StaticGlobalSpaceRegionKind,
                                  getFunctionTextRegion(FD));
        else if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
          const BlockTextRegion *BTR =
            getBlockTextRegion(BD,
                     C.getCanonicalType(BD->getSignatureAsWritten()->getType()),
                     STC->getAnalysisDeclContext());
          sReg = getGlobalsRegion(MemRegion::StaticGlobalSpaceRegionKind,
                                  BTR);
        }
        else {
          // FIXME: For ObjC-methods, we need a new CodeTextRegion.  For now
          // just use the main global memspace.
          sReg = getGlobalsRegion();
        }
      }
    }
  }

  return getSubRegion<VarRegion>(D, sReg);
}
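
Illustrative declarations and the memory space each should be placed in by the branches above (a sketch; the errno branch additionally requires the declaration to come from a system header):

extern int my_errno;        // "errno" in the name, from a system header
                            //   -> GlobalSystemSpaceRegion (mutable)
extern const double kPi;    // const arithmetic global
                            //   -> GlobalImmutableSpaceRegion
int counter;                // ordinary global -> GlobalInternalSpaceRegion

void f(int parm) {          // parm   -> StackArgumentsRegion
  int local;                // local  -> StackLocalsRegion
  static int cache;         // static local, keyed to f's function region
                            //   -> StaticGlobalSpaceRegion
  (void)parm; (void)local; (void)cache;
}
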
Example #27
const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
  const clang::Type &Ty = *Context.getCanonicalType(T).getTypePtr();

  switch (Ty.getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.def"
    assert(false && "Non-canonical or dependent types aren't possible.");
    break;

  case Type::Builtin: {
    switch (cast<BuiltinType>(Ty).getKind()) {
    case BuiltinType::Void:
    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      // LLVM void type can only be used as the result of a function call.  Just
      // map to the same as char.
      return llvm::Type::getInt8Ty(getLLVMContext());

    case BuiltinType::Bool:
      // Note that we always return bool as i1 for use as a scalar type.
      return llvm::Type::getInt1Ty(getLLVMContext());

    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::Int:
    case BuiltinType::UInt:
    case BuiltinType::Long:
    case BuiltinType::ULong:
    case BuiltinType::LongLong:
    case BuiltinType::ULongLong:
    case BuiltinType::WChar:
    case BuiltinType::Char16:
    case BuiltinType::Char32:
      return llvm::IntegerType::get(getLLVMContext(),
        static_cast<unsigned>(Context.getTypeSize(T)));

    case BuiltinType::Float:
    case BuiltinType::Double:
    case BuiltinType::LongDouble:
      return getTypeForFormat(getLLVMContext(),
                              Context.getFloatTypeSemantics(T));

    case BuiltinType::NullPtr: {
      // Model std::nullptr_t as i8*
      const llvm::Type *Ty = llvm::Type::getInt8Ty(getLLVMContext());
      return llvm::PointerType::getUnqual(Ty);
    }
        
    case BuiltinType::UInt128:
    case BuiltinType::Int128:
      return llvm::IntegerType::get(getLLVMContext(), 128);
    
    case BuiltinType::Overload:
    case BuiltinType::Dependent:
    case BuiltinType::UndeducedAuto:
      assert(0 && "Unexpected builtin type!");
      break;
    }
    assert(0 && "Unknown builtin type!");
    break;
  }
  case Type::Complex: {
    const llvm::Type *EltTy =
      ConvertTypeRecursive(cast<ComplexType>(Ty).getElementType());
    return llvm::StructType::get(TheModule.getContext(), EltTy, EltTy, NULL);
  }
  case Type::LValueReference:
  case Type::RValueReference: {
    const ReferenceType &RTy = cast<ReferenceType>(Ty);
    QualType ETy = RTy.getPointeeType();
    llvm::OpaqueType *PointeeType = llvm::OpaqueType::get(getLLVMContext());
    PointersToResolve.push_back(std::make_pair(ETy, PointeeType));
    return llvm::PointerType::get(PointeeType, ETy.getAddressSpace());
  }
  case Type::Pointer: {
    const PointerType &PTy = cast<PointerType>(Ty);
    QualType ETy = PTy.getPointeeType();
    llvm::OpaqueType *PointeeType = llvm::OpaqueType::get(getLLVMContext());
    PointersToResolve.push_back(std::make_pair(ETy, PointeeType));
    return llvm::PointerType::get(PointeeType, ETy.getAddressSpace());
  }

  case Type::VariableArray: {
    const VariableArrayType &A = cast<VariableArrayType>(Ty);
    assert(A.getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // VLAs resolve to the innermost element type; this matches
    // the return of alloca, and there isn't any obviously better choice.
    return ConvertTypeForMemRecursive(A.getElementType());
  }
  case Type::IncompleteArray: {
    const IncompleteArrayType &A = cast<IncompleteArrayType>(Ty);
    assert(A.getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // int X[] -> [0 x int]
    return llvm::ArrayType::get(ConvertTypeForMemRecursive(A.getElementType()),
                                0);
  }
  case Type::ConstantArray: {
    const ConstantArrayType &A = cast<ConstantArrayType>(Ty);
    const llvm::Type *EltTy = ConvertTypeForMemRecursive(A.getElementType());
    return llvm::ArrayType::get(EltTy, A.getSize().getZExtValue());
  }
  case Type::ExtVector:
  case Type::Vector: {
    const VectorType &VT = cast<VectorType>(Ty);
    return llvm::VectorType::get(ConvertTypeRecursive(VT.getElementType()),
                                 VT.getNumElements());
  }
  case Type::FunctionNoProto:
  case Type::FunctionProto: {
    // First, check whether we can build the full function type.  If the
    // function type depends on an incomplete type (e.g. a struct or enum), we
    // cannot lower the function type.  Instead, turn it into an Opaque pointer
    // and have UpdateCompletedType revisit the function type when/if the opaque
    // argument type is defined.
    if (const TagType *TT = VerifyFuncTypeComplete(&Ty)) {
      // This function's type depends on an incomplete tag type; make sure
      // we have an opaque type corresponding to the tag type.
      ConvertTagDeclType(TT->getDecl());
      // Create an opaque type for this function type, save it, and return it.
      llvm::Type *ResultType = llvm::OpaqueType::get(getLLVMContext());
      FunctionTypes.insert(std::make_pair(&Ty, ResultType));
      return ResultType;
    }
    
    // The function type can be built; call the appropriate routines to
    // build it.
    const CGFunctionInfo *FI;
    bool isVariadic;
    if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(&Ty)) {
      FI = &getFunctionInfo(
                   CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)),
                            true /*Recursive*/);
      isVariadic = FPT->isVariadic();
    } else {
      const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(&Ty);
      FI = &getFunctionInfo(
                CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)),
                            true /*Recursive*/);
      isVariadic = true;
    }

    return GetFunctionType(*FI, isVariadic, true);
  }

  case Type::ObjCObject:
    return ConvertTypeRecursive(cast<ObjCObjectType>(Ty).getBaseType());

  case Type::ObjCInterface: {
    // Objective-C interfaces are always opaque (outside of the
    // runtime, which can do whatever it likes); we never refine
    // these.
    const llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(&Ty)];
    if (!T)
        T = llvm::OpaqueType::get(getLLVMContext());
    return T;
  }

  case Type::ObjCObjectPointer: {
    // Protocol qualifications do not influence the LLVM type, we just return a
    // pointer to the underlying interface type. We don't need to worry about
    // recursive conversion.
    const llvm::Type *T =
      ConvertTypeRecursive(cast<ObjCObjectPointerType>(Ty).getPointeeType());
    return llvm::PointerType::getUnqual(T);
  }

  case Type::Record:
  case Type::Enum: {
    const TagDecl *TD = cast<TagType>(Ty).getDecl();
    const llvm::Type *Res = ConvertTagDeclType(TD);

    std::string TypeName(TD->getKindName());
    TypeName += '.';

    // Name the codegen type after the typedef name
    // if there is no tag type name available
    if (TD->getIdentifier())
      // FIXME: We should not have to check for a null decl context here.
      // Right now we do it because the implicit Obj-C decls don't have one.
      TypeName += TD->getDeclContext() ? TD->getQualifiedNameAsString() :
        TD->getNameAsString();
    else if (const TypedefType *TdT = dyn_cast<TypedefType>(T))
      // FIXME: We should not have to check for a null decl context here.
      // Right now we do it because the implicit Obj-C decls don't have one.
      TypeName += TdT->getDecl()->getDeclContext() ? 
        TdT->getDecl()->getQualifiedNameAsString() :
        TdT->getDecl()->getNameAsString();
    else
      TypeName += "anon";

    TheModule.addTypeName(TypeName, Res);
    return Res;
  }

  case Type::BlockPointer: {
    const QualType FTy = cast<BlockPointerType>(Ty).getPointeeType();
    llvm::OpaqueType *PointeeType = llvm::OpaqueType::get(getLLVMContext());
    PointersToResolve.push_back(std::make_pair(FTy, PointeeType));
    return llvm::PointerType::get(PointeeType, FTy.getAddressSpace());
  }

  case Type::MemberPointer: {
    // FIXME: This is ABI dependent. We use the Itanium C++ ABI.
    // http://www.codesourcery.com/public/cxx-abi/abi.html#member-pointers
    // If we ever want to support other ABIs this needs to be abstracted.

    QualType ETy = cast<MemberPointerType>(Ty).getPointeeType();
    const llvm::Type *PtrDiffTy =
        ConvertTypeRecursive(Context.getPointerDiffType());
    if (ETy->isFunctionType())
      return llvm::StructType::get(TheModule.getContext(), PtrDiffTy, PtrDiffTy,
                                   NULL);
    return PtrDiffTy;
  }
  }

  // FIXME: implement.
  return llvm::OpaqueType::get(getLLVMContext());
}
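
For orientation, a sketch of the lowering the switch above produces for a few representative declarations (comments assume a typical target with 32-bit int; the exact widths come from Context.getTypeSize):

bool flag;               // i1 as a scalar value (stored as i8 in memory)
char byte_value;         // i8
int word;                // i32
float real;              // float (via getTypeForFormat)
_Complex double z;       // { double, double }
int fixed[8];            // [8 x i32]
extern int open_ended[]; // [0 x i32] (incomplete array)
int *ptr;                // i32*; the pointee starts as an OpaqueType and
                         // is resolved later through PointersToResolve
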
Example #28
const llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) {
  std::vector<const llvm::Type*> ArgTys;

  const llvm::Type *ResultType = 0;

  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    ResultType = ConvertType(RetTy);
    break;

  case ABIArgInfo::Indirect: {
    assert(!RetAI.getIndirectAlign() && "Align unused on indirect return.");
    ResultType = llvm::Type::getVoidTy(getLLVMContext());
    const llvm::Type *STy = ConvertType(RetTy);
    ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace()));
    break;
  }

  case ABIArgInfo::Ignore:
    ResultType = llvm::Type::getVoidTy(getLLVMContext());
    break;

  case ABIArgInfo::Coerce:
    ResultType = RetAI.getCoerceToType();
    break;
  }

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    const ABIArgInfo &AI = it->info;

    switch (AI.getKind()) {
    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce:
      ArgTys.push_back(AI.getCoerceToType());
      break;

    case ABIArgInfo::Indirect: {
      // indirect arguments are always on the stack, which is addr space #0.
      const llvm::Type *LTy = ConvertTypeForMem(it->type);
      ArgTys.push_back(llvm::PointerType::getUnqual(LTy));
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct:
      ArgTys.push_back(ConvertType(it->type));
      break;

    case ABIArgInfo::Expand:
      GetExpandedTypes(it->type, ArgTys);
      break;
    }
  }

  return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic);
}
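
For example, the ABIArgInfo::Indirect return case above is the familiar sret lowering (a sketch, assuming a target where a 128-byte struct is returned indirectly):

struct Big { int data[32]; };

Big make_big();  // lowered roughly as: void @make_big(%struct.Big* sret);
                 // a void ResultType plus a leading pointer in ArgTys
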
/// CheckReinterpretCast - Check that a reinterpret_cast\<DestType\>(SrcExpr) is
/// valid.
/// Refer to C++ 5.2.10 for details. reinterpret_cast is typically used in code
/// like this:
/// char *bytes = reinterpret_cast\<char*\>(int_ptr);
void
CheckReinterpretCast(Sema &Self, Expr *&SrcExpr, QualType DestType,
                     const SourceRange &OpRange, const SourceRange &DestRange)
{
  QualType OrigDestType = DestType, OrigSrcType = SrcExpr->getType();

  DestType = Self.Context.getCanonicalType(DestType);
  QualType SrcType = SrcExpr->getType();
  if (const LValueReferenceType *DestTypeTmp =
        DestType->getAsLValueReferenceType()) {
    if (SrcExpr->isLvalue(Self.Context) != Expr::LV_Valid) {
      // Cannot cast non-lvalue to reference type.
      Self.Diag(OpRange.getBegin(), diag::err_bad_cxx_cast_rvalue)
        << "reinterpret_cast" << OrigDestType << SrcExpr->getSourceRange();
      return;
    }

    // C++ 5.2.10p10: [...] a reference cast reinterpret_cast<T&>(x) has the
    //   same effect as the conversion *reinterpret_cast<T*>(&x) with the
    //   built-in & and * operators.
    // This code does this transformation for the checked types.
    DestType = Self.Context.getPointerType(DestTypeTmp->getPointeeType());
    SrcType = Self.Context.getPointerType(SrcType);
  } else if (const RValueReferenceType *DestTypeTmp =
               DestType->getAsRValueReferenceType()) {
    // Both the reference conversion and the rvalue rules apply.
    Self.DefaultFunctionArrayConversion(SrcExpr);
    SrcType = SrcExpr->getType();

    DestType = Self.Context.getPointerType(DestTypeTmp->getPointeeType());
    SrcType = Self.Context.getPointerType(SrcType);
  } else {
    // C++ 5.2.10p1: [...] the lvalue-to-rvalue, array-to-pointer, and
    //   function-to-pointer standard conversions are performed on the
    //   expression v.
    Self.DefaultFunctionArrayConversion(SrcExpr);
    SrcType = SrcExpr->getType();
  }

  // Canonicalize source for comparison.
  SrcType = Self.Context.getCanonicalType(SrcType);

  const MemberPointerType *DestMemPtr = DestType->getAsMemberPointerType(),
                          *SrcMemPtr = SrcType->getAsMemberPointerType();
  if (DestMemPtr && SrcMemPtr) {
    // C++ 5.2.10p9: An rvalue of type "pointer to member of X of type T1"
    //   can be explicitly converted to an rvalue of type "pointer to member
    //   of Y of type T2" if T1 and T2 are both function types or both object
    //   types.
    if (DestMemPtr->getPointeeType()->isFunctionType() !=
        SrcMemPtr->getPointeeType()->isFunctionType()) {
      Self.Diag(OpRange.getBegin(), diag::err_bad_cxx_cast_generic)
        << "reinterpret_cast" << OrigDestType << OrigSrcType << OpRange;
      return;
    }

    // C++ 5.2.10p2: The reinterpret_cast operator shall not cast away
    //   constness.
    if (CastsAwayConstness(Self, SrcType, DestType)) {
      Self.Diag(OpRange.getBegin(), diag::err_bad_cxx_cast_const_away)
        << "reinterpret_cast" << OrigDestType << OrigSrcType << OpRange;
      return;
    }

    // A valid member pointer cast.
    return;
  }

  // See below for the enumeral issue.
  if (SrcType->isNullPtrType() && DestType->isIntegralType() &&
      !DestType->isEnumeralType()) {
    // C++0x 5.2.10p4: A pointer can be explicitly converted to any integral
    //   type large enough to hold it. A value of std::nullptr_t can be
    //   converted to an integral type; the conversion has the same meaning
    //   and validity as a conversion of (void*)0 to the integral type.
    if (Self.Context.getTypeSize(SrcType) >
        Self.Context.getTypeSize(DestType)) {
      Self.Diag(OpRange.getBegin(), diag::err_bad_reinterpret_cast_small_int)
        << OrigDestType << DestRange;
    }
    return;
  }

  bool destIsPtr = DestType->isPointerType();
  bool srcIsPtr = SrcType->isPointerType();
  if (!destIsPtr && !srcIsPtr) {
    // Except for std::nullptr_t->integer and lvalue->reference, which are
    // handled above, at least one of the two arguments must be a pointer.
    Self.Diag(OpRange.getBegin(), diag::err_bad_cxx_cast_generic)
      << "reinterpret_cast" << OrigDestType << OrigSrcType << OpRange;
    return;
  }

  if (SrcType == DestType) {
    // C++ 5.2.10p2 has a note that mentions that, subject to all other
    // restrictions, a cast to the same type is allowed. The intent is not
    // entirely clear here, since all other paragraphs explicitly forbid casts
    // to the same type. However, the behavior of compilers is pretty consistent
    // on this point: allow same-type conversion if the involved types are
    // pointers, disallow otherwise.
    return;
  }

  // Note: Clang treats enumeration types as integral types. If this is ever
  // changed for C++, the additional check here will be redundant.
  if (DestType->isIntegralType() && !DestType->isEnumeralType()) {
    assert(srcIsPtr && "One type must be a pointer");
    // C++ 5.2.10p4: A pointer can be explicitly converted to any integral
    //   type large enough to hold it.
    if (Self.Context.getTypeSize(SrcType) >
        Self.Context.getTypeSize(DestType)) {
      Self.Diag(OpRange.getBegin(), diag::err_bad_reinterpret_cast_small_int)
        << OrigDestType << DestRange;
    }
    return;
  }

  if (SrcType->isIntegralType() || SrcType->isEnumeralType()) {
    assert(destIsPtr && "One type must be a pointer");
    // C++ 5.2.10p5: A value of integral or enumeration type can be explicitly
    //   converted to a pointer.
    return;
  }

  if (!destIsPtr || !srcIsPtr) {
    // With the valid non-pointer conversions out of the way, we can be even
    // more stringent.
    Self.Diag(OpRange.getBegin(), diag::err_bad_cxx_cast_generic)
      << "reinterpret_cast" << OrigDestType << OrigSrcType << OpRange;
    return;
  }

  // C++ 5.2.10p2: The reinterpret_cast operator shall not cast away constness.
  if (CastsAwayConstness(Self, SrcType, DestType)) {
    Self.Diag(OpRange.getBegin(), diag::err_bad_cxx_cast_const_away)
      << "reinterpret_cast" << OrigDestType << OrigSrcType << OpRange;
    return;
  }

  // Not casting away constness, so the only remaining check is for compatible
  // pointer categories.

  if (SrcType->isFunctionPointerType()) {
    if (DestType->isFunctionPointerType()) {
      // C++ 5.2.10p6: A pointer to a function can be explicitly converted to
      // a pointer to a function of a different type.
      return;
    }

    // C++0x 5.2.10p8: Converting a pointer to a function into a pointer to
    //   an object type or vice versa is conditionally-supported.
    // Compilers support it in C++03 too, though, because it's necessary for
    // casting the return value of dlsym() and GetProcAddress().
    // FIXME: Conditionally-supported behavior should be configurable in the
    // TargetInfo or similar.
    if (!Self.getLangOptions().CPlusPlus0x) {
      Self.Diag(OpRange.getBegin(), diag::ext_reinterpret_cast_fn_obj)
        << OpRange;
    }
    return;
  }

  if (DestType->isFunctionPointerType()) {
    // See above.
    if (!Self.getLangOptions().CPlusPlus0x) {
      Self.Diag(OpRange.getBegin(), diag::ext_reinterpret_cast_fn_obj)
        << OpRange;
    }
    return;
  }

  // C++ 5.2.10p7: A pointer to an object can be explicitly converted to
  //   a pointer to an object of different type.
  // Void pointers are not specified, but supported by every compiler out there.
  // So we finish by allowing everything that remains - it's got to be two
  // object pointers.
}
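
A few casts annotated with the outcome of the checks above (a sketch; rejected lines are commented out so it compiles, and the notes paraphrase the diagnostic IDs):

void reinterpret_examples(int *ip, void (*fp)()) {
  long as_int = reinterpret_cast<long>(ip);    // OK if long can hold a pointer
  // short s = reinterpret_cast<short>(ip);    // err_bad_reinterpret_cast_small_int
  char *bytes = reinterpret_cast<char *>(ip);  // OK: object ptr -> object ptr
  void *obj = reinterpret_cast<void *>(fp);    // ext_reinterpret_cast_fn_obj
                                               // (extension outside C++0x)
  int &ref = reinterpret_cast<int &>(*ip);     // reference form; same as
                                               // *reinterpret_cast<int *>(&*ip)
  (void)as_int; (void)bytes; (void)obj; (void)ref;
}
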
Example #30
// access image memory at given index
Expr *ASTTranslate::accessMemImgAt(DeclRefExpr *LHS, HipaccAccessor *Acc,
    MemoryAccess mem_acc, Expr *idx_x, Expr *idx_y) {
  Expr *result, *coord;

  // mark image as being used within the kernel
  Kernel->setUsed(LHS->getNameInfo().getAsString());

  // construct coordinate: (int2)(gid_x, gid_y)
  coord = createBinaryOperator(Ctx, idx_x, idx_y, BO_Comma, Ctx.IntTy);
  coord = createParenExpr(Ctx, coord);
  QualType QTcoord = simdTypes.getSIMDType(Ctx.IntTy, "int", SIMD2);
  coord = createCStyleCastExpr(Ctx, QTcoord, CK_VectorSplat, coord, nullptr,
      Ctx.getTrivialTypeSourceInfo(QTcoord));
  FunctionDecl *image_function = getImageFunction(Acc, mem_acc);

  // create function call for image objects in OpenCL
  if (mem_acc == READ_ONLY) {
    // parameters for read_image
    SmallVector<Expr *, 16> args;
    args.push_back(LHS);
    args.push_back(kernelSamplerRef);
    args.push_back(coord);

    result = createFunctionCall(Ctx, image_function, args);

    QualType QT = Acc->getImage()->getType();
    if (QT->isVectorType()) {
      SmallVector<Expr *, 16> args;
      args.push_back(result);
      result = createFunctionCall(Ctx, getConvertFunction(QT), args);
    } else {
      result = createExtVectorElementExpr(Ctx, QT, result, "x");
    }
  } else {
    QualType QT;

    // determine cast type for write_image functions
    if (image_function == builtins.getBuiltinFunction(OPENCLBIwrite_imagei)) {
      QT = simdTypes.getSIMDType(Ctx.IntTy, "int", SIMD4);
    } else if (image_function ==
        builtins.getBuiltinFunction(OPENCLBIwrite_imageui)) {
      QT = simdTypes.getSIMDType(Ctx.UnsignedIntTy, "uint", SIMD4);
    } else {
      QT = simdTypes.getSIMDType(Ctx.FloatTy, "float", SIMD4);
    }

    // writeImageRHS is set by VisitBinaryOperator as a side effect.
    if (!writeImageRHS->getType()->isVectorType()) {
      // introduce temporary for propagating the RHS to a vector
      std::string tmp_lit("_tmp" + std::to_string(literalCount++));
      VarDecl *tmp_decl = createVarDecl(Ctx, kernelDecl, tmp_lit, QT,
          writeImageRHS);
      DeclContext *DC = FunctionDecl::castToDeclContext(kernelDecl);
      DC->addDecl(tmp_decl);
      DeclRefExpr *tmp_dre = createDeclRefExpr(Ctx, tmp_decl);
      preStmts.push_back(createDeclStmt(Ctx, tmp_decl));
      preCStmt.push_back(curCStmt);
      writeImageRHS = tmp_dre;
    }

    if (writeImageRHS->getType() != QT) {
      // convert to proper vector type
      SmallVector<Expr *, 16> args;
      args.push_back(writeImageRHS);
      writeImageRHS = createFunctionCall(Ctx, getConvertFunction(QT), args);
    }

    // parameters for write_image
    SmallVector<Expr *, 16> args;
    args.push_back(LHS);
    args.push_back(coord);
    args.push_back(writeImageRHS);

    result = createFunctionCall(Ctx, image_function, args);
  }

  return result;
}
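
A hedged sketch of the OpenCL kernel code this AST construction corresponds to for a scalar float image; identifiers such as in_img, smp, and gid_x stand in for the names the translator actually emits:

__kernel void sketch(__read_only image2d_t in_img,
                     __write_only image2d_t out_img,
                     sampler_t smp, float rhs) {
  int gid_x = get_global_id(0);
  int gid_y = get_global_id(1);

  /* READ_ONLY: read_imagef, then .x for a scalar (non-vector) image */
  float4 pix = read_imagef(in_img, smp, (int2)(gid_x, gid_y));
  float val = pix.x;

  /* WRITE_ONLY: the scalar RHS is propagated to a float4 and written */
  write_imagef(out_img, (int2)(gid_x, gid_y), (float4)(val + rhs));
}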