Example no. 1
0
void ExprEngine::VisitInitListExpr(const InitListExpr *IE,
                                   ExplodedNode *Pred,
                                   ExplodedNodeSet &Dst) {
  StmtNodeBuilder B(Pred, Dst, *currBldrCtx);

  ProgramStateRef state = Pred->getState();
  const LocationContext *LCtx = Pred->getLocationContext();
  QualType T = getContext().getCanonicalType(IE->getType());
  unsigned NumInitElements = IE->getNumInits();
  
  if (T->isArrayType() || T->isRecordType() || T->isVectorType() ||
      T->isAnyComplexType()) {
    llvm::ImmutableList<SVal> vals = getBasicVals().getEmptySValList();
    
    // Handle base case where the initializer has no elements.
    // e.g: static int* myArray[] = {};
    if (NumInitElements == 0) {
      SVal V = svalBuilder.makeCompoundVal(T, vals);
      B.generateNode(IE, Pred, state->BindExpr(IE, LCtx, V));
      return;
    }
    
    for (InitListExpr::const_reverse_iterator it = IE->rbegin(),
         ei = IE->rend(); it != ei; ++it) {
      SVal V = state->getSVal(cast<Expr>(*it), LCtx);
      if (dyn_cast_or_null<CXXTempObjectRegion>(V.getAsRegion()))
        V = UnknownVal();
      vals = getBasicVals().consVals(V, vals);
    }
    
    B.generateNode(IE, Pred,
                   state->BindExpr(IE, LCtx,
                                   svalBuilder.makeCompoundVal(T, vals)));
    return;
  }

  // Handle scalars: int{5} and int{}.
  assert(NumInitElements <= 1);

  SVal V;
  if (NumInitElements == 0)
    V = getSValBuilder().makeZeroVal(T);
  else
    V = state->getSVal(IE->getInit(0), LCtx);

  B.generateNode(IE, Pred, state->BindExpr(IE, LCtx, V));
}
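For context (not part of the analyzer source above), a minimal sketch of the source-level initializer forms the branches distinguish; the empty-array case is the GNU extension mentioned in the comment:

// Illustrative inputs only.
static int *emptyArray[] = {};       // aggregate initializer with zero elements
static int populated[] = {1, 2, 3};  // aggregate initializer with elements (visited in reverse above)
int scalarBraced = int{5};           // scalar form with one initializer
int scalarEmpty = int{};             // scalar form with no initializer (binds a zero value)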
Example no. 2
0
// <type> ::= <pointer-type>
// <pointer-type> ::= <pointer-cvr-qualifiers> <cvr-qualifiers> <type>
void MicrosoftCXXNameMangler::mangleType(const PointerType *T) {
  QualType PointeeTy = T->getPointeeType();
  if (PointeeTy->isArrayType()) {
    // Pointers to arrays are mangled like arrays.
    mangleExtraDimensions(T->getPointeeType());
  } else if (PointeeTy->isFunctionType()) {
    // Function pointers are special.
    Out << '6';
    mangleType(static_cast<const FunctionType *>(PointeeTy.getTypePtr()),
               NULL, false, false);
  } else {
    if (!PointeeTy.hasQualifiers())
      // Lack of qualifiers is mangled as 'A'.
      Out << 'A';
    mangleType(PointeeTy);
  }
}
Example no. 3
// Based on QualType::isTrivial.
bool isTriviallyDefaultConstructible(QualType Type, const ASTContext &Context) {
  if (Type.isNull())
    return false;

  if (Type->isArrayType())
    return isTriviallyDefaultConstructible(Context.getBaseElementType(Type),
                                           Context);

  // Return false for incomplete types after skipping any incomplete array
  // types which are expressly allowed by the standard and thus our API.
  if (Type->isIncompleteType())
    return false;

  if (Context.getLangOpts().ObjCAutoRefCount) {
    switch (Type.getObjCLifetime()) {
    case Qualifiers::OCL_ExplicitNone:
      return true;

    case Qualifiers::OCL_Strong:
    case Qualifiers::OCL_Weak:
    case Qualifiers::OCL_Autoreleasing:
      return false;

    case Qualifiers::OCL_None:
      if (Type->isObjCLifetimeType())
        return false;
      break;
    }
  }

  QualType CanonicalType = Type.getCanonicalType();
  if (CanonicalType->isDependentType())
    return false;

  // As an extension, Clang treats vector types as Scalar types.
  if (CanonicalType->isScalarType() || CanonicalType->isVectorType())
    return true;

  if (const auto *RT = CanonicalType->getAs<RecordType>()) {
    return recordIsTriviallyDefaultConstructible(*RT->getDecl(), Context);
  }

  // No other types can match.
  return false;
}
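A small standalone sketch of the behaviour this predicate mirrors, written with standard type traits only (no clang APIs assumed): arrays are judged by their element type, scalars count as trivially default constructible, and records depend on their definition.

#include <type_traits>
struct Trivial { int x; };
struct NonTrivial { NonTrivial() {} };
static_assert(std::is_trivially_default_constructible<int>::value, "scalar type");
static_assert(std::is_trivially_default_constructible<int[8]>::value,
              "array: decided by its element type");
static_assert(std::is_trivially_default_constructible<Trivial>::value, "trivial record");
static_assert(!std::is_trivially_default_constructible<NonTrivial>::value,
              "record with a user-provided default constructor");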
Example no. 4
0
NamedDecl *Sema::LookupInlineAsmIdentifier(StringRef Name, SourceLocation Loc,
                                           unsigned &Length, unsigned &Size, 
                                           unsigned &Type, bool &IsVarDecl) {
  // FIXME: Temporary hack until the frontend parser is hooked up to parse 
  // variables.
  StringRef ParsedName = parseIdentifier(Name);
  assert (ParsedName == Name && "Identifier not parsed correctly!");

  Length = 1;
  Size = 0;
  Type = 0;
  IsVarDecl = false;
  LookupResult Result(*this, &Context.Idents.get(Name), Loc,
                      Sema::LookupOrdinaryName);

  if (!LookupName(Result, getCurScope())) {
    // If we don't find anything, return null; the AsmParser will assume
    // it is a label of some sort.
    return 0;
  }

  if (!Result.isSingleResult()) {
    // FIXME: Diagnose result.
    return 0;
  }

  NamedDecl *FoundDecl = Result.getFoundDecl();
  if (isa<FunctionDecl>(FoundDecl))
    return FoundDecl;
  if (VarDecl *Var = dyn_cast<VarDecl>(FoundDecl)) {
    QualType Ty = Var->getType();
    Type = Size = Context.getTypeSizeInChars(Ty).getQuantity();
    if (Ty->isArrayType()) {
      const ArrayType *ATy = Context.getAsArrayType(Ty);
      Type = Context.getTypeSizeInChars(ATy->getElementType()).getQuantity();
      Length = Size / Type;
    }
    IsVarDecl = true;
    return FoundDecl;
  }

  // FIXME: Handle other kinds of results? (FieldDecl, etc.)
  // FIXME: Diagnose if we find something we can't handle, like a typedef.
  return 0;
}
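As a rough illustration of the Length/Size/Type arithmetic above, using plain sizeof instead of ASTContext (the concrete values are target-dependent):

#include <cstdio>
int main() {
  int arr[10] = {};
  unsigned Size = sizeof(arr);     // total size of the variable in bytes
  unsigned Type = sizeof(arr[0]);  // size of a single element in bytes
  unsigned Length = Size / Type;   // number of elements (10)
  std::printf("Length=%u Size=%u Type=%u\n", Length, Size, Type);
  return 0;
}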
Example no. 5
0
/// \brief Perform adjustment on the parameter type of a function.
///
/// This routine adjusts the given parameter type @p T to the actual
/// parameter type used by semantic analysis (C99 6.7.5.3p[7,8], 
/// C++ [dcl.fct]p3). The adjusted parameter type is returned. 
QualType Sema::adjustParameterType(QualType T) {
  // C99 6.7.5.3p7:
  if (T->isArrayType()) {
    // C99 6.7.5.3p7:
    //   A declaration of a parameter as "array of type" shall be
    //   adjusted to "qualified pointer to type", where the type
    //   qualifiers (if any) are those specified within the [ and ] of
    //   the array type derivation.
    return Context.getArrayDecayedType(T);
  } else if (T->isFunctionType())
    // C99 6.7.5.3p8:
    //   A declaration of a parameter as "function returning type"
    //   shall be adjusted to "pointer to function returning type", as
    //   in 6.3.2.1.
    return Context.getPointerType(T);

  return T;
}
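The adjustments cited above can be observed directly in the language; a minimal compile-time sketch using standard type traits (no clang APIs assumed):

#include <type_traits>
// C99 6.7.5.3p7 / C++ [dcl.fct]p3: an array parameter adjusts to a pointer.
static_assert(std::is_same<void(int[10]), void(int *)>::value,
              "void f(int a[10]) and void f(int *a) declare the same function type");
// C99 6.7.5.3p8: a function-type parameter adjusts to a pointer to function.
static_assert(std::is_same<void(int(char)), void(int (*)(char))>::value,
              "a parameter of function type becomes a function pointer");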
Example no. 6
0
void ExprEngine::VisitInitListExpr(const InitListExpr *IE,
                                   ExplodedNode *Pred,
                                   ExplodedNodeSet &Dst) {
  StmtNodeBuilder B(Pred, Dst, *currentBuilderContext);

  const ProgramState *state = Pred->getState();
  const LocationContext *LCtx = Pred->getLocationContext();
  QualType T = getContext().getCanonicalType(IE->getType());
  unsigned NumInitElements = IE->getNumInits();
  
  if (T->isArrayType() || T->isRecordType() || T->isVectorType()) {
    llvm::ImmutableList<SVal> vals = getBasicVals().getEmptySValList();
    
    // Handle base case where the initializer has no elements.
    // e.g: static int* myArray[] = {};
    if (NumInitElements == 0) {
      SVal V = svalBuilder.makeCompoundVal(T, vals);
      B.generateNode(IE, Pred, state->BindExpr(IE, LCtx, V));
      return;
    }
    
    for (InitListExpr::const_reverse_iterator it = IE->rbegin(),
         ei = IE->rend(); it != ei; ++it) {
      vals = getBasicVals().consVals(state->getSVal(cast<Expr>(*it), LCtx),
                                     vals);
    }
    
    B.generateNode(IE, Pred,
                   state->BindExpr(IE, LCtx,
                                   svalBuilder.makeCompoundVal(T, vals)));
    return;
  }
  
  if (Loc::isLocType(T) || T->isIntegerType()) {
    assert(IE->getNumInits() == 1);
    const Expr *initEx = IE->getInit(0);
    B.generateNode(IE, Pred, state->BindExpr(IE, LCtx,
                                             state->getSVal(initEx, LCtx)));
    return;
  }
  
  llvm_unreachable("unprocessed InitListExpr type");
}
Example no. 7
0
bool MigrationContext::isGCOwnedNonObjC(QualType T) {
  while (!T.isNull()) {
    if (const AttributedType *AttrT = T->getAs<AttributedType>()) {
      if (AttrT->getAttrKind() == AttributedType::attr_objc_ownership)
        return !AttrT->getModifiedType()->isObjCRetainableType();
    }

    if (T->isArrayType())
      T = Pass.Ctx.getBaseElementType(T);
    else if (const PointerType *PT = T->getAs<PointerType>())
      T = PT->getPointeeType();
    else if (const ReferenceType *RT = T->getAs<ReferenceType>())
      T = RT->getPointeeType();
    else
      break;
  }

  return false;
}
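The loop above peels off one type layer at a time (array element, pointee, referencee) until no wrapper remains. A rough standalone analogue using template specialization (the StripLayers name is illustrative, not a clang API):

#include <cstddef>
#include <type_traits>
template <class T> struct StripLayers { using type = T; };
template <class T> struct StripLayers<T *> : StripLayers<T> {};
template <class T> struct StripLayers<T &> : StripLayers<T> {};
template <class T, std::size_t N> struct StripLayers<T[N]> : StripLayers<T> {};
// A reference to an array of three pointers to int strips down to int.
static_assert(std::is_same<StripLayers<int *(&)[3]>::type, int>::value,
              "reference, array, and pointer layers are all stripped");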
Example no. 8
0
NamedDecl *Sema::LookupInlineAsmIdentifier(StringRef Name, SourceLocation Loc,
                                           unsigned &Length, unsigned &Size, 
                                           unsigned &Type, bool &IsVarDecl) {
  Length = 1;
  Size = 0;
  Type = 0;
  IsVarDecl = false;
  LookupResult Result(*this, &Context.Idents.get(Name), Loc,
                      Sema::LookupOrdinaryName);

  if (!LookupName(Result, getCurScope())) {
    // If we don't find anything, return null; the AsmParser will assume
    // it is a label of some sort.
    return 0;
  }

  if (!Result.isSingleResult()) {
    // FIXME: Diagnose result.
    return 0;
  }

  NamedDecl *ND = Result.getFoundDecl();
  if (isa<VarDecl>(ND) || isa<FunctionDecl>(ND)) {
    if (VarDecl *Var = dyn_cast<VarDecl>(ND)) {
      Type = Context.getTypeInfo(Var->getType()).first;
      QualType Ty = Var->getType();
      if (Ty->isArrayType()) {
        const ArrayType *ATy = Context.getAsArrayType(Ty);
        Length = Type / Context.getTypeInfo(ATy->getElementType()).first;
        Type /= Length; // Type is in terms of a single element.
      }
      Type /= 8; // Type is in terms of bits, but we want bytes.
      Size = Length * Type;
      IsVarDecl = true;
    }
    return ND;
  }

  // FIXME: Handle other kinds of results? (FieldDecl, etc.)
  // FIXME: Diagnose if we find something we can't handle, like a typedef.
  return 0;
}
Example no. 9
0
void SwiftAggLowering::addTypedData(QualType type, CharUnits begin) {
  // Deal with various aggregate types as special cases:

  // Record types.
  if (auto recType = type->getAs<RecordType>()) {
    addTypedData(recType->getDecl(), begin);

  // Array types.
  } else if (type->isArrayType()) {
    // Incomplete array types (flexible array members?) don't provide
    // data to lay out, and the other cases shouldn't be possible.
    auto arrayType = CGM.getContext().getAsConstantArrayType(type);
    if (!arrayType) return;

    QualType eltType = arrayType->getElementType();
    auto eltSize = CGM.getContext().getTypeSizeInChars(eltType);
    for (uint64_t i = 0, e = arrayType->getSize().getZExtValue(); i != e; ++i) {
      addTypedData(eltType, begin + i * eltSize);
    }

  // Complex types.
  } else if (auto complexType = type->getAs<ComplexType>()) {
    auto eltType = complexType->getElementType();
    auto eltSize = CGM.getContext().getTypeSizeInChars(eltType);
    auto eltLLVMType = CGM.getTypes().ConvertType(eltType);
    addTypedData(eltLLVMType, begin, begin + eltSize);
    addTypedData(eltLLVMType, begin + eltSize, begin + 2 * eltSize);

  // Member pointer types.
  } else if (type->getAs<MemberPointerType>()) {
    // Just add it all as opaque.
    addOpaqueData(begin, begin + CGM.getContext().getTypeSizeInChars(type));

  // Everything else is scalar and should not convert as an LLVM aggregate.
  } else {
    // We intentionally convert as !ForMem because we want to preserve
    // that a type was an i1.
    auto llvmType = CGM.getTypes().ConvertType(type);
    addTypedData(llvmType, begin);
  }
}
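A tiny sketch of the offset arithmetic used above: array element i starts at begin + i * eltSize, and a complex value occupies two adjacent slots of its component type (std::complex is required to have that layout).

#include <complex>
#include <cstddef>
static_assert(sizeof(std::complex<double>) == 2 * sizeof(double),
              "a complex value is two adjacent elements of its component type");
constexpr std::size_t elementOffset(std::size_t begin, std::size_t i, std::size_t eltSize) {
  return begin + i * eltSize;  // mirrors "begin + i * eltSize" in the loop above
}
static_assert(elementOffset(16, 3, 8) == 40,
              "element at index 3, with 8-byte elements starting at byte 16");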
Example no. 10
0
void MicrosoftCXXNameMangler::mangleVariableEncoding(const VarDecl *VD) {
  // <type-encoding> ::= <storage-class> <variable-type>
  // <storage-class> ::= 0  # private static member
  //                 ::= 1  # protected static member
  //                 ::= 2  # public static member
  //                 ::= 3  # global
  //                 ::= 4  # static local
  
  // The first character in the encoding (after the name) is the storage class.
  if (VD->isStaticDataMember()) {
    // If it's a static member, it also encodes the access level.
    switch (VD->getAccess()) {
      default:
      case AS_private: Out << '0'; break;
      case AS_protected: Out << '1'; break;
      case AS_public: Out << '2'; break;
    }
  }
  else if (!VD->isStaticLocal())
    Out << '3';
  else
    Out << '4';
  // Now mangle the type.
  // <variable-type> ::= <type> <cvr-qualifiers>
  //                 ::= <type> A # pointers, references, arrays
  // Pointers and references are odd. The type of 'int * const foo;' gets
  // mangled as 'QAHA' instead of 'PAHB', for example.
  QualType Ty = VD->getType();
  if (Ty->isPointerType() || Ty->isReferenceType()) {
    mangleType(Ty);
    Out << 'A';
  } else if (Ty->isArrayType()) {
    // Global arrays are funny, too.
    mangleType(cast<ArrayType>(Ty.getTypePtr()), true);
    Out << 'A';
  } else {
    mangleType(Ty.getLocalUnqualifiedType());
    mangleQualifiers(Ty.getLocalQualifiers(), false);
  }
}
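A self-contained sketch of the <storage-class> digit selection above; the function name and enum are illustrative, not the real mangler API:

enum Access { Private, Protected, Public };
char storageClassDigit(bool isStaticMember, bool isStaticLocal, Access A) {
  if (isStaticMember)  // static data members also encode their access level
    return A == Private ? '0' : A == Protected ? '1' : '2';
  return isStaticLocal ? '4' : '3';  // '3' = global, '4' = static local
}
int main() {
  // A plain global yields '3', a public static member '2', a static local '4'.
  return storageClassDigit(false, false, Public) == '3' ? 0 : 1;
}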
Example no. 11
0
NamedDecl *Sema::LookupInlineAsmIdentifier(StringRef &LineBuf, SourceLocation Loc,
                                           InlineAsmIdentifierInfo &Info) {
  Info.clear();
  // FIXME: Temporary hack until the frontend parser is hooked up to parse 
  // variables.
  LineBuf = parseIdentifier(LineBuf);
  LookupResult Result(*this, &Context.Idents.get(LineBuf), Loc,
                      Sema::LookupOrdinaryName);

  if (!LookupName(Result, getCurScope())) {
    // If we don't find anything, return null; the AsmParser will assume
    // it is a label of some sort.
    return 0;
  }

  if (!Result.isSingleResult()) {
    // FIXME: Diagnose result.
    return 0;
  }

  NamedDecl *FoundDecl = Result.getFoundDecl();
  if (isa<FunctionDecl>(FoundDecl))
    return FoundDecl;
  if (VarDecl *Var = dyn_cast<VarDecl>(FoundDecl)) {
    QualType Ty = Var->getType();
    Info.Type = Info.Size = Context.getTypeSizeInChars(Ty).getQuantity();
    if (Ty->isArrayType()) {
      const ArrayType *ATy = Context.getAsArrayType(Ty);
      Info.Type = Context.getTypeSizeInChars(ATy->getElementType()).getQuantity();
      Info.Length = Info.Size / Info.Type;
    }
    Info.IsVarDecl = true;
    return FoundDecl;
  }

  // FIXME: Handle other kinds of results? (FieldDecl, etc.)
  // FIXME: Diagnose if we find something we can't handle, like a typedef.
  return 0;
}
Example no. 12
0
/// DecodeTypeFromStr - This decodes one type descriptor from Str, advancing the
/// pointer over the consumed characters.  This returns the resultant type.
static QualType DecodeTypeFromStr(const char *&Str, ASTContext &Context, 
                                  Builtin::Context::GetBuiltinTypeError &Error,
                                  bool AllowTypeModifiers = true) {
  // Modifiers.
  int HowLong = 0;
  bool Signed = false, Unsigned = false;
  
  // Read the modifiers first.
  bool Done = false;
  while (!Done) {
    switch (*Str++) {
    default: Done = true; --Str; break; 
    case 'S':
      assert(!Unsigned && "Can't use both 'S' and 'U' modifiers!");
      assert(!Signed && "Can't use 'S' modifier multiple times!");
      Signed = true;
      break;
    case 'U':
      assert(!Signed && "Can't use both 'S' and 'U' modifiers!");
      assert(!Unsigned && "Can't use 'U' modifier multiple times!");
      Unsigned = true;
      break;
    case 'L':
      assert(HowLong <= 2 && "Can't have LLLL modifier");
      ++HowLong;
      break;
    }
  }

  QualType Type;
  
  // Read the base type.
  switch (*Str++) {
  default: assert(0 && "Unknown builtin type letter!");
  case 'v':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'v'!");
    Type = Context.VoidTy;
    break;
  case 'f':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'f'!");
    Type = Context.FloatTy;
    break;
  case 'd':
    assert(HowLong < 2 && !Signed && !Unsigned &&
           "Bad modifiers used with 'd'!");
    if (HowLong)
      Type = Context.LongDoubleTy;
    else
      Type = Context.DoubleTy;
    break;
  case 's':
    assert(HowLong == 0 && "Bad modifiers used with 's'!");
    if (Unsigned)
      Type = Context.UnsignedShortTy;
    else
      Type = Context.ShortTy;
    break;
  case 'i':
    if (HowLong == 3)
      Type = Unsigned ? Context.UnsignedInt128Ty : Context.Int128Ty;
    else if (HowLong == 2)
      Type = Unsigned ? Context.UnsignedLongLongTy : Context.LongLongTy;
    else if (HowLong == 1)
      Type = Unsigned ? Context.UnsignedLongTy : Context.LongTy;
    else
      Type = Unsigned ? Context.UnsignedIntTy : Context.IntTy;
    break;
  case 'c':
    assert(HowLong == 0 && "Bad modifiers used with 'c'!");
    if (Signed)
      Type = Context.SignedCharTy;
    else if (Unsigned)
      Type = Context.UnsignedCharTy;
    else
      Type = Context.CharTy;
    break;
  case 'b': // boolean
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'b'!");
    Type = Context.BoolTy;
    break;
  case 'z':  // size_t.
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'z'!");
    Type = Context.getSizeType();
    break;
  case 'F':
    Type = Context.getCFConstantStringType();
    break;
  case 'a':
    Type = Context.getBuiltinVaListType();
    assert(!Type.isNull() && "builtin va list type not initialized!");
    break;
  case 'A':
    // This is a "reference" to a va_list; however, what exactly
    // this means depends on how va_list is defined. There are two
    // different kinds of va_list: ones passed by value, and ones
    // passed by reference.  An example of a by-value va_list is
    // x86, where va_list is a char*. An example of by-ref va_list
    // is x86-64, where va_list is a __va_list_tag[1]. For x86,
    // we want this argument to be a char*&; for x86-64, we want
    // it to be a __va_list_tag*.
    Type = Context.getBuiltinVaListType();
    assert(!Type.isNull() && "builtin va list type not initialized!");
    if (Type->isArrayType()) {
      Type = Context.getArrayDecayedType(Type);
    } else {
      Type = Context.getLValueReferenceType(Type);
    }
    break;
  case 'V': {
    char *End;
    
    unsigned NumElements = strtoul(Str, &End, 10);
    assert(End != Str && "Missing vector size");
    
    Str = End;
    
    QualType ElementType = DecodeTypeFromStr(Str, Context, Error, false);
    Type = Context.getVectorType(ElementType, NumElements);
    break;
  }
  case 'P': {
    IdentifierInfo *II = &Context.Idents.get("FILE");
    DeclContext::lookup_result Lookup 
      = Context.getTranslationUnitDecl()->lookup(Context, II);
    if (Lookup.first != Lookup.second && isa<TypeDecl>(*Lookup.first)) {
      Type = Context.getTypeDeclType(cast<TypeDecl>(*Lookup.first));
      break;
    }
    else {
      Error = Builtin::Context::GE_Missing_FILE;
      return QualType();
    }
  }
  }
  
  if (!AllowTypeModifiers)
    return Type;
  
  Done = false;
  while (!Done) {
    switch (*Str++) {
      default: Done = true; --Str; break;
      case '*':
        Type = Context.getPointerType(Type);
        break;
      case '&':
        Type = Context.getLValueReferenceType(Type);
        break;
      // FIXME: There's no way to have a built-in with an rvalue ref arg.
      case 'C':
        Type = Type.getQualifiedType(QualType::Const);
        break;
    }
  }
  
  return Type;
}
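A rough standalone sketch of the descriptor grammar implemented above (prefix modifiers S/U/L..., one base letter, then suffix modifiers such as '*'). It covers only a small subset of the letters and is illustrative, not the real builtin-signature decoder:

#include <iostream>
#include <string>
static std::string decode(const char *&s) {
  int howLong = 0;
  bool isSigned = false, isUnsigned = false, done = false;
  while (!done) {           // read the prefix modifiers, as above
    switch (*s++) {
    default: done = true; --s; break;
    case 'S': isSigned = true; break;
    case 'U': isUnsigned = true; break;
    case 'L': ++howLong; break;
    }
  }
  std::string base;
  switch (*s++) {           // read the base type letter
  case 'v': base = "void"; break;
  case 'c': base = isSigned ? "signed char" : isUnsigned ? "unsigned char" : "char"; break;
  case 'i': base = std::string(isUnsigned ? "unsigned " : "") +
                   (howLong == 2 ? "long long" : howLong == 1 ? "long" : "int"); break;
  default:  base = "?"; break;
  }
  while (*s == '*') {       // suffix modifier: pointer
    base += "*";
    ++s;
  }
  return base;
}
int main() {
  const char *sigs[] = {"v", "ULLi", "c*", "Li"};
  for (const char *sig : sigs) {
    const char *p = sig;
    std::cout << sig << " -> " << decode(p) << '\n';
  }
}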
Example no. 13
0
/// GetTypeForDeclarator - Convert the type for the specified
/// declarator to Type instances. Skip the outermost Skip type
/// objects.
QualType Sema::GetTypeForDeclarator(Declarator &D, Scope *S, unsigned Skip) {
  bool OmittedReturnType = false;

  if (D.getContext() == Declarator::BlockLiteralContext
      && Skip == 0
      && !D.getDeclSpec().hasTypeSpecifier()
      && (D.getNumTypeObjects() == 0
          || (D.getNumTypeObjects() == 1
              && D.getTypeObject(0).Kind == DeclaratorChunk::Function)))
    OmittedReturnType = true;

  // long long is a C99 feature.
  if (!getLangOptions().C99 && !getLangOptions().CPlusPlus0x &&
      D.getDeclSpec().getTypeSpecWidth() == DeclSpec::TSW_longlong)
    Diag(D.getDeclSpec().getTypeSpecWidthLoc(), diag::ext_longlong);

  // Determine the type of the declarator. Not all forms of declarator
  // have a type.
  QualType T;
  switch (D.getKind()) {
  case Declarator::DK_Abstract:
  case Declarator::DK_Normal:
  case Declarator::DK_Operator: {
    const DeclSpec& DS = D.getDeclSpec();
    if (OmittedReturnType)
      // We default to a dependent type initially.  Can be modified by
      // the first return statement.
      T = Context.DependentTy;
    else {
      T = ConvertDeclSpecToType(DS);
      if (T.isNull())
        return T;
    }
    break;
  }

  case Declarator::DK_Constructor:
  case Declarator::DK_Destructor:
  case Declarator::DK_Conversion:
    // Constructors and destructors don't have return types. Use
    // "void" instead. Conversion operators will check their return
    // types separately.
    T = Context.VoidTy;
    break;
  }

  // The name we're declaring, if any.
  DeclarationName Name;
  if (D.getIdentifier())
    Name = D.getIdentifier();

  // Walk the DeclTypeInfo, building the recursive type as we go.
  // DeclTypeInfos are ordered from the identifier out, which is
  // opposite of what we want :).
  for (unsigned i = Skip, e = D.getNumTypeObjects(); i != e; ++i) {
    DeclaratorChunk &DeclType = D.getTypeObject(e-i-1+Skip);
    switch (DeclType.Kind) {
    default: assert(0 && "Unknown decltype!");
    case DeclaratorChunk::BlockPointer:
      // If blocks are disabled, emit an error.
      if (!LangOpts.Blocks)
        Diag(DeclType.Loc, diag::err_blocks_disable);
        
      if (DeclType.Cls.TypeQuals)
        Diag(D.getIdentifierLoc(), diag::err_qualified_block_pointer_type);
      if (!T.getTypePtr()->isFunctionType())
        Diag(D.getIdentifierLoc(), diag::err_nonfunction_block_type);
      else
        T = Context.getBlockPointerType(T);
      break;
    case DeclaratorChunk::Pointer:
      T = BuildPointerType(T, DeclType.Ptr.TypeQuals, DeclType.Loc, Name);
      break;
    case DeclaratorChunk::Reference:
      T = BuildReferenceType(T, DeclType.Ref.LValueRef,
                             DeclType.Ref.HasRestrict ? QualType::Restrict : 0,
                             DeclType.Loc, Name);
      break;
    case DeclaratorChunk::Array: {
      DeclaratorChunk::ArrayTypeInfo &ATI = DeclType.Arr;
      Expr *ArraySize = static_cast<Expr*>(ATI.NumElts);
      ArrayType::ArraySizeModifier ASM;
      if (ATI.isStar)
        ASM = ArrayType::Star;
      else if (ATI.hasStatic)
        ASM = ArrayType::Static;
      else
        ASM = ArrayType::Normal;
      T = BuildArrayType(T, ASM, ArraySize, ATI.TypeQuals, DeclType.Loc, Name);
      break;
    }
    case DeclaratorChunk::Function: {
      // If the function declarator has a prototype (i.e. it is not () and
      // does not have a K&R-style identifier list), then the arguments are part
      // of the type, otherwise the argument list is ().
      const DeclaratorChunk::FunctionTypeInfo &FTI = DeclType.Fun;
      
      // C99 6.7.5.3p1: The return type may not be a function or array type.
      if (T->isArrayType() || T->isFunctionType()) {
        Diag(DeclType.Loc, diag::err_func_returning_array_function) << T;
        T = Context.IntTy;
        D.setInvalidType(true);
      }
        
      if (FTI.NumArgs == 0) {
        if (getLangOptions().CPlusPlus) {
          // C++ 8.3.5p2: If the parameter-declaration-clause is empty, the
          // function takes no arguments.
          T = Context.getFunctionType(T, NULL, 0, FTI.isVariadic,FTI.TypeQuals);
        } else if (FTI.isVariadic) {
          // We allow a zero-parameter variadic function in C if the
          // function is marked with the "overloadable"
          // attribute. Scan for this attribute now.
          bool Overloadable = false;
          for (const AttributeList *Attrs = D.getAttributes();
               Attrs; Attrs = Attrs->getNext()) {
            if (Attrs->getKind() == AttributeList::AT_overloadable) {
              Overloadable = true;
              break;
            }
          }

          if (!Overloadable)
            Diag(FTI.getEllipsisLoc(), diag::err_ellipsis_first_arg);
          T = Context.getFunctionType(T, NULL, 0, FTI.isVariadic, 0);
        } else {
          // Simple void foo(), where the incoming T is the result type.
          T = Context.getFunctionNoProtoType(T);
        }
      } else if (FTI.ArgInfo[0].Param == 0) {
        // C99 6.7.5.3p3: Reject int(x,y,z) when it's not a function definition.
        Diag(FTI.ArgInfo[0].IdentLoc, diag::err_ident_list_in_fn_declaration);        
      } else {
        // Otherwise, we have a function with an argument list that is
        // potentially variadic.
        llvm::SmallVector<QualType, 16> ArgTys;
        
        for (unsigned i = 0, e = FTI.NumArgs; i != e; ++i) {
          ParmVarDecl *Param =
            cast<ParmVarDecl>(FTI.ArgInfo[i].Param.getAs<Decl>());
          QualType ArgTy = Param->getType();
          assert(!ArgTy.isNull() && "Couldn't parse type?");

          // Adjust the parameter type.
          assert((ArgTy == adjustParameterType(ArgTy)) && "Unadjusted type?");

          // Look for 'void'.  void is allowed only as a single argument to a
          // function with no other parameters (C99 6.7.5.3p10).  We record
          // int(void) as a FunctionProtoType with an empty argument list.
          if (ArgTy->isVoidType()) {
            // If this is something like 'float(int, void)', reject it.  'void'
            // is an incomplete type (C99 6.2.5p19) and function decls cannot
            // have arguments of incomplete type.
            if (FTI.NumArgs != 1 || FTI.isVariadic) {
              Diag(DeclType.Loc, diag::err_void_only_param);
              ArgTy = Context.IntTy;
              Param->setType(ArgTy);
            } else if (FTI.ArgInfo[i].Ident) {
              // Reject, but continue to parse 'int(void abc)'.
              Diag(FTI.ArgInfo[i].IdentLoc,
                   diag::err_param_with_void_type);
              ArgTy = Context.IntTy;
              Param->setType(ArgTy);
            } else {
              // Reject, but continue to parse 'float(const void)'.
              if (ArgTy.getCVRQualifiers())
                Diag(DeclType.Loc, diag::err_void_param_qualified);
              
              // Do not add 'void' to the ArgTys list.
              break;
            }
          } else if (!FTI.hasPrototype) {
            if (ArgTy->isPromotableIntegerType()) {
              ArgTy = Context.IntTy;
            } else if (const BuiltinType* BTy = ArgTy->getAsBuiltinType()) {
              if (BTy->getKind() == BuiltinType::Float)
                ArgTy = Context.DoubleTy;
            }
          }
          
          ArgTys.push_back(ArgTy);
        }
        T = Context.getFunctionType(T, &ArgTys[0], ArgTys.size(),
                                    FTI.isVariadic, FTI.TypeQuals);
      }
      break;
    }
    case DeclaratorChunk::MemberPointer:
      // The scope spec must refer to a class, or be dependent.
      DeclContext *DC = computeDeclContext(DeclType.Mem.Scope());
      QualType ClsType;
      // FIXME: Extend for dependent types when it's actually supported.
      // See ActOnCXXNestedNameSpecifier.
      if (CXXRecordDecl *RD = dyn_cast_or_null<CXXRecordDecl>(DC)) {
        ClsType = Context.getTagDeclType(RD);
      } else {
        if (DC) {
          Diag(DeclType.Mem.Scope().getBeginLoc(),
               diag::err_illegal_decl_mempointer_in_nonclass)
            << (D.getIdentifier() ? D.getIdentifier()->getName() : "type name")
            << DeclType.Mem.Scope().getRange();
        }
        D.setInvalidType(true);
        ClsType = Context.IntTy;
      }

      // C++ 8.3.3p3: A pointer to member shall not pointer to ... a member
      //   with reference type, or "cv void."
      if (T->isReferenceType()) {
        Diag(DeclType.Loc, diag::err_illegal_decl_pointer_to_reference)
          << (D.getIdentifier() ? D.getIdentifier()->getName() : "type name");
        D.setInvalidType(true);
        T = Context.IntTy;
      }
      if (T->isVoidType()) {
        Diag(DeclType.Loc, diag::err_illegal_decl_mempointer_to_void)
          << (D.getIdentifier() ? D.getIdentifier()->getName() : "type name");
        T = Context.IntTy;
      }

      // Enforce C99 6.7.3p2: "Types other than pointer types derived from
      // object or incomplete types shall not be restrict-qualified."
      if ((DeclType.Mem.TypeQuals & QualType::Restrict) &&
          !T->isIncompleteOrObjectType()) {
        Diag(DeclType.Loc, diag::err_typecheck_invalid_restrict_invalid_pointee)
          << T;
        DeclType.Mem.TypeQuals &= ~QualType::Restrict;
      }

      T = Context.getMemberPointerType(T, ClsType.getTypePtr()).
                    getQualifiedType(DeclType.Mem.TypeQuals);

      break;
    }

    if (T.isNull()) {
      D.setInvalidType(true);
      T = Context.IntTy;
    }

    // See if there are any attributes on this declarator chunk.
    if (const AttributeList *AL = DeclType.getAttrs())
      ProcessTypeAttributeList(T, AL);
  }

  if (getLangOptions().CPlusPlus && T->isFunctionType()) {
    const FunctionProtoType *FnTy = T->getAsFunctionProtoType();
    assert(FnTy && "Why oh why is there not a FunctionProtoType here ?");

    // C++ 8.3.5p4: A cv-qualifier-seq shall only be part of the function type
    // for a nonstatic member function, the function type to which a pointer
    // to member refers, or the top-level function type of a function typedef
    // declaration.
    if (FnTy->getTypeQuals() != 0 &&
        D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_typedef &&
        ((D.getContext() != Declarator::MemberContext &&
          (!D.getCXXScopeSpec().isSet() ||
           !computeDeclContext(D.getCXXScopeSpec())->isRecord())) ||
         D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static)) {
      if (D.isFunctionDeclarator())
        Diag(D.getIdentifierLoc(), diag::err_invalid_qualified_function_type);
      else
        Diag(D.getIdentifierLoc(),
             diag::err_invalid_qualified_typedef_function_type_use);

      // Strip the cv-quals from the type.
      T = Context.getFunctionType(FnTy->getResultType(), FnTy->arg_type_begin(),
                                  FnTy->getNumArgs(), FnTy->isVariadic(), 0);
    }
  }
  
  // If there were any type attributes applied to the decl itself (not the
  // type, apply the type attribute to the type!)
  if (const AttributeList *Attrs = D.getAttributes())
    ProcessTypeAttributeList(T, Attrs);
  
  return T;
}
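The chunk walk above wraps the decl-spec type with one declarator layer at a time. A small compile-time sketch of the same layered construction for the declaration `int *(*fp)(char);` (the typedef names are illustrative):

#include <type_traits>
typedef int *ResultTy;        // first layer applied to the decl-spec: pointer to int
typedef ResultTy FnTy(char);  // then: function taking char, returning int *
typedef FnTy *DeclaredTy;     // finally: pointer to that function
static_assert(std::is_same<DeclaredTy, int *(*)(char)>::value,
              "same type as the declaration `int *(*fp)(char);`");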
Example no. 14
0
ExprResult Sema::LookupInlineAsmIdentifier(CXXScopeSpec &SS,
                                           SourceLocation TemplateKWLoc,
                                           UnqualifiedId &Id,
                                           llvm::InlineAsmIdentifierInfo &Info,
                                           bool IsUnevaluatedContext) {
  Info.clear();

  if (IsUnevaluatedContext)
    PushExpressionEvaluationContext(UnevaluatedAbstract,
                                    ReuseLambdaContextDecl);

  ExprResult Result = ActOnIdExpression(getCurScope(), SS, TemplateKWLoc, Id,
                                        /*trailing lparen*/ false,
                                        /*is & operand*/ false,
                                        /*CorrectionCandidateCallback=*/nullptr,
                                        /*IsInlineAsmIdentifier=*/ true);

  if (IsUnevaluatedContext)
    PopExpressionEvaluationContext();

  if (!Result.isUsable()) return Result;

  Result = CheckPlaceholderExpr(Result.get());
  if (!Result.isUsable()) return Result;

  // Referring to parameters is not allowed in naked functions.
  if (CheckNakedParmReference(Result.get(), *this))
    return ExprError();

  QualType T = Result.get()->getType();

  // For now, reject dependent types.
  if (T->isDependentType()) {
    Diag(Id.getLocStart(), diag::err_asm_incomplete_type) << T;
    return ExprError();
  }

  // Any sort of function type is fine.
  if (T->isFunctionType()) {
    return Result;
  }

  // Otherwise, it needs to be a complete type.
  if (RequireCompleteExprType(Result.get(), diag::err_asm_incomplete_type)) {
    return ExprError();
  }

  // Compute the type size (and array length if applicable?).
  Info.Type = Info.Size = Context.getTypeSizeInChars(T).getQuantity();
  if (T->isArrayType()) {
    const ArrayType *ATy = Context.getAsArrayType(T);
    Info.Type = Context.getTypeSizeInChars(ATy->getElementType()).getQuantity();
    Info.Length = Info.Size / Info.Type;
  }

  // We can work with the expression as long as it's not an r-value.
  if (!Result.get()->isRValue())
    Info.IsVarDecl = true;

  return Result;
}
Example no. 15
0
void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
                                        llvm::Value *SrcPtr, QualType Ty,
                                        bool isVolatile, unsigned Alignment) {
  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");

  if (getContext().getLangOptions().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
      assert((Record->hasTrivialCopyConstructor() || 
              Record->hasTrivialCopyAssignment() ||
              Record->hasTrivialMoveConstructor() ||
              Record->hasTrivialMoveAssignment()) &&
             "Trying to aggregate-copy a type without a trivial copy "
             "constructor or assignment operator");
      // Ignore empty classes in C++.
      if (Record->isEmpty())
        return;
    }
  }
  
  // Aggregate assignment turns into llvm.memcpy.  This is almost valid per
  // C99 6.5.16.1p3, which states "If the value being stored in an object is
  // read from another object that overlaps in anyway the storage of the first
  // object, then the overlap shall be exact and the two objects shall have
  // qualified or unqualified versions of a compatible type."
  //
  // memcpy is not defined if the source and destination pointers are exactly
  // equal, but other compilers do this optimization, and almost every memcpy
  // implementation handles this case safely.  If there is a libc that does not
  // safely handle this, we can add a target hook.

  // Get size and alignment info for this aggregate.
  std::pair<CharUnits, CharUnits> TypeInfo = 
    getContext().getTypeInfoInChars(Ty);

  if (!Alignment)
    Alignment = TypeInfo.second.getQuantity();

  // FIXME: Handle variable sized types.

  // FIXME: If we have a volatile struct, the optimizer can remove what might
  // appear to be `extra' memory ops:
  //
  // volatile struct { int i; } a, b;
  //
  // int main() {
  //   a = b;
  //   a = b;
  // }
  //
  // we need to use a different call here.  We use isVolatile to indicate when
  // either the source or the destination is volatile.

  llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
  llvm::Type *DBP =
    llvm::Type::getInt8PtrTy(getLLVMContext(), DPT->getAddressSpace());
  DestPtr = Builder.CreateBitCast(DestPtr, DBP);

  llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
  llvm::Type *SBP =
    llvm::Type::getInt8PtrTy(getLLVMContext(), SPT->getAddressSpace());
  SrcPtr = Builder.CreateBitCast(SrcPtr, SBP);

  // Don't do any of the memmove_collectable tests if GC isn't set.
  if (CGM.getLangOptions().getGC() == LangOptions::NonGC) {
    // fall through
  } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
    RecordDecl *Record = RecordTy->getDecl();
    if (Record->hasObjectMember()) {
      CharUnits size = TypeInfo.first;
      llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
      llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
      CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr, 
                                                    SizeVal);
      return;
    }
  } else if (Ty->isArrayType()) {
    QualType BaseType = getContext().getBaseElementType(Ty);
    if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
      if (RecordTy->getDecl()->hasObjectMember()) {
        CharUnits size = TypeInfo.first;
        llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
        llvm::Value *SizeVal = 
          llvm::ConstantInt::get(SizeTy, size.getQuantity());
        CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr, 
                                                      SizeVal);
        return;
      }
    }
  }
  
  Builder.CreateMemCpy(DestPtr, SrcPtr,
                       llvm::ConstantInt::get(IntPtrTy, 
                                              TypeInfo.first.getQuantity()),
                       Alignment, isVolatile);
}
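A minimal sketch of the source pattern this lowering covers: assigning one trivially copyable aggregate to another behaves like copying its bytes, which is why the code above emits llvm.memcpy.

#include <cstring>
struct S { int a[16]; };
void copyByAssignment(S *d, const S *s) { *d = *s; }                   // aggregate copy
void copyByMemcpy(S *d, const S *s) { std::memcpy(d, s, sizeof(S)); }  // its typical lowering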
Example no. 16
0
// caller must free returned value
const EmuVal* eval_rexpr(const Expr* e){
	errs() << "\nDEBUG: about to eval rexpr:\n";
	e->dump();

	if(isa<IntegerLiteral>(e)){
		const IntegerLiteral *obj = (const IntegerLiteral*)e;
		APInt i = obj->getValue();
		if(i.slt(EMU_MIN_INT) || i.sgt(EMU_MAX_INT)){
			e->dump();
			cant_handle();
		}
		return new EmuNum<NUM_TYPE_INT>(i);
	} else if(isa<CharacterLiteral>(e)){
		const CharacterLiteral *obj = (const CharacterLiteral*)e;
		unsigned int i = obj->getValue();
		if(i > 127){
			e->dump();
			cant_handle();
		}
		return new EmuNum<NUM_TYPE_CHAR>(new APInt(8, i, true));
	} else if(isa<UnaryOperator>(e)){
		const UnaryOperator *obj = (const UnaryOperator*)e;
		const Expr* sub = obj->getSubExpr();
		const auto op = obj->getOpcode();
		switch(op){
		case UO_AddrOf:
		{
			lvalue arg = eval_lexpr(sub);
			return new EmuPtr(arg.ptr, e->getType());
		}
		case UO_LNot:
		case UO_Minus:
		{
			const EmuVal* arg = eval_rexpr(sub);
			if(!arg->obj_type->isIntegerType()){
				cant_cast();
			}
			if(op == UO_LNot){
				return ((const EmuNumGeneric*)arg)->lnot();
			} else if (op == UO_Minus){
				return ((const EmuNumGeneric*)arg)->neg();
			}
		}
		case UO_Deref:
		case UO_Extension:
		case UO_Imag:
		case UO_Real:
		case UO_Not:
		case UO_PostInc:
		case UO_PostDec:
		case UO_PreInc:
		case UO_PreDec:
		case UO_Plus:
		default:
			llvm::errs() << "Got opcode " << obj->getOpcode() << "\n";
			cant_handle();
		}
	} else if(isa<BinaryOperator>(e)){
		const BinaryOperator* ex = (const BinaryOperator*)e;
		BinaryOperatorKind op = ex->getOpcode();

		// right always an rexpr
		const EmuVal *right = eval_rexpr(ex->getRHS());

		switch(op){
		case BO_Assign:
		{
			lvalue left = eval_lexpr(ex->getLHS());
			const EmuVal* ans = right->cast_to(left.type);
			delete right;
			left.ptr.block->write(ans, left.ptr.offset);
			return ans;
		}
		case BO_LT:
		case BO_GT:
		case BO_LE:
		case BO_GE:
		case BO_EQ:
		case BO_NE:
		{
			const EmuVal *left = eval_rexpr(ex->getLHS());
			QualType tl = left->obj_type.getCanonicalType();
			QualType tr = right->obj_type.getCanonicalType();
			if(tl != IntType || tr != IntType){
				left->obj_type.dump();
				right->obj_type.dump();
				cant_handle();
			}
			const llvm::APInt* lval = &((const EmuNum<NUM_TYPE_INT>*)left)->val;
			llvm::APInt rval = ((const EmuNum<NUM_TYPE_INT>*)right)->val;
			int ans;
			if(lval->isNegative()){
				if(op == BO_LT)    ans = (lval->slt(rval))?1:0;
				else if(op==BO_GT) ans = (lval->sgt(rval))?1:0;
				else if(op==BO_LE) ans = (lval->sle(rval))?1:0;
				else if(op==BO_GE) ans = (lval->sge(rval))?1:0;
				else if(op==BO_EQ) ans = (lval->eq( rval))?1:0;
				else if(op==BO_NE) ans = (lval->ne( rval))?1:0;
			} else if(rval.isNegative()){
				if(op == BO_LT)    ans = 0;
				else if(op==BO_GT) ans = 1;
				else if(op==BO_LE) ans = 0;
				else if(op==BO_GE) ans = 1;
				else if(op==BO_EQ) ans = 0;
				else if(op==BO_NE) ans = 1;
			} else {
				if(op == BO_LT)    ans = (lval->ult(rval))?1:0;
				else if(op==BO_GT) ans = (lval->ugt(rval))?1:0;
				else if(op==BO_LE) ans = (lval->ule(rval))?1:0;
				else if(op==BO_GE) ans = (lval->uge(rval))?1:0;
				else if(op==BO_EQ) ans = (lval->eq( rval))?1:0;
				else if(op==BO_NE) ans = (lval->ne( rval))?1:0;
			}
			delete left;
			delete right;
			return new EmuNum<NUM_TYPE_INT>(APInt(32, apint_signed_repr(ans), true));
		}
		case BO_AddAssign:
		case BO_SubAssign:
		{
			lvalue left = eval_lexpr(ex->getLHS());
			QualType tl = left.type.getCanonicalType();
			QualType tr = right->obj_type.getCanonicalType();
			if(tl != IntType || tr != IntType){
				left.type.dump();
				right->obj_type.dump();
				cant_handle();
			}
			void* ptr = &((char*)left.ptr.block->data)[left.ptr.offset];
			size_t space = left.ptr.block->size;
			if(space < 4 || space-4 < left.ptr.offset){
				bad_memread();
			}
			const EmuNum<NUM_TYPE_INT> value(ptr);
			const EmuNum<NUM_TYPE_INT>* result;
			if(op == BO_AddAssign) result = value.add((const EmuNum<NUM_TYPE_INT>*)right);
			else                   result = value.sub((const EmuNum<NUM_TYPE_INT>*)right);
			left.ptr.block->write(result, left.ptr.offset);
			delete right;
			return result;
		}
		case BO_Add:
		case BO_Sub:
		case BO_Mul:
		case BO_Div:
		case BO_And:
		case BO_Or:
		{
			const EmuVal* left = eval_rexpr(ex->getLHS());
			if(!right->obj_type->isIntegerType()){
				right->obj_type.dump();
				cant_cast();
			}
			const EmuNumGeneric* trueright = (const EmuNumGeneric*)right;
			const EmuVal* retval;

			QualType tl = left->obj_type;
			// special case: add integer to pointer
			if(tl->isPointerType()){
				int n;
				if(op == BO_Add) n = trueright->val.getSExtValue();
				else if(op == BO_Sub) n = -trueright->val.getSExtValue();
				else err_exit("Undefined op on pointer");
				
				QualType sub = tl->getAs<PointerType>()->getPointeeType();
				int s = getSizeOf(sub);
				const EmuPtr* lp = (const EmuPtr*)left;
				retval = new EmuPtr(mem_ptr(lp->u.block,lp->offset+n*s), tl);
			} else if(tl->isIntegerType()){
				const EmuNumGeneric* trueleft = (const EmuNumGeneric*)left;
				if(op == BO_Add)      retval = trueleft->add(trueright);
				else if(op == BO_Sub) retval = trueleft->sub(trueright);
				else if(op == BO_Mul) retval = trueleft->mul(trueright);
				else if(op == BO_Div) retval = trueleft->div(trueright);
				else if(op == BO_Or) retval = trueleft->_or(trueright);
				else if(op == BO_And)retval = trueleft->_and(trueright);
				else cant_cast();
			} else {
				tl.dump();
				cant_cast();
			}

			delete left;
			delete right;

			return retval;
		}
		case BO_PtrMemD:
		case BO_PtrMemI:
		case BO_Rem:
		case BO_Shl:
		case BO_Shr:
		case BO_LAnd:
		case BO_Xor:
		case BO_LOr:
		case BO_MulAssign:
		case BO_DivAssign:
		case BO_RemAssign:
		case BO_ShlAssign:
		case BO_ShrAssign:
		case BO_AndAssign:
		case BO_XorAssign:
		case BO_OrAssign:
		case BO_Comma:
		default:
			e->dump();
			cant_handle();
		}
	} else if(isa<CastExpr>(e)){
		const CastExpr* expr = (const CastExpr*)e;
		const Expr* sub = expr->getSubExpr();
		switch(expr->getCastKind()){
		case CK_LValueToRValue:
			return from_lvalue(eval_lexpr(sub));
		case CK_NoOp:
			return eval_rexpr(sub);
		case CK_BitCast:
		{
			if(isa<ExplicitCastExpr>(e)){
				const ExplicitCastExpr* expr = (const ExplicitCastExpr*)e;
				return eval_rexpr(sub)->cast_to(expr->getTypeAsWritten());
			}
			// else ImplicitCastExpr
			return eval_rexpr(sub)->cast_to(e->getType());
		}
		case CK_IntegralCast:
		{
			return eval_rexpr(sub)->cast_to(expr->getType());
		}
		case CK_FunctionToPointerDecay:
		{
			lvalue l = eval_lexpr(sub);
			if(!l.type->isFunctionType()){
				e->dump();
				cant_cast();
			}
			return new EmuPtr(l.ptr, sources[curr_source]->getPointerType(l.type));
		}
		case CK_ArrayToPointerDecay:
		{
			lvalue l = eval_lexpr(sub);
			const EmuVal* ans = new EmuPtr(l.ptr, expr->getType());
			return ans;
		}
		case CK_BuiltinFnToFnPtr:
		{
			if(!isa<DeclRefExpr>(sub)){
				err_exit("Don't know how to convert builtin function");
			}
			std::string name = ((const DeclRefExpr*)sub)->getDecl()->getNameAsString();
			const EmuFunc* f = get_external_func(name, sub->getType());
			mem_block* ptr = new mem_block(MEM_TYPE_STATIC, f);
			delete f;
			return new EmuPtr(mem_ptr(ptr,0), expr->getType());
		}
		case CK_NullToPointer:
		{
			return new EmuPtr(mem_ptr(nullptr,0), expr->getType());
		}
		case CK_PointerToIntegral:
		{
			const EmuVal* ptr = eval_rexpr(sub);
			if(!ptr->obj_type->isPointerType()){
				err_exit("Expected pointer");
			}
			const EmuPtr* p = (const EmuPtr*)ptr;
			if(p->status != STATUS_DEFINED) cant_handle();
			uint64_t segment;
			uint64_t offset = p->offset;
			if(p->u.block == nullptr){
				segment = 0;
			} else {
				segment = p->u.block->id;
			}
			delete ptr;
			if((expr->getType()->getAs<BuiltinType>())->isSignedInteger()){
				return new EmuNum<NUM_TYPE_LONGLONG>(APInt(64, (segment << 32) + offset, true));
			} else {
				return new EmuNum<NUM_TYPE_ULONGLONG>(APInt(64, (segment << 32) + offset, false));				
			}
		}
		case CK_VectorSplat:
		case CK_IntegralToBoolean:
		case CK_IntegralToFloating:
		case CK_FloatingToIntegral:
		case CK_FloatingToBoolean:
		case CK_FloatingCast:
		case CK_CPointerToObjCPointerCast:
		case CK_BlockPointerToObjCPointerCast:
		case CK_AnyPointerToBlockPointerCast:
		case CK_ObjCObjectLValueCast:
		case CK_FloatingRealToComplex:
		case CK_FloatingComplexToReal:
		case CK_FloatingComplexToBoolean:
		case CK_FloatingComplexCast:
		case CK_FloatingComplexToIntegralComplex:
		case CK_IntegralRealToComplex:
		case CK_IntegralComplexToReal:
		case CK_IntegralComplexToBoolean:
		case CK_IntegralComplexCast:
		case CK_IntegralComplexToFloatingComplex:
		case CK_ARCProduceObject:
		case CK_ARCConsumeObject:
		case CK_ARCReclaimReturnedObject:
		case CK_ARCExtendBlockObject:
		case CK_AtomicToNonAtomic:
		case CK_NonAtomicToAtomic:
		case CK_CopyAndAutoreleaseBlockObject:
		case CK_ZeroToOCLEvent:
		case CK_AddressSpaceConversion:
		case CK_ReinterpretMemberPointer:
		case CK_UserDefinedConversion:
		case CK_ConstructorConversion:
		case CK_IntegralToPointer:
		case CK_PointerToBoolean:
		case CK_ToVoid:
		default:
			llvm::errs() << "\n\n";
			e->dump();
			cant_cast();
		}
	} else if(isa<CallExpr>(e)){
		const CallExpr* expr = (const CallExpr*)e;
		const Expr* const* args = expr->getArgs();
		const Expr* callee = expr->getCallee();

		llvm::errs() << "DOUG DEBUG: executing the following call:\n";
		callee->dump();

		const EmuVal* f = eval_rexpr(callee);
		if(f->status != STATUS_DEFINED || !f->obj_type->isFunctionPointerType()){
			f->obj_type.dump();
			err_exit("Calling an invalid function");
		}

		const EmuPtr* p = (const EmuPtr*)f;
		if(p->u.block->memtype == MEM_TYPE_EXTERN){
			err_exit("Tried to call an unimplemented function");
		}

		const EmuFunc* func = (const EmuFunc*)from_lvalue(lvalue(p->u.block, ((const PointerType*)p->obj_type.getTypePtr())->getPointeeType(), p->offset));
		uint32_t fid = func->func_id;
		
		const EmuVal* retval;

		add_stack_frame();
		if(fid < NUM_EXTERNAL_FUNCTIONS){
			if(is_lvalue_based_macro(fid)){
				// special handling for va_args stuff
				for(unsigned int i=0; i < expr->getNumArgs(); i++){
					const Expr* arg = args[i];
					while(isa<ImplicitCastExpr>(arg)){
						arg = ((const ImplicitCastExpr*)arg)->getSubExpr();
					}
					if(!isa<DeclRefExpr>(arg)){
						err_exit("Passed non-variable as lvalue to builtin macro");
					}
					std::string name = ((const DeclRefExpr*)arg)->getDecl()->getNameAsString();
					std::unordered_map<std::string,std::deque<std::pair<int,int> > >::const_iterator list = stack_var_map.find(name);
					if(list == stack_var_map.end()){
						err_exit("Can't find appropriate lvalue for macro");
					}
					const auto test = list->second;
					const auto item = test.back();
					const EmuVal* val = new EmuStackPos(item.first, item.second);
					mem_block* storage = new mem_block(MEM_TYPE_STACK, val);
					add_stack_var("", lvalue(storage,val->obj_type,0));
					delete val;
				}
			} else {
				// we are dealing with an external function
				for(unsigned int i=0; i < expr->getNumArgs(); i++){
					const EmuVal* val = eval_rexpr(args[i]);
					mem_block* storage = new mem_block(MEM_TYPE_STACK, val);
					add_stack_var("", lvalue(storage,val->obj_type,0));
					delete val;
				}
			}
			retval = call_external(fid);
		} else {
			const auto it = global_functions.find(fid);
			const FunctionDecl* defn = (const FunctionDecl*)it->second.second;

			for(unsigned int i=0; i < expr->getNumArgs(); i++){
				const EmuVal* val = eval_rexpr(args[i]);
				mem_block* storage = new mem_block(MEM_TYPE_STACK, val);
				std::string name;
				if(i >= defn->getNumParams()){
					name = ""; // relevant for later args of e.g. printf(char*, ...)
				} else {
					name = defn->getParamDecl(i)->getNameAsString();
				}
				llvm::errs() << "DOUG DEBUG: adding stack variable "<<name<<" for arg "<<i<<" of internal function call (numparams="<< defn->getNumParams() <<")\n";
				defn->dump();
				
				add_stack_var(name, lvalue(storage,val->obj_type,0));
				delete val;
			}

			int save = curr_source;
			curr_source = it->second.first;
			llvm::errs() << "DOUG DEBUG: actually executing:\n";
			defn->getBody()->dump();
			retval = exec_stmt(defn->getBody());
			llvm::errs() << "DOUG DEBUG: call returned with retval at "<<((const void*)retval)<<"\n";
			curr_source = save;
		}
		llvm::errs() << "DOUG DEBUG: popping frame leaving call\n";
		pop_stack_frame();
		return retval;
	} else if(isa<UnaryExprOrTypeTraitExpr>(e)){
		const UnaryExprOrTypeTraitExpr* expr = (const UnaryExprOrTypeTraitExpr*)e;
		switch(expr->getKind()){
		case UETT_SizeOf:
		{
			QualType qt = expr->getArgumentType();
			const EmuVal* fake = from_lvalue(lvalue(nullptr, qt, 0));
			uint64_t thesize = (uint64_t)fake->size();
			delete fake;
			return new EmuNum<NUM_TYPE_ULONG>(APInt(32, thesize, false));
		}
		case UETT_AlignOf:
		case UETT_VecStep:
		default:
			e->dump();
			cant_handle();
		}
	} else if(isa<InitListExpr>(e)){
		const InitListExpr* expr = (const InitListExpr*)e;
		unsigned int n = expr->getNumInits();
		QualType qt = expr->getType();
		if(qt->isArrayType()){
			const EmuPtr* array = (const EmuPtr*)from_lvalue(lvalue(nullptr, qt, 0));
			if(array->status != STATUS_DEFINED) cant_handle();
			size_t loc = 0;
			for(unsigned int i = 0; i < n; i++){
				const EmuVal* curr = eval_rexpr(expr->getInit(i));
				array->u.block->write(curr, loc);
				loc += curr->size();
				delete curr;
			}
			return array;
		} else if(qt->isStructureType()){
			unsigned int n = expr->getNumInits();
			const EmuVal** arr = new const EmuVal*[n];
			for(unsigned int i = 0; i < n; i++){
				arr[i] = eval_rexpr(expr->getInit(i));
			}
			return new EmuStruct(STATUS_DEFINED, qt, n, arr);
		}
		cant_handle();
	} else if(isa<ImplicitValueInitExpr>(e)){
		return zero_init(e->getType());
	} else if(isa<ParenExpr>(e)){
		return eval_rexpr(((const ParenExpr*)e)->getSubExpr());
	}
	e->dump();
	cant_handle();
}
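One detail from the comparison branch above (BO_LT .. BO_NE) worth illustrating: the same bit pattern compares differently under signed and unsigned interpretation, which is why the code splits on isNegative() before choosing slt/ult and friends. A minimal sketch:

#include <cassert>
#include <cstdint>
int main() {
  std::int32_t a = -1, b = 2;
  assert(a < b);                                    // signed comparison: -1 < 2
  assert(!(std::uint32_t(a) < std::uint32_t(b)));   // same bits unsigned: 0xFFFFFFFF > 2
  return 0;
}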
Example no. 17
0
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
Action::OwningExprResult
Sema::ActOnCXXTypeConstructExpr(SourceRange TypeRange, TypeTy *TypeRep,
                                SourceLocation LParenLoc,
                                MultiExprArg exprs,
                                SourceLocation *CommaLocs,
                                SourceLocation RParenLoc) {
  assert(TypeRep && "Missing type!");
  QualType Ty = QualType::getFromOpaquePtr(TypeRep);
  unsigned NumExprs = exprs.size();
  Expr **Exprs = (Expr**)exprs.get();
  SourceLocation TyBeginLoc = TypeRange.getBegin();
  SourceRange FullRange = SourceRange(TyBeginLoc, RParenLoc);

  if (Ty->isDependentType() ||
      CallExpr::hasAnyTypeDependentArguments(Exprs, NumExprs)) {
    exprs.release();
    return Owned(new (Context) CXXTemporaryObjectExpr(0, Ty, TyBeginLoc,
                                                      Exprs, NumExprs,
                                                      RParenLoc));
  }


  // C++ [expr.type.conv]p1:
  // If the expression list is a single expression, the type conversion
  // expression is equivalent (in definedness, and if defined in meaning) to the
  // corresponding cast expression.
  //
  if (NumExprs == 1) {
    if (CheckCastTypes(TypeRange, Ty, Exprs[0]))
      return ExprError();
    exprs.release();
    return Owned(new (Context) CXXFunctionalCastExpr(Ty.getNonReferenceType(),
                                                     Ty, TyBeginLoc, Exprs[0],
                                                     RParenLoc));
  }

  if (const RecordType *RT = Ty->getAsRecordType()) {
    CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());

    if (NumExprs > 1 || Record->hasUserDeclaredConstructor()) {
      CXXConstructorDecl *Constructor
        = PerformInitializationByConstructor(Ty, Exprs, NumExprs,
                                             TypeRange.getBegin(),
                                             SourceRange(TypeRange.getBegin(),
                                                         RParenLoc),
                                             DeclarationName(),
                                             IK_Direct);

      if (!Constructor)
        return ExprError();

      exprs.release();
      return Owned(new (Context) CXXTemporaryObjectExpr(Constructor, Ty,
                                                        TyBeginLoc,  Exprs,
                                                        NumExprs, RParenLoc));
    }

    // Fall through to value-initialize an object of class type that
    // doesn't have a user-declared default constructor.
  }

  // C++ [expr.type.conv]p1:
  // If the expression list specifies more than a single value, the type shall
  // be a class with a suitably declared constructor.
  //
  if (NumExprs > 1)
    return ExprError(Diag(CommaLocs[0],
                          diag::err_builtin_func_cast_more_than_one_arg)
      << FullRange);

  assert(NumExprs == 0 && "Expected 0 expressions");

  // C++ [expr.type.conv]p2:
  // The expression T(), where T is a simple-type-specifier for a non-array
  // complete object type or the (possibly cv-qualified) void type, creates an
  // rvalue of the specified type, which is value-initialized.
  //
  if (Ty->isArrayType())
    return ExprError(Diag(TyBeginLoc,
                          diag::err_value_init_for_array_type) << FullRange);
  if (!Ty->isDependentType() && !Ty->isVoidType() &&
      RequireCompleteType(TyBeginLoc, Ty,
                          diag::err_invalid_incomplete_type_use, FullRange))
    return ExprError();

  if (RequireNonAbstractType(TyBeginLoc, Ty,
                             diag::err_allocation_of_abstract_type))
    return ExprError();
  
  exprs.release();
  return Owned(new (Context) CXXZeroInitValueExpr(Ty, TyBeginLoc, RParenLoc));
}
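The source forms described in the doc comment above, as a small standalone sketch (the Point class is illustrative):

struct Point {
  int x, y;
  Point(int px, int py) : x(px), y(py) {}
};
int main() {
  int a = int(3.5);       // function-style cast: same meaning as (int)3.5
  int b = int();          // T() for a non-class type: value-initialized to 0
  Point p = Point(1, 2);  // class type: built with the matching constructor
  // T() is rejected for array types, matching err_value_init_for_array_type above.
  return a + b + p.x + p.y - 6;  // 3 + 0 + 1 + 2 - 6 == 0
}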
Example no. 18
0
// FIXME: should rewrite according to the cast kind.
SVal SValBuilder::evalCast(SVal val, QualType castTy, QualType originalTy) {
  castTy = Context.getCanonicalType(castTy);
  originalTy = Context.getCanonicalType(originalTy);
  if (val.isUnknownOrUndef() || castTy == originalTy)
    return val;

  // For const casts, just propagate the value.
  if (!castTy->isVariableArrayType() && !originalTy->isVariableArrayType())
    if (haveSimilarTypes(Context, Context.getPointerType(castTy),
                                  Context.getPointerType(originalTy)))
      return val;
  
  // Check for casts from pointers to integers.
  if (castTy->isIntegerType() && Loc::isLocType(originalTy))
    return evalCastFromLoc(cast<Loc>(val), castTy);

  // Check for casts from integers to pointers.
  if (Loc::isLocType(castTy) && originalTy->isIntegerType()) {
    if (nonloc::LocAsInteger *LV = dyn_cast<nonloc::LocAsInteger>(&val)) {
      if (const MemRegion *R = LV->getLoc().getAsRegion()) {
        StoreManager &storeMgr = StateMgr.getStoreManager();
        R = storeMgr.castRegion(R, castTy);
        return R ? SVal(loc::MemRegionVal(R)) : UnknownVal();
      }
      return LV->getLoc();
    }
    return dispatchCast(val, castTy);
  }

  // Just pass through function and block pointers.
  if (originalTy->isBlockPointerType() || originalTy->isFunctionPointerType()) {
    assert(Loc::isLocType(castTy));
    return val;
  }

  // Check for casts from array type to another type.
  if (originalTy->isArrayType()) {
    // We will always decay to a pointer.
    val = StateMgr.ArrayToPointer(cast<Loc>(val));

    // Are we casting from an array to a pointer?  If so just pass on
    // the decayed value.
    if (castTy->isPointerType())
      return val;

    // Are we casting from an array to an integer?  If so, cast the decayed
    // pointer value to an integer.
    assert(castTy->isIntegerType());

    // FIXME: Keep these here for now in case we decide soon that we
    // need the original decayed type.
    //    QualType elemTy = cast<ArrayType>(originalTy)->getElementType();
    //    QualType pointerTy = C.getPointerType(elemTy);
    return evalCastFromLoc(cast<Loc>(val), castTy);
  }

  // Check for casts from a region to a specific type.
  if (const MemRegion *R = val.getAsRegion()) {
    // Handle other casts of locations to integers.
    if (castTy->isIntegerType())
      return evalCastFromLoc(loc::MemRegionVal(R), castTy);

    // FIXME: We should handle the case where we strip off view layers to get
    //  to a desugared type.
    if (!Loc::isLocType(castTy)) {
      // FIXME: There can be gross cases where one casts the result of a function
      // (that returns a pointer) to some other value that happens to fit
      // within that pointer value.  We currently have no good way to
      // model such operations.  When this happens, the underlying operation
      // is that the caller is reasoning about bits.  Conceptually we are
      // layering a "view" of a location on top of those bits.  Perhaps
      // we need to be more lazy about mutual possible views, even on an
      // SVal?  This may be necessary for bit-level reasoning as well.
      return UnknownVal();
    }

    // We get a symbolic function pointer for a dereference of a function
    // pointer, but it is of function type. Example:

    //  struct FPRec {
    //    void (*my_func)(int * x);
    //  };
    //
    //  int bar(int x);
    //
    //  int f1_a(struct FPRec* foo) {
    //    int x;
    //    (*foo->my_func)(&x);
    //    return bar(x)+1; // no-warning
    //  }

    assert(Loc::isLocType(originalTy) || originalTy->isFunctionType() ||
           originalTy->isBlockPointerType() || castTy->isReferenceType());

    StoreManager &storeMgr = StateMgr.getStoreManager();

    // Delegate to store manager to get the result of casting a region to a
    // different type.  If the MemRegion* returned is NULL, this expression
    // evaluates to UnknownVal.
    R = storeMgr.castRegion(R, castTy);
    return R ? SVal(loc::MemRegionVal(R)) : UnknownVal();
  }

  return dispatchCast(val, castTy);
}
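As an illustration, the following assumed user code (not taken from the analyzer sources) exercises the main branches above: pointer-to-integer, integer-to-pointer, array decay, and function-pointer pass-through.

#include <cstdint>

void casts(int *p, void (*fp)()) {
  int arr[4] = {};
  std::uintptr_t bits  = reinterpret_cast<std::uintptr_t>(p);   // pointer -> integer
  int *back            = reinterpret_cast<int *>(bits);         // integer -> pointer
  int *decayed         = arr;                                   // array -> pointer (decay)
  std::uintptr_t abits = reinterpret_cast<std::uintptr_t>(arr); // array -> integer
  void (*fp2)()        = fp;                                    // function pointers pass through
  (void)back; (void)decayed; (void)abits; (void)fp2;
}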
bool FindUninitializedFields::isNonUnionUninit(const TypedValueRegion *R,
                                               FieldChainInfo LocalChain) {
  assert(R->getValueType()->isRecordType() &&
         !R->getValueType()->isUnionType() &&
         "This method only checks non-union record objects!");

  const RecordDecl *RD =
      R->getValueType()->getAs<RecordType>()->getDecl()->getDefinition();
  assert(RD && "Referred record has no definition");

  bool ContainsUninitField = false;

  // Are all of this non-union's fields initialized?
  for (const FieldDecl *I : RD->fields()) {

    const auto FieldVal =
        State->getLValue(I, loc::MemRegionVal(R)).castAs<loc::MemRegionVal>();
    const auto *FR = FieldVal.getRegionAs<FieldRegion>();
    QualType T = I->getType();

    // If LocalChain already contains FR, then we encountered a cyclic
    // reference. In this case, region FR is already being checked at an
    // earlier node in the directed tree.
    if (LocalChain.contains(FR))
      return false;

    if (T->isStructureOrClassType()) {
      if (isNonUnionUninit(FR, LocalChain.add(RegularField(FR))))
        ContainsUninitField = true;
      continue;
    }

    if (T->isUnionType()) {
      if (isUnionUninit(FR)) {
        if (addFieldToUninits(LocalChain.add(RegularField(FR))))
          ContainsUninitField = true;
      } else
        IsAnyFieldInitialized = true;
      continue;
    }

    if (T->isArrayType()) {
      IsAnyFieldInitialized = true;
      continue;
    }

    if (T->isAnyPointerType() || T->isReferenceType() ||
        T->isBlockPointerType()) {
      if (isPointerOrReferenceUninit(FR, LocalChain))
        ContainsUninitField = true;
      continue;
    }

    if (isPrimitiveType(T)) {
      SVal V = State->getSVal(FieldVal);

      if (isPrimitiveUninit(V)) {
        if (addFieldToUninits(LocalChain.add(RegularField(FR))))
          ContainsUninitField = true;
      }
      continue;
    }

    llvm_unreachable("All cases are handled!");
  }

  // Checking bases.
  // FIXME: As of now, because of `willObjectBeAnalyzedLater`, objects whose
  // type is a descendant of another type will emit warnings for uninitialized
  // inherited members.
  // This is not the only way to analyze bases of an object -- if we didn't
  // filter them out, and didn't analyze the bases, this checker would run for
  // each base of the object in order of base initialization and in theory
  // would find every uninitialized field. This approach could also make
  // handling diamond inheritance easier.
  //
  // This rule (that a descendant type's constructor is responsible for
  // initializing inherited data members) is not obvious, but it should be.
  const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
  if (!CXXRD)
    return ContainsUninitField;

  for (const CXXBaseSpecifier &BaseSpec : CXXRD->bases()) {
    const auto *BaseRegion = State->getLValue(BaseSpec, R)
                                 .castAs<loc::MemRegionVal>()
                                 .getRegionAs<TypedValueRegion>();

    if (isNonUnionUninit(BaseRegion, LocalChain))
      ContainsUninitField = true;
  }

  return ContainsUninitField;
}
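A minimal sketch of the kind of object this walk is meant to flag (assumed checker input, not part of its sources): the primitive and pointer fields left untouched by the constructor are reported, the array field is skipped, and the initialized field is not reported.

struct Node {
  int  Initialized;
  char C;        // primitive field, never written -> reported
  int *P;        // pointer field, never written   -> reported
  int  Arr[8];   // array fields are skipped by the loop above
  Node() : Initialized(0) {}
};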
Esempio n. 20
0
DSAStackTy::DSAVarData DSAStackTy::getTopDSA(VarDecl *D) {
  DSAVarData DVar;

  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
  // in a Construct, C/C++, predetermined, p.1]
  //  Variables appearing in threadprivate directives are threadprivate.
  if (D->getTLSKind() != VarDecl::TLS_None) {
    DVar.CKind = OMPC_threadprivate;
    return DVar;
  }
  if (Stack[0].SharingMap.count(D)) {
    DVar.RefExpr = Stack[0].SharingMap[D].RefExpr;
    DVar.CKind = OMPC_threadprivate;
    return DVar;
  }

  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
  // in a Construct, C/C++, predetermined, p.1]
  // Variables with automatic storage duration that are declared in a scope
  // inside the construct are private.
  OpenMPDirectiveKind Kind = getCurrentDirective();
  if (Kind != OMPD_parallel) {
    if (isOpenMPLocal(D, llvm::next(Stack.rbegin())) && D->isLocalVarDecl() &&
        (D->getStorageClass() == SC_Auto ||
         D->getStorageClass() == SC_None)) {
      DVar.CKind = OMPC_private;
      return DVar;
    }
  }

  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
  // in a Construct, C/C++, predetermined, p.4]
  //  Static data members are shared.
  if (D->isStaticDataMember()) {
    // Variables with const-qualified type having no mutable member may be listed
    // in a firstprivate clause, even if they are static data members.
    DSAVarData DVarTemp = hasDSA(D, OMPC_firstprivate);
    if (DVarTemp.CKind == OMPC_firstprivate && DVarTemp.RefExpr)
      return DVar;

    DVar.CKind = OMPC_shared;
    return DVar;
  }

  QualType Type = D->getType().getNonReferenceType().getCanonicalType();
  bool IsConstant = Type.isConstant(Actions.getASTContext());
  while (Type->isArrayType()) {
    QualType ElemType = cast<ArrayType>(Type.getTypePtr())->getElementType();
    Type = ElemType.getNonReferenceType().getCanonicalType();
  }
  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
  // in a Construct, C/C++, predetermined, p.6]
  //  Variables with const qualified type having no mutable member are
  //  shared.
  CXXRecordDecl *RD = Actions.getLangOpts().CPlusPlus ?
                                Type->getAsCXXRecordDecl() : 0;
  if (IsConstant &&
      !(Actions.getLangOpts().CPlusPlus && RD && RD->hasMutableFields())) {
    // Variables with const-qualified type having no mutable member may be
    // listed in a firstprivate clause, even if they are static data members.
    DSAVarData DVarTemp = hasDSA(D, OMPC_firstprivate);
    if (DVarTemp.CKind == OMPC_firstprivate && DVarTemp.RefExpr)
      return DVar;

    DVar.CKind = OMPC_shared;
    return DVar;
  }

  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
  // in a Construct, C/C++, predetermined, p.7]
  //  Variables with static storage duration that are declared in a scope
  //  inside the construct are shared.
  if (D->isStaticLocal()) {
    DVar.CKind = OMPC_shared;
    return DVar;
  }

  // Explicitly specified attributes and local variables with predetermined
  // attributes.
  if (Stack.back().SharingMap.count(D)) {
    DVar.RefExpr = Stack.back().SharingMap[D].RefExpr;
    DVar.CKind = Stack.back().SharingMap[D].Attributes;
  }

  return DVar;
}
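A sketch of user code that maps onto the predetermined data-sharing rules encoded above (assumed example, not part of Sema):

int G = 0;
#pragma omp threadprivate(G)    // threadprivate directive -> OMPC_threadprivate

void f() {
  const int C = 42;             // const type, no mutable member -> OMPC_shared
  #pragma omp parallel
  {
    int Local = 0;              // automatic storage, declared inside the
                                // construct -> OMPC_private
    static int Counter = 0;     // static storage, declared inside the
                                // construct -> OMPC_shared
    Counter += Local + C + G;
  }
}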
bool CodeGenFunction::hasAggregateLLVMType(QualType T) {
  return T->isRecordType() || T->isArrayType() || T->isAnyComplexType() ||
    T->isMemberFunctionPointerType();
}
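For illustration (assumed examples, not from CodeGen; _Complex in C++ is a GNU extension), the predicate above returns true for record, array, complex, and member-function-pointer types, and false for ordinary scalars:

struct S { int a, b; };

S s;                         // record                   -> aggregate
int arr[4];                  // array                    -> aggregate
_Complex double z;           // complex                  -> aggregate
void (S::*pmf)() = 0;        // member function pointer  -> aggregate
int i = 0;                   // scalar                   -> not aggregate
int *ptr = &i;               // pointer                  -> not aggregate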
Esempio n. 22
0
File: CGDecl.cpp Project: aaasz/SHP
/// EmitLocalBlockVarDecl - Emit code and set up an entry in LocalDeclMap for a
/// variable declaration with auto, register, or no storage class specifier.
/// These turn into simple stack objects, or GlobalValues depending on target.
void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
  QualType Ty = D.getType();
  bool isByRef = D.hasAttr<BlocksAttr>();
  bool needsDispose = false;
  unsigned Align = 0;
  bool IsSimpleConstantInitializer = false;

  llvm::Value *DeclPtr;
  if (Ty->isConstantSizeType()) {
    if (!Target.useGlobalsForAutomaticVariables()) {
      
      // If this value is an array or struct, is POD, and if the initializer is
      // a statically determinable constant, try to optimize it.
      if (D.getInit() && !isByRef &&
          (Ty->isArrayType() || Ty->isRecordType()) &&
          Ty->isPODType() &&
          D.getInit()->isConstantInitializer(getContext())) {
        // If this variable is marked 'const', emit the value as a global.
        if (CGM.getCodeGenOpts().MergeAllConstants &&
            Ty.isConstant(getContext())) {
          EmitStaticBlockVarDecl(D);
          return;
        }
        
        IsSimpleConstantInitializer = true;
      }
      
      // A normal fixed sized variable becomes an alloca in the entry block.
      const llvm::Type *LTy = ConvertTypeForMem(Ty);
      if (isByRef)
        LTy = BuildByRefType(&D);
      llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
      Alloc->setName(D.getNameAsString());

      Align = getContext().getDeclAlignInBytes(&D);
      if (isByRef)
        Align = std::max(Align, unsigned(Target.getPointerAlign(0) / 8));
      Alloc->setAlignment(Align);
      DeclPtr = Alloc;
    } else {
      // Targets that don't support recursion emit locals as globals.
      const char *Class =
        D.getStorageClass() == VarDecl::Register ? ".reg." : ".auto.";
      DeclPtr = CreateStaticBlockVarDecl(D, Class,
                                         llvm::GlobalValue
                                         ::InternalLinkage);
    }

    // FIXME: Can this happen?
    if (Ty->isVariablyModifiedType())
      EmitVLASize(Ty);
  } else {
    EnsureInsertPoint();

    if (!DidCallStackSave) {
      // Save the stack.
      const llvm::Type *LTy = llvm::Type::getInt8PtrTy(VMContext);
      llvm::Value *Stack = CreateTempAlloca(LTy, "saved_stack");

      llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
      llvm::Value *V = Builder.CreateCall(F);

      Builder.CreateStore(V, Stack);

      DidCallStackSave = true;

      {
        // Push a cleanup block and restore the stack there.
        DelayedCleanupBlock scope(*this);

        V = Builder.CreateLoad(Stack, "tmp");
        llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
        Builder.CreateCall(F, V);
      }
    }

    // Get the element type.
    const llvm::Type *LElemTy = ConvertTypeForMem(Ty);
    const llvm::Type *LElemPtrTy =
      llvm::PointerType::get(LElemTy, D.getType().getAddressSpace());

    llvm::Value *VLASize = EmitVLASize(Ty);

    // Downcast the VLA size expression
    VLASize = Builder.CreateIntCast(VLASize, llvm::Type::getInt32Ty(VMContext),
                                    false, "tmp");

    // Allocate memory for the array.
    llvm::AllocaInst *VLA = 
      Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext), VLASize, "vla");
    VLA->setAlignment(getContext().getDeclAlignInBytes(&D));

    DeclPtr = Builder.CreateBitCast(VLA, LElemPtrTy, "tmp");
  }

  llvm::Value *&DMEntry = LocalDeclMap[&D];
  assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
  DMEntry = DeclPtr;

  // Emit debug info for local var declaration.
  if (CGDebugInfo *DI = getDebugInfo()) {
    assert(HaveInsertPoint() && "Unexpected unreachable point!");

    DI->setLocation(D.getLocation());
    if (Target.useGlobalsForAutomaticVariables()) {
      DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(DeclPtr), &D);
    } else
      DI->EmitDeclareOfAutoVariable(&D, DeclPtr, Builder);
  }

  // If this local has an initializer, emit it now.
  const Expr *Init = D.getInit();

  // If we are at an unreachable point, we don't need to emit the initializer
  // unless it contains a label.
  if (!HaveInsertPoint()) {
    if (!ContainsLabel(Init))
      Init = 0;
    else
      EnsureInsertPoint();
  }

  if (Init) {
    llvm::Value *Loc = DeclPtr;
    if (isByRef)
      Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D), 
                                    D.getNameAsString());

    bool isVolatile =
      getContext().getCanonicalType(D.getType()).isVolatileQualified();
    
    // If the initializer was a simple constant initializer, we can optimize it
    // in various ways.
    if (IsSimpleConstantInitializer) {
      llvm::Constant *Init = CGM.EmitConstantExpr(D.getInit(),D.getType(),this);
      assert(Init != 0 && "Wasn't a simple constant init?");
      
      llvm::Value *AlignVal = 
        llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Align);
      const llvm::Type *IntPtr =
        llvm::IntegerType::get(VMContext, LLVMPointerWidth);
      llvm::Value *SizeVal =
        llvm::ConstantInt::get(IntPtr, getContext().getTypeSizeInBytes(Ty));

      const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
      if (Loc->getType() != BP)
        Loc = Builder.CreateBitCast(Loc, BP, "tmp");
      
      // If the initializer is all zeros, codegen with memset.
      if (isa<llvm::ConstantAggregateZero>(Init)) {
        llvm::Value *Zero =
          llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 0);
        Builder.CreateCall4(CGM.getMemSetFn(), Loc, Zero, SizeVal, AlignVal);
      } else {
        // Otherwise, create a temporary global with the initializer then 
        // memcpy from the global to the alloca.
        std::string Name = GetStaticDeclName(*this, D, ".");
        llvm::GlobalVariable *GV =
          new llvm::GlobalVariable(CGM.getModule(), Init->getType(), true,
                                   llvm::GlobalValue::InternalLinkage,
                                   Init, Name, 0, false, 0);
        GV->setAlignment(Align);

        llvm::Value *SrcPtr = GV;
        if (SrcPtr->getType() != BP)
          SrcPtr = Builder.CreateBitCast(SrcPtr, BP, "tmp");
        
        Builder.CreateCall4(CGM.getMemCpyFn(), Loc, SrcPtr, SizeVal, AlignVal);
      }
    } else if (Ty->isReferenceType()) {
      RValue RV = EmitReferenceBindingToExpr(Init, Ty, /*IsInitializer=*/true);
      EmitStoreOfScalar(RV.getScalarVal(), Loc, false, Ty);
    } else if (!hasAggregateLLVMType(Init->getType())) {
      llvm::Value *V = EmitScalarExpr(Init);
      EmitStoreOfScalar(V, Loc, isVolatile, D.getType());
    } else if (Init->getType()->isAnyComplexType()) {
      EmitComplexExprIntoAddr(Init, Loc, isVolatile);
    } else {
      EmitAggExpr(Init, Loc, isVolatile);
    }
  }

  if (isByRef) {
    const llvm::PointerType *PtrToInt8Ty = llvm::Type::getInt8PtrTy(VMContext);

    EnsureInsertPoint();
    llvm::Value *isa_field = Builder.CreateStructGEP(DeclPtr, 0);
    llvm::Value *forwarding_field = Builder.CreateStructGEP(DeclPtr, 1);
    llvm::Value *flags_field = Builder.CreateStructGEP(DeclPtr, 2);
    llvm::Value *size_field = Builder.CreateStructGEP(DeclPtr, 3);
    llvm::Value *V;
    int flag = 0;
    int flags = 0;

    needsDispose = true;

    if (Ty->isBlockPointerType()) {
      flag |= BLOCK_FIELD_IS_BLOCK;
      flags |= BLOCK_HAS_COPY_DISPOSE;
    } else if (BlockRequiresCopying(Ty)) {
      flag |= BLOCK_FIELD_IS_OBJECT;
      flags |= BLOCK_HAS_COPY_DISPOSE;
    }

    // FIXME: Someone double check this.
    if (Ty.isObjCGCWeak())
      flag |= BLOCK_FIELD_IS_WEAK;

    int isa = 0;
    if (flag&BLOCK_FIELD_IS_WEAK)
      isa = 1;
    V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), isa);
    V = Builder.CreateIntToPtr(V, PtrToInt8Ty, "isa");
    Builder.CreateStore(V, isa_field);

    Builder.CreateStore(DeclPtr, forwarding_field);

    V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), flags);
    Builder.CreateStore(V, flags_field);

    const llvm::Type *V1;
    V1 = cast<llvm::PointerType>(DeclPtr->getType())->getElementType();
    V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
                               (CGM.getTargetData().getTypeStoreSizeInBits(V1)
                                / 8));
    Builder.CreateStore(V, size_field);

    if (flags & BLOCK_HAS_COPY_DISPOSE) {
      BlockHasCopyDispose = true;
      llvm::Value *copy_helper = Builder.CreateStructGEP(DeclPtr, 4);
      Builder.CreateStore(BuildbyrefCopyHelper(DeclPtr->getType(), flag, Align),
                          copy_helper);

      llvm::Value *destroy_helper = Builder.CreateStructGEP(DeclPtr, 5);
      Builder.CreateStore(BuildbyrefDestroyHelper(DeclPtr->getType(), flag,
                                                  Align),
                          destroy_helper);
    }
  }

  // Handle CXX destruction of variables.
  QualType DtorTy(Ty);
  while (const ArrayType *Array = getContext().getAsArrayType(DtorTy))
    DtorTy = getContext().getBaseElementType(Array);
  if (const RecordType *RT = DtorTy->getAs<RecordType>())
    if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
      if (!ClassDecl->hasTrivialDestructor()) {
        const CXXDestructorDecl *D = ClassDecl->getDestructor(getContext());
        assert(D && "EmitLocalBlockVarDecl - destructor is nul");
        
        if (const ConstantArrayType *Array = 
              getContext().getAsConstantArrayType(Ty)) {
          {
            DelayedCleanupBlock Scope(*this);
            QualType BaseElementTy = getContext().getBaseElementType(Array);
            const llvm::Type *BasePtr = ConvertType(BaseElementTy);
            BasePtr = llvm::PointerType::getUnqual(BasePtr);
            llvm::Value *BaseAddrPtr =
              Builder.CreateBitCast(DeclPtr, BasePtr);
            EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
          
            // Make sure to jump to the exit block.
            EmitBranch(Scope.getCleanupExitBlock());
          }
          if (Exceptions) {
            EHCleanupBlock Cleanup(*this);
            QualType BaseElementTy = getContext().getBaseElementType(Array);
            const llvm::Type *BasePtr = ConvertType(BaseElementTy);
            BasePtr = llvm::PointerType::getUnqual(BasePtr);
            llvm::Value *BaseAddrPtr =
              Builder.CreateBitCast(DeclPtr, BasePtr);
            EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
          }
        } else {
          {
            DelayedCleanupBlock Scope(*this);
            EmitCXXDestructorCall(D, Dtor_Complete, DeclPtr);

            // Make sure to jump to the exit block.
            EmitBranch(Scope.getCleanupExitBlock());
          }
          if (Exceptions) {
            EHCleanupBlock Cleanup(*this);
            EmitCXXDestructorCall(D, Dtor_Complete, DeclPtr);
          }
        }
      }
  }

  // Handle the cleanup attribute
  if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
    const FunctionDecl *FD = CA->getFunctionDecl();

    llvm::Constant* F = CGM.GetAddrOfFunction(FD);
    assert(F && "Could not find function!");

    const CGFunctionInfo &Info = CGM.getTypes().getFunctionInfo(FD);

    // In some cases, the type of the function argument will be different from
    // the type of the pointer. An example of this is
    // void f(void* arg);
    // __attribute__((cleanup(f))) void *g;
    //
    // To fix this we insert a bitcast here.
    QualType ArgTy = Info.arg_begin()->type;
    {
      DelayedCleanupBlock scope(*this);

      CallArgList Args;
      Args.push_back(std::make_pair(RValue::get(Builder.CreateBitCast(DeclPtr,
                                                           ConvertType(ArgTy))),
                                    getContext().getPointerType(D.getType())));
      EmitCall(Info, F, Args);
    }
    if (Exceptions) {
      EHCleanupBlock Cleanup(*this);

      CallArgList Args;
      Args.push_back(std::make_pair(RValue::get(Builder.CreateBitCast(DeclPtr,
                                                           ConvertType(ArgTy))),
                                    getContext().getPointerType(D.getType())));
      EmitCall(Info, F, Args);
    }
  }

  if (needsDispose && CGM.getLangOptions().getGCMode() != LangOptions::GCOnly) {
    {
      DelayedCleanupBlock scope(*this);
      llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
      V = Builder.CreateLoad(V);
      BuildBlockRelease(V);
    }
    // FIXME: Turn this on and audit the codegen
    if (0 && Exceptions) {
      EHCleanupBlock Cleanup(*this);
      llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
      V = Builder.CreateLoad(V);
      BuildBlockRelease(V);
    }
  }
}
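For orientation, a sketch of declarations that reach the major paths above (assumed source-level input, not part of CGDecl.cpp; __block requires -fblocks and VLAs in C++ are a Clang extension):

struct Guard { ~Guard() {} };          // non-trivial destructor -> destructor cleanup path

void paths(int n) {
  const int Table[3] = {1, 2, 3};      // POD with a constant initializer:
                                       //   may become a global plus memcpy/memset
  int VLA[n];                          // variably modified type:
                                       //   stacksave / byte alloca / stackrestore
  __block int Captured = 0;            // BlocksAttr: byref layout with
                                       //   isa/forwarding/flags/size fields
  Guard G;                             // destructor call emitted in a cleanup block
  VLA[0] = n;
  Captured += Table[0] + VLA[0];
}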
Esempio n. 23
0
bool CodeGenFunction::hasAggregateLLVMType(QualType T) {
  return T->isRecordType() || T->isArrayType() || T->isAnyComplexType() ||
    T->isObjCObjectType();
}