Example #1
static bool isCallbackArg(SVal V, QualType T) {
  // If the parameter is 0, it's harmless.
  if (V.isZeroConstant())
    return false;

  // If a parameter is a block or a callback, assume it can modify the pointer.
  if (T->isBlockPointerType() ||
      T->isFunctionPointerType() ||
      T->isObjCSelType())
    return true;

  // Check whether a callback is passed inside a struct (for structs passed
  // both by reference and by value). Dig just one level into the struct for
  // now.

  if (isa<PointerType>(T) || isa<ReferenceType>(T))
    T = T->getPointeeType();

  if (const RecordType *RT = T->getAsStructureType()) {
    const RecordDecl *RD = RT->getDecl();
    for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
         I != E; ++I) {
      QualType FieldT = I->getType();
      if (FieldT->isBlockPointerType() || FieldT->isFunctionPointerType())
        return true;
    }
  }

  return false;
}
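
A minimal sketch of an argument type this predicate would flag; the struct and its field names are hypothetical, not taken from the checker itself:

// Passing `Handlers *` (or `Handlers` by value) to a function makes
// isCallbackArg return true: the one-level field scan finds a
// function-pointer member.
struct Handlers {
  void (*onEvent)(int); // callback field found by the struct scan
  int data;             // non-callback field, ignored
};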
Example #2
      bool Find(const TypedValueRegion *R) {
        QualType T = R->getValueType();
        if (const RecordType *RT = T->getAsStructureType()) {
          const RecordDecl *RD = RT->getDecl()->getDefinition();
          assert(RD && "Referred record has no definition");
          for (RecordDecl::field_iterator I =
               RD->field_begin(), E = RD->field_end(); I!=E; ++I) {
            const FieldRegion *FR = MrMgr.getFieldRegion(&*I, R);
            FieldChain.push_back(&*I);
            T = I->getType();
            if (T->getAsStructureType()) {
              if (Find(FR))
                return true;
            }
            else {
              const SVal &V = StoreMgr.getBinding(store, loc::MemRegionVal(FR));
              if (V.isUndef())
                return true;
            }
            FieldChain.pop_back();
          }
        }

        return false;
      }
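
For intuition, a hedged sketch of an input Find would flag; the types and names are illustrative only:

// If only `o.inner.x` is ever written, the store binding for the FieldRegion
// of `o.inner.y` is UndefinedVal; Find recurses into `inner`, hits the
// undefined leaf, and returns true with FieldChain holding {inner, y}.
struct Inner { int x; int y; };
struct Outer { Inner inner; };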
Example #3
llvm::MDNode *
CodeGenTBAA::getTBAAStructTypeInfo(QualType QTy) {
  const Type *Ty = Context.getCanonicalType(QTy).getTypePtr();
  assert(isTBAAPathStruct(QTy));

  if (llvm::MDNode *N = StructTypeMetadataCache[Ty])
    return N;

  if (const RecordType *TTy = QTy->getAs<RecordType>()) {
    const RecordDecl *RD = TTy->getDecl()->getDefinition();

    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
    SmallVector <std::pair<uint64_t, llvm::MDNode*>, 4> Fields;
    unsigned idx = 0;
    const FieldDecl *LastFD = 0;
    bool IsMsStruct = RD->isMsStruct(Context);
    for (RecordDecl::field_iterator i = RD->field_begin(),
         e = RD->field_end(); i != e; ++i, ++idx) {
      if (IsMsStruct) {
        // Zero-length bitfields following non-bitfield members are ignored.
        if (Context.ZeroBitfieldFollowsNonBitfield(*i, LastFD)) {
          --idx;
          continue;
        }
        LastFD = *i;
      }

      QualType FieldQTy = i->getType();
      llvm::MDNode *FieldNode;
      if (isTBAAPathStruct(FieldQTy))
        FieldNode = getTBAAStructTypeInfo(FieldQTy);
      else
        FieldNode = getTBAAInfo(FieldQTy);
      if (!FieldNode)
        return StructTypeMetadataCache[Ty] = NULL;
      Fields.push_back(std::make_pair(
          Layout.getFieldOffset(idx) / Context.getCharWidth(), FieldNode));
    }

    // TODO: This is using the RTTI name. Is there a better way to get
    // a unique string for a type?
    SmallString<256> OutName;
    llvm::raw_svector_ostream Out(OutName);
    MContext.mangleCXXRTTIName(QualType(Ty, 0), Out);
    Out.flush();
    // Create the struct type node with a vector of pairs (offset, type).
    return StructTypeMetadataCache[Ty] =
      MDHelper.createTBAAStructTypeNode(OutName, Fields);
  }

  return StructTypeMetadataCache[Ty] = NULL;
}
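
Schematically, and hedging on the exact metadata syntax of the LLVM version involved, the node built here is a name string followed by per-field entries:

// struct S { int a; float b; };   (hypothetical input)
// produces, roughly:
//   !struct_S = !{!"_ZTS1S", !int_node, i64 0, !float_node, i64 4}
// i.e. one (field type node, byte offset) entry per field, in declaration
// order, preceded by a unique name string.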
Example #4
bool
CodeGenTBAA::CollectFields(uint64_t BaseOffset,
                           QualType QTy,
                           SmallVectorImpl<llvm::MDBuilder::TBAAStructField> &
                             Fields,
                           bool MayAlias) {
  /* Things not handled yet include: C++ base classes and bitfields. */

  if (const RecordType *TTy = QTy->getAs<RecordType>()) {
    const RecordDecl *RD = TTy->getDecl()->getDefinition();
    if (RD->hasFlexibleArrayMember())
      return false;

    // TODO: Handle C++ base classes.
    if (const CXXRecordDecl *Decl = dyn_cast<CXXRecordDecl>(RD))
      if (Decl->bases_begin() != Decl->bases_end())
        return false;

    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    unsigned idx = 0;
    const FieldDecl *LastFD = 0;
    bool IsMsStruct = RD->isMsStruct(Context);
    for (RecordDecl::field_iterator i = RD->field_begin(),
         e = RD->field_end(); i != e; ++i, ++idx) {
      if (IsMsStruct) {
        // Zero-length bitfields following non-bitfield members are ignored.
        if (Context.ZeroBitfieldFollowsNonBitfield(*i, LastFD)) {
          --idx;
          continue;
        }
        LastFD = *i;
      }
      uint64_t Offset = BaseOffset +
                        Layout.getFieldOffset(idx) / Context.getCharWidth();
      QualType FieldQTy = i->getType();
      if (!CollectFields(Offset, FieldQTy, Fields,
                         MayAlias || TypeHasMayAlias(FieldQTy)))
        return false;
    }
    return true;
  }

  /* Otherwise, treat whatever it is as a field. */
  uint64_t Offset = BaseOffset;
  uint64_t Size = Context.getTypeSizeInChars(QTy).getQuantity();
  llvm::MDNode *TBAAInfo = MayAlias ? getChar() : getTBAAInfo(QTy);
  llvm::MDNode *TBAATag = CodeGenOpts.StructPathTBAA ?
                          getTBAAScalarTagInfo(TBAAInfo) : TBAAInfo;
  Fields.push_back(llvm::MDBuilder::TBAAStructField(Offset, Size, TBAATag));
  return true;
}
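
To make the recursion concrete, a worked example under an assumed x86-64 layout (offsets and sizes in bytes; the actual tag nodes depend on the TBAA options):

// struct Point { int x; int y; double z; };   (hypothetical input)
// CollectFields(0, Point, Fields, false) appends approximately:
//   { Offset = 0, Size = 4, Tag = tag(int)    }  // x
//   { Offset = 4, Size = 4, Tag = tag(int)    }  // y
//   { Offset = 8, Size = 8, Tag = tag(double) }  // z
// A nested struct member would instead recurse, adding BaseOffset to each of
// its own field offsets.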
Example #5
/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
/// non-zero bytes that will be stored when outputting the initializer for the
/// specified initializer expression.
static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();

  // 0 and 0.0 won't require any non-zero stores!
  if (isSimpleZero(E, CGF)) return CharUnits::Zero();

  // If this is an initlist expr, sum up the sizes of the (present)
  // elements.  If this is something weird, assume the whole thing is non-zero.
  const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
  if (ILE == 0 || !CGF.getTypes().isZeroInitializable(ILE->getType()))
    return CGF.getContext().getTypeSizeInChars(E->getType());
  
  // InitListExprs for structs have to be handled carefully.  If there are
  // reference members, we need to consider the size of the reference, not the
  // referencee.  InitListExprs for unions and arrays can't have references.
  if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
    if (!RT->isUnionType()) {
      RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
      CharUnits NumNonZeroBytes = CharUnits::Zero();
      
      unsigned ILEElement = 0;
      for (RecordDecl::field_iterator Field = SD->field_begin(),
           FieldEnd = SD->field_end(); Field != FieldEnd; ++Field) {
        // We're done once we hit the flexible array member or run out of
        // InitListExpr elements.
        if (Field->getType()->isIncompleteArrayType() ||
            ILEElement == ILE->getNumInits())
          break;
        if (Field->isUnnamedBitfield())
          continue;

        const Expr *E = ILE->getInit(ILEElement++);
        
        // Reference values are always non-null and have the width of a pointer.
        if (Field->getType()->isReferenceType())
          NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
              CGF.getContext().getTargetInfo().getPointerWidth(0));
        else
          NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
      }
      
      return NumNonZeroBytes;
    }
  }
  
  
  CharUnits NumNonZeroBytes = CharUnits::Zero();
  for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
    NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
  return NumNonZeroBytes;
}
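
A worked example, assuming 64-bit pointers; the struct is hypothetical:

// struct S { int a; int &r; double d; };
// S s = { 0, x, 0.0 };
// The zero int and the zero double are counted as zero bytes, but the
// reference member always contributes pointer width, so the function
// returns 8.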
Example #6
llvm::MDNode *
CodeGenTBAA::getTBAAStructTypeInfo(QualType QTy) {
  const Type *Ty = Context.getCanonicalType(QTy).getTypePtr();
  assert(isTBAAPathStruct(QTy));

  if (llvm::MDNode *N = StructTypeMetadataCache[Ty])
    return N;

  if (const RecordType *TTy = QTy->getAs<RecordType>()) {
    const RecordDecl *RD = TTy->getDecl()->getDefinition();

    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
    SmallVector <std::pair<uint64_t, llvm::MDNode*>, 4> Fields;
    // To reduce the size of the MDNode for a given struct type, we only
    // output each distinct scalar field type once.
    // Offsets for scalar fields in the type DAG are not used.
    llvm::SmallSet <llvm::MDNode*, 4> ScalarFieldTypes;
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(),
         e = RD->field_end(); i != e; ++i, ++idx) {
      QualType FieldQTy = i->getType();
      llvm::MDNode *FieldNode;
      if (isTBAAPathStruct(FieldQTy))
        FieldNode = getTBAAStructTypeInfo(FieldQTy);
      else {
        FieldNode = getTBAAInfo(FieldQTy);
        // Skip this field if we have already emitted its scalar type node.
        if (ScalarFieldTypes.count(FieldNode))
          continue;
        ScalarFieldTypes.insert(FieldNode);
      }
      if (!FieldNode)
        return StructTypeMetadataCache[Ty] = NULL;
      Fields.push_back(std::make_pair(
          Layout.getFieldOffset(idx) / Context.getCharWidth(), FieldNode));
    }

    // TODO: This is using the RTTI name. Is there a better way to get
    // a unique string for a type?
    SmallString<256> OutName;
    llvm::raw_svector_ostream Out(OutName);
    MContext.mangleCXXRTTIName(QualType(Ty, 0), Out);
    Out.flush();
    // Create the struct type node with a vector of pairs (offset, type).
    return StructTypeMetadataCache[Ty] =
      MDHelper.createTBAAStructTypeNode(OutName, Fields);
  }

  return StructTypeMetadataCache[Ty] = NULL;
}
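
The effect of the ScalarFieldTypes set, sketched on a hypothetical input:

// struct S { int a; int b; float f; };
// a and b map to the same scalar TBAA node, so only a's (offset, node) pair
// is recorded; b is skipped by the dedup, and f adds the float node.  Nested
// struct fields bypass the set and are always recorded.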
Example #7
void CGRecordLowering::accumulateFields() {
  for (RecordDecl::field_iterator Field = D->field_begin(),
                                  FieldEnd = D->field_end();
    Field != FieldEnd;)
    if (Field->isBitField()) {
      RecordDecl::field_iterator Start = Field;
      // Iterate to gather the list of bitfields.
      for (++Field; Field != FieldEnd && Field->isBitField(); ++Field);
      accumulateBitFields(Start, Field);
    } else {
      Members.push_back(MemberInfo(
          bitsToCharUnits(getFieldBitOffset(*Field)), MemberInfo::Field,
          getStorageType(*Field), *Field));
      ++Field;
    }
}
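
An illustration of how the loop partitions fields; the struct is hypothetical:

// struct S { int a; int b : 4; int c : 4; double d; };
// a and d are appended directly as MemberInfo::Field entries; the contiguous
// bitfields b and c are handed to accumulateBitFields as the iterator range
// [b, d), i.e. Start points at b and Field has advanced to d.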
Example #8
llvm::MDNode *
CodeGenTBAA::getTBAAStructTypeInfo(QualType QTy) {
  const Type *Ty = Context.getCanonicalType(QTy).getTypePtr();
  assert(isTBAAPathStruct(QTy));

  if (llvm::MDNode *N = StructTypeMetadataCache[Ty])
    return N;

  if (const RecordType *TTy = QTy->getAs<RecordType>()) {
    const RecordDecl *RD = TTy->getDecl()->getDefinition();

    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
    SmallVector <std::pair<llvm::MDNode*, uint64_t>, 4> Fields;
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(),
         e = RD->field_end(); i != e; ++i, ++idx) {
      QualType FieldQTy = i->getType();
      llvm::MDNode *FieldNode;
      if (isTBAAPathStruct(FieldQTy))
        FieldNode = getTBAAStructTypeInfo(FieldQTy);
      else
        FieldNode = getTBAAInfo(FieldQTy);
      if (!FieldNode)
        return StructTypeMetadataCache[Ty] = NULL;
      Fields.push_back(std::make_pair(
          FieldNode, Layout.getFieldOffset(idx) / Context.getCharWidth()));
    }

    // TODO: This is using the RTTI name. Is there a better way to get
    // a unique string for a type?
    SmallString<256> OutName;
    if (Features.CPlusPlus) {
      // Don't use mangleCXXRTTIName for C code.
      llvm::raw_svector_ostream Out(OutName);
      MContext.mangleCXXRTTIName(QualType(Ty, 0), Out);
      Out.flush();
    } else {
      OutName = RD->getName();
    }
    // Create the struct type node with a vector of pairs (type, offset).
    return StructTypeMetadataCache[Ty] =
      MDHelper.createTBAAStructTypeNode(OutName, Fields);
  }

  return StructTypeMetadataCache[Ty] = NULL;
}
Example #9
  //--------------------------------------------------------- 
  void VisitInitListExpr(InitListExpr *RHS) 
  {
    unsigned elementNo = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), 
         e = RD->field_end(); i != e; ++i)
    {
      // Unnamed bitfields can't be initialized explicitly, so skip them.
      if (i->isUnnamedBitfield())
        continue;
    
      compoundStmts.push_back(Assign_(
        MemberPoint_(Paren_(Clone_(LHS)), *i), 
        elementNo < RHS->getNumInits() ?        
          Clone_ (RHS->getInit(elementNo)) : 
          Int_(0)));

      elementNo++;
    }
  }
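
A sketch of the rewrite this visitor produces; the names are illustrative:

// struct P { int x; int y; int z; };
// P p = { 1, 2 };
// yields one assignment per named field, padding the missing initializer
// with an integer literal 0:
//   (p).x = 1; (p).y = 2; (p).z = 0;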
Example #10
bool
CodeGenTBAA::CollectFields(uint64_t BaseOffset,
                           QualType QTy,
                           SmallVectorImpl<llvm::MDBuilder::TBAAStructField> &
                             Fields,
                           bool MayAlias) {
  /* Things not handled yet include: C++ base classes and bitfields. */

  if (const RecordType *TTy = QTy->getAs<RecordType>()) {
    const RecordDecl *RD = TTy->getDecl()->getDefinition();
    if (RD->hasFlexibleArrayMember())
      return false;

    // TODO: Handle C++ base classes.
    if (const CXXRecordDecl *Decl = dyn_cast<CXXRecordDecl>(RD))
      if (Decl->bases_begin() != Decl->bases_end())
        return false;

    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(),
         e = RD->field_end(); i != e; ++i, ++idx) {
      uint64_t Offset = BaseOffset +
                        Layout.getFieldOffset(idx) / Context.getCharWidth();
      QualType FieldQTy = i->getType();
      if (!CollectFields(Offset, FieldQTy, Fields,
                         MayAlias || TypeHasMayAlias(FieldQTy)))
        return false;
    }
    return true;
  }

  /* Otherwise, treat whatever it is as a field. */
  uint64_t Offset = BaseOffset;
  uint64_t Size = Context.getTypeSizeInChars(QTy).getQuantity();
  llvm::MDNode *TBAAType = MayAlias ? getChar() : getTypeInfo(QTy);
  llvm::MDNode *TBAATag = getAccessTagInfo(TBAAAccessInfo(TBAAType, Size));
  Fields.push_back(llvm::MDBuilder::TBAAStructField(Offset, Size, TBAATag));
  return true;
}
Example #11
    bool GetFields( RecordDecl * rd, Obj * entries) {
     
        // Check the fields of this struct; if any one of them is not
        // understandable, the struct becomes 'opaque'. That is, we insert the
        // type and link it to its llvm type so it can be used in terra code,
        // but none of its fields are exposed (since we don't understand the
        // layout).
        bool opaque = false;
        for(RecordDecl::field_iterator it = rd->field_begin(), end = rd->field_end(); it != end; ++it) {
            if(it->isBitField() || it->isAnonymousStructOrUnion() || !it->getDeclName()) {
                opaque = true;
                continue;
            }
            DeclarationName declname = it->getDeclName();
            std::string declstr = declname.getAsString();
            QualType FT = it->getType();
            Obj fobj;
            if(!GetType(FT,&fobj)) {
                opaque = true;
                continue;
            }
            lua_newtable(L);
            fobj.push();
            lua_setfield(L,-2,"type");
            lua_pushstring(L,declstr.c_str());
            lua_setfield(L,-2,"field");
            entries->addentry();
        }
        return !opaque;

    }
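
The Lua-side shape this builds, hedged since it depends on Terra's internal representation; the field name is hypothetical:

// For each understood field, a table like
//   { type = <terra type object>, field = "count" }
// is pushed and recorded via entries->addentry().  If any field is a
// bitfield, anonymous, or of an unconvertible type, GetFields returns false
// and the caller treats the struct as opaque (type usable, fields hidden).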
Example #12
/// isSafeToConvert - Return true if it is safe to convert the specified record
/// decl to IR and lay it out, false if doing so would cause us to get into a
/// recursive compilation mess.
static bool 
isSafeToConvert(const RecordDecl *RD, CodeGenTypes &CGT,
                llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked) {
  // If we have already checked this type (maybe the same type is used by-value
  // multiple times in multiple structure fields), don't check again.
  if (!AlreadyChecked.insert(RD)) return true;
  
  const Type *Key = CGT.getContext().getTagDeclType(RD).getTypePtr();
  
  // If this type is already laid out, converting it is a noop.
  if (CGT.isRecordLayoutComplete(Key)) return true;
  
  // If this type is currently being laid out, we can't recursively compile it.
  if (CGT.isRecordBeingLaidOut(Key))
    return false;
  
  // If this type would require laying out bases that are currently being laid
  // out, don't do it.  This includes virtual base classes which get laid out
  // when a class is translated, even though they aren't embedded by-value into
  // the class.
  if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (CXXRecordDecl::base_class_const_iterator I = CRD->bases_begin(),
         E = CRD->bases_end(); I != E; ++I)
      if (!isSafeToConvert(I->getType()->getAs<RecordType>()->getDecl(),
                           CGT, AlreadyChecked))
        return false;
  }
  
  // If this type would require laying out members that are currently being laid
  // out, don't do it.
  for (RecordDecl::field_iterator I = RD->field_begin(),
       E = RD->field_end(); I != E; ++I)
    if (!isSafeToConvert(I->getType(), CGT, AlreadyChecked))
      return false;
  
  // If there are no problems, let's do it.
  return true;
}
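
A minimal sketch of the by-value dependency this guards against; the types are hypothetical:

// struct Inner { int x; };
// struct Outer { Inner i; };   // converting Outer requires laying out Inner
// While Outer is marked as being laid out, isSafeToConvert(Inner, ...) checks
// that Inner itself is not mid-layout before recursing into its fields; if it
// were, conversion would be deferred rather than recursing forever.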
Example #13
void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
  assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");

  const ASTRecordLayout &layout = Types.getContext().getASTRecordLayout(D);

  llvm::Type *unionType = 0;
  CharUnits unionSize = CharUnits::Zero();
  CharUnits unionAlign = CharUnits::Zero();

  bool hasOnlyZeroSizedBitFields = true;
  bool checkedFirstFieldZeroInit = false;

  unsigned fieldNo = 0;
  for (RecordDecl::field_iterator field = D->field_begin(),
       fieldEnd = D->field_end(); field != fieldEnd; ++field, ++fieldNo) {
    assert(layout.getFieldOffset(fieldNo) == 0 &&
          "Union field offset did not start at the beginning of record!");
    llvm::Type *fieldType = LayoutUnionField(*field, layout);

    if (!fieldType)
      continue;

    if (field->getDeclName() && !checkedFirstFieldZeroInit) {
      CheckZeroInitializable(field->getType());
      checkedFirstFieldZeroInit = true;
    }

    hasOnlyZeroSizedBitFields = false;

    CharUnits fieldAlign = CharUnits::fromQuantity(
                          Types.getDataLayout().getABITypeAlignment(fieldType));
    CharUnits fieldSize = CharUnits::fromQuantity(
                             Types.getDataLayout().getTypeAllocSize(fieldType));

    if (fieldAlign < unionAlign)
      continue;

    if (fieldAlign > unionAlign || fieldSize > unionSize) {
      unionType = fieldType;
      unionAlign = fieldAlign;
      unionSize = fieldSize;
    }
  }

  // Now add our field.
  if (unionType) {
    AppendField(CharUnits::Zero(), unionType);

    if (getTypeAlignment(unionType) > layout.getAlignment()) {
      // We need a packed struct.
      Packed = true;
      unionAlign = CharUnits::One();
    }
  }
  if (unionAlign.isZero()) {
    (void)hasOnlyZeroSizedBitFields;
    assert(hasOnlyZeroSizedBitFields &&
           "0-align record did not have all zero-sized bit-fields!");
    unionAlign = CharUnits::One();
  }

  // Append tail padding.
  CharUnits recordSize = layout.getSize();
  if (recordSize > unionSize)
    AppendPadding(recordSize, unionAlign);
}
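
A worked example of the best-field selection, assuming typical x86-64 sizes and alignments:

// union U { char c; double d; int i; };
// c is taken first (align 1, size 1), d replaces it (align 8 > 1), and i is
// skipped (align 4 < 8), so unionType becomes double's IR type.  Since the
// record size (8) equals unionSize, no tail padding is appended.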
Example #14
void
CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
                                      RecordDecl::field_iterator FieldEnd) {
  // Run stores the first element of the current run of bitfields.  FieldEnd is
  // used as a special value to note that we don't have a current run.  A
  // bitfield run is a contiguous collection of bitfields that can be stored in
  // the same storage block.  Zero-sized bitfields and bitfields that would
  // cross an alignment boundary break a run and start a new one.
  RecordDecl::field_iterator Run = FieldEnd;
  // Tail is the offset of the first bit off the end of the current run.  It's
  // used to determine if the ASTRecordLayout is treating these two bitfields as
  // contiguous.  StartBitOffset is the offset of the beginning of the run.
  uint64_t StartBitOffset, Tail = 0;
  if (isDiscreteBitFieldABI()) {
    for (; Field != FieldEnd; ++Field) {
      uint64_t BitOffset = getFieldBitOffset(*Field);
      // Zero-width bitfields end runs.
      if (Field->isZeroLengthBitField(Context)) {
        Run = FieldEnd;
        continue;
      }
      llvm::Type *Type = Types.ConvertTypeForMem(Field->getType());
      // If we don't have a run yet, or don't live within the previous run's
      // allocated storage then we allocate some storage and start a new run.
      if (Run == FieldEnd || BitOffset >= Tail) {
        Run = Field;
        StartBitOffset = BitOffset;
        Tail = StartBitOffset + DataLayout.getTypeAllocSizeInBits(Type);
        // Add the storage member to the record.  This must be added to the
        // record before the bitfield members so that it gets laid out before
        // the bitfields it contains get laid out.
        Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
      }
      // Bitfields get the offset of their storage but come afterward and remain
      // there after a stable sort.
      Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
                                   MemberInfo::Field, nullptr, *Field));
    }
    return;
  }

  // Check if OffsetInRecord is better as a single field run. When OffsetInRecord
  // has legal integer width, and its bitfield offset is naturally aligned, it
  // is better to make the bitfield a separate storage component so that it
  // can be accessed directly with lower cost.
  auto IsBetterAsSingleFieldRun = [&](uint64_t OffsetInRecord,
                                      uint64_t StartBitOffset) {
    if (!Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
      return false;
    if (!DataLayout.isLegalInteger(OffsetInRecord))
      return false;
    // Make sure StartBitOffset is naturally aligned if it is treated as an
    // IType integer.
    if (StartBitOffset %
            Context.toBits(getAlignment(getIntNType(OffsetInRecord))) !=
        0)
      return false;
    return true;
  };

  // Whether the current run's start field is better off as a single-field run.
  bool StartFieldAsSingleRun = false;
  for (;;) {
    // Check to see if we need to start a new run.
    if (Run == FieldEnd) {
      // If we're out of fields, return.
      if (Field == FieldEnd)
        break;
      // Any non-zero-length bitfield can start a new run.
      if (!Field->isZeroLengthBitField(Context)) {
        Run = Field;
        StartBitOffset = getFieldBitOffset(*Field);
        Tail = StartBitOffset + Field->getBitWidthValue(Context);
        StartFieldAsSingleRun = IsBetterAsSingleFieldRun(Tail - StartBitOffset,
                                                         StartBitOffset); 
      }
      ++Field;
      continue;
    }

    // Skip the block below and emit the storage directly if any of these
    // hold: the run's start field is better as a single-field run; the
    // current field (or consecutive fields) is better as a single run; the
    // current field is a zero-width bitfield and either
    // UseZeroLengthBitfieldAlignment or UseBitFieldTypeAlignment is set; or
    // the current field's offset is not the previous field's offset plus its
    // width (i.e. the layout does not treat them as contiguous).
    // Otherwise, try to add the bitfield to the run.
    if (!StartFieldAsSingleRun && Field != FieldEnd &&
        !IsBetterAsSingleFieldRun(Tail - StartBitOffset, StartBitOffset) &&
        (!Field->isZeroLengthBitField(Context) ||
         (!Context.getTargetInfo().useZeroLengthBitfieldAlignment() &&
          !Context.getTargetInfo().useBitFieldTypeAlignment())) &&
        Tail == getFieldBitOffset(*Field)) {
      Tail += Field->getBitWidthValue(Context);
      ++Field;
      continue;
    }

    // We've hit a break-point in the run and need to emit a storage field.
    llvm::Type *Type = getIntNType(Tail - StartBitOffset);
    // Add the storage member to the record and set the bitfield info for all of
    // the bitfields in the run.  Bitfields get the offset of their storage but
    // come afterward and remain there after a stable sort.
    Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
    for (; Run != Field; ++Run)
      Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
                                   MemberInfo::Field, nullptr, *Run));
    Run = FieldEnd;
    StartFieldAsSingleRun = false;
  }
}
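
A sketch of the single-field-run case, assuming -ffine-grained-bitfield-accesses and a target where i16 is a legal integer type:

// struct S { unsigned a : 16; unsigned b : 16; };
// a's run is 16 bits wide and starts at a 16-bit-aligned offset, so
// IsBetterAsSingleFieldRun returns true; a and b each get their own i16
// storage unit instead of being merged into one i32.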
Example #15
void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
  assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  const llvm::Type *Ty = 0;
  uint64_t Size = 0;
  unsigned Align = 0;

  bool HasOnlyZeroSizedBitFields = true;
  
  unsigned FieldNo = 0;
  for (RecordDecl::field_iterator Field = D->field_begin(),
       FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    assert(Layout.getFieldOffset(FieldNo) == 0 &&
          "Union field offset did not start at the beginning of record!");

    if (Field->isBitField()) {
      uint64_t FieldSize =
        Field->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();

      // Ignore zero sized bit fields.
      if (FieldSize == 0)
        continue;

      // Add the bit field info.
      Types.addBitFieldInfo(*Field, 0, 0, FieldSize);
    } else
      Types.addFieldInfo(*Field, 0);

    HasOnlyZeroSizedBitFields = false;
    
    const llvm::Type *FieldTy =
      Types.ConvertTypeForMemRecursive(Field->getType());
    unsigned FieldAlign = Types.getTargetData().getABITypeAlignment(FieldTy);
    uint64_t FieldSize = Types.getTargetData().getTypeAllocSize(FieldTy);

    if (FieldAlign < Align)
      continue;

    if (FieldAlign > Align || FieldSize > Size) {
      Ty = FieldTy;
      Align = FieldAlign;
      Size = FieldSize;
    }
  }

  // Now add our field.
  if (Ty) {
    AppendField(0, Ty);

    if (getTypeAlignment(Ty) > Layout.getAlignment() / 8) {
      // We need a packed struct.
      Packed = true;
      Align = 1;
    }
  }
  if (!Align) {
    assert(HasOnlyZeroSizedBitFields &&
           "0-align record did not have all zero-sized bit-fields!");
    Align = 1;
  }
  
  // Append tail padding.
  if (Layout.getSize() / 8 > Size)
    AppendPadding(Layout.getSize() / 8, Align);
}
Example #16
void
CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
                                      RecordDecl::field_iterator FieldEnd) {
  // Run stores the first element of the current run of bitfields.  FieldEnd is
  // used as a special value to note that we don't have a current run.  A
  // bitfield run is a contiguous collection of bitfields that can be stored in
  // the same storage block.  Zero-sized bitfields and bitfields that would
  // cross an alignment boundary break a run and start a new one.
  RecordDecl::field_iterator Run = FieldEnd;
  // Tail is the offset of the first bit off the end of the current run.  It's
  // used to determine if the ASTRecordLayout is treating these two bitfields as
  // contiguous.  StartBitOffset is the offset of the beginning of the run.
  uint64_t StartBitOffset, Tail = 0;
  if (useMSABI()) {
    for (; Field != FieldEnd; ++Field) {
      uint64_t BitOffset = getFieldBitOffset(*Field);
      // Zero-width bitfields end runs.
      if (Field->getBitWidthValue(Context) == 0) {
        Run = FieldEnd;
        continue;
      }
      llvm::Type *Type = Types.ConvertTypeForMem(Field->getType());
      // If we don't have a run yet, or don't live within the previous run's
      // allocated storage then we allocate some storage and start a new run.
      if (Run == FieldEnd || BitOffset >= Tail) {
        Run = Field;
        StartBitOffset = BitOffset;
        Tail = StartBitOffset + DataLayout.getTypeAllocSizeInBits(Type);
        // Add the storage member to the record.  This must be added to the
        // record before the bitfield members so that it gets laid out before
        // the bitfields it contains get laid out.
        Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
      }
      // Bitfields get the offset of their storage but come afterward and remain
      // there after a stable sort.
      Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
                                   MemberInfo::Field, nullptr, *Field));
    }
    return;
  }
  for (;;) {
    // Check to see if we need to start a new run.
    if (Run == FieldEnd) {
      // If we're out of fields, return.
      if (Field == FieldEnd)
        break;
      // Any non-zero-length bitfield can start a new run.
      if (Field->getBitWidthValue(Context) != 0) {
        Run = Field;
        StartBitOffset = getFieldBitOffset(*Field);
        Tail = StartBitOffset + Field->getBitWidthValue(Context);
      }
      ++Field;
      continue;
    }
    // Add bitfields to the run as long as they qualify.
    if (Field != FieldEnd && Field->getBitWidthValue(Context) != 0 &&
        Tail == getFieldBitOffset(*Field)) {
      Tail += Field->getBitWidthValue(Context);
      ++Field;
      continue;
    }
    // We've hit a break-point in the run and need to emit a storage field.
    llvm::Type *Type = getIntNType(Tail - StartBitOffset);
    // Add the storage member to the record and set the bitfield info for all of
    // the bitfields in the run.  Bitfields get the offset of their storage but
    // come afterward and remain there after a stable sort.
    Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
    for (; Run != Field; ++Run)
      Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
                                   MemberInfo::Field, nullptr, *Run));
    Run = FieldEnd;
  }
}
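
An illustration of run formation on the non-MS path; the struct is hypothetical and a typical layout is assumed:

// struct S { unsigned a : 3; unsigned b : 5; unsigned : 0; unsigned c : 4; };
// a starts a run; b's offset (3) equals Tail, so it joins, giving an 8-bit
// run stored in an i8.  The zero-width bitfield neither extends nor starts a
// run, ending it, so c begins a fresh run with its own storage member.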
Example #17
void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
#if 0
  // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
  // (Length of globals? Chunks of zeroed-out space?).
  //
  // If we can, prefer a copy from a global; this is a lot less code for long
  // globals, and it's easier for the current optimizers to analyze.
  if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
    llvm::GlobalVariable* GV =
    new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
                             llvm::GlobalValue::InternalLinkage, C, "");
    EmitFinalDestCopy(E, CGF.MakeAddrLValue(GV, E->getType()));
    return;
  }
#endif
  if (E->hadArrayRangeDesignator()) {
    CGF.ErrorUnsupported(E, "GNU array range designator extension");
  }

  // Handle initialization of an array.
  if (E->getType()->isArrayType()) {
    const llvm::PointerType *APType =
      cast<llvm::PointerType>(DestPtr->getType());
    const llvm::ArrayType *AType =
      cast<llvm::ArrayType>(APType->getElementType());

    uint64_t NumInitElements = E->getNumInits();

    if (E->getNumInits() > 0) {
      QualType T1 = E->getType();
      QualType T2 = E->getInit(0)->getType();
      if (CGF.getContext().hasSameUnqualifiedType(T1, T2)) {
        EmitAggLoadOfLValue(E->getInit(0));
        return;
      }
    }

    uint64_t NumArrayElements = AType->getNumElements();
    QualType ElementType = CGF.getContext().getCanonicalType(E->getType());
    ElementType = CGF.getContext().getAsArrayType(ElementType)->getElementType();

    // FIXME: were we intentionally ignoring address spaces and GC attributes?

    for (uint64_t i = 0; i != NumArrayElements; ++i) {
      llvm::Value *NextVal = Builder.CreateStructGEP(DestPtr, i, ".array");
      LValue LV = CGF.MakeAddrLValue(NextVal, ElementType);
      if (i < NumInitElements)
        EmitInitializationToLValue(E->getInit(i), LV, ElementType);
      else
        EmitNullInitializationToLValue(LV, ElementType);
    }
    return;
  }

  assert(E->getType()->isRecordType() && "Only support structs/unions here!");

  // Do struct initialization; this code just sets each individual member
  // to the appropriate value.  This makes bitfield support automatic;
  // the disadvantage is that the generated code is more difficult for
  // the optimizer, especially with bitfields.
  unsigned NumInitElements = E->getNumInits();
  RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
  
  // If we're initializing the whole aggregate, just do it in place.
  // FIXME: This is a hack around an AST bug (PR6537).
  if (NumInitElements == 1 && E->getType() == E->getInit(0)->getType()) {
    EmitInitializationToLValue(E->getInit(0),
                               CGF.MakeAddrLValue(DestPtr, E->getType()),
                               E->getType());
    return;
  }
  
  
  if (E->getType()->isUnionType()) {
    // Only initialize one field of a union. The field itself is
    // specified by the initializer list.
    if (!E->getInitializedFieldInUnion()) {
      // Empty union; we have nothing to do.

#ifndef NDEBUG
      // Make sure that it's really an empty union and not a failure of
      // semantic analysis.
      for (RecordDecl::field_iterator Field = SD->field_begin(),
                                   FieldEnd = SD->field_end();
           Field != FieldEnd; ++Field)
        assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
#endif
      return;
    }

    // FIXME: volatility
    FieldDecl *Field = E->getInitializedFieldInUnion();
    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, Field, 0);

    if (NumInitElements) {
      // Store the initializer into the field
      EmitInitializationToLValue(E->getInit(0), FieldLoc, Field->getType());
    } else {
      // Default-initialize to null
      EmitNullInitializationToLValue(FieldLoc, Field->getType());
    }

    return;
  }

  // Here we iterate over the fields; this makes it simpler to both
  // default-initialize fields and skip over unnamed fields.
  unsigned CurInitVal = 0;
  for (RecordDecl::field_iterator Field = SD->field_begin(),
                               FieldEnd = SD->field_end();
       Field != FieldEnd; ++Field) {
    // We're done once we hit the flexible array member
    if (Field->getType()->isIncompleteArrayType())
      break;

    if (Field->isUnnamedBitfield())
      continue;

    // FIXME: volatility
    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, *Field, 0);
    // We never generate write-barriers for initialized fields.
    FieldLoc.setNonGC(true);
    if (CurInitVal < NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(E->getInit(CurInitVal++), FieldLoc,
                                 Field->getType());
    } else {
      // We're out of initializers; default-initialize to null.
      EmitNullInitializationToLValue(FieldLoc, Field->getType());
    }
  }
}
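
A sketch of what the field loop emits; the struct is hypothetical:

// struct T { int a; int b : 3; int c; };
// T t = { 1, 2 };
// a and the named bitfield b receive stores of 1 and 2 via
// EmitInitializationToLValue; c has no explicit initializer, so
// EmitNullInitializationToLValue zero-fills it.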
Example #18
void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
#if 0
  // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
  // (Length of globals? Chunks of zeroed-out space?).
  //
  // If we can, prefer a copy from a global; this is a lot less code for long
  // globals, and it's easier for the current optimizers to analyze.
  if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
    llvm::GlobalVariable* GV =
    new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
                             llvm::GlobalValue::InternalLinkage, C, "");
    EmitFinalDestCopy(E, CGF.MakeAddrLValue(GV, E->getType()));
    return;
  }
#endif
  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  llvm::Value *DestPtr = Dest.getAddr();

  // Handle initialization of an array.
  if (E->getType()->isArrayType()) {
    llvm::PointerType *APType =
      cast<llvm::PointerType>(DestPtr->getType());
    llvm::ArrayType *AType =
      cast<llvm::ArrayType>(APType->getElementType());

    uint64_t NumInitElements = E->getNumInits();

    if (E->getNumInits() > 0) {
      QualType T1 = E->getType();
      QualType T2 = E->getInit(0)->getType();
      if (CGF.getContext().hasSameUnqualifiedType(T1, T2)) {
        EmitAggLoadOfLValue(E->getInit(0));
        return;
      }
    }

    uint64_t NumArrayElements = AType->getNumElements();
    assert(NumInitElements <= NumArrayElements);

    QualType elementType = E->getType().getCanonicalType();
    elementType = CGF.getContext().getQualifiedType(
                    cast<ArrayType>(elementType)->getElementType(),
                    elementType.getQualifiers() + Dest.getQualifiers());

    // DestPtr is an array*.  Construct an elementType* by drilling
    // down a level.
    llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
    llvm::Value *indices[] = { zero, zero };
    llvm::Value *begin =
      Builder.CreateInBoundsGEP(DestPtr, indices, "arrayinit.begin");

    // Exception safety requires us to destroy all the
    // already-constructed members if an initializer throws.
    // For that, we'll need an EH cleanup.
    QualType::DestructionKind dtorKind = elementType.isDestructedType();
    llvm::AllocaInst *endOfInit = 0;
    EHScopeStack::stable_iterator cleanup;
    llvm::Instruction *cleanupDominator = 0;
    if (CGF.needsEHCleanup(dtorKind)) {
      // In principle we could tell the cleanup where we are more
      // directly, but the control flow can get so varied here that it
      // would actually be quite complex.  Therefore we go through an
      // alloca.
      endOfInit = CGF.CreateTempAlloca(begin->getType(),
                                       "arrayinit.endOfInit");
      cleanupDominator = Builder.CreateStore(begin, endOfInit);
      CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
                                           CGF.getDestroyer(dtorKind));
      cleanup = CGF.EHStack.stable_begin();

    // Otherwise, remember that we didn't need a cleanup.
    } else {
      dtorKind = QualType::DK_none;
    }

    llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);

    // The 'current element to initialize'.  The invariants on this
    // variable are complicated.  Essentially, after each iteration of
    // the loop, it points to the last initialized element, except
    // that it points to the beginning of the array before any
    // elements have been initialized.
    llvm::Value *element = begin;

    // Emit the explicit initializers.
    for (uint64_t i = 0; i != NumInitElements; ++i) {
      // Advance to the next element.
      if (i > 0) {
        element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");

        // Tell the cleanup that it needs to destroy up to this
        // element.  TODO: some of these stores can be trivially
        // observed to be unnecessary.
        if (endOfInit) Builder.CreateStore(element, endOfInit);
      }

      LValue elementLV = CGF.MakeAddrLValue(element, elementType);
      EmitInitializationToLValue(E->getInit(i), elementLV);
    }

    // Check whether there's a non-trivial array-fill expression.
    // Note that this will be a CXXConstructExpr even if the element
    // type is an array (or array of array, etc.) of class type.
    Expr *filler = E->getArrayFiller();
    bool hasTrivialFiller = true;
    if (CXXConstructExpr *cons = dyn_cast_or_null<CXXConstructExpr>(filler)) {
      assert(cons->getConstructor()->isDefaultConstructor());
      hasTrivialFiller = cons->getConstructor()->isTrivial();
    }

    // Any remaining elements need to be zero-initialized, possibly
    // using the filler expression.  We can skip this if we're
    // emitting to zeroed memory.
    if (NumInitElements != NumArrayElements &&
        !(Dest.isZeroed() && hasTrivialFiller &&
          CGF.getTypes().isZeroInitializable(elementType))) {

      // Use an actual loop.  This is basically
      //   do { *array++ = filler; } while (array != end);

      // Advance to the start of the rest of the array.
      if (NumInitElements) {
        element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
        if (endOfInit) Builder.CreateStore(element, endOfInit);
      }

      // Compute the end of the array.
      llvm::Value *end = Builder.CreateInBoundsGEP(begin,
                        llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
                                                   "arrayinit.end");

      llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
      llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

      // Jump into the body.
      CGF.EmitBlock(bodyBB);
      llvm::PHINode *currentElement =
        Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
      currentElement->addIncoming(element, entryBB);

      // Emit the actual filler expression.
      LValue elementLV = CGF.MakeAddrLValue(currentElement, elementType);
      if (filler)
        EmitInitializationToLValue(filler, elementLV);
      else
        EmitNullInitializationToLValue(elementLV);

      // Move on to the next element.
      llvm::Value *nextElement =
        Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");

      // Tell the EH cleanup that we finished with the last element.
      if (endOfInit) Builder.CreateStore(nextElement, endOfInit);

      // Leave the loop if we're done.
      llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
                                               "arrayinit.done");
      llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
      Builder.CreateCondBr(done, endBB, bodyBB);
      currentElement->addIncoming(nextElement, Builder.GetInsertBlock());

      CGF.EmitBlock(endBB);
    }

    // Leave the partial-array cleanup if we entered one.
    if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);

    return;
  }

  assert(E->getType()->isRecordType() && "Only support structs/unions here!");

  // Do struct initialization; this code just sets each individual member
  // to the appropriate value.  This makes bitfield support automatic;
  // the disadvantage is that the generated code is more difficult for
  // the optimizer, especially with bitfields.
  unsigned NumInitElements = E->getNumInits();
  RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();
  
  if (record->isUnion()) {
    // Only initialize one field of a union. The field itself is
    // specified by the initializer list.
    if (!E->getInitializedFieldInUnion()) {
      // Empty union; we have nothing to do.

#ifndef NDEBUG
      // Make sure that it's really an empty union and not a failure of
      // semantic analysis.
      for (RecordDecl::field_iterator Field = record->field_begin(),
                                   FieldEnd = record->field_end();
           Field != FieldEnd; ++Field)
        assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
#endif
      return;
    }

    // FIXME: volatility
    FieldDecl *Field = E->getInitializedFieldInUnion();

    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, Field, 0);
    if (NumInitElements) {
      // Store the initializer into the field
      EmitInitializationToLValue(E->getInit(0), FieldLoc);
    } else {
      // Default-initialize to null.
      EmitNullInitializationToLValue(FieldLoc);
    }

    return;
  }

  // We'll need to enter cleanup scopes in case any of the member
  // initializers throw an exception.
  SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
  llvm::Instruction *cleanupDominator = 0;

  // Here we iterate over the fields; this makes it simpler to both
  // default-initialize fields and skip over unnamed fields.
  unsigned curInitIndex = 0;
  for (RecordDecl::field_iterator field = record->field_begin(),
                               fieldEnd = record->field_end();
       field != fieldEnd; ++field) {
    // We're done once we hit the flexible array member.
    if (field->getType()->isIncompleteArrayType())
      break;

    // Always skip anonymous bitfields.
    if (field->isUnnamedBitfield())
      continue;

    // We're done if we reach the end of the explicit initializers, we
    // have a zeroed object, and the rest of the fields are
    // zero-initializable.
    if (curInitIndex == NumInitElements && Dest.isZeroed() &&
        CGF.getTypes().isZeroInitializable(E->getType()))
      break;
    
    // FIXME: volatility
    LValue LV = CGF.EmitLValueForFieldInitialization(DestPtr, *field, 0);
    // We never generate write-barriers for initialized fields.
    LV.setNonGC(true);
    
    if (curInitIndex < NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
    } else {
      // We're out of initializers; default-initialize to null.
      EmitNullInitializationToLValue(LV);
    }

    // Push a destructor if necessary.
    // FIXME: if we have an array of structures, all explicitly
    // initialized, we can end up pushing a linear number of cleanups.
    bool pushedCleanup = false;
    if (QualType::DestructionKind dtorKind
          = field->getType().isDestructedType()) {
      assert(LV.isSimple());
      if (CGF.needsEHCleanup(dtorKind)) {
        if (!cleanupDominator)
          cleanupDominator = CGF.Builder.CreateUnreachable(); // placeholder

        CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
                        CGF.getDestroyer(dtorKind), false);
        cleanups.push_back(CGF.EHStack.stable_begin());
        pushedCleanup = true;
      }
    }
    
    // If the GEP didn't get used because of a dead zero init or something
    // else, clean it up for -O0 builds and general tidiness.
    if (!pushedCleanup && LV.isSimple()) 
      if (llvm::GetElementPtrInst *GEP =
            dyn_cast<llvm::GetElementPtrInst>(LV.getAddress()))
        if (GEP->use_empty())
          GEP->eraseFromParent();
  }

  // Deactivate all the partial cleanups in reverse order, which
  // generally means popping them.
  for (unsigned i = cleanups.size(); i != 0; --i)
    CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator);

  // Destroy the placeholder if we made one.
  if (cleanupDominator)
    cleanupDominator->eraseFromParent();
}
Example #19
void FindBadConstructsConsumer::CheckCtorDtorWeight(
    SourceLocation record_location,
    CXXRecordDecl* record) {
  // We don't handle anonymous structs. If this record doesn't have a
  // name, it's of the form:
  //
  // struct {
  //   ...
  // } name_;
  if (record->getIdentifier() == NULL)
    return;

  // Count the number of templated base classes as a feature of whether the
  // destructor can be inlined.
  int templated_base_classes = 0;
  for (CXXRecordDecl::base_class_const_iterator it = record->bases_begin();
       it != record->bases_end();
       ++it) {
    if (it->getTypeSourceInfo()->getTypeLoc().getTypeLocClass() ==
        TypeLoc::TemplateSpecialization) {
      ++templated_base_classes;
    }
  }

  // Count the number of trivial and non-trivial member variables.
  int trivial_member = 0;
  int non_trivial_member = 0;
  int templated_non_trivial_member = 0;
  for (RecordDecl::field_iterator it = record->field_begin();
       it != record->field_end();
       ++it) {
    CountType(it->getType().getTypePtr(),
              &trivial_member,
              &non_trivial_member,
              &templated_non_trivial_member);
  }

  // Check to see if we need to ban inlined/synthesized constructors. Note
  // that the cutoffs here are kind of arbitrary: scores of 10 or more trigger
  // the warnings below.
  int dtor_score = 0;
  // Deriving from a templated base class shouldn't be enough to trigger
  // the ctor warning, but if you do *anything* else, it should.
  //
  // TODO(erg): This is motivated by templated base classes that don't have
  // any data members. Somehow detect when templated base classes have data
  // members and treat them differently.
  dtor_score += templated_base_classes * 9;
  // Instantiating a template is an insta-hit.
  dtor_score += templated_non_trivial_member * 10;
  // The fourth normal class member should trigger the warning.
  dtor_score += non_trivial_member * 3;

  int ctor_score = dtor_score;
  // You should be able to have 9 ints before we warn you.
  ctor_score += trivial_member;

  if (ctor_score >= 10) {
    if (!record->hasUserDeclaredConstructor()) {
      emitWarning(record_location,
                  "Complex class/struct needs an explicit out-of-line "
                  "constructor.");
    } else {
      // Iterate across all the constructors in this file and yell if we
      // find one that tries to be inline.
      for (CXXRecordDecl::ctor_iterator it = record->ctor_begin();
           it != record->ctor_end();
           ++it) {
        if (it->hasInlineBody()) {
          if (it->isCopyConstructor() &&
              !record->hasUserDeclaredCopyConstructor()) {
            emitWarning(record_location,
                        "Complex class/struct needs an explicit out-of-line "
                        "copy constructor.");
          } else {
            emitWarning(it->getInnerLocStart(),
                        "Complex constructor has an inlined body.");
          }
        }
      }
    }
  }

  // The destructor side is equivalent except that we don't check for
  // trivial members; 20 ints don't need a destructor.
  if (dtor_score >= 10 && !record->hasTrivialDestructor()) {
    if (!record->hasUserDeclaredDestructor()) {
      emitWarning(record_location,
                  "Complex class/struct needs an explicit out-of-line "
                  "destructor.");
    } else if (CXXDestructorDecl* dtor = record->getDestructor()) {
      if (dtor->hasInlineBody()) {
        emitWarning(dtor->getInnerLocStart(),
                    "Complex destructor has an inline body.");
      }
    }
  }
}
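
A worked scoring example against the cutoff of 10; the class shape is hypothetical:

// A class with one templated base (1 * 9 = 9), two non-trivial members
// (2 * 3 = 6), and four ints (4) scores dtor_score = 15 and
// ctor_score = 15 + 4 = 19, so both the constructor and the destructor
// warnings can fire if the bodies are inline.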
Example #20
void APValue::printPretty(raw_ostream &Out, ASTContext &Ctx, QualType Ty) const{
  switch (getKind()) {
  case APValue::Uninitialized:
    Out << "<uninitialized>";
    return;
  case APValue::Int:
    if (Ty->isBooleanType())
      Out << (getInt().getBoolValue() ? "true" : "false");
    else
      Out << getInt();
    return;
  case APValue::Float:
    Out << GetApproxValue(getFloat());
    return;
  case APValue::Vector: {
    Out << '{';
    QualType ElemTy = Ty->getAs<VectorType>()->getElementType();
    getVectorElt(0).printPretty(Out, Ctx, ElemTy);
    for (unsigned i = 1; i != getVectorLength(); ++i) {
      Out << ", ";
      getVectorElt(i).printPretty(Out, Ctx, ElemTy);
    }
    Out << '}';
    return;
  }
  case APValue::ComplexInt:
    Out << getComplexIntReal() << "+" << getComplexIntImag() << "i";
    return;
  case APValue::ComplexFloat:
    Out << GetApproxValue(getComplexFloatReal()) << "+"
        << GetApproxValue(getComplexFloatImag()) << "i";
    return;
  case APValue::LValue: {
    LValueBase Base = getLValueBase();
    if (!Base) {
      Out << "0";
      return;
    }

    bool IsReference = Ty->isReferenceType();
    QualType InnerTy
      = IsReference ? Ty.getNonReferenceType() : Ty->getPointeeType();
    if (InnerTy.isNull())
      InnerTy = Ty;

    if (!hasLValuePath()) {
      // No lvalue path: just print the offset.
      CharUnits O = getLValueOffset();
      CharUnits S = Ctx.getTypeSizeInChars(InnerTy);
      if (!O.isZero()) {
        if (IsReference)
          Out << "*(";
        if (O % S) {
          Out << "(char*)";
          S = CharUnits::One();
        }
        Out << '&';
      } else if (!IsReference)
        Out << '&';

      if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>())
        Out << *VD;
      else
        Base.get<const Expr*>()->printPretty(Out, 0, Ctx.getPrintingPolicy());
      if (!O.isZero()) {
        Out << " + " << (O / S);
        if (IsReference)
          Out << ')';
      }
      return;
    }

    // We have an lvalue path. Print it out nicely.
    if (!IsReference)
      Out << '&';
    else if (isLValueOnePastTheEnd())
      Out << "*(&";

    QualType ElemTy;
    if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>()) {
      Out << *VD;
      ElemTy = VD->getType();
    } else {
      const Expr *E = Base.get<const Expr*>();
      E->printPretty(Out, 0, Ctx.getPrintingPolicy());
      ElemTy = E->getType();
    }

    ArrayRef<LValuePathEntry> Path = getLValuePath();
    const CXXRecordDecl *CastToBase = 0;
    for (unsigned I = 0, N = Path.size(); I != N; ++I) {
      if (ElemTy->getAs<RecordType>()) {
        // The lvalue refers to a class type, so the next path entry is a base
        // or member.
        const Decl *BaseOrMember =
        BaseOrMemberType::getFromOpaqueValue(Path[I].BaseOrMember).getPointer();
        if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(BaseOrMember)) {
          CastToBase = RD;
          ElemTy = Ctx.getRecordType(RD);
        } else {
          const ValueDecl *VD = cast<ValueDecl>(BaseOrMember);
          Out << ".";
          if (CastToBase)
            Out << *CastToBase << "::";
          Out << *VD;
          ElemTy = VD->getType();
        }
      } else {
        // The lvalue must refer to an array.
        Out << '[' << Path[I].ArrayIndex << ']';
        ElemTy = Ctx.getAsArrayType(ElemTy)->getElementType();
      }
    }

    // Handle formatting of one-past-the-end lvalues.
    if (isLValueOnePastTheEnd()) {
      // FIXME: If CastToBase is non-0, we should prefix the output with
      // "(CastToBase*)".
      Out << " + 1";
      if (IsReference)
        Out << ')';
    }
    return;
  }
  case APValue::Array: {
    const ArrayType *AT = Ctx.getAsArrayType(Ty);
    QualType ElemTy = AT->getElementType();
    Out << '{';
    if (unsigned N = getArrayInitializedElts()) {
      getArrayInitializedElt(0).printPretty(Out, Ctx, ElemTy);
      for (unsigned I = 1; I != N; ++I) {
        Out << ", ";
        if (I == 10) {
          // Avoid printing out the entire contents of large arrays.
          Out << "...";
          break;
        }
        getArrayInitializedElt(I).printPretty(Out, Ctx, ElemTy);
      }
    }
    Out << '}';
    return;
  }
  case APValue::Struct: {
    Out << '{';
    const RecordDecl *RD = Ty->getAs<RecordType>()->getDecl();
    bool First = true;
    if (unsigned N = getStructNumBases()) {
      const CXXRecordDecl *CD = cast<CXXRecordDecl>(RD);
      CXXRecordDecl::base_class_const_iterator BI = CD->bases_begin();
      for (unsigned I = 0; I != N; ++I, ++BI) {
        assert(BI != CD->bases_end());
        if (!First)
          Out << ", ";
        getStructBase(I).printPretty(Out, Ctx, BI->getType());
        First = false;
      }
    }
    for (RecordDecl::field_iterator FI = RD->field_begin();
         FI != RD->field_end(); ++FI) {
      // Skip unnamed bitfields before printing the separator, so a skipped
      // field doesn't leave a dangling ", ".
      if (FI->isUnnamedBitfield()) continue;
      if (!First)
        Out << ", ";
      getStructField(FI->getFieldIndex()).
        printPretty(Out, Ctx, FI->getType());
      First = false;
    }
    Out << '}';
    return;
  }
  case APValue::Union:
    Out << '{';
    if (const FieldDecl *FD = getUnionField()) {
      Out << "." << *FD << " = ";
      getUnionValue().printPretty(Out, Ctx, FD->getType());
    }
    Out << '}';
    return;
  case APValue::MemberPointer:
    // FIXME: This is not enough to unambiguously identify the member in a
    // multiple-inheritance scenario.
    if (const ValueDecl *VD = getMemberPointerDecl()) {
      Out << '&' << *cast<CXXRecordDecl>(VD->getDeclContext()) << "::" << *VD;
      return;
    }
    Out << "0";
    return;
  case APValue::AddrLabelDiff:
    Out << "&&" << getAddrLabelDiffLHS()->getLabel()->getName();
    Out << " - ";
    Out << "&&" << getAddrLabelDiffRHS()->getLabel()->getName();
    return;
  }
  llvm_unreachable("Unknown APValue kind!");
}
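
Hedged sample output; the exact text depends on the printing policy and the value kind:

// struct P { int x; int y; };  constexpr P p = {1, 2};   (hypothetical)
// The Struct case prints "{1, 2}"; an LValue such as &p.y walks the lvalue
// path and prints "&p.y"; a one-past-the-end lvalue appends " + 1".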