bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
  assert(!D->isUnion() && "Can't call LayoutFields on a union!");
  assert(!Alignment.isZero() && "Did not set alignment!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D);
  if (RD)
    if (!LayoutNonVirtualBases(RD, Layout))
      return false;

  unsigned FieldNo = 0;
  
  for (RecordDecl::field_iterator FI = D->field_begin(), FE = D->field_end();
       FI != FE; ++FI, ++FieldNo) {
    FieldDecl *FD = *FI;

    // If this field is a bitfield, lay out the whole run of consecutive
    // non-zero-length bitfields at once; they will all share one storage
    // unit. Zero-length bitfields terminate a run and get no storage of
    // their own.
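    // For example, 'int a : 3; int b : 10;' forms a single run mapped to one
    // storage field in the LLVM struct.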
    if (FD->isBitField()) {
      // A zero-width bitfield gets no storage of its own; skip it.
      if (FD->getBitWidthValue(Types.getContext()) == 0)
        continue;

      // Lay out this run of bitfields.
      if (!LayoutBitfields(Layout, FieldNo, FI, FE)) {
        assert(!Packed &&
               "Could not lay out bitfields even with a packed LLVM struct!");
        return false;
      }
      assert(FI != FE && "Advanced past the last bitfield");
      continue;
    }

    if (!LayoutField(FD, Layout.getFieldOffset(FieldNo))) {
      assert(!Packed &&
             "Could not lay out fields even with a packed LLVM struct!");
      return false;
    }
  }

  if (RD) {
    // We've laid out the non-virtual bases and the fields; now compute the
    // type used when this record is a non-virtual base.
    if (!ComputeNonVirtualBaseType(RD)) {
      assert(!Packed && "Could not layout even with a packed LLVM struct!");
      return false;
    }

    // Lay out the virtual bases.  The MS ABI uses a different
    // algorithm here due to the lack of primary virtual bases.
    if (Types.getTarget().getCXXABI().hasPrimaryVBases()) {
      RD->getIndirectPrimaryBases(IndirectPrimaryBases);
      if (Layout.isPrimaryBaseVirtual())
        IndirectPrimaryBases.insert(Layout.getPrimaryBase());

      if (!LayoutVirtualBases(RD, Layout))
        return false;
    } else {
      if (!MSLayoutVirtualBases(RD, Layout))
        return false;
    }
  }
  
  // Append tail padding if necessary.
  AppendTailPadding(Layout.getSize());

  return true;
}

/// \brief Lay out the run of bitfields starting at \p FI as one contiguous
/// unit of storage.
///
/// On success, \p FI is advanced to the last bitfield laid out and
/// \p FirstFieldNo is updated to its field index so that the caller's loop
/// increment steps past the run. Returns false if the run cannot be laid out
/// without switching to a packed LLVM struct.
bool CGRecordLayoutBuilder::LayoutBitfields(const ASTRecordLayout &Layout,
                                            unsigned &FirstFieldNo,
                                            RecordDecl::field_iterator &FI,
                                            RecordDecl::field_iterator FE) {
  assert(FI != FE);
  uint64_t FirstFieldOffset = Layout.getFieldOffset(FirstFieldNo);
  uint64_t NextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);

  unsigned CharAlign = Types.getTarget().getCharAlign();
  assert(FirstFieldOffset % CharAlign == 0 &&
         "First field offset is misaligned");
  CharUnits FirstFieldOffsetInBytes
    = Types.getContext().toCharUnitsFromBits(FirstFieldOffset);

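  // The bitfield storage can only be assumed to be aligned to the largest
  // power of two that divides both the record's alignment and the byte offset
  // at which the run starts (llvm::MinAlign of the two).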
  unsigned StorageAlignment
    = llvm::MinAlign(Alignment.getQuantity(),
                     FirstFieldOffsetInBytes.getQuantity());

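  // If the run's AST offset falls inside storage already emitted for the last
  // laid-out base, shrink that base's storage so the bits can be packed into
  // its tail padding.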
  if (FirstFieldOffset < NextFieldOffsetInBits) {
    CharUnits FieldOffsetInCharUnits =
      Types.getContext().toCharUnitsFromBits(FirstFieldOffset);

    // Try to resize the last base field.
    if (!ResizeLastBaseFieldIfNecessary(FieldOffsetInCharUnits))
      llvm_unreachable("We must be able to resize the last base if we need to "
                       "pack bits into it.");

    NextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
    assert(FirstFieldOffset >= NextFieldOffsetInBits &&
           "Resizing the last base field did not make room for the bitfields");
  }

  // Append padding if necessary.
  AppendPadding(Types.getContext().toCharUnitsFromBits(FirstFieldOffset),
                CharUnits::One());

  // Find the last bitfield in a contiguous run of bitfields.
  RecordDecl::field_iterator BFI = FI;
  unsigned LastFieldNo = FirstFieldNo;
  uint64_t NextContiguousFieldOffset = FirstFieldOffset;
  for (RecordDecl::field_iterator FJ = FI;
       (FJ != FE && (*FJ)->isBitField() &&
        NextContiguousFieldOffset == Layout.getFieldOffset(LastFieldNo) &&
        (*FJ)->getBitWidthValue(Types.getContext()) != 0); FI = FJ++) {
    NextContiguousFieldOffset += (*FJ)->getBitWidthValue(Types.getContext());
    ++LastFieldNo;

    // We must use a packed struct for fields marked 'packed', and also for
    // unnamed bitfields, since they do not affect the struct's alignment.
    if (!Packed && ((*FJ)->hasAttr<PackedAttr>() || !(*FJ)->getDeclName()))
      return false;
  }
  RecordDecl::field_iterator BFE = llvm::next(FI);
  --LastFieldNo;
  assert(LastFieldNo >= FirstFieldNo && "Empty run of contiguous bitfields");
  FieldDecl *LastFD = *FI;

  // Find the last bitfield's offset, add its size, and round it up to the
  // character alignment to compute the storage required.
  uint64_t LastFieldOffset = Layout.getFieldOffset(LastFieldNo);
  uint64_t LastFieldSize = LastFD->getBitWidthValue(Types.getContext());
  uint64_t TotalBits = (LastFieldOffset + LastFieldSize) - FirstFieldOffset;
  CharUnits StorageBytes = Types.getContext().toCharUnitsFromBits(
    llvm::RoundUpToAlignment(TotalBits, CharAlign));
  uint64_t StorageBits = Types.getContext().toBits(StorageBytes);

  // Grow the storage to encompass any known padding in the layout when doing
  // so will make the storage size a power of two. There are two cases when we
  // can do this. The first is when we have a subsequent field and can widen up
  // to its offset. The second is when the data size of the AST record layout
  // is past the end of the current storage; that happens when a struct has
  // tail padding which no field of a derived class can be packed into.
  //
  // Note that we widen the storage as much as possible here to express the
  // maximum latitude the language provides, and rely on the backend to lower
  // these accesses, in conjunction with shifts and masks, to narrower
  // operations where beneficial.
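  //
  // For example, a run needing 24 bits of storage on a target with 64-bit
  // 'long' is widened to 32 bits, provided the next field's offset (or the
  // record's data size) leaves at least 32 bits of room.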
  uint64_t EndOffset = Types.getContext().toBits(Layout.getDataSize());
  if (BFE != FE)
    // If there are more fields to be laid out, the offset at the end of the
    // bitfield is the offset of the next field in the record.
    EndOffset = Layout.getFieldOffset(LastFieldNo + 1);
  assert(EndOffset >= (FirstFieldOffset + TotalBits) &&
         "End offset is not past the end of the known storage bits.");
  uint64_t SpaceBits = EndOffset - FirstFieldOffset;
  uint64_t LongBits = Types.getTarget().getLongWidth();
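  // Keep whole multiples of 'long' as-is and round the remaining bits up to a
  // power of two; NextPowerOf2(x - 1) rounds x up while leaving exact powers
  // of two unchanged.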
  uint64_t WidenedBits = (StorageBits / LongBits) * LongBits +
                         llvm::NextPowerOf2(StorageBits % LongBits - 1);
  assert(WidenedBits >= StorageBits && "Widening shrunk the bits!");
  if (WidenedBits <= SpaceBits) {
    StorageBits = WidenedBits;
    StorageBytes = Types.getContext().toCharUnitsFromBits(StorageBits);
    assert(StorageBits == (uint64_t)Types.getContext().toBits(StorageBytes));
  }

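  // Allocate the storage in the LLVM struct and remember its field index;
  // every bitfield in the run is mapped to this single field.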
  unsigned FieldIndex = FieldTypes.size();
  AppendBytes(StorageBytes);

  // Now walk the bitfields, associating each one with this storage field and
  // building up the bitfield-specific info.
  unsigned FieldNo = FirstFieldNo;
  for (; BFI != BFE; ++BFI, ++FieldNo) {
    FieldDecl *FD = *BFI;
    uint64_t FieldOffset = Layout.getFieldOffset(FieldNo) - FirstFieldOffset;
    uint64_t FieldSize = FD->getBitWidthValue(Types.getContext());
    Fields[FD] = FieldIndex;
    BitFields[FD] = CGBitFieldInfo::MakeInfo(Types, FD, FieldOffset, FieldSize,
                                             StorageBits, StorageAlignment);
  }
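  // FI already points at the last bitfield laid out; updating FirstFieldNo to
  // match lets the caller's loop increment step past the run.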
  FirstFieldNo = LastFieldNo;
  return true;
}