void SwiftAggLowering::addTypedData(const RecordDecl *record, CharUnits begin,
                                    const ASTRecordLayout &layout) {
  // Unions are a special case.
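  // Every member of a union starts at the same offset, so each field is
  // added at 'begin', and bit-fields are treated as starting at bit 0
  // within that shared storage.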
  if (record->isUnion()) {
    for (auto field : record->fields()) {
      if (field->isBitField()) {
        addBitFieldData(field, begin, 0);
      } else {
        addTypedData(field->getType(), begin);
      }
    }
    return;
  }

  // Note that correctness does not rely on us adding things in
  // their actual order of layout; it's just somewhat more efficient
  // for the builder.

  // With that in mind, add "early" C++ data.
  auto cxxRecord = dyn_cast<CXXRecordDecl>(record);
  if (cxxRecord) {
    //   - a v-table pointer, if the class adds its own
    if (layout.hasOwnVFPtr()) {
      addTypedData(CGM.Int8PtrTy, begin);
    }

    //   - non-virtual bases
    for (auto &baseSpecifier : cxxRecord->bases()) {
      if (baseSpecifier.isVirtual()) continue;

      auto baseRecord = baseSpecifier.getType()->getAsCXXRecordDecl();
      addTypedData(baseRecord, begin + layout.getBaseClassOffset(baseRecord));
    }

    //   - a vbptr if the class adds its own
    if (layout.hasOwnVBPtr()) {
      addTypedData(CGM.Int8PtrTy, begin + layout.getVBPtrOffset());
    }
  }

  // Add fields.
  for (auto field : record->fields()) {
    auto fieldOffsetInBits = layout.getFieldOffset(field->getFieldIndex());
    if (field->isBitField()) {
      addBitFieldData(field, begin, fieldOffsetInBits);
    } else {
      addTypedData(field->getType(),
              begin + CGM.getContext().toCharUnitsFromBits(fieldOffsetInBits));
    }
  }

  // Add "late" C++ data:
  if (cxxRecord) {
    //   - virtual bases
    for (auto &vbaseSpecifier : cxxRecord->vbases()) {
      auto baseRecord = vbaseSpecifier.getType()->getAsCXXRecordDecl();
      addTypedData(baseRecord, begin + layout.getVBaseClassOffset(baseRecord));
    }
  }
}
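
// Illustrative sketch (hypothetical type, not taken from the source): for
//
//   struct S { double d; int i; };
//
// addTypedData(S, begin) adds the double at 'begin' and the int at 'begin'
// plus its field offset from the AST record layout (8 bytes on typical
// 64-bit targets). For a C++ class, an own v-table pointer and the
// non-virtual bases are added first and any virtual bases last, mirroring
// the walk above.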

/// \brief Lay out the contiguous run of bit-fields beginning at \p FI as a
/// single span of storage.
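///
/// \param FirstFieldNo [in,out] index of the first bit-field in the run; on
///        successful return, the index of the last bit-field laid out.
/// \param FI [in,out] iterator at the first bit-field in the run; on
///        successful return it points at the last bit-field laid out.
/// \returns false if these fields' offsets can only be honored by a packed
///          struct, in which case the caller must redo the layout as packed.
///
/// Illustrative walk-through (hypothetical declaration, assuming 8-bit chars
/// and an Itanium-like layout where 'c' starts at bit 32):
///
///   struct S { unsigned a : 3; unsigned b : 9; int c; };
///
/// The run {a, b} covers bits [0, 12), which rounds up to 16 bits of
/// storage; 16 is already a power of two, so two bytes are appended and both
/// 'a' and 'b' are recorded as pieces of that single storage unit, at bit
/// offsets 0 and 3.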
bool CGRecordLayoutBuilder::LayoutBitfields(const ASTRecordLayout &Layout,
                                            unsigned &FirstFieldNo,
                                            RecordDecl::field_iterator &FI,
                                            RecordDecl::field_iterator FE) {
  assert(FI != FE);
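  // FirstFieldOffset is the bit offset of the first bit-field in the run;
  // NextFieldOffset is the builder's running end-of-storage position, kept
  // in CharUnits and converted to bits here for comparison.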
  uint64_t FirstFieldOffset = Layout.getFieldOffset(FirstFieldNo);
  uint64_t NextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);

  unsigned CharAlign = Types.getTarget().getCharAlign();
  assert(FirstFieldOffset % CharAlign == 0 &&
         "First field offset is misaligned");
  CharUnits FirstFieldOffsetInBytes
    = Types.getContext().toCharUnitsFromBits(FirstFieldOffset);

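  // The storage can be accessed with at most the alignment guaranteed by
  // both the record's alignment and the byte offset at which it begins.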
  unsigned StorageAlignment
    = llvm::MinAlign(Alignment.getQuantity(),
                     FirstFieldOffsetInBytes.getQuantity());

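  // If the run begins before the builder's current offset, its first bits
  // have to land in the tail padding of the last base that was laid out;
  // shrink that base's storage so the two do not overlap.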
  if (FirstFieldOffset < NextFieldOffsetInBits) {
    CharUnits FieldOffsetInCharUnits =
      Types.getContext().toCharUnitsFromBits(FirstFieldOffset);

    // Try to resize the last base field.
    if (!ResizeLastBaseFieldIfNecessary(FieldOffsetInCharUnits))
      llvm_unreachable("We must be able to resize the last base if we need to "
                       "pack bits into it.");

    NextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
    assert(FirstFieldOffset >= NextFieldOffsetInBits);
  }

  // Append padding if necessary.
  AppendPadding(Types.getContext().toCharUnitsFromBits(FirstFieldOffset),
                CharUnits::One());

  // Find the last bitfield in a contiguous run of bitfields.
  RecordDecl::field_iterator BFI = FI;
  unsigned LastFieldNo = FirstFieldNo;
  uint64_t NextContiguousFieldOffset = FirstFieldOffset;
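  // Walk forward while each field is a non-zero-width bit-field that begins
  // exactly where the previous one ended. FI trails one step behind FJ, so
  // when the loop exits FI rests on the last bit-field of the run.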
  for (RecordDecl::field_iterator FJ = FI;
       (FJ != FE && (*FJ)->isBitField() &&
        NextContiguousFieldOffset == Layout.getFieldOffset(LastFieldNo) &&
        (*FJ)->getBitWidthValue(Types.getContext()) != 0); FI = FJ++) {
    NextContiguousFieldOffset += (*FJ)->getBitWidthValue(Types.getContext());
    ++LastFieldNo;

    // We must use a packed struct for explicitly packed fields and for
    // unnamed bit-fields, since neither affects the struct's alignment;
    // returning false makes the caller redo the layout as packed.
    if (!Packed && ((*FJ)->hasAttr<PackedAttr>() || !(*FJ)->getDeclName()))
      return false;
  }
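  // The loop leaves FI on the last bit-field of the run and LastFieldNo one
  // past its index; adjust them so [BFI, BFE) and [FirstFieldNo, LastFieldNo]
  // both describe exactly the fields in the run.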
  RecordDecl::field_iterator BFE = llvm::next(FI);
  --LastFieldNo;
  assert(LastFieldNo >= FirstFieldNo && "Empty run of contiguous bitfields");
  FieldDecl *LastFD = *FI;

  // Find the last bitfield's offset, add its size, and round it up to the
  // character alignment to compute the storage required.
  uint64_t LastFieldOffset = Layout.getFieldOffset(LastFieldNo);
  uint64_t LastFieldSize = LastFD->getBitWidthValue(Types.getContext());
  uint64_t TotalBits = (LastFieldOffset + LastFieldSize) - FirstFieldOffset;
  CharUnits StorageBytes = Types.getContext().toCharUnitsFromBits(
    llvm::RoundUpToAlignment(TotalBits, CharAlign));
  uint64_t StorageBits = Types.getContext().toBits(StorageBytes);

  // Grow the storage to encompass any known padding in the layout when doing
  // so will make the storage a power-of-two. There are two cases when we can
  // do this. The first is when we have a subsequent field and can widen up to
  // its offset. The second is when the data size of the AST record layout is
  // past the end of the current storage. The latter is true when there is tail
  // padding on a struct and no members of a derived class can be packed
  // into it.
  //
  // Note that we widen the storage as much as possible here to express the
  // maximum latitude the language provides, and rely on the backend to lower
  // these in conjunction with shifts and masks to narrower operations where
  // beneficial.
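  //
  // For example (hypothetically assuming 8-bit chars and 32-bit longs), a
  // run occupying 17 bits rounds up to 24 bits of storage; if the next field
  // does not begin for another 32 bits, the storage can be widened to a full
  // 32 bits.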
  uint64_t EndOffset = Types.getContext().toBits(Layout.getDataSize());
  if (BFE != FE)
    // If there are more fields to be laid out, the offset at the end of the
    // bitfield is the offset of the next field in the record.
    EndOffset = Layout.getFieldOffset(LastFieldNo + 1);
  assert(EndOffset >= (FirstFieldOffset + TotalBits) &&
         "End offset is not past the end of the known storage bits.");
  uint64_t SpaceBits = EndOffset - FirstFieldOffset;
  uint64_t LongBits = Types.getTarget().getLongWidth();
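  // Keep any whole multiples of a long and round the leftover bits up to the
  // next power of two (e.g. 24 bits widen to 32, 48 bits stay 48, and an
  // exact multiple of the long width is left unchanged).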
  uint64_t WidenedBits = (StorageBits / LongBits) * LongBits +
                         llvm::NextPowerOf2(StorageBits % LongBits - 1);
  assert(WidenedBits >= StorageBits && "Widening shrunk the bits!");
  if (WidenedBits <= SpaceBits) {
    StorageBits = WidenedBits;
    StorageBytes = Types.getContext().toCharUnitsFromBits(StorageBits);
    assert(StorageBits == (uint64_t)Types.getContext().toBits(StorageBytes));
  }

  unsigned FieldIndex = FieldTypes.size();
  AppendBytes(StorageBytes);

  // Now walk the bit-fields, associating each with this storage field and
  // building up the bit-field-specific info.
  unsigned FieldNo = FirstFieldNo;
  for (; BFI != BFE; ++BFI, ++FieldNo) {
    FieldDecl *FD = *BFI;
    uint64_t FieldOffset = Layout.getFieldOffset(FieldNo) - FirstFieldOffset;
    uint64_t FieldSize = FD->getBitWidthValue(Types.getContext());
    Fields[FD] = FieldIndex;
    BitFields[FD] = CGBitFieldInfo::MakeInfo(Types, FD, FieldOffset, FieldSize,
                                             StorageBits, StorageAlignment);
  }
  FirstFieldNo = LastFieldNo;
  return true;
}