Example #1
/// LayoutVirtualBases - lay out the virtual bases of a record decl.
bool
CGRecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
                                          const ASTRecordLayout &Layout) {
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    const CXXRecordDecl *BaseDecl = 
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We only want to lay out virtual bases that aren't indirect primary bases
    // of some other base.
    if (I->isVirtual() && !IndirectPrimaryBases.count(BaseDecl)) {
      // Only lay out the base once.
      if (!LaidOutVirtualBases.insert(BaseDecl))
        continue;

      CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
      if (!LayoutVirtualBase(BaseDecl, vbaseOffset))
        return false;
    }

    if (!BaseDecl->getNumVBases()) {
      // This base isn't interesting since it doesn't have any virtual bases.
      continue;
    }
    
    if (!LayoutVirtualBases(BaseDecl, Layout))
      return false;
  }
  return true;
}
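The IndirectPrimaryBases check above matters when a virtual base has already been folded into another base as that base's primary base. Below is a minimal sketch of that situation; the types are hypothetical and the Itanium C++ ABI is assumed.

// A is nearly empty, so it may be chosen as B's primary base; when C
// then reuses B as its own primary base, A becomes an *indirect*
// primary base of C and must not be laid out again as a separate
// virtual base (it already shares B's vtable pointer).
struct A { virtual ~A() {} };              // nearly-empty, dynamic
struct B : virtual A { virtual ~B() {} };  // A can be B's primary base
struct C : B { virtual ~C() {} };          // B is C's primary base

// Under the assumptions above, A occupies no extra storage in C:
static_assert(sizeof(C) == sizeof(B), "A is folded into B's storage");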
Example #2
bool
CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
                                             const ASTRecordLayout &Layout) {
  const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();

  // If we have a primary base, lay it out first.
  if (PrimaryBase) {
    if (!Layout.isPrimaryBaseVirtual()) {
      if (!LayoutNonVirtualBase(PrimaryBase, CharUnits::Zero()))
        return false;
    } else {
      if (!LayoutVirtualBase(PrimaryBase, CharUnits::Zero()))
        return false;
    }

  // Otherwise, add a vtable / vf-table if the layout says to do so.
  } else if (Layout.hasOwnVFPtr()) {
    llvm::Type *FunctionType =
      llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
                              /*isVarArg=*/true);
    llvm::Type *VTableTy = FunctionType->getPointerTo();

    if (getTypeAlignment(VTableTy) > Alignment) {
      // FIXME: Should we allow this to happen in Sema?
      assert(!Packed && "Alignment is wrong even with packed struct!");
      return false;
    }

    assert(NextFieldOffset.isZero() &&
           "VTable pointer must come first!");
    AppendField(CharUnits::Zero(), VTableTy->getPointerTo());
  }

  // Layout the non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    if (I->isVirtual())
      continue;

    const CXXRecordDecl *BaseDecl = 
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We've already laid out the primary base.
    if (BaseDecl == PrimaryBase && !Layout.isPrimaryBaseVirtual())
      continue;

    if (!LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffset(BaseDecl)))
      return false;
  }

  // Add a vb-table pointer if the layout insists.
  if (Layout.hasOwnVBPtr()) {
    CharUnits VBPtrOffset = Layout.getVBPtrOffset();
    llvm::Type *Vbptr = llvm::Type::getInt32PtrTy(Types.getLLVMContext());
    AppendPadding(VBPtrOffset, getTypeAlignment(Vbptr));
    AppendField(VBPtrOffset, Vbptr);
  }

  return true;
}
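To make the control flow above concrete, here is a sketch of the order the function produces for a simple hierarchy; the types are hypothetical and the Itanium C++ ABI is assumed.

// B1 is dynamic and is D's first non-virtual base, so it is selected
// as the primary base and laid out at offset 0, sharing D's vtable
// pointer; the loop then skips it and lays B2 out at
// Layout.getBaseClassOffset(B2).
struct B1 { virtual void f(); long a; };
struct B2 { long b; };
struct D : B1, B2 { long c; };
// Expected subobject order inside D: [B1: vptr, a][B2: b][c]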
Example #3
void CGRecordLayoutBuilder::LayoutBases(const CXXRecordDecl *RD,
                                        const ASTRecordLayout &Layout) {
  // Check if we need to add a vtable pointer.
  if (RD->isDynamicClass() && !Layout.getPrimaryBase()) {
    const llvm::Type *Int8PtrTy = 
      llvm::Type::getInt8PtrTy(Types.getLLVMContext());
    
    assert(NextFieldOffsetInBytes == 0 &&
           "Vtable pointer must come first!");
    AppendField(NextFieldOffsetInBytes, Int8PtrTy->getPointerTo());
  }
}
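This branch fires for a dynamic class that has no primary base from which to inherit a vtable pointer. A hypothetical example:

// E declares its own virtual function and has no bases, so there is no
// primary base whose vtable pointer could be reused; LayoutBases
// appends an i8** field at offset 0 before any other field.
struct E { virtual void g(); int x; };
// Resulting LLVM field list for E (sketch): { i8**, i32, <tail padding> }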
Example #4
void SwiftAggLowering::addTypedData(const RecordDecl *record, CharUnits begin,
                                    const ASTRecordLayout &layout) {
  // Unions are a special case.
  if (record->isUnion()) {
    for (auto field : record->fields()) {
      if (field->isBitField()) {
        addBitFieldData(field, begin, 0);
      } else {
        addTypedData(field->getType(), begin);
      }
    }
    return;
  }

  // Note that correctness does not rely on us adding things in
  // their actual order of layout; it's just somewhat more efficient
  // for the builder.

  // With that in mind, add "early" C++ data.
  auto cxxRecord = dyn_cast<CXXRecordDecl>(record);
  if (cxxRecord) {
    //   - a v-table pointer, if the class adds its own
    if (layout.hasOwnVFPtr()) {
      addTypedData(CGM.Int8PtrTy, begin);
    }

    //   - non-virtual bases
    for (auto &baseSpecifier : cxxRecord->bases()) {
      if (baseSpecifier.isVirtual()) continue;

      auto baseRecord = baseSpecifier.getType()->getAsCXXRecordDecl();
      addTypedData(baseRecord, begin + layout.getBaseClassOffset(baseRecord));
    }

    //   - a vbptr if the class adds its own
    if (layout.hasOwnVBPtr()) {
      addTypedData(CGM.Int8PtrTy, begin + layout.getVBPtrOffset());
    }
  }

  // Add fields.
  for (auto field : record->fields()) {
    auto fieldOffsetInBits = layout.getFieldOffset(field->getFieldIndex());
    if (field->isBitField()) {
      addBitFieldData(field, begin, fieldOffsetInBits);
    } else {
      addTypedData(field->getType(),
              begin + CGM.getContext().toCharUnitsFromBits(fieldOffsetInBits));
    }
  }

  // Add "late" C++ data:
  if (cxxRecord) {
    //   - virtual bases
    for (auto &vbaseSpecifier : cxxRecord->vbases()) {
      auto baseRecord = vbaseSpecifier.getType()->getAsCXXRecordDecl();
      addTypedData(baseRecord, begin + layout.getVBaseClassOffset(baseRecord));      
    }
  }
}
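The union branch at the top is worth a concrete check: every member of a union starts at the union's own offset, which is why addTypedData passes the same begin for each field instead of a per-field offset from the layout. A minimal illustration with a hypothetical union:

#include <cstddef>

union U { int i; float f; char c[8]; };

// All members alias the start of the union, so each is added at `begin`.
static_assert(offsetof(U, i) == 0 && offsetof(U, f) == 0 &&
              offsetof(U, c) == 0, "union members share offset 0");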
Example #5
bool
CGRecordLayoutBuilder::MSLayoutVirtualBases(const CXXRecordDecl *RD,
                                            const ASTRecordLayout &Layout) {
  if (!RD->getNumVBases())
    return true;

  // The vbases list is uniqued and ordered by a depth-first
  // traversal, which is what we need here.
  for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
        E = RD->vbases_end(); I != E; ++I) {

    const CXXRecordDecl *BaseDecl = 
      cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());

    CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
    if (!LayoutVirtualBase(BaseDecl, vbaseOffset))
      return false;
  }
  return true;
}
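The comment about the vbases list being uniqued is easiest to see on a diamond; the types below are hypothetical.

// A is reachable as a virtual base through both B and C, but
// D->vbases() yields it exactly once (depth-first, uniqued), so the
// loop above lays A out a single time at Layout.getVBaseClassOffset(A).
struct A { int a; };
struct B : virtual A { int b; };
struct C : virtual A { int c; };
struct D : B, C { int d; };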
Example #6
/// \brief Lay out the range of bitfields from BFI to BFE as contiguous storage.
bool CGRecordLayoutBuilder::LayoutBitfields(const ASTRecordLayout &Layout,
                                            unsigned &FirstFieldNo,
                                            RecordDecl::field_iterator &FI,
                                            RecordDecl::field_iterator FE) {
  assert(FI != FE);
  uint64_t FirstFieldOffset = Layout.getFieldOffset(FirstFieldNo);
  uint64_t NextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);

  unsigned CharAlign = Types.getTarget().getCharAlign();
  assert(FirstFieldOffset % CharAlign == 0 &&
         "First field offset is misaligned");
  CharUnits FirstFieldOffsetInBytes
    = Types.getContext().toCharUnitsFromBits(FirstFieldOffset);

  unsigned StorageAlignment
    = llvm::MinAlign(Alignment.getQuantity(),
                     FirstFieldOffsetInBytes.getQuantity());

  if (FirstFieldOffset < NextFieldOffsetInBits) {
    CharUnits FieldOffsetInCharUnits =
      Types.getContext().toCharUnitsFromBits(FirstFieldOffset);

    // Try to resize the last base field.
    if (!ResizeLastBaseFieldIfNecessary(FieldOffsetInCharUnits))
      llvm_unreachable("We must be able to resize the last base if we need to "
                       "pack bits into it.");

    NextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
    assert(FirstFieldOffset >= NextFieldOffsetInBits);
  }

  // Append padding if necessary.
  AppendPadding(Types.getContext().toCharUnitsFromBits(FirstFieldOffset),
                CharUnits::One());

  // Find the last bitfield in a contiguous run of bitfields.
  RecordDecl::field_iterator BFI = FI;
  unsigned LastFieldNo = FirstFieldNo;
  uint64_t NextContiguousFieldOffset = FirstFieldOffset;
  for (RecordDecl::field_iterator FJ = FI;
       (FJ != FE && (*FJ)->isBitField() &&
        NextContiguousFieldOffset == Layout.getFieldOffset(LastFieldNo) &&
        (*FJ)->getBitWidthValue(Types.getContext()) != 0); FI = FJ++) {
    NextContiguousFieldOffset += (*FJ)->getBitWidthValue(Types.getContext());
    ++LastFieldNo;

    // We must use packed structs for packed fields, and also unnamed bit
    // fields since they don't affect the struct alignment.
    if (!Packed && ((*FJ)->hasAttr<PackedAttr>() || !(*FJ)->getDeclName()))
      return false;
  }
  RecordDecl::field_iterator BFE = llvm::next(FI);
  --LastFieldNo;
  assert(LastFieldNo >= FirstFieldNo && "Empty run of contiguous bitfields");
  FieldDecl *LastFD = *FI;

  // Find the last bitfield's offset, add its size, and round it up to the
  // character alignment to compute the storage required.
  uint64_t LastFieldOffset = Layout.getFieldOffset(LastFieldNo);
  uint64_t LastFieldSize = LastFD->getBitWidthValue(Types.getContext());
  uint64_t TotalBits = (LastFieldOffset + LastFieldSize) - FirstFieldOffset;
  CharUnits StorageBytes = Types.getContext().toCharUnitsFromBits(
    llvm::RoundUpToAlignment(TotalBits, CharAlign));
  uint64_t StorageBits = Types.getContext().toBits(StorageBytes);

  // Grow the storage to encompass any known padding in the layout when doing
  // so will make the storage a power-of-two. There are two cases when we can
  // do this. The first is when we have a subsequent field and can widen up to
  // its offset. The second is when the data size of the AST record layout is
  // past the end of the current storage. The latter is true when there is tail
  // padding on a struct and no members of a super class can be packed into it.
  //
  // Note that we widen the storage as much as possible here to express the
  // maximum latitude the language provides, and rely on the backend to lower
  // these in conjunction with shifts and masks to narrower operations where
  // beneficial.
  uint64_t EndOffset = Types.getContext().toBits(Layout.getDataSize());
  if (BFE != FE)
    // If there are more fields to be laid out, the offset at the end of the
    // bitfield is the offset of the next field in the record.
    EndOffset = Layout.getFieldOffset(LastFieldNo + 1);
  assert(EndOffset >= (FirstFieldOffset + TotalBits) &&
         "End offset is not past the end of the known storage bits.");
  uint64_t SpaceBits = EndOffset - FirstFieldOffset;
  uint64_t LongBits = Types.getTarget().getLongWidth();
  uint64_t WidenedBits = (StorageBits / LongBits) * LongBits +
                         llvm::NextPowerOf2(StorageBits % LongBits - 1);
  assert(WidenedBits >= StorageBits && "Widening shrunk the bits!");
  if (WidenedBits <= SpaceBits) {
    StorageBits = WidenedBits;
    StorageBytes = Types.getContext().toCharUnitsFromBits(StorageBits);
    assert(StorageBits == (uint64_t)Types.getContext().toBits(StorageBytes));
  }

  unsigned FieldIndex = FieldTypes.size();
  AppendBytes(StorageBytes);

  // Now walk the bitfields associating them with this field of storage and
  // building up the bitfield specific info.
  unsigned FieldNo = FirstFieldNo;
  for (; BFI != BFE; ++BFI, ++FieldNo) {
    FieldDecl *FD = *BFI;
    uint64_t FieldOffset = Layout.getFieldOffset(FieldNo) - FirstFieldOffset;
    uint64_t FieldSize = FD->getBitWidthValue(Types.getContext());
    Fields[FD] = FieldIndex;
    BitFields[FD] = CGBitFieldInfo::MakeInfo(Types, FD, FieldOffset, FieldSize,
                                             StorageBits, StorageAlignment);
  }
  FirstFieldNo = LastFieldNo;
  return true;
}
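To see the widening arithmetic in action, here is a worked instance on a hypothetical struct, assuming CharAlign == 8 and the usual bit-offset assignment for the run:

struct S {
  unsigned a : 3;   // FirstFieldOffset = 0
  unsigned b : 15;  // bit offset 3; the run ends here
  int x;            // next non-bitfield, at bit offset 32
};
// TotalBits   = (3 + 15) - 0 = 18
// StorageBits = RoundUpToAlignment(18, CharAlign) = 24
// EndOffset   = 32 (offset of x), so SpaceBits = 32 - 0 = 32
// WidenedBits = (24 / LongBits) * LongBits
//               + NextPowerOf2(24 % LongBits - 1)
//             = 0 + NextPowerOf2(23) = 32      (for any LongBits >= 32)
// WidenedBits <= SpaceBits, so the run is widened to a single i32.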