CGRecordLowering::CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D,
                                   bool Packed)
    : Types(Types), Context(Types.getContext()), D(D),
      RD(dyn_cast<CXXRecordDecl>(D)),
      Layout(Types.getContext().getASTRecordLayout(D)),
      DataLayout(Types.getDataLayout()), IsZeroInitializable(true),
      IsZeroInitializableAsBase(true), Packed(Packed) {}
CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t Offset, uint64_t Size,
                                        uint64_t StorageSize,
                                        uint64_t StorageAlignment) {
  llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
  CharUnits TypeSizeInBytes =
    CharUnits::fromQuantity(Types.getDataLayout().getTypeAllocSize(Ty));
  uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);

  bool IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();

  if (Size > TypeSizeInBits) {
    // We have a wide bit-field. The extra bits are only used for padding, so
    // if we have a bitfield of type T, with size N:
    //
    // T t : N;
    //
    // We can just assume that it's:
    //
    // T t : sizeof(T);
    //
    Size = TypeSizeInBits;
  }
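  // For instance (an illustrative case, not from the original comments): on a
  // target where 'int' is 32 bits wide, 'int i : 64;' is laid out as if it
  // were 'int i : 32;', with the remaining 32 bits treated as padding.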

  // Reverse the bit offsets for big endian machines. Because we represent
  // a bitfield as a single large integer load, we can imagine the bits
  // counting from the most-significant-bit instead of the
  // least-significant-bit.
  if (Types.getDataLayout().isBigEndian()) {
    Offset = StorageSize - (Offset + Size);
  }
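  // Illustrative numbers: with StorageSize == 32, Offset == 0 and Size == 3,
  // a big-endian target ends up with Offset == 29, i.e. the same three bits
  // counted from the most-significant end of the loaded integer.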

  return CGBitFieldInfo(Offset, Size, IsSigned, StorageSize, StorageAlignment);
}
Example #3
/// isSafeToConvert - Return true if it is safe to convert the specified record
/// decl to IR and lay it out, false if doing so would cause us to get into a
/// recursive compilation mess.
static bool isSafeToConvert(const RecordDecl *RD, CodeGenTypes &CGT) {
  // If no structs are being laid out, we can certainly do this one.
  if (CGT.noRecordsBeingLaidOut()) return true;
  
  llvm::SmallPtrSet<const RecordDecl*, 16> AlreadyChecked;
  return isSafeToConvert(RD, CGT, AlreadyChecked);
}
static CGBitFieldInfo ComputeBitFieldInfo(CodeGenTypes &Types,
                                          const FieldDecl *FD,
                                          uint64_t FieldOffset,
                                          uint64_t FieldSize) {
  const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(FD->getType());
  uint64_t TypeSizeInBytes = Types.getTargetData().getTypeAllocSize(Ty);
  uint64_t TypeSizeInBits = TypeSizeInBytes * 8;

  unsigned StartBit = FieldOffset % TypeSizeInBits;
  bool IsSigned = FD->getType()->isSignedIntegerType();

  // The current policy is to always access the bit-field using the source type
  // of the bit-field. With the C bit-field rules, this implies that we always
  // use either one or two accesses, and two accesses can only occur with a
  // packed structure when the bit-field straddles an alignment boundary.
  CGBitFieldInfo::AccessInfo Components[2];

  unsigned LowBits = std::min(FieldSize, TypeSizeInBits - StartBit);
  bool NeedsHighAccess = LowBits != FieldSize;
  unsigned NumComponents = 1 + NeedsHighAccess;
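  // Illustrative case (assuming a layout that places the field at bit 8): a
  // packed struct with a 'char' followed by 'int i : 30;' gives FieldOffset
  // == 8 and TypeSizeInBits == 32, so StartBit == 8, LowBits == 24, and a
  // second access is needed for the remaining 6 bits.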

  // FIXME: This access policy is probably wrong on big-endian systems.
  CGBitFieldInfo::AccessInfo &LowAccess = Components[0];
  LowAccess.FieldIndex = 0;
  LowAccess.FieldByteOffset =
    TypeSizeInBytes * ((FieldOffset / 8) / TypeSizeInBytes);
  LowAccess.FieldBitStart = StartBit;
  LowAccess.AccessWidth = TypeSizeInBits;
  // FIXME: This might be wrong!
  LowAccess.AccessAlignment = 0;
  LowAccess.TargetBitOffset = 0;
  LowAccess.TargetBitWidth = LowBits;

  if (NeedsHighAccess) {
    CGBitFieldInfo::AccessInfo &HighAccess = Components[1];
    HighAccess.FieldIndex = 0;
    HighAccess.FieldByteOffset = LowAccess.FieldByteOffset + TypeSizeInBytes;
    HighAccess.FieldBitStart = 0;
    HighAccess.AccessWidth = TypeSizeInBits;
    // FIXME: This might be wrong!
    HighAccess.AccessAlignment = 0;
    HighAccess.TargetBitOffset = LowBits;
    HighAccess.TargetBitWidth = FieldSize - LowBits;
  }

  return CGBitFieldInfo(FieldSize, NumComponents, Components, IsSigned);
}
Example #5
/// isSafeToConvert - Return true if it is safe to convert the specified record
/// decl to IR and lay it out, false if doing so would cause us to get into a
/// recursive compilation mess.
static bool 
isSafeToConvert(const RecordDecl *RD, CodeGenTypes &CGT,
                llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked) {
  // If we have already checked this type (maybe the same type is used by-value
  // multiple times in multiple structure fields), don't check again.
  if (!AlreadyChecked.insert(RD)) return true;
  
  const Type *Key = CGT.getContext().getTagDeclType(RD).getTypePtr();
  
  // If this type is already laid out, converting it is a noop.
  if (CGT.isRecordLayoutComplete(Key)) return true;
  
  // If this type is currently being laid out, we can't recursively compile it.
  if (CGT.isRecordBeingLaidOut(Key))
    return false;
  
  // If this type would require laying out bases that are currently being laid
  // out, don't do it.  This includes virtual base classes which get laid out
  // when a class is translated, even though they aren't embedded by-value into
  // the class.
  if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (CXXRecordDecl::base_class_const_iterator I = CRD->bases_begin(),
         E = CRD->bases_end(); I != E; ++I)
      if (!isSafeToConvert(I->getType()->getAs<RecordType>()->getDecl(),
                           CGT, AlreadyChecked))
        return false;
  }
  
  // If this type would require laying out members that are currently being laid
  // out, don't do it.
  for (RecordDecl::field_iterator I = RD->field_begin(),
       E = RD->field_end(); I != E; ++I)
    if (!isSafeToConvert(I->getType(), CGT, AlreadyChecked))
      return false;
  
  // If there are no problems, let's do it.
  return true;
}
CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t Offset, uint64_t Size,
                                        uint64_t StorageSize,
                                        CharUnits StorageOffset) {
  // This function is vestigial from CGRecordLayoutBuilder days but is still 
  // used in GCObjCRuntime.cpp.  That usage has a "fixme" attached to it that
  // when addressed will allow for the removal of this function.
  llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
  CharUnits TypeSizeInBytes =
    CharUnits::fromQuantity(Types.getDataLayout().getTypeAllocSize(Ty));
  uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);

  bool IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();

  if (Size > TypeSizeInBits) {
    // We have a wide bit-field. The extra bits are only used for padding, so
    // if we have a bitfield of type T, with size N:
    //
    // T t : N;
    //
    // We can just assume that it's:
    //
    // T t : sizeof(T);
    //
    Size = TypeSizeInBits;
  }

  // Reverse the bit offsets for big endian machines. Because we represent
  // a bitfield as a single large integer load, we can imagine the bits
  // counting from the most-significant-bit instead of the
  // least-significant-bit.
  if (Types.getDataLayout().isBigEndian()) {
    Offset = StorageSize - (Offset + Size);
  }

  return CGBitFieldInfo(Offset, Size, IsSigned, StorageSize, StorageOffset);
}
Example #7
/// isSafeToConvert - Return true if it is safe to convert this field type,
/// which requires the structure elements contained by-value to all be
/// recursively safe to convert.
static bool
isSafeToConvert(QualType T, CodeGenTypes &CGT,
                llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked) {
  // Strip off atomic type sugar.
  if (const auto *AT = T->getAs<AtomicType>())
    T = AT->getValueType();

  // If this is a record, check it.
  if (const auto *RT = T->getAs<RecordType>())
    return isSafeToConvert(RT->getDecl(), CGT, AlreadyChecked);

  // If this is an array, check the elements, which are embedded inline.
  if (const auto *AT = CGT.getContext().getAsArrayType(T))
    return isSafeToConvert(AT->getElementType(), CGT, AlreadyChecked);
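  // For example (illustrative): a member of type 'struct Inner[4]' embeds
  // Inner by value, so Inner must itself be safe to convert; a member of
  // type 'struct Inner *' matches neither case and falls through to the
  // 'return true' below.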

  // Otherwise, there is no concern about transforming this.  We only care about
  // things that are contained by-value in a structure that can have another 
  // structure as a member.
  return true;
}
CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                               const FieldDecl *FD,
                               uint64_t FieldOffset,
                               uint64_t FieldSize,
                               uint64_t ContainingTypeSizeInBits,
                               unsigned ContainingTypeAlign) {
  llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
  CharUnits TypeSizeInBytes =
    CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(Ty));
  uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);

  bool IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();

  if (FieldSize > TypeSizeInBits) {
    // We have a wide bit-field. The extra bits are only used for padding, so
    // if we have a bitfield of type T, with size N:
    //
    // T t : N;
    //
    // We can just assume that it's:
    //
    // T t : sizeof(T);
    //
    FieldSize = TypeSizeInBits;
  }

  // On big-endian machines the first fields are in higher bit positions, so
  // reverse the bit offset. The byte offsets are reversed back later.
  if (Types.getTargetData().isBigEndian()) {
    FieldOffset = ContainingTypeSizeInBits - FieldOffset - FieldSize;
  }

  // Compute the access components. The policy we use is to start by attempting
  // to access using the width of the bit-field type itself and to always access
  // at aligned indices of that type. If such an access would fail because it
  // extends past the bound of the type, then we reduce size to the next smaller
  // power of two and retry. The current algorithm assumes pow2 sized types,
  // although this is easy to fix.
  //
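  // A worked instance (illustrative numbers, assuming no register-sized
  // widening and a containing type of at least 48 bits): a 10-bit 'short'
  // bit-field at FieldOffset == 28 starts with AccessWidth == 16 and
  // AccessStart == 16; the first access covers 4 of the field's bits and a
  // second access at AccessStart == 32 covers the remaining 6.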
  assert(llvm::isPowerOf2_32(TypeSizeInBits) && "Unexpected type size!");
  CGBitFieldInfo::AccessInfo Components[3];
  unsigned NumComponents = 0;
  unsigned AccessedTargetBits = 0;       // The number of target bits accessed.
  unsigned AccessWidth = TypeSizeInBits; // The current access width to attempt.

  // If requested, widen the initial bit-field access to be register sized. The
  // theory is that this is most likely to allow multiple accesses into the same
  // structure to be coalesced, and that the backend should be smart enough to
  // narrow the store if no coalescing is ever done.
  //
  // The subsequent code will handle aligning these accesses to common
  // boundaries and guaranteeing that we do not access past the end of the
  // structure.
  if (Types.getCodeGenOpts().UseRegisterSizedBitfieldAccess) {
    if (AccessWidth < Types.getTarget().getRegisterWidth())
      AccessWidth = Types.getTarget().getRegisterWidth();
  }
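  // E.g. (illustrative): with 32-bit registers, an initial 8-bit access for a
  // 'char' bit-field is widened to 32 bits here; the code below may shrink it
  // again if it would run past the end of the record.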

  // Round down from the field offset to find the first access position that is
  // at an aligned offset of the initial access type.
  uint64_t AccessStart = FieldOffset - (FieldOffset % AccessWidth);

  // Adjust initial access size to fit within record.
  while (AccessWidth > Types.getTarget().getCharWidth() &&
         AccessStart + AccessWidth > ContainingTypeSizeInBits) {
    AccessWidth >>= 1;
    AccessStart = FieldOffset - (FieldOffset % AccessWidth);
  }
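  // Illustrative numbers: with FieldOffset == 70 and AccessWidth == 32,
  // AccessStart is rounded down to 64; if the containing type were only 80
  // bits, the width would shrink to 16 and AccessStart would stay at 64.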

  while (AccessedTargetBits < FieldSize) {
    // Check that we can access using a type of this size, without reading off
    // the end of the structure. This can occur with packed structures and
    // -fno-bitfield-type-align, for example.
    if (AccessStart + AccessWidth > ContainingTypeSizeInBits) {
      // If so, reduce access size to the next smaller power-of-two and retry.
      AccessWidth >>= 1;
      assert(AccessWidth >= Types.getTarget().getCharWidth()
             && "Cannot access under byte size!");
      continue;
    }

    // Otherwise, add an access component.

    // First, compute the bits inside this access which are part of the
    // target. We are reading bits [AccessStart, AccessStart + AccessWidth); the
    // intersection with [FieldOffset, FieldOffset + FieldSize) gives the bits
    // in the target that we are reading.
    assert(FieldOffset < AccessStart + AccessWidth && "Invalid access start!");
    assert(AccessStart < FieldOffset + FieldSize && "Invalid access start!");
    uint64_t AccessBitsInFieldStart = std::max(AccessStart, FieldOffset);
    uint64_t AccessBitsInFieldSize =
      std::min(AccessWidth + AccessStart,
               FieldOffset + FieldSize) - AccessBitsInFieldStart;
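    // Illustrative numbers: reading bits [16, 32) for a field occupying
    // bits [28, 38) gives AccessBitsInFieldStart == 28 and
    // AccessBitsInFieldSize == 4.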

    assert(NumComponents < 3 && "Unexpected number of components!");
    CGBitFieldInfo::AccessInfo &AI = Components[NumComponents++];
    AI.FieldIndex = 0;
    // FIXME: We still follow the old access pattern of only using the field
    // byte offset. We should switch this once we fix the struct layout to be
    // pretty.

    // On big-endian machines we reversed the bit offset because the first
    // fields are in higher bits. But this also reverses the bytes, so fix
    // that here by reversing the byte offset on big-endian machines.
    if (Types.getTargetData().isBigEndian()) {
      AI.FieldByteOffset = Types.getContext().toCharUnitsFromBits(
          ContainingTypeSizeInBits - AccessStart - AccessWidth);
    } else {
      AI.FieldByteOffset = Types.getContext().toCharUnitsFromBits(AccessStart);
    }
    AI.FieldBitStart = AccessBitsInFieldStart - AccessStart;
    AI.AccessWidth = AccessWidth;
    AI.AccessAlignment = Types.getContext().toCharUnitsFromBits(
        llvm::MinAlign(ContainingTypeAlign, AccessStart));
    AI.TargetBitOffset = AccessedTargetBits;
    AI.TargetBitWidth = AccessBitsInFieldSize;

    AccessStart += AccessWidth;
    AccessedTargetBits += AI.TargetBitWidth;
  }