Example #1
static CGBitFieldInfo ComputeBitFieldInfo(CodeGenTypes &Types,
                                          const FieldDecl *FD,
                                          uint64_t FieldOffset,
                                          uint64_t FieldSize) {
  const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(FD->getType());
  uint64_t TypeSizeInBytes = Types.getTargetData().getTypeAllocSize(Ty);
  uint64_t TypeSizeInBits = TypeSizeInBytes * 8;

  unsigned StartBit = FieldOffset % TypeSizeInBits;
  bool IsSigned = FD->getType()->isSignedIntegerType();

  // The current policy is to always access the bit-field using the source type
  // of the bit-field. With the C bit-field rules, this implies that we always
  // use either one or two accesses, and two accesses can only occur with a
  // packed structure when the bit-field straddles an alignment boundary.
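  // For example, in a packed struct where a 16-bit bit-field of type 'int'
  // starts at bit offset 24, the first 32-bit access covers only bits
  // [24, 32), so a second access is needed for the remaining 8 bits.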
  CGBitFieldInfo::AccessInfo Components[2];

  unsigned LowBits = std::min(FieldSize, TypeSizeInBits - StartBit);
  bool NeedsHighAccess = LowBits != FieldSize;
  unsigned NumComponents = 1 + NeedsHighAccess;

  // FIXME: This access policy is probably wrong on big-endian systems.
  CGBitFieldInfo::AccessInfo &LowAccess = Components[0];
  LowAccess.FieldIndex = 0;
  LowAccess.FieldByteOffset =
    TypeSizeInBytes * ((FieldOffset / 8) / TypeSizeInBytes);
  LowAccess.FieldBitStart = StartBit;
  LowAccess.AccessWidth = TypeSizeInBits;
  // FIXME: This might be wrong!
  LowAccess.AccessAlignment = 0;
  LowAccess.TargetBitOffset = 0;
  LowAccess.TargetBitWidth = LowBits;

  if (NeedsHighAccess) {
    CGBitFieldInfo::AccessInfo &HighAccess = Components[1];
    HighAccess.FieldIndex = 0;
    HighAccess.FieldByteOffset = LowAccess.FieldByteOffset + TypeSizeInBytes;
    HighAccess.FieldBitStart = 0;
    HighAccess.AccessWidth = TypeSizeInBits;
    // FIXME: This might be wrong!
    HighAccess.AccessAlignment = 0;
    HighAccess.TargetBitOffset = LowBits;
    HighAccess.TargetBitWidth = FieldSize - LowBits;
  }

  return CGBitFieldInfo(FieldSize, NumComponents, Components, IsSigned);
}
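
To make the straddling case concrete, here is a minimal standalone sketch (hypothetical code, not part of Clang) that mirrors the low/high split above for a 16-bit bit-field of type 'int' placed at bit offset 24 of a packed record:

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  // Hypothetical inputs mirroring ComputeBitFieldInfo's parameters.
  uint64_t FieldOffset = 24;     // bit offset of the field in the record
  uint64_t FieldSize = 16;       // declared width of the bit-field
  uint64_t TypeSizeInBits = 32;  // size of the field's type ('int')

  uint64_t StartBit = FieldOffset % TypeSizeInBits;                  // 24
  uint64_t LowBits = std::min(FieldSize, TypeSizeInBits - StartBit); // 8
  bool NeedsHighAccess = LowBits != FieldSize;                       // true

  std::printf("low access: bits [%llu, %llu) of unit 0\n",
              (unsigned long long)StartBit,
              (unsigned long long)(StartBit + LowBits));
  if (NeedsHighAccess)
    std::printf("high access: bits [0, %llu) of unit 1\n",
                (unsigned long long)(FieldSize - LowBits));
  return 0;
}

Run as-is, it reports an 8-bit low access at bits [24, 32) of the first 32-bit unit and an 8-bit high access at bits [0, 8) of the next one, matching the two-component case above.
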
CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t FieldOffset,
                                        uint64_t FieldSize,
                                        uint64_t ContainingTypeSizeInBits,
                                        unsigned ContainingTypeAlign) {
  llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
  CharUnits TypeSizeInBytes =
    CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(Ty));
  uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);

  bool IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();

  if (FieldSize > TypeSizeInBits) {
    // We have a wide bit-field. The extra bits are only used for padding, so
    // if we have a bitfield of type T, with size N:
    //
    // T t : N;
    //
    // We can just assume that it's:
    //
    // T t : sizeof(T);
    //
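    // For example, with a 32-bit 'int', a C++ field declared 'int i : 40;'
    // is treated here as 'int i : 32;'.
    //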
    FieldSize = TypeSizeInBits;
  }

  // On big-endian machines the first fields are in the higher bit positions,
  // so reverse the bit offset here. The byte offsets are reversed back later.
  if (Types.getTargetData().isBigEndian()) {
    FieldOffset = ContainingTypeSizeInBits - FieldOffset - FieldSize;
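    // For example, in a 32-bit record, a field of 8 bits at source offset 0
    // becomes 32 - 0 - 8 == 24.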
  }

  // Compute the access components. The policy we use is to start by attempting
  // to access using the width of the bit-field type itself and to always access
  // at aligned indices of that type. If such an access would fail because it
  // extends past the bound of the type, then we reduce the size to the next
  // smaller power of two and retry. The current algorithm assumes
  // power-of-two sized types, although this is easy to fix.
  //
  assert(llvm::isPowerOf2_32(TypeSizeInBits) && "Unexpected type size!");
  CGBitFieldInfo::AccessInfo Components[3];
  unsigned NumComponents = 0;
  unsigned AccessedTargetBits = 0;       // The number of target bits accessed.
  unsigned AccessWidth = TypeSizeInBits; // The current access width to attempt.

  // If requested, widen the initial bit-field access to be register sized. The
  // theory is that this is most likely to allow multiple accesses into the same
  // structure to be coalesced, and that the backend should be smart enough to
  // narrow the store if no coalescing is ever done.
  //
  // The subsequent code will handle aligning these accesses to common
  // boundaries and guarantee that we do not access past the end of the
  // structure.
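  // (For example, on a target whose register width is 64, an initial 32-bit
  // access would be widened to a 64-bit access here.)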
  if (Types.getCodeGenOpts().UseRegisterSizedBitfieldAccess) {
    if (AccessWidth < Types.getTarget().getRegisterWidth())
      AccessWidth = Types.getTarget().getRegisterWidth();
  }

  // Round down from the field offset to find the first access position that is
  // at an aligned offset of the initial access type.
  uint64_t AccessStart = FieldOffset - (FieldOffset % AccessWidth);

  // Adjust initial access size to fit within record.
  while (AccessWidth > Types.getTarget().getCharWidth() &&
         AccessStart + AccessWidth > ContainingTypeSizeInBits) {
    AccessWidth >>= 1;
    AccessStart = FieldOffset - (FieldOffset % AccessWidth);
  }
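  // For example, a field at bit offset 8 in a 16-bit record cannot use an
  // initial 32-bit access: the loop above halves the width to 16, and
  // AccessStart becomes 0.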

  while (AccessedTargetBits < FieldSize) {
    // Check that we can access using a type of this size, without reading off
    // the end of the structure. This can occur with packed structures and
    // -fno-bitfield-type-align, for example.
    if (AccessStart + AccessWidth > ContainingTypeSizeInBits) {
      // If so, reduce access size to the next smaller power-of-two and retry.
      AccessWidth >>= 1;
      assert(AccessWidth >= Types.getTarget().getCharWidth()
             && "Cannot access under byte size!");
      continue;
    }

    // Otherwise, add an access component.

    // First, compute the bits inside this access which are part of the
    // target. We are reading bits [AccessStart, AccessStart + AccessWidth); the
    // intersection with [FieldOffset, FieldOffset + FieldSize) gives the bits
    // in the target that we are reading.
    assert(FieldOffset < AccessStart + AccessWidth && "Invalid access start!");
    assert(AccessStart < FieldOffset + FieldSize && "Invalid access start!");
    uint64_t AccessBitsInFieldStart = std::max(AccessStart, FieldOffset);
    uint64_t AccessBitsInFieldSize =
      std::min(AccessWidth + AccessStart,
               FieldOffset + FieldSize) - AccessBitsInFieldStart;
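    // For example, an access of bits [0, 32) against a field occupying bits
    // [24, 40) gives AccessBitsInFieldStart == 24 and AccessBitsInFieldSize
    // == 8.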

    assert(NumComponents < 3 && "Unexpected number of components!");
    CGBitFieldInfo::AccessInfo &AI = Components[NumComponents++];
    AI.FieldIndex = 0;
    // FIXME: We still follow the old access pattern of only using the field
    // byte offset. We should switch this once we fix the struct layout to be
    // pretty.

    // On big-endian machines we reversed the bit offset above because the
    // first fields occupy the higher bits. That reversal also flips the byte
    // order, so compensate here by reversing the byte offset.
    if (Types.getTargetData().isBigEndian()) {
      AI.FieldByteOffset = Types.getContext().toCharUnitsFromBits(
          ContainingTypeSizeInBits - AccessStart - AccessWidth);
    } else {
      AI.FieldByteOffset = Types.getContext().toCharUnitsFromBits(AccessStart);
    }
    AI.FieldBitStart = AccessBitsInFieldStart - AccessStart;
    AI.AccessWidth = AccessWidth;
    AI.AccessAlignment = Types.getContext().toCharUnitsFromBits(
        llvm::MinAlign(ContainingTypeAlign, AccessStart));
    AI.TargetBitOffset = AccessedTargetBits;
    AI.TargetBitWidth = AccessBitsInFieldSize;

    AccessStart += AccessWidth;
    AccessedTargetBits += AI.TargetBitWidth;
  }

  assert(AccessedTargetBits == FieldSize && "Invalid bit-field access!");
  return CGBitFieldInfo(FieldSize, NumComponents, Components, IsSigned);
}
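
The iteration is easiest to follow with concrete numbers. The following standalone sketch (hypothetical code, little-endian layout and 8-bit chars assumed, not part of Clang) mirrors the access-splitting loop above for a 16-bit field at bit offset 24 in a 40-bit packed record:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t CharWidth = 8;  // assumed byte width

  // Hypothetical inputs: a 16-bit field at bit offset 24 in a 40-bit record,
  // initially accessed through a 32-bit type.
  uint64_t FieldOffset = 24, FieldSize = 16;
  uint64_t ContainingTypeSizeInBits = 40;
  uint64_t AccessWidth = 32;

  // Round down to an aligned start, shrinking until the initial access fits.
  uint64_t AccessStart = FieldOffset - (FieldOffset % AccessWidth);
  while (AccessWidth > CharWidth &&
         AccessStart + AccessWidth > ContainingTypeSizeInBits) {
    AccessWidth >>= 1;
    AccessStart = FieldOffset - (FieldOffset % AccessWidth);
  }

  uint64_t AccessedTargetBits = 0;
  while (AccessedTargetBits < FieldSize) {
    if (AccessStart + AccessWidth > ContainingTypeSizeInBits) {
      AccessWidth >>= 1;  // reduce to the next smaller power of two and retry
      assert(AccessWidth >= CharWidth && "Cannot access under byte size!");
      continue;
    }
    // Intersect [AccessStart, AccessStart + AccessWidth) with the field.
    uint64_t Start = std::max(AccessStart, FieldOffset);
    uint64_t Size =
        std::min(AccessStart + AccessWidth, FieldOffset + FieldSize) - Start;
    std::printf("access: %llu bits at bit %llu -> target bits [%llu, %llu)\n",
                (unsigned long long)AccessWidth,
                (unsigned long long)AccessStart,
                (unsigned long long)AccessedTargetBits,
                (unsigned long long)(AccessedTargetBits + Size));
    AccessStart += AccessWidth;
    AccessedTargetBits += Size;
  }
  return 0;
}

It prints two components: a 32-bit access at bit 0 covering target bits [0, 8), and an 8-bit access at bit 32 covering target bits [8, 16); the width halves twice (32 to 16 to 8) before the tail access fits within the 40-bit record.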