void
CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
                                      RecordDecl::field_iterator FieldEnd) {
  // Run stores the first element of the current run of bitfields.  FieldEnd is
  // used as a special value to note that we don't have a current run.  A
  // bitfield run is a contiguous collection of bitfields that can be stored in
  // the same storage block.  Zero-sized bitfields and bitfields that would
  // cross an alignment boundary break a run and start a new one.
  RecordDecl::field_iterator Run = FieldEnd;
  // Tail is the offset of the first bit off the end of the current run.  It's
  // used to determine if the ASTRecordLayout is treating two adjacent
  // bitfields as contiguous.  StartBitOffset is the offset of the beginning of
  // the Run.
  uint64_t StartBitOffset, Tail = 0;
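  // As a rough illustration of the discrete-storage path below (assuming MS
  // bitfield layout, where consecutive bitfields of the same declared type
  // share that type's allocation unit): for "int a : 3; int b : 5;", a starts
  // a run and allocates i32 storage (Tail = 32), and b's bit offset (3) falls
  // inside that allocation, so it reuses the same storage unit instead of
  // starting a new run.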
  if (isDiscreteBitFieldABI()) {
    for (; Field != FieldEnd; ++Field) {
      uint64_t BitOffset = getFieldBitOffset(*Field);
      // Zero-width bitfields end runs.
      if (Field->isZeroLengthBitField(Context)) {
        Run = FieldEnd;
        continue;
      }
      llvm::Type *Type = Types.ConvertTypeForMem(Field->getType());
      // If we don't have a run yet, or don't live within the previous run's
      // allocated storage, then we allocate some storage and start a new run.
      if (Run == FieldEnd || BitOffset >= Tail) {
        Run = Field;
        StartBitOffset = BitOffset;
        Tail = StartBitOffset + DataLayout.getTypeAllocSizeInBits(Type);
        // Add the storage member to the record.  This must be added to the
        // record before the bitfield members so that it gets laid out before
        // the bitfields it contains get laid out.
        Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
      }
      // Bitfields get the offset of their storage but come afterward and remain
      // there after a stable sort.
      Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
                                   MemberInfo::Field, nullptr, *Field));
    }
    return;
  }

  // Check if OffsetInRecord (the size in bits of the current run of
  // bitfields) is better as a single field run. When OffsetInRecord has legal
  // integer width and its bitfield offset is naturally aligned, it is better
  // to make the bitfield a separate storage component so that it can be
  // accessed directly at lower cost.
  auto IsBetterAsSingleFieldRun = [&](uint64_t OffsetInRecord,
                                      uint64_t StartBitOffset) {
    if (!Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
      return false;
    if (!DataLayout.isLegalInteger(OffsetInRecord))
      return false;
    // Make sure StartBitOffset is naturally aligned if it is treated as an
    // IType integer.
    if (StartBitOffset %
            Context.toBits(getAlignment(getIntNType(OffsetInRecord))) !=
        0)
      return false;
    return true;
  };
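  // For example, with -ffine-grained-bitfield-accesses enabled on a target
  // whose legal integer widths include 16 bits and whose i16 ABI alignment is
  // 2 bytes (an assumption; check the DataLayout), a 16-bit run starting at
  // bit offset 32 qualifies as a single field run, while the same run
  // starting at bit offset 8 does not (8 is not a multiple of 16).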

  // Tracks whether the start field of the current run is better as a single
  // field run.
  bool StartFieldAsSingleRun = false;
  for (;;) {
    // Check to see if we need to start a new run.
    if (Run == FieldEnd) {
      // If we're out of fields, return.
      if (Field == FieldEnd)
        break;
      // Any non-zero-length bitfield can start a new run.
      if (!Field->isZeroLengthBitField(Context)) {
        Run = Field;
        StartBitOffset = getFieldBitOffset(*Field);
        Tail = StartBitOffset + Field->getBitWidthValue(Context);
        StartFieldAsSingleRun = IsBetterAsSingleFieldRun(Tail - StartBitOffset,
                                                         StartBitOffset);
      }
      ++Field;
      continue;
    }

    // Skip the block below and emit the storage directly if any of the
    // following holds:
    //  - the start field of the run is better as a single field run,
    //  - the current field (or a set of consecutive fields) is better as a
    //    single field run,
    //  - the current field is a zero-width bitfield and either
    //    UseZeroLengthBitfieldAlignment or UseBitFieldTypeAlignment is true,
    //  - the offset of the current field is inconsistent with the offset of
    //    the previous field plus its width.
    // Otherwise, try to add the bitfield to the run.
    if (!StartFieldAsSingleRun && Field != FieldEnd &&
        !IsBetterAsSingleFieldRun(Tail - StartBitOffset, StartBitOffset) &&
        (!Field->isZeroLengthBitField(Context) ||
         (!Context.getTargetInfo().useZeroLengthBitfieldAlignment() &&
          !Context.getTargetInfo().useBitFieldTypeAlignment())) &&
        Tail == getFieldBitOffset(*Field)) {
      Tail += Field->getBitWidthValue(Context);
      ++Field;
      continue;
    }

    // We've hit a break-point in the run and need to emit a storage field.
    llvm::Type *Type = getIntNType(Tail - StartBitOffset);
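    // Note: getIntNType is expected to round the run's size up to a whole
    // number of bytes, so e.g. a 13-bit run would get i16 storage.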
    // Add the storage member to the record and set the bitfield info for all of
    // the bitfields in the run.  Bitfields get the offset of their storage but
    // come afterward and remain there after a stable sort.
    Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
    for (; Run != Field; ++Run)
      Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
                                   MemberInfo::Field, nullptr, *Run));
    Run = FieldEnd;
    StartFieldAsSingleRun = false;
  }
}
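
A rough sketch of what the non-MS path above does for a small struct, assuming
a typical target where int is 32 bits wide, bitfield type alignment is honored,
and -ffine-grained-bitfield-accesses is off (the actual bit offsets come from
the ASTRecordLayout, not from this sketch):

struct Sample {
  unsigned a : 3; // bit offset 0: starts a run, StartBitOffset = 0, Tail = 3
  unsigned b : 5; // bit offset 3 == Tail: joins the run, Tail = 8
  unsigned : 0;   // zero-width bitfield: the current run is emitted and reset
  unsigned c : 7; // starts a new run in its own storage unit
};
// Expected outcome: one storage unit covering a and b (getIntNType(8), i.e. an
// i8), followed by their MemberInfo entries, then a separate storage unit and
// MemberInfo entry for c.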
Example #2
void
CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
                                      RecordDecl::field_iterator FieldEnd) {
  // Run stores the first element of the current run of bitfields.  FieldEnd is
  // used as a special value to note that we don't have a current run.  A
  // bitfield run is a contiguous collection of bitfields that can be stored in
  // the same storage block.  Zero-sized bitfields and bitfields that would
  // cross an alignment boundary break a run and start a new one.
  RecordDecl::field_iterator Run = FieldEnd;
  // Tail is the offset of the first bit off the end of the current run.  It's
  // used to determine if the ASTRecordLayout is treating two adjacent
  // bitfields as contiguous.  StartBitOffset is the offset of the beginning of
  // the Run.
  uint64_t StartBitOffset, Tail = 0;
  if (useMSABI()) {
    for (; Field != FieldEnd; ++Field) {
      uint64_t BitOffset = getFieldBitOffset(*Field);
      // Zero-width bitfields end runs.
      if (Field->getBitWidthValue(Context) == 0) {
        Run = FieldEnd;
        continue;
      }
      llvm::Type *Type = Types.ConvertTypeForMem(Field->getType());
      // If we don't have a run yet, or don't live within the previous run's
      // allocated storage, then we allocate some storage and start a new run.
      if (Run == FieldEnd || BitOffset >= Tail) {
        Run = Field;
        StartBitOffset = BitOffset;
        Tail = StartBitOffset + DataLayout.getTypeAllocSizeInBits(Type);
        // Add the storage member to the record.  This must be added to the
        // record before the bitfield members so that it gets laid out before
        // the bitfields it contains get laid out.
        Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
      }
      // Bitfields get the offset of their storage but come afterward and remain
      // there after a stable sort.
      Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
                                   MemberInfo::Field, nullptr, *Field));
    }
    return;
  }
  for (;;) {
    // Check to see if we need to start a new run.
    if (Run == FieldEnd) {
      // If we're out of fields, return.
      if (Field == FieldEnd)
        break;
      // Any non-zero-length bitfield can start a new run.
      if (Field->getBitWidthValue(Context) != 0) {
        Run = Field;
        StartBitOffset = getFieldBitOffset(*Field);
        Tail = StartBitOffset + Field->getBitWidthValue(Context);
      }
      ++Field;
      continue;
    }
    // Add bitfields to the run as long as they qualify.
    if (Field != FieldEnd && Field->getBitWidthValue(Context) != 0 &&
        Tail == getFieldBitOffset(*Field)) {
      Tail += Field->getBitWidthValue(Context);
      ++Field;
      continue;
    }
    // We've hit a break-point in the run and need to emit a storage field.
    llvm::Type *Type = getIntNType(Tail - StartBitOffset);
    // Add the storage member to the record and set the bitfield info for all of
    // the bitfields in the run.  Bitfields get the offset of their storage but
    // come afterward and remain there after a stable sort.
    Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
    for (; Run != Field; ++Run)
      Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
                                   MemberInfo::Field, nullptr, *Run));
    Run = FieldEnd;
  }
}
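
The contiguity check (Tail == getFieldBitOffset(*Field)) is what splits runs
when the AST layout inserts padding between bitfields. A hypothetical case,
assuming a target where a bitfield may not straddle the storage boundary of its
declared 32-bit type:

struct Padded {
  unsigned a : 30; // run starts at bit 0, Tail = 30
  unsigned b : 4;  // would straddle bit 32, so the layout places it at bit 32;
                   // 30 != 32 breaks the run and a gets its own storage unit
};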