MCSection::iterator
MCSection::getSubsectionInsertionPoint(unsigned Subsection) {
  if (Subsection == 0 && SubsectionFragmentMap.empty())
    return end();

  SmallVectorImpl<std::pair<unsigned, MCFragment *>>::iterator MI =
      std::lower_bound(SubsectionFragmentMap.begin(),
                       SubsectionFragmentMap.end(),
                       std::make_pair(Subsection, (MCFragment *)nullptr));
  bool ExactMatch = false;
  if (MI != SubsectionFragmentMap.end()) {
    ExactMatch = MI->first == Subsection;
    if (ExactMatch)
      ++MI;
  }
  iterator IP;
  if (MI == SubsectionFragmentMap.end())
    IP = end();
  else
    IP = MI->second->getIterator();
  if (!ExactMatch && Subsection != 0) {
    // The GNU as documentation claims that subsections have an alignment of 4,
    // although this appears not to be the case.
    MCFragment *F = new MCDataFragment();
    SubsectionFragmentMap.insert(MI, std::make_pair(Subsection, F));
    getFragmentList().insert(IP, F);
    F->setParent(this);
  }

  return IP;
}
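// A minimal standalone sketch of the insertion-point search above, using a
// plain std::vector in place of SmallVectorImpl and indices in place of
// MCFragment pointers. The map stays sorted by subsection number, so
// std::lower_bound finds where a new subsection belongs; an exact match means
// the subsection already exists and new fragments go after its current entry.
// Names here (SubsectionMap, findInsertIndex) are illustrative, not LLVM API.
#include <algorithm>
#include <cstddef>
#include <utility>
#include <vector>

using SubsectionMap = std::vector<std::pair<unsigned, std::size_t>>;

// Returns the index at which a fragment for Subsection should be placed so
// that the map remains sorted by subsection number.
std::size_t findInsertIndex(const SubsectionMap &Map, unsigned Subsection) {
  auto MI = std::lower_bound(Map.begin(), Map.end(),
                             std::make_pair(Subsection, std::size_t(0)));
  if (MI != Map.end() && MI->first == Subsection)
    ++MI; // Exact match: insert after the existing entry for this subsection.
  return static_cast<std::size_t>(MI - Map.begin());
}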
void MCAssembler::writeFragmentPadding(const MCFragment &F, uint64_t FSize,
                                       MCObjectWriter *OW) const {
  // Should NOP padding be written out before this fragment?
  unsigned BundlePadding = F.getBundlePadding();
  if (BundlePadding > 0) {
    assert(isBundlingEnabled() &&
           "Writing bundle padding with disabled bundling");
    assert(F.hasInstructions() &&
           "Writing bundle padding for a fragment without instructions");

    unsigned TotalLength = BundlePadding + static_cast<unsigned>(FSize);
    if (F.alignToBundleEnd() && TotalLength > getBundleAlignSize()) {
      // If the padding itself crosses a bundle boundary, it must be emitted
      // in 2 pieces, since even nop instructions must not cross boundaries.
      //
      //        v--------------v   <- BundleAlignSize
      //   v---------v             <- BundlePadding
      // ----------------------------
      // | Prev |####|####|    F    |
      // ----------------------------
      //        ^-------------------^   <- TotalLength
      unsigned DistanceToBoundary = TotalLength - getBundleAlignSize();
      if (!getBackend().writeNopData(DistanceToBoundary, OW))
        report_fatal_error("unable to write NOP sequence of " +
                           Twine(DistanceToBoundary) + " bytes");
      BundlePadding -= DistanceToBoundary;
    }
    if (!getBackend().writeNopData(BundlePadding, OW))
      report_fatal_error("unable to write NOP sequence of " +
                         Twine(BundlePadding) + " bytes");
  }
}
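// A worked sketch of the two-piece split above, under assumed numbers:
// BundleAlignSize = 16, the fragment needs FSize = 12 bytes and must end on a
// bundle boundary, and layout decided BundlePadding = 8. TotalLength =
// 8 + 12 = 20 crosses one boundary, so the first nop run must stop exactly at
// that boundary. splitBundlePadding is illustrative, not LLVM API.
#include <cassert>
#include <cstdint>
#include <utility>

// Returns {nops before the boundary, nops after it}; the second run plus the
// fragment then fits entirely inside one bundle.
std::pair<unsigned, unsigned> splitBundlePadding(unsigned BundlePadding,
                                                 uint64_t FSize,
                                                 unsigned BundleAlignSize) {
  unsigned TotalLength = BundlePadding + static_cast<unsigned>(FSize);
  if (TotalLength <= BundleAlignSize)
    return {0, BundlePadding}; // No boundary crossed; one nop run suffices.
  unsigned DistanceToBoundary = TotalLength - BundleAlignSize;
  assert(DistanceToBoundary <= BundlePadding && "padding too small to split");
  return {DistanceToBoundary, BundlePadding - DistanceToBoundary};
}

// With the numbers above: splitBundlePadding(8, 12, 16) == {4, 4}, i.e. four
// nops up to the boundary, then four nops plus the 12-byte fragment exactly
// fill the final 16-byte bundle.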
bool MachObjectWriter::
IsSymbolRefDifferenceFullyResolvedImpl(const MCAssembler &Asm,
                                       const MCSymbolData &DataA,
                                       const MCFragment &FB,
                                       bool InSet,
                                       bool IsPCRel) const {
  if (InSet)
    return true;

  // The effective address is
  //     addr(atom(A)) + offset(A)
  //   - addr(atom(B)) - offset(B)
  // and the offsets are not relocatable, so the fixup is fully resolved when
  //  addr(atom(A)) - addr(atom(B)) == 0.
  const MCSymbolData *A_Base = nullptr, *B_Base = nullptr;

  const MCSymbol &SA = DataA.getSymbol().AliasedSymbol();
  const MCSection &SecA = SA.getSection();
  const MCSection &SecB = FB.getParent()->getSection();

  if (IsPCRel) {
    // The simple (Darwin, except on x86_64) way of dealing with this was to
    // assume that any reference to a temporary symbol *must* be a temporary
    // symbol in the same atom, unless the sections differ. Therefore, any
    // PCrel relocation to a temporary symbol (in the same section) is fully
    // resolved. This also works in conjunction with absolutized .set, which
    // requires the compiler to use .set to absolutize the differences between
    // symbols which the compiler knows to be assembly time constants, so we
    // don't need to worry about considering symbol differences fully
    // resolved.
    if (!Asm.getBackend().hasReliableSymbolDifference()) {
      if (!SA.isTemporary() || !SA.isInSection() || &SecA != &SecB)
        return false;
      return true;
    }
  } else {
    if (!TargetObjectWriter->useAggressiveSymbolFolding())
      return false;
  }

  const MCFragment &FA = *Asm.getSymbolData(SA).getFragment();

  A_Base = FA.getAtom();
  if (!A_Base)
    return false;

  B_Base = FB.getAtom();
  if (!B_Base)
    return false;

  // If the atoms are the same, they are guaranteed to have the same address.
  if (A_Base == B_Base)
    return true;

  // Otherwise, we can't prove this is fully resolved.
  return false;
}
void MCAsmLayout::EnsureValid(const MCFragment *F) const {
  MCSectionData &SD = *F->getParent();
  MCFragment *Cur = LastValidFragment[&SD];
  if (!Cur)
    Cur = &*SD.begin();
  else
    Cur = Cur->getNextNode();

  // Advance the layout position until the fragment is up-to-date.
  while (!isFragmentUpToDate(F)) {
    const_cast<MCAsmLayout *>(this)->LayoutFragment(Cur);
    Cur = Cur->getNextNode();
  }
}
void MCAsmLayout::ensureValid(const MCFragment *F) const {
  MCSectionData &SD = *F->getParent();
  MCFragment *Cur = LastValidFragment[&SD];
  if (!Cur)
    Cur = &*SD.begin();
  else
    Cur = Cur->getNextNode();

  // Advance the layout position until the fragment is valid.
  while (!isFragmentValid(F)) {
    assert(Cur && "Layout bookkeeping error");
    const_cast<MCAsmLayout *>(this)->layoutFragment(Cur);
    Cur = Cur->getNextNode();
  }
}
bool MCObjectWriter::isSymbolRefDifferenceFullyResolvedImpl(
    const MCAssembler &Asm, const MCSymbol &SymA, const MCFragment &FB,
    bool InSet, bool IsPCRel) const {
  const MCSection &SecA = SymA.getSection();
  const MCSection &SecB = *FB.getParent();
  // On ELF and COFF  A - B is absolute if A and B are in the same section.
  return &SecA == &SecB;
}
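// A minimal model of why "same section" suffices on ELF and COFF: both
// symbols' addresses share the section's (as yet unknown) load address, so it
// cancels out of the difference. Section and Symbol are illustrative
// stand-ins, not LLVM types.
#include <cassert>
#include <cstdint>

struct Section { uint64_t LoadAddress; };
struct Symbol { const Section *Parent; uint64_t OffsetInSection; };

// A - B depends only on the offsets when both symbols live in the same
// section, so the assembler can fold it to a constant with no relocation.
int64_t symbolDifference(const Symbol &A, const Symbol &B) {
  assert(A.Parent == B.Parent && "difference is not assembly-time constant");
  return static_cast<int64_t>(A.OffsetInSection) -
         static_cast<int64_t>(B.OffsetInSection);
}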
void MCAsmLayout::EnsureValid(const MCFragment *F) const {
  // Advance the layout position until the fragment is up-to-date.
  while (!isFragmentUpToDate(F)) {
    // Advance to the next fragment.
    MCFragment *Cur = LastValidFragment;
    if (Cur)
      Cur = Cur->getNextNode();
    if (!Cur) {
      unsigned NextIndex = 0;
      if (LastValidFragment)
        NextIndex = LastValidFragment->getParent()->getLayoutOrder() + 1;
      Cur = SectionOrder[NextIndex]->begin();
    }

    const_cast<MCAsmLayout *>(this)->LayoutFragment(Cur);
  }
}
void MCCodePadder::handleInstructionBegin(const MCInst &Inst) {
  if (!OS)
    return; // instruction was emitted outside a function

  assert(CurrHandledInstFragment == nullptr &&
         "Can't start handling an instruction while still handling another "
         "instruction");

  bool InsertionPoint = instructionRequiresInsertionPoint(Inst);
  assert((!InsertionPoint ||
          OS->getCurrentFragment()->getKind() != MCFragment::FT_Align) &&
         "Cannot insert padding nops right after an alignment fragment as it "
         "will ruin the alignment");

  uint64_t PoliciesMask = MCPaddingFragment::PFK_None;
  if (ArePoliciesActive) {
    PoliciesMask = std::accumulate(
        CodePaddingPolicies.begin(), CodePaddingPolicies.end(),
        MCPaddingFragment::PFK_None,
        [&Inst](uint64_t Mask, const MCCodePaddingPolicy *Policy) -> uint64_t {
          return Policy->instructionRequiresPaddingFragment(Inst)
                     ? (Mask | Policy->getKindMask())
                     : Mask;
        });
  }

  MCFragment *CurrFragment = OS->getCurrentFragment();
  // CurrFragment can be a previously created MCPaddingFragment. If so, let's
  // update it with the information we have, such as the instruction that it
  // should point to.
  bool needToUpdateCurrFragment =
      CurrFragment != nullptr &&
      CurrFragment->getKind() == MCFragment::FT_Padding;
  if (InsertionPoint || PoliciesMask != MCPaddingFragment::PFK_None ||
      needToUpdateCurrFragment) {
    // Temporarily hold the fragment as CurrHandledInstFragment, to be updated
    // after the instruction is written.
    CurrHandledInstFragment = OS->getOrCreatePaddingFragment();
    if (InsertionPoint)
      CurrHandledInstFragment->setAsInsertionPoint();
    CurrHandledInstFragment->setPaddingPoliciesMask(
        CurrHandledInstFragment->getPaddingPoliciesMask() | PoliciesMask);
  }
}
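// A self-contained sketch of the mask-folding idiom above: std::accumulate
// ORs together the kind masks of every policy that applies to the current
// instruction. Policy and the sample masks are illustrative, not LLVM API.
#include <cstdint>
#include <numeric>
#include <vector>

struct Policy {
  uint64_t KindMask;
  bool Applies; // Stand-in for instructionRequiresPaddingFragment(Inst).
};

uint64_t foldPolicyMasks(const std::vector<Policy> &Policies) {
  return std::accumulate(Policies.begin(), Policies.end(), uint64_t(0),
                         [](uint64_t Mask, const Policy &P) {
                           return P.Applies ? (Mask | P.KindMask) : Mask;
                         });
}

// Example: foldPolicyMasks({{0x1, true}, {0x2, false}, {0x4, true}}) == 0x5.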
uint64_t MCAssembler::computeFragmentSize(const MCAsmLayout &Layout,
                                          const MCFragment &F) const {
  switch (F.getKind()) {
  case MCFragment::FT_Data:
  case MCFragment::FT_Relaxable:
  case MCFragment::FT_CompactEncodedInst:
    return cast<MCEncodedFragment>(F).getContents().size();

  case MCFragment::FT_Fill:
    return cast<MCFillFragment>(F).getSize();

  case MCFragment::FT_LEB:
    return cast<MCLEBFragment>(F).getContents().size();

  case MCFragment::FT_Align: {
    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
    unsigned Offset = Layout.getFragmentOffset(&AF);
    unsigned Size = OffsetToAlignment(Offset, AF.getAlignment());
    // If we are padding with nops, force the padding to be larger than the
    // minimum nop size.
    if (Size > 0 && AF.hasEmitNops()) {
      while (Size % getBackend().getMinimumNopSize())
        Size += AF.getAlignment();
    }
    if (Size > AF.getMaxBytesToEmit())
      return 0;
    return Size;
  }

  case MCFragment::FT_Org: {
    const MCOrgFragment &OF = cast<MCOrgFragment>(F);
    int64_t TargetLocation;
    if (!OF.getOffset().EvaluateAsAbsolute(TargetLocation, Layout))
      report_fatal_error("expected assembly-time absolute expression");

    // FIXME: We need a way to communicate this error.
    uint64_t FragmentOffset = Layout.getFragmentOffset(&OF);
    int64_t Size = TargetLocation - FragmentOffset;
    if (Size < 0 || Size >= 0x40000000)
      report_fatal_error("invalid .org offset '" + Twine(TargetLocation) +
                         "' (at offset '" + Twine(FragmentOffset) + "')");
    return Size;
  }

  case MCFragment::FT_Dwarf:
    return cast<MCDwarfLineAddrFragment>(F).getContents().size();
  case MCFragment::FT_DwarfFrame:
    return cast<MCDwarfCallFrameFragment>(F).getContents().size();
  }

  llvm_unreachable("invalid fragment kind");
}
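// A standalone sketch of the FT_Align sizing above. offsetToAlignment here
// mirrors what LLVM's OffsetToAlignment computes for a power-of-two
// alignment: the number of pad bytes needed to round Offset up to the next
// multiple of Align. This is a simplified model, not the LLVM helper itself.
#include <cassert>
#include <cstdint>

uint64_t offsetToAlignment(uint64_t Offset, uint64_t Align) {
  assert(Align && (Align & (Align - 1)) == 0 && "Align must be a power of 2");
  return (Align - (Offset & (Align - 1))) & (Align - 1);
}

// Examples: offsetToAlignment(5, 8) == 3 and offsetToAlignment(16, 8) == 0.
// When the fragment pads with nops, the function above then grows the size
// in whole alignment units until it is a multiple of the backend's minimum
// nop size, so the padding can be expressed as complete nop instructions.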
uint64_t MCAssembler::ComputeFragmentSize(MCAsmLayout &Layout,
                                          const MCFragment &F,
                                          uint64_t SectionAddress,
                                          uint64_t FragmentOffset) const {
  switch (F.getKind()) {
  case MCFragment::FT_Data:
    return cast<MCDataFragment>(F).getContents().size();
  case MCFragment::FT_Fill:
    return cast<MCFillFragment>(F).getSize();
  case MCFragment::FT_Inst:
    return cast<MCInstFragment>(F).getInstSize();

  case MCFragment::FT_Align: {
    const MCAlignFragment &AF = cast<MCAlignFragment>(F);

    assert((!AF.hasOnlyAlignAddress() || !AF.getNextNode()) &&
           "Invalid OnlyAlignAddress bit, not the last fragment!");

    uint64_t Size = OffsetToAlignment(SectionAddress + FragmentOffset,
                                      AF.getAlignment());

    // Honor MaxBytesToEmit.
    if (Size > AF.getMaxBytesToEmit())
      return 0;

    return Size;
  }

  case MCFragment::FT_Org: {
    const MCOrgFragment &OF = cast<MCOrgFragment>(F);

    // FIXME: We should compute this sooner, we don't want to recurse here,
    // and we would like to be more functional.
    int64_t TargetLocation;
    if (!OF.getOffset().EvaluateAsAbsolute(TargetLocation, &Layout))
      report_fatal_error("expected assembly-time absolute expression");

    // FIXME: We need a way to communicate this error.
    int64_t Offset = TargetLocation - FragmentOffset;
    if (Offset < 0)
      report_fatal_error("invalid .org offset '" + Twine(TargetLocation) +
                         "' (at offset '" + Twine(FragmentOffset) + "')");

    return Offset;
  }
  }

  assert(0 && "invalid fragment kind");
  return 0;
}
MCPFRange &MCCodePadder::getJurisdiction(MCPaddingFragment *Fragment,
                                         MCAsmLayout &Layout) {
  auto JurisdictionLocation = FragmentToJurisdiction.find(Fragment);
  if (JurisdictionLocation != FragmentToJurisdiction.end())
    return JurisdictionLocation->second;

  MCPFRange Jurisdiction;

  // Forward scan the fragments in this section, starting from the given
  // fragment, and add relevant MCPaddingFragments to the jurisdiction.
  for (MCFragment *CurrFragment = Fragment; CurrFragment != nullptr;
       CurrFragment = CurrFragment->getNextNode()) {

    MCPaddingFragment *CurrPaddingFragment =
        dyn_cast<MCPaddingFragment>(CurrFragment);
    if (CurrPaddingFragment == nullptr)
      continue;

    if (CurrPaddingFragment != Fragment &&
        CurrPaddingFragment->isInsertionPoint())
      // Found the next insertion point fragment. From now on it's its
      // jurisdiction.
      break;
    for (const auto *Policy : CodePaddingPolicies) {
      if (CurrPaddingFragment->hasPaddingPolicy(Policy->getKindMask())) {
        Jurisdiction.push_back(CurrPaddingFragment);
        break;
      }
    }
  }

  auto InsertionResult =
      FragmentToJurisdiction.insert(std::make_pair(Fragment, Jurisdiction));
  assert(InsertionResult.second &&
         "Insertion to FragmentToJurisdiction failed");
  return InsertionResult.first->second;
}
bool MachObjectWriter::isSymbolRefDifferenceFullyResolvedImpl(
    const MCAssembler &Asm, const MCSymbol &SymA, const MCFragment &FB,
    bool InSet, bool IsPCRel) const {
  if (InSet)
    return true;

  // The effective address is
  //     addr(atom(A)) + offset(A)
  //   - addr(atom(B)) - offset(B)
  // and the offsets are not relocatable, so the fixup is fully resolved when
  //  addr(atom(A)) - addr(atom(B)) == 0.
  const MCSymbol &SA = findAliasedSymbol(SymA);
  const MCSection &SecA = SA.getSection();
  const MCSection &SecB = *FB.getParent();

  if (IsPCRel) {
    // The simple (Darwin, except on x86_64) way of dealing with this was to
    // assume that any reference to a temporary symbol *must* be a temporary
    // symbol in the same atom, unless the sections differ. Therefore, any
    // PCrel relocation to a temporary symbol (in the same section) is fully
    // resolved. This also works in conjunction with absolutized .set, which
    // requires the compiler to use .set to absolutize the differences between
    // symbols which the compiler knows to be assembly time constants, so we
    // don't need to worry about considering symbol differences fully
    // resolved.
    //
    // If the file isn't using sub-sections-via-symbols, we can make the
    // same assumptions about any symbol that we normally make about
    // assembler locals.
    bool hasReliableSymbolDifference = isX86_64();
    if (!hasReliableSymbolDifference) {
      if (!SA.isInSection() || &SecA != &SecB ||
          (!SA.isTemporary() && FB.getAtom() != SA.getFragment()->getAtom() &&
           Asm.getSubsectionsViaSymbols()))
        return false;
      return true;
    }
    // For Darwin x86_64, there is one special case when the reference IsPCRel.
    // If the fragment with the reference does not have a base symbol but meets
    // the simple way of dealing with this, in that it is a temporary symbol in
    // the same atom then it is assumed to be fully resolved. This is needed so
    // a relocation entry is not created and so the static linker does not
    // mess up the reference later.
    else if (!FB.getAtom() && SA.isTemporary() && SA.isInSection() &&
             &SecA == &SecB) {
      return true;
    }
  }

  // If they are not in the same section, we can't compute the diff.
  if (&SecA != &SecB)
    return false;

  const MCFragment *FA = SA.getFragment();

  // Bail if the symbol has no fragment.
  if (!FA)
    return false;

  // If the atoms are the same, they are guaranteed to have the same address.
  if (FA->getAtom() == FB.getAtom())
    return true;

  // Otherwise, we can't prove this is fully resolved.
  return false;
}
/// \brief Write the fragment \p F to the output file.
static void writeFragment(const MCAssembler &Asm, const MCAsmLayout &Layout,
                          const MCFragment &F) {
  MCObjectWriter *OW = &Asm.getWriter();

  // FIXME: Embed in fragments instead?
  uint64_t FragmentSize = Asm.computeFragmentSize(Layout, F);

  Asm.writeFragmentPadding(F, FragmentSize, OW);

  // This variable (and its dummy usage) is to participate in the assert at
  // the end of the function.
  uint64_t Start = OW->getStream().tell();
  (void) Start;

  ++stats::EmittedFragments;

  switch (F.getKind()) {
  case MCFragment::FT_Align: {
    ++stats::EmittedAlignFragments;
    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
    assert(AF.getValueSize() && "Invalid virtual align in concrete fragment!");

    uint64_t Count = FragmentSize / AF.getValueSize();

    // FIXME: This error shouldn't actually occur (the front end should emit
    // multiple .align directives to enforce the semantics it wants), but is
    // severe enough that we want to report it. How to handle this?
    if (Count * AF.getValueSize() != FragmentSize)
      report_fatal_error("undefined .align directive, value size '" +
                         Twine(AF.getValueSize()) +
                         "' is not a divisor of padding size '" +
                         Twine(FragmentSize) + "'");

    // See if we are aligning with nops, and if so do that first to try to
    // fill the Count bytes. Then if that did not fill any bytes or there are
    // any bytes left to fill use the Value and ValueSize to fill the rest.
    // If we are aligning with nops, ask that target to emit the right data.
    if (AF.hasEmitNops()) {
      if (!Asm.getBackend().writeNopData(Count, OW))
        report_fatal_error("unable to write nop sequence of " +
                           Twine(Count) + " bytes");
      break;
    }

    // Otherwise, write out in multiples of the value size.
    for (uint64_t i = 0; i != Count; ++i) {
      switch (AF.getValueSize()) {
      default: llvm_unreachable("Invalid size!");
      case 1: OW->write8 (uint8_t (AF.getValue())); break;
      case 2: OW->write16(uint16_t(AF.getValue())); break;
      case 4: OW->write32(uint32_t(AF.getValue())); break;
      case 8: OW->write64(uint64_t(AF.getValue())); break;
      }
    }
    break;
  }

  case MCFragment::FT_Data:
    ++stats::EmittedDataFragments;
    OW->writeBytes(cast<MCDataFragment>(F).getContents());
    break;

  case MCFragment::FT_Relaxable:
    ++stats::EmittedRelaxableFragments;
    OW->writeBytes(cast<MCRelaxableFragment>(F).getContents());
    break;

  case MCFragment::FT_CompactEncodedInst:
    ++stats::EmittedCompactEncodedInstFragments;
    OW->writeBytes(cast<MCCompactEncodedInstFragment>(F).getContents());
    break;

  case MCFragment::FT_Fill: {
    ++stats::EmittedFillFragments;
    const MCFillFragment &FF = cast<MCFillFragment>(F);
    assert(FF.getValueSize() && "Invalid virtual align in concrete fragment!");
    for (uint64_t i = 0, e = FF.getSize() / FF.getValueSize(); i != e; ++i) {
      switch (FF.getValueSize()) {
      default: llvm_unreachable("Invalid size!");
      case 1: OW->write8 (uint8_t (FF.getValue())); break;
      case 2: OW->write16(uint16_t(FF.getValue())); break;
      case 4: OW->write32(uint32_t(FF.getValue())); break;
      case 8: OW->write64(uint64_t(FF.getValue())); break;
      }
    }
    break;
  }

  case MCFragment::FT_LEB: {
    const MCLEBFragment &LF = cast<MCLEBFragment>(F);
    OW->writeBytes(LF.getContents());
    break;
  }

  case MCFragment::FT_SafeSEH: {
    const MCSafeSEHFragment &SF = cast<MCSafeSEHFragment>(F);
    OW->write32(SF.getSymbol()->getIndex());
    break;
  }

  case MCFragment::FT_Org: {
    ++stats::EmittedOrgFragments;
    const MCOrgFragment &OF = cast<MCOrgFragment>(F);

    for (uint64_t i = 0, e = FragmentSize; i != e; ++i)
      OW->write8(uint8_t(OF.getValue()));
    break;
  }

  case MCFragment::FT_Dwarf: {
    const MCDwarfLineAddrFragment &OF = cast<MCDwarfLineAddrFragment>(F);
    OW->writeBytes(OF.getContents());
    break;
  }
  case MCFragment::FT_DwarfFrame: {
    const MCDwarfCallFrameFragment &CF = cast<MCDwarfCallFrameFragment>(F);
    OW->writeBytes(CF.getContents());
    break;
  }

  case MCFragment::FT_Dummy:
    llvm_unreachable("Should not have been added");
  }

  assert(OW->getStream().tell() - Start == FragmentSize &&
         "The stream should advance by fragment size");
}
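// A standalone sketch of the FT_Align/FT_Fill value loops above: the padding
// region is filled with Count copies of an N-byte value, and N must divide
// the total size exactly (hence the "not a divisor" error). The little-endian
// byte emission here is illustrative; the real writer honors the target's
// endianness.
#include <cstdint>
#include <vector>

void writeFillValue(std::vector<uint8_t> &Out, uint64_t Value,
                    unsigned ValueSize, uint64_t TotalSize) {
  uint64_t Count = TotalSize / ValueSize;
  for (uint64_t i = 0; i != Count; ++i)
    for (unsigned b = 0; b != ValueSize; ++b)
      Out.push_back(uint8_t(Value >> (8 * b))); // Little-endian byte order.
}

// Example: writeFillValue(Out, 0x90, 1, 3) appends three 0x90 bytes, the
// same effect as a 3-byte FT_Fill with ValueSize 1.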
/// \brief Write the fragment \p F to the output file.
static void writeFragment(const MCAssembler &Asm, const MCAsmLayout &Layout,
                          const MCFragment &F) {
  MCObjectWriter *OW = &Asm.getWriter();

  // FIXME: Embed in fragments instead?
  uint64_t FragmentSize = Asm.computeFragmentSize(Layout, F);

  // Should NOP padding be written out before this fragment?
  unsigned BundlePadding = F.getBundlePadding();
  if (BundlePadding > 0) {
    assert(Asm.isBundlingEnabled() &&
           "Writing bundle padding with disabled bundling");
    assert(F.hasInstructions() &&
           "Writing bundle padding for a fragment without instructions");

    unsigned TotalLength = BundlePadding + static_cast<unsigned>(FragmentSize);
    if (F.alignToBundleEnd() && TotalLength > Asm.getBundleAlignSize()) {
      // If the padding itself crosses a bundle boundary, it must be emitted
      // in 2 pieces, since even nop instructions must not cross boundaries.
      //
      //        v--------------v   <- BundleAlignSize
      //   v---------v             <- BundlePadding
      // ----------------------------
      // | Prev |####|####|    F    |
      // ----------------------------
      //        ^-------------------^   <- TotalLength
      unsigned DistanceToBoundary = TotalLength - Asm.getBundleAlignSize();
      if (!Asm.getBackend().writeNopData(DistanceToBoundary, OW))
        report_fatal_error("unable to write NOP sequence of " +
                           Twine(DistanceToBoundary) + " bytes");
      BundlePadding -= DistanceToBoundary;
    }
    if (!Asm.getBackend().writeNopData(BundlePadding, OW))
      report_fatal_error("unable to write NOP sequence of " +
                         Twine(BundlePadding) + " bytes");
  }

  // This variable (and its dummy usage) is to participate in the assert at
  // the end of the function.
  uint64_t Start = OW->getStream().tell();
  (void) Start;

  ++stats::EmittedFragments;

  switch (F.getKind()) {
  case MCFragment::FT_Align: {
    ++stats::EmittedAlignFragments;
    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
    assert(AF.getValueSize() && "Invalid virtual align in concrete fragment!");

    uint64_t Count = FragmentSize / AF.getValueSize();

    // FIXME: This error shouldn't actually occur (the front end should emit
    // multiple .align directives to enforce the semantics it wants), but is
    // severe enough that we want to report it. How to handle this?
    if (Count * AF.getValueSize() != FragmentSize)
      report_fatal_error("undefined .align directive, value size '" +
                         Twine(AF.getValueSize()) +
                         "' is not a divisor of padding size '" +
                         Twine(FragmentSize) + "'");

    // See if we are aligning with nops, and if so do that first to try to
    // fill the Count bytes. Then if that did not fill any bytes or there are
    // any bytes left to fill use the Value and ValueSize to fill the rest.
    // If we are aligning with nops, ask that target to emit the right data.
    if (AF.hasEmitNops()) {
      if (!Asm.getBackend().writeNopData(Count, OW))
        report_fatal_error("unable to write nop sequence of " +
                           Twine(Count) + " bytes");
      break;
    }

    // Otherwise, write out in multiples of the value size.
    for (uint64_t i = 0; i != Count; ++i) {
      switch (AF.getValueSize()) {
      default: llvm_unreachable("Invalid size!");
      case 1: OW->Write8 (uint8_t (AF.getValue())); break;
      case 2: OW->Write16(uint16_t(AF.getValue())); break;
      case 4: OW->Write32(uint32_t(AF.getValue())); break;
      case 8: OW->Write64(uint64_t(AF.getValue())); break;
      }
    }
    break;
  }

  case MCFragment::FT_Data:
    ++stats::EmittedDataFragments;
    writeFragmentContents(F, OW);
    break;

  case MCFragment::FT_Relaxable:
    ++stats::EmittedRelaxableFragments;
    writeFragmentContents(F, OW);
    break;

  case MCFragment::FT_CompactEncodedInst:
    ++stats::EmittedCompactEncodedInstFragments;
    writeFragmentContents(F, OW);
    break;

  case MCFragment::FT_Fill: {
    ++stats::EmittedFillFragments;
    const MCFillFragment &FF = cast<MCFillFragment>(F);
    assert(FF.getValueSize() && "Invalid virtual align in concrete fragment!");
    for (uint64_t i = 0, e = FF.getSize() / FF.getValueSize(); i != e; ++i) {
      switch (FF.getValueSize()) {
      default: llvm_unreachable("Invalid size!");
      case 1: OW->Write8 (uint8_t (FF.getValue())); break;
      case 2: OW->Write16(uint16_t(FF.getValue())); break;
      case 4: OW->Write32(uint32_t(FF.getValue())); break;
      case 8: OW->Write64(uint64_t(FF.getValue())); break;
      }
    }
    break;
  }

  case MCFragment::FT_LEB: {
    const MCLEBFragment &LF = cast<MCLEBFragment>(F);
    OW->WriteBytes(LF.getContents().str());
    break;
  }

  case MCFragment::FT_Org: {
    ++stats::EmittedOrgFragments;
    const MCOrgFragment &OF = cast<MCOrgFragment>(F);

    for (uint64_t i = 0, e = FragmentSize; i != e; ++i)
      OW->Write8(uint8_t(OF.getValue()));
    break;
  }

  case MCFragment::FT_Dwarf: {
    const MCDwarfLineAddrFragment &OF = cast<MCDwarfLineAddrFragment>(F);
    OW->WriteBytes(OF.getContents().str());
    break;
  }
  case MCFragment::FT_DwarfFrame: {
    const MCDwarfCallFrameFragment &CF = cast<MCDwarfCallFrameFragment>(F);
    OW->WriteBytes(CF.getContents().str());
    break;
  }
  }

  assert(OW->getStream().tell() - Start == FragmentSize &&
         "The stream should advance by fragment size");
}
/// WriteFragmentData - Write the \p F data to the output file.
static void WriteFragmentData(const MCAssembler &Asm,
                              const MCAsmLayout &Layout,
                              const MCFragment &F) {
  MCObjectWriter *OW = &Asm.getWriter();
  uint64_t Start = OW->getStream().tell();
  (void) Start;

  ++stats::EmittedFragments;

  // FIXME: Embed in fragments instead?
  uint64_t FragmentSize = Asm.computeFragmentSize(Layout, F);
  switch (F.getKind()) {
  case MCFragment::FT_Align: {
    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
    uint64_t Count = FragmentSize / AF.getValueSize();

    assert(AF.getValueSize() && "Invalid virtual align in concrete fragment!");

    // FIXME: This error shouldn't actually occur (the front end should emit
    // multiple .align directives to enforce the semantics it wants), but is
    // severe enough that we want to report it. How to handle this?
    if (Count * AF.getValueSize() != FragmentSize)
      report_fatal_error("undefined .align directive, value size '" +
                         Twine(AF.getValueSize()) +
                         "' is not a divisor of padding size '" +
                         Twine(FragmentSize) + "'");

    // See if we are aligning with nops, and if so do that first to try to
    // fill the Count bytes. Then if that did not fill any bytes or there are
    // any bytes left to fill use the Value and ValueSize to fill the rest.
    // If we are aligning with nops, ask that target to emit the right data.
    if (AF.hasEmitNops()) {
      if (!Asm.getBackend().writeNopData(Count, OW))
        report_fatal_error("unable to write nop sequence of " +
                           Twine(Count) + " bytes");
      break;
    }

    // Otherwise, write out in multiples of the value size.
    for (uint64_t i = 0; i != Count; ++i) {
      switch (AF.getValueSize()) {
      default: llvm_unreachable("Invalid size!");
      case 1: OW->Write8 (uint8_t (AF.getValue())); break;
      case 2: OW->Write16(uint16_t(AF.getValue())); break;
      case 4: OW->Write32(uint32_t(AF.getValue())); break;
      case 8: OW->Write64(uint64_t(AF.getValue())); break;
      }
    }
    break;
  }

  case MCFragment::FT_Data: {
    const MCDataFragment &DF = cast<MCDataFragment>(F);
    assert(FragmentSize == DF.getContents().size() && "Invalid size!");
    OW->WriteBytes(DF.getContents().str());
    break;
  }

  case MCFragment::FT_Fill: {
    const MCFillFragment &FF = cast<MCFillFragment>(F);
    assert(FF.getValueSize() && "Invalid virtual align in concrete fragment!");
    for (uint64_t i = 0, e = FF.getSize() / FF.getValueSize(); i != e; ++i) {
      switch (FF.getValueSize()) {
      default: llvm_unreachable("Invalid size!");
      case 1: OW->Write8 (uint8_t (FF.getValue())); break;
      case 2: OW->Write16(uint16_t(FF.getValue())); break;
      case 4: OW->Write32(uint32_t(FF.getValue())); break;
      case 8: OW->Write64(uint64_t(FF.getValue())); break;
      }
    }
    break;
  }

  case MCFragment::FT_Inst: {
    const MCInstFragment &IF = cast<MCInstFragment>(F);
    OW->WriteBytes(StringRef(IF.getCode().begin(), IF.getCode().size()));
    break;
  }

  case MCFragment::FT_LEB: {
    const MCLEBFragment &LF = cast<MCLEBFragment>(F);
    OW->WriteBytes(LF.getContents().str());
    break;
  }

  case MCFragment::FT_Org: {
    const MCOrgFragment &OF = cast<MCOrgFragment>(F);

    for (uint64_t i = 0, e = FragmentSize; i != e; ++i)
      OW->Write8(uint8_t(OF.getValue()));
    break;
  }

  case MCFragment::FT_Dwarf: {
    const MCDwarfLineAddrFragment &OF = cast<MCDwarfLineAddrFragment>(F);
    OW->WriteBytes(OF.getContents().str());
    break;
  }
  case MCFragment::FT_DwarfFrame: {
    const MCDwarfCallFrameFragment &CF = cast<MCDwarfCallFrameFragment>(F);
    OW->WriteBytes(CF.getContents().str());
    break;
  }
  }

  assert(OW->getStream().tell() - Start == FragmentSize);
}
uint64_t MCAssembler::computeFragmentSize(const MCAsmLayout &Layout,
                                          const MCFragment &F) const {
  switch (F.getKind()) {
  case MCFragment::FT_Data:
    return cast<MCDataFragment>(F).getContents().size();
  case MCFragment::FT_Relaxable:
    return cast<MCRelaxableFragment>(F).getContents().size();
  case MCFragment::FT_CompactEncodedInst:
    return cast<MCCompactEncodedInstFragment>(F).getContents().size();
  case MCFragment::FT_Fill:
    return cast<MCFillFragment>(F).getSize();

  case MCFragment::FT_LEB:
    return cast<MCLEBFragment>(F).getContents().size();

  case MCFragment::FT_SafeSEH:
    return 4;

  case MCFragment::FT_Align: {
    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
    unsigned Offset = Layout.getFragmentOffset(&AF);
    unsigned Size = OffsetToAlignment(Offset, AF.getAlignment());
    // If we are padding with nops, force the padding to be larger than the
    // minimum nop size.
    if (Size > 0 && AF.hasEmitNops()) {
      while (Size % getBackend().getMinimumNopSize())
        Size += AF.getAlignment();
    }
    if (Size > AF.getMaxBytesToEmit())
      return 0;
    return Size;
  }

  case MCFragment::FT_Org: {
    const MCOrgFragment &OF = cast<MCOrgFragment>(F);
    MCValue Value;
    if (!OF.getOffset().evaluateAsValue(Value, Layout)) {
      getContext().reportError(OF.getLoc(),
                               "expected assembly-time absolute expression");
      return 0;
    }

    uint64_t FragmentOffset = Layout.getFragmentOffset(&OF);
    int64_t TargetLocation = Value.getConstant();
    if (const MCSymbolRefExpr *A = Value.getSymA()) {
      uint64_t Val;
      if (!Layout.getSymbolOffset(A->getSymbol(), Val)) {
        getContext().reportError(OF.getLoc(), "expected absolute expression");
        return 0;
      }
      TargetLocation += Val;
    }
    int64_t Size = TargetLocation - FragmentOffset;
    if (Size < 0 || Size >= 0x40000000) {
      getContext().reportError(
          OF.getLoc(), "invalid .org offset '" + Twine(TargetLocation) +
                           "' (at offset '" + Twine(FragmentOffset) + "')");
      return 0;
    }
    return Size;
  }

  case MCFragment::FT_Dwarf:
    return cast<MCDwarfLineAddrFragment>(F).getContents().size();
  case MCFragment::FT_DwarfFrame:
    return cast<MCDwarfCallFrameFragment>(F).getContents().size();
  case MCFragment::FT_CVInlineLines:
    return cast<MCCVInlineLineTableFragment>(F).getContents().size();
  case MCFragment::FT_CVDefRange:
    return cast<MCCVDefRangeFragment>(F).getContents().size();
  case MCFragment::FT_Dummy:
    llvm_unreachable("Should not have been added");
  }

  llvm_unreachable("invalid fragment kind");
}
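// A standalone sketch of the FT_Org size computation above: an .org fragment
// pads from its own offset up to a target location, which may be a constant
// plus a symbol's resolved offset. Negative sizes (an .org that moves
// backwards) and absurdly large ones are rejected, mirroring the error path
// above. computeOrgSize is illustrative, not LLVM API.
#include <cstdint>
#include <optional>

std::optional<int64_t> computeOrgSize(uint64_t FragmentOffset,
                                      int64_t Constant,
                                      std::optional<uint64_t> SymbolOffset) {
  int64_t TargetLocation = Constant;
  if (SymbolOffset)
    TargetLocation += static_cast<int64_t>(*SymbolOffset);
  int64_t Size = TargetLocation - static_cast<int64_t>(FragmentOffset);
  if (Size < 0 || Size >= 0x40000000)
    return std::nullopt; // Reported as "invalid .org offset" above.
  return Size;
}

// Example: a fragment at offset 0x10 reaching ".org 0x40" pads 0x30 bytes:
// computeOrgSize(0x10, 0x40, std::nullopt) == 0x30.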