void MCAssembler::WriteSectionData(const MCSectionData *SD,
                                   const MCAsmLayout &Layout,
                                   MCObjectWriter *OW) const {
  // Ignore virtual sections.
  if (getBackend().isVirtualSection(SD->getSection())) {
    assert(Layout.getSectionFileSize(SD) == 0 && "Invalid size for section!");

    // Check that contents are only things legal inside a virtual section.
    for (MCSectionData::const_iterator it = SD->begin(), ie = SD->end();
         it != ie; ++it) {
      switch (it->getKind()) {
      default:
        assert(0 && "Invalid fragment in virtual section!");
      case MCFragment::FT_Data: {
        // Check that we aren't trying to write a non-zero contents (or fixups)
        // into a virtual section. This is to support clients which use standard
        // directives to fill the contents of virtual sections.
        MCDataFragment &DF = cast<MCDataFragment>(*it);
        assert(DF.fixup_begin() == DF.fixup_end() &&
               "Cannot have fixups in virtual section!");
        for (unsigned i = 0, e = DF.getContents().size(); i != e; ++i)
          assert(DF.getContents()[i] == 0 &&
                 "Invalid data value for virtual section!");
        break;
      }
      case MCFragment::FT_Align:
        // Check that we aren't trying to write a non-zero value into a virtual
        // section.
        assert((!cast<MCAlignFragment>(it)->getValueSize() ||
                !cast<MCAlignFragment>(it)->getValue()) &&
               "Invalid align in virtual section!");
        break;
      case MCFragment::FT_Fill:
        assert(!cast<MCFillFragment>(it)->getValueSize() &&
               "Invalid fill in virtual section!");
        break;
      }
    }

    return;
  }

  uint64_t Start = OW->getStream().tell();
  (void) Start;

  for (MCSectionData::const_iterator it = SD->begin(), ie = SD->end();
       it != ie; ++it)
    WriteFragmentData(*this, Layout, *it, OW);

  assert(OW->getStream().tell() - Start == Layout.getSectionFileSize(SD));
}
void SVMELFProgramWriter::writeSectionHeader(const MCAsmLayout &Layout,
                                             const MCSectionData *SD) {
  const MCSectionELF *SE = dyn_cast<MCSectionELF>(&SD->getSection());
  assert(SE);

  uint32_t sh_type = SE->getType();
  uint32_t sh_link = 0;
  uint32_t sh_info = 0;

  // Type-specific data finds itself in sh_link and sh_info
  switch (sh_type) {

  case ELF::SHT_SYMTAB:
  case ELF::SHT_DYNSYM:
    sh_link = EMB.getStringTableIndex();
    sh_info = EMB.getLastLocalSymbolIndex();
    break;

  default:
    break;
  }

  Write32(EMB.getSectionStringTableIndex(SE));  // sh_name
  Write32(sh_type);                             // sh_type
  Write32(SE->getFlags());                      // sh_flags
  Write32(ML.getSectionMemAddress(SD));         // sh_addr
  Write32(ML.getSectionDiskOffset(SD));         // sh_offset
  Write32(Layout.getSectionFileSize(SD));       // sh_size
  Write32(sh_link);                             // sh_link
  Write32(sh_info);                             // sh_info
  Write32(SD->getAlignment());                  // sh_addralign
  Write32(SE->getEntrySize());                  // sh_entsize
}
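// For reference only (not part of the writer above, and the struct name here
// is illustrative): the ten Write32 calls emit the fields of a 32-bit ELF
// section header in declaration order, matching the standard Elf32_Shdr
// layout. A minimal sketch — all fields are 32-bit words, 40 bytes total:
struct Elf32_SectionHeaderSketch {
  uint32_t sh_name;      // offset into the section header string table
  uint32_t sh_type;      // SHT_* section type
  uint32_t sh_flags;     // SHF_* flags
  uint32_t sh_addr;      // virtual address in the memory image
  uint32_t sh_offset;    // byte offset of the section contents in the file
  uint32_t sh_size;      // size of the section contents in bytes
  uint32_t sh_link;      // type-dependent link (e.g. string table index)
  uint32_t sh_info;      // type-dependent extra info (e.g. symbol table data)
  uint32_t sh_addralign; // required alignment
  uint32_t sh_entsize;   // entry size for table-like sections
};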
void MCAssembler::writeSectionData(const MCSectionData *SD,
                                   const MCAsmLayout &Layout) const {
  // Ignore virtual sections.
  if (SD->getSection().isVirtualSection()) {
    assert(Layout.getSectionFileSize(SD) == 0 && "Invalid size for section!");

    // Check that contents are only things legal inside a virtual section.
    for (MCSectionData::const_iterator it = SD->begin(), ie = SD->end();
         it != ie; ++it) {
      switch (it->getKind()) {
      default: llvm_unreachable("Invalid fragment in virtual section!");
      case MCFragment::FT_Data: {
        // Check that we aren't trying to write a non-zero contents (or fixups)
        // into a virtual section. This is to support clients which use standard
        // directives to fill the contents of virtual sections.
        const MCDataFragment &DF = cast<MCDataFragment>(*it);
        assert(DF.fixup_begin() == DF.fixup_end() &&
               "Cannot have fixups in virtual section!");
        for (unsigned i = 0, e = DF.getContents().size(); i != e; ++i)
          if (DF.getContents()[i]) {
            if (auto *ELFSec = dyn_cast<const MCSectionELF>(&SD->getSection()))
              report_fatal_error("non-zero initializer found in section '" +
                                 ELFSec->getSectionName() + "'");
            else
              report_fatal_error("non-zero initializer found in virtual section");
          }
        break;
      }
      case MCFragment::FT_Align:
        // Check that we aren't trying to write a non-zero value into a virtual
        // section.
        assert((cast<MCAlignFragment>(it)->getValueSize() == 0 ||
                cast<MCAlignFragment>(it)->getValue() == 0) &&
               "Invalid align in virtual section!");
        break;
      case MCFragment::FT_Fill:
        assert((cast<MCFillFragment>(it)->getValueSize() == 0 ||
                cast<MCFillFragment>(it)->getValue() == 0) &&
               "Invalid fill in virtual section!");
        break;
      }
    }

    return;
  }

  uint64_t Start = getWriter().getStream().tell();
  (void)Start;

  for (MCSectionData::const_iterator it = SD->begin(), ie = SD->end();
       it != ie; ++it)
    writeFragment(*this, Layout, *it);

  assert(getWriter().getStream().tell() - Start ==
         Layout.getSectionAddressSize(SD));
}
void MachObjectWriter::WriteSection(const MCAssembler &Asm,
                                    const MCAsmLayout &Layout,
                                    const MCSectionData &SD,
                                    uint64_t FileOffset,
                                    uint64_t RelocationsStart,
                                    unsigned NumRelocations) {
  uint64_t SectionSize = Layout.getSectionAddressSize(&SD);

  // The offset is unused for virtual sections.
  if (SD.getSection().isVirtualSection()) {
    assert(Layout.getSectionFileSize(&SD) == 0 && "Invalid file size!");
    FileOffset = 0;
  }

  // struct section (68 bytes) or
  // struct section_64 (80 bytes)

  uint64_t Start = OS.tell();
  (void) Start;

  const MCSectionMachO &Section = cast<MCSectionMachO>(SD.getSection());
  WriteBytes(Section.getSectionName(), 16);
  WriteBytes(Section.getSegmentName(), 16);
  if (is64Bit()) {
    Write64(getSectionAddress(&SD)); // address
    Write64(SectionSize); // size
  } else {
    Write32(getSectionAddress(&SD)); // address
    Write32(SectionSize); // size
  }
  Write32(FileOffset);

  unsigned Flags = Section.getTypeAndAttributes();
  if (SD.hasInstructions())
    Flags |= MCSectionMachO::S_ATTR_SOME_INSTRUCTIONS;

  assert(isPowerOf2_32(SD.getAlignment()) && "Invalid alignment!");
  Write32(Log2_32(SD.getAlignment()));
  Write32(NumRelocations ? RelocationsStart : 0);
  Write32(NumRelocations);
  Write32(Flags);
  Write32(IndirectSymBase.lookup(&SD)); // reserved1
  Write32(Section.getStubSize()); // reserved2
  if (is64Bit())
    Write32(0); // reserved3

  assert(OS.tell() - Start ==
         (is64Bit() ? macho::Section64Size : macho::Section32Size));
}
void MachObjectWriter::writeSection(const MCAsmLayout &Layout,
                                    const MCSection &Sec, uint64_t VMAddr,
                                    uint64_t FileOffset, unsigned Flags,
                                    uint64_t RelocationsStart,
                                    unsigned NumRelocations) {
  uint64_t SectionSize = Layout.getSectionAddressSize(&Sec);
  const MCSectionMachO &Section = cast<MCSectionMachO>(Sec);

  // The offset is unused for virtual sections.
  if (Section.isVirtualSection()) {
    assert(Layout.getSectionFileSize(&Sec) == 0 && "Invalid file size!");
    FileOffset = 0;
  }

  // struct section (68 bytes) or
  // struct section_64 (80 bytes)

  uint64_t Start = getStream().tell();
  (void) Start;

  writeBytes(Section.getSectionName(), 16);
  writeBytes(Section.getSegmentName(), 16);
  if (is64Bit()) {
    write64(VMAddr);      // address
    write64(SectionSize); // size
  } else {
    write32(VMAddr);      // address
    write32(SectionSize); // size
  }
  write32(FileOffset);

  assert(isPowerOf2_32(Section.getAlignment()) && "Invalid alignment!");
  write32(Log2_32(Section.getAlignment()));
  write32(NumRelocations ? RelocationsStart : 0);
  write32(NumRelocations);
  write32(Flags);
  write32(IndirectSymBase.lookup(&Sec)); // reserved1
  write32(Section.getStubSize()); // reserved2

  if (is64Bit())
    write32(0); // reserved3

  assert(getStream().tell() - Start ==
         (is64Bit() ? sizeof(MachO::section_64) : sizeof(MachO::section)));
}
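// For reference only (not part of the writers above, and the struct name here
// is illustrative): both WriteSection variants emit fields in the order of the
// Mach-O section header from <mach-o/loader.h>. A minimal sketch of the 64-bit
// form, 80 bytes; the 32-bit "struct section" uses 32-bit addr/size fields and
// drops reserved3, giving 68 bytes:
struct MachOSection64Sketch {
  char     sectname[16]; // fixed 16-byte section name
  char     segname[16];  // fixed 16-byte segment name
  uint64_t addr;         // virtual memory address of the section
  uint64_t size;         // size of the section in bytes
  uint32_t offset;       // file offset of the section contents
  uint32_t align;        // alignment as a power of two (hence Log2_32 above)
  uint32_t reloff;       // file offset of the relocation entries
  uint32_t nreloc;       // number of relocation entries
  uint32_t flags;        // section type and attributes
  uint32_t reserved1;    // e.g. indirect symbol table index
  uint32_t reserved2;    // e.g. stub size
  uint32_t reserved3;    // 64-bit only
};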
void MachObjectWriter::writeObject(MCAssembler &Asm,
                                   const MCAsmLayout &Layout) {
  // Compute symbol table information and bind symbol indices.
  computeSymbolTable(Asm, LocalSymbolData, ExternalSymbolData,
                     UndefinedSymbolData);

  unsigned NumSections = Asm.size();
  const MCAssembler::VersionMinInfoType &VersionInfo =
    Layout.getAssembler().getVersionMinInfo();

  // The section data starts after the header, the segment load command (and
  // section headers) and the symbol table.
  unsigned NumLoadCommands = 1;
  uint64_t LoadCommandsSize = is64Bit() ?
    sizeof(MachO::segment_command_64) + NumSections * sizeof(MachO::section_64):
    sizeof(MachO::segment_command) + NumSections * sizeof(MachO::section);

  // Add the deployment target version info load command size, if used.
  if (VersionInfo.Major != 0) {
    ++NumLoadCommands;
    LoadCommandsSize += sizeof(MachO::version_min_command);
  }

  // Add the data-in-code load command size, if used.
  unsigned NumDataRegions = Asm.getDataRegions().size();
  if (NumDataRegions) {
    ++NumLoadCommands;
    LoadCommandsSize += sizeof(MachO::linkedit_data_command);
  }

  // Add the loh load command size, if used.
  uint64_t LOHRawSize = Asm.getLOHContainer().getEmitSize(*this, Layout);
  uint64_t LOHSize = alignTo(LOHRawSize, is64Bit() ? 8 : 4);
  if (LOHSize) {
    ++NumLoadCommands;
    LoadCommandsSize += sizeof(MachO::linkedit_data_command);
  }

  // Add the symbol table load command sizes, if used.
  unsigned NumSymbols = LocalSymbolData.size() + ExternalSymbolData.size() +
    UndefinedSymbolData.size();
  if (NumSymbols) {
    NumLoadCommands += 2;
    LoadCommandsSize += (sizeof(MachO::symtab_command) +
                         sizeof(MachO::dysymtab_command));
  }

  // Add the linker option load commands sizes.
  for (const auto &Option : Asm.getLinkerOptions()) {
    ++NumLoadCommands;
    LoadCommandsSize += ComputeLinkerOptionsLoadCommandSize(Option, is64Bit());
  }

  // Compute the total size of the section data, as well as its file size and vm
  // size.
  uint64_t SectionDataStart = (is64Bit() ? sizeof(MachO::mach_header_64) :
                               sizeof(MachO::mach_header)) + LoadCommandsSize;
  uint64_t SectionDataSize = 0;
  uint64_t SectionDataFileSize = 0;
  uint64_t VMSize = 0;
  for (const MCSection &Sec : Asm) {
    uint64_t Address = getSectionAddress(&Sec);
    uint64_t Size = Layout.getSectionAddressSize(&Sec);
    uint64_t FileSize = Layout.getSectionFileSize(&Sec);
    FileSize += getPaddingSize(&Sec, Layout);

    VMSize = std::max(VMSize, Address + Size);

    if (Sec.isVirtualSection())
      continue;

    SectionDataSize = std::max(SectionDataSize, Address + Size);
    SectionDataFileSize = std::max(SectionDataFileSize, Address + FileSize);
  }

  // The section data is padded to 4 bytes.
  //
  // FIXME: Is this machine dependent?
  unsigned SectionDataPadding = OffsetToAlignment(SectionDataFileSize, 4);
  SectionDataFileSize += SectionDataPadding;

  // Write the prolog, starting with the header and load command...
  writeHeader(MachO::MH_OBJECT, NumLoadCommands, LoadCommandsSize,
              Asm.getSubsectionsViaSymbols());
  uint32_t Prot =
      MachO::VM_PROT_READ | MachO::VM_PROT_WRITE | MachO::VM_PROT_EXECUTE;
  writeSegmentLoadCommand("", NumSections, 0, VMSize, SectionDataStart,
                          SectionDataSize, Prot, Prot);

  // ... and then the section headers.
  uint64_t RelocTableEnd = SectionDataStart + SectionDataFileSize;
  for (const MCSection &Section : Asm) {
    const auto &Sec = cast<MCSectionMachO>(Section);
    std::vector<RelAndSymbol> &Relocs = Relocations[&Sec];
    unsigned NumRelocs = Relocs.size();
    uint64_t SectionStart = SectionDataStart + getSectionAddress(&Sec);
    unsigned Flags = Sec.getTypeAndAttributes();
    if (Sec.hasInstructions())
      Flags |= MachO::S_ATTR_SOME_INSTRUCTIONS;
    writeSection(Layout, Sec, getSectionAddress(&Sec), SectionStart, Flags,
                 RelocTableEnd, NumRelocs);
    RelocTableEnd += NumRelocs * sizeof(MachO::any_relocation_info);
  }

  // Write out the deployment target information, if it's available.
  if (VersionInfo.Major != 0) {
    assert(VersionInfo.Update < 256 && "unencodable update target version");
    assert(VersionInfo.Minor < 256 && "unencodable minor target version");
    assert(VersionInfo.Major < 65536 && "unencodable major target version");
    uint32_t EncodedVersion = VersionInfo.Update | (VersionInfo.Minor << 8) |
      (VersionInfo.Major << 16);
    MachO::LoadCommandType LCType;
    switch (VersionInfo.Kind) {
    case MCVM_OSXVersionMin:
      LCType = MachO::LC_VERSION_MIN_MACOSX;
      break;
    case MCVM_IOSVersionMin:
      LCType = MachO::LC_VERSION_MIN_IPHONEOS;
      break;
    case MCVM_TvOSVersionMin:
      LCType = MachO::LC_VERSION_MIN_TVOS;
      break;
    case MCVM_WatchOSVersionMin:
      LCType = MachO::LC_VERSION_MIN_WATCHOS;
      break;
    }
    write32(LCType);
    write32(sizeof(MachO::version_min_command));
    write32(EncodedVersion);
    write32(0); // reserved.
  }

  // Write the data-in-code load command, if used.
  uint64_t DataInCodeTableEnd = RelocTableEnd + NumDataRegions * 8;
  if (NumDataRegions) {
    uint64_t DataRegionsOffset = RelocTableEnd;
    uint64_t DataRegionsSize = NumDataRegions * 8;
    writeLinkeditLoadCommand(MachO::LC_DATA_IN_CODE, DataRegionsOffset,
                             DataRegionsSize);
  }

  // Write the loh load command, if used.
  uint64_t LOHTableEnd = DataInCodeTableEnd + LOHSize;
  if (LOHSize)
    writeLinkeditLoadCommand(MachO::LC_LINKER_OPTIMIZATION_HINT,
                             DataInCodeTableEnd, LOHSize);

  // Write the symbol table load command, if used.
  if (NumSymbols) {
    unsigned FirstLocalSymbol = 0;
    unsigned NumLocalSymbols = LocalSymbolData.size();
    unsigned FirstExternalSymbol = FirstLocalSymbol + NumLocalSymbols;
    unsigned NumExternalSymbols = ExternalSymbolData.size();
    unsigned FirstUndefinedSymbol = FirstExternalSymbol + NumExternalSymbols;
    unsigned NumUndefinedSymbols = UndefinedSymbolData.size();
    unsigned NumIndirectSymbols = Asm.indirect_symbol_size();
    unsigned NumSymTabSymbols =
      NumLocalSymbols + NumExternalSymbols + NumUndefinedSymbols;
    uint64_t IndirectSymbolSize = NumIndirectSymbols * 4;
    uint64_t IndirectSymbolOffset = 0;

    // If used, the indirect symbols are written after the section data.
    if (NumIndirectSymbols)
      IndirectSymbolOffset = LOHTableEnd;

    // The symbol table is written after the indirect symbol data.
    uint64_t SymbolTableOffset = LOHTableEnd + IndirectSymbolSize;

    // The string table is written after symbol table.
    uint64_t StringTableOffset =
      SymbolTableOffset + NumSymTabSymbols * (is64Bit() ?
                                              sizeof(MachO::nlist_64) :
                                              sizeof(MachO::nlist));
    writeSymtabLoadCommand(SymbolTableOffset, NumSymTabSymbols,
                           StringTableOffset, StringTable.data().size());
    writeDysymtabLoadCommand(FirstLocalSymbol, NumLocalSymbols,
                             FirstExternalSymbol, NumExternalSymbols,
                             FirstUndefinedSymbol, NumUndefinedSymbols,
                             IndirectSymbolOffset, NumIndirectSymbols);
  }

  // Write the linker options load commands.
  for (const auto &Option : Asm.getLinkerOptions())
    writeLinkerOptionsLoadCommand(Option);

  // Write the actual section data.
  for (const MCSection &Sec : Asm) {
    Asm.writeSectionData(&Sec, Layout);

    uint64_t Pad = getPaddingSize(&Sec, Layout);
    WriteZeros(Pad);
  }

  // Write the extra padding.
  WriteZeros(SectionDataPadding);

  // Write the relocation entries.
  for (const MCSection &Sec : Asm) {
    // Write the section relocation entries, in reverse order to match 'as'
    // (approximately, the exact algorithm is more complicated than this).
    std::vector<RelAndSymbol> &Relocs = Relocations[&Sec];
    for (const RelAndSymbol &Rel : make_range(Relocs.rbegin(), Relocs.rend())) {
      write32(Rel.MRE.r_word0);
      write32(Rel.MRE.r_word1);
    }
  }

  // Write out the data-in-code region payload, if there is one.
  for (MCAssembler::const_data_region_iterator
         it = Asm.data_region_begin(), ie = Asm.data_region_end();
       it != ie; ++it) {
    const DataRegionData *Data = &(*it);
    uint64_t Start = getSymbolAddress(*Data->Start, Layout);
    uint64_t End = getSymbolAddress(*Data->End, Layout);
    DEBUG(dbgs() << "data in code region-- kind: " << Data->Kind
                 << " start: " << Start << "(" << Data->Start->getName() << ")"
                 << " end: " << End << "(" << Data->End->getName() << ")"
                 << " size: " << End - Start
                 << "\n");
    write32(Start);
    write16(End - Start);
    write16(Data->Kind);
  }

  // Write out the loh commands, if there is one.
  if (LOHSize) {
#ifndef NDEBUG
    unsigned Start = getStream().tell();
#endif
    Asm.getLOHContainer().emit(*this, Layout);
    // Pad to a multiple of the pointer size.
    writeBytes("", OffsetToAlignment(LOHRawSize, is64Bit() ? 8 : 4));
    assert(getStream().tell() - Start == LOHSize);
  }

  // Write the symbol table data, if used.
  if (NumSymbols) {
    // Write the indirect symbol entries.
    for (MCAssembler::const_indirect_symbol_iterator
           it = Asm.indirect_symbol_begin(),
           ie = Asm.indirect_symbol_end(); it != ie; ++it) {
      // Indirect symbols in the non-lazy symbol pointer section have some
      // special handling.
      const MCSectionMachO &Section =
        static_cast<const MCSectionMachO &>(*it->Section);
      if (Section.getType() == MachO::S_NON_LAZY_SYMBOL_POINTERS) {
        // If this symbol is defined and internal, mark it as such.
        if (it->Symbol->isDefined() && !it->Symbol->isExternal()) {
          uint32_t Flags = MachO::INDIRECT_SYMBOL_LOCAL;
          if (it->Symbol->isAbsolute())
            Flags |= MachO::INDIRECT_SYMBOL_ABS;
          write32(Flags);
          continue;
        }
      }

      write32(it->Symbol->getIndex());
    }

    // FIXME: Check that offsets match computed ones.

    // Write the symbol table entries.
    for (auto *SymbolData :
         {&LocalSymbolData, &ExternalSymbolData, &UndefinedSymbolData})
      for (MachSymbolData &Entry : *SymbolData)
        writeNlist(Entry, Layout);

    // Write the string table.
    getStream() << StringTable.data();
  }
}
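// Layout note (derived from the offset computations in writeObject above, not
// an authoritative Mach-O specification): the emitted MH_OBJECT file is laid
// out as
//
//   mach_header[_64]
//   load commands (segment + section headers, version-min, data-in-code,
//                  LOH, symtab/dysymtab, linker options)
//   section data, padded to 4 bytes          -> RelocTableEnd
//   relocation entries                       -> DataInCodeTableEnd
//   data-in-code entries, 8 bytes each       -> LOHTableEnd
//   linker optimization hints
//   indirect symbol table, 4 bytes per entry
//   symbol table (nlist[_64] entries)
//   string table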
void MachObjectWriter::WriteObject(MCAssembler &Asm,
                                   const MCAsmLayout &Layout) {
  unsigned NumSections = Asm.size();

  // The section data starts after the header, the segment load command (and
  // section headers) and the symbol table.
  unsigned NumLoadCommands = 1;
  uint64_t LoadCommandsSize = is64Bit() ?
    macho::SegmentLoadCommand64Size + NumSections * macho::Section64Size :
    macho::SegmentLoadCommand32Size + NumSections * macho::Section32Size;

  // Add the data-in-code load command size, if used.
  unsigned NumDataRegions = Asm.getDataRegions().size();
  if (NumDataRegions) {
    ++NumLoadCommands;
    LoadCommandsSize += macho::LinkeditLoadCommandSize;
  }

  // Add the symbol table load command sizes, if used.
  unsigned NumSymbols = LocalSymbolData.size() + ExternalSymbolData.size() +
    UndefinedSymbolData.size();
  if (NumSymbols) {
    NumLoadCommands += 2;
    LoadCommandsSize += (macho::SymtabLoadCommandSize +
                         macho::DysymtabLoadCommandSize);
  }

  // Add the linker option load commands sizes.
  const std::vector<std::vector<std::string> > &LinkerOptions =
    Asm.getLinkerOptions();
  for (unsigned i = 0, e = LinkerOptions.size(); i != e; ++i) {
    ++NumLoadCommands;
    LoadCommandsSize += ComputeLinkerOptionsLoadCommandSize(LinkerOptions[i],
                                                            is64Bit());
  }

  // Compute the total size of the section data, as well as its file size and vm
  // size.
  uint64_t SectionDataStart = (is64Bit() ? macho::Header64Size :
                               macho::Header32Size) + LoadCommandsSize;
  uint64_t SectionDataSize = 0;
  uint64_t SectionDataFileSize = 0;
  uint64_t VMSize = 0;
  for (MCAssembler::const_iterator it = Asm.begin(),
         ie = Asm.end(); it != ie; ++it) {
    const MCSectionData &SD = *it;
    uint64_t Address = getSectionAddress(&SD);
    uint64_t Size = Layout.getSectionAddressSize(&SD);
    uint64_t FileSize = Layout.getSectionFileSize(&SD);
    FileSize += getPaddingSize(&SD, Layout);

    VMSize = std::max(VMSize, Address + Size);

    if (SD.getSection().isVirtualSection())
      continue;

    SectionDataSize = std::max(SectionDataSize, Address + Size);
    SectionDataFileSize = std::max(SectionDataFileSize, Address + FileSize);
  }

  // The section data is padded to 4 bytes.
  //
  // FIXME: Is this machine dependent?
  unsigned SectionDataPadding = OffsetToAlignment(SectionDataFileSize, 4);
  SectionDataFileSize += SectionDataPadding;

  // Write the prolog, starting with the header and load command...
  WriteHeader(NumLoadCommands, LoadCommandsSize,
              Asm.getSubsectionsViaSymbols());
  WriteSegmentLoadCommand(NumSections, VMSize,
                          SectionDataStart, SectionDataSize);

  // ... and then the section headers.
  uint64_t RelocTableEnd = SectionDataStart + SectionDataFileSize;
  for (MCAssembler::const_iterator it = Asm.begin(),
         ie = Asm.end(); it != ie; ++it) {
    std::vector<macho::RelocationEntry> &Relocs = Relocations[it];
    unsigned NumRelocs = Relocs.size();
    uint64_t SectionStart = SectionDataStart + getSectionAddress(it);
    WriteSection(Asm, Layout, *it, SectionStart, RelocTableEnd, NumRelocs);
    RelocTableEnd += NumRelocs * macho::RelocationInfoSize;
  }

  // Write the data-in-code load command, if used.
  uint64_t DataInCodeTableEnd = RelocTableEnd + NumDataRegions * 8;
  if (NumDataRegions) {
    uint64_t DataRegionsOffset = RelocTableEnd;
    uint64_t DataRegionsSize = NumDataRegions * 8;
    WriteLinkeditLoadCommand(macho::LCT_DataInCode, DataRegionsOffset,
                             DataRegionsSize);
  }

  // Write the symbol table load command, if used.
  if (NumSymbols) {
    unsigned FirstLocalSymbol = 0;
    unsigned NumLocalSymbols = LocalSymbolData.size();
    unsigned FirstExternalSymbol = FirstLocalSymbol + NumLocalSymbols;
    unsigned NumExternalSymbols = ExternalSymbolData.size();
    unsigned FirstUndefinedSymbol = FirstExternalSymbol + NumExternalSymbols;
    unsigned NumUndefinedSymbols = UndefinedSymbolData.size();
    unsigned NumIndirectSymbols = Asm.indirect_symbol_size();
    unsigned NumSymTabSymbols =
      NumLocalSymbols + NumExternalSymbols + NumUndefinedSymbols;
    uint64_t IndirectSymbolSize = NumIndirectSymbols * 4;
    uint64_t IndirectSymbolOffset = 0;

    // If used, the indirect symbols are written after the section data.
    if (NumIndirectSymbols)
      IndirectSymbolOffset = DataInCodeTableEnd;

    // The symbol table is written after the indirect symbol data.
    uint64_t SymbolTableOffset = DataInCodeTableEnd + IndirectSymbolSize;

    // The string table is written after symbol table.
    uint64_t StringTableOffset =
      SymbolTableOffset + NumSymTabSymbols * (is64Bit() ? macho::Nlist64Size :
                                              macho::Nlist32Size);
    WriteSymtabLoadCommand(SymbolTableOffset, NumSymTabSymbols,
                           StringTableOffset, StringTable.size());
    WriteDysymtabLoadCommand(FirstLocalSymbol, NumLocalSymbols,
                             FirstExternalSymbol, NumExternalSymbols,
                             FirstUndefinedSymbol, NumUndefinedSymbols,
                             IndirectSymbolOffset, NumIndirectSymbols);
  }

  // Write the linker options load commands.
  for (unsigned i = 0, e = LinkerOptions.size(); i != e; ++i) {
    WriteLinkerOptionsLoadCommand(LinkerOptions[i]);
  }

  // Write the actual section data.
  for (MCAssembler::const_iterator it = Asm.begin(),
         ie = Asm.end(); it != ie; ++it) {
    Asm.writeSectionData(it, Layout);

    uint64_t Pad = getPaddingSize(it, Layout);
    for (unsigned int i = 0; i < Pad; ++i)
      Write8(0);
  }

  // Write the extra padding.
  WriteZeros(SectionDataPadding);

  // Write the relocation entries.
  for (MCAssembler::const_iterator it = Asm.begin(),
         ie = Asm.end(); it != ie; ++it) {
    // Write the section relocation entries, in reverse order to match 'as'
    // (approximately, the exact algorithm is more complicated than this).
    std::vector<macho::RelocationEntry> &Relocs = Relocations[it];
    for (unsigned i = 0, e = Relocs.size(); i != e; ++i) {
      Write32(Relocs[e - i - 1].Word0);
      Write32(Relocs[e - i - 1].Word1);
    }
  }

  // Write out the data-in-code region payload, if there is one.
  for (MCAssembler::const_data_region_iterator
         it = Asm.data_region_begin(), ie = Asm.data_region_end();
       it != ie; ++it) {
    const DataRegionData *Data = &(*it);
    uint64_t Start =
      getSymbolAddress(&Layout.getAssembler().getSymbolData(*Data->Start),
                       Layout);
    uint64_t End =
      getSymbolAddress(&Layout.getAssembler().getSymbolData(*Data->End),
                       Layout);
    DEBUG(dbgs() << "data in code region-- kind: " << Data->Kind
                 << " start: " << Start << "(" << Data->Start->getName() << ")"
                 << " end: " << End << "(" << Data->End->getName() << ")"
                 << " size: " << End - Start
                 << "\n");
    Write32(Start);
    Write16(End - Start);
    Write16(Data->Kind);
  }

  // Write the symbol table data, if used.
  if (NumSymbols) {
    // Write the indirect symbol entries.
    for (MCAssembler::const_indirect_symbol_iterator
           it = Asm.indirect_symbol_begin(),
           ie = Asm.indirect_symbol_end(); it != ie; ++it) {
      // Indirect symbols in the non-lazy symbol pointer section have some
      // special handling.
      const MCSectionMachO &Section =
        static_cast<const MCSectionMachO&>(it->SectionData->getSection());
      if (Section.getType() == MCSectionMachO::S_NON_LAZY_SYMBOL_POINTERS) {
        // If this symbol is defined and internal, mark it as such.
        if (it->Symbol->isDefined() &&
            !Asm.getSymbolData(*it->Symbol).isExternal()) {
          uint32_t Flags = macho::ISF_Local;
          if (it->Symbol->isAbsolute())
            Flags |= macho::ISF_Absolute;
          Write32(Flags);
          continue;
        }
      }

      Write32(Asm.getSymbolData(*it->Symbol).getIndex());
    }

    // FIXME: Check that offsets match computed ones.

    // Write the symbol table entries.
    for (unsigned i = 0, e = LocalSymbolData.size(); i != e; ++i)
      WriteNlist(LocalSymbolData[i], Layout);
    for (unsigned i = 0, e = ExternalSymbolData.size(); i != e; ++i)
      WriteNlist(ExternalSymbolData[i], Layout);
    for (unsigned i = 0, e = UndefinedSymbolData.size(); i != e; ++i)
      WriteNlist(UndefinedSymbolData[i], Layout);

    // Write the string table.
    OS << StringTable.str();
  }
}
void MachObjectWriter::WriteObject(MCAssembler &Asm,
                                   const MCAsmLayout &Layout) {
  unsigned NumSections = Asm.size();
  const MCAssembler::VersionMinInfoType &VersionInfo =
    Layout.getAssembler().getVersionMinInfo();

  // The section data starts after the header, the segment load command (and
  // section headers) and the symbol table.
  unsigned NumLoadCommands = 1;
  uint64_t LoadCommandsSize = is64Bit() ?
    sizeof(MachO::segment_command_64) + NumSections * sizeof(MachO::section_64):
    sizeof(MachO::segment_command) + NumSections * sizeof(MachO::section);

  // Add the deployment target version info load command size, if used.
  if (VersionInfo.Major != 0) {
    ++NumLoadCommands;
    LoadCommandsSize += sizeof(MachO::version_min_command);
  }

  // Add the data-in-code load command size, if used.
  unsigned NumDataRegions = Asm.getDataRegions().size();
  if (NumDataRegions) {
    ++NumLoadCommands;
    LoadCommandsSize += sizeof(MachO::linkedit_data_command);
  }

  // Add the loh load command size, if used.
  uint64_t LOHRawSize = Asm.getLOHContainer().getEmitSize(*this, Layout);
  uint64_t LOHSize = RoundUpToAlignment(LOHRawSize, is64Bit() ? 8 : 4);
  if (LOHSize) {
    ++NumLoadCommands;
    LoadCommandsSize += sizeof(MachO::linkedit_data_command);
  }

  // Add the symbol table load command sizes, if used.
  unsigned NumSymbols = LocalSymbolData.size() + ExternalSymbolData.size() +
    UndefinedSymbolData.size();
  if (NumSymbols) {
    NumLoadCommands += 2;
    LoadCommandsSize += (sizeof(MachO::symtab_command) +
                         sizeof(MachO::dysymtab_command));
  }

  // Add the linker option load commands sizes.
  const std::vector<std::vector<std::string> > &LinkerOptions =
    Asm.getLinkerOptions();
  for (unsigned i = 0, e = LinkerOptions.size(); i != e; ++i) {
    ++NumLoadCommands;
    LoadCommandsSize += ComputeLinkerOptionsLoadCommandSize(LinkerOptions[i],
                                                            is64Bit());
  }

  // Compute the total size of the section data, as well as its file size and vm
  // size.
  uint64_t SectionDataStart = (is64Bit() ? sizeof(MachO::mach_header_64) :
                               sizeof(MachO::mach_header)) + LoadCommandsSize;
  uint64_t SectionDataSize = 0;
  uint64_t SectionDataFileSize = 0;
  uint64_t VMSize = 0;
  for (MCAssembler::const_iterator it = Asm.begin(),
         ie = Asm.end(); it != ie; ++it) {
    const MCSectionData &SD = *it;
    uint64_t Address = getSectionAddress(&SD);
    uint64_t Size = Layout.getSectionAddressSize(&SD);
    uint64_t FileSize = Layout.getSectionFileSize(&SD);
    FileSize += getPaddingSize(&SD, Layout);

    VMSize = std::max(VMSize, Address + Size);

    if (SD.getSection().isVirtualSection())
      continue;

    SectionDataSize = std::max(SectionDataSize, Address + Size);
    SectionDataFileSize = std::max(SectionDataFileSize, Address + FileSize);
  }

  // The section data is padded to 4 bytes.
  //
  // FIXME: Is this machine dependent?
  unsigned SectionDataPadding = OffsetToAlignment(SectionDataFileSize, 4);
  SectionDataFileSize += SectionDataPadding;

  // Write the prolog, starting with the header and load command...
  WriteHeader(NumLoadCommands, LoadCommandsSize,
              Asm.getSubsectionsViaSymbols());
  WriteSegmentLoadCommand(NumSections, VMSize,
                          SectionDataStart, SectionDataSize);

  // ... and then the section headers.
  uint64_t RelocTableEnd = SectionDataStart + SectionDataFileSize;
  for (MCAssembler::const_iterator it = Asm.begin(),
         ie = Asm.end(); it != ie; ++it) {
    std::vector<MachO::any_relocation_info> &Relocs = Relocations[it];
    unsigned NumRelocs = Relocs.size();
    uint64_t SectionStart = SectionDataStart + getSectionAddress(it);
    WriteSection(Asm, Layout, *it, SectionStart, RelocTableEnd, NumRelocs);
    RelocTableEnd += NumRelocs * sizeof(MachO::any_relocation_info);
  }

  // Write out the deployment target information, if it's available.
  if (VersionInfo.Major != 0) {
    assert(VersionInfo.Update < 256 && "unencodable update target version");
    assert(VersionInfo.Minor < 256 && "unencodable minor target version");
    assert(VersionInfo.Major < 65536 && "unencodable major target version");
    uint32_t EncodedVersion = VersionInfo.Update | (VersionInfo.Minor << 8) |
      (VersionInfo.Major << 16);
    Write32(VersionInfo.Kind == MCVM_OSXVersionMin ?
            MachO::LC_VERSION_MIN_MACOSX : MachO::LC_VERSION_MIN_IPHONEOS);
    Write32(sizeof(MachO::version_min_command));
    Write32(EncodedVersion);
    Write32(0); // reserved.
  }

  // Write the data-in-code load command, if used.
  uint64_t DataInCodeTableEnd = RelocTableEnd + NumDataRegions * 8;
  if (NumDataRegions) {
    uint64_t DataRegionsOffset = RelocTableEnd;
    uint64_t DataRegionsSize = NumDataRegions * 8;
    WriteLinkeditLoadCommand(MachO::LC_DATA_IN_CODE, DataRegionsOffset,
                             DataRegionsSize);
  }

  // Write the loh load command, if used.
  uint64_t LOHTableEnd = DataInCodeTableEnd + LOHSize;
  if (LOHSize)
    WriteLinkeditLoadCommand(MachO::LC_LINKER_OPTIMIZATION_HINT,
                             DataInCodeTableEnd, LOHSize);

  // Write the symbol table load command, if used.
  if (NumSymbols) {
    unsigned FirstLocalSymbol = 0;
    unsigned NumLocalSymbols = LocalSymbolData.size();
    unsigned FirstExternalSymbol = FirstLocalSymbol + NumLocalSymbols;
    unsigned NumExternalSymbols = ExternalSymbolData.size();
    unsigned FirstUndefinedSymbol = FirstExternalSymbol + NumExternalSymbols;
    unsigned NumUndefinedSymbols = UndefinedSymbolData.size();
    unsigned NumIndirectSymbols = Asm.indirect_symbol_size();
    unsigned NumSymTabSymbols =
      NumLocalSymbols + NumExternalSymbols + NumUndefinedSymbols;
    uint64_t IndirectSymbolSize = NumIndirectSymbols * 4;
    uint64_t IndirectSymbolOffset = 0;

    // If used, the indirect symbols are written after the section data.
    if (NumIndirectSymbols)
      IndirectSymbolOffset = LOHTableEnd;

    // The symbol table is written after the indirect symbol data.
    uint64_t SymbolTableOffset = LOHTableEnd + IndirectSymbolSize;

    // The string table is written after symbol table.
    uint64_t StringTableOffset =
      SymbolTableOffset + NumSymTabSymbols * (is64Bit() ?
                                              sizeof(MachO::nlist_64) :
                                              sizeof(MachO::nlist));
    WriteSymtabLoadCommand(SymbolTableOffset, NumSymTabSymbols,
                           StringTableOffset, StringTable.data().size());
    WriteDysymtabLoadCommand(FirstLocalSymbol, NumLocalSymbols,
                             FirstExternalSymbol, NumExternalSymbols,
                             FirstUndefinedSymbol, NumUndefinedSymbols,
                             IndirectSymbolOffset, NumIndirectSymbols);
  }

  // Write the linker options load commands.
  for (unsigned i = 0, e = LinkerOptions.size(); i != e; ++i) {
    WriteLinkerOptionsLoadCommand(LinkerOptions[i]);
  }

  // Write the actual section data.
  for (MCAssembler::const_iterator it = Asm.begin(),
         ie = Asm.end(); it != ie; ++it) {
    Asm.writeSectionData(it, Layout);

    uint64_t Pad = getPaddingSize(it, Layout);
    for (unsigned int i = 0; i < Pad; ++i)
      Write8(0);
  }

  // Write the extra padding.
  WriteZeros(SectionDataPadding);

  // Write the relocation entries.
  for (MCAssembler::const_iterator it = Asm.begin(),
         ie = Asm.end(); it != ie; ++it) {
    // Write the section relocation entries, in reverse order to match 'as'
    // (approximately, the exact algorithm is more complicated than this).
    std::vector<MachO::any_relocation_info> &Relocs = Relocations[it];
    for (unsigned i = 0, e = Relocs.size(); i != e; ++i) {
      Write32(Relocs[e - i - 1].r_word0);
      Write32(Relocs[e - i - 1].r_word1);
    }
  }

  // Write out the data-in-code region payload, if there is one.
  for (MCAssembler::const_data_region_iterator
         it = Asm.data_region_begin(), ie = Asm.data_region_end();
       it != ie; ++it) {
    const DataRegionData *Data = &(*it);
    uint64_t Start =
      getSymbolAddress(&Layout.getAssembler().getSymbolData(*Data->Start),
                       Layout);
    uint64_t End =
      getSymbolAddress(&Layout.getAssembler().getSymbolData(*Data->End),
                       Layout);
    DEBUG(dbgs() << "data in code region-- kind: " << Data->Kind
                 << " start: " << Start << "(" << Data->Start->getName() << ")"
                 << " end: " << End << "(" << Data->End->getName() << ")"
                 << " size: " << End - Start
                 << "\n");
    Write32(Start);
    Write16(End - Start);
    Write16(Data->Kind);
  }

  // Write out the loh commands, if there is one.
  if (LOHSize) {
#ifndef NDEBUG
    unsigned Start = OS.tell();
#endif
    Asm.getLOHContainer().Emit(*this, Layout);
    // Pad to a multiple of the pointer size.
    WriteBytes("", OffsetToAlignment(LOHRawSize, is64Bit() ? 8 : 4));
    assert(OS.tell() - Start == LOHSize);
  }

  // Write the symbol table data, if used.
  if (NumSymbols) {
    // Write the indirect symbol entries.
    for (MCAssembler::const_indirect_symbol_iterator
           it = Asm.indirect_symbol_begin(),
           ie = Asm.indirect_symbol_end(); it != ie; ++it) {
      // Indirect symbols in the non-lazy symbol pointer section have some
      // special handling.
      const MCSectionMachO &Section =
        static_cast<const MCSectionMachO&>(it->SectionData->getSection());
      if (Section.getType() == MachO::S_NON_LAZY_SYMBOL_POINTERS) {
        // If this symbol is defined and internal, mark it as such.
        if (it->Symbol->isDefined() &&
            !Asm.getSymbolData(*it->Symbol).isExternal()) {
          uint32_t Flags = MachO::INDIRECT_SYMBOL_LOCAL;
          if (it->Symbol->isAbsolute())
            Flags |= MachO::INDIRECT_SYMBOL_ABS;
          Write32(Flags);
          continue;
        }
      }

      Write32(Asm.getSymbolData(*it->Symbol).getIndex());
    }

    // FIXME: Check that offsets match computed ones.

    // Write the symbol table entries.
    for (unsigned i = 0, e = LocalSymbolData.size(); i != e; ++i)
      WriteNlist(LocalSymbolData[i], Layout);
    for (unsigned i = 0, e = ExternalSymbolData.size(); i != e; ++i)
      WriteNlist(ExternalSymbolData[i], Layout);
    for (unsigned i = 0, e = UndefinedSymbolData.size(); i != e; ++i)
      WriteNlist(UndefinedSymbolData[i], Layout);

    // Write the string table.
    OS << StringTable.data();
  }
}
void SVMELFProgramWriter::WriteObject(MCAssembler &Asm,
                                      const MCAsmLayout &Layout) {
  // First pass, allocate all non-debug sections and compute the
  // initial layout of the plaintext RWDATA segments.
  ML.AllocateSections(Asm, Layout);

  // Apply fixups that were stored in RecordRelocation
  ML.ApplyLateFixups(Asm, Layout);

  // Now we can know the final binary image of the RWDATA segments. Compress them.
  rwCompress(Asm, Layout, ML);
  ML.AllocateSections(Asm, Layout);

  if (ELFDebug) {
    // Allocate all debug sections last
    EMB.BuildSections(Asm, Layout, ML);
    ML.AllocateSections(Asm, Layout);
  }

  // Write header blocks
  writeELFHeader(Asm, Layout);
  for (int S = 0; S < SPS_DEBUG; ++S)
    writeProgramHeader((SVMProgramSection) S);

  // Write program data, sorted by SPS section
  int endS = ELFDebug ? SPS_NUM_SECTIONS : SPS_DEBUG;
  for (int S = 0; S < endS; ++S) {
    if (S == SPS_DEBUG)
      writeDebugMessage();

    for (MCAssembler::const_iterator it = Asm.begin(), ie = Asm.end();
         it != ie; ++it) {
      const MCSectionData *SD = &*it;
      if (ML.getSectionKind(SD) != S)
        continue;
      if (Layout.getSectionFileSize(SD) == 0)
        continue;

      padToOffset(ML.getSectionDiskOffset(SD));
      Asm.WriteSectionData(SD, Layout);
    }
  }

  if (ELFDebug) {
    // On debug binaries, generate section headers last
    padToOffset(SHOffset);

    // Dummy NULL section header (index 0)
    WriteZeros(sizeof(ELF::Elf32_Shdr));

    for (MCAssembler::const_iterator it = Asm.begin(), ie = Asm.end();
         it != ie; ++it) {
      const MCSectionData *SD = &*it;
      writeSectionHeader(Layout, SD);
    }
  }
}