Example #1
void doPass(const Options& opts, ld::Internal& state)
{
	const bool log = false;
	
	// only make __huge section in final linked images
	if ( opts.outputKind() == Options::kObjectFile )
		return;

	// only make __huge section for x86_64
	if ( opts.architecture() != CPU_TYPE_X86_64 )
		return;

	// only needed if some (non-linkedit) atoms have an address >2GB from the base address
	state.usingHugeSections = false;
	uint64_t address = 0;
	for (std::vector<ld::Internal::FinalSection*>::iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		if ( sect->type() == ld::Section::typePageZero )
			continue;
		if ( sect->type() == ld::Section::typeStack )
			continue;
		for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
			const ld::Atom* atom = *ait;
			if ( (address > 0x7FFFFFFFLL) && !sect->isSectionHidden() ) {
				state.usingHugeSections = true;
				if (log) fprintf(stderr, "atom: %s is >2GB (0x%09llX), so enabling huge mode\n", atom->name(), (unsigned long long) address);
				break;
			}
			address += atom->size();
		}
		if ( state.usingHugeSections )
			break;
	}
	if ( !state.usingHugeSections )
		return;

	// move all zero-fill atoms that are >1MB in size to a new __huge section
	ld::Internal::FinalSection* hugeSection = state.getFinalSection(ld::Section("__DATA", "__huge", ld::Section::typeZeroFill));
	for (std::vector<ld::Internal::FinalSection*>::iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		if ( sect == hugeSection )
			continue;
		if ( sect->type() == ld::Section::typeZeroFill ) {
			bool movedSome = false;
			for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
				const ld::Atom* atom = *ait;
				if ( atom->size() > 1024*1024 ) {
					hugeSection->atoms.push_back(atom);
					if (log) fprintf(stderr, "moved to __huge: %s, size=%llu\n", atom->name(), (unsigned long long) atom->size());
					*ait = NULL;  // change atom to NULL for later bulk removal
					movedSome = true;
				}
			}
			if ( movedSome ) 
				sect->atoms.erase(std::remove_if(sect->atoms.begin(), sect->atoms.end(), NullAtom()), sect->atoms.end());
		}
	}

	
}
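The bulk-removal idiom above (std::remove_if with a NullAtom predicate) relies on a small functor that is defined elsewhere in this pass and not shown here. A minimal sketch of such a predicate, assuming it only needs to match the NULL entries planted in the loop:

// Hypothetical sketch of the NullAtom predicate used with std::remove_if above;
// the real definition lives elsewhere in the pass source.
class NullAtom
{
public:
	// matches the entries that were set to NULL so erase() can drop them in bulk
	bool operator()(const ld::Atom* atom) const {
		return (atom == NULL);
	}
};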
Example #2
void doPass(const Options& opts, ld::Internal& internal)
{
	const bool log = false;
	
	// only make GOT section in final linked images
	if ( opts.outputKind() == Options::kObjectFile )
		return;

	// walk all atoms and fixups looking for GOT-able references
	// don't create GOT atoms during this loop because that could invalidate the sections iterator
	std::vector<const ld::Atom*> atomsReferencingGOT;
	std::map<const ld::Atom*,ld::Atom*> gotMap;
	std::map<const ld::Atom*,bool>		weakImportMap;
	atomsReferencingGOT.reserve(128);
	for (std::vector<ld::Internal::FinalSection*>::iterator sit=internal.sections.begin(); sit != internal.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin();  ait != sect->atoms.end(); ++ait) {
			const ld::Atom* atom = *ait;
			bool atomUsesGOT = false;
			const ld::Atom* targetOfGOT = NULL;
			bool targetIsWeakImport = false;
			for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
				if ( fit->firstInCluster() ) 
					targetOfGOT = NULL;
				switch ( fit->binding ) {
					case ld::Fixup::bindingsIndirectlyBound:
						targetOfGOT = internal.indirectBindingTable[fit->u.bindingIndex];
						targetIsWeakImport = fit->weakImport;
						break;
					case ld::Fixup::bindingDirectlyBound:
						targetOfGOT = fit->u.target;
						targetIsWeakImport = fit->weakImport;
						break;
					default:
						break;
				}
				bool optimizable;
				if ( !gotFixup(opts, internal, targetOfGOT, fit, &optimizable) )
					continue;
				if ( optimizable ) {
					// change from load of GOT entry to lea of target
					if ( log ) fprintf(stderr, "optimized GOT usage in %s to %s\n", atom->name(), targetOfGOT->name());
					switch ( fit->binding ) {
						case ld::Fixup::bindingsIndirectlyBound:
						case ld::Fixup::bindingDirectlyBound:
							fit->binding = ld::Fixup::bindingDirectlyBound;
							fit->u.target = targetOfGOT;
							switch ( fit->kind ) {
								case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad:
									fit->kind = ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA;
									break;
#if SUPPORT_ARCH_arm64
								case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
									fit->kind = ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21;
									break;
								case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12:
									fit->kind = ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12;
									break;
#endif
								default:
									assert(0 && "unsupported GOT reference kind");
									break;
							}
							break;
						default:
							assert(0 && "unsupported GOT reference");
							break;
					}
				}
				else {
					// remember that we need to use GOT in this function
					if ( log ) fprintf(stderr, "found GOT use in %s to %s\n", atom->name(), targetOfGOT->name());
					if ( !atomUsesGOT ) {
						atomsReferencingGOT.push_back(atom);
						atomUsesGOT = true;
					}
					gotMap[targetOfGOT] = NULL;
					// record weak_import attribute
					std::map<const ld::Atom*,bool>::iterator pos = weakImportMap.find(targetOfGOT);
					if ( pos == weakImportMap.end() ) {
						// target not in weakImportMap, so add
						if ( log ) fprintf(stderr, "weakImportMap[%s] = %d\n", targetOfGOT->name(), targetIsWeakImport);
						weakImportMap[targetOfGOT] = targetIsWeakImport; 
					}
					else {
						// target in weakImportMap, check for weakness mismatch
						if ( pos->second != targetIsWeakImport ) {
							// found mismatch
							switch ( opts.weakReferenceMismatchTreatment() ) {
								case Options::kWeakReferenceMismatchError:
									throwf("mismatching weak references for symbol: %s", targetOfGOT->name());
								case Options::kWeakReferenceMismatchWeak:
									pos->second = true;
									break;
								case Options::kWeakReferenceMismatchNonWeak:
									pos->second = false;
									break;
							}
						}
					}
				}
			}
		}
	}
	
	bool is64 = false;
	switch ( opts.architecture() ) {
#if SUPPORT_ARCH_i386
		case CPU_TYPE_I386:
			is64 = false;
			break;
#endif
#if SUPPORT_ARCH_x86_64
		case CPU_TYPE_X86_64:
			is64 = true;
			break;
#endif
#if SUPPORT_ARCH_arm_any
		case CPU_TYPE_ARM: 
			is64 = false;
			break;
#endif
#if SUPPORT_ARCH_arm64
		case CPU_TYPE_ARM64: 
			is64 = true;
			break;
#endif
	}
	
	// make GOT entries	
	for (std::map<const ld::Atom*,ld::Atom*>::iterator it = gotMap.begin(); it != gotMap.end(); ++it) {
		it->second = new GOTEntryAtom(internal, it->first, weakImportMap[it->first], is64);
	}
	
	// update atoms to use GOT entries
	for (std::vector<const ld::Atom*>::iterator it=atomsReferencingGOT.begin(); it != atomsReferencingGOT.end(); ++it) {
		const ld::Atom* atom = *it;
		const ld::Atom* targetOfGOT = NULL;
		ld::Fixup::iterator fitThatSetTarget = NULL;
		for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
			if ( fit->firstInCluster() ) {
				targetOfGOT = NULL;
				fitThatSetTarget = NULL;
			}
			switch ( fit->binding ) {
				case ld::Fixup::bindingsIndirectlyBound:
					targetOfGOT = internal.indirectBindingTable[fit->u.bindingIndex];
					fitThatSetTarget = fit;
					break;
				case ld::Fixup::bindingDirectlyBound:
					targetOfGOT = fit->u.target;
					fitThatSetTarget = fit;
					break;
				default:
					break;
			}
			bool optimizable;
			if ( (targetOfGOT == NULL) || !gotFixup(opts, internal, targetOfGOT, fit, &optimizable) )
				continue;
			if ( !optimizable ) {
				// GOT use not optimized away, update to bind to GOT entry
				assert(fitThatSetTarget != NULL);
				switch ( fitThatSetTarget->binding ) {
					case ld::Fixup::bindingsIndirectlyBound:
					case ld::Fixup::bindingDirectlyBound:
						fitThatSetTarget->binding = ld::Fixup::bindingDirectlyBound;
						fitThatSetTarget->u.target = gotMap[targetOfGOT];
						break;
					default:
						assert(0 && "unsupported GOT reference");
						break;
				}
			}
		}
	}
	
	// sort new atoms so links are consistent
	for (std::vector<ld::Internal::FinalSection*>::iterator sit=internal.sections.begin(); sit != internal.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		if ( sect->type() == ld::Section::typeNonLazyPointer ) {
			std::sort(sect->atoms.begin(), sect->atoms.end(), AtomByNameSorter());
		}
	}
}
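The gotFixup() helper called in both loops above is defined elsewhere in this pass and is not included in this example. Its shape can be inferred from the call sites: it reports whether a fixup references a GOT entry, and whether that GOT load can be rewritten as a direct LEA of the target. A minimal, hypothetical sketch (the fixup kinds handled and the optimizability rule below are assumptions, not the real implementation):

// Hypothetical sketch of gotFixup(), inferred from its call sites above.
// Returns true if the fixup is a GOT reference; *optimizable reports whether
// the GOT load can be turned into an LEA of the target (assumed to be possible
// only when the target is defined in this image, i.e. not a proxy from another dylib).
static bool gotFixup(const Options& opts, ld::Internal& internal, const ld::Atom* targetOfGOT,
						const ld::Fixup* fit, bool* optimizable)
{
	switch ( fit->kind ) {
		case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad:
			if ( targetOfGOT == NULL )
				return false;
			*optimizable = (targetOfGOT->definition() != ld::Atom::definitionProxy);
			return true;
		default:
			return false;
	}
}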
Example #3
//
// The tail-call optimization may result in a function ending in a jump (b)
// to another function.  At compile time the compiler does not know
// whether the target of the jump will be in the same mode (ARM vs Thumb).
// The ARM/Thumb instruction set has a way to change modes in a bl(x)
// instruction, but no instruction to change mode in a jump (b) instruction.
// In those rare cases, the linker needs to insert a small shim of code to
// make the mode switch.
//
void doPass(const Options& opts, ld::Internal& state)
{	
	// only make branch shims in final linked images
	if ( opts.outputKind() == Options::kObjectFile )
		return;

	// only ARM needs branch shims
	if ( opts.architecture() != CPU_TYPE_ARM )
		return;
	
	const bool makingKextBundle = (opts.outputKind() == Options::kKextBundle);

	// scan all sections
	for (std::vector<ld::Internal::FinalSection*>::iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		std::map<const Atom*, const Atom*> atomToThumbMap;
		std::map<const Atom*, const Atom*> thumbToAtomMap;
		std::vector<const Atom*> shims;
		// scan section for branch instructions that need to switch mode
		for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin();  ait != sect->atoms.end(); ++ait) {
			const ld::Atom* atom = *ait;
			const ld::Atom* target = NULL;
			bool targetIsProxy;
			for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
				switch ( fit->kind ) {
					case ld::Fixup::kindStoreTargetAddressThumbBranch22:
						extractTarget(fit, state, &target);
						targetIsProxy = (target->definition() == ld::Atom::definitionProxy);
						if ( ! target->isThumb() ) {
							const uint8_t* fixUpLocation = atom->rawContentPointer();
							// <rdar://problem/9544194> don't try to scan atom for branches if atom unwilling to supply raw content
							if ( fixUpLocation == NULL )
								break;
							fixUpLocation += fit->offsetInAtom;
							uint32_t instruction = *((uint32_t*)fixUpLocation);
							bool is_b = ((instruction & 0xD000F800) == 0x9000F000);
							// need shim for branch from thumb to arm, or for call to function outside kext
							if ( is_b || (targetIsProxy && makingKextBundle) ) {
								if ( _s_log ) fprintf(stderr, "need to add thumb->arm instr=0x%08X shim to %s for %s\n", instruction, target->name(), atom->name()); 
								const Atom* shim = NULL;
								std::map<const Atom*, const Atom*>::iterator pos = thumbToAtomMap.find(target);
								if ( pos == thumbToAtomMap.end() ) {
									if ( opts.archSupportsThumb2() ) {
										// <rdar://problem/9116044> make long-branch style shims for arm kexts
										if ( makingKextBundle && opts.allowTextRelocs() )
											shim = new NoPICThumb2ToArmShimAtom(target, *sect);
										else
											shim = new Thumb2ToArmShimAtom(target, *sect);
									}
									else {
										shim = new Thumb1ToArmShimAtom(target, *sect);
									}
									shims.push_back(shim);
									thumbToAtomMap[target] = shim;
								}
								else {
									shim = pos->second;
								}
								fit->binding = ld::Fixup::bindingDirectlyBound;
								fit->u.target = shim;
							}
						}
						break;
					case ld::Fixup::kindStoreTargetAddressARMBranch24:
						extractTarget(fit, state, &target);
						targetIsProxy = (target->definition() == ld::Atom::definitionProxy);
						if ( target->isThumb() || (targetIsProxy && makingKextBundle) ) {
							const uint8_t* fixUpLocation = atom->rawContentPointer();
							// <rdar://problem/9544194> don't try to scan atom for branches if atom unwilling to supply raw content
							if ( fixUpLocation == NULL )
								break;
							fixUpLocation += fit->offsetInAtom;
							uint32_t instruction = *((uint32_t*)fixUpLocation);
							bool is_b = ((instruction & 0x0F000000) == 0x0A000000) && ((instruction & 0xF0000000) != 0xF0000000);
							// need shim for branch from arm to thumb, or for call to function outside kext
							if ( is_b || (targetIsProxy && makingKextBundle) ) {
								if ( _s_log ) fprintf(stderr, "need to add arm->thumb instr=0x%08X shim to %s for %s\n", instruction, target->name(), atom->name()); 
								const Atom* shim = NULL;
								std::map<const Atom*, const Atom*>::iterator pos = atomToThumbMap.find(target);
								if ( pos == atomToThumbMap.end() ) {
									// <rdar://problem/9116044> make long-branch style shims for arm kexts
									if ( makingKextBundle && opts.allowTextRelocs() )
										shim = new NoPICARMtoThumbShimAtom(target, *sect);
									else
										shim = new ARMtoThumbShimAtom(target, *sect);
									shims.push_back(shim);
									atomToThumbMap[target] = shim;
								}
								else {
									shim = pos->second;
								}
								fit->binding = ld::Fixup::bindingDirectlyBound;
								fit->u.target = shim;
							}
						}
						break;
					
					//case ld::Fixup::kindStoreARMBranch24:
					//case ld::Fixup::kindStoreThumbBranch22:
					// Note: these fixups will only be seen if the b/bl is to a symbol plus addend
					// for now we don't handle making shims.  If a shim is needed there will
					// be an error later.
					//	break;
					default:
						break;
				}
			}
		}

		// append all new shims to end of __text
		sect->atoms.insert(sect->atoms.end(), shims.begin(), shims.end());
	}
}
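The extractTarget() helper used in both branch cases above resolves a fixup's binding to its target atom; its definition is not included in this example. A minimal sketch inferred from how it is called and from the fields used elsewhere in this pass (the handling of other binding kinds is an assumption):

// Hypothetical sketch of extractTarget(), based on the fixup fields used in this pass.
// Resolves the fixup's target atom either directly or via the indirect binding table.
static void extractTarget(ld::Fixup::iterator fit, ld::Internal& state, const ld::Atom** target)
{
	switch ( fit->binding ) {
		case ld::Fixup::bindingDirectlyBound:
			*target = fit->u.target;
			break;
		case ld::Fixup::bindingsIndirectlyBound:
			*target = state.indirectBindingTable[fit->u.bindingIndex];
			break;
		default:
			// assumption: other binding kinds are not expected for these branch fixups
			break;
	}
}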