kern_return_t
kxld_create_context(KXLDContext **_context,
    KXLDAllocateCallback allocate_callback, KXLDLoggingCallback logging_callback,
    KXLDFlags flags, cpu_type_t cputype, cpu_subtype_t cpusubtype,
    vm_size_t pagesize __KXLD_KERNEL_UNUSED)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDContext *context = NULL;
    KXLDArray *section_order = NULL;
#if !KERNEL
    cpu_type_t *cputype_p = NULL;
#endif

    check(_context);
    if (isOldInterface) {
        check(allocate_callback);
    }
    check(logging_callback);
    *_context = NULL;

    context = kxld_alloc(sizeof(*context));
    require_action(context, finish, rval = KERN_RESOURCE_SHORTAGE);
    bzero(context, sizeof(*context));

    context->flags = flags;
    context->allocate_callback = allocate_callback;
    context->cputype = cputype;
    context->cpusubtype = cpusubtype;

#if !KERNEL
    if (pagesize) {
        kxld_set_cross_link_page_size(pagesize);
    }
#endif /* !KERNEL */

    kxld_set_logging_callback(logging_callback);

    context->kext = kxld_alloc(kxld_kext_sizeof());
    require_action(context->kext, finish, rval = KERN_RESOURCE_SHORTAGE);
    bzero(context->kext, kxld_kext_sizeof());

    /* Check if we already have an order array for this arch */

#if KXLD_USER_OR_OBJECT
#if KERNEL
    context->section_order = s_section_order;
#else
    /* In userspace, create the dictionary if it doesn't already exist */
    if (!s_order_dict) {
        s_order_dict = kxld_alloc(sizeof(*s_order_dict));
        require_action(s_order_dict, finish, rval = KERN_RESOURCE_SHORTAGE);
        bzero(s_order_dict, sizeof(*s_order_dict));

        rval = kxld_dict_init(s_order_dict, kxld_dict_uint32_hash,
            kxld_dict_uint32_cmp, 0);
        require_noerr(rval, finish);
    }

    context->section_order = kxld_dict_find(s_order_dict, &cputype);
#endif /* KERNEL */

    /* Create an order array for this arch if needed */
    if (!context->section_order) {
        section_order = kxld_alloc(sizeof(*section_order));
        require_action(section_order, finish, rval = KERN_RESOURCE_SHORTAGE);
        bzero(section_order, sizeof(*section_order));

#if KERNEL
        s_section_order = section_order;
#else
        /* In userspace, add the new array to the order dictionary */
        cputype_p = kxld_alloc(sizeof(*cputype_p));
        require_action(cputype_p, finish, rval = KERN_RESOURCE_SHORTAGE);

        *cputype_p = cputype;
        rval = kxld_dict_insert(s_order_dict, cputype_p, section_order);
        require_noerr(rval, finish);

        cputype_p = NULL;
#endif /* KERNEL */

        context->section_order = section_order;
        section_order = NULL;
    }
#endif /* KXLD_USER_OR_OBJECT */

    rval = KERN_SUCCESS;
    *_context = context;
    context = NULL;

finish:
    if (context) {
        kxld_destroy_context(context);
    }
    if (section_order) {
        kxld_free(section_order, sizeof(*section_order));
    }
#if !KERNEL
    if (cputype_p) {
        kxld_free(cputype_p, sizeof(*cputype_p));
    }
#endif

    return rval;
}
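/* Illustrative usage sketch (not part of kxld): a minimal client-side
 * lifecycle for a linker context, assuming the callback typedefs declared in
 * the kxld public header. The callback names (my_allocate, my_log) and the
 * flag value are placeholders; only kxld_create_context() and
 * kxld_destroy_context() are taken from this file. Guarded out so it never
 * builds as part of the library.
 */
#if 0
static kern_return_t
example_create_and_destroy_context(void)
{
    KXLDContext *context = NULL;
    kern_return_t rval = KERN_FAILURE;

    /* Hypothetical callbacks with the signatures expected by the header. */
    rval = kxld_create_context(&context, my_allocate, my_log,
        /* flags */ 0, CPU_TYPE_X86_64, CPU_SUBTYPE_X86_64_ALL,
        /* pagesize: 0 leaves the cross-link page size unchanged */ 0);
    if (rval != KERN_SUCCESS) {
        return rval;
    }

    /* ... call the link entry point here ... */

    kxld_destroy_context(context);
    return KERN_SUCCESS;
}
#endif /* illustrative sketch */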
static kern_return_t
patch_vtables(KXLDKext *kext, KXLDDict *patched_vtables,
    const KXLDDict *defined_symbols)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSymtabIterator iter;
    const KXLDSymtab *symtab = NULL;
    const KXLDSym *metaclass = NULL;
    KXLDSym *super_metaclass_pointer = NULL;
    KXLDSym *final_sym = NULL;
    KXLDVTable *vtable = NULL;
    KXLDVTable *super_vtable = NULL;
    char class_name[KXLD_MAX_NAME_LEN];
    char super_class_name[KXLD_MAX_NAME_LEN];
    char vtable_name[KXLD_MAX_NAME_LEN];
    char super_vtable_name[KXLD_MAX_NAME_LEN];
    char final_sym_name[KXLD_MAX_NAME_LEN];
    char *demangled_name1 = NULL;
    char *demangled_name2 = NULL;
    size_t demangled_length1 = 0;
    size_t demangled_length2 = 0;
    size_t len = 0;
    u_int nvtables = 0;
    u_int npatched = 0;
    u_int nprogress = 0;
    boolean_t failure = FALSE;

    check(kext);
    check(patched_vtables);

    symtab = kxld_object_get_symtab(kext->kext);

    rval = create_vtable_index(kext);
    require_noerr(rval, finish);

    /* Find each super meta class pointer symbol */

    kxld_symtab_iterator_init(&iter, symtab,
        kxld_sym_is_super_metaclass_pointer, FALSE);
    nvtables = kxld_symtab_iterator_get_num_remaining(&iter);

    while (npatched < nvtables) {
        npatched = 0;
        nprogress = 0;
        kxld_symtab_iterator_reset(&iter);
        while ((super_metaclass_pointer = kxld_symtab_iterator_get_next(&iter))) {
            /* Get the class name from the smc pointer */
            rval = kxld_sym_get_class_name_from_super_metaclass_pointer(
                super_metaclass_pointer, class_name, sizeof(class_name));
            require_noerr(rval, finish);

            /* Get the vtable name from the class name */
            rval = kxld_sym_get_vtable_name_from_class_name(class_name,
                vtable_name, sizeof(vtable_name));
            require_noerr(rval, finish);

            /* Get the vtable and make sure it hasn't been patched */
            vtable = kxld_dict_find(&kext->vtable_index, vtable_name);
            require_action(vtable, finish, rval = KERN_FAILURE;
                kxld_log(kKxldLogPatching, kKxldLogErr, kKxldLogMissingVtable,
                vtable_name, class_name));

            if (!vtable->is_patched) {
                /* Find the SMCP's meta class symbol */
                metaclass = get_metaclass_symbol_from_super_meta_class_pointer_symbol(
                    kext, super_metaclass_pointer);
                require_action(metaclass, finish, rval = KERN_FAILURE);

                /* Get the super class name from the super metaclass */
                rval = kxld_sym_get_class_name_from_metaclass(metaclass,
                    super_class_name, sizeof(super_class_name));
                require_noerr(rval, finish);

                /* Get the super vtable name from the class name */
                rval = kxld_sym_get_vtable_name_from_class_name(super_class_name,
                    super_vtable_name, sizeof(super_vtable_name));
                require_noerr(rval, finish);

                /* Get the super vtable if it's been patched */
                super_vtable = kxld_dict_find(patched_vtables, super_vtable_name);

                if (failure) {
                    const KXLDVTable *unpatched_super_vtable;
                    unpatched_super_vtable = kxld_dict_find(&kext->vtable_index,
                        super_vtable_name);

                    /* If the parent's vtable hasn't been patched, warn that
                     * this vtable is unpatchable because of the parent.
                     */
                    if (!super_vtable) {
                        kxld_log(kKxldLogPatching, kKxldLogErr,
                            "The %s was not patched because its parent, "
                            "the %s, was not %s.",
                            kxld_demangle(vtable_name, &demangled_name1,
                                &demangled_length1),
                            kxld_demangle(super_vtable_name, &demangled_name2,
                                &demangled_length2),
                            (unpatched_super_vtable) ? "patchable" : "found");
                    }
                    continue;
                }
                if (!super_vtable) {
                    continue;
                }

                /* Get the final symbol's name from the super vtable */
                rval = kxld_sym_get_final_sym_name_from_class_name(super_class_name,
                    final_sym_name, sizeof(final_sym_name));
                require_noerr(rval, finish);

                /* Verify that the final symbol does not exist.  First check
                 * all the externally defined symbols, then check locally.
                 */
                final_sym = kxld_dict_find(defined_symbols, final_sym_name);
                if (!final_sym) {
                    final_sym = kxld_symtab_get_locally_defined_symbol_by_name(
                        symtab, final_sym_name);
                }
                if (final_sym) {
                    kxld_log(kKxldLogPatching, kKxldLogErr,
                        "Class '%s' is a subclass of final class '%s'.",
                        kxld_demangle(class_name, &demangled_name1,
                            &demangled_length1),
                        kxld_demangle(super_class_name, &demangled_name2,
                            &demangled_length2));
                    continue;
                }

                /* Patch the class's vtable */
                rval = kxld_vtable_patch(vtable, super_vtable, kext->kext);
                if (rval) {
                    continue;
                }

                /* Add the class's vtable to the set of patched vtables */
                rval = kxld_dict_insert(patched_vtables, vtable->name, vtable);
                require_noerr(rval, finish);

                /* Get the meta vtable name from the class name */
                rval = kxld_sym_get_meta_vtable_name_from_class_name(class_name,
                    vtable_name, sizeof(vtable_name));
                require_noerr(rval, finish);

                /* Get the meta vtable.  Whether or not it should exist has already
                 * been tested in create_vtables(), so if it doesn't exist and we're
                 * still running, we can safely skip it.
                 */
                vtable = kxld_dict_find(&kext->vtable_index, vtable_name);
                if (!vtable) {
                    ++nprogress;
                    ++npatched;
                    continue;
                }
                require_action(!vtable->is_patched, finish, rval = KERN_FAILURE);

                /* There is no way to look up a metaclass vtable at runtime, but
                 * we know that every class's metaclass inherits directly from
                 * OSMetaClass, so we just hardcode that vtable name here.
                 */
                len = strlcpy(super_vtable_name, kOSMetaClassVTableName,
                    sizeof(super_vtable_name));
                require_action(len == const_strlen(kOSMetaClassVTableName),
                    finish, rval = KERN_FAILURE);

                /* Get the super meta vtable */
                super_vtable = kxld_dict_find(patched_vtables, super_vtable_name);
                require_action(super_vtable && super_vtable->is_patched,
                    finish, rval = KERN_FAILURE);

                /* Patch the meta class's vtable */
                rval = kxld_vtable_patch(vtable, super_vtable, kext->kext);
                require_noerr(rval, finish);

                /* Add the MetaClass's vtable to the set of patched vtables */
                rval = kxld_dict_insert(patched_vtables, vtable->name, vtable);
                require_noerr(rval, finish);

                ++nprogress;
            }

            ++npatched;
        }

        require_action(!failure, finish, rval = KERN_FAILURE);
        failure = (nprogress == 0);
    }

    rval = KERN_SUCCESS;
finish:
    if (demangled_name1) {
        kxld_free(demangled_name1, demangled_length1);
    }
    if (demangled_name2) {
        kxld_free(demangled_name2, demangled_length2);
    }

    return rval;
}
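/* Illustrative sketch (not part of kxld): the loop above is a fixed-point
 * iteration over the class hierarchy. A class's vtable can only be patched
 * after its superclass's vtable has been patched, and the classes arrive in
 * no particular order, so the code repeats full passes until every vtable is
 * patched or a pass makes no progress (at which point the remaining vtables
 * are unpatchable). The toy types and helper below are hypothetical and only
 * show that shape of the algorithm.
 */
#if 0
typedef struct toy_class {
    struct toy_class *super;    /* NULL for the root class */
    int is_patched;
} toy_class;

static int
patch_all(toy_class **classes, int nclasses)
{
    int npatched = 0;

    while (npatched < nclasses) {
        int nprogress = 0;

        npatched = 0;
        for (int i = 0; i < nclasses; ++i) {
            toy_class *c = classes[i];

            if (!c->is_patched) {
                /* Patchable only once the parent is done (or absent). */
                if (c->super && !c->super->is_patched) {
                    continue;
                }
                c->is_patched = 1;
                ++nprogress;
            }
            ++npatched;
        }

        if (nprogress == 0 && npatched < nclasses) {
            return -1;  /* no forward progress: unpatchable hierarchy */
        }
    }

    return 0;
}
#endif /* illustrative sketch */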
static kern_return_t
resolve_symbols(KXLDKext *kext, const KXLDDict *defined_symbols,
    const KXLDDict *obsolete_symbols)
{
    kern_return_t rval = KERN_FAILURE;
    const KXLDSymtab *symtab = NULL;
    KXLDSymtabIterator iter;
    KXLDSym *sym = NULL;
    KXLDSym *defined_sym = NULL;
    const char *name = NULL;
    boolean_t tests_for_weak = FALSE;
    boolean_t error = FALSE;
    char *demangled_name = NULL;
    size_t demangled_length = 0;

    check(kext->kext);
    check(defined_symbols);
    check(obsolete_symbols);

    symtab = kxld_object_get_symtab(kext->kext);

    /* Check if the kext tests for weak symbols */
    sym = kxld_symtab_get_symbol_by_name(symtab, KXLD_WEAK_TEST_SYMBOL);
    tests_for_weak = (sym != NULL);

    /* Check for duplicate symbols */
    kxld_symtab_iterator_init(&iter, symtab, kxld_sym_is_exported, FALSE);
    while ((sym = kxld_symtab_iterator_get_next(&iter))) {
        defined_sym = kxld_dict_find(defined_symbols, sym->name);
        if (defined_sym) {
            /* Not a problem if the symbols have the same address */
            if (defined_sym->link_addr == sym->link_addr) {
                continue;
            }

            if (!error) {
                error = TRUE;
                kxld_log(kKxldLogLinking, kKxldLogErr,
                    "The following symbols were defined more than once:");
            }

            kxld_log(kKxldLogLinking, kKxldLogErr, "\t%s: %p - %p",
                kxld_demangle(sym->name, &demangled_name, &demangled_length),
                (void *) (uintptr_t) sym->link_addr,
                (void *) (uintptr_t) defined_sym->link_addr);
        }
    }
    require_noerr_action(error, finish, rval = KERN_FAILURE);

    /* Resolve undefined and indirect symbols */

    /* Iterate over all unresolved symbols */
    kxld_symtab_iterator_init(&iter, symtab, kxld_sym_is_unresolved, FALSE);
    while ((sym = kxld_symtab_iterator_get_next(&iter))) {
        /* Common symbols are not supported */
        if (kxld_sym_is_common(sym)) {
            if (!error) {
                error = TRUE;
                if (kxld_object_target_supports_common_symbols(kext->kext)) {
                    kxld_log(kKxldLogLinking, kKxldLogErr,
                        "The following common symbols were not resolved:");
                } else {
                    kxld_log(kKxldLogLinking, kKxldLogErr,
                        "Common symbols are not supported in kernel extensions. "
                        "Use -fno-common to build your kext. "
                        "The following are common symbols:");
                }
            }
            kxld_log(kKxldLogLinking, kKxldLogErr, "\t%s",
                kxld_demangle(sym->name, &demangled_name, &demangled_length));
        } else {
            /* Find the address of the defined symbol */
            if (kxld_sym_is_undefined(sym)) {
                name = sym->name;
            } else {
                name = sym->alias;
            }
            defined_sym = kxld_dict_find(defined_symbols, name);

            /* Resolve the symbol. If a definition cannot be found, then:
             * 1) Pseudokexts log a warning and proceed
             * 2) Actual kexts delay the error until validation in case vtable
             *    patching replaces the undefined symbol.
             */
            if (defined_sym) {
                rval = kxld_sym_resolve(sym, defined_sym->link_addr);
                require_noerr(rval, finish);

                if (obsolete_symbols && kxld_dict_find(obsolete_symbols, name)) {
                    kxld_log(kKxldLogLinking, kKxldLogWarn,
                        "This kext uses obsolete symbol %s.",
                        kxld_demangle(name, &demangled_name, &demangled_length));
                }
            } else if (kxld_sym_is_weak(sym)) {
                kxld_addr_t addr = 0;

                /* Make sure that the kext has referenced gOSKextUnresolved. */
                require_action(tests_for_weak, finish,
                    rval = KERN_FAILURE;
                    kxld_log(kKxldLogLinking, kKxldLogErr,
                    "This kext has weak references but does not test for "
                    "them. Test for weak references with "
                    "OSKextIsSymbolResolved()."));

#if KERNEL
                /* Get the address of the default weak address. */
                addr = (kxld_addr_t) &kext_weak_symbol_referenced;
#else
                /* This is run during symbol generation only, so we only
                 * need a filler value here.
                 */
                addr = 0xF00DD00D;
#endif /* KERNEL */

                rval = kxld_sym_resolve(sym, addr);
                require_noerr(rval, finish);
            }
        }
    }
    require_noerr_action(error, finish, rval = KERN_FAILURE);

    rval = KERN_SUCCESS;

finish:
    if (demangled_name) {
        kxld_free(demangled_name, demangled_length);
    }

    return rval;
}
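/* Illustrative sketch (not part of kxld): the decision made above for each
 * unresolved symbol, reduced to its core. A definition found in the global
 * symbol dictionary wins; a weak reference without a definition falls back
 * to a sentinel address (kext_weak_symbol_referenced in KERNEL builds, a
 * filler value during userspace symbol generation); anything else is left
 * alone and reported later during validation. The types and helper below
 * are hypothetical.
 */
#if 0
typedef struct toy_sym {
    const char *name;
    unsigned long addr;
    int is_weak;
    int is_resolved;
} toy_sym;

static int
toy_resolve(toy_sym *sym, const toy_sym *definition,
    unsigned long weak_fallback_addr)
{
    if (definition) {
        sym->addr = definition->addr;   /* normal resolution */
    } else if (sym->is_weak) {
        sym->addr = weak_fallback_addr; /* weak: point at the sentinel */
    } else {
        return -1;                      /* defer the error to validation */
    }

    sym->is_resolved = 1;
    return 0;
}
#endif /* illustrative sketch */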
static kern_return_t
get_vtable_syms_from_smcp(KXLDKext *kext, const KXLDDict *defined_symbols,
    KXLDSym *super_metaclass_ptr_sym, KXLDSym **vtable_sym_out,
    KXLDSym **meta_vtable_sym_out)
{
    kern_return_t rval = KERN_FAILURE;
    const KXLDSymtab *symtab = NULL;
    KXLDSym *vtable_sym = NULL;
    KXLDSym *meta_vtable_sym = NULL;
    char class_name[KXLD_MAX_NAME_LEN];
    char vtable_name[KXLD_MAX_NAME_LEN];
    char meta_vtable_name[KXLD_MAX_NAME_LEN];
    char *demangled_name1 = NULL;
    char *demangled_name2 = NULL;
    size_t demangled_length1 = 0;
    size_t demangled_length2 = 0;

    check(kext);
    check(vtable_sym_out);
    check(meta_vtable_sym_out);

    require(!kxld_object_is_kernel(kext->kext), finish);

    symtab = kxld_object_get_symtab(kext->kext);

    /* Get the class name from the smc pointer */
    rval = kxld_sym_get_class_name_from_super_metaclass_pointer(
        super_metaclass_ptr_sym, class_name, sizeof(class_name));
    require_noerr(rval, finish);

    /* Get the vtable name from the class name */
    rval = kxld_sym_get_vtable_name_from_class_name(class_name,
        vtable_name, sizeof(vtable_name));
    require_noerr(rval, finish);

    /* Get the vtable symbol */
    if (defined_symbols) {
        vtable_sym = kxld_dict_find(defined_symbols, vtable_name);
    } else {
        vtable_sym = kxld_symtab_get_locally_defined_symbol_by_name(symtab,
            vtable_name);
    }
    require_action(vtable_sym, finish, rval = KERN_FAILURE;
        kxld_log(kKxldLogPatching, kKxldLogErr, kKxldLogMissingVtable,
        vtable_name, class_name));

    /* Get the meta vtable name from the class name */
    rval = kxld_sym_get_meta_vtable_name_from_class_name(class_name,
        meta_vtable_name, sizeof(meta_vtable_name));
    require_noerr(rval, finish);

    /* Get the meta vtable symbol */
    if (defined_symbols) {
        meta_vtable_sym = kxld_dict_find(defined_symbols, meta_vtable_name);
    } else {
        meta_vtable_sym = kxld_symtab_get_locally_defined_symbol_by_name(symtab,
            meta_vtable_name);
    }
    if (!meta_vtable_sym) {
        if (kxld_object_target_supports_strict_patching(kext->kext)) {
            kxld_log(kKxldLogPatching, kKxldLogErr, kKxldLogMissingVtable,
                meta_vtable_name, class_name);
            rval = KERN_FAILURE;
            goto finish;
        } else {
            kxld_log(kKxldLogPatching, kKxldLogErr,
                "Warning: " kKxldLogMissingVtable,
                kxld_demangle(meta_vtable_name, &demangled_name1,
                    &demangled_length1),
                kxld_demangle(class_name, &demangled_name2,
                    &demangled_length2));
        }
    }

    *vtable_sym_out = vtable_sym;
    *meta_vtable_sym_out = meta_vtable_sym;
    rval = KERN_SUCCESS;
finish:
    if (demangled_name1) {
        kxld_free(demangled_name1, demangled_length1);
    }
    if (demangled_name2) {
        kxld_free(demangled_name2, demangled_length2);
    }

    return rval;
}
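/* Illustrative note (not part of kxld): the kxld_sym_get_* helpers used above
 * derive symbol names from the class name embedded in the
 * super-metaclass-pointer symbol. Assuming standard Itanium C++ mangling plus
 * the Mach-O leading underscore, a hypothetical class "Foo" would map roughly
 * as sketched below; the exact prefixes and buffer handling live in the
 * kxld_sym helpers, so treat these format strings as an approximation.
 */
#if 0
#include <stdio.h>
#include <string.h>

static void
example_vtable_names(const char *class_name, char *vtable_name,
    size_t vtable_len, char *meta_vtable_name, size_t meta_len)
{
    size_t n = strlen(class_name);

    /* e.g. "Foo" -> "__ZTV3Foo" (vtable for Foo) */
    (void) snprintf(vtable_name, vtable_len, "__ZTV%zu%s", n, class_name);

    /* e.g. "Foo" -> "__ZTVN3Foo9MetaClassE" (vtable for Foo::MetaClass) */
    (void) snprintf(meta_vtable_name, meta_len, "__ZTVN%zu%s9MetaClassE",
        n, class_name);
}
#endif /* illustrative sketch */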
/*******************************************************************************
* Initializes vtables by performing a reverse lookup on symbol values when
* they exist in the vtable entry, and by looking through a matching relocation
* entry when the vtable entry is NULL.
*
* Final linked images require this hybrid vtable initialization approach
* because they are already internally resolved.  This means that the vtables
* contain valid entries to local symbols, but still have relocation entries
* for external symbols.
*******************************************************************************/
static kern_return_t
init_by_entries_and_relocs(KXLDVTable *vtable, const KXLDSym *vtable_sym,
    const KXLDRelocator *relocator, const KXLDArray *relocs,
    const KXLDDict *defined_cxx_symbols)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDReloc *reloc = NULL;
    KXLDVTableEntry *tmpentry = NULL;
    KXLDSym *sym = NULL;
    u_int vtable_entry_size = 0;
    u_int vtable_header_size = 0;
    kxld_addr_t entry_value = 0;
    u_long entry_offset = 0;
    u_int nentries = 0;
    u_int i = 0;
    char *demangled_name1 = NULL;
    size_t demangled_length1 = 0;

    check(vtable);
    check(vtable_sym);
    check(relocator);
    check(relocs);

    /* Find the first entry and its offset past the vtable padding */
    (void) get_vtable_base_sizes(relocator->is_32_bit,
        &vtable_entry_size, &vtable_header_size);

    /* In a final linked image, a vtable slot is valid if it is nonzero
     * (meaning the userspace linker has already resolved it) or if it has
     * a relocation entry.  We'll know the end of the vtable when we find a
     * slot that meets neither of these conditions.
     */
    entry_offset = vtable_header_size;
    while (1) {
        entry_value = kxld_relocator_get_pointer_at_addr(relocator,
            vtable->vtable, entry_offset);
        if (!entry_value) {
            reloc = kxld_reloc_get_reloc_by_offset(relocs,
                vtable_sym->base_addr + entry_offset);
            if (!reloc) {
                break;
            }
        }

        ++nentries;
        entry_offset += vtable_entry_size;
    }

    /* Allocate the symbol index */
    rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry), nentries);
    require_noerr(rval, finish);

    /* Find the symbols for each vtable entry */
    for (i = 0, entry_offset = vtable_header_size;
        i < vtable->entries.nitems;
        ++i, entry_offset += vtable_entry_size) {
        entry_value = kxld_relocator_get_pointer_at_addr(relocator,
            vtable->vtable, entry_offset);

        /* If we can't find a symbol, it means it is a locally-defined,
         * non-external symbol that has been stripped.  We don't patch over
         * locally-defined symbols, so we leave the symbol as NULL and just
         * skip it.  We won't be able to patch subclasses with this symbol,
         * but there isn't much we can do about that.
         */
        if (entry_value) {
            reloc = NULL;
            sym = kxld_dict_find(defined_cxx_symbols, &entry_value);
        } else {
            reloc = kxld_reloc_get_reloc_by_offset(relocs,
                vtable_sym->base_addr + entry_offset);
            require_action(reloc, finish,
                rval = KERN_FAILURE;
                kxld_log(kKxldLogPatching, kKxldLogErr,
                kKxldLogMalformedVTable,
                kxld_demangle(vtable->name, &demangled_name1,
                &demangled_length1)));

            sym = kxld_reloc_get_symbol(relocator, reloc, /* data */ NULL);
        }

        tmpentry = kxld_array_get_item(&vtable->entries, i);
        tmpentry->unpatched.reloc = reloc;
        tmpentry->unpatched.sym = sym;
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}
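/* Illustrative sketch (not part of kxld): the scan above classifies each
 * vtable slot of a final linked image in one of three ways. A nonzero value
 * is already resolved, so its symbol is recovered by a reverse lookup on the
 * value; a zero value with a matching relocation entry is an external
 * reference whose symbol comes from the relocation; a zero value with no
 * relocation marks the end of the vtable. The toy types below are
 * hypothetical and only show the classification.
 */
#if 0
typedef enum {
    SLOT_RESOLVED,   /* nonzero value: reverse-lookup the symbol */
    SLOT_RELOCATED,  /* zero value, relocation present: use the reloc's symbol */
    SLOT_END         /* zero value, no relocation: end of vtable */
} slot_kind;

static slot_kind
classify_slot(unsigned long value, int has_reloc)
{
    if (value != 0) {
        return SLOT_RESOLVED;
    }
    return has_reloc ? SLOT_RELOCATED : SLOT_END;
}
#endif /* illustrative sketch */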
/*******************************************************************************
* Initializes a vtable object by reading the symbol values out of the vtable
* entries and performing reverse symbol lookups on those values.
*******************************************************************************/
static kern_return_t
init_by_entries(KXLDVTable *vtable, const KXLDRelocator *relocator,
    const KXLDDict *defined_cxx_symbols)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDVTableEntry *tmpentry = NULL;
    KXLDSym *sym = NULL;
    kxld_addr_t entry_value = 0;
    u_long entry_offset;
    u_int vtable_entry_size = 0;
    u_int vtable_header_size = 0;
    u_int nentries = 0;
    u_int i = 0;

    check(vtable);
    check(relocator);

    (void) get_vtable_base_sizes(relocator->is_32_bit,
        &vtable_entry_size, &vtable_header_size);

    /* Count the number of entries (the vtable is null-terminated) */
    entry_offset = vtable_header_size;
    while (1) {
        entry_value = kxld_relocator_get_pointer_at_addr(relocator,
            vtable->vtable, entry_offset);
        if (!entry_value) {
            break;
        }

        entry_offset += vtable_entry_size;
        ++nentries;
    }

    /* Allocate the symbol index */
    rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry), nentries);
    require_noerr(rval, finish);

    /* Look up the symbols for each entry */
    for (i = 0, entry_offset = vtable_header_size;
        i < vtable->entries.nitems;
        ++i, entry_offset += vtable_entry_size) {
        entry_value = kxld_relocator_get_pointer_at_addr(relocator,
            vtable->vtable, entry_offset);

        /* If we can't find the symbol, it means that the virtual function was
         * defined inline.  There's not much I can do about this; it just means
         * I can't patch this function.
         */
        tmpentry = kxld_array_get_item(&vtable->entries, i);
        sym = kxld_dict_find(defined_cxx_symbols, &entry_value);

        if (sym) {
            tmpentry->patched.name = sym->name;
            tmpentry->patched.addr = sym->link_addr;
        } else {
            tmpentry->patched.name = NULL;
            tmpentry->patched.addr = 0;
        }
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}
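/* Illustrative sketch (not part of kxld): init_by_entries() depends on a
 * reverse symbol lookup, i.e. a map keyed by symbol *address* rather than by
 * name (defined_cxx_symbols above is such a map). A linear scan over a toy
 * table is enough to show the idea; kxld uses its hash dictionary for the
 * same purpose. Types and data are hypothetical.
 */
#if 0
#include <stddef.h>

typedef struct toy_named_addr {
    unsigned long addr;
    const char *name;
} toy_named_addr;

static const char *
reverse_lookup(const toy_named_addr *table, size_t count, unsigned long addr)
{
    for (size_t i = 0; i < count; ++i) {
        if (table[i].addr == addr) {
            return table[i].name;   /* symbol defined at this address */
        }
    }
    return NULL;    /* stripped or inline-defined: slot stays unpatchable */
}
#endif /* illustrative sketch */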