/*******************************************************************************
 * Sets up the working collections of a KXLDContext: the object and dependency
 * arrays plus the four lookup dictionaries used during linking.  Returns
 * KERN_SUCCESS, or the first failing sub-initializer's error code.
 *******************************************************************************/
static kern_return_t
init_context(KXLDContext *context, u_int ndependencies)
{
    kern_return_t err = KERN_FAILURE;

    /* Size the object array to hold an object for every dependency, an
     * interface for each dependency, and the kext itself: 2n + 1 slots.
     */
    err = kxld_array_init(&context->objects,
        kxld_object_sizeof(), 2 * ndependencies + 1);
    require_noerr(err, finish);

    /* One dependency record per declared dependency */
    err = kxld_array_init(&context->dependencies,
        kxld_kext_sizeof(), ndependencies);
    require_noerr(err, finish);

    /* String-keyed dictionaries: defined symbols, obsolete symbols,
     * and vtables, all looked up by name.
     */
    err = kxld_dict_init(&context->defined_symbols_by_name,
        kxld_dict_string_hash, kxld_dict_string_cmp, 0);
    require_noerr(err, finish);

    /* Address-keyed dictionary for reverse lookup of C++ symbols */
    err = kxld_dict_init(&context->defined_cxx_symbols_by_value,
        kxld_dict_kxldaddr_hash, kxld_dict_kxldaddr_cmp, 0);
    require_noerr(err, finish);

    err = kxld_dict_init(&context->obsolete_symbols_by_name,
        kxld_dict_string_hash, kxld_dict_string_cmp, 0);
    require_noerr(err, finish);

    err = kxld_dict_init(&context->vtables_by_name,
        kxld_dict_string_hash, kxld_dict_string_cmp, 0);
    require_noerr(err, finish);

    err = KERN_SUCCESS;
finish:
    return err;
}
/*******************************************************************************
 * The defined symbols argument is optional.  When supplied, create_vtables()
 * will look for vtable symbols in the defined_symbols dictionary.  Otherwise,
 * it will look in the kext's symbol table for vtable symbols.
 *
 * We do this because there are two types of KXLDKext objects that call
 * create_vtables(), those that have been linked, and those that haven't.  The
 * linked kexts export their symbols into the global symbol table that is used
 * for symbol resolution, so we can look there for vtable symbols without
 * having to index their local symbol table separately.
 *
 * Unlinked kexts haven't yet had their symbols exported into the global table,
 * so we have to index their local symbol table separately.
 *
 * Returns KERN_SUCCESS on success (including the no-op case where vtables
 * were already created), or the first failing sub-call's error code.
 *******************************************************************************/
static kern_return_t
create_vtables(KXLDKext *kext, const KXLDDict *defined_cxx_symbols,
    const KXLDDict *defined_symbols)
{
    kern_return_t rval = KERN_FAILURE;
    const KXLDSymtab *symtab = NULL;
    KXLDSymtabIterator iter;
    KXLDSym *sym = NULL;
    KXLDSym *vtable_sym = NULL;
    KXLDSym *meta_vtable_sym = NULL;
    KXLDVTable *vtable = NULL;
    KXLDVTable *meta_vtable = NULL;
    u_int i = 0;
    u_int nvtables = 0;

    /* Idempotent: once vtables exist, this function is a no-op. */
    if (kext->vtables_created) {
        rval = KERN_SUCCESS;
        goto finish;
    }

    symtab = kxld_object_get_symtab(kext->kext);

    if (kxld_object_is_linked(kext->kext)) {
        /* Create a vtable object for every vtable symbol */
        kxld_symtab_iterator_init(&iter, symtab, kxld_sym_is_vtable, FALSE);
        nvtables = kxld_symtab_iterator_get_num_remaining(&iter);
    } else {
        /* We walk over the super metaclass pointer symbols because classes
         * with them are the only ones that need patching.  Then we double the
         * number of vtables we're expecting, because every pointer will have a
         * class vtable and a MetaClass vtable.
         */
        kxld_symtab_iterator_init(&iter, symtab,
            kxld_sym_is_super_metaclass_pointer, FALSE);
        nvtables = kxld_symtab_iterator_get_num_remaining(&iter) * 2;
    }

    rval = kxld_array_init(&kext->vtables, sizeof(KXLDVTable), nvtables);
    require_noerr(rval, finish);

    /* i tracks how many vtable slots have actually been consumed; it must
     * match the array's item count exactly when the loop finishes.
     */
    while ((sym = kxld_symtab_iterator_get_next(&iter))) {
        if (kxld_object_is_linked(kext->kext)) {
            /* Linked kext: the iterated symbol IS the vtable symbol. */
            vtable_sym = sym;
            meta_vtable_sym = NULL;
            meta_vtable = NULL;
        } else {
            /* Unlinked kext: derive the class and MetaClass vtable symbols
             * from the super metaclass pointer symbol.
             */
            rval = get_vtable_syms_from_smcp(kext, defined_symbols, sym,
                &vtable_sym, &meta_vtable_sym);
            require_noerr(rval, finish);
        }

        vtable = kxld_array_get_item(&kext->vtables, i++);
        rval = kxld_vtable_init(vtable, vtable_sym, kext->kext,
            defined_cxx_symbols);
        require_noerr(rval, finish);

        /* meta_vtable_sym will be null when we don't support strict
         * patching and can't find the metaclass vtable.  If that's the
         * case, we just reduce the expected number of vtables by 1.
         */
        if (!kxld_object_is_linked(kext->kext)) {
            if (meta_vtable_sym) {
                meta_vtable = kxld_array_get_item(&kext->vtables, i++);
                rval = kxld_vtable_init(meta_vtable, meta_vtable_sym,
                    kext->kext, defined_cxx_symbols);
                require_noerr(rval, finish);
            } else {
                /* Shrink the array in place so the i == nitems invariant
                 * below still holds.
                 */
                kxld_array_resize(&kext->vtables, --nvtables);
                meta_vtable = NULL;
            }
        }
    }
    /* Sanity check: every allocated slot must have been initialized. */
    require_action(i == kext->vtables.nitems, finish, rval=KERN_FAILURE);

    kext->vtables_created = TRUE;
    rval = KERN_SUCCESS;
finish:
    return rval;
}
/*******************************************************************************
 * Builds an array of KXLDReloc entries from raw Mach-O relocation_info
 * records.  Pair relocations are folded into their predecessor entry (they
 * consume an extra source record), and absolute section-based relocations
 * are skipped entirely, so the output array is generally shorter than nsrcs.
 *
 * relocarray  out: initialized with one KXLDReloc per relocatable record
 * relocator   provides the arch-specific reloc_has_pair/reloc_is_pair/
 *             reloc_has_got predicates
 * srcs/nsrcs  raw Mach-O relocation records and their count
 *
 * Returns KERN_SUCCESS (also for nsrcs == 0), or KERN_FAILURE on a
 * truncated or mismatched pair entry.
 *******************************************************************************/
kern_return_t
kxld_reloc_create_macho(KXLDArray *relocarray, const KXLDRelocator *relocator,
    const struct relocation_info *srcs, u_int nsrcs)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDReloc *reloc = NULL;
    u_int nrelocs = 0;
    const struct relocation_info *src = NULL, *prev_src = NULL;
    const struct scattered_relocation_info *scatsrc = NULL,
        *prev_scatsrc = NULL;
    u_int i = 0;
    u_int reloc_index = 0;

    check(relocarray);
    check(srcs);

    /* If there are no relocation entries, just return */
    if (!nsrcs) {
        rval = KERN_SUCCESS;
        goto finish;
    }

    /* Count the number of non-pair relocs */
    nrelocs = count_relocatable_relocs(relocator, srcs, nsrcs);

    if (nrelocs) {
        /* Allocate the array of relocation entries */
        rval = kxld_array_init(relocarray, sizeof(KXLDReloc), nrelocs);
        require_noerr(rval, finish);

        /* Initialize the relocation entries */
        for (i = 0; i < nsrcs; ++i) {
            src = srcs + i;
            scatsrc = (const struct scattered_relocation_info *) src;

            /* A section-based relocation entry can be skipped for absolute
             * symbols.
             */
            if (!(src->r_address & R_SCATTERED) && !(src->r_extern) &&
                (R_ABS == src->r_symbolnum))
            {
                continue;
            }

            /* Pull out the data from the relocation entries.  The
             * target_type depends on the r_extern bit:
             *     Scattered -> Section Lookup by Address
             *     Local (not extern) -> Section by Index
             *     Extern -> Symbolnum by Index
             */
            reloc = kxld_array_get_item(relocarray, reloc_index++);
            if (src->r_address & R_SCATTERED) {
                reloc->address = scatsrc->r_address;
                reloc->pcrel = scatsrc->r_pcrel;
                reloc->length = scatsrc->r_length;
                reloc->reloc_type = scatsrc->r_type;
                reloc->target = scatsrc->r_value;
                reloc->target_type = KXLD_TARGET_LOOKUP;
            } else {
                reloc->address = src->r_address;
                reloc->pcrel = src->r_pcrel;
                reloc->length = src->r_length;
                reloc->reloc_type = src->r_type;
                reloc->target = src->r_symbolnum;

                if (0 == src->r_extern) {
                    /* Mach-O section numbers are 1-based; KXLD section
                     * indices are 0-based.
                     */
                    reloc->target_type = KXLD_TARGET_SECTNUM;
                    reloc->target -= 1;
                } else {
                    reloc->target_type = KXLD_TARGET_SYMBOLNUM;
                }
            }

            /* Find the pair entry if it exists.  Note the ++i: the pair
             * record is consumed here, so the outer loop skips it.
             */
            if (relocator->reloc_has_pair(reloc->reloc_type)) {
                ++i;
                require_action(i < nsrcs, finish, rval=KERN_FAILURE);

                prev_src = src;
                src = srcs + i;
                prev_scatsrc =
                    (const struct scattered_relocation_info *) prev_src;
                scatsrc = (const struct scattered_relocation_info *) src;

                if (src->r_address & R_SCATTERED) {
                    require_action(relocator->reloc_is_pair(
                        scatsrc->r_type, reloc->reloc_type),
                        finish, rval=KERN_FAILURE);
                    reloc->pair_target = scatsrc->r_value;
                    reloc->pair_target_type = KXLD_TARGET_LOOKUP;
                } else {
                    require_action(relocator->reloc_is_pair(src->r_type,
                        reloc->reloc_type), finish, rval=KERN_FAILURE);
                    if (src->r_extern) {
                        reloc->pair_target = src->r_symbolnum;
                        reloc->pair_target_type = KXLD_TARGET_SYMBOLNUM;
                    } else {
                        reloc->pair_target = src->r_address;
                        reloc->pair_target_type = KXLD_TARGET_VALUE;
                    }
                }
            } else {
                /* No pair: record whether the type needs a GOT lookup. */
                reloc->pair_target = 0;
                if (relocator->reloc_has_got(reloc->reloc_type)) {
                    reloc->pair_target_type = KXLD_TARGET_GOT;
                } else {
                    reloc->pair_target_type = KXLD_TARGET_NONE;
                }
            }
        }
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}
/*******************************************************************************
 * Initializes vtables by performing a reverse lookup on symbol values when
 * they exist in the vtable entry, and by looking through a matching relocation
 * entry when the vtable entry is NULL.
 *
 * Final linked images require this hybrid vtable initialization approach
 * because they are already internally resolved.  This means that the vtables
 * contain valid entries to local symbols, but still have relocation entries
 * for external symbols.
 *
 * Returns KERN_SUCCESS, or KERN_FAILURE (with a malformed-vtable log message)
 * when a NULL entry has no matching relocation.
 *******************************************************************************/
static kern_return_t
init_by_entries_and_relocs(KXLDVTable *vtable, const KXLDSym *vtable_sym,
    const KXLDRelocator *relocator, const KXLDArray *relocs,
    const KXLDDict *defined_cxx_symbols)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDReloc *reloc = NULL;
    KXLDVTableEntry *tmpentry = NULL;
    KXLDSym *sym = NULL;
    u_int vtable_entry_size = 0;
    u_int vtable_header_size = 0;
    kxld_addr_t entry_value = 0;
    u_long entry_offset = 0;
    u_int nentries = 0;
    u_int i = 0;
    /* NOTE(review): the buffer kxld_demangle() may allocate into
     * demangled_name1 is not freed in this function — confirm that the
     * caller or kxld_demangle's contract covers ownership.
     */
    char *demangled_name1 = NULL;
    size_t demangled_length1 = 0;

    check(vtable);
    check(vtable_sym);
    check(relocator);
    check(relocs);

    /* Find the first entry and its offset past the vtable padding */
    (void) get_vtable_base_sizes(relocator->is_32_bit,
        &vtable_entry_size, &vtable_header_size);

    /* In a final linked image, a vtable slot is valid if it is nonzero
     * (meaning the userspace linker has already resolved it) or if it has
     * a relocation entry.  We'll know the end of the vtable when we find a
     * slot that meets neither of these conditions.
     */
    entry_offset = vtable_header_size;
    while (1) {
        entry_value = kxld_relocator_get_pointer_at_addr(relocator,
            vtable->vtable, entry_offset);
        if (!entry_value) {
            reloc = kxld_reloc_get_reloc_by_offset(relocs,
                vtable_sym->base_addr + entry_offset);
            if (!reloc) break;
        }

        ++nentries;
        entry_offset += vtable_entry_size;
    }

    /* Allocate the symbol index */
    rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry),
        nentries);
    require_noerr(rval, finish);

    /* Find the symbols for each vtable entry */
    for (i = 0, entry_offset = vtable_header_size;
         i < vtable->entries.nitems;
         ++i, entry_offset += vtable_entry_size)
    {
        entry_value = kxld_relocator_get_pointer_at_addr(relocator,
            vtable->vtable, entry_offset);

        /* If we can't find a symbol, it means it is a locally-defined,
         * non-external symbol that has been stripped.  We don't patch over
         * locally-defined symbols, so we leave the symbol as NULL and just
         * skip it.  We won't be able to patch subclasses with this symbol,
         * but there isn't much we can do about that.
         */
        if (entry_value) {
            /* Resolved slot: reverse-lookup the symbol by value. */
            reloc = NULL;
            sym = kxld_dict_find(defined_cxx_symbols, &entry_value);
        } else {
            /* Unresolved slot: a matching relocation must exist (the count
             * loop above guaranteed it within [0, nentries)).
             */
            reloc = kxld_reloc_get_reloc_by_offset(relocs,
                vtable_sym->base_addr + entry_offset);
            require_action(reloc, finish,
                rval=KERN_FAILURE;
                kxld_log(kKxldLogPatching, kKxldLogErr,
                    kKxldLogMalformedVTable,
                    kxld_demangle(vtable->name,
                        &demangled_name1, &demangled_length1)));

            sym = kxld_reloc_get_symbol(relocator, reloc, /* data */ NULL);
        }

        tmpentry = kxld_array_get_item(&vtable->entries, i);
        tmpentry->unpatched.reloc = reloc;
        tmpentry->unpatched.sym = sym;
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}
/*******************************************************************************
 * Initializes a vtable object by reading the symbol values out of the vtable
 * entries and performing reverse symbol lookups on those values.
 *
 * Returns KERN_SUCCESS, or the error from allocating the entry array.
 *******************************************************************************/
static kern_return_t
init_by_entries(KXLDVTable *vtable, const KXLDRelocator *relocator,
    const KXLDDict *defined_cxx_symbols)
{
    kern_return_t result = KERN_FAILURE;
    KXLDVTableEntry *slot = NULL;
    KXLDSym *match = NULL;
    kxld_addr_t value = 0;
    u_long offset;
    u_int entry_size = 0;
    u_int header_size = 0;
    u_int count = 0;
    u_int idx = 0;

    check(vtable);
    check(relocator);

    (void) get_vtable_base_sizes(relocator->is_32_bit,
        &entry_size, &header_size);

    /* The vtable is null-terminated: walk forward from the end of the
     * header until a zero slot is found to learn how many entries exist.
     */
    for (offset = header_size; ; offset += entry_size) {
        value = kxld_relocator_get_pointer_at_addr(relocator,
            vtable->vtable, offset);
        if (!value) {
            break;
        }
        ++count;
    }

    /* Allocate one KXLDVTableEntry per counted slot */
    result = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry),
        count);
    require_noerr(result, finish);

    /* Reverse-lookup each slot's value in the C++ symbol dictionary.
     * A miss means the virtual function was defined inline (no standalone
     * symbol to find), so that entry can't be patched; record it as empty.
     */
    for (idx = 0, offset = header_size; idx < vtable->entries.nitems;
         ++idx, offset += entry_size)
    {
        value = kxld_relocator_get_pointer_at_addr(relocator,
            vtable->vtable, offset);

        slot = kxld_array_get_item(&vtable->entries, idx);
        match = kxld_dict_find(defined_cxx_symbols, &value);
        if (match) {
            slot->patched.name = match->name;
            slot->patched.addr = match->link_addr;
        } else {
            slot->patched.name = NULL;
            slot->patched.addr = 0;
        }
    }

    result = KERN_SUCCESS;
finish:
    return result;
}
/*******************************************************************************
 * Initializes a vtable object by matching up relocation entries to the
 * vtable's entries and finding the corresponding symbols.
 *
 * vtable      out: its entries array is allocated and filled with
 *             unpatched (sym, reloc) pairs
 * vtable_sym  the symbol naming the vtable; locates it within sect
 * sect        the section containing the vtable data and its relocations
 * relocator   arch-specific helper for sizes and symbol resolution
 *
 * Returns KERN_SUCCESS, or the error from the reloc-index lookup or the
 * entry-array allocation.
 *
 * Fix: the five "&sect" references below had been corrupted to the section
 * character "§" by an HTML-entity mangling ("&sect;" -> "§"), which does not
 * compile; they are restored to take the address of the sect parameter's
 * members.
 *******************************************************************************/
static kern_return_t
init_by_relocs(KXLDVTable *vtable, const KXLDSym *vtable_sym,
    const KXLDSect *sect, const KXLDRelocator *relocator)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDReloc *reloc = NULL;
    KXLDVTableEntry *entry = NULL;
    KXLDSym *sym = NULL;
    kxld_addr_t vtable_base_offset = 0;
    kxld_addr_t entry_offset = 0;
    u_int i = 0;
    u_int nentries = 0;
    u_int vtable_entry_size = 0;
    u_int vtable_header_size = 0;
    u_int base_reloc_index = 0;
    u_int reloc_index = 0;

    check(vtable);
    check(vtable_sym);
    check(sect);
    check(relocator);

    /* Find the first entry past the vtable padding */
    (void) get_vtable_base_sizes(relocator->is_32_bit,
        &vtable_entry_size, &vtable_header_size);

    vtable_base_offset = kxld_sym_get_section_offset(vtable_sym, sect) +
        vtable_header_size;

    /* Find the relocation entry at the start of the vtable */
    rval = kxld_reloc_get_reloc_index_by_offset(&sect->relocs,
        vtable_base_offset, &base_reloc_index);
    require_noerr(rval, finish);

    /* Count the number of consecutive relocation entries to find the number
     * of vtable entries.  For some reason, the __TEXT,__const relocations
     * are sorted in descending order, so we have to walk backwards.  Also,
     * make sure we don't run off the end of the section's relocs.
     */
    reloc_index = base_reloc_index;
    entry_offset = vtable_base_offset;
    reloc = kxld_array_get_item(&sect->relocs, reloc_index);
    while (reloc->address == entry_offset) {
        ++nentries;
        if (!reloc_index) break;

        --reloc_index;

        reloc = kxld_array_get_item(&sect->relocs, reloc_index);
        entry_offset += vtable_entry_size;
    }

    /* Allocate the symbol index */
    rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry),
        nentries);
    require_noerr(rval, finish);

    /* Find the symbols for each vtable entry, walking the relocs backwards
     * from the base index to match the descending sort noted above.
     */
    for (i = 0; i < vtable->entries.nitems; ++i) {
        reloc = kxld_array_get_item(&sect->relocs, base_reloc_index - i);
        entry = kxld_array_get_item(&vtable->entries, i);

        /* If we can't find a symbol, it means it is a locally-defined,
         * non-external symbol that has been stripped.  We don't patch over
         * locally-defined symbols, so we leave the symbol as NULL and just
         * skip it.  We won't be able to patch subclasses with this symbol,
         * but there isn't much we can do about that.
         */
        sym = kxld_reloc_get_symbol(relocator, reloc, sect->data);
        entry->unpatched.sym = sym;
        entry->unpatched.reloc = reloc;
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}
u_int *item = 0; u_int test_num = 0; u_int idx = 0; u_int titems = 0; u_int storageTestItems[kNumStorageTestItems]; u_int i = 0; bzero(&array, sizeof(array)); kxld_set_logging_callback(kxld_test_log); kxld_set_logging_callback_data("kxld_array_test", NULL); kxld_log(0, 0, "%d: Initialize", ++test_num); titems = PAGE_SIZE / sizeof(u_int); rval = kxld_array_init(&array, sizeof(u_int), titems); assert(rval == KERN_SUCCESS); assert(array.nitems == titems); kxld_log(0, 0, "%d: Get item", ++test_num); idx = 0; item = kxld_array_get_item(&array, idx); assert(item); assert(item == kxld_array_get_slot(&array, idx)); idx = titems - 1; item = kxld_array_get_item(&array, idx); assert(item); assert(item == kxld_array_get_slot(&array, idx)); idx = titems;