/* Resets a link context for reuse: clears the kext being linked, releases
 * every object and dependency slot, and empties all of the global
 * symbol and vtable dictionaries.  The context's allocations are kept so a
 * subsequent link can reuse them.
 */
static void
clear_context(KXLDContext *context)
{
    u_int idx;

    check(context);

    kxld_kext_clear(context->kext);

    for (idx = 0; idx < context->objects.nitems; ++idx) {
        KXLDObject *object = kxld_array_get_item(&context->objects, idx);
        kxld_object_clear(object);
    }
    kxld_array_reset(&context->objects);

    for (idx = 0; idx < context->dependencies.nitems; ++idx) {
        KXLDKext *dep = kxld_array_get_item(&context->dependencies, idx);
        kxld_kext_clear(dep);
    }
    kxld_array_reset(&context->dependencies);

    kxld_dict_clear(&context->defined_symbols_by_name);
    kxld_dict_clear(&context->defined_cxx_symbols_by_value);
    kxld_dict_clear(&context->obsolete_symbols_by_name);
    kxld_dict_clear(&context->vtables_by_name);
}
/* Ensures this kext's vtables exist (create_vtables is idempotent) and
 * registers each one in the caller-supplied dictionary, keyed by vtable name.
 *
 * Returns KERN_SUCCESS, or the first error from vtable creation or
 * dictionary insertion.
 */
kern_return_t
kxld_kext_export_vtables(KXLDKext *kext, const KXLDDict *defined_cxx_symbols,
    const KXLDDict *defined_symbols, KXLDDict *vtables)
{
    kern_return_t rval = KERN_FAILURE;
    u_int idx;

    check(kext);
    check(defined_symbols);
    check(defined_cxx_symbols);
    check(vtables);

    rval = create_vtables(kext, defined_cxx_symbols, defined_symbols);
    require_noerr(rval, finish);

    for (idx = 0; idx < kext->vtables.nitems; ++idx) {
        KXLDVTable *vtable = kxld_array_get_item(&kext->vtables, idx);

        rval = kxld_dict_insert(vtables, vtable->name, vtable);
        require_noerr(rval, finish);
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}
/* Returns the KXLDObject wrapping the given file.  The objects array is
 * scanned in order: the first unused slot (no file attached) ends the search,
 * so a miss claims that slot and initializes it from the Mach-O data.
 * Returns NULL if initialization of a fresh slot fails or the array is full.
 */
static KXLDObject *
get_object_for_file(KXLDContext *context, u_char *file, u_long size,
    const char *name)
{
    KXLDObject *found = NULL;
    u_int idx;

    for (idx = 0; idx < context->objects.nitems; ++idx) {
        KXLDObject *object = kxld_array_get_item(&context->objects, idx);

        if (!kxld_object_get_file(object)) {
            /* Unused slot: initialize it for this file. */
            kern_return_t result = kxld_object_init_from_macho(object, file,
                size, name, context->section_order, context->cputype,
                context->cpusubtype, context->flags);
            require_noerr(result, finish);

            found = object;
            break;
        }

        if (kxld_object_get_file(object) == file) {
            /* Already wrapped by an earlier call. */
            found = object;
            break;
        }
    }

finish:
    return found;
}
/* Lazily builds the kext's name -> vtable dictionary.  Idempotent: if the
 * index was already created this returns KERN_SUCCESS immediately.
 */
static kern_return_t
create_vtable_index(KXLDKext *kext)
{
    kern_return_t rval = KERN_FAILURE;
    u_int idx;

    if (kext->vtable_index_created) {
        rval = KERN_SUCCESS;
        goto finish;
    }

    /* Map vtable names to the vtable structures */
    rval = kxld_dict_init(&kext->vtable_index, kxld_dict_string_hash,
        kxld_dict_string_cmp, kext->vtables.nitems);
    require_noerr(rval, finish);

    for (idx = 0; idx < kext->vtables.nitems; ++idx) {
        KXLDVTable *vtable = kxld_array_get_item(&kext->vtables, idx);

        rval = kxld_dict_insert(&kext->vtable_index, vtable->name, vtable);
        require_noerr(rval, finish);
    }

    kext->vtable_index_created = TRUE;
    rval = KERN_SUCCESS;
finish:
    return rval;
}
/* Returns the relocation entry whose address equals the given offset, or
 * NULL if no such entry exists.
 */
KXLDReloc *
kxld_reloc_get_reloc_by_offset(const KXLDArray *relocs, kxld_addr_t offset)
{
    u_int idx = 0;

    if (kxld_reloc_get_reloc_index_by_offset(relocs, offset, &idx)) {
        return NULL;
    }

    return kxld_array_get_item(relocs, idx);
}
/* Returns a KXLDKext to its pristine state: clears each vtable, resets the
 * vtable array and the vtable-name index, and drops the object and interface
 * references.
 */
void
kxld_kext_clear(KXLDKext *kext)
{
    u_int idx;

    check(kext);

    for (idx = 0; idx < kext->vtables.nitems; ++idx) {
        KXLDVTable *vtable = kxld_array_get_item(&kext->vtables, idx);
        kxld_vtable_clear(vtable);
    }
    kxld_array_reset(&kext->vtables);

    kxld_dict_clear(&kext->vtable_index);

    kext->kext = NULL;
    kext->interface = NULL;
    kext->vtables_created = FALSE;
    kext->vtable_index_created = FALSE;
}
/* Returns the vtable entry corresponding to the given byte offset into the
 * vtable, or NULL when the offset is not on an entry boundary, lies inside
 * the vtable header, or indexes past the last entry (kxld_array_get_item
 * returns NULL for out-of-range indices).
 */
KXLDVTableEntry *
kxld_vtable_get_entry_for_offset(const KXLDVTable *vtable, u_long offset,
    boolean_t is_32_bit)
{
    KXLDVTableEntry *rval = NULL;
    u_int vtable_entry_size = 0;
    u_int vtable_header_size = 0;
    u_int vtable_entry_idx = 0;

    (void) get_vtable_base_sizes(is_32_bit,
        &vtable_entry_size, &vtable_header_size);

    /* Entries are pointer-sized; reject offsets that aren't on an entry
     * boundary. */
    if (offset % vtable_entry_size) {
        goto finish;
    }

    /* Offsets inside the vtable header precede the first entry.  Without
     * this guard the unsigned subtraction below would wrap around to a huge
     * index, silently relying on the array's bounds check to return NULL. */
    if (offset < vtable_header_size) {
        goto finish;
    }

    vtable_entry_idx = (u_int) ((offset - vtable_header_size) /
        vtable_entry_size);
    rval = kxld_array_get_item(&vtable->entries, vtable_entry_idx);
finish:
    return rval;
}
/* Linear search for the relocation entry whose address equals the given
 * offset.  On success the entry's index is written through idx and
 * KERN_SUCCESS is returned; otherwise KERN_FAILURE.
 */
kern_return_t
kxld_reloc_get_reloc_index_by_offset(const KXLDArray *relocs,
    kxld_size_t offset, u_int *idx)
{
    u_int i;

    for (i = 0; i < relocs->nitems; ++i) {
        KXLDReloc *reloc = kxld_array_get_item(relocs, i);

        if (reloc->address == offset) {
            *idx = i;
            return KERN_SUCCESS;
        }
    }

    return KERN_FAILURE;
}
/* Creates the KXLDKext objects for the kext being linked and for all of its
 * dependencies, and populates the context's global symbol and vtable tables
 * used for symbol resolution and vtable patching.
 *
 * On failure the context may be partially initialized; the caller is
 * responsible for clearing it.
 */
static kern_return_t
init_kext_objects(KXLDContext *context, u_char *file, u_long size,
    const char *name, KXLDDependency *dependencies, u_int ndependencies)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDKext *kext = NULL;
    KXLDObject *kext_object = NULL;
    KXLDObject *interface_object = NULL;
    u_int i = 0;

    /* Create a kext object for each dependency.  If it's a direct dependency,
     * export its symbols by name and by value.  If it's indirect, just export
     * the C++ symbols by value.
     */
    for (i = 0; i < ndependencies; ++i) {
        kext = kxld_array_get_item(&context->dependencies, i);
        kext_object = NULL;
        interface_object = NULL;

        kext_object = get_object_for_file(context, dependencies[i].kext,
            dependencies[i].kext_size, dependencies[i].kext_name);
        require_action(kext_object, finish, rval=KERN_FAILURE);

        /* An interface object, when present, restricts which of the
         * dependency's symbols this link may see. */
        if (dependencies[i].interface) {
            interface_object = get_object_for_file(context,
                dependencies[i].interface, dependencies[i].interface_size,
                dependencies[i].interface_name);
            require_action(interface_object, finish, rval=KERN_FAILURE);
        }

        rval = kxld_kext_init(kext, kext_object, interface_object);
        require_noerr(rval, finish);

        if (dependencies[i].is_direct_dependency) {
            rval = kxld_kext_export_symbols(kext,
                &context->defined_symbols_by_name,
                &context->obsolete_symbols_by_name,
                &context->defined_cxx_symbols_by_value);
            require_noerr(rval, finish);
        } else {
            rval = kxld_kext_export_symbols(kext,
                /* defined_symbols */ NULL,
                /* obsolete_symbols */ NULL,
                &context->defined_cxx_symbols_by_value);
            require_noerr(rval, finish);
        }
    }

    /* Export the vtables for all of the dependencies. */
    for (i = 0; i < context->dependencies.nitems; ++i) {
        kext = kxld_array_get_item(&context->dependencies, i);
        rval = kxld_kext_export_vtables(kext,
            &context->defined_cxx_symbols_by_value,
            &context->defined_symbols_by_name,
            &context->vtables_by_name);
        require_noerr(rval, finish);
    }

    /* Create a kext object for the kext we're linking and export its locally
     * defined C++ symbols.
     */
    kext_object = get_object_for_file(context, file, size, name);
    require_action(kext_object, finish, rval=KERN_FAILURE);

    rval = kxld_kext_init(context->kext, kext_object, /* interface */ NULL);
    require_noerr(rval, finish);

    rval = kxld_kext_export_symbols(context->kext,
        /* defined_symbols */ NULL, /* obsolete_symbols */ NULL,
        &context->defined_cxx_symbols_by_value);
    require_noerr(rval, finish);

    rval = KERN_SUCCESS;
finish:
    return rval;
}
/*******************************************************************************
* The defined symbols argument is optional. When supplied, create_vtables()
* will look for vtable symbols in the defined_symbols dictionary. Otherwise,
* it will look in the kext's symbol table for vtable symbols.
*
* We do this because there are two types of KXLDKext objects that call
* create_vtables(), those that have been linked, and those that haven't. The
* linked kexts export their symbols into the global symbol table that is used
* for symbol resolution, so we can look there for vtable symbols without
* having to index their local symbol table separately.
*
* Unlinked kexts haven't yet had their symbols exported into the global table,
* so we have to index their local symbol table separately.
*******************************************************************************/
static kern_return_t
create_vtables(KXLDKext *kext, const KXLDDict *defined_cxx_symbols,
    const KXLDDict *defined_symbols)
{
    kern_return_t rval = KERN_FAILURE;
    const KXLDSymtab *symtab = NULL;
    KXLDSymtabIterator iter;
    KXLDSym *sym = NULL;
    KXLDSym *vtable_sym = NULL;
    KXLDSym *meta_vtable_sym = NULL;
    KXLDVTable *vtable = NULL;
    KXLDVTable *meta_vtable = NULL;
    u_int i = 0;
    u_int nvtables = 0;

    /* Idempotent: a second call is a no-op. */
    if (kext->vtables_created) {
        rval = KERN_SUCCESS;
        goto finish;
    }

    symtab = kxld_object_get_symtab(kext->kext);

    if (kxld_object_is_linked(kext->kext)) {
        /* Create a vtable object for every vtable symbol */
        kxld_symtab_iterator_init(&iter, symtab, kxld_sym_is_vtable, FALSE);
        nvtables = kxld_symtab_iterator_get_num_remaining(&iter);
    } else {
        /* We walk over the super metaclass pointer symbols because classes
         * with them are the only ones that need patching. Then we double the
         * number of vtables we're expecting, because every pointer will have a
         * class vtable and a MetaClass vtable.
         */
        kxld_symtab_iterator_init(&iter, symtab,
            kxld_sym_is_super_metaclass_pointer, FALSE);
        nvtables = kxld_symtab_iterator_get_num_remaining(&iter) * 2;
    }

    rval = kxld_array_init(&kext->vtables, sizeof(KXLDVTable), nvtables);
    require_noerr(rval, finish);

    while ((sym = kxld_symtab_iterator_get_next(&iter))) {
        if (kxld_object_is_linked(kext->kext)) {
            /* Linked kext: the iterated symbol IS the vtable symbol. */
            vtable_sym = sym;
            meta_vtable_sym = NULL;
            meta_vtable = NULL;
        } else {
            /* Unlinked kext: derive the class and MetaClass vtable symbols
             * from the super metaclass pointer symbol. */
            rval = get_vtable_syms_from_smcp(kext, defined_symbols, sym,
                &vtable_sym, &meta_vtable_sym);
            require_noerr(rval, finish);
        }

        vtable = kxld_array_get_item(&kext->vtables, i++);
        rval = kxld_vtable_init(vtable, vtable_sym, kext->kext,
            defined_cxx_symbols);
        require_noerr(rval, finish);

        /* meta_vtable_sym will be null when we don't support strict
         * patching and can't find the metaclass vtable. If that's the
         * case, we just reduce the expected number of vtables by 1.
         */
        if (!kxld_object_is_linked(kext->kext)) {
            if (meta_vtable_sym) {
                meta_vtable = kxld_array_get_item(&kext->vtables, i++);
                rval = kxld_vtable_init(meta_vtable, meta_vtable_sym,
                    kext->kext, defined_cxx_symbols);
                require_noerr(rval, finish);
            } else {
                kxld_array_resize(&kext->vtables, --nvtables);
                meta_vtable = NULL;
            }
        }
    }
    /* Every allocated slot must have been filled exactly once. */
    require_action(i == kext->vtables.nitems, finish, rval=KERN_FAILURE);

    kext->vtables_created = TRUE;
    rval = KERN_SUCCESS;
finish:
    return rval;
}
/* Builds an array of KXLDReloc entries from the raw Mach-O relocation_info
 * records in srcs.  Pair relocations are folded into the preceding entry, and
 * section-based relocations against absolute symbols are skipped.  On
 * success, relocarray holds one KXLDReloc per relocatable entry.
 */
kern_return_t
kxld_reloc_create_macho(KXLDArray *relocarray, const KXLDRelocator *relocator,
    const struct relocation_info *srcs, u_int nsrcs)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDReloc *reloc = NULL;
    u_int nrelocs = 0;
    const struct relocation_info *src = NULL, *prev_src = NULL;
    const struct scattered_relocation_info *scatsrc = NULL,
        *prev_scatsrc = NULL;
    u_int i = 0;
    u_int reloc_index = 0;

    check(relocarray);
    check(srcs);

    /* If there are no relocation entries, just return */
    if (!nsrcs) {
        rval = KERN_SUCCESS;
        goto finish;
    }

    /* Count the number of non-pair relocs */
    nrelocs = count_relocatable_relocs(relocator, srcs, nsrcs);

    if (nrelocs) {

        /* Allocate the array of relocation entries */
        rval = kxld_array_init(relocarray, sizeof(KXLDReloc), nrelocs);
        require_noerr(rval, finish);

        /* Initialize the relocation entries */
        for (i = 0; i < nsrcs; ++i) {
            src = srcs + i;
            /* Scattered entries share storage with plain ones; the
             * R_SCATTERED bit in r_address discriminates. */
            scatsrc = (const struct scattered_relocation_info *) src;

            /* A section-based relocation entry can be skipped for absolute
             * symbols.
             */
            if (!(src->r_address & R_SCATTERED) && !(src->r_extern) &&
                (R_ABS == src->r_symbolnum)) {
                continue;
            }

            /* Pull out the data from the relocation entries. The target_type
             * depends on the r_extern bit:
             *   Scattered -> Section Lookup by Address
             *   Local (not extern) -> Section by Index
             *   Extern -> Symbolnum by Index
             */
            reloc = kxld_array_get_item(relocarray, reloc_index++);
            if (src->r_address & R_SCATTERED) {
                reloc->address = scatsrc->r_address;
                reloc->pcrel = scatsrc->r_pcrel;
                reloc->length = scatsrc->r_length;
                reloc->reloc_type = scatsrc->r_type;
                reloc->target = scatsrc->r_value;
                reloc->target_type = KXLD_TARGET_LOOKUP;
            } else {
                reloc->address = src->r_address;
                reloc->pcrel = src->r_pcrel;
                reloc->length = src->r_length;
                reloc->reloc_type = src->r_type;
                reloc->target = src->r_symbolnum;

                if (0 == src->r_extern) {
                    /* Section ordinals are 1-based in Mach-O; convert to a
                     * 0-based index. */
                    reloc->target_type = KXLD_TARGET_SECTNUM;
                    reloc->target -= 1;
                } else {
                    reloc->target_type = KXLD_TARGET_SYMBOLNUM;
                }
            }

            /* Find the pair entry if it exists */
            if (relocator->reloc_has_pair(reloc->reloc_type)) {
                ++i;
                require_action(i < nsrcs, finish, rval=KERN_FAILURE);

                prev_src = src;
                src = srcs + i;
                prev_scatsrc =
                    (const struct scattered_relocation_info *) prev_src;
                scatsrc = (const struct scattered_relocation_info *) src;

                if (src->r_address & R_SCATTERED) {
                    require_action(relocator->reloc_is_pair(
                        scatsrc->r_type, reloc->reloc_type),
                        finish, rval=KERN_FAILURE);
                    reloc->pair_target = scatsrc->r_value;
                    reloc->pair_target_type = KXLD_TARGET_LOOKUP;
                } else {
                    require_action(relocator->reloc_is_pair(src->r_type,
                        reloc->reloc_type), finish, rval=KERN_FAILURE);
                    if (src->r_extern) {
                        reloc->pair_target = src->r_symbolnum;
                        reloc->pair_target_type = KXLD_TARGET_SYMBOLNUM;
                    } else {
                        reloc->pair_target = src->r_address;
                        reloc->pair_target_type = KXLD_TARGET_VALUE;
                    }
                }
            } else {
                reloc->pair_target = 0;
                if (relocator->reloc_has_got(reloc->reloc_type)) {
                    reloc->pair_target_type = KXLD_TARGET_GOT;
                } else {
                    reloc->pair_target_type = KXLD_TARGET_NONE;
                }
            }
        }
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}
/*******************************************************************************
* Patching vtables allows us to preserve binary compatibility across releases.
*
* Walks the child vtable alongside its superclass's (already patched) vtable.
* Unresolved child entries that differ from the parent are redirected to the
* parent's implementation by rewriting the child entry's relocation to point
* at the parent symbol.  Finally the vtable is converted from the unpatched
* (sym/reloc) representation to the patched (name/addr) representation.
*******************************************************************************/
kern_return_t
kxld_vtable_patch(KXLDVTable *vtable, const KXLDVTable *super_vtable,
    KXLDObject *object)
{
    kern_return_t rval = KERN_FAILURE;
    const KXLDSymtab *symtab = NULL;
    const KXLDSym *sym = NULL;
    KXLDVTableEntry *child_entry = NULL;
    KXLDVTableEntry *parent_entry = NULL;
    u_int symindex = 0;
    u_int i = 0;
    char *demangled_name1 = NULL;
    char *demangled_name2 = NULL;
    char *demangled_name3 = NULL;
    size_t demangled_length1 = 0;
    size_t demangled_length2 = 0;
    size_t demangled_length3 = 0;
    boolean_t failure = FALSE;

    check(vtable);
    check(super_vtable);

    symtab = kxld_object_get_symtab(object);

    /* An already-patched vtable is left untouched (success, not an error). */
    require_action(!vtable->is_patched, finish, rval=KERN_SUCCESS);

    /* A subclass vtable must be at least as large as its superclass's. */
    require_action(vtable->entries.nitems >= super_vtable->entries.nitems,
        finish, rval=KERN_FAILURE;
        kxld_log(kKxldLogPatching, kKxldLogErr, kKxldLogMalformedVTable,
            kxld_demangle(vtable->name,
                &demangled_name1, &demangled_length1)));

    for (i = 0; i < super_vtable->entries.nitems; ++i) {
        child_entry = kxld_array_get_item(&vtable->entries, i);
        parent_entry = kxld_array_get_item(&super_vtable->entries, i);

        /* The child entry can be NULL when a locally-defined, non-external
         * symbol is stripped.  We wouldn't patch this entry anyway, so we
         * just skip it.
         */
        if (!child_entry->unpatched.sym) continue;

        /* It's possible for the patched parent entry not to have a symbol
         * (e.g. when the definition is inlined).  We can't patch this entry no
         * matter what, so we'll just skip it and die later if it's a problem
         * (which is not likely).
         */
        if (!parent_entry->patched.name) continue;

        /* 1) If the symbol is defined locally, do not patch */
        if (kxld_sym_is_defined_locally(child_entry->unpatched.sym)) continue;

        /* 2) If the child is a pure virtual function, do not patch.
         * In general, we want to proceed with patching when the symbol is
         * externally defined because pad slots fall into this category.
         * The pure virtual function symbol is special case, as the pure
         * virtual property itself overrides the parent's implementation.
         */
        if (kxld_sym_is_pure_virtual(child_entry->unpatched.sym)) continue;

        /* 3) If the symbols are the same, do not patch */
        if (streq(child_entry->unpatched.sym->name,
                parent_entry->patched.name)) {
            continue;
        }

        /* 4) If the parent vtable entry is a pad slot, and the child does not
         * match it, then the child was built against a newer version of the
         * libraries, so it is binary-incompatible.
         */
        require_action(!kxld_sym_name_is_padslot(parent_entry->patched.name),
            finish, rval=KERN_FAILURE;
            kxld_log(kKxldLogPatching, kKxldLogErr, kKxldLogParentOutOfDate,
                kxld_demangle(super_vtable->name,
                    &demangled_name1, &demangled_length1),
                kxld_demangle(vtable->name,
                    &demangled_name2, &demangled_length2)));

#if KXLD_USER_OR_STRICT_PATCHING
        /* 5) If we are doing strict patching, we prevent kexts from declaring
         * virtual functions and not implementing them.  We can tell if a
         * virtual function is declared but not implemented because we resolve
         * symbols before patching; an unimplemented function will still be
         * undefined at this point.  We then look at whether the symbol has
         * the same class prefix as the vtable.  If it does, the symbol was
         * declared as part of the class and not inherited, which means we
         * should not patch it.
         */
        if (kxld_object_target_supports_strict_patching(object) &&
            !kxld_sym_is_defined(child_entry->unpatched.sym)) {
            char class_name[KXLD_MAX_NAME_LEN];
            char function_prefix[KXLD_MAX_NAME_LEN];
            u_long function_prefix_len = 0;

            rval = kxld_sym_get_class_name_from_vtable_name(vtable->name,
                class_name, sizeof(class_name));
            require_noerr(rval, finish);

            function_prefix_len =
                kxld_sym_get_function_prefix_from_class_name(class_name,
                    function_prefix, sizeof(function_prefix));
            require(function_prefix_len, finish);

            if (!strncmp(child_entry->unpatched.sym->name,
                    function_prefix, function_prefix_len)) {
                /* Record the failure but keep scanning so every unpatchable
                 * method gets logged before we bail out. */
                failure = TRUE;
                kxld_log(kKxldLogPatching, kKxldLogErr,
                    "The %s is unpatchable because its class declares the "
                    "method '%s' without providing an implementation.",
                    kxld_demangle(vtable->name,
                        &demangled_name1, &demangled_length1),
                    kxld_demangle(child_entry->unpatched.sym->name,
                        &demangled_name2, &demangled_length2));
                continue;
            }
        }
#endif /* KXLD_USER_OR_STRICT_PATCHING */

        /* 6) The child symbol is unresolved and different from its parent, so
         * we need to patch it up.  We do this by modifying the relocation
         * entry of the vtable entry to point to the symbol of the parent
         * vtable entry.  If that symbol does not exist (i.e. we got the data
         * from a link state object's vtable representation), then we create a
         * new symbol in the symbol table and point the relocation entry to
         * that.
         */
        sym = kxld_symtab_get_locally_defined_symbol_by_name(symtab,
            parent_entry->patched.name);
        if (!sym) {
            rval = kxld_object_add_symbol(object, parent_entry->patched.name,
                parent_entry->patched.addr, &sym);
            require_noerr(rval, finish);
        }
        require_action(sym, finish, rval=KERN_FAILURE);

        rval = kxld_symtab_get_sym_index(symtab, sym, &symindex);
        require_noerr(rval, finish);

        rval = kxld_reloc_update_symindex(child_entry->unpatched.reloc,
            symindex);
        require_noerr(rval, finish);

        kxld_log(kKxldLogPatching, kKxldLogDetail,
            "In vtable '%s', patching '%s' with '%s'.",
            kxld_demangle(vtable->name, &demangled_name1, &demangled_length1),
            kxld_demangle(child_entry->unpatched.sym->name,
                &demangled_name2, &demangled_length2),
            kxld_demangle(sym->name, &demangled_name3, &demangled_length3));

        rval = kxld_object_patch_symbol(object, child_entry->unpatched.sym);
        require_noerr(rval, finish);

        child_entry->unpatched.sym = sym;

        /*
         * The C++ ABI requires that functions be aligned on a 2-byte boundary:
         * http://www.codesourcery.com/public/cxx-abi/abi.html#member-pointers
         * If the LSB of any virtual function's link address is 1, then the
         * compiler has violated that part of the ABI, and we're going to panic
         * in _ptmf2ptf() (in OSMetaClass.h). Better to panic here with some
         * context.
         */
        assert(kxld_sym_is_pure_virtual(sym) || !(sym->link_addr & 1));
    }

    require_action(!failure, finish, rval=KERN_FAILURE);

    /* Change the vtable representation from the unpatched layout to the
     * patched layout.
     */
    for (i = 0; i < vtable->entries.nitems; ++i) {
        char *name;
        kxld_addr_t addr;

        child_entry = kxld_array_get_item(&vtable->entries, i);

        if (child_entry->unpatched.sym) {
            name = child_entry->unpatched.sym->name;
            addr = child_entry->unpatched.sym->link_addr;
        } else {
            name = NULL;
            addr = 0;
        }

        child_entry->patched.name = name;
        child_entry->patched.addr = addr;
    }

    vtable->is_patched = TRUE;
    rval = KERN_SUCCESS;

finish:
    if (demangled_name1) kxld_free(demangled_name1, demangled_length1);
    if (demangled_name2) kxld_free(demangled_name2, demangled_length2);
    if (demangled_name3) kxld_free(demangled_name3, demangled_length3);

    return rval;
}
/*******************************************************************************
* Initializes vtables by performing a reverse lookup on symbol values when
* they exist in the vtable entry, and by looking through a matching relocation
* entry when the vtable entry is NULL.
*
* Final linked images require this hybrid vtable initialization approach
* because they are already internally resolved. This means that the vtables
* contain valid entries to local symbols, but still have relocation entries for
* external symbols.
*******************************************************************************/
static kern_return_t
init_by_entries_and_relocs(KXLDVTable *vtable, const KXLDSym *vtable_sym,
    const KXLDRelocator *relocator, const KXLDArray *relocs,
    const KXLDDict *defined_cxx_symbols)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDReloc *reloc = NULL;
    KXLDVTableEntry *tmpentry = NULL;
    KXLDSym *sym = NULL;
    u_int vtable_entry_size = 0;
    u_int vtable_header_size = 0;
    kxld_addr_t entry_value = 0;
    u_long entry_offset = 0;
    u_int nentries = 0;
    u_int i = 0;
    char *demangled_name1 = NULL;
    size_t demangled_length1 = 0;

    check(vtable);
    check(vtable_sym);
    check(relocator);
    check(relocs);

    /* Find the first entry and its offset past the vtable padding */
    (void) get_vtable_base_sizes(relocator->is_32_bit,
        &vtable_entry_size, &vtable_header_size);

    /* In a final linked image, a vtable slot is valid if it is nonzero
     * (meaning the userspace linker has already resolved it) or if it has
     * a relocation entry.  We'll know the end of the vtable when we find a
     * slot that meets neither of these conditions.
     */
    entry_offset = vtable_header_size;
    while (1) {
        entry_value = kxld_relocator_get_pointer_at_addr(relocator,
            vtable->vtable, entry_offset);
        if (!entry_value) {
            /* Zero slot: still counts as an entry if a relocation targets
             * it; otherwise it terminates the vtable. */
            reloc = kxld_reloc_get_reloc_by_offset(relocs,
                vtable_sym->base_addr + entry_offset);
            if (!reloc) break;
        }

        ++nentries;
        entry_offset += vtable_entry_size;
    }

    /* Allocate the symbol index */
    rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry),
        nentries);
    require_noerr(rval, finish);

    /* Find the symbols for each vtable entry */
    for (i = 0, entry_offset = vtable_header_size;
         i < vtable->entries.nitems;
         ++i, entry_offset += vtable_entry_size)
    {
        entry_value = kxld_relocator_get_pointer_at_addr(relocator,
            vtable->vtable, entry_offset);

        /* If we can't find a symbol, it means it is a locally-defined,
         * non-external symbol that has been stripped.  We don't patch over
         * locally-defined symbols, so we leave the symbol as NULL and just
         * skip it.  We won't be able to patch subclasses with this symbol,
         * but there isn't much we can do about that.
         */
        if (entry_value) {
            /* Resolved slot: reverse-lookup the symbol by value. */
            reloc = NULL;
            sym = kxld_dict_find(defined_cxx_symbols, &entry_value);
        } else {
            /* Unresolved slot: find the relocation and its symbol.  A zero
             * slot with no relocation here is a malformed vtable because the
             * counting pass above accepted this offset. */
            reloc = kxld_reloc_get_reloc_by_offset(relocs,
                vtable_sym->base_addr + entry_offset);
            require_action(reloc, finish,
                rval=KERN_FAILURE;
                kxld_log(kKxldLogPatching, kKxldLogErr,
                    kKxldLogMalformedVTable,
                    kxld_demangle(vtable->name,
                        &demangled_name1, &demangled_length1)));

            sym = kxld_reloc_get_symbol(relocator, reloc, /* data */ NULL);
        }

        tmpentry = kxld_array_get_item(&vtable->entries, i);
        tmpentry->unpatched.reloc = reloc;
        tmpentry->unpatched.sym = sym;
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}
/*******************************************************************************
* Initializes a vtable object by reading the symbol values out of the vtable
* entries and performing reverse symbol lookups on those values.
*******************************************************************************/
static kern_return_t
init_by_entries(KXLDVTable *vtable, const KXLDRelocator *relocator,
    const KXLDDict *defined_cxx_symbols)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDVTableEntry *entry = NULL;
    KXLDSym *sym = NULL;
    kxld_addr_t value = 0;
    u_long offset = 0;
    u_int entry_size = 0;
    u_int header_size = 0;
    u_int count = 0;
    u_int i = 0;

    check(vtable);
    check(relocator);

    (void) get_vtable_base_sizes(relocator->is_32_bit,
        &entry_size, &header_size);

    /* Count the number of entries (the vtable is null-terminated) */
    for (offset = header_size; ; offset += entry_size) {
        value = kxld_relocator_get_pointer_at_addr(relocator,
            vtable->vtable, offset);
        if (!value) {
            break;
        }
        ++count;
    }

    /* Allocate the symbol index */
    rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry), count);
    require_noerr(rval, finish);

    /* Look up the symbols for each entry.  If a value doesn't reverse-map to
     * a symbol, the virtual function was defined inline; that slot simply
     * can't be patched, so its name/addr are left empty. */
    offset = header_size;
    for (i = 0; i < vtable->entries.nitems; ++i, offset += entry_size) {
        value = kxld_relocator_get_pointer_at_addr(relocator,
            vtable->vtable, offset);

        entry = kxld_array_get_item(&vtable->entries, i);
        sym = kxld_dict_find(defined_cxx_symbols, &value);
        if (sym) {
            entry->patched.name = sym->name;
            entry->patched.addr = sym->link_addr;
        } else {
            entry->patched.name = NULL;
            entry->patched.addr = 0;
        }
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}
/*******************************************************************************
* Initializes a vtable object by matching up relocation entries to the vtable's
* entries and finding the corresponding symbols.
*******************************************************************************/
/* NOTE(review): the original text had '&sect' mis-encoded as '§' (an HTML
 * entity substitution) at every use of the section's reloc array; restored
 * to '&sect->relocs' below. */
static kern_return_t
init_by_relocs(KXLDVTable *vtable, const KXLDSym *vtable_sym,
    const KXLDSect *sect, const KXLDRelocator *relocator)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDReloc *reloc = NULL;
    KXLDVTableEntry *entry = NULL;
    KXLDSym *sym = NULL;
    kxld_addr_t vtable_base_offset = 0;
    kxld_addr_t entry_offset = 0;
    u_int i = 0;
    u_int nentries = 0;
    u_int vtable_entry_size = 0;
    u_int vtable_header_size = 0;
    u_int base_reloc_index = 0;
    u_int reloc_index = 0;

    check(vtable);
    check(vtable_sym);
    check(sect);
    check(relocator);

    /* Find the first entry past the vtable padding */
    (void) get_vtable_base_sizes(relocator->is_32_bit,
        &vtable_entry_size, &vtable_header_size);

    vtable_base_offset = kxld_sym_get_section_offset(vtable_sym, sect) +
        vtable_header_size;

    /* Find the relocation entry at the start of the vtable */
    rval = kxld_reloc_get_reloc_index_by_offset(&sect->relocs,
        vtable_base_offset, &base_reloc_index);
    require_noerr(rval, finish);

    /* Count the number of consecutive relocation entries to find the number of
     * vtable entries. For some reason, the __TEXT,__const relocations are
     * sorted in descending order, so we have to walk backwards. Also, make
     * sure we don't run off the end of the section's relocs.
     */
    reloc_index = base_reloc_index;
    entry_offset = vtable_base_offset;
    reloc = kxld_array_get_item(&sect->relocs, reloc_index);
    while (reloc->address == entry_offset) {
        ++nentries;
        if (!reloc_index) break;
        --reloc_index;

        reloc = kxld_array_get_item(&sect->relocs, reloc_index);
        entry_offset += vtable_entry_size;
    }

    /* Allocate the symbol index */
    rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry),
        nentries);
    require_noerr(rval, finish);

    /* Find the symbols for each vtable entry */
    for (i = 0; i < vtable->entries.nitems; ++i) {
        /* Relocs are sorted in descending order, so walk back from the base
         * index (see counting loop above). */
        reloc = kxld_array_get_item(&sect->relocs, base_reloc_index - i);
        entry = kxld_array_get_item(&vtable->entries, i);

        /* If we can't find a symbol, it means it is a locally-defined,
         * non-external symbol that has been stripped.  We don't patch over
         * locally-defined symbols, so we leave the symbol as NULL and just
         * skip it.  We won't be able to patch subclasses with this symbol,
         * but there isn't much we can do about that.
         */
        sym = kxld_reloc_get_symbol(relocator, reloc, sect->data);
        entry->unpatched.sym = sym;
        entry->unpatched.reloc = reloc;
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}
bzero(&array, sizeof(array)); kxld_set_logging_callback(kxld_test_log); kxld_set_logging_callback_data("kxld_array_test", NULL); kxld_log(0, 0, "%d: Initialize", ++test_num); titems = PAGE_SIZE / sizeof(u_int); rval = kxld_array_init(&array, sizeof(u_int), titems); assert(rval == KERN_SUCCESS); assert(array.nitems == titems); kxld_log(0, 0, "%d: Get item", ++test_num); idx = 0; item = kxld_array_get_item(&array, idx); assert(item); assert(item == kxld_array_get_slot(&array, idx)); idx = titems - 1; item = kxld_array_get_item(&array, idx); assert(item); assert(item == kxld_array_get_slot(&array, idx)); idx = titems; item = kxld_array_get_item(&array, idx); assert(!item); /* We allocated the max number of items that could be stored in a page, * so get_slot() and get_item() are equivalent. */ assert(item == kxld_array_get_slot(&array, idx));