/*********************************************************************
* This function takes a dependency list containing a series of
* already-loaded module names, followed by a single name for a module
* that hasn't yet been loaded. It invokes kld_load_from_memory() to
* build symbol info for the already-loaded modules, and then finally
* loads the actually requested module.
*
* Returns KERN_SUCCESS if the requested kmod ends up loaded (including
* the case where it was already loaded), KERN_FAILURE otherwise.
* Every error path funnels through the "finish" label, which undoes
* whatever partial work was completed and resets the file-scope link
* state globals for the next call.
*
* NOTE(review): this routine reads and writes several file-scope
* globals (g_current_kmod_name, g_current_kmod_info, link_*), which
* presumably implies callers serialize invocations — confirm locking
* at the call site.
*********************************************************************/
static
kern_return_t load_kmod(OSArray * dependencyList)
{
    kern_return_t result = KERN_SUCCESS;

    unsigned int num_dependencies = 0;
    kmod_info_t ** kmod_dependencies = NULL;
    unsigned int i;
    OSString * requestedKmodName;    // don't release
    const char * requested_kmod_name;
    OSString * currentKmodName;      // don't release
    char * kmod_address;
    unsigned long kmod_size;
    struct mach_header * kmod_header;
    unsigned long kld_result;
    int do_kld_unload = 0;           // set once any kld load succeeds; gates kld_unload_all()
    kmod_info_t * kmod_info_freeme = 0;  // owned here; kfree'd at finish
    kmod_info_t * kmod_info = 0;
    kmod_t kmod_id;

   /* Separate the requested kmod from its dependencies: the requested
    * kmod is the LAST entry of the list; everything before it names an
    * already-loaded dependency.
    */
    i = dependencyList->getCount();
    if (i == 0) {
        IOLog("load_kmod(): Called with empty list.\n");
        LOG_DELAY();
        result = KERN_FAILURE;
        goto finish;
    } else {
        i--;  // make i be the index of the last entry
    }

    requestedKmodName = OSDynamicCast(OSString, dependencyList->getObject(i));
    if (!requestedKmodName) {
        IOLog("load_kmod(): Called with invalid list of kmod names.\n");
        LOG_DELAY();
        result = KERN_FAILURE;
        goto finish;
    }
    requested_kmod_name = requestedKmodName->getCStringNoCopy();
    dependencyList->removeObject(i);

   /* If the requested kmod is already loaded, there's no work to do.
    */
    kmod_info_freeme = kmod_lookupbyname_locked(requested_kmod_name);
    if (kmod_info_freeme) {
        // FIXME: Need to check for version mismatch if already loaded.
        result = KERN_SUCCESS;
        goto finish;
    }

   /* Do the KLD loads for the already-loaded modules in order to get
    * their symbols.
    */
    kld_address_func(&address_for_loaded_kmod);

    num_dependencies = dependencyList->getCount();
    kmod_dependencies = (kmod_info_t **)kalloc(num_dependencies *
        sizeof(kmod_info_t *));
    if (!kmod_dependencies) {
        IOLog("load_kmod(): Failed to allocate memory for dependency array "
            "during load of kmod \"%s\".\n", requested_kmod_name);
        LOG_DELAY();
        result = KERN_FAILURE;
        goto finish;
    }

    bzero(kmod_dependencies, num_dependencies *
        sizeof(kmod_info_t *));

    for (i = 0; i < num_dependencies; i++) {

        currentKmodName = OSDynamicCast(OSString,
            dependencyList->getObject(i));

        if (!currentKmodName) {
            IOLog("load_kmod(): Invalid dependency name at index %d for "
                "kmod \"%s\".\n", i, requested_kmod_name);
            LOG_DELAY();
            result = KERN_FAILURE;
            goto finish;
        }

        const char * current_kmod_name = currentKmodName->getCStringNoCopy();

        // These globals are needed by the kld_address functions
        g_current_kmod_info = kmod_lookupbyname_locked(current_kmod_name);
        g_current_kmod_name = current_kmod_name;

        if (!g_current_kmod_info) {
            IOLog("load_kmod(): Missing dependency \"%s\".\n",
                current_kmod_name);
            LOG_DELAY();
            result = KERN_FAILURE;
            goto finish;
        }

       /* Record the current kmod as a dependency of the requested
        * one. This will be used in building references after the
        * load is complete.
        */
        kmod_dependencies[i] = g_current_kmod_info;

       /* If the current kmod's size is zero it means that we have a
        * fake in-kernel dependency.  If so then don't have to arrange
        * for its symbol table to be reloaded as it is
        * part of the kernel's symbol table..
        */
        if (!g_current_kmod_info->size)
            continue;

        if (!kld_file_merge_OSObjects(current_kmod_name)) {
            IOLog("load_kmod(): Can't merge OSObjects \"%s\".\n",
                current_kmod_name);
            LOG_DELAY();
            result = KERN_FAILURE;
            goto finish;
        }

        kmod_address = (char *)
            kld_file_getaddr(current_kmod_name, (long *) &kmod_size);
        if (!kmod_address) {
            IOLog("load_kmod() failed for dependency kmod "
                "\"%s\".\n", current_kmod_name);
            LOG_DELAY();
            result = KERN_FAILURE;
            goto finish;
        }

        kld_result = kld_load_from_memory(&kmod_header,
            current_kmod_name, kmod_address, kmod_size);

        // Any successful kld load means finish must kld_unload_all().
        if (kld_result) {
            do_kld_unload = 1;
        }

        if (!kld_result || !link_load_address) {
            IOLog("kld_load_from_memory() failed for dependency kmod "
                "\"%s\".\n", current_kmod_name);
            LOG_DELAY();
            result = KERN_FAILURE;
            goto finish;
        }

        // Drop the dependency's _kmod_info symbol so the requested
        // kmod's own _kmod_info can be looked up unambiguously below.
        kld_forget_symbol("_kmod_info");
    }

   /*****
    * Now that we've done all the dependencies, which should have already
    * been loaded, we do the last requested module, which should not have
    * already been loaded.
    */
    kld_address_func(&alloc_for_kmod);

    g_current_kmod_name = requested_kmod_name;
    g_current_kmod_info = 0;  // there is no kmod yet

    if (!map_and_patch(requested_kmod_name)) {
        IOLog("load_kmod: map_and_patch() failed for "
            "kmod \"%s\".\n", requested_kmod_name);
        LOG_DELAY();
        result = KERN_FAILURE;
        goto finish;
    }

    kmod_address = (char *)
        kld_file_getaddr(requested_kmod_name, (long *) &kmod_size);
    if (!kmod_address) {
        IOLog("load_kmod: kld_file_getaddr() failed internal error "
            "on \"%s\".\n", requested_kmod_name);
        LOG_DELAY();
        result = KERN_FAILURE;
        goto finish;
    }

    kld_result = kld_load_from_memory(&kmod_header,
        requested_kmod_name, kmod_address, kmod_size);

    if (kld_result) {
        do_kld_unload = 1;
    }

    if (!kld_result || !link_load_address) {
        IOLog("load_kmod(): kld_load_from_memory() failed for "
            "kmod \"%s\".\n", requested_kmod_name);
        LOG_DELAY();
        result = KERN_FAILURE;
        goto finish;
    }

   /* Copy the linked header and image into the vm_allocated buffer.
    * Move each onto the appropriate page-aligned boundary as given
    * by the global link_... variables.
    */
    bzero((char *)link_buffer_address, link_buffer_size);
    // bcopy() is (from, to, length)
    bcopy((char *)kmod_header, (char *)link_buffer_address,
        link_header_size);
    bcopy((char *)kmod_header + link_header_size,
        (char *)link_buffer_address + round_page_32(link_header_size),
        link_load_size - link_header_size);

   /* Get the kmod_info struct for the newly-loaded kmod.
    */
    if (!kld_lookup("_kmod_info", (unsigned long *)&kmod_info)) {
        IOLog("kld_lookup() of \"_kmod_info\" failed for "
            "kmod \"%s\".\n", requested_kmod_name);
        LOG_DELAY();
        result = KERN_FAILURE;
        goto finish;
    }

    if (!stamp_kmod(requested_kmod_name, kmod_info)) {
        // stamp_kmod() logs a meaningful message
        result = KERN_FAILURE;
        goto finish;
    }

   /* kld_lookup of _kmod_info yielded the actual linked address,
    * so now that we've copied the data into its real place,
    * we can set this stuff.
    */
    kmod_info->address = link_buffer_address;
    kmod_info->size = link_buffer_size;
    kmod_info->hdr_size = round_page_32(link_header_size);

   /* We've written data and instructions, so *flush* the data cache
    * and *invalidate* the instruction cache.
    */
    flush_dcache64((addr64_t)link_buffer_address, link_buffer_size, false);
    invalidate_icache64((addr64_t)link_buffer_address, link_buffer_size, false);

   /* Register the new kmod with the kernel proper.
    */
    if (kmod_create_internal(kmod_info, &kmod_id) != KERN_SUCCESS) {
        IOLog("load_kmod(): kmod_create() failed for "
            "kmod \"%s\".\n", requested_kmod_name);
        LOG_DELAY();
        result = KERN_FAILURE;
        goto finish;
    }

#if DEBUG
    IOLog("kmod id %d successfully created at 0x%lx, size %ld.\n",
        (unsigned int)kmod_id, link_buffer_address, link_buffer_size);
    LOG_DELAY();
#endif /* DEBUG */

   /* Record dependencies for the newly-loaded kmod.  A failed retain
    * destroys the just-created kmod before bailing out.
    */
    for (i = 0; i < num_dependencies; i++) {
        kmod_info_t * cur_dependency_info;
        kmod_t packed_id;
        cur_dependency_info = kmod_dependencies[i];
        packed_id = KMOD_PACK_IDS(kmod_id, cur_dependency_info->id);
        if (kmod_retain(packed_id) != KERN_SUCCESS) {
            IOLog("load_kmod(): kmod_retain() failed for "
                "kmod \"%s\".\n", requested_kmod_name);
            LOG_DELAY();
            kmod_destroy_internal(kmod_id);
            result = KERN_FAILURE;
            goto finish;
        }
    }

   /* Start the kmod (which invokes constructors for I/O Kit
    * drivers.
    */
    // kmod_start_or_stop(id, start?, user data, datalen)
    if (kmod_start_or_stop(kmod_id, 1, 0, 0) != KERN_SUCCESS) {
        IOLog("load_kmod(): kmod_start_or_stop() failed for "
            "kmod \"%s\".\n", requested_kmod_name);
        LOG_DELAY();
        kmod_destroy_internal(kmod_id);
        result = KERN_FAILURE;
        goto finish;
    }

finish:

    if (kmod_info_freeme) {
        kfree((unsigned int)kmod_info_freeme, sizeof(kmod_info_t));
    }

   /* Only do a kld_unload_all() if at least one load happened.
    */
    if (do_kld_unload) {
        kld_unload_all(/* deallocate sets */ 1);
    }

   /* If the link failed, blow away the allocated link buffer.
    */
    if (result != KERN_SUCCESS && link_buffer_address) {
        vm_deallocate(kernel_map, link_buffer_address, link_buffer_size);
    }

    if (kmod_dependencies) {
        // Entries came from kmod_lookupbyname_locked(); each one owned
        // here must be freed, as must the array itself.
        for (i = 0; i < num_dependencies; i++) {
            if (kmod_dependencies[i]) {
                kfree((unsigned int)kmod_dependencies[i],
                    sizeof(kmod_info_t));
            }
        }
        kfree((unsigned int)kmod_dependencies,
            num_dependencies * sizeof(kmod_info_t *));
    }

   /* Reset these static global variables for the next call.
    */
    g_current_kmod_name = NULL;
    g_current_kmod_info = NULL;
    link_buffer_address = 0;
    link_load_address = 0;
    link_load_size = 0;
    link_buffer_size = 0;
    link_header_size = 0;

    return result;
}
/*
 * copypv — copy "size" bytes from src64 to snk64, where either side
 * may be a physical address.  The cppvPsrc/cppvPsnk bits of "which"
 * mark the physical side(s); at most one side may be virtual.  The
 * copy is performed in chunks so no single operation crosses a
 * physical page boundary on a physical side.
 *
 * Returns KERN_SUCCESS, or KERN_FAILURE if the mixed-mode copy
 * (copyio_phys) reports an error.
 */
kern_return_t copypv(addr64_t src64, addr64_t snk64, unsigned int size, int which)
{
	unsigned int page_resid;
	unsigned int chunk;
	int both_phys = 0;

	KERNEL_DEBUG(0xeff7004c | DBG_FUNC_START, (unsigned)src64, (unsigned)snk64, size, which, 0);

	/* Reject the both-virtual case outright. */
	if ((which & (cppvPsrc | cppvPsnk)) == 0)
		panic("copypv: no more than 1 parameter may be virtual\n");		/* Not allowed */

	if ((which & (cppvPsrc | cppvPsnk)) == (cppvPsrc | cppvPsnk))
		both_phys = 1;							/* both are physical */

	while (size) {
		if (both_phys) {
			/* Chunk is bounded by whichever side sits closer to
			 * the end of its physical page.
			 */
			unsigned int src_resid = (unsigned int)(PAGE_SIZE - (src64 & (PAGE_SIZE - 1)));

			page_resid = (unsigned int)(PAGE_SIZE - (snk64 & (PAGE_SIZE - 1)));
			if (page_resid > src_resid)
				page_resid = src_resid;
		} else {
			/*
			 * only need to compute the resid for the physical page
			 * address... we don't care about where we start/finish in
			 * the virtual since we just call the normal copyin/copyout
			 */
			addr64_t phys_side = (which & cppvPsrc) ? src64 : snk64;

			page_resid = (unsigned int)(PAGE_SIZE - (phys_side & (PAGE_SIZE - 1)));
		}
		/* Copy everything left, capped at the page residual. */
		chunk = (page_resid < size) ? page_resid : size;

#if 0
		/*
		 * flush_dcache64 is currently a nop on the i386...
		 * it's used when copying to non-system memory such
		 * as video capture cards... on PPC there was a need
		 * to flush due to how we mapped this memory... not
		 * sure if it's needed on i386.
		 */
		if (which & cppvFsrc)
			flush_dcache64(src64, chunk, 1);	/* If requested, flush source before move */
		if (which & cppvFsnk)
			flush_dcache64(snk64, chunk, 1);	/* If requested, flush sink before move */
#endif

		if (both_phys) {
			bcopy_phys(src64, snk64, chunk);	/* Do a physical copy, virtually */
		} else {
			if (copyio_phys(src64, snk64, chunk, which))
				return (KERN_FAILURE);
		}

#if 0
		if (which & cppvFsrc)
			flush_dcache64(src64, chunk, 1);	/* If requested, flush source after move */
		if (which & cppvFsnk)
			flush_dcache64(snk64, chunk, 1);	/* If requested, flush sink after move */
#endif

		size -= chunk;					/* Calculate what is left */
		snk64 += chunk;					/* Bump sink to next physical address */
		src64 += chunk;					/* Bump source to next physical address */
	}

	KERNEL_DEBUG(0xeff7004c | DBG_FUNC_END, (unsigned)src64, (unsigned)snk64, size, which, 0);

	return KERN_SUCCESS;
}