/*
 * fileport_walk
 *
 * Description: Invoke the action function on every fileport in the task.
 *
 *		This could be more efficient if we refactored mach_port_names()
 *		so that (a) it didn't compute the type information unless asked
 *		and (b) it could be asked to -not- unwire/copyout the memory
 *		and (c) if we could ask for port names by kobject type. Not
 *		clear that it's worth all that complexity, though.
 *
 * Parameters:	task		The target task
 *		action		The function to invoke on each fileport
 *		arg		Anonymous pointer to caller state.
 */
kern_return_t
fileport_walk(task_t task,
    int (*action)(mach_port_name_t, struct fileglob *, void *arg),
    void *arg)
{
	mach_port_name_t	*names;
	mach_msg_type_number_t	ncnt, tcnt;
	vm_map_copy_t		map_copy_names, map_copy_types;
	vm_map_address_t	map_names;
	kern_return_t		kr;
	uint_t			i;
	int			rval;

	/*
	 * mach_port_names returns the 'name' and 'types' in copied-in
	 * form.  Discard 'types' immediately, then copyout 'names'
	 * back into the kernel before walking the array.
	 */
	kr = mach_port_names(task->itk_space,
	    (mach_port_name_t **)&map_copy_names, &ncnt,
	    (mach_port_type_t **)&map_copy_types, &tcnt);
	if (kr != KERN_SUCCESS)
		return (kr);

	vm_map_copy_discard(map_copy_types);

	kr = vm_map_copyout(ipc_kernel_map, &map_names, map_copy_names);
	if (kr != KERN_SUCCESS) {
		vm_map_copy_discard(map_copy_names);
		return (kr);
	}
	names = (mach_port_name_t *)(uintptr_t)map_names;

	for (rval = 0, i = 0; i < ncnt; i++)
		if (fileport_invoke(task, names[i], action, arg,
		    &rval) == KERN_SUCCESS && -1 == rval)
			break;		/* early termination clause */

	vm_deallocate(ipc_kernel_map, (vm_address_t)names,
	    ncnt * sizeof (*names));
	return (KERN_SUCCESS);
}
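For reference, a minimal sketch of how a caller might use fileport_walk(). The callback and context names are hypothetical (they do not appear in the source), but the return-value convention follows the loop above: an action that returns -1 stops the walk early.

/* Hypothetical context and callback, not part of the original source. */
struct fileport_count_ctx {
	int	count;		/* fileports seen so far */
	int	limit;		/* stop early once this many are seen */
};

static int
count_fileport(mach_port_name_t name __unused, struct fileglob *fg __unused,
    void *arg)
{
	struct fileport_count_ctx *ctx = (struct fileport_count_ctx *)arg;

	if (++ctx->count >= ctx->limit)
		return (-1);	/* -1 asks fileport_walk() to stop early */
	return (0);
}

/*
 * Usage sketch:
 *	struct fileport_count_ctx ctx = { .count = 0, .limit = 100 };
 *	kern_return_t kr = fileport_walk(task, count_fileport, &ctx);
 */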
/*
 * Write to TTY.
 * No locks may be held.
 * Calls device start routine; must already be on master if
 * device needs to run on master.
 */
io_return_t char_write(
	register struct tty *	tp,
	register io_req_t	ior)
{
	spl_t		s;
	register int	count;
	register char	*data;
	vm_offset_t	addr;
	io_return_t	rc = D_SUCCESS;

	data  = ior->io_data;
	count = ior->io_count;
	if (count == 0)
	    return rc;

	if (!(ior->io_op & IO_INBAND)) {
	    /*
	     * Copy out-of-line data into kernel address space.
	     * Since data is copied as page list, it will be
	     * accessible.
	     */
	    vm_map_copy_t copy = (vm_map_copy_t) data;
	    kern_return_t kr;

	    kr = vm_map_copyout(device_io_map, &addr, copy);
	    if (kr != KERN_SUCCESS)
		return kr;
	    data = (char *) addr;
	}

	/*
	 * Check for tty operating.
	 */
	s = spltty();
	simple_lock(&tp->t_lock);

	if ((tp->t_state & TS_CARR_ON) == 0) {

	    if ((tp->t_state & TS_ONDELAY) == 0) {
		/*
		 * No delayed writes - tell caller that device is down
		 */
		rc = D_IO_ERROR;
		goto out;
	    }

	    if (ior->io_mode & D_NOWAIT) {
		rc = D_WOULD_BLOCK;
		goto out;
	    }
	}

	/*
	 * Copy data into the output buffer.
	 * Report the amount not copied.
	 */
	ior->io_residual = b_to_q(data, count, &tp->t_outq);

	/*
	 * Start hardware output.
	 */
	tp->t_state &= ~TS_TTSTOP;
	tty_output(tp);

	if (tp->t_outq.c_cc > TTHIWAT(tp) ||
	    (tp->t_state & TS_CARR_ON) == 0) {

	    /*
	     * Do not send reply until some characters have been sent.
	     */
	    ior->io_dev_ptr = (char *)tp;

	    queue_delayed_reply(&tp->t_delayed_write, ior, char_write_done);
	    rc = D_IO_QUEUED;
	}
    out:
	simple_unlock(&tp->t_lock);
	splx(s);

	if (!(ior->io_op & IO_INBAND))
	    (void) vm_deallocate(device_io_map, addr, ior->io_count);
	return rc;
}
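The inband/out-of-line split in char_write() is the general pattern for a Mach device write path. A condensed sketch, with a hypothetical helper name and the same device_io_map and io_req_t fields the routine above uses:

/* Hypothetical helper: map a write request's data so the driver can read it. */
static io_return_t
map_write_data(io_req_t ior, char **datap, vm_offset_t *addrp)
{
	kern_return_t kr;

	if (ior->io_op & IO_INBAND) {
	    /* Inband data is carried in the request itself: no VM work,
	     * and nothing to release afterwards. */
	    *datap = ior->io_data;
	    return D_SUCCESS;
	}

	/*
	 * Out-of-line data arrives as a vm_map_copy_t page list and must
	 * be mapped into device_io_map before it can be touched.  Once the
	 * write is finished, the mapping is released with
	 * vm_deallocate(device_io_map, *addrp, ior->io_count), as in the
	 * tail of char_write() above.
	 */
	kr = vm_map_copyout(device_io_map, addrp, (vm_map_copy_t)ior->io_data);
	if (kr != KERN_SUCCESS)
	    return kr;
	*datap = (char *)*addrp;
	return D_SUCCESS;
}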
/*
 * Loads a symbol table for an external file into the kernel debugger.
 * The symbol table data is an array of characters.  It is assumed that
 * the caller and the kernel debugger agree on its format.
 */
kern_return_t
host_load_symbol_table(
	host_t		host,
	task_t		task,
	char *		name,
	pointer_t	symtab,
	unsigned int	symtab_count)
{
	kern_return_t	result;
	vm_offset_t	symtab_start;
	vm_offset_t	symtab_end;
	vm_map_t	map;
	vm_map_copy_t	symtab_copy_object;

	if (host == HOST_NULL)
	    return (KERN_INVALID_ARGUMENT);

	/*
	 * Copy the symbol table array into the kernel.
	 * We make a copy of the copy object, and clear
	 * the old one, so that returning error will not
	 * deallocate the data twice.
	 */
	symtab_copy_object = (vm_map_copy_t) symtab;
	result = vm_map_copyout(
			kernel_map,
			&symtab_start,
			vm_map_copy_copy(symtab_copy_object));
	if (result != KERN_SUCCESS)
	    return (result);

	symtab_end = symtab_start + symtab_count;

	/*
	 * Add the symbol table.
	 * Do not keep a reference for the task map.	XXX
	 */
	if (task == TASK_NULL)
	    map = VM_MAP_NULL;
	else
	    map = task->map;
	if (!X_db_sym_init((char *)symtab_start,
			(char *)symtab_end,
			name,
			(char *)map))
	{
	    /*
	     * Not enough room for symbol table - failure.
	     */
	    (void) vm_deallocate(kernel_map,
			symtab_start,
			symtab_count);
	    return (KERN_FAILURE);
	}

	/*
	 * Wire down the symbol table
	 */
	(void) vm_map_pageable(kernel_map,
		symtab_start,
		round_page(symtab_end),
		VM_PROT_READ|VM_PROT_WRITE);

	/*
	 * Discard the original copy object
	 */
	vm_map_copy_discard(symtab_copy_object);

	return (KERN_SUCCESS);
}
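The vm_map_copy_copy() call above is what keeps an error return from freeing the caller's data twice: it moves the contents into a fresh copy object and leaves the original empty, so whoever discards the original later finds nothing left to deallocate. A minimal sketch of the idiom, with a hypothetical helper name:

/* Hypothetical helper illustrating the vm_map_copy_copy() idiom. */
kern_return_t
map_user_data(vm_map_copy_t user_copy, vm_offset_t *kaddrp)
{
	kern_return_t kr;

	/*
	 * Copy out a *copy* of the copy object.  vm_map_copy_copy() moves
	 * the contents into a new object and leaves user_copy empty, so if
	 * we fail here and the caller (e.g. the MIG layer) later discards
	 * user_copy, the data is not deallocated twice.
	 */
	kr = vm_map_copyout(kernel_map, kaddrp, vm_map_copy_copy(user_copy));
	if (kr != KERN_SUCCESS)
	    return (kr);

	/* Success: the mapping is ours; retire the (now empty) original. */
	vm_map_copy_discard(user_copy);
	return (KERN_SUCCESS);
}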
/*********************************************************************
* IMPORTANT: Once we have done the vm_map_copyout(), we *must* return
* KERN_SUCCESS or the kernel map gets messed up (reason as yet
* unknown). We use op_result to return the real result of our work.
*********************************************************************/
kern_return_t kext_request(
    host_priv_t                             hostPriv,
    /* in only */  uint32_t                 clientLogSpec,
    /* in only */  vm_offset_t              requestIn,
    /* in only */  mach_msg_type_number_t   requestLengthIn,
    /* out only */ vm_offset_t            * responseOut,
    /* out only */ mach_msg_type_number_t * responseLengthOut,
    /* out only */ vm_offset_t            * logDataOut,
    /* out only */ mach_msg_type_number_t * logDataLengthOut,
    /* out only */ kern_return_t          * op_result)
{
    kern_return_t     result          = KERN_FAILURE;
    vm_map_address_t  map_addr        = 0;     // do not free/deallocate
    char            * request         = NULL;  // must vm_deallocate

    mkext2_header   * mkextHeader     = NULL;  // do not release
    bool              isMkext         = false;

    char            * response        = NULL;  // must kmem_free
    uint32_t          responseLength  = 0;
    char            * logData         = NULL;  // must kmem_free
    uint32_t          logDataLength   = 0;

   /* MIG doesn't pass "out" parameters as empty, so clear them immediately
    * just in case, or MIG will try to copy out bogus data.
    */
    *op_result = KERN_FAILURE;
    *responseOut = NULL;
    *responseLengthOut = 0;
    *logDataOut = NULL;
    *logDataLengthOut = 0;

   /* Check for input. Don't discard what isn't there, though.
    */
    if (!requestLengthIn || !requestIn) {
        OSKextLog(/* kext */ NULL,
            kOSKextLogErrorLevel |
            kOSKextLogIPCFlag,
            "Invalid request from user space (no data).");
        *op_result = KERN_INVALID_ARGUMENT;
        goto finish;
    }

   /* Once we have done the vm_map_copyout(), we *must* return KERN_SUCCESS
    * or the kernel map gets messed up (reason as yet unknown). We will use
    * op_result to return the real result of our work.
    */
    result = vm_map_copyout(kernel_map, &map_addr, (vm_map_copy_t)requestIn);
    if (result != KERN_SUCCESS) {
        OSKextLog(/* kext */ NULL,
            kOSKextLogErrorLevel |
            kOSKextLogIPCFlag,
            "vm_map_copyout() failed for request from user space.");
        vm_map_copy_discard((vm_map_copy_t)requestIn);
        goto finish;
    }
    request = CAST_DOWN(char *, map_addr);

   /* Check if request is an mkext; this is always a load request
    * and requires root access. If it isn't an mkext, see if it's
    * an XML request, and check the request to see if that requires
    * root access.
    */
    if (requestLengthIn > sizeof(mkext2_header)) {
        mkextHeader = (mkext2_header *)request;
        if (MKEXT_GET_MAGIC(mkextHeader) == MKEXT_MAGIC &&
            MKEXT_GET_SIGNATURE(mkextHeader) == MKEXT_SIGN) {

            isMkext = true;
        }
    }

    if (isMkext) {
#ifdef SECURE_KERNEL
        // xxx - something tells me if we have a secure kernel we don't even
        // xxx - want to log a message here. :-)
        *op_result = KERN_NOT_SUPPORTED;
        goto finish;
#else
        // xxx - can we find out if calling task is kextd?
        // xxx - can we find the name of the calling task?
        if (hostPriv == HOST_PRIV_NULL) {
            OSKextLog(/* kext */ NULL,
                kOSKextLogErrorLevel |
                kOSKextLogLoadFlag | kOSKextLogIPCFlag,
                "Attempt by non-root process to load a kext.");
            *op_result = kOSKextReturnNotPrivileged;
            goto finish;
        }
        *op_result = OSKext::loadFromMkext((OSKextLogSpec)clientLogSpec,
            request, requestLengthIn,
            &logData, &logDataLength);
#endif /* defined(SECURE_KERNEL) */
    } else {
       /* If the request isn't an mkext, then it should be XML. Parse it
        * if possible and hand the request over to OSKext.
        */
        *op_result = OSKext::handleRequest(hostPriv,
            (OSKextLogSpec)clientLogSpec,
            request, requestLengthIn,
            &response, &responseLength,
            &logData, &logDataLength);
    }

    if (response && responseLength > 0) {
        kern_return_t copyin_result;

        copyin_result = vm_map_copyin(kernel_map,
            CAST_USER_ADDR_T(response), responseLength,
            /* src_destroy */ false, (vm_map_copy_t *)responseOut);
        if (copyin_result == KERN_SUCCESS) {
            *responseLengthOut = responseLength;
        } else {
            OSKextLog(/* kext */ NULL,
                kOSKextLogErrorLevel |
                kOSKextLogIPCFlag,
                "Failed to copy response to request from user space.");
            *op_result = copyin_result;  // xxx - should we map to our own code?
            *responseOut = NULL;
            *responseLengthOut = 0;
            goto finish;
        }
    }

    if (logData && logDataLength > 0) {
        kern_return_t copyin_result;

        copyin_result = vm_map_copyin(kernel_map,
            CAST_USER_ADDR_T(logData), logDataLength,
            /* src_destroy */ false, (vm_map_copy_t *)logDataOut);
        if (copyin_result == KERN_SUCCESS) {
            *logDataLengthOut = logDataLength;
        } else {
            OSKextLog(/* kext */ NULL,
                kOSKextLogErrorLevel |
                kOSKextLogIPCFlag,
                "Failed to copy log data for request from user space.");
            *op_result = copyin_result;  // xxx - should we map to our own code?
            *logDataOut = NULL;
            *logDataLengthOut = 0;
            goto finish;
        }
    }

finish:
    if (request) {
        (void)vm_deallocate(kernel_map, (vm_offset_t)request, requestLengthIn);
    }
    if (response) {
        kmem_free(kernel_map, (vm_offset_t)response, responseLength);
    }
    if (logData) {
        kmem_free(kernel_map, (vm_offset_t)logData, logDataLength);
    }

    return result;
}
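A minimal sketch of the contract spelled out in the banner comment above, with hypothetical names (only kext_request() itself appears in the original): once vm_map_copyout() has succeeded, the MIG-visible return value stays KERN_SUCCESS and the operation's real status travels in *op_result.

/* Hypothetical reduced example of the copyout-then-KERN_SUCCESS contract. */
kern_return_t
example_request(
    vm_offset_t            requestIn,
    mach_msg_type_number_t requestLengthIn,
    kern_return_t        * op_result)
{
    vm_map_address_t map_addr = 0;
    kern_return_t    result;

    *op_result = KERN_FAILURE;

    result = vm_map_copyout(kernel_map, &map_addr, (vm_map_copy_t)requestIn);
    if (result != KERN_SUCCESS) {
        /* Copyout failed: nothing entered the kernel map, so it is safe
         * to report the failure directly (after discarding the copy). */
        vm_map_copy_discard((vm_map_copy_t)requestIn);
        return result;
    }

    /* ... act on the request; record the real outcome in *op_result ... */
    *op_result = KERN_SUCCESS;

    (void)vm_deallocate(kernel_map, (vm_offset_t)map_addr, requestLengthIn);
    return KERN_SUCCESS;   /* always KERN_SUCCESS once copyout succeeded */
}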