/*
 *	Routine:	ipc_task_reset
 *	Purpose:
 *		Replace the task's kernel port with a freshly allocated one
 *		and strip the task's unprivileged exception ports.  Old send
 *		rights are released only after the itk lock is dropped.
 *	Conditions:
 *		Nothing locked.  Panics if a new kernel port cannot be
 *		allocated.
 */
void
ipc_task_reset(
	task_t		task)
{
	ipc_port_t old_kport, new_kport;
	ipc_port_t old_sself;
	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
	int i;

	/* Allocate the replacement kernel port before taking the lock. */
	new_kport = ipc_port_alloc_kernel();
	if (new_kport == IP_NULL)
		panic("ipc_task_reset");

	itk_lock(task);

	old_kport = task->itk_self;

	if (old_kport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		ipc_port_dealloc_kernel(new_kport);
		return;
	}

	/* Install the new self port and a fresh naked send right for it. */
	task->itk_self = new_kport;
	old_sself = task->itk_sself;
	task->itk_sself = ipc_port_make_send(new_kport);

	/* Move the task kobject linkage from the old port to the new one. */
	ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
	ipc_kobject_set(new_kport, (ipc_kobject_t) task, IKOT_TASK);

	/*
	 * Unprivileged exception ports are cleared (and saved for release
	 * below); privileged ones are preserved across the reset.
	 */
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (!task->exc_actions[i].privileged) {
			old_exc_actions[i] = task->exc_actions[i].port;
			task->exc_actions[i].port = IP_NULL;
		} else {
			old_exc_actions[i] = IP_NULL;
		}
	}/* for */

	/* The debug-control port is always dropped on a reset. */
	if (IP_VALID(task->itk_debug_control)) {
		ipc_port_release_send(task->itk_debug_control);
	}
	task->itk_debug_control = IP_NULL;

	itk_unlock(task);

	/* release the naked send rights */

	if (IP_VALID(old_sself))
		ipc_port_release_send(old_sself);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(old_exc_actions[i])) {
			ipc_port_release_send(old_exc_actions[i]);
		}
	}/* for */

	/* destroy the kernel port */
	ipc_port_dealloc_kernel(old_kport);
}
/*
 *	Routine:	ipc_task_disable
 *	Purpose:
 *		Detach the task from its kernel ports (self, name, resume)
 *		so that no further kernel operations can be invoked on them.
 *	Conditions:
 *		Nothing locked.
 */
void
ipc_task_disable(
	task_t		task)
{
	ipc_port_t	self_port;
	ipc_port_t	name_port;
	ipc_port_t	resume_port;

	itk_lock(task);

	self_port = task->itk_self;
	if (self_port != IP_NULL) {
		ipc_kobject_set(self_port, IKO_NULL, IKOT_NONE);
	}

	name_port = task->itk_nself;
	if (name_port != IP_NULL) {
		ipc_kobject_set(name_port, IKO_NULL, IKOT_NONE);
	}

	resume_port = task->itk_resume;
	if (resume_port != IP_NULL) {
		/*
		 * From this point onwards this task no longer accepts
		 * resumptions.  Outstanding suspension holders still own
		 * SO rights to the resume port; disconnecting the task
		 * here "orphans" the port, which goes away only when the
		 * last holder releases (or futilely exercises) that right.
		 */
		ipc_kobject_set(resume_port, IKO_NULL, IKOT_NONE);
	}

	itk_unlock(task);
}
/*
 *	Routine:	retrieve_task_self_fast
 *	Purpose:
 *		Return a naked send right for the current task's
 *		user-visible self port.  Fast path: when nobody has
 *		interposed on TASK_KERNEL_PORT (itk_sself == itk_self)
 *		the send right is minted directly by bumping ip_srights,
 *		avoiding the generic copy path.
 *	Conditions:
 *		Nothing locked; task must be the caller's own task.
 */
ipc_port_t
retrieve_task_self_fast(
	register task_t		task)
{
	register ipc_port_t port;

	assert(task == current_task());

	itk_lock(task);
	assert(task->itk_self != IP_NULL);

	if ((port = task->itk_sself) == task->itk_self) {
		/* no interposing */

		/* mint the send right by hand: ref + srights under ip_lock */
		ip_lock(port);
		assert(ip_active(port));
		ip_reference(port);
		port->ip_srights++;
		ip_unlock(port);
	} else
		port = ipc_port_copy_send(port);
	itk_unlock(task);

	return port;
}
/*
 *	Routine:	mach_ports_register
 *	Purpose:
 *		Stash a (small, bounded) array of send rights in the task,
 *		replacing (and then releasing) any previously registered
 *		rights.  Consumes the rights and the MIG-supplied memory.
 *	Returns:
 *		KERN_SUCCESS		Stashed the port rights.
 *		KERN_INVALID_ARGUMENT	Bad task, too many ports, or
 *					null memory with nonzero count.
 */
kern_return_t
mach_ports_register(
	task_t			task,
	mach_port_array_t	memory,
	mach_msg_type_number_t	portsCnt)
{
	ipc_port_t ports[TASK_PORT_REGISTER_MAX];
	unsigned int i;

	if ((task == TASK_NULL)
	    || (portsCnt > TASK_PORT_REGISTER_MAX)
	    || (portsCnt && memory == NULL))
		return KERN_INVALID_ARGUMENT;

	/*
	 * Pad the port rights with nulls.
	 */
	for (i = 0; i < portsCnt; i++)
		ports[i] = memory[i];
	for (; i < TASK_PORT_REGISTER_MAX; i++)
		ports[i] = IP_NULL;

	itk_lock(task);
	if (task->itk_self == IP_NULL) {
		/* task already terminated */
		itk_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * Replace the old send rights with the new.
	 * Release the old rights after unlocking.
	 */
	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		ipc_port_t old;

		old = task->itk_registered[i];
		task->itk_registered[i] = ports[i];
		ports[i] = old;
	}

	itk_unlock(task);

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
		if (IP_VALID(ports[i]))
			ipc_port_release_send(ports[i]);

	/*
	 * Now that the operation is known to be successful,
	 * we can free the memory.
	 */
	if (portsCnt != 0)
		kfree(memory,
		      (vm_size_t) (portsCnt * sizeof(mach_port_t)));

	return KERN_SUCCESS;
}
/*
 *	Routine:	task_get_special_port
 *	Purpose:
 *		Hand out a send right for one of the task's special ports.
 *	Returns:
 *		KERN_SUCCESS		Extracted a send right.
 *		KERN_INVALID_ARGUMENT	The task is null or which is bogus.
 *		KERN_FAILURE		The task has been terminated.
 */
kern_return_t
task_get_special_port(
	task_t		task,
	int		which,
	ipc_port_t	*portp)
{
	ipc_port_t	sright;

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	itk_lock(task);
	if (task->itk_self == IP_NULL) {
		/* task terminated while we were waiting for the lock */
		itk_unlock(task);
		return KERN_FAILURE;
	}

	switch (which) {
	case TASK_KERNEL_PORT:
		sright = ipc_port_copy_send(task->itk_sself);
		break;

	case TASK_NAME_PORT:
		/* kernel holds the name port; mint a fresh send right */
		sright = ipc_port_make_send(task->itk_nself);
		break;

	case TASK_HOST_PORT:
		sright = ipc_port_copy_send(task->itk_host);
		break;

	case TASK_BOOTSTRAP_PORT:
		sright = ipc_port_copy_send(task->itk_bootstrap);
		break;

	case TASK_SEATBELT_PORT:
		sright = ipc_port_copy_send(task->itk_seatbelt);
		break;

	case TASK_ACCESS_PORT:
		sright = ipc_port_copy_send(task->itk_task_access);
		break;

	case TASK_DEBUG_CONTROL_PORT:
		sright = ipc_port_copy_send(task->itk_debug_control);
		break;

	default:
		itk_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}
	itk_unlock(task);

	*portp = sright;
	return KERN_SUCCESS;
}
/*
 *	Routine:	task_get_inherited_ports
 *	Purpose:
 *		Return copies of the task's bootstrap port, registered
 *		ports, and exception ports.  Identical exception entries
 *		(same port/behavior/flavor) are merged into one output
 *		slot whose mask accumulates the exception bits.
 *	Conditions:
 *		Nothing locked.  Output arrays are caller-supplied;
 *		NOTE(review): an output entry is produced even when the
 *		exception port is IP_NULL — confirm callers expect that.
 */
kern_return_t
task_get_inherited_ports(
	task_t				task,
	ipc_port_t			*bootstrap,
	norma_registered_port_array_t	registered,
	unsigned			*count,
	exception_mask_array_t		exc_masks,
	unsigned			*exc_count,
	exception_port_array_t		exc_ports,
	exception_behavior_array_t	exc_behaviors,
	exception_flavor_array_t	exc_flavors)
{
	unsigned i;
	unsigned j;
	unsigned n;

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	itk_lock(task);

	*bootstrap = ipc_port_copy_send(task->itk_bootstrap);

	/* all registered slots are returned, nulls included */
	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
		registered[i] = ipc_port_copy_send(task->itk_registered[i]);
	*count = TASK_PORT_REGISTER_MAX;

	n = 0;
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		for (j = 0; j < n; j++) {
			/*
			 * Search for an identical entry, if found
			 * set corresponding mask for this exception.
			 */
			if (task->exc_actions[i].port == exc_ports[j] &&
			    task->exc_actions[i].behavior == exc_behaviors[j] &&
			    task->exc_actions[i].flavor == exc_flavors[j]) {
				exc_masks[j] |= (1 << i);
				break;
			}
		}
		if (j == n) {
			/* no match: append a new output entry (j == n here) */
			exc_masks[j] = (1 << i);
			exc_ports[j] =
				ipc_port_copy_send(task->exc_actions[i].port);
			exc_behaviors[j] = task->exc_actions[i].behavior;
			exc_flavors[j] = task->exc_actions[i].flavor;
			n++;
		}
	}
	*exc_count = n;

	itk_unlock(task);
	return KERN_SUCCESS;
}
/*
 *	Routine:	ipc_task_enable
 *	Purpose:
 *		Reattach the task to its kernel port so that messages
 *		sent to the port are serviced as task operations again.
 *	Conditions:
 *		Nothing locked.
 */
void
ipc_task_enable(
	task_t		task)
{
	ipc_port_t	self_port;

	itk_lock(task);
	self_port = task->itk_self;
	if (self_port != IP_NULL) {
		ipc_kobject_set(self_port, (ipc_kobject_t) task, IKOT_TASK);
	}
	itk_unlock(task);
}
/*
 *	Routine:	ipc_task_disable
 *	Purpose:
 *		Detach the task from its kernel port, so that messages
 *		to the port are no longer interpreted as task operations.
 *	Conditions:
 *		Nothing locked.
 */
void
ipc_task_disable(
	task_t		task)
{
	ipc_port_t	self_port;

	itk_lock(task);
	self_port = task->itk_self;
	if (self_port != IP_NULL) {
		ipc_kobject_set(self_port, IKO_NULL, IKOT_NONE);
	}
	itk_unlock(task);
}
/*
 *	Routine:	exception_try_task
 *	Purpose:
 *		Raise an exception to the current task's exception port.
 *		This is the thread's last chance at delivery, so the saved
 *		exception status is cleared before raising.
 *	Conditions:
 *		Does not return: either exception_raise or
 *		exception_no_server takes over control flow.
 */
void
exception_try_task(
	integer_t _exception,
	integer_t code,
	integer_t subcode)
{
	ipc_thread_t self = current_thread();
	task_t task = self->task;
	ipc_port_t exc_port;

	/*
	 * Optimized version of retrieve_task_exception.
	 */

	itk_lock(task);
	assert(task->itk_self != IP_NULL);
	exc_port = task->itk_exception;
	if (!IP_VALID(exc_port)) {
		itk_unlock(task);
		exception_no_server();
		/*NOTREACHED*/
	}

	/* lock the port before dropping the task lock so it can't die */
	ip_lock(exc_port);
	itk_unlock(task);
	if (!ip_active(exc_port)) {
		ip_unlock(exc_port);
		exception_no_server();
		/*NOTREACHED*/
	}

	/*
	 * Make a naked send right for the exception port.
	 */

	ip_reference(exc_port);
	exc_port->ip_srights++;
	ip_unlock(exc_port);

	/*
	 * This is the thread's last chance.
	 * Clear the saved exception state.
	 */

	self->ith_exc = KERN_SUCCESS;

	exception_raise(exc_port,
			retrieve_thread_self_fast(self),
			retrieve_task_self_fast(task),
			_exception, code, subcode);
	/*NOTREACHED*/
}
/*
 *	Routine:	ipc_task_init
 *	Purpose:
 *		Initialize a task's IPC state: create its space and kernel
 *		port, and either null out or inherit (from parent) the
 *		registered, exception, and bootstrap ports.
 *	Conditions:
 *		Nothing locked.  The task is not yet visible to other
 *		threads.  Panics on space/port allocation failure.
 */
void
ipc_task_init(
	task_t		task,
	task_t		parent)
{
	ipc_space_t space;
	ipc_port_t kport;
	kern_return_t kr;
	int i;

	kr = ipc_space_create(&space);
	if (kr != KERN_SUCCESS)
		panic("ipc_task_init");

	kport = ipc_port_alloc_kernel();
	if (kport == IP_NULL)
		panic("ipc_task_init");

	itk_lock_init(task);
	task->itk_self = kport;
	task->itk_sself = ipc_port_make_send(kport);
	task->itk_space = space;

	if (parent == TASK_NULL) {
		/* first task: nothing to inherit */
		task->itk_exception = IP_NULL;
		task->itk_bootstrap = IP_NULL;
		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
			task->itk_registered[i] = IP_NULL;
	} else {
		itk_lock(parent);
		assert(parent->itk_self != IP_NULL);

		/* inherit registered ports */

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
			task->itk_registered[i] =
				ipc_port_copy_send(parent->itk_registered[i]);

		/* inherit exception and bootstrap ports */

		task->itk_exception =
			ipc_port_copy_send(parent->itk_exception);
		task->itk_bootstrap =
			ipc_port_copy_send(parent->itk_bootstrap);

		itk_unlock(parent);
	}
}
/*
 *	Routine:	convert_task_name_to_port
 *	Purpose:
 *		Convert from a task-name reference to a send right for the
 *		task's name port; IP_NULL if the task has none.
 *		Consumes the task-name reference.
 *	Conditions:
 *		Nothing locked.
 */
ipc_port_t
convert_task_name_to_port(
	task_name_t		task_name)
{
	ipc_port_t	sright = IP_NULL;

	itk_lock(task_name);
	if (task_name->itk_nself != IP_NULL) {
		sright = ipc_port_make_send(task_name->itk_nself);
	}
	itk_unlock(task_name);

	task_name_deallocate(task_name);
	return sright;
}
/*
 *	Routine:	mach_ports_lookup
 *	Purpose:
 *		Return an array of the task's registered send rights.
 *		The array is kalloc'd; ownership of both the memory and
 *		the cloned rights passes to the caller.
 *	Returns:
 *		KERN_SUCCESS		Retrieved the registered rights.
 *		KERN_INVALID_ARGUMENT	The task is null or dead.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate the array.
 */
kern_return_t
mach_ports_lookup(
	task_t			task,
	mach_port_array_t	*portsp,
	mach_msg_type_number_t	*portsCnt)
{
	void		*buffer;
	vm_size_t	buffer_size;
	ipc_port_t	*rights;
	int		slot;

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	buffer_size = (vm_size_t)
		(TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));
	buffer = kalloc(buffer_size);
	if (buffer == 0) {
		return KERN_RESOURCE_SHORTAGE;
	}

	itk_lock(task);
	if (task->itk_self == IP_NULL) {
		/* dead task: give the buffer back */
		itk_unlock(task);
		kfree(buffer, buffer_size);
		return KERN_INVALID_ARGUMENT;
	}

	rights = (ipc_port_t *) buffer;

	/*
	 * Clone the port rights.  kalloc'd memory is wired, so copying
	 * into it while holding the task lock cannot fault.
	 */
	for (slot = 0; slot < TASK_PORT_REGISTER_MAX; slot++) {
		rights[slot] =
			ipc_port_copy_send(task->itk_registered[slot]);
	}
	itk_unlock(task);

	*portsp = (mach_port_array_t) rights;
	*portsCnt = TASK_PORT_REGISTER_MAX;
	return KERN_SUCCESS;
}
/*
 *	Routine:	ipc_task_terminate
 *	Purpose:
 *		Clean up a task's IPC state: sever the kernel port, then
 *		release the task's naked send rights, destroy its space,
 *		and deallocate the kernel port.
 *	Conditions:
 *		Nothing locked.  Once itk_self is cleared no other thread
 *		can install rights, so the fields are read safely after
 *		the lock is dropped.
 */
void
ipc_task_terminate(
	task_t		task)
{
	ipc_port_t kport;
	int i;

	itk_lock(task);
	kport = task->itk_self;

	if (kport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		return;
	}

	task->itk_self = IP_NULL;
	itk_unlock(task);

	/* release the naked send rights */

	if (IP_VALID(task->itk_sself))
		ipc_port_release_send(task->itk_sself);
	if (IP_VALID(task->itk_exception))
		ipc_port_release_send(task->itk_exception);

	if (IP_VALID(task->itk_bootstrap))
		ipc_port_release_send(task->itk_bootstrap);

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
		if (IP_VALID(task->itk_registered[i]))
			ipc_port_release_send(task->itk_registered[i]);

	/* destroy the space, leaving just a reference for it */

	ipc_space_destroy(task->itk_space);

	/* destroy the kernel port */

	ipc_port_dealloc_kernel(kport);
}
/*
 *	Routine:	task_swap_exception_ports
 *	Purpose:
 *		Atomically return the old exception entries for the masked
 *		exceptions (deduplicated into the output arrays) while
 *		installing a new port/behavior/flavor for each of them.
 *		Consumes the caller's send right for new_port.
 *	Returns:
 *		KERN_SUCCESS		Swapped the ports.
 *		KERN_INVALID_ARGUMENT	Bad task, mask, behavior, or flavor.
 *		KERN_FAILURE		The task has been terminated.
 *	NOTE(review): if the deduplicated output fills *CountCnt slots,
 *	the loop exits early and the remaining masked exceptions are NOT
 *	swapped — confirm callers pass a large enough CountCnt.
 */
kern_return_t
task_swap_exception_ports(
	task_t				task,
	exception_mask_t		exception_mask,
	ipc_port_t			new_port,
	exception_behavior_t		new_behavior,
	thread_state_flavor_t		new_flavor,
	exception_mask_array_t		masks,
	mach_msg_type_number_t		*CountCnt,
	exception_port_array_t		ports,
	exception_behavior_array_t	behaviors,
	thread_state_flavor_array_t	flavors)
{
	ipc_port_t		old_port[EXC_TYPES_COUNT];
	boolean_t privileged = current_task()->sec_token.val[0] == 0;
	unsigned int	i, j, count;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (exception_mask & ~EXC_MASK_VALID)
		return (KERN_INVALID_ARGUMENT);

	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_CODES) {

		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
			break;

		default:
			return (KERN_INVALID_ARGUMENT);
		}
	}

	/* reject thread-state flavors the architecture doesn't define */
	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
		return (KERN_INVALID_ARGUMENT);

	itk_lock(task);

	if (task->itk_self == IP_NULL) {
		itk_unlock(task);
		return (KERN_FAILURE);
	}

	assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
	for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
		if (exception_mask & (1 << i)) {
			for (j = 0; j < count; j++) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (task->exc_actions[i].port == ports[j] &&
				    task->exc_actions[i].behavior == behaviors[j] &&
				    task->exc_actions[i].flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			if (j == count) {
				/* new entry; j == count < *CountCnt here */
				masks[j] = (1 << i);
				ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
				behaviors[j] = task->exc_actions[i].behavior;
				flavors[j] = task->exc_actions[i].flavor;
				++count;
			}

			old_port[i] = task->exc_actions[i].port;
			task->exc_actions[i].port =
				ipc_port_copy_send(new_port);
			task->exc_actions[i].behavior = new_behavior;
			task->exc_actions[i].flavor = new_flavor;
			task->exc_actions[i].privileged = privileged;
		}
		else
			old_port[i] = IP_NULL;
	}

	itk_unlock(task);

	/*
	 * Release the old rights outside the lock.  i is unsigned, but
	 * the countdown is safe because FIRST_EXCEPTION >= 1 and only
	 * slots below the final i were written.
	 */
	while (--i >= FIRST_EXCEPTION) {
		if (IP_VALID(old_port[i]))
			ipc_port_release_send(old_port[i]);
	}

	if (IP_VALID(new_port))		 /* consume send right */
		ipc_port_release_send(new_port);

	*CountCnt = count;

	return (KERN_SUCCESS);
}
kern_return_t task_set_inherited_ports( task_t task, ipc_port_t bootstrap, norma_registered_port_array_t registered, unsigned count, exception_mask_array_t exc_masks, unsigned exc_count, exception_port_array_t exc_ports, exception_behavior_array_t exc_behaviors, exception_flavor_array_t exc_flavors) { exception_mask_t mask; ipc_port_t drop_ports[TSIP_DROP_MAX]; unsigned int drop_port_count = 0; unsigned i; unsigned j; if (task == TASK_NULL) return KERN_INVALID_ARGUMENT; if (count > TASK_PORT_REGISTER_MAX+1) return KERN_INVALID_ARGUMENT; mask = 0; for (i = 0; i < exc_count; i++) { if ((exc_masks[i] & ~EXC_MASK_ALL) || (mask & exc_masks[i])) return KERN_INVALID_ARGUMENT; mask |= exc_masks[i]; } if (exc_count >= EXC_TYPES_COUNT) return KERN_INVALID_ARGUMENT; itk_lock(task); /* * First, set up bootstrap port. */ if (IP_VALID(task->itk_bootstrap)) { assert(drop_port_count < TSIP_DROP_MAX); drop_ports[drop_port_count++] = task->itk_bootstrap; } task->itk_bootstrap = bootstrap; /* * Next, set up registered ports. */ for (i = 0; i < count; i++) { if (IP_VALID(task->itk_registered[i])) { assert(drop_port_count < TSIP_DROP_MAX); drop_ports[drop_port_count++] = task->itk_registered[i]; } task->itk_registered[i] = registered[i]; } /* * Finally, set up exception ports. */ for (i = 0; i < exc_count; i++) { mask = exc_masks[i] & ~EXC_MASK_ALL; j = 0; while (mask) { if (mask & 1) { if (IP_VALID(task->exc_actions[j].port)) { assert(drop_port_count < TSIP_DROP_MAX); drop_ports[drop_port_count++] = task->exc_actions[j].port; } task->exc_actions[j].port = ipc_port_copy_send(exc_ports[i]); task->exc_actions[j].behavior = exc_behaviors[i]; task->exc_actions[j].flavor = exc_flavors[i]; } mask >>= 1; j++; } if (IP_VALID(exc_ports[i])) { /* consume send right */ assert(drop_port_count < TSIP_DROP_MAX); drop_ports[drop_port_count++] = exc_ports[i]; } } itk_unlock(task); for (i = 0; i < drop_port_count; ++i) ipc_port_release_send(drop_ports[i]); return KERN_SUCCESS; }
/*
 *	Routine:	task_get_special_port
 *	Purpose:
 *		Hand out a send right for one of the task's special ports
 *		(including the ledger, gssd, and automountd variants).
 *	Returns:
 *		KERN_SUCCESS		Extracted a send right.
 *		KERN_INVALID_ARGUMENT	The task is null or which is bogus.
 *		KERN_FAILURE		The task has been terminated.
 */
kern_return_t
task_get_special_port(
	task_t		task,
	int		which,
	ipc_port_t	*portp)
{
	ipc_port_t	sright;

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	itk_lock(task);
	if (task->itk_self == IP_NULL) {
		/* task terminated while we were waiting for the lock */
		itk_unlock(task);
		return KERN_FAILURE;
	}

	switch (which) {
	case TASK_KERNEL_PORT:
		sright = ipc_port_copy_send(task->itk_sself);
		break;

	case TASK_NAME_PORT:
		/* kernel holds the name port; mint a fresh send right */
		sright = ipc_port_make_send(task->itk_nself);
		break;

	case TASK_HOST_PORT:
		sright = ipc_port_copy_send(task->itk_host);
		break;

	case TASK_BOOTSTRAP_PORT:
		sright = ipc_port_copy_send(task->itk_bootstrap);
		break;

	case TASK_WIRED_LEDGER_PORT:
		sright = ipc_port_copy_send(task->wired_ledger_port);
		break;

	case TASK_PAGED_LEDGER_PORT:
		sright = ipc_port_copy_send(task->paged_ledger_port);
		break;

	case TASK_SEATBELT_PORT:
		sright = ipc_port_copy_send(task->itk_seatbelt);
		break;

	case TASK_GSSD_PORT:
		sright = ipc_port_copy_send(task->itk_gssd);
		break;

	case TASK_ACCESS_PORT:
		sright = ipc_port_copy_send(task->itk_task_access);
		break;

	case TASK_AUTOMOUNTD_PORT:
		sright = ipc_port_copy_send(task->itk_automountd);
		break;

	default:
		itk_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}
	itk_unlock(task);

	*portp = sright;
	return KERN_SUCCESS;
}
/*
 *	Routine:	task_set_special_port
 *	Purpose:
 *		Install a send right as one of the task's special ports,
 *		releasing any right it displaces.  The seatbelt and task
 *		access ports are write-once.
 *	Returns:
 *		KERN_SUCCESS		Installed the port.
 *		KERN_INVALID_ARGUMENT	The task is null or which is bogus.
 *		KERN_FAILURE		The task has been terminated.
 *		KERN_NO_ACCESS		Attempted overwrite of a write-once
 *					port.
 */
kern_return_t
task_set_special_port(
	task_t		task,
	int		which,
	ipc_port_t	port)
{
	ipc_port_t	*slot;
	ipc_port_t	displaced;

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	switch (which) {
	case TASK_KERNEL_PORT:
		slot = &task->itk_sself;
		break;

	case TASK_HOST_PORT:
		slot = &task->itk_host;
		break;

	case TASK_BOOTSTRAP_PORT:
		slot = &task->itk_bootstrap;
		break;

	case TASK_SEATBELT_PORT:
		slot = &task->itk_seatbelt;
		break;

	case TASK_ACCESS_PORT:
		slot = &task->itk_task_access;
		break;

	case TASK_DEBUG_CONTROL_PORT:
		slot = &task->itk_debug_control;
		break;

	default:
		return KERN_INVALID_ARGUMENT;
	}/* switch */

	itk_lock(task);
	if (task->itk_self == IP_NULL) {
		/* task terminated while we were waiting for the lock */
		itk_unlock(task);
		return KERN_FAILURE;
	}

	/* do not allow overwrite of seatbelt or task access ports */
	if ((TASK_SEATBELT_PORT == which || TASK_ACCESS_PORT == which)
	    && IP_VALID(*slot)) {
		itk_unlock(task);
		return KERN_NO_ACCESS;
	}

	displaced = *slot;
	*slot = port;
	itk_unlock(task);

	/* release the displaced right outside the lock */
	if (IP_VALID(displaced)) {
		ipc_port_release_send(displaced);
	}
	return KERN_SUCCESS;
}
/*
 *	Routine:	task_set_special_port
 *	Purpose:
 *		Install a send right as one of the task's special ports
 *		(including ledger, gssd, and automountd variants),
 *		releasing any right it displaces.  The seatbelt and task
 *		access ports are write-once; MACF may veto the change.
 *	Returns:
 *		KERN_SUCCESS		Installed the port.
 *		KERN_INVALID_ARGUMENT	The task is null or which is bogus.
 *		KERN_FAILURE		The task has been terminated.
 *		KERN_NO_ACCESS		Write-once port already set, or
 *					MACF denied the operation.
 */
kern_return_t
task_set_special_port(
	task_t		task,
	int		which,
	ipc_port_t	port)
{
	ipc_port_t	*slot;
	ipc_port_t	displaced;

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	switch (which) {
	case TASK_KERNEL_PORT:
		slot = &task->itk_sself;
		break;

	case TASK_HOST_PORT:
		slot = &task->itk_host;
		break;

	case TASK_BOOTSTRAP_PORT:
		slot = &task->itk_bootstrap;
		break;

	case TASK_WIRED_LEDGER_PORT:
		slot = &task->wired_ledger_port;
		break;

	case TASK_PAGED_LEDGER_PORT:
		slot = &task->paged_ledger_port;
		break;

	case TASK_SEATBELT_PORT:
		slot = &task->itk_seatbelt;
		break;

	case TASK_GSSD_PORT:
		slot = &task->itk_gssd;
		break;

	case TASK_ACCESS_PORT:
		slot = &task->itk_task_access;
		break;

	case TASK_AUTOMOUNTD_PORT:
		slot = &task->itk_automountd;
		break;

	default:
		return KERN_INVALID_ARGUMENT;
	}/* switch */

	itk_lock(task);
	if (task->itk_self == IP_NULL) {
		/* task terminated while we were waiting for the lock */
		itk_unlock(task);
		return KERN_FAILURE;
	}

	/* do not allow overwrite of seatbelt or task access ports */
	if ((TASK_SEATBELT_PORT == which || TASK_ACCESS_PORT == which)
	    && IP_VALID(*slot)) {
		itk_unlock(task);
		return KERN_NO_ACCESS;
	}

#if CONFIG_MACF_MACH
	if (mac_task_check_service(current_task(), task, "set_special_port")) {
		itk_unlock(task);
		return KERN_NO_ACCESS;
	}
#endif

	displaced = *slot;
	*slot = port;
	itk_unlock(task);

	/* release the displaced right outside the lock */
	if (IP_VALID(displaced)) {
		ipc_port_release_send(displaced);
	}
	return KERN_SUCCESS;
}
/*
 *	Routine:	task_set_exception_ports
 *	Purpose:
 *		Install a new exception port/behavior/flavor for every
 *		exception selected by exception_mask.  Consumes the
 *		caller's send right for new_port; displaced rights are
 *		released after the lock is dropped.
 *	Returns:
 *		KERN_SUCCESS		Changed the ports.
 *		KERN_INVALID_ARGUMENT	Bad task, mask, behavior, or flavor.
 *		KERN_FAILURE		The task has been terminated.
 */
kern_return_t
task_set_exception_ports(
	task_t					task,
	exception_mask_t		exception_mask,
	ipc_port_t				new_port,
	exception_behavior_t	new_behavior,
	thread_state_flavor_t	new_flavor)
{
	ipc_port_t		old_port[EXC_TYPES_COUNT];
	boolean_t privileged = current_task()->sec_token.val[0] == 0;
	register int	i;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (exception_mask & ~EXC_MASK_VALID)
		return (KERN_INVALID_ARGUMENT);

	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_CODES) {

		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
			break;

		default:
			return (KERN_INVALID_ARGUMENT);
		}
	}

	/*
	 * BUG FIX (consistency with the other implementations of this
	 * routine): validate the thread-state flavor via the
	 * architecture's VALID_THREAD_STATE_FLAVOR macro.  Previously an
	 * arbitrary flavor was stored and later used when collecting
	 * thread state at exception-delivery time.
	 */
	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
		return (KERN_INVALID_ARGUMENT);

	itk_lock(task);

	if (task->itk_self == IP_NULL) {
		itk_unlock(task);
		return (KERN_FAILURE);
	}

	/* swap in the new action for every selected exception */
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		if (exception_mask & (1 << i)) {
			old_port[i] = task->exc_actions[i].port;
			task->exc_actions[i].port =
				ipc_port_copy_send(new_port);
			task->exc_actions[i].behavior = new_behavior;
			task->exc_actions[i].flavor = new_flavor;
			task->exc_actions[i].privileged = privileged;
		}
		else
			old_port[i] = IP_NULL;
	}

	itk_unlock(task);

	/* release the displaced rights outside the lock */
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
		if (IP_VALID(old_port[i]))
			ipc_port_release_send(old_port[i]);

	if (IP_VALID(new_port))		 /* consume send right */
		ipc_port_release_send(new_port);

	return (KERN_SUCCESS);
}
/*
 *	Routine:	ipc_task_terminate
 *	Purpose:
 *		Clean up a task's IPC state: sever the self, name, and
 *		resume kernel ports, release all naked send rights, and
 *		deallocate the kernel ports.
 *	Conditions:
 *		Nothing locked.  Once itk_self is cleared no other thread
 *		can install rights, so the fields are read safely after
 *		the lock is dropped.
 */
void
ipc_task_terminate(
	task_t		task)
{
	ipc_port_t kport;
	ipc_port_t nport;
	ipc_port_t rport;
	int i;

	itk_lock(task);
	kport = task->itk_self;

	if (kport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		return;
	}

	task->itk_self = IP_NULL;

	nport = task->itk_nself;
	assert(nport != IP_NULL);
	task->itk_nself = IP_NULL;

	/* the resume port is only allocated on demand; may be null */
	rport = task->itk_resume;
	task->itk_resume = IP_NULL;

	itk_unlock(task);

	/* release the naked send rights */

	if (IP_VALID(task->itk_sself))
		ipc_port_release_send(task->itk_sself);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(task->exc_actions[i].port)) {
			ipc_port_release_send(task->exc_actions[i].port);
		}
	}

	if (IP_VALID(task->itk_host))
		ipc_port_release_send(task->itk_host);

	if (IP_VALID(task->itk_bootstrap))
		ipc_port_release_send(task->itk_bootstrap);

	if (IP_VALID(task->itk_seatbelt))
		ipc_port_release_send(task->itk_seatbelt);

	if (IP_VALID(task->itk_gssd))
		ipc_port_release_send(task->itk_gssd);

	if (IP_VALID(task->itk_task_access))
		ipc_port_release_send(task->itk_task_access);

	if (IP_VALID(task->itk_debug_control))
		ipc_port_release_send(task->itk_debug_control);

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
		if (IP_VALID(task->itk_registered[i]))
			ipc_port_release_send(task->itk_registered[i]);

	/* destroy the kernel ports */
	ipc_port_dealloc_kernel(kport);
	ipc_port_dealloc_kernel(nport);
	if (rport != IP_NULL)
		ipc_port_dealloc_kernel(rport);

	itk_lock_destroy(task);
}
/*
 *	Routine:	ipc_task_init
 *	Purpose:
 *		Initialize a task's IPC state: create its space, its self
 *		and name kernel ports, and either default-initialize or
 *		inherit (from parent) the host, bootstrap, registered,
 *		exception, seatbelt, gssd, and task-access ports.
 *	Conditions:
 *		Nothing locked.  The task is not yet visible to other
 *		threads.  Panics on space/port allocation failure.
 */
void
ipc_task_init(
	task_t		task,
	task_t		parent)
{
	ipc_space_t space;
	ipc_port_t kport;
	ipc_port_t nport;
	kern_return_t kr;
	int i;


	kr = ipc_space_create(&ipc_table_entries[0], &space);
	if (kr != KERN_SUCCESS)
		panic("ipc_task_init");

	space->is_task = task;

	kport = ipc_port_alloc_kernel();
	if (kport == IP_NULL)
		panic("ipc_task_init");

	nport = ipc_port_alloc_kernel();
	if (nport == IP_NULL)
		panic("ipc_task_init");

	itk_lock_init(task);
	task->itk_self = kport;
	task->itk_nself = nport;
	task->itk_resume = IP_NULL; /* Lazily allocated on-demand */
	task->itk_sself = ipc_port_make_send(kport);
	task->itk_debug_control = IP_NULL;
	task->itk_space = space;

	if (parent == TASK_NULL) {
		/* first task: default-initialize everything */
		ipc_port_t port;

		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port = IP_NULL;
		}/* for */

		kr = host_get_host_port(host_priv_self(), &port);
		assert(kr == KERN_SUCCESS);
		task->itk_host = port;

		task->itk_bootstrap = IP_NULL;
		task->itk_seatbelt = IP_NULL;
		task->itk_gssd = IP_NULL;
		task->itk_task_access = IP_NULL;

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
			task->itk_registered[i] = IP_NULL;
	} else {
		itk_lock(parent);
		assert(parent->itk_self != IP_NULL);

		/* inherit registered ports */

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
			task->itk_registered[i] =
				ipc_port_copy_send(parent->itk_registered[i]);

		/* inherit exception and bootstrap ports */

		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port =
				ipc_port_copy_send(parent->exc_actions[i].port);
			task->exc_actions[i].flavor =
				parent->exc_actions[i].flavor;
			task->exc_actions[i].behavior =
				parent->exc_actions[i].behavior;
			task->exc_actions[i].privileged =
				parent->exc_actions[i].privileged;
		}/* for */
		task->itk_host =
			ipc_port_copy_send(parent->itk_host);

		task->itk_bootstrap =
			ipc_port_copy_send(parent->itk_bootstrap);

		task->itk_seatbelt =
			ipc_port_copy_send(parent->itk_seatbelt);

		task->itk_gssd =
			ipc_port_copy_send(parent->itk_gssd);

		task->itk_task_access =
			ipc_port_copy_send(parent->itk_task_access);

		itk_unlock(parent);
	}
}
kern_return_t task_get_exception_ports( task_t task, exception_mask_t exception_mask, exception_mask_array_t masks, mach_msg_type_number_t *CountCnt, exception_port_array_t ports, exception_behavior_array_t behaviors, thread_state_flavor_array_t flavors) { unsigned int i, j, count; if (task == TASK_NULL) return (KERN_INVALID_ARGUMENT); if (exception_mask & ~EXC_MASK_VALID) return (KERN_INVALID_ARGUMENT); itk_lock(task); if (task->itk_self == IP_NULL) { itk_unlock(task); return (KERN_FAILURE); } count = 0; for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) { if (exception_mask & (1 << i)) { for (j = 0; j < count; ++j) { /* * search for an identical entry, if found * set corresponding mask for this exception. */ if ( task->exc_actions[i].port == ports[j] && task->exc_actions[i].behavior == behaviors[j] && task->exc_actions[i].flavor == flavors[j] ) { masks[j] |= (1 << i); break; } } if (j == count) { masks[j] = (1 << i); ports[j] = ipc_port_copy_send(task->exc_actions[i].port); behaviors[j] = task->exc_actions[i].behavior; flavors[j] = task->exc_actions[i].flavor; ++count; if (count > *CountCnt) break; } } } itk_unlock(task); *CountCnt = count; return (KERN_SUCCESS); }
kern_return_t task_set_exception_ports( task_t task, exception_mask_t exception_mask, ipc_port_t new_port, exception_behavior_t new_behavior, thread_state_flavor_t new_flavor) { ipc_port_t old_port[EXC_TYPES_COUNT]; boolean_t privileged = current_task()->sec_token.val[0] == 0; register int i; if (task == TASK_NULL) return (KERN_INVALID_ARGUMENT); if (exception_mask & ~EXC_MASK_VALID) return (KERN_INVALID_ARGUMENT); if (IP_VALID(new_port)) { switch (new_behavior & ~MACH_EXCEPTION_CODES) { case EXCEPTION_DEFAULT: case EXCEPTION_STATE: case EXCEPTION_STATE_IDENTITY: break; default: return (KERN_INVALID_ARGUMENT); } } /* * Check the validity of the thread_state_flavor by calling the * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in * osfmk/mach/ARCHITECTURE/thread_status.h */ if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) return (KERN_INVALID_ARGUMENT); itk_lock(task); if (task->itk_self == IP_NULL) { itk_unlock(task); return (KERN_FAILURE); } for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) { if (exception_mask & (1 << i)) { old_port[i] = task->exc_actions[i].port; task->exc_actions[i].port = ipc_port_copy_send(new_port); task->exc_actions[i].behavior = new_behavior; task->exc_actions[i].flavor = new_flavor; task->exc_actions[i].privileged = privileged; } else old_port[i] = IP_NULL; } itk_unlock(task); for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) if (IP_VALID(old_port[i])) ipc_port_release_send(old_port[i]); if (IP_VALID(new_port)) /* consume send right */ ipc_port_release_send(new_port); return (KERN_SUCCESS); }