kern_return_t
task_get_inherited_ports(
    task_t                        task,
    ipc_port_t                    *bootstrap,
    norma_registered_port_array_t registered,
    unsigned                      *count,
    exception_mask_array_t        exc_masks,
    unsigned                      *exc_count,
    exception_port_array_t        exc_ports,
    exception_behavior_array_t    exc_behaviors,
    exception_flavor_array_t      exc_flavors)
{
    unsigned i;
    unsigned j;
    unsigned n;

    if (task == TASK_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    itk_lock(task);

    *bootstrap = ipc_port_copy_send(task->itk_bootstrap);

    for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
        registered[i] = ipc_port_copy_send(task->itk_registered[i]);
    *count = TASK_PORT_REGISTER_MAX;

    n = 0;
    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
        for (j = 0; j < n; j++) {
            /*
             * Search for an identical entry, if found
             * set corresponding mask for this exception.
             */
            if (task->exc_actions[i].port == exc_ports[j] &&
                task->exc_actions[i].behavior == exc_behaviors[j] &&
                task->exc_actions[i].flavor == exc_flavors[j]) {
                exc_masks[j] |= (1 << i);
                break;
            }
        }
        if (j == n) {
            exc_masks[j] = (1 << i);
            exc_ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
            exc_behaviors[j] = task->exc_actions[i].behavior;
            exc_flavors[j] = task->exc_actions[i].flavor;
            n++;
        }
    }
    *exc_count = n;

    itk_unlock(task);
    return KERN_SUCCESS;
}
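/*
 * For reference: a minimal sketch of ipc_port_copy_send(), which every
 * routine in this section uses to clone send rights.  This mirrors the
 * classic Mach implementation (a sketch, not necessarily any one
 * version): given a valid, active port it takes a port reference and
 * bumps ip_srights under the port lock, returning the port as a new
 * naked send right; IP_NULL and IP_DEAD pass through unchanged, and an
 * inactive port yields IP_DEAD.
 */
ipc_port_t
ipc_port_copy_send(
    ipc_port_t  port)
{
    ipc_port_t sright;

    if (IP_VALID(port)) {
        ip_lock(port);
        if (ip_active(port)) {
            assert(port->ip_srights > 0);
            ip_reference(port);
            port->ip_srights++;
            sright = port;
        } else
            sright = IP_DEAD;
        ip_unlock(port);
    } else
        sright = port;

    return sright;
}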
kern_return_t
task_get_special_port(
    task_t      task,
    int         which,
    ipc_port_t  *portp)
{
    ipc_port_t port;

    if (task == TASK_NULL)
        return KERN_INVALID_ARGUMENT;

    itk_lock(task);
    if (task->itk_self == IP_NULL) {
        itk_unlock(task);
        return KERN_FAILURE;
    }

    switch (which) {
    case TASK_KERNEL_PORT:
        port = ipc_port_copy_send(task->itk_sself);
        break;

    case TASK_NAME_PORT:
        port = ipc_port_make_send(task->itk_nself);
        break;

    case TASK_HOST_PORT:
        port = ipc_port_copy_send(task->itk_host);
        break;

    case TASK_BOOTSTRAP_PORT:
        port = ipc_port_copy_send(task->itk_bootstrap);
        break;

    case TASK_SEATBELT_PORT:
        port = ipc_port_copy_send(task->itk_seatbelt);
        break;

    case TASK_ACCESS_PORT:
        port = ipc_port_copy_send(task->itk_task_access);
        break;

    default:
        itk_unlock(task);
        return KERN_INVALID_ARGUMENT;
    }

    itk_unlock(task);
    *portp = port;
    return KERN_SUCCESS;
}
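/*
 * A hedged user-space illustration of the routine above: fetch the
 * current task's bootstrap port and drop the returned send right.
 * The helper name is hypothetical; assumes a Mach-based system with
 * <mach/mach.h>, and error handling is minimal for brevity.
 */
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <stdio.h>

static void
show_bootstrap_port(void)            /* hypothetical helper */
{
    mach_port_t bport = MACH_PORT_NULL;
    kern_return_t kr;

    /* Each successful call returns a new send right. */
    kr = task_get_special_port(mach_task_self(),
        TASK_BOOTSTRAP_PORT, &bport);
    if (kr != KERN_SUCCESS) {
        fprintf(stderr, "task_get_special_port: %s\n",
            mach_error_string(kr));
        return;
    }
    printf("bootstrap port name: 0x%x\n", bport);

    /* Release the send right we were just given. */
    (void) mach_port_deallocate(mach_task_self(), bport);
}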
void
ipc_task_init(
    task_t  task,
    task_t  parent)
{
    ipc_space_t space;
    ipc_port_t kport;
    kern_return_t kr;
    int i;

    kr = ipc_space_create(&space);
    if (kr != KERN_SUCCESS)
        panic("ipc_task_init");

    kport = ipc_port_alloc_kernel();
    if (kport == IP_NULL)
        panic("ipc_task_init");

    itk_lock_init(task);
    task->itk_self = kport;
    task->itk_sself = ipc_port_make_send(kport);
    task->itk_space = space;

    if (parent == TASK_NULL) {
        task->itk_exception = IP_NULL;
        task->itk_bootstrap = IP_NULL;
        for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
            task->itk_registered[i] = IP_NULL;
    } else {
        itk_lock(parent);
        assert(parent->itk_self != IP_NULL);

        /* inherit registered ports */
        for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
            task->itk_registered[i] =
                ipc_port_copy_send(parent->itk_registered[i]);

        /* inherit exception and bootstrap ports */
        task->itk_exception = ipc_port_copy_send(parent->itk_exception);
        task->itk_bootstrap = ipc_port_copy_send(parent->itk_bootstrap);

        itk_unlock(parent);
    }
}
kern_return_t
thread_get_special_port(
    thread_act_t  thr_act,
    int           which,
    ipc_port_t    *portp)
{
    ipc_port_t *whichp;
    ipc_port_t port;
    thread_t thread;

    if (!thr_act)
        return KERN_INVALID_ARGUMENT;

    thread = act_lock_thread(thr_act);
    switch (which) {
    case THREAD_KERNEL_PORT:
        whichp = &thr_act->ith_sself;
        break;

    default:
        act_unlock_thread(thr_act);
        return KERN_INVALID_ARGUMENT;
    }

    if (!thr_act->active) {
        act_unlock_thread(thr_act);
        return KERN_FAILURE;
    }

    port = ipc_port_copy_send(*whichp);
    act_unlock_thread(thr_act);
    *portp = port;
    return KERN_SUCCESS;
}
ipc_port_t
retrieve_task_self_fast(
    register task_t  task)
{
    register ipc_port_t port;

    assert(task == current_task());

    itk_lock(task);
    assert(task->itk_self != IP_NULL);

    if ((port = task->itk_sself) == task->itk_self) {
        /* no interposing */
        ip_lock(port);
        assert(ip_active(port));
        ip_reference(port);
        port->ip_srights++;
        ip_unlock(port);
    } else
        port = ipc_port_copy_send(port);
    itk_unlock(task);

    return port;
}
ipc_port_t
retrieve_thread_self_fast(
    thread_t  thread)
{
    register ipc_port_t port;

    assert(thread == current_thread());

    thread_mtx_lock(thread);
    assert(thread->ith_self != IP_NULL);

    if ((port = thread->ith_sself) == thread->ith_self) {
        /* no interposing */
        ip_lock(port);
        assert(ip_active(port));
        ip_reference(port);
        port->ip_srights++;
        ip_unlock(port);
    } else
        port = ipc_port_copy_send(port);
    thread_mtx_unlock(thread);

    return port;
}
kern_return_t
thread_get_special_port(
    thread_t    thread,
    int         which,
    ipc_port_t  *portp)
{
    kern_return_t result = KERN_SUCCESS;
    ipc_port_t *whichp;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    switch (which) {
    case THREAD_KERNEL_PORT:
        whichp = &thread->ith_sself;
        break;

    default:
        return (KERN_INVALID_ARGUMENT);
    }

    thread_mtx_lock(thread);

    if (thread->active)
        *portp = ipc_port_copy_send(*whichp);
    else
        result = KERN_FAILURE;

    thread_mtx_unlock(thread);

    return (result);
}
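/*
 * User-space sketch for the routine above (helper name hypothetical).
 * Note that mach_thread_self() itself returns a send right that must
 * be deallocated, as must the right returned through
 * thread_get_special_port().
 */
#include <mach/mach.h>

static kern_return_t
get_my_thread_kernel_port(mach_port_t *portp)   /* hypothetical helper */
{
    mach_port_t self = mach_thread_self();      /* new send right */
    kern_return_t kr;

    kr = thread_get_special_port(self, THREAD_KERNEL_PORT, portp);
    (void) mach_port_deallocate(mach_task_self(), self);
    return kr;
}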
/*
 *	Routine:	host_set_exception_ports [kernel call]
 *	Purpose:
 *		Sets the host exception port, flavor and
 *		behavior for the exception types specified by the mask.
 *		There will be one send right per exception per valid port.
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied send right.
 *	Returns:
 *		KERN_SUCCESS		Changed the special port.
 *		KERN_INVALID_ARGUMENT	The host_priv is not valid,
 *					Illegal mask bit set.
 *					Illegal exception behavior
 */
kern_return_t
host_set_exception_ports(
    host_priv_t            host_priv,
    exception_mask_t       exception_mask,
    ipc_port_t             new_port,
    exception_behavior_t   new_behavior,
    thread_state_flavor_t  new_flavor)
{
    register int i;
    ipc_port_t old_port[EXC_TYPES_COUNT];

    if (host_priv == HOST_PRIV_NULL) {
        return KERN_INVALID_ARGUMENT;
    }
    assert(host_priv == &realhost);

    if (exception_mask & ~EXC_MASK_VALID) {
        return KERN_INVALID_ARGUMENT;
    }

    if (IP_VALID(new_port)) {
        switch (new_behavior & ~MACH_EXCEPTION_CODES) {
        case EXCEPTION_DEFAULT:
        case EXCEPTION_STATE:
        case EXCEPTION_STATE_IDENTITY:
            break;
        default:
            return KERN_INVALID_ARGUMENT;
        }
    }

    /*
     * Cannot easily check "new_flavor", but that just means that
     * the flavor in the generated exception message might be garbage:
     * GIGO
     */
    host_lock(host_priv);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
        if (exception_mask & (1 << i)) {
            old_port[i] = host_priv->exc_actions[i].port;
            host_priv->exc_actions[i].port = ipc_port_copy_send(new_port);
            host_priv->exc_actions[i].behavior = new_behavior;
            host_priv->exc_actions[i].flavor = new_flavor;
        } else
            old_port[i] = IP_NULL;
    }

    /*
     * Consume send rights without any lock held.
     */
    host_unlock(host_priv);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
        if (IP_VALID(old_port[i]))
            ipc_port_release_send(old_port[i]);
    if (IP_VALID(new_port))        /* consume send right */
        ipc_port_release_send(new_port);

    return KERN_SUCCESS;
}
/*
 *	Routine:	host_get_exception_ports [kernel call]
 *	Purpose:
 *		Clones a send right for each of the host's exception
 *		ports specified in the mask and returns the behavior
 *		and flavor of said port.
 *
 *		Returns up to [in] CountCnt elements.
 *
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS		Extracted a send right.
 *		KERN_INVALID_ARGUMENT	Invalid host_priv specified,
 *					Invalid special port,
 *					Illegal mask bit set.
 *		KERN_FAILURE		The thread is dead.
 */
kern_return_t
host_get_exception_ports(
    host_priv_t                 host_priv,
    exception_mask_t            exception_mask,
    exception_mask_array_t      masks,
    mach_msg_type_number_t      *CountCnt,
    exception_port_array_t      ports,
    exception_behavior_array_t  behaviors,
    thread_state_flavor_array_t flavors)
{
    register int i, j, count;

    if (host_priv == HOST_PRIV_NULL)
        return KERN_INVALID_ARGUMENT;

    if (exception_mask & ~EXC_MASK_ALL) {
        return KERN_INVALID_ARGUMENT;
    }

    assert(host_priv == &realhost);

    host_lock(host_priv);

    count = 0;
    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
        if (exception_mask & (1 << i)) {
            for (j = 0; j < count; j++) {
                /*
                 * Search for an identical entry; if found,
                 * set the corresponding mask bit for this exception.
                 */
                if (host_priv->exc_actions[i].port == ports[j] &&
                    host_priv->exc_actions[i].behavior == behaviors[j] &&
                    host_priv->exc_actions[i].flavor == flavors[j]) {
                    masks[j] |= (1 << i);
                    break;
                }
            }
            if (j == count) {
                /*
                 * Check capacity before writing, so a new entry can
                 * never land one past the caller-supplied arrays.
                 */
                if (count >= *CountCnt)
                    break;
                masks[j] = (1 << i);
                ports[j] =
                    ipc_port_copy_send(host_priv->exc_actions[i].port);
                behaviors[j] = host_priv->exc_actions[i].behavior;
                flavors[j] = host_priv->exc_actions[i].flavor;
                count++;
            }
        }
    }
    host_unlock(host_priv);

    *CountCnt = count;
    return KERN_SUCCESS;
}
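/*
 * A hedged sketch of calling the routine above from user space.  The
 * privileged host port is normally obtainable only by root, so it is
 * taken as a parameter here rather than fetched; the helper name is
 * hypothetical.  Arrays sized EXC_TYPES_COUNT are always large enough,
 * and the count argument is in/out: capacity on entry, entries used on
 * return.
 */
#include <mach/mach.h>

static kern_return_t
dump_host_exc_ports(mach_port_t host_priv)      /* hypothetical helper */
{
    exception_mask_t masks[EXC_TYPES_COUNT];
    mach_port_t ports[EXC_TYPES_COUNT];
    exception_behavior_t behaviors[EXC_TYPES_COUNT];
    thread_state_flavor_t flavors[EXC_TYPES_COUNT];
    mach_msg_type_number_t count = EXC_TYPES_COUNT;

    return host_get_exception_ports(host_priv, EXC_MASK_ALL,
        masks, &count, ports, behaviors, flavors);
}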
mach_port_name_t
host_self_trap(void)
{
    ipc_port_t sright;

    sright = ipc_port_copy_send(current_task()->itk_host);
    return ipc_port_copyout_send(sright, current_space());
}
/*
 *	Copy a ledger
 */
ipc_port_t
ledger_copy(
    ledger_t  ledger)
{
    /* XXX reference counting */
    assert(ledger);
    return (ipc_port_copy_send(ledger->ledger_self));
}
/*
 *	Copy a ledger
 */
ipc_port_t
ledger_copy(
    ledger_t  ledger)
{
    if (ledger == LEDGER_NULL)
        return IP_NULL;
    return (ipc_port_copy_send(ledger->ledger_self));
}
mach_port_name_t
host_self_trap(
    __unused struct host_self_trap_args *args)
{
    ipc_port_t sright;
    mach_port_name_t name;

    sright = ipc_port_copy_send(current_task()->itk_host);
    name = ipc_port_copyout_send(sright, current_space());
    return name;
}
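/*
 * The trap above backs the user-space mach_host_self() stub: each call
 * returns a send right for the (unprivileged) host port.  A small
 * assumed example using it with host_info(); the helper name is
 * hypothetical.
 */
#include <mach/mach.h>
#include <stdio.h>

static void
print_cpu_count(void)                /* hypothetical helper */
{
    host_basic_info_data_t info;
    mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
    mach_port_t host = mach_host_self();    /* new send right */

    if (host_info(host, HOST_BASIC_INFO,
        (host_info_t) &info, &count) == KERN_SUCCESS)
        printf("available CPUs: %d\n", info.avail_cpus);

    (void) mach_port_deallocate(mach_task_self(), host);
}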
void
xmm_object_set(
    ipc_port_t  memory_object,
    ipc_port_t  xmm_object,
    boolean_t   make_copy)
{
    assert(!IP_IS_REMOTE(xmm_object));
    assert(ip_kotype(xmm_object) == IKOT_XMM_OBJECT);
    assert(memory_object->ip_norma_xmm_object == IP_NULL);

    memory_object->ip_norma_xmm_object = ipc_port_make_send(xmm_object);
    if (make_copy) {
        memory_object->ip_norma_xmm_object_refs = 1;
        /*
         * The return value is deliberately discarded: the call is
         * made only to bump the send-right count for the copy.
         */
        (void) ipc_port_copy_send(xmm_object);
    } else {
        memory_object->ip_norma_xmm_object_refs = 0;
    }
}
kern_return_t
mach_ports_lookup(
    task_t                  task,
    mach_port_array_t       *portsp,
    mach_msg_type_number_t  *portsCnt)
{
    void *memory;
    vm_size_t size;
    ipc_port_t *ports;
    int i;

    if (task == TASK_NULL)
        return KERN_INVALID_ARGUMENT;

    size = (vm_size_t)(TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));

    memory = kalloc(size);
    if (memory == 0)
        return KERN_RESOURCE_SHORTAGE;

    itk_lock(task);
    if (task->itk_self == IP_NULL) {
        itk_unlock(task);
        kfree(memory, size);
        return KERN_INVALID_ARGUMENT;
    }

    ports = (ipc_port_t *) memory;

    /*
     * Clone port rights.  Because kalloc'd memory
     * is wired, we won't fault while holding the task lock.
     */
    for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
        ports[i] = ipc_port_copy_send(task->itk_registered[i]);

    itk_unlock(task);

    *portsp = (mach_port_array_t) ports;
    *portsCnt = TASK_PORT_REGISTER_MAX;
    return KERN_SUCCESS;
}
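/*
 * A user-space sketch for mach_ports_lookup() above (helper name
 * hypothetical).  The MIG stub returns the registered-port array as
 * out-of-line memory, so the caller must vm_deallocate the array in
 * addition to releasing each returned right.
 */
#include <mach/mach.h>

static void
release_registered_ports(void)       /* hypothetical helper */
{
    mach_port_array_t ports;
    mach_msg_type_number_t count, i;

    if (mach_ports_lookup(mach_task_self(), &ports, &count) != KERN_SUCCESS)
        return;

    for (i = 0; i < count; i++)
        if (MACH_PORT_VALID(ports[i]))
            (void) mach_port_deallocate(mach_task_self(), ports[i]);

    (void) vm_deallocate(mach_task_self(), (vm_address_t) ports,
        count * sizeof(ports[0]));
}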
ipc_port_t
xmm_object_copy(
    ipc_port_t  memory_object)
{
    register ipc_port_t xmm_object;

    assert(!IP_WAS_REMOTE(memory_object));
    xmm_object = memory_object->ip_norma_xmm_object;
    if (xmm_object == IP_NULL) {
        return IP_NULL;
    }
    assert(!((int) xmm_object & 1));
    assert(!IP_IS_REMOTE(xmm_object));
    assert(ip_kotype(xmm_object) == IKOT_XMM_OBJECT);

    assert(memory_object->ip_norma_xmm_object_refs > 0);
    memory_object->ip_norma_xmm_object_refs++;
    return ipc_port_copy_send(xmm_object);
}
kern_return_t
host_get_special_port(
    host_priv_t   host_priv,
    __unused int  node,
    int           id,
    ipc_port_t    *portp)
{
    ipc_port_t port;

    if (host_priv == HOST_PRIV_NULL ||
        id == HOST_SECURITY_PORT ||
        id > HOST_MAX_SPECIAL_PORT ||
        id < 0)
        return KERN_INVALID_ARGUMENT;

    host_lock(host_priv);
    port = realhost.special[id];
    *portp = ipc_port_copy_send(port);
    host_unlock(host_priv);

    return KERN_SUCCESS;
}
void
svm_create_new_copy(
    xmm_obj_t  mobj)
{
    xmm_obj_t old_copy, new_copy, kobj;
    kern_return_t kr;
    ipc_port_t old_copy_pager;
    ipc_port_t new_copy_pager;
    int k_copy;
    kern_return_t result;

    assert(xmm_obj_lock_held(mobj));

    /*
     * Prevent others from examining copy until we are done.
     */
    assert(!MOBJ->copy_in_progress);
    MOBJ->copy_in_progress = TRUE;
    xmm_obj_unlock(mobj);

    new_copy_pager = ipc_port_alloc_kernel();
    if (new_copy_pager == IP_NULL) {
        panic("svm_create_new_copy: ipc_port_alloc_kernel");
    }
    /* we hold a naked receive right for new_copy_pager */
    (void) ipc_port_make_send(new_copy_pager);
    /* now we also hold a naked send right for new_copy_pager */

    xmm_user_create(new_copy_pager, &new_copy);
    xmm_svm_create(new_copy, new_copy_pager, &new_copy);
    kr = xmm_memory_manager_export(new_copy, new_copy_pager);
    if (kr != KERN_SUCCESS) {
        panic("m_ksvm_copy: xmm_memory_manager_export: %x\n", kr);
    }

    /* xmm_user should now have a send right; we will share it */
    ipc_port_release_send(new_copy_pager);
    assert(new_copy_pager->ip_srights > 0);

    /*
     * Copy-call objects are temporary and noncacheable.
     */
    NEW_COPY->temporary = TRUE;
    NEW_COPY->may_cache = FALSE;

    /*
     * Link old copy with new.
     *
     * XXX
     * Grabbing references creates an object collapsing problem.
     * Need to add vm_object_collapse-like code for when objs
     * are only referenced by their copies.
     */
    xmm_obj_lock(mobj);
    if ((old_copy = MOBJ->copy) != XMM_OBJ_NULL) {
        xmm_obj_lock(old_copy);
        xmm_obj_unlock(mobj);

        /*
         * Remember old_copy pager, which must be valid.
         */
        old_copy_pager = OLD_COPY->memory_object;
        assert(IP_VALID(old_copy_pager));

        /*
         * New copy steals reference to mobj from old copy.
         * Old copy creates a reference to new copy.
         */
        OLD_COPY->shadow = new_copy;
        NEW_COPY->copy = old_copy;
        xmm_obj_reference(new_copy);

        /*
         * old_copy must not be destroyed before the new copy is stable.
         */
        OLD_COPY->k_count++;

#if MACH_PAGEMAP
        /*
         * We freeze the old copy, now that it is isolated
         * from the permanent object by the new copy and
         * therefore will receive no more page pushes.
         * Note that the old copy might already be frozen
         * if it was the first copy of this object (see below).
         */
        if (OLD_COPY->pagemap == VM_EXTERNAL_NULL) {
            svm_create_pagemap_for_copy(old_copy, mobj);
        } else
            xmm_obj_unlock(old_copy);
#else /* MACH_PAGEMAP */
        xmm_obj_unlock(old_copy);
#endif /* MACH_PAGEMAP */

        /*
         * Immediately create the attributes array for the new copy.
         */
        NEW_COPY->bits.range = MOBJ->bits.range;
        NEW_COPY->num_pages = MOBJ->num_pages;
        svm_extend_new_copy(MOBJ->bits.bitmap, MOBJ->bits.level,
                            new_copy, 0);
    } else {
        /*
         * Remember old_copy pager, which is null.
         */
        old_copy_pager = IP_NULL;

        /*
         * New copy must create its own reference to mobj.
         */
        NEW_COPY->copy = XMM_OBJ_NULL;
        xmm_obj_reference(mobj);
        xmm_obj_unlock(mobj);

#if MACH_PAGEMAP
        /*
         * We freeze the first copy of a copy-call object
         * as soon as that copy is created (with no pages
         * in it), to take advantage of the fact that many
         * permanent objects never push any pages into
         * their copies: executable files, for example.
         */
        assert(NEW_COPY->pagemap == VM_EXTERNAL_NULL);
        NEW_COPY->existence_size = ptoa(MOBJ->num_pages);
        NEW_COPY->pagemap = vm_external_create(NEW_COPY->existence_size);
#endif /* MACH_PAGEMAP */
    }

    /*
     * Link mobj with its new copy.
     * Appropriate references have been taken above.
     */
    NEW_COPY->shadow = mobj;
    xmm_obj_lock(mobj);
    MOBJ->copy = new_copy;

    /*
     * Set each page needs_copy so that we know to push a copy
     * of the page if that page gets written.
     *
     * One might think that we should remove write protection
     * on all pages here, since we know for a fact that the
     * kernel has done so.  However, this is not correct.
     * We need to remember that the pages are possibly
     * dirty; however, we cannot set them dirty, since
     * they might not be.  If we set a page dirty, then
     * it must be dirty.  If we leave a page written, then
     * we know that we have to ask the kernel for the page,
     * since it might be dirty, but we can tolerate it if
     * the page is in fact not dirty.
     *
     * In other words, we have chosen to have write access
     * be a hint that the page is dirty, but to have the
     * dirty flag be truth, not a hint.
     *
     * This would seem to introduce a new problem, namely
     * a writer making a write request.  However, this isn't
     * a new problem; it's just one we haven't seen or
     * thought of before.
     *
     * XXX
     * Pushing zero-fill pages seems like a total waste.
     *
     * XXX
     * We used to set the (obsoleted) readonly flag here -- why?
     *
     * Note that this object may previously have had a copy
     * object, and thus we may be resetting some set bits here.
     * The fact that we always set all bits when we attach
     * a new copy object is what makes it correct for
     * M_GET_NEEDS_COPY to ignore needs_copy bits when there
     * is no copy object.  That is, we don't have to make the
     * bits consistent when there is no copy object, because
     * we will make them consistent here when we attach a
     * new copy object.
     */
    svm_extend_set_copy(MOBJ->bits.bitmap, MOBJ->bits.level);

    /*
     * Tell all kernels about the new copy object, while
     * asking them to protect their pages in the copied
     * object.
     *
     * XXX
     * We should mark "copy in progress" for any other kernels
     * that come in here with an m_ksvm_copy, so that we can
     * return the same copy.
     *
     * The memory_object_create_copy call is a synchronous rpc.
     * This ensures that the pages are protected by the time we
     * exit the loop.  We could have an explicit return message
     * to reduce latency.
     *
     * XXX
     * We could reduce the number of calls in some cases
     * by calling only those kernels which either have some
     * write access or which have a shadow chain for this object.
     */
    svm_klist_first(mobj, &kobj);
    xmm_obj_unlock(mobj);

    /*
     * Reference the XMM memory_object port in order to avoid calling
     * xmm_obj_destroy during copy creation.
     */
    kr = xmm_object_reference(new_copy_pager);
    assert(kr == KERN_SUCCESS);

    while (kobj) {
        xmm_obj_unlock(kobj);
        ipc_port_copy_send(new_copy_pager);
        kr = K_CREATE_COPY(kobj, new_copy_pager, &result);
        assert(kr == KERN_SUCCESS);
        xmm_obj_lock(mobj);
        xmm_obj_lock(kobj);
        svm_klist_next(mobj, &kobj, FALSE);
    }

    /*
     * Release the XMM memory_object port, destroying the new object
     * if needed.
     */
    xmm_obj_lock(new_copy);
    if (NEW_COPY->state == MOBJ_STATE_UNCALLED)
        svm_mobj_initialize(new_copy, TRUE, 0);
    xmm_obj_unlock(new_copy);
    xmm_object_release(new_copy_pager);

    /*
     * Wake up anyone waiting for this copy to complete.
     */
    xmm_obj_lock(mobj);
    MOBJ->copy_in_progress = FALSE;
    if (MOBJ->copy_wanted) {
        MOBJ->copy_wanted = FALSE;
        thread_wakeup(svm_copy_event(mobj));
    }

    /*
     * Release the old_copy extra reference if needed.
     */
    if (old_copy != XMM_OBJ_NULL) {
        xmm_obj_lock(old_copy);
        if (--OLD_COPY->k_count == 0 &&
            OLD_COPY->state == MOBJ_STATE_SHOULD_TERMINATE) {
            xmm_obj_unlock(mobj);
            xmm_terminate_pending--;
            xmm_svm_destroy(old_copy);      /* consumes lock */
            xmm_obj_lock(mobj);
        } else
            xmm_obj_unlock(old_copy);
    }
}
kern_return_t
host_swap_exception_ports(
    host_priv_t                 host_priv,
    exception_mask_t            exception_mask,
    ipc_port_t                  new_port,
    exception_behavior_t        new_behavior,
    thread_state_flavor_t       new_flavor,
    exception_mask_array_t      masks,
    mach_msg_type_number_t      *CountCnt,
    exception_port_array_t      ports,
    exception_behavior_array_t  behaviors,
    thread_state_flavor_array_t flavors)
{
    unsigned int i, j, count;
    ipc_port_t old_port[EXC_TYPES_COUNT];

    if (host_priv == HOST_PRIV_NULL)
        return KERN_INVALID_ARGUMENT;

    if (exception_mask & ~EXC_MASK_VALID) {
        return KERN_INVALID_ARGUMENT;
    }

    if (IP_VALID(new_port)) {
        switch (new_behavior) {
        case EXCEPTION_DEFAULT:
        case EXCEPTION_STATE:
        case EXCEPTION_STATE_IDENTITY:
            break;
        default:
            return KERN_INVALID_ARGUMENT;
        }
    }

    /*
     * Cannot easily check "new_flavor", but that just means that
     * the flavor in the generated exception message might be garbage:
     * GIGO
     */
    host_lock(host_priv);

    assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
    for (count = 0, i = FIRST_EXCEPTION;
         i < EXC_TYPES_COUNT && count < *CountCnt;
         i++) {
        if (exception_mask & (1 << i)) {
            for (j = 0; j < count; j++) {
                /*
                 * Search for an identical entry; if found,
                 * set the corresponding mask bit for this exception.
                 */
                if (host_priv->exc_actions[i].port == ports[j] &&
                    host_priv->exc_actions[i].behavior == behaviors[j] &&
                    host_priv->exc_actions[i].flavor == flavors[j]) {
                    masks[j] |= (1 << i);
                    break;
                }
            }
            if (j == count) {
                masks[j] = (1 << i);
                ports[j] =
                    ipc_port_copy_send(host_priv->exc_actions[i].port);
                behaviors[j] = host_priv->exc_actions[i].behavior;
                flavors[j] = host_priv->exc_actions[i].flavor;
                count++;
            }

            old_port[i] = host_priv->exc_actions[i].port;
            host_priv->exc_actions[i].port = ipc_port_copy_send(new_port);
            host_priv->exc_actions[i].behavior = new_behavior;
            host_priv->exc_actions[i].flavor = new_flavor;
        } else
            old_port[i] = IP_NULL;
    }
    host_unlock(host_priv);

    /*
     * Consume send rights without any lock held.
     */
    while (--i >= FIRST_EXCEPTION) {
        if (IP_VALID(old_port[i]))
            ipc_port_release_send(old_port[i]);
    }
    if (IP_VALID(new_port))        /* consume send right */
        ipc_port_release_send(new_port);
    *CountCnt = count;

    return KERN_SUCCESS;
}
kern_return_t
task_set_inherited_ports(
    task_t                        task,
    ipc_port_t                    bootstrap,
    norma_registered_port_array_t registered,
    unsigned                      count,
    exception_mask_array_t        exc_masks,
    unsigned                      exc_count,
    exception_port_array_t        exc_ports,
    exception_behavior_array_t    exc_behaviors,
    exception_flavor_array_t      exc_flavors)
{
    exception_mask_t mask;
    ipc_port_t drop_ports[TSIP_DROP_MAX];
    unsigned int drop_port_count = 0;
    unsigned i;
    unsigned j;

    if (task == TASK_NULL)
        return KERN_INVALID_ARGUMENT;

    if (count > TASK_PORT_REGISTER_MAX + 1)
        return KERN_INVALID_ARGUMENT;

    mask = 0;
    for (i = 0; i < exc_count; i++) {
        if ((exc_masks[i] & ~EXC_MASK_ALL) || (mask & exc_masks[i]))
            return KERN_INVALID_ARGUMENT;
        mask |= exc_masks[i];
    }

    if (exc_count >= EXC_TYPES_COUNT)
        return KERN_INVALID_ARGUMENT;

    itk_lock(task);

    /*
     * First, set up bootstrap port.
     */
    if (IP_VALID(task->itk_bootstrap)) {
        assert(drop_port_count < TSIP_DROP_MAX);
        drop_ports[drop_port_count++] = task->itk_bootstrap;
    }
    task->itk_bootstrap = bootstrap;

    /*
     * Next, set up registered ports.
     */
    for (i = 0; i < count; i++) {
        if (IP_VALID(task->itk_registered[i])) {
            assert(drop_port_count < TSIP_DROP_MAX);
            drop_ports[drop_port_count++] = task->itk_registered[i];
        }
        task->itk_registered[i] = registered[i];
    }

    /*
     * Finally, set up exception ports.
     */
    for (i = 0; i < exc_count; i++) {
        /*
         * Every bit in exc_masks[i] was validated against
         * EXC_MASK_ALL above, so walk the mask as-is.  (Masking
         * with ~EXC_MASK_ALL here, as an earlier version did,
         * would always yield zero and skip the loop.)
         */
        mask = exc_masks[i];
        j = 0;
        while (mask) {
            if (mask & 1) {
                if (IP_VALID(task->exc_actions[j].port)) {
                    assert(drop_port_count < TSIP_DROP_MAX);
                    drop_ports[drop_port_count++] =
                        task->exc_actions[j].port;
                }
                task->exc_actions[j].port =
                    ipc_port_copy_send(exc_ports[i]);
                task->exc_actions[j].behavior = exc_behaviors[i];
                task->exc_actions[j].flavor = exc_flavors[i];
            }
            mask >>= 1;
            j++;
        }
        if (IP_VALID(exc_ports[i])) {        /* consume send right */
            assert(drop_port_count < TSIP_DROP_MAX);
            drop_ports[drop_port_count++] = exc_ports[i];
        }
    }

    itk_unlock(task);

    for (i = 0; i < drop_port_count; ++i)
        ipc_port_release_send(drop_ports[i]);

    return KERN_SUCCESS;
}
kern_return_t
task_get_exception_ports(
    task_t                      task,
    exception_mask_t            exception_mask,
    exception_mask_array_t      masks,
    mach_msg_type_number_t      *CountCnt,
    exception_port_array_t      ports,
    exception_behavior_array_t  behaviors,
    thread_state_flavor_array_t flavors)
{
    unsigned int i, j, count;

    if (task == TASK_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (exception_mask & ~EXC_MASK_VALID)
        return (KERN_INVALID_ARGUMENT);

    itk_lock(task);
    if (task->itk_self == IP_NULL) {
        itk_unlock(task);
        return (KERN_FAILURE);
    }

    count = 0;

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if (exception_mask & (1 << i)) {
            for (j = 0; j < count; ++j) {
                /*
                 * Search for an identical entry; if found,
                 * set the corresponding mask bit for this exception.
                 */
                if (task->exc_actions[i].port == ports[j] &&
                    task->exc_actions[i].behavior == behaviors[j] &&
                    task->exc_actions[i].flavor == flavors[j]) {
                    masks[j] |= (1 << i);
                    break;
                }
            }
            if (j == count) {
                /*
                 * Check capacity before writing, so a new entry can
                 * never land one past the caller-supplied arrays.
                 */
                if (count >= *CountCnt)
                    break;
                masks[j] = (1 << i);
                ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
                behaviors[j] = task->exc_actions[i].behavior;
                flavors[j] = task->exc_actions[i].flavor;
                ++count;
            }
        }
    }

    itk_unlock(task);

    *CountCnt = count;

    return (KERN_SUCCESS);
}
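/*
 * User-space sketch for the routine above (helper name hypothetical).
 * Arrays sized EXC_TYPES_COUNT are always large enough; the count is
 * in/out, holding the array capacity on entry and the number of
 * distinct (port, behavior, flavor) entries on return.
 */
#include <mach/mach.h>
#include <stdio.h>

static void
show_task_exc_ports(void)            /* hypothetical helper */
{
    exception_mask_t masks[EXC_TYPES_COUNT];
    mach_port_t ports[EXC_TYPES_COUNT];
    exception_behavior_t behaviors[EXC_TYPES_COUNT];
    thread_state_flavor_t flavors[EXC_TYPES_COUNT];
    mach_msg_type_number_t count = EXC_TYPES_COUNT;
    unsigned int i;

    if (task_get_exception_ports(mach_task_self(), EXC_MASK_ALL,
        masks, &count, ports, behaviors, flavors) != KERN_SUCCESS)
        return;

    for (i = 0; i < count; i++)
        printf("mask 0x%x -> port 0x%x, behavior %d, flavor %d\n",
            masks[i], ports[i], behaviors[i], flavors[i]);
}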
kern_return_t
task_swap_exception_ports(
    task_t                      task,
    exception_mask_t            exception_mask,
    ipc_port_t                  new_port,
    exception_behavior_t        new_behavior,
    thread_state_flavor_t       new_flavor,
    exception_mask_array_t      masks,
    mach_msg_type_number_t      *CountCnt,
    exception_port_array_t      ports,
    exception_behavior_array_t  behaviors,
    thread_state_flavor_array_t flavors)
{
    ipc_port_t old_port[EXC_TYPES_COUNT];
    boolean_t privileged = current_task()->sec_token.val[0] == 0;
    unsigned int i, j, count;

    if (task == TASK_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (exception_mask & ~EXC_MASK_VALID)
        return (KERN_INVALID_ARGUMENT);

    if (IP_VALID(new_port)) {
        switch (new_behavior & ~MACH_EXCEPTION_CODES) {
        case EXCEPTION_DEFAULT:
        case EXCEPTION_STATE:
        case EXCEPTION_STATE_IDENTITY:
            break;
        default:
            return (KERN_INVALID_ARGUMENT);
        }
    }

    if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
        return (KERN_INVALID_ARGUMENT);

    itk_lock(task);
    if (task->itk_self == IP_NULL) {
        itk_unlock(task);
        return (KERN_FAILURE);
    }

    assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
    for (count = 0, i = FIRST_EXCEPTION;
         i < EXC_TYPES_COUNT && count < *CountCnt;
         ++i) {
        if (exception_mask & (1 << i)) {
            for (j = 0; j < count; j++) {
                /*
                 * Search for an identical entry; if found,
                 * set the corresponding mask bit for this exception.
                 */
                if (task->exc_actions[i].port == ports[j] &&
                    task->exc_actions[i].behavior == behaviors[j] &&
                    task->exc_actions[i].flavor == flavors[j]) {
                    masks[j] |= (1 << i);
                    break;
                }
            }
            if (j == count) {
                masks[j] = (1 << i);
                ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
                behaviors[j] = task->exc_actions[i].behavior;
                flavors[j] = task->exc_actions[i].flavor;
                ++count;
            }

            old_port[i] = task->exc_actions[i].port;
            task->exc_actions[i].port = ipc_port_copy_send(new_port);
            task->exc_actions[i].behavior = new_behavior;
            task->exc_actions[i].flavor = new_flavor;
            task->exc_actions[i].privileged = privileged;
        } else
            old_port[i] = IP_NULL;
    }

    itk_unlock(task);

    while (--i >= FIRST_EXCEPTION) {
        if (IP_VALID(old_port[i]))
            ipc_port_release_send(old_port[i]);
    }

    if (IP_VALID(new_port))        /* consume send right */
        ipc_port_release_send(new_port);

    *CountCnt = count;

    return (KERN_SUCCESS);
}
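/*
 * Sketch of the save-and-replace pattern the routine above enables:
 * install new_port for the masked exceptions while collecting the old
 * settings in one call, so they can be restored later with
 * task_set_exception_ports().  Helper name and parameter layout are
 * hypothetical.
 */
#include <mach/mach.h>

static kern_return_t
swap_in_handler(                     /* hypothetical helper */
    mach_port_t            new_port,
    exception_mask_t       old_masks[EXC_TYPES_COUNT],
    mach_port_t            old_ports[EXC_TYPES_COUNT],
    exception_behavior_t   old_behaviors[EXC_TYPES_COUNT],
    thread_state_flavor_t  old_flavors[EXC_TYPES_COUNT],
    mach_msg_type_number_t *countp)
{
    *countp = EXC_TYPES_COUNT;       /* in: capacity, out: entries used */
    return task_swap_exception_ports(mach_task_self(),
        EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION,
        new_port, EXCEPTION_DEFAULT, THREAD_STATE_NONE,
        old_masks, countp, old_ports, old_behaviors, old_flavors);
}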
kern_return_t
task_set_exception_ports(
    task_t                 task,
    exception_mask_t       exception_mask,
    ipc_port_t             new_port,
    exception_behavior_t   new_behavior,
    thread_state_flavor_t  new_flavor)
{
    ipc_port_t old_port[EXC_TYPES_COUNT];
    boolean_t privileged = current_task()->sec_token.val[0] == 0;
    register int i;

    if (task == TASK_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (exception_mask & ~EXC_MASK_VALID)
        return (KERN_INVALID_ARGUMENT);

    if (IP_VALID(new_port)) {
        switch (new_behavior & ~MACH_EXCEPTION_CODES) {
        case EXCEPTION_DEFAULT:
        case EXCEPTION_STATE:
        case EXCEPTION_STATE_IDENTITY:
            break;
        default:
            return (KERN_INVALID_ARGUMENT);
        }
    }

    /*
     * Check the validity of the thread_state_flavor by calling the
     * VALID_THREAD_STATE_FLAVOR architecture-dependent macro defined in
     * osfmk/mach/ARCHITECTURE/thread_status.h
     */
    if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
        return (KERN_INVALID_ARGUMENT);

    itk_lock(task);
    if (task->itk_self == IP_NULL) {
        itk_unlock(task);
        return (KERN_FAILURE);
    }

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if (exception_mask & (1 << i)) {
            old_port[i] = task->exc_actions[i].port;
            task->exc_actions[i].port = ipc_port_copy_send(new_port);
            task->exc_actions[i].behavior = new_behavior;
            task->exc_actions[i].flavor = new_flavor;
            task->exc_actions[i].privileged = privileged;
        } else
            old_port[i] = IP_NULL;
    }

    itk_unlock(task);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
        if (IP_VALID(old_port[i]))
            ipc_port_release_send(old_port[i]);

    if (IP_VALID(new_port))        /* consume send right */
        ipc_port_release_send(new_port);

    return (KERN_SUCCESS);
}
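/*
 * A hedged end-to-end sketch for the routine above (helper name
 * hypothetical): allocate a receive right, give ourselves a send right
 * on it, and install it for EXC_BAD_ACCESS.  A real handler would then
 * service the port with mach_msg() and the exc/mach_exc MIG routines;
 * that part is omitted.
 */
#include <mach/mach.h>

static kern_return_t
install_bad_access_handler(mach_port_t *handlerp)   /* hypothetical */
{
    mach_port_t handler = MACH_PORT_NULL;
    kern_return_t kr;

    kr = mach_port_allocate(mach_task_self(),
        MACH_PORT_RIGHT_RECEIVE, &handler);
    if (kr != KERN_SUCCESS)
        return kr;

    kr = mach_port_insert_right(mach_task_self(), handler, handler,
        MACH_MSG_TYPE_MAKE_SEND);
    if (kr != KERN_SUCCESS)
        return kr;

    /*
     * MACH_EXCEPTION_CODES requests 64-bit exception codes;
     * THREAD_STATE_NONE is fine because EXCEPTION_DEFAULT messages
     * carry no thread state.
     */
    kr = task_set_exception_ports(mach_task_self(), EXC_MASK_BAD_ACCESS,
        handler, EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES,
        THREAD_STATE_NONE);
    if (kr == KERN_SUCCESS)
        *handlerp = handler;
    return kr;
}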
void
ipc_task_init(
    task_t  task,
    task_t  parent)
{
    ipc_space_t space;
    ipc_port_t kport;
    ipc_port_t nport;
    kern_return_t kr;
    int i;

    kr = ipc_space_create(&ipc_table_entries[0], &space);
    if (kr != KERN_SUCCESS)
        panic("ipc_task_init");

    space->is_task = task;

    kport = ipc_port_alloc_kernel();
    if (kport == IP_NULL)
        panic("ipc_task_init");

    nport = ipc_port_alloc_kernel();
    if (nport == IP_NULL)
        panic("ipc_task_init");

    itk_lock_init(task);
    task->itk_self = kport;
    task->itk_nself = nport;
    task->itk_resume = IP_NULL;        /* Lazily allocated on-demand */
    task->itk_sself = ipc_port_make_send(kport);
    task->itk_debug_control = IP_NULL;
    task->itk_space = space;

    if (parent == TASK_NULL) {
        ipc_port_t port;

        for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
            task->exc_actions[i].port = IP_NULL;
        }

        kr = host_get_host_port(host_priv_self(), &port);
        assert(kr == KERN_SUCCESS);
        task->itk_host = port;

        task->itk_bootstrap = IP_NULL;
        task->itk_seatbelt = IP_NULL;
        task->itk_gssd = IP_NULL;
        task->itk_task_access = IP_NULL;

        for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
            task->itk_registered[i] = IP_NULL;
    } else {
        itk_lock(parent);
        assert(parent->itk_self != IP_NULL);

        /* inherit registered ports */
        for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
            task->itk_registered[i] =
                ipc_port_copy_send(parent->itk_registered[i]);

        /* inherit exception and bootstrap ports */
        for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
            task->exc_actions[i].port =
                ipc_port_copy_send(parent->exc_actions[i].port);
            task->exc_actions[i].flavor = parent->exc_actions[i].flavor;
            task->exc_actions[i].behavior =
                parent->exc_actions[i].behavior;
            task->exc_actions[i].privileged =
                parent->exc_actions[i].privileged;
        }

        task->itk_host = ipc_port_copy_send(parent->itk_host);
        task->itk_bootstrap = ipc_port_copy_send(parent->itk_bootstrap);
        task->itk_seatbelt = ipc_port_copy_send(parent->itk_seatbelt);
        task->itk_gssd = ipc_port_copy_send(parent->itk_gssd);
        task->itk_task_access =
            ipc_port_copy_send(parent->itk_task_access);

        itk_unlock(parent);
    }
}
kern_return_t
m_ksvm_copy(
    xmm_obj_t  kobj)
{
    kern_return_t kr;
    xmm_obj_t mobj = KOBJ->mobj;
    xmm_obj_t old_copy;
    kern_return_t result;
    ipc_port_t port;

#ifdef lint
    (void) M_COPY(kobj);
#endif /* lint */

    assert(kobj->class == &ksvm_class);

    /*
     * If we can use a preexisting copy object, do so.
     * Otherwise, create a new copy object.
     */
    xmm_obj_lock(mobj);
    while ((old_copy = svm_get_stable_copy(mobj)) != XMM_OBJ_NULL) {
        xmm_obj_lock(old_copy);
        if (OLD_COPY->dirty_copy ||
            OLD_COPY->state == MOBJ_STATE_TERMINATED ||
            OLD_COPY->state == MOBJ_STATE_SHOULD_TERMINATE) {
            xmm_obj_unlock(old_copy);
            break;
        }
        xmm_obj_unlock(mobj);
        xmm_obj_unlock(old_copy);

        /*
         * Reference the local XMM old_copy stack to prevent any call
         * to xmm_obj_destroy during the copy, in order to avoid a
         * deadlock between a terminating memory_object and the
         * creation of an XMM stack based on the same memory_object.
         */
        kr = xmm_object_reference(OLD_COPY->memory_object);
        if (kr == KERN_ABORTED) {
            xmm_obj_lock(mobj);
            break;
        }
        assert(kr == KERN_SUCCESS);

        port = ipc_port_copy_send(OLD_COPY->memory_object);
        assert(IP_VALID(port));
        kr = K_CREATE_COPY(kobj, port, &result);
        assert(kr == KERN_SUCCESS);

        /*
         * Release the reference of the XMM stack.
         */
        xmm_object_release(OLD_COPY->memory_object);

        if (result == KERN_SUCCESS || result == KERN_ABORTED) {
#if MACH_ASSERT
            xmm_obj_lock(mobj);
            old_copy = svm_get_stable_copy(mobj);
            xmm_obj_unlock(mobj);
#endif /* MACH_ASSERT */
            assert(!OLD_COPY->dirty_copy &&
                   OLD_COPY->state != MOBJ_STATE_TERMINATED &&
                   OLD_COPY->state != MOBJ_STATE_SHOULD_TERMINATE);
            return KERN_SUCCESS;
        }
        assert(result == KERN_INVALID_ARGUMENT);
        xmm_obj_lock(mobj);
    }
    svm_create_new_copy(mobj);
    xmm_obj_unlock(mobj);

    return KERN_SUCCESS;
}
kern_return_t
task_set_exception_ports(
    task_t                 task,
    exception_mask_t       exception_mask,
    ipc_port_t             new_port,
    exception_behavior_t   new_behavior,
    thread_state_flavor_t  new_flavor)
{
    ipc_port_t old_port[EXC_TYPES_COUNT];
    boolean_t privileged = current_task()->sec_token.val[0] == 0;
    register int i;

    if (task == TASK_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (exception_mask & ~EXC_MASK_VALID)
        return (KERN_INVALID_ARGUMENT);

    if (IP_VALID(new_port)) {
        switch (new_behavior & ~MACH_EXCEPTION_CODES) {
        case EXCEPTION_DEFAULT:
        case EXCEPTION_STATE:
        case EXCEPTION_STATE_IDENTITY:
            break;
        default:
            return (KERN_INVALID_ARGUMENT);
        }
    }

    itk_lock(task);
    if (task->itk_self == IP_NULL) {
        itk_unlock(task);
        return (KERN_FAILURE);
    }

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if (exception_mask & (1 << i)) {
            old_port[i] = task->exc_actions[i].port;
            task->exc_actions[i].port = ipc_port_copy_send(new_port);
            task->exc_actions[i].behavior = new_behavior;
            task->exc_actions[i].flavor = new_flavor;
            task->exc_actions[i].privileged = privileged;
        } else
            old_port[i] = IP_NULL;
    }

    itk_unlock(task);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
        if (IP_VALID(old_port[i]))
            ipc_port_release_send(old_port[i]);

    if (IP_VALID(new_port))        /* consume send right */
        ipc_port_release_send(new_port);

    return (KERN_SUCCESS);
}