static void k5_cli_ipc_thread_fini (void) { int err = 0; err = k5_mutex_lock (&g_service_ports_mutex); if (!err) { int i; for (i = 0; i < KIPC_SERVICE_COUNT; i++) { if (MACH_PORT_VALID (g_service_ports[i].service_port)) { mach_port_destroy (mach_task_self (), g_service_ports[i].service_port); g_service_ports[i].service_port = MACH_PORT_NULL; } } k5_mutex_unlock (&g_service_ports_mutex); } k5_key_delete (K5_KEY_IPC_CONNECTION_INFO); k5_mutex_destroy (&g_service_ports_mutex); }
// Shut down the LUN user-client interface: remove the dispatcher from
// the run loop, close the kernel connection, and release the async
// notification port.  Always reports success.
IOReturn IOFireWireSBP2LibLUN::stop( void )
{
	FWLOG(( "IOFireWireSBP2LibLUN : stop\n" ));

	removeIODispatcherFromRunLoop();

	// Close our io_connect_t handle to the kernel driver, if open.
	if( fConnection )
	{
		FWLOG(( "IOFireWireSBP2LibLUN : IOServiceClose connection = %d\n", fConnection ));
		IOServiceClose( fConnection );
		fConnection = MACH_PORT_NULL;
	}

	// Tear down the Mach port used for async completion callbacks.
	if( MACH_PORT_NULL != fAsyncPort )
	{
		FWLOG(( "IOFireWireSBP2LibLUN : release fAsyncPort\n" ));
		mach_port_destroy( mach_task_self(), fAsyncPort );
		fAsyncPort = MACH_PORT_NULL;
	}

	return kIOReturnSuccess;
}
void destroy_guarded_mach_port() { mach_port_t port; mach_port_options_t options; mach_port_context_t gval = CONTEXT_VALUE1; int kret; printf("Destroying guarded mach port (Expecting exception)...\n"); options.flags = (MPO_CONTEXT_AS_GUARD); kret = mach_port_construct(mach_task_self(), &options, gval, &port); if (kret != KERN_SUCCESS) exit(1); kret = mach_port_destroy(mach_task_self(), port); if (kret == KERN_SUCCESS) { printf("[FAILED]\n"); exit(1); } return; }
kern_return_t mk_timer_destroy_trap( struct mk_timer_destroy_trap_args *args) { mach_port_name_t name = args->name; ipc_space_t myspace = current_space(); ipc_port_t port; kern_return_t result; result = ipc_port_translate_receive(myspace, name, &port); if (result != KERN_SUCCESS) return (result); if (ip_kotype(port) == IKOT_TIMER) { ip_unlock(port); result = mach_port_destroy(myspace, name); } else { ip_unlock(port); result = KERN_INVALID_ARGUMENT; } return (result); }
// Dedicated Mach exception-handler thread.
//
// Receives mach_exception_raise_state RPCs (msgh_id 2406) on `port`,
// hands the fault to JitInterface::HandleFault, and replies with either
// a (possibly patched) thread state on success or KERN_FAILURE so the
// exception propagates to the next handler (debugger / crash reporter).
// The loop exits when a MACH_NOTIFY_NO_SENDERS notification arrives,
// i.e. the watched thread has gone away.
static void ExceptionThread(mach_port_t port)
{
  Common::SetCurrentThreadName("Mach exception thread");

  // Wire-format message layouts for the exception RPC and its reply.
  // pack(4) matches the alignment the kernel uses for Mach messages.
#pragma pack(4)
  struct
  {
    mach_msg_header_t Head;
    NDR_record_t NDR;
    exception_type_t exception;
    mach_msg_type_number_t codeCnt;
    int64_t code[2];  // code[1] carries the faulting address for EXC_BAD_ACCESS
    int flavor;
    mach_msg_type_number_t old_stateCnt;
    natural_t old_state[x86_THREAD_STATE64_COUNT];
    mach_msg_trailer_t trailer;
  } msg_in;

  struct
  {
    mach_msg_header_t Head;
    NDR_record_t NDR;
    kern_return_t RetCode;
    int flavor;
    mach_msg_type_number_t new_stateCnt;
    natural_t new_state[x86_THREAD_STATE64_COUNT];
  } msg_out;
#pragma pack()

  // Poison both buffers so any uninitialized-field bug is conspicuous.
  memset(&msg_in, 0xee, sizeof(msg_in));
  memset(&msg_out, 0xee, sizeof(msg_out));

  // First iteration is receive-only; after that each mach_msg_overwrite
  // call sends the previous reply and receives the next request.
  mach_msg_header_t *send_msg = nullptr;
  mach_msg_size_t send_size = 0;
  mach_msg_option_t option = MACH_RCV_MSG;
  while (true)
  {
    // If this isn't the first run, send the reply message. Then, receive
    // a message: either a mach_exception_raise_state RPC due to
    // thread_set_exception_ports, or MACH_NOTIFY_NO_SENDERS due to
    // mach_port_request_notification.
    CheckKR("mach_msg_overwrite",
            mach_msg_overwrite(send_msg, option, send_size, sizeof(msg_in), port,
                               MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL, &msg_in.Head, 0));

    if (msg_in.Head.msgh_id == MACH_NOTIFY_NO_SENDERS)
    {
      // the other thread exited
      mach_port_destroy(mach_task_self(), port);
      return;
    }

    // 2406 is the MIG-generated id for mach_exception_raise_state.
    if (msg_in.Head.msgh_id != 2406)
    {
      PanicAlert("unknown message received");
      return;
    }

    if (msg_in.flavor != x86_THREAD_STATE64)
    {
      PanicAlert("unknown flavor %d (expected %d)", msg_in.flavor, x86_THREAD_STATE64);
      return;
    }

    x86_thread_state64_t *state = (x86_thread_state64_t *) msg_in.old_state;

    // code[1] is the faulting data address; the JIT may patch `state`.
    bool ok = JitInterface::HandleFault((uintptr_t) msg_in.code[1], state);

    // Set up the reply.
    msg_out.Head.msgh_bits = MACH_MSGH_BITS(MACH_MSGH_BITS_REMOTE(msg_in.Head.msgh_bits), 0);
    msg_out.Head.msgh_remote_port = msg_in.Head.msgh_remote_port;
    msg_out.Head.msgh_local_port = MACH_PORT_NULL;
    // Mach RPC convention: reply id = request id + 100.
    msg_out.Head.msgh_id = msg_in.Head.msgh_id + 100;
    msg_out.NDR = msg_in.NDR;
    if (ok)
    {
      msg_out.RetCode = KERN_SUCCESS;
      msg_out.flavor = x86_THREAD_STATE64;
      msg_out.new_stateCnt = x86_THREAD_STATE64_COUNT;
      memcpy(msg_out.new_state, msg_in.old_state, x86_THREAD_STATE64_COUNT * sizeof(natural_t));
    }
    else
    {
      // Pass the exception to the next handler (debugger or crash).
      msg_out.RetCode = KERN_FAILURE;
      msg_out.flavor = 0;
      msg_out.new_stateCnt = 0;
    }
    // Trim the message to the state actually sent back.
    msg_out.Head.msgh_size =
        offsetof(__typeof__(msg_out), new_state) + msg_out.new_stateCnt * sizeof(natural_t);

    send_msg = &msg_out.Head;
    send_size = msg_out.Head.msgh_size;
    option |= MACH_SEND_MSG;
  }
}
void TargetException::stop() { //TODO:detach时需要将原来的exception port恢复 m_stop = true; mach_port_destroy(mach_task_self(), m_exceptionPort); }
/*
 * Register a DNS service with the mDNS server over legacy Mach IPC.
 *
 * Allocates a client receive port (plus a send right) for replies,
 * records the callback/context in the global a_requests list, and asks
 * the server to create the registration.  Returns an opaque ref owning
 * the client port, or NULL on any failure.
 *
 * Fixes over the original: both malloc() results are checked, and the
 * RPC-failure path no longer leaks the ref struct or the client port.
 */
dns_service_discovery_ref DNSServiceRegistrationCreate
(const char *name, const char *regtype, const char *domain, uint16_t port, const char *txtRecord, DNSServiceRegistrationReply callBack, void *context)
{
    mach_port_t serverPort = DNSServiceDiscoveryLookupServer();
    mach_port_t clientPort;
    kern_return_t result;
    dns_service_discovery_ref return_t;
    struct a_requests *request;
    IPPort IpPort;
    char *portptr = (char *)&port;

    if (!serverPort) {
        return NULL;        /* no daemon to talk to */
    }

    if (!txtRecord) {
        txtRecord = "";
    }

    result = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &clientPort);
    if (result != KERN_SUCCESS) {
        printf("Mach port receive creation failed, %s\n", mach_error_string(result));
        return NULL;
    }

    result = mach_port_insert_right(mach_task_self(), clientPort, clientPort, MACH_MSG_TYPE_MAKE_SEND);
    if (result != KERN_SUCCESS) {
        printf("Mach port send creation failed, %s\n", mach_error_string(result));
        mach_port_destroy(mach_task_self(), clientPort);
        return NULL;
    }
    _increaseQueueLengthOnPort(clientPort);

    /* Fix: check both allocations; the original dereferenced them
     * unchecked. */
    return_t = malloc(sizeof(dns_service_discovery_t));
    request  = malloc(sizeof(struct a_requests));
    if (!return_t || !request) {
        free(return_t);
        free(request);
        mach_port_destroy(mach_task_self(), clientPort);
        return NULL;
    }

    return_t->port = clientPort;

    request->client_port = clientPort;
    request->context = context;
    request->callout.regCallback = callBack;

    // older versions of this code passed the port via mach IPC as an int.
    // we continue to pass it as 4 bytes to maintain binary compatibility,
    // but now ensure that the network byte order is preserved by using a struct
    IpPort.bytes[0] = 0;
    IpPort.bytes[1] = 0;
    IpPort.bytes[2] = portptr[0];
    IpPort.bytes[3] = portptr[1];

    result = DNSServiceRegistrationCreate_rpc(serverPort, clientPort, (char *)name, (char *)regtype, (char *)domain, IpPort, (char *)txtRecord);
    if (result != KERN_SUCCESS) {
        printf("There was an error creating a resolve, %s\n", mach_error_string(result));
        /* Fix: also release the ref struct and the client port, which
         * the original leaked on this path. */
        free(request);
        free(return_t);
        mach_port_destroy(mach_task_self(), clientPort);
        return NULL;
    }

    /* Publish the request so the reply dispatcher can find it. */
    pthread_mutex_lock(&a_requests_lock);
    request->next = a_requests;
    a_requests = request;
    pthread_mutex_unlock(&a_requests_lock);

    return return_t;
}
/*
 * Send a request to the IPC server for in_service_id and wait for its
 * reply stream.
 *
 * in_service_id     - name of the service to contact.
 * in_launch_server  - nonzero to let the lookup launch the server on demand.
 * in_request_stream - marshalled request bytes.
 * out_reply_stream  - on success, receives the server's reply stream
 *                     (caller owns it).
 *
 * Returns 0 on success or an errno/kern_return_t-style error.  Small
 * requests are sent inline; large ones are copied out-of-line via
 * vm_read and handed to Mach with ",dealloc" semantics.  If the cached
 * connection port is dead (MACH_SEND_INVALID_DEST) the connection is
 * re-established and the send retried.
 */
int32_t k5_ipc_send_request (const char *in_service_id,
                             int32_t in_launch_server,
                             k5_ipc_stream in_request_stream,
                             k5_ipc_stream *out_reply_stream)
{
    int err = 0;
    int32_t done = 0;
    int32_t try_count = 0;
    mach_port_t server_port = MACH_PORT_NULL;
    k5_ipc_connection_info cinfo = NULL;
    k5_ipc_connection connection = NULL;
    mach_port_t reply_port = MACH_PORT_NULL;
    const char *inl_request = NULL; /* char * so we can pass the buffer in directly */
    mach_msg_type_number_t inl_request_length = 0;
    k5_ipc_ool_request_t ool_request = NULL;
    mach_msg_type_number_t ool_request_length = 0;

    if (!in_request_stream) { err = EINVAL; }
    if (!out_reply_stream ) { err = EINVAL; }

    if (!err) {
        /* Per-thread one-time IPC initialization. */
        err = CALL_INIT_FUNCTION (k5_cli_ipc_thread_init);
    }

    if (!err) {
        /* depending on how big the message is, use the fast inline buffer or
         * the slow dynamically allocated buffer */
        mach_msg_type_number_t request_length = k5_ipc_stream_size (in_request_stream);

        if (request_length > K5_IPC_MAX_INL_MSG_SIZE) {
            /*dprintf ("%s choosing out of line buffer (size is %d)",
             * __FUNCTION__, request_length); */

            /* vm_read gives us a page-aligned copy we can hand to Mach
             * as an out-of-line descriptor. */
            err = vm_read (mach_task_self (),
                           (vm_address_t) k5_ipc_stream_data (in_request_stream),
                           request_length,
                           (vm_address_t *) &ool_request,
                           &ool_request_length);
        } else {
            /*dprintf ("%s choosing in line buffer (size is %d)",
             * __FUNCTION__, request_length); */

            inl_request_length = request_length;
            inl_request = k5_ipc_stream_data (in_request_stream);
        }
    }

    if (!err) {
        /* Fetch (or lazily create) this thread's connection table. */
        cinfo = k5_getspecific (K5_KEY_IPC_CONNECTION_INFO);

        if (!cinfo) {
            err = k5_ipc_client_cinfo_allocate (&cinfo);

            if (!err) {
                err = k5_setspecific (K5_KEY_IPC_CONNECTION_INFO, cinfo);
            }
        }

        if (!err) {
            int i, found = 0;

            /* Find the per-service connection slot for in_service_id. */
            for (i = 0; i < KIPC_SERVICE_COUNT; i++) {
                if (!strcmp (in_service_id, cinfo->connections[i].service_id)) {
                    found = 1;
                    connection = &cinfo->connections[i];
                    break;
                }
            }

            if (!found) { err = EINVAL; }
        }
    }

    if (!err) {
        err = k5_ipc_client_lookup_server (in_service_id, in_launch_server,
                                           TRUE, &server_port);
    }

    if (!err) {
        /* Receive right on which the server's reply will arrive. */
        err = mach_port_allocate (mach_task_self (),
                                  MACH_PORT_RIGHT_RECEIVE,
                                  &reply_port);
    }

    while (!err && !done) {
        if (!err && !MACH_PORT_VALID (connection->port)) {
            err = k5_ipc_client_create_client_connection (server_port,
                                                          &connection->port);
        }

        if (!err) {
            err = k5_ipc_client_request (connection->port, reply_port,
                                         inl_request, inl_request_length,
                                         ool_request, ool_request_length);
        }

        if (err == MACH_SEND_INVALID_DEST) {
            /* Server died; drop the stale send right and retry.
             * NOTE(review): when try_count has reached 2, err is left as
             * MACH_SEND_INVALID_DEST here but is then overwritten by the
             * lookup below, so the retry cap looks ineffective -- confirm
             * against upstream before changing. */
            if (try_count < 2) { try_count++; err = 0; }

            if (MACH_PORT_VALID (connection->port)) {
                mach_port_mod_refs (mach_task_self(), connection->port,
                                    MACH_PORT_RIGHT_SEND, -1 );
                connection->port = MACH_PORT_NULL;
            }

            /* Look up server name again without using the cached copy */
            err = k5_ipc_client_lookup_server (in_service_id,
                                               in_launch_server,
                                               FALSE, &server_port);
        } else {
            /* Talked to server, though we may have gotten an error */
            done = 1;

            /* Because we use ",dealloc" ool_request will be freed by mach.
             * Don't double free it. */
            ool_request = NULL;
            ool_request_length = 0;
        }
    }

    if (!err) {
        /* Stream the reply demux will fill in. */
        err = k5_ipc_stream_new (&cinfo->reply_stream);
    }

    if (!err) {
        mach_port_t old_notification_target = MACH_PORT_NULL;

        /* request no-senders notification so we know when server dies */
        err = mach_port_request_notification (mach_task_self (), reply_port,
                                              MACH_NOTIFY_NO_SENDERS, 1,
                                              reply_port,
                                              MACH_MSG_TYPE_MAKE_SEND_ONCE,
                                              &old_notification_target);

        if (!err && old_notification_target != MACH_PORT_NULL) {
            mach_port_deallocate (mach_task_self (), old_notification_target);
        }
    }

    if (!err) {
        cinfo->server_died = 0;

        /* Block until the reply (or the no-senders notification) arrives. */
        err = mach_msg_server_once (k5_ipc_reply_demux, K5_IPC_MAX_MSG_SIZE,
                                    reply_port, MACH_MSG_TIMEOUT_NONE);

        if (!err && cinfo->server_died) {
            err = ENOTCONN;
        }
    }

    if (err == BOOTSTRAP_UNKNOWN_SERVICE && !in_launch_server) {
        err = 0; /* If server is not running just return an empty stream. */
    }

    if (!err) {
        /* Hand the reply stream to the caller. */
        *out_reply_stream = cinfo->reply_stream;
        cinfo->reply_stream = NULL;
    }

    /* Cleanup: destroy the reply receive right, release any unsent
     * out-of-line buffer, and drop an unclaimed reply stream. */
    if (reply_port != MACH_PORT_NULL) {
        mach_port_destroy (mach_task_self (), reply_port);
    }
    if (ool_request_length) {
        vm_deallocate (mach_task_self (),
                       (vm_address_t) ool_request, ool_request_length);
    }
    if (cinfo && cinfo->reply_stream) {
        k5_ipc_stream_release (cinfo->reply_stream);
        cinfo->reply_stream = NULL;
    }

    return err;
}
int32_t k5_ipc_server_listen_loop (void)
{
    /* Run the Mach IPC listen loop.
     * This will call k5_ipc_server_create_client_connection for new clients
     * and k5_ipc_server_request for existing clients.
     *
     * Checks in with the bootstrap server for the lookup and service
     * ports, collects them (plus a no-senders notify port) into a port
     * set, then serves one message at a time until g_ready_to_quit.
     * Returns KERN_SUCCESS or the first error encountered. */

    kern_return_t err = KERN_SUCCESS;
    char *service = NULL;
    char *lookup = NULL;
    mach_port_t lookup_port = MACH_PORT_NULL;
    mach_port_t boot_port = MACH_PORT_NULL;
    mach_port_t previous_notify_port = MACH_PORT_NULL;

    if (!err) {
        err = k5_ipc_server_get_lookup_and_service_names (&lookup, &service);
    }

    if (!err) {
        /* Get the bootstrap port */
        err = task_get_bootstrap_port (mach_task_self (), &boot_port);
    }

    if (!err) {
        /* We are an on-demand server so our lookup port already exists. */
        err = bootstrap_check_in (boot_port, lookup, &lookup_port);
    }

    if (!err) {
        /* We are an on-demand server so our service port already exists. */
        err = bootstrap_check_in (boot_port, service, &g_service_port);
    }

    if (!err) {
        /* Allocate the receive right used for no-senders notifications. */
        err = mach_port_allocate (mach_task_self (),
                                  MACH_PORT_RIGHT_RECEIVE,
                                  &g_notify_port);
    }

    if (!err) {
        /* Ask for notification when the server port has no more senders
         * A send-once right != a send right so our send-once right will
         * not interfere with the notification */
        err = mach_port_request_notification (mach_task_self (),
                                              g_service_port,
                                              MACH_NOTIFY_NO_SENDERS, true,
                                              g_notify_port,
                                              MACH_MSG_TYPE_MAKE_SEND_ONCE,
                                              &previous_notify_port);
        /* NOTE(review): previous_notify_port is never released; looks
         * like it is expected to be MACH_PORT_NULL here -- confirm. */
    }

    if (!err) {
        /* Create the port set that the server will listen on */
        err = mach_port_allocate (mach_task_self (),
                                  MACH_PORT_RIGHT_PORT_SET,
                                  &g_listen_port_set);
    }

    if (!err) {
        /* Add the lookup port to the port set */
        err = mach_port_move_member (mach_task_self (),
                                     lookup_port, g_listen_port_set);
    }

    if (!err) {
        /* Add the service port to the port set */
        err = mach_port_move_member (mach_task_self (),
                                     g_service_port, g_listen_port_set);
    }

    if (!err) {
        /* Add the notify port to the port set */
        err = mach_port_move_member (mach_task_self (),
                                     g_notify_port, g_listen_port_set);
    }

    while (!err && !g_ready_to_quit) {
        /* Handle one message at a time so we can check to see if
         * the server wants to quit */
        err = mach_msg_server_once (k5_ipc_request_demux, K5_IPC_MAX_MSG_SIZE,
                                    g_listen_port_set, MACH_MSG_OPTION_NONE);
    }

    /* Clean up the ports and strings */
    if (MACH_PORT_VALID (g_notify_port)) {
        mach_port_destroy (mach_task_self (), g_notify_port);
        g_notify_port = MACH_PORT_NULL;
    }
    if (MACH_PORT_VALID (g_listen_port_set)) {
        mach_port_destroy (mach_task_self (), g_listen_port_set);
        g_listen_port_set = MACH_PORT_NULL;
    }
    if (MACH_PORT_VALID (boot_port)) {
        mach_port_deallocate (mach_task_self (), boot_port);
    }

    free (service);
    free (lookup);

    return err;
}
/*
 * Pump vnode-watch events from the kernel driver's IODataQueue.
 *
 * Allocates a notification port, registers it with the user client,
 * maps the shared data queue, then blocks dequeuing VnodeWatcherData_t
 * records and printing them until a kt_kStopListeningToMessages record
 * arrives.  Returns the final IOKit status (from the unmap on the
 * normal path).
 */
static IOReturn vnodeNotificationHandler(io_connect_t connection)
{
    kern_return_t kr;
    VnodeWatcherData_t vdata;
    UInt32 dataSize;
    IODataQueueMemory *queueMappedMemory;
    vm_size_t queueMappedMemorySize;
    vm_address_t address = nil;
    vm_size_t size = 0;
    unsigned int msgType = 1; // family-defined port type (arbitrary)
    mach_port_t recvPort;

    // allocate a Mach port to receive notifications from the IODataQueue
    if (!(recvPort = IODataQueueAllocateNotificationPort())) {
        fprintf(stderr, "%s: failed to allocate notification port\n", PROGNAME);
        return kIOReturnError;
    }

    // this will call registerNotificationPort() inside our user client class
    kr = IOConnectSetNotificationPort(connection, msgType, recvPort, 0);
    if (kr != kIOReturnSuccess) {
        fprintf(stderr, "%s: failed to register notification port (%d)\n",
                PROGNAME, kr);
        mach_port_destroy(mach_task_self(), recvPort);
        return kr;
    }

    // this will call clientMemoryForType() inside our user client class
    kr = IOConnectMapMemory(connection, kIODefaultMemoryType,
                            mach_task_self(), &address, &size, kIOMapAnywhere);
    if (kr != kIOReturnSuccess) {
        fprintf(stderr, "%s: failed to map memory (%d)\n", PROGNAME, kr);
        mach_port_destroy(mach_task_self(), recvPort);
        return kr;
    }

    queueMappedMemory = (IODataQueueMemory *)address;
    queueMappedMemorySize = size;

    // Outer loop: sleep until the driver signals data; inner loop:
    // drain every queued record.
    while (IODataQueueWaitForAvailableData(queueMappedMemory, recvPort) ==
           kIOReturnSuccess) {

        while (IODataQueueDataAvailable(queueMappedMemory)) {

            dataSize = sizeof(vdata);
            kr = IODataQueueDequeue(queueMappedMemory, &vdata, &dataSize);
            if (kr == kIOReturnSuccess) {

                // First byte doubles as a control marker for shutdown.
                if (*(UInt8 *)&vdata == kt_kStopListeningToMessages)
                    goto exit;

                printf("\"%s\" %s %s %lu(%s) ",
                       vdata.path, vtype_name(vdata.v_type),
                       vtag_name(vdata.v_tag), vdata.pid, vdata.p_comm);
                action_print(vdata.action, (vdata.v_type & VDIR));

            } else
                fprintf(stderr, "*** error in receiving data (%d)\n", kr);
        }
    }

exit:

    // Unmap the queue and release the notification port before leaving.
    kr = IOConnectUnmapMemory(connection, kIODefaultMemoryType,
                              mach_task_self(), address);
    if (kr != kIOReturnSuccess)
        fprintf(stderr, "%s: failed to unmap memory (%d)\n", PROGNAME, kr);

    mach_port_destroy(mach_task_self(), recvPort);

    return kr;
}
/* Create a new trivfs control port, with underlying node UNDERLYING,
   and return it in CONTROL.  CONTROL_CLASS & CONTROL_BUCKET are passed
   to the ports library to create the control port, and PROTID_CLASS &
   PROTID_BUCKET are used when creating ports representing opens of this
   node.  Any of the class/bucket arguments may be defaulted by trivfs;
   on failure every class/bucket added here is removed again.  */
error_t
trivfs_create_control (mach_port_t underlying,
		       struct port_class *control_class,
		       struct port_bucket *control_bucket,
		       struct port_class *protid_class,
		       struct port_bucket *protid_bucket,
		       struct trivfs_control **control)
{
  error_t err;

  /* Perhaps allocate, and perhaps add the specified port classes the ones
     recognized by trivfs.  */
  err = trivfs_add_control_port_class (&control_class);
  if (! err)
    err = trivfs_add_protid_port_class (&protid_class);
  else
    /* Zero the pointer so the cleanup at OUT doesn't remove a class we
       never added.  */
    protid_class = 0;

  /* Perhaps allocate new port buckets.  */
  if (! err)
    err = trivfs_add_port_bucket (&control_bucket);
  else
    control_bucket = 0;
  if (! err)
    {
      if (! protid_bucket)
	/* By default, use the same port bucket for both.  */
	protid_bucket = control_bucket;
      err = trivfs_add_port_bucket (&protid_bucket);
    }
  else
    protid_bucket = 0;

  if (! err)
    err = ports_create_port (control_class, control_bucket,
			     sizeof (struct trivfs_control), control);

  if (! err)
    {
      (*control)->underlying = underlying;
      (*control)->protid_class = protid_class;
      (*control)->protid_bucket = protid_bucket;

      /* Allocate the two receive rights that identify this filesystem
	 and this node; on failure undo whatever was created so far.  */
      err = mach_port_allocate (mach_task_self (), MACH_PORT_RIGHT_RECEIVE,
				&(*control)->filesys_id);
      if (err)
	{
	  ports_port_deref (*control);
	  goto out;
	}

      err = mach_port_allocate (mach_task_self (), MACH_PORT_RIGHT_RECEIVE,
				&(*control)->file_id);
      if (err)
	{
	  mach_port_destroy (mach_task_self (), (*control)->filesys_id);
	  ports_port_deref (*control);
	  goto out;
	}

      (*control)->hook = 0;
      mutex_init (&(*control)->lock);
    }

 out:
  if (err)
    {
      /* The remove functions tolerate entries that were never added
	 (the corresponding pointer was zeroed above).  */
      trivfs_remove_control_port_class (control_class);
      trivfs_remove_protid_port_class (protid_class);
      trivfs_remove_port_bucket (control_bucket);
      trivfs_remove_port_bucket (protid_bucket);
    }
  return err;
}
/*
 * Drain DldDriverDataLog records from the driver's shared IODataQueue
 * and print them.
 *
 * Registers a notification port for the log queue, maps the queue
 * memory (32/64-bit address types differ, hence the #if), then loops
 * dequeuing records until a kt_DldStopListeningToMessages marker is
 * seen.  Returns the final IOKit status.
 */
IOReturn vnodeNotificationHandler(io_connect_t connection)
{
    kern_return_t       kr;
    DldDriverDataLog    vdata;
    uint32_t            dataSize;
    IODataQueueMemory  *queueMappedMemory;
    vm_size_t           queueMappedMemorySize;

#if !__LP64__ || defined(IOCONNECT_MAPMEMORY_10_6)
    vm_address_t address = nil;
    vm_size_t size = 0;
#else
    mach_vm_address_t address = NULL;
    mach_vm_size_t size = 0x0;
#endif

    mach_port_t recvPort;

    //
    // allocate a Mach port to receive notifications from the IODataQueue
    //
    if (!(recvPort = IODataQueueAllocateNotificationPort())) {
        PRINT_ERROR(("failed to allocate notification port\n"));
        return kIOReturnError;
    }

    //
    // this will call registerNotificationPort() inside our user client class
    //
    kr = IOConnectSetNotificationPort(connection, kt_DldNotifyTypeLog, recvPort, 0);
    if (kr != kIOReturnSuccess) {
        PRINT_ERROR(("failed to register notification port (%d)\n", kr));
        mach_port_destroy(mach_task_self(), recvPort);
        return kr;
    }

    //
    // this will call clientMemoryForType() inside our user client class
    //
    kr = IOConnectMapMemory( connection,
                             kt_DldNotifyTypeLog,
                             mach_task_self(),
                             &address,
                             &size,
                             kIOMapAnywhere );
    if (kr != kIOReturnSuccess) {
        PRINT_ERROR(("failed to map memory (%d)\n",kr));
        mach_port_destroy(mach_task_self(), recvPort);
        return kr;
    }

    queueMappedMemory = (IODataQueueMemory *)address;
    queueMappedMemorySize = size;

    printf("before the while loop\n");

    // Outer loop: block until the driver signals available data.
    //bool first_iteration = true;
    while( kIOReturnSuccess == IODataQueueWaitForAvailableData(queueMappedMemory, recvPort) ) {

        //first_iteration = false;
        //printf("a buffer has been received\n");

        // Inner loop: drain every record currently in the queue.
        while (IODataQueueDataAvailable(queueMappedMemory)) {

            dataSize = sizeof(vdata);
            kr = IODataQueueDequeue(queueMappedMemory, &vdata, &dataSize);
            if (kr == kIOReturnSuccess) {

                // Leading 32 bits double as a shutdown control marker.
                if (*(UInt32 *)&vdata == kt_DldStopListeningToMessages)
                    goto exit;

                printf( "\"%s\" %s %i(%s) ",
                        vdata.Fsd.path,
                        vtype_name(vdata.Fsd.v_type),
                        (int)vdata.Fsd.pid,
                        vdata.Fsd.p_comm );

                // Dispatch on the KAUTH scope the event came from.
                if( DLD_KAUTH_SCOPE_VNODE_ID == vdata.Fsd.scopeID )
                    vnode_action_print(vdata.Fsd.action, (vdata.Fsd.v_type & VDIR));
                else
                    fileop_action_print(vdata.Fsd.action, (vdata.Fsd.v_type & VDIR));

            } else {
                PRINT_ERROR(("*** error in receiving data (%d)\n", kr));
            }
        }// end while
    }// end while

exit:

    // Unmap the queue and release the notification port before leaving.
    kr = IOConnectUnmapMemory( connection,
                               kt_DldNotifyTypeLog,
                               mach_task_self(),
                               address );
    if (kr != kIOReturnSuccess){
        PRINT_ERROR(("failed to unmap memory (%d)\n", kr));
    }

    mach_port_destroy(mach_task_self(), recvPort);

    return kr;
}
/* Implement the object termination call from the kernel as described
   in <mach/memory_object.defs>.  Validates the control and name ports
   against the pager's records, waits out any termination inhibition,
   destroys both ports, and frees the pager structure.  Processing is
   serialized by sequence number via _pager_wait_for_seqno.  */
kern_return_t
_pager_seqnos_memory_object_terminate (mach_port_t object,
				       mach_port_seqno_t seqno,
				       mach_port_t control,
				       mach_port_t name)
{
  struct pager *p;

  p = ports_lookup_port (0, object, _pager_class);
  if (!p)
    return EOPNOTSUPP;

  mutex_lock (&p->interlock);
  /* Process messages in kernel order.  */
  _pager_wait_for_seqno (p, seqno);

  if (control != p->memobjcntl)
    {
      printf ("incg terminate: wrong control port");
      goto out;
    }
  if (name != p->memobjname)
    {
      printf ("incg terminate: wrong name port");
      goto out;
    }

  /* Wait until nothing is blocking termination.  */
  while (p->noterm)
    {
      p->termwaiting = 1;
      condition_wait (&p->wakeup, &p->interlock);
    }

  /* Destroy the ports we received; mark that in P so that it doesn't
     bother doing it again. */
  mach_port_destroy (mach_task_self (), control);
  mach_port_destroy (mach_task_self (), name);
  p->memobjcntl = p->memobjname = MACH_PORT_NULL;

  _pager_free_structure (p);

#ifdef KERNEL_INIT_RACE
  /* A new initialization may have raced with this termination; if one
     is pending, adopt its ports and restart the pager.  */
  if (p->init_head)
    {
      struct pending_init *i = p->init_head;
      p->init_head = i->next;
      if (!i->next)
	p->init_tail = 0;
      p->memobjcntl = i->control;
      p->memobjname = i->name;
      memory_object_ready (i->control, p->may_cache, p->copy_strategy);
      p->pager_state = NORMAL;
      free (i);
    }
#endif

 out:
  _pager_release_seqno (p, seqno);
  mutex_unlock (&p->interlock);
  ports_port_deref (p);

  return 0;
}
/*
 * MAC label-handle test tool.
 *
 * Exactly one label source must be chosen:
 *   -n TEXT  create a new label handle from TEXT
 *   -t       get the current task's label handle
 *   -p       get the label handle of a freshly allocated port
 *   -r       request a computed label from port + task
 * Modifiers: -c N repeat N times, -d deallocate the handle afterwards,
 * -x destroy it afterwards.  Exits 1 on any Mach error.
 */
int main (int argc, char **argv)
{
	kern_return_t kr;
	mach_port_name_t labelHandle, portName;
	char *textlabel, textbuf[512];
	int ch, count, dealloc, destroy, getnew, getport;
	int gettask, reqlabel, i;

	count = 1;
	dealloc = destroy = getnew = gettask = getport = reqlabel = 0;

	/* XXX - add port lh and request lh */
	while ((ch = getopt(argc, argv, "c:dn:prtx")) != -1) {
		switch (ch) {
		case 'c':
			count = atoi(optarg);
			break;
		case 'd':
			dealloc = 1;
			break;
		case 'n':
			getnew = 1;
			textlabel = optarg;
			break;
		case 'p':
			getport = 1;
			break;
		case 'r':
			reqlabel = 1;
			break;
		case 't':
			gettask = 1;
			break;
		case 'x':
			destroy = 1;
			break;
		default:
			usage();
		}
	}

	/* Exactly one label source is required.  */
	if (getnew + gettask + getport + reqlabel != 1)
		usage();

	/* Get a new port. */
	if (getport || reqlabel) {
		kr = mach_port_allocate(mach_task_self(),
		    MACH_PORT_RIGHT_RECEIVE, &portName);
		if (kr != KERN_SUCCESS) {
			mach_error("mach_port_allocate():", kr);
			exit(1);
		}
	}

	for (i = 0; i < count; i++) {
		if (getnew) {
			/* Get a new label handle */
			kr = mac_label_new(mach_task_self(), &labelHandle,
			    textlabel);
			if (kr != KERN_SUCCESS) {
				fprintf(stderr, "mac_label_new(%s)", textlabel);
				mach_error(":", kr);
				exit(1);
			}
			printf("new label handle: 0x%x (%s)\n", labelHandle,
			    textlabel);
		}

		if (gettask) {
			/* Get label handle for our task */
			kr = mach_get_task_label(mach_task_self(),
			    &labelHandle);
			if (kr != KERN_SUCCESS) {
				mach_error("mach_get_task_label():", kr);
				exit(1);
			}
			kr = mach_get_task_label_text(mach_task_self(),
			    "sebsd", textbuf);
			if (kr != KERN_SUCCESS) {
				mach_error("mach_get_task_label_text():", kr);
				exit(1);
			}
			printf("task label handle: 0x%x (%s)\n", labelHandle,
			    textbuf);
		}

		if (getport) {
			/* Get a label handle for the new port */
			kr = mach_get_label(mach_task_self(), portName,
			    &labelHandle);
			if (kr != KERN_SUCCESS) {
				mach_error("mach_get_label():", kr);
				exit(1);
			}
			kr = mach_get_label_text(mach_task_self(), labelHandle,
			    "sebsd", textbuf);
			if (kr != KERN_SUCCESS) {
				mach_error("mach_get_label_text():", kr);
				exit(1);
			}
			printf("port label handle: 0x%x (%s)\n", labelHandle,
			    textbuf);
		}

		if (reqlabel) {
			/* Compute label handle based on port and task. */
			kr = mac_request_label(mach_task_self(), portName,
			    mach_task_self(), "mach_task", &labelHandle);
			if (kr != KERN_SUCCESS) {
				mach_error("mac_request_label():", kr);
				exit(1);
			}
			kr = mach_get_label_text(mach_task_self(), labelHandle,
			    "sebsd", textbuf);
			if (kr != KERN_SUCCESS) {
				mach_error("mach_get_label_text():", kr);
				exit(1);
			}
			printf("computed label handle: 0x%x (%s)\n",
			    labelHandle, textbuf);
		}

		if (dealloc) {
			/* Deallocate the label handle */
			kr = mach_port_deallocate(mach_task_self(),
			    labelHandle);
			if (kr != KERN_SUCCESS) {
				mach_error("mach_port_deallocate:", kr);
				exit(1);
			}
			printf("successfully deallocated the label handle\n");
		}

		if (destroy) {
			/* Destroy the label handle */
			kr = mach_port_destroy(mach_task_self(), labelHandle);
			if (kr != KERN_SUCCESS) {
				mach_error("mach_port_destroy:", kr);
				exit(1);
			}
			printf("successfully destroyed the label handle\n");
		}
	}

	exit(0);
}