static void setExceptionThread(void) { kern_return_t r; // char *nullAddr = NULL; bailOut = FALSE; /* save the old exception port for this task */ r = task_get_exception_port(task_self(), &(ports.old_exc_port)); if (r != KERN_SUCCESS) { mach_error("task_get_exception_port",r); exit(1); } if (!ports.exc_port) { /* create a new exception port for this task */ r = port_allocate(task_self(), &(ports.exc_port)); if (r != KERN_SUCCESS) { mach_error("port_allocate",r); exit(1); } /* Fork the thread that listens to the exception port. */ cthread_detach(cthread_fork((cthread_fn_t)exc_thread,(any_t)&ports)); ports.clear_port = thread_reply(); } /* install the new exception port for this task */ r = task_set_exception_port(task_self(), (ports.exc_port)); if (r != KERN_SUCCESS) { mach_error("task_set_exception_port",r); exit(1); } }
int main (int argc, char **argv) { int nthreads; int fail; if (argc > 2) { fprintf (stderr, "%s [num-threads]\n", argv[0]); exit (1); } if (argc == 1) nthreads = 4; else nthreads = atoi (argv[1]); if (!nthreads) nthreads = 4; authserver = getauth (); maptime_map (0, 0, &mapped_time); main_address.sin_family = AF_INET; main_address.sin_port = htons (NFS_PORT); main_address.sin_addr.s_addr = INADDR_ANY; pmap_address.sin_family = AF_INET; pmap_address.sin_port = htons (PMAPPORT); pmap_address.sin_addr.s_addr = INADDR_ANY; main_udp_socket = socket (PF_INET, SOCK_DGRAM, 0); pmap_udp_socket = socket (PF_INET, SOCK_DGRAM, 0); fail = bind (main_udp_socket, (struct sockaddr *)&main_address, sizeof (struct sockaddr_in)); if (fail) error (1, errno, "Binding NFS socket"); fail = bind (pmap_udp_socket, (struct sockaddr *)&pmap_address, sizeof (struct sockaddr_in)); if (fail) error (1, errno, "Binding PMAP socket"); init_filesystems (); cthread_detach (cthread_fork ((cthread_fn_t) server_loop, (any_t)(intptr_t) pmap_udp_socket)); while (nthreads--) cthread_detach (cthread_fork ((cthread_fn_t) server_loop, (any_t)(intptr_t) main_udp_socket)); for (;;) { sleep (1); scan_fhs (); scan_creds (); scan_replies (); } }
/* Initialize the hurdio backend: set up the condition variables used
   by the writer and the DTR logic, then spawn the detached reader and
   writer loops.  Always returns 0.  */
static error_t
hurdio_init (void)
{
  condition_init (&hurdio_writer_condition);
  condition_init (&hurdio_assert_dtr_condition);

  /* Both loops run for the life of the process and take no argument,
     so detach them immediately.  */
  cthread_detach (cthread_fork (hurdio_reader_loop, 0));
  cthread_detach (cthread_fork (hurdio_writer_loop, 0));

  return 0;
}
static error_t pc_mouse_start (void *handle) { error_t err; char device_name[9]; int devnum = majordev << 3 | minordev; device_t device_master; sprintf (device_name, "mouse%d", devnum); err = get_privileged_ports (0, &device_master); if (err) return err; err = device_open (device_master, D_READ, device_name, &mousedev); mach_port_deallocate (mach_task_self (), device_master); if (err) return ENODEV; err = driver_add_input (&pc_mouse_ops, NULL); if (err) { device_close (mousedev); mach_port_deallocate (mach_task_self (), mousedev); return err; } cthread_detach (cthread_fork (input_loop, NULL)); if (repeater_node) setrepeater (repeater_node); return 0; }
/* Set up the synchronization objects used by the root-update
   machinery and start the background thread that performs the
   periodic updates.  */
void
root_update_init()
{
  mutex_init (&update_lock);
  rwlock_init (&update_rwlock);
  condition_init (&update_wakeup);

  /* The updater runs for the life of the filesystem; detach it.  */
  cthread_detach (cthread_fork ((cthread_fn_t) _root_update_thread, 0));
}
/*
 * This message server catches server exceptions.  It runs in a
 * dedicated thread.
 */
void *
server_exception_catcher(
	void	*arg)
{
	struct server_thread_priv_data	priv_data;
	kern_return_t	kr;
#define MSG_BUFFER_SIZE	8192
	union request_msg {
		mach_msg_header_t	hdr;
		mig_reply_error_t	death_pill;
		char			space[MSG_BUFFER_SIZE];
	} *msg_buffer_1, *msg_buffer_2;
	mach_msg_header_t	*request;
	mig_reply_error_t	*reply;

	cthread_set_name(cthread_self(), "server exc catcher");
	server_thread_set_priv_data(cthread_self(), &priv_data);

	/* Two adjacent buffers: one to receive requests into, one for
	   the MIG-generated reply (the "death pill").  */
	kr = vm_allocate(mach_task_self(),
			 (vm_address_t *) &msg_buffer_1,
			 2 * sizeof *msg_buffer_1,
			 TRUE);
	if (kr != KERN_SUCCESS) {
		MACH3_DEBUG(0, kr, ("server_exception_catcher: vm_allocate"));
		panic("server_exception_catcher");
	}
	msg_buffer_2 = msg_buffer_1 + 1;
	request = &msg_buffer_1->hdr;
	reply = &msg_buffer_2->death_pill;

	do {
		/* Block until an exception message arrives on the
		   server's exception port.  */
		kr = mach_msg(request, MACH_RCV_MSG,
			      0, sizeof *msg_buffer_1,
			      server_exception_port,
			      MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
		if (kr != MACH_MSG_SUCCESS) {
			MACH3_DEBUG(1, kr,
				    ("server_exception_catcher: mach_msg"));
			panic("server_exception_catcher: receive");
		}
		/* Let the MIG demuxer dispatch to the exc_* handlers;
		   complain if the message wasn't an exception RPC.  */
		if (exc_server(request, &reply->Head)) {}
		else {
			printk("server_exception_catcher: invalid message"
			       "(id = %d = 0x%x)\n",
			       request->msgh_id, request->msgh_id);
		}
		/* An exception in the server itself is unrecoverable:
		   panic rather than reply and continue.  */
		panic("server_exception_catcher: what now ?");
	} while (1);

	/*NOTREACHED*/
	cthread_detach(cthread_self());
	cthread_exit((void *) 0);
	/*NOTREACHED*/
	return (void *) 0;
}
/*
 * Start-up trampoline for an in-server "kernel thread".  DUMMY points
 * to a kmalloc'd array of three values: the thread function, its
 * argument, and the Linux task_struct the thread runs on behalf of.
 * The thread runs FN(ARG) and then exits via sys_exit(); it never
 * returns to the caller.
 */
void *
server_thread_bootstrap(
	void	*dummy)
{
	struct server_thread_priv_data	priv_data;
	void	**args;
	int	(*fn)(void *);
	void	*arg;
	struct task_struct	*tsk;
	int	ret;
	osfmach3_jmp_buf	jmp_buf;
	extern int sys_exit(int error_code);

	/* Unpack the argument block built by our creator.  */
	args = (void **) dummy;
	fn = (int(*)(void *)) args[0];
	arg = args[1];
	tsk = (struct task_struct *) args[2];

	cthread_set_name(cthread_self(), "kernel thread");
	server_thread_set_priv_data(cthread_self(), &priv_data);
	priv_data.current_task = tsk;
#if 0	/* XXX ? */
	tsk->osfmach3.thread->mach_thread_port = mach_thread_self();
#endif
	tsk->osfmach3.thread->under_server_control = TRUE;
	tsk->osfmach3.thread->active_on_cthread = cthread_self();

	uniproc_enter();

	/* Thread termination longjmps back to this point.  */
	priv_data.jmp_buf = &jmp_buf;
	if (osfmach3_setjmp(priv_data.jmp_buf)) {
		/*
		 * The kernel thread is being terminated.
		 */
		uniproc_exit();
		cthread_set_name(cthread_self(), "dead kernel thread");
		cthread_detach(cthread_self());
		cthread_exit((void *) 0);
		/*NOTREACHED*/
		panic("server_thread_bootstrap: the zombie cthread walks !\n");
	}

	/* The argument block is ours to free once unpacked.  */
	kfree(args);

	while (current->state != TASK_RUNNING) {
		schedule();	/* wait until we're resumed by our parent */
	}

	ret = (*fn)(arg);
	sys_exit(ret);
	/*NOTREACHED*/
	panic("server_thread_bootstrap: the zombie kernel thread walks !\n");
}
/* Create the disk pager on PAGER_BUCKET with user info UPI and cache
   policy MAY_CACHE, map SIZE bytes of the disk through it at *IMAGE,
   and arrange for faults on that mapping to be delivered as signals.
   Fatal (exits) if the disk cannot be mapped.  */
void
diskfs_start_disk_pager (struct user_pager_info *upi,
			 struct port_bucket *pager_bucket, int may_cache,
			 size_t size, void **image)
{
  error_t err;
  mach_port_t pager_port;

  /* Spawn a detached thread to service paging requests on the bucket.  */
  cthread_detach (cthread_fork ((cthread_fn_t) service_paging_requests,
				(any_t) pager_bucket));

  /* Create the pager itself.  */
  diskfs_disk_pager = pager_create (upi, pager_bucket, may_cache,
				    MEMORY_OBJECT_COPY_NONE);
  assert (diskfs_disk_pager);

  /* Obtain a send right to the pager so vm_map can use it.  */
  pager_port = pager_get_port (diskfs_disk_pager);
  mach_port_insert_right (mach_task_self (), pager_port, pager_port,
			  MACH_MSG_TYPE_MAKE_SEND);

  /* Map the whole disk image; write access only if the filesystem is
     not read-only.  */
  err = vm_map (mach_task_self (), (vm_address_t *) image, size,
		0, 1, pager_port, 0, 0,
		VM_PROT_READ | (diskfs_readonly ? 0 : VM_PROT_WRITE),
		VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_NONE);
  if (err)
    error (2, err, "cannot vm_map whole disk");

  /* Preempt signals for faults inside the mapped image.  */
  preemptor.first = (vm_address_t) *image;
  preemptor.last = (vm_address_t) *image + size;
  hurd_preempt_signals (&preemptor);

  /* The mapping holds its own reference now; drop our send right.  */
  mach_port_deallocate (mach_task_self (), pager_port);
}
/* Establish a thread to sync the filesystem every INTERVAL seconds, or never, if INTERVAL is zero. If an error occurs creating the thread, it is returned, otherwise 0. Subsequent calls will create a new thread and (eventually) get rid of the old one; the old thread won't do any more syncs, regardless. */ error_t diskfs_set_sync_interval (int interval) { error_t err = 0; if (! pi) { err = ports_create_port (diskfs_control_class, diskfs_port_bucket, sizeof (struct port_info), &pi); if (err) return err; } err = ports_inhibit_port_rpcs (pi); if (err) return err; /* Here we just set the new thread; any existing thread will notice when it wakes up and go away silently. */ if (interval == 0) periodic_sync_thread = 0; else { periodic_sync_thread = cthread_fork ((cthread_fn_t)periodic_sync, (any_t)(intptr_t)interval); if (periodic_sync_thread) cthread_detach (periodic_sync_thread); else err = ENOMEM; } if (!err) diskfs_sync_interval = interval; ports_resume_port_rpcs (pi); return err; }
/*
 * This message server catches user task exceptions.  Most user
 * exceptions will be received on the thread exception port.  This
 * server serves only exceptions from unknown threads or from external
 * debuggers.  It runs in a dedicated thread.
 */
void *
task_exception_catcher(
	void	*arg)
{
	struct server_thread_priv_data	priv_data;
	kern_return_t	kr;
#define MSG_BUFFER_SIZE	8192
	union request_msg {
		mach_msg_header_t	hdr;
		mig_reply_error_t	death_pill;
		char			space[MSG_BUFFER_SIZE];
	} *msg_buffer_1, *msg_buffer_2;
	mach_msg_header_t	*request;
	mig_reply_error_t	*reply;
	mach_msg_header_t	*tmp;

	cthread_set_name(cthread_self(), "task exc catcher");
	server_thread_set_priv_data(cthread_self(), &priv_data);
	uniproc_enter();

	/* Two adjacent buffers; REQUEST and REPLY are swapped after
	   each successful send so the reply buffer becomes the next
	   receive buffer.  */
	kr = vm_allocate(mach_task_self(),
			 (vm_address_t *) &msg_buffer_1,
			 2 * sizeof *msg_buffer_1,
			 TRUE);
	if (kr != KERN_SUCCESS) {
		MACH3_DEBUG(0, kr, ("task_exception_catcher: vm_allocate"));
		panic("task_exception_catcher");
	}
	msg_buffer_2 = msg_buffer_1 + 1;
	request = &msg_buffer_1->hdr;
	reply = &msg_buffer_2->death_pill;

	do {
		/* Drop the uniprocessor lock while blocked in the
		   kernel; retake it before touching server state.  */
		uniproc_exit();
		kr = mach_msg(request, MACH_RCV_MSG,
			      0, sizeof *msg_buffer_1,
			      user_trap_port,
			      MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
		if (kr != MACH_MSG_SUCCESS) {
			MACH3_DEBUG(1, kr,
				    ("task_exception_catcher: mach_msg"));
			panic("task_exception_catcher: receive");
		}
		uniproc_enter();
		/* Let the MIG demuxer dispatch to the exc_* handlers.  */
		if (exc_server(request, &reply->Head)) {}
		else {
			printk("trap_exception_catcher: invalid message"
			       "(id = %d = 0x%x)\n",
			       request->msgh_id, request->msgh_id);
		}
		if (reply->Head.msgh_remote_port == MACH_PORT_NULL) {
			/* no reply port, just get another request */
			continue;
		}
		if (!(reply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) &&
		    reply->RetCode == MIG_NO_REPLY) {
			/* deallocate reply port right */
			(void) mach_port_deallocate(mach_task_self(),
						    reply->Head.msgh_remote_port);
			continue;
		}
		/* Send reply to request and receive another */
		uniproc_exit();
		kr = mach_msg(&reply->Head, MACH_SEND_MSG,
			      reply->Head.msgh_size, 0, MACH_PORT_NULL,
			      MACH_MSG_TIMEOUT_NONE,
			      MACH_PORT_NULL);
		uniproc_enter();
		if (kr != MACH_MSG_SUCCESS) {
			if (kr == MACH_SEND_INVALID_DEST) {
				/* deallocate reply port right */
				/* XXX should destroy reply msg */
				(void) mach_port_deallocate(mach_task_self(),
							    reply->Head.msgh_remote_port);
			} else {
				MACH3_DEBUG(0, kr, ("mach_msg"));
				panic("task_exception_catcher: send");
			}
		}
		/* Swap the buffers: the just-sent reply buffer is free
		   to receive the next request into.  */
		tmp = request;
		request = (mach_msg_header_t *) reply;
		reply = (mig_reply_error_t *) tmp;
	} while (1);

	/*NOTREACHED*/
	cthread_detach(cthread_self());
	cthread_exit((void *) 0);
	/*NOTREACHED*/
	return (void *) 0;
}
/* Send signal SIGNAL to MSGPORT with REFPORT as reference.  Don't block in
   any fashion: if the send cannot complete immediately, hand the message
   off to a detached thread that performs the blocking send.  */
void
send_signal (mach_port_t msgport,
	     int signal,
	     mach_port_t refport)
{
  error_t err;

  /* This message buffer might be modified by mach_msg in some error cases,
     so we cannot safely use a shared static buffer.  The msg_sig_post RPC
     is laid out by hand here: header, then each argument preceded by its
     Mach type descriptor.  */
  struct msg_sig_post_request message =
  {
    {
      /* Message header: */
      (MACH_MSGH_BITS_COMPLEX
       | MACH_MSGH_BITS (MACH_MSG_TYPE_COPY_SEND,
			 MACH_MSG_TYPE_MAKE_SEND_ONCE)), /* msgh_bits */
      sizeof message,		/* msgh_size */
      msgport,			/* msgh_remote_port */
      MACH_PORT_NULL,		/* msgh_local_port */
      0,			/* msgh_seqno */
      RPCID_SIG_POST,		/* msgh_id */
    },
    {
      /* Type descriptor for signo */
      MACH_MSG_TYPE_INTEGER_32,	/* msgt_name */
      32,			/* msgt_size */
      1,			/* msgt_number */
      1,			/* msgt_inline */
      0,			/* msgt_longform */
      0,			/* msgt_deallocate */
      0,			/* msgt_unused */
    },
    /* Signal number */
    signal,
    /* Type descriptor for sigcode */
    {
      MACH_MSG_TYPE_INTEGER_32,	/* msgt_name */
      32,			/* msgt_size */
      1,			/* msgt_number */
      1,			/* msgt_inline */
      0,			/* msgt_longform */
      0,			/* msgt_deallocate */
      0,			/* msgt_unused */
    },
    /* Sigcode */
    0,
    {
      /* Type descriptor for refport */
      MACH_MSG_TYPE_COPY_SEND,	/* msgt_name */
      32,			/* msgt_size */
      1,			/* msgt_number */
      1,			/* msgt_inline */
      0,			/* msgt_longform */
      0,			/* msgt_deallocate */
      0,			/* msgt_unused */
    },
    refport
  };

  /* Try a non-blocking send: zero timeout with MACH_SEND_TIMEOUT.  */
  err = mach_msg ((mach_msg_header_t *)&message,
		  MACH_SEND_MSG|MACH_SEND_TIMEOUT, sizeof message, 0,
		  MACH_PORT_NULL, 0, MACH_PORT_NULL);

  switch (err)
    {
    case MACH_SEND_TIMED_OUT:
      /* The send could not complete immediately, and we do not want to
	 block.  We'll fork off another thread to do the blocking send.
	 The message buffer was modified by a pseudo-receive operation, so
	 we need to copy its modified contents into a malloc'd buffer.  */
      {
	struct msg_sig_post_request *copy = malloc (sizeof *copy);
	if (copy)
	  {
	    memcpy (copy, &message, sizeof message);
	    cthread_detach (cthread_fork (blocking_message_send, copy));
	  }
	break;
      }

      /* These are the other codes that mean a pseudo-receive modified
	 the message buffer and we might need to clean up the send
	 rights.  None of them should be possible in our usage.  */
    case MACH_SEND_INTERRUPTED:
    case MACH_SEND_INVALID_NOTIFY:
    case MACH_SEND_NO_NOTIFY:
    case MACH_SEND_NOTIFY_IN_PROGRESS:
      assert_perror (err);
      break;

    default:
      /* Other errors are safe to ignore.  */
      break;
    }
}
void * serial_read_thread( void *arg) { struct server_thread_priv_data priv_data; kern_return_t kr; struct async_struct *info; int line, wait_loop, count; io_buf_ptr_inband_t inbuf; /* 128 chars */ mach_msg_type_number_t data_count; mach_port_t device_port; cthread_set_name(cthread_self(), "serial read"); server_thread_set_priv_data(cthread_self(), &priv_data); line = (int) arg; info = rs_table + line; uniproc_enter(); device_port = info->device_port; for (;;) { data_count = sizeof inbuf; uniproc_exit(); kr = device_read_inband(device_port, 0, /* mode */ 0, /* recnum */ sizeof inbuf, inbuf, &data_count); uniproc_enter(); if (kr == D_OUT_OF_BAND) { serial_handle_oob_event(info, device_port); continue; } else if (kr != D_SUCCESS) { /* Something happened.. simply shutdown the line.. */ if (!((info->flags & ASYNC_CALLOUT_ACTIVE) && (info->flags & ASYNC_CALLOUT_NOHUP))) queue_task_irq_off(&info->tqueue_hangup, &tq_scheduler); uniproc_exit(); cthread_detach(cthread_self()); cthread_exit((void *) 0); /* NEVER REACHED */ } if (data_count <= 0) continue; /* * Its very possible with the Power Mac * to overflow the Linux TTY buffers. * (A serial interrupt can present up to * 8K worth of data in one shot) * * The following loops attempts to give the * PPP line disc. a chance to clear the queue * out. */ for (wait_loop = 0; wait_loop < 6; wait_loop++) { /* Check to make sure the tty did not * go away.. */ if (info->tty == NULL) break; if ((info->tty->flip.count+data_count) < TTY_FLIPBUF_SIZE) break; /* Try to give another thread some time.. 
*/ osfmach3_yield(); } if (info->tty == NULL) continue; count = MIN(TTY_FLIPBUF_SIZE + info->tty->flip.count, data_count); if (count <= 0) continue; info->last_active = jiffies; info->tty->flip.count += count; memcpy(info->tty->flip.char_buf_ptr, inbuf, count); memset(info->tty->flip.flag_buf_ptr, 0, count); info->tty->flip.flag_buf_ptr += count; info->tty->flip.char_buf_ptr += count; queue_task_irq_off(&info->tty->flip.tqueue, &tq_timer); } }