/*
 * Thread for calling back to a compressor's memory allocator.
 * Needed for Digital UNIX since its VM can't handle requests
 * for large amounts of memory without blocking.  The thread
 * provides a context in which we can call a memory allocator
 * that may block.
 *
 * Loops forever: sleeps on cp->memreq.thread_status until a request
 * is posted, performs the (possibly blocking) allocation, and reports
 * the result back through cp->memreq.
 */
static void
ppp_comp_alloc(comp_state_t *cp)
{
    int len, cmd;
    unsigned char *compressor_options;

#if defined(MAJOR_VERSION) && (MAJOR_VERSION <= 2)
    /*
     * In 2.x and earlier the argument gets passed
     * in the thread structure itself.  Yuck.
     */
    thread_t thread = current_thread();

    cp = thread->reply_port;
    thread->reply_port = PORT_NULL;
#endif

    for (;;) {
        /* Sleep until a request is posted on memreq.thread_status. */
        assert_wait((vm_offset_t)&cp->memreq.thread_status, TRUE);
        thread_block();

        /* Exit cleanly if this thread is being torn down. */
        if (thread_should_halt(current_thread()))
            thread_halt_self();

        cmd = cp->memreq.cmd;
        compressor_options = &cp->memreq.comp_opts[0];
        /* Second byte of the option block is its length. */
        len = compressor_options[1];

        /* Call the allocator for the transmit or receive side. */
        if (cmd == PPPIO_XCOMP)
            cp->memreq.returned_mem =
                cp->xcomp->comp_alloc(compressor_options, len);
        else
            cp->memreq.returned_mem =
                cp->rcomp->decomp_alloc(compressor_options, len);

        /* 0 on success, ENOSR when the allocator came up empty. */
        cp->memreq.thread_status =
            cp->memreq.returned_mem ? 0 : ENOSR;
    }
}
void ast_taken(void) { thread_t self = current_thread(); ast_t reasons; /* * Interrupts are still disabled. * We must clear need_ast and then enable interrupts. */ reasons = need_ast[cpu_number()]; need_ast[cpu_number()] = AST_ZILCH; (void) spl0(); /* * These actions must not block. */ if (reasons & AST_NETWORK) net_ast(); /* * Make darn sure that we don't call thread_halt_self * or thread_block from the idle thread. */ if (self != current_processor()->idle_thread) { #ifndef MIGRATING_THREADS while (thread_should_halt(self)) thread_halt_self(); #endif /* * One of the previous actions might well have * woken a high-priority thread, so we use * csw_needed in addition to AST_BLOCK. */ if ((reasons & AST_BLOCK) || csw_needed(self, current_processor())) { counter(c_ast_taken_block++); thread_block(thread_exception_return); } } }
void exception_no_server(void) { ipc_thread_t self = current_thread(); /* * If this thread is being terminated, cooperate. */ while (thread_should_halt(self)) thread_halt_self(thread_exception_return); #if 0 if (thread_suspend (self) == KERN_SUCCESS) thread_exception_return (); #endif #if MACH_KDB if (debug_user_with_kdb) { /* * Debug the exception with kdb. * If kdb handles the exception, * then thread_kdb_return won't return. */ db_printf("No exception server, calling kdb...\n"); thread_kdb_return(); } #endif /* MACH_KDB */ /* * All else failed; terminate task. */ (void) task_terminate(self->task); thread_halt_self(thread_exception_return); panic("terminating the task didn't kill us"); /*NOTREACHED*/ }
/*
 * Slow path for finishing an exception RPC whose reply receive was
 * interrupted.  Re-issues the receive until it completes (or the reply
 * port dies), then dispatches on the result.  Every exit path ends in
 * a continuation or a NOTREACHED call — this function never returns
 * normally to its caller.
 *
 * mr    - result of the interrupted receive (MACH_RCV_INTERRUPTED
 *         triggers the retry loop).
 * kmsg  - received message; presumably only valid once mr is
 *         MACH_MSG_SUCCESS — TODO confirm against caller.
 * seqno - sequence number of the received message.
 */
void exception_raise_continue_slow(
	mach_msg_return_t mr,
	ipc_kmsg_t kmsg,
	mach_port_seqno_t seqno)
{
	ipc_thread_t self = current_thread();
	ipc_port_t reply_port = self->ith_port;
	ipc_mqueue_t reply_mqueue = &reply_port->ip_messages;

	while (mr == MACH_RCV_INTERRUPTED) {
		/*
		 * Somebody is trying to force this thread
		 * to a clean point.  We must cooperate
		 * and then resume the receive.
		 */
		while (thread_should_halt(self)) {
			/* if thread is about to terminate, release the port */
			if (self->ast & AST_TERMINATE)
				ipc_port_release(reply_port);
			/*
			 * Use the continuation to release the port in
			 * case the thread is about to halt.
			 */
			thread_halt_self(thread_release_and_exception_return);
		}

		ip_lock(reply_port);
		if (!ip_active(reply_port)) {
			/* Reply port died while we were away; stop waiting. */
			ip_unlock(reply_port);
			mr = MACH_RCV_PORT_DIED;
			break;
		}

		/* Take the mqueue lock before dropping the port lock. */
		imq_lock(reply_mqueue);
		ip_unlock(reply_port);

		mr = ipc_mqueue_receive(reply_mqueue,
					MACH_MSG_OPTION_NONE,
					MACH_MSG_SIZE_MAX,
					MACH_MSG_TIMEOUT_NONE,
					FALSE, exception_raise_continue,
					&kmsg, &seqno);
		/* reply_mqueue is unlocked */
	}

	/*
	 * NOTE(review): this release pairs with a reference presumably
	 * taken when the RPC was initiated — confirm against the fast
	 * path / exception_raise callers.
	 */
	ipc_port_release(reply_port);

	assert((mr == MACH_MSG_SUCCESS) ||
	       (mr == MACH_RCV_PORT_DIED));

	if (mr == MACH_MSG_SUCCESS) {
		/*
		 * Consume the reply message.
		 */
		ipc_port_release_sonce(reply_port);
		mr = exception_parse_reply(kmsg);
	}

	if ((mr == KERN_SUCCESS) ||
	    (mr == MACH_RCV_PORT_DIED)) {
		/* Done (or nothing more we can do): back to user mode. */
		thread_exception_return();
		/*NOTREACHED*/
	}

	if (self->ith_exc != KERN_SUCCESS) {
		/* Thread-level server refused; escalate to the task level. */
		exception_try_task(self->ith_exc,
				   self->ith_exc_code,
				   self->ith_exc_subcode);
		/*NOTREACHED*/
	}

	exception_no_server();
	/*NOTREACHED*/
}