static void
fault_handler (int sig, long int sigcode, struct sigcontext *scp)
{
  jmp_buf *env = cthread_data (cthread_self ());
  error_t err;

#ifndef NDEBUG
  if (!env)
    {
      error (0, 0,
             "BUG: unexpected fault on disk image (%d, %#lx) in [%#lx,%#lx)"
             " eip %#zx err %#x",
             sig, sigcode, preemptor.first, preemptor.last,
             scp->sc_pc, scp->sc_error);
      assert (scp->sc_error == EKERN_MEMORY_ERROR);
      err = pager_get_error (diskfs_disk_pager, sigcode);
      assert (err);
      assert_perror (err);
    }
#endif

  /* Clear the record, since the faulting thread will not.  */
  cthread_set_data (cthread_self (), 0);

  /* Fetch the error code from the pager.  */
  assert (scp->sc_error == EKERN_MEMORY_ERROR);
  err = pager_get_error (diskfs_disk_pager, sigcode);
  assert (err);

  /* Make `diskfault_catch' return the error code.  */
  longjmp (*env, err);
}
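/* A minimal sketch, not the actual libdiskfs source, of the setjmp side
   implied by fault_handler above: the catcher stashes a jmp_buf pointer in
   the calling cthread's per-thread data, touches the disk image, and gets
   the pager error back via longjmp if a fault occurs.  The function name,
   signature, and the operation callback are assumptions.  */
static error_t
diskfault_catch_sketch (void (*operation) (void *), void *arg)
{
  jmp_buf env;
  error_t err;

  err = setjmp (env);
  if (err)
    /* fault_handler already cleared the cthread data and longjmp'd here.  */
    return err;

  /* Publish the jump buffer so fault_handler can find it.  */
  cthread_set_data (cthread_self (), (any_t) &env);

  (*operation) (arg);           /* may fault on the disk image */

  /* No fault occurred: clear the record ourselves.  */
  cthread_set_data (cthread_self (), 0);
  return 0;
}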
void* OSThread::_Entry(void *inThread)  //static
#endif
{
    OSThread* theThread = (OSThread*)inThread;
#ifdef __Win32__
    BOOL theErr = ::TlsSetValue(sThreadStorageIndex, theThread);
    Assert(theErr == TRUE);
#elif __PTHREADS__
    theThread->fThreadID = (pthread_t)pthread_self();
    pthread_setspecific(OSThread::gMainKey, theThread);
#else
    theThread->fThreadID = (UInt32)cthread_self();
    cthread_set_data(cthread_self(), (any_t)theThread);
#endif
    theThread->SwitchPersonality();

    //
    // Run the thread
    theThread->Entry();

#ifdef __Win32__
    return 0;
#else
    return NULL;
#endif
}
/*
 * This message server catches server exceptions.  It runs in a
 * dedicated thread.
 */
void *
server_exception_catcher(
    void *arg)
{
    struct server_thread_priv_data priv_data;
    kern_return_t kr;
#define MSG_BUFFER_SIZE 8192
    union request_msg {
        mach_msg_header_t hdr;
        mig_reply_error_t death_pill;
        char space[MSG_BUFFER_SIZE];
    } *msg_buffer_1, *msg_buffer_2;
    mach_msg_header_t *request;
    mig_reply_error_t *reply;

    cthread_set_name(cthread_self(), "server exc catcher");
    server_thread_set_priv_data(cthread_self(), &priv_data);

    kr = vm_allocate(mach_task_self(),
                     (vm_address_t *) &msg_buffer_1,
                     2 * sizeof *msg_buffer_1,
                     TRUE);
    if (kr != KERN_SUCCESS) {
        MACH3_DEBUG(0, kr, ("server_exception_catcher: vm_allocate"));
        panic("server_exception_catcher");
    }

    msg_buffer_2 = msg_buffer_1 + 1;
    request = &msg_buffer_1->hdr;
    reply = &msg_buffer_2->death_pill;

    do {
        kr = mach_msg(request, MACH_RCV_MSG,
                      0, sizeof *msg_buffer_1,
                      server_exception_port,
                      MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
        if (kr != MACH_MSG_SUCCESS) {
            MACH3_DEBUG(1, kr, ("server_exception_catcher: mach_msg"));
            panic("server_exception_catcher: receive");
        }

        if (exc_server(request, &reply->Head)) {
        } else {
            printk("server_exception_catcher: invalid message "
                   "(id = %d = 0x%x)\n",
                   request->msgh_id, request->msgh_id);
        }
        panic("server_exception_catcher: what now ?");
    } while (1);

    cthread_detach(cthread_self());
    cthread_exit((void *) 0);
    /*NOTREACHED*/
    return (void *) 0;
}
void *
server_thread_bootstrap(
    void *dummy)
{
    struct server_thread_priv_data priv_data;
    void **args;
    int (*fn)(void *);
    void *arg;
    struct task_struct *tsk;
    int ret;
    osfmach3_jmp_buf jmp_buf;
    extern int sys_exit(int error_code);

    args = (void **) dummy;
    fn = (int (*)(void *)) args[0];
    arg = args[1];
    tsk = (struct task_struct *) args[2];

    cthread_set_name(cthread_self(), "kernel thread");
    server_thread_set_priv_data(cthread_self(), &priv_data);
    priv_data.current_task = tsk;
#if 0   /* XXX ? */
    tsk->osfmach3.thread->mach_thread_port = mach_thread_self();
#endif
    tsk->osfmach3.thread->under_server_control = TRUE;
    tsk->osfmach3.thread->active_on_cthread = cthread_self();

    uniproc_enter();

    priv_data.jmp_buf = &jmp_buf;
    if (osfmach3_setjmp(priv_data.jmp_buf)) {
        /*
         * The kernel thread is being terminated.
         */
        uniproc_exit();
        cthread_set_name(cthread_self(), "dead kernel thread");
        cthread_detach(cthread_self());
        cthread_exit((void *) 0);
        /*NOTREACHED*/
        panic("server_thread_bootstrap: the zombie cthread walks !\n");
    }

    kfree(args);

    while (current->state != TASK_RUNNING) {
        schedule();     /* wait until we're resumed by our parent */
    }

    ret = (*fn)(arg);

    sys_exit(ret);
    /*NOTREACHED*/
    panic("server_thread_bootstrap: the zombie kernel thread walks !\n");
}
struct task_struct *
get_current_task(void)
{
    struct server_thread_priv_data *priv_datap;
    struct task_struct *current_task;

    priv_datap = server_thread_get_priv_data(cthread_self());
    current_task = priv_datap->current_task;
    if (current_task->osfmach3.thread != init_task.osfmach3.thread) {
        ASSERT(current_task->osfmach3.thread->active_on_cthread ==
               cthread_self());
    }
    return current_task;
}
void
uniproc_enter(void)
{
    struct server_thread_priv_data *priv_datap;

#if UNIPROC_PREEMPTION
    if (!mutex_try_lock(&uniproc_mutex)) {
        priv_datap = server_thread_get_priv_data(cthread_self());
        if (priv_datap->preemptive && uniproc_allow_preemption) {
            /* priority thread: try to preempt */
            while (!uniproc_try_enter() && !uniproc_preempt()) {
                server_thread_yield(1);     /* yield for 1 ms */
            }
            /* we're all set */
            return;
        } else {
            /* just block and wait for our turn */
            mutex_lock(&uniproc_mutex);
        }
    }
#else   /* UNIPROC_PREEMPTION */
    if (use_antechamber_mutex) {
        priv_datap = server_thread_get_priv_data(cthread_self());
        if (priv_datap->current_task &&
            priv_datap->current_task->mm == &init_mm) {
            /*
             * We're working for a system thread: carry on.
             */
            mutex_lock(&uniproc_mutex);
        } else {
            /*
             * We're working for a user thread: we don't want
             * loads of user threads contending with system
             * threads on the uniproc mutex, so take the
             * uniproc_antechamber_mutex first to ensure that
             * only one user thread will be contending with
             * system threads at a given time.
             */
            mutex_lock(&uniproc_antechamber_mutex);
            mutex_lock(&uniproc_mutex);
            mutex_unlock(&uniproc_antechamber_mutex);
        }
    } else {
        mutex_lock(&uniproc_mutex);
    }
#endif  /* UNIPROC_PREEMPTION */

    uniproc_has_entered();
}
/*
 * Set private data associated with given key.
 * Returns 0 if successful and returns -1 otherwise.
 */
int
cthread_setspecific(cthread_key_t key, void *value)
{
    register int i;
    register cthread_t self;
    register void **thread_data;

    if (key < CTHREAD_KEY_NULL || key >= cthread_key)
        return(-1);

    self = cthread_self();
    thread_data = (void **)(self->private_data);
    if (thread_data != NULL)
        thread_data[key] = value;
    else {
        /*
         * Allocate and initialize thread data table,
         * point cthread_data at it, and then set the
         * data for the given key with the given value.
         */
        thread_data = malloc(CTHREAD_KEY_MAX * sizeof(void *));
        if (thread_data == NULL) {
            printf("cthread_setspecific: malloc failed\n");
            return(-1);
        }
        self->private_data = thread_data;

        for (i = 0; i < CTHREAD_KEY_MAX; i++)
            thread_data[i] = CTHREAD_DATA_VALUE_NULL;

        thread_data[key] = value;
    }
    return(0);
}
boolean_t
uniproc_preempt(void)
{
    if (mutex_try_lock(&uniproc_preemption_mutex)) {
        uniproc_change_current(current, get_current_task());
#if CONFIG_OSFMACH3_DEBUG
        {
            struct server_thread_priv_data *priv_datap;

            priv_datap = server_thread_get_priv_data(cthread_self());
            if (priv_datap->preemptive) {
                /*
                 * We actually preempted another thread...
                 * account for this glorious deed !
                 */
                uniproc_preemptions++;
            } else {
                /*
                 * It's just the preemptible thread
                 * preempting itself.
                 */
            }
        }
#endif  /* CONFIG_OSFMACH3_DEBUG */
        return TRUE;
    }
    return FALSE;
}
void
uniproc_switch_to(
    struct task_struct *old_task,
    struct task_struct *new_task)
{
    UNIPROC_ASSERT(uniproc_holder_cthread == cthread_self());
    ASSERT(old_task == FIRST_TASK || new_task == FIRST_TASK);
    uniproc_change_current(old_task, new_task);
}
boolean_t
holding_uniproc(void)
{
#ifdef CONFIG_OSFMACH3_DEBUG
    if (uniproc_holder_cthread != cthread_self())
        return FALSE;
    else
#endif  /* CONFIG_OSFMACH3_DEBUG */
        return TRUE;
}
OSThread* OSThread::GetCurrent()
{
#ifdef __Win32__
    return (OSThread *)::TlsGetValue(sThreadStorageIndex);
#elif __PTHREADS__
    return (OSThread *)pthread_getspecific(OSThread::gMainKey);
#else
    return (OSThread*)cthread_data(cthread_self());
#endif
}
void
uniproc_has_entered(void)
{
    UNIPROC_ASSERT(uniproc_holder == NULL);
    UNIPROC_ASSERT(uniproc_holder_cthread == NULL);
    current_set[smp_processor_id()] = get_current_task();
#if CONFIG_OSFMACH3_DEBUG
    uniproc_holder = current;
#endif  /* CONFIG_OSFMACH3_DEBUG */
    uniproc_holder_cthread = cthread_self();
}
int
dp_thread_id(void)
{
    default_pager_thread_t *dpt;

    dpt = (default_pager_thread_t *) cthread_data(cthread_self());
    if (dpt != NULL)
        return dpt->dpt_id;
    else
        return -1;
}
void
uniproc_will_exit(void)
{
    UNIPROC_ASSERT(uniproc_holder == current);
    UNIPROC_ASSERT(uniproc_holder_cthread == cthread_self());
    set_current_task(current);
    current_set[smp_processor_id()] = (struct task_struct *) NULL;
#if CONFIG_OSFMACH3_DEBUG
    uniproc_holder = NULL;
#endif  /* CONFIG_OSFMACH3_DEBUG */
    uniproc_holder_cthread = NULL;
}
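/* A minimal usage sketch, inferred from the server loops elsewhere in this
   collection (e.g. task_exception_catcher): each server thread brackets
   Linux-server work with uniproc_enter()/uniproc_exit(), and drops the
   uniproc lock around blocking mach_msg() calls so other cthreads can run.
   Both helpers below are hypothetical placeholders.  */
extern void wait_for_next_request(void);    /* hypothetical */
extern void handle_one_request(void);       /* hypothetical */

void
server_loop_sketch(void)
{
    for (;;) {
        /* Blocked receiving a message: must not hold the uniproc lock. */
        wait_for_next_request();

        uniproc_enter();            /* become the one running "CPU" */
        handle_one_request();       /* server work under the uniproc lock */
        uniproc_exit();             /* let another cthread enter */
    }
}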
static inline
#endif
ExceptionHandlerStack *findme (void)
{
    ExceptionHandlerStack *stack;
    cthread_t self = (cthread_t)cthread_self ();

    for (stack = &Base; stack; stack = stack->next)
        if (stack->thread == self)
            return stack;

    return addme (self);
}
/*
 * Set thread specific "global" variable,
 * using new POSIX routines.
 * Crash and burn if the thread given isn't the calling thread.
 * XXX For compatibility with old cthread_set_data() XXX
 */
int
cthread_set_data(cthread_t t, void *x)
{
    register cthread_t self;

    self = cthread_self();
    if (t == self)
        return(cthread_setspecific(CTHREAD_KEY_RESERVED, x));
    else {
        ASSERT(t == self);
        return(-1);
    }
}
void
set_current_task(
    struct task_struct *current_task)
{
    struct server_thread_priv_data *priv_datap;
    struct task_struct *old_current_task;

    priv_datap = server_thread_get_priv_data(cthread_self());
    old_current_task = priv_datap->current_task;
    if (old_current_task->osfmach3.thread != init_task.osfmach3.thread) {
        ASSERT(old_current_task->osfmach3.thread->active_on_cthread ==
               cthread_self());
        old_current_task->osfmach3.thread->active_on_cthread = CTHREAD_NULL;
    }
    ASSERT(current_task->osfmach3.thread->active_on_cthread == CTHREAD_NULL);
    if (current_task->osfmach3.thread != init_task.osfmach3.thread) {
        current_task->osfmach3.thread->active_on_cthread = cthread_self();
    }
    priv_datap->current_task = current_task;
}
static __inline__ void
uniproc_change_current(
    struct task_struct *old_task,
    struct task_struct *new_task)
{
    UNIPROC_ASSERT(uniproc_holder != NULL);
    UNIPROC_ASSERT(uniproc_holder == old_task);
    ASSERT(current == old_task);
    current_set[smp_processor_id()] = new_task;
    set_current_task(current);
#if CONFIG_OSFMACH3_DEBUG
    uniproc_holder = current;
#endif  /* CONFIG_OSFMACH3_DEBUG */
    uniproc_holder_cthread = cthread_self();
}
/*
 * Get thread specific "global" variable,
 * using new POSIX routines.
 * Crash and burn if the thread given isn't the calling thread.
 * XXX For compatibility with old cthread_data() XXX
 */
void *
cthread_data(cthread_t t)
{
    register cthread_t self;
    void *value;

    self = cthread_self();
    if (t == self) {
        (void)cthread_getspecific(CTHREAD_KEY_RESERVED, &value);
        return(value);
    } else {
        ASSERT(t == self);
        return(NULL);
    }
}
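/* A minimal usage sketch of the cthread_data()/cthread_set_data()
   compatibility pair above, following the pattern used by
   default_pager_thread and OSThread::_Entry in this collection: a thread
   stores a pointer to its own context at startup so that any code running
   on that thread can retrieve it later without plumbing the pointer
   through every call.  struct my_context is a hypothetical example type.  */
struct my_context {
    int id;
};

void
worker_sketch(void *arg)
{
    struct my_context *ctx = (struct my_context *) arg;

    /* Publish this thread's context... */
    cthread_set_data(cthread_self(), (void *) ctx);

    /* ...and later, possibly deep in unrelated code, get it back.  */
    ctx = (struct my_context *) cthread_data(cthread_self());
}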
/* cthreads will reuse a stack and a cthread_id.
 * We provide this call-in to clean up matters.
 */
static void _NXClearExceptionStack (void)
{
    ExceptionHandlerStack *stack;
    cthread_t self = (cthread_t)cthread_self ();

    // Don't allocate a stack for this thread if it doesn't already have one.
    for (stack = &Base; stack; stack = stack->next)
        if (stack->thread == self)
            break;

    if (stack) {
        stack->handlerStack = 0;    // reset the handler chain
        stack->altHandlersUsed = 0; // reset the alt handler count
    }
}
void WA_lock(WA_recursiveLock _lock)
#endif
{
   CThreadRecursiveLock *lock = (CThreadRecursiveLock *)_lock;
   cthread_t self = cthread_self();
#ifdef EXTRA_DEBUGGING_LOGS
   if (_lock != logMutex)
      WOLog(WO_DBG, "thread %x locking %s from %s:%d",
            self, lock->name, file, line);
#endif
   mutex_lock(&lock->m);
   while (lock->lockingThread != self && lock->lockingThread != NULL)
      condition_wait(&lock->c, &lock->m);
   lock->lockingThread = self;
   lock->lockCount++;
   mutex_unlock(&lock->m);
}
/*
 * Get private data associated with given key.
 * Returns 0 if successful and returns -1 if the key is invalid.
 * If the calling thread doesn't have a value for the given key,
 * the value returned is CTHREAD_DATA_VALUE_NULL.
 */
int
cthread_getspecific(cthread_key_t key, void **value)
{
    register cthread_t self;
    register void **thread_data;

    *value = CTHREAD_DATA_VALUE_NULL;
    if (key < CTHREAD_KEY_NULL || key >= cthread_key)
        return(-1);

    self = cthread_self();
    thread_data = (void **)(self->private_data);
    if (thread_data != NULL)
        *value = thread_data[key];

    return(0);
}
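/* A minimal usage sketch of the cthread_setspecific()/cthread_getspecific()
   pair above.  It assumes a key allocated once at startup with
   cthread_keycreate() from the classic cthreads key interface; treat the
   exact allocator name as an assumption.  */
cthread_key_t my_key;   /* allocated once, e.g. by cthread_keycreate() */

void
per_thread_data_sketch(void *datum)
{
    void *value;

    /* Store a value under my_key for the calling thread only. */
    if (cthread_setspecific(my_key, datum) < 0)
        printf("per_thread_data_sketch: invalid key\n");

    /* Reads back the value this thread stored, or
       CTHREAD_DATA_VALUE_NULL if it never stored one. */
    if (cthread_getspecific(my_key, &value) == 0)
        printf("per_thread_data_sketch: value %p\n", value);
}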
void WA_unlock(WA_recursiveLock _lock)
#endif
{
   CThreadRecursiveLock *lock = (CThreadRecursiveLock *)_lock;
#ifdef EXTRA_DEBUGGING_LOGS
   if (_lock != logMutex)
      WOLog(WO_DBG, "thread %x unlocking %s from %s:%d",
            cthread_self(), lock->name, file, line);
#endif
   mutex_lock(&lock->m);
   lock->lockCount--;
   if (lock->lockCount == 0)
   {
      lock->lockingThread = NULL;
      condition_signal(&lock->c);
   }
   mutex_unlock(&lock->m);
}
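/* A minimal usage sketch of the WA_lock()/WA_unlock() recursive lock above:
   because the lock records the owning cthread and a nesting count, the same
   thread may re-enter while other threads block in condition_wait() until
   the count drops back to zero.  The function itself is hypothetical.  */
void
recursive_lock_sketch(WA_recursiveLock lock)
{
   WA_lock(lock);     /* count 0 -> 1, owner = cthread_self() */
   WA_lock(lock);     /* same thread re-enters: count 1 -> 2 */
   /* ... critical section ... */
   WA_unlock(lock);   /* count 2 -> 1, still owned */
   WA_unlock(lock);   /* count 1 -> 0, owner cleared, a waiter is signaled */
}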
void
uniproc_preemptible(void)
{
#if UNIPROC_PREEMPTION
    if (uniproc_allow_preemption) {
        UNIPROC_ASSERT(uniproc_holder == current);
        UNIPROC_ASSERT(uniproc_holder_cthread == cthread_self());
#if CONFIG_OSFMACH3_DEBUG
        uniproc_preemptibles++;
#endif  /* CONFIG_OSFMACH3_DEBUG */
        mutex_unlock(&uniproc_preemption_mutex);
    } else {
        uniproc_exit();
    }
#else   /* UNIPROC_PREEMPTION */
    uniproc_exit();
#endif  /* UNIPROC_PREEMPTION */
}
/* Sync the filesystem (pointed to by the variable CONTROL_PORT above)
   every INTERVAL seconds, as long as it's in the thread pointed to by
   the global variable PERIODIC_SYNC_THREAD.  */
static void
periodic_sync (int interval)
{
  for (;;)
    {
      error_t err;
      struct rpc_info link;

      /* This acts as a lock against creation of a new sync thread
         while we are in the process of syncing.  */
      err = ports_begin_rpc (pi, 0, &link);

      if (periodic_sync_thread != cthread_self ())
        {
          /* We've been superseded as the sync thread.  Just die
             silently.  */
          ports_end_rpc (pi, &link);
          return;
        }

      if (! err)
        {
          if (! diskfs_readonly)
            {
              rwlock_reader_lock (&diskfs_fsys_lock);

              /* Only sync if we need to, to avoid clearing the clean
                 flag when it's just been set.  Any other thread doing
                 a sync will have held the lock while it did its
                 work.  */
              if (_diskfs_diskdirty)
                {
                  diskfs_sync_everything (0);
                  diskfs_set_hypermetadata (0, 0);
                }

              rwlock_reader_unlock (&diskfs_fsys_lock);
            }

          ports_end_rpc (pi, &link);
        }

      /* Wait until next time.  */
      sleep (interval);
    }
}
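/* A minimal sketch, not the actual libdiskfs source, of how a sync thread
   like periodic_sync above would be created and superseded: the creator
   forks a fresh cthread and stores it in periodic_sync_thread, which any
   older sync thread compares against cthread_self() and exits when it no
   longer matches.  The function name here is an assumption.  */
static void
set_sync_interval_sketch (int interval)
{
  /* The new thread becomes the one true sync thread; a previously
     running sync thread will notice the mismatch and die silently.  */
  periodic_sync_thread = cthread_fork ((cthread_fn_t) periodic_sync,
                                       (any_t) interval);
  cthread_detach (periodic_sync_thread);
}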
void
default_pager_thread(
    void *arg)
{
    default_pager_thread_t *dpt;
    mach_port_t pset;
    kern_return_t kr;
    static char here[] = "default_pager_thread";
    mach_msg_options_t server_options;

    dpt = (default_pager_thread_t *) arg;
    cthread_set_data(cthread_self(), (char *) dpt);

    /*
     * Threads handling external objects cannot have
     * privileges.  Otherwise a burst of data-requests for an
     * external object could empty the free-page queue,
     * because the fault code only reserves real pages for
     * requests sent to internal objects.
     */
    if (dpt->dpt_internal) {
        default_pager_thread_privileges();
        pset = default_pager_internal_set;
    } else {
        pset = default_pager_external_set;
    }

    dpt->dpt_initialized_p = TRUE;      /* Ready for requests.  */

    server_options = MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_SEQNO);
    for (;;) {
        kr = mach_msg_server(default_pager_demux_object,
                             default_pager_msg_size,
                             pset,
                             server_options);
        Panic("mach_msg_server failed");
    }
}
/*
 * This message server catches user task exceptions.  Most user exceptions
 * will be received on the thread exception port.  This server serves
 * only exceptions from unknown threads or from external debuggers.
 * It runs in a dedicated thread.
 */
void *
task_exception_catcher(
    void *arg)
{
    struct server_thread_priv_data priv_data;
    kern_return_t kr;
#define MSG_BUFFER_SIZE 8192
    union request_msg {
        mach_msg_header_t hdr;
        mig_reply_error_t death_pill;
        char space[MSG_BUFFER_SIZE];
    } *msg_buffer_1, *msg_buffer_2;
    mach_msg_header_t *request;
    mig_reply_error_t *reply;
    mach_msg_header_t *tmp;

    cthread_set_name(cthread_self(), "task exc catcher");
    server_thread_set_priv_data(cthread_self(), &priv_data);

    uniproc_enter();

    kr = vm_allocate(mach_task_self(),
                     (vm_address_t *) &msg_buffer_1,
                     2 * sizeof *msg_buffer_1,
                     TRUE);
    if (kr != KERN_SUCCESS) {
        MACH3_DEBUG(0, kr, ("task_exception_catcher: vm_allocate"));
        panic("task_exception_catcher");
    }

    msg_buffer_2 = msg_buffer_1 + 1;
    request = &msg_buffer_1->hdr;
    reply = &msg_buffer_2->death_pill;

    do {
        uniproc_exit();
        kr = mach_msg(request, MACH_RCV_MSG,
                      0, sizeof *msg_buffer_1,
                      user_trap_port,
                      MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
        if (kr != MACH_MSG_SUCCESS) {
            MACH3_DEBUG(1, kr, ("task_exception_catcher: mach_msg"));
            panic("task_exception_catcher: receive");
        }
        uniproc_enter();

        if (exc_server(request, &reply->Head)) {
        } else {
            printk("task_exception_catcher: invalid message "
                   "(id = %d = 0x%x)\n",
                   request->msgh_id, request->msgh_id);
        }

        if (reply->Head.msgh_remote_port == MACH_PORT_NULL) {
            /* no reply port, just get another request */
            continue;
        }
        if (!(reply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) &&
            reply->RetCode == MIG_NO_REPLY) {
            /* deallocate reply port right */
            (void) mach_port_deallocate(mach_task_self(),
                                        reply->Head.msgh_remote_port);
            continue;
        }

        /* Send reply to request and receive another */
        uniproc_exit();
        kr = mach_msg(&reply->Head, MACH_SEND_MSG,
                      reply->Head.msgh_size, 0,
                      MACH_PORT_NULL,
                      MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
        uniproc_enter();
        if (kr != MACH_MSG_SUCCESS) {
            if (kr == MACH_SEND_INVALID_DEST) {
                /* deallocate reply port right */
                /* XXX should destroy reply msg */
                (void) mach_port_deallocate(mach_task_self(),
                                            reply->Head.msgh_remote_port);
            } else {
                MACH3_DEBUG(0, kr, ("mach_msg"));
                panic("task_exception_catcher: send");
            }
        }

        tmp = request;
        request = (mach_msg_header_t *) reply;
        reply = (mig_reply_error_t *) tmp;
    } while (1);

    cthread_detach(cthread_self());
    cthread_exit((void *) 0);
    /*NOTREACHED*/
    return (void *) 0;
}
ldap_pvt_thread_t
ldap_pvt_thread_self( void )
{
    return cthread_self();
}
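/* A minimal usage sketch of ldap_pvt_thread_self() above: in the cthreads
   backend the portable thread handle is just the cthread_t returned by
   cthread_self(), so an ownership check reduces to a handle comparison.
   The owner variable and function name here are hypothetical.  */
ldap_pvt_thread_t owner;    /* hypothetical lock-owner record */

int
i_am_owner_sketch( void )
{
    return ldap_pvt_thread_self() == owner;
}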
void WA_yield()
{
   /* cthread_yield() takes no argument; the original passed
      cthread_self() needlessly. */
   cthread_yield();
}
void *
inode_pager_thread(
    void *arg)
{
    struct server_thread_priv_data priv_data;
    kern_return_t kr;
    mach_msg_header_t *in_msg;
    mach_msg_header_t *out_msg;

    cthread_set_name(cthread_self(), "inode pager thread");
    server_thread_set_priv_data(cthread_self(), &priv_data);

    /*
     * The inode pager runs in its own Linux task...
     */
    priv_data.current_task = &inode_pager_task;
#if 0
    inode_pager_task.osfmach3.thread->active_on_cthread = cthread_self();
#endif

    /*
     * Allow this thread to preempt preemptible threads, to solve deadlocks
     * where the server touches some data that is backed by the inode
     * pager.  See user_copy.c.
     */
    priv_data.preemptive = TRUE;

    uniproc_enter();

    kr = vm_allocate(mach_task_self(),
                     (vm_offset_t *) &in_msg,
                     INODE_PAGER_MESSAGE_SIZE,
                     TRUE);
    if (kr != KERN_SUCCESS) {
        MACH3_DEBUG(0, kr, ("inode_pager_thread: vm_allocate"));
        panic("inode_pager_thread: can't allocate in_msg");
    }
    kr = vm_allocate(mach_task_self(),
                     (vm_offset_t *) &out_msg,
                     INODE_PAGER_MESSAGE_SIZE,
                     TRUE);
    if (kr != KERN_SUCCESS) {
        MACH3_DEBUG(0, kr, ("inode_pager_thread: vm_allocate"));
        panic("inode_pager_thread: can't allocate out_msg");
    }

    inode_pager_task.state = TASK_INTERRUPTIBLE;
    server_thread_blocking(FALSE);

    for (;;) {
        kr = mach_msg(in_msg, MACH_RCV_MSG,
                      0, INODE_PAGER_MESSAGE_SIZE,
                      inode_pager_port_set,
                      MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
        server_thread_unblocking(FALSE);        /* can preempt ! */
        inode_pager_task.state = TASK_RUNNING;
        if (kr != MACH_MSG_SUCCESS) {
            MACH3_DEBUG(1, kr, ("inode_pager_thread: mach_msg(RCV)"));
            server_thread_blocking(FALSE);
            continue;
        }

        if (!inode_object_server(in_msg, out_msg)) {
            printk("inode_pager_thread: invalid msg id 0x%x\n",
                   in_msg->msgh_id);
        }

        inode_pager_task.state = TASK_INTERRUPTIBLE;
        server_thread_blocking(FALSE);

        if (MACH_PORT_VALID(out_msg->msgh_remote_port)) {
            kr = mach_msg(out_msg, MACH_SEND_MSG,
                          out_msg->msgh_size, 0,
                          MACH_PORT_NULL,
                          MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
            if (kr != MACH_MSG_SUCCESS) {
                MACH3_DEBUG(1, kr,
                            ("inode_pager_thread: mach_msg(SEND)"));
            }
        }
    }
}