void
flockfile(FILE * fp)
{
        int     idx = file_idx(fp);
        struct  file_lock       *p;

        /* Lock the hash table: */
        _SPINLOCK(&hash_lock);

        /* Check if the static array has not been initialised: */
        if (!init_done) {
                /* Initialise the global array: */
                memset(flh, 0, sizeof(flh));

                /* Flag the initialisation as complete: */
                init_done = 1;
        }

        /* Get a pointer to any existing lock for the file: */
        if ((p = find_lock(idx, fp)) == NULL) {
                /*
                 * The file is not locked, so this thread can
                 * grab the lock:
                 */
                p = do_lock(idx, fp);

                /* Unlock the hash table: */
                _SPINUNLOCK(&hash_lock);

        /*
         * The file is already locked, so check if the
         * running thread is the owner:
         */
        } else if (p->owner == _thread_run) {
                /*
                 * The running thread is already the
                 * owner, so increment the count of
                 * the number of times it has locked
                 * the file:
                 */
                p->count++;

                /* Unlock the hash table: */
                _SPINUNLOCK(&hash_lock);
        } else {
                /*
                 * The file is locked for another thread.
                 * Append this thread to the queue of
                 * threads waiting on the lock.
                 */
                TAILQ_INSERT_TAIL(&p->l_head, _thread_run, qe);

                /* Unlock the hash table: */
                _SPINUNLOCK(&hash_lock);

                /* Wait on the FILE lock: */
                _thread_kern_sched_state(PS_FILE_WAIT, "", 0);
        }
}
void
_mutex_lock_backout(pthread_t pthread)
{
        struct pthread_mutex    *mutex;

        /*
         * Defer signals to protect the scheduling queues from
         * access by the signal handler:
         */
        _thread_kern_sig_defer();

        if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
                mutex = pthread->data.mutex;

                /* Lock the mutex structure: */
                _SPINLOCK(&mutex->lock);

                mutex_queue_remove(mutex, pthread);

                /* This thread is no longer waiting for the mutex: */
                pthread->data.mutex = NULL;

                /* Unlock the mutex structure: */
                _SPINUNLOCK(&mutex->lock);
        }

        /*
         * Undefer and handle pending signals, yielding if
         * necessary:
         */
        _thread_kern_sig_undefer();
}
int
close(int fd)
{
        int     ret;

        /* This is a cancellation point: */
        _thread_enter_cancellation_point();

        if ((fd < 0) || (fd >= _thread_max_fdtsize) ||
            (fd == _thread_kern_pipe[0]) || (fd == _thread_kern_pipe[1])) {
                errno = EBADF;
                ret = -1;
        } else if ((ret = _FD_LOCK(fd, FD_RDWR_CLOSE, NULL)) != -1) {
                /*
                 * We need to hold the entry spinlock till after
                 * _thread_sys_close() to stop races caused by the
                 * fd state transition.
                 */
                _SPINLOCK(&_thread_fd_table[fd]->lock);

                _thread_fd_entry_close(fd);

                /* Close the file descriptor: */
                ret = _thread_sys_close(fd);

                _SPINUNLOCK(&_thread_fd_table[fd]->lock);

                _FD_UNLOCK(fd, FD_RDWR_CLOSE);
        }

        /* No longer in a cancellation point: */
        _thread_leave_cancellation_point();

        return (ret);
}
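/*
 * Example (illustrative sketch, not part of the library): close()
 * validates the descriptor before touching the fd table, so a negative
 * or out-of-range descriptor fails with EBADF rather than corrupting
 * state.  The descriptor value below is arbitrary.
 */
#if 0
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        if (close(-1) == -1 && errno == EBADF)
                printf("close(-1) correctly failed with EBADF\n");
        return (0);
}
#endif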
/*
 * This function is called when a change in base priority occurs for
 * a thread that is holding or waiting for a priority protection or
 * inheritance mutex.  A change in a thread's base priority can effect
 * changes to active priorities of other threads and to the ordering
 * of mutex locking by waiting threads.
 *
 * This must be called while thread scheduling is deferred.
 */
void
_mutex_notify_priochange(pthread_t pthread)
{
        /* Adjust the priorities of any owned priority mutexes: */
        if (pthread->priority_mutex_count > 0) {
                /*
                 * Rescan the mutexes owned by this thread and correct
                 * their priorities to account for this thread's change
                 * in priority.  This has the side effect of changing
                 * the thread's active priority.
                 */
                mutex_rescan_owned(pthread, /* rescan all owned */ NULL);
        }

        /*
         * If this thread is waiting on a priority inheritance mutex,
         * check for priority adjustments.  A change in priority can
         * also effect a ceiling violation(*) for a thread waiting on
         * a priority protection mutex; we don't perform the check here
         * as it is done in pthread_mutex_unlock.
         *
         * (*) It should be noted that a priority change to a thread
         *     _after_ taking and owning a priority ceiling mutex
         *     does not affect ownership of that mutex; the ceiling
         *     priority is only checked before mutex ownership occurs.
         */
        if (pthread->state == PS_MUTEX_WAIT) {
                /* Lock the mutex structure: */
                _SPINLOCK(&pthread->data.mutex->lock);

                /*
                 * Check to make sure this thread is still in the same state
                 * (the spinlock above can yield the CPU to another thread):
                 */
                if (pthread->state == PS_MUTEX_WAIT) {
                        /*
                         * Remove and reinsert this thread into the list of
                         * waiting threads to preserve decreasing priority
                         * order.
                         */
                        mutex_queue_remove(pthread->data.mutex, pthread);
                        mutex_queue_enq(pthread->data.mutex, pthread);

                        if (pthread->data.mutex->m_protocol ==
                            PTHREAD_PRIO_INHERIT) {
                                /* Adjust priorities: */
                                mutex_priority_adjust(pthread->data.mutex);
                        }
                }

                /* Unlock the mutex structure: */
                _SPINUNLOCK(&pthread->data.mutex->lock);
        }
}
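/*
 * Example (sketch, with assumed names): the path above is exercised
 * when a thread's base priority changes while it is blocked on a
 * priority inheritance mutex, so the boost can propagate to the mutex
 * owner.  The scenario below only shows the triggering calls; thread
 * bodies, error handling, and the priority value are illustrative.
 */
#if 0
#include <pthread.h>
#include <sched.h>

pthread_mutex_t m;

void
setup_prio_inherit_mutex(void)
{
        pthread_mutexattr_t attr;

        pthread_mutexattr_init(&attr);
        pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
        pthread_mutex_init(&m, &attr);
        pthread_mutexattr_destroy(&attr);
}

void
boost_waiter(pthread_t waiter)
{
        struct sched_param param;

        /*
         * If "waiter" is currently blocked in pthread_mutex_lock(&m),
         * a priority change like this is the kind of event that ends
         * up in _mutex_notify_priochange(): the waiter is re-queued in
         * priority order and the owner's priority is adjusted.
         */
        param.sched_priority = 30;      /* arbitrary higher priority */
        pthread_setschedparam(waiter, SCHED_RR, &param);
}
#endif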
/*
 * quick_exit:
 *
 *      Abandon a process.  Execute all quick_exit handlers.
 */
void
quick_exit(int status)
{
        struct quick_exit_fn *fn;

        if (__isthreaded)
                _SPINLOCK(&quick_exit_spinlock);

        for (fn = quick_exit_fns; fn != NULL; fn = fn->next) {
                fn->func();
        }

        if (__isthreaded)
                _SPINUNLOCK(&quick_exit_spinlock);

        _exit(status);
}
int
ftrylockfile(FILE * fp)
{
        int     ret = -1;
        int     idx = file_idx(fp);
        struct  file_lock       *p;

        /* Lock the hash table: */
        _SPINLOCK(&hash_lock);

        /* Get a pointer to any existing lock for the file: */
        if ((p = find_lock(idx, fp)) == NULL) {
                /*
                 * The file is not locked, so this thread can
                 * grab the lock:
                 */
                p = do_lock(idx, fp);

        /*
         * The file is already locked, so check if the
         * running thread is the owner:
         */
        } else if (p->owner == _thread_run) {
                /*
                 * The running thread is already the
                 * owner, so increment the count of
                 * the number of times it has locked
                 * the file:
                 */
                p->count++;
        } else {
                /*
                 * The file is locked for another thread,
                 * so this try fails.
                 */
                p = NULL;
        }

        /* Check if the lock was obtained: */
        if (p != NULL)
                /* Return success: */
                ret = 0;

        /* Unlock the hash table: */
        _SPINUNLOCK(&hash_lock);

        return (ret);
}
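/*
 * Example (sketch): ftrylockfile() lets a thread attempt the stdio
 * lock without blocking, e.g. to skip optional output when another
 * thread holds the stream.  The helper below is illustrative only.
 */
#if 0
#include <stdio.h>

void
try_log(const char *msg)
{
        if (ftrylockfile(stderr) == 0) {
                fputs(msg, stderr);
                funlockfile(stderr);
        }
        /* Otherwise another thread owns the lock; drop the message. */
}
#endif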
static int
init_static(pthread_mutex_t *mutex)
{
        int     ret;

        _SPINLOCK(&static_init_lock);

        if (*mutex == NULL)
                ret = pthread_mutex_init(mutex, NULL);
        else
                ret = 0;

        _SPINUNLOCK(&static_init_lock);

        return (ret);
}
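/*
 * Example (sketch): init_static() runs the first time a statically
 * initialized mutex is used.  In this implementation a mutex declared
 * with PTHREAD_MUTEX_INITIALIZER leaves *mutex NULL, so the first
 * lock or trylock allocates the real structure under static_init_lock.
 */
#if 0
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

void
critical(void)
{
        /* The first lock ever triggers init_static() internally. */
        pthread_mutex_lock(&lock);
        /* ... protected work ... */
        pthread_mutex_unlock(&lock);
}
#endif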
/*
 * Fake up a minimal siginfo_t for the given signal unless one is already
 * pending.  The signal number is assumed to be valid.
 */
void
_thread_kill_siginfo(int sig)
{
        struct sigstatus *ss = &_thread_sigq[sig - 1];

        _SPINLOCK(&ss->lock);
        if (ss->pending == 0) {
                ss->pending = 1;
                memset(&ss->siginfo, 0, sizeof ss->siginfo);
                ss->siginfo.si_signo = sig;
                ss->siginfo.si_code = SI_USER;
                ss->siginfo.si_errno = errno;
                ss->siginfo.si_pid = getpid();
        }
        _SPINUNLOCK(&ss->lock);
}
static int
init_static(pthread_rwlock_t *rwlock)
{
        int     ret;

        _SPINLOCK(&static_init_lock);

        if (*rwlock == NULL)
                ret = _pthread_rwlock_init(rwlock, NULL);
        else
                ret = 0;

        _SPINUNLOCK(&static_init_lock);

        return (ret);
}
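/*
 * Example (sketch): the rwlock analogue of the mutex case.  A rwlock
 * declared with PTHREAD_RWLOCK_INITIALIZER stays NULL until first use,
 * at which point the rwlock entry points call init_static() under
 * static_init_lock.  Names below are illustrative.
 */
#if 0
#include <pthread.h>

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;

void
reader(void)
{
        pthread_rwlock_rdlock(&table_lock);     /* may trigger init_static() */
        /* ... read the shared table ... */
        pthread_rwlock_unlock(&table_lock);
}
#endif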
int
_pthread_mutex_destroy(pthread_mutex_t * mutex)
{
        int     ret = 0;

        if (mutex == NULL || *mutex == NULL)
                ret = EINVAL;
        else {
                /* Lock the mutex structure: */
                _SPINLOCK(&(*mutex)->lock);

                /*
                 * Check to see if this mutex is in use:
                 */
                if (((*mutex)->m_owner != NULL) ||
                    (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
                    ((*mutex)->m_refcount != 0)) {
                        ret = EBUSY;

                        /* Unlock the mutex structure: */
                        _SPINUNLOCK(&(*mutex)->lock);
                } else {
                        /*
                         * Free the memory allocated for the mutex
                         * structure:
                         */
                        _MUTEX_ASSERT_NOT_OWNED(*mutex);
                        free(*mutex);

                        /*
                         * Leave the caller's pointer NULL now that
                         * the mutex has been destroyed:
                         */
                        *mutex = NULL;
                }
        }

        /* Return the completion status: */
        return (ret);
}
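/*
 * Example (sketch): destroying a mutex that is still in use fails with
 * EBUSY; the mutex must be unlocked, with no waiters and no condition
 * variable references, before destruction succeeds.
 */
#if 0
#include <assert.h>
#include <errno.h>
#include <pthread.h>

void
destroy_demo(void)
{
        pthread_mutex_t m;

        pthread_mutex_init(&m, NULL);
        pthread_mutex_lock(&m);
        assert(pthread_mutex_destroy(&m) == EBUSY);     /* still owned */
        pthread_mutex_unlock(&m);
        assert(pthread_mutex_destroy(&m) == 0);         /* now succeeds */
}
#endif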
void
_cond_wait_backout(pthread_t pthread)
{
        pthread_cond_t  cond;

        cond = pthread->data.cond;
        if (cond != NULL) {
                /*
                 * Defer signals to protect the scheduling queues
                 * from access by the signal handler:
                 */
                _thread_kern_sig_defer();

                /* Lock the condition variable structure: */
                _SPINLOCK(&cond->lock);

                /* Process according to condition variable type: */
                switch (cond->c_type) {
                /* Fast condition variable: */
                case COND_TYPE_FAST:
                        cond_queue_remove(cond, pthread);

                        /* Check for no more waiters: */
                        if (TAILQ_FIRST(&cond->c_queue) == NULL)
                                cond->c_mutex = NULL;
                        break;

                default:
                        break;
                }

                /* Unlock the condition variable structure: */
                _SPINUNLOCK(&cond->lock);

                /*
                 * Undefer and handle pending signals, yielding if
                 * necessary:
                 */
                _thread_kern_sig_undefer();
        }
}
/*
 * at_quick_exit:
 *
 *      Register a function to be called at quick_exit.
 */
int
at_quick_exit(void (*func)(void))
{
        struct quick_exit_fn *fn;

        fn = malloc(sizeof(struct quick_exit_fn));
        if (!fn)
                return (-1);

        fn->func = func;

        if (__isthreaded)
                _SPINLOCK(&quick_exit_spinlock);

        fn->next = quick_exit_fns;
        quick_exit_fns = fn;

        if (__isthreaded)
                _SPINUNLOCK(&quick_exit_spinlock);

        return (0);
}
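/*
 * Example (sketch): handlers registered with at_quick_exit() run when
 * quick_exit() is called; since new handlers are pushed on the head of
 * the list and quick_exit() walks from the head, the most recently
 * registered handler runs first.  atexit() handlers and stdio flushing
 * are skipped because the process terminates via _exit().
 */
#if 0
#include <stdlib.h>
#include <unistd.h>

static void
release_hardware(void)
{
        write(STDERR_FILENO, "releasing hardware\n", 19);
}

int
main(void)
{
        at_quick_exit(release_hardware);
        quick_exit(0);          /* runs release_hardware, then _exit(0) */
}
#endif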
static inline int
mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
{
        struct pthread  *curthread = _get_curthread();
        int     ret = 0;

        if (mutex == NULL || *mutex == NULL) {
                ret = EINVAL;
        } else {
                /*
                 * Defer signals to protect the scheduling queues from
                 * access by the signal handler:
                 */
                _thread_kern_sig_defer();

                /* Lock the mutex structure: */
                _SPINLOCK(&(*mutex)->lock);

                /* Process according to mutex type: */
                switch ((*mutex)->m_protocol) {
                /* Default POSIX mutex: */
                case PTHREAD_PRIO_NONE:
                        /*
                         * Check if the running thread is not the owner of the
                         * mutex:
                         */
                        if ((*mutex)->m_owner != curthread) {
                                /*
                                 * Return an invalid argument error for no
                                 * owner and a permission error otherwise:
                                 */
                                ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
                        } else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
                            ((*mutex)->m_data.m_count > 0)) {
                                /* Decrement the count: */
                                (*mutex)->m_data.m_count--;
                        } else {
                                /*
                                 * Clear the count in case this is a recursive
                                 * mutex.
                                 */
                                (*mutex)->m_data.m_count = 0;

                                /* Remove the mutex from the thread's queue. */
                                _MUTEX_ASSERT_IS_OWNED(*mutex);
                                TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
                                    (*mutex), m_qe);
                                _MUTEX_INIT_LINK(*mutex);

                                /*
                                 * Get the next thread from the queue of
                                 * threads waiting on the mutex:
                                 */
                                if (((*mutex)->m_owner =
                                    mutex_queue_deq(*mutex)) != NULL) {
                                        /* Make the new owner runnable: */
                                        PTHREAD_NEW_STATE((*mutex)->m_owner,
                                            PS_RUNNING);

                                        /*
                                         * Add the mutex to the thread's list
                                         * of owned mutexes:
                                         */
                                        TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
                                            (*mutex), m_qe);

                                        /*
                                         * The owner is no longer waiting for
                                         * this mutex:
                                         */
                                        (*mutex)->m_owner->data.mutex = NULL;
                                }
                        }
                        break;

                /* POSIX priority inheritance mutex: */
                case PTHREAD_PRIO_INHERIT:
                        /*
                         * Check if the running thread is not the owner of the
                         * mutex:
                         */
                        if ((*mutex)->m_owner != curthread) {
                                /*
                                 * Return an invalid argument error for no
                                 * owner and a permission error otherwise:
                                 */
                                ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
                        } else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
                            ((*mutex)->m_data.m_count > 0)) {
                                /* Decrement the count: */
                                (*mutex)->m_data.m_count--;
                        } else {
                                /*
                                 * Clear the count in case this is a recursive
                                 * mutex.
                                 */
                                (*mutex)->m_data.m_count = 0;

                                /*
                                 * Restore the thread's inherited priority and
                                 * recompute the active priority (being careful
                                 * not to override changes in the thread's base
                                 * priority subsequent to locking the mutex).
                                 */
                                curthread->inherited_priority =
                                    (*mutex)->m_saved_prio;
                                curthread->active_priority =
                                    MAX(curthread->inherited_priority,
                                    curthread->base_priority);

                                /*
                                 * This thread now owns one less priority mutex.
                                 */
                                curthread->priority_mutex_count--;

                                /* Remove the mutex from the thread's queue. */
                                _MUTEX_ASSERT_IS_OWNED(*mutex);
                                TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
                                    (*mutex), m_qe);
                                _MUTEX_INIT_LINK(*mutex);

                                /*
                                 * Get the next thread from the queue of threads
                                 * waiting on the mutex:
                                 */
                                if (((*mutex)->m_owner =
                                    mutex_queue_deq(*mutex)) == NULL)
                                        /* This mutex has no priority. */
                                        (*mutex)->m_prio = 0;
                                else {
                                        /*
                                         * Track number of priority mutexes
                                         * owned:
                                         */
                                        (*mutex)->m_owner->priority_mutex_count++;

                                        /*
                                         * Add the mutex to the thread's list
                                         * of owned mutexes:
                                         */
                                        TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
                                            (*mutex), m_qe);

                                        /*
                                         * The owner is no longer waiting for
                                         * this mutex:
                                         */
                                        (*mutex)->m_owner->data.mutex = NULL;

                                        /*
                                         * Set the priority of the mutex.  Since
                                         * our waiting threads are in descending
                                         * priority order, the priority of the
                                         * mutex becomes the active priority of
                                         * the thread we just dequeued.
                                         */
                                        (*mutex)->m_prio =
                                            (*mutex)->m_owner->active_priority;

                                        /*
                                         * Save the owning thread's inherited
                                         * priority:
                                         */
                                        (*mutex)->m_saved_prio =
                                            (*mutex)->m_owner->inherited_priority;

                                        /*
                                         * The owning thread's inherited priority
                                         * now becomes its active priority (the
                                         * priority of the mutex).
                                         */
                                        (*mutex)->m_owner->inherited_priority =
                                            (*mutex)->m_prio;

                                        /*
                                         * Make the new owner runnable:
                                         */
                                        PTHREAD_NEW_STATE((*mutex)->m_owner,
                                            PS_RUNNING);
                                }
                        }
                        break;

                /* POSIX priority ceiling mutex: */
                case PTHREAD_PRIO_PROTECT:
                        /*
                         * Check if the running thread is not the owner of the
                         * mutex:
                         */
                        if ((*mutex)->m_owner != curthread) {
                                /*
                                 * Return an invalid argument error for no
                                 * owner and a permission error otherwise:
                                 */
                                ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
                        } else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
                            ((*mutex)->m_data.m_count > 0)) {
                                /* Decrement the count: */
                                (*mutex)->m_data.m_count--;
                        } else {
                                /*
                                 * Clear the count in case this is a recursive
                                 * mutex.
                                 */
                                (*mutex)->m_data.m_count = 0;

                                /*
                                 * Restore the thread's inherited priority and
                                 * recompute the active priority (being careful
                                 * not to override changes in the thread's base
                                 * priority subsequent to locking the mutex).
                                 */
                                curthread->inherited_priority =
                                    (*mutex)->m_saved_prio;
                                curthread->active_priority =
                                    MAX(curthread->inherited_priority,
                                    curthread->base_priority);

                                /*
                                 * This thread now owns one less priority mutex.
                                 */
                                curthread->priority_mutex_count--;

                                /* Remove the mutex from the thread's queue. */
                                _MUTEX_ASSERT_IS_OWNED(*mutex);
                                TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
                                    (*mutex), m_qe);
                                _MUTEX_INIT_LINK(*mutex);

                                /*
                                 * Enter a loop to find a waiting thread whose
                                 * active priority will not cause a ceiling
                                 * violation:
                                 */
                                while ((((*mutex)->m_owner =
                                    mutex_queue_deq(*mutex)) != NULL) &&
                                    ((*mutex)->m_owner->active_priority >
                                    (*mutex)->m_prio)) {
                                        /*
                                         * Either the mutex ceiling priority has
                                         * been lowered and/or this thread's
                                         * priority has been raised subsequent
                                         * to the thread being queued on the
                                         * waiting list.
                                         */
                                        tls_set_tcb((*mutex)->m_owner->tcb);
                                        errno = EINVAL;
                                        tls_set_tcb(curthread->tcb);
                                        PTHREAD_NEW_STATE((*mutex)->m_owner,
                                            PS_RUNNING);

                                        /*
                                         * The thread is no longer waiting for
                                         * this mutex:
                                         */
                                        (*mutex)->m_owner->data.mutex = NULL;
                                }

                                /* Check for a new owner: */
                                if ((*mutex)->m_owner != NULL) {
                                        /*
                                         * Track number of priority mutexes
                                         * owned:
                                         */
                                        (*mutex)->m_owner->priority_mutex_count++;

                                        /*
                                         * Add the mutex to the thread's list
                                         * of owned mutexes:
                                         */
                                        TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
                                            (*mutex), m_qe);

                                        /*
                                         * The owner is no longer waiting for
                                         * this mutex:
                                         */
                                        (*mutex)->m_owner->data.mutex = NULL;

                                        /*
                                         * Save the owning thread's inherited
                                         * priority:
                                         */
                                        (*mutex)->m_saved_prio =
                                            (*mutex)->m_owner->inherited_priority;

                                        /*
                                         * The owning thread inherits the
                                         * ceiling priority of the mutex and
                                         * executes at that priority:
                                         */
                                        (*mutex)->m_owner->inherited_priority =
                                            (*mutex)->m_prio;
                                        (*mutex)->m_owner->active_priority =
                                            (*mutex)->m_prio;

                                        /*
                                         * Make the new owner runnable:
                                         */
                                        PTHREAD_NEW_STATE((*mutex)->m_owner,
                                            PS_RUNNING);
                                }
                        }
                        break;

                /* Trap invalid mutex types: */
                default:
                        /* Return an invalid argument error: */
                        ret = EINVAL;
                        break;
                }

                if ((ret == 0) && (add_reference != 0)) {
                        /* Increment the reference count: */
                        (*mutex)->m_refcount++;
                }

                /* Unlock the mutex structure: */
                _SPINUNLOCK(&(*mutex)->lock);

                /*
                 * Undefer and handle pending signals, yielding if
                 * necessary:
                 */
                _thread_kern_sig_undefer();
        }

        /* Return the completion status: */
        return (ret);
}
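/*
 * Example (sketch): all three protocols above reject an unlock by a
 * thread that does not own the mutex.  EPERM is returned when another
 * thread owns it, EINVAL when nobody does.
 */
#if 0
#include <assert.h>
#include <errno.h>
#include <pthread.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

static void *
wrong_unlocker(void *arg)
{
        /* This thread does not own m, so the unlock must fail. */
        assert(pthread_mutex_unlock(&m) == EPERM);
        return (NULL);
}

int
main(void)
{
        pthread_t t;

        pthread_mutex_lock(&m);                 /* main becomes owner */
        pthread_create(&t, NULL, wrong_unlocker, NULL);
        pthread_join(t, NULL);
        pthread_mutex_unlock(&m);               /* owner unlock: OK */
        return (0);
}
#endif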
void
funlockfile(FILE * fp)
{
        int     idx = file_idx(fp);
        struct  file_lock       *p;

        /*
         * Defer signals to protect the scheduling queues from
         * access by the signal handler:
         */
        _thread_kern_sig_defer();

        /* Lock the hash table: */
        _SPINLOCK(&hash_lock);

        /*
         * Get a pointer to the lock for the file and check that
         * the running thread is the one with the lock:
         */
        if ((p = find_lock(idx, fp)) != NULL && p->owner == _thread_run) {
                /*
                 * Check if this thread has locked the FILE
                 * more than once:
                 */
                if (p->count > 1)
                        /*
                         * Decrement the count of the number of
                         * times the running thread has locked this
                         * file:
                         */
                        p->count--;
                else {
                        /*
                         * The running thread will release the
                         * lock now:
                         */
                        p->count = 0;

                        /* Get the new owner of the lock: */
                        if ((p->owner = TAILQ_FIRST(&p->l_head)) != NULL) {
                                /* Pop the thread off the queue: */
                                TAILQ_REMOVE(&p->l_head, p->owner, qe);

                                /*
                                 * This is the first lock for the new
                                 * owner:
                                 */
                                p->count = 1;

                                /* Allow the new owner to run: */
                                PTHREAD_NEW_STATE(p->owner, PS_RUNNING);
                        }
                }
        }

        /* Unlock the hash table: */
        _SPINUNLOCK(&hash_lock);

        /*
         * Undefer and handle pending signals, yielding if
         * necessary:
         */
        _thread_kern_sig_undefer();
}
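/*
 * Example (sketch): flockfile()/funlockfile() keep a multi-call
 * sequence on one FILE atomic with respect to other threads.  The
 * lock is recursive, so stdio calls made while holding it may lock the
 * stream again internally; funlockfile() releases the stream only when
 * the count drops to zero, then hands it to the first queued waiter.
 */
#if 0
#include <stdio.h>

void
print_record(const char *key, int value)
{
        flockfile(stdout);
        fputs(key, stdout);
        fprintf(stdout, " = %d\n", value);
        funlockfile(stdout);
}
#endif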
int
_pthread_cond_broadcast(pthread_cond_t * cond)
{
        int     rval = 0;
        pthread_t pthread;

        if (cond == NULL)
                rval = EINVAL;

        /*
         * If the condition variable is statically initialized, perform dynamic
         * initialization.
         */
        else if (*cond != NULL || (rval = pthread_cond_init(cond, NULL)) == 0) {
                /*
                 * Defer signals to protect the scheduling queues
                 * from access by the signal handler:
                 */
                _thread_kern_sig_defer();

                /* Lock the condition variable structure: */
                _SPINLOCK(&(*cond)->lock);

                /* Process according to condition variable type: */
                switch ((*cond)->c_type) {
                /* Fast condition variable: */
                case COND_TYPE_FAST:
                        /* Increment the sequence number: */
                        (*cond)->c_seqno++;

                        /*
                         * Enter a loop to bring all threads off the
                         * condition queue:
                         */
                        while ((pthread = cond_queue_deq(*cond)) != NULL) {
                                /*
                                 * Wake up the signaled thread:
                                 */
                                PTHREAD_NEW_STATE(pthread, PS_RUNNING);
                        }

                        /* There are no more waiting threads: */
                        (*cond)->c_mutex = NULL;
                        break;

                /* Trap invalid condition variable types: */
                default:
                        /* Return an invalid argument error: */
                        rval = EINVAL;
                        break;
                }

                /* Unlock the condition variable structure: */
                _SPINUNLOCK(&(*cond)->lock);

                /*
                 * Undefer and handle pending signals, yielding if
                 * necessary:
                 */
                _thread_kern_sig_undefer();
        }

        /* Return the completion status: */
        return (rval);
}
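/*
 * Example (sketch): a broadcast wakes every queued waiter, so each
 * woken thread must recheck its predicate after reacquiring the mutex.
 * The flag and names below are illustrative only.
 */
#if 0
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  ready = PTHREAD_COND_INITIALIZER;
static int             started;

void
wait_for_start(void)
{
        pthread_mutex_lock(&lock);
        while (!started)                /* recheck after every wakeup */
                pthread_cond_wait(&ready, &lock);
        pthread_mutex_unlock(&lock);
}

void
announce_start(void)
{
        pthread_mutex_lock(&lock);
        started = 1;
        pthread_cond_broadcast(&ready); /* wake all waiters at once */
        pthread_mutex_unlock(&lock);
}
#endif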
int
_pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
    const struct timespec * abstime)
{
        struct pthread  *curthread = _get_curthread();
        int     rval = 0;
        int     done = 0;
        int     interrupted = 0;
        int     seqno;

        _thread_enter_cancellation_point();

        if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
            abstime->tv_nsec >= 1000000000)
                return (EINVAL);

        /*
         * If the condition variable is statically initialized, perform dynamic
         * initialization.
         */
        if (*cond == NULL && (rval = pthread_cond_init(cond, NULL)) != 0)
                return (rval);

        /*
         * Enter a loop waiting for a condition signal or broadcast
         * to wake up this thread.  A loop is needed in case the waiting
         * thread is interrupted by a signal to execute a signal handler.
         * It is not (currently) possible to remain in the waiting queue
         * while running a handler.  Instead, the thread is interrupted
         * and backed out of the waiting queue prior to executing the
         * signal handler.
         */
        do {
                /* Lock the condition variable structure: */
                _SPINLOCK(&(*cond)->lock);

                /*
                 * If the condvar was statically allocated, properly
                 * initialize the tail queue.
                 */
                if (((*cond)->c_flags & COND_FLAGS_INITED) == 0) {
                        TAILQ_INIT(&(*cond)->c_queue);
                        (*cond)->c_flags |= COND_FLAGS_INITED;
                }

                /* Process according to condition variable type: */
                switch ((*cond)->c_type) {
                /* Fast condition variable: */
                case COND_TYPE_FAST:
                        if ((mutex == NULL) || (((*cond)->c_mutex != NULL) &&
                            ((*cond)->c_mutex != *mutex))) {
                                /* Return invalid argument error: */
                                rval = EINVAL;

                                /* Unlock the condition variable structure: */
                                _SPINUNLOCK(&(*cond)->lock);
                        } else {
                                /* Set the wakeup time: */
                                curthread->wakeup_time.tv_sec =
                                    abstime->tv_sec;
                                curthread->wakeup_time.tv_nsec =
                                    abstime->tv_nsec;

                                /* Reset the timeout and interrupted flags: */
                                curthread->timeout = 0;
                                curthread->interrupted = 0;

                                /*
                                 * Queue the running thread for the condition
                                 * variable:
                                 */
                                cond_queue_enq(*cond, curthread);

                                /* Remember the mutex and sequence number: */
                                (*cond)->c_mutex = *mutex;
                                seqno = (*cond)->c_seqno;

                                /* Unlock the mutex: */
                                if ((rval = _mutex_cv_unlock(mutex)) != 0) {
                                        /*
                                         * Cannot unlock the mutex, so remove
                                         * the running thread from the
                                         * condition variable queue:
                                         */
                                        cond_queue_remove(*cond, curthread);

                                        /* Check for no more waiters: */
                                        if (TAILQ_FIRST(&(*cond)->c_queue) ==
                                            NULL)
                                                (*cond)->c_mutex = NULL;

                                        /*
                                         * Unlock the condition variable
                                         * structure:
                                         */
                                        _SPINUNLOCK(&(*cond)->lock);
                                } else {
                                        /*
                                         * Schedule the next thread and unlock
                                         * the condition variable structure:
                                         */
                                        _thread_kern_sched_state_unlock(PS_COND_WAIT,
                                            &(*cond)->lock, __FILE__, __LINE__);

                                        done = (seqno != (*cond)->c_seqno);
                                        interrupted = curthread->interrupted;

                                        /*
                                         * Check if the wait was interrupted
                                         * (canceled) or needs to be resumed
                                         * after handling a signal.
                                         */
                                        if (interrupted != 0) {
                                                /*
                                                 * Lock the mutex and ignore any
                                                 * errors.  Note that even
                                                 * though this thread may have
                                                 * been canceled, POSIX requires
                                                 * that the mutex be reacquired
                                                 * prior to cancellation.
                                                 */
                                                (void)_mutex_cv_lock(mutex);
                                        } else {
                                                /*
                                                 * Lock the condition variable
                                                 * while removing the thread.
                                                 */
                                                _SPINLOCK(&(*cond)->lock);

                                                cond_queue_remove(*cond,
                                                    curthread);

                                                /* Check for no more waiters: */
                                                if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
                                                        (*cond)->c_mutex = NULL;

                                                _SPINUNLOCK(&(*cond)->lock);

                                                /* Lock the mutex: */
                                                rval = _mutex_cv_lock(mutex);

                                                /*
                                                 * Return ETIMEDOUT if the wait
                                                 * timed out and there wasn't an
                                                 * error locking the mutex:
                                                 */
                                                if ((curthread->timeout != 0) &&
                                                    rval == 0)
                                                        rval = ETIMEDOUT;
                                        }
                                }
                        }
                        break;

                /* Trap invalid condition variable types: */
                default:
                        /* Unlock the condition variable structure: */
                        _SPINUNLOCK(&(*cond)->lock);

                        /* Return an invalid argument error: */
                        rval = EINVAL;
                        break;
                }

                if ((interrupted != 0) && (curthread->continuation != NULL))
                        curthread->continuation((void *)curthread);
        } while ((done == 0) && (rval == 0));

        _thread_leave_cancellation_point();

        /* Return the completion status: */
        return (rval);
}
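/*
 * Example (sketch): abstime is an absolute deadline, not a relative
 * interval.  A typical caller adds the timeout to the current time and
 * loops on the predicate, treating ETIMEDOUT as a distinct outcome.
 * The five-second timeout and names below are illustrative.
 */
#if 0
#include <pthread.h>
#include <sys/time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int             done;

int
wait_up_to_five_seconds(void)
{
        struct timeval  now;
        struct timespec deadline;
        int             ret = 0;

        /* Convert "now + 5s" into an absolute timespec deadline. */
        gettimeofday(&now, NULL);
        deadline.tv_sec = now.tv_sec + 5;
        deadline.tv_nsec = now.tv_usec * 1000;

        pthread_mutex_lock(&lock);
        while (!done && ret == 0)
                ret = pthread_cond_timedwait(&cond, &lock, &deadline);
        pthread_mutex_unlock(&lock);

        return (ret);           /* 0 on wakeup, or ETIMEDOUT */
}
#endif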
void
_thread_arc4_unlock(void)
{
        _SPINUNLOCK(&arc4_lock);
}

void
_thread_atexit_unlock(void)
{
        _SPINUNLOCK(&atexit_lock);
}

void
_thread_malloc_unlock(void)
{
        _SPINUNLOCK(&malloc_lock);
}
int
_pthread_mutex_lock(pthread_mutex_t * mutex)
{
        struct pthread  *curthread = _get_curthread();
        int     ret = 0;

        if (_thread_initial == NULL)
                _thread_init();

        if (mutex == NULL)
                return (EINVAL);

        /*
         * If the mutex is statically initialized, perform the dynamic
         * initialization:
         */
        if ((*mutex == NULL) && ((ret = init_static(mutex)) != 0))
                return (ret);

        /* Reset the interrupted flag: */
        curthread->interrupted = 0;

        /*
         * Enter a loop waiting to become the mutex owner.  We need a
         * loop in case the waiting thread is interrupted by a signal
         * to execute a signal handler.  It is not (currently) possible
         * to remain in the waiting queue while running a handler.
         * Instead, the thread is interrupted and backed out of the
         * waiting queue prior to executing the signal handler.
         */
        do {
                /*
                 * Defer signals to protect the scheduling queues from
                 * access by the signal handler:
                 */
                _thread_kern_sig_defer();

                /* Lock the mutex structure: */
                _SPINLOCK(&(*mutex)->lock);

                /*
                 * If the mutex was statically allocated, properly
                 * initialize the tail queue.
                 */
                if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
                        TAILQ_INIT(&(*mutex)->m_queue);
                        (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
                        _MUTEX_INIT_LINK(*mutex);
                }

                /* Process according to mutex type: */
                switch ((*mutex)->m_protocol) {
                /* Default POSIX mutex: */
                case PTHREAD_PRIO_NONE:
                        if ((*mutex)->m_owner == NULL) {
                                /* Lock the mutex for this thread: */
                                (*mutex)->m_owner = curthread;

                                /* Add to the list of owned mutexes: */
                                _MUTEX_ASSERT_NOT_OWNED(*mutex);
                                TAILQ_INSERT_TAIL(&curthread->mutexq,
                                    (*mutex), m_qe);
                        } else if ((*mutex)->m_owner == curthread)
                                ret = mutex_self_lock(*mutex);
                        else {
                                /*
                                 * Join the queue of threads waiting to lock
                                 * the mutex:
                                 */
                                mutex_queue_enq(*mutex, curthread);

                                /*
                                 * Keep a pointer to the mutex this thread
                                 * is waiting on:
                                 */
                                curthread->data.mutex = *mutex;

                                /*
                                 * Unlock the mutex structure and schedule the
                                 * next thread:
                                 */
                                _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
                                    &(*mutex)->lock, __FILE__, __LINE__);

                                /* Lock the mutex structure again: */
                                _SPINLOCK(&(*mutex)->lock);
                        }
                        break;

                /* POSIX priority inheritance mutex: */
                case PTHREAD_PRIO_INHERIT:
                        /* Check if this mutex is not locked: */
                        if ((*mutex)->m_owner == NULL) {
                                /* Lock the mutex for this thread: */
                                (*mutex)->m_owner = curthread;

                                /* Track number of priority mutexes owned: */
                                curthread->priority_mutex_count++;

                                /*
                                 * The mutex takes on attributes of the
                                 * running thread when there are no waiters.
                                 */
                                (*mutex)->m_prio = curthread->active_priority;
                                (*mutex)->m_saved_prio =
                                    curthread->inherited_priority;
                                curthread->inherited_priority =
                                    (*mutex)->m_prio;

                                /* Add to the list of owned mutexes: */
                                _MUTEX_ASSERT_NOT_OWNED(*mutex);
                                TAILQ_INSERT_TAIL(&curthread->mutexq,
                                    (*mutex), m_qe);
                        } else if ((*mutex)->m_owner == curthread)
                                ret = mutex_self_lock(*mutex);
                        else {
                                /*
                                 * Join the queue of threads waiting to lock
                                 * the mutex:
                                 */
                                mutex_queue_enq(*mutex, curthread);

                                /*
                                 * Keep a pointer to the mutex this thread
                                 * is waiting on:
                                 */
                                curthread->data.mutex = *mutex;

                                if (curthread->active_priority >
                                    (*mutex)->m_prio)
                                        /* Adjust priorities: */
                                        mutex_priority_adjust(*mutex);

                                /*
                                 * Unlock the mutex structure and schedule the
                                 * next thread:
                                 */
                                _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
                                    &(*mutex)->lock, __FILE__, __LINE__);

                                /* Lock the mutex structure again: */
                                _SPINLOCK(&(*mutex)->lock);
                        }
                        break;

                /* POSIX priority protection mutex: */
                case PTHREAD_PRIO_PROTECT:
                        /* Check for a priority ceiling violation: */
                        if (curthread->active_priority > (*mutex)->m_prio)
                                ret = EINVAL;

                        /* Check if this mutex is not locked: */
                        else if ((*mutex)->m_owner == NULL) {
                                /*
                                 * Lock the mutex for the running
                                 * thread:
                                 */
                                (*mutex)->m_owner = curthread;

                                /* Track number of priority mutexes owned: */
                                curthread->priority_mutex_count++;

                                /*
                                 * The running thread inherits the ceiling
                                 * priority of the mutex and executes at that
                                 * priority:
                                 */
                                curthread->active_priority = (*mutex)->m_prio;
                                (*mutex)->m_saved_prio =
                                    curthread->inherited_priority;
                                curthread->inherited_priority =
                                    (*mutex)->m_prio;

                                /* Add to the list of owned mutexes: */
                                _MUTEX_ASSERT_NOT_OWNED(*mutex);
                                TAILQ_INSERT_TAIL(&curthread->mutexq,
                                    (*mutex), m_qe);
                        } else if ((*mutex)->m_owner == curthread)
                                ret = mutex_self_lock(*mutex);
                        else {
                                /*
                                 * Join the queue of threads waiting to lock
                                 * the mutex:
                                 */
                                mutex_queue_enq(*mutex, curthread);

                                /*
                                 * Keep a pointer to the mutex this thread
                                 * is waiting on:
                                 */
                                curthread->data.mutex = *mutex;

                                /* Clear any previous error: */
                                errno = 0;

                                /*
                                 * Unlock the mutex structure and schedule the
                                 * next thread:
                                 */
                                _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
                                    &(*mutex)->lock, __FILE__, __LINE__);

                                /* Lock the mutex structure again: */
                                _SPINLOCK(&(*mutex)->lock);

                                /*
                                 * The thread's priority may have changed while
                                 * waiting for the mutex, causing a ceiling
                                 * violation.
                                 */
                                ret = errno;
                                errno = 0;
                        }
                        break;

                /* Trap invalid mutex types: */
                default:
                        /* Return an invalid argument error: */
                        ret = EINVAL;
                        break;
                }

                /*
                 * Check to see if this thread was interrupted and
                 * is still in the mutex queue of waiting threads:
                 */
                if (curthread->interrupted != 0)
                        mutex_queue_remove(*mutex, curthread);

                /* Unlock the mutex structure: */
                _SPINUNLOCK(&(*mutex)->lock);

                /*
                 * Undefer and handle pending signals, yielding if
                 * necessary:
                 */
                _thread_kern_sig_undefer();
        } while (((*mutex)->m_owner != curthread) && (ret == 0) &&
            (curthread->interrupted == 0));

        if (curthread->interrupted != 0 &&
            curthread->continuation != NULL)
                curthread->continuation((void *)curthread);

        /* Return the completion status: */
        return (ret);
}
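/*
 * Example (sketch): the PTHREAD_PRIO_PROTECT branch above fails with
 * EINVAL when the caller's active priority exceeds the ceiling.  A
 * ceiling mutex is created by setting the protocol and ceiling on the
 * attribute object; the ceiling value below is arbitrary.
 */
#if 0
#include <pthread.h>

pthread_mutex_t ceiling_mutex;

void
init_ceiling_mutex(void)
{
        pthread_mutexattr_t attr;

        pthread_mutexattr_init(&attr);
        pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_PROTECT);
        pthread_mutexattr_setprioceiling(&attr, 20);
        pthread_mutex_init(&ceiling_mutex, &attr);
        pthread_mutexattr_destroy(&attr);

        /*
         * A thread running above priority 20 now gets EINVAL from
         * pthread_mutex_lock(&ceiling_mutex); one at or below the
         * ceiling acquires it and runs at priority 20 while holding it.
         */
}
#endif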
int
_pthread_mutex_trylock(pthread_mutex_t * mutex)
{
        struct pthread  *curthread = _get_curthread();
        int     ret = 0;

        if (mutex == NULL)
                ret = EINVAL;

        /*
         * If the mutex is statically initialized, perform the dynamic
         * initialization:
         */
        else if (*mutex != NULL || (ret = init_static(mutex)) == 0) {
                /*
                 * Defer signals to protect the scheduling queues from
                 * access by the signal handler:
                 */
                _thread_kern_sig_defer();

                /* Lock the mutex structure: */
                _SPINLOCK(&(*mutex)->lock);

                /*
                 * If the mutex was statically allocated, properly
                 * initialize the tail queue.
                 */
                if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
                        TAILQ_INIT(&(*mutex)->m_queue);
                        _MUTEX_INIT_LINK(*mutex);
                        (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
                }

                /* Process according to mutex type: */
                switch ((*mutex)->m_protocol) {
                /* Default POSIX mutex: */
                case PTHREAD_PRIO_NONE:
                        /* Check if this mutex is not locked: */
                        if ((*mutex)->m_owner == NULL) {
                                /* Lock the mutex for the running thread: */
                                (*mutex)->m_owner = curthread;

                                /* Add to the list of owned mutexes: */
                                _MUTEX_ASSERT_NOT_OWNED(*mutex);
                                TAILQ_INSERT_TAIL(&curthread->mutexq,
                                    (*mutex), m_qe);
                        } else if ((*mutex)->m_owner == curthread)
                                ret = mutex_self_trylock(*mutex);
                        else
                                /* Return a busy error: */
                                ret = EBUSY;
                        break;

                /* POSIX priority inheritance mutex: */
                case PTHREAD_PRIO_INHERIT:
                        /* Check if this mutex is not locked: */
                        if ((*mutex)->m_owner == NULL) {
                                /* Lock the mutex for the running thread: */
                                (*mutex)->m_owner = curthread;

                                /* Track number of priority mutexes owned: */
                                curthread->priority_mutex_count++;

                                /*
                                 * The mutex takes on the attributes of the
                                 * running thread when there are no waiters.
                                 */
                                (*mutex)->m_prio = curthread->active_priority;
                                (*mutex)->m_saved_prio =
                                    curthread->inherited_priority;

                                /* Add to the list of owned mutexes: */
                                _MUTEX_ASSERT_NOT_OWNED(*mutex);
                                TAILQ_INSERT_TAIL(&curthread->mutexq,
                                    (*mutex), m_qe);
                        } else if ((*mutex)->m_owner == curthread)
                                ret = mutex_self_trylock(*mutex);
                        else
                                /* Return a busy error: */
                                ret = EBUSY;
                        break;

                /* POSIX priority protection mutex: */
                case PTHREAD_PRIO_PROTECT:
                        /* Check for a priority ceiling violation: */
                        if (curthread->active_priority > (*mutex)->m_prio)
                                ret = EINVAL;

                        /* Check if this mutex is not locked: */
                        else if ((*mutex)->m_owner == NULL) {
                                /* Lock the mutex for the running thread: */
                                (*mutex)->m_owner = curthread;

                                /* Track number of priority mutexes owned: */
                                curthread->priority_mutex_count++;

                                /*
                                 * The running thread inherits the ceiling
                                 * priority of the mutex and executes at that
                                 * priority.
                                 */
                                curthread->active_priority = (*mutex)->m_prio;
                                (*mutex)->m_saved_prio =
                                    curthread->inherited_priority;
                                curthread->inherited_priority =
                                    (*mutex)->m_prio;

                                /* Add to the list of owned mutexes: */
                                _MUTEX_ASSERT_NOT_OWNED(*mutex);
                                TAILQ_INSERT_TAIL(&curthread->mutexq,
                                    (*mutex), m_qe);
                        } else if ((*mutex)->m_owner == curthread)
                                ret = mutex_self_trylock(*mutex);
                        else
                                /* Return a busy error: */
                                ret = EBUSY;
                        break;

                /* Trap invalid mutex types: */
                default:
                        /* Return an invalid argument error: */
                        ret = EINVAL;
                        break;
                }

                /* Unlock the mutex structure: */
                _SPINUNLOCK(&(*mutex)->lock);

                /*
                 * Undefer and handle pending signals, yielding if
                 * necessary:
                 */
                _thread_kern_sig_undefer();
        }

        /* Return the completion status: */
        return (ret);
}
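/*
 * Example (sketch): pthread_mutex_trylock() never blocks, so EBUSY is
 * an expected result rather than an error, e.g. when opportunistically
 * taking on shared work.  The helper below is illustrative only.
 */
#if 0
#include <errno.h>
#include <pthread.h>

static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;

int
try_do_work(void (*fn)(void))
{
        if (pthread_mutex_trylock(&work_lock) == EBUSY)
                return (0);     /* someone else is working; skip */
        fn();
        pthread_mutex_unlock(&work_lock);
        return (1);
}
#endif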