Example No. 1
int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread	*curthread = _get_curthread();
	pthread_mutex_t m;
	int ret = 0;

	if (mutex == NULL || *mutex == NULL)
		ret = EINVAL;
	else {
		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);

		/*
		 * Check to see if this mutex is in use:
		 */
		if (((*mutex)->m_owner != NULL) ||
		    (!TAILQ_EMPTY(&(*mutex)->m_queue)) ||
		    ((*mutex)->m_refcount != 0)) {
			ret = EBUSY;

			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);
		} else {
			/*
			 * Save a pointer to the mutex so it can be freed
			 * and set the caller's pointer to NULL:
			 */
			m = *mutex;
			*mutex = NULL;

			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &m->m_lock);

			/*
			 * Free the memory allocated for the mutex
			 * structure:
			 */
			MUTEX_ASSERT_NOT_OWNED(m);
			MUTEX_DESTROY(m);
		}
	}

	/* Return the completion status: */
	return (ret);
}
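For reference, a caller-side sketch of the behaviour implemented above: this library reports EBUSY when the mutex is still owned, queued on, or referenced, and a successful destroy frees the object and invalidates the caller's handle. The main() below is purely illustrative and uses only the standard POSIX API; POSIX leaves destroying a locked mutex undefined, so the EBUSY result is implementation behaviour, not a portable guarantee.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

int
main(void)
{
	pthread_mutex_t m;
	int ret;

	pthread_mutex_init(&m, NULL);
	pthread_mutex_lock(&m);

	/* Still owned: this implementation reports EBUSY. */
	ret = pthread_mutex_destroy(&m);
	printf("destroy while locked:   %d (EBUSY is %d)\n", ret, EBUSY);

	pthread_mutex_unlock(&m);

	/* No owner, no waiters, no references: destroy succeeds. */
	ret = pthread_mutex_destroy(&m);
	printf("destroy while unlocked: %d\n", ret);
	return (0);
}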
Example No. 2
void
_thr_tsd_unload(struct dl_phdr_info *phdr_info)
{
	struct pthread *curthread = _get_curthread();
	void (*destructor)(void *);
	int key;

	THR_LOCK_ACQUIRE(curthread, &_keytable_lock);
	for (key = 0; key < PTHREAD_KEYS_MAX; key++) {
		if (_thread_keytable[key].allocated) {
			destructor = _thread_keytable[key].destructor;
			if (destructor != NULL) {
				if (__elf_phdr_match_addr(phdr_info, destructor))
					_thread_keytable[key].destructor = NULL;
			}
		}
	}
	THR_LOCK_RELEASE(curthread, &_keytable_lock);
}
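Context for the hook above: it runs when a dynamic object is unloaded, clearing the destructor of any key whose destructor code lives inside that object so thread exit never jumps into unmapped text. The situation it guards against looks roughly like the plugin sketch below; plugin_init() and the file name are hypothetical, and only the standard pthread API is assumed.

/* plugin.c -- hypothetical module built as a shared object and dlopen()ed. */
#include <pthread.h>
#include <stdlib.h>

static pthread_key_t plugin_key;

/* This destructor's code lives in the plugin's text segment. */
static void
plugin_dtor(void *value)
{
	free(value);
}

/* Hypothetical entry point the host calls after dlopen(). */
int
plugin_init(void)
{
	return (pthread_key_create(&plugin_key, plugin_dtor));
}

If the host later dlclose()s the plugin while threads still hold values for plugin_key, the stored plugin_dtor address would dangle; _thr_tsd_unload() nulls that destructor so _thread_cleanupspecific() simply skips the entry.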
Example No. 3
int
_pthread_key_delete(pthread_key_t key)
{
	struct pthread *curthread = _get_curthread();
	int ret = 0;

	if ((unsigned int)key < PTHREAD_KEYS_MAX) {
		/* Lock the key table: */
		THR_LOCK_ACQUIRE(curthread, &_keytable_lock);

		if (_thread_keytable[key].allocated)
			_thread_keytable[key].allocated = 0;
		else
			ret = EINVAL;

		/* Unlock the key table: */
		THR_LOCK_RELEASE(curthread, &_keytable_lock);
	} else
		ret = EINVAL;
	return (ret);
}
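Caller-side view of the same logic, using only the standard API: deleting a live key succeeds once, and in this implementation a second delete reports EINVAL because the slot is no longer marked allocated. POSIX makes any use of a deleted key undefined, so the second call is shown only to illustrate this library's error path.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

int
main(void)
{
	pthread_key_t key;
	int ret;

	if (pthread_key_create(&key, NULL) != 0)
		return (1);

	ret = pthread_key_delete(key);
	printf("first delete:  %d\n", ret);			/* 0 */
	ret = pthread_key_delete(key);
	printf("second delete: %d (EINVAL is %d)\n", ret, EINVAL);
	return (0);
}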
Example No. 4
int
_pthread_cond_destroy(pthread_cond_t *cond)
{
	struct pthread_cond	*cv;
	struct pthread		*curthread = _get_curthread();
	int			rval = 0;

	if (cond == NULL || *cond == NULL)
		rval = EINVAL;
	else {
		/* Lock the condition variable structure: */
		THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);

		/*
		 * Save a pointer to the condition variable and NULL the
		 * caller's pointer so it can no longer be referenced
		 * once the structure is destroyed:
		 */
		cv = *cond;
		*cond = NULL;

		/* Unlock the condition variable structure: */
		THR_LOCK_RELEASE(curthread, &cv->c_lock);

		/* Free the cond lock structure: */
		_lock_destroy(&cv->c_lock);

		/*
		 * Free the memory allocated for the condition
		 * variable structure:
		 */
		free(cv);
	}
	/* Return the completion status: */
	return (rval);
}
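The matching caller-side behaviour: destroying an initialized condition variable succeeds and invalidates the handle, while this implementation rejects a NULL handle with EINVAL. The NULL check is implementation behaviour that portable code should not rely on; the sketch below is illustrative only and assumes the standard POSIX API.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

int
main(void)
{
	pthread_cond_t cv;

	pthread_cond_init(&cv, NULL);
	printf("destroy:       %d\n", pthread_cond_destroy(&cv));	/* 0 */
	printf("destroy(NULL): %d (EINVAL is %d)\n",
	    pthread_cond_destroy(NULL), EINVAL);
	return (0);
}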
Example No. 5
void
_thread_cleanupspecific(void)
{
	struct pthread	*curthread = _get_curthread();
	void		(*destructor)( void *);
	const void	*data = NULL;
	int		key;
	int		i;

	if (curthread->specific == NULL)
		return;

	/* Lock the key table: */
	THR_LOCK_ACQUIRE(curthread, &_keytable_lock);
	for (i = 0; (i < PTHREAD_DESTRUCTOR_ITERATIONS) &&
	    (curthread->specific_data_count > 0); i++) {
		for (key = 0; (key < PTHREAD_KEYS_MAX) &&
		    (curthread->specific_data_count > 0); key++) {
			destructor = NULL;

			if (_thread_keytable[key].allocated &&
			    (curthread->specific[key].data != NULL)) {
				if (curthread->specific[key].seqno ==
				    _thread_keytable[key].seqno) {
					data = curthread->specific[key].data;
					destructor = _thread_keytable[key].destructor;
				}
				curthread->specific[key].data = NULL;
				curthread->specific_data_count--;
			}
			else if (curthread->specific[key].data != NULL) {
				/* 
				 * This can happen if the key is deleted via
				 * pthread_key_delete without first setting the value
				 * to NULL in all threads.  POSIX says that the
				 * destructor is not invoked in this case.
				 */
				curthread->specific[key].data = NULL;
				curthread->specific_data_count--;
			}

			/*
			 * If there is a destructor, call it
			 * with the key table entry unlocked:
			 */
			if (destructor != NULL) {
				/*
				 * Don't hold the lock while calling the
				 * destructor:
				 */
				THR_LOCK_RELEASE(curthread, &_keytable_lock);
				destructor(__DECONST(void *, data));
				THR_LOCK_ACQUIRE(curthread, &_keytable_lock);
			}
		}
	}
	THR_LOCK_RELEASE(curthread, &_keytable_lock);
	free(curthread->specific);
	curthread->specific = NULL;
	if (curthread->specific_data_count > 0)
		stderr_debug("Thread %p has exited with leftover "
		    "thread-specific data after %d destructor iterations\n",
		    curthread, PTHREAD_DESTRUCTOR_ITERATIONS);
}
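The iteration bound matters when a destructor stores a fresh value for a key: each outer pass picks the re-set values up again, and after PTHREAD_DESTRUCTOR_ITERATIONS passes the remaining data is abandoned and the diagnostic above is emitted. Below is a hedged sketch of a destructor that deliberately re-arms its key (the countdown is purely illustrative); the cycle ends once the stored value becomes NULL, well inside the iteration limit.

#include <pthread.h>
#include <stdio.h>

static pthread_key_t key;

/*
 * Re-arm the key from inside the destructor; every non-NULL value
 * stored here forces another destructor pass at thread exit.
 */
static void
rearm_dtor(void *value)
{
	long remaining = (long)value;

	printf("destructor pass, remaining=%ld\n", remaining);
	if (remaining > 1)
		pthread_setspecific(key, (void *)(remaining - 1));
}

static void *
worker(void *arg)
{
	(void)arg;
	pthread_setspecific(key, (void *)2L);
	return (NULL);			/* TSD cleanup runs on exit */
}

int
main(void)
{
	pthread_t tid;

	pthread_key_create(&key, rearm_dtor);
	pthread_create(&tid, NULL, worker, NULL);
	pthread_join(tid, NULL);
	return (0);
}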
Example No. 6
int
_pthread_cond_broadcast(pthread_cond_t * cond)
{
	struct pthread	*curthread = _get_curthread();
	struct pthread	*pthread;
	struct kse_mailbox *kmbx;
	int		rval = 0;

	THR_ASSERT(curthread->locklevel == 0,
	    "cv_broadcast: locklevel is not zero!");
	if (cond == NULL)
		rval = EINVAL;
	/*
	 * If the condition variable is statically initialized, perform
	 * dynamic initialization.
	 */
	else if (*cond != NULL || (rval = _pthread_cond_init(cond, NULL)) == 0) {
		/* Lock the condition variable structure: */
		THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);

		/* Process according to condition variable type: */
		switch ((*cond)->c_type) {
		/* Fast condition variable: */
		case COND_TYPE_FAST:
			/* Increment the sequence number: */
			(*cond)->c_seqno++;

			/*
			 * Enter a loop to bring all threads off the
			 * condition queue:
			 */
			while ((pthread = TAILQ_FIRST(&(*cond)->c_queue))
			    != NULL) {
				THR_SCHED_LOCK(curthread, pthread);
				cond_queue_remove(*cond, pthread);
				pthread->sigbackout = NULL;
				if ((pthread->kseg == curthread->kseg) &&
				    (pthread->active_priority >
				    curthread->active_priority))
					curthread->critical_yield = 1;
				kmbx = _thr_setrunnable_unlocked(pthread);
				THR_SCHED_UNLOCK(curthread, pthread);
				if (kmbx != NULL)
					kse_wakeup(kmbx);
			}

			/* There are no more waiting threads: */
			(*cond)->c_mutex = NULL;
			break;

		/* Trap invalid condition variable types: */
		default:
			/* Return an invalid argument error: */
			rval = EINVAL;
			break;
		}

		/* Unlock the condition variable structure: */
		THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
	}

	/* Return the completion status: */
	return (rval);
}
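From the caller's side the loop above simply drains the wait queue; every woken thread must still recheck its predicate under the mutex, as in this conventional sketch (the ready flag and the thread count of three are illustrative, standard POSIX API assumed):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int ready = 0;

static void *
waiter(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!ready)			/* always recheck the predicate */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	printf("waiter %ld released\n", (long)arg);
	return (NULL);
}

int
main(void)
{
	pthread_t tids[3];
	long i;

	for (i = 0; i < 3; i++)
		pthread_create(&tids[i], NULL, waiter, (void *)i);

	pthread_mutex_lock(&lock);
	ready = 1;
	pthread_cond_broadcast(&cond);	/* wake every queued waiter */
	pthread_mutex_unlock(&lock);

	for (i = 0; i < 3; i++)
		pthread_join(tids[i], NULL);
	return (0);
}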
Example No. 7
int
_pthread_cond_signal(pthread_cond_t * cond)
{
	struct pthread	*curthread = _get_curthread();
	struct pthread	*pthread;
	struct kse_mailbox *kmbx;
	int		rval = 0;

	THR_ASSERT(curthread->locklevel == 0,
	    "cv_signal: locklevel is not zero!");
	if (cond == NULL)
		rval = EINVAL;
	/*
	 * If the condition variable is statically initialized, perform
	 * dynamic initialization.
	 */
	else if (*cond != NULL || (rval = _pthread_cond_init(cond, NULL)) == 0) {
		/* Lock the condition variable structure: */
		THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);

		/* Process according to condition variable type: */
		switch ((*cond)->c_type) {
		/* Fast condition variable: */
		case COND_TYPE_FAST:
			/* Increment the sequence number: */
			(*cond)->c_seqno++;

			/*
			 * Wakeups have to be done with the CV lock held;
			 * otherwise there is a race condition where the
			 * thread can timeout, run on another KSE, and enter
			 * another blocking state (including blocking on a CV).
			 */
			if ((pthread = TAILQ_FIRST(&(*cond)->c_queue))
			    != NULL) {
				THR_SCHED_LOCK(curthread, pthread);
				cond_queue_remove(*cond, pthread);
				pthread->sigbackout = NULL;
				if ((pthread->kseg == curthread->kseg) &&
				    (pthread->active_priority >
				    curthread->active_priority))
					curthread->critical_yield = 1;
				kmbx = _thr_setrunnable_unlocked(pthread);
				THR_SCHED_UNLOCK(curthread, pthread);
				if (kmbx != NULL)
					kse_wakeup(kmbx);
			}
			/* Check for no more waiters: */
			if (TAILQ_EMPTY(&(*cond)->c_queue))
				(*cond)->c_mutex = NULL;
			break;

		/* Trap invalid condition variable types: */
		default:
			/* Return an invalid argument error: */
			rval = EINVAL;
			break;
		}

		/* Unlock the condition variable structure: */
		THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
	}

	/* Return the completion status: */
	return (rval);
}
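In contrast with broadcast, signal dequeues at most one waiter. The usual single-handoff pattern looks like the sketch below (the one-slot item variable is illustrative, standard POSIX API assumed):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int item = 0;			/* 0 means "empty" */

static void *
consumer(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (item == 0)
		pthread_cond_wait(&cond, &lock);
	printf("consumed %d\n", item);
	item = 0;
	pthread_mutex_unlock(&lock);
	return (NULL);
}

int
main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, consumer, NULL);

	pthread_mutex_lock(&lock);
	item = 42;
	pthread_cond_signal(&cond);	/* wakes at most one waiter */
	pthread_mutex_unlock(&lock);

	pthread_join(tid, NULL);
	return (0);
}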
Example No. 8
int
_pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
		       const struct timespec * abstime)
{
	struct pthread	*curthread = _get_curthread();
	int	rval = 0;
	int	done = 0;
	int	mutex_locked = 1;
	int	seqno;

	THR_ASSERT(curthread->locklevel == 0,
	    "cv_timedwait: locklevel is not zero!");

	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);
	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	if (*cond == NULL && (rval = _pthread_cond_init(cond, NULL)) != 0)
		return (rval);

	if (!_kse_isthreaded())
		_kse_setthreaded(1);

	/*
	 * Enter a loop waiting for a condition signal or broadcast
	 * to wake up this thread.  A loop is needed in case the waiting
	 * thread is interrupted by a signal to execute a signal handler.
	 * It is not (currently) possible to remain in the waiting queue
	 * while running a handler.  Instead, the thread is interrupted
	 * and backed out of the waiting queue prior to executing the
	 * signal handler.
	 */

	/* Lock the condition variable structure: */
	THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
	seqno = (*cond)->c_seqno;
	do {
		/*
		 * If the condvar was statically allocated, properly
		 * initialize the tail queue.
		 */
		if (((*cond)->c_flags & COND_FLAGS_INITED) == 0) {
			TAILQ_INIT(&(*cond)->c_queue);
			(*cond)->c_flags |= COND_FLAGS_INITED;
		}

		/* Process according to condition variable type: */
		switch ((*cond)->c_type) {
		/* Fast condition variable: */
		case COND_TYPE_FAST:
			if ((mutex == NULL) || (((*cond)->c_mutex != NULL) &&
			    ((*cond)->c_mutex != *mutex))) {
				/* Return invalid argument error: */
				rval = EINVAL;
			} else {
				/* Reset the timeout and interrupted flags: */
				curthread->timeout = 0;
				curthread->interrupted = 0;

				/*
				 * Queue the running thread for the condition
				 * variable:
				 */
				cond_queue_enq(*cond, curthread);

				/* Unlock the mutex: */
				if (mutex_locked &&
				   ((rval = _mutex_cv_unlock(mutex)) != 0)) {
					/*
					 * Cannot unlock the mutex; remove the
					 * running thread from the condition
					 * variable queue: 
					 */
					cond_queue_remove(*cond, curthread);
				} else {
					/* Remember the mutex: */
					(*cond)->c_mutex = *mutex;

					/*
					 * Don't unlock the mutex the next
					 * time through the loop (if the
					 * thread has to be requeued after
					 * handling a signal).
					 */
					mutex_locked = 0;

					/*
					 * This thread is active and is in a
					 * critical region (holding the cv
					 * lock); we should be able to safely
					 * set the state.
					 */
					THR_SCHED_LOCK(curthread, curthread);
					/* Set the wakeup time: */
					curthread->wakeup_time.tv_sec =
					    abstime->tv_sec;
					curthread->wakeup_time.tv_nsec =
					    abstime->tv_nsec;
					THR_SET_STATE(curthread, PS_COND_WAIT);

					/* Remember the CV: */
					curthread->data.cond = *cond;
					curthread->sigbackout = cond_wait_backout;
					THR_SCHED_UNLOCK(curthread, curthread);

					/* Unlock the CV structure: */
					THR_LOCK_RELEASE(curthread,
					    &(*cond)->c_lock);

					/* Schedule the next thread: */
					_thr_sched_switch(curthread);

					/*
					 * XXX - This really isn't a good check
					 * since there can be more than one
					 * thread waiting on the CV.  Signals
					 * sent to threads waiting on mutexes
					 * or CVs should really be deferred
					 * until the threads are no longer
					 * waiting, but POSIX says that signals
					 * should be sent "as soon as possible".
					 */
					done = (seqno != (*cond)->c_seqno);
					if (done && !THR_IN_CONDQ(curthread)) {
						/*
						 * The thread is dequeued, so
						 * it is safe to clear these.
						 */
						curthread->data.cond = NULL;
						curthread->sigbackout = NULL;
						check_continuation(curthread,
						    NULL, mutex);
						return (_mutex_cv_lock(mutex));
					}

					/* Relock the CV structure: */
					THR_LOCK_ACQUIRE(curthread,
					    &(*cond)->c_lock);

					/*
					 * Clear these after taking the lock to
					 * prevent a race condition where a
					 * signal can arrive before dequeueing
					 * the thread.
					 */
					curthread->data.cond = NULL;
					curthread->sigbackout = NULL;

					done = (seqno != (*cond)->c_seqno);

					if (THR_IN_CONDQ(curthread)) {
						cond_queue_remove(*cond,
						    curthread);

						/* Check for no more waiters: */
						if (TAILQ_EMPTY(&(*cond)->c_queue))
							(*cond)->c_mutex = NULL;
					}

					if (curthread->timeout != 0) {
						/* The wait timedout. */
						rval = ETIMEDOUT;
					}
				}
			}
			break;

		/* Trap invalid condition variable types: */
		default:
			/* Return an invalid argument error: */
			rval = EINVAL;
			break;
		}

		check_continuation(curthread, *cond,
		    mutex_locked ? NULL : mutex);
	} while ((done == 0) && (rval == 0));

	/* Unlock the condition variable structure: */
	THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);

	if (mutex_locked == 0)
		_mutex_cv_lock(mutex);

	/* Return the completion status: */
	return (rval);
}
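The absolute-time contract is the usual caller-side pitfall for this function: abstime is a point on the realtime clock, not a relative interval, and on ETIMEDOUT the mutex is reacquired before returning. A minimal sketch of the conventional pattern, assuming only the standard POSIX API; the predicate is never satisfied here, so the wait times out after roughly two seconds.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int ready = 0;			/* the predicate; never set here */

int
main(void)
{
	struct timespec abstime;
	int ret = 0;

	/* Deadline = now + 2 seconds on the realtime clock. */
	clock_gettime(CLOCK_REALTIME, &abstime);
	abstime.tv_sec += 2;

	pthread_mutex_lock(&lock);
	while (!ready && ret == 0)
		ret = pthread_cond_timedwait(&cond, &lock, &abstime);
	/* The mutex is held again here, even when ret == ETIMEDOUT. */
	pthread_mutex_unlock(&lock);

	printf("timedwait returned %s\n",
	    ret == ETIMEDOUT ? "ETIMEDOUT" : "0");
	return (0);
}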