Example #1
int
pthread_barrier_wait(pthread_barrier_t *barrier)
{
	pthread_mutex_t *interlock;
	pthread_t self;
	unsigned int gen;

	if (barrier->ptb_magic != _PT_BARRIER_MAGIC)
		return EINVAL;

	/*
	 * A single arbitrary thread is supposed to return
	 * PTHREAD_BARRIER_SERIAL_THREAD, and everyone else
	 * is supposed to return 0.  Since pthread_barrier_wait()
	 * is not a cancellation point, this is trivial; we
	 * simply elect that the thread that causes the barrier
	 * to be satisfied gets the special return value.  Note
	 * that this final thread does not actually need to block,
	 * but instead is responsible for waking everyone else up.
	 */
	self = pthread__self();
	interlock = pthread__hashlock(barrier);
	pthread_mutex_lock(interlock);
	if (barrier->ptb_curcount + 1 == barrier->ptb_initcount) {
		barrier->ptb_generation++;
		barrier->ptb_curcount = 0;
		pthread__unpark_all(&barrier->ptb_waiters, self,
		    interlock);
		pthread_mutex_unlock(interlock);
		return PTHREAD_BARRIER_SERIAL_THREAD;
	}
	barrier->ptb_curcount++;
	gen = barrier->ptb_generation;
	for (;;) {
		PTQ_INSERT_TAIL(&barrier->ptb_waiters, self, pt_sleep);
		self->pt_sleepobj = &barrier->ptb_waiters;
		(void)pthread__park(self, interlock, &barrier->ptb_waiters,
		    NULL, 0, __UNVOLATILE(&interlock->ptm_waiters));
		if (__predict_true(gen != barrier->ptb_generation)) {
			break;
		}
		pthread_mutex_lock(interlock);
		if (gen != barrier->ptb_generation) {
			pthread_mutex_unlock(interlock);
			break;
		}
	}

	return 0;
}
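The comment in the implementation above describes the barrier contract: the thread whose arrival satisfies the barrier returns PTHREAD_BARRIER_SERIAL_THREAD and wakes the others, which return 0. Below is a minimal usage sketch of the public POSIX barrier API that exercises that contract; the thread count, worker function, and printed messages are illustrative assumptions, not part of the source above.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NTHREADS 4	/* illustrative thread count */

static pthread_barrier_t barrier;

static void *
worker(void *arg)
{
	int rv;

	/* Every worker blocks here until NTHREADS threads have arrived. */
	rv = pthread_barrier_wait(&barrier);
	if (rv == PTHREAD_BARRIER_SERIAL_THREAD)
		printf("thread %ld: elected serial thread\n", (long)(intptr_t)arg);
	else
		printf("thread %ld: released with 0\n", (long)(intptr_t)arg);
	return NULL;
}

int
main(void)
{
	pthread_t t[NTHREADS];
	int i;

	/* The barrier is satisfied once NTHREADS threads call wait. */
	pthread_barrier_init(&barrier, NULL, NTHREADS);
	for (i = 0; i < NTHREADS; i++)
		pthread_create(&t[i], NULL, worker, (void *)(intptr_t)i);
	for (i = 0; i < NTHREADS; i++)
		pthread_join(t[i], NULL);
	pthread_barrier_destroy(&barrier);
	return 0;
}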
Example #2
static int
pthread__rwlock_wrlock(pthread_rwlock_t *ptr, const struct timespec *ts)
{
	uintptr_t owner, next;
	pthread_mutex_t *interlock;
	pthread_t self;
	int error;

	self = pthread__self();

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & RW_THREAD) == 0) {
			next = rw_cas(ptr, owner,
			    (uintptr_t)self | RW_WRITE_LOCKED);
			if (owner == next) {
				/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			continue;
		}

		if ((owner & RW_THREAD) == (uintptr_t)self)
			return EDEADLK;

		/* If held write locked and no waiters, spin. */
		if (pthread__rwlock_spin(owner)) {
			while (pthread__rwlock_spin(owner)) {
				owner = (uintptr_t)ptr->ptr_owner;
			}
			next = owner;
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 */
		next = rw_cas(ptr, owner,
		    owner | RW_HAS_WAITERS | RW_WRITE_WANTED);
		if (owner != next) {
			pthread_mutex_unlock(interlock);
			continue;
		}

		/* The waiters bit is set - it's safe to sleep. */
		PTQ_INSERT_TAIL(&ptr->ptr_wblocked, self, pt_sleep);
		self->pt_rwlocked = _RW_WANT_WRITE;
		self->pt_sleepobj = &ptr->ptr_wblocked;
		self->pt_early = pthread__rwlock_early;
		error = pthread__park(self, interlock, &ptr->ptr_wblocked,
		    ts, 0, &ptr->ptr_wblocked);

		/* Did we get the lock? */
		if (self->pt_rwlocked == _RW_LOCKED) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
		if (error != 0)
			return error;

		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "direct handoff failure");
	}
}
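For context, here is a minimal sketch of how the public rwlock API reaches the internal write-lock path above: a reader holds the lock, so a timed write attempt cannot take the fast CAS path and instead sets the waiter bits, parks with the supplied ts, and eventually returns ETIMEDOUT. The reader thread, the sleep-based ordering, and the one-second timeout are illustrative assumptions, not taken from the source.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

/*
 * Hypothetical reader: holds the lock long enough that the writer
 * cannot acquire it immediately and must park with a timeout.
 */
static void *
reader(void *arg)
{
	(void)arg;
	pthread_rwlock_rdlock(&lock);
	sleep(3);
	pthread_rwlock_unlock(&lock);
	return NULL;
}

int
main(void)
{
	pthread_t t;
	struct timespec ts;
	int error;

	pthread_create(&t, NULL, reader, NULL);
	sleep(1);	/* crude ordering: let the reader grab the lock first */

	/* Absolute CLOCK_REALTIME timeout, handed down as ts above. */
	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += 1;
	error = pthread_rwlock_timedwrlock(&lock, &ts);
	if (error == ETIMEDOUT)
		printf("writer parked and timed out\n");
	else if (error == 0)
		pthread_rwlock_unlock(&lock);

	pthread_join(t, NULL);
	return 0;
}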