Example no. 1
0
void play(pthread_cond_t *cond, struct service *service, struct horse *horses, size_t horse_num) {
	init_race(service);

	while(run) {
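		/* Tick roughly once per second: if the race has been
		 * flagged finished, wake all waiters and wait for the
		 * horse threads to drain; otherwise sleep and
		 * broadcast again. */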
		_pthread_mutex_lock(service->mfinished);
		if (service->finished) {
			_pthread_mutex_unlock(service->mfinished);

			// wake anyone waiting on the condition variable
			_pthread_cond_broadcast(cond);
check_for_horses:
			_pthread_mutex_lock(service->mcur_run);
			if (!service->cur_run) {
				_pthread_mutex_unlock(service->mcur_run);	
				break;
			}
			else {
				_pthread_mutex_unlock(service->mcur_run);
				_sleep(1, 0);
				goto check_for_horses;
			}
		}
		_pthread_mutex_unlock(service->mfinished);	
		_sleep(1, 0);
		_pthread_cond_broadcast(cond);
	}

	post_race(service, horses, horse_num);
} 
Example no. 2
0
int
_pthread_once(pthread_once_t *once_control, void (*init_routine) (void))
{
	int wakeup = 0;

	if (once_control->state == ONCE_DONE)
		return (0);
	_pthread_mutex_lock(&once_lock);
	while (*(volatile int *)&(once_control->state) == ONCE_IN_PROGRESS)
		_pthread_cond_wait(&once_cv, &once_lock);
	/*
	 * If the previous thread was canceled, the state could
	 * still be ONCE_NEVER_DONE, so we need to check it again.
	 */
	if (*(volatile int *)&(once_control->state) == ONCE_NEVER_DONE) {
		once_control->state = ONCE_IN_PROGRESS;
		_pthread_mutex_unlock(&once_lock);
		_pthread_cleanup_push(once_cancel_handler, once_control);
		init_routine();
		_pthread_cleanup_pop(0);
		_pthread_mutex_lock(&once_lock);
		once_control->state = ONCE_DONE;
		wakeup = 1;
	}
	_pthread_mutex_unlock(&once_lock);
	if (wakeup)
		_pthread_cond_broadcast(&once_cv);
	return (0);
}
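The state machine above is what makes concurrent first calls safe: one thread runs the initializer while the rest sleep on once_cv. For reference, a minimal, self-contained sketch of the calling pattern (init_table and get_table are hypothetical names):

#include <pthread.h>
#include <stdio.h>

static pthread_once_t table_once = PTHREAD_ONCE_INIT;
static int table_ready;

/* Runs exactly once, no matter how many threads race into
 * get_table() below. */
static void
init_table(void)
{
	table_ready = 1;
}

static int
get_table(void)
{
	pthread_once(&table_once, init_table);
	return (table_ready);
}

int
main(void)
{
	printf("%d\n", get_table());
	return (0);
}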
Example no. 3
0
struct tm *
gmtime(const time_t * const timep)
{
	static pthread_mutex_t gmtime_mutex = PTHREAD_MUTEX_INITIALIZER;
	static pthread_key_t gmtime_key = -1;
	struct tm *p_tm;

	if (__isthreaded != 0) {
		if (gmtime_key < 0) {
			_pthread_mutex_lock(&gmtime_mutex);
			if (gmtime_key < 0) {
				if (_pthread_key_create(&gmtime_key, free) < 0) {
					_pthread_mutex_unlock(&gmtime_mutex);
					return(NULL);
				}
			}
			_pthread_mutex_unlock(&gmtime_mutex);
		}
		/*
		 * Changed to follow POSIX.1 threads standard, which
		 * is what BSD currently has.
		 */
		if ((p_tm = _pthread_getspecific(gmtime_key)) == NULL) {
			if ((p_tm = (struct tm *)malloc(sizeof(struct tm)))
			    == NULL) {
				return(NULL);
			}
			_pthread_setspecific(gmtime_key, p_tm);
		}
		return gmtsub(timep, 0L, p_tm);
	} else {
		return gmtsub(timep, 0L, &tm);
	}
}
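gmtime() above follows the standard thread-specific-data recipe: create the key once under a mutex, then hand each thread its own heap buffer, which the key's destructor frees at thread exit. A minimal sketch of the same pattern, with hypothetical names (thread_buf, buf_key):

#include <pthread.h>
#include <stdlib.h>

static pthread_once_t buf_once = PTHREAD_ONCE_INIT;
static pthread_key_t buf_key;

static void
make_key(void)
{
	/* free() is run on each thread's buffer at thread exit. */
	(void)pthread_key_create(&buf_key, free);
}

/* Return this thread's private scratch buffer, allocating it
 * on first use. */
static char *
thread_buf(void)
{
	char *p;

	pthread_once(&buf_once, make_key);
	if ((p = pthread_getspecific(buf_key)) == NULL) {
		if ((p = malloc(64)) == NULL)
			return (NULL);
		pthread_setspecific(buf_key, p);
	}
	return (p);
}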
Example no. 4
0
void init_race(struct service *service) {
	// copy horse bets
	_pthread_mutex_lock(service->mhb);
	memcpy(service->copy_horse_bet, service->horse_bet, sizeof(unsigned int) * HORSE_RUN);	
	_pthread_mutex_unlock(service->mhb);
	
	// make sure no one is still using the value
	_pthread_mutex_lock(service->mfinished);
	service->finished = 0;
	_pthread_mutex_unlock(service->mfinished);

	start_horses(service);
}
Example no. 5
0
void update_money(struct user *user, unsigned int *_money) {
	if (user->horse && !strcmp(user->horse->name, user->service->win->name)) {
		_pthread_mutex_lock(user->service->mbank);

		_pthread_mutex_lock(user->service->mhb);	
		*_money = user->service->bank / user->service->copy_horse_bet[user->id];
		--user->service->copy_horse_bet[user->id];	
		user->service->bank -= *_money;
		user->money += *_money;
		
		_pthread_mutex_unlock(user->service->mhb);
		_pthread_mutex_unlock(user->service->mbank);			
	} 
}
Example no. 6
0
char* get_next_race_time(struct service *service) {
	char *ret;
	_pthread_mutex_lock(service->mnr);
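	/* The mutex serializes access to service->next_race; note
	 * that ctime() itself may return a pointer to shared
	 * static storage. */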
	ret = ctime(&service->next_race);
	_pthread_mutex_unlock(service->mnr);
	return ret;	
}
Example no. 7
0
int
_pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
			      int prioceiling, int *old_ceiling)
{
	int ret = 0;
	int tmp;

	if ((mutex == NULL) || (*mutex == NULL))
		ret = EINVAL;
	else if ((*mutex)->m_protocol != PTHREAD_PRIO_PROTECT)
		ret = EINVAL;
	/* Lock the mutex: */
	else if ((ret = _pthread_mutex_lock(mutex)) == 0) {
		tmp = (*mutex)->m_prio;
		/* Set the new ceiling: */
		(*mutex)->m_prio = prioceiling;

		/* Unlock the mutex: */
		ret = _pthread_mutex_unlock(mutex);

		/* Return the old ceiling: */
		*old_ceiling = tmp;
	}
	return(ret);
}
Example no. 8
0
void 
_funlockfile(FILE *fp)
{
	pthread_t	curthread = _pthread_self();

	/*
	 * Check if this file is owned by the current thread:
	 */
	if (fp->_lock->fl_owner == curthread) {
		/*
		 * Check if this thread has locked the FILE
		 * more than once:
		 */
		if (fp->_lock->fl_count > 1)
			/*
			 * Decrement the count of the number of
			 * times the running thread has locked this
			 * file:
			 */
			fp->_lock->fl_count--;
		else {
			/*
			 * The running thread will release the
			 * lock now:
			 */
			fp->_lock->fl_count = 0;
#ifndef __SYMBIAN32__			
			fp->_lock->fl_owner = NULL;
#else //__SYMBIAN32__
			fp->_lock->fl_owner = 0;
#endif //__SYMBIAN32__
			_pthread_mutex_unlock(&fp->_lock->fl_mutex);
		}
	}
}
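_funlockfile() is the release half of the POSIX stdio locking interface; the fl_count field is what makes flockfile() recursive. A caller that needs several writes to come out atomically uses the pair like this (sketch):

#include <stdio.h>

/* Emit a two-line record without output from other threads
 * interleaving between the two fprintf() calls. */
static void
log_pair(FILE *fp, int a, int b)
{
	flockfile(fp);		/* acquires fl_mutex, sets fl_owner */
	fprintf(fp, "a=%d\n", a);
	fprintf(fp, "b=%d\n", b);
	funlockfile(fp);	/* drops fl_count / releases the lock */
}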
Example no. 9
0
/*
 * Reclaim memory for telldir cookies which weren't used.
 */
void
_reclaim_telldir(DIR *dirp)
{
    struct ddloc *lp;
    struct ddloc **prevlp;
    int i;

    if (__isthreaded)
        _pthread_mutex_lock(&dd_hash_lock);
    for (i = 0; i < NDIRHASH; i++) {
        prevlp = &dd_hash[i];
        lp = *prevlp;
        while (lp != NULL) {
            if (lp->loc_dirp == dirp) {
                *prevlp = lp->loc_next;
                free((caddr_t)lp);
                lp = *prevlp;
                continue;
            }
            prevlp = &lp->loc_next;
            lp = lp->loc_next;
        }
    }
    if (__isthreaded)
        _pthread_mutex_unlock(&dd_hash_lock);
}
Example no. 10
0
int
_sem_wait(sem_t *sem)
{
	struct pthread *curthread;
	int retval;

	if (sem_check_validity(sem) != 0)
		return (-1);

	curthread = _get_curthread();
	if ((*sem)->syssem != 0) {
		_thr_cancel_enter(curthread);
		retval = ksem_wait((*sem)->semid);
		_thr_cancel_leave(curthread, retval != 0);
	}
	else {
		_pthread_testcancel();
		_pthread_mutex_lock(&(*sem)->lock);

		while ((*sem)->count <= 0) {
			(*sem)->nwaiters++;
			THR_CLEANUP_PUSH(curthread, decrease_nwaiters, sem);
			_pthread_cond_wait(&(*sem)->gtzero, &(*sem)->lock);
			THR_CLEANUP_POP(curthread, 0);
			(*sem)->nwaiters--;
		}
		(*sem)->count--;

		_pthread_mutex_unlock(&(*sem)->lock);

		retval = 0;
	}
	return (retval);
}
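The userland branch of _sem_wait() is the textbook counting semaphore built from a mutex and a condition variable; stripped of the cancellation bookkeeping, it reduces to a few lines. A self-contained sketch of the same wait/post protocol (mysem is a hypothetical stand-in):

#include <pthread.h>

struct mysem {
	pthread_mutex_t	lock;
	pthread_cond_t	gtzero;		/* signalled when count > 0 */
	unsigned int	count;
};

static void
mysem_wait(struct mysem *s)
{
	pthread_mutex_lock(&s->lock);
	/* A while loop, not an if: it guards against spurious
	 * wakeups and against another waiter consuming the count
	 * between the signal and our wakeup. */
	while (s->count == 0)
		pthread_cond_wait(&s->gtzero, &s->lock);
	s->count--;
	pthread_mutex_unlock(&s->lock);
}

static void
mysem_post(struct mysem *s)
{
	pthread_mutex_lock(&s->lock);
	s->count++;
	pthread_cond_signal(&s->gtzero);
	pthread_mutex_unlock(&s->lock);
}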
Example no. 11
0
/*
 * seek to an entry in a directory.
 * Only values returned by "telldir" should be passed to seekdir.
 */
void
_seekdir(DIR *dirp, long loc)
{
    struct ddloc *lp;
    struct dirent *dp;

    if (__isthreaded)
        _pthread_mutex_lock(&dd_hash_lock);
    for (lp = dd_hash[LOCHASH(loc)]; lp; lp = lp->loc_next) {
        if (lp->loc_dirp == dirp && lp->loc_index == loc)
            break;
    }
    if (__isthreaded)
        _pthread_mutex_unlock(&dd_hash_lock);
    if (lp == NULL)
        return;
    if (lp->loc_loc == dirp->dd_loc && lp->loc_seek == dirp->dd_seek)
        return;
    lseek(dirp->dd_fd, lp->loc_seek, SEEK_SET);
    dirp->dd_seek = lp->loc_seek;
    dirp->dd_loc = 0;
    dirp->dd_lastseek = loc;

    /*
     * Scan the buffer until we find dd_loc.  If the directory
     * changed between the tell and seek it is possible to
     * load a new buffer or for dd_loc to not match directly.
     */
    while (dirp->dd_loc < lp->loc_loc && dirp->dd_seek == lp->loc_seek) {
        dp = _readdir_unlocked(dirp, 0);
        if (dp == NULL)
            break;
    }
}
Example no. 12
0
int
_pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
{
	pthread_rwlock_t prwlock;
	int ret;

	if (rwlock == NULL)
		return(EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (prwlock == NULL) {
		if ((ret = init_static(rwlock)) != 0)
			return(ret);

		prwlock = *rwlock;
	}

	/* grab the monitor lock */
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
		return(ret);

	if (prwlock->state != 0)
		ret = EBUSY;
	else
		/* indicate we are locked for writing */
		prwlock->state = -1;

	/* see the comment on this in pthread_rwlock_rdlock */
	_pthread_mutex_unlock(&prwlock->lock);

	return (ret);
}
Example no. 13
0
int
_pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
{
	pthread_rwlock_t prwlock;
	int ret;

	if (rwlock == NULL)
		return(EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (prwlock == NULL) {
		if ((ret = init_static(rwlock)) != 0)
			return(ret);

		prwlock = *rwlock;
	}

	/* grab the monitor lock */
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
		return(ret);

	while (prwlock->state != 0) {
		prwlock->blocked_writers++;

		ret = _pthread_cond_wait(&prwlock->write_signal,
		    &prwlock->lock);

		if (ret != 0) {
			prwlock->blocked_writers--;
			_pthread_mutex_unlock(&prwlock->lock);
			return(ret);
		}

		prwlock->blocked_writers--;
	}

	/* indicate we are locked for writing */
	prwlock->state = -1;

	/* see the comment on this in pthread_rwlock_rdlock */
	_pthread_mutex_unlock(&prwlock->lock);

	return (ret);
}
Example no. 14
0
/*
 * Seek to an entry in a directory.
 * _seekdir is in telldir.c so that it can share opaque data structures.
 */
void
seekdir(DIR *dirp, long loc)
{
    if (__isthreaded)
        _pthread_mutex_lock((pthread_mutex_t *)&dirp->dd_lock);
    _seekdir(dirp, loc);
    if (__isthreaded)
        _pthread_mutex_unlock((pthread_mutex_t *)&dirp->dd_lock);
}
Example no. 15
0
/*
 * return a pointer into a directory
 */
long
telldir(DIR *dirp)
{
    long index;
    struct ddloc *lp;

    if (__isthreaded) {
        _pthread_mutex_lock(&dirp->dd_lock);
        _pthread_mutex_lock(&dd_hash_lock);
    }

    /*
     * Reduce memory use by reusing a ddloc that might already exist
     * for this position.
     */
    for (lp = dd_hash[LOCHASH(dirp->dd_lastseek)]; lp; lp = lp->loc_next) {
        if (lp->loc_dirp == dirp && lp->loc_seek == dirp->dd_seek &&
                lp->loc_loc == dirp->dd_loc) {
            index = lp->loc_index;
            goto done;
        }
    }

    if ((lp = (struct ddloc *)malloc(sizeof(struct ddloc))) == NULL) {
        index = -1;
        goto done;
    }
    index = dd_loccnt++;
    lp->loc_index = index;
    lp->loc_seek = dirp->dd_seek;
    lp->loc_loc = dirp->dd_loc;
    lp->loc_dirp = dirp;

    lp->loc_next = dd_hash[LOCHASH(index)];
    dd_hash[LOCHASH(index)] = lp;

done:
    if (__isthreaded) {
        _pthread_mutex_unlock(&dd_hash_lock);
        _pthread_mutex_unlock(&dirp->dd_lock);
    }
    return (index);
}
Example no. 16
0
static void
once_cancel_handler(void *arg)
{
	pthread_once_t *once_control = arg;

	_pthread_mutex_lock(&once_lock);
	once_control->state = ONCE_NEVER_DONE;
	_pthread_mutex_unlock(&once_lock);
	_pthread_cond_broadcast(&once_cv);
}
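This handler exists because init_routine() in Example no. 2 may contain a cancellation point; rolling the state back to ONCE_NEVER_DONE lets a later caller retry the initialization. The same push/pop protocol protects any mutex held across a cancellable wait, as in this sketch (all names are hypothetical):

#include <pthread.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static int ready;

/* Runs if the waiter is cancelled while blocked below. */
static void
unlock_m(void *arg)
{
	pthread_mutex_unlock(arg);
}

static void
wait_ready(void)
{
	pthread_mutex_lock(&m);
	/* If cancellation fires inside pthread_cond_wait(), the
	 * mutex is re-acquired first, then unlock_m releases it. */
	pthread_cleanup_push(unlock_m, &m);
	while (!ready)
		pthread_cond_wait(&cv, &m);
	pthread_cleanup_pop(1);		/* pop and run: unlocks m */
}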
Example no. 17
0
void set_next_race_time(struct service *service) {
	time_t now = time(NULL);
	struct tm *tm_now = localtime(&now);
	if (!tm_now)
		ERR("localtime");
	struct tm tm_then = *tm_now;
	tm_then.tm_sec += service->delay;
	_pthread_mutex_lock(service->mnr);
	service->next_race = mktime(&tm_then);
	_pthread_mutex_unlock(service->mnr);				
}
Example no. 18
0
static void
decrease_nwaiters(void *arg)
{
	sem_t *sem = (sem_t *)arg;

	(*sem)->nwaiters--;
	/*
	 * This function is called from a cancellation point;
	 * the mutex should already be held.
	 */
	_pthread_mutex_unlock(&(*sem)->lock);
}
Example no. 19
0
int
_pthread_once(pthread_once_t * once_control, void (*init_routine) (void))
{
	if (once_control->state == PTHREAD_NEEDS_INIT) {
		if (_thread_initial == NULL)
			_thread_init();
		_pthread_mutex_lock(&(once_control->mutex));
		if (once_control->state == PTHREAD_NEEDS_INIT) {
			init_routine();
			once_control->state = PTHREAD_DONE_INIT;
		}
		_pthread_mutex_unlock(&(once_control->mutex));
	}
	return (0);
}
Example no. 20
0
struct tm *
localtime(const time_t * const timep)
{
	static pthread_mutex_t localtime_mutex = PTHREAD_MUTEX_INITIALIZER;
	static pthread_key_t localtime_key = -1;
	struct tm *p_tm;

	if (__isthreaded != 0) {
		if (localtime_key < 0) {
			_pthread_mutex_lock(&localtime_mutex);
			if (localtime_key < 0) {
				if (_pthread_key_create(&localtime_key, free) < 0) {
					_pthread_mutex_unlock(&localtime_mutex);
					return(NULL);
				}
			}
			_pthread_mutex_unlock(&localtime_mutex);
		}
		p_tm = _pthread_getspecific(localtime_key);
		if (p_tm == NULL) {
			if ((p_tm = (struct tm *)malloc(sizeof(struct tm)))
			    == NULL)
				return(NULL);
			_pthread_setspecific(localtime_key, p_tm);
		}
		_RWLOCK_RDLOCK(&lcl_rwlock);
		tzset_basic(1);
		localsub(timep, 0L, p_tm);
		_RWLOCK_UNLOCK(&lcl_rwlock);
		return(p_tm);
	} else {
		tzset_basic(0);
		localsub(timep, 0L, &tm);
		return(&tm);
	}
}
Example no. 21
0
void post_race(struct service *service, struct horse *horses, size_t horse_num) {
	_pthread_mutex_lock(service->mcur_run);
	service->cur_run = HORSE_RUN;
	_pthread_mutex_unlock(service->mcur_run);

	// choose new horses
	memset(service->current_run, 0, sizeof(struct horse *) * HORSE_RUN);
	choose_run_horses(horses, horse_num, service);	
	memset(service->horse_bet, 0, sizeof(unsigned int) * HORSE_RUN);
	
	// set new time for next run
	set_next_race_time(service);		

	run = 0; 

	alarm(service->delay);
}
Example no. 22
0
int
_pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
{
	pthread_rwlock_t prwlock;
	struct pthread *curthread;
	int ret;

	if (rwlock == NULL)
		return(EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (prwlock == NULL) {
		if ((ret = init_static(rwlock)) != 0)
			return(ret);

		prwlock = *rwlock;
	}

	/* grab the monitor lock */
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
		return(ret);

	curthread = _get_curthread();
	if (prwlock->state == MAX_READ_LOCKS)
		ret = EAGAIN; /* too many read locks acquired */
	else if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) {
		/* see comment for pthread_rwlock_rdlock() */
		curthread->rdlock_count++;
		prwlock->state++;
	}
	/* give writers priority over readers */
	else if (prwlock->blocked_writers || prwlock->state < 0)
		ret = EBUSY;
	else {
		prwlock->state++; /* indicate we are locked for reading */
		curthread->rdlock_count++;
	}

	/* see the comment on this in pthread_rwlock_rdlock */
	_pthread_mutex_unlock(&prwlock->lock);

	return (ret);
}
Example no. 23
0
void
rewinddir(DIR *dirp)
{

	if (__isthreaded)
		_pthread_mutex_lock(&dirp->dd_lock);
	dirp->dd_flags &= ~__DTF_SKIPREAD; /* current contents are invalid */
	if (dirp->dd_flags & __DTF_READALL)
		_filldir(dirp, false);
	else {
		(void) lseek(dirp->dd_fd, 0, SEEK_SET);
		dirp->dd_seek = 0;
	}
	dirp->dd_loc = 0;
	_reclaim_telldir(dirp);
	if (__isthreaded)
		_pthread_mutex_unlock(&dirp->dd_lock);
}
Example no. 24
0
/*
 * close a directory.
 */
int
fdclosedir(DIR *dirp)
{
	int fd;

	if (__isthreaded)
		_pthread_mutex_lock(&dirp->dd_lock);
	fd = dirp->dd_fd;
	dirp->dd_fd = -1;
	dirp->dd_loc = 0;
	free((void *)dirp->dd_buf);
	_reclaim_telldir(dirp);
	if (__isthreaded) {
		_pthread_mutex_unlock(&dirp->dd_lock);
		_pthread_mutex_destroy(&dirp->dd_lock);
	}
	free((void *)dirp);
	return (fd);
}
Example no. 25
0
int
_pthread_atfork(void (*prepare)(void), void (*parent)(void),
                void (*child)(void))
{
    struct pthread_atfork *af;

    if (_thr_initial == NULL)
        _libpthread_init(NULL);

    if ((af = malloc(sizeof(struct pthread_atfork))) == NULL)
        return (ENOMEM);

    af->prepare = prepare;
    af->parent = parent;
    af->child = child;
    _pthread_mutex_lock(&_thr_atfork_mutex);
    TAILQ_INSERT_TAIL(&_thr_atfork_list, af, qe);
    _pthread_mutex_unlock(&_thr_atfork_mutex);
    return (0);
}
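Examples no. 27 and no. 29 below are the child and parent halves of exactly this mechanism: handlers registered with pthread_atfork() run around fork() so the child never starts with a lock held by a thread that no longer exists. A minimal registration sketch (lib_lock and the handler names are hypothetical):

#include <pthread.h>

static pthread_mutex_t lib_lock = PTHREAD_MUTEX_INITIALIZER;

static void lib_prefork(void)  { pthread_mutex_lock(&lib_lock); }
static void lib_postfork(void) { pthread_mutex_unlock(&lib_lock); }

/* Call once at library startup: fork() will then acquire the
 * lock in the parent beforehand and release it on both sides
 * afterwards, keeping the child's copy of the lock consistent. */
static void
lib_init_atfork(void)
{
	pthread_atfork(lib_prefork, lib_postfork, lib_postfork);
}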
Example no. 26
0
int
_pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
{
	pthread_rwlock_t prwlock;
	struct pthread *curthread;
	int ret;

	if (rwlock == NULL)
		return(EINVAL);

	prwlock = *rwlock;

	if (prwlock == NULL)
		return(EINVAL);

	/* grab the monitor lock */
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
		return(ret);

	curthread = _get_curthread();
	if (prwlock->state > 0) {
		curthread->rdlock_count--;
		prwlock->state--;
		if (prwlock->state == 0 && prwlock->blocked_writers)
			ret = _pthread_cond_signal(&prwlock->write_signal);
	} else if (prwlock->state < 0) {
		prwlock->state = 0;

		if (prwlock->blocked_writers)
			ret = _pthread_cond_signal(&prwlock->write_signal);
		else
			ret = _pthread_cond_broadcast(&prwlock->read_signal);
	} else
		ret = EINVAL;

	/* see the comment on this in pthread_rwlock_rdlock */
	_pthread_mutex_unlock(&prwlock->lock);

	return (ret);
}
Example no. 27
0
static void
sem_child_postfork(void)
{
	_pthread_mutex_unlock(&sem_llock);
}
Example no. 28
0
/*
 * The first time nsdispatch is called (during a process's lifetime,
 * or after nsswitch.conf has been updated), nss_configure will
 * prepare global data needed by NSS.
 */
static int
nss_configure(void)
{
	static pthread_mutex_t conf_lock = PTHREAD_MUTEX_INITIALIZER;
	static time_t	 confmod;
	struct stat	 statbuf;
	int		 result, isthreaded;
	const char	*path;
#ifdef NS_CACHING
	void		*handle;
#endif

	result = 0;
	isthreaded = __isthreaded;
#if defined(_NSS_DEBUG) && defined(_NSS_SHOOT_FOOT)
	/* NOTE WELL:  THIS IS A SECURITY HOLE. This must only be built
	 * for debugging purposes and MUST NEVER be used in production.
	 */
	path = getenv("NSSWITCH_CONF");
	if (path == NULL)
#endif
	path = _PATH_NS_CONF;
	if (stat(path, &statbuf) != 0)
		return (0);
	if (statbuf.st_mtime <= confmod)
		return (0);
	if (isthreaded) {
	    result = _pthread_mutex_trylock(&conf_lock);
	    if (result != 0)
		    return (0);
	    _pthread_rwlock_unlock(&nss_lock);
	    result = _pthread_rwlock_wrlock(&nss_lock);
	    if (result != 0)
		    goto fin2;
	}
	_nsyyin = fopen(path, "r");
	if (_nsyyin == NULL)
		goto fin;
	VECTOR_FREE(_nsmap, &_nsmapsize, sizeof(*_nsmap),
	    (vector_free_elem)ns_dbt_free);
	VECTOR_FREE(_nsmod, &_nsmodsize, sizeof(*_nsmod),
	    (vector_free_elem)ns_mod_free);
	nss_load_builtin_modules();
	_nsyyparse();
	fclose(_nsyyin);
	vector_sort(_nsmap, _nsmapsize, sizeof(*_nsmap), string_compare);
	if (confmod == 0)
		atexit(nss_atexit);
	confmod = statbuf.st_mtime;

#ifdef NS_CACHING
	handle = dlopen(NULL, RTLD_LAZY | RTLD_GLOBAL);
	if (handle != NULL) {
		nss_cache_cycle_prevention_func = dlsym(handle,
			"_nss_cache_cycle_prevention_function");
		dlclose(handle);
	}
#endif
fin:
	if (isthreaded) {
	    _pthread_rwlock_unlock(&nss_lock);
	    if (result == 0)
		    result = _pthread_rwlock_rdlock(&nss_lock);
	}
fin2:
	if (isthreaded)
		_pthread_mutex_unlock(&conf_lock);
	return (result);
}
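nss_configure() layers three guards: an mtime test to skip an unchanged file, a trylock so only one thread reparses, and a write-lock upgrade on nss_lock while the shared tables are rebuilt. The skeleton, minus the NSS specifics, looks like this sketch (reload_conf() is hypothetical):

#include <pthread.h>
#include <sys/stat.h>
#include <time.h>

static pthread_mutex_t conf_lock = PTHREAD_MUTEX_INITIALIZER;
static time_t confmod;

void reload_conf(const char *path);	/* hypothetical parser */

static void
configure(const char *path)
{
	struct stat sb;

	if (stat(path, &sb) != 0 || sb.st_mtime <= confmod)
		return;			/* nothing new to load */
	/* Losing this race is fine: the thread holding the lock
	 * is already loading the same file. */
	if (pthread_mutex_trylock(&conf_lock) != 0)
		return;
	reload_conf(path);
	confmod = sb.st_mtime;
	pthread_mutex_unlock(&conf_lock);
}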
Example no. 29
0
static void
sem_postfork(void)
{

	_pthread_mutex_unlock(&sem_llock);
}
Example no. 30
0
int
_pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
{
	pthread_rwlock_t prwlock;
	struct pthread *curthread;
	int ret;

	if (rwlock == NULL)
		return(EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (prwlock == NULL) {
		if ((ret = init_static(rwlock)) != 0)
			return(ret);

		prwlock = *rwlock;
	}

	/* grab the monitor lock */
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
		return(ret);

	/* check lock count */
	if (prwlock->state == MAX_READ_LOCKS) {
		_pthread_mutex_unlock(&prwlock->lock);
		return (EAGAIN);
	}

	curthread = _get_curthread();
	if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) {
		/*
		 * To avoid having to track all the rdlocks held by
		 * a thread or all of the threads that hold a rdlock,
		 * we keep a simple count of all the rdlocks held by
		 * a thread.  If a thread holds any rdlocks it is
		 * possible that it is attempting to take a recursive
		 * rdlock.  If there are blocked writers and precedence
		 * is given to them, then that would result in the thread
		 * deadlocking.  So allowing a thread to take the rdlock
		 * when it already has one or more rdlocks avoids the
		 * deadlock.  I hope the reader can follow that logic ;-)
		 */
		;	/* nothing needed */
	} else {
		/* give writers priority over readers */
		while (prwlock->blocked_writers || prwlock->state < 0) {
			ret = _pthread_cond_wait(&prwlock->read_signal,
			    &prwlock->lock);

			if (ret != 0) {
				/* can't do a whole lot if this fails */
				_pthread_mutex_unlock(&prwlock->lock);
				return(ret);
			}
		}
	}

	curthread->rdlock_count++;
	prwlock->state++; /* indicate we are locked for reading */

	/*
	 * Something is really wrong if this call fails.  Returning
	 * error won't do because we've already obtained the read
	 * lock.  Decrementing 'state' is no good because we probably
	 * don't have the monitor lock.
	 */
	_pthread_mutex_unlock(&prwlock->lock);

	return (ret);
}
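The long comment above is the one concession to recursive readers; from a caller's point of view the lock is the usual many-readers/one-writer monitor. A self-contained usage sketch:

#include <pthread.h>

static pthread_rwlock_t tbl_lock = PTHREAD_RWLOCK_INITIALIZER;
static int table[16];

/* Any number of lookups may run concurrently... */
static int
lookup(int i)
{
	int v;

	pthread_rwlock_rdlock(&tbl_lock);
	v = table[i];
	pthread_rwlock_unlock(&tbl_lock);
	return (v);
}

/* ...but an update waits for current readers to drain and,
 * with writer priority, blocks new readers from entering. */
static void
update(int i, int v)
{
	pthread_rwlock_wrlock(&tbl_lock);
	table[i] = v;
	pthread_rwlock_unlock(&tbl_lock);
}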