Example #1
int
rw_rdlock_impl(rwlock_t *rwlp, timespec_t *tsp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	readlock_t *readlockp;
	tdb_rwlock_stats_t *rwsp = RWLOCK_STATS(rwlp, udp);
	int error;

	/*
	 * If we already hold a readers lock on this rwlock,
	 * just increment our reference count and return.
	 */
	sigoff(self);
	readlockp = rwl_entry(rwlp);
	if (readlockp->rd_count != 0) {
		if (readlockp->rd_count == READ_LOCK_MAX) {
			sigon(self);
			error = EAGAIN;
			goto out;
		}
		sigon(self);
		error = 0;
		goto out;
	}
	sigon(self);

	/*
	 * If we hold the writer lock, bail out.
	 */
	if (rw_write_held(rwlp)) {
		if (self->ul_error_detection)
			rwlock_error(rwlp, "rwlock_rdlock",
			    "calling thread owns the writer lock");
		error = EDEADLK;
		goto out;
	}

	if (read_lock_try(rwlp, 0))
		error = 0;
	else if (rwlp->rwlock_type == USYNC_PROCESS)	/* kernel-level */
		error = shared_rwlock_lock(rwlp, tsp, READ_LOCK);
	else						/* user-level */
		error = rwlock_lock(rwlp, tsp, READ_LOCK);

out:
	if (error == 0) {
		sigoff(self);
		rwl_entry(rwlp)->rd_count++;
		sigon(self);
		if (rwsp)
			tdb_incr(rwsp->rw_rdlock);
		DTRACE_PROBE2(plockstat, rw__acquire, rwlp, READ_LOCK);
	} else {
		DTRACE_PROBE3(plockstat, rw__error, rwlp, READ_LOCK, error);
	}

	return (error);
}
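
This routine presumably sits behind the public read-lock entry points (rw_rdlock(), pthread_rwlock_rdlock()); the per-thread rd_count is what lets a single thread take the same read lock more than once, provided it releases it the same number of times. A minimal caller-side sketch of that recursive-read behavior through the standard POSIX interface (illustrative only, not taken from this source):

#include <pthread.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

void
reader(void)
{
	/*
	 * A thread may hold several concurrent read locks on the same
	 * rwlock; each rdlock must be matched by its own unlock.
	 */
	(void) pthread_rwlock_rdlock(&lock);
	(void) pthread_rwlock_rdlock(&lock);	/* nested read lock */
	/* ... read the shared data ... */
	(void) pthread_rwlock_unlock(&lock);
	(void) pthread_rwlock_unlock(&lock);
}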
Example #2
/* ARGSUSED2 */
int
rwlock_init(rwlock_t *rwlp, int type, void *arg)
{
	ulwp_t *self = curthread;

	if (type != USYNC_THREAD && type != USYNC_PROCESS)
		return (EINVAL);
	/*
	 * Once reinitialized, we can no longer be holding a read or write lock.
	 * We can do nothing about other threads that are holding read locks.
	 */
	sigoff(self);
	rwl_entry(rwlp)->rd_count = 0;
	sigon(self);
	(void) memset(rwlp, 0, sizeof (*rwlp));
	rwlp->rwlock_type = (uint16_t)type;
	rwlp->rwlock_magic = RWL_MAGIC;
	rwlp->mutex.mutex_type = (uint8_t)type;
	rwlp->mutex.mutex_flag = LOCK_INITED;
	rwlp->mutex.mutex_magic = MUTEX_MAGIC;

	/*
	 * This should be at the beginning of the function,
	 * but for the sake of old broken applications that
	 * do not have proper alignment for their rwlocks
	 * (and don't check the return code from rwlock_init),
	 * we put it here, after initializing the rwlock regardless.
	 */
	if (((uintptr_t)rwlp & (_LONG_LONG_ALIGNMENT - 1)) &&
	    self->ul_misaligned == 0)
		return (EINVAL);

	return (0);
}
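
The type argument (USYNC_THREAD or USYNC_PROCESS) decides whether the lock is private to the process or shared across processes through shared memory; at the POSIX level that choice is made with a rwlock attribute, and PTHREAD_PROCESS_SHARED presumably maps to USYNC_PROCESS here. A hedged lifecycle sketch using only standard pthread calls (none of these names come from the excerpt):

#include <pthread.h>

int
use_shared_rwlock(pthread_rwlock_t *rw)	/* rw assumed to live in shared memory */
{
	pthread_rwlockattr_t attr;
	int error;

	(void) pthread_rwlockattr_init(&attr);
	(void) pthread_rwlockattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);

	if ((error = pthread_rwlock_init(rw, &attr)) != 0)
		return (error);
	(void) pthread_rwlockattr_destroy(&attr);

	(void) pthread_rwlock_wrlock(rw);
	/* ... modify the data the lock protects ... */
	(void) pthread_rwlock_unlock(rw);

	return (pthread_rwlock_destroy(rw));
}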
Example #3
int
rwlock_destroy(rwlock_t *rwlp)
{
	ulwp_t *self = curthread;

	/*
	 * Once destroyed, we can no longer be holding a read or write lock.
	 * We can do nothing about other threads that are holding read locks.
	 */
	sigoff(self);
	rwl_entry(rwlp)->rd_count = 0;
	sigon(self);
	rwlp->rwlock_magic = 0;
	tdb_sync_obj_deregister(rwlp);
	return (0);
}
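
The only work here, as in the examples above, happens inside the sigoff()/sigon() bracket, which defers signals while per-thread bookkeeping is touched. The closest application-level analog is masking signals around an update that an async handler might otherwise observe half-done; a small sketch with standard POSIX calls (the helper name is made up for illustration):

#include <signal.h>
#include <pthread.h>

void
update_with_signals_deferred(void (*update)(void))
{
	sigset_t all, saved;

	(void) sigfillset(&all);
	(void) pthread_sigmask(SIG_BLOCK, &all, &saved);	/* defer signals */
	update();						/* touch the state */
	(void) pthread_sigmask(SIG_SETMASK, &saved, NULL);	/* restore the mask */
}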
Example #4
int
_stack_setbounds(const stack_t *sp)
{
	ulwp_t *self = curthread;

	if (sp == NULL || sp->ss_sp == NULL ||
	    (uintptr_t)sp->ss_sp != SA((uintptr_t)sp->ss_sp) ||
	    sp->ss_flags != 0 || sp->ss_size < MINSIGSTKSZ ||
	    (uintptr_t)sp->ss_size != SA((uintptr_t)sp->ss_size)) {
		errno = EINVAL;
		return (-1);
	}

	sigoff(self);
	self->ul_ustack = *sp;
	sigon(self);

	return (0);
}
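
_stack_setbounds() records a caller-supplied stack_t in the thread's ulwp after validating alignment, size, and flags; on failure it sets errno to EINVAL and returns -1 rather than returning the error code directly. A hypothetical caller registering its own stack (the prototype is copied from the excerpt; whether the symbol is reachable outside libc is an assumption):

#include <signal.h>	/* stack_t, MINSIGSTKSZ */
#include <stddef.h>

extern int _stack_setbounds(const stack_t *);	/* internal entry shown above */

int
register_thread_stack(void *base, size_t size)
{
	stack_t st;

	st.ss_sp = base;	/* must pass the SA() alignment check */
	st.ss_size = size;	/* at least MINSIGSTKSZ, and aligned */
	st.ss_flags = 0;	/* any flag bits are rejected */

	return (_stack_setbounds(&st));	/* -1 with errno == EINVAL on bad input */
}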
Example #5
/*
 * Return the address of a TLS variable for the current thread.
 * Run the constructors for newly-allocated dynamic TLS.
 */
void *
slow_tls_get_addr(TLS_index *tls_index)
{
	ulwp_t *self = curthread;
	tls_metadata_t *tlsm = &self->ul_uberdata->tls_metadata;
	TLS_modinfo *tlsp;
	ulong_t moduleid;
	tls_t *tlsent;
	caddr_t	base;
	void (**initarray)(void);
	ulong_t arraycnt = 0;

	/*
	 * Defer signals until we have finished calling
	 * all of the constructors.
	 */
	sigoff(self);
	lmutex_lock(&tlsm->tls_lock);
	if ((moduleid = tls_index->ti_moduleid) < self->ul_ntlsent)
		tlsent = self->ul_tlsent;
	else {
		ASSERT(moduleid < tlsm->tls_modinfo.tls_size);
		tlsent = lmalloc(tlsm->tls_modinfo.tls_size * sizeof (tls_t));
		if (self->ul_tlsent != NULL) {
			(void) memcpy(tlsent, self->ul_tlsent,
			    self->ul_ntlsent * sizeof (tls_t));
			lfree(self->ul_tlsent,
			    self->ul_ntlsent * sizeof (tls_t));
		}
		self->ul_tlsent = tlsent;
		self->ul_ntlsent = tlsm->tls_modinfo.tls_size;
	}
	tlsent += moduleid;
	if ((base = tlsent->tls_data) == NULL) {
		tlsp = (TLS_modinfo *)tlsm->tls_modinfo.tls_data + moduleid;
		if (tlsp->tm_memsz == 0) {	/* dlclose()d module? */
			base = NULL;
		} else if (tlsp->tm_flags & TM_FLG_STATICTLS) {
			/* static TLS is already allocated/initialized */
			base = (caddr_t)self - tlsp->tm_stattlsoffset;
			tlsent->tls_data = base;
			tlsent->tls_size = 0;	/* don't lfree() this space */
		} else {
			/* allocate/initialize the dynamic TLS */
			base = lmalloc(tlsp->tm_memsz);
			if (tlsp->tm_filesz != 0)
				(void) memcpy(base, tlsp->tm_tlsblock,
				    tlsp->tm_filesz);
			tlsent->tls_data = base;
			tlsent->tls_size = tlsp->tm_memsz;
			/* remember the constructors */
			arraycnt = tlsp->tm_tlsinitarraycnt;
			initarray = tlsp->tm_tlsinitarray;
		}
	}
	lmutex_unlock(&tlsm->tls_lock);

	/*
	 * Call constructors, if any, in ascending order.
	 * We have to do this after dropping tls_lock because
	 * we have no idea what the constructors will do.
	 * At least we have signals deferred until they are done.
	 */
	if (arraycnt) {
		do {
			(**initarray++)();
		} while (--arraycnt != 0);
	}

	if (base == NULL)	/* kludge to get x86/x64 to boot */
		base = (caddr_t)self - 512;

	sigon(self);
	return (base + tls_index->ti_tlsoffset);
}
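
This is the slow path taken when a thread first touches a TLS variable whose per-thread block has not yet been allocated: the thread's module table is grown if needed, the block is allocated and initialized, and any TLS init functions run with signals deferred. From application code the machinery is invisible; a plain __thread variable is enough to exercise it, and when that variable lives in a shared object loaded at run time, its first access from each thread goes through a path like this one. Illustrative sketch (standard C and pthreads only):

#include <pthread.h>
#include <stdio.h>

static __thread int tls_counter = 0;	/* one instance per thread */

static void *
worker(void *arg)
{
	tls_counter++;				/* each thread starts from its own 0 */
	(void) printf("%d\n", tls_counter);	/* prints 1 in every thread */
	return (arg);
}

int
main(void)
{
	pthread_t t1, t2;

	(void) pthread_create(&t1, NULL, worker, NULL);
	(void) pthread_create(&t2, NULL, worker, NULL);
	(void) pthread_join(t1, NULL);
	(void) pthread_join(t2, NULL);
	return (0);
}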