Example No. 1
int pthread_barrier_wait(pthread_barrier_t *b)
{
	int limit = b->_b_limit;
	struct instance *inst;

	/* Trivial case: count was set at 1 */
	if (!limit) return PTHREAD_BARRIER_SERIAL_THREAD;

	/* Process-shared barriers require a separate, inefficient wait */
	if (limit < 0) return pshared_barrier_wait(b);

	/* Otherwise we need a lock on the barrier object */
	while (a_swap(&b->_b_lock, 1))
		__wait(&b->_b_lock, &b->_b_waiters, 1, 1);
	inst = b->_b_inst;

	/* First thread to enter the barrier becomes the "instance owner" */
	if (!inst) {
		struct instance new_inst = { 0 };
		int spins = 10000;
		b->_b_inst = inst = &new_inst;
		a_store(&b->_b_lock, 0);
		if (b->_b_waiters) __wake(&b->_b_lock, 1, 1);
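		/* Wait (spin briefly, then sleep on the futex) until the last
		 * thread to leave the barrier advances inst->finished */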
		while (spins-- && !inst->finished)
			a_spin();
		a_inc(&inst->finished);
		while (inst->finished == 1)
			__syscall(SYS_futex, &inst->finished, FUTEX_WAIT,1,0);
		return PTHREAD_BARRIER_SERIAL_THREAD;
	}

	/* Last thread to enter the barrier wakes all non-instance-owners */
	if (++inst->count == limit) {
		b->_b_inst = 0;
		a_store(&b->_b_lock, 0);
		if (b->_b_waiters) __wake(&b->_b_lock, 1, 1);
		a_store(&inst->last, 1);
		if (inst->waiters)
			__wake(&inst->last, -1, 1);
	} else {
		a_store(&b->_b_lock, 0);
		if (b->_b_waiters) __wake(&b->_b_lock, 1, 1);
		__wait(&inst->last, &inst->waiters, 0, 1);
	}

	/* Last thread to exit the barrier wakes the instance owner */
	if (a_fetch_add(&inst->count,-1)==1 && a_fetch_add(&inst->finished,1))
		__wake(&inst->finished, 1, 1);

	return 0;
}
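
This is the fast path behind the standard POSIX barrier API. A minimal usage sketch; the thread count, worker body, and message are illustrative rather than taken from the source:

#include <pthread.h>
#include <stdio.h>

#define NTHREADS 4	/* illustrative */

static pthread_barrier_t barrier;

static void *worker(void *arg)
{
	/* ... per-thread work before the rendezvous ... */
	if (pthread_barrier_wait(&barrier) == PTHREAD_BARRIER_SERIAL_THREAD)
		puts("exactly one thread per cycle sees this");
	/* no thread passes this point until all NTHREADS have arrived */
	return 0;
}

int main(void)
{
	pthread_t t[NTHREADS];
	pthread_barrier_init(&barrier, 0, NTHREADS);
	for (int i = 0; i < NTHREADS; i++)
		pthread_create(&t[i], 0, worker, 0);
	for (int i = 0; i < NTHREADS; i++)
		pthread_join(t[i], 0);
	pthread_barrier_destroy(&barrier);
	return 0;
}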
Example No. 2
File: tempnam.c Project: KGG814/AOS
char *tempnam(const char *dir, const char *pfx)
{
	static int index;
	char *s;
	struct timespec ts;
	int pid = getpid();
	size_t l;
	int n;
	int try=0;

	if (!dir) dir = P_tmpdir;
	if (!pfx) pfx = "temp";

	if (access(dir, R_OK|W_OK|X_OK) != 0)
		return NULL;

	l = strlen(dir) + 1 + strlen(pfx) + 3*(sizeof(int)*3+2) + 1;
	s = malloc(l);
	if (!s) return s;

	do {
		clock_gettime(CLOCK_REALTIME, &ts);
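		/* mix clock nanoseconds with stack/heap addresses for cheap entropy */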
		n = ts.tv_nsec ^ (uintptr_t)&s ^ (uintptr_t)s;
		snprintf(s, l, "%s/%s-%d-%d-%x", dir, pfx, pid, a_fetch_add(&index, 1), n);
	} while (!access(s, F_OK) && try++<MAXTRIES);
	if (try>=MAXTRIES) {
		free(s);
		return 0;
	}
	return s;
}
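
The name returned above is heap-allocated, so the caller owns and must free it. A short usage sketch; the "demo" prefix is illustrative:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char *name = tempnam(NULL, "demo");	/* NULL dir falls back to P_tmpdir */
	if (!name) return 1;
	printf("candidate name: %s\n", name);
	free(name);	/* allocated by the malloc inside tempnam */
	return 0;
}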
Example No. 3
int pthread_cond_broadcast(pthread_cond_t *c)
{
	pthread_mutex_t *m;

	if (!c->_c_waiters) return 0;

	a_inc(&c->_c_seq);

#ifdef __EMSCRIPTEN__
	// XXX Emscripten: TODO: This naive wake-all is suboptimal but correct for now. The Emscripten-specific
	// code path below has a bug and does not work for some reason; figure it out and remove this block.
	__wake(&c->_c_seq, -1, 0);
	return 0;
#endif

	/* If cond var is process-shared, simply wake all waiters. */
	if (c->_c_mutex == (void *)-1) {
		__wake(&c->_c_seq, -1, 0);
		return 0;
	}

	/* Block waiters from returning so we can use the mutex. */
	while (a_swap(&c->_c_lock, 1))
		__wait(&c->_c_lock, &c->_c_lockwait, 1, 1);
	if (!c->_c_waiters)
		goto out;
	m = c->_c_mutex;

	/* Move waiter count to the mutex */
	a_fetch_add(&m->_m_waiters, c->_c_waiters2);
	c->_c_waiters2 = 0;

#ifdef __EMSCRIPTEN__
	int futexResult;
	do {
		// XXX Emscripten: Bug, this does not work correctly.
		futexResult = emscripten_futex_wake_or_requeue(&c->_c_seq, !m->_m_type || (m->_m_lock&INT_MAX)!=pthread_self()->tid,
			c->_c_seq, &m->_m_lock);
	} while(futexResult == -EAGAIN);
#else
	/* Perform the futex requeue, waking one waiter unless we know
	 * that the calling thread holds the mutex. */
	__syscall(SYS_futex, &c->_c_seq, FUTEX_REQUEUE,
		!m->_m_type || (m->_m_lock&INT_MAX)!=pthread_self()->tid,
		INT_MAX, &m->_m_lock);
#endif

out:
	a_store(&c->_c_lock, 0);
	if (c->_c_lockwait) __wake(&c->_c_lock, 1, 0);

	return 0;
}
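
The function above implements the standard broadcast pattern: waiters block on a predicate under a mutex, and the broadcaster flips the predicate and wakes everyone. A minimal sketch; the ready flag and thread body are illustrative:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int ready;

static void *waiter(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!ready)	/* the loop guards against spurious wakeups */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	puts("waiter released");
	return 0;
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, 0, waiter, 0);
	pthread_mutex_lock(&lock);
	ready = 1;	/* flip the predicate under the mutex */
	pthread_mutex_unlock(&lock);
	pthread_cond_broadcast(&cond);	/* wake every waiter at once */
	pthread_join(t, 0);
	return 0;
}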
Example No. 4
static int pshared_barrier_wait(pthread_barrier_t *b)
{
	int limit = (b->_b_limit & INT_MAX) + 1;
	int ret = 0;
	int v, w;

	if (limit==1) return PTHREAD_BARRIER_SERIAL_THREAD;

	while ((v=a_cas(&b->_b_lock, 0, limit)))
		__wait(&b->_b_lock, &b->_b_waiters, v, 0);

	/* Wait for <limit> threads to get to the barrier */
	if (++b->_b_count == limit) {
		a_store(&b->_b_count, 0);
		ret = PTHREAD_BARRIER_SERIAL_THREAD;
		if (b->_b_waiters2) __wake(&b->_b_count, -1, 0);
	} else {
		a_store(&b->_b_lock, 0);
		if (b->_b_waiters) __wake(&b->_b_lock, 1, 0);
		while ((v=b->_b_count)>0)
			__wait(&b->_b_count, &b->_b_waiters2, v, 0);
	}

	__vm_lock_impl(+1);

	/* Ensure all threads have a vm lock before proceeding */
	if (a_fetch_add(&b->_b_count, -1)==1-limit) {
		a_store(&b->_b_count, 0);
		if (b->_b_waiters2) __wake(&b->_b_count, -1, 0);
	} else {
		while ((v=b->_b_count))
			__wait(&b->_b_count, &b->_b_waiters2, v, 0);
	}
	
	/* Perform a recursive unlock suitable for self-sync'd destruction */
	do {
		v = b->_b_lock;
		w = b->_b_waiters;
	} while (a_cas(&b->_b_lock, v, v==INT_MIN+1 ? 0 : v-1) != v);

	/* Wake a thread waiting to reuse or destroy the barrier */
	if (v==INT_MIN+1 || (v==1 && w))
		__wake(&b->_b_lock, 1, 0);

	__vm_unlock_impl();

	return ret;
}
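
This slow path is reached from pthread_barrier_wait only when the barrier was initialized process-shared, which is encoded as a negative _b_limit (hence the & INT_MAX above). A sketch of how such a barrier is typically created; the mmap placement and the helper name make_shared_barrier are illustrative:

#include <pthread.h>
#include <sys/mman.h>

pthread_barrier_t *make_shared_barrier(unsigned count)
{
	/* place the barrier in memory visible to multiple processes */
	pthread_barrier_t *b = mmap(0, sizeof *b, PROT_READ|PROT_WRITE,
	                            MAP_SHARED|MAP_ANONYMOUS, -1, 0);
	if (b == MAP_FAILED) return 0;

	pthread_barrierattr_t a;
	pthread_barrierattr_init(&a);
	pthread_barrierattr_setpshared(&a, PTHREAD_PROCESS_SHARED);
	pthread_barrier_init(b, &a, count);
	pthread_barrierattr_destroy(&a);
	return b;	/* processes forked after this can wait on *b */
}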
Example No. 5
int pthread_cond_broadcast(pthread_cond_t *c)
{
	pthread_mutex_t *m;

	if (!c->_c_waiters) return 0;

	a_inc(&c->_c_seq);

	/* If cond var is process-shared, simply wake all waiters. */
	if (c->_c_mutex == (void *)-1) {
		__wake(&c->_c_seq, -1, 0);
		return 0;
	}

	/* Block waiters from returning so we can use the mutex. */
	while (a_swap(&c->_c_lock, 1))
		__wait(&c->_c_lock, &c->_c_lockwait, 1, 1);
	if (!c->_c_waiters)
		goto out;
	m = c->_c_mutex;

	/* Move waiter count to the mutex */
	a_fetch_add(&m->_m_waiters, c->_c_waiters2);
	c->_c_waiters2 = 0;

	/* Perform the futex requeue, waking one waiter unless we know
	 * that the calling thread holds the mutex. */
	__syscall(SYS_futex, &c->_c_seq, FUTEX_REQUEUE,
		!m->_m_type || (m->_m_lock&INT_MAX)!=pthread_self()->tid,
		INT_MAX, &m->_m_lock);

out:
	a_store(&c->_c_lock, 0);
	if (c->_c_lockwait) __wake(&c->_c_lock, 1, 0);

	return 0;
}
Example No. 6
void __vm_unlock(void)
{
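    /* Shared holders make the count positive; an exclusive holder makes
     * it negative. Step toward zero, and if this release frees the lock
     * while threads are waiting, wake them all. */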
    int inc = vmlock[0]>0 ? -1 : 1;
    if (a_fetch_add(vmlock, inc)==-inc && vmlock[1])
        __wake(vmlock, -1, 1);
}
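
For context, this release path pairs with a signed lock side: shared users move the count up, an exclusive user moves it down, and opposite signs exclude each other. A sketch of that matching lock, reconstructed here as an assumption about the same vmlock layout rather than taken from the page above:

void __vm_lock(int inc)
{
    /* assumed convention: inc=+1 for shared, inc=-1 for exclusive */
    for (;;) {
        int v = vmlock[0];
        if (inc*v < 0) __wait(vmlock, vmlock+1, v, 1);  /* opposite sign held */
        else if (a_cas(vmlock, v, v+inc) == v) break;   /* claimed atomically */
    }
}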
Example No. 7
void __vm_unlock(void) {
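  /* simplified shared-only variant: the hold count only ever goes positive */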
  if (a_fetch_add(vmlock, -1) == 1 && vmlock[1])
    __wake(vmlock, -1, 1);
}
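
This variant drops the exclusive direction entirely, so the matching lock side reduces to a single atomic increment. A sketch under the same assumption about vmlock:

void __vm_lock(void) {
  a_inc(vmlock);  /* shared-only: nothing of the opposite sign to wait for */
}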
Example No. 8
_Noreturn void pthread_exit(void *result)
{
	pthread_t self = pthread_self();
	sigset_t set;

	self->result = result;

	while (self->cancelbuf) {
		void (*f)(void *) = self->cancelbuf->__f;
		void *x = self->cancelbuf->__x;
		self->cancelbuf = self->cancelbuf->__next;
		f(x);
	}

	__pthread_tsd_run_dtors();

	__lock(self->exitlock);

	/* Mark this thread dead before decrementing count */
	__lock(self->killlock);
	self->dead = 1;

	/* Block all signals before decrementing the live thread count.
	 * This is important to ensure that dynamically allocated TLS
	 * is not under-allocated/over-committed, and possibly for other
	 * reasons as well. */
	__block_all_sigs(&set);

	/* Wait to unlock the kill lock, which governs functions like
	 * pthread_kill which target a thread id, until signals have
	 * been blocked. This precludes observation of the thread id
	 * as a live thread (with application code running in it) after
	 * the thread was reported dead by ESRCH being returned. */
	__unlock(self->killlock);

	/* It's impossible to determine whether this is "the last thread"
	 * until performing the atomic decrement, since multiple threads
	 * could exit at the same time. For the last thread, revert the
	 * decrement and unblock signals to give the atexit handlers and
	 * stdio cleanup code a consistent state. */
	if (a_fetch_add(&libc.threads_minus_1, -1)==0) {
		libc.threads_minus_1 = 0;
		__restore_sigs(&set);
		exit(0);
	}

	if (self->detached && self->map_base) {
		/* Detached threads must avoid the kernel clear_child_tid
		 * feature, since the virtual address will have been
		 * unmapped and possibly already reused by a new mapping
		 * at the time the kernel would perform the write. In
		 * the case of threads that started out detached, the
		 * initial clone flags are correct, but if the thread was
		 * detached later (== 2), we need to clear it here. */
		if (self->detached == 2) __syscall(SYS_set_tid_address, 0);

		/* The following call unmaps the thread's stack mapping
		 * and then exits without touching the stack. */
		__unmapself(self->map_base, self->map_size);
	}

	for (;;) __syscall(SYS_exit, 0);
}
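
The cancelbuf loop at the top of pthread_exit is what runs cleanup handlers still registered via pthread_cleanup_push when the thread exits. A minimal sketch of that interaction; the handler, message, and return value are illustrative:

#include <pthread.h>
#include <stdio.h>

static void cleanup(void *arg)
{
	printf("cleanup ran: %s\n", (const char *)arg);
}

static void *worker(void *arg)
{
	pthread_cleanup_push(cleanup, "still registered at exit");
	/* pthread_exit pops and runs every handler still on the stack,
	 * then runs thread-specific data destructors, then terminates */
	pthread_exit((void *)42);
	pthread_cleanup_pop(0);	/* not reached; balances the push */
	return 0;
}

int main(void)
{
	pthread_t t;
	void *res;
	pthread_create(&t, 0, worker, 0);
	pthread_join(t, &res);
	printf("worker result: %ld\n", (long)res);
	return 0;
}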