Example #1
/*
 * Broadcast a condition variable
 */
int lthread_cond_broadcast(struct lthread_cond *c)
{
	struct lthread *lt;

	if (c == NULL) {
		DIAG_EVENT(c, LT_DIAG_COND_BROADCAST, c, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	DIAG_EVENT(c, LT_DIAG_COND_BROADCAST, c, 0);
	do {
		/* drain the queue waking everybody */
		lt = _lthread_queue_remove(c->blocked);

		if (lt != NULL) {
			DIAG_EVENT(c, LT_DIAG_COND_BROADCAST, c, lt);
			/* wake up */
			_ready_queue_insert((struct lthread_sched *)lt->sched,
					    lt);
		}
	} while (!_lthread_queue_empty(c->blocked));
	_reschedule();
	DIAG_EVENT(c, LT_DIAG_COND_BROADCAST, c, 0);
	return 0;
}
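A minimal usage sketch: several worker lthreads park on a condition variable with lthread_cond_wait() (shown in Example #8 below), and a controller releases them all at once with lthread_cond_broadcast(). The start_cond variable, and the init call that would create it, are assumptions not shown in these examples.

/* Sketch only: wake every waiter with a single broadcast.
 * 'start_cond' is assumed to have been created elsewhere
 * (e.g. by an lthread_cond_init() call, not shown in these examples).
 */
static struct lthread_cond *start_cond;

static void worker(__rte_unused void *arg)
{
	/* park on start_cond->blocked until the controller broadcasts */
	lthread_cond_wait(start_cond, 0);
	/* ... do the actual work ... */
}

static void controller(__rte_unused void *arg)
{
	/* drain start_cond->blocked, making every queued worker ready */
	lthread_cond_broadcast(start_cond);
}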
Example #2
/*
 * Unlock a mutex
 */
int lthread_mutex_unlock(struct lthread_mutex *m)
{
	struct lthread *lt = THIS_LTHREAD;
	struct lthread *unblocked;

	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	/* fail if not owned by the calling thread */
	if (m->owner != lt || m->owner == NULL) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, POSIX_ERRNO(EPERM));
		return POSIX_ERRNO(EPERM);
	}

	rte_atomic64_dec(&m->count);
	/* if there are blocked threads then make one ready */
	while (rte_atomic64_read(&m->count) > 0) {
		unblocked = _lthread_queue_remove(m->blocked);

		if (unblocked != NULL) {
			rte_atomic64_dec(&m->count);
			DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, unblocked);
			RTE_ASSERT(unblocked->sched != NULL);
			_ready_queue_insert((struct lthread_sched *)
					    unblocked->sched, unblocked);
			break;
		}
	}
	/* release the lock */
	m->owner = NULL;
	return 0;
}
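A short usage sketch of the lock/unlock pairing; lthread_mutex_lock() appears in Example #5 below. The mutex itself, and the init call that would create it, are assumptions not shown in these examples.

/* Sketch only: protect a shared counter with an lthread mutex.
 * 'counter_lock' is assumed to have been created elsewhere
 * (e.g. by lthread_mutex_init(), not shown in these examples).
 */
static struct lthread_mutex *counter_lock;
static uint64_t shared_counter;

static void bump_counter(void)
{
	if (lthread_mutex_lock(counter_lock) != 0)
		return;				/* EINVAL or EDEADLK */

	shared_counter++;			/* critical section */

	/* wakes one blocked waiter (if any) and releases ownership */
	lthread_mutex_unlock(counter_lock);
}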
Example #3
/* try to lock a mutex but don't block */
int lthread_mutex_trylock(struct lthread_mutex *m)
{
	struct lthread *lt = THIS_LTHREAD;

	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	if (m->owner == lt) {
		/* no recursion */
		DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, POSIX_ERRNO(EDEADLK));
		return POSIX_ERRNO(EDEADLK);
	}

	rte_atomic64_inc(&m->count);
	if (rte_atomic64_cmpset
	    ((uint64_t *) &m->owner, (uint64_t) NULL, (uint64_t) lt)) {
		/* got the lock */
		DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, 0);
		return 0;
	}

	/* failed so return busy */
	rte_atomic64_dec(&m->count);
	DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, POSIX_ERRNO(EBUSY));
	return POSIX_ERRNO(EBUSY);
}
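A sketch of the non-blocking path: the call either takes the mutex immediately, or returns EDEADLK (the caller already owns it) or EBUSY (another lthread does), so the caller can back off instead of parking on m->blocked. The statistics update is illustrative only.

/* Sketch only: opportunistic locking with a back-off instead of blocking. */
static void try_update_stats(struct lthread_mutex *stats_lock)
{
	int ret = lthread_mutex_trylock(stats_lock);

	if (ret == POSIX_ERRNO(EBUSY)) {
		/* another lthread holds the mutex; skip this round */
		return;
	}
	if (ret != 0)
		return;				/* EINVAL or EDEADLK */

	/* ... update shared statistics ... */

	lthread_mutex_unlock(stats_lock);
}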
Example #4
/*
 * Signal a condition variable
 * attempt to resume any blocked thread
 */
int lthread_cond_signal(struct lthread_cond *c)
{
	struct lthread *lt;

	if (c == NULL) {
		DIAG_EVENT(c, LT_DIAG_COND_SIGNAL, c, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	lt = _lthread_queue_remove(c->blocked);

	if (lt != NULL) {
		/* okay wake up this thread */
		DIAG_EVENT(c, LT_DIAG_COND_SIGNAL, c, lt);
		_ready_queue_insert((struct lthread_sched *)lt->sched, lt);
	}
	return 0;
}
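In contrast to broadcast, signal dequeues and wakes at most one waiter. A sketch of a simple notifier; the condition variable and the application queue it guards are assumed to exist elsewhere.

/* Sketch only: wake a single consumer when an item becomes available.
 * 'item_available' is assumed to have been created elsewhere.
 */
static void producer_enqueue(struct lthread_cond *item_available)
{
	/* ... place an item on some application queue ... */

	/* wake exactly one waiting consumer (a no-op if none is queued) */
	lthread_cond_signal(item_available);
}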
Example #5
/*
 * Try to obtain a mutex
 */
int lthread_mutex_lock(struct lthread_mutex *m)
{
	struct lthread *lt = THIS_LTHREAD;

	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	/* allow no recursion */
	if (m->owner == lt) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, POSIX_ERRNO(EDEADLK));
		return POSIX_ERRNO(EDEADLK);
	}

	for (;;) {
		rte_atomic64_inc(&m->count);
		do {
			if (rte_atomic64_cmpset
			    ((uint64_t *) &m->owner, 0, (uint64_t) lt)) {
				/* happy days, we got the lock */
				DIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, 0);
				return 0;
			}
			/* spin due to a race with unlock when
			 * nothing was blocked
			 */
		} while ((rte_atomic64_read(&m->count) == 1) &&
				(m->owner == NULL));

		/* queue the current thread in the blocked queue;
		 * we defer this until we return to the scheduler, to ensure
		 * that the current thread's context is saved before an
		 * unlock could dequeue and resume it
		 */
		DIAG_EVENT(m, LT_DIAG_MUTEX_BLOCKED, m, lt);
		lt->pending_wr_queue = m->blocked;
		/* now relinquish cpu */
		_suspend();
		/* resumed, must loop and compete for the lock again */
	}
	return 0;
}
Example #6
/*
 * Destroy a condition variable
 */
int lthread_cond_destroy(struct lthread_cond *c)
{
	if (c == NULL) {
		DIAG_EVENT(c, LT_DIAG_COND_DESTROY, c, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	/* try to free it */
	if (_lthread_queue_destroy(c->blocked) < 0) {
		/* queue in use */
		DIAG_EVENT(c, LT_DIAG_COND_DESTROY, c, POSIX_ERRNO(EBUSY));
		return POSIX_ERRNO(EBUSY);
	}

	/* okay free it */
	_lthread_objcache_free(c->root_sched->cond_cache, c);
	DIAG_EVENT(c, LT_DIAG_COND_DESTROY, c, 0);
	return 0;
}
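A teardown sketch: destruction only succeeds once no lthread is still queued on the condition variable, otherwise EBUSY is returned. Waking the remaining waiters first (for example with a broadcast) is one way to drain the queue; the helper below is illustrative only.

/* Sketch only: shut down a condition variable, waking any stragglers first. */
static int shutdown_cond(struct lthread_cond *c)
{
	/* move any remaining waiters to their ready queues */
	lthread_cond_broadcast(c);

	/* still returns EBUSY if a waiter managed to queue in the meantime */
	return lthread_cond_destroy(c);
}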
Example #7
/*
 * Create a scheduler on the current lcore
 */
struct lthread_sched *_lthread_sched_create(size_t stack_size)
{
	int status;
	struct lthread_sched *new_sched;
	unsigned lcoreid = rte_lcore_id();

	RTE_ASSERT(stack_size <= LTHREAD_MAX_STACK_SIZE);

	if (stack_size == 0)
		stack_size = LTHREAD_MAX_STACK_SIZE;

	new_sched =
	     rte_calloc_socket(NULL, 1, sizeof(struct lthread_sched),
				RTE_CACHE_LINE_SIZE,
				rte_socket_id());
	if (new_sched == NULL) {
		RTE_LOG(CRIT, LTHREAD,
			"Failed to allocate memory for scheduler\n");
		return NULL;
	}

	_lthread_key_pool_init();

	new_sched->stack_size = stack_size;
	new_sched->birth = rte_rdtsc();
	THIS_SCHED = new_sched;

	status = _lthread_sched_alloc_resources(new_sched);
	if (status != SCHED_ALLOC_OK) {
		RTE_LOG(CRIT, LTHREAD,
			"Failed to allocate resources for scheduler code = %d\n",
			status);
		rte_free(new_sched);
		return NULL;
	}

	bzero(&new_sched->ctx, sizeof(struct ctx));

	new_sched->lcore_id = lcoreid;

	schedcore[lcoreid] = new_sched;

	new_sched->run_flag = 1;

	DIAG_EVENT(new_sched, LT_DIAG_SCHED_CREATE, rte_lcore_id(), 0);

	rte_wmb();
	return new_sched;
}
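_lthread_sched_create() is internal; applications normally reach it indirectly through the public entry points. A rough per-lcore launch sketch follows, assuming lthread_create() and lthread_run() from the public API (their exact signatures are not shown in these examples, and initial_lthread_fn is a placeholder).

/* Sketch only: typical per-lcore launcher; the scheduler is created for
 * the lcore as a side effect, not by calling _lthread_sched_create()
 * directly. lthread_create()/lthread_run() are assumed public API.
 */
static void initial_lthread_fn(__rte_unused void *arg)
{
	/* application work starts here */
}

static int lcore_main_loop(__rte_unused void *arg)
{
	struct lthread *lt;

	/* spawn the first lthread on this lcore (-1 assumed to mean "here") */
	lthread_create(&lt, -1, initial_lthread_fn, NULL);

	/* run this lcore's scheduler until it is shut down */
	lthread_run();
	return 0;
}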
Example #8
/*
 * Wait on a condition variable
 */
int lthread_cond_wait(struct lthread_cond *c, __rte_unused uint64_t reserved)
{
	struct lthread *lt = THIS_LTHREAD;

	if (c == NULL) {
		DIAG_EVENT(c, LT_DIAG_COND_WAIT, c, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}


	DIAG_EVENT(c, LT_DIAG_COND_WAIT, c, 0);

	/* queue the current thread in the blocked queue;
	 * the write is deferred until we return to the scheduler, to ensure
	 * that the current thread's context is saved before any signal
	 * could dequeue and resume it
	 */
	lt->pending_wr_queue = c->blocked;
	_suspend();

	/* the condition happened */
	return 0;
}
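Because lthread_cond_wait() takes no mutex and simply parks the caller until a signal or broadcast arrives, the usual pattern is to re-check the guarded condition in a loop after waking. A sketch, with the flag name purely illustrative:

/* Sketch only: wait until a hypothetical 'data_ready' flag is set. */
static void wait_for_data(struct lthread_cond *c, volatile int *data_ready)
{
	/* re-check after every wakeup: a broadcast wakes all waiters,
	 * and another woken lthread may have consumed the data already
	 */
	while (!*data_ready)
		lthread_cond_wait(c, 0);
}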
Example #9
/*
 * Destroy a mutex
 */
int lthread_mutex_destroy(struct lthread_mutex *m)
{
	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	if (m->owner == NULL) {
		/* try to delete the blocked queue */
		if (_lthread_queue_destroy(m->blocked) < 0) {
			DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY,
					m, POSIX_ERRNO(EBUSY));
			return POSIX_ERRNO(EBUSY);
		}

		/* free the mutex to cache */
		_lthread_objcache_free(m->root_sched->mutex_cache, m);
		DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY, m, 0);
		return 0;
	}
	/* can't destroy, it's still in use */
	DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY, m, POSIX_ERRNO(EBUSY));
	return POSIX_ERRNO(EBUSY);
}
Example #10
/*
 * Handle sleep timer expiry
 */
void
_sched_timer_cb(struct rte_timer *tim, void *arg)
{
	struct lthread *lt = (struct lthread *) arg;
	uint64_t state = lt->state;

	DIAG_EVENT(lt, LT_DIAG_LTHREAD_TMR_EXPIRED, &lt->tim, 0);

	rte_timer_stop(tim);

	if (lt->state & BIT(ST_LT_CANCELLED))
		(THIS_SCHED)->nb_blocked_threads--;

	lt->state = state | BIT(ST_LT_EXPIRED);
	_lthread_resume(lt);
	lt->state = state & CLEARBIT(ST_LT_EXPIRED);
}
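This callback fires when a sleeping lthread's rte_timer expires: the thread is flagged ST_LT_EXPIRED and resumed. Application code normally enters this path through the sleep call; the sketch below assumes lthread_sleep() taking nanoseconds, which is not shown in these examples.

/* Sketch only: a periodic lthread driven by the timer-expiry path above.
 * lthread_sleep() (nanoseconds) is assumed from the public API.
 */
static void heartbeat(__rte_unused void *arg)
{
	for (;;) {
		/* ... emit a heartbeat or poll some state ... */

		/* suspend; _sched_timer_cb() makes us ready again on expiry */
		lthread_sleep(1000000000ULL);	/* roughly one second */
	}
}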
Example #11
/*
 * Migrate the current thread to another scheduler running
 * on the specified lcore.
 */
int lthread_set_affinity(unsigned lcoreid)
{
	struct lthread *lt = THIS_LTHREAD;
	struct lthread_sched *dest_sched;

	if (unlikely(lcoreid >= LTHREAD_MAX_LCORES))
		return POSIX_ERRNO(EINVAL);

	DIAG_EVENT(lt, LT_DIAG_LTHREAD_AFFINITY, lcoreid, 0);

	dest_sched = schedcore[lcoreid];

	if (unlikely(dest_sched == NULL))
		return POSIX_ERRNO(EINVAL);

	if (likely(dest_sched != THIS_SCHED)) {
		lt->sched = dest_sched;
		lt->pending_wr_queue = dest_sched->pready;
		_affinitize();
		return 0;
	}
	return 0;
}
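A sketch of migrating the calling lthread to another lcore; the destination lcore must already be running a scheduler, otherwise EINVAL is returned. The lcore id used here is illustrative.

/* Sketch only: move the current lthread onto lcore 2 for a heavy phase. */
static void do_heavy_phase(void)
{
	if (lthread_set_affinity(2) != 0)
		return;			/* bad lcore id or no scheduler there */

	/* from here on we run under the destination lcore's scheduler */
	/* ... heavy work ... */
}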
Example #12
static inline void _lthread_resume(struct lthread *lt)
{
	struct lthread_sched *sched = THIS_SCHED;
	struct lthread_stack *s;
	uint64_t state = lt->state;
#if LTHREAD_DIAG
	int init = 0;
#endif

	sched->current_lthread = lt;

	if (state & (BIT(ST_LT_CANCELLED) | BIT(ST_LT_EXITED))) {
		/* if detached we can free the thread now */
		if (state & BIT(ST_LT_DETACH)) {
			_lthread_free(lt);
			sched->current_lthread = NULL;
			return;
		}
	}

	if (state & BIT(ST_LT_INIT)) {
		/* first time this thread has been run */
		/* assign thread to this scheduler */
		lt->sched = THIS_SCHED;

		/* allocate stack */
		s = _stack_alloc();

		lt->stack_container = s;
		_lthread_set_stack(lt, s->stack, s->stack_size);

		/* allocate memory for TLS used by this thread */
		_lthread_tls_alloc(lt);

		lt->state = BIT(ST_LT_READY);
#if LTHREAD_DIAG
		init = 1;
#endif
	}

	DIAG_EVENT(lt, LT_DIAG_LTHREAD_RESUMED, init, lt);

	/* switch to the new thread */
	ctx_switch(&lt->ctx, &sched->ctx);

	/* If posting to a queue that could be read by another lcore,
	 * we defer the queue write until now to ensure the context has
	 * been saved before the other core tries to resume it.
	 * This applies to blocking on a mutex or cond, and to set_affinity.
	 */
	if (lt->pending_wr_queue != NULL) {
		struct lthread_queue *dest = lt->pending_wr_queue;

		lt->pending_wr_queue = NULL;

		/* queue the current thread to the specified queue */
		_lthread_queue_insert_mp(dest, lt);
	}

	sched->current_lthread = NULL;
}