Example #1
/*
 * Create a condition variable
 */
int
lthread_cond_init(char *name, struct lthread_cond **cond,
		  __rte_unused const struct lthread_condattr *attr)
{
	struct lthread_cond *c;

	if (cond == NULL)
		return POSIX_ERRNO(EINVAL);

	/* allocate a condition variable from cache */
	c = _lthread_objcache_alloc((THIS_SCHED)->cond_cache);

	if (c == NULL)
		return POSIX_ERRNO(EAGAIN);

	c->blocked = _lthread_queue_create("blocked");
	if (c->blocked == NULL) {
		_lthread_objcache_free((THIS_SCHED)->cond_cache, (void *)c);
		return POSIX_ERRNO(EAGAIN);
	}

	if (name == NULL)
		strncpy(c->name, "no name", sizeof(c->name));
	else
		strncpy(c->name, name, sizeof(c->name));
	c->name[sizeof(c->name)-1] = 0;

	/* record the scheduler that created (and owns) this condition variable */
	c->root_sched = THIS_SCHED;

	(*cond) = c;
	DIAG_CREATE_EVENT((*cond), LT_DIAG_COND_CREATE);
	return 0;
}
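
A minimal calling sketch, not part of the example above: the variable name my_cond and the condvar name "waiters" are illustrative. Since attr is marked __rte_unused, passing NULL for it is fine.

	struct lthread_cond *my_cond;
	int ret = lthread_cond_init("waiters", &my_cond, NULL);
	if (ret != 0)
		return ret;	/* EINVAL for a NULL cond pointer, EAGAIN on allocation failure */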
Example #2
/*
 * Create a mutex
 */
int
lthread_mutex_init(char *name, struct lthread_mutex **mutex,
		   __rte_unused const struct lthread_mutexattr *attr)
{
	struct lthread_mutex *m;

	if (mutex == NULL)
		return POSIX_ERRNO(EINVAL);

	/* allocate a mutex from the per-scheduler object cache */
	m = _lthread_objcache_alloc((THIS_SCHED)->mutex_cache);
	if (m == NULL)
		return POSIX_ERRNO(EAGAIN);

	m->blocked = _lthread_queue_create("blocked queue");
	if (m->blocked == NULL) {
		_lthread_objcache_free((THIS_SCHED)->mutex_cache, m);
		return POSIX_ERRNO(EAGAIN);
	}

	if (name == NULL)
		strncpy(m->name, "no name", sizeof(m->name));
	else
		strncpy(m->name, name, sizeof(m->name));
	m->name[sizeof(m->name)-1] = 0;

	/* record the creating scheduler; the mutex starts out unowned */
	m->root_sched = THIS_SCHED;
	m->owner = NULL;

	/* initialise the atomic count used to track contention on the lock */
	rte_atomic64_init(&m->count);

	DIAG_CREATE_EVENT(m, LT_DIAG_MUTEX_CREATE);
	/* success */
	(*mutex) = m;
	return 0;
}
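
As with the condition variable, the attribute argument is unused and may be NULL. A hypothetical caller (my_mutex and the error path are illustrative):

	struct lthread_mutex *my_mutex;
	if (lthread_mutex_init("state lock", &my_mutex, NULL) != 0)
		rte_exit(EXIT_FAILURE, "cannot create mutex\n");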
Example #3
static int
_lthread_sched_alloc_resources(struct lthread_sched *new_sched)
{
	int alloc_status;

	do {
		/* Initialize per scheduler queue node pool */
		alloc_status = SCHED_ALLOC_QNODE_POOL;
		new_sched->qnode_pool =
			_qnode_pool_create("qnode pool", LTHREAD_PREALLOC);
		if (new_sched->qnode_pool == NULL)
			break;

		/* Initialize per scheduler local ready queue */
		alloc_status = SCHED_ALLOC_READY_QUEUE;
		new_sched->ready = _lthread_queue_create("ready queue");
		if (new_sched->ready == NULL)
			break;

		/* Initialize per scheduler local peer ready queue */
		alloc_status = SCHED_ALLOC_PREADY_QUEUE;
		new_sched->pready = _lthread_queue_create("pready queue");
		if (new_sched->pready == NULL)
			break;

		/* Initialize per scheduler local free lthread cache */
		alloc_status = SCHED_ALLOC_LTHREAD_CACHE;
		new_sched->lthread_cache =
			_lthread_objcache_create("lthread cache",
						sizeof(struct lthread),
						LTHREAD_PREALLOC);
		if (new_sched->lthread_cache == NULL)
			break;

		/* Initialize per scheduler local free stack cache */
		alloc_status = SCHED_ALLOC_STACK_CACHE;
		new_sched->stack_cache =
			_lthread_objcache_create("stack_cache",
						sizeof(struct lthread_stack),
						LTHREAD_PREALLOC);
		if (new_sched->stack_cache == NULL)
			break;

		/* Initialize per scheduler local free per lthread data cache */
		alloc_status = SCHED_ALLOC_PERLT_CACHE;
		new_sched->per_lthread_cache =
			_lthread_objcache_create("per_lt cache",
						RTE_PER_LTHREAD_SECTION_SIZE,
						LTHREAD_PREALLOC);
		if (new_sched->per_lthread_cache == NULL)
			break;

		/* Initialize per scheduler local free tls cache */
		alloc_status = SCHED_ALLOC_TLS_CACHE;
		new_sched->tls_cache =
			_lthread_objcache_create("TLS cache",
						sizeof(struct lthread_tls),
						LTHREAD_PREALLOC);
		if (new_sched->tls_cache == NULL)
			break;

		/* Initialize per scheduler local free cond var cache */
		alloc_status = SCHED_ALLOC_COND_CACHE;
		new_sched->cond_cache =
			_lthread_objcache_create("cond cache",
						sizeof(struct lthread_cond),
						LTHREAD_PREALLOC);
		if (new_sched->cond_cache == NULL)
			break;

		/* Initialize per scheduler local free mutex cache */
		alloc_status = SCHED_ALLOC_MUTEX_CACHE;
		new_sched->mutex_cache =
			_lthread_objcache_create("mutex cache",
						sizeof(struct lthread_mutex),
						LTHREAD_PREALLOC);
		if (new_sched->mutex_cache == NULL)
			break;

		alloc_status = SCHED_ALLOC_OK;
	} while (0);

	/* roll back on any failure */
	switch (alloc_status) {
	case SCHED_ALLOC_MUTEX_CACHE:
		_lthread_objcache_destroy(new_sched->cond_cache);
		/* fall through */
	case SCHED_ALLOC_COND_CACHE:
		_lthread_objcache_destroy(new_sched->tls_cache);
		/* fall through */
	case SCHED_ALLOC_TLS_CACHE:
		_lthread_objcache_destroy(new_sched->per_lthread_cache);
		/* fall through */
	case SCHED_ALLOC_PERLT_CACHE:
		_lthread_objcache_destroy(new_sched->stack_cache);
		/* fall through */
	case SCHED_ALLOC_STACK_CACHE:
		_lthread_objcache_destroy(new_sched->lthread_cache);
		/* fall through */
	case SCHED_ALLOC_LTHREAD_CACHE:
		_lthread_queue_destroy(new_sched->pready);
		/* fall through */
	case SCHED_ALLOC_PREADY_QUEUE:
		_lthread_queue_destroy(new_sched->ready);
		/* fall through */
	case SCHED_ALLOC_READY_QUEUE:
		_qnode_pool_destroy(new_sched->qnode_pool);
		/* fall through */
	case SCHED_ALLOC_QNODE_POOL:
		/* fall through */
	case SCHED_ALLOC_OK:
		break;
	}
	return alloc_status;
}
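
The do { } while (0) block here works as a structured goto: each stage records its identity in alloc_status before attempting its allocation, so a single break jumps straight to the switch, whose fall-through cases unwind exactly the stages that succeeded, in reverse order. A hypothetical caller then only needs one success test; the sketch below assumes new_sched comes from rte_calloc and that SCHED_ALLOC_OK is the success status, as the roll-back switch implies.

	struct lthread_sched *new_sched;

	new_sched = rte_calloc("lthread_sched", 1,
			       sizeof(struct lthread_sched), RTE_CACHE_LINE_SIZE);
	if (new_sched == NULL)
		return NULL;

	if (_lthread_sched_alloc_resources(new_sched) != SCHED_ALLOC_OK) {
		/* everything allocated so far was rolled back inside the
		 * function, so only the scheduler itself is left to free
		 */
		rte_free(new_sched);
		return NULL;
	}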