Example #1
/*
 * Destroy a condition variable
 */
int lthread_cond_destroy(struct lthread_cond *c)
{
	if (c == NULL) {
		DIAG_EVENT(c, LT_DIAG_COND_DESTROY, c, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	/* try to destroy the blocked queue */
	if (_lthread_queue_destroy(c->blocked) < 0) {
		/* queue in use */
		DIAG_EVENT(c, LT_DIAG_COND_DESTROY, c, POSIX_ERRNO(EBUSY));
		return POSIX_ERRNO(EBUSY);
	}

	/* queue destroyed, return the cond var to its cache */
	_lthread_objcache_free(c->root_sched->cond_cache, c);
	DIAG_EVENT(c, LT_DIAG_COND_DESTROY, c, 0);
	return 0;
}
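
For context, a hedged sketch of the lifecycle around this destroy path. The lthread_cond_init() and lthread_cond_broadcast() signatures are assumed from the library's public header, and the snippet is assumed to run from within an lthread on a running scheduler:

	struct lthread_cond *cond;

	/* allocate the cond var (drawn from the scheduler's cond cache) */
	if (lthread_cond_init("demo cond", &cond, NULL) < 0)
		return;

	/* ... other lthreads may block in lthread_cond_wait(cond, 0) ... */

	/* wake any waiters first, otherwise destroy reports EBUSY */
	lthread_cond_broadcast(cond);

	if (lthread_cond_destroy(cond) == POSIX_ERRNO(EBUSY)) {
		/* waiters still queued, destroy must be retried later */
	}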
Example #2
/*
 * Destroy a mutex
 */
int lthread_mutex_destroy(struct lthread_mutex *m)
{
	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	if (m->owner == NULL) {
		/* try to delete the blocked queue */
		if (_lthread_queue_destroy(m->blocked) < 0) {
			DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY,
					m, POSIX_ERRNO(EBUSY));
			return POSIX_ERRNO(EBUSY);
		}

		/* return the mutex to its cache */
		_lthread_objcache_free(m->root_sched->mutex_cache, m);
		DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY, m, 0);
		return 0;
	}
	/* can't destroy it, the mutex is still owned */
	DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY, m, POSIX_ERRNO(EBUSY));
	return POSIX_ERRNO(EBUSY);
}
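
Likewise, a minimal sketch of the mutex lifecycle, assuming the lthread_mutex_init()/lock()/unlock() signatures from the library's public header; destroy succeeds only when the mutex is unowned and its blocked queue is empty:

	struct lthread_mutex *m;

	if (lthread_mutex_init("demo mutex", &m, NULL) < 0)
		return;

	lthread_mutex_lock(m);
	/* ... critical section ... */
	lthread_mutex_unlock(m);

	/* EBUSY while owned, or while lthreads still sit on m->blocked */
	if (lthread_mutex_destroy(m) == POSIX_ERRNO(EBUSY)) {
		/* still in use, retry after contention drains */
	}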
Example #3
static int
_lthread_sched_alloc_resources(struct lthread_sched *new_sched)
{
	int alloc_status;

	do {
		/* Initialize per scheduler queue node pool */
		alloc_status = SCHED_ALLOC_QNODE_POOL;
		new_sched->qnode_pool =
			_qnode_pool_create("qnode pool", LTHREAD_PREALLOC);
		if (new_sched->qnode_pool == NULL)
			break;

		/* Initialize per scheduler local ready queue */
		alloc_status = SCHED_ALLOC_READY_QUEUE;
		new_sched->ready = _lthread_queue_create("ready queue");
		if (new_sched->ready == NULL)
			break;

		/* Initialize per scheduler local peer ready queue */
		alloc_status = SCHED_ALLOC_PREADY_QUEUE;
		new_sched->pready = _lthread_queue_create("pready queue");
		if (new_sched->pready == NULL)
			break;

		/* Initialize per scheduler local free lthread cache */
		alloc_status = SCHED_ALLOC_LTHREAD_CACHE;
		new_sched->lthread_cache =
			_lthread_objcache_create("lthread cache",
						sizeof(struct lthread),
						LTHREAD_PREALLOC);
		if (new_sched->lthread_cache == NULL)
			break;

		/* Initialize per scheduler local free stack cache */
		alloc_status = SCHED_ALLOC_STACK_CACHE;
		new_sched->stack_cache =
			_lthread_objcache_create("stack_cache",
						sizeof(struct lthread_stack),
						LTHREAD_PREALLOC);
		if (new_sched->stack_cache == NULL)
			break;

		/* Initialize per scheduler local free per lthread data cache */
		alloc_status = SCHED_ALLOC_PERLT_CACHE;
		new_sched->per_lthread_cache =
			_lthread_objcache_create("per_lt cache",
						RTE_PER_LTHREAD_SECTION_SIZE,
						LTHREAD_PREALLOC);
		if (new_sched->per_lthread_cache == NULL)
			break;

		/* Initialize per scheduler local free tls cache */
		alloc_status = SCHED_ALLOC_TLS_CACHE;
		new_sched->tls_cache =
			_lthread_objcache_create("TLS cache",
						sizeof(struct lthread_tls),
						LTHREAD_PREALLOC);
		if (new_sched->tls_cache == NULL)
			break;

		/* Initialize per scheduler local free cond var cache */
		alloc_status = SCHED_ALLOC_COND_CACHE;
		new_sched->cond_cache =
			_lthread_objcache_create("cond cache",
						sizeof(struct lthread_cond),
						LTHREAD_PREALLOC);
		if (new_sched->cond_cache == NULL)
			break;

		/* Initialize per scheduler local free mutex cache */
		alloc_status = SCHED_ALLOC_MUTEX_CACHE;
		new_sched->mutex_cache =
			_lthread_objcache_create("mutex cache",
						sizeof(struct lthread_mutex),
						LTHREAD_PREALLOC);
		if (new_sched->mutex_cache == NULL)
			break;

		alloc_status = SCHED_ALLOC_OK;
	} while (0);

	/* roll back on any failure */
	switch (alloc_status) {
	case SCHED_ALLOC_MUTEX_CACHE:
		_lthread_objcache_destroy(new_sched->cond_cache);
		/* fall through */
	case SCHED_ALLOC_COND_CACHE:
		_lthread_objcache_destroy(new_sched->tls_cache);
		/* fall through */
	case SCHED_ALLOC_TLS_CACHE:
		_lthread_objcache_destroy(new_sched->per_lthread_cache);
		/* fall through */
	case SCHED_ALLOC_PERLT_CACHE:
		_lthread_objcache_destroy(new_sched->stack_cache);
		/* fall through */
	case SCHED_ALLOC_STACK_CACHE:
		_lthread_objcache_destroy(new_sched->lthread_cache);
		/* fall through */
	case SCHED_ALLOC_LTHREAD_CACHE:
		_lthread_queue_destroy(new_sched->pready);
		/* fall through */
	case SCHED_ALLOC_PREADY_QUEUE:
		_lthread_queue_destroy(new_sched->ready);
		/* fall through */
	case SCHED_ALLOC_READY_QUEUE:
		_qnode_pool_destroy(new_sched->qnode_pool);
		/* fall through */
	case SCHED_ALLOC_QNODE_POOL:
		/* fall through */
	case SCHED_ALLOC_OK:
		break;
	}
	return alloc_status;
}
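
The function above is a textbook instance of the do { ... } while (0) allocation idiom: alloc_status records how far the sequence got before a break, and a fall-through switch unwinds the already-acquired resources in exact reverse order. A standalone sketch of the same pattern with hypothetical names (plain malloc/free stand in for the pool/cache constructors; nothing here is part of the lthread library):

	#include <stdlib.h>

	enum { ALLOC_NONE, ALLOC_A, ALLOC_B, ALLOC_OK };

	struct ctx {
		void *a;
		void *b;
	};

	static int ctx_alloc_resources(struct ctx *c)
	{
		int status = ALLOC_NONE;

		do {
			status = ALLOC_A;
			c->a = malloc(64);
			if (c->a == NULL)
				break;

			status = ALLOC_B;
			c->b = malloc(64);
			if (c->b == NULL)
				break;

			status = ALLOC_OK;
		} while (0);

		/* roll back on any failure, newest acquisition first */
		switch (status) {
		case ALLOC_B:	/* b failed: a was acquired, release it */
			free(c->a);
			/* fall through */
		case ALLOC_A:	/* a failed: nothing acquired yet */
		case ALLOC_OK:
			break;
		}
		return status == ALLOC_OK ? 0 : -1;
	}

The key invariant is that the case label for step N releases everything acquired before step N, so a single break out of the do/while always lands on the correct unwind entry.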