Example 1
int pthread_cond_broadcast(pthread_cond_t *cv)
{
	int key = irq_lock();

	/* Make every thread pending on the condition variable runnable */
	while (!sys_dlist_is_empty(&cv->wait_q)) {
		ready_one_thread(&cv->wait_q);
	}

	/* Context-switch if a woken thread should preempt us, else just
	 * release the IRQ lock
	 */
	swap_or_unlock(key);

	return 0;
}
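For context, here is a minimal caller-side sketch of how a broadcast like this is used, written against the standard POSIX API rather than taken from the Zephyr source: several waiters block on a condition variable, and a single broadcast wakes them all at once.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int ready;

static void *waiter(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!ready) {	/* guard against spurious wakeups */
		pthread_cond_wait(&cond, &lock);
	}
	pthread_mutex_unlock(&lock);
	printf("thread %ld woke up\n", (long)arg);
	return NULL;
}

int main(void)
{
	pthread_t th[3];

	for (long i = 0; i < 3; i++) {
		pthread_create(&th[i], NULL, waiter, (void *)i);
	}

	pthread_mutex_lock(&lock);
	ready = 1;
	pthread_cond_broadcast(&cond);	/* readies every pending waiter */
	pthread_mutex_unlock(&lock);

	for (int i = 0; i < 3; i++) {
		pthread_join(th[i], NULL);
	}
	return 0;
}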
Example 2
void k_mem_pool_free(struct k_mem_block *block)
{
	int i, key, need_sched = 0;
	struct k_mem_pool *p = get_pool(block->id.pool);
	size_t lsizes[p->n_levels];

	/* As in k_mem_pool_alloc(), we build a table of level sizes
	 * to avoid having to store it in precious RAM bytes.
	 * Overhead here is somewhat higher because free_block()
	 * doesn't inherently need to traverse all the larger
	 * sublevels.
	 */
	lsizes[0] = _ALIGN4(p->max_sz);
	for (i = 1; i <= block->id.level; i++) {
		lsizes[i] = _ALIGN4(lsizes[i-1] / 4);
	}

	free_block(p, block->id.level, lsizes, block->id.block);

	/* Wake up anyone blocked on this pool and let them repeat
	 * their allocation attempts
	 */
	key = irq_lock();

	while (!sys_dlist_is_empty(&p->wait_q)) {
		struct k_thread *th = (void *)sys_dlist_peek_head(&p->wait_q);

		/* Remove the thread from the wait queue, cancel its
		 * allocation timeout, and make it runnable again
		 */
		_unpend_thread(th);
		_abort_thread_timeout(th);
		_ready_thread(th);
		need_sched = 1;
	}

	if (need_sched && !_is_in_isr()) {
		_reschedule_threads(key);
	} else {
		irq_unlock(key);
	}
}
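To make the lsizes[] arithmetic above concrete, here is a small self-contained sketch of the same recurrence: each level's block size is a quarter of the level above it, rounded to a 4-byte boundary. ALIGN4 and level_size are illustrative names standing in for the kernel's _ALIGN4 and the lsizes[] table, not Zephyr APIs.

#include <stdio.h>
#include <stddef.h>

#define ALIGN4(x) (((x) + 3) & ~(size_t)3)	/* stand-in for _ALIGN4 */

static size_t level_size(size_t max_sz, int level)
{
	size_t sz = ALIGN4(max_sz);

	for (int i = 0; i < level; i++) {
		sz = ALIGN4(sz / 4);	/* each level is a quarter block */
	}
	return sz;
}

int main(void)
{
	/* A pool whose largest blocks are 4096 bytes has levels of
	 * 4096, 1024, 256 and 64 bytes
	 */
	for (int l = 0; l < 4; l++) {
		printf("level %d: %zu bytes\n", l, level_size(4096, l));
	}
	return 0;
}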
Example 3
int pthread_barrier_wait(pthread_barrier_t *b)
{
	int key = irq_lock();

	b->count++;

	if (b->count >= b->max) {
		/* Last thread to arrive: reset the barrier and make
		 * every pending waiter runnable
		 */
		b->count = 0;

		while (!sys_dlist_is_empty(&b->wait_q)) {
			ready_one_thread(&b->wait_q);
		}

		/* No context switch needed: release the lock and return */
		if (!__must_switch_threads()) {
			irq_unlock(key);
			return 0;
		}
	} else {
		/* Not the last arrival: block until the barrier trips */
		_pend_current_thread(&b->wait_q, K_FOREVER);
	}

	return _Swap(key);
}
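Again for context, a minimal caller-side sketch of the same barrier behavior using the standard POSIX API (illustrative, not from the Zephyr source): four threads rendezvous, and the last one to arrive releases the rest, mirroring the count >= max path above.

#include <pthread.h>
#include <stdio.h>

#define NTHREADS 4

static pthread_barrier_t barrier;

static void *worker(void *arg)
{
	printf("thread %ld waiting\n", (long)arg);
	pthread_barrier_wait(&barrier);	/* blocks until all 4 arrive */
	printf("thread %ld released\n", (long)arg);
	return NULL;
}

int main(void)
{
	pthread_t th[NTHREADS];

	pthread_barrier_init(&barrier, NULL, NTHREADS);
	for (long i = 0; i < NTHREADS; i++) {
		pthread_create(&th[i], NULL, worker, (void *)i);
	}
	for (int i = 0; i < NTHREADS; i++) {
		pthread_join(th[i], NULL);
	}
	pthread_barrier_destroy(&barrier);
	return 0;
}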
Example 4
static bool level_empty(struct k_mem_pool *p, int l)
{
	return sys_dlist_is_empty(&p->levels[l].free_list);
}
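This predicate is the kind of helper an allocator scan relies on. As a hedged sketch of that use, the loop below reuses the level_empty() helper above to search from a target level toward larger blocks (lower level numbers) until a level with a free block turns up; find_nonempty_level is an illustrative name, not a Zephyr API, and the scan direction is an assumption based on how buddy-style pools split larger blocks.

/* Hypothetical helper: scan from the desired level toward level 0
 * (larger blocks) so a bigger free block can be split when the exact
 * size is exhausted
 */
static int find_nonempty_level(struct k_mem_pool *p, int start_level)
{
	for (int l = start_level; l >= 0; l--) {
		if (!level_empty(p, l)) {
			return l;	/* a free block is available here */
		}
	}
	return -1;	/* every usable level is exhausted */
}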