Esempio n. 1
/*
 * Allocate a memory block from pool @p, blocking for up to @timeout ms.
 *
 * On success, returns 0 with @block filled in.  Returns -EAGAIN when the
 * timeout expires, or the pool_alloc() error for non-retryable failures.
 *
 * NOTE(review): the "ret == -EAGAIN" clause below is redundant — it is
 * already implied by "(ret && ret != -ENOMEM)"; behavior is identical
 * with or without it.
 */
int k_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block,
		     size_t size, s32_t timeout)
{
	int ret, key;
	s64_t end = 0;	/* absolute tick deadline; unused unless timeout > 0 */

	/* Blocking allocation is forbidden in ISR context */
	__ASSERT(!(_is_in_isr() && timeout != K_NO_WAIT), "");

	if (timeout > 0) {
		/* Convert the relative ms timeout to an absolute deadline so
		 * the remaining wait can be recomputed after each wakeup.
		 */
		end = _tick_get() + _ms_to_ticks(timeout);
	}

	while (1) {
		ret = pool_alloc(p, block, size);

		/* Return on success, when not allowed to wait, or on any
		 * error other than -ENOMEM.  Only -ENOMEM means "the pool is
		 * currently exhausted; retry after a block is freed".
		 */
		if (ret == 0 || timeout == K_NO_WAIT ||
		    ret == -EAGAIN || (ret && ret != -ENOMEM)) {
			return ret;
		}

		/* Sleep on the pool's wait queue until a block is released
		 * or the timeout expires.  irq_lock + pend + _Swap is the
		 * atomic "block current thread" sequence; _Swap performs the
		 * context switch and releases the lock.
		 */
		key = irq_lock();
		_pend_current_thread(&p->wait_q, timeout);
		_Swap(key);

		if (timeout != K_FOREVER) {
			/* Woken: recompute the remaining budget from the
			 * absolute deadline before retrying the allocation.
			 */
			timeout = end - _tick_get();

			if (timeout < 0) {
				break;
			}
		}
	}

	return -EAGAIN;
}
Esempio n. 2
/*
 * Start (or restart) @timer: first expiry after @duration ms, then
 * repeating every @period ms (period == 0 means one-shot).
 *
 * NOTE(review): the 'volatile' qualifier on the tick variables looks
 * like a compiler workaround rather than a functional requirement —
 * confirm it is still needed before removing.
 */
void k_timer_start(struct k_timer *timer, int32_t duration, int32_t period)
{
	/* Reject negative values and the meaningless (0, 0) combination */
	__ASSERT(duration >= 0 && period >= 0 &&
		 (duration != 0 || period != 0), "invalid parameters\n");

	volatile int32_t period_in_ticks, duration_in_ticks;

	period_in_ticks = _ms_to_ticks(period);
	/* _TICK_ALIGN rounds up so at least the requested time elapses
	 * before the first expiry.
	 */
	duration_in_ticks = _TICK_ALIGN + _ms_to_ticks(duration);

	unsigned int key = irq_lock();

	/* If the timer is already running, cancel its pending timeout
	 * before re-arming it with the new duration.
	 */
	if (timer->timeout.delta_ticks_from_prev != _INACTIVE) {
		_abort_timeout(&timer->timeout);
	}

	timer->period = period_in_ticks;
	_add_timeout(NULL, &timer->timeout, &timer->wait_q, duration_in_ticks);
	timer->status = 0;	/* clear any stale expiry count */
	irq_unlock(key);
}
Esempio n. 3
/*
 * Make a freshly created thread runnable, either immediately (delay == 0)
 * or after @delay milliseconds.  Without a system clock, the delay is
 * ignored and the thread starts right away.
 */
static void schedule_new_thread(struct k_thread *thread, s32_t delay)
{
#ifdef CONFIG_SYS_CLOCK_EXISTS
	if (delay != 0) {
		/* _TICK_ALIGN ensures at least the requested time elapses */
		s32_t sched_ticks = _TICK_ALIGN + _ms_to_ticks(delay);
		int lock_key = irq_lock();

		_add_thread_timeout(thread, NULL, sched_ticks);
		irq_unlock(lock_key);
	} else {
		start_thread(thread);
	}
#else
	ARG_UNUSED(delay);
	start_thread(thread);
#endif
}
Esempio n. 4
/*
 * Acquire the synchronization primitive described by (type, priv),
 * waiting at most max_delay_ms milliseconds (0 = single non-blocking
 * attempt).
 *
 * Returns pdTRUE on success, pdFALSE on failure or unknown type.
 */
static int _SemMux_Take(int type, void *priv, unsigned long max_delay_ms)
{
	struct MUX_t *mux;
	struct SEM_t *sem;
	uint8_t current_task = rtos_get_current_task();
	bool ok = pdTRUE;

	switch (type) {
	case BINARY_MUTEX:
		mux = (struct MUX_t *)priv;
		ok = max_delay_ms
			? rtos_mutex_lock_delay(mux->muxid,
						_ms_to_ticks(max_delay_ms))
			: rtos_mutex_try_lock(mux->muxid);
		break;

	case RECURSIVE_MUTEX:
		mux = (struct MUX_t *)priv;

		if (rtos_get_mutex_holder(mux->muxid) == current_task) {
			/* Nested take by the current owner: bump the count. */
			mux->TskHoldCnt++;
			break;
		}

		ok = max_delay_ms
			? rtos_mutex_lock_delay(mux->muxid,
						_ms_to_ticks(max_delay_ms))
			: rtos_mutex_try_lock(mux->muxid);
		if (ok) {
			/*
			 * Holder let go of the mutex between the initial check
			 * and the actual lock attempt, or there was no holder.
			 * Either way, this task now owns it.
			 */
			assert(mux->TskHoldCnt == 0);
			mux->TskHoldCnt++;
		}
		break;

	case SEMAPHORE:
	case COUNTING_SEMAPHORE:
		sem = (struct SEM_t *)priv;
		ok = max_delay_ms
			? rtos_sem_wait_delay(sem->semid,
					      _ms_to_ticks(max_delay_ms))
			: rtos_sem_try_wait(sem->semid);
		break;

	default:
		ok = pdFALSE;
		break;
	}

	return ok;
}