Example #1
int k_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block,
		     size_t size, s32_t timeout)
{
	int ret, key;
	s64_t end = 0;

	__ASSERT(!(_is_in_isr() && timeout != K_NO_WAIT), "");

	if (timeout > 0) {
		end = _tick_get() + _ms_to_ticks(timeout);
	}

	while (1) {
		ret = pool_alloc(p, block, size);

		if (ret == 0 || timeout == K_NO_WAIT ||
		    ret == -EAGAIN || (ret && ret != -ENOMEM)) {
			return ret;
		}

		key = irq_lock();
		_pend_current_thread(&p->wait_q, timeout);
		_Swap(key);

		if (timeout != K_FOREVER) {
			timeout = end - _tick_get();

			if (timeout < 0) {
				break;
			}
		}
	}

	return -EAGAIN;
}
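
The function above is the kernel-internal allocation path. As a rough caller-side sketch (not from the source), the legacy memory pool API of this era is typically used as follows; the pool geometry, names, and block size are illustrative assumptions.

#include <zephyr.h>
#include <string.h>

/* Hypothetical pool: 64-byte minimum blocks, 4 KiB maximum, 4 max-size blocks */
K_MEM_POOL_DEFINE(my_pool, 64, 4096, 4, 4);

void use_pool_block(void)
{
	struct k_mem_block block;

	/* wait up to 100 ms for a 256-byte block */
	if (k_mem_pool_alloc(&my_pool, &block, 256, 100) == 0) {
		memset(block.data, 0, 256);	/* use the memory */
		k_mem_pool_free(&block);
	} else {
		/* allocation failed, e.g. timed out waiting for memory */
	}
}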
Example #2
File: timer.c  Project: sunkaizhu/zephyr
uint32_t k_timer_status_sync(struct k_timer *timer)
{
	__ASSERT(!_is_in_isr(), "");

	unsigned int key = irq_lock();
	uint32_t result = timer->status;

	if (result == 0) {
		if (timer->timeout.delta_ticks_from_prev != _INACTIVE) {
			/* wait for timer to expire or stop */
			_pend_current_thread(&timer->wait_q, K_FOREVER);
			_Swap(key);

			/* get updated timer status */
			key = irq_lock();
			result = timer->status;
		} else {
			/* timer is already stopped */
		}
	} else {
		/* timer has already expired at least once */
	}

	timer->status = 0;
	irq_unlock(key);

	return result;
}
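
A caller-side sketch of k_timer_status_sync(), assuming the legacy millisecond-based k_timer_start() signature of this era; the timer name and durations are illustrative, not from the source.

#include <zephyr.h>

K_TIMER_DEFINE(my_timer, NULL, NULL);

void wait_for_one_shot(void)
{
	/* one-shot timer: 50 ms duration, no period */
	k_timer_start(&my_timer, 50, 0);

	/* ... do other work ... */

	/* block until the timer expires (or return immediately if it
	 * already has), then read and clear the expiry count
	 */
	uint32_t expiries = k_timer_status_sync(&my_timer);

	(void)expiries;
}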
Example #3
int k_msgq_get(struct k_msgq *q, void *data, s32_t timeout)
{
	__ASSERT(!_is_in_isr() || timeout == K_NO_WAIT, "");

	unsigned int key = irq_lock();
	struct k_thread *pending_thread;
	int result;

	if (q->used_msgs > 0) {
		/* take first available message from queue */
		memcpy(data, q->read_ptr, q->msg_size);
		q->read_ptr += q->msg_size;
		if (q->read_ptr == q->buffer_end) {
			q->read_ptr = q->buffer_start;
		}
		q->used_msgs--;

		/* handle first thread waiting to write (if any) */
		pending_thread = _unpend_first_thread(&q->wait_q);
		if (pending_thread) {
			/* add thread's message to queue */
			memcpy(q->write_ptr, pending_thread->base.swap_data,
			       q->msg_size);
			q->write_ptr += q->msg_size;
			if (q->write_ptr == q->buffer_end) {
				q->write_ptr = q->buffer_start;
			}
			q->used_msgs++;

			/* wake up waiting thread */
			_set_thread_return_value(pending_thread, 0);
			_abort_thread_timeout(pending_thread);
			_ready_thread(pending_thread);
			if (!_is_in_isr() && _must_switch_threads()) {
				_Swap(key);
				return 0;
			}
		}
		result = 0;
	} else if (timeout == K_NO_WAIT) {
		/* don't wait for a message to become available */
		result = -ENOMSG;
	} else {
		/* wait for get message success or timeout */
		_pend_current_thread(&q->wait_q, timeout);
		_current->base.swap_data = data;
		return _Swap(key);
	}

	irq_unlock(key);

	return result;
}
Example #4
int k_msgq_put(struct k_msgq *q, void *data, s32_t timeout)
{
	__ASSERT(!_is_in_isr() || timeout == K_NO_WAIT, "");

	unsigned int key = irq_lock();
	struct k_thread *pending_thread;
	int result;

	if (q->used_msgs < q->max_msgs) {
		/* message queue isn't full */
		pending_thread = _unpend_first_thread(&q->wait_q);
		if (pending_thread) {
			/* give message to waiting thread */
			memcpy(pending_thread->base.swap_data, data,
			       q->msg_size);
			/* wake up waiting thread */
			_set_thread_return_value(pending_thread, 0);
			_abort_thread_timeout(pending_thread);
			_ready_thread(pending_thread);
			if (!_is_in_isr() && _must_switch_threads()) {
				_Swap(key);
				return 0;
			}
		} else {
			/* put message in queue */
			memcpy(q->write_ptr, data, q->msg_size);
			q->write_ptr += q->msg_size;
			if (q->write_ptr == q->buffer_end) {
				q->write_ptr = q->buffer_start;
			}
			q->used_msgs++;
		}
		result = 0;
	} else if (timeout == K_NO_WAIT) {
		/* don't wait for message space to become available */
		result = -ENOMSG;
	} else {
		/* wait for put message success, failure, or timeout */
		_pend_current_thread(&q->wait_q, timeout);
		_current->base.swap_data = data;
		return _Swap(key);
	}

	irq_unlock(key);

	return result;
}
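
Examples #3 and #4 are the kernel internals behind the public message queue calls. A minimal caller-side sketch follows; the queue geometry and message struct are assumptions for illustration.

#include <zephyr.h>

struct data_item {
	uint32_t seq;
	uint32_t value;
};

/* queue holding 10 messages of sizeof(struct data_item), 4-byte aligned */
K_MSGQ_DEFINE(my_msgq, sizeof(struct data_item), 10, 4);

void producer(void)
{
	struct data_item item = { .seq = 1, .value = 42 };

	/* drop the message rather than block if the queue is full */
	if (k_msgq_put(&my_msgq, &item, K_NO_WAIT) != 0) {
		/* queue full: -ENOMSG */
	}
}

void consumer(void)
{
	struct data_item item;

	/* block until a message becomes available */
	k_msgq_get(&my_msgq, &item, K_FOREVER);
}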
Example #5
File: queue.c  Project: bboozzoo/zephyr
void *k_queue_get(struct k_queue *queue, s32_t timeout)
{
	unsigned int key;
	void *data;

	key = irq_lock();

	if (likely(!sys_slist_is_empty(&queue->data_q))) {
		data = sys_slist_get_not_empty(&queue->data_q);
		irq_unlock(key);
		return data;
	}

	if (timeout == K_NO_WAIT) {
		irq_unlock(key);
		return NULL;
	}

	_pend_current_thread(&queue->wait_q, timeout);

	return _Swap(key) ? NULL : _current->base.swap_data;
}
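
A caller-side sketch of the k_queue API shown above, following the usual Zephyr convention that the first word of a queued item is reserved for the kernel's list pointer; names and the item layout are illustrative assumptions.

#include <zephyr.h>

struct queued_item {
	void *reserved;		/* first word reserved for kernel use */
	uint32_t payload;
};

K_QUEUE_DEFINE(my_queue);

void producer(void)
{
	static struct queued_item item = { .payload = 7 };

	k_queue_append(&my_queue, &item);
}

void consumer(void)
{
	/* block until an item is appended */
	struct queued_item *item = k_queue_get(&my_queue, K_FOREVER);

	(void)item;
}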
Example #6
File: pthread.c  Project: rsalveti/zephyr
int pthread_barrier_wait(pthread_barrier_t *b)
{
	int key = irq_lock();

	b->count++;

	if (b->count >= b->max) {
		b->count = 0;

		while (!sys_dlist_is_empty(&b->wait_q)) {
			ready_one_thread(&b->wait_q);
		}

		if (!__must_switch_threads()) {
			irq_unlock(key);
			return 0;
		}
	} else {
		_pend_current_thread(&b->wait_q, K_FOREVER);
	}

	return _Swap(key);
}
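
The function above backs the standard POSIX barrier call. A generic POSIX usage sketch follows (plain pthread API, not Zephyr-specific; the worker body and participant count are assumptions).

#include <pthread.h>
#include <stddef.h>

#define NUM_WORKERS 4

static pthread_barrier_t barrier;

static void *worker(void *arg)
{
	/* ... per-thread setup ... */

	/* all NUM_WORKERS threads rendezvous here before continuing */
	pthread_barrier_wait(&barrier);

	/* ... phase that requires every thread to have arrived ... */
	return NULL;
}

void start_workers(void)
{
	pthread_barrier_init(&barrier, NULL, NUM_WORKERS);
	/* create NUM_WORKERS threads running worker() ... */
}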
Example #7
File: pthread.c  Project: rsalveti/zephyr
static int cond_wait(pthread_cond_t *cv, pthread_mutex_t *mut, int timeout)
{
	__ASSERT(mut->sem->count == 0, "");

	int ret, key = irq_lock();

	mut->sem->count = 1;
	ready_one_thread(&mut->sem->wait_q);
	_pend_current_thread(&cv->wait_q, timeout);

	ret = _Swap(key);

	/* FIXME: this extra lock (and the potential context switch it
	 * can cause) could be optimized out.  At the point of the
	 * signal/broadcast, it's possible to detect whether or not we
	 * will be swapping back to this particular thread and lock it
	 * (i.e. leave the lock variable unchanged) on our behalf.
	 * But that requires putting scheduler intelligence into this
	 * higher level abstraction and is probably not worth it.
	 */
	pthread_mutex_lock(mut);

	return ret == -EAGAIN ? -ETIMEDOUT : ret;
}
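
cond_wait() above is the core of pthread_cond_wait()/pthread_cond_timedwait(). The canonical caller-side pattern re-checks the predicate in a loop while holding the mutex, since the wait can time out or wake before the condition holds. A generic POSIX sketch (static initializers may not be available in Zephyr's POSIX layer of this era; names are illustrative):

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int data_ready;

void wait_for_data(void)
{
	pthread_mutex_lock(&lock);
	while (!data_ready) {
		/* releases 'lock' while waiting, re-acquires it on return */
		pthread_cond_wait(&cond, &lock);
	}
	data_ready = 0;		/* consume the data under the lock */
	pthread_mutex_unlock(&lock);
}

void publish_data(void)
{
	pthread_mutex_lock(&lock);
	data_ready = 1;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}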
Example #8
File: poll.c  Project: bboozzoo/zephyr
int k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
{
	__ASSERT(!_is_in_isr(), "");
	__ASSERT(events, "NULL events\n");
	__ASSERT(num_events > 0, "zero events\n");

	int last_registered = -1, in_use = 0, rc;
	unsigned int key;

	key = irq_lock();
	set_polling_state(_current);
	irq_unlock(key);

	/*
	 * We can get by with one poller structure for all events for now:
	 * if/when we allow multiple threads to poll on the same object, we
	 * will need one per poll event associated with an object.
	 */
	struct _poller poller = { .thread = _current };

	/* find events whose condition is already fulfilled */
	for (int ii = 0; ii < num_events; ii++) {
		u32_t state;

		key = irq_lock();
		if (is_condition_met(&events[ii], &state)) {
			set_event_ready(&events[ii], state);
			clear_polling_state(_current);
		} else if (timeout != K_NO_WAIT && is_polling() && !in_use) {
			rc = register_event(&events[ii]);
			if (rc == 0) {
				events[ii].poller = &poller;
				++last_registered;
			} else if (rc == -EADDRINUSE) {
				/* setting in_use also prevents any further
				 * registrations by the current thread
				 */
				in_use = -EADDRINUSE;
				events[ii].state = K_POLL_STATE_EADDRINUSE;
				clear_polling_state(_current);
			} else {
				__ASSERT(0, "unexpected return code\n");
			}
		}
		irq_unlock(key);
	}

	key = irq_lock();

	/*
	 * If we're not polling anymore, it means that at least one event
	 * condition is met, either when looping through the events here or
	 * because one of the events registered has had its state changed, or
	 * that one of the objects we wanted to poll on already had a thread
	 * polling on it. We can remove all registrations and return either
	 * success or a -EADDRINUSE error. In the case of a -EADDRINUSE error,
	 * the events that were available are still flagged as such, and it is
	 * valid for the caller to consider them available, as if this function
	 * returned success.
	 */
	if (!is_polling()) {
		clear_event_registrations(events, last_registered, key);
		irq_unlock(key);
		return in_use;
	}

	clear_polling_state(_current);

	if (timeout == K_NO_WAIT) {
		irq_unlock(key);
		return -EAGAIN;
	}

	_wait_q_t wait_q = _WAIT_Q_INIT(&wait_q);

	_pend_current_thread(&wait_q, timeout);

	int swap_rc = _Swap(key);

	/*
	 * Clear all event registrations. If events happen while we're in this
	 * loop, and we already had one that triggered, that's OK: they will
	 * end up in the list of events that are ready; if we timed out, and
	 * events happen while we're in this loop, that is OK as well since
	 * we already know the return code (-EAGAIN), and even if they are
	 * added to the list of events that occurred, the user has to check the
	 * return code first, which invalidates the whole list of event states.
	 */
	key = irq_lock();
	clear_event_registrations(events, last_registered, key);
	irq_unlock(key);

	return swap_rc;
}
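
A caller-side sketch of k_poll(): block until either of two kernel objects is ready. The semaphore, FIFO, and event array are illustrative assumptions; the type, mode, and state constants are the documented k_poll ones.

#include <zephyr.h>

K_SEM_DEFINE(my_sem, 0, 1);
K_FIFO_DEFINE(my_fifo);

void wait_for_either(void)
{
	struct k_poll_event events[2];

	k_poll_event_init(&events[0], K_POLL_TYPE_SEM_AVAILABLE,
			  K_POLL_MODE_NOTIFY_ONLY, &my_sem);
	k_poll_event_init(&events[1], K_POLL_TYPE_FIFO_DATA_AVAILABLE,
			  K_POLL_MODE_NOTIFY_ONLY, &my_fifo);

	/* block until at least one event condition is met */
	k_poll(events, 2, K_FOREVER);

	if (events[0].state == K_POLL_STATE_SEM_AVAILABLE) {
		k_sem_take(&my_sem, K_NO_WAIT);
	}
	if (events[1].state == K_POLL_STATE_FIFO_DATA_AVAILABLE) {
		void *item = k_fifo_get(&my_fifo, K_NO_WAIT);

		(void)item;
	}

	/* reset the states before reusing the event array */
	events[0].state = K_POLL_STATE_NOT_READY;
	events[1].state = K_POLL_STATE_NOT_READY;
}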
Example #9
File: pipes.c  Project: rsalveti/zephyr
int k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
	       size_t *bytes_read, size_t min_xfer, s32_t timeout)
{
	struct k_thread    *writer;
	struct k_pipe_desc *desc;
	sys_dlist_t    xfer_list;
	unsigned int   key;
	size_t         num_bytes_read = 0;
	size_t         bytes_copied;

	__ASSERT(min_xfer <= bytes_to_read, "");
	__ASSERT(bytes_read != NULL, "");

	key = irq_lock();

	/*
	 * Create a list of "working readers" into which the data will be
	 * directly copied.
	 */

	if (!_pipe_xfer_prepare(&xfer_list, &writer, &pipe->wait_q.writers,
				pipe->bytes_used, bytes_to_read,
				min_xfer, timeout)) {
		irq_unlock(key);
		*bytes_read = 0;
		return -EIO;
	}

	_sched_lock();
	irq_unlock(key);

	num_bytes_read = _pipe_buffer_get(pipe, data, bytes_to_read);

	/*
	 * 1. 'xfer_list' currently contains a list of writer threads that can
	 *     have their write requests fulfilled by the current call.
	 * 2. 'writer' if not NULL points to a thread on the writer wait_q
	 *    that can post some of its requested data.
	 * 3. Data will be copied from each writer's buffer to the
	 *    reader's buffer and/or to the pipe's circular buffer.
	 * 4. Interrupts are unlocked but the scheduler is locked to allow
	 *    ticks to be delivered but no scheduling to occur
	 * 5. If 'writer' times out while we are copying data, not only do we
	 *    still have a pointer to it, but it can not execute until this
	 *    call is complete so it is still safe to copy data from it.
	 */

	struct k_thread *thread = (struct k_thread *)
				  sys_dlist_get(&xfer_list);
	while (thread && (num_bytes_read < bytes_to_read)) {
		desc = (struct k_pipe_desc *)thread->base.swap_data;
		bytes_copied = _pipe_xfer(data + num_bytes_read,
					  bytes_to_read - num_bytes_read,
					  desc->buffer, desc->bytes_to_xfer);

		num_bytes_read       += bytes_copied;
		desc->buffer         += bytes_copied;
		desc->bytes_to_xfer  -= bytes_copied;

		/*
		 * It is expected that the write request will be satisfied.
		 * However, if the read request was satisfied before the
		 * write request was satisfied, then the write request must
		 * finish later when writing to the pipe's circular buffer.
		 */
		if (num_bytes_read == bytes_to_read) {
			break;
		}
		_pipe_thread_ready(thread);

		thread = (struct k_thread *)sys_dlist_get(&xfer_list);
	}

	if (writer && (num_bytes_read < bytes_to_read)) {
		desc = (struct k_pipe_desc *)writer->base.swap_data;
		bytes_copied = _pipe_xfer(data + num_bytes_read,
					  bytes_to_read - num_bytes_read,
					  desc->buffer, desc->bytes_to_xfer);

		num_bytes_read       += bytes_copied;
		desc->buffer         += bytes_copied;
		desc->bytes_to_xfer  -= bytes_copied;
	}

	/*
	 * Copy as much data as possible from the writers (if any)
	 * into the pipe's circular buffer.
	 */

	while (thread) {
		desc = (struct k_pipe_desc *)thread->base.swap_data;
		bytes_copied = _pipe_buffer_put(pipe, desc->buffer,
						desc->bytes_to_xfer);

		desc->buffer         += bytes_copied;
		desc->bytes_to_xfer  -= bytes_copied;

		/* Write request has been satisfied */
		_pipe_thread_ready(thread);

		thread = (struct k_thread *)sys_dlist_get(&xfer_list);
	}

	if (writer) {
		desc = (struct k_pipe_desc *)writer->base.swap_data;
		bytes_copied = _pipe_buffer_put(pipe, desc->buffer,
						desc->bytes_to_xfer);

		desc->buffer         += bytes_copied;
		desc->bytes_to_xfer  -= bytes_copied;
	}

	if (num_bytes_read == bytes_to_read) {
		k_sched_unlock();

		*bytes_read = num_bytes_read;

		return 0;
	}

	/* Not all data was read. */

	struct k_pipe_desc  pipe_desc;

	pipe_desc.buffer        = data + num_bytes_read;
	pipe_desc.bytes_to_xfer = bytes_to_read - num_bytes_read;

	if (timeout != K_NO_WAIT) {
		_current->base.swap_data = &pipe_desc;
		key = irq_lock();
		_sched_unlock_no_reschedule();
		_pend_current_thread(&pipe->wait_q.readers, timeout);
		_Swap(key);
	} else {
		k_sched_unlock();
	}

	*bytes_read = bytes_to_read - pipe_desc.bytes_to_xfer;

	return _pipe_return_code(min_xfer, pipe_desc.bytes_to_xfer,
				 bytes_to_read);
}
Example #10
File: pipes.c  Project: rsalveti/zephyr
/**
 * @brief Internal API used to send data to a pipe
 */
int _k_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
			 unsigned char *data, size_t bytes_to_write,
			 size_t *bytes_written, size_t min_xfer,
			 s32_t timeout)
{
	struct k_thread    *reader;
	struct k_pipe_desc *desc;
	sys_dlist_t    xfer_list;
	unsigned int   key;
	size_t         num_bytes_written = 0;
	size_t         bytes_copied;

#if (CONFIG_NUM_PIPE_ASYNC_MSGS == 0)
	ARG_UNUSED(async_desc);
#endif

	key = irq_lock();

	/*
	 * Create a list of "working readers" into which the data will be
	 * directly copied.
	 */

	if (!_pipe_xfer_prepare(&xfer_list, &reader, &pipe->wait_q.readers,
				pipe->size - pipe->bytes_used, bytes_to_write,
				min_xfer, timeout)) {
		irq_unlock(key);
		*bytes_written = 0;
		return -EIO;
	}

	_sched_lock();
	irq_unlock(key);

	/*
	 * 1. 'xfer_list' currently contains a list of reader threads that can
	 * have their read requests fulfilled by the current call.
	 * 2. 'reader' if not NULL points to a thread on the reader wait_q
	 * that can get some of its requested data.
	 * 3. Interrupts are unlocked but the scheduler is locked to allow
	 * ticks to be delivered but no scheduling to occur
	 * 4. If 'reader' times out while we are copying data, not only do we
	 * still have a pointer to it, but it can not execute until this call
	 * is complete so it is still safe to copy data to it.
	 */

	struct k_thread *thread = (struct k_thread *)
				  sys_dlist_get(&xfer_list);
	while (thread) {
		desc = (struct k_pipe_desc *)thread->base.swap_data;
		bytes_copied = _pipe_xfer(desc->buffer, desc->bytes_to_xfer,
					  data + num_bytes_written,
					  bytes_to_write - num_bytes_written);

		num_bytes_written   += bytes_copied;
		desc->buffer        += bytes_copied;
		desc->bytes_to_xfer -= bytes_copied;

		/* The thread's read request has been satisfied. Ready it. */
		key = irq_lock();
		_ready_thread(thread);
		irq_unlock(key);

		thread = (struct k_thread *)sys_dlist_get(&xfer_list);
	}

	/*
	 * Copy any data to the reader that we left on the wait_q.
	 * It is possible no data will be copied.
	 */
	if (reader) {
		desc = (struct k_pipe_desc *)reader->base.swap_data;
		bytes_copied = _pipe_xfer(desc->buffer, desc->bytes_to_xfer,
					  data + num_bytes_written,
					  bytes_to_write - num_bytes_written);

		num_bytes_written   += bytes_copied;
		desc->buffer        += bytes_copied;
		desc->bytes_to_xfer -= bytes_copied;
	}

	/*
	 * As much data as possible has been directly copied to any waiting
	 * readers. Add as much as possible to the pipe's circular buffer.
	 */

	num_bytes_written +=
		_pipe_buffer_put(pipe, data + num_bytes_written,
				 bytes_to_write - num_bytes_written);

	if (num_bytes_written == bytes_to_write) {
		*bytes_written = num_bytes_written;
#if (CONFIG_NUM_PIPE_ASYNC_MSGS > 0)
		if (async_desc != NULL) {
			_pipe_async_finish(async_desc);
		}
#endif
		k_sched_unlock();
		return 0;
	}

	/* Not all data was copied. */

#if (CONFIG_NUM_PIPE_ASYNC_MSGS > 0)
	if (async_desc != NULL) {
		/*
		 * Lock interrupts and unlock the scheduler before
		 * manipulating the writers wait_q.
		 */
		key = irq_lock();
		_sched_unlock_no_reschedule();
		_pend_thread((struct k_thread *) &async_desc->thread,
			     &pipe->wait_q.writers, K_FOREVER);
		_reschedule_threads(key);
		return 0;
	}
#endif

	struct k_pipe_desc  pipe_desc;

	pipe_desc.buffer         = data + num_bytes_written;
	pipe_desc.bytes_to_xfer  = bytes_to_write - num_bytes_written;

	if (timeout != K_NO_WAIT) {
		_current->base.swap_data = &pipe_desc;
		/*
		 * Lock interrupts and unlock the scheduler before
		 * manipulating the writers wait_q.
		 */
		key = irq_lock();
		_sched_unlock_no_reschedule();
		_pend_current_thread(&pipe->wait_q.writers, timeout);
		_Swap(key);
	} else {
		k_sched_unlock();
	}

	*bytes_written = bytes_to_write - pipe_desc.bytes_to_xfer;

	return _pipe_return_code(min_xfer, pipe_desc.bytes_to_xfer,
				 bytes_to_write);
}
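
Examples #9 and #10 are the kernel internals behind k_pipe_get() and k_pipe_put(). A caller-side sketch of the public pipe API follows; the pipe size, buffers, and timeouts are illustrative assumptions.

#include <zephyr.h>

/* 128-byte ring buffer, 4-byte aligned */
K_PIPE_DEFINE(my_pipe, 128, 4);

void writer_thread(void)
{
	char msg[32] = "hello";
	size_t written;

	/* require the whole message to be accepted, waiting up to 100 ms */
	k_pipe_put(&my_pipe, msg, sizeof(msg), &written, sizeof(msg), 100);
}

void reader_thread(void)
{
	char buf[32];
	size_t bytes_read;

	/* accept a partial read of at least one byte, blocking as needed */
	k_pipe_get(&my_pipe, buf, sizeof(buf), &bytes_read, 1, K_FOREVER);
}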