Example #1
File: thread.c  Project: bboozzoo/zephyr
void _k_thread_group_op(u32_t groups, void (*func)(struct k_thread *))
{
	unsigned int  key;

	__ASSERT(!_is_in_isr(), "");

	_sched_lock();

	/* Invoke func() on each static thread in the specified group set. */

	_FOREACH_STATIC_THREAD(thread_data) {
		if (is_in_any_group(thread_data, groups)) {
			key = irq_lock();
			func(thread_data->thread);
			irq_unlock(key);
		}
	}

	/*
	 * If the current thread is still in a ready state, then let the
	 * "unlock scheduler" code determine if any rescheduling is needed.
	 */
	if (_is_thread_ready(_current)) {
		k_sched_unlock();
		return;
	}

	/* The current thread is no longer in a ready state--reschedule. */
	key = irq_lock();
	_sched_unlock_no_reschedule();
	_Swap(key);
}
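For context, a minimal usage sketch, grounded in Example #2 below, which makes exactly this call: the function is handed a task group mask and a per-thread operation to apply to every static thread in that group.

	/*
	 * Start every static thread in the EXE task group. The scheduler
	 * lock inside _k_thread_group_op() keeps any of them from running
	 * until the whole group has been processed.
	 */
	_k_thread_group_op(K_TASK_GROUP_EXE, _k_thread_single_start);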
Example #2
File: thread.c  Project: sunkaizhu/zephyr
void _init_static_threads(void)
{
	unsigned int  key;

	_FOREACH_STATIC_THREAD(thread_data) {
		_task_group_adjust(thread_data);
		_new_thread(
			thread_data->init_stack,
			thread_data->init_stack_size,
			thread_data->init_entry,
			thread_data->init_p1,
			thread_data->init_p2,
			thread_data->init_p3,
			thread_data->init_prio,
			thread_data->init_options);

		thread_data->thread->init_data = thread_data;
	}

	_sched_lock();
	/* Start all (legacy) threads that are part of the EXE task group */
	_k_thread_group_op(K_TASK_GROUP_EXE, _k_thread_single_start);

	/*
	 * Non-legacy static threads may be started immediately or after a
	 * previously specified delay. Even though the scheduler is locked,
	 * ticks can still be delivered and processed. Lock interrupts so
	 * that the countdown until execution begins from the same tick.
	 *
	 * Note that static threads defined using the legacy API have a
	 * delay of K_FOREVER.
	 */
	key = irq_lock();
	_FOREACH_STATIC_THREAD(thread_data) {
		if (thread_data->init_delay != K_FOREVER) {
			schedule_new_thread(thread_data->thread,
					    thread_data->init_delay);
		}
	}
	irq_unlock(key);
	k_sched_unlock();
}
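_init_static_threads() walks the static thread table that is populated at build time; in Zephyr that table is filled by K_THREAD_DEFINE(). A minimal sketch of a definition this function would pick up, where the entry function, stack size, priority, and 500 ms start delay are all illustrative values:

void blink_entry(void *p1, void *p2, void *p3)	/* hypothetical entry */
{
	while (1) {
		/* ... do periodic work ... */
		k_sleep(100);	/* sleep 100 ms */
	}
}

/* name, stack size, entry, p1, p2, p3, priority, options, delay (ms) */
K_THREAD_DEFINE(blink, 512, blink_entry, NULL, NULL, NULL,
		K_PRIO_PREEMPT(7), 0, 500);

A delay of K_FOREVER instead of 500 would leave the thread dormant until started explicitly later, matching the init_delay check in the loop above (legacy-API static threads use this, per the comment).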
Example #3
File: pipes.c  Project: rsalveti/zephyr
int k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
	       size_t *bytes_read, size_t min_xfer, s32_t timeout)
{
	struct k_thread    *writer;
	struct k_pipe_desc *desc;
	sys_dlist_t    xfer_list;
	unsigned int   key;
	size_t         num_bytes_read = 0;
	size_t         bytes_copied;

	__ASSERT(min_xfer <= bytes_to_read, "");
	__ASSERT(bytes_read != NULL, "");

	key = irq_lock();

	/*
	 * Create a list of "working writers" from which the data will be
	 * directly copied.
	 */

	if (!_pipe_xfer_prepare(&xfer_list, &writer, &pipe->wait_q.writers,
				pipe->bytes_used, bytes_to_read,
				min_xfer, timeout)) {
		irq_unlock(key);
		*bytes_read = 0;
		return -EIO;
	}

	_sched_lock();
	irq_unlock(key);

	num_bytes_read = _pipe_buffer_get(pipe, data, bytes_to_read);

	/*
	 * 1. 'xfer_list' currently contains a list of writer threads that can
	 *     have their write requests fulfilled by the current call.
	 * 2. 'writer' if not NULL points to a thread on the writer wait_q
	 *    that can post some of its requested data.
	 * 3. Data will be copied from each writer's buffer to the reader's
	 *    buffer and/or to the pipe's circular buffer.
	 * 4. Interrupts are unlocked but the scheduler is locked to allow
	 *    ticks to be delivered but no scheduling to occur.
	 * 5. If 'writer' times out while we are copying data, not only do we
	 *    still have a pointer to it, but it can not execute until this
	 *    call is complete so it is still safe to copy data from it.
	 */

	struct k_thread *thread = (struct k_thread *)
				  sys_dlist_get(&xfer_list);
	while (thread && (num_bytes_read < bytes_to_read)) {
		desc = (struct k_pipe_desc *)thread->base.swap_data;
		bytes_copied = _pipe_xfer(data + num_bytes_read,
					  bytes_to_read - num_bytes_read,
					  desc->buffer, desc->bytes_to_xfer);

		num_bytes_read       += bytes_copied;
		desc->buffer         += bytes_copied;
		desc->bytes_to_xfer  -= bytes_copied;

		/*
		 * It is expected that the write request will be satisfied.
		 * However, if the read request was satisfied before the
		 * write request was satisfied, then the write request must
		 * finish later when writing to the pipe's circular buffer.
		 */
		if (num_bytes_read == bytes_to_read) {
			break;
		}
		_pipe_thread_ready(thread);

		thread = (struct k_thread *)sys_dlist_get(&xfer_list);
	}

	if (writer && (num_bytes_read < bytes_to_read)) {
		desc = (struct k_pipe_desc *)writer->base.swap_data;
		bytes_copied = _pipe_xfer(data + num_bytes_read,
					  bytes_to_read - num_bytes_read,
					  desc->buffer, desc->bytes_to_xfer);

		num_bytes_read       += bytes_copied;
		desc->buffer         += bytes_copied;
		desc->bytes_to_xfer  -= bytes_copied;
	}

	/*
	 * Copy as much data as possible from the writers (if any)
	 * into the pipe's circular buffer.
	 */

	while (thread) {
		desc = (struct k_pipe_desc *)thread->base.swap_data;
		bytes_copied = _pipe_buffer_put(pipe, desc->buffer,
						desc->bytes_to_xfer);

		desc->buffer         += bytes_copied;
		desc->bytes_to_xfer  -= bytes_copied;

		/* Write request has been satisfied */
		_pipe_thread_ready(thread);

		thread = (struct k_thread *)sys_dlist_get(&xfer_list);
	}

	if (writer) {
		desc = (struct k_pipe_desc *)writer->base.swap_data;
		bytes_copied = _pipe_buffer_put(pipe, desc->buffer,
						desc->bytes_to_xfer);

		desc->buffer         += bytes_copied;
		desc->bytes_to_xfer  -= bytes_copied;
	}

	if (num_bytes_read == bytes_to_read) {
		k_sched_unlock();

		*bytes_read = num_bytes_read;

		return 0;
	}

	/* Not all data was read. */

	struct k_pipe_desc  pipe_desc;

	pipe_desc.buffer        = data + num_bytes_read;
	pipe_desc.bytes_to_xfer = bytes_to_read - num_bytes_read;

	if (timeout != K_NO_WAIT) {
		_current->base.swap_data = &pipe_desc;
		key = irq_lock();
		_sched_unlock_no_reschedule();
		_pend_current_thread(&pipe->wait_q.readers, timeout);
		_Swap(key);
	} else {
		k_sched_unlock();
	}

	*bytes_read = bytes_to_read - pipe_desc.bytes_to_xfer;

	return _pipe_return_code(min_xfer, pipe_desc.bytes_to_xfer,
				 bytes_to_read);
}
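A hedged usage sketch of the reader side from application code, assuming a pipe created with K_PIPE_DEFINE(); the buffer size, min_xfer of 16, and 100 ms timeout are illustrative:

K_PIPE_DEFINE(my_pipe, 256, 4);	/* 256-byte ring buffer, 4-byte aligned */

void consumer(void)
{
	unsigned char buf[64];
	size_t bytes_read;
	int rc;

	/*
	 * Ask for up to 64 bytes, accept as few as 16, and wait at most
	 * 100 ms. Per the code above, -EIO means the transfer could not
	 * even be set up; otherwise _pipe_return_code() presumably yields
	 * 0 once at least min_xfer bytes have arrived.
	 */
	rc = k_pipe_get(&my_pipe, buf, sizeof(buf), &bytes_read, 16, 100);
	if (rc == 0) {
		/* bytes_read (>= 16) bytes are now valid in buf */
	}
}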
Example #4
File: pipes.c  Project: rsalveti/zephyr
/**
 * @brief Internal API used to send data to a pipe
 */
int _k_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
			 unsigned char *data, size_t bytes_to_write,
			 size_t *bytes_written, size_t min_xfer,
			 s32_t timeout)
{
	struct k_thread    *reader;
	struct k_pipe_desc *desc;
	sys_dlist_t    xfer_list;
	unsigned int   key;
	size_t         num_bytes_written = 0;
	size_t         bytes_copied;

#if (CONFIG_NUM_PIPE_ASYNC_MSGS == 0)
	ARG_UNUSED(async_desc);
#endif

	key = irq_lock();

	/*
	 * Create a list of "working readers" into which the data will be
	 * directly copied.
	 */

	if (!_pipe_xfer_prepare(&xfer_list, &reader, &pipe->wait_q.readers,
				pipe->size - pipe->bytes_used, bytes_to_write,
				min_xfer, timeout)) {
		irq_unlock(key);
		*bytes_written = 0;
		return -EIO;
	}

	_sched_lock();
	irq_unlock(key);

	/*
	 * 1. 'xfer_list' currently contains a list of reader threads that can
	 *    have their read requests fulfilled by the current call.
	 * 2. 'reader' if not NULL points to a thread on the reader wait_q
	 *    that can get some of its requested data.
	 * 3. Interrupts are unlocked but the scheduler is locked to allow
	 *    ticks to be delivered but no scheduling to occur.
	 * 4. If 'reader' times out while we are copying data, not only do we
	 *    still have a pointer to it, but it can not execute until this
	 *    call is complete so it is still safe to copy data to it.
	 */

	struct k_thread *thread = (struct k_thread *)
				  sys_dlist_get(&xfer_list);
	while (thread) {
		desc = (struct k_pipe_desc *)thread->base.swap_data;
		bytes_copied = _pipe_xfer(desc->buffer, desc->bytes_to_xfer,
					  data + num_bytes_written,
					  bytes_to_write - num_bytes_written);

		num_bytes_written   += bytes_copied;
		desc->buffer        += bytes_copied;
		desc->bytes_to_xfer -= bytes_copied;

		/* The thread's read request has been satisfied. Ready it. */
		key = irq_lock();
		_ready_thread(thread);
		irq_unlock(key);

		thread = (struct k_thread *)sys_dlist_get(&xfer_list);
	}

	/*
	 * Copy any data to the reader that we left on the wait_q.
	 * It is possible no data will be copied.
	 */
	if (reader) {
		desc = (struct k_pipe_desc *)reader->base.swap_data;
		bytes_copied = _pipe_xfer(desc->buffer, desc->bytes_to_xfer,
					  data + num_bytes_written,
					  bytes_to_write - num_bytes_written);

		num_bytes_written   += bytes_copied;
		desc->buffer        += bytes_copied;
		desc->bytes_to_xfer -= bytes_copied;
	}

	/*
	 * As much data as possible has been directly copied to any waiting
	 * readers. Add as much as possible to the pipe's circular buffer.
	 */

	num_bytes_written +=
		_pipe_buffer_put(pipe, data + num_bytes_written,
				 bytes_to_write - num_bytes_written);

	if (num_bytes_written == bytes_to_write) {
		*bytes_written = num_bytes_written;
#if (CONFIG_NUM_PIPE_ASYNC_MSGS > 0)
		if (async_desc != NULL) {
			_pipe_async_finish(async_desc);
		}
#endif
		k_sched_unlock();
		return 0;
	}

	/* Not all data was copied. */

#if (CONFIG_NUM_PIPE_ASYNC_MSGS > 0)
	if (async_desc != NULL) {
		/*
		 * Lock interrupts and unlock the scheduler before
		 * manipulating the writers wait_q.
		 */
		key = irq_lock();
		_sched_unlock_no_reschedule();
		_pend_thread((struct k_thread *) &async_desc->thread,
			     &pipe->wait_q.writers, K_FOREVER);
		_reschedule_threads(key);
		return 0;
	}
#endif

	struct k_pipe_desc  pipe_desc;

	pipe_desc.buffer         = data + num_bytes_written;
	pipe_desc.bytes_to_xfer  = bytes_to_write - num_bytes_written;

	if (timeout != K_NO_WAIT) {
		_current->base.swap_data = &pipe_desc;
		/*
		 * Lock interrupts and unlock the scheduler before
		 * manipulating the writers wait_q.
		 */
		key = irq_lock();
		_sched_unlock_no_reschedule();
		_pend_current_thread(&pipe->wait_q.writers, timeout);
		_Swap(key);
	} else {
		k_sched_unlock();
	}

	*bytes_written = bytes_to_write - pipe_desc.bytes_to_xfer;

	return _pipe_return_code(min_xfer, pipe_desc.bytes_to_xfer,
				 bytes_to_write);
}
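The public k_pipe_put() is presumably a thin wrapper that invokes this internal routine with async_desc set to NULL. A matching writer-side sketch for the my_pipe defined in the previous example; the all-or-nothing policy (min_xfer equal to the full length) and K_FOREVER timeout are illustrative choices:

void producer(void)
{
	unsigned char msg[32];
	size_t bytes_written;
	int rc;

	/* ... fill msg ... */

	/*
	 * Block until all 32 bytes have been accepted, either copied
	 * directly to waiting readers or staged in the pipe's circular
	 * buffer, as the routine above does.
	 */
	rc = k_pipe_put(&my_pipe, msg, sizeof(msg), &bytes_written,
			sizeof(msg), K_FOREVER);
	__ASSERT(rc == 0 && bytes_written == sizeof(msg), "");
}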