Example #1
static void init_mem_pool(struct k_mem_pool *p)
{
	int i;
	size_t buflen = p->n_max * p->max_sz, sz = p->max_sz;
	u32_t *bits = p->buf + buflen;

	sys_dlist_init(&p->wait_q);

	for (i = 0; i < p->n_levels; i++) {
		int nblocks = buflen / sz;

		sys_dlist_init(&p->levels[i].free_list);

		if (nblocks < 32) {
			/* This level's free-block bitmap fits in a single 32-bit word */
			p->max_inline_level = i;
		} else {
			/* Bitmap lives in the words placed just past the pool buffer */
			p->levels[i].bits_p = bits;
			bits += (nblocks + 31)/32;
		}

		/* Next level's block size is one quarter of this one, 4-byte aligned */
		sz = _ALIGN4(sz / 4);
	}

	/* Every maximum-size block starts out free on level 0 */
	for (i = 0; i < p->n_max; i++) {
		void *block = block_ptr(p, p->max_sz, i);

		sys_dlist_append(&p->levels[0].free_list, block);
		set_free_bit(p, 0, i);
	}
}
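A minimal standalone sketch of the level-sizing loop above, run with hypothetical pool parameters (max_sz = 256, n_max = 2, n_levels = 4 are illustrative values, not taken from the source); Zephyr's _ALIGN4 is approximated by a local macro.

#include <stdio.h>
#include <stddef.h>

#define ALIGN4(n) (((n) + 3) & ~(size_t)3)	/* stand-in for Zephyr's _ALIGN4 */

int main(void)
{
	size_t max_sz = 256, n_max = 2, n_levels = 4;	/* hypothetical values */
	size_t buflen = n_max * max_sz, sz = max_sz;

	for (size_t i = 0; i < n_levels; i++) {
		size_t nblocks = buflen / sz;

		printf("level %zu: block size %zu, %zu block(s), bitmap %s\n",
		       i, sz, nblocks, nblocks < 32 ? "inline" : "external");

		sz = ALIGN4(sz / 4);	/* quarter the block size, keep 4-byte alignment */
	}

	return 0;
}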
Example #2
void k_pipe_init(struct k_pipe *pipe, unsigned char *buffer, size_t size)
{
	pipe->buffer = buffer;
	pipe->size = size;
	pipe->bytes_used = 0;
	pipe->read_index = 0;
	pipe->write_index = 0;
	sys_dlist_init(&pipe->wait_q.writers);
	sys_dlist_init(&pipe->wait_q.readers);
	SYS_TRACING_OBJ_INIT(k_pipe, pipe);
}
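A minimal usage sketch based only on the signature above; the buffer and pipe names (pipe_ring_buf, my_pipe) are hypothetical.

#include <zephyr.h>

static unsigned char pipe_ring_buf[64];	/* hypothetical backing buffer */
static struct k_pipe my_pipe;

void setup_pipe(void)
{
	k_pipe_init(&my_pipe, pipe_ring_buf, sizeof(pipe_ring_buf));
}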
Example #3
void k_queue_init(struct k_queue *queue)
{
	sys_slist_init(&queue->data_q);
	sys_dlist_init(&queue->wait_q);

	_INIT_OBJ_POLL_EVENT(queue);

	SYS_TRACING_OBJ_INIT(k_queue, queue);
}
Example #4
void k_mutex_init(struct k_mutex *mutex)
{
    if (NULL == mutex) {
        BT_ERR("mutex is NULL\n");
        return;
    }

    aos_mutex_new(&mutex->mutex);
    sys_dlist_init(&mutex->poll_events);
}
Example #5
int k_sem_init(struct k_sem *sem, unsigned int initial_count, unsigned int limit)
{
    int ret;

    if (NULL == sem) {
        BT_ERR("sem is NULL\n");
        return -EINVAL;
    }

    ret = aos_sem_new(&sem->sem, initial_count);
    sys_dlist_init(&sem->poll_events);
    return ret;
}
Example #6
void k_msgq_init(struct k_msgq *q, char *buffer,
		 size_t msg_size, u32_t max_msgs)
{
	q->msg_size = msg_size;
	q->max_msgs = max_msgs;
	q->buffer_start = buffer;
	q->buffer_end = buffer + (max_msgs * msg_size);
	q->read_ptr = buffer;
	q->write_ptr = buffer;
	q->used_msgs = 0;
	sys_dlist_init(&q->wait_q);
	SYS_TRACING_OBJ_INIT(k_msgq, q);
}
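A minimal usage sketch derived from the signature above; the message type and object names are hypothetical, and the 4-byte buffer alignment matches the u32_t message fields.

#include <zephyr.h>

struct data_item {		/* hypothetical message type */
	u32_t field1;
	u32_t field2;
};

static char __aligned(4) msgq_buf[10 * sizeof(struct data_item)];
static struct k_msgq my_msgq;

void setup_msgq(void)
{
	k_msgq_init(&my_msgq, msgq_buf, sizeof(struct data_item), 10);
}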
Example #7
void k_timer_init(struct k_timer *timer,
		  void (*expiry_fn)(struct k_timer *),
		  void (*stop_fn)(struct k_timer *))
{
	timer->expiry_fn = expiry_fn;
	timer->stop_fn = stop_fn;
	timer->status = 0;

	sys_dlist_init(&timer->wait_q);
	_init_timeout(&timer->timeout, _timer_expiration_handler);
	SYS_TRACING_OBJ_INIT(k_timer, timer);

	timer->_legacy_data = NULL;
}
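A minimal usage sketch matching the signature above; the handler and timer names are hypothetical, and no stop function is registered (NULL).

#include <zephyr.h>

static void blink_expiry(struct k_timer *timer)	/* hypothetical expiry handler */
{
	/* runs each time the timer expires */
}

static struct k_timer blink_timer;

void setup_timer(void)
{
	k_timer_init(&blink_timer, blink_expiry, NULL);
}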
Example #8
void k_queue_init(struct k_queue *queue)
{
    void *msg_start;
    int size = 20;                          /* fixed capacity: 20 message slots */
    int stat;
    uint8_t blk_size = sizeof(void *) + 1;  /* slot size: one pointer plus one byte */

    msg_start = (void *)aos_malloc(size * blk_size);
    assert(msg_start);

    queue->_queue = (aos_queue_t *)aos_malloc(sizeof(aos_queue_t));
    assert(queue->_queue);

    stat = aos_queue_new(queue->_queue, msg_start, size * blk_size, blk_size);
    assert(stat == 0);

    sys_dlist_init(&queue->poll_events);
}
Example #9
/**
 *
 * @brief Initializes kernel data structures
 *
 * This routine initializes various kernel data structures, including
 * the init and idle threads and any architecture-specific initialization.
 *
 * Note that all fields of "_kernel" are set to zero on entry, which may
 * be all the initialization many of them require.
 *
 * @return N/A
 */
static void prepare_multithreading(struct k_thread *dummy_thread)
{
#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
	ARG_UNUSED(dummy_thread);
#else
	/*
	 * Initialize the current execution thread to permit a level of
	 * debugging output if an exception should happen during kernel
	 * initialization.  However, don't waste effort initializing the
	 * fields of the dummy thread beyond those needed to identify it as a
	 * dummy thread.
	 */

	_current = dummy_thread;

	dummy_thread->base.user_options = K_ESSENTIAL;
	dummy_thread->base.thread_state = _THREAD_DUMMY;
#endif

	/* _kernel.ready_q is all zeroes */


	/*
	 * The interrupt library needs to be initialized early since a series
	 * of handlers are installed into the interrupt table to catch
	 * spurious interrupts. This must be performed before other kernel
	 * subsystems install bonafide handlers, or before hardware device
	 * drivers are initialized.
	 */

	_IntLibInit();

	/* ready the init/main and idle threads */

	for (int ii = 0; ii < K_NUM_PRIORITIES; ii++) {
		sys_dlist_init(&_ready_q.q[ii]);
	}

	/*
	 * prime the cache with the main thread since:
	 *
	 * - the cache can never be NULL
	 * - the main thread will be the one to run first
	 * - no other thread is initialized yet and thus their priority fields
	 *   contain garbage, which would prevent the cache loading algorithm
	 *   from working as intended
	 */
	_ready_q.cache = _main_thread;

	_new_thread(_main_thread, _main_stack,
		    MAIN_STACK_SIZE, _main, NULL, NULL, NULL,
		    CONFIG_MAIN_THREAD_PRIORITY, K_ESSENTIAL);
	_mark_thread_as_started(_main_thread);
	_add_thread_to_ready_q(_main_thread);

#ifdef CONFIG_MULTITHREADING
	_new_thread(_idle_thread, _idle_stack,
		    IDLE_STACK_SIZE, idle, NULL, NULL, NULL,
		    K_LOWEST_THREAD_PRIO, K_ESSENTIAL);
	_mark_thread_as_started(_idle_thread);
	_add_thread_to_ready_q(_idle_thread);
#endif

	initialize_timeouts();

	/* perform any architecture-specific initialization */

	kernel_arch_init();
}
Example #10
/**
 * @brief Prepare a working set of readers/writers
 *
 * Prepare a list of "working threads" into/from which the data
 * will be directly copied. This list is useful as it is used to ...
 *
 *  1. avoid double copying
 *  2. minimize interrupt latency as interrupts are unlocked
 *     while copying data
 *  3. ensure a timeout can not make the request impossible to satisfy
 *
 * The list is populated with previously pended threads that will be ready to
 * run after the pipe call is complete.
 *
 * Important things to remember when reading from the pipe ...
 * 1. If there are writers in @a wait_q, then the pipe's buffer is full.
 * 2. Conversely if the pipe's buffer is not full, there are no writers.
 * 3. The amount of available data in the pipe is the sum of the bytes used in
 *    the pipe (@a pipe_space) and all the requests from the waiting writers.
 * 4. Since data is read from the pipe's buffer first, the working set must
 *    include writers that will (try to) re-fill the pipe's buffer afterwards.
 *
 * Important things to remember when writing to the pipe ...
 * 1. If there are readers in @a wait_q, then the pipe's buffer is empty.
 * 2. Conversely if the pipe's buffer is not empty, then there are no readers.
 * 3. The amount of space available in the pipe is the sum of the bytes unused
 *    in the pipe (@a pipe_space) and all the requests from the waiting readers.
 *
 * @return false if request is unsatisfiable, otherwise true
 */
static bool _pipe_xfer_prepare(sys_dlist_t      *xfer_list,
			       struct k_thread **waiter,
			       _wait_q_t        *wait_q,
			       size_t            pipe_space,
			       size_t            bytes_to_xfer,
			       size_t            min_xfer,
			       s32_t           timeout)
{
	sys_dnode_t      *node;
	struct k_thread  *thread;
	struct k_pipe_desc *desc;
	size_t num_bytes = 0;

	if (timeout == K_NO_WAIT) {
		for (node = sys_dlist_peek_head(wait_q); node != NULL;
		     node = sys_dlist_peek_next(wait_q, node)) {
			thread = (struct k_thread *)node;
			desc = (struct k_pipe_desc *)thread->base.swap_data;

			num_bytes += desc->bytes_to_xfer;

			if (num_bytes >= bytes_to_xfer) {
				break;
			}
		}

		if (num_bytes + pipe_space < min_xfer) {
			return false;
		}
	}

	/*
	 * Either @a timeout is not K_NO_WAIT (so the thread may pend) or
	 * the entire request can be satisfied. Generate the working list.
	 */

	sys_dlist_init(xfer_list);
	num_bytes = 0;

	while ((thread = (struct k_thread *) sys_dlist_peek_head(wait_q))) {
		desc = (struct k_pipe_desc *)thread->base.swap_data;
		num_bytes += desc->bytes_to_xfer;

		if (num_bytes > bytes_to_xfer) {
			/*
			 * This request can not be fully satisfied.
			 * Do not remove it from the wait_q.
			 * Do not abort its timeout (if applicable).
			 * Do not add it to the transfer list
			 */
			break;
		}

		/*
		 * This request can be fully satisfied.
		 * Remove it from the wait_q.
		 * Abort its timeout.
		 * Add it to the transfer list.
		 */
		_unpend_thread(thread);
		_abort_thread_timeout(thread);
		sys_dlist_append(xfer_list, &thread->base.k_q_node);
	}

	*waiter = (num_bytes > bytes_to_xfer) ? thread : NULL;

	return true;
}
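The K_NO_WAIT feasibility test above amounts to comparing min_xfer against pipe_space plus the pended writers' (or readers') outstanding requests; the standalone sketch below restates that check with hypothetical numbers.

#include <stdbool.h>
#include <stddef.h>

/* Mirrors the "num_bytes + pipe_space < min_xfer" test above. */
static bool xfer_satisfiable(size_t pipe_space, const size_t *pended_requests,
			     size_t n_pended, size_t min_xfer)
{
	size_t num_bytes = 0;

	for (size_t i = 0; i < n_pended; i++) {
		num_bytes += pended_requests[i];
	}

	return num_bytes + pipe_space >= min_xfer;
}

/* Example: 10 bytes already buffered, writers pending 8 and 6 bytes.
 * xfer_satisfiable(10, (size_t[]){8, 6}, 2, 30) -> false (only 24 available)
 * xfer_satisfiable(10, (size_t[]){8, 6}, 2, 20) -> true
 */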