static void init_mem_pool(struct k_mem_pool *p)
{
	int i;
	size_t buflen = p->n_max * p->max_sz, sz = p->max_sz;
	u32_t *bits = p->buf + buflen;

	sys_dlist_init(&p->wait_q);

	for (i = 0; i < p->n_levels; i++) {
		int nblocks = buflen / sz;

		sys_dlist_init(&p->levels[i].free_list);

		if (nblocks < 32) {
			/* Few enough blocks that the free bitmap fits
			 * inside the level struct itself
			 */
			p->max_inline_level = i;
		} else {
			/* Carve the bitmap out of the words following
			 * the pool buffer, one bit per block
			 */
			p->levels[i].bits_p = bits;
			bits += (nblocks + 31)/32;
		}

		sz = _ALIGN4(sz / 4);
	}

	/* All memory starts out as n_max free blocks at level 0 */
	for (i = 0; i < p->n_max; i++) {
		void *block = block_ptr(p, p->max_sz, i);

		sys_dlist_append(&p->levels[0].free_list, block);
		set_free_bit(p, 0, i);
	}
}
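
/*
 * Illustrative sketch (not part of the kernel): how the level loop above
 * carves a pool into a quad-tree of block sizes.  The pool parameters
 * (n_max = 2, max_sz = 1024, n_levels = 4) are hypothetical, chosen only
 * to make the arithmetic concrete.
 */
__attribute__((unused))
static void example_level_geometry(void)
{
	size_t buflen = 2 * 1024;	/* n_max * max_sz */
	size_t sz = 1024;		/* max_sz */
	int i;

	for (i = 0; i < 4; i++) {
		size_t nblocks = buflen / sz;

		/* i=0: 2 blocks of 1024; i=1: 8 of 256;
		 * i=2: 32 of 64; i=3: 128 of 16.
		 * Levels with fewer than 32 blocks keep their free
		 * bitmap inline; larger levels point into the spare
		 * words after the pool buffer.
		 */
		(void)nblocks;

		sz = (sz / 4 + 3) & ~(size_t)3; /* same rounding as _ALIGN4(sz / 4) */
	}
}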
static void free_block(struct k_mem_pool *p, int level, size_t *lsizes, int bn)
{
	int i, key, lsz = lsizes[level];
	void *block = block_ptr(p, lsz, bn);

	key = irq_lock();

	set_free_bit(p, level, bn);

	if (level && partner_bits(p, level, bn) == 0xf) {
		/* All four siblings are free: pull them off the free
		 * list, clear their bits, and recombine them into the
		 * parent block one level up
		 */
		for (i = 0; i < 4; i++) {
			int b = (bn & ~3) + i;

			clear_free_bit(p, level, b);
			if (b != bn &&
			    block_fits(p, block_ptr(p, lsz, b), lsz)) {
				sys_dlist_remove(block_ptr(p, lsz, b));
			}
		}

		irq_unlock(key);
		free_block(p, level-1, lsizes, bn / 4); /* tail recursion! */
		return;
	}

	if (block_fits(p, block, lsz)) {
		sys_dlist_append(&p->levels[level].free_list, block);
	}

	irq_unlock(key);
}
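
/*
 * Illustrative sketch (not part of the kernel): the buddy arithmetic
 * used above.  Within a level, the four blocks split from one parent
 * are numbered 4k..4k+3, so a block's quad starts at (bn & ~3) and its
 * parent one level up is block bn / 4.  partner_bits() returning 0xf
 * means all four siblings are free and the quad can be recombined.
 */
__attribute__((unused))
static void example_buddy_indices(void)
{
	int bn = 10;		/* hypothetical block number */
	int first = bn & ~3;	/* 8: first block of its quad */
	int parent = bn / 4;	/* 2: block number one level up */
	int i;

	for (i = 0; i < 4; i++) {
		int sibling = first + i;	/* 8, 9, 10, 11 */

		(void)sibling;
	}
	(void)parent;
}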
/* Takes a block of a given level, splits it into four blocks of the
 * next smaller level, puts three into the free list as in
 * free_block() but without the need to check adjacent bits or
 * recombine, and returns the remaining smaller block.
 */
static void *break_block(struct k_mem_pool *p, void *block,
			 int l, size_t *lsizes)
{
	int i, bn, key;

	key = irq_lock();

	bn = block_num(p, block, lsizes[l]);

	for (i = 1; i < 4; i++) {
		int lbn = 4*bn + i;
		int lsz = lsizes[l + 1];
		void *block2 = (lsz * i) + (char *)block;

		set_free_bit(p, l + 1, lbn);
		if (block_fits(p, block2, lsz)) {
			sys_dlist_append(&p->levels[l + 1].free_list, block2);
		}
	}

	irq_unlock(key);

	return block;
}
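
/*
 * Illustrative sketch (not part of the kernel): splitting in the other
 * direction.  A block bn at level l yields children 4*bn .. 4*bn+3 at
 * level l+1, and child i starts lsizes[l+1] * i bytes into the parent.
 * The parent's storage doubles as child 0, which is why the loop above
 * starts at i = 1 and only the other three go on the free list.  All
 * values here are hypothetical.
 */
__attribute__((unused))
static void example_split_layout(void)
{
	char parent[1024];	/* hypothetical level-l block, bn = 5 */
	size_t child_sz = sizeof(parent) / 4;
	int bn = 5, i;

	for (i = 1; i < 4; i++) {
		int child_bn = 4 * bn + i;		/* 21, 22, 23 */
		char *child = parent + child_sz * i;	/* offsets 256, 512, 768 */

		(void)child_bn;
		(void)child;
	}
}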
/**
 * @brief Prepare a working set of readers/writers
 *
 * Prepare a list of "working threads" into/from which the data
 * will be directly copied. This list is useful as it is used to ...
 *
 * 1. avoid double copying
 * 2. minimize interrupt latency as interrupts are unlocked
 *    while copying data
 * 3. ensure a timeout cannot make the request impossible to satisfy
 *
 * The list is populated with previously pended threads that will be ready to
 * run after the pipe call is complete.
 *
 * Important things to remember when reading from the pipe ...
 * 1. If there are writers in @a wait_q, then the pipe's buffer is full.
 * 2. Conversely, if the pipe's buffer is not full, there are no writers.
 * 3. The amount of available data in the pipe is the sum of the bytes used in
 *    the pipe (@a pipe_space) and all the requests from the waiting writers.
 * 4. Since data is read from the pipe's buffer first, the working set must
 *    include writers that will (try to) re-fill the pipe's buffer afterwards.
 *
 * Important things to remember when writing to the pipe ...
 * 1. If there are readers in @a wait_q, then the pipe's buffer is empty.
 * 2. Conversely, if the pipe's buffer is not empty, then there are no readers.
 * 3. The amount of space available in the pipe is the sum of the bytes unused
 *    in the pipe (@a pipe_space) and all the requests from the waiting readers.
 *
 * @return false if request is unsatisfiable, otherwise true
 */
static bool _pipe_xfer_prepare(sys_dlist_t      *xfer_list,
			       struct k_thread **waiter,
			       _wait_q_t        *wait_q,
			       size_t            pipe_space,
			       size_t            bytes_to_xfer,
			       size_t            min_xfer,
			       s32_t             timeout)
{
	sys_dnode_t        *node;
	struct k_thread    *thread;
	struct k_pipe_desc *desc;
	size_t num_bytes = 0;

	if (timeout == K_NO_WAIT) {
		for (node = sys_dlist_peek_head(wait_q); node != NULL;
		     node = sys_dlist_peek_next(wait_q, node)) {
			thread = (struct k_thread *)node;
			desc = (struct k_pipe_desc *)thread->base.swap_data;

			num_bytes += desc->bytes_to_xfer;

			if (num_bytes >= bytes_to_xfer) {
				break;
			}
		}

		if (num_bytes + pipe_space < min_xfer) {
			return false;
		}
	}

	/*
	 * Either @a timeout is not K_NO_WAIT (so the thread may pend) or
	 * the entire request can be satisfied. Generate the working list.
	 */

	sys_dlist_init(xfer_list);
	num_bytes = 0;

	while ((thread = (struct k_thread *)sys_dlist_peek_head(wait_q))) {
		desc = (struct k_pipe_desc *)thread->base.swap_data;
		num_bytes += desc->bytes_to_xfer;

		if (num_bytes > bytes_to_xfer) {
			/*
			 * This request cannot be fully satisfied.
			 * Do not remove it from the wait_q.
			 * Do not abort its timeout (if applicable).
			 * Do not add it to the transfer list.
			 */
			break;
		}

		/*
		 * This request can be fully satisfied.
		 * Remove it from the wait_q.
		 * Abort its timeout.
		 * Add it to the transfer list.
		 */
		_unpend_thread(thread);
		_abort_thread_timeout(thread);
		sys_dlist_append(xfer_list, &thread->base.k_q_node);
	}

	*waiter = (num_bytes > bytes_to_xfer) ? thread : NULL;

	return true;
}
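
/*
 * Illustrative sketch (not part of the kernel): the feasibility test in
 * the K_NO_WAIT branch above.  A reader that cannot pend succeeds only
 * if the bytes already buffered (pipe_space) plus the bytes offered by
 * pended writers cover at least min_xfer.  All numbers are hypothetical.
 */
__attribute__((unused))
static bool example_no_wait_feasible(void)
{
	size_t pipe_space = 16;			/* bytes already in the buffer */
	size_t writer_requests[] = { 8, 32 };	/* bytes each pended writer offers */
	size_t min_xfer = 40, num_bytes = 0;
	int i;

	for (i = 0; i < 2; i++) {
		num_bytes += writer_requests[i];
	}

	/* 16 + 8 + 32 = 56 >= 40, so this request is satisfiable */
	return num_bytes + pipe_space >= min_xfer;
}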