struct block *blist_clone(struct block *blist, int header_len, int len,
                          uint32_t offset)
{
    int ret;
    struct block *newb = block_alloc(header_len, MEM_WAIT);

    do {
        ret = __blist_clone_to(blist, newb, len, offset);
        if (ret)
            block_add_extd(newb, ret, MEM_WAIT);
    } while (ret);
    return newb;
}
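/* Usage sketch (illustrative, not part of the codebase): clone an entire
 * block list into a single block, reserving 16 bytes of main-body headroom
 * for a header to be prepended later.  blocklen() is the usual helper for
 * the total length of a chain.  Assumes the caller owns blist, so no lock
 * is needed (contrast with qclone below). */
static struct block *clone_whole_list(struct block *blist)
{
    return blist_clone(blist, 16, blocklen(blist), 0);
}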
/* Given a queue, makes a single block with header_len reserved space in the
 * block main body, and the contents of [offset, len + offset) pointed to in
 * the new block's ext_data. */
struct block *qclone(struct queue *q, int header_len, int len, uint32_t offset)
{
    int ret;
    struct block *newb = block_alloc(header_len, MEM_WAIT);

    /* the while loop should rarely be used: it would require someone
     * concurrently adding to the queue. */
    do {
        /* TODO: RCU: protecting the q list (b->next) (need read lock) */
        spin_lock_irqsave(&q->lock);
        ret = __blist_clone_to(q->bfirst, newb, len, offset);
        spin_unlock_irqsave(&q->lock);
        if (ret)
            block_add_extd(newb, ret, MEM_WAIT);
    } while (ret);
    return newb;
}
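/* Usage sketch (illustrative): snapshot the first 'amt' bytes of a queue
 * without dequeueing them, e.g. to hand a copy-by-reference view to a
 * second consumer.  The payload is shared via extra_data, so the clone is
 * cheap regardless of amt; free it with freeb() as usual. */
static struct block *peek_front(struct queue *q, int amt)
{
    return qclone(q, 0, amt, 0);
}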
/*
 * Write to a queue.  Only Maxatomic bytes at a time are atomic.
 */
int qwrite(struct queue *q, void *vp, int len)
{
    int n, sofar;
    struct block *b;
    uint8_t *p = vp;
    void *ext_buf;

    QDEBUG if (!islo())
        printd("qwrite hi %p\n", getcallerpc(&q));
    sofar = 0;
    do {
        n = len - sofar;
        /* This is 64K, the max amount per single block.  Still a good
         * value? */
        if (n > Maxatomic)
            n = Maxatomic;
        /* If n is small, we don't need to bother with the extra_data.  But
         * until the whole stack can handle extd blocks, we'll use them
         * unconditionally. */
#ifdef CONFIG_BLOCK_EXTRAS
        /* allocb builds in 128 bytes of header space to all blocks, but
         * this is only available via padblock (to the left).  we also need
         * some space for pullupblock for some basic headers (like icmp)
         * that get written in directly */
        b = allocb(64);
        ext_buf = kmalloc(n, MEM_WAIT);
        memcpy(ext_buf, p + sofar, n);
        block_add_extd(b, 1, MEM_WAIT);	/* returns 0 on success */
        b->extra_data[0].base = (uintptr_t)ext_buf;
        b->extra_data[0].off = 0;
        b->extra_data[0].len = n;
        b->extra_len += n;
#else
        b = allocb(n);
        memmove(b->wp, p + sofar, n);
        b->wp += n;
#endif
        qbwrite(q, b);
        sofar += n;
    } while (sofar < len && (q->state & Qmsg) == 0);
    return len;
}
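/* Usage sketch (illustrative): qwrite chops large writes into Maxatomic
 * (64K) chunks.  On a stream queue, the call below enqueues two blocks
 * (Maxatomic bytes, then Maxatomic / 2); on a Qmsg queue only the first
 * chunk is queued as a single message, though the return value is still
 * the full length. */
static void qwrite_example(struct queue *q)
{
    int amt = Maxatomic + Maxatomic / 2;
    uint8_t *buf = kmalloc(amt, MEM_WAIT);

    memset(buf, 0xab, amt);
    qwrite(q, buf, amt);	/* returns amt */
    kfree(buf);			/* safe: qwrite copied the bytes out */
}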
/* Append an extra data buffer @base with offset @off of length @len to block
 * @b.  Reuses an unused extra data slot if there is one.
 * Returns 0 on success or -1 on error. */
int block_append_extra(struct block *b, uintptr_t base, uint32_t off,
                       uint32_t len, int mem_flags)
{
    unsigned int nr_bufs = b->nr_extra_bufs + 1;
    struct extra_bdata *ebd;

    ebd = next_unused_slot(b);
    if (!ebd) {
        if (block_add_extd(b, nr_bufs, mem_flags) != 0)
            return -1;
        ebd = next_unused_slot(b);
        assert(ebd);
    }
    ebd->base = base;
    ebd->off = off;
    ebd->len = len;
    b->extra_len += ebd->len;
    return 0;
}
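/* Usage sketch (illustrative): attach a kmalloc'd payload to a block by
 * reference instead of copying it into the main body.  On failure the
 * buffer still belongs to the caller; this sketch assumes the block
 * teardown path releases extra_data bases once they are attached. */
static int attach_payload(struct block *b, uint32_t len)
{
    void *payload = kmalloc(len, MEM_WAIT);

    if (block_append_extra(b, (uintptr_t)payload, 0, len, MEM_WAIT)) {
        kfree(payload);		/* never attached; caller-owned */
        return -1;
    }
    return 0;
}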
/* Helper, allocs a block and copies [from, from + len) into it.  Returns the
 * block on success, 0 on failure. */
static struct block *build_block(void *from, size_t len, int mem_flags)
{
    struct block *b;
    void *ext_buf;

    /* If len is small, we don't need to bother with the extra_data.  But
     * until the whole stack can handle extd blocks, we'll use them
     * unconditionally. */
#ifdef CONFIG_BLOCK_EXTRAS
    /* allocb builds in 128 bytes of header space to all blocks, but this is
     * only available via padblock (to the left).  we also need some space
     * for pullupblock for some basic headers (like icmp) that get written
     * in directly */
    b = block_alloc(64, mem_flags);
    if (!b)
        return 0;
    ext_buf = kmalloc(len, mem_flags);
    if (!ext_buf) {
        kfree(b);
        return 0;
    }
    memcpy(ext_buf, from, len);
    if (block_add_extd(b, 1, mem_flags)) {
        kfree(ext_buf);
        kfree(b);
        return 0;
    }
    b->extra_data[0].base = (uintptr_t)ext_buf;
    b->extra_data[0].off = 0;
    b->extra_data[0].len = len;
    b->extra_len += len;
#else
    b = block_alloc(len, mem_flags);
    if (!b)
        return 0;
    memmove(b->wp, from, len);
    b->wp += len;
#endif
    return b;
}
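/* Usage sketch (illustrative, with a hypothetical header type):
 * build_block copies its input (into an ext_buf under CONFIG_BLOCK_EXTRAS,
 * into the main body otherwise), so it is safe to pass a buffer that lives
 * on the stack. */
struct fake_hdr {
    uint32_t src;
    uint32_t dst;
};

static struct block *block_from_stack_hdr(uint32_t src, uint32_t dst)
{
    struct fake_hdr hdr = { .src = src, .dst = dst };

    return build_block(&hdr, sizeof(hdr), MEM_WAIT);
}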
/* Given a string of blocks, fills the new block's extra_data with the
 * contents of the blist [offset, len + offset).
 *
 * Returns 0 on success.  The only failure is if the extra_data array was too
 * small, so this returns a positive integer saying how big the extra_data
 * needs to be.
 *
 * Callers are responsible for protecting the list structure. */
static int __blist_clone_to(struct block *blist, struct block *newb, int len,
                            uint32_t offset)
{
    struct block *b, *first;
    unsigned int nr_bufs = 0;
    unsigned int b_idx, newb_idx = 0;
    uint8_t *first_main_body = 0;

    /* find the first block; keep offset relative to the latest b in the
     * list */
    for (b = blist; b; b = b->next) {
        if (BLEN(b) > offset)
            break;
        offset -= BLEN(b);
    }
    /* qcopy semantics: if you asked for an offset outside the block list,
     * you get an empty block back */
    if (!b)
        return 0;
    first = b;
    /* upper bound for how many buffers we'll need in newb */
    for (/* b is set */; b; b = b->next) {
        nr_bufs += 1 + b->nr_extra_bufs;	/* 1 for the main body */
    }
    /* we might be holding a spinlock here, so we won't wait for kmalloc */
    if (block_add_extd(newb, nr_bufs, 0) != 0) {
        /* caller will need to alloc these, then re-call us */
        return nr_bufs;
    }
    for (b = first; b && len; b = b->next) {
        b_idx = 0;
        if (offset) {
            if (offset < BHLEN(b)) {
                /* off is in the main body */
                len -= point_to_body(b, b->rp + offset, newb, newb_idx, len);
                newb_idx++;
            } else {
                /* off is in one of the buffers (or just past the last one).
                 * we're not going to point to b's main body at all. */
                offset -= BHLEN(b);
                assert(b->extra_data);
                /* assuming these extrabufs are packed, or at least that len
                 * isn't gibberish */
                while (b->extra_data[b_idx].len <= offset) {
                    offset -= b->extra_data[b_idx].len;
                    b_idx++;
                }
                /* now offset is set to our offset in the b_idx'th buf */
                len -= point_to_buf(b, b_idx, offset, newb, newb_idx, len);
                newb_idx++;
                b_idx++;
            }
            offset = 0;
        } else {
            len -= point_to_body(b, b->rp, newb, newb_idx, len);
            newb_idx++;
        }
        /* knock out all remaining bufs.  we only did one point_to_ op by
         * now, and any point_to_ could be our last if it consumed all of
         * len. */
        for (int i = b_idx; (i < b->nr_extra_bufs) && len; i++) {
            len -= point_to_buf(b, i, 0, newb, newb_idx, len);
            newb_idx++;
        }
    }
    return 0;
}
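/* Worked example (hypothetical numbers): suppose blist is b1 -> b2, where
 * b1 has a 20-byte main body plus one 80-byte extra buf (BLEN 100) and b2
 * has a 50-byte main body (BLEN 50).  Then
 * __blist_clone_to(blist, newb, 60, 90) proceeds as:
 *   - the first loop stops at b1 (BLEN(b1) == 100 > offset 90);
 *   - nr_bufs upper bound: (1 + 1) for b1 + (1 + 0) for b2 == 3;
 *   - offset 90 >= BHLEN(b1) == 20, so b1's main body is skipped and
 *     offset becomes 70, landing in the 80-byte extra buf: point_to_buf
 *     takes its last 10 bytes, leaving len == 50;
 *   - b2: point_to_body takes all 50 bytes of its main body, len == 0.
 * newb ends up using 2 of its 3 extra_data slots, with extra_len == 60. */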