/* Returns a block with the remaining contents of b all in the main body of the * returned block. Replace old references to b with the returned value (which * may still be 'b', if no change was needed. */ struct block *linearizeblock(struct block *b) { struct block *newb; size_t len; struct extra_bdata *ebd; if (!b->extra_len) return b; newb = block_alloc(BLEN(b), MEM_WAIT); len = BHLEN(b); memcpy(newb->wp, b->rp, len); newb->wp += len; len = b->extra_len; for (int i = 0; (i < b->nr_extra_bufs) && len; i++) { ebd = &b->extra_data[i]; if (!ebd->base || !ebd->len) continue; memcpy(newb->wp, (void*)(ebd->base + ebd->off), ebd->len); newb->wp += ebd->len; len -= ebd->len; } /* TODO: any other flags that need copied over? */ if (b->flag & BCKSUM_FLAGS) { newb->flag |= (b->flag & BCKSUM_FLAGS); newb->checksum_start = b->checksum_start; newb->checksum_offset = b->checksum_offset; newb->mss = b->mss; } freeb(b); return newb; }
/* * throw away up to count bytes from a * list of blocks. Return count of bytes * thrown away. */ static int _pullblock(struct block **bph, int count, int free) { struct block *bp; int n, bytes; bytes = 0; if (bph == NULL) return 0; while (*bph != NULL && count != 0) { bp = *bph; n = MIN(BHLEN(bp), count); bytes += n; count -= n; bp->rp += n; n = pullext(bp, count); bytes += n; count -= n; QDEBUG checkb(bp, "pullblock "); if (BLEN(bp) == 0 && (free || count)) { *bph = bp->next; bp->next = NULL; freeb(bp); } } return bytes; }
uint16_t ptclcsum_one(struct block *bp, int offset, int len) { uint8_t *addr; uint32_t losum, hisum; uint16_t csum; int odd, blocklen, x, i, boff; struct extra_bdata *ebd; hisum = 0; losum = 0; odd = 0; if (offset < BHLEN(bp)) { x = MIN(len, BHLEN(bp) - offset); odd = (odd + x) & 1; addr = bp->rp + offset; losum = ptclbsum(addr, x); len -= x; offset = 0; } else { offset -= BHLEN(bp); } for (int i = 0; (i < bp->nr_extra_bufs) && len; i++) { ebd = &bp->extra_data[i]; boff = MIN(offset, ebd->len); if (offset) { offset -= boff; if (offset) continue; } x = MIN(len, ebd->len - boff); addr = (void *)(ebd->base + ebd->off); if (odd) hisum += ptclbsum(addr, x); else losum += ptclbsum(addr, x); odd = (odd + x) & 1; len -= x; } losum += hisum >> 8; losum += (hisum & 0xff) << 8; while ((csum = losum >> 16) != 0) losum = csum + (losum & 0xffff); return losum & 0xffff; }
/* Copy up to len bytes from q->bfirst to @to, leaving the block in place.  May
 * return with less than len, but greater than 0, even if there is more
 * available in q.
 *
 * At any moment that we have copied anything and things are tricky, we can just
 * return.  The trickiness comes from a bunch of variables: is the main body
 * empty?  How do we split the ebd?  If our alloc fails, then we can fall back
 * to @to's main body, but only if we haven't used it yet.
 *
 * Returns the number of bytes actually copied (always > 0 if len > 0, per the
 * trailing sanity assert). */
static size_t copy_from_first_block(struct queue *q, struct block *to,
                                    size_t len)
{
	struct block *from = q->bfirst;
	size_t copy_amt, amt;
	struct extra_bdata *ebd;

	assert(len < BLEN(from));	/* sanity: caller wants strictly less than the block holds */
	/* Try to extract from the main body */
	copy_amt = MIN(BHLEN(from), len);
	if (copy_amt) {
		copy_amt = copy_to_block_body(to, from->rp, copy_amt);
		from->rp += copy_amt;
		/* We only change dlen, (data len), not q->len, since the q still has
		 * the same block memory allocation (no kfrees happened) */
		q->dlen -= copy_amt;
	}
	/* Try to extract the remainder from the extra data */
	len -= copy_amt;
	for (int i = 0; (i < from->nr_extra_bufs) && len; i++) {
		ebd = &from->extra_data[i];
		if (!ebd->base || !ebd->len)
			continue;	/* skip holes in the extra_data array */
		if (len >= ebd->len) {
			/* We want this whole ebd: try to move it over wholesale. */
			amt = move_ebd(ebd, to, from, q);
			if (!amt) {
				/* our internal alloc could have failed.  this ebd is now the
				 * last one we'll consider.  let's handle it separately and put
				 * it in the main body.  If we already copied something, bail
				 * with that (the "may return less than len" contract). */
				if (copy_amt)
					return copy_amt;
				copy_amt = copy_to_block_body(to,
				                              (void*)ebd->base + ebd->off,
				                              ebd->len);
				block_and_q_lost_extra(from, q, copy_amt);
				break;
			}
			len -= amt;
			copy_amt += amt;
			continue;
		} else {
			/* If we're here, we reached our final ebd, which we'll need to
			 * split to get anything from it.  Again, prefer returning what
			 * we have over doing the tricky split, if possible. */
			if (copy_amt)
				return copy_amt;
			copy_amt = copy_to_block_body(to, (void*)ebd->base + ebd->off,
			                              len);
			/* Trim the front of the ebd; 'from' keeps the tail. */
			ebd->off += copy_amt;
			ebd->len -= copy_amt;
			block_and_q_lost_extra(from, q, copy_amt);
			break;
		}
	}
	if (len)
		assert(copy_amt);	/* sanity: we must have made progress */
	return copy_amt;
}
/*
 * make sure the first block has at least n bytes in its main body
 */
struct block *pullupqueue(struct queue *q, int n)
{
	struct block *last;

	/* TODO: lock to protect the queue links? */
	if (BHLEN(q->bfirst) >= n)
		return q->bfirst;
	/* pullupblock may merge/free trailing blocks, so re-find the tail */
	q->bfirst = pullupblock(q->bfirst, n);
	last = q->bfirst;
	while (last != NULL && last->next != NULL)
		last = last->next;
	q->blast = last;
	return q->bfirst;
}
void printblock(struct block *b) { unsigned char *c; unsigned int off, elen; struct extra_bdata *e; if (b == NULL) { printk("block is null\n"); return; } printk("block of BLEN = %d, with %d header and %d data in %d extras\n", BLEN(b), BHLEN(b), b->extra_len, b->nr_extra_bufs); printk("header:\n"); printk("%2x:\t", 0); off = 0; for (c = b->rp; c < b->wp; c++) { printk(" %02x", *c & 0xff); off++; if (off % 8 == 0) { printk("\n"); printk("%2x:\t", off); } } printk("\n"); elen = b->extra_len; for (int i = 0; (i < b->nr_extra_bufs) && elen; i++) { e = &b->extra_data[i]; if (e->len == 0) continue; elen -= e->len; printk("data %d:\n", i); printk("%2x:\t", 0); for (off = 0; off < e->len; off++) { c = (unsigned char *)e->base + e->off + off; printk(" %02x", *c & 0xff); if ((off + 1) % 8 == 0 && off +1 < e->len) { printk("\n"); printk("%2x:\t", off + 1); } } } printk("\n"); }
/* Copies up to @amt bytes from block @b into @to, consuming them: rp advances
 * past what we took from the main body, and extra buffers shrink (and are
 * freed when drained).  Returns the number of bytes copied. */
static size_t read_from_block(struct block *b, uint8_t *to, size_t amt)
{
	size_t chunk, sofar = 0;
	struct extra_bdata *ebd;

	/* Main body first; advance rp so a later read resumes after us. */
	chunk = MIN(BHLEN(b), amt);
	memcpy(to, b->rp, chunk);
	b->rp += chunk;
	to += chunk;
	amt -= chunk;
	sofar += chunk;
	for (int i = 0; amt && (i < b->nr_extra_bufs); i++) {
		ebd = &b->extra_data[i];
		/* skip empty entries.  if we track this in the struct block, we
		 * can just start the for loop early */
		if (!ebd->base || !ebd->len)
			continue;
		chunk = MIN(ebd->len, amt);
		memcpy(to, (void*)(ebd->base + ebd->off), chunk);
		/* Consume from this entry, possibly only partially, just like
		 * how we advanced rp above. */
		ebd->len -= chunk;
		ebd->off += chunk;
		b->extra_len -= chunk;
		if (!ebd->len) {
			/* Earliest point we can free the backing buffer; freeb()
			 * would also release it later. */
			kfree((void*)ebd->base);
			ebd->base = ebd->off = 0;
		}
		to += chunk;
		amt -= chunk;
		sofar += chunk;
	}
	return sofar;
}
/* given a string of blocks, fills the new block's extra_data with the contents
 * of the blist [offset, len + offset)
 *
 * returns 0 on success.  the only failure is if the extra_data array was too
 * small, so this returns a positive integer saying how big the extra_data needs
 * to be.
 *
 * callers are responsible for protecting the list structure. */
static int __blist_clone_to(struct block *blist, struct block *newb, int len,
                            uint32_t offset)
{
	struct block *b, *first;
	unsigned int nr_bufs = 0;
	unsigned int b_idx, newb_idx = 0;
	/* NOTE(review): first_main_body appears unused in this function. */
	uint8_t *first_main_body = 0;

	/* find the first block; keep offset relative to the latest b in the
	 * list */
	for (b = blist; b; b = b->next) {
		if (BLEN(b) > offset)
			break;
		offset -= BLEN(b);
	}
	/* qcopy semantics: if you asked for an offset outside the block list,
	 * you get an empty block back */
	if (!b)
		return 0;
	first = b;
	/* upper bound for how many buffers we'll need in newb */
	for (/* b is set*/; b; b = b->next) {
		nr_bufs += 1 + b->nr_extra_bufs;	/* 1 for the main body */
	}
	/* we might be holding a spinlock here, so we won't wait for kmalloc */
	if (block_add_extd(newb, nr_bufs, 0) != 0) {
		/* caller will need to alloc these, then re-call us */
		return nr_bufs;
	}
	for (b = first; b && len; b = b->next) {
		b_idx = 0;
		if (offset) {
			if (offset < BHLEN(b)) {
				/* off is in the main body */
				len -= point_to_body(b, b->rp + offset, newb, newb_idx, len);
				newb_idx++;
			} else {
				/* off is in one of the buffers (or just past the last one).
				 * we're not going to point to b's main body at all. */
				offset -= BHLEN(b);
				assert(b->extra_data);
				/* assuming these extrabufs are packed, or at least that len
				 * isn't gibberish */
				while (b->extra_data[b_idx].len <= offset) {
					offset -= b->extra_data[b_idx].len;
					b_idx++;
				}
				/* now offset is set to our offset in the b_idx'th buf */
				len -= point_to_buf(b, b_idx, offset, newb, newb_idx, len);
				newb_idx++;
				b_idx++;
			}
			/* only the first block we clone from has a nonzero offset */
			offset = 0;
		} else {
			len -= point_to_body(b, b->rp, newb, newb_idx, len);
			newb_idx++;
		}
		/* knock out all remaining bufs.  we only did one point_to_ op by
		 * now, and any point_to_ could be our last if it consumed all of
		 * len. */
		for (int i = b_idx; (i < b->nr_extra_bufs) && len; i++) {
			len -= point_to_buf(b, i, 0, newb, newb_idx, len);
			newb_idx++;
		}
	}
	return 0;
}
/* Adjust block @bp so that its size is exactly @len.
 * If the size is increased, fill in the new contents with zeros.
 * If the size is decreased, discard some of the old contents at the tail.
 *
 * Returns the adjusted block, or NULL if @len is negative (the block is
 * freed in that case). */
struct block *adjustblock(struct block *bp, int len)
{
	struct extra_bdata *ebd;
	void *buf;
	int i;

	/* Negative length means "throw the block away entirely". */
	if (len < 0) {
		freeb(bp);
		return NULL;
	}

	if (len == BLEN(bp))
		return bp;

	/* Shrink within block main body. */
	if (len <= BHLEN(bp)) {
		/* the main body alone covers len; all extras are dropped */
		free_block_extra(bp);
		bp->wp = bp->rp + len;
		QDEBUG checkb(bp, "adjustblock 1");
		return bp;
	}

	/* Need to grow. */
	if (len > BLEN(bp)) {
		/* Grow within block main body. */
		if (bp->extra_len == 0 && bp->rp + len <= bp->lim) {
			memset(bp->wp, 0, len - BLEN(bp));
			bp->wp = bp->rp + len;
			QDEBUG checkb(bp, "adjustblock 2");
			return bp;
		}
		/* Grow with extra data buffers.  kzmalloc gives us the
		 * zero-filled contents. */
		buf = kzmalloc(len - BLEN(bp), MEM_WAIT);
		block_append_extra(bp, (uintptr_t)buf, 0, len - BLEN(bp), MEM_WAIT);
		QDEBUG checkb(bp, "adjustblock 3");
		return bp;
	}

	/* Shrink extra data buffers.
	 * len is how much of ebd we need to keep.
	 * extra_len is re-accumulated. */
	assert(bp->extra_len > 0);
	len -= BHLEN(bp);
	bp->extra_len = 0;
	/* Walk forward until the i'th entry is the one that must be split. */
	for (i = 0; i < bp->nr_extra_bufs; i++) {
		ebd = &bp->extra_data[i];
		if (len <= ebd->len)
			break;
		len -= ebd->len;
		bp->extra_len += ebd->len;
	}
	/* If len becomes zero, extra_data[i] should be freed. */
	if (len > 0) {
		/* truncate the split entry to exactly the bytes we keep */
		ebd = &bp->extra_data[i];
		ebd->len = len;
		bp->extra_len += ebd->len;
		i++;
	}
	/* Release every entry past the cut point. */
	for (; i < bp->nr_extra_bufs; i++) {
		ebd = &bp->extra_data[i];
		if (ebd->base)
			kfree((void*)ebd->base);
		ebd->base = ebd->off = ebd->len = 0;
	}
	QDEBUG checkb(bp, "adjustblock 4");
	return bp;
}
/*
 * make sure the first block has at least n bytes in its main body
 *
 * Returns the (possibly replaced) head block with BHLEN >= n, or 0 if the
 * list ran out of bytes (in which case the list is freed).
 */
struct block *pullupblock(struct block *bp, int n)
{
	int i, len, seglen;
	struct block *nbp;
	struct extra_bdata *ebd;

	/*
	 * this should almost always be true, it's
	 * just to avoid every caller checking.
	 */
	if (BHLEN(bp) >= n)
		return bp;

	/* a start at explicit main-body / header management */
	if (bp->extra_len) {
		if (n > bp->lim - bp->rp) {
			/* would need to realloc a new block and copy everything over. */
			panic("can't pullup %d bytes, no place to put it: bp->lim %p, bp->rp %p, bp->lim-bp->rp %d\n",
			      n, bp->lim, bp->rp, bp->lim-bp->rp);
		}
		/* len is how many bytes we must move from the extras */
		len = n - BHLEN(bp);
		if (len > bp->extra_len)
			panic("pullup more than extra (%d, %d, %d)\n",
			      n, BHLEN(bp), bp->extra_len);
		QDEBUG checkb(bp, "before pullup");
		/* copy from the front of the extra buffers into the main body.
		 * NOTE(review): this loop's 'i' shadows the function-scope 'i'. */
		for (int i = 0; (i < bp->nr_extra_bufs) && len; i++) {
			ebd = &bp->extra_data[i];
			if (!ebd->base || !ebd->len)
				continue;
			seglen = MIN(ebd->len, len);
			memcpy(bp->wp, (void*)(ebd->base + ebd->off), seglen);
			bp->wp += seglen;
			len -= seglen;
			ebd->len -= seglen;
			ebd->off += seglen;
			bp->extra_len -= seglen;
			/* drained this entry: release its backing memory now */
			if (ebd->len == 0) {
				kfree((void *)ebd->base);
				ebd->off = 0;
				ebd->base = 0;
			}
		}
		/* maybe just call pullupblock recursively here */
		if (len)
			panic("pullup %d bytes overdrawn\n", len);
		QDEBUG checkb(bp, "after pullup");
		return bp;
	}

	/*
	 * if not enough room in the first block,
	 * add another to the front of the list.
	 */
	if (bp->lim - bp->rp < n) {
		nbp = block_alloc(n, MEM_WAIT);
		nbp->next = bp;
		bp = nbp;
	}

	/*
	 * copy bytes from the trailing blocks into the first
	 */
	n -= BLEN(bp);
	while ((nbp = bp->next)) {
		i = BLEN(nbp);
		if (i > n) {
			/* trailing block has more than we need: take n, keep it */
			memmove(bp->wp, nbp->rp, n);
			pullupblockcnt++;
			bp->wp += n;
			nbp->rp += n;
			QDEBUG checkb(bp, "pullupblock 1");
			return bp;
		} else {
			/* consume the trailing block entirely and free it */
			memmove(bp->wp, nbp->rp, i);
			pullupblockcnt++;
			bp->wp += i;
			bp->next = nbp->next;
			nbp->next = 0;
			freeb(nbp);
			n -= i;
			if (n == 0) {
				QDEBUG checkb(bp, "pullupblock 2");
				return bp;
			}
		}
	}
	/* ran out of blocks before satisfying n: free what's left, report 0 */
	freeb(bp);
	return 0;
}
/* * throw away the next 'len' bytes in the queue * returning the number actually discarded */ int qdiscard(struct queue *q, int len) { struct block *b; int dowakeup, n, sofar, body_amt, extra_amt; struct extra_bdata *ebd; spin_lock_irqsave(&q->lock); for (sofar = 0; sofar < len; sofar += n) { b = q->bfirst; if (b == NULL) break; QDEBUG checkb(b, "qdiscard"); n = BLEN(b); if (n <= len - sofar) { q->bfirst = b->next; b->next = 0; q->len -= BALLOC(b); q->dlen -= BLEN(b); freeb(b); } else { n = len - sofar; q->dlen -= n; /* partial block removal */ body_amt = MIN(BHLEN(b), n); b->rp += body_amt; extra_amt = n - body_amt; /* reduce q->len by the amount we remove from the extras. The * header will always be accounted for above, during block removal. * */ q->len -= extra_amt; for (int i = 0; (i < b->nr_extra_bufs) && extra_amt; i++) { ebd = &b->extra_data[i]; if (!ebd->base || !ebd->len) continue; if (extra_amt >= ebd->len) { /* remove the entire entry, note the kfree release */ b->extra_len -= ebd->len; extra_amt -= ebd->len; kfree((void*)ebd->base); ebd->base = ebd->off = ebd->len = 0; continue; } ebd->off += extra_amt; ebd->len -= extra_amt; b->extra_len -= extra_amt; extra_amt = 0; } } } /* * if writer flow controlled, restart * * This used to be * q->len < q->limit/2 * but it slows down tcp too much for certain write sizes. * I really don't understand it completely. It may be * due to the queue draining so fast that the transmission * stalls waiting for the app to produce more data. - presotto */ if ((q->state & Qflow) && q->len < q->limit) { q->state &= ~Qflow; dowakeup = 1; } else dowakeup = 0; spin_unlock_irqsave(&q->lock); if (dowakeup) { rendez_wakeup(&q->wr); qwake_cb(q, FDTAP_FILT_WRITABLE); } return sofar; }