/*
 * Pass a chain of blocks to a queue without blocking.  Returns the
 * allocated byte count queued on success, -1 if the queue is already
 * at its limit (the chain is freed), or the chain's data length if
 * the queue is closed (chain freed; caller sees a "successful" write).
 */
int qpass(struct queue *q, struct block *b)
{
	int dlen, len, dowakeup;

	/* sync with qread */
	dowakeup = 0;
	spin_lock_irqsave(&q->lock);
	if (q->len >= q->limit) {
		/* over the limit: drop the entire chain */
		freeblist(b);
		spin_unlock_irqsave(&q->lock);
		return -1;
	}
	if (q->state & Qclosed) {
		/* closed queue: free the chain but report its data length */
		len = blocklen(b);
		freeblist(b);
		spin_unlock_irqsave(&q->lock);
		return len;
	}

	/* add buffer to queue */
	if (q->bfirst)
		q->blast->next = b;
	else
		q->bfirst = b;
	len = BALLOC(b);
	dlen = BLEN(b);
	QDEBUG checkb(b, "qpass");
	/* walk the chain, accumulating allocated and data byte counts */
	while (b->next) {
		b = b->next;
		QDEBUG checkb(b, "qpass");
		len += BALLOC(b);
		dlen += BLEN(b);
	}
	q->blast = b;
	q->len += len;
	q->dlen += dlen;

	/* half-full already counts as flow controlled for qpass writers */
	if (q->len >= q->limit / 2)
		q->state |= Qflow;

	if (q->state & Qstarve) {
		/* a reader was waiting: wake it once we drop the lock */
		q->state &= ~Qstarve;
		dowakeup = 1;
	}
	spin_unlock_irqsave(&q->lock);

	if (dowakeup) {
		rendez_wakeup(&q->rr);
		qwake_cb(q, FDTAP_FILT_READABLE);
	}

	return len;
}
/*
 * Pass a chain of blocks to a queue without blocking.  Returns the
 * allocated byte count queued on success, -1 if the queue is already
 * at its limit (the chain is freed), or the chain's data length if
 * the queue is closed (chain freed; caller sees a "successful" write).
 */
int
qpass(Queue *q, Block *b)
{
	int dlen, len, dowakeup;

	/* sync with qread */
	dowakeup = 0;
	ilock(q);
	if(q->len >= q->limit){
		/* over the limit: drop the entire chain */
		freeblist(b);
		iunlock(q);
		return -1;
	}
	if(q->state & Qclosed){
		/* closed queue: report the data length of the whole chain,
		 * not just the allocated size of the first block (BALLOC(b)
		 * counted only b itself while freeblist frees everything) */
		len = blocklen(b);
		freeblist(b);
		iunlock(q);
		return len;
	}

	/* add buffer to queue */
	if(q->bfirst)
		q->blast->next = b;
	else
		q->bfirst = b;
	len = BALLOC(b);
	dlen = BLEN(b);
	QDEBUG checkb(b, "qpass");
	/* walk the chain, accumulating allocated and data byte counts */
	while(b->next){
		b = b->next;
		QDEBUG checkb(b, "qpass");
		len += BALLOC(b);
		dlen += BLEN(b);
	}
	q->blast = b;
	q->len += len;
	q->dlen += dlen;

	/* half-full already counts as flow controlled for qpass writers */
	if(q->len >= q->limit/2)
		q->state |= Qflow;

	if(q->state & Qstarve){
		/* a reader was waiting: wake it once we drop the lock */
		q->state &= ~Qstarve;
		dowakeup = 1;
	}
	iunlock(q);

	if(dowakeup)
		wakeup(&q->rr);

	return len;
}
/*
 * Interrupt-level block write: append a single block b to q without
 * ever blocking.  Returns the data length of b.
 */
long qibwrite(struct queue *q, struct block *b)
{
	int n, dowakeup;

	dowakeup = 0;

	n = BLEN(b);

	spin_lock_irqsave(&q->lock);

	QDEBUG checkb(b, "qibwrite");
	/* link b at the tail (or make it the whole queue) */
	if (q->bfirst)
		q->blast->next = b;
	else
		q->bfirst = b;
	q->blast = b;
	q->len += BALLOC(b);
	q->dlen += n;

	if (q->state & Qstarve) {
		/* a reader is sleeping: wake it after dropping the lock */
		q->state &= ~Qstarve;
		dowakeup = 1;
	}

	spin_unlock_irqsave(&q->lock);

	if (dowakeup) {
		if (q->kick)
			q->kick(q->arg);
		rendez_wakeup(&q->rr);
		qwake_cb(q, FDTAP_FILT_READABLE);
	}

	return n;
}
/*
 * Dequeue the head block from q; nil when the queue is empty.
 * Marks the queue starved when empty, and restarts a flow-blocked
 * writer once the backlog drops below half the limit.
 */
Block*
qget(Queue *q)
{
	Block *head;
	int wakewriter = 0;

	/* sync with qwrite */
	ilock(q);
	head = q->bfirst;
	if(head == nil){
		/* nothing buffered: remember that a reader went hungry */
		q->state |= Qstarve;
		iunlock(q);
		return nil;
	}

	/* unlink the head and fix the byte accounting */
	q->bfirst = head->next;
	head->next = 0;
	q->len -= BALLOC(head);
	q->dlen -= BLEN(head);
	QDEBUG checkb(head, "qget");

	/* writer was flow controlled and we drained enough: restart it */
	if((q->state & Qflow) && q->len < q->limit/2){
		q->state &= ~Qflow;
		wakewriter = 1;
	}
	iunlock(q);

	if(wakewriter)
		wakeup(&q->wr);
	return head;
}
/*
 * used by print() to write to a queue. Since we may be splhi or not in
 * a process, don't qlock.
 *
 * this routine merges adjacent blocks if block n+1 will fit into
 * the free space of block n.
 *
 * Copies len bytes from vp into the queue in Maxatomic-sized chunks;
 * returns the number of bytes actually queued (may be short if block
 * allocation fails or the 16K kernel-print cap is hit).
 */
int
qiwrite(Queue *q, void *vp, int len)
{
	int n, sofar, dowakeup;
	Block *b;
	uchar *p = vp;

	dowakeup = 0;

	sofar = 0;
	do {
		/* copy at most Maxatomic bytes per block */
		n = len-sofar;
		if(n > Maxatomic)
			n = Maxatomic;

		b = iallocb(n);
		if(b == nil)
			break;
		memmove(b->wp, p+sofar, n);
		b->wp += n;

		ilock(q);

		/* we use an artificially high limit for kernel prints since anything
		 * over the limit gets dropped */
		if(q->dlen >= 16*1024){
			iunlock(q);
			freeb(b);
			break;
		}

		QDEBUG checkb(b, "qiwrite");
		if(q->bfirst)
			q->blast->next = b;
		else
			q->bfirst = b;
		q->blast = b;
		q->len += BALLOC(b);
		q->dlen += n;

		if(q->state & Qstarve){
			/* a reader is sleeping: wake it after unlocking */
			q->state &= ~Qstarve;
			dowakeup = 1;
		}

		iunlock(q);

		if(dowakeup){
			if(q->kick)
				q->kick(q->arg);
			wakeup(&q->rr);
		}

		sofar += n;
		/* a message queue takes only one chunk per call */
	} while(sofar < len && (q->state & Qmsg) == 0);

	return sofar;
}
/*
 * Interrupt-level producer: copy len bytes from vp into a fresh block
 * appended to q.  Never blocks; returns len on success, -1 if the
 * queue is closed, full (Qflow is raised), or allocation fails.
 */
int
qproduce(Queue *q, void *vp, int len)
{
	Block *b;
	int dowakeup;
	uchar *p = vp;

	/* sync with qread */
	dowakeup = 0;
	lock(&q->l);

	/* refuse if closed */
	if(q->state & Qclosed){
		unlock(&q->l);
		return -1;
	}

	/* no waiting receivers, room in buffer? */
	if(q->len >= q->limit){
		q->state |= Qflow;
		unlock(&q->l);
		return -1;
	}

	/* save in buffer */
	b = iallocb(len);
	if(b == 0){
		unlock(&q->l);
		print("qproduce: iallocb failed\n");
		return -1;
	}
	memmove(b->wp, p, len);
	b->wp += len;
	if(q->bfirst)
		q->blast->next = b;
	else
		q->bfirst = b;
	q->blast = b;
	/* b->next = 0; done by allocb() */
	q->len += BALLOC(b);
	q->dlen += BLEN(b);
	QDEBUG checkb(b, "qproduce");

	if(q->state & Qstarve){
		/* a reader is waiting: wake it after unlocking */
		q->state &= ~Qstarve;
		dowakeup = 1;
	}

	/* we just filled up: flow control subsequent producers */
	if(q->len >= q->limit)
		q->state |= Qflow;

	unlock(&q->l);

	if(dowakeup)
		Wakeup(&q->rr);

	return len;
}
/*
 * put a block back to the front of the queue
 * called with q ilocked
 *
 * Undoes a prior dequeue: b becomes the new head, and the queue's
 * byte accounting is restored.
 */
void qputback(struct queue *q, struct block *b)
{
	/* empty queue: b is also the new tail */
	if (q->bfirst == NULL)
		q->blast = b;
	b->next = q->bfirst;
	q->bfirst = b;
	q->len += BALLOC(b);
	q->dlen += BLEN(b);
}
/*
 * put a block back to the front of the queue
 * called with q ilocked
 *
 * Undoes a prior dequeue: b becomes the new head, and the queue's
 * byte accounting is restored.
 */
void
qputback(Queue *q, Block *b)
{
	/* empty queue: b is also the new tail */
	if(q->bfirst == nil)
		q->blast = b;
	b->next = q->bfirst;
	q->bfirst = b;
	q->len += BALLOC(b);
	q->dlen += BLEN(b);
}
/*
 * Helper: removes and returns the first block from q, adjusting the
 * queue's allocated/data byte counts.  Caller must hold q's lock and
 * guarantee the queue is non-empty.
 */
static struct block *pop_first_block(struct queue *q)
{
	struct block *head = q->bfirst;

	/* fix the accounting, then unlink the head */
	q->len -= BALLOC(head);
	q->dlen -= BLEN(head);
	q->bfirst = head->next;
	head->next = 0;
	return head;
}
/*
 * return count of space in blocks
 *
 * Sums the allocated size (BALLOC) of every block in the chain.
 */
int blockalloclen(struct block *bp)
{
	int total = 0;

	for (; bp; bp = bp->next)
		total += BALLOC(bp);
	return total;
}
/*
 * Helper: enqueues a chain of blocks onto the tail of q and updates
 * the queue's allocated/data byte counts.  Caller must hold q's lock.
 * Returns the total data length of the chain.
 */
static size_t enqueue_blist(struct queue *q, struct block *b)
{
	size_t alloc_bytes, data_bytes;
	struct block *last;

	/* hook the chain onto the tail (or make it the whole queue) */
	if (q->bfirst)
		q->blast->next = b;
	else
		q->bfirst = b;

	/* walk to the end, tallying both byte counts as we go */
	alloc_bytes = BALLOC(b);
	data_bytes = BLEN(b);
	for (last = b; last->next; last = last->next) {
		alloc_bytes += BALLOC(last->next);
		data_bytes += BLEN(last->next);
	}
	q->blast = last;
	q->len += alloc_bytes;
	q->dlen += data_bytes;
	return data_bytes;
}
/*
 * used by print() to write to a queue. Since we may be splhi or not in
 * a process, don't qlock.
 *
 * Copies len bytes from vp into the queue in Maxatomic-sized chunks;
 * unlike the capped variant there is no drop limit here.  Returns the
 * number of bytes actually queued (short only on allocation failure).
 */
int
qiwrite(Queue *q, void *vp, int len)
{
	int n, sofar, dowakeup;
	Block *b;
	uchar *p = vp;

	dowakeup = 0;

	sofar = 0;
	do {
		/* copy at most Maxatomic bytes per block */
		n = len-sofar;
		if(n > Maxatomic)
			n = Maxatomic;

		b = iallocb(n);
		if (b == 0) {
			print("qiwrite: iallocb failed\n");
			break;
		}
		memmove(b->wp, p+sofar, n);
		b->wp += n;

		lock(&q->l);

		QDEBUG checkb(b, "qiwrite");
		if(q->bfirst)
			q->blast->next = b;
		else
			q->bfirst = b;
		q->blast = b;
		q->len += BALLOC(b);
		q->dlen += n;

		if(q->state & Qstarve){
			/* a reader is sleeping: wake it after unlocking */
			q->state &= ~Qstarve;
			dowakeup = 1;
		}

		unlock(&q->l);

		if(dowakeup){
			if(q->kick)
				q->kick(q->arg);
			Wakeup(&q->rr);
		}

		sofar += n;
		/* a message queue takes only one chunk per call */
	} while(sofar < len && (q->state & Qmsg) == 0);

	return sofar;
}
/*
 * called with q ilocked
 *
 * Dequeue and return the head block, or NULL if the queue is empty.
 * No flow-control or wakeup side effects.
 */
struct block *qremove(struct queue *q)
{
	struct block *head = q->bfirst;

	if (!head)
		return NULL;
	q->bfirst = head->next;
	head->next = NULL;
	q->dlen -= BLEN(head);
	q->len -= BALLOC(head);
	QDEBUG checkb(head, "qremove");
	return head;
}
/*
 * throw away the next 'len' bytes in the queue
 *
 * Returns the number of bytes actually discarded (less than len if
 * the queue runs dry).  Whole blocks are freed; a final partial block
 * just has its read pointer advanced.
 */
int
qdiscard(Queue *q, int len)
{
	Block *b;
	int dowakeup, n, sofar;

	ilock(q);
	for(sofar = 0; sofar < len; sofar += n){
		b = q->bfirst;
		if(b == nil)
			break;
		QDEBUG checkb(b, "qdiscard");
		n = BLEN(b);
		if(n <= len - sofar){
			/* this whole block goes: unlink and free it */
			q->bfirst = b->next;
			b->next = 0;
			q->len -= BALLOC(b);
			q->dlen -= BLEN(b);
			freeb(b);
		} else {
			/* partial: consume only the remaining byte count */
			n = len - sofar;
			b->rp += n;
			q->dlen -= n;
		}
	}

	/*
	 *  if writer flow controlled, restart
	 *
	 *  This used to be
	 *	q->len < q->limit/2
	 *  but it slows down tcp too much for certain write sizes.
	 *  I really don't understand it completely.  It may be
	 *  due to the queue draining so fast that the transmission
	 *  stalls waiting for the app to produce more data.  - presotto
	 */
	if((q->state & Qflow) && q->len < q->limit){
		q->state &= ~Qflow;
		dowakeup = 1;
	} else
		dowakeup = 0;

	iunlock(q);

	if(dowakeup)
		wakeup(&q->wr);

	return sofar;
}
/*
 * called with q ilocked
 *
 * Dequeue and return the head block, or nil if the queue is empty.
 * No flow-control or wakeup side effects.
 */
Block*
qremove(Queue *q)
{
	Block *head;

	head = q->bfirst;
	if(head == nil)
		return nil;
	q->bfirst = head->next;
	head->next = nil;
	q->dlen -= BLEN(head);
	q->len -= BALLOC(head);
	QDEBUG checkb(head, "qremove");
	return head;
}
/*
 *  if the allocated space is way out of line with the used
 *  space, reallocate to a smaller block
 *
 *  Walks the chain through a pointer-to-pointer so each shrunken
 *  replacement is spliced in place; a block is repacked when its
 *  data occupies less than a quarter of its allocation.
 */
Block*
packblock(Block *bp)
{
	Block **l, *nbp;
	int n;

	for(l = &bp; (nbp = *l) != nil; l = &(*l)->next){
		n = BLEN(nbp);
		/* n<<2 < BALLOC: under 25% utilized, worth repacking */
		if((n<<2) < BALLOC(nbp)){
			*l = allocb(n);
			memmove((*l)->wp, nbp->rp, n);
			(*l)->wp += n;
			/* splice the replacement in and free the original */
			(*l)->next = nbp->next;
			freeb(nbp);
		}
	}

	return bp;
}
/*
 *  if the allocated space is way out of line with the used
 *  space, reallocate to a smaller block
 *
 *  Walks the chain through a pointer-to-pointer so each shrunken
 *  replacement is spliced in place; a block is repacked when its
 *  data occupies less than a quarter of its allocation.  Blocks with
 *  extra_data segments are left untouched.
 */
struct block *packblock(struct block *bp)
{
	struct block **l, *nbp;
	int n;

	/* can't repack a block that carries extra_data segments */
	if (bp->extra_len)
		return bp;

	for (l = &bp; *l; l = &(*l)->next) {
		nbp = *l;
		n = BLEN(nbp);
		/* n << 2 < BALLOC: under 25% utilized, worth repacking */
		if ((n << 2) < BALLOC(nbp)) {
			*l = block_alloc(n, MEM_WAIT);
			memmove((*l)->wp, nbp->rp, n);
			(*l)->wp += n;
			/* splice the replacement in and free the original */
			(*l)->next = nbp->next;
			freeb(nbp);
		}
	}

	return bp;
}
/*
 * throw away the next 'len' bytes in the queue
 *
 * Returns the number of bytes actually discarded (less than len if
 * the queue runs dry).  Whole blocks are freed; a final partial block
 * just has its read pointer advanced.
 */
int
qdiscard(Queue *q, int len)
{
	Block *b;
	int dowakeup, n, sofar;

	lock(&q->l);
	for(sofar = 0; sofar < len; sofar += n){
		b = q->bfirst;
		if(b == nil)
			break;
		QDEBUG checkb(b, "qdiscard");
		n = BLEN(b);
		if(n <= len - sofar){
			/* this whole block goes: unlink and free it */
			q->bfirst = b->next;
			b->next = 0;
			q->len -= BALLOC(b);
			q->dlen -= BLEN(b);
			freeb(b);
		} else {
			/* partial: consume only the remaining byte count */
			n = len - sofar;
			b->rp += n;
			q->dlen -= n;
		}
	}

	/* if writer flow controlled, restart */
	if((q->state & Qflow) && q->len < q->limit){
		q->state &= ~Qflow;
		dowakeup = 1;
	} else
		dowakeup = 0;

	unlock(&q->l);

	if(dowakeup)
		Wakeup(&q->wr);

	return sofar;
}
/*
 * get next block from a queue, return null if nothing there
 *
 * Marks the queue starved when empty, and restarts a flow-blocked
 * writer once the backlog drops below half the limit.
 */
struct block *qget(struct queue *q)
{
	int dowakeup;
	struct block *b;

	/* sync with qwrite */
	spin_lock_irqsave(&q->lock);

	b = q->bfirst;
	if (b == NULL) {
		/* nothing buffered: remember that a reader went hungry */
		q->state |= Qstarve;
		spin_unlock_irqsave(&q->lock);
		return NULL;
	}
	/* unlink the head and fix the byte accounting */
	q->bfirst = b->next;
	b->next = 0;
	q->len -= BALLOC(b);
	q->dlen -= BLEN(b);
	QDEBUG checkb(b, "qget");

	/* if writer flow controlled, restart */
	if ((q->state & Qflow) && q->len < q->limit / 2) {
		q->state &= ~Qflow;
		dowakeup = 1;
	} else
		dowakeup = 0;

	spin_unlock_irqsave(&q->lock);

	if (dowakeup) {
		rendez_wakeup(&q->wr);
		/* We only send the writable event on wakeup, which is edge triggered */
		qwake_cb(q, FDTAP_FILT_WRITABLE);
	}

	return b;
}
/*
 * add a block to a queue obeying flow control
 *
 * Returns the data length of b.  May sleep until the queue drains
 * below its limit (unless q->noblock is set, in which case an
 * over-limit block is silently dropped and the write still "succeeds").
 * Raises an error if the queue is closed.
 */
long
qbwrite(Queue *q, Block *b)
{
	int n, dowakeup;
	/* volatile so cb.b survives the waserror() longjmp */
	volatile struct {Block *b;} cb;

	dowakeup = 0;
	n = BLEN(b);

	/* bypass short-circuits the queue entirely */
	if(q->bypass){
		(*q->bypass)(q->arg, b);
		return n;
	}

	cb.b = b;
	qlock(&q->wlock);
	if(waserror()){
		/* free b only if it has not been queued yet */
		if(cb.b != nil)
			freeb(cb.b);
		qunlock(&q->wlock);
		nexterror();
	}

	lock(&q->l);

	/* give up if the queue is closed */
	if(q->state & Qclosed){
		unlock(&q->l);
		error(q->err);
	}

	/* if nonblocking, don't queue over the limit */
	if(q->len >= q->limit){
		if(q->noblock){
			unlock(&q->l);
			freeb(b);
			poperror();
			qunlock(&q->wlock);
			return n;
		}
	}

	/* queue the block */
	if(q->bfirst)
		q->blast->next = b;
	else
		q->bfirst = b;
	q->blast = b;
	b->next = 0;
	q->len += BALLOC(b);
	q->dlen += n;
	QDEBUG checkb(b, "qbwrite");
	/* b is on the queue now; the error path must not free it */
	cb.b = nil;

	if(q->state & Qstarve){
		q->state &= ~Qstarve;
		dowakeup = 1;
	}

	unlock(&q->l);

	/* get output going again */
	if(q->kick && (dowakeup || (q->state&Qkick)))
		q->kick(q->arg);

	if(dowakeup)
		Wakeup(&q->rr);

	/*
	 *  flow control, wait for queue to get below the limit
	 *  before allowing the process to continue and queue
	 *  more.  We do this here so that postnote can only
	 *  interrupt us after the data has been queued.  This
	 *  means that things like 9p flushes and ssl messages
	 *  will not be disrupted by software interrupts.
	 *
	 *  Note - this is moderately dangerous since a process
	 *  that keeps getting interrupted and rewriting will
	 *  queue infinite crud.
	 */
	for(;;){
		if(q->noblock || qnotfull(q))
			break;

		lock(&q->l);
		q->state |= Qflow;
		unlock(&q->l);
		Sleep(&q->wr, qnotfull, q);
	}

	qunlock(&q->wlock);
	poperror();

	return n;
}
/*
 *  Interrupt level copy out of a queue, return # bytes copied.
 *
 *  Skips and frees any leading zero-length blocks; returns -1 (and
 *  marks the queue starved) when nothing is available.  For message
 *  queues, the remainder of a partially read block is discarded.
 */
int
qconsume(Queue *q, void *vp, int len)
{
	Block *b;
	int n, dowakeup;
	uchar *p = vp;
	Block *tofree = nil;

	/* sync with qwrite */
	ilock(q);

	for(;;) {
		b = q->bfirst;
		if(b == 0){
			/* nothing buffered: remember a reader went hungry */
			q->state |= Qstarve;
			iunlock(q);
			return -1;
		}
		QDEBUG checkb(b, "qconsume 1");

		n = BLEN(b);
		if(n > 0)
			break;
		/* zero-length block: unlink it and defer the free */
		q->bfirst = b->next;
		q->len -= BALLOC(b);

		/* remember to free this */
		b->next = tofree;
		tofree = b;
	};

	if(n < len)
		len = n;
	memmove(p, b->rp, len);
	consumecnt += n;
	b->rp += len;
	q->dlen -= len;

	/* discard the block if we're done with it */
	if((q->state & Qmsg) || len == n){
		q->bfirst = b->next;
		b->next = 0;
		q->len -= BALLOC(b);
		q->dlen -= BLEN(b);

		/* remember to free this */
		b->next = tofree;
		tofree = b;
	}

	/* if writer flow controlled, restart */
	if((q->state & Qflow) && q->len < q->limit/2){
		q->state &= ~Qflow;
		dowakeup = 1;
	} else
		dowakeup = 0;

	iunlock(q);

	if(dowakeup)
		wakeup(&q->wr);

	/* free the deferred blocks outside the lock */
	if(tofree != nil)
		freeblist(tofree);

	return len;
}
/*
 * add a block to a queue obeying flow control
 *
 * Returns the data length of b.  May sleep until the queue drains
 * below its limit.  Qdropoverflow silently drops an over-limit block
 * (write still "succeeds"); Qnonblock raises EAGAIN instead.  Raises
 * an error if the queue is closed.
 */
long qbwrite(struct queue *q, struct block *b)
{
	ERRSTACK(1);
	int n, dowakeup;
	/* volatile so the flag survives the waserror() longjmp */
	volatile bool should_free_b = TRUE;

	n = BLEN(b);

	/* bypass short-circuits the queue entirely */
	if (q->bypass) {
		(*q->bypass) (q->arg, b);
		return n;
	}

	dowakeup = 0;
	qlock(&q->wlock);
	if (waserror()) {
		/* free b only if it has not been queued yet */
		if (b != NULL && should_free_b)
			freeb(b);
		qunlock(&q->wlock);
		nexterror();
	}

	spin_lock_irqsave(&q->lock);

	/* give up if the queue is closed */
	if (q->state & Qclosed) {
		spin_unlock_irqsave(&q->lock);
		error(EFAIL, q->err);
	}

	/* if nonblocking, don't queue over the limit */
	if (q->len >= q->limit) {
		/* drop overflow takes priority over regular non-blocking */
		if (q->state & Qdropoverflow) {
			spin_unlock_irqsave(&q->lock);
			freeb(b);
			dropcnt += n;
			qunlock(&q->wlock);
			poperror();
			return n;
		}
		if (q->state & Qnonblock) {
			spin_unlock_irqsave(&q->lock);
			freeb(b);
			error(EAGAIN, "queue full");
		}
	}

	/* queue the block */
	should_free_b = FALSE;
	if (q->bfirst)
		q->blast->next = b;
	else
		q->bfirst = b;
	q->blast = b;
	b->next = 0;
	q->len += BALLOC(b);
	q->dlen += n;
	QDEBUG checkb(b, "qbwrite");
	b = NULL;

	/* make sure other end gets awakened */
	if (q->state & Qstarve) {
		q->state &= ~Qstarve;
		dowakeup = 1;
	}
	spin_unlock_irqsave(&q->lock);

	/* get output going again */
	if (q->kick && (dowakeup || (q->state & Qkick)))
		q->kick(q->arg);

	/* wakeup anyone consuming at the other end */
	if (dowakeup) {
		rendez_wakeup(&q->rr);
		qwake_cb(q, FDTAP_FILT_READABLE);
	}

	/*
	 *  flow control, wait for queue to get below the limit
	 *  before allowing the process to continue and queue
	 *  more.  We do this here so that postnote can only
	 *  interrupt us after the data has been queued.  This
	 *  means that things like 9p flushes and ssl messages
	 *  will not be disrupted by software interrupts.
	 *
	 *  Note - this is moderately dangerous since a process
	 *  that keeps getting interrupted and rewriting will
	 *  queue infinite crud.
	 */
	for (;;) {
		if ((q->state & (Qdropoverflow | Qnonblock)) || qnotfull(q))
			break;

		spin_lock_irqsave(&q->lock);
		q->state |= Qflow;
		spin_unlock_irqsave(&q->lock);
		rendez_sleep(&q->wr, qnotfull, q);
	}

	qunlock(&q->wlock);
	poperror();

	return n;
}
/*
 * add a block to a queue obeying flow control
 *
 * Returns the data length of b.  May sleep until the queue drains
 * below its limit (unless q->noblock is set, in which case an
 * over-limit block is dropped, noblockcnt updated, and the write
 * still "succeeds").  Raises an error if the queue is closed.
 */
long
qbwrite(Queue *q, Block *b)
{
	int n, dowakeup;
	Proc *p;

	n = BLEN(b);

	/* bypass short-circuits the queue entirely */
	if(q->bypass){
		(*q->bypass)(q->arg, b);
		return n;
	}

	dowakeup = 0;
	qlock(&q->wlock);
	if(waserror()){
		/* free b only if it has not been queued yet (b is nil'd below) */
		if(b != nil)
			freeb(b);
		qunlock(&q->wlock);
		nexterror();
	}

	ilock(q);

	/* give up if the queue is closed */
	if(q->state & Qclosed){
		iunlock(q);
		error(q->err);
	}

	/* if nonblocking, don't queue over the limit */
	if(q->len >= q->limit){
		if(q->noblock){
			iunlock(q);
			freeb(b);
			noblockcnt += n;
			qunlock(&q->wlock);
			poperror();
			return n;
		}
	}

	/* queue the block */
	if(q->bfirst)
		q->blast->next = b;
	else
		q->bfirst = b;
	q->blast = b;
	b->next = 0;
	q->len += BALLOC(b);
	q->dlen += n;
	QDEBUG checkb(b, "qbwrite");
	b = nil;

	/* make sure other end gets awakened */
	if(q->state & Qstarve){
		q->state &= ~Qstarve;
		dowakeup = 1;
	}
	iunlock(q);

	/* get output going again */
	if(q->kick && (dowakeup || (q->state&Qkick)))
		q->kick(q->arg);

	/* wakeup anyone consuming at the other end */
	if(dowakeup){
		p = wakeup(&q->rr);

		/* if we just wokeup a higher priority process, let it run */
		if(p != nil && p->priority > up->priority)
			sched();
	}

	/*
	 *  flow control, wait for queue to get below the limit
	 *  before allowing the process to continue and queue
	 *  more.  We do this here so that postnote can only
	 *  interrupt us after the data has been queued.  This
	 *  means that things like 9p flushes and ssl messages
	 *  will not be disrupted by software interrupts.
	 *
	 *  Note - this is moderately dangerous since a process
	 *  that keeps getting interrupted and rewriting will
	 *  queue infinite crud.
	 */
	for(;;){
		if(q->noblock || qnotfull(q))
			break;

		ilock(q);
		q->state |= Qflow;
		iunlock(q);
		sleep(&q->wr, qnotfull, q);
	}
	USED(b);

	qunlock(&q->wlock);
	poperror();

	return n;
}
/*
 *  Interrupt level copy out of a queue, return # bytes copied.
 *
 *  Skips and frees any leading zero-length blocks; returns -1 (and
 *  marks the queue starved) when nothing is available.  For message
 *  queues, the remainder of a partially read block is discarded.
 */
int qconsume(struct queue *q, void *vp, int len)
{
	struct block *b;
	int n, dowakeup;
	uint8_t *p = vp;
	struct block *tofree = NULL;

	/* sync with qwrite */
	spin_lock_irqsave(&q->lock);

	for (;;) {
		b = q->bfirst;
		if (b == 0) {
			/* nothing buffered: remember a reader went hungry */
			q->state |= Qstarve;
			spin_unlock_irqsave(&q->lock);
			return -1;
		}
		QDEBUG checkb(b, "qconsume 1");

		n = BLEN(b);
		if (n > 0)
			break;
		/* zero-length block: unlink it and defer the free */
		q->bfirst = b->next;
		q->len -= BALLOC(b);

		/* remember to free this */
		b->next = tofree;
		tofree = b;
	};
	/* this path can't handle blocks with extra_data segments */
	PANIC_EXTRA(b);

	if (n < len)
		len = n;
	memmove(p, b->rp, len);
	consumecnt += n;
	b->rp += len;
	q->dlen -= len;

	/* discard the block if we're done with it */
	if ((q->state & Qmsg) || len == n) {
		q->bfirst = b->next;
		b->next = 0;
		q->len -= BALLOC(b);
		q->dlen -= BLEN(b);

		/* remember to free this */
		b->next = tofree;
		tofree = b;
	}

	/* if writer flow controlled, restart */
	if ((q->state & Qflow) && q->len < q->limit / 2) {
		q->state &= ~Qflow;
		dowakeup = 1;
	} else
		dowakeup = 0;

	spin_unlock_irqsave(&q->lock);

	if (dowakeup) {
		rendez_wakeup(&q->wr);
		qwake_cb(q, FDTAP_FILT_WRITABLE);
	}

	/* free the deferred blocks outside the lock */
	if (tofree != NULL)
		freeblist(tofree);

	return len;
}
/*
 *  throw away the next 'len' bytes in the queue
 * returning the number actually discarded
 *
 *  Whole blocks are freed; a final partial block has its main body
 *  consumed first, then as much of its extra_data segments as needed.
 */
int qdiscard(struct queue *q, int len)
{
	struct block *b;
	int dowakeup, n, sofar, body_amt, extra_amt;
	struct extra_bdata *ebd;

	spin_lock_irqsave(&q->lock);
	for (sofar = 0; sofar < len; sofar += n) {
		b = q->bfirst;
		if (b == NULL)
			break;
		QDEBUG checkb(b, "qdiscard");
		n = BLEN(b);
		if (n <= len - sofar) {
			/* this whole block goes: unlink and free it */
			q->bfirst = b->next;
			b->next = 0;
			q->len -= BALLOC(b);
			q->dlen -= BLEN(b);
			freeb(b);
		} else {
			n = len - sofar;
			q->dlen -= n;
			/* partial block removal */
			body_amt = MIN(BHLEN(b), n);
			b->rp += body_amt;
			extra_amt = n - body_amt;
			/* reduce q->len by the amount we remove from the extras.  The
			 * header will always be accounted for above, during block removal.
			 * */
			q->len -= extra_amt;
			for (int i = 0; (i < b->nr_extra_bufs) && extra_amt; i++) {
				ebd = &b->extra_data[i];
				if (!ebd->base || !ebd->len)
					continue;
				if (extra_amt >= ebd->len) {
					/* remove the entire entry, note the kfree release */
					b->extra_len -= ebd->len;
					extra_amt -= ebd->len;
					kfree((void*)ebd->base);
					ebd->base = ebd->off = ebd->len = 0;
					continue;
				}
				/* consume only part of this segment */
				ebd->off += extra_amt;
				ebd->len -= extra_amt;
				b->extra_len -= extra_amt;
				extra_amt = 0;
			}
		}
	}

	/*
	 *  if writer flow controlled, restart
	 *
	 *  This used to be
	 *	q->len < q->limit/2
	 *  but it slows down tcp too much for certain write sizes.
	 *  I really don't understand it completely.  It may be
	 *  due to the queue draining so fast that the transmission
	 *  stalls waiting for the app to produce more data.  - presotto
	 */
	if ((q->state & Qflow) && q->len < q->limit) {
		q->state &= ~Qflow;
		dowakeup = 1;
	} else
		dowakeup = 0;

	spin_unlock_irqsave(&q->lock);

	if (dowakeup) {
		rendez_wakeup(&q->wr);
		qwake_cb(q, FDTAP_FILT_WRITABLE);
	}

	return sofar;
}
/*
 * Interrupt-level producer: copy len bytes from vp into q without
 * blocking.  With Qcoalesce, data is appended into the tail block's
 * free space when it fits; otherwise a new block is allocated.
 * Returns len on success, -1 if the queue is full (Qflow raised),
 * and 0 if block allocation fails.
 */
int qproduce(struct queue *q, void *vp, int len)
{
	struct block *b;
	int dowakeup;
	uint8_t *p = vp;

	/* sync with qread */
	dowakeup = 0;
	spin_lock_irqsave(&q->lock);

	/* no waiting receivers, room in buffer? */
	if (q->len >= q->limit) {
		q->state |= Qflow;
		spin_unlock_irqsave(&q->lock);
		return -1;
	}

	/* save in buffer */
	/* use Qcoalesce here to save storage */
	// TODO: Consider removing the Qcoalesce flag and force a coalescing
	// strategy by default.
	b = q->blast;
	/* b is only dereferenced when bfirst is non-NULL (short-circuit) */
	if ((q->state & Qcoalesce) == 0 || q->bfirst == NULL
		|| b->lim - b->wp < len) {
		/* need a new block */
		b = iallocb(len);
		if (b == 0) {
			spin_unlock_irqsave(&q->lock);
			return 0;
		}
		if (q->bfirst)
			q->blast->next = b;
		else
			q->bfirst = b;
		q->blast = b;
		/* b->next = 0; done by iallocb() */
		q->len += BALLOC(b);
	}
	/* this path can't handle blocks with extra_data segments */
	PANIC_EXTRA(b);
	memmove(b->wp, p, len);
	producecnt += len;
	b->wp += len;
	q->dlen += len;
	QDEBUG checkb(b, "qproduce");

	if (q->state & Qstarve) {
		/* a reader is waiting: wake it after unlocking */
		q->state &= ~Qstarve;
		dowakeup = 1;
	}

	/* we just filled up: flow control subsequent producers */
	if (q->len >= q->limit)
		q->state |= Qflow;
	spin_unlock_irqsave(&q->lock);

	if (dowakeup) {
		rendez_wakeup(&q->rr);
		qwake_cb(q, FDTAP_FILT_READABLE);
	}

	return len;
}
/*
 * add a block to a queue obeying flow control
 *
 * Returns the data length of b.  Flow control (via qflow) happens
 * after the block is queued so an interrupt cannot disrupt a write
 * that has already deposited its data.  With q->noblock, an over-limit
 * block is silently dropped and the write still "succeeds".  Raises
 * an error if the queue is closed.
 */
long
qbwrite(Queue *q, Block *b)
{
	int n, dowakeup;
	Proc *p;

	n = BLEN(b);

	/* bypass short-circuits the queue entirely */
	if(q->bypass != nil){
		(*q->bypass)(q->arg, b);
		return n;
	}

	dowakeup = 0;
	if(waserror()){
		/* error before poperror(): b was never queued, free it */
		freeb(b);
		nexterror();
	}
	ilock(q);

	/* give up if the queue is closed */
	if(q->state & Qclosed){
		iunlock(q);
		error(q->err);
	}

	/* don't queue over the limit */
	if(q->len >= q->limit && q->noblock){
		iunlock(q);
		freeb(b);
		poperror();
		return n;
	}

	/* queue the block */
	if(q->bfirst != nil)
		q->blast->next = b;
	else
		q->bfirst = b;
	q->blast = b;
	b->next = nil;
	q->len += BALLOC(b);
	q->dlen += n;
	QDEBUG checkb(b, "qbwrite");

	/* make sure other end gets awakened */
	if(q->state & Qstarve){
		q->state &= ~Qstarve;
		dowakeup = 1;
	}
	iunlock(q);
	poperror();

	/* get output going again */
	if(q->kick != nil && (dowakeup || (q->state&Qkick)))
		q->kick(q->arg);

	/* wakeup anyone consuming at the other end */
	if(dowakeup){
		p = wakeup(&q->rr);

		/* if we just wokeup a higher priority process, let it run */
		if(p != nil && p->priority > up->priority)
			sched();
	}

	/*
	 *  flow control, before allowing the process to continue and
	 *  queue more.  We do this here so that postnote can only
	 *  interrupt us after the data has been queued.  This means that
	 *  things like 9p flushes and ssl messages will not be disrupted
	 *  by software interrupts.
	 */
	qflow(q);

	return n;
}