/* Block, waiting for the queue to be non-empty or closed.  Returns with
 * the spinlock held.  Returns TRUE when the queue is not empty, FALSE if it
 * was naturally closed.  Throws an error otherwise. */
static bool qwait_and_ilock(struct queue *q, int qio_flags)
{
	while (1) {
		spin_lock_irqsave(&q->lock);
		if (q->bfirst != NULL)
			return TRUE;
		if (q->state & Qclosed) {
			if (++q->eof > 3) {
				spin_unlock_irqsave(&q->lock);
				error(EFAIL, "multiple reads on a closed queue");
			}
			if (q->err[0]) {
				spin_unlock_irqsave(&q->lock);
				error(EFAIL, q->err);
			}
			return FALSE;
		}
		/* We set Qstarve regardless of whether we are non-blocking or
		 * not.  Qstarve tracks the edge detection of the queue being
		 * empty. */
		q->state |= Qstarve;
		if (qio_flags & QIO_NON_BLOCK) {
			spin_unlock_irqsave(&q->lock);
			error(EAGAIN, "queue empty");
		}
		spin_unlock_irqsave(&q->lock);
		/* may throw an error() */
		rendez_sleep(&q->rr, notempty, q);
	}
}
/* Wait for the queue to be non-empty or closed.  Returns TRUE for a
 * successful wait, FALSE on Qclose (without error).
 *
 * Called with q ilocked.  May error out, back through the caller, with
 * the irqsave lock unlocked. */
static bool qwait(struct queue *q)
{
	/* wait for data */
	for (;;) {
		if (q->bfirst != NULL)
			break;
		if (q->state & Qclosed) {
			if (++q->eof > 3) {
				spin_unlock_irqsave(&q->lock);
				error(EFAIL, "multiple reads on a closed queue");
			}
			if (*q->err &&
			    strcmp(q->err, errno_to_string(ECONNABORTED)) != 0) {
				spin_unlock_irqsave(&q->lock);
				error(EFAIL, q->err);
			}
			return FALSE;
		}
		/* We set Qstarve regardless of whether we are non-blocking or
		 * not.  Qstarve tracks the edge detection of the queue being
		 * empty. */
		q->state |= Qstarve;
		if (q->state & Qnonblock) {
			spin_unlock_irqsave(&q->lock);
			error(EAGAIN, "queue empty");
		}
		spin_unlock_irqsave(&q->lock);
		/* may throw an error() */
		rendez_sleep(&q->rr, notempty, q);
		spin_lock_irqsave(&q->lock);
	}
	return TRUE;
}
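/* Both wait paths above sleep on q->rr with the notempty predicate, which is
 * not shown in this excerpt.  A minimal sketch of what it likely looks like,
 * assuming the struct queue fields used above (bfirst, state) and the usual
 * rendez convention that the condition returns nonzero to wake.  A closed
 * queue must also wake sleepers so they can observe Qclosed and return. */
static int notempty(void *a)
{
	struct queue *q = a;

	/* Wake if there is data to read, or if the queue was closed. */
	return (q->state & Qclosed) || q->bfirst != NULL;
}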
static void dbgproc(void *unused)
{
	Dbgkey *dp;

	setpri(PriRealtime);
	for (;;) {
		do {
			rendez_sleep(&dbg, dbgwork, 0);
			dp = dbg.work;
		} while (dp == NULL);
		dp->f(dp->r);
		dbg.work = NULL;
	}
}
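/* dbgproc sleeps on the dbg rendez with the dbgwork predicate, which is not
 * part of this excerpt.  A plausible sketch, assuming dbg.work is the
 * pending-key slot that dbgproc consumes above (the rendez arg is unused,
 * since 0 is passed to rendez_sleep): */
static int dbgwork(void *unused)
{
	/* Wake the debug kproc once a key handler has been posted. */
	return dbg.work != NULL;
}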
long netlogread(struct Fs *f, void *a, uint32_t unused, long n)
{
	ERRSTACK(1);
	int i, d;
	char *p, *rptr;

	qlock(&f->alog->qlock);
	if (waserror()) {
		qunlock(&f->alog->qlock);
		nexterror();
	}

	for (;;) {
		spin_lock(&f->alog->lock);
		if (f->alog->len) {
			if (n > f->alog->len)
				n = f->alog->len;
			d = 0;
			rptr = f->alog->rptr;
			f->alog->rptr += n;
			if (f->alog->rptr >= f->alog->end) {
				d = f->alog->rptr - f->alog->end;
				f->alog->rptr = f->alog->buf + d;
			}
			f->alog->len -= n;
			spin_unlock(&f->alog->lock);

			i = n - d;
			p = a;
			memmove(p, rptr, i);
			memmove(p + i, f->alog->buf, d);
			break;
		} else
			spin_unlock(&f->alog->lock);
		rendez_sleep(&f->alog->r, netlogready, f);
	}

	qunlock(&f->alog->qlock);
	poperror();

	return n;
}
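/* netlogread sleeps on alog->r until netlogready reports buffered log data.
 * The predicate is not shown in this excerpt; a minimal sketch, assuming the
 * same alog->len field the reader checks under alog->lock above: */
static int netlogready(void *a)
{
	struct Fs *f = a;

	/* Wake the reader once the ring buffer holds any bytes.  The reader
	 * re-checks len under alog->lock, so a racy read here is fine. */
	return f->alog->len;
}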
static void rxmitproc(void *v)
{
	ERRSTACK(2);
	struct arp *arp = v;
	uint64_t wakeupat;

	arp->rxmitp = current;
	if (waserror()) {
		arp->rxmitp = 0;
		poperror();
		warn("arp rxmit ktask exited");
		return;
	}
	for (;;) {
		wakeupat = rxmitsols(arp);
		if (wakeupat == 0)
			rendez_sleep(&arp->rxmtq, rxready, v);
		else if (wakeupat > ReTransTimer / 4)
			kthread_usleep(wakeupat * 1000);
	}
	poperror();
}
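/* rxmitproc sleeps on arp->rxmtq when rxmitsols finds nothing to retransmit.
 * rxready is not shown in this excerpt; a hedged sketch, assuming arp keeps a
 * list of pending solicitations that rxmitsols drains (rxmt is a hypothetical
 * field name for the head of that list): */
static int rxready(void *v)
{
	struct arp *arp = v;

	/* Wake the retransmit kproc once a solicitation is queued. */
	return arp->rxmt != NULL;
}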
/* Adds block (which can be a list of blocks) to the queue, subject to
 * qio_flags.  Returns the length written on success or -1 on non-throwable
 * error.  Adjust qio_flags to control the value-added features! */
static ssize_t __qbwrite(struct queue *q, struct block *b, int qio_flags)
{
	ssize_t ret;
	bool dowakeup = FALSE;
	bool was_empty;

	if (q->bypass) {
		ret = blocklen(b);
		(*q->bypass)(q->arg, b);
		return ret;
	}
	spin_lock_irqsave(&q->lock);
	was_empty = q->len == 0;
	if (q->state & Qclosed) {
		spin_unlock_irqsave(&q->lock);
		freeblist(b);
		if (!(qio_flags & QIO_CAN_ERR_SLEEP))
			return -1;
		if (q->err[0])
			error(EFAIL, q->err);
		else
			error(EFAIL, "connection closed");
	}
	if ((qio_flags & QIO_LIMIT) && (q->len >= q->limit)) {
		/* drop overflow takes priority over regular non-blocking */
		if ((qio_flags & QIO_DROP_OVERFLOW) ||
		    (q->state & Qdropoverflow)) {
			spin_unlock_irqsave(&q->lock);
			freeb(b);
			return -1;
		}
		/* People shouldn't set NON_BLOCK without CAN_ERR, but we can
		 * be nice and catch it. */
		if ((qio_flags & QIO_CAN_ERR_SLEEP) &&
		    (qio_flags & QIO_NON_BLOCK)) {
			spin_unlock_irqsave(&q->lock);
			freeb(b);
			error(EAGAIN, "queue full");
		}
	}
	ret = enqueue_blist(q, b);
	QDEBUG checkb(b, "__qbwrite");
	/* make sure other end gets awakened */
	if (q->state & Qstarve) {
		q->state &= ~Qstarve;
		dowakeup = TRUE;
	}
	spin_unlock_irqsave(&q->lock);
	/* TODO: not sure if the usage of a kick is mutually exclusive with a
	 * wakeup, meaning that actual users either want a kick or have
	 * qreaders. */
	if (q->kick && (dowakeup || (q->state & Qkick)))
		q->kick(q->arg);
	if (dowakeup)
		rendez_wakeup(&q->rr);
	if (was_empty)
		qwake_cb(q, FDTAP_FILT_READABLE);
	/*
	 * flow control, wait for queue to get below the limit
	 * before allowing the process to continue and queue
	 * more.  We do this here so that postnote can only
	 * interrupt us after the data has been queued.  This
	 * means that things like 9p flushes and ssl messages
	 * will not be disrupted by software interrupts.
	 *
	 * Note - this is moderately dangerous since a process
	 * that keeps getting interrupted and rewriting will
	 * queue infinite crud.
	 */
	if ((qio_flags & QIO_CAN_ERR_SLEEP) &&
	    !(q->state & Qdropoverflow) && !(qio_flags & QIO_NON_BLOCK)) {
		/* This is a racy peek at the q status.  If we accidentally
		 * block, we set Qflow, so someone should wake us.  If we
		 * accidentally don't block, we just returned to the user and
		 * let them slip a block past flow control. */
		while (!qnotfull(q)) {
			spin_lock_irqsave(&q->lock);
			q->state |= Qflow;
			spin_unlock_irqsave(&q->lock);
			rendez_sleep(&q->wr, qnotfull, q);
		}
	}
	return ret;
}
/*
 * add a block to a queue obeying flow control
 */
long qbwrite(struct queue *q, struct block *b)
{
	ERRSTACK(1);
	int n, dowakeup;
	volatile bool should_free_b = TRUE;

	n = BLEN(b);

	if (q->bypass) {
		(*q->bypass)(q->arg, b);
		return n;
	}

	dowakeup = 0;
	qlock(&q->wlock);
	if (waserror()) {
		if (b != NULL && should_free_b)
			freeb(b);
		qunlock(&q->wlock);
		nexterror();
	}

	spin_lock_irqsave(&q->lock);

	/* give up if the queue is closed */
	if (q->state & Qclosed) {
		spin_unlock_irqsave(&q->lock);
		error(EFAIL, q->err);
	}

	/* if nonblocking, don't queue over the limit */
	if (q->len >= q->limit) {
		/* drop overflow takes priority over regular non-blocking */
		if (q->state & Qdropoverflow) {
			spin_unlock_irqsave(&q->lock);
			freeb(b);
			dropcnt += n;
			qunlock(&q->wlock);
			poperror();
			return n;
		}
		if (q->state & Qnonblock) {
			spin_unlock_irqsave(&q->lock);
			freeb(b);
			error(EAGAIN, "queue full");
		}
	}

	/* queue the block */
	should_free_b = FALSE;
	if (q->bfirst)
		q->blast->next = b;
	else
		q->bfirst = b;
	q->blast = b;
	b->next = 0;
	q->len += BALLOC(b);
	q->dlen += n;
	QDEBUG checkb(b, "qbwrite");
	b = NULL;

	/* make sure other end gets awakened */
	if (q->state & Qstarve) {
		q->state &= ~Qstarve;
		dowakeup = 1;
	}
	spin_unlock_irqsave(&q->lock);

	/* get output going again */
	if (q->kick && (dowakeup || (q->state & Qkick)))
		q->kick(q->arg);

	/* wakeup anyone consuming at the other end */
	if (dowakeup) {
		rendez_wakeup(&q->rr);
		qwake_cb(q, FDTAP_FILT_READABLE);
	}

	/*
	 * flow control, wait for queue to get below the limit
	 * before allowing the process to continue and queue
	 * more.  We do this here so that postnote can only
	 * interrupt us after the data has been queued.  This
	 * means that things like 9p flushes and ssl messages
	 * will not be disrupted by software interrupts.
	 *
	 * Note - this is moderately dangerous since a process
	 * that keeps getting interrupted and rewriting will
	 * queue infinite crud.
	 */
	for (;;) {
		if ((q->state & (Qdropoverflow | Qnonblock)) || qnotfull(q))
			break;

		spin_lock_irqsave(&q->lock);
		q->state |= Qflow;
		spin_unlock_irqsave(&q->lock);
		rendez_sleep(&q->wr, qnotfull, q);
	}

	qunlock(&q->wlock);
	poperror();

	return n;
}
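/* Both writers above block on q->wr with qnotfull, which also serves as the
 * racy peek in their flow-control loops.  The predicate is not part of this
 * excerpt; a minimal sketch, assuming the len/limit fields the writers check
 * and that a closed queue must not leave writers asleep: */
static int qnotfull(void *a)
{
	struct queue *q = a;

	/* Writers may continue once the backlog drops below the limit, or
	 * unconditionally once the queue is closed. */
	return q->len < q->limit || (q->state & Qclosed);
}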