static int ld_virtio_start(struct ld_softc *ld, struct buf *bp) { /* splbio */ struct ld_virtio_softc *sc = device_private(ld->sc_dv); struct virtio_softc *vsc = sc->sc_virtio; struct virtqueue *vq = &sc->sc_vq[0]; struct virtio_blk_req *vr; int r; int isread = (bp->b_flags & B_READ); int slot; if (sc->sc_readonly && !isread) return EIO; r = virtio_enqueue_prep(vsc, vq, &slot); if (r != 0) return r; vr = &sc->sc_reqs[slot]; r = bus_dmamap_load(vsc->sc_dmat, vr->vr_payload, bp->b_data, bp->b_bcount, NULL, ((isread?BUS_DMA_READ:BUS_DMA_WRITE) |BUS_DMA_NOWAIT)); if (r != 0) return r; r = virtio_enqueue_reserve(vsc, vq, slot, vr->vr_payload->dm_nsegs + 2); if (r != 0) { bus_dmamap_unload(vsc->sc_dmat, vr->vr_payload); return r; } vr->vr_bp = bp; vr->vr_hdr.type = isread?VIRTIO_BLK_T_IN:VIRTIO_BLK_T_OUT; vr->vr_hdr.ioprio = 0; vr->vr_hdr.sector = bp->b_rawblkno * sc->sc_ld.sc_secsize / 512; bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts, 0, sizeof(struct virtio_blk_req_hdr), BUS_DMASYNC_PREWRITE); bus_dmamap_sync(vsc->sc_dmat, vr->vr_payload, 0, bp->b_bcount, isread?BUS_DMASYNC_PREREAD:BUS_DMASYNC_PREWRITE); bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts, offsetof(struct virtio_blk_req, vr_status), sizeof(uint8_t), BUS_DMASYNC_PREREAD); virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts, 0, sizeof(struct virtio_blk_req_hdr), true); virtio_enqueue(vsc, vq, slot, vr->vr_payload, !isread); virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts, offsetof(struct virtio_blk_req, vr_status), sizeof(uint8_t), false); virtio_enqueue_commit(vsc, vq, slot, true); return 0; }
/*
 * rndsource callback: post a single VIORND_BUFSIZE read on the host
 * entropy virtqueue unless a request is already outstanding.  The
 * `bytes' argument is advisory only; we always ask for a full buffer.
 */
static void
viornd_get(size_t bytes, void *priv)
{
	struct viornd_softc *sc = priv;
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq;
	int slot;

#if VIORND_DEBUG
	aprint_normal("%s: asked for %d bytes of entropy\n", __func__,
	    VIORND_BUFSIZE);
#endif
	mutex_enter(&sc->sc_mutex);
	if (!sc->sc_active) {
		bus_dmamap_sync(vsc->sc_dmat, sc->sc_dmamap, 0,
		    VIORND_BUFSIZE, BUS_DMASYNC_PREREAD);
		if (virtio_enqueue_prep(vsc, vq, &slot) == 0) {
			if (virtio_enqueue_reserve(vsc, vq, slot, 1) == 0) {
				virtio_enqueue(vsc, vq, slot, sc->sc_dmamap,
				    0);
				virtio_enqueue_commit(vsc, vq, slot, 1);
				sc->sc_active = true;
			} else {
				virtio_enqueue_abort(vsc, vq, slot);
			}
		}
	}
	mutex_exit(&sc->sc_mutex);
}
void viomb_deflate(struct viomb_softc *sc) { struct virtio_softc *vsc = (struct virtio_softc *)sc->sc_virtio; struct balloon_req *b; struct vm_page *p; struct virtqueue *vq = &sc->sc_vq[VQ_DEFLATE]; u_int64_t nvpages; int i, slot; nvpages = sc->sc_actual - sc->sc_npages; if (nvpages > PGS_PER_REQ) nvpages = PGS_PER_REQ; b = &sc->sc_req; b->bl_nentries = nvpages; TAILQ_INIT(&b->bl_pglist); for (i = 0; i < nvpages; i++) { p = TAILQ_FIRST(&sc->sc_balloon_pages); if (p == NULL){ b->bl_nentries = i - 1; break; } TAILQ_REMOVE(&sc->sc_balloon_pages, p, pageq); TAILQ_INSERT_TAIL(&b->bl_pglist, p, pageq); b->bl_pages[i] = p->phys_addr / VIRTIO_PAGE_SIZE; } if (virtio_enqueue_prep(vq, &slot)) { printf("%s:virtio_get_slot(def) vq_num %d\n", DEVNAME(sc), vq->vq_num); goto err; } if (virtio_enqueue_reserve(vq, slot, 1)) { printf("%s:virtio_enqueue_reserve() vq_num %d\n", DEVNAME(sc), vq->vq_num); goto err; } bus_dmamap_sync(vsc->sc_dmat, b->bl_dmamap, 0, sizeof(u_int32_t) * nvpages, BUS_DMASYNC_PREWRITE); virtio_enqueue_p(vq, slot, b->bl_dmamap, 0, sizeof(u_int32_t) * nvpages, VRING_READ); if (!(vsc->sc_features & VIRTIO_BALLOON_F_MUST_TELL_HOST)) uvm_pglistfree(&b->bl_pglist); virtio_enqueue_commit(vsc, vq, slot, VRING_NOTIFY); return; err: while ((p = TAILQ_LAST(&b->bl_pglist, pglist))) { TAILQ_REMOVE(&b->bl_pglist, p, pageq); TAILQ_INSERT_HEAD(&sc->sc_balloon_pages, p, pageq); } return; }
/*
 * tty output start routine: drain completed tx buffers, then move as
 * many bytes as possible from the tty output queue into per-slot tx
 * buffers and post them on the tx virtqueue.  Runs at spltty.
 */
void
vioconstart(struct tty *tp)
{
	struct viocon_softc *sc = dev2sc(tp->t_dev);
	struct virtio_softc *vsc;
	struct viocon_port *vp = dev2port(tp->t_dev);
	struct virtqueue *vq;
	u_char *buf;
	int s, cnt, slot, ret, ndone;

	vsc = sc->sc_virtio;
	vq = vp->vp_tx;

	s = spltty();
	/* reap buffers the host has consumed */
	ndone = viocon_tx_drain(vp, vq);
	if (ISSET(tp->t_state, TS_BUSY)) {
		/* stay busy until at least one tx buffer completed */
		if (ndone > 0)
			CLR(tp->t_state, TS_BUSY);
		else
			goto out;
	}
	if (ISSET(tp->t_state, TS_TIMEOUT | TS_TTSTOP))
		goto out;
	if (tp->t_outq.c_cc == 0)
		goto out;
	/*
	 * c_cc > 0 here, so the loop body runs at least once and `ret'
	 * is always assigned before the EAGAIN test below.
	 */
	ndone = 0;

	while (tp->t_outq.c_cc > 0) {
		ret = virtio_enqueue_prep(vq, &slot);
		if (ret == EAGAIN)
			break;
		KASSERT(ret == 0);
		ret = virtio_enqueue_reserve(vq, slot, 1);
		KASSERT(ret == 0);
		/* each slot owns a fixed BUFSIZE window in the tx buffer */
		buf = vp->vp_tx_buf + slot * BUFSIZE;
		cnt = q_to_b(&tp->t_outq, buf, BUFSIZE);
		/*
		 * vp_dmamap covers rx then tx buffers; offset of this tx
		 * slot within the map is (vp_tx_buf - vp_rx_buf) plus the
		 * slot's window.
		 */
		bus_dmamap_sync(vsc->sc_dmat, vp->vp_dmamap,
		    vp->vp_tx_buf - vp->vp_rx_buf + slot * BUFSIZE, cnt,
		    BUS_DMASYNC_PREWRITE);
		virtio_enqueue_p(vq, slot, vp->vp_dmamap,
		    vp->vp_tx_buf - vp->vp_rx_buf + slot * BUFSIZE, cnt, 1);
		/* commit without notify; one notify after the loop */
		virtio_enqueue_commit(vsc, vq, slot, 0);
		ndone++;
	}

	if (ret == EAGAIN)
		SET(tp->t_state, TS_BUSY);

	if (ndone > 0)
		virtio_notify(vsc, vq);
	ttwakeupwr(tp);
out:
	splx(s);
}
void viomb_inflate(struct viomb_softc *sc) { struct virtio_softc *vsc = (struct virtio_softc *)sc->sc_virtio; struct balloon_req *b; struct vm_page *p; struct virtqueue *vq = &sc->sc_vq[VQ_INFLATE]; u_int32_t nvpages; int slot, error, i = 0; nvpages = sc->sc_npages - sc->sc_actual; if (nvpages > PGS_PER_REQ) nvpages = PGS_PER_REQ; b = &sc->sc_req; if ((error = uvm_pglistalloc(nvpages * PAGE_SIZE, 0, dma_constraint.ucr_high, 0, 0, &b->bl_pglist, nvpages, UVM_PLA_NOWAIT))) { printf("%s unable to allocate %u physmem pages," "error %d\n", DEVNAME(sc), nvpages, error); return; } b->bl_nentries = nvpages; TAILQ_FOREACH(p, &b->bl_pglist, pageq) b->bl_pages[i++] = p->phys_addr / VIRTIO_PAGE_SIZE; KASSERT(i == nvpages); if ((virtio_enqueue_prep(vq, &slot)) > 0) { printf("%s:virtio_enqueue_prep() vq_num %d\n", DEVNAME(sc), vq->vq_num); goto err; } if (virtio_enqueue_reserve(vq, slot, 1)) { printf("%s:virtio_enqueue_reserve vq_num %d\n", DEVNAME(sc), vq->vq_num); goto err; } bus_dmamap_sync(vsc->sc_dmat, b->bl_dmamap, 0, sizeof(u_int32_t) * nvpages, BUS_DMASYNC_PREWRITE); virtio_enqueue_p(vq, slot, b->bl_dmamap, 0, sizeof(u_int32_t) * nvpages, VRING_READ); virtio_enqueue_commit(vsc, vq, slot, VRING_NOTIFY); return; err: uvm_pglistfree(&b->bl_pglist); return; }
/*
 * Periodic entropy request: post one VIORND_BUFSIZE read on the
 * entropy virtqueue.  A failed enqueue means the virtqueue is in an
 * impossible state, so panic rather than continue.
 */
void
viornd_tick(void *arg)
{
	struct viornd_softc *sc = arg;
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq;
	int slot;

	bus_dmamap_sync(vsc->sc_dmat, sc->sc_dmamap, 0, VIORND_BUFSIZE,
	    BUS_DMASYNC_PREREAD);

	if (virtio_enqueue_prep(vq, &slot) != 0)
		panic("%s: virtqueue enqueue failed", sc->sc_dev.dv_xname);
	if (virtio_enqueue_reserve(vq, slot, 1) != 0)
		panic("%s: virtqueue enqueue failed", sc->sc_dev.dv_xname);

	virtio_enqueue(vq, slot, sc->sc_dmamap, 0);
	virtio_enqueue_commit(vsc, vq, slot, 1);
}
void viocon_rx_fill(struct viocon_port *vp) { struct virtqueue *vq = vp->vp_rx; struct virtio_softc *vsc = vp->vp_sc->sc_virtio; int r, slot, ndone = 0; while ((r = virtio_enqueue_prep(vq, &slot)) == 0) { if (virtio_enqueue_reserve(vq, slot, 1) != 0) break; bus_dmamap_sync(vsc->sc_dmat, vp->vp_dmamap, slot * BUFSIZE, BUFSIZE, BUS_DMASYNC_PREREAD); virtio_enqueue_p(vq, slot, vp->vp_dmamap, slot * BUFSIZE, BUFSIZE, 0); virtio_enqueue_commit(vsc, vq, slot, 0); ndone++; } KASSERT(r == 0 || r == EAGAIN); if (ndone > 0) virtio_notify(vsc, vq); }
static int ld_virtio_dump(struct ld_softc *ld, void *data, int blkno, int blkcnt) { struct ld_virtio_softc *sc = device_private(ld->sc_dv); struct virtio_softc *vsc = sc->sc_virtio; struct virtqueue *vq = &sc->sc_vq; struct virtio_blk_req *vr; int slot, r; if (sc->sc_readonly) return EIO; r = virtio_enqueue_prep(vsc, vq, &slot); if (r != 0) { if (r == EAGAIN) { /* no free slot; dequeue first */ delay(100); ld_virtio_vq_done(vq); r = virtio_enqueue_prep(vsc, vq, &slot); if (r != 0) return r; } return r; } vr = &sc->sc_reqs[slot]; r = bus_dmamap_load(vsc->sc_dmat, vr->vr_payload, data, blkcnt*ld->sc_secsize, NULL, BUS_DMA_WRITE|BUS_DMA_NOWAIT); if (r != 0) return r; r = virtio_enqueue_reserve(vsc, vq, slot, vr->vr_payload->dm_nsegs + VIRTIO_BLK_MIN_SEGMENTS); if (r != 0) { bus_dmamap_unload(vsc->sc_dmat, vr->vr_payload); return r; } vr->vr_bp = (void*)0xdeadbeef; vr->vr_hdr.type = VIRTIO_BLK_T_OUT; vr->vr_hdr.ioprio = 0; vr->vr_hdr.sector = (daddr_t) blkno * ld->sc_secsize / 512; bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts, 0, sizeof(struct virtio_blk_req_hdr), BUS_DMASYNC_PREWRITE); bus_dmamap_sync(vsc->sc_dmat, vr->vr_payload, 0, blkcnt*ld->sc_secsize, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts, offsetof(struct virtio_blk_req, vr_status), sizeof(uint8_t), BUS_DMASYNC_PREREAD); virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts, 0, sizeof(struct virtio_blk_req_hdr), true); virtio_enqueue(vsc, vq, slot, vr->vr_payload, true); virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts, offsetof(struct virtio_blk_req, vr_status), sizeof(uint8_t), false); virtio_enqueue_commit(vsc, vq, slot, true); for ( ; ; ) { int dslot; r = virtio_dequeue(vsc, vq, &dslot, NULL); if (r != 0) continue; if (dslot != slot) { ld_virtio_vq_done1(sc, vsc, vq, dslot); continue; } else break; } bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts, 0, sizeof(struct virtio_blk_req_hdr), BUS_DMASYNC_POSTWRITE); bus_dmamap_sync(vsc->sc_dmat, vr->vr_payload, 0, blkcnt*ld->sc_secsize, 
BUS_DMASYNC_POSTWRITE); bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts, offsetof(struct virtio_blk_req, vr_status), sizeof(uint8_t), BUS_DMASYNC_POSTREAD); if (vr->vr_status == VIRTIO_BLK_S_OK) r = 0; else r = EIO; virtio_dequeue_commit(vsc, vq, slot); return r; }
/*
 * scsipi adapter request entry point.  Only ADAPTER_REQ_RUN_XFER is
 * handled: build a virtio-scsi command (header / optional data-out /
 * response / optional data-in) on the request virtqueue and, for
 * XS_CTL_POLL transfers, spin on the interrupt handler until it
 * completes or times out (~1s).
 */
static void
vioscsi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t
    request, void *arg)
{
	struct vioscsi_softc *sc =
	    device_private(chan->chan_adapter->adapt_dev);
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	struct vioscsi_req *vr;
	struct virtio_scsi_req_hdr *req;
	struct virtqueue *vq = &sc->sc_vqs[2]; /* request queue */
	int slot, error;

	DPRINTF(("%s: enter\n", __func__));

	if (request != ADAPTER_REQ_RUN_XFER) {
		DPRINTF(("%s: unhandled %d\n", __func__, request));
		return;
	}

	xs = arg;
	periph = xs->xs_periph;

	vr = vioscsi_req_get(sc);
#ifdef DIAGNOSTIC
	/*
	 * This should never happen as we track the resources
	 * in the mid-layer.
	 */
	if (vr == NULL) {
		scsipi_printaddr(xs->xs_periph);
		panic("%s: unable to allocate request\n", __func__);
	}
#endif
	req = &vr->vr_req;
	/* requests are preallocated one-per-slot, so the index is the slot */
	slot = vr - sc->sc_reqs;

	vr->vr_xs = xs;

	/*
	 * "The only supported format for the LUN field is: first byte set to
	 * 1, second byte set to target, third and fourth byte representing a
	 * single level LUN structure, followed by four zero bytes."
	 */
	if (periph->periph_target >= 256 || periph->periph_lun >= 16384) {
		DPRINTF(("%s: bad target %u or lun %u\n", __func__,
		    periph->periph_target, periph->periph_lun));
		goto stuffup;
	}

	req->lun[0] = 1;
	/* scsipi target IDs are shifted by one relative to virtio-scsi */
	req->lun[1] = periph->periph_target - 1;
	req->lun[2] = 0x40 | (periph->periph_lun >> 8);
	req->lun[3] = periph->periph_lun;
	memset(req->lun + 4, 0, 4);
	DPRINTF(("%s: command for %u:%u at slot %d\n", __func__,
	    periph->periph_target - 1, periph->periph_lun, slot));

	if ((size_t)xs->cmdlen > sizeof(req->cdb)) {
		DPRINTF(("%s: bad cmdlen %zu > %zu\n", __func__,
		    (size_t)xs->cmdlen, sizeof(req->cdb)));
		goto stuffup;
	}

	memset(req->cdb, 0, sizeof(req->cdb));
	memcpy(req->cdb, xs->cmd, xs->cmdlen);

	error = bus_dmamap_load(vsc->sc_dmat, vr->vr_data,
	    xs->data, xs->datalen, NULL, XS2DMA(xs));
	switch (error) {
	case 0:
		break;
	case ENOMEM:
	case EAGAIN:
		xs->error = XS_RESOURCE_SHORTAGE;
		goto nomore;
	default:
		aprint_error_dev(sc->sc_dev, "error %d loading DMA map\n",
		    error);
	stuffup:
		xs->error = XS_DRIVER_STUFFUP;
	nomore:
		/*
		 * XXX: free req?
		 * NOTE(review): vr does not appear to be returned to the
		 * pool on this path — confirm against vioscsi_req_put/done.
		 */
		scsipi_done(xs);
		return;
	}

	int nsegs = VIRTIO_SCSI_MIN_SEGMENTS;
	if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
		nsegs += vr->vr_data->dm_nsegs;

	error = virtio_enqueue_reserve(vsc, vq, slot, nsegs);
	if (error) {
		DPRINTF(("%s: error reserving %d\n", __func__, error));
		/*
		 * Fix: the data DMA map was successfully loaded above and
		 * must be unloaded before bailing out, or the map leaks
		 * and a later reuse of this request loads over it.
		 */
		bus_dmamap_unload(vsc->sc_dmat, vr->vr_data);
		goto stuffup;
	}

	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_PREREAD);
	if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
		bus_dmamap_sync(vsc->sc_dmat, vr->vr_data, 0, xs->datalen,
		    XS2DMAPRE(xs));

	/* descriptor order: header, [data-out], response, [data-in] */
	virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr), 1);
	if (xs->xs_control & XS_CTL_DATA_OUT)
		virtio_enqueue(vsc, vq, slot, vr->vr_data, 1);
	virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr), 0);
	if (xs->xs_control & XS_CTL_DATA_IN)
		virtio_enqueue(vsc, vq, slot, vr->vr_data, 0);
	virtio_enqueue_commit(vsc, vq, slot, 1);

	if ((xs->xs_control & XS_CTL_POLL) == 0)
		return;

	DPRINTF(("%s: polling...\n", __func__));
	// XXX: do this better.
	int timeout = 1000;
	do {
		(*vsc->sc_intrhand)(vsc);
		if (vr->vr_xs != xs)
			break;
		delay(1000);
	} while (--timeout > 0);

	if (vr->vr_xs == xs) {
		// XXX: Abort!
		xs->error = XS_TIMEOUT;
		xs->resid = xs->datalen;
		DPRINTF(("%s: polling timeout\n", __func__));
		scsipi_done(xs);
	}
	DPRINTF(("%s: done (timeout=%d)\n", __func__, timeout));
}