static int
ld_virtio_start(struct ld_softc *ld, struct buf *bp)
{
	/* splbio */
	struct ld_virtio_softc *sc = device_private(ld->sc_dv);
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq[0];
	struct virtio_blk_req *vr;
	int r;
	int isread = (bp->b_flags & B_READ);
	int slot;

	if (sc->sc_readonly && !isread)
		return EIO;

	r = virtio_enqueue_prep(vsc, vq, &slot);
	if (r != 0)
		return r;
	vr = &sc->sc_reqs[slot];
	r = bus_dmamap_load(vsc->sc_dmat, vr->vr_payload,
	    bp->b_data, bp->b_bcount, NULL,
	    ((isread ? BUS_DMA_READ : BUS_DMA_WRITE) | BUS_DMA_NOWAIT));
	if (r != 0)
		return r;

	/* header and status take one descriptor each, plus the payload */
	r = virtio_enqueue_reserve(vsc, vq, slot,
	    vr->vr_payload->dm_nsegs + 2);
	if (r != 0) {
		bus_dmamap_unload(vsc->sc_dmat, vr->vr_payload);
		return r;
	}

	vr->vr_bp = bp;
	vr->vr_hdr.type = isread ? VIRTIO_BLK_T_IN : VIRTIO_BLK_T_OUT;
	vr->vr_hdr.ioprio = 0;
	vr->vr_hdr.sector = bp->b_rawblkno * sc->sc_ld.sc_secsize / 512;

	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
	    0, sizeof(struct virtio_blk_req_hdr),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_payload,
	    0, bp->b_bcount,
	    isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
	    offsetof(struct virtio_blk_req, vr_status),
	    sizeof(uint8_t),
	    BUS_DMASYNC_PREREAD);

	/* device-readable header, payload, device-writable status byte */
	virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
	    0, sizeof(struct virtio_blk_req_hdr), true);
	virtio_enqueue(vsc, vq, slot, vr->vr_payload, !isread);
	virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
	    offsetof(struct virtio_blk_req, vr_status),
	    sizeof(uint8_t), false);
	virtio_enqueue_commit(vsc, vq, slot, true);

	return 0;
}
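For reference, ld_virtio_start() assumes per-request bookkeeping roughly shaped like the sketch below. The header layout (type, ioprio, sector) is the standard legacy virtio-blk request header; the surrounding fields (vr_cmdsts, vr_payload, vr_bp, vr_status) are inferred from how the function uses them, so take the exact types as an assumption rather than the driver's verbatim definition.

/*
 * Sketch (assumption) of the per-request state used by ld_virtio_start()
 * and ld_virtio_dump().  bus_dmamap_t and struct buf come from the usual
 * kernel headers (<sys/bus.h>, <sys/buf.h>).
 */
struct virtio_blk_req_hdr {
	uint32_t	type;		/* VIRTIO_BLK_T_IN or VIRTIO_BLK_T_OUT */
	uint32_t	ioprio;
	uint64_t	sector;		/* offset in 512-byte units */
} __packed;

struct virtio_blk_req {
	struct virtio_blk_req_hdr vr_hdr;	/* device-readable command */
	uint8_t		vr_status;		/* device-writable completion status */
	struct buf	*vr_bp;			/* originating buffer (driver-private) */
	bus_dmamap_t	vr_cmdsts;		/* DMA map covering vr_hdr and vr_status */
	bus_dmamap_t	vr_payload;		/* DMA map covering bp->b_data */
};

Each request therefore occupies the payload's dm_nsegs descriptors plus two more, one for the read-only header and one for the write-only status byte, which is what the virtio_enqueue_reserve() call above accounts for.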
void
viomb_deflate(struct viomb_softc *sc)
{
	struct virtio_softc *vsc = (struct virtio_softc *)sc->sc_virtio;
	struct balloon_req *b;
	struct vm_page *p;
	struct virtqueue *vq = &sc->sc_vq[VQ_DEFLATE];
	u_int64_t nvpages;
	int i, slot;

	nvpages = sc->sc_actual - sc->sc_npages;
	if (nvpages > PGS_PER_REQ)
		nvpages = PGS_PER_REQ;
	b = &sc->sc_req;
	b->bl_nentries = nvpages;

	TAILQ_INIT(&b->bl_pglist);
	for (i = 0; i < nvpages; i++) {
		p = TAILQ_FIRST(&sc->sc_balloon_pages);
		if (p == NULL) {
			b->bl_nentries = i;
			break;
		}
		TAILQ_REMOVE(&sc->sc_balloon_pages, p, pageq);
		TAILQ_INSERT_TAIL(&b->bl_pglist, p, pageq);
		b->bl_pages[i] = p->phys_addr / VIRTIO_PAGE_SIZE;
	}

	if (virtio_enqueue_prep(vq, &slot)) {
		printf("%s:virtio_get_slot(def) vq_num %d\n",
		    DEVNAME(sc), vq->vq_num);
		goto err;
	}
	if (virtio_enqueue_reserve(vq, slot, 1)) {
		printf("%s:virtio_enqueue_reserve() vq_num %d\n",
		    DEVNAME(sc), vq->vq_num);
		goto err;
	}
	bus_dmamap_sync(vsc->sc_dmat, b->bl_dmamap, 0,
	    sizeof(u_int32_t) * nvpages, BUS_DMASYNC_PREWRITE);
	virtio_enqueue_p(vq, slot, b->bl_dmamap, 0,
	    sizeof(u_int32_t) * nvpages, VRING_READ);

	/* without MUST_TELL_HOST the pages may be reused right away */
	if (!(vsc->sc_features & VIRTIO_BALLOON_F_MUST_TELL_HOST))
		uvm_pglistfree(&b->bl_pglist);

	virtio_enqueue_commit(vsc, vq, slot, VRING_NOTIFY);
	return;
err:
	/* give the pages back to the balloon list on failure */
	while ((p = TAILQ_LAST(&b->bl_pglist, pglist))) {
		TAILQ_REMOVE(&b->bl_pglist, p, pageq);
		TAILQ_INSERT_HEAD(&sc->sc_balloon_pages, p, pageq);
	}
	return;
}
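The balloon request that viomb_deflate() and viomb_inflate() fill in can be pictured as the sketch below. The shape is inferred from the field uses above (bl_nentries, bl_pglist, bl_pages, bl_dmamap); the PGS_PER_REQ value and the exact representation of bl_pages are illustrative assumptions, not the driver's verbatim declarations.

/*
 * Sketch (assumption) of the balloon request bookkeeping used above.
 * bl_pages holds guest page numbers in VIRTIO_PAGE_SIZE units and lives
 * in DMA-safe memory so that bl_dmamap can hand it to the host.
 * struct pglist comes from <uvm/uvm_extern.h>.
 */
#define PGS_PER_REQ	256	/* illustrative cap on page numbers per request */

struct balloon_req {
	bus_dmamap_t	 bl_dmamap;	/* covers the bl_pages array */
	struct pglist	 bl_pglist;	/* pages backing this request */
	int		 bl_nentries;	/* valid entries in bl_pages */
	u_int32_t	*bl_pages;	/* page numbers handed to the host */
};

The single descriptor reserved above is enough because the whole request is just this one array of page numbers, marked VRING_READ so the host only reads it.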
void
vioconstart(struct tty *tp)
{
	struct viocon_softc *sc = dev2sc(tp->t_dev);
	struct virtio_softc *vsc;
	struct viocon_port *vp = dev2port(tp->t_dev);
	struct virtqueue *vq;
	u_char *buf;
	int s, cnt, slot, ret, ndone;

	vsc = sc->sc_virtio;
	vq = vp->vp_tx;

	s = spltty();

	ndone = viocon_tx_drain(vp, vq);
	if (ISSET(tp->t_state, TS_BUSY)) {
		if (ndone > 0)
			CLR(tp->t_state, TS_BUSY);
		else
			goto out;
	}

	if (ISSET(tp->t_state, TS_TIMEOUT | TS_TTSTOP))
		goto out;
	if (tp->t_outq.c_cc == 0)
		goto out;

	ndone = 0;
	while (tp->t_outq.c_cc > 0) {
		ret = virtio_enqueue_prep(vq, &slot);
		if (ret == EAGAIN)
			break;
		KASSERT(ret == 0);
		ret = virtio_enqueue_reserve(vq, slot, 1);
		KASSERT(ret == 0);
		buf = vp->vp_tx_buf + slot * BUFSIZE;
		cnt = q_to_b(&tp->t_outq, buf, BUFSIZE);
		/* vp_dmamap covers both rx and tx buffers; compute the tx offset */
		bus_dmamap_sync(vsc->sc_dmat, vp->vp_dmamap,
		    vp->vp_tx_buf - vp->vp_rx_buf + slot * BUFSIZE, cnt,
		    BUS_DMASYNC_PREWRITE);
		virtio_enqueue_p(vq, slot, vp->vp_dmamap,
		    vp->vp_tx_buf - vp->vp_rx_buf + slot * BUFSIZE, cnt, 1);
		virtio_enqueue_commit(vsc, vq, slot, 0);
		ndone++;
	}
	if (ret == EAGAIN)
		SET(tp->t_state, TS_BUSY);
	if (ndone > 0)
		virtio_notify(vsc, vq);
	ttwakeupwr(tp);
out:
	splx(s);
}
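The offset arithmetic above assumes that a port's single DMA map covers its receive buffers followed by its transmit buffers, one BUFSIZE slot per descriptor; the same assumption explains the plain slot * BUFSIZE offsets in viocon_rx_fill() further below. A minimal sketch of that computation, using a hypothetical helper name (viocon_tx_offset is not part of the driver):

/*
 * Assumed buffer layout inside vp_dmamap: rx buffers start at offset 0,
 * tx buffers follow them, so a tx slot's offset into the map is the
 * distance between the two regions plus slot * BUFSIZE.
 */
static inline bus_size_t
viocon_tx_offset(const struct viocon_port *vp, int slot)
{
	return (bus_size_t)(vp->vp_tx_buf - vp->vp_rx_buf) + slot * BUFSIZE;
}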
void
viomb_inflate(struct viomb_softc *sc)
{
	struct virtio_softc *vsc = (struct virtio_softc *)sc->sc_virtio;
	struct balloon_req *b;
	struct vm_page *p;
	struct virtqueue *vq = &sc->sc_vq[VQ_INFLATE];
	u_int32_t nvpages;
	int slot, error, i = 0;

	nvpages = sc->sc_npages - sc->sc_actual;
	if (nvpages > PGS_PER_REQ)
		nvpages = PGS_PER_REQ;
	b = &sc->sc_req;

	if ((error = uvm_pglistalloc(nvpages * PAGE_SIZE, 0,
	    dma_constraint.ucr_high, 0, 0, &b->bl_pglist, nvpages,
	    UVM_PLA_NOWAIT))) {
		printf("%s unable to allocate %u physmem pages, error %d\n",
		    DEVNAME(sc), nvpages, error);
		return;
	}

	b->bl_nentries = nvpages;
	TAILQ_FOREACH(p, &b->bl_pglist, pageq)
		b->bl_pages[i++] = p->phys_addr / VIRTIO_PAGE_SIZE;
	KASSERT(i == nvpages);

	if (virtio_enqueue_prep(vq, &slot)) {
		printf("%s:virtio_enqueue_prep() vq_num %d\n",
		    DEVNAME(sc), vq->vq_num);
		goto err;
	}
	if (virtio_enqueue_reserve(vq, slot, 1)) {
		printf("%s:virtio_enqueue_reserve vq_num %d\n",
		    DEVNAME(sc), vq->vq_num);
		goto err;
	}
	bus_dmamap_sync(vsc->sc_dmat, b->bl_dmamap, 0,
	    sizeof(u_int32_t) * nvpages, BUS_DMASYNC_PREWRITE);
	virtio_enqueue_p(vq, slot, b->bl_dmamap, 0,
	    sizeof(u_int32_t) * nvpages, VRING_READ);
	virtio_enqueue_commit(vsc, vq, slot, VRING_NOTIFY);
	return;
err:
	uvm_pglistfree(&b->bl_pglist);
	return;
}
void
viocon_rx_fill(struct viocon_port *vp)
{
	struct virtqueue *vq = vp->vp_rx;
	struct virtio_softc *vsc = vp->vp_sc->sc_virtio;
	int r, slot, ndone = 0;

	while ((r = virtio_enqueue_prep(vq, &slot)) == 0) {
		if (virtio_enqueue_reserve(vq, slot, 1) != 0)
			break;
		bus_dmamap_sync(vsc->sc_dmat, vp->vp_dmamap, slot * BUFSIZE,
		    BUFSIZE, BUS_DMASYNC_PREREAD);
		virtio_enqueue_p(vq, slot, vp->vp_dmamap, slot * BUFSIZE,
		    BUFSIZE, 0);
		virtio_enqueue_commit(vsc, vq, slot, 0);
		ndone++;
	}
	KASSERT(r == 0 || r == EAGAIN);
	if (ndone > 0)
		virtio_notify(vsc, vq);
}
static int
ld_virtio_dump(struct ld_softc *ld, void *data, int blkno, int blkcnt)
{
	struct ld_virtio_softc *sc = device_private(ld->sc_dv);
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq[0];
	struct virtio_blk_req *vr;
	int slot, r;

	if (sc->sc_readonly)
		return EIO;

	r = virtio_enqueue_prep(vsc, vq, &slot);
	if (r != 0) {
		if (r != EAGAIN)
			return r;
		/* no free slot; dequeue finished requests first, then retry */
		delay(100);
		ld_virtio_vq_done(vq);
		r = virtio_enqueue_prep(vsc, vq, &slot);
		if (r != 0)
			return r;
	}
	vr = &sc->sc_reqs[slot];
	r = bus_dmamap_load(vsc->sc_dmat, vr->vr_payload,
	    data, blkcnt * ld->sc_secsize, NULL,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (r != 0)
		return r;

	r = virtio_enqueue_reserve(vsc, vq, slot,
	    vr->vr_payload->dm_nsegs + VIRTIO_BLK_MIN_SEGMENTS);
	if (r != 0) {
		bus_dmamap_unload(vsc->sc_dmat, vr->vr_payload);
		return r;
	}

	vr->vr_bp = (void *)0xdeadbeef;
	vr->vr_hdr.type = VIRTIO_BLK_T_OUT;
	vr->vr_hdr.ioprio = 0;
	vr->vr_hdr.sector = (daddr_t)blkno * ld->sc_secsize / 512;

	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
	    0, sizeof(struct virtio_blk_req_hdr),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_payload,
	    0, blkcnt * ld->sc_secsize,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
	    offsetof(struct virtio_blk_req, vr_status),
	    sizeof(uint8_t),
	    BUS_DMASYNC_PREREAD);

	virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
	    0, sizeof(struct virtio_blk_req_hdr), true);
	virtio_enqueue(vsc, vq, slot, vr->vr_payload, true);
	virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
	    offsetof(struct virtio_blk_req, vr_status),
	    sizeof(uint8_t), false);
	virtio_enqueue_commit(vsc, vq, slot, true);

	/* dumping is polled: spin until our own request comes back */
	for ( ; ; ) {
		int dslot;

		r = virtio_dequeue(vsc, vq, &dslot, NULL);
		if (r != 0)
			continue;
		if (dslot != slot) {
			ld_virtio_vq_done1(sc, vsc, vq, dslot);
			continue;
		} else
			break;
	}

	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
	    0, sizeof(struct virtio_blk_req_hdr),
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_payload,
	    0, blkcnt * ld->sc_secsize,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
	    offsetof(struct virtio_blk_req, vr_status),
	    sizeof(uint8_t),
	    BUS_DMASYNC_POSTREAD);

	if (vr->vr_status == VIRTIO_BLK_S_OK)
		r = 0;
	else
		r = EIO;

	virtio_dequeue_commit(vsc, vq, slot);

	return r;
}