/*
 * tty(4) t_hwiflow hook for the virtio console.
 *
 * stop != 0: throttle input by masking rx virtqueue interrupts, so the
 * host's completions are left unserviced.
 * stop == 0: unmask rx interrupts again and schedule the rx soft
 * interrupt to drain anything that accumulated while flow was stopped.
 *
 * Always returns 1 to tell the tty layer the request was honored.
 */
int
vioconhwiflow(struct tty *tp, int stop)
{
	struct viocon_port *vp = dev2port(tp->t_dev);
	int s;

	s = spltty();
	vp->vp_iflow = stop;
	if (stop == 0) {
		/* Resume: take rx completions again and drain the backlog. */
		virtio_start_vq_intr(vp->vp_sc->sc_virtio, vp->vp_rx);
		softintr_schedule(vp->vp_si);
	} else {
		/* Throttle: stop accepting rx completions from the host. */
		virtio_stop_vq_intr(vp->vp_sc->sc_virtio, vp->vp_rx);
	}
	splx(s);

	return 1;
}
void viornd_attach(struct device *parent, struct device *self, void *aux) { struct viornd_softc *sc = (struct viornd_softc *)self; struct virtio_softc *vsc = (struct virtio_softc *)parent; unsigned int shift; vsc->sc_vqs = &sc->sc_vq; vsc->sc_nvqs = 1; vsc->sc_config_change = 0; if (vsc->sc_child != NULL) panic("already attached to something else"); vsc->sc_child = self; vsc->sc_ipl = IPL_NET; vsc->sc_intrhand = virtio_vq_intr; sc->sc_virtio = vsc; virtio_negotiate_features(vsc, 0, NULL); if (sc->sc_dev.dv_cfdata->cf_flags & VIORND_ONESHOT) { sc->sc_interval = 0; } else { shift = VIORND_INTERVAL_SHIFT(sc->sc_dev.dv_cfdata->cf_flags); if (shift == 0) shift = VIORND_INTERVAL_SHIFT_DEFAULT; sc->sc_interval = 15 * (1 << shift); } #if VIORND_DEBUG printf(": request interval: %us\n", sc->sc_interval); #endif sc->sc_buf = dma_alloc(VIORND_BUFSIZE, PR_NOWAIT|PR_ZERO); if (sc->sc_buf == NULL) { printf(": Can't alloc dma buffer\n"); goto err; } if (bus_dmamap_create(sc->sc_virtio->sc_dmat, VIORND_BUFSIZE, 1, VIORND_BUFSIZE, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &sc->sc_dmamap)) { printf(": Can't alloc dmamap\n"); goto err; } if (bus_dmamap_load(sc->sc_virtio->sc_dmat, sc->sc_dmamap, sc->sc_buf, VIORND_BUFSIZE, NULL, BUS_DMA_NOWAIT|BUS_DMA_READ)) { printf(": Can't load dmamap\n"); goto err2; } if (virtio_alloc_vq(vsc, &sc->sc_vq, 0, VIORND_BUFSIZE, 1, "Entropy request") != 0) { printf(": Can't alloc virtqueue\n"); goto err2; } sc->sc_vq.vq_done = viornd_vq_done; virtio_start_vq_intr(vsc, &sc->sc_vq); timeout_set(&sc->sc_tick, viornd_tick, sc); timeout_add(&sc->sc_tick, 1); printf("\n"); return; err2: bus_dmamap_destroy(vsc->sc_dmat, sc->sc_dmamap); err: vsc->sc_child = VIRTIO_CHILD_ERROR; if (sc->sc_buf != NULL) { dma_free(sc->sc_buf, VIORND_BUFSIZE); sc->sc_buf = NULL; } return; }
void viomb_attach(struct device *parent, struct device *self, void *aux) { struct viomb_softc *sc = (struct viomb_softc *)self; struct virtio_softc *vsc = (struct virtio_softc *)parent; u_int32_t features; int i; if (vsc->sc_child != NULL) { printf("child already attached for %s; something wrong...\n", parent->dv_xname); return; } /* fail on non-4K page size archs */ if (VIRTIO_PAGE_SIZE != PAGE_SIZE){ printf("non-4K page size arch found, needs %d, got %d\n", VIRTIO_PAGE_SIZE, PAGE_SIZE); return; } sc->sc_virtio = vsc; vsc->sc_vqs = &sc->sc_vq[VQ_INFLATE]; vsc->sc_nvqs = 0; vsc->sc_child = self; vsc->sc_ipl = IPL_BIO; vsc->sc_config_change = viomb_config_change; vsc->sc_intrhand = virtio_vq_intr; /* negotiate features */ features = VIRTIO_F_RING_INDIRECT_DESC; features = virtio_negotiate_features(vsc, features, viomb_feature_names); if ((virtio_alloc_vq(vsc, &sc->sc_vq[VQ_INFLATE], VQ_INFLATE, sizeof(u_int32_t) * PGS_PER_REQ, 1, "inflate") != 0)) goto err; vsc->sc_nvqs++; if ((virtio_alloc_vq(vsc, &sc->sc_vq[VQ_DEFLATE], VQ_DEFLATE, sizeof(u_int32_t) * PGS_PER_REQ, 1, "deflate") != 0)) goto err; vsc->sc_nvqs++; sc->sc_vq[VQ_INFLATE].vq_done = viomb_inflate_intr; sc->sc_vq[VQ_DEFLATE].vq_done = viomb_deflate_intr; virtio_start_vq_intr(vsc, &sc->sc_vq[VQ_INFLATE]); virtio_start_vq_intr(vsc, &sc->sc_vq[VQ_DEFLATE]); viomb_read_config(sc); TAILQ_INIT(&sc->sc_balloon_pages); if ((sc->sc_req.bl_pages = dma_alloc(sizeof(u_int32_t) * PGS_PER_REQ, PR_NOWAIT|PR_ZERO)) == NULL) { printf("%s: Can't alloc DMA memory.\n", DEVNAME(sc)); goto err; } if (bus_dmamap_create(vsc->sc_dmat, sizeof(u_int32_t) * PGS_PER_REQ, 1, sizeof(u_int32_t) * PGS_PER_REQ, 0, BUS_DMA_NOWAIT, &sc->sc_req.bl_dmamap)) { printf("%s: dmamap creation failed.\n", DEVNAME(sc)); goto err; } if (bus_dmamap_load(vsc->sc_dmat, sc->sc_req.bl_dmamap, &sc->sc_req.bl_pages[0], sizeof(uint32_t) * PGS_PER_REQ, NULL, BUS_DMA_NOWAIT)) { printf("%s: dmamap load failed.\n", DEVNAME(sc)); goto err_dmamap; } sc->sc_taskq = 
taskq_create("viomb", 1, IPL_BIO); if (sc->sc_taskq == NULL) goto err_dmamap; task_set(&sc->sc_task, viomb_worker, sc, NULL); printf("\n"); return; err_dmamap: bus_dmamap_destroy(vsc->sc_dmat, sc->sc_req.bl_dmamap); err: if (sc->sc_req.bl_pages) dma_free(sc->sc_req.bl_pages, sizeof(u_int32_t) * PGS_PER_REQ); for (i = 0; i < vsc->sc_nvqs; i++) virtio_free_vq(vsc, &sc->sc_vq[i]); vsc->sc_nvqs = 0; vsc->sc_child = VIRTIO_CHILD_ERROR; return; }
/*
 * Create and wire up one virtio console port: allocate its rx/tx
 * virtqueues, a single DMA region holding all rx and tx buffers, and the
 * backing tty.  Returns 0 on success, ENOMEM if the port softc cannot be
 * allocated; any later failure panics (see `err:` below).
 */
int
viocon_port_create(struct viocon_softc *sc, int portidx)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	int rxidx, txidx, allocsize, nsegs;
	char name[6];
	struct viocon_port *vp;
	caddr_t kva;
	struct tty *tp;

	vp = malloc(sizeof(*vp), M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO);
	if (vp == NULL)
		return ENOMEM;
	sc->sc_ports[portidx] = vp;
	vp->vp_sc = sc;
	DBGPRINT("vp: %p\n", vp);

	/*
	 * Virtqueue numbering: port 0 uses queues 0/1; queues 2/3 are
	 * skipped (presumably the control queues — TODO confirm against the
	 * virtio console spec), so port N > 0 uses 2*(N+1) and 2*(N+1)+1.
	 */
	if (portidx == 0)
		rxidx = 0;
	else
		rxidx = 2 * (portidx + 1);
	txidx = rxidx + 1;

	snprintf(name, sizeof(name), "p%drx", portidx);
	if (virtio_alloc_vq(vsc, &vsc->sc_vqs[rxidx], rxidx, BUFSIZE, 1,
	    name) != 0) {
		printf("\nCan't alloc %s virtqueue\n", name);
		goto err;
	}
	vp->vp_rx = &vsc->sc_vqs[rxidx];
	vp->vp_rx->vq_done = viocon_rx_intr;
	/* Soft interrupt that feeds received bytes to the tty layer. */
	vp->vp_si = softintr_establish(IPL_TTY, viocon_rx_soft, vp);
	DBGPRINT("rx: %p\n", vp->vp_rx);

	snprintf(name, sizeof(name), "p%dtx", portidx);
	if (virtio_alloc_vq(vsc, &vsc->sc_vqs[txidx], txidx, BUFSIZE, 1,
	    name) != 0) {
		printf("\nCan't alloc %s virtqueue\n", name);
		goto err;
	}
	vp->vp_tx = &vsc->sc_vqs[txidx];
	vp->vp_tx->vq_done = viocon_tx_intr;
	DBGPRINT("tx: %p\n", vp->vp_tx);

	vsc->sc_nvqs += 2;

	/*
	 * One BUFSIZE buffer per rx descriptor and per tx descriptor, all
	 * carved out of a single contiguous DMA allocation.
	 */
	allocsize = (vp->vp_rx->vq_num + vp->vp_tx->vq_num) * BUFSIZE;
	if (bus_dmamap_create(vsc->sc_dmat, allocsize, 1, allocsize, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &vp->vp_dmamap) != 0)
		goto err;
	if (bus_dmamem_alloc(vsc->sc_dmat, allocsize, 8, 0, &vp->vp_dmaseg,
	    1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto err;
	if (bus_dmamem_map(vsc->sc_dmat, &vp->vp_dmaseg, nsegs, allocsize,
	    &kva, BUS_DMA_NOWAIT) != 0)
		goto err;
	if (bus_dmamap_load(vsc->sc_dmat, vp->vp_dmamap, kva, allocsize,
	    NULL, BUS_DMA_NOWAIT) != 0)
		goto err;
	/* rx buffers at the start, tx buffers right after them. */
	vp->vp_rx_buf = (unsigned char *)kva;
	/*
	 * XXX use only a small circular tx buffer instead of many BUFSIZE
	 * buffers?
	 */
	vp->vp_tx_buf = vp->vp_rx_buf + vp->vp_rx->vq_num * BUFSIZE;

	/* Pick up host-provided terminal geometry if offered. */
	if (vsc->sc_features & VIRTIO_CONSOLE_F_SIZE) {
		vp->vp_cols = virtio_read_device_config_2(vsc,
		    VIRTIO_CONSOLE_COLS);
		vp->vp_rows = virtio_read_device_config_2(vsc,
		    VIRTIO_CONSOLE_ROWS);
	}

	tp = ttymalloc(1000000);
	tp->t_oproc = vioconstart;
	tp->t_param = vioconparam;
	tp->t_hwiflow = vioconhwiflow;
	/* Minor number encodes unit in the high bits, port in the low 4. */
	tp->t_dev = (sc->sc_dev.dv_unit << 4) | portidx;
	vp->vp_tty = tp;
	DBGPRINT("tty: %p\n", tp);

	virtio_start_vq_intr(vsc, vp->vp_rx);
	virtio_start_vq_intr(vsc, vp->vp_tx);

	return 0;
err:
	/* XXX no partial teardown: any mid-sequence failure panics. */
	panic("%s failed", __func__);
	return -1;
}
/*
 * viornd(4) attach (NetBSD variant): negotiate features, set up a DMA
 * buffer and the single entropy virtqueue, register as an rnd(4) source,
 * and issue an initial entropy request.
 *
 * Cleanup labels are named for the step that FAILED; each label begins
 * by undoing the last step that succeeded (e.g. `load_failed` destroys
 * the already-created dmamap).
 */
void
viornd_attach(device_t parent, device_t self, void *aux)
{
	struct viornd_softc *sc = device_private(self);
	struct virtio_softc *vsc = device_private(parent);
	bus_dma_segment_t segs[1];
	int nsegs;
	int error;
	uint32_t features;
	char buf[256];

	vsc->sc_vqs = &sc->sc_vq;
	vsc->sc_nvqs = 1;
	vsc->sc_config_change = NULL;
	if (vsc->sc_child != NULL)
		panic("already attached to something else");
	vsc->sc_child = self;
	vsc->sc_ipl = IPL_NET;
	vsc->sc_intrhand = virtio_vq_intr;
	sc->sc_virtio = vsc;
	sc->sc_dev = self;

	features = virtio_negotiate_features(vsc, 0);
	snprintb(buf, sizeof(buf), VIRTIO_COMMON_FLAG_BITS, features);
	aprint_normal(": Features: %s\n", buf);
	aprint_naive("\n");

	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_VM);

	/*
	 * NOTE(review): allocation size is VIRTIO_PAGE_SIZE but the map
	 * below uses VIORND_BUFSIZE — presumably VIORND_BUFSIZE <=
	 * VIRTIO_PAGE_SIZE; confirm the definitions agree.
	 */
	error = bus_dmamem_alloc(vsc->sc_dmat,
	    VIRTIO_PAGE_SIZE, 0, 0, segs, 1, &nsegs,
	    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "can't alloc dmamem: %d\n", error);
		goto alloc_failed;
	}
	error = bus_dmamem_map(vsc->sc_dmat, segs, nsegs, VIORND_BUFSIZE,
	    &sc->sc_buf, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "can't map dmamem: %d\n", error);
		goto map_failed;
	}
	error = bus_dmamap_create(vsc->sc_dmat, VIORND_BUFSIZE, 1,
	    VIORND_BUFSIZE, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
	    &sc->sc_dmamap);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "can't alloc dmamap: %d\n", error);
		goto create_failed;
	}
	error = bus_dmamap_load(vsc->sc_dmat, sc->sc_dmamap,
	    sc->sc_buf, VIORND_BUFSIZE, NULL,
	    BUS_DMA_NOWAIT|BUS_DMA_READ);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "can't load dmamap: %d\n", error);
		goto load_failed;
	}

	error = virtio_alloc_vq(vsc, &sc->sc_vq, 0, VIORND_BUFSIZE, 1,
	    "Entropy request");
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "can't alloc virtqueue: %d\n", error);
		goto vio_failed;
	}

	sc->sc_vq.vq_done = viornd_vq_done;
	virtio_start_vq_intr(vsc, &sc->sc_vq);

	/* Register with rnd(4); viornd_get is the on-demand callback. */
	rndsource_setcb(&sc->sc_rndsource, viornd_get, sc);
	rnd_attach_source(&sc->sc_rndsource, device_xname(sc->sc_dev),
	    RND_TYPE_RNG,
	    RND_FLAG_COLLECT_VALUE|RND_FLAG_HASCB);
	/* Kick off an initial entropy request immediately. */
	viornd_get(VIORND_BUFSIZE, sc);
	return;

vio_failed:
	bus_dmamap_unload(vsc->sc_dmat, sc->sc_dmamap);
load_failed:
	bus_dmamap_destroy(vsc->sc_dmat, sc->sc_dmamap);
create_failed:
	bus_dmamem_unmap(vsc->sc_dmat, sc->sc_buf, VIORND_BUFSIZE);
map_failed:
	bus_dmamem_free(vsc->sc_dmat, segs, nsegs);
alloc_failed:
	vsc->sc_child = (void *)1;	/* XXX bare constant 1 */
	return;
}