/*
 * Negotiate the memory-balloon device's feature set with the host
 * and record the accepted feature bits in the softc.
 */
static void
vtballoon_negotiate_features(struct vtballoon_softc *sc)
{
    device_t dev = sc->vtballoon_dev;

    /* Offer VTBALLOON_FEATURES; keep whatever the host accepts. */
    sc->vtballoon_features = virtio_negotiate_features(dev,
        VTBALLOON_FEATURES);
}
/*
 * Negotiate the entropy device's feature set with the host and
 * store the accepted feature bits in the softc.
 */
static void
vtrnd_negotiate_features(struct vtrnd_softc *sc)
{
    device_t dev = sc->vtrnd_dev;
    uint64_t wanted = VTRND_FEATURES;

    /* Offer VTRND_FEATURES; keep whatever subset the host accepts. */
    sc->vtrnd_features = virtio_negotiate_features(dev, wanted);
}
/*
 * Attach the virtio console device: register as the virtio parent's
 * child, allocate the virtqueue and port arrays, negotiate features,
 * create port 0 and pre-fill its receive queue.
 * On any failure, frees what was allocated and marks the child as
 * errored (VIRTIO_CHILD_ERROR) so the parent knows attach failed.
 */
void
viocon_attach(struct device *parent, struct device *self, void *aux)
{
    struct viocon_softc *sc = (struct viocon_softc *)self;
    struct virtio_softc *vsc = (struct virtio_softc *)parent;
    int maxports = 1;    /* currently only a single console port is supported */

    if (vsc->sc_child)
        panic("already attached to something else");
    /* Hook ourselves up as the virtio transport's child. */
    vsc->sc_child = self;
    vsc->sc_ipl = IPL_TTY;
    vsc->sc_intrhand = virtio_vq_intr;
    vsc->sc_config_change = 0;
    sc->sc_virtio = vsc;
    sc->sc_max_ports = maxports;

    /*
     * Each port needs an rx and a tx queue; the "+ 1" pair is
     * presumably for the control queues — TODO confirm against
     * viocon_port_create().
     */
    vsc->sc_vqs = malloc(2 * (maxports + 1) * sizeof(struct virtqueue),
        M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO);
    sc->sc_ports = malloc(maxports * sizeof(sc->sc_ports[0]), M_DEVBUF,
        M_WAITOK|M_CANFAIL|M_ZERO);
    /* M_CANFAIL allows NULL returns, so both must be checked. */
    if (vsc->sc_vqs == NULL || sc->sc_ports == NULL) {
        printf("\n%s: Cannot allocate memory\n", __func__);
        goto err;
    }

    virtio_negotiate_features(vsc, VIRTIO_CONSOLE_F_SIZE,
        viocon_feature_names);

    printf("\n");
    DBGPRINT("softc: %p\n", sc);
    if (viocon_port_create(sc, 0) != 0) {
        printf("\n%s: viocon_port_create failed\n", __func__);
        goto err;
    }
    /* Queue up receive buffers so the host can send immediately. */
    viocon_rx_fill(sc->sc_ports[0]);

    return;
err:
    /* Signal the parent that attach failed; free partial allocations. */
    vsc->sc_child = VIRTIO_CHILD_ERROR;
    if (vsc->sc_vqs)
        free(vsc->sc_vqs, M_DEVBUF,
            2 * (maxports + 1) * sizeof(struct virtqueue));
    if (sc->sc_ports)
        free(sc->sc_ports, M_DEVBUF, maxports * sizeof(sc->sc_ports[0]));
}
/*
 * Attach the virtio block device as an ld(4) disk: negotiate features,
 * read geometry/size limits from device config space, allocate the I/O
 * virtqueue and request pool, then register with ldattach().
 *
 * Fix: the message printed when clipping an over-large SIZE_MAX said
 * "Clip SEG_MAX" — a copy-paste from the SEG_MAX branch below.  It now
 * correctly names SIZE_MAX.
 */
static void
ld_virtio_attach(device_t parent, device_t self, void *aux)
{
    struct ld_virtio_softc *sc = device_private(self);
    struct ld_softc *ld = &sc->sc_ld;
    struct virtio_softc *vsc = device_private(parent);
    uint32_t features;
    char buf[256];
    int qsize, maxxfersize, maxnsegs;

    if (vsc->sc_child != NULL) {
        aprint_normal(": child already attached for %s; "
            "something wrong...\n", device_xname(parent));
        return;
    }

    sc->sc_dev = self;
    sc->sc_virtio = vsc;

    /* Register as the virtio transport's child, one virtqueue. */
    vsc->sc_child = self;
    vsc->sc_ipl = IPL_BIO;
    vsc->sc_vqs = &sc->sc_vq;
    vsc->sc_nvqs = 1;
    vsc->sc_config_change = NULL;
    vsc->sc_intrhand = virtio_vq_intr;
    vsc->sc_flags = 0;

    features = virtio_negotiate_features(vsc,
        (VIRTIO_BLK_F_SIZE_MAX | VIRTIO_BLK_F_SEG_MAX |
         VIRTIO_BLK_F_GEOMETRY | VIRTIO_BLK_F_RO |
         VIRTIO_BLK_F_BLK_SIZE));
    if (features & VIRTIO_BLK_F_RO)
        sc->sc_readonly = 1;
    else
        sc->sc_readonly = 0;

    snprintb(buf, sizeof(buf), VIRTIO_BLK_FLAG_BITS, features);
    aprint_normal(": Features: %s\n", buf);
    aprint_naive("\n");

    /* Sector size from config space if offered, else the classic 512. */
    if (features & VIRTIO_BLK_F_BLK_SIZE) {
        ld->sc_secsize = virtio_read_device_config_4(vsc,
            VIRTIO_BLK_CONFIG_BLK_SIZE);
    } else
        ld->sc_secsize = 512;

    /* At least genfs_io assumes maxxfer == MAXPHYS. */
    if (features & VIRTIO_BLK_F_SIZE_MAX) {
        maxxfersize = virtio_read_device_config_4(vsc,
            VIRTIO_BLK_CONFIG_SIZE_MAX);
        if (maxxfersize < MAXPHYS) {
            aprint_error_dev(sc->sc_dev,
                "Too small SIZE_MAX %dK minimum is %dK\n",
                maxxfersize / 1024, MAXPHYS / 1024);
            /* Proceed with MAXPHYS anyway rather than failing attach. */
            maxxfersize = MAXPHYS;
        } else if (maxxfersize > MAXPHYS) {
            aprint_normal_dev(sc->sc_dev,
                "Clip SIZE_MAX from %dK to %dK\n",
                maxxfersize / 1024, MAXPHYS / 1024);
            maxxfersize = MAXPHYS;
        }
    } else
        maxxfersize = MAXPHYS;

    /* Device's segment limit, with a sane floor of maxxfersize/NBPG. */
    if (features & VIRTIO_BLK_F_SEG_MAX) {
        maxnsegs = virtio_read_device_config_4(vsc,
            VIRTIO_BLK_CONFIG_SEG_MAX);
        if (maxnsegs < VIRTIO_BLK_MIN_SEGMENTS) {
            aprint_error_dev(sc->sc_dev,
                "Too small SEG_MAX %d minimum is %d\n",
                maxnsegs, VIRTIO_BLK_MIN_SEGMENTS);
            maxnsegs = maxxfersize / NBPG;
        }
    } else
        maxnsegs = maxxfersize / NBPG;

    /* 2 for the minimum size */
    maxnsegs += VIRTIO_BLK_MIN_SEGMENTS;

    if (virtio_alloc_vq(vsc, &sc->sc_vq, 0, maxxfersize, maxnsegs,
        "I/O request") != 0) {
        goto err;
    }
    qsize = sc->sc_vq.vq_num;
    sc->sc_vq.vq_done = ld_virtio_vq_done;

    /* Fill in the ld(4) softc from device config space. */
    ld->sc_dv = self;
    ld->sc_secperunit = virtio_read_device_config_8(vsc,
        VIRTIO_BLK_CONFIG_CAPACITY);
    ld->sc_maxxfer = maxxfersize;
    if (features & VIRTIO_BLK_F_GEOMETRY) {
        ld->sc_ncylinders = virtio_read_device_config_2(vsc,
            VIRTIO_BLK_CONFIG_GEOMETRY_C);
        ld->sc_nheads = virtio_read_device_config_1(vsc,
            VIRTIO_BLK_CONFIG_GEOMETRY_H);
        ld->sc_nsectors = virtio_read_device_config_1(vsc,
            VIRTIO_BLK_CONFIG_GEOMETRY_S);
    }
    ld->sc_maxqueuecnt = qsize;

    if (ld_virtio_alloc_reqs(sc, qsize) < 0)
        goto err;

    ld->sc_dump = ld_virtio_dump;
    ld->sc_flush = NULL;
    ld->sc_start = ld_virtio_start;

    ld->sc_flags = LDF_ENABLED;
    ldattach(ld, BUFQ_DISK_DEFAULT_STRAT);

    return;

err:
    /* XXX bare constant 1 marks the child slot as errored. */
    vsc->sc_child = (void*)1;
    return;
}
/*
 * Attach the virtio block device as an ld(4) disk (older variant):
 * negotiate features, read capacity/geometry from config space,
 * allocate one I/O virtqueue and the request pool, then ldattach().
 */
static void
ld_virtio_attach(device_t parent, device_t self, void *aux)
{
    struct ld_virtio_softc *sc = device_private(self);
    struct ld_softc *ld = &sc->sc_ld;
    struct virtio_softc *vsc = device_private(parent);
    uint32_t features;
    int qsize, maxxfersize;

    if (vsc->sc_child != NULL) {
        aprint_normal(": child already attached for %s; "
            "something wrong...\n", device_xname(parent));
        return;
    }
    aprint_normal("\n");
    aprint_naive("\n");

    sc->sc_dev = self;
    sc->sc_virtio = vsc;

    /* Register as the virtio transport's child, one virtqueue. */
    vsc->sc_child = self;
    vsc->sc_ipl = IPL_BIO;
    vsc->sc_vqs = &sc->sc_vq[0];
    vsc->sc_nvqs = 1;
    vsc->sc_config_change = 0;
    vsc->sc_intrhand = virtio_vq_intr;
    vsc->sc_flags = 0;

    features = virtio_negotiate_features(vsc,
        (VIRTIO_BLK_F_SIZE_MAX | VIRTIO_BLK_F_SEG_MAX |
         VIRTIO_BLK_F_GEOMETRY | VIRTIO_BLK_F_RO |
         VIRTIO_BLK_F_BLK_SIZE));
    if (features & VIRTIO_BLK_F_RO)
        sc->sc_readonly = 1;
    else
        sc->sc_readonly = 0;

    /* Default sector size; overridden from config space if offered. */
    ld->sc_secsize = 512;
    if (features & VIRTIO_BLK_F_BLK_SIZE) {
        ld->sc_secsize = virtio_read_device_config_4(vsc,
            VIRTIO_BLK_CONFIG_BLK_SIZE);
    }
    maxxfersize = MAXPHYS;
#if 0	/* At least genfs_io assumes maxxfer == MAXPHYS. */
    if (features & VIRTIO_BLK_F_SEG_MAX) {
        maxxfersize = virtio_read_device_config_4(vsc,
            VIRTIO_BLK_CONFIG_SEG_MAX) * ld->sc_secsize;
        if (maxxfersize > MAXPHYS)
            maxxfersize = MAXPHYS;
    }
#endif

    /* "+ 2" segments: presumably header and status slots of a
     * block request — TODO confirm against the request layout. */
    if (virtio_alloc_vq(vsc, &sc->sc_vq[0], 0, maxxfersize,
        maxxfersize / NBPG + 2, "I/O request") != 0) {
        goto err;
    }
    qsize = sc->sc_vq[0].vq_num;
    sc->sc_vq[0].vq_done = ld_virtio_vq_done;

    /* Fill in the ld(4) softc from device config space. */
    ld->sc_dv = self;
    ld->sc_secperunit = virtio_read_device_config_8(vsc,
        VIRTIO_BLK_CONFIG_CAPACITY);
    ld->sc_maxxfer = maxxfersize;
    if (features & VIRTIO_BLK_F_GEOMETRY) {
        ld->sc_ncylinders = virtio_read_device_config_2(vsc,
            VIRTIO_BLK_CONFIG_GEOMETRY_C);
        ld->sc_nheads = virtio_read_device_config_1(vsc,
            VIRTIO_BLK_CONFIG_GEOMETRY_H);
        ld->sc_nsectors = virtio_read_device_config_1(vsc,
            VIRTIO_BLK_CONFIG_GEOMETRY_S);
    }
    ld->sc_maxqueuecnt = qsize;

    if (ld_virtio_alloc_reqs(sc, qsize) < 0)
        goto err;

    mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_BIO);

    ld->sc_dump = ld_virtio_dump;
    ld->sc_flush = NULL;
    ld->sc_start = ld_virtio_start;

    ld->sc_flags = LDF_ENABLED;
    ldattach(ld);

    return;

err:
    /* XXX bare constant 1 marks the child slot as errored. */
    vsc->sc_child = (void*)1;
    return;
}
/*
 * Attach the virtio entropy device (OpenBSD): set up the DMA buffer
 * and map, allocate the entropy virtqueue, and start the periodic
 * request timeout.  The request interval is derived from cf_flags
 * unless VIORND_ONESHOT is set.
 */
void
viornd_attach(struct device *parent, struct device *self, void *aux)
{
    struct viornd_softc *sc = (struct viornd_softc *)self;
    struct virtio_softc *vsc = (struct virtio_softc *)parent;
    unsigned int shift;

    /* Register as the virtio transport's child, one virtqueue. */
    vsc->sc_vqs = &sc->sc_vq;
    vsc->sc_nvqs = 1;
    vsc->sc_config_change = 0;
    if (vsc->sc_child != NULL)
        panic("already attached to something else");
    vsc->sc_child = self;
    vsc->sc_ipl = IPL_NET;
    vsc->sc_intrhand = virtio_vq_intr;
    sc->sc_virtio = vsc;

    /* No optional features are requested. */
    virtio_negotiate_features(vsc, 0, NULL);

    if (sc->sc_dev.dv_cfdata->cf_flags & VIORND_ONESHOT) {
        /* interval 0 = request entropy only once */
        sc->sc_interval = 0;
    } else {
        /* Interval is 15 << shift seconds, shift taken from cf_flags. */
        shift = VIORND_INTERVAL_SHIFT(sc->sc_dev.dv_cfdata->cf_flags);
        if (shift == 0)
            shift = VIORND_INTERVAL_SHIFT_DEFAULT;
        sc->sc_interval = 15 * (1 << shift);
    }
#if VIORND_DEBUG
    printf(": request interval: %us\n", sc->sc_interval);
#endif

    /* DMA-able buffer the host writes entropy into (single segment). */
    sc->sc_buf = dma_alloc(VIORND_BUFSIZE, PR_NOWAIT|PR_ZERO);
    if (sc->sc_buf == NULL) {
        printf(": Can't alloc dma buffer\n");
        goto err;
    }
    if (bus_dmamap_create(sc->sc_virtio->sc_dmat, VIORND_BUFSIZE, 1,
        VIORND_BUFSIZE, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
        &sc->sc_dmamap)) {
        printf(": Can't alloc dmamap\n");
        goto err;
    }
    if (bus_dmamap_load(sc->sc_virtio->sc_dmat, sc->sc_dmamap,
        sc->sc_buf, VIORND_BUFSIZE, NULL,
        BUS_DMA_NOWAIT|BUS_DMA_READ)) {
        printf(": Can't load dmamap\n");
        goto err2;
    }

    if (virtio_alloc_vq(vsc, &sc->sc_vq, 0, VIORND_BUFSIZE, 1,
        "Entropy request") != 0) {
        printf(": Can't alloc virtqueue\n");
        goto err2;
    }

    sc->sc_vq.vq_done = viornd_vq_done;
    virtio_start_vq_intr(vsc, &sc->sc_vq);
    /* Kick off the first entropy request almost immediately. */
    timeout_set(&sc->sc_tick, viornd_tick, sc);
    timeout_add(&sc->sc_tick, 1);

    printf("\n");
    return;
err2:
    bus_dmamap_destroy(vsc->sc_dmat, sc->sc_dmamap);
err:
    /* Signal the parent that attach failed; release the buffer. */
    vsc->sc_child = VIRTIO_CHILD_ERROR;
    if (sc->sc_buf != NULL) {
        dma_free(sc->sc_buf, VIORND_BUFSIZE);
        sc->sc_buf = NULL;
    }
    return;
}
/*
 * Attach the virtio memory balloon device (OpenBSD): allocate the
 * inflate and deflate virtqueues, the DMA-able page-list buffer used
 * for balloon requests, and the worker taskq.  On failure, unwinds
 * whatever was set up (sc_nvqs tracks how many vqs to free) and marks
 * the child slot as errored.
 */
void
viomb_attach(struct device *parent, struct device *self, void *aux)
{
    struct viomb_softc *sc = (struct viomb_softc *)self;
    struct virtio_softc *vsc = (struct virtio_softc *)parent;
    u_int32_t features;
    int i;

    if (vsc->sc_child != NULL) {
        printf("child already attached for %s; something wrong...\n",
            parent->dv_xname);
        return;
    }

    /* fail on non-4K page size archs */
    if (VIRTIO_PAGE_SIZE != PAGE_SIZE){
        printf("non-4K page size arch found, needs %d, got %d\n",
            VIRTIO_PAGE_SIZE, PAGE_SIZE);
        return;
    }

    /*
     * Register as the virtio transport's child.  sc_nvqs starts at 0
     * and is bumped as each vq is allocated so the error path frees
     * exactly the ones that exist.
     */
    sc->sc_virtio = vsc;
    vsc->sc_vqs = &sc->sc_vq[VQ_INFLATE];
    vsc->sc_nvqs = 0;
    vsc->sc_child = self;
    vsc->sc_ipl = IPL_BIO;
    vsc->sc_config_change = viomb_config_change;
    vsc->sc_intrhand = virtio_vq_intr;

    /* negotiate features */
    features = VIRTIO_F_RING_INDIRECT_DESC;
    features = virtio_negotiate_features(vsc, features,
        viomb_feature_names);

    /* One request carries up to PGS_PER_REQ page frame numbers. */
    if ((virtio_alloc_vq(vsc, &sc->sc_vq[VQ_INFLATE], VQ_INFLATE,
         sizeof(u_int32_t) * PGS_PER_REQ, 1, "inflate") != 0))
        goto err;
    vsc->sc_nvqs++;
    if ((virtio_alloc_vq(vsc, &sc->sc_vq[VQ_DEFLATE], VQ_DEFLATE,
         sizeof(u_int32_t) * PGS_PER_REQ, 1, "deflate") != 0))
        goto err;
    vsc->sc_nvqs++;

    sc->sc_vq[VQ_INFLATE].vq_done = viomb_inflate_intr;
    sc->sc_vq[VQ_DEFLATE].vq_done = viomb_deflate_intr;
    virtio_start_vq_intr(vsc, &sc->sc_vq[VQ_INFLATE]);
    virtio_start_vq_intr(vsc, &sc->sc_vq[VQ_DEFLATE]);

    viomb_read_config(sc);
    TAILQ_INIT(&sc->sc_balloon_pages);

    /* DMA buffer holding the page-number array handed to the host. */
    if ((sc->sc_req.bl_pages = dma_alloc(sizeof(u_int32_t) * PGS_PER_REQ,
        PR_NOWAIT|PR_ZERO)) == NULL) {
        printf("%s: Can't alloc DMA memory.\n", DEVNAME(sc));
        goto err;
    }
    if (bus_dmamap_create(vsc->sc_dmat, sizeof(u_int32_t) * PGS_PER_REQ,
        1, sizeof(u_int32_t) * PGS_PER_REQ, 0, BUS_DMA_NOWAIT,
        &sc->sc_req.bl_dmamap)) {
        printf("%s: dmamap creation failed.\n", DEVNAME(sc));
        goto err;
    }
    if (bus_dmamap_load(vsc->sc_dmat, sc->sc_req.bl_dmamap,
        &sc->sc_req.bl_pages[0], sizeof(uint32_t) * PGS_PER_REQ,
        NULL, BUS_DMA_NOWAIT)) {
        printf("%s: dmamap load failed.\n", DEVNAME(sc));
        goto err_dmamap;
    }

    /* Single-threaded taskq runs viomb_worker for balloon ops. */
    sc->sc_taskq = taskq_create("viomb", 1, IPL_BIO);
    if (sc->sc_taskq == NULL)
        goto err_dmamap;
    task_set(&sc->sc_task, viomb_worker, sc, NULL);

    printf("\n");
    return;
err_dmamap:
    bus_dmamap_destroy(vsc->sc_dmat, sc->sc_req.bl_dmamap);
err:
    /* Free only what was actually allocated before the failure. */
    if (sc->sc_req.bl_pages)
        dma_free(sc->sc_req.bl_pages, sizeof(u_int32_t) * PGS_PER_REQ);
    for (i = 0; i < vsc->sc_nvqs; i++)
        virtio_free_vq(vsc, &sc->sc_vq[i]);
    vsc->sc_nvqs = 0;
    vsc->sc_child = VIRTIO_CHILD_ERROR;
    return;
}
/*
 * Attach the virtio entropy device (NetBSD): allocate, map, and load a
 * single-segment DMA buffer, allocate the entropy virtqueue, register
 * with the kernel rnd(4) source framework, and issue a first request.
 * Uses cascading goto labels so each failure path unwinds exactly the
 * resources acquired so far, in reverse order.
 */
void
viornd_attach( device_t parent, device_t self, void *aux)
{
    struct viornd_softc *sc = device_private(self);
    struct virtio_softc *vsc = device_private(parent);
    bus_dma_segment_t segs[1];
    int nsegs;
    int error;
    uint32_t features;
    char buf[256];

    /* Register as the virtio transport's child, one virtqueue. */
    vsc->sc_vqs = &sc->sc_vq;
    vsc->sc_nvqs = 1;
    vsc->sc_config_change = NULL;
    if (vsc->sc_child != NULL)
        panic("already attached to something else");
    vsc->sc_child = self;
    vsc->sc_ipl = IPL_NET;
    vsc->sc_intrhand = virtio_vq_intr;
    sc->sc_virtio = vsc;
    sc->sc_dev = self;

    /* No optional features requested; report what was accepted. */
    features = virtio_negotiate_features(vsc, 0);
    snprintb(buf, sizeof(buf), VIRTIO_COMMON_FLAG_BITS, features);
    aprint_normal(": Features: %s\n", buf);
    aprint_naive("\n");

    mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_VM);

    /* Single-segment DMA buffer the host writes entropy into. */
    error = bus_dmamem_alloc(vsc->sc_dmat,
        VIRTIO_PAGE_SIZE, 0, 0, segs, 1, &nsegs,
        BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW);
    if (error) {
        aprint_error_dev(sc->sc_dev, "can't alloc dmamem: %d\n",
            error);
        goto alloc_failed;
    }
    error = bus_dmamem_map(vsc->sc_dmat, segs, nsegs, VIORND_BUFSIZE,
        &sc->sc_buf, BUS_DMA_NOWAIT);
    if (error) {
        aprint_error_dev(sc->sc_dev, "can't map dmamem: %d\n",
            error);
        goto map_failed;
    }
    error = bus_dmamap_create(vsc->sc_dmat, VIORND_BUFSIZE, 1,
        VIORND_BUFSIZE, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
        &sc->sc_dmamap);
    if (error) {
        aprint_error_dev(sc->sc_dev, "can't alloc dmamap: %d\n",
            error);
        goto create_failed;
    }
    error = bus_dmamap_load(vsc->sc_dmat, sc->sc_dmamap,
        sc->sc_buf, VIORND_BUFSIZE, NULL,
        BUS_DMA_NOWAIT|BUS_DMA_READ);
    if (error) {
        aprint_error_dev(sc->sc_dev, "can't load dmamap: %d\n",
            error);
        goto load_failed;
    }

    error = virtio_alloc_vq(vsc, &sc->sc_vq, 0, VIORND_BUFSIZE, 1,
        "Entropy request");
    if (error) {
        aprint_error_dev(sc->sc_dev, "can't alloc virtqueue: %d\n",
            error);
        goto vio_failed;
    }

    sc->sc_vq.vq_done = viornd_vq_done;
    virtio_start_vq_intr(vsc, &sc->sc_vq);

    /* Register with rnd(4); viornd_get is the on-demand callback. */
    rndsource_setcb(&sc->sc_rndsource, viornd_get, sc);
    rnd_attach_source(&sc->sc_rndsource, device_xname(sc->sc_dev),
        RND_TYPE_RNG, RND_FLAG_COLLECT_VALUE|RND_FLAG_HASCB);
    /* Prime the pool with a first request. */
    viornd_get(VIORND_BUFSIZE, sc);
    return;

vio_failed:
    bus_dmamap_unload(vsc->sc_dmat, sc->sc_dmamap);
load_failed:
    bus_dmamap_destroy(vsc->sc_dmat, sc->sc_dmamap);
create_failed:
    bus_dmamem_unmap(vsc->sc_dmat, sc->sc_buf, VIORND_BUFSIZE);
map_failed:
    bus_dmamem_free(vsc->sc_dmat, segs, nsegs);
alloc_failed:
    vsc->sc_child = (void *)1;	/* XXX bare constant 1 */
    return;
}
/*
 * Attach the virtio SCSI host adapter: negotiate features, read the
 * adapter limits from device config space, allocate the virtqueues
 * (control/event/request) and the request pool, then fill in the
 * scsipi adapter/channel and attach the SCSI bus via config_found().
 */
static void
vioscsi_attach(device_t parent, device_t self, void *aux)
{
    struct vioscsi_softc *sc = device_private(self);
    struct virtio_softc *vsc = device_private(parent);
    struct scsipi_adapter *adapt = &sc->sc_adapter;
    struct scsipi_channel *chan = &sc->sc_channel;
    uint32_t features;
    char buf[256];
    int rv;

    if (vsc->sc_child != NULL) {
        aprint_error(": parent %s already has a child\n",
            device_xname(parent));
        return;
    }

    sc->sc_dev = self;

    /* Register as the virtio transport's child with all our vqs. */
    vsc->sc_child = self;
    vsc->sc_ipl = IPL_BIO;
    vsc->sc_vqs = sc->sc_vqs;
    vsc->sc_nvqs = __arraycount(sc->sc_vqs);
    vsc->sc_config_change = NULL;
    vsc->sc_intrhand = virtio_vq_intr;
    vsc->sc_flags = 0;

    /* No optional features requested; report what was accepted. */
    features = virtio_negotiate_features(vsc, 0);
    snprintb(buf, sizeof(buf), VIRTIO_COMMON_FLAG_BITS, features);
    aprint_normal(": Features: %s\n", buf);
    aprint_naive("\n");

    /* Adapter limits published by the device in config space. */
    uint32_t cmd_per_lun = virtio_read_device_config_4(vsc,
        VIRTIO_SCSI_CONFIG_CMD_PER_LUN);
    uint32_t seg_max = virtio_read_device_config_4(vsc,
        VIRTIO_SCSI_CONFIG_SEG_MAX);
    uint16_t max_target = virtio_read_device_config_2(vsc,
        VIRTIO_SCSI_CONFIG_MAX_TARGET);
    uint16_t max_channel = virtio_read_device_config_2(vsc,
        VIRTIO_SCSI_CONFIG_MAX_CHANNEL);
    uint32_t max_lun = virtio_read_device_config_4(vsc,
        VIRTIO_SCSI_CONFIG_MAX_LUN);

    sc->sc_seg_max = seg_max;

    for (size_t i = 0; i < __arraycount(sc->sc_vqs); i++) {
        rv = virtio_alloc_vq(vsc, &sc->sc_vqs[i], i, MAXPHYS,
            1 + howmany(MAXPHYS, NBPG), vioscsi_vq_names[i]);
        if (rv) {
            aprint_error_dev(sc->sc_dev,
                "failed to allocate virtqueue %zu\n", i);
            return;
        }
        sc->sc_vqs[i].vq_done = vioscsi_vq_done;
    }

    /* sc_vqs[2] is the request queue; its depth bounds in-flight I/O. */
    int qsize = sc->sc_vqs[2].vq_num;
    aprint_normal_dev(sc->sc_dev, "qsize %d\n", qsize);
    if (vioscsi_alloc_reqs(sc, vsc, qsize, seg_max))
        return;

    /*
     * Fill in the scsipi_adapter.
     */
    memset(adapt, 0, sizeof(*adapt));
    adapt->adapt_dev = sc->sc_dev;
    adapt->adapt_nchannels = max_channel;
    adapt->adapt_openings = cmd_per_lun;
    adapt->adapt_max_periph = adapt->adapt_openings;
    adapt->adapt_request = vioscsi_scsipi_request;
    adapt->adapt_minphys = minphys;

    /*
     * Fill in the scsipi_channel.
     */
    memset(chan, 0, sizeof(*chan));
    chan->chan_adapter = adapt;
    chan->chan_bustype = &scsi_bustype;
    chan->chan_channel = 0;
    chan->chan_ntargets = max_target;
    chan->chan_nluns = max_lun;
    chan->chan_id = 0;
    chan->chan_flags = SCSIPI_CHAN_NOSETTLE;

    /* Attach the SCSI bus below us. */
    config_found(sc->sc_dev, &sc->sc_channel, scsiprint);
}