vm_object_t
vmm_mmio_alloc(struct vmspace *vmspace, vm_paddr_t gpa, size_t len,
    vm_paddr_t hpa)
{
	int error;
	vm_object_t obj;
	struct sglist *sg;

	sg = sglist_alloc(1, M_WAITOK);
	error = sglist_append_phys(sg, hpa, len);
	KASSERT(error == 0, ("error %d appending physaddr to sglist", error));

	obj = vm_pager_allocate(OBJT_SG, sg, len, VM_PROT_RW, 0, NULL);
	if (obj != NULL) {
		/*
		 * VT-x ignores the MTRR settings when figuring out the
		 * memory type for translations obtained through EPT.
		 *
		 * Therefore we explicitly force the pages provided by
		 * this object to be mapped as uncacheable.
		 */
		VM_OBJECT_WLOCK(obj);
		error = vm_object_set_memattr(obj, VM_MEMATTR_UNCACHEABLE);
		VM_OBJECT_WUNLOCK(obj);
		if (error != KERN_SUCCESS) {
			panic("vmm_mmio_alloc: vm_object_set_memattr error %d",
			    error);
		}
		error = vm_map_find(&vmspace->vm_map, obj, 0, &gpa, len, 0,
		    VMFS_NO_SPACE, VM_PROT_RW, VM_PROT_RW, 0);
		if (error != KERN_SUCCESS) {
			vm_object_deallocate(obj);
			obj = NULL;
		}
	}

	/*
	 * Drop the reference on the sglist.
	 *
	 * If the scatter/gather object was successfully allocated then it
	 * has incremented the reference count on the sglist. Dropping the
	 * initial reference count ensures that the sglist will be freed
	 * when the object is deallocated.
	 *
	 * If the object could not be allocated then we end up freeing the
	 * sglist.
	 */
	sglist_free(sg);

	return (obj);
}
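/*
 * Companion sketch (an addition, not part of the source above): the
 * teardown implied by the sglist reference-counting comment in
 * vmm_mmio_alloc(). vm_map_remove() drops the map's reference on the
 * VM object; deallocating the object releases the SG pager's reference
 * on the sglist, which frees it. The function name and one-call body
 * are assumptions modeled on the allocation path.
 */
void
vmm_mmio_free(struct vmspace *vmspace, vm_paddr_t gpa, size_t len)
{

	vm_map_remove(&vmspace->vm_map, gpa, gpa + len);
}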
static int
vtblk_attach(device_t dev)
{
	struct vtblk_softc *sc;
	struct virtio_blk_config blkcfg;
	int error;

	sc = device_get_softc(dev);
	sc->vtblk_dev = dev;

	VTBLK_LOCK_INIT(sc, device_get_nameunit(dev));

	bioq_init(&sc->vtblk_bioq);
	TAILQ_INIT(&sc->vtblk_req_free);
	TAILQ_INIT(&sc->vtblk_req_ready);

	virtio_set_feature_desc(dev, vtblk_feature_desc);
	vtblk_negotiate_features(sc);

	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
		sc->vtblk_flags |= VTBLK_FLAG_INDIRECT;
	if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
		sc->vtblk_flags |= VTBLK_FLAG_READONLY;
	if (virtio_with_feature(dev, VIRTIO_BLK_F_BARRIER))
		sc->vtblk_flags |= VTBLK_FLAG_BARRIER;

	/* Get local copy of config. */
	virtio_read_device_config(dev, 0, &blkcfg,
	    sizeof(struct virtio_blk_config));

	/*
	 * With the current sglist(9) implementation, it is not easy
	 * for us to support a maximum segment size as adjacent
	 * segments are coalesced. For now, just make sure it's larger
	 * than the maximum supported transfer size.
	 */
	if (virtio_with_feature(dev, VIRTIO_BLK_F_SIZE_MAX)) {
		if (blkcfg.size_max < MAXPHYS) {
			error = ENOTSUP;
			device_printf(dev, "host requires unsupported "
			    "maximum segment size feature\n");
			goto fail;
		}
	}

	sc->vtblk_max_nsegs = vtblk_maximum_segments(sc, &blkcfg);
	if (sc->vtblk_max_nsegs <= VTBLK_MIN_SEGMENTS) {
		error = EINVAL;
		device_printf(dev, "fewer than minimum number of segments "
		    "allowed: %d\n", sc->vtblk_max_nsegs);
		goto fail;
	}

	sc->vtblk_sglist = sglist_alloc(sc->vtblk_max_nsegs, M_NOWAIT);
	if (sc->vtblk_sglist == NULL) {
		error = ENOMEM;
		device_printf(dev, "cannot allocate sglist\n");
		goto fail;
	}

	error = vtblk_alloc_virtqueue(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueue\n");
		goto fail;
	}

	error = vtblk_alloc_requests(sc);
	if (error) {
		device_printf(dev, "cannot preallocate requests\n");
		goto fail;
	}

	vtblk_alloc_disk(sc, &blkcfg);

	TASK_INIT(&sc->vtblk_intr_task, 0, vtblk_intr_task, sc);
	sc->vtblk_tq = taskqueue_create_fast("vtblk_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->vtblk_tq);
	if (sc->vtblk_tq == NULL) {
		error = ENOMEM;
		device_printf(dev, "cannot allocate taskqueue\n");
		goto fail;
	}

	error = virtio_setup_intr(dev, INTR_TYPE_BIO | INTR_ENTROPY);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupt\n");
		goto fail;
	}

	taskqueue_start_threads(&sc->vtblk_tq, 1, PI_DISK, "%s taskq",
	    device_get_nameunit(dev));

	vtblk_create_disk(sc);

	virtqueue_enable_intr(sc->vtblk_vq);

fail:
	if (error)
		vtblk_detach(dev);

	return (error);
}
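/*
 * Sketch of the segment accounting consumed by the attach routine
 * above; this body is an assumption reconstructed from the checks in
 * vtblk_attach(), not necessarily the driver's exact code. Each
 * request needs VTBLK_MIN_SEGMENTS descriptors for the request header
 * and ack byte, plus enough data segments to cover a MAXPHYS-sized
 * transfer, clamped by the host's VIRTIO_BLK_F_SEG_MAX limit when
 * that feature was negotiated.
 */
static int
vtblk_maximum_segments(struct vtblk_softc *sc,
    struct virtio_blk_config *blkcfg)
{
	device_t dev;
	int nsegs;

	dev = sc->vtblk_dev;
	nsegs = VTBLK_MIN_SEGMENTS;

	if (virtio_with_feature(dev, VIRTIO_BLK_F_SEG_MAX)) {
		/* One descriptor per page, plus one for unaligned buffers. */
		nsegs += MIN(blkcfg->seg_max, MAXPHYS / PAGE_SIZE + 1);
		if (sc->vtblk_flags & VTBLK_FLAG_INDIRECT)
			nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
	} else
		nsegs += 1;

	return (nsegs);
}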
static int
vtblk_attach(device_t dev)
{
	struct vtblk_softc *sc;
	struct virtio_blk_config blkcfg;
	int error;

	sc = device_get_softc(dev);
	sc->vtblk_dev = dev;

	lwkt_serialize_init(&sc->vtblk_slz);

	bioq_init(&sc->vtblk_bioq);
	TAILQ_INIT(&sc->vtblk_req_free);
	TAILQ_INIT(&sc->vtblk_req_ready);

	virtio_set_feature_desc(dev, vtblk_feature_desc);
	vtblk_negotiate_features(sc);

	if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
		sc->vtblk_flags |= VTBLK_FLAG_READONLY;
	if (virtio_with_feature(dev, VIRTIO_BLK_F_BARRIER))
		sc->vtblk_flags |= VTBLK_FLAG_BARRIER;
	if (virtio_with_feature(dev, VIRTIO_BLK_F_CONFIG_WCE))
		sc->vtblk_flags |= VTBLK_FLAG_WC_CONFIG;

	vtblk_setup_sysctl(sc);

	/* Get local copy of config. */
	virtio_read_device_config(dev, 0, &blkcfg,
	    sizeof(struct virtio_blk_config));

	/*
	 * With the current sglist(9) implementation, it is not easy
	 * for us to support a maximum segment size as adjacent
	 * segments are coalesced. For now, just make sure it's larger
	 * than the maximum supported transfer size.
	 */
	if (virtio_with_feature(dev, VIRTIO_BLK_F_SIZE_MAX)) {
		if (blkcfg.size_max < MAXPHYS) {
			error = ENOTSUP;
			device_printf(dev, "host requires unsupported "
			    "maximum segment size feature\n");
			goto fail;
		}
	}

	sc->vtblk_max_nsegs = vtblk_maximum_segments(sc, &blkcfg);
	if (sc->vtblk_max_nsegs <= VTBLK_MIN_SEGMENTS) {
		error = EINVAL;
		device_printf(dev, "fewer than minimum number of segments "
		    "allowed: %d\n", sc->vtblk_max_nsegs);
		goto fail;
	}

	/*
	 * Allocate working sglist. The number of segments may be too
	 * large to safely store on the stack.
	 */
	sc->vtblk_sglist = sglist_alloc(sc->vtblk_max_nsegs, M_NOWAIT);
	if (sc->vtblk_sglist == NULL) {
		error = ENOMEM;
		device_printf(dev, "cannot allocate sglist\n");
		goto fail;
	}

	error = vtblk_alloc_virtqueue(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueue\n");
		goto fail;
	}

	error = vtblk_alloc_requests(sc);
	if (error) {
		device_printf(dev, "cannot preallocate requests\n");
		goto fail;
	}

	vtblk_alloc_disk(sc, &blkcfg);

	error = virtio_setup_intr(dev, &sc->vtblk_slz);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupt\n");
		goto fail;
	}

	virtqueue_enable_intr(sc->vtblk_vq);

fail:
	if (error)
		vtblk_detach(dev);

	return (error);
}
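/*
 * Sketch of the sysctl hook called above; the handler name
 * vtblk_write_cache_sysctl and the exact knob are assumptions based
 * on the VTBLK_FLAG_WC_CONFIG (VIRTIO_BLK_F_CONFIG_WCE) handling in
 * this attach routine. When the host lets the guest toggle the write
 * cache through the config space, a writecache_mode sysctl is the
 * natural way to switch between writethrough and writeback.
 */
static void
vtblk_setup_sysctl(struct vtblk_softc *sc)
{
	device_t dev;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;

	dev = sc->vtblk_dev;
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	child = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "writecache_mode",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, vtblk_write_cache_sysctl,
	    "I", "Write cache mode (writethrough (0) or writeback (1))");
}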