Example 1
static int virtioblk_write(struct dev *dev, void *buffer, size_t count, blkno_t blkno, int flags) {
  struct virtioblk *vblk = (struct virtioblk *) dev->privdata;
  struct virtioblk_request req;
  struct scatterlist sg[3];
  int rc;

  // Setup write request
  virtioblk_setup_request(&req, sg, buffer, count);
  req.hdr.type = VIRTIO_BLK_T_OUT;
  req.hdr.ioprio = 0;
  req.hdr.sector = blkno;
  
  // Issue request
  rc = virtio_enqueue(&vblk->vq, sg, 2, 1, &req);
  if (rc < 0) return rc;
  virtio_kick(&vblk->vq);
  
  // Wait for request to complete
  enter_wait(THREAD_WAIT_DEVIO);

  // Check status code
  switch (req.status) {
    case VIRTIO_BLK_S_OK: rc = req.size - 1; break;
    case VIRTIO_BLK_S_UNSUPP: rc = -ENODEV; break;
    case VIRTIO_BLK_S_IOERR: rc = -EIO; break;
    default: rc = -EUNKNOWN; break;
  }

  return rc;
}
Example 2
static int
ld_virtio_start(struct ld_softc *ld, struct buf *bp)
{
	/* splbio */
	struct ld_virtio_softc *sc = device_private(ld->sc_dv);
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq[0];
	struct virtio_blk_req *vr;
	int r;
	int isread = (bp->b_flags & B_READ);
	int slot;

	if (sc->sc_readonly && !isread)
		return EIO;

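	/* Get a free descriptor slot; each slot has a preallocated request and DMA maps. */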
	r = virtio_enqueue_prep(vsc, vq, &slot);
	if (r != 0)
		return r;
	vr = &sc->sc_reqs[slot];
	r = bus_dmamap_load(vsc->sc_dmat, vr->vr_payload,
			    bp->b_data, bp->b_bcount, NULL,
			    ((isread?BUS_DMA_READ:BUS_DMA_WRITE)
			     |BUS_DMA_NOWAIT));
	if (r != 0)
		return r;

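	/* Reserve the payload segments plus two descriptors: request header and status byte. */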
	r = virtio_enqueue_reserve(vsc, vq, slot, vr->vr_payload->dm_nsegs + 2);
	if (r != 0) {
		bus_dmamap_unload(vsc->sc_dmat, vr->vr_payload);
		return r;
	}

	vr->vr_bp = bp;
	vr->vr_hdr.type = isread?VIRTIO_BLK_T_IN:VIRTIO_BLK_T_OUT;
	vr->vr_hdr.ioprio = 0;
	vr->vr_hdr.sector = bp->b_rawblkno * sc->sc_ld.sc_secsize / 512;

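	/* Sync the header for the device, the payload for its transfer direction, and the status byte for readback. */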
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
			0, sizeof(struct virtio_blk_req_hdr),
			BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_payload,
			0, bp->b_bcount,
			isread?BUS_DMASYNC_PREREAD:BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
			offsetof(struct virtio_blk_req, vr_status),
			sizeof(uint8_t),
			BUS_DMASYNC_PREREAD);

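	/* Chain the descriptors: header (device-readable), payload, status byte (device-writable), then notify. */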
	virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
			 0, sizeof(struct virtio_blk_req_hdr),
			 true);
	virtio_enqueue(vsc, vq, slot, vr->vr_payload, !isread);
	virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
			 offsetof(struct virtio_blk_req, vr_status),
			 sizeof(uint8_t),
			 false);
	virtio_enqueue_commit(vsc, vq, slot, true);

	return 0;
}
Example 3
static void
viornd_get(size_t bytes, void *priv)
{
	struct viornd_softc *sc = priv;
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq;
	int slot;

#if VIORND_DEBUG
	aprint_normal("%s: asked for %d bytes of entropy\n", __func__,
		      VIORND_BUFSIZE);
#endif
	mutex_enter(&sc->sc_mutex);

	if (sc->sc_active) {
		goto out;
	}

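	/* Post the buffer as a single device-writable descriptor and notify the device. */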
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_dmamap, 0, VIORND_BUFSIZE,
	    BUS_DMASYNC_PREREAD);
	if (virtio_enqueue_prep(vsc, vq, &slot)) {
		goto out;
	}
	if (virtio_enqueue_reserve(vsc, vq, slot, 1)) {
		virtio_enqueue_abort(vsc, vq, slot);
		goto out;
	}
	virtio_enqueue(vsc, vq, slot, sc->sc_dmamap, 0);
	virtio_enqueue_commit(vsc, vq, slot, 1);
	sc->sc_active = true;
out:
	mutex_exit(&sc->sc_mutex);
}
Example 4
static int install_virtiocon(struct unit *unit) {
  struct virtiocon *vcon;
  int rc;
  int size;
  int i;

  // Setup unit information
  if (!unit) return -ENOSYS;
  unit->vendorname = "VIRTIO";
  unit->productname = "VIRTIO Virtual Console Device";

  // Allocate memory for device
  vcon = kmalloc(sizeof(struct virtiocon));
  if (vcon == NULL) return -ENOMEM;
  memset(vcon, 0, sizeof(struct virtiocon));

  // Initialize virtual device
  rc = virtio_device_init(&vcon->vd, unit, VIRTIO_CON_F_SIZE);
  if (rc < 0) return rc;

  // Get console device configuration
  virtio_get_config(&vcon->vd, &vcon->config, sizeof(vcon->config));

  // Initialize queues for console
  rc = virtio_queue_init(&vcon->input_queue, &vcon->vd, 0, virtiocon_input_callback);
  if (rc < 0) return rc;
  rc = virtio_queue_init(&vcon->output_queue, &vcon->vd, 1, virtiocon_output_callback);
  if (rc < 0) return rc;

  // Fill input queue
  size = virtio_queue_size(&vcon->input_queue);
  for (i = 0; i < size; ++i) {
    struct scatterlist sg[1];
    char *data = kmalloc(PAGESIZE);
    if (!data) return -ENOMEM;
    sg[0].data = data;
    sg[0].size = PAGESIZE;
    virtio_enqueue(&vcon->input_queue, sg, 0, 1, data);
  }
  virtio_kick(&vcon->input_queue);

  // Create device
  vcon->devno = dev_make("vc#", &virtiocon_driver, unit, vcon);
  virtio_setup_complete(&vcon->vd, 1);
  kprintf(KERN_INFO "%s: virtio console, %dx%d, %d ports, feats=%d\n", 
          device(vcon->devno)->name, 
          vcon->config.cols, vcon->config.rows, vcon->config.max_ports, vcon->vd.features);

  return 0;
}
Example 5
void
viornd_tick(void *arg)
{
	struct viornd_softc *sc = arg;
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq;
	int slot;

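	/* Queue the entropy buffer as a device-writable descriptor and notify the device. */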
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_dmamap, 0, VIORND_BUFSIZE,
	    BUS_DMASYNC_PREREAD);
	if (virtio_enqueue_prep(vq, &slot) != 0 ||
	    virtio_enqueue_reserve(vq, slot, 1) != 0) {
		panic("%s: virtqueue enqueue failed", sc->sc_dev.dv_xname);
	}
	virtio_enqueue(vq, slot, sc->sc_dmamap, 0);
	virtio_enqueue_commit(vsc, vq, slot, 1);
}
Example 6
static int virtiocon_write(struct dev *dev, void *buffer, size_t count, blkno_t blkno, int flags) {
  struct virtiocon *vcon = (struct virtiocon *) dev->privdata;
  struct scatterlist sg[1];
  int rc;

  // Issue request
  sg[0].data = buffer;
  sg[0].size = count;
  rc = virtio_enqueue(&vcon->output_queue, sg, 1, 0, self());
  if (rc < 0) return rc;
  virtio_kick(&vcon->output_queue);
  
  // Wait for request to complete
  enter_wait(THREAD_WAIT_DEVIO);

  return count;
}
Example 7
static int
ld_virtio_dump(struct ld_softc *ld, void *data, int blkno, int blkcnt)
{
	struct ld_virtio_softc *sc = device_private(ld->sc_dv);
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq;
	struct virtio_blk_req *vr;
	int slot, r;

	if (sc->sc_readonly)
		return EIO;

	r = virtio_enqueue_prep(vsc, vq, &slot);
	if (r != 0) {
		if (r == EAGAIN) { /* no free slot; dequeue first */
			delay(100);
			ld_virtio_vq_done(vq);
			r = virtio_enqueue_prep(vsc, vq, &slot);
			if (r != 0)
				return r;
		} else
			return r;
	}
	vr = &sc->sc_reqs[slot];
	r = bus_dmamap_load(vsc->sc_dmat, vr->vr_payload,
			    data, blkcnt*ld->sc_secsize, NULL,
			    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (r != 0)
		return r;

	r = virtio_enqueue_reserve(vsc, vq, slot, vr->vr_payload->dm_nsegs + 
	    VIRTIO_BLK_MIN_SEGMENTS);
	if (r != 0) {
		bus_dmamap_unload(vsc->sc_dmat, vr->vr_payload);
		return r;
	}

	vr->vr_bp = (void*)0xdeadbeef;	/* marker: dump request, no struct buf attached */
	vr->vr_hdr.type = VIRTIO_BLK_T_OUT;
	vr->vr_hdr.ioprio = 0;
	vr->vr_hdr.sector = (daddr_t) blkno * ld->sc_secsize / 512;

	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
			0, sizeof(struct virtio_blk_req_hdr),
			BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_payload,
			0, blkcnt*ld->sc_secsize,
			BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
			offsetof(struct virtio_blk_req, vr_status),
			sizeof(uint8_t),
			BUS_DMASYNC_PREREAD);

	virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
			 0, sizeof(struct virtio_blk_req_hdr),
			 true);
	virtio_enqueue(vsc, vq, slot, vr->vr_payload, true);
	virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
			 offsetof(struct virtio_blk_req, vr_status),
			 sizeof(uint8_t),
			 false);
	virtio_enqueue_commit(vsc, vq, slot, true);

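	/* The dump path polls: dequeue used slots, finishing any requests that are not ours, until ours completes. */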
	for ( ; ; ) {
		int dslot;

		r = virtio_dequeue(vsc, vq, &dslot, NULL);
		if (r != 0)
			continue;
		if (dslot != slot) {
			ld_virtio_vq_done1(sc, vsc, vq, dslot);
			continue;
		} else
			break;
	}
		
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
			0, sizeof(struct virtio_blk_req_hdr),
			BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_payload,
			0, blkcnt*ld->sc_secsize,
			BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
			offsetof(struct virtio_blk_req, vr_status),
			sizeof(uint8_t),
			BUS_DMASYNC_POSTREAD);
	if (vr->vr_status == VIRTIO_BLK_S_OK)
		r = 0;
	else
		r = EIO;
	virtio_dequeue_commit(vsc, vq, slot);

	return r;
}
Example 8
static void
vioscsi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t
    request, void *arg)
{
	struct vioscsi_softc *sc =
	    device_private(chan->chan_adapter->adapt_dev);
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	struct scsipi_xfer *xs; 
	struct scsipi_periph *periph;
	struct vioscsi_req *vr;
	struct virtio_scsi_req_hdr *req;
	struct virtqueue *vq = &sc->sc_vqs[2];
	int slot, error;

	DPRINTF(("%s: enter\n", __func__));

	if (request != ADAPTER_REQ_RUN_XFER) {
		DPRINTF(("%s: unhandled %d\n", __func__, request));
		return;
	}
	
	xs = arg;
	periph = xs->xs_periph;

	vr = vioscsi_req_get(sc);
#ifdef DIAGNOSTIC
	/*
	 * This should never happen as we track the resources
	 * in the mid-layer.
	 */
	if (vr == NULL) {
		scsipi_printaddr(xs->xs_periph);
		panic("%s: unable to allocate request\n", __func__);
	}
#endif
	req = &vr->vr_req;
	slot = vr - sc->sc_reqs;

	vr->vr_xs = xs;

	/*
	 * "The only supported format for the LUN field is: first byte set to
	 * 1, second byte set to target, third and fourth byte representing a
	 * single level LUN structure, followed by four zero bytes."
	 */
	if (periph->periph_target >= 256 || periph->periph_lun >= 16384) {
		DPRINTF(("%s: bad target %u or lun %u\n", __func__,
		    periph->periph_target, periph->periph_lun));
		goto stuffup;
	}
	req->lun[0] = 1;
	req->lun[1] = periph->periph_target - 1;
	req->lun[2] = 0x40 | (periph->periph_lun >> 8);
	req->lun[3] = periph->periph_lun;
	memset(req->lun + 4, 0, 4);
	DPRINTF(("%s: command for %u:%u at slot %d\n", __func__,
	    periph->periph_target - 1, periph->periph_lun, slot));

	if ((size_t)xs->cmdlen > sizeof(req->cdb)) {
		DPRINTF(("%s: bad cmdlen %zu > %zu\n", __func__,
		    (size_t)xs->cmdlen, sizeof(req->cdb)));
		goto stuffup;
	}

	memset(req->cdb, 0, sizeof(req->cdb));
	memcpy(req->cdb, xs->cmd, xs->cmdlen);

	error = bus_dmamap_load(vsc->sc_dmat, vr->vr_data,
	    xs->data, xs->datalen, NULL, XS2DMA(xs));
	switch (error) {
	case 0:
		break;
	case ENOMEM:
	case EAGAIN:
		xs->error = XS_RESOURCE_SHORTAGE;
		goto nomore;
	default:
		aprint_error_dev(sc->sc_dev, "error %d loading DMA map\n",
		    error);
	stuffup:
		xs->error = XS_DRIVER_STUFFUP;
nomore:
		// XXX: free req?
		scsipi_done(xs);
		return;
	}

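	/* Reserve the control descriptors plus the data segments when there is a data phase. */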
	int nsegs = VIRTIO_SCSI_MIN_SEGMENTS;
	if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
		nsegs += vr->vr_data->dm_nsegs;

	error = virtio_enqueue_reserve(vsc, vq, slot, nsegs);
	if (error) {
		DPRINTF(("%s: error reserving %d\n", __func__, error));
		goto stuffup;
	}

	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_PREREAD);
	if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
		bus_dmamap_sync(vsc->sc_dmat, vr->vr_data, 0, xs->datalen,
		    XS2DMAPRE(xs));

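	/* Descriptor order: request header, data-out (if any), response, data-in (if any). */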
	virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr), 1);
	if (xs->xs_control & XS_CTL_DATA_OUT)
		virtio_enqueue(vsc, vq, slot, vr->vr_data, 1);
	virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr), 0);
	if (xs->xs_control & XS_CTL_DATA_IN)
		virtio_enqueue(vsc, vq, slot, vr->vr_data, 0);
	virtio_enqueue_commit(vsc, vq, slot, 1);

	if ((xs->xs_control & XS_CTL_POLL) == 0)
		return;

	DPRINTF(("%s: polling...\n", __func__));
	// XXX: do this better.
	int timeout = 1000;
	do {
		(*vsc->sc_intrhand)(vsc);
		if (vr->vr_xs != xs)
			break;
		delay(1000);
	} while (--timeout > 0);

	if (vr->vr_xs == xs) {
		// XXX: Abort!
		xs->error = XS_TIMEOUT;
		xs->resid = xs->datalen;
		DPRINTF(("%s: polling timeout\n", __func__));
		scsipi_done(xs);
	}
	DPRINTF(("%s: done (timeout=%d)\n", __func__, timeout));
}