Example No. 1
0
/* Transmit path: drain up to @budget descriptor chains from the guest's
 * TX virtqueue and forward each packet to the virtual switch.
 *
 * @port:   transmitting netport (unused here; part of the lazy-xfer
 *          callback signature)
 * @arg:    opaque pointer, actually the owning struct virtio_net_dev
 * @budget: maximum number of descriptor chains to process in this call
 *
 * Fix: guard against an empty iovec list (iov_cnt == 0) before reading
 * iov[0].len — otherwise the subtraction uses stale iov data and can
 * underflow, producing a huge pkt_len.
 */
static void virtio_net_tx_lazy(struct vmm_netport *port, void *arg, int budget)
{
	u16 head = 0;
	u32 iov_cnt = 0, pkt_len = 0, total_len = 0;
	struct virtio_net_dev *ndev = arg;
	struct virtio_device *dev = ndev->vdev;
	struct virtio_queue *vq = &ndev->vqs[VIRTIO_NET_TX_QUEUE];
	struct virtio_iovec *iov = ndev->tx_iov;
	struct vmm_mbuf *mb;

	while ((budget > 0) && virtio_queue_available(vq)) {
		head = virtio_queue_get_iovec(vq, iov, &iov_cnt, &total_len);

		/* iov[0] carries the virtio-net offload header; the packet
		 * payload follows in iov[1..iov_cnt-1].
		 */
		if (iov_cnt) {
			pkt_len = total_len - iov[0].len;

			if (pkt_len <= VIRTIO_NET_MTU) {
				MGETHDR(mb, 0, 0);
				MEXTMALLOC(mb, pkt_len, M_WAIT);
				virtio_iovec_to_buf_read(dev,
							 &iov[1], iov_cnt - 1,
							 M_BUFADDR(mb), pkt_len);
				mb->m_len = mb->m_pktlen = pkt_len;
				vmm_port2switch_xfer_mbuf(ndev->port, mb);
			}
			/* Oversized packets are silently dropped; the chain
			 * is still returned to the guest below.
			 */
		}

		/* Hand the descriptor chain back to the guest. */
		virtio_queue_set_used_elem(vq, head, total_len);

		budget--;
	}

	/* Interrupt the guest only if it requested a notification. */
	if (virtio_queue_should_signal(vq)) {
		dev->tra->notify(dev, VIRTIO_NET_TX_QUEUE);
	}
}
Example No. 2
0
/* Process all pending requests on the virtio-blk I/O virtqueue.
 *
 * Expected descriptor-chain layout per request:
 *   iov[0]           - struct virtio_blk_outhdr (request header)
 *   iov[1..cnt-2]    - data buffers (payload for read/write)
 *   iov[cnt-1]       - one-byte status buffer written back to the guest
 *
 * NOTE(review): iov_cnt >= 2 is assumed throughout; a shorter chain
 * would underflow the (iov_cnt - 1) / (iov_cnt - 2) arithmetic below —
 * confirm the queue helpers guarantee this.
 *
 * @dev:  owning virtio device instance
 * @bdev: virtio block device state
 */
static void virtio_blk_do_io(struct virtio_device *dev,
			     struct virtio_blk_dev *bdev)
{
	u16 head;
	u32 i, iov_cnt, len;
	irq_flags_t flags;
	struct virtio_queue *vq = &bdev->vqs[VIRTIO_BLK_IO_QUEUE];
	struct virtio_blk_dev_req *req;
	struct virtio_blk_outhdr hdr;
	struct vmm_blockdev *blk;

	while (virtio_queue_available(vq)) {
		/* Pop the next descriptor head; the per-head request slot
		 * in bdev->reqs tracks this request until completion.
		 */
		head = virtio_queue_pop(vq);
		req = &bdev->reqs[head];
		head = virtio_queue_get_head_iovec(vq, head, bdev->iov,
						   &iov_cnt, &len);

		/* Reset per-request state before dispatch */
		req->vq = vq;
		req->bdev = bdev;
		req->head = head;
		req->read_iov = NULL;
		req->read_iov_cnt = 0;
		req->len = 0;
		/* Total payload length: everything between header (iov[0])
		 * and the trailing status byte (iov[iov_cnt - 1]).
		 */
		for (i = 1; i < (iov_cnt - 1); i++) {
			req->len += bdev->iov[i].len;
		}
		req->status_iov.addr = bdev->iov[iov_cnt - 1].addr;
		req->status_iov.len = bdev->iov[iov_cnt - 1].len;
		req->r.type = VMM_REQUEST_UNKNOWN;
		req->r.lba = 0;
		req->r.bcnt = 0;
		req->r.data = NULL;
		req->r.completed = virtio_blk_req_completed;
		req->r.failed = virtio_blk_req_failed;
		req->r.priv = req;

		/* Copy the request header out of guest memory */
		len = virtio_iovec_to_buf_read(dev, &bdev->iov[0], 1,
						&hdr, sizeof(hdr));
		if (len < sizeof(hdr)) {
			/* Truncated header: return the chain with zero
			 * bytes used and move on.
			 */
			virtio_queue_set_used_elem(req->vq, req->head, 0);
			continue;
		}

		switch (hdr.type) {
		case VIRTIO_BLK_T_IN:
			/* Guest read: bounce through a host buffer, then
			 * scatter back into the guest iovecs on completion.
			 */
			req->r.type = VMM_REQUEST_READ;
			req->r.lba  = hdr.sector;
			req->r.bcnt = udiv32(req->len, bdev->config.blk_size);
			req->r.data = vmm_malloc(req->len);
			if (!req->r.data) {
				virtio_blk_req_done(req, VIRTIO_BLK_S_IOERR);
				continue;
			}
			/* Snapshot the data iovecs (header and status byte
			 * excluded) for use by the completion callback.
			 */
			len = sizeof(struct virtio_iovec) * (iov_cnt - 2);
			req->read_iov = vmm_malloc(len);
			if (!req->read_iov) {
				/* NOTE(review): presumably virtio_blk_req_done()
				 * frees req->r.data on this path — confirm,
				 * otherwise it leaks here.
				 */
				virtio_blk_req_done(req, VIRTIO_BLK_S_IOERR);
				continue;
			}
			req->read_iov_cnt = iov_cnt - 2;
			for (i = 0; i < req->read_iov_cnt; i++) {
				req->read_iov[i].addr = bdev->iov[i + 1].addr;
				req->read_iov[i].len = bdev->iov[i + 1].len;
			}
			/* Snapshot the backing blockdev pointer under lock;
			 * it may be detached concurrently.
			 */
			vmm_spin_lock_irqsave(&bdev->blk_lock, flags);
			blk = bdev->blk;
			vmm_spin_unlock_irqrestore(&bdev->blk_lock, flags);
			/* Note: We will get failed() or complete() callback
			 * even if blk == NULL
			 */
			vmm_blockdev_submit_request(blk, &req->r);
			break;
		case VIRTIO_BLK_T_OUT:
			/* Guest write: gather guest iovecs into a host
			 * buffer and submit the write.
			 */
			req->r.type = VMM_REQUEST_WRITE;
			req->r.lba  = hdr.sector;
			req->r.bcnt = udiv32(req->len, bdev->config.blk_size);
			req->r.data = vmm_malloc(req->len);
			if (!req->r.data) {
				virtio_blk_req_done(req, VIRTIO_BLK_S_IOERR);
				continue;
			} else {
				virtio_iovec_to_buf_read(dev,
							 &bdev->iov[1],
							 iov_cnt - 2,
							 req->r.data,
							 req->len);
			}
			vmm_spin_lock_irqsave(&bdev->blk_lock, flags);
			blk = bdev->blk;
			vmm_spin_unlock_irqrestore(&bdev->blk_lock, flags);
			/* Note: We will get failed() or complete() callback
			 * even if blk == NULL
			 */
			vmm_blockdev_submit_request(blk, &req->r);
			break;
		case VIRTIO_BLK_T_FLUSH:
			/* Synchronous cache flush; completed inline. */
			req->r.type = VMM_REQUEST_WRITE;
			req->r.lba  = 0;
			req->r.bcnt = 0;
			req->r.data = NULL;
			vmm_spin_lock_irqsave(&bdev->blk_lock, flags);
			blk = bdev->blk;
			vmm_spin_unlock_irqrestore(&bdev->blk_lock, flags);
			if (vmm_blockdev_flush_cache(blk)) {
				virtio_blk_req_done(req, VIRTIO_BLK_S_IOERR);
			} else {
				virtio_blk_req_done(req, VIRTIO_BLK_S_OK);
			}
			break;
		case VIRTIO_BLK_T_GET_ID:
			/* Report the device ID string into the guest's
			 * single data buffer; completed inline.
			 */
			req->len = VIRTIO_BLK_ID_BYTES;
			req->r.type = VMM_REQUEST_READ;
			req->r.lba = 0;
			req->r.bcnt = 0;
			req->r.data = vmm_zalloc(req->len);
			if (!req->r.data) {
				virtio_blk_req_done(req, VIRTIO_BLK_S_IOERR);
				continue;
			}
			req->read_iov = vmm_malloc(sizeof(struct virtio_iovec));
			if (!req->read_iov) {
				virtio_blk_req_done(req, VIRTIO_BLK_S_IOERR);
				continue;
			}
			req->read_iov_cnt = 1;
			req->read_iov[0].addr = bdev->iov[1].addr;
			req->read_iov[0].len = bdev->iov[1].len;
			/* NOTE(review): strncpy does not NUL-terminate when
			 * blk_name >= req->len bytes; the virtio ID field is
			 * fixed-size, so this may be intentional — confirm.
			 */
			strncpy(req->r.data, bdev->blk_name, req->len);
			virtio_blk_req_done(req, VIRTIO_BLK_S_OK);
			break;
		default:
			/* Unknown request type: silently ignored; the chain
			 * is completed via the per-type paths only.
			 */
			break;
		};
	}
}
Example No. 3
0
/* Process all pending requests on the virtio-blk I/O virtqueue
 * (virtual-disk backend variant).
 *
 * Expected descriptor-chain layout per request:
 *   iov[0]           - struct virtio_blk_outhdr (request header)
 *   iov[1..cnt-2]    - data buffers (payload for read/write)
 *   iov[cnt-1]       - one-byte status buffer written back to the guest
 *
 * NOTE(review): iov_cnt >= 2 is assumed throughout; a shorter chain
 * would underflow the (iov_cnt - 1) / (iov_cnt - 2) arithmetic below —
 * confirm the queue helpers guarantee this.
 *
 * @dev:   owning virtio device instance
 * @vbdev: virtio block device state (backed by a vmm_vdisk)
 */
static void virtio_blk_do_io(struct virtio_device *dev,
			     struct virtio_blk_dev *vbdev)
{
	u16 head;
	u32 i, iov_cnt, len;
	struct virtio_queue *vq = &vbdev->vqs[VIRTIO_BLK_IO_QUEUE];
	struct virtio_blk_dev_req *req;
	struct virtio_blk_outhdr hdr;

	while (virtio_queue_available(vq)) {
		/* Pop the next descriptor head; the per-head request slot
		 * in vbdev->reqs tracks this request until completion.
		 */
		head = virtio_queue_pop(vq);
		req = &vbdev->reqs[head];
		head = virtio_queue_get_head_iovec(vq, head, vbdev->iov,
						   &iov_cnt, &len);

		/* Reset per-request state before dispatch */
		req->vq = vq;
		req->head = head;
		req->read_iov = NULL;
		req->read_iov_cnt = 0;
		req->len = 0;
		/* Total payload length: everything between header (iov[0])
		 * and the trailing status byte (iov[iov_cnt - 1]).
		 */
		for (i = 1; i < (iov_cnt - 1); i++) {
			req->len += vbdev->iov[i].len;
		}
		req->status_iov.addr = vbdev->iov[iov_cnt - 1].addr;
		req->status_iov.len = vbdev->iov[iov_cnt - 1].len;
		vmm_vdisk_set_request_type(&req->r, VMM_VDISK_REQUEST_UNKNOWN);

		/* Copy the request header out of guest memory */
		len = virtio_iovec_to_buf_read(dev, &vbdev->iov[0], 1,
						&hdr, sizeof(hdr));
		if (len < sizeof(hdr)) {
			/* Truncated header: return the chain with zero
			 * bytes used and move on.
			 */
			virtio_queue_set_used_elem(req->vq, req->head, 0);
			continue;
		}

		switch (hdr.type) {
		case VIRTIO_BLK_T_IN:
			/* Guest read: bounce through a host buffer, then
			 * scatter back into the guest iovecs on completion.
			 */
			vmm_vdisk_set_request_type(&req->r,
						   VMM_VDISK_REQUEST_READ);
			req->data = vmm_malloc(req->len);
			if (!req->data) {
				virtio_blk_req_done(vbdev, req,
						    VIRTIO_BLK_S_IOERR);
				continue;
			}
			/* Snapshot the data iovecs (header and status byte
			 * excluded) for use by the completion callback.
			 */
			len = sizeof(struct virtio_iovec) * (iov_cnt - 2);
			req->read_iov = vmm_malloc(len);
			if (!req->read_iov) {
				/* NOTE(review): presumably virtio_blk_req_done()
				 * frees req->data on this path — confirm,
				 * otherwise it leaks here.
				 */
				virtio_blk_req_done(vbdev, req,
						    VIRTIO_BLK_S_IOERR);
				continue;
			}
			req->read_iov_cnt = iov_cnt - 2;
			for (i = 0; i < req->read_iov_cnt; i++) {
				req->read_iov[i].addr = vbdev->iov[i + 1].addr;
				req->read_iov[i].len = vbdev->iov[i + 1].len;
			}
			/* NOTE(review): "%ll" lacks a conversion letter
			 * ("%llu"/"%llx" under standard printf) — confirm
			 * the project's printf supports it as written.
			 */
			DPRINTF("%s: VIRTIO_BLK_T_IN dev=%s "
				"hdr.sector=%ll req->len=%d\n",
				__func__, dev->name,
				(u64)hdr.sector, req->len);
			/* Note: We will get failed() or complete() callback
			 * even when no block device attached to virtual disk
			 */
			vmm_vdisk_submit_request(vbdev->vdisk, &req->r,
						 VMM_VDISK_REQUEST_READ,
						 hdr.sector, req->data, req->len);
			break;
		case VIRTIO_BLK_T_OUT:
			/* Guest write: gather guest iovecs into a host
			 * buffer and submit the write.
			 */
			vmm_vdisk_set_request_type(&req->r,
						   VMM_VDISK_REQUEST_WRITE);
			req->data = vmm_malloc(req->len);
			if (!req->data) {
				virtio_blk_req_done(vbdev, req,
						    VIRTIO_BLK_S_IOERR);
				continue;
			} else {
				virtio_iovec_to_buf_read(dev,
							 &vbdev->iov[1],
							 iov_cnt - 2,
							 req->data,
							 req->len);
			}
			/* NOTE(review): "%ll" format — see VIRTIO_BLK_T_IN. */
			DPRINTF("%s: VIRTIO_BLK_T_OUT dev=%s "
				"hdr.sector=%ll req->len=%d\n",
				__func__, dev->name,
				(u64)hdr.sector, req->len);
			/* Note: We will get failed() or complete() callback
			 * even when no block device attached to virtual disk
			 */
			vmm_vdisk_submit_request(vbdev->vdisk, &req->r,
						 VMM_VDISK_REQUEST_WRITE,
						 hdr.sector, req->data, req->len);
			break;
		case VIRTIO_BLK_T_FLUSH:
			/* Synchronous cache flush; completed inline. */
			vmm_vdisk_set_request_type(&req->r,
						   VMM_VDISK_REQUEST_WRITE);
			DPRINTF("%s: VIRTIO_BLK_T_FLUSH dev=%s\n",
				__func__, dev->name);
			if (vmm_vdisk_flush_cache(vbdev->vdisk)) {
				virtio_blk_req_done(vbdev, req,
						    VIRTIO_BLK_S_IOERR);
			} else {
				virtio_blk_req_done(vbdev, req,
						    VIRTIO_BLK_S_OK);
			}
			break;
		case VIRTIO_BLK_T_GET_ID:
			/* Report the current backing device's ID string into
			 * the guest's single data buffer; completed inline.
			 */
			vmm_vdisk_set_request_type(&req->r,
						   VMM_VDISK_REQUEST_READ);
			req->len = VIRTIO_BLK_ID_BYTES;
			req->data = vmm_zalloc(req->len);
			if (!req->data) {
				virtio_blk_req_done(vbdev, req,
						    VIRTIO_BLK_S_IOERR);
				continue;
			}
			req->read_iov = vmm_malloc(sizeof(struct virtio_iovec));
			if (!req->read_iov) {
				virtio_blk_req_done(vbdev, req,
						    VIRTIO_BLK_S_IOERR);
				continue;
			}
			req->read_iov_cnt = 1;
			req->read_iov[0].addr = vbdev->iov[1].addr;
			req->read_iov[0].len = vbdev->iov[1].len;
			DPRINTF("%s: VIRTIO_BLK_T_GET_ID dev=%s req->len=%d\n",
				__func__, dev->name, req->len);
			if (vmm_vdisk_current_block_device(vbdev->vdisk,
							req->data, req->len)) {
				virtio_blk_req_done(vbdev, req,
						    VIRTIO_BLK_S_IOERR);
			} else {
				virtio_blk_req_done(vbdev, req,
						    VIRTIO_BLK_S_OK);
			}
			break;
		default:
			/* Unknown request type: silently ignored; the chain
			 * is completed via the per-type paths only.
			 */
			break;
		};
	}
}