Example #1
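This variant is built on the legacy block layer: guest segments are mapped with ioreq_map(), I/O accounting starts with bdrv_acct_start(), and requests are submitted with bdrv_aio_readv()/bdrv_aio_writev() against blkdev->bs. The extra aio_inflight reference taken before the switch is presumably released by the final qemu_aio_complete(ioreq, 0), so the request only completes once every submitted AIO callback has fired; the separate err_no_map label skips ioreq_unmap() when mapping never happened.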
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
        goto err_no_map;
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_READ);
        ioreq->aio_inflight++;
        bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_WRITE_BARRIER:
        if (!ioreq->req.nr_segments) {
            break;
        }

        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_WRITE);
        ioreq->aio_inflight++;
        bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
                        &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                        qemu_aio_complete, ioreq);
        break;
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    if (ioreq->postsync) {
        bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */
    }
    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_unmap(ioreq);
err_no_map:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}
Example #2
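An older, simpler revision of the same function: there is no I/O accounting, writes and barrier writes are submitted without checking nr_segments, and the single err label only sets BLKIF_RSP_ERROR without unmapping or finishing the request.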
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq_map(ioreq) == -1)
        goto err;

    ioreq->aio_inflight++;
    if (ioreq->presync)
        bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->aio_inflight++;
        bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_WRITE_BARRIER:
        ioreq->aio_inflight++;
        bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
                        &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                        qemu_aio_complete, ioreq);
        break;
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    if (ioreq->postsync)
        bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */
    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}
Example #3
File: xen_disk.c  Project: Mellanox/qemu
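A newer revision that has moved to the BlockBackend API: request data goes through a page-aligned bounce buffer (ioreq->buf), which for writes and flushes is filled up front via ioreq_grant_copy(); accounting uses block_acct_start() on blk_get_stats(blkdev->blk), and I/O is submitted with blk_aio_preadv()/blk_aio_pwritev(). A presync request is handled asynchronously with blk_aio_flush() and returns early, and BLKIF_OP_DISCARD is dispatched through blk_split_discard().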
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    ioreq->buf = qemu_memalign(XC_PAGE_SIZE, ioreq->size);
    if (ioreq->req.nr_segments &&
        (ioreq->req.operation == BLKIF_OP_WRITE ||
         ioreq->req.operation == BLKIF_OP_FLUSH_DISKCACHE) &&
        ioreq_grant_copy(ioreq)) {
        qemu_vfree(ioreq->buf);
        goto err;
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        blk_aio_flush(ioreq->blkdev->blk, qemu_aio_complete, ioreq);
        return 0;
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        qemu_iovec_add(&ioreq->v, ioreq->buf, ioreq->size);
        block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
                         ioreq->v.size, BLOCK_ACCT_READ);
        ioreq->aio_inflight++;
        blk_aio_preadv(blkdev->blk, ioreq->start, &ioreq->v, 0,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }

        qemu_iovec_add(&ioreq->v, ioreq->buf, ioreq->size);
        block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
                         ioreq->v.size,
                         ioreq->req.operation == BLKIF_OP_WRITE ?
                         BLOCK_ACCT_WRITE : BLOCK_ACCT_FLUSH);
        ioreq->aio_inflight++;
        blk_aio_pwritev(blkdev->blk, ioreq->start, &ioreq->v, 0,
                        qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_DISCARD:
    {
        struct blkif_request_discard *req = (void *)&ioreq->req;
        if (!blk_split_discard(ioreq, req->sector_number, req->nr_sectors)) {
            goto err;
        }
        break;
    }
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}