Example #1
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
        goto err_no_map;
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_READ);
        ioreq->aio_inflight++;
        bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_WRITE_BARRIER:
        if (!ioreq->req.nr_segments) {
            break;
        }

        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_WRITE);
        ioreq->aio_inflight++;
        bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
                        &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                        qemu_aio_complete, ioreq);
        break;
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    if (ioreq->postsync) {
        bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */
    }
    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_unmap(ioreq);
err_no_map:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}
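The accounting cookie started above is closed on the completion side. A minimal sketch of qemu_aio_complete, reconstructed from the xen_disk code of this era; the exact error reporting and the placement of bdrv_acct_done() are assumptions:

static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        ioreq->aio_errors++;
    }

    /* Each submitted AIO took a reference; the extra reference taken in
     * ioreq_runio_qemu_aio() is dropped by its qemu_aio_complete(ioreq, 0). */
    ioreq->aio_inflight--;
    if (ioreq->aio_inflight > 0) {
        return;
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
    bdrv_acct_done(ioreq->blkdev->bs, &ioreq->acct);
    qemu_bh_schedule(ioreq->blkdev->bh);
}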
Example #2
static void virtio_blk_handle_write(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
    BlockRequest *blkreq;
    uint64_t sector;

    sector = virtio_ldq_p(VIRTIO_DEVICE(req->dev), &req->out.sector);

    bdrv_acct_start(req->dev->bs, &req->acct, req->qiov.size, BDRV_ACCT_WRITE);

    trace_virtio_blk_handle_write(req, sector, req->qiov.size / 512);

    if (sector & req->dev->sector_mask) {
        virtio_blk_rw_complete(req, -EIO);
        return;
    }
    if (req->qiov.size % req->dev->conf->logical_block_size) {
        virtio_blk_rw_complete(req, -EIO);
        return;
    }

    if (mrb->num_writes == 32) {
        virtio_submit_multiwrite(req->dev->bs, mrb);
    }

    blkreq = &mrb->blkreq[mrb->num_writes];
    blkreq->sector = sector;
    blkreq->nb_sectors = req->qiov.size / BDRV_SECTOR_SIZE;
    blkreq->qiov = &req->qiov;
    blkreq->cb = virtio_blk_rw_complete;
    blkreq->opaque = req;
    blkreq->error = 0;

    mrb->num_writes++;
}
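The MultiReqBuffer-based handler above drains its batch through virtio_submit_multiwrite(). A minimal sketch of that helper, assuming the bdrv_aio_multiwrite() API of this era:

static void virtio_submit_multiwrite(BlockDriverState *bs, MultiReqBuffer *mrb)
{
    int i, ret;

    if (!mrb->num_writes) {
        return;
    }

    ret = bdrv_aio_multiwrite(bs, mrb->blkreq, mrb->num_writes);
    if (ret != 0) {
        /* bdrv_aio_multiwrite() marks failed requests individually;
         * complete each one that carries an error. */
        for (i = 0; i < mrb->num_writes; i++) {
            if (mrb->blkreq[i].error) {
                virtio_blk_rw_complete(mrb->blkreq[i].opaque, -EIO);
            }
        }
    }

    mrb->num_writes = 0;
}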
Example #3
static void virtio_blk_handle_write(BlockRequest *blkreq, int *num_writes,
    VirtIOBlockReq *req, BlockDriverState **old_bs)
{
    trace_virtio_blk_handle_write(req, req->out->sector, req->qiov.size / 512);

    bdrv_acct_start(req->dev->bs, &req->acct, req->qiov.size, BDRV_ACCT_WRITE);

    if (req->out->sector & req->dev->sector_mask) {
        virtio_blk_rw_complete(req, -EIO);
        return;
    }
    if (req->qiov.size % req->dev->conf->logical_block_size) {
        virtio_blk_rw_complete(req, -EIO);
        return;
    }

    if (req->dev->bs != *old_bs || *num_writes == 32) {
        if (*old_bs != NULL) {
            do_multiwrite(*old_bs, blkreq, *num_writes);
        }
        *num_writes = 0;
        *old_bs = req->dev->bs;
    }

    blkreq[*num_writes].sector = req->out->sector;
    blkreq[*num_writes].nb_sectors = req->qiov.size / 512;
    blkreq[*num_writes].qiov = &req->qiov;
    blkreq[*num_writes].cb = virtio_blk_rw_complete;
    blkreq[*num_writes].opaque = req;
    blkreq[*num_writes].error = 0;

    (*num_writes)++;
}
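This older variant keeps the batch state in its caller rather than in a MultiReqBuffer. A hedged sketch of how the request loop would drive it, based on the pre-MultiReqBuffer virtio-blk code; the dispatch details are abbreviated:

static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBlock *s = to_virtio_blk(vdev);
    VirtIOBlockReq *req;
    BlockRequest blkreq[32];    /* matches the *num_writes == 32 limit above */
    int num_writes = 0;
    BlockDriverState *old_bs = NULL;

    while ((req = virtio_blk_get_request(s))) {
        /* ... decode req->out->type and dispatch reads/flushes/SCSI ... */
        virtio_blk_handle_write(blkreq, &num_writes, req, &old_bs);
    }

    /* Submit whatever is still batched before returning to the main loop. */
    if (num_writes > 0) {
        do_multiwrite(old_bs, blkreq, num_writes);
    }
}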
Example #4
static void virtio_blk_handle_read(VirtIOBlockReq *req)
{
    BlockDriverAIOCB *acb;
    uint64_t sector;

    sector = ldq_p(&req->out->sector);

    bdrv_acct_start(req->dev->bs, &req->acct, req->qiov.size, BDRV_ACCT_READ);

    if (sector & req->dev->sector_mask) {
        virtio_blk_rw_complete(req, -EIO);
        return;
    }
    if (req->qiov.size % req->dev->conf->logical_block_size) {
        virtio_blk_rw_complete(req, -EIO);
        return;
    }

    acb = bdrv_aio_readv(req->dev->bs, sector, &req->qiov,
                         req->qiov.size / BDRV_SECTOR_SIZE,
                         virtio_blk_rw_complete, req);
    if (!acb) {
        virtio_blk_rw_complete(req, -EIO);
    }
}
Example #5
static void virtio_blk_handle_flush(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
    bdrv_acct_start(req->dev->bs, &req->acct, 0, BDRV_ACCT_FLUSH);

    /*
     * Make sure all outstanding writes are posted to the backing device.
     */
    virtio_submit_multiwrite(req->dev->bs, mrb);
    bdrv_aio_flush(req->dev->bs, virtio_blk_flush_complete, req);
}
Example #6
static void virtio_blk_handle_flush(BlockRequest *blkreq, int *num_writes,
    VirtIOBlockReq *req, BlockDriverState **old_bs)
{
    BlockDriverAIOCB *acb;

    bdrv_acct_start(req->dev->bs, &req->acct, 0, BDRV_ACCT_FLUSH);

    /*
     * Make sure all outstanding writes are posted to the backing device.
     */
    if (*old_bs != NULL) {
        do_multiwrite(*old_bs, blkreq, *num_writes);
    }
    *num_writes = 0;
    *old_bs = req->dev->bs;

    acb = bdrv_aio_flush(req->dev->bs, virtio_blk_flush_complete, req);
    if (!acb) {
        virtio_blk_flush_complete(req, -EIO);
    }
}
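On completion, the flush callback finishes the request and closes the BDRV_ACCT_FLUSH cookie started in both handlers above. A minimal sketch of virtio_blk_flush_complete; the error-policy helper is an assumption taken from the virtio-blk source of this period:

static void virtio_blk_flush_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;

    if (ret) {
        /* Defer to the configured write-error policy before failing. */
        if (virtio_blk_handle_rw_error(req, -ret, 0)) {
            return;
        }
    }

    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
    bdrv_acct_done(req->dev->bs, &req->acct);
}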
Example #7
static void virtio_blk_handle_read(VirtIOBlockReq *req)
{
    uint64_t sector;

    sector = virtio_ldq_p(VIRTIO_DEVICE(req->dev), &req->out.sector);

    bdrv_acct_start(req->dev->bs, &req->acct, req->qiov.size, BDRV_ACCT_READ);

    trace_virtio_blk_handle_read(req, sector, req->qiov.size / 512);

    if (sector & req->dev->sector_mask) {
        virtio_blk_rw_complete(req, -EIO);
        return;
    }
    if (req->qiov.size % req->dev->conf->logical_block_size) {
        virtio_blk_rw_complete(req, -EIO);
        return;
    }
    bdrv_aio_readv(req->dev->bs, sector, &req->qiov,
                   req->qiov.size / BDRV_SECTOR_SIZE,
                   virtio_blk_rw_complete, req);
}
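The read and write paths share one completion callback, which is where the BDRV_ACCT_READ and BDRV_ACCT_WRITE cookies are finished. A sketch of virtio_blk_rw_complete, assuming the same virtio_ld*_p accessors as the handler above and the same error-policy helper as the flush path:

static void virtio_blk_rw_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;

    trace_virtio_blk_rw_complete(req, ret);

    if (ret) {
        /* Reads are requests without the VIRTIO_BLK_T_OUT bit set. */
        bool is_read = !(virtio_ldl_p(VIRTIO_DEVICE(req->dev), &req->out.type)
                         & VIRTIO_BLK_T_OUT);
        if (virtio_blk_handle_rw_error(req, -ret, is_read)) {
            return;
        }
    }

    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
    bdrv_acct_done(req->dev->bs, &req->acct);
}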
Example #8
void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type)
{
    bdrv_acct_start(bs, cookie, sg->size, type);
}
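dma_acct_start() is a thin wrapper that charges the entire scatter-gather list's size to a single cookie before the transfer is issued. A hedged usage sketch; the device state `s`, its fields, and the callback name are illustrative, not from the original:

/* Illustrative caller: 's' is a hypothetical device state holding a
 * BlockDriverState *bs, a BlockAcctCookie acct, and a QEMUSGList sg. */
dma_acct_start(s->bs, &s->acct, &s->sg, BDRV_ACCT_READ);
dma_bdrv_read(s->bs, &s->sg, sector_num, dma_read_cb, s);
/* dma_read_cb() would then close the cookie with
 * bdrv_acct_done(s->bs, &s->acct) once the transfer completes. */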