Example #1
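/* QEMU hw/xen_disk.c (Xen PV block backend): AIO completion callback.
 * A pending presync flush re-submits the request's data I/O; once all
 * in-flight operations have completed, an optional postsync
 * bdrv_aio_flush() runs before the response is finalized. */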
static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->presync) {
        ioreq->presync = 0;
        ioreq_runio_qemu_aio(ioreq);
        return;
    }
    if (ioreq->aio_inflight > 0) {
        return;
    }
    if (ioreq->postsync) {
        ioreq->postsync = 0;
        ioreq->aio_inflight++;
        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
        return;
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
    bdrv_acct_done(ioreq->blkdev->bs, &ioreq->acct);
    qemu_bh_schedule(ioreq->blkdev->bh);
}
Example #2
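/* QEMU block/qed-table.c: completion callback for QED table writes.
 * When a flush was requested, bdrv_aio_flush() is issued first and
 * re-enters this callback with the flush flag cleared. */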
static void qed_write_table_cb(void *opaque, int ret)
{
    QEDWriteTableCB *write_table_cb = opaque;

    trace_qed_write_table_cb(write_table_cb->s,
                             write_table_cb->orig_table,
                             write_table_cb->flush,
                             ret);

    if (ret) {
        goto out;
    }

    if (write_table_cb->flush) {
        /* We still need to flush first */
        write_table_cb->flush = false;
        bdrv_aio_flush(write_table_cb->s->bs, qed_write_table_cb,
                       write_table_cb);
        return;
    }

out:
    qemu_vfree(write_table_cb->table);
    gencb_complete(&write_table_cb->gencb, ret);
    return;
}
Example #3
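/* QEMU block/blkverify.c: forwards the flush to the test image only;
 * per the comment below, the raw reference image does not need it. */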
static BlockDriverAIOCB *blkverify_aio_flush(BlockDriverState *bs,
                                             BlockDriverCompletionFunc *cb,
                                             void *opaque)
{
    BDRVBlkverifyState *s = bs->opaque;

    /* Only flush test file, the raw file is not important */
    return bdrv_aio_flush(s->test_file, cb, opaque);
}
Example #4
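/* QEMU block/qed.c: after the need-check flag has been cleared, a flush
 * is started, but queued allocating writes are unplugged immediately
 * rather than waiting for the flush to complete. */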
static void qed_flush_after_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    bdrv_aio_flush(s->bs, qed_finish_clear_need_check, s);

    /* No need to wait until flush completes */
    qed_unplug_allocating_write_reqs(s);
}
Example #5
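/* QEMU hw/virtio-blk.c: flush handler; I/O accounting is started and any
 * batched writes are submitted before the flush, so that all outstanding
 * data reaches the backend. */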
static void virtio_blk_handle_flush(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
    bdrv_acct_start(req->dev->bs, &req->acct, 0, BDRV_ACCT_FLUSH);

    /*
     * Make sure all outstanding writes are posted to the backing device.
     */
    virtio_submit_multiwrite(req->dev->bs, mrb);
    bdrv_aio_flush(req->dev->bs, virtio_blk_flush_complete, req);
}
Example #6
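/* Another revision of the virtio-blk flush handler: here a NULL return
 * from bdrv_aio_flush() is completed by hand with -EIO. */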
static void virtio_blk_handle_flush(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
    BlockDriverAIOCB *acb;

    /*
     * Make sure all outstanding writes are posted to the backing device.
     */
    virtio_submit_multiwrite(req->dev->bs, mrb);

    acb = bdrv_aio_flush(req->dev->bs, virtio_blk_flush_complete, req);
    if (!acb) {
        virtio_blk_flush_complete(req, -EIO);
    }
}
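Examples #5 and #6 differ only in whether the return value of bdrv_aio_flush() is checked. The following minimal sketch distills that calling convention; it is not taken from QEMU: MyState, my_start_flush and my_flush_done are invented names, and it assumes the block-layer API of the era shown here ("block.h"), in which bdrv_aio_flush() could return NULL on submission failure.

#include "block.h"          /* assumption: QEMU-internal header of this era */
#include <errno.h>

typedef struct MyState {
    BlockDriverState *bs;   /* block backend to flush */
} MyState;

/* Completion callback: ret is 0 on success or a negative errno. */
static void my_flush_done(void *opaque, int ret)
{
    MyState *s = opaque;

    if (ret < 0) {
        /* fail the guest request / log the error for s->bs here */
    }
    /* otherwise mark the request complete and schedule follow-up work */
}

static void my_start_flush(MyState *s)
{
    BlockDriverAIOCB *acb;

    acb = bdrv_aio_flush(s->bs, my_flush_done, s);
    if (!acb) {
        /* submission failed synchronously; complete by hand (Example #6) */
        my_flush_done(s, -EIO);
    }
}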
Example #7
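/* QEMU block/qed.c: need-check timer callback. Allocating writes are
 * plugged, and the flush guarantees data is on disk before
 * qed_clear_need_check() clears the header flag. */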
static void qed_need_check_timer_cb(void *opaque)
{
    BDRVQEDState *s = opaque;

    /* The timer should only fire when allocating writes have drained */
    assert(!QSIMPLEQ_FIRST(&s->allocating_write_reqs));

    trace_qed_need_check_timer_cb(s);

    qed_plug_allocating_write_reqs(s);

    /* Ensure writes are on disk before clearing flag */
    bdrv_aio_flush(s->bs, qed_clear_need_check, s);
}
Example #8
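/* QEMU hw/xen_disk.c: submits the data I/O for a Xen block request.
 * A presync request flushes first and resumes from qemu_aio_complete()
 * (Example #1); the trailing qemu_aio_complete(ioreq, 0) drops the
 * initial aio_inflight reference taken above. */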
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
        goto err_no_map;
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
        return 0;
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_READ);
        ioreq->aio_inflight++;
        bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_WRITE_BARRIER:
        if (!ioreq->req.nr_segments) {
            break;
        }

        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_WRITE);
        ioreq->aio_inflight++;
        bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
                        &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                        qemu_aio_complete, ioreq);
        break;
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_unmap(ioreq);
err_no_map:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}
Example #9
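/* An earlier revision of QEMU's virtio-blk flush handler, predating
 * MultiReqBuffer: the pending write batch is submitted to the previous
 * BlockDriverState before the flush, and a NULL return fails the
 * request with -EIO. */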
static void virtio_blk_handle_flush(BlockRequest *blkreq, int *num_writes,
    VirtIOBlockReq *req, BlockDriverState **old_bs)
{
    BlockDriverAIOCB *acb;

    bdrv_acct_start(req->dev->bs, &req->acct, 0, BDRV_ACCT_FLUSH);

    /*
     * Make sure all outstanding writes are posted to the backing device.
     */
    if (*old_bs != NULL) {
        do_multiwrite(*old_bs, blkreq, *num_writes);
    }
    *num_writes = 0;
    *old_bs = req->dev->bs;

    acb = bdrv_aio_flush(req->dev->bs, virtio_blk_flush_complete, req);
    if (!acb) {
        virtio_blk_flush_complete(req, -EIO);
    }
}
Example #10
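/* QEMU block/raw.c: the raw format driver forwards the flush straight
 * to the underlying protocol driver (bs->file). */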
static BlockDriverAIOCB *raw_aio_flush(BlockDriverState *bs,
    BlockDriverCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_flush(bs->file, cb, opaque);
}
Example #11
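/* From a randomized block I/O test harness (not part of mainline QEMU;
 * it appears to come from the FVD test tool): submits a random read,
 * write, or flush against a simulated disk, optionally injecting an I/O
 * error or cancelling the request just after submission. */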
/* Return FALSE if the submitted request is cancelled. */
static int submit_rand_io (RandomIO * r)
{
    BlockDriverAIOCB *acb = NULL;

    QDEBUG ("TESTER %03d:  %s  test%" PRIX64 " sector_num=%" PRId64
            " nb_sectors=%d niov=%d\n", r->tester, op_type_str[r->type],
            r->uuid, r->sector_num, r->nb_sectors, r->qiov.niov);
    printf ("TESTER %03d:  %s  sector_num=%" PRId64 " nb_sectors=%d niov=%d\n",
            r->tester, op_type_str[r->type], r->sector_num, r->nb_sectors,
            r->qiov.niov);

    int ret;
    if (fail_prob <= 0) {
        ret = 0;
    } else if (random () / (double) RAND_MAX <= fail_prob) {
        ret = -EIO;
    } else {
        ret = 0;
    }

    /* This affects whether this request will fail or not. */
    sim_set_disk_io_return_code (ret);

    switch (r->type) {
    case OP_READ:
        if (!(acb = bdrv_aio_readv (bs, r->sector_num, &r->qiov, r->nb_sectors,
                             rand_io_cb, r))) {
            die ("bdrv_aio_readv\n");
        }
        break;
    case OP_WRITE:
        if (!(acb = bdrv_aio_writev (bs, r->sector_num, &r->qiov, r->nb_sectors,
                              rand_io_cb, r))) {
            die ("bdrv_aio_writev\n");
        }
        break;
    case OP_FLUSH:
        if (!(acb = bdrv_aio_flush (bs, rand_io_cb, r))) {
            die ("bdrv_aio_flush\n");
        }
        break;
    case OP_NULL:
        die ("OP_NULL");
        break;
    }

    sim_set_disk_io_return_code (0);        /* Reset to no failure state. */

    if (r->allow_cancel && cancel_prob > 0 &&
                random () / (double) RAND_MAX <= cancel_prob) {
        QDEBUG ("TESTER %03d:  cancel %s test%" PRIX64 " sector_num=%" PRId64
                " nb_sectors=%d niov=%d\n", r->tester, op_type_str[r->type],
                r->uuid, r->sector_num, r->nb_sectors, r->qiov.niov);
        printf ("TESTER %03d:  cancel %s sector_num=%" PRId64
                " nb_sectors=%d niov=%d\n", r->tester, op_type_str[r->type],
                r->sector_num, r->nb_sectors, r->qiov.niov);
        bdrv_aio_cancel (acb);
        return FALSE;
    } else {
        return TRUE;
    }
}