Example #1
static void virtio_blk_handle_read(VirtIOBlockReq *req)
{
    BlockDriverAIOCB *acb;
    uint64_t sector;

    sector = ldq_p(&req->out->sector);

    bdrv_acct_start(req->dev->bs, &req->acct, req->qiov.size, BDRV_ACCT_READ);

    if (sector & req->dev->sector_mask) {
        virtio_blk_rw_complete(req, -EIO);
        return;
    }
    if (req->qiov.size % req->dev->conf->logical_block_size) {
        virtio_blk_rw_complete(req, -EIO);
        return;
    }

    acb = bdrv_aio_readv(req->dev->bs, sector, &req->qiov,
                         req->qiov.size / BDRV_SECTOR_SIZE,
                         virtio_blk_rw_complete, req);
    if (!acb) {
        virtio_blk_rw_complete(req, -EIO);
    }
}
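
Every caller in these examples supplies a BlockDriverCompletionFunc, which receives the opaque pointer back together with a result code (0 on success, a negative errno on failure), as the callback signatures in Examples 6 and 7 show. A minimal sketch of such a callback; MyRequest and my_request_finish are hypothetical names, not part of the QEMU API:

static void my_read_complete(void *opaque, int ret)
{
    MyRequest *req = opaque;    /* the pointer passed to bdrv_aio_readv() */

    if (ret < 0) {
        /* ret is a negative errno such as -EIO; the buffer contents
         * are undefined on failure */
        my_request_finish(req, ret);
        return;
    }

    /* On success the data is already in the QEMUIOVector that was
     * handed to bdrv_aio_readv() */
    my_request_finish(req, 0);
}
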
Example #2
File: qed.c Project: MauroZurita/qemu
/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 */
static void qed_write_header(BDRVQEDState *s, BlockDriverCompletionFunc cb,
                             void *opaque)
{
    /* We must write full sectors for O_DIRECT but cannot necessarily generate
     * the data following the header if an unrecognized compat feature is
     * active.  Therefore, first read the sectors containing the header, update
     * them, and write back.
     */

    int nsectors = (sizeof(QEDHeader) + BDRV_SECTOR_SIZE - 1) /
                   BDRV_SECTOR_SIZE;
    size_t len = nsectors * BDRV_SECTOR_SIZE;
    QEDWriteHeaderCB *write_header_cb = gencb_alloc(sizeof(*write_header_cb),
                                                    cb, opaque);

    write_header_cb->s = s;
    write_header_cb->nsectors = nsectors;
    write_header_cb->buf = qemu_blockalign(s->bs, len);
    write_header_cb->iov.iov_base = write_header_cb->buf;
    write_header_cb->iov.iov_len = len;
    qemu_iovec_init_external(&write_header_cb->qiov, &write_header_cb->iov, 1);

    bdrv_aio_readv(s->bs->file, 0, &write_header_cb->qiov, nsectors,
                   qed_write_header_read_cb, write_header_cb);
}
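
The continuation qed_write_header_read_cb is not shown above; it is what completes the read-modify-write cycle described in the comment. A sketch of what it plausibly looks like, assuming a qed_header_cpu_to_le helper that serializes the in-memory header and a qed_write_header_cb completion that frees the bounce buffer (both inferred from QED's structure, neither shown in this example):

static void qed_write_header_read_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;
    BDRVQEDState *s = write_header_cb->s;

    if (ret) {
        qed_write_header_cb(write_header_cb, ret);
        return;
    }

    /* Modify: overwrite only the header portion of the sectors just read */
    qed_header_cpu_to_le(&s->header, (QEDHeader *)write_header_cb->buf);

    /* Write back full sectors, as required for O_DIRECT */
    bdrv_aio_writev(s->bs->file, 0, &write_header_cb->qiov,
                    write_header_cb->nsectors,
                    qed_write_header_cb, write_header_cb);
}
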
Example #3
File: tar.c Project: jkivilin/qemu
/* This is where we get a request from a caller to read something */
static BlockDriverAIOCB *tar_aio_readv(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    BDRVTarState *s = bs->opaque;
    SparseCache *sparse;
    int64_t sec_file = sector_num + s->file_sec;
    int64_t start = sector_num * SECTOR_SIZE;
    int64_t end = start + (nb_sectors * SECTOR_SIZE);
    int i;
    TarAIOCB *acb;

    for (i = 0; i < s->sparse_num; i++) {
        sparse = &s->sparse[i];
        if (sparse->start > end) {
            /* We expect the cache entries to be sorted by increasing start offset */
            break;
        } else if ((sparse->start < start) && (sparse->end <= start)) {
            /* sparse before our offset */
            sec_file -= (sparse->end - sparse->start) / SECTOR_SIZE;
        } else if ((sparse->start <= start) && (sparse->end >= end)) {
            /* all our sectors are sparse */
            char *buf = g_malloc0(nb_sectors * SECTOR_SIZE);

            acb = qemu_aio_get(&tar_aiocb_info, bs, cb, opaque);
            qemu_iovec_from_buf(qiov, 0, buf, nb_sectors * SECTOR_SIZE);
            g_free(buf);
            acb->bh = qemu_bh_new(tar_sparse_cb, acb);
            qemu_bh_schedule(acb->bh);

            return &acb->common;
        } else if (((sparse->start >= start) && (sparse->start < end)) ||
                   ((sparse->end >= start) && (sparse->end < end))) {
            /* we're semi-sparse (worst case) */
            /* let's go synchronous and read all sectors individually */
            char *buf = g_malloc(nb_sectors * SECTOR_SIZE);
            uint64_t offs;

            for (offs = 0; offs < (nb_sectors * SECTOR_SIZE);
                 offs += SECTOR_SIZE) {
                bdrv_pread(bs, (sector_num * SECTOR_SIZE) + offs,
                           buf + offs, SECTOR_SIZE);
            }

            qemu_iovec_from_buf(qiov, 0, buf, nb_sectors * SECTOR_SIZE);
            acb = qemu_aio_get(&tar_aiocb_info, bs, cb, opaque);
            acb->bh = qemu_bh_new(tar_sparse_cb, acb);
            qemu_bh_schedule(acb->bh);

            return &acb->common;
        }
    }

    return bdrv_aio_readv(s->hd, sec_file, qiov, nb_sectors,
                          cb, opaque);
}
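
The bottom-half callback tar_sparse_cb is not shown in this example. Since both sparse paths copy the (zero-filled or synchronously read) data into the caller's QEMUIOVector before scheduling the bottom half, the callback presumably only has to report success and release the AIOCB; a sketch under that assumption:

static void tar_sparse_cb(void *opaque)
{
    TarAIOCB *acb = opaque;

    /* The data is already in the caller's QEMUIOVector; just signal
     * completion from bottom-half context and clean up. */
    acb->common.cb(acb->common.opaque, 0);
    qemu_bh_delete(acb->bh);
    qemu_aio_release(acb);
}
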
Example #4
File: blkverify.c Project: Aakriti/qemu
static BlockDriverAIOCB *blkverify_aio_readv(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    BDRVBlkverifyState *s = bs->opaque;
    BlkverifyAIOCB *acb = blkverify_aio_get(bs, false, sector_num, qiov,
                                            nb_sectors, cb, opaque);

    acb->verify = blkverify_verify_readv;
    acb->buf = qemu_blockalign(bs->file, qiov->size);
    qemu_iovec_init(&acb->raw_qiov, acb->qiov->niov);
    blkverify_iovec_clone(&acb->raw_qiov, qiov, acb->buf);

    bdrv_aio_readv(s->test_file, sector_num, qiov, nb_sectors,
                   blkverify_aio_cb, acb);
    bdrv_aio_readv(bs->file, sector_num, &acb->raw_qiov, nb_sectors,
                   blkverify_aio_cb, acb);
    return &acb->common;
}
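
Both reads complete through the shared blkverify_aio_cb, which has to wait for the pair before comparing. A sketch of that join logic, assuming done and ret fields on BlkverifyAIOCB and a blkverify_err reporting helper (the real driver also defers the final completion to a bottom half):

static void blkverify_aio_cb(void *opaque, int ret)
{
    BlkverifyAIOCB *acb = opaque;

    switch (++acb->done) {
    case 1:
        /* First of the two reads finished; remember its result */
        acb->ret = ret;
        break;
    case 2:
        /* Both finished: the return values and the data must agree */
        if (acb->ret != ret) {
            blkverify_err(acb, "return value mismatch %d != %d", acb->ret, ret);
        }
        if (acb->verify) {
            acb->verify(acb);   /* blkverify_verify_readv compares buffers */
        }
        acb->common.cb(acb->common.opaque, acb->ret);
        break;
    }
}
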
Example #5
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
        goto err_no_map;
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_READ);
        ioreq->aio_inflight++;
        bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_WRITE_BARRIER:
        if (!ioreq->req.nr_segments) {
            break;
        }

        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_WRITE);
        ioreq->aio_inflight++;
        bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
                        &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                        qemu_aio_complete, ioreq);
        break;
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    if (ioreq->postsync) {
        bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */
    }
    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_unmap(ioreq);
err_no_map:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}
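
Note the counting pattern here: aio_inflight is incremented once up front and once per submitted request, and the function ends by calling qemu_aio_complete(ioreq, 0) itself to drop the initial reference. That only works if the completion function is reference-counted, along these lines (a sketch assuming the field names used above; the real xen_disk.c differs in detail):

static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        ioreq->aio_errors++;
    }

    /* Drop one reference; only the last completion finishes the request */
    if (--ioreq->aio_inflight > 0) {
        return;
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
}
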
Example #6
File: dma-helpers.c Project: 0-14N/NDroid
static void dma_bdrv_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    target_phys_addr_t cur_addr, cur_len;
    void *mem;

    dbs->acb = NULL;
    dbs->sector_num += dbs->iov.size / 512;
    dma_bdrv_unmap(dbs);
    qemu_iovec_reset(&dbs->iov);

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dbs->common.cb(dbs->common.opaque, ret);
        qemu_iovec_destroy(&dbs->iov);
        qemu_aio_release(dbs);
        return;
    }

    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = cpu_physical_memory_map(cur_addr, &cur_len, !dbs->is_write);
        if (!mem)
            break;
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    if (dbs->is_write) {
        dbs->acb = bdrv_aio_writev(dbs->bs, dbs->sector_num, &dbs->iov,
                                   dbs->iov.size / 512, dma_bdrv_cb, dbs);
    } else {
        dbs->acb = bdrv_aio_readv(dbs->bs, dbs->sector_num, &dbs->iov,
                                  dbs->iov.size / 512, dma_bdrv_cb, dbs);
    }
    if (!dbs->acb) {
        dma_bdrv_unmap(dbs);
        qemu_iovec_destroy(&dbs->iov);
        return;
    }
}
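
When cpu_physical_memory_map() fails, the code above registers continue_after_map_failure and returns with no I/O in flight; the map client is invoked once mappings become available again. A sketch of that resume path, assuming a bh field on DMAAIOCB and a reschedule_dma trampoline that re-enters dma_bdrv_cb (names inferred, not shown in the example):

static void dma_bdrv_cb(void *opaque, int ret);

static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    /* Retry: dbs->iov is still empty, so sector_num is unchanged and the
     * mapping loop starts over where it left off */
    dma_bdrv_cb(dbs, 0);
}

static void continue_after_map_failure(void *opaque)
{
    DMAAIOCB *dbs = opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}
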
Example #7
File: qemu-test.c Project: stweil/qemu
static void compare_full_images_cb (void *opaque, int ret)
{
    CompareFullCB *cf = opaque;

    if (ret) {
        /* Failed. Retry the operation. */
        bdrv_aio_readv (bs, cf->sector_num, &cf->qiov, cf->nb_sectors,
                        compare_full_images_cb, cf);
        return;
    }

    truth_io (cf->truth_buf, cf->sector_num, cf->nb_sectors, TRUE);
    verify (cf->truth_buf, cf->iov.iov_base, cf->sector_num, cf->nb_sectors);

    cf->sector_num += cf->nb_sectors;
    if (cf->sector_num >= total_sectors) {
        /* Finished. */
        free (cf->truth_buf);
        qemu_vfree (cf->iov.iov_base);
        g_free(cf);
        return;
    }

    /* Read more data to compare. */
    if (cf->sector_num + cf->max_nb_sectors > total_sectors) {
        cf->nb_sectors = total_sectors - cf->sector_num;
    } else {
        cf->nb_sectors = cf->max_nb_sectors;
    }
    cf->iov.iov_len = cf->nb_sectors * 512;
    qemu_iovec_init_external (&cf->qiov, &cf->iov, 1);
    if (!bdrv_aio_readv (bs, cf->sector_num, &cf->qiov,
                         cf->nb_sectors, compare_full_images_cb, cf)) {
        die ("bdrv_aio_readv\n");
    }
}
Example #8
static void virtio_blk_handle_read(VirtIOBlockReq *req)
{
    BlockDriverAIOCB *acb;

    if (req->out->sector & req->dev->sector_mask) {
        virtio_blk_rw_complete(req, -EIO);
        return;
    }

    acb = bdrv_aio_readv(req->dev->bs, req->out->sector, &req->qiov,
                         req->qiov.size / BDRV_SECTOR_SIZE,
                         virtio_blk_rw_complete, req);
    if (!acb) {
        virtio_blk_rw_complete(req, -EIO);
    }
}
Example #9
static void qed_read_table(BDRVQEDState *s, uint64_t offset, QEDTable *table,
                           BlockDriverCompletionFunc *cb, void *opaque)
{
    QEDReadTableCB *read_table_cb = gencb_alloc(sizeof(*read_table_cb),
                                                cb, opaque);
    QEMUIOVector *qiov = &read_table_cb->qiov;

    trace_qed_read_table(s, offset, table);

    read_table_cb->s = s;
    read_table_cb->table = table;
    read_table_cb->iov.iov_base = table->offsets;
    read_table_cb->iov.iov_len = s->header.cluster_size * s->header.table_size;

    qemu_iovec_init_external(qiov, &read_table_cb->iov, 1);
    bdrv_aio_readv(s->bs->file, offset / BDRV_SECTOR_SIZE, qiov,
                   qiov->size / BDRV_SECTOR_SIZE,
                   qed_read_table_cb, read_table_cb);
}
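
For scale: both arguments to bdrv_aio_readv() here are derived by dividing byte quantities by BDRV_SECTOR_SIZE (512). With a 64 KiB cluster_size and a table_size of 4 (the QED defaults), iov_len is 4 * 65536 = 262144 bytes, so the call reads 512 sectors starting at offset / 512.
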
Example #10
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq_map(ioreq) == -1) {
        goto err;
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->aio_inflight++;
        bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_WRITE_BARRIER:
        ioreq->aio_inflight++;
        bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
                        &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                        qemu_aio_complete, ioreq);
        break;
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    if (ioreq->postsync) {
        bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */
    }
    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}
Example #11
File: virtio-blk.c Project: Samsara00/qemu
static void virtio_blk_handle_read(VirtIOBlockReq *req)
{
    uint64_t sector;

    sector = virtio_ldq_p(VIRTIO_DEVICE(req->dev), &req->out.sector);

    bdrv_acct_start(req->dev->bs, &req->acct, req->qiov.size, BDRV_ACCT_READ);

    trace_virtio_blk_handle_read(req, sector, req->qiov.size / 512);

    if (sector & req->dev->sector_mask) {
        virtio_blk_rw_complete(req, -EIO);
        return;
    }
    if (req->qiov.size % req->dev->conf->logical_block_size) {
        virtio_blk_rw_complete(req, -EIO);
        return;
    }
    bdrv_aio_readv(req->dev->bs, sector, &req->qiov,
                   req->qiov.size / BDRV_SECTOR_SIZE,
                   virtio_blk_rw_complete, req);
}
Example #12
File: qemu-test.c Project: stweil/qemu
static int compare_full_images (void)
{
    CompareFullCB *cf;
    int old_copy_on_read = FALSE;

    printf ("Performing a full comparison of the truth image and "
            "the test image...\n");

    if (!strncmp (bs->drv->format_name, "fvd", 3)) {
        /* Disable copy-on-read when scanning through the entire image. */
        old_copy_on_read = fvd_get_copy_on_read (bs);
        fvd_set_copy_on_read (bs, FALSE);
    }

    cf = g_malloc(sizeof(CompareFullCB));
    cf->max_nb_sectors = 1048576L / 512;
    cf->nb_sectors = MIN (cf->max_nb_sectors, total_sectors);
    if (posix_memalign ((void **) &cf->truth_buf, 512,
                        cf->max_nb_sectors * 512) != 0) {
        die ("posix_memalign");
    }
    cf->iov.iov_base = qemu_blockalign (bs, cf->max_nb_sectors * 512);
    cf->iov.iov_len = cf->nb_sectors * 512;
    cf->sector_num = 0;
    qemu_iovec_init_external (&cf->qiov, &cf->iov, 1);
    if (!bdrv_aio_readv (bs, cf->sector_num, &cf->qiov,
                         cf->nb_sectors, compare_full_images_cb, cf)) {
        die ("bdrv_aio_readv\n");
    }

    sim_all_tasks ();

    if (!strncmp (bs->drv->format_name, "fvd", 3)) {
        fvd_set_copy_on_read (bs, old_copy_on_read);
    }

    return 0;
}
Example #13
File: raw.c Project: AmesianX/qemu-kvm
static BlockDriverAIOCB *raw_aio_readv(BlockDriverState *bs,
    int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
    BlockDriverCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_readv(bs->file, sector_num, qiov, nb_sectors, cb, opaque);
}
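
Example #13 simply forwards the request to the underlying file. For completeness, a minimal caller that follows the single-iovec pattern of Examples 7 and 12; everything prefixed my_ is a hypothetical name:

typedef struct {
    struct iovec iov;       /* must stay alive until the callback runs */
    QEMUIOVector qiov;
} MyReadState;

/* Submit an asynchronous read of nb_sectors into a freshly allocated,
 * alignment-safe buffer wrapped in a one-element QEMUIOVector. */
static void my_submit_read(BlockDriverState *bs, MyReadState *st,
                           int64_t sector_num, int nb_sectors)
{
    st->iov.iov_base = qemu_blockalign(bs, nb_sectors * BDRV_SECTOR_SIZE);
    st->iov.iov_len = nb_sectors * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&st->qiov, &st->iov, 1);

    if (!bdrv_aio_readv(bs, sector_num, &st->qiov, nb_sectors,
                        my_read_done, st)) {
        /* This vintage of bdrv_aio_readv returned NULL when the request
         * could not be submitted (see Examples 1, 7, and 16) */
        my_read_done(st, -EIO);
    }
}

MyReadState mirrors CompareFullCB in Example #12: it keeps the iov/qiov pair alive until my_read_done fires.
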
Example #14
static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->common.bs;
    int nb_sectors, sectors_per_chunk, nb_chunks;
    int64_t end, sector_num, next_chunk, next_sector, hbitmap_next_sector;
    uint64_t delay_ns = 0;
    MirrorOp *op;
    int pnum;
    int64_t ret;

    s->sector_num = hbitmap_iter_next(&s->hbi);
    if (s->sector_num < 0) {
        bdrv_dirty_iter_init(s->dirty_bitmap, &s->hbi);
        s->sector_num = hbitmap_iter_next(&s->hbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(s->sector_num >= 0);
    }

    hbitmap_next_sector = s->sector_num;
    sector_num = s->sector_num;
    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    end = s->bdev_length / BDRV_SECTOR_SIZE;

    /* Extend the QEMUIOVector to include all adjacent blocks that will
     * be copied in this operation.
     *
     * We have to do this if we have no backing file yet in the destination,
     * and the cluster size is very large.  Then we need to do COW ourselves.
     * The first time a cluster is copied, copy it entirely.  Note that,
     * because both the granularity and the cluster size are powers of two,
     * the number of sectors to copy cannot exceed one cluster.
     *
     * We also want to extend the QEMUIOVector to include more adjacent
     * dirty blocks if possible, to limit the number of I/O operations and
     * run efficiently even with a small granularity.
     */
    nb_chunks = 0;
    nb_sectors = 0;
    next_sector = sector_num;
    next_chunk = sector_num / sectors_per_chunk;

    /* Wait for I/O to this cluster (from a previous iteration) to be done.  */
    while (test_bit(next_chunk, s->in_flight_bitmap)) {
        trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
        s->waiting_for_io = true;
        qemu_coroutine_yield();
        s->waiting_for_io = false;
    }

    do {
        int added_sectors, added_chunks;

        if (!bdrv_get_dirty(source, s->dirty_bitmap, next_sector) ||
            test_bit(next_chunk, s->in_flight_bitmap)) {
            assert(nb_sectors > 0);
            break;
        }

        added_sectors = sectors_per_chunk;
        if (s->cow_bitmap && !test_bit(next_chunk, s->cow_bitmap)) {
            bdrv_round_to_clusters(s->target,
                                   next_sector, added_sectors,
                                   &next_sector, &added_sectors);

            /* On the first iteration, the rounding may make us copy
             * sectors before the first dirty one.
             */
            if (next_sector < sector_num) {
                assert(nb_sectors == 0);
                sector_num = next_sector;
                next_chunk = next_sector / sectors_per_chunk;
            }
        }

        added_sectors = MIN(added_sectors, end - (sector_num + nb_sectors));
        added_chunks = (added_sectors + sectors_per_chunk - 1) / sectors_per_chunk;

        /* When doing COW, it may happen that there is not enough space for
         * a full cluster.  Wait if that is the case.
         */
        while (nb_chunks == 0 && s->buf_free_count < added_chunks) {
            trace_mirror_yield_buf_busy(s, nb_chunks, s->in_flight);
            s->waiting_for_io = true;
            qemu_coroutine_yield();
            s->waiting_for_io = false;
        }
        if (s->buf_free_count < nb_chunks + added_chunks) {
            trace_mirror_break_buf_busy(s, nb_chunks, s->in_flight);
            break;
        }
        if (IOV_MAX < nb_chunks + added_chunks) {
            trace_mirror_break_iov_max(s, nb_chunks, added_chunks);
            break;
        }

        /* We have enough free space to copy these sectors.  */
        bitmap_set(s->in_flight_bitmap, next_chunk, added_chunks);

        nb_sectors += added_sectors;
        nb_chunks += added_chunks;
        next_sector += added_sectors;
        next_chunk += added_chunks;
        if (!s->synced && s->common.speed) {
            delay_ns = ratelimit_calculate_delay(&s->limit, added_sectors);
        }
    } while (delay_ns == 0 && next_sector < end);

    /* Allocate a MirrorOp that is used as an AIO callback.  */
    op = g_new(MirrorOp, 1);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    next_sector = sector_num;
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = (nb_sectors * BDRV_SECTOR_SIZE) - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));

        /* Advance the HBitmapIter in parallel, so that we do not examine
         * the same sector twice.
         */
        if (next_sector > hbitmap_next_sector
            && bdrv_get_dirty(source, s->dirty_bitmap, next_sector)) {
            hbitmap_next_sector = hbitmap_iter_next(&s->hbi);
        }

        next_sector += sectors_per_chunk;
    }

    bdrv_reset_dirty_bitmap(s->dirty_bitmap, sector_num, nb_sectors);

    /* Copy the dirty cluster.  */
    s->in_flight++;
    s->sectors_in_flight += nb_sectors;
    trace_mirror_one_iteration(s, sector_num, nb_sectors);

    ret = bdrv_get_block_status_above(source, NULL, sector_num,
                                      nb_sectors, &pnum);
    if (ret < 0 || pnum < nb_sectors ||
            (ret & BDRV_BLOCK_DATA && !(ret & BDRV_BLOCK_ZERO))) {
        bdrv_aio_readv(source, sector_num, &op->qiov, nb_sectors,
                       mirror_read_complete, op);
    } else if (ret & BDRV_BLOCK_ZERO) {
        bdrv_aio_write_zeroes(s->target, sector_num, op->nb_sectors,
                              s->unmap ? BDRV_REQ_MAY_UNMAP : 0,
                              mirror_write_complete, op);
    } else {
        assert(!(ret & BDRV_BLOCK_DATA));
        bdrv_aio_discard(s->target, sector_num, op->nb_sectors,
                         mirror_write_complete, op);
    }
    return delay_ns;
}
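
mirror_read_complete is where the job turns the completed read into a write of the same sectors on the target. A sketch of that hand-off, assuming a mirror_iteration_done helper that releases the MirrorOp, and re-dirtying on error (the mirror.c of this era also routes errors through a configurable error action, omitted here):

static void mirror_read_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        /* Re-mark the range dirty so a later iteration retries it */
        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
        mirror_iteration_done(op, ret);
        return;
    }

    /* Copy out: write the data just read to the same sectors of the target */
    bdrv_aio_writev(s->target, op->sector_num, &op->qiov, op->nb_sectors,
                    mirror_write_complete, op);
}
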
Example #15
File: mirror.c Project: akoskovacs/qemu
static void coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->common.bs;
    int nb_sectors, sectors_per_chunk, nb_chunks;
    int64_t end, sector_num, next_chunk, next_sector, hbitmap_next_sector;
    MirrorOp *op;

    s->sector_num = hbitmap_iter_next(&s->hbi);
    if (s->sector_num < 0) {
        bdrv_dirty_iter_init(source, &s->hbi);
        s->sector_num = hbitmap_iter_next(&s->hbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(source));
        assert(s->sector_num >= 0);
    }

    hbitmap_next_sector = s->sector_num;
    sector_num = s->sector_num;
    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    end = s->common.len >> BDRV_SECTOR_BITS;

    /* Extend the QEMUIOVector to include all adjacent blocks that will
     * be copied in this operation.
     *
     * We have to do this if we have no backing file yet in the destination,
     * and the cluster size is very large.  Then we need to do COW ourselves.
     * The first time a cluster is copied, copy it entirely.  Note that,
     * because both the granularity and the cluster size are powers of two,
     * the number of sectors to copy cannot exceed one cluster.
     *
     * We also want to extend the QEMUIOVector to include more adjacent
     * dirty blocks if possible, to limit the number of I/O operations and
     * run efficiently even with a small granularity.
     */
    nb_chunks = 0;
    nb_sectors = 0;
    next_sector = sector_num;
    next_chunk = sector_num / sectors_per_chunk;

    /* Wait for I/O to this cluster (from a previous iteration) to be done.  */
    while (test_bit(next_chunk, s->in_flight_bitmap)) {
        trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
        qemu_coroutine_yield();
    }

    do {
        int added_sectors, added_chunks;

        if (!bdrv_get_dirty(source, next_sector) ||
            test_bit(next_chunk, s->in_flight_bitmap)) {
            assert(nb_sectors > 0);
            break;
        }

        added_sectors = sectors_per_chunk;
        if (s->cow_bitmap && !test_bit(next_chunk, s->cow_bitmap)) {
            bdrv_round_to_clusters(s->target,
                                   next_sector, added_sectors,
                                   &next_sector, &added_sectors);

            /* On the first iteration, the rounding may make us copy
             * sectors before the first dirty one.
             */
            if (next_sector < sector_num) {
                assert(nb_sectors == 0);
                sector_num = next_sector;
                next_chunk = next_sector / sectors_per_chunk;
            }
        }

        added_sectors = MIN(added_sectors, end - (sector_num + nb_sectors));
        added_chunks = (added_sectors + sectors_per_chunk - 1) / sectors_per_chunk;

        /* When doing COW, it may happen that there is not enough space for
         * a full cluster.  Wait if that is the case.
         */
        while (nb_chunks == 0 && s->buf_free_count < added_chunks) {
            trace_mirror_yield_buf_busy(s, nb_chunks, s->in_flight);
            qemu_coroutine_yield();
        }
        if (s->buf_free_count < nb_chunks + added_chunks) {
            trace_mirror_break_buf_busy(s, nb_chunks, s->in_flight);
            break;
        }

        /* We have enough free space to copy these sectors.  */
        bitmap_set(s->in_flight_bitmap, next_chunk, added_chunks);

        nb_sectors += added_sectors;
        nb_chunks += added_chunks;
        next_sector += added_sectors;
        next_chunk += added_chunks;
    } while (next_sector < end);

    /* Allocate a MirrorOp that is used as an AIO callback.  */
    op = g_slice_new(MirrorOp);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    next_sector = sector_num;
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, s->granularity);

        /* Advance the HBitmapIter in parallel, so that we do not examine
         * the same sector twice.
         */
        if (next_sector > hbitmap_next_sector && bdrv_get_dirty(source, next_sector)) {
            hbitmap_next_sector = hbitmap_iter_next(&s->hbi);
        }

        next_sector += sectors_per_chunk;
    }

    bdrv_reset_dirty(source, sector_num, nb_sectors);

    /* Copy the dirty cluster.  */
    s->in_flight++;
    trace_mirror_one_iteration(s, sector_num, nb_sectors);
    bdrv_aio_readv(source, sector_num, &op->qiov, nb_sectors,
                   mirror_read_complete, op);
}
Example #16
File: qemu-test.c Project: stweil/qemu
/* Return FALSE if the submitted request is cancelled. */
static int submit_rand_io (RandomIO * r)
{
    BlockDriverAIOCB *acb = NULL;

    QDEBUG ("TESTER %03d:  %s  test%" PRIX64 " sector_num=%" PRId64
            " nb_sectors=%d niov=%d\n", r->tester, op_type_str[r->type],
            r->uuid, r->sector_num, r->nb_sectors, r->qiov.niov);
    printf ("TESTER %03d:  %s  sector_num=%" PRId64 " nb_sectors=%d niov=%d\n",
            r->tester, op_type_str[r->type], r->sector_num, r->nb_sectors,
            r->qiov.niov);

    int ret;
    if (fail_prob <= 0) {
        ret = 0;
    } else if (random () / (double) RAND_MAX <= fail_prob) {
        ret = -EIO;
    } else {
        ret = 0;
    }

    /* This affects whether this request will fail or not. */
    sim_set_disk_io_return_code (ret);

    switch (r->type) {
    case OP_READ:
        if (!(acb = bdrv_aio_readv (bs, r->sector_num, &r->qiov, r->nb_sectors,
                             rand_io_cb, r))) {
            die ("bdrv_aio_readv\n");
        }
        break;
    case OP_WRITE:
        if (!(acb = bdrv_aio_writev (bs, r->sector_num, &r->qiov, r->nb_sectors,
                              rand_io_cb, r))) {
            die ("bdrv_aio_writev\n");
        }
        break;
    case OP_FLUSH:
        if (!(acb = bdrv_aio_flush (bs, rand_io_cb, r))) {
            die ("bdrv_aio_flush\n");
        }
        break;
    case OP_NULL:
        die ("OP_NULL");
        break;
    }

    sim_set_disk_io_return_code (0);        /* Reset to no failure state. */

    if (r->allow_cancel && cancel_prob > 0 &&
                random () / (double) RAND_MAX <= cancel_prob) {
        QDEBUG ("TESTER %03d:  cancel %s test%" PRIX64 " sector_num=%" PRId64
                " nb_sectors=%d niov=%d\n", r->tester, op_type_str[r->type],
                r->uuid, r->sector_num, r->nb_sectors, r->qiov.niov);
        printf ("TESTER %03d:  cancel %s sector_num=%" PRId64
                " nb_sectors=%d niov=%d\n", r->tester, op_type_str[r->type],
                r->sector_num, r->nb_sectors, r->qiov.niov);
        bdrv_aio_cancel (acb);
        return FALSE;
    } else {
        return TRUE;
    }
}