/*
 * Submit an asynchronous read or write to the gluster volume.
 *
 * Allocates a GlusterAIOCB, converts the sector-based request into a byte
 * offset/length, and hands the vectored I/O to libgfapi.  Completion is
 * reported through gluster_finish_aiocb, which receives the acb as its
 * opaque argument.
 *
 * Returns the generic AIOCB on successful submission, or NULL if libgfapi
 * rejected the request (in which case the acb is released here and the
 * caller's @cb will never fire).
 */
static BlockDriverAIOCB *qemu_gluster_aio_rw(BlockDriverState *bs,
                                             int64_t sector_num,
                                             QEMUIOVector *qiov,
                                             int nb_sectors,
                                             BlockDriverCompletionFunc *cb,
                                             void *opaque, int write)
{
    BDRVGlusterState *s = bs->opaque;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;
    size_t size = nb_sectors * BDRV_SECTOR_SIZE;
    GlusterAIOCB *acb;
    int ret;

    acb = qemu_aio_get(&gluster_aiocb_info, bs, cb, opaque);
    acb->size = size;
    acb->ret = 0;
    acb->finished = NULL;   /* no synchronous waiter for this request */

    /* Dispatch to the write or read submission entry point. */
    ret = write
        ? glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
                             &gluster_finish_aiocb, acb)
        : glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0,
                            &gluster_finish_aiocb, acb);

    if (ret < 0) {
        /* Submission failed: the completion callback will not run, so the
         * acb must be released here. */
        qemu_aio_release(acb);
        return NULL;
    }

    return &acb->common;
}
/*
 * Coroutine-based read/write to the gluster volume.
 *
 * Issues the vectored request through libgfapi's async interface and then
 * yields the current coroutine; gluster_finish_aiocb is expected to record
 * the outcome in acb->ret and re-enter acb->coroutine, at which point the
 * result is harvested here.
 *
 * Returns 0 on success, or a negative errno value if submission or the
 * I/O itself failed.
 */
static coroutine_fn int qemu_gluster_co_rw(BlockDriverState *bs,
                                           int64_t sector_num,
                                           int nb_sectors,
                                           QEMUIOVector *qiov, int write)
{
    BDRVGlusterState *s = bs->opaque;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    off_t offset = sector_num * BDRV_SECTOR_SIZE;
    int ret;

    acb->size = nb_sectors * BDRV_SECTOR_SIZE;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();   /* re-entered on completion */

    if (write) {
        ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
                                 &gluster_finish_aiocb, acb);
    } else {
        ret = glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0,
                                &gluster_finish_aiocb, acb);
    }

    if (ret < 0) {
        /* Submission failed; libgfapi reports the cause via errno. */
        ret = -errno;
    } else {
        /* Sleep until the completion callback re-enters this coroutine,
         * then pick up the final status it stored. */
        qemu_coroutine_yield();
        ret = acb->ret;
    }

    g_slice_free(GlusterAIOCB, acb);
    return ret;
}