/*
 * Read from a raw image: the raw format adds no translation of its own,
 * so the request is forwarded sector-for-sector to the underlying file.
 *
 * Returns the result of the underlying bdrv_co_readv() call (0 on
 * success, negative errno on failure).
 */
static int coroutine_fn raw_co_readv(BlockDriverState *bs, int64_t sector_num,
                                     int nb_sectors, QEMUIOVector *qiov)
{
    return bdrv_co_readv(bs->file, sector_num, nb_sectors, qiov);
}
/*
 * Read from a raw image: emit a blkdebug event for the I/O, then forward
 * the request unchanged to the backing file's BlockDriverState — the raw
 * format performs no translation.
 *
 * Returns the result of the underlying bdrv_co_readv() call (0 on
 * success, negative errno on failure).
 */
static int coroutine_fn raw_co_readv(BlockDriverState *bs, int64_t sector_num,
                                     int nb_sectors, QEMUIOVector *qiov)
{
    /* Let blkdebug rules (fault injection, tracing) see this read. */
    BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);

    return bdrv_co_readv(bs->file->bs, sector_num, nb_sectors, qiov);
}
/*
 * Read and decrypt a range of sectors from an encrypted image.
 *
 * Guest-visible sectors are read from the underlying file shifted past
 * the crypto header (payload_offset), decrypted through the qcrypto
 * block layer, and copied out into the caller's qiov.  Work proceeds in
 * chunks of at most BLOCK_CRYPTO_MAX_SECTORS so the bounce buffer stays
 * bounded.
 *
 * Returns 0 on success, a negative errno on failure.
 */
static coroutine_fn int
block_crypto_co_readv(BlockDriverState *bs, int64_t sector_num,
                      int remaining_sectors, QEMUIOVector *qiov)
{
    BlockCrypto *crypto = bs->opaque;
    QEMUIOVector bounce_qiov;
    uint8_t *bounce_buf = NULL;
    uint64_t done_bytes = 0;
    int ret = 0;
    /* Guest sector 0 lives immediately after the crypto header. */
    size_t payload_offset =
        qcrypto_block_get_payload_offset(crypto->block) / 512;

    qemu_iovec_init(&bounce_qiov, qiov->niov);

    /*
     * Decryption needs a linear memory region per chunk, so read into a
     * bounce buffer first.  XXX optimize so we avoid the bounce buffer
     * in case that qiov->niov == 1.
     */
    bounce_buf = qemu_try_blockalign(bs->file->bs,
                                     MIN(BLOCK_CRYPTO_MAX_SECTORS * 512,
                                         qiov->size));
    if (!bounce_buf) {
        ret = -ENOMEM;
        goto cleanup;
    }

    while (remaining_sectors) {
        int chunk_sectors = MIN(remaining_sectors, BLOCK_CRYPTO_MAX_SECTORS);

        qemu_iovec_reset(&bounce_qiov);
        qemu_iovec_add(&bounce_qiov, bounce_buf, chunk_sectors * 512);

        /* Fetch the ciphertext for this chunk from the protocol layer. */
        ret = bdrv_co_readv(bs->file, payload_offset + sector_num,
                            chunk_sectors, &bounce_qiov);
        if (ret < 0) {
            goto cleanup;
        }

        /* Decrypt in place; sector_num keys the per-sector IV. */
        if (qcrypto_block_decrypt(crypto->block, sector_num, bounce_buf,
                                  chunk_sectors * 512, NULL) < 0) {
            ret = -EIO;
            goto cleanup;
        }

        qemu_iovec_from_buf(qiov, done_bytes, bounce_buf,
                            chunk_sectors * 512);

        remaining_sectors -= chunk_sectors;
        sector_num += chunk_sectors;
        done_bytes += chunk_sectors * 512;
    }

 cleanup:
    qemu_iovec_destroy(&bounce_qiov);
    qemu_vfree(bounce_buf);

    return ret;
}