/*
 * Completes an AIO request (calls the callback and frees the ACB).
 */
static void win32_aio_process_completion(QEMUWin32AIOState *s,
                                         QEMUWin32AIOCB *waiocb, DWORD count)
{
    int ret;
    s->count--;

    if (waiocb->ov.Internal != 0) {
        ret = -EIO;
    } else {
        ret = 0;
        if (count < waiocb->nbytes) {
            /* Short reads mean EOF, pad with zeros. */
            if (waiocb->is_read) {
                qemu_iovec_memset(waiocb->qiov, count, 0,
                                  waiocb->qiov->size - count);
            } else {
                ret = -EINVAL;
            }
        }
    }

    if (!waiocb->is_linear) {
        if (ret == 0 && waiocb->is_read) {
            QEMUIOVector *qiov = waiocb->qiov;
            iov_from_buf(qiov->iov, qiov->niov, 0, waiocb->buf, qiov->size);
        }
        qemu_vfree(waiocb->buf);
    }

    waiocb->common.cb(waiocb->common.opaque, ret);
    qemu_aio_release(waiocb);
}
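/*
 * Illustrative sketch (not QEMU code): the zero-padding step above relies on
 * qemu_iovec_memset() to fill the tail of a scatter/gather list. Assuming a
 * plain POSIX iovec array, the equivalent walk looks like this; the function
 * name is hypothetical.
 */
#include <string.h>
#include <sys/uio.h>

static void iov_zero_from(struct iovec *iov, int niov, size_t offset)
{
    for (int i = 0; i < niov; i++) {
        size_t len = iov[i].iov_len;
        if (offset >= len) {
            offset -= len;      /* this element was fully transferred */
            continue;
        }
        /* zero the tail of this element, then every later element */
        memset((char *)iov[i].iov_base + offset, 0, len - offset);
        offset = 0;
    }
}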
static coroutine_fn int ssh_read(BDRVSSHState *s, BlockDriverState *bs,
                                 int64_t offset, size_t size,
                                 QEMUIOVector *qiov)
{
    ssize_t r;
    size_t got;
    char *buf, *end_of_vec;
    struct iovec *i;

    DPRINTF("offset=%" PRIi64 " size=%zu", offset, size);

    ssh_seek(s, offset, SSH_SEEK_READ);

    /* This keeps track of the current iovec element ('i'), where we
     * will write to next ('buf'), and the end of the current iovec
     * ('end_of_vec').
     */
    i = &qiov->iov[0];
    buf = i->iov_base;
    end_of_vec = i->iov_base + i->iov_len;

    /* libssh2 has a hard-coded limit of 2000 bytes per request,
     * although it will also do readahead behind our backs.  Therefore
     * we may have to do repeated reads here until we have read 'size'
     * bytes.
     */
    for (got = 0; got < size; ) {
    again:
        DPRINTF("sftp_read buf=%p size=%zu", buf, end_of_vec - buf);
        r = libssh2_sftp_read(s->sftp_handle, buf, end_of_vec - buf);
        DPRINTF("sftp_read returned %zd", r);

        if (r == LIBSSH2_ERROR_EAGAIN || r == LIBSSH2_ERROR_TIMEOUT) {
            co_yield(s, bs);
            goto again;
        }
        if (r < 0) {
            sftp_error_report(s, "read failed");
            s->offset = -1;
            return -EIO;
        }
        if (r == 0) {
            /* EOF: Short read so pad the buffer with zeroes and return it. */
            qemu_iovec_memset(qiov, got, 0, size - got);
            return 0;
        }

        got += r;
        buf += r;
        s->offset += r;
        if (buf >= end_of_vec && got < size) {
            i++;
            buf = i->iov_base;
            end_of_vec = i->iov_base + i->iov_len;
        }
    }

    return 0;
}
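/*
 * Illustrative sketch (not the driver itself): the loop above repeatedly
 * calls a short-count read API, walking the iovec by hand and yielding on
 * EAGAIN. A generic version of that pattern, with a hypothetical reader
 * callback standing in for libssh2_sftp_read(), could look like this. The
 * iovec list is assumed to cover at least 'size' bytes.
 */
#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <sys/uio.h>

typedef ssize_t (*read_fn)(void *ctx, char *buf, size_t len);

static ssize_t read_full_iov(read_fn reader, void *ctx,
                             struct iovec *iov, int niov, size_t size)
{
    size_t got = 0;
    int i = 0;
    char *buf = iov[0].iov_base;
    char *end_of_vec = buf + iov[0].iov_len;

    while (got < size) {
        ssize_t r = reader(ctx, buf, (size_t)(end_of_vec - buf));
        if (r < 0) {
            if (errno == EAGAIN) {
                continue;       /* a coroutine driver would yield here */
            }
            return -1;          /* hard error */
        }
        if (r == 0) {
            break;              /* EOF: caller pads the remainder with zeros */
        }
        got += r;
        buf += r;
        if (buf >= end_of_vec && got < size) {
            i++;                /* advance to the next iovec element */
            assert(i < niov);
            buf = iov[i].iov_base;
            end_of_vec = buf + iov[i].iov_len;
        }
    }
    return got;
}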
static int coroutine_fn
dmg_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
              QEMUIOVector *qiov, int flags)
{
    BDRVDMGState *s = bs->opaque;
    uint64_t sector_num = offset >> BDRV_SECTOR_BITS;
    int nb_sectors = bytes >> BDRV_SECTOR_BITS;
    int ret, i;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);

    qemu_co_mutex_lock(&s->lock);

    for (i = 0; i < nb_sectors; i++) {
        uint32_t sector_offset_in_chunk;
        void *data;

        if (dmg_read_chunk(bs, sector_num + i) != 0) {
            ret = -EIO;
            goto fail;
        }
        /* Special case: current chunk is all zeroes. Do not perform a memcpy
         * as s->uncompressed_chunk may be too small to cover the large
         * all-zeroes section. dmg_read_chunk is called to find
         * s->current_chunk */
        if (s->types[s->current_chunk] == 2) { /* all zeroes block entry */
            qemu_iovec_memset(qiov, i * 512, 0, 512);
            continue;
        }
        sector_offset_in_chunk = sector_num + i - s->sectors[s->current_chunk];
        data = s->uncompressed_chunk + sector_offset_in_chunk * 512;
        qemu_iovec_from_buf(qiov, i * 512, data, 512);
    }

    ret = 0;
fail:
    qemu_co_mutex_unlock(&s->lock);
    return ret;
}
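/*
 * Sketch of the offset arithmetic above (illustrative names only): a chunk
 * covers a contiguous run of sectors, so a guest sector maps to a 512-byte
 * slice of the uncompressed chunk buffer.
 */
#include <stdint.h>

static inline const char *sector_in_chunk(const char *uncompressed_chunk,
                                          uint64_t chunk_first_sector,
                                          uint64_t guest_sector)
{
    /* same math as sector_offset_in_chunk * 512 in dmg_co_preadv() */
    return uncompressed_chunk + (guest_sector - chunk_first_sector) * 512;
}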
/*
 * Completes an AIO request (calls the callback and frees the ACB).
 */
static void qemu_laio_process_completion(struct qemu_laio_state *s,
                                         struct qemu_laiocb *laiocb)
{
    int ret;

    ret = laiocb->ret;
    if (ret != -ECANCELED) {
        if (ret == laiocb->nbytes) {
            ret = 0;
        } else if (ret >= 0) {
            /* Short reads mean EOF, pad with zeros. */
            if (laiocb->is_read) {
                qemu_iovec_memset(laiocb->qiov, ret, 0,
                                  laiocb->qiov->size - ret);
            } else {
                ret = -EINVAL;
            }
        }
    }

    laiocb->common.cb(laiocb->common.opaque, ret);
    qemu_aio_unref(laiocb);
}
static coroutine_fn int vhdx_co_readv(BlockDriverState *bs, int64_t sector_num,
                                      int nb_sectors, QEMUIOVector *qiov)
{
    BDRVVHDXState *s = bs->opaque;
    int ret = 0;
    VHDXSectorInfo sinfo;
    uint64_t bytes_done = 0;
    QEMUIOVector hd_qiov;

    qemu_iovec_init(&hd_qiov, qiov->niov);

    qemu_co_mutex_lock(&s->lock);

    while (nb_sectors > 0) {
        /* We are a differencing file, so we need to inspect the sector bitmap
         * to see if we have the data or not */
        if (s->params.data_bits & VHDX_PARAMS_HAS_PARENT) {
            /* not supported yet */
            ret = -ENOTSUP;
            goto exit;
        } else {
            vhdx_block_translate(s, sector_num, nb_sectors, &sinfo);

            qemu_iovec_reset(&hd_qiov);
            qemu_iovec_concat(&hd_qiov, qiov, bytes_done, sinfo.bytes_avail);

            /* check the payload block state */
            switch (s->bat[sinfo.bat_idx] & VHDX_BAT_STATE_BIT_MASK) {
            case PAYLOAD_BLOCK_NOT_PRESENT: /* fall through */
            case PAYLOAD_BLOCK_UNDEFINED:   /* fall through */
            case PAYLOAD_BLOCK_UNMAPPED:    /* fall through */
            case PAYLOAD_BLOCK_ZERO:
                /* return zero */
                qemu_iovec_memset(&hd_qiov, 0, 0, sinfo.bytes_avail);
                break;
            case PAYLOAD_BLOCK_FULL_PRESENT:
                qemu_co_mutex_unlock(&s->lock);
                ret = bdrv_co_readv(bs->file,
                                    sinfo.file_offset >> BDRV_SECTOR_BITS,
                                    sinfo.sectors_avail, &hd_qiov);
                qemu_co_mutex_lock(&s->lock);
                if (ret < 0) {
                    goto exit;
                }
                break;
            case PAYLOAD_BLOCK_PARTIALLY_PRESENT:
                /* we don't yet support difference files, fall through
                 * to error */
            default:
                ret = -EIO;
                goto exit;
                break;
            }
            nb_sectors -= sinfo.sectors_avail;
            sector_num += sinfo.sectors_avail;
            bytes_done += sinfo.bytes_avail;
        }
    }
    ret = 0;

exit:
    qemu_co_mutex_unlock(&s->lock);
    qemu_iovec_destroy(&hd_qiov);
    return ret;
}
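/*
 * Minimal sketch (not vhdx.c) of the same dispatch: a block is served as
 * zeros or read from the image file depending on its allocation state.
 * The enum values and the read_block() helper are assumptions made for
 * this example.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

enum block_state { BLOCK_UNALLOCATED, BLOCK_ZERO, BLOCK_PRESENT };

static int read_block(FILE *image, enum block_state state,
                      uint64_t file_offset, char *out, size_t len)
{
    switch (state) {
    case BLOCK_UNALLOCATED:     /* fall through: both read back as zeros */
    case BLOCK_ZERO:
        memset(out, 0, len);
        return 0;
    case BLOCK_PRESENT:
        if (fseeko(image, (off_t)file_offset, SEEK_SET) != 0 ||
            fread(out, 1, len, image) != len) {
            return -1;          /* I/O error */
        }
        return 0;
    default:
        return -1;              /* unknown or unsupported state */
    }
}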