static int nbd_co_send_request(BlockDriverState *bs,
                               struct nbd_request *request,
                               QEMUIOVector *qiov, int offset)
{
    NbdClientSession *s = nbd_get_client_session(bs);
    AioContext *aio_context;
    int rc, ret, i;

    qemu_co_mutex_lock(&s->send_mutex);

    /* Claim a free handle slot for this coroutine. */
    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (s->recv_coroutine[i] == NULL) {
            s->recv_coroutine[i] = qemu_coroutine_self();
            break;
        }
    }

    g_assert(qemu_in_coroutine());
    assert(i < MAX_NBD_REQUESTS);
    request->handle = INDEX_TO_HANDLE(s, i);

    if (!s->ioc) {
        qemu_co_mutex_unlock(&s->send_mutex);
        return -EPIPE;
    }

    s->send_coroutine = qemu_coroutine_self();
    aio_context = bdrv_get_aio_context(bs);

    aio_set_fd_handler(aio_context, s->sioc->fd, false,
                       nbd_reply_ready, nbd_restart_write, bs);
    if (qiov) {
        /* Cork the channel so header and payload go out together. */
        qio_channel_set_cork(s->ioc, true);
        rc = nbd_send_request(s->ioc, request);
        if (rc >= 0) {
            ret = nbd_wr_syncv(s->ioc, qiov->iov, qiov->niov, offset,
                               request->len, 0);
            if (ret != request->len) {
                rc = -EIO;
            }
        }
        qio_channel_set_cork(s->ioc, false);
    } else {
        rc = nbd_send_request(s->ioc, request);
    }
    aio_set_fd_handler(aio_context, s->sioc->fd, false,
                       nbd_reply_ready, NULL, bs);
    s->send_coroutine = NULL;
    qemu_co_mutex_unlock(&s->send_mutex);
    return rc;
}
static ssize_t nbd_co_send_reply(NBDRequest *req, struct nbd_reply *reply,
                                 int len)
{
    NBDClient *client = req->client;
    int csock = client->sock;
    ssize_t rc, ret;

    qemu_co_mutex_lock(&client->send_lock);
    qemu_set_fd_handler2(csock, nbd_can_read, nbd_read,
                         nbd_restart_write, client);
    client->send_coroutine = qemu_coroutine_self();

    if (!len) {
        rc = nbd_send_reply(csock, reply);
    } else {
        socket_set_cork(csock, 1);
        rc = nbd_send_reply(csock, reply);
        if (rc >= 0) {
            ret = qemu_co_send(csock, req->data, len);
            if (ret != len) {
                rc = -EIO;
            }
        }
        socket_set_cork(csock, 0);
    }

    client->send_coroutine = NULL;
    qemu_set_fd_handler2(csock, nbd_can_read, nbd_read, NULL, client);
    qemu_co_mutex_unlock(&client->send_lock);
    return rc;
}
static int coroutine_fn
cloop_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
                QEMUIOVector *qiov, int flags)
{
    BDRVCloopState *s = bs->opaque;
    uint64_t sector_num = offset >> BDRV_SECTOR_BITS;
    int nb_sectors = bytes >> BDRV_SECTOR_BITS;
    int ret, i;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);

    qemu_co_mutex_lock(&s->lock);

    for (i = 0; i < nb_sectors; i++) {
        void *data;
        uint32_t sector_offset_in_block =
            (sector_num + i) % s->sectors_per_block;
        uint32_t block_num = (sector_num + i) / s->sectors_per_block;

        if (cloop_read_block(bs, block_num) != 0) {
            ret = -EIO;
            goto fail;
        }
        data = s->uncompressed_block + sector_offset_in_block * 512;
        qemu_iovec_from_buf(qiov, i * 512, data, 512);
    }
    ret = 0;

fail:
    qemu_co_mutex_unlock(&s->lock);
    return ret;
}
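/* For context: a .bdrv_co_preadv callback like the one above is wired into
 * the format's BlockDriver table and registered at startup. A sketch of how
 * block/cloop.c does this (field list abridged; treat it as an
 * approximation of one QEMU vintage rather than a verbatim copy): */
static BlockDriver bdrv_cloop = {
    .format_name    = "cloop",
    .instance_size  = sizeof(BDRVCloopState),
    .bdrv_probe     = cloop_probe,
    .bdrv_open      = cloop_open,
    .bdrv_co_preadv = cloop_co_preadv,
    .bdrv_close     = cloop_close,
};

static void bdrv_cloop_init(void)
{
    bdrv_register(&bdrv_cloop);
}

block_init(bdrv_cloop_init);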
static int nbd_co_send_request(NbdClientSession *s,
                               struct nbd_request *request,
                               QEMUIOVector *qiov, int offset)
{
    int rc, ret;

    qemu_co_mutex_lock(&s->send_mutex);
    s->send_coroutine = qemu_coroutine_self();
    qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, nbd_restart_write, s);
    if (qiov) {
        if (!s->is_unix) {
            socket_set_cork(s->sock, 1);
        }
        rc = nbd_send_request(s->sock, request);
        if (rc >= 0) {
            ret = qemu_co_sendv(s->sock, qiov->iov, qiov->niov,
                                offset, request->len);
            if (ret != request->len) {
                rc = -EIO;
            }
        }
        if (!s->is_unix) {
            socket_set_cork(s->sock, 0);
        }
    } else {
        rc = nbd_send_request(s->sock, request);
    }
    qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, NULL, s);
    s->send_coroutine = NULL;
    qemu_co_mutex_unlock(&s->send_mutex);
    return rc;
}
static int nbd_co_send_request(BlockDriverState *bs, NBDRequest *request,
                               QEMUIOVector *qiov)
{
    NBDClientSession *s = nbd_get_client_session(bs);
    int rc, ret, i;

    qemu_co_mutex_lock(&s->send_mutex);
    /* Wait for a free request slot. */
    while (s->in_flight == MAX_NBD_REQUESTS) {
        qemu_co_queue_wait(&s->free_sema, &s->send_mutex);
    }
    s->in_flight++;

    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (s->recv_coroutine[i] == NULL) {
            s->recv_coroutine[i] = qemu_coroutine_self();
            break;
        }
    }

    g_assert(qemu_in_coroutine());
    assert(i < MAX_NBD_REQUESTS);
    request->handle = INDEX_TO_HANDLE(s, i);

    if (!s->ioc) {
        qemu_co_mutex_unlock(&s->send_mutex);
        return -EPIPE;
    }

    if (qiov) {
        qio_channel_set_cork(s->ioc, true);
        rc = nbd_send_request(s->ioc, request);
        if (rc >= 0) {
            ret = nbd_rwv(s->ioc, qiov->iov, qiov->niov, request->len,
                          false, NULL);
            if (ret != request->len) {
                rc = -EIO;
            }
        }
        qio_channel_set_cork(s->ioc, false);
    } else {
        rc = nbd_send_request(s->ioc, request);
    }
    qemu_co_mutex_unlock(&s->send_mutex);
    return rc;
}
static void nbd_coroutine_end(BDRVNBDState *s, struct nbd_request *request)
{
    int i = HANDLE_TO_INDEX(s, request->handle);

    s->recv_coroutine[i] = NULL;

    /* free_sema is a CoMutex used as a poor man's semaphore: it is held
     * while the request slots are full, so dropping below the limit here
     * lets the next blocked sender proceed. */
    if (s->in_flight-- == MAX_NBD_REQUESTS) {
        qemu_co_mutex_unlock(&s->free_sema);
    }
}
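/* The lock side of this poor man's semaphore lived in the matching
 * nbd_coroutine_start() of the same QEMU vintage. A sketch, reconstructed
 * from that era's block/nbd.c (treat the details as approximate): */
static void nbd_coroutine_start(BDRVNBDState *s, struct nbd_request *request)
{
    /* Poor man's semaphore: free_sema is locked when no further request
     * can be accepted, and unlocked by nbd_coroutine_end() above after a
     * reply is received. */
    if (s->in_flight >= MAX_NBD_REQUESTS - 1) {
        qemu_co_mutex_lock(&s->free_sema);
        assert(s->in_flight < MAX_NBD_REQUESTS);
    }
    s->in_flight++;
}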
static coroutine_fn int dmg_co_read(BlockDriverState *bs, int64_t sector_num,
                                    uint8_t *buf, int nb_sectors)
{
    int ret;
    BDRVDMGState *s = bs->opaque;

    qemu_co_mutex_lock(&s->lock);
    ret = dmg_read(bs, sector_num, buf, nb_sectors);
    qemu_co_mutex_unlock(&s->lock);
    return ret;
}
static coroutine_fn int vpc_co_write(BlockDriverState *bs, int64_t sector_num,
                                     const uint8_t *buf, int nb_sectors)
{
    int ret;
    BDRVVPCState *s = bs->opaque;

    qemu_co_mutex_lock(&s->lock);
    ret = vpc_write(bs, sector_num, buf, nb_sectors);
    qemu_co_mutex_unlock(&s->lock);
    return ret;
}
static coroutine_fn int ssh_co_flush(BlockDriverState *bs)
{
    BDRVSSHState *s = bs->opaque;
    int ret;

    qemu_co_mutex_lock(&s->lock);
    ret = ssh_flush(s, bs);
    qemu_co_mutex_unlock(&s->lock);
    return ret;
}
static void coroutine_fn mutex_fn(void *opaque)
{
    CoMutex *m = opaque;

    /* "locked" and "done" are globals shared with the test harness. */
    qemu_co_mutex_lock(m);
    assert(!locked);
    locked = true;
    qemu_coroutine_yield();
    locked = false;
    qemu_co_mutex_unlock(m);
    done++;
}
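/* A harness driving two mutex_fn coroutines might look like the sketch
 * below. It is loosely modeled on QEMU's tests/test-coroutine.c; the helper
 * name and exact assertion points are assumptions, and the wake-up ordering
 * relies on CoMutex handing the lock to the queued waiter on unlock. */
static void do_test_co_mutex(void)
{
    CoMutex m;
    Coroutine *c1, *c2;

    qemu_co_mutex_init(&m);
    c1 = qemu_coroutine_create(mutex_fn, &m);
    c2 = qemu_coroutine_create(mutex_fn, &m);

    done = 0;
    qemu_coroutine_enter(c1);   /* c1 takes the mutex, then yields */
    g_assert(locked);
    qemu_coroutine_enter(c2);   /* c2 blocks on the contended mutex */

    qemu_coroutine_enter(c1);   /* c1 unlocks; c2 is woken and locks */
    g_assert(locked);
    qemu_coroutine_enter(c2);   /* c2 unlocks and finishes */
    g_assert(done == 2);
}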
static coroutine_fn int ssh_co_writev(BlockDriverState *bs,
                                      int64_t sector_num,
                                      int nb_sectors,
                                      QEMUIOVector *qiov)
{
    BDRVSSHState *s = bs->opaque;
    int ret;

    qemu_co_mutex_lock(&s->lock);
    ret = ssh_write(s, bs, sector_num * BDRV_SECTOR_SIZE,
                    nb_sectors * BDRV_SECTOR_SIZE, qiov);
    qemu_co_mutex_unlock(&s->lock);
    return ret;
}
static int nbd_co_send_request(BlockDriverState *bs,
                               struct nbd_request *request,
                               QEMUIOVector *qiov, int offset)
{
    NbdClientSession *s = nbd_get_client_session(bs);
    AioContext *aio_context;
    int rc, ret, i;

    qemu_co_mutex_lock(&s->send_mutex);

    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (s->recv_coroutine[i] == NULL) {
            s->recv_coroutine[i] = qemu_coroutine_self();
            break;
        }
    }

    assert(i < MAX_NBD_REQUESTS);
    request->handle = INDEX_TO_HANDLE(s, i);
    s->send_coroutine = qemu_coroutine_self();
    aio_context = bdrv_get_aio_context(bs);

    aio_set_fd_handler(aio_context, s->sock, AIO_CLIENT_PROTOCOL,
                       nbd_reply_ready, nbd_restart_write, bs);
    if (qiov) {
        if (!s->is_unix) {
            socket_set_cork(s->sock, 1);
        }
        rc = nbd_send_request(s->sock, request);
        if (rc >= 0) {
            ret = qemu_co_sendv(s->sock, qiov->iov, qiov->niov,
                                offset, request->len);
            if (ret != request->len) {
                rc = -EIO;
            }
        }
        if (!s->is_unix) {
            socket_set_cork(s->sock, 0);
        }
    } else {
        rc = nbd_send_request(s->sock, request);
    }
    aio_set_fd_handler(aio_context, s->sock, AIO_CLIENT_PROTOCOL,
                       nbd_reply_ready, NULL, bs);
    s->send_coroutine = NULL;
    qemu_co_mutex_unlock(&s->send_mutex);
    return rc;
}
static void nbd_coroutine_end(BlockDriverState *bs, NBDRequest *request)
{
    NBDClientSession *s = nbd_get_client_session(bs);
    int i = HANDLE_TO_INDEX(s, request->handle);

    s->recv_coroutine[i] = NULL;

    /* Kick the read_reply_co to get the next reply.  */
    if (s->read_reply_co) {
        aio_co_wake(s->read_reply_co);
    }

    qemu_co_mutex_lock(&s->send_mutex);
    s->in_flight--;
    qemu_co_queue_next(&s->free_sema);
    qemu_co_mutex_unlock(&s->send_mutex);
}
static void nbd_co_receive_reply(NBDClientSession *s,
                                 NBDRequest *request,
                                 NBDReply *reply,
                                 QEMUIOVector *qiov)
{
    int i = HANDLE_TO_INDEX(s, request->handle);

    /* Wait until we're woken up by nbd_read_reply_entry.  */
    s->requests[i].receiving = true;
    qemu_coroutine_yield();
    s->requests[i].receiving = false;
    *reply = s->reply;
    if (reply->handle != request->handle || !s->ioc || s->quit) {
        reply->error = EIO;
    } else {
        if (qiov && reply->error == 0) {
            assert(request->len == iov_size(qiov->iov, qiov->niov));
            if (qio_channel_readv_all(s->ioc, qiov->iov, qiov->niov,
                                      NULL) < 0) {
                reply->error = EIO;
                s->quit = true;
            }
        }

        /* Tell the read handler to read another header.  */
        s->reply.handle = 0;
    }

    s->requests[i].coroutine = NULL;

    /* Kick the read_reply_co to get the next reply.  */
    if (s->read_reply_co) {
        aio_co_wake(s->read_reply_co);
    }

    qemu_co_mutex_lock(&s->send_mutex);
    s->in_flight--;
    qemu_co_queue_next(&s->free_sema);
    qemu_co_mutex_unlock(&s->send_mutex);
}
static int coroutine_fn
dmg_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
              QEMUIOVector *qiov, int flags)
{
    BDRVDMGState *s = bs->opaque;
    uint64_t sector_num = offset >> BDRV_SECTOR_BITS;
    int nb_sectors = bytes >> BDRV_SECTOR_BITS;
    int ret, i;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);

    qemu_co_mutex_lock(&s->lock);

    for (i = 0; i < nb_sectors; i++) {
        uint32_t sector_offset_in_chunk;
        void *data;

        if (dmg_read_chunk(bs, sector_num + i) != 0) {
            ret = -EIO;
            goto fail;
        }
        /* Special case: current chunk is all zeroes. Do not perform a memcpy
         * as s->uncompressed_chunk may be too small to cover the large
         * all-zeroes section. dmg_read_chunk is called to find
         * s->current_chunk */
        if (s->types[s->current_chunk] == 2) { /* all zeroes block entry */
            qemu_iovec_memset(qiov, i * 512, 0, 512);
            continue;
        }
        sector_offset_in_chunk = sector_num + i - s->sectors[s->current_chunk];
        data = s->uncompressed_chunk + sector_offset_in_chunk * 512;
        qemu_iovec_from_buf(qiov, i * 512, data, 512);
    }
    ret = 0;

fail:
    qemu_co_mutex_unlock(&s->lock);
    return ret;
}
static int nbd_co_send_request(BDRVNBDState *s, struct nbd_request *request,
                               struct iovec *iov, int offset)
{
    int rc, ret;

    qemu_co_mutex_lock(&s->send_mutex);
    s->send_coroutine = qemu_coroutine_self();
    qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, nbd_restart_write,
                            nbd_have_request, s);
    rc = nbd_send_request(s->sock, request);
    if (rc >= 0 && iov) {
        ret = qemu_co_sendv(s->sock, iov, request->len, offset);
        if (ret != request->len) {
            /* Fall through to the cleanup below rather than returning
             * directly, which would leave send_mutex locked and the write
             * handler installed. */
            rc = -EIO;
        }
    }
    qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, NULL,
                            nbd_have_request, s);
    s->send_coroutine = NULL;
    qemu_co_mutex_unlock(&s->send_mutex);
    return rc;
}
static coroutine_fn int vhdx_co_readv(BlockDriverState *bs, int64_t sector_num,
                                      int nb_sectors, QEMUIOVector *qiov)
{
    BDRVVHDXState *s = bs->opaque;
    int ret = 0;
    VHDXSectorInfo sinfo;
    uint64_t bytes_done = 0;
    QEMUIOVector hd_qiov;

    qemu_iovec_init(&hd_qiov, qiov->niov);

    qemu_co_mutex_lock(&s->lock);

    while (nb_sectors > 0) {
        /* We are a differencing file, so we need to inspect the sector bitmap
         * to see if we have the data or not */
        if (s->params.data_bits & VHDX_PARAMS_HAS_PARENT) {
            /* not supported yet */
            ret = -ENOTSUP;
            goto exit;
        } else {
            vhdx_block_translate(s, sector_num, nb_sectors, &sinfo);

            qemu_iovec_reset(&hd_qiov);
            qemu_iovec_concat(&hd_qiov, qiov, bytes_done, sinfo.bytes_avail);

            /* check the payload block state */
            switch (s->bat[sinfo.bat_idx] & VHDX_BAT_STATE_BIT_MASK) {
            case PAYLOAD_BLOCK_NOT_PRESENT: /* fall through */
            case PAYLOAD_BLOCK_UNDEFINED:   /* fall through */
            case PAYLOAD_BLOCK_UNMAPPED:    /* fall through */
            case PAYLOAD_BLOCK_ZERO:
                /* return zero */
                qemu_iovec_memset(&hd_qiov, 0, 0, sinfo.bytes_avail);
                break;
            case PAYLOAD_BLOCK_FULL_PRESENT:
                qemu_co_mutex_unlock(&s->lock);
                ret = bdrv_co_readv(bs->file,
                                    sinfo.file_offset >> BDRV_SECTOR_BITS,
                                    sinfo.sectors_avail, &hd_qiov);
                qemu_co_mutex_lock(&s->lock);
                if (ret < 0) {
                    goto exit;
                }
                break;
            case PAYLOAD_BLOCK_PARTIALLY_PRESENT:
                /* we don't yet support difference files, fall through
                 * to error */
            default:
                ret = -EIO;
                goto exit;
            }
            nb_sectors -= sinfo.sectors_avail;
            sector_num += sinfo.sectors_avail;
            bytes_done += sinfo.bytes_avail;
        }
    }
    ret = 0;

exit:
    qemu_co_mutex_unlock(&s->lock);
    qemu_iovec_destroy(&hd_qiov);
    return ret;
}
static int nbd_co_send_request(BlockDriverState *bs, NBDRequest *request,
                               QEMUIOVector *qiov)
{
    NBDClientSession *s = nbd_get_client_session(bs);
    int rc, i;

    qemu_co_mutex_lock(&s->send_mutex);
    /* Wait for a free request slot. */
    while (s->in_flight == MAX_NBD_REQUESTS) {
        qemu_co_queue_wait(&s->free_sema, &s->send_mutex);
    }
    s->in_flight++;

    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (s->requests[i].coroutine == NULL) {
            break;
        }
    }

    g_assert(qemu_in_coroutine());
    assert(i < MAX_NBD_REQUESTS);

    s->requests[i].coroutine = qemu_coroutine_self();
    s->requests[i].receiving = false;

    request->handle = INDEX_TO_HANDLE(s, i);

    if (s->quit) {
        rc = -EIO;
        goto err;
    }
    if (!s->ioc) {
        rc = -EPIPE;
        goto err;
    }

    if (qiov) {
        qio_channel_set_cork(s->ioc, true);
        rc = nbd_send_request(s->ioc, request);
        if (rc >= 0 && !s->quit) {
            assert(request->len == iov_size(qiov->iov, qiov->niov));
            if (qio_channel_writev_all(s->ioc, qiov->iov, qiov->niov,
                                       NULL) < 0) {
                rc = -EIO;
            }
        }
        qio_channel_set_cork(s->ioc, false);
    } else {
        rc = nbd_send_request(s->ioc, request);
    }

err:
    /* On failure, poison the connection and release the slot ourselves,
     * since no reply will ever arrive to do it for us. */
    if (rc < 0) {
        s->quit = true;
        s->requests[i].coroutine = NULL;
        s->in_flight--;
        qemu_co_queue_next(&s->free_sema);
    }
    qemu_co_mutex_unlock(&s->send_mutex);
    return rc;
}
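/* For orientation, this newest variant pairs nbd_co_send_request() with the
 * nbd_co_receive_reply() shown earlier. A minimal sketch of such a caller,
 * loosely modeled on nbd_client_co_preadv(); the wrapper name and error
 * handling here are assumptions, not a verbatim copy: */
static int coroutine_fn nbd_co_request(BlockDriverState *bs,
                                       NBDRequest *request,
                                       QEMUIOVector *read_qiov)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    NBDReply reply;
    int ret;

    ret = nbd_co_send_request(bs, request, NULL);
    if (ret < 0) {
        reply.error = -ret;
    } else {
        /* Yields until nbd_read_reply_entry wakes us with our reply;
         * also releases the in-flight slot. */
        nbd_co_receive_reply(client, request, &reply, read_qiov);
    }
    return -reply.error;
}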