static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (QLIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= max_requests) {
            goto out;
        }
        /* allocate new struct */
        ioreq = g_malloc0(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
    } else {
        /* get one from freelist */
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_reset(&ioreq->v);
    }
    QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}
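/*
 * For context, a hedged sketch of the release path that pairs with
 * ioreq_start() above: it returns a finished request to the freelist so the
 * next ioreq_start() call can recycle it (and its iovec allocation) instead
 * of calling g_malloc0()/qemu_iovec_init() again. The function name and the
 * exact bookkeeping are assumptions for illustration, not taken from this
 * listing; ioreq_reset() appears later in the listing and clears
 * ioreq->blkdev, hence the save/restore.
 */
static void ioreq_release_sketch(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);                 /* leave the inflight list */
    ioreq_reset(ioreq);                        /* also resets ioreq->v */
    ioreq->blkdev = blkdev;                    /* reset cleared it */
    QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    blkdev->requests_inflight--;
}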
void usb_packet_setup(USBPacket *p, int pid, uint8_t addr, uint8_t ep)
{
    p->pid = pid;
    p->devaddr = addr;
    p->devep = ep;
    p->result = 0;
    qemu_iovec_reset(&p->iov);
}
void qemu_iovec_destroy(QEMUIOVector *qiov)
{
    assert(qiov->nalloc != -1);

    qemu_iovec_reset(qiov);
    g_free(qiov->iov);
    qiov->nalloc = 0;
    qiov->iov = NULL;
}
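/*
 * A minimal sketch of the QEMUIOVector lifecycle these snippets rely on:
 * init once, add segments, reset to reuse the existing allocation, destroy
 * to free it. qiov_lifecycle_example() and its parameters are hypothetical;
 * the qemu_iovec_* calls are the ones used throughout this listing.
 */
static void qiov_lifecycle_example(void *buf, size_t len)
{
    QEMUIOVector qiov;

    qemu_iovec_init(&qiov, 1);          /* hint: room for one segment */
    qemu_iovec_add(&qiov, buf, len);

    /* ... submit &qiov to some I/O helper and wait for completion ... */

    qemu_iovec_reset(&qiov);            /* niov and size back to 0; the
                                         * iov[] array itself is kept */
    qemu_iovec_add(&qiov, buf, len);    /* reuse without reallocating */

    qemu_iovec_destroy(&qiov);          /* frees iov[], see above */
}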
void usb_packet_setup(USBPacket *p, int pid, USBEndpoint *ep)
{
    assert(!usb_packet_is_inflight(p));
    p->pid = pid;
    p->ep = ep;
    p->result = 0;
    qemu_iovec_reset(&p->iov);
    usb_packet_set_state(p, USB_PACKET_SETUP);
}
static void dma_blk_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        dma_memory_unmap(dbs->sg->as, dbs->iov.iov[i].iov_base,
                         dbs->iov.iov[i].iov_len, dbs->dir,
                         dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);
}
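/*
 * The map side that dma_blk_unmap() above undoes, sketched with the
 * dma_memory_map() helper from the same era of QEMU's DMA API as the
 * snippet above (no MemTxAttrs parameter). Error handling and the caller's
 * loop over the scatter/gather list are elided, and the function name is
 * hypothetical.
 */
static void dma_blk_map_one_sketch(DMAAIOCB *dbs, dma_addr_t addr,
                                   dma_addr_t len)
{
    void *mem = dma_memory_map(dbs->sg->as, addr, &len, dbs->dir);

    if (mem) {
        /* dma_memory_map() may shorten len; add whatever was mapped */
        qemu_iovec_add(&dbs->iov, mem, len);
    }
}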
static void dma_bdrv_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        cpu_physical_memory_unmap(dbs->iov.iov[i].iov_base,
                                  dbs->iov.iov[i].iov_len, !dbs->to_dev,
                                  dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);
}
void usb_packet_setup(USBPacket *p, int pid, USBEndpoint *ep, uint64_t id)
{
    assert(!usb_packet_is_inflight(p));
    assert(p->iov.iov != NULL);
    p->id = id;
    p->pid = pid;
    p->ep = ep;
    p->result = 0;
    p->parameter = 0;
    qemu_iovec_reset(&p->iov);
    usb_packet_set_state(p, USB_PACKET_SETUP);
}
static void dma_bdrv_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    target_phys_addr_t cur_addr, cur_len;
    void *mem;

    /* unmap the chunk just completed and empty the iovec for the next one */
    dbs->acb = NULL;
    dbs->sector_num += dbs->iov.size / 512;
    dma_bdrv_unmap(dbs);
    qemu_iovec_reset(&dbs->iov);

    /* all scatter/gather entries done, or I/O error: complete the request */
    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dbs->common.cb(dbs->common.opaque, ret);
        qemu_iovec_destroy(&dbs->iov);
        qemu_aio_release(dbs);
        return;
    }

    /* map as many scatter/gather entries as possible into the iovec */
    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = cpu_physical_memory_map(cur_addr, &cur_len, !dbs->is_write);
        if (!mem) {
            break;
        }
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    /* nothing could be mapped; retry once a mapping is released */
    if (dbs->iov.size == 0) {
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    if (dbs->is_write) {
        dbs->acb = bdrv_aio_writev(dbs->bs, dbs->sector_num, &dbs->iov,
                                   dbs->iov.size / 512, dma_bdrv_cb, dbs);
    } else {
        dbs->acb = bdrv_aio_readv(dbs->bs, dbs->sector_num, &dbs->iov,
                                  dbs->iov.size / 512, dma_bdrv_cb, dbs);
    }
    if (!dbs->acb) {
        dma_bdrv_unmap(dbs);
        qemu_iovec_destroy(&dbs->iov);
        return;
    }
}
void usb_packet_setup(USBPacket *p, int pid, USBEndpoint *ep,
                      uint64_t id, bool short_not_ok, bool int_req)
{
    assert(!usb_packet_is_inflight(p));
    assert(p->iov.iov != NULL);
    p->id = id;
    p->pid = pid;
    p->ep = ep;
    p->result = 0;
    p->parameter = 0;
    p->short_not_ok = short_not_ok;
    p->int_req = int_req;
    p->combined = NULL;
    qemu_iovec_reset(&p->iov);
    usb_packet_set_state(p, USB_PACKET_SETUP);
}
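/*
 * Sketch of a typical caller of the final usb_packet_setup() variant above,
 * assuming QEMU's usb_packet_addbuf() helper (a thin wrapper around
 * qemu_iovec_add() on p->iov); submit_bulk_out_sketch() itself is
 * hypothetical.
 */
static void submit_bulk_out_sketch(USBPacket *p, USBEndpoint *ep,
                                   uint64_t id, void *data, size_t len)
{
    usb_packet_setup(p, USB_TOKEN_OUT, ep, id, false, false);
    usb_packet_addbuf(p, data, len);    /* fills the freshly reset iovec */
    /* ... hand the packet to the device, e.g. usb_handle_packet() ... */
}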
static void reset_request(XenBlockRequest *request)
{
    memset(&request->req, 0, sizeof(request->req));
    request->status = 0;
    request->start = 0;
    request->size = 0;
    request->presync = 0;

    request->aio_inflight = 0;
    request->aio_errors = 0;

    request->dataplane = NULL;
    memset(&request->list, 0, sizeof(request->list));
    memset(&request->acct, 0, sizeof(request->acct));

    qemu_iovec_reset(&request->v);
}
static void ioreq_reset(struct ioreq *ioreq)
{
    memset(&ioreq->req, 0, sizeof(ioreq->req));
    ioreq->status = 0;
    ioreq->start = 0;
    ioreq->buf = NULL;
    ioreq->size = 0;
    ioreq->presync = 0;

    ioreq->aio_inflight = 0;
    ioreq->aio_errors = 0;

    ioreq->blkdev = NULL;
    memset(&ioreq->list, 0, sizeof(ioreq->list));
    memset(&ioreq->acct, 0, sizeof(ioreq->acct));

    qemu_iovec_reset(&ioreq->v);
}
static void ioreq_reset(struct ioreq *ioreq)
{
    memset(&ioreq->req, 0, sizeof(ioreq->req));
    ioreq->status = 0;
    ioreq->start = 0;
    ioreq->presync = 0;
    ioreq->postsync = 0;

    memset(ioreq->domids, 0, sizeof(ioreq->domids));
    memset(ioreq->refs, 0, sizeof(ioreq->refs));
    ioreq->prot = 0;
    memset(ioreq->page, 0, sizeof(ioreq->page));
    ioreq->pages = NULL;

    ioreq->aio_inflight = 0;
    ioreq->aio_errors = 0;

    ioreq->blkdev = NULL;
    memset(&ioreq->list, 0, sizeof(ioreq->list));

    qemu_iovec_reset(&ioreq->v);
}
static coroutine_fn int
block_crypto_co_writev(BlockDriverState *bs, int64_t sector_num,
                       int remaining_sectors, QEMUIOVector *qiov)
{
    BlockCrypto *crypto = bs->opaque;
    int cur_nr_sectors; /* number of sectors in current iteration */
    uint64_t bytes_done = 0;
    uint8_t *cipher_data = NULL;
    QEMUIOVector hd_qiov;
    int ret = 0;
    size_t payload_offset =
        qcrypto_block_get_payload_offset(crypto->block) / 512;

    qemu_iovec_init(&hd_qiov, qiov->niov);

    /* Bounce buffer so we have a linear mem region for
     * entire sector. XXX optimize so we avoid bounce
     * buffer in case that qiov->niov == 1
     */
    cipher_data =
        qemu_try_blockalign(bs->file->bs, MIN(BLOCK_CRYPTO_MAX_SECTORS * 512,
                                              qiov->size));
    if (cipher_data == NULL) {
        ret = -ENOMEM;
        goto cleanup;
    }

    while (remaining_sectors) {
        cur_nr_sectors = remaining_sectors;

        if (cur_nr_sectors > BLOCK_CRYPTO_MAX_SECTORS) {
            cur_nr_sectors = BLOCK_CRYPTO_MAX_SECTORS;
        }

        qemu_iovec_to_buf(qiov, bytes_done,
                          cipher_data, cur_nr_sectors * 512);

        if (qcrypto_block_encrypt(crypto->block, sector_num, cipher_data,
                                  cur_nr_sectors * 512, NULL) < 0) {
            ret = -EIO;
            goto cleanup;
        }

        qemu_iovec_reset(&hd_qiov);
        qemu_iovec_add(&hd_qiov, cipher_data, cur_nr_sectors * 512);

        ret = bdrv_co_writev(bs->file, payload_offset + sector_num,
                             cur_nr_sectors, &hd_qiov);
        if (ret < 0) {
            goto cleanup;
        }

        remaining_sectors -= cur_nr_sectors;
        sector_num += cur_nr_sectors;
        bytes_done += cur_nr_sectors * 512;
    }

cleanup:
    qemu_iovec_destroy(&hd_qiov);
    qemu_vfree(cipher_data);

    return ret;
}
static coroutine_fn int vhdx_co_readv(BlockDriverState *bs, int64_t sector_num,
                                      int nb_sectors, QEMUIOVector *qiov)
{
    BDRVVHDXState *s = bs->opaque;
    int ret = 0;
    VHDXSectorInfo sinfo;
    uint64_t bytes_done = 0;
    QEMUIOVector hd_qiov;

    qemu_iovec_init(&hd_qiov, qiov->niov);

    qemu_co_mutex_lock(&s->lock);

    while (nb_sectors > 0) {
        /* We are a differencing file, so we need to inspect the sector bitmap
         * to see if we have the data or not */
        if (s->params.data_bits & VHDX_PARAMS_HAS_PARENT) {
            /* not supported yet */
            ret = -ENOTSUP;
            goto exit;
        } else {
            vhdx_block_translate(s, sector_num, nb_sectors, &sinfo);

            qemu_iovec_reset(&hd_qiov);
            qemu_iovec_concat(&hd_qiov, qiov, bytes_done, sinfo.bytes_avail);

            /* check the payload block state */
            switch (s->bat[sinfo.bat_idx] & VHDX_BAT_STATE_BIT_MASK) {
            case PAYLOAD_BLOCK_NOT_PRESENT: /* fall through */
            case PAYLOAD_BLOCK_UNDEFINED:   /* fall through */
            case PAYLOAD_BLOCK_UNMAPPED:    /* fall through */
            case PAYLOAD_BLOCK_ZERO:
                /* return zero */
                qemu_iovec_memset(&hd_qiov, 0, 0, sinfo.bytes_avail);
                break;
            case PAYLOAD_BLOCK_FULL_PRESENT:
                qemu_co_mutex_unlock(&s->lock);
                ret = bdrv_co_readv(bs->file,
                                    sinfo.file_offset >> BDRV_SECTOR_BITS,
                                    sinfo.sectors_avail, &hd_qiov);
                qemu_co_mutex_lock(&s->lock);
                if (ret < 0) {
                    goto exit;
                }
                break;
            case PAYLOAD_BLOCK_PARTIALLY_PRESENT:
                /* we don't yet support difference files, fall through
                 * to error */
            default:
                ret = -EIO;
                goto exit;
            }
            nb_sectors -= sinfo.sectors_avail;
            sector_num += sinfo.sectors_avail;
            bytes_done += sinfo.bytes_avail;
        }
    }
    ret = 0;

exit:
    qemu_co_mutex_unlock(&s->lock);
    qemu_iovec_destroy(&hd_qiov);
    return ret;
}