/*
 * Cancel an in-flight "store compact" request: abort every outstanding
 * child AIO, release journal resources held by this ACB, and drop the
 * ACB reference.
 */
static void fvd_store_compact_cancel (FvdAIOCB * acb) {
    if (acb->store.children) {
        int i;
        /* Cancel each child request that is still pending. */
        for (i = 0; i < acb->store.num_children; i++) {
            if (acb->store.children[i].hd_acb) {
                bdrv_aio_cancel (acb->store.children[i].hd_acb);
            }
        }
        my_qemu_free (acb->store.children);
    }
    /* The single-child fast path keeps its ACB outside the array. */
    if (acb->store.one_child.hd_acb) {
        bdrv_aio_cancel (acb->store.one_child.hd_acb);
    }
    if (acb->jcb.hd_acb) {
        /* A journal write was in flight: cancel it and return the
         * reserved journal sectors. */
        bdrv_aio_cancel (acb->jcb.hd_acb);
        free_journal_sectors (acb->common.bs->opaque);
    }
    if (acb->jcb.iov.iov_base != NULL) {
        my_qemu_vfree (acb->jcb.iov.iov_base);
    }
    /* A non-NULL le_prev means this ACB is still linked on the
     * wait-for-journal list and must be unlinked before release. */
    if (acb->jcb.next_wait_for_journal.le_prev) {
        QLIST_REMOVE (acb, jcb.next_wait_for_journal);
    }
    my_qemu_aio_unref (acb);
}
static struct ioreq *ioreq_start(struct XenBlkDev *blkdev) { struct ioreq *ioreq = NULL; if (QLIST_EMPTY(&blkdev->freelist)) { if (blkdev->requests_total >= max_requests) { goto out; } /* allocate new struct */ ioreq = g_malloc0(sizeof(*ioreq)); ioreq->blkdev = blkdev; blkdev->requests_total++; qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST); } else { /* get one from freelist */ ioreq = QLIST_FIRST(&blkdev->freelist); QLIST_REMOVE(ioreq, list); qemu_iovec_reset(&ioreq->v); } QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list); blkdev->requests_inflight++; out: return ioreq; }
/*
 * Obtain a XenBlockRequest for a new request: recycle one from the
 * freelist when available, otherwise allocate a fresh one, bounded by
 * dataplane->max_requests.  Returns NULL once the limit is reached.
 */
static XenBlockRequest *xen_block_start_request(XenBlockDataPlane *dataplane)
{
    XenBlockRequest *request;

    if (!QLIST_EMPTY(&dataplane->freelist)) {
        /* Recycle an idle request. */
        request = QLIST_FIRST(&dataplane->freelist);
        QLIST_REMOVE(request, list);
    } else if (dataplane->requests_total < dataplane->max_requests) {
        request = g_malloc0(sizeof(*request));
        request->dataplane = dataplane;
        /*
         * We cannot need more pages per request than this, and since we
         * re-use requests, allocate the memory once here.  It will be
         * freed by xen_block_dataplane_destroy() when the request list
         * is freed.
         */
        request->buf = qemu_memalign(XC_PAGE_SIZE,
                                     BLKIF_MAX_SEGMENTS_PER_REQUEST *
                                     XC_PAGE_SIZE);
        dataplane->requests_total++;
        qemu_iovec_init(&request->v, 1);
    } else {
        return NULL;    /* at capacity */
    }

    QLIST_INSERT_HEAD(&dataplane->inflight, request, list);
    dataplane->requests_inflight++;
    return request;
}
/* Drop a completed request from the inflight list and update accounting. */
static void xen_block_finish_request(XenBlockRequest *request)
{
    QLIST_REMOVE(request, list);
    request->dataplane->requests_inflight--;
}
/* Move a completed ioreq from the inflight list to the finished list. */
static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *dev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    QLIST_INSERT_HEAD(&dev->finished, ioreq, list);

    /* Keep the per-state counters in sync with the list move. */
    dev->requests_inflight--;
    dev->requests_finished++;
}
/*
 * Return a finished ioreq to the freelist for reuse.
 *
 * Bug fix: the original memset() wiped the entire struct, including
 * ioreq->v — a QEMUIOVector whose iovec array was heap-allocated once
 * by qemu_iovec_init() in ioreq_start().  Zeroing it discarded that
 * allocation, leaking it on every release.  Preserve the iovec across
 * the wipe and reset it instead, so the buffer is reused.
 */
static void ioreq_release(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    /* Save the iovec so the memset below does not leak its storage. */
    __typeof__(ioreq->v) v = ioreq->v;

    QLIST_REMOVE(ioreq, list);
    memset(ioreq, 0, sizeof(*ioreq));
    ioreq->blkdev = blkdev;
    ioreq->v = v;
    qemu_iovec_reset(&ioreq->v);
    QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    blkdev->requests_finished--;
}
/* Scrub a request and park it back on the freelist for reuse. */
static void xen_block_release_request(XenBlockRequest *request)
{
    XenBlockDataPlane *dp = request->dataplane;

    QLIST_REMOVE(request, list);
    reset_request(request);
    /* reset_request() clears the back-pointer; restore it. */
    request->dataplane = dp;
    QLIST_INSERT_HEAD(&dp->freelist, request, list);
    dp->requests_inflight--;
}
/* default qdev clean up function for PCI-to-PCI bridge */
int pci_bridge_exitfn(PCIDevice *pci_dev)
{
    PCIBridge *bridge = DO_UPCAST(PCIBridge, dev, pci_dev);

    /* The secondary bus must already be empty of child devices. */
    assert(QLIST_EMPTY(&bridge->sec_bus.child));
    QLIST_REMOVE(&bridge->sec_bus, sibling);

    pci_bridge_region_cleanup(bridge);
    memory_region_destroy(&bridge->address_space_mem);
    memory_region_destroy(&bridge->address_space_io);

    /* qbus_free() is called automatically by qdev_free() */
    return 0;
}
/*
 * Remove 'key' from the dictionary and destroy its entry.
 * A no-op when the key is not present.
 */
void qdict_del(QDict *qdict, const char *key)
{
    unsigned int bucket = tdb_hash(key) % QDICT_HASH_SIZE;
    QDictEntry *entry = qdict_find(qdict, key, bucket);

    if (!entry) {
        return; /* key absent: nothing to do */
    }

    QLIST_REMOVE(entry, next);
    qentry_destroy(entry);
    qdict->size--;
}
/*
 * Scrub an ioreq and park it on the freelist.  'finish' selects which
 * counter the request is currently accounted against: the finished
 * list (true) or the inflight list (false).
 */
static void ioreq_release(struct ioreq *ioreq, bool finish)
{
    struct XenBlkDev *dev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    ioreq_reset(ioreq);
    /* ioreq_reset() clears the back-pointer; restore it. */
    ioreq->blkdev = dev;
    QLIST_INSERT_HEAD(&dev->freelist, ioreq, list);

    if (finish) {
        dev->requests_finished--;
    } else {
        dev->requests_inflight--;
    }
}
/*
 * Detach a bus from the object tree: first unparent every child
 * device (each removal pops it off bus->children), then unlink the
 * bus itself from its parent device.
 */
static void bus_unparent(struct uc_struct *uc, Object *obj)
{
    BusState *bus = BUS(uc, obj);
    BusChild *kid;

    /* Re-read the head each pass: object_unparent() dequeues the kid. */
    for (kid = QTAILQ_FIRST(&bus->children); kid != NULL;
         kid = QTAILQ_FIRST(&bus->children)) {
        object_unparent(uc, OBJECT(kid->child));
    }

    if (bus->parent != NULL) {
        QLIST_REMOVE(bus, sibling);
        bus->parent->num_child_bus--;
        bus->parent = NULL;
    }
}
static int blk_free(struct XenDevice *xendev) { struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev); struct ioreq *ioreq; while (!QLIST_EMPTY(&blkdev->freelist)) { ioreq = QLIST_FIRST(&blkdev->freelist); QLIST_REMOVE(ioreq, list); qemu_iovec_destroy(&ioreq->v); qemu_free(ioreq); } qemu_free(blkdev->params); qemu_free(blkdev->mode); qemu_free(blkdev->type); qemu_free(blkdev->dev); qemu_free(blkdev->devtype); qemu_bh_delete(blkdev->bh); return 0; }
/*
 * QObject destructor for QDict: walk every hash bucket, destroying
 * each entry, then free the dictionary itself.
 */
static void qdict_destroy_obj(QObject *obj)
{
    int i;
    QDict *qdict;

    assert(obj != NULL);
    qdict = qobject_to_qdict(obj);

    for (i = 0; i < QDICT_HASH_SIZE; i++) {
        QDictEntry *cur, *nxt;

        /* Save the successor before destroying the current entry. */
        for (cur = QLIST_FIRST(&qdict->table[i]); cur != NULL; cur = nxt) {
            nxt = QLIST_NEXT(cur, next);
            QLIST_REMOVE(cur, next);
            qentry_destroy(cur);
        }
    }

    qemu_free(qdict);
}
void yagl_compiled_transfer_destroy(struct yagl_compiled_transfer *ct) { int i; if (ct->in_list) { QLIST_REMOVE(ct, entry); ct->in_list = false; } for (i = 0; i < ct->num_sections; ++i) { cpu_physical_memory_unmap(ct->sections[i].map_base, ct->sections[i].map_len, 0, ct->sections[i].map_len); } g_free(ct->sections); ct->sections = NULL; ct->num_sections = 0; g_free(ct); }
/*
 * Tear down a dataplane: free every cached request on the freelist
 * (iovec, bounce buffer, struct), delete the bottom half, drop the
 * iothread reference, and free the dataplane itself.
 * Safe to call with NULL.
 */
void xen_block_dataplane_destroy(XenBlockDataPlane *dataplane)
{
    XenBlockRequest *req;

    if (!dataplane) {
        return;
    }

    while (!QLIST_EMPTY(&dataplane->freelist)) {
        req = QLIST_FIRST(&dataplane->freelist);
        QLIST_REMOVE(req, list);
        qemu_iovec_destroy(&req->v);
        qemu_vfree(req->buf);
        g_free(req);
    }

    qemu_bh_delete(dataplane->bh);

    if (dataplane->iothread) {
        object_unref(OBJECT(dataplane->iothread));
    }

    g_free(dataplane);
}
void yagl_compiled_transfer_prepare(struct yagl_compiled_transfer *ct) { struct yagl_vector v; target_ulong last_page_va = YAGL_TARGET_PAGE_VA(ct->va + ct->len - 1); target_ulong cur_va = ct->va; uint32_t len = ct->len; int i, num_sections; YAGL_LOG_FUNC_ENTER(yagl_compiled_transfer_prepare, "va = 0x%X, len = 0x%X, is_write = %u", (uint32_t)ct->va, ct->len, (uint32_t)ct->is_write); if (ct->in_list) { QLIST_REMOVE(ct, entry); ct->in_list = false; } yagl_vector_init(&v, sizeof(struct yagl_compiled_transfer_section), 0); while (len) { target_ulong start_page_va = YAGL_TARGET_PAGE_VA(cur_va); hwaddr start_page_pa = yagl_pa(start_page_va); target_ulong end_page_va; struct yagl_compiled_transfer_section section; if (!start_page_pa) { YAGL_LOG_ERROR("yagl_pa of va 0x%X failed", (uint32_t)start_page_va); goto fail; } end_page_va = start_page_va; while (end_page_va < last_page_va) { target_ulong next_page_va = end_page_va + TARGET_PAGE_SIZE; hwaddr next_page_pa = yagl_pa(next_page_va); if (!next_page_pa) { YAGL_LOG_ERROR("yagl_pa of va 0x%X failed", (uint32_t)next_page_va); goto fail; } /* * If the target pages are not linearly spaced, stop. 
*/ if ((next_page_pa < start_page_pa) || ((next_page_pa - start_page_pa) > (next_page_va - start_page_va))) { break; } end_page_va = next_page_va; } section.map_len = end_page_va + TARGET_PAGE_SIZE - start_page_va; section.map_base = cpu_physical_memory_map(start_page_pa, §ion.map_len, 0); if (!section.map_base || !section.map_len) { YAGL_LOG_ERROR("cpu_physical_memory_map(0x%X, %u) failed", (uint32_t)start_page_pa, (uint32_t)section.map_len); goto fail; } section.len = end_page_va + TARGET_PAGE_SIZE - cur_va; if (section.len > len) { section.len = len; } section.base = (char*)section.map_base + YAGL_TARGET_PAGE_OFFSET(cur_va); yagl_vector_push_back(&v, §ion); len -= section.len; cur_va += section.len; } ct->num_sections = yagl_vector_size(&v); ct->sections = yagl_vector_detach(&v); YAGL_LOG_FUNC_EXIT("num_sections = %d", ct->num_sections); return; fail: num_sections = yagl_vector_size(&v); for (i = 0; i < num_sections; ++i) { struct yagl_compiled_transfer_section *section = (struct yagl_compiled_transfer_section*) ((char*)yagl_vector_data(&v) + (i * sizeof(struct yagl_compiled_transfer_section))); cpu_physical_memory_unmap(section->map_base, section->map_len, 0, section->map_len); } yagl_vector_cleanup(&v); YAGL_LOG_FUNC_EXIT(NULL); }