/* This is where we get a request from a caller to read something */
static BlockDriverAIOCB *tar_aio_readv(BlockDriverState *bs,
                                       int64_t sector_num,
                                       QEMUIOVector *qiov, int nb_sectors,
                                       BlockDriverCompletionFunc *cb,
                                       void *opaque)
{
    BDRVTarState *s = bs->opaque;
    SparseCache *sparse;
    int64_t sec_file = sector_num + s->file_sec;
    int64_t start = sector_num * SECTOR_SIZE;
    int64_t end = start + (nb_sectors * SECTOR_SIZE);
    int i;
    TarAIOCB *acb;

    for (i = 0; i < s->sparse_num; i++) {
        sparse = &s->sparse[i];
        if (sparse->start > end) {
            /* We expect the cache to be sorted by increasing start offset */
            break;
        } else if ((sparse->start < start) && (sparse->end <= start)) {
            /* sparse region lies entirely before our offset */
            sec_file -= (sparse->end - sparse->start) / SECTOR_SIZE;
        } else if ((sparse->start <= start) && (sparse->end >= end)) {
            /* all our sectors are sparse */
            char *buf = g_malloc0(nb_sectors * SECTOR_SIZE);

            acb = qemu_aio_get(&tar_aiocb_info, bs, cb, opaque);
            qemu_iovec_from_buf(qiov, 0, buf, nb_sectors * SECTOR_SIZE);
            g_free(buf);
            acb->bh = qemu_bh_new(tar_sparse_cb, acb);
            qemu_bh_schedule(acb->bh);

            return &acb->common;
        } else if (((sparse->start >= start) && (sparse->start < end)) ||
                   ((sparse->end >= start) && (sparse->end < end))) {
            /* we're semi-sparse (worst case) */
            /* let's go synchronous and read all sectors individually */
            char *buf = g_malloc(nb_sectors * SECTOR_SIZE);
            uint64_t offs;

            for (offs = 0; offs < (nb_sectors * SECTOR_SIZE);
                 offs += SECTOR_SIZE) {
                bdrv_pread(bs, (sector_num * SECTOR_SIZE) + offs,
                           buf + offs, SECTOR_SIZE);
            }

            qemu_iovec_from_buf(qiov, 0, buf, nb_sectors * SECTOR_SIZE);
            g_free(buf);
            acb = qemu_aio_get(&tar_aiocb_info, bs, cb, opaque);
            acb->bh = qemu_bh_new(tar_sparse_cb, acb);
            qemu_bh_schedule(acb->bh);

            return &acb->common;
        }
    }

    return bdrv_aio_readv(s->hd, sec_file, qiov, nb_sectors, cb, opaque);
}
/*
 * This aio completion is being called from qemu_rbd_aio_event_reader()
 * and runs in qemu context.  It schedules a bh to finish the completion,
 * in case the aio was not cancelled before.
 */
static void qemu_rbd_complete_aio(RADOSCB *rcb)
{
    RBDAIOCB *acb = rcb->acb;
    int64_t r;

    r = rcb->ret;

    if (acb->cmd != RBD_AIO_READ) {
        if (r < 0) {
            acb->ret = r;
            acb->error = 1;
        } else if (!acb->error) {
            acb->ret = rcb->size;
        }
    } else {
        if (r < 0) {
            memset(rcb->buf, 0, rcb->size);
            acb->ret = r;
            acb->error = 1;
        } else if (r < rcb->size) {
            memset(rcb->buf + r, 0, rcb->size - r);
            if (!acb->error) {
                acb->ret = rcb->size;
            }
        } else if (!acb->error) {
            acb->ret = r;
        }
    }

    /* Note that acb->bh can be NULL in the case where the aio was cancelled */
    acb->bh = qemu_bh_new(rbd_aio_bh_cb, acb);
    qemu_bh_schedule(acb->bh);
    g_free(rcb);
}
static void tpm_tis_realizefn(DeviceState *dev, Error **errp)
{
    TPMState *s = TPM(dev);
    TPMTISEmuState *tis = &s->s.tis;

    s->be_driver = qemu_find_tpm(s->backend);
    if (!s->be_driver) {
        error_setg(errp, "tpm_tis: backend driver with id %s could not be "
                   "found", s->backend);
        return;
    }

    s->be_driver->fe_model = TPM_MODEL_TPM_TIS;

    if (s->be_driver->ops->init(s->be_driver, s, tpm_tis_receive_cb)) {
        error_setg(errp, "tpm_tis: backend driver with id %s could not be "
                   "initialized", s->backend);
        return;
    }

    if (tis->irq_num > 15) {
        error_setg(errp, "tpm_tis: IRQ %d for TPM TIS is outside valid range "
                   "of 0 to 15", tis->irq_num);
        return;
    }

    tis->bh = qemu_bh_new(tpm_tis_receive_bh, s);

    isa_init_irq(&s->busdev, &tis->irq, tis->irq_num);
}
static void *sh_timer_init(uint32_t freq, int feat, qemu_irq irq)
{
    sh_timer_state *s;
    QEMUBH *bh;

    s = (sh_timer_state *)qemu_mallocz(sizeof(sh_timer_state));
    s->freq = freq;
    s->feat = feat;
    s->tcor = 0xffffffff;
    s->tcnt = 0xffffffff;
    s->tcpr = 0xdeadbeef;
    s->tcr = 0;
    s->enabled = 0;
    s->irq = irq;

    bh = qemu_bh_new(sh_timer_tick, s);
    s->timer = ptimer_init(bh);

    sh_timer_write(s, OFFSET_TCOR >> 2, s->tcor);
    sh_timer_write(s, OFFSET_TCNT >> 2, s->tcnt);
    sh_timer_write(s, OFFSET_TCPR >> 2, s->tcpr);
    sh_timer_write(s, OFFSET_TCR >> 2, s->tcr);
    /* ??? Save/restore.  */
    return s;
}
static int xilinx_timer_init(SysBusDevice *dev)
{
    struct timerblock *t = FROM_SYSBUS(typeof (*t), dev);
    unsigned int i;

    /* All timers share a single irq line.  */
    sysbus_init_irq(dev, &t->irq);

    /* Init all the ptimers.  */
    t->timers = g_malloc0(sizeof t->timers[0] * num_timers(t));
    for (i = 0; i < num_timers(t); i++) {
        struct xlx_timer *xt = &t->timers[i];

        xt->parent = t;
        xt->nr = i;
        xt->bh = qemu_bh_new(timer_hit, xt);
        xt->ptimer = ptimer_init(xt->bh);
        ptimer_set_freq(xt->ptimer, t->freq_hz);
    }

    memory_region_init_io(&t->mmio, &timer_ops, t, "xlnx.xps-timer",
                          R_MAX * 4 * num_timers(t));
    sysbus_init_mmio(dev, &t->mmio);
    return 0;
}
static int grlib_gptimer_init(SysBusDevice *dev)
{
    GPTimerUnit *unit = FROM_SYSBUS(typeof(*unit), dev);
    unsigned int i;

    assert(unit->nr_timers > 0);
    assert(unit->nr_timers <= GPTIMER_MAX_TIMERS);

    unit->timers = g_malloc0(sizeof unit->timers[0] * unit->nr_timers);

    for (i = 0; i < unit->nr_timers; i++) {
        GPTimer *timer = &unit->timers[i];

        timer->unit   = unit;
        timer->bh     = qemu_bh_new(grlib_gptimer_hit, timer);
        timer->ptimer = ptimer_init(timer->bh);
        timer->id     = i;

        /* One IRQ line for each timer */
        sysbus_init_irq(dev, &timer->irq);

        ptimer_set_freq(timer->ptimer, unit->freq_hz);
    }

    memory_region_init_io(&unit->iomem, &grlib_gptimer_ops, unit, "gptimer",
                          UNIT_REG_SIZE + GPTIMER_REG_SIZE * unit->nr_timers);
    sysbus_init_mmio(dev, &unit->iomem);
    return 0;
}
static void
iscsi_co_generic_cb(struct iscsi_context *iscsi, int status,
                    void *command_data, void *opaque)
{
    struct IscsiTask *iTask = opaque;
    struct scsi_task *task = command_data;

    iTask->complete = 1;
    iTask->status = status;
    iTask->do_retry = 0;
    iTask->task = task;

    if (iTask->retries-- > 0 && status == SCSI_STATUS_CHECK_CONDITION
        && task->sense.key == SCSI_SENSE_UNIT_ATTENTION) {
        iTask->do_retry = 1;
        goto out;
    }

    if (status != SCSI_STATUS_GOOD) {
        error_report("iSCSI: Failure. %s", iscsi_get_error(iscsi));
    }

out:
    if (iTask->co) {
        iTask->bh = qemu_bh_new(iscsi_co_generic_bh_cb, iTask);
        qemu_bh_schedule(iTask->bh);
    }
}
int qemu_init_main_loop(Error **errp)
{
    int ret;
    GSource *src;
    Error *local_error = NULL;

    init_clocks(qemu_timer_notify_cb);

    ret = qemu_signal_init(errp);
    if (ret) {
        return ret;
    }

    qemu_aio_context = aio_context_new(&local_error);
    if (!qemu_aio_context) {
        error_propagate(errp, local_error);
        return -EMFILE;
    }
    qemu_notify_bh = qemu_bh_new(notify_event_cb, NULL);
    gpollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD));
    src = aio_get_g_source(qemu_aio_context);
    g_source_set_name(src, "aio-context");
    g_source_attach(src, NULL);
    g_source_unref(src);
    src = iohandler_get_g_source();
    g_source_set_name(src, "io-handler");
    g_source_attach(src, NULL);
    g_source_unref(src);
    return 0;
}
static BlockDriverAIOCB *raw_aio_write(BlockDriverState *bs, int64_t sector_num,
                                       const uint8_t *buf, int nb_sectors,
                                       BlockDriverCompletionFunc *cb,
                                       void *opaque)
{
    RawAIOCB *acb;

    /*
     * If O_DIRECT is used and the buffer is not aligned fall back
     * to synchronous IO.
     */
    BDRVRawState *s = bs->opaque;

    if (unlikely(s->aligned_buf != NULL && ((uintptr_t) buf % 512))) {
        QEMUBH *bh;
        acb = qemu_aio_get(bs, cb, opaque);
        acb->ret = raw_pwrite(bs, 512 * sector_num, buf, 512 * nb_sectors);
        bh = qemu_bh_new(raw_aio_em_cb, acb);
        qemu_bh_schedule(bh);
        return &acb->common;
    }

    acb = raw_aio_setup(bs, sector_num, (uint8_t*)buf, nb_sectors, cb, opaque);
    if (!acb)
        return NULL;
    if (qemu_paio_write(&acb->aiocb) < 0) {
        raw_aio_remove(acb);
        return NULL;
    }
    return &acb->common;
}
static void continue_after_map_failure(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}
static void iscsi_schedule_bh(IscsiAIOCB *acb)
{
    if (acb->bh) {
        return;
    }
    acb->bh = qemu_bh_new(iscsi_bh_cb, acb);
    qemu_bh_schedule(acb->bh);
}
/*
 * This is the callback function for rbd_aio_read and _write
 *
 * Note: this function is being called from a non-qemu thread so
 * we need to be careful about what we do here. Generally we only
 * schedule a BH, and do the rest of the io completion handling
 * from rbd_finish_bh() which runs in a qemu context.
 */
static void rbd_finish_aiocb(rbd_completion_t c, RADOSCB *rcb)
{
    RBDAIOCB *acb = rcb->acb;

    rcb->ret = rbd_aio_get_return_value(c);
    rbd_aio_release(c);

    acb->bh = qemu_bh_new(rbd_finish_bh, rcb);
    qemu_bh_schedule(acb->bh);
}
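For context, a sketch of what the qemu-context half of this pattern typically looks like. The real rbd_finish_bh() is not included in these snippets, so the struct fields and completion call below are illustrative assumptions, not the driver's actual code; only qemu_bh_delete(), qemu_aio_release() and g_free() are the era's real APIs.

/*
 * Hypothetical sketch: the BH scheduled by the foreign thread runs in
 * the qemu main loop, where it is safe to delete the BH, invoke the
 * caller's completion callback and release the request state.
 */
static void example_finish_bh(void *opaque)
{
    ExampleRADOSCB *rcb = opaque;   /* assumed state struct */
    ExampleAIOCB *acb = rcb->acb;

    qemu_bh_delete(acb->bh);        /* one-shot BH, free it after it fires */
    acb->bh = NULL;
    acb->common.cb(acb->common.opaque, rcb->ret);  /* caller's completion */
    qemu_aio_release(acb);
    g_free(rcb);
}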
void failover_request_active(Error **errp)
{
    if (failover_set_state(FAILOVER_STATUS_NONE, FAILOVER_STATUS_REQUIRE)
          != FAILOVER_STATUS_NONE) {
        error_setg(errp, "COLO failover is already activated");
        return;
    }
    failover_bh = qemu_bh_new(colo_failover_bh, NULL);
    qemu_bh_schedule(failover_bh);
}
static int etraxfs_timer_init(SysBusDevice *dev)
{
    struct etrax_timer *t = FROM_SYSBUS(typeof (*t), dev);

    t->bh_t0 = qemu_bh_new(timer0_hit, t);
    t->bh_t1 = qemu_bh_new(timer1_hit, t);
    t->bh_wd = qemu_bh_new(watchdog_hit, t);
    t->ptimer_t0 = ptimer_init(t->bh_t0);
    t->ptimer_t1 = ptimer_init(t->bh_t1);
    t->ptimer_wd = ptimer_init(t->bh_wd);

    sysbus_init_irq(dev, &t->irq);
    sysbus_init_irq(dev, &t->nmi);

    memory_region_init_io(&t->mmio, &timer_ops, t, "etraxfs-timer", 0x5c);
    sysbus_init_mmio(dev, &t->mmio);
    qemu_register_reset(etraxfs_timer_reset, t);
    return 0;
}
static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBlock *s = to_virtio_blk(vdev);
    VirtIOBlockReq *req;
    MultiReqBuffer mrb = {
        .num_writes = 0,
    };

    while ((req = virtio_blk_get_request(s))) {
        virtio_blk_handle_request(req, &mrb);
    }

    virtio_submit_multiwrite(s->bs, &mrb);

    /*
     * FIXME: Want to check for completions before returning to guest mode,
     * so cached reads and writes are reported as quickly as possible. But
     * that should be done in the generic block layer.
     */
}

static void virtio_blk_dma_restart_bh(void *opaque)
{
    VirtIOBlock *s = opaque;
    VirtIOBlockReq *req = s->rq;
    MultiReqBuffer mrb = {
        .num_writes = 0,
    };

    qemu_bh_delete(s->bh);
    s->bh = NULL;

    s->rq = NULL;

    while (req) {
        virtio_blk_handle_request(req, &mrb);
        req = req->next;
    }

    virtio_submit_multiwrite(s->bs, &mrb);
}

static void virtio_blk_dma_restart_cb(void *opaque, int running,
                                      RunState state)
{
    VirtIOBlock *s = opaque;

    if (!running) {
        return;
    }

    if (!s->bh) {
        s->bh = qemu_bh_new(virtio_blk_dma_restart_bh, s);
        qemu_bh_schedule(s->bh);
    }
}
static void blk_alloc(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    QLIST_INIT(&blkdev->inflight);
    QLIST_INIT(&blkdev->finished);
    QLIST_INIT(&blkdev->freelist);
    blkdev->bh = qemu_bh_new(blk_bh, blkdev);
    if (xen_mode != XEN_EMULATE) {
        batch_maps = 1;
    }
}
static int marin_timer_init(SysBusDevice *dev)
{
    MarinTimerState *s = MARIN_TIMER(dev);

    sysbus_init_irq(dev, &s->irq);

    s->bh = qemu_bh_new(marin_timer_tick, s);
    s->ptimer = ptimer_init(s->bh);
    ptimer_set_freq(s->ptimer, 50 * 1000 * 1000);

    return 0;
}
static int milkymist_sysctl_init(SysBusDevice *dev)
{
    MilkymistSysctlState *s = FROM_SYSBUS(typeof(*s), dev);

    sysbus_init_irq(dev, &s->gpio_irq);
    sysbus_init_irq(dev, &s->timer0_irq);
    sysbus_init_irq(dev, &s->timer1_irq);

    s->bh0 = qemu_bh_new(timer0_hit, s);
    s->bh1 = qemu_bh_new(timer1_hit, s);
    s->ptimer0 = ptimer_init(s->bh0);
    s->ptimer1 = ptimer_init(s->bh1);
    ptimer_set_freq(s->ptimer0, s->freq_hz);
    ptimer_set_freq(s->ptimer1, s->freq_hz);

    memory_region_init_io(&s->regs_region, &sysctl_mmio_ops, s,
                          "milkymist-sysctl", R_MAX * 4);
    sysbus_init_mmio(dev, &s->regs_region);

    return 0;
}
static int iscsi_schedule_bh(QEMUBHFunc *cb, IscsiAIOCB *acb)
{
    acb->bh = qemu_bh_new(cb, acb);
    if (!acb->bh) {
        error_report("oom: could not create iscsi bh");
        return -EIO;
    }

    qemu_bh_schedule(acb->bh);
    return 0;
}
static void etsec_realize(DeviceState *dev, Error **errp)
{
    eTSEC *etsec = ETSEC_COMMON(dev);

    etsec->nic = qemu_new_nic(&net_etsec_info, &etsec->conf,
                              object_get_typename(OBJECT(dev)), dev->id, etsec);
    qemu_format_nic_info_str(qemu_get_queue(etsec->nic), etsec->conf.macaddr.a);

    etsec->bh = qemu_bh_new(etsec_timer_hit, etsec);
    etsec->ptimer = ptimer_init(etsec->bh, PTIMER_POLICY_DEFAULT);
    ptimer_set_freq(etsec->ptimer, 100);
}
void block_job_defer_to_main_loop(BlockJob *job,
                                  BlockJobDeferToMainLoopFn *fn,
                                  void *opaque)
{
    BlockJobDeferToMainLoopData *data = g_malloc(sizeof(*data));
    data->job = job;
    data->bh = qemu_bh_new(block_job_defer_to_main_loop_bh, data);
    data->aio_context = bdrv_get_aio_context(job->bs);
    data->fn = fn;
    data->opaque = opaque;

    qemu_bh_schedule(data->bh);
}
static int etraxfs_timer_init(SysBusDevice *dev)
{
    struct etrax_timer *t = FROM_SYSBUS(typeof (*t), dev);
    int timer_regs;

    t->bh_t0 = qemu_bh_new(timer0_hit, t);
    t->bh_t1 = qemu_bh_new(timer1_hit, t);
    t->bh_wd = qemu_bh_new(watchdog_hit, t);
    t->ptimer_t0 = ptimer_init(t->bh_t0);
    t->ptimer_t1 = ptimer_init(t->bh_t1);
    t->ptimer_wd = ptimer_init(t->bh_wd);

    sysbus_init_irq(dev, &t->irq);
    sysbus_init_irq(dev, &t->nmi);

    timer_regs = cpu_register_io_memory(timer_read, timer_write, t,
                                        DEVICE_NATIVE_ENDIAN);
    sysbus_init_mmio(dev, 0x5c, timer_regs);

    qemu_register_reset(etraxfs_timer_reset, t);
    return 0;
}
static void imx_gpt_realize(DeviceState *dev, Error **errp)
{
    IMXGPTState *s = IMX_GPT(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    QEMUBH *bh;

    sysbus_init_irq(sbd, &s->irq);
    memory_region_init_io(&s->iomem, OBJECT(s), &imx_gpt_ops, s, TYPE_IMX_GPT,
                          0x00001000);
    sysbus_init_mmio(sbd, &s->iomem);

    bh = qemu_bh_new(imx_gpt_timeout, s);
    s->timer = ptimer_init(bh);
}
static arm_timer_state *arm_timer_init(uint32_t freq)
{
    arm_timer_state *s;
    QEMUBH *bh;

    s = (arm_timer_state *)g_malloc0(sizeof(arm_timer_state));
    s->freq = freq;
    s->control = TIMER_CTRL_IE;

    bh = qemu_bh_new(arm_timer_tick, s);
    s->timer = ptimer_init(bh);
    vmstate_register(NULL, -1, &vmstate_arm_timer, s);
    return s;
}
static arm_timer_state *arm_timer_init(uint32_t freq)
{
    arm_timer_state *s;
    QEMUBH *bh;

    s = (arm_timer_state *)qemu_mallocz(sizeof(arm_timer_state));
    s->freq = freq;
    s->control = TIMER_CTRL_IE;

    bh = qemu_bh_new(arm_timer_tick, s);
    s->timer = ptimer_init(bh);
    register_savevm("arm_timer", -1, 1, arm_timer_save, arm_timer_load, s);
    return s;
}
static int lm32_timer_init(SysBusDevice *dev)
{
    LM32TimerState *s = FROM_SYSBUS(typeof(*s), dev);

    sysbus_init_irq(dev, &s->irq);

    s->bh = qemu_bh_new(timer_hit, s);
    s->ptimer = ptimer_init(s->bh);
    ptimer_set_freq(s->ptimer, s->freq_hz);

    memory_region_init_io(&s->iomem, &timer_ops, s, "timer", R_MAX * 4);
    sysbus_init_mmio(dev, &s->iomem);

    return 0;
}
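Distilled from the timer models above, a minimal sketch of the shared bh-plus-ptimer idiom under the older one-argument ptimer_init(QEMUBH *) API that most of these devices use. MyTimerState, my_timer_hit and my_timer_setup are illustrative names, not taken from any of the devices shown.

/* Hypothetical device state -- names are illustrative. */
typedef struct MyTimerState {
    QEMUBH *bh;
    ptimer_state *ptimer;
    qemu_irq irq;
} MyTimerState;

static void my_timer_hit(void *opaque)
{
    MyTimerState *s = opaque;

    qemu_irq_pulse(s->irq);    /* raise the timer interrupt */
}

static void my_timer_setup(MyTimerState *s, uint32_t freq_hz)
{
    /* The BH is how the ptimer core calls back into the device model. */
    s->bh = qemu_bh_new(my_timer_hit, s);
    s->ptimer = ptimer_init(s->bh);
    ptimer_set_freq(s->ptimer, freq_hz);
    ptimer_set_limit(s->ptimer, 0xffffffff, 1);  /* count-down reload value */
    ptimer_run(s->ptimer, 0);                    /* 0 = periodic mode */
}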
/*
 * AIO callback routine called from GlusterFS thread.
 */
static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
{
    GlusterAIOCB *acb = (GlusterAIOCB *)arg;

    if (!ret || ret == acb->size) {
        acb->ret = 0; /* Success */
    } else if (ret < 0) {
        acb->ret = ret; /* Read/Write failed */
    } else {
        acb->ret = -EIO; /* Partial read/write - fail it */
    }

    acb->bh = qemu_bh_new(qemu_gluster_complete_aio, acb);
    qemu_bh_schedule(acb->bh);
}
void *etraxfs_dmac_init(target_phys_addr_t base, int nr_channels)
{
    struct fs_dma_ctrl *ctrl = NULL;

    ctrl = g_malloc0(sizeof *ctrl);

    ctrl->bh = qemu_bh_new(DMA_run, ctrl);

    ctrl->nr_channels = nr_channels;
    ctrl->channels = g_malloc0(sizeof ctrl->channels[0] * nr_channels);

    ctrl->map = cpu_register_io_memory(dma_read, dma_write, ctrl,
                                       DEVICE_NATIVE_ENDIAN);
    cpu_register_physical_memory(base, nr_channels * 0x2000, ctrl->map);
    return ctrl;
}
static void blk_alloc(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    LIST_INIT(&blkdev->inflight);
    LIST_INIT(&blkdev->finished);
    LIST_INIT(&blkdev->freelist);
    blkdev->bh = qemu_bh_new(blk_bh, blkdev);
    if (xen_mode != XEN_EMULATE) {
        batch_maps = 1;
    }
    if (xc_gnttab_set_max_grants(xendev->gnttabdev,
            MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
        xen_be_printf(xendev, 0, "xc_gnttab_set_max_grants failed: %s\n",
                      strerror(errno));
    }
}
static BlockDriverAIOCB *curl_aio_readv(BlockDriverState *bs,
                                        int64_t sector_num, QEMUIOVector *qiov,
                                        int nb_sectors,
                                        BlockDriverCompletionFunc *cb,
                                        void *opaque)
{
    CURLAIOCB *acb;

    acb = qemu_aio_get(&curl_aiocb_info, bs, cb, opaque);

    acb->qiov = qiov;
    acb->sector_num = sector_num;
    acb->nb_sectors = nb_sectors;

    acb->bh = qemu_bh_new(curl_readv_bh_cb, acb);
    qemu_bh_schedule(acb->bh);
    return &acb->common;
}
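Across all of these call sites the bottom-half lifecycle is the same: create with qemu_bh_new(), hand work to the main loop with qemu_bh_schedule(), and, for one-shot use, delete the BH from inside its own callback. A minimal self-contained sketch of that lifecycle follows; ExampleState and both function names are illustrative, only the qemu_bh_* calls are the real API.

/*
 * Minimal sketch of the recurring pattern in the snippets above.
 */
static void example_bh_cb(void *opaque)
{
    ExampleState *s = opaque;

    qemu_bh_delete(s->bh);    /* one-shot: release the BH after it fires */
    s->bh = NULL;
    /* ... completion work that must run in qemu context goes here ... */
}

static void example_defer_to_main_loop(ExampleState *s)
{
    if (!s->bh) {             /* guard against double-scheduling, as in the
                               * iscsi and virtio-blk snippets above */
        s->bh = qemu_bh_new(example_bh_cb, s);
        qemu_bh_schedule(s->bh);
    }
}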