static void nbd_read(void *opaque)
{
    NBDClient *client = opaque;

    if (client->recv_coroutine) {
        qemu_coroutine_enter(client->recv_coroutine, NULL);
    } else {
        qemu_coroutine_enter(qemu_coroutine_create(nbd_trip), client);
    }
}

static void test_co_queue(void)
{
    Coroutine *c1;
    Coroutine *c2;

    c1 = qemu_coroutine_create(c1_fn);
    c2 = qemu_coroutine_create(c2_fn);

    qemu_coroutine_enter(c1, c2);

    /* c1 shouldn't be used any more now; make sure we segfault if it is */
    memset(c1, 0xff, sizeof(Coroutine));
    qemu_coroutine_enter(c2, NULL);
}

static void coroutine_fn verify_entered_step_1(void *opaque)
{
    Coroutine *self = qemu_coroutine_self();
    Coroutine *coroutine;

    g_assert(qemu_coroutine_entered(self));

    coroutine = qemu_coroutine_create(verify_entered_step_2, self);
    g_assert(!qemu_coroutine_entered(coroutine));
    qemu_coroutine_enter(coroutine);
    g_assert(!qemu_coroutine_entered(coroutine));
    qemu_coroutine_enter(coroutine);
}

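/* verify_entered_step_1() above enters its child coroutine twice, so the
 * child must yield back exactly once.  A minimal sketch of what that child
 * could look like, assuming it only checks qemu_coroutine_entered() on itself
 * and on its caller (the real helper is not shown in this collection):
 */
static void coroutine_fn verify_entered_step_2(void *opaque)
{
    Coroutine *caller = opaque;

    /* Both the caller and the currently running coroutine count as entered */
    g_assert(qemu_coroutine_entered(caller));
    g_assert(qemu_coroutine_entered(qemu_coroutine_self()));

    qemu_coroutine_yield();

    /* Resumed by the second qemu_coroutine_enter() in step 1 */
    g_assert(qemu_coroutine_entered(caller));
    g_assert(qemu_coroutine_entered(qemu_coroutine_self()));
}
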
static void test_self(void)
{
    Coroutine *coroutine;

    coroutine = qemu_coroutine_create(verify_self);
    qemu_coroutine_enter(coroutine, coroutine);
}

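/* test_self() passes the coroutine pointer itself as the entry argument (the
 * older two-argument enter API).  A plausible sketch of the helper, assuming
 * it only compares that argument against qemu_coroutine_self():
 */
static void coroutine_fn verify_self(void *opaque)
{
    /* The pointer passed in must be the coroutine currently executing */
    g_assert(opaque == qemu_coroutine_self());
}
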
static int do_co_write_zeroes(int64_t offset, int count, int *total)
{
    Coroutine *co;
    CoWriteZeroes data = {
        .offset = offset,
        .count  = count,
        .total  = total,
        .done   = false,
    };

    co = qemu_coroutine_create(co_write_zeroes_entry);
    qemu_coroutine_enter(co, &data);
    while (!data.done) {
        qemu_aio_wait();
    }

    if (data.ret < 0) {
        return data.ret;
    } else {
        return 1;
    }
}

static int do_write_compressed(char *buf, int64_t offset, int count, int *total)
{
    int ret;

    ret = bdrv_write_compressed(bs, offset >> 9, (uint8_t *)buf, count >> 9);
    if (ret < 0) {
        return ret;
    }
    *total = count;
    return 1;
}

int simple_bus_fdt_init(char *bus_node_path, FDTMachineInfo *fdti, void *unused)
{
    int i;
    int num_children = qemu_devtree_get_num_children(fdti->fdt, bus_node_path, 1);
    char **children = qemu_devtree_get_children(fdti->fdt, bus_node_path, 1);
    int initialRoutinesPending = fdti->routinesPending;

    DB_PRINT("num child devices: %d\n", num_children);

    for (i = 0; i < num_children; i++) {
        struct FDTInitNodeArgs *init_args = g_malloc0(sizeof(*init_args));
        init_args->node_path = children[i];
        init_args->fdti = fdti;
        fdti->routinesPending++;
        qemu_coroutine_enter(qemu_coroutine_create(fdt_init_node), init_args);
    }

    if (fdti->routinesPending != initialRoutinesPending) {
        bdrv_drain_all();
    }
    g_free(children);
    return 0;
}

static void test_lifecycle(void)
{
    Coroutine *coroutine;
    bool done = false;

    /* Create, enter, and return from coroutine */
    coroutine = qemu_coroutine_create(set_and_exit);
    qemu_coroutine_enter(coroutine, &done);
    g_assert(done); /* expect done to be true (first time) */

    /* Repeat to check that no state affects this test */
    done = false;
    coroutine = qemu_coroutine_create(set_and_exit);
    qemu_coroutine_enter(coroutine, &done);
    g_assert(done); /* expect done to be true (second time) */
}

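/* A minimal sketch of the set_and_exit entry point used by test_lifecycle(),
 * assuming it merely sets the flag passed through qemu_coroutine_enter() and
 * returns, which terminates the coroutine:
 */
static void coroutine_fn set_and_exit(void *opaque)
{
    bool *done = opaque;

    *done = true;
}
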
/* Create a block job that completes with a given return code after a given
 * number of event loop iterations.  The return code is stored in the given
 * result pointer.
 *
 * The event loop iterations can either be handled automatically with a 0
 * delay timer, or they can be stepped manually by entering the coroutine.
 */
static BlockJob *test_block_job_start(unsigned int iterations,
                                      bool use_timer,
                                      int rc, int *result)
{
    BlockDriverState *bs;
    TestBlockJob *s;
    TestBlockJobCBData *data;
    static unsigned counter;
    char job_id[24];

    data = g_new0(TestBlockJobCBData, 1);
    bs = bdrv_new();
    snprintf(job_id, sizeof(job_id), "job%u", counter++);
    s = block_job_create(job_id, &test_block_job_driver, bs, 0,
                         BLOCK_JOB_DEFAULT, test_block_job_cb,
                         data, &error_abort);
    s->iterations = iterations;
    s->use_timer = use_timer;
    s->rc = rc;
    s->result = result;
    s->common.co = qemu_coroutine_create(test_block_job_run, s);
    data->job = s;
    data->result = result;
    qemu_coroutine_enter(s->common.co);
    return &s->common;
}

void block_job_enter(BlockJob *job)
{
    block_job_iostatus_reset(job);
    if (job->co && !job->busy) {
        qemu_coroutine_enter(job->co, NULL);
    }
}

static void mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks, sectors_per_chunk;

    trace_mirror_iteration_done(s, op->sector_num, op->nb_sectors, ret);

    s->in_flight--;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    chunk_num = op->sector_num / sectors_per_chunk;
    nb_chunks = op->nb_sectors / sectors_per_chunk;
    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    if (s->cow_bitmap && ret >= 0) {
        bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
    }

    g_slice_free(MirrorOp, op);
    qemu_coroutine_enter(s->common.co, NULL);
}

static void iscsi_co_generic_bh_cb(void *opaque)
{
    struct IscsiTask *iTask = opaque;

    iTask->complete = 1;
    qemu_bh_delete(iTask->bh);
    qemu_coroutine_enter(iTask->co, NULL);
}

static void test_nesting(void)
{
    Coroutine *root;
    NestData nd = {
        .n_enter  = 0,
        .n_return = 0,
        .max      = 128,
    };

    root = qemu_coroutine_create(nest);
    qemu_coroutine_enter(root, &nd);

    /* Must enter and return from max nesting level */
    g_assert_cmpint(nd.n_enter, ==, nd.max);
    g_assert_cmpint(nd.n_return, ==, nd.max);
}

/*
 * Check that yield/enter transfer control correctly
 */
static void coroutine_fn yield_5_times(void *opaque)
{
    bool *done = opaque;
    int i;

    for (i = 0; i < 5; i++) {
        qemu_coroutine_yield();
    }
    *done = true;
}

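/* A sketch of a driver for yield_5_times(), assuming the same two-argument
 * qemu_coroutine_enter() API used above.  The test_yield() name and the loop
 * structure here are illustrative, not taken from this collection:
 */
static void test_yield(void)
{
    Coroutine *coroutine;
    bool done = false;
    int i = -1; /* one extra iteration to return from the coroutine */

    coroutine = qemu_coroutine_create(yield_5_times);
    while (!done) {
        qemu_coroutine_enter(coroutine, &done);
        i++;
    }
    g_assert_cmpint(i, ==, 5); /* the coroutine must yield 5 times */
}
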
static int do_co_write_zeroes(int64_t offset, int count, int *total)
{
    Coroutine *co;
    CoWriteZeroes data = {
        .offset = offset,
        .count  = count,
        .total  = total,
        .done   = false,
    };

    co = qemu_coroutine_create(co_write_zeroes_entry);
    qemu_coroutine_enter(co, &data);
    while (!data.done) {
        qemu_aio_wait();
    }

    if (data.ret < 0) {
        return data.ret;
    } else {
        return 1;
    }
}

static int do_load_vmstate(char *buf, int64_t offset, int count, int *total)
{
    *total = bdrv_load_vmstate(bs, (uint8_t *)buf, offset, count);
    if (*total < 0) {
        return *total;
    }
    return 1;
}

static void iscsi_co_generic_cb(struct iscsi_context *iscsi, int status,
                                void *command_data, void *opaque)
{
    struct IscsiTask *iTask = opaque;
    struct scsi_task *task = command_data;

    iTask->complete = 1;
    iTask->status = status;
    iTask->do_retry = 0;
    iTask->task = task;

    if (iTask->retries-- > 0 && status == SCSI_STATUS_CHECK_CONDITION
        && task->sense.key == SCSI_SENSE_UNIT_ATTENTION) {
        iTask->do_retry = 1;
        goto out;
    }

    if (status != SCSI_STATUS_GOOD) {
        error_report("iSCSI: Failure. %s", iscsi_get_error(iscsi));
    }

out:
    if (iTask->co) {
        qemu_coroutine_enter(iTask->co, NULL);
    }
}

static void mirror_start_job(BlockDriverState *bs, BlockDriverState *target,
                             const char *replaces,
                             int64_t speed, int64_t granularity,
                             int64_t buf_size,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             BlockDriverCompletionFunc *cb,
                             void *opaque, Error **errp,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base)
{
    MirrorBlockJob *s;

    if (granularity == 0) {
        /* Choose the default granularity based on the target file's cluster
         * size, clamped between 4k and 64k. */
        BlockDriverInfo bdi;
        if (bdrv_get_info(target, &bdi) >= 0 && bdi.cluster_size != 0) {
            granularity = MAX(4096, bdi.cluster_size);
            granularity = MIN(65536, granularity);
        } else {
            granularity = 65536;
        }
    }

    assert((granularity & (granularity - 1)) == 0);

    if ((on_source_error == BLOCKDEV_ON_ERROR_STOP ||
         on_source_error == BLOCKDEV_ON_ERROR_ENOSPC) &&
        !bdrv_iostatus_is_enabled(bs)) {
        error_set(errp, QERR_INVALID_PARAMETER, "on-source-error");
        return;
    }

    s = block_job_create(driver, bs, speed, cb, opaque, errp);
    if (!s) {
        return;
    }

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->target = target;
    s->is_none_mode = is_none_mode;
    s->base = base;
    s->granularity = granularity;
    s->buf_size = MAX(buf_size, granularity);

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, errp);
    if (!s->dirty_bitmap) {
        return;
    }
    bdrv_set_enable_write_cache(s->target, true);
    bdrv_set_on_error(s->target, on_target_error, on_target_error);
    bdrv_iostatus_enable(s->target);
    s->common.co = qemu_coroutine_create(mirror_run);
    trace_mirror_start(bs, s, s->common.co, opaque);
    qemu_coroutine_enter(s->common.co, s);
}

static void qemu_gluster_complete_aio(void *opaque)
{
    GlusterAIOCB *acb = (GlusterAIOCB *)opaque;

    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_coroutine_enter(acb->coroutine, NULL);
}

static void iscsi_retry_timer_expired(void *opaque)
{
    struct IscsiTask *iTask = opaque;

    iTask->complete = 1;
    if (iTask->co) {
        qemu_coroutine_enter(iTask->co, NULL);
    }
}

static void test_entered(void)
{
    Coroutine *coroutine;

    coroutine = qemu_coroutine_create(verify_entered_step_1, NULL);
    g_assert(!qemu_coroutine_entered(coroutine));
    qemu_coroutine_enter(coroutine);
}

static void restart_coroutine(void *opaque)
{
    Coroutine *co = opaque;

    DPRINTF("co=%p", co);
    qemu_coroutine_enter(co, NULL);
}

static gboolean qio_channel_yield_enter(QIOChannel *ioc,
                                        GIOCondition condition,
                                        gpointer opaque)
{
    QIOChannelYieldData *data = opaque;

    qemu_coroutine_enter(data->co);
    return FALSE;
}

void block_job_resume(BlockJob *job)
{
    job->paused = false;
    block_job_iostatus_reset(job);
    if (job->co && !job->busy) {
        qemu_coroutine_enter(job->co, NULL);
    }
}

static void test_in_coroutine(void)
{
    Coroutine *coroutine;

    g_assert(!qemu_in_coroutine());

    coroutine = qemu_coroutine_create(verify_in_coroutine);
    qemu_coroutine_enter(coroutine, NULL);
}

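/* A minimal sketch of the verify_in_coroutine helper referenced above,
 * assuming it only asserts that it runs in coroutine context:
 */
static void coroutine_fn verify_in_coroutine(void *opaque)
{
    g_assert(qemu_in_coroutine());
}
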
void process_incoming_migration(QEMUFile *f)
{
    Coroutine *co = qemu_coroutine_create(process_incoming_migration_co);
    int fd = qemu_get_fd(f);

    assert(fd != -1);
    socket_set_nonblock(fd);
    qemu_coroutine_enter(co, f);
}

/**
 * qemu_co_queue_run_restart:
 *
 * Enter each coroutine that was previously marked for restart by
 * qemu_co_queue_next() or qemu_co_queue_restart_all().  This function is
 * invoked by the core coroutine code when the current coroutine yields or
 * terminates.
 */
void qemu_co_queue_run_restart(Coroutine *co)
{
    Coroutine *next;

    trace_qemu_co_queue_run_restart(co);
    while ((next = QSIMPLEQ_FIRST(&co->co_queue_wakeup))) {
        QSIMPLEQ_REMOVE_HEAD(&co->co_queue_wakeup, co_queue_next);
        qemu_coroutine_enter(next);
    }
}

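/* A hypothetical illustration of the wakeup path described in the comment
 * above, assuming the older CoQueue API where qemu_co_queue_wait() takes no
 * lock argument and the queue has been set up with qemu_co_queue_init().
 * The sleeper_fn/waker_fn names and the `sleepers` queue are made up for
 * this sketch: qemu_co_queue_next() only marks the waiter for restart, and
 * qemu_co_queue_run_restart() is what actually re-enters it once the waker
 * yields or terminates.
 */
static CoQueue sleepers;

static void coroutine_fn sleeper_fn(void *opaque)
{
    bool *woken = opaque;

    qemu_co_queue_wait(&sleepers);  /* queue ourselves and yield */
    *woken = true;                  /* runs only after being restarted */
}

static void coroutine_fn waker_fn(void *opaque)
{
    /* Move the first sleeper onto our co_queue_wakeup list; it is entered
     * by qemu_co_queue_run_restart() when this coroutine terminates. */
    qemu_co_queue_next(&sleepers);
}
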
static void test_no_dangling_access(void)
{
    Coroutine *c1;
    Coroutine *c2;
    Coroutine tmp;

    c2 = qemu_coroutine_create(c2_fn, NULL);
    c1 = qemu_coroutine_create(c1_fn, c2);

    qemu_coroutine_enter(c1);

    /* c1 shouldn't be used any more now; make sure we segfault if it is */
    tmp = *c1;
    memset(c1, 0xff, sizeof(Coroutine));
    qemu_coroutine_enter(c2);

    /* Must restore the coroutine now to avoid corrupted pool */
    *c1 = tmp;
}

static void nbd_recv_coroutines_enter_all(NbdClientSession *s)
{
    int i;

    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (s->recv_coroutine[i]) {
            qemu_coroutine_enter(s->recv_coroutine[i], NULL);
        }
    }
}

static void v9fs_thread_routine(gpointer data, gpointer user_data)
{
    Coroutine *co = data;

    qemu_coroutine_enter(co, NULL);

    g_async_queue_push(v9fs_pool.completed, co);
    event_notifier_set(&v9fs_pool.e);
}

static void v9fs_qemu_process_req_done(EventNotifier *e)
{
    Coroutine *co;

    event_notifier_test_and_clear(e);

    while ((co = g_async_queue_try_pop(v9fs_pool.completed)) != NULL) {
        qemu_coroutine_enter(co, NULL);
    }
}

static void nbd_reply_ready(void *opaque)
{
    BDRVNBDState *s = opaque;
    uint64_t i;
    int ret;

    if (s->reply.handle == 0) {
        /* No reply already in flight.  Fetch a header.  It is possible
         * that another thread has done the same thing in parallel, so
         * the socket is not readable anymore.
         */
        ret = nbd_receive_reply(s->sock, &s->reply);
        if (ret == -EAGAIN) {
            return;
        }
        if (ret < 0) {
            s->reply.handle = 0;
            goto fail;
        }
    }

    /* There's no need for a mutex on the receive side, because the
     * handler acts as a synchronization point and ensures that only
     * one coroutine is called until the reply finishes.
     */
    i = HANDLE_TO_INDEX(s, s->reply.handle);
    if (i >= MAX_NBD_REQUESTS) {
        goto fail;
    }

    if (s->recv_coroutine[i]) {
        qemu_coroutine_enter(s->recv_coroutine[i], NULL);
        return;
    }

fail:
    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (s->recv_coroutine[i]) {
            qemu_coroutine_enter(s->recv_coroutine[i], NULL);
        }
    }
}

static void secondary_vm_do_failover(void)
{
    int old_state;
    MigrationIncomingState *mis = migration_incoming_get_current();

    /* Cannot do failover while the VM is loading its VMstate, or it will
     * break the secondary VM.
     */
    if (vmstate_loading) {
        old_state = failover_set_state(FAILOVER_STATUS_ACTIVE,
                                       FAILOVER_STATUS_RELAUNCH);
        if (old_state != FAILOVER_STATUS_ACTIVE) {
            error_report("Unknown error while do failover for secondary VM,"
                         "old_state: %s", FailoverStatus_lookup[old_state]);
        }
        return;
    }

    migrate_set_state(&mis->state, MIGRATION_STATUS_COLO,
                      MIGRATION_STATUS_COMPLETED);

    if (!autostart) {
        error_report("\"-S\" qemu option will be ignored in secondary side");
        /* Recover runstate to the normal migration finish state */
        autostart = true;
    }

    /*
     * Make sure the COLO incoming thread is not blocked in recv or send.
     * If mis->from_src_file and mis->to_src_file use the same fd, the
     * second shutdown() will return -1; we ignore this value because it
     * is harmless.
     */
    if (mis->from_src_file) {
        qemu_file_shutdown(mis->from_src_file);
    }
    if (mis->to_src_file) {
        qemu_file_shutdown(mis->to_src_file);
    }

    old_state = failover_set_state(FAILOVER_STATUS_ACTIVE,
                                   FAILOVER_STATUS_COMPLETED);
    if (old_state != FAILOVER_STATUS_ACTIVE) {
        error_report("Incorrect state (%s) while doing failover for "
                     "secondary VM", FailoverStatus_lookup[old_state]);
        return;
    }

    /* Notify the COLO incoming thread that failover work is finished */
    qemu_sem_post(&mis->colo_incoming_sem);

    /* For the secondary VM, jump back into the incoming coroutine */
    if (mis->migration_incoming_co) {
        qemu_coroutine_enter(mis->migration_incoming_co);
    }
}