/* Check that aio_flush() consumes a pending aio_notify(): the context is
 * quiescent, we raise the event notifier, flush, and the final non-blocking
 * aio_poll() must again report that nothing was dispatched. */
static void test_flush(void)
{
    g_assert(!aio_poll(ctx, false));  /* nothing pending to start with */
    aio_notify(ctx);                  /* raise the context's event notifier */
    aio_flush(ctx);                   /* drain all pending events */
    g_assert(!aio_poll(ctx, false));  /* notifier must have been consumed */
}
/* Wake up the main loop's AioContext, e.g. after another thread queued
 * work for it.  Calling this before qemu_aio_context exists is a no-op. */
void qemu_notify_event(void)
{
    if (qemu_aio_context) {
        aio_notify(qemu_aio_context);
    }
}
/* Check that aio_notify() wakes a blocking aio_poll() without counting as
 * progress: the blocking poll returns false (the notifier itself dispatches
 * no handler), after which the context is idle again. */
static void test_notify(void)
{
    g_assert(!aio_poll(ctx, false));  /* quiescent initially */
    aio_notify(ctx);
    g_assert(!aio_poll(ctx, true));   /* blocking poll wakes up, no progress */
    g_assert(!aio_poll(ctx, false));  /* notifier fully consumed */
}
/* Schedule bottom half @bh for execution on its AioContext and kick the
 * event loop.  Idempotent while the bottom half is already scheduled, so
 * concurrent callers coalesce into a single callback invocation.
 *
 * Fix: the original stored bh->idle and bh->scheduled with no intervening
 * barrier, so a concurrent aio_bh_poll() could observe scheduled == 1
 * before the idle store (or any data the callback depends on) became
 * visible, running the callback with stale inputs.  Add smp_wmb() before
 * setting bh->scheduled, matching the corrected variants of this function
 * elsewhere in the tree.
 */
void qemu_bh_schedule(QEMUBH *bh)
{
    if (bh->scheduled)
        return;
    bh->idle = 0;
    /* Make sure that idle & any writes needed by the callback are done
     * before the locations are read in the aio_bh_poll.
     */
    smp_wmb();
    bh->scheduled = 1;
    aio_notify(bh->ctx);
}
/* Schedule bottom half @bh for execution on its AioContext and kick the
 * event loop.  Idempotent while already scheduled.  The write barrier
 * orders the idle store (and the caller's data writes) before the
 * scheduled store that publishes the bottom half to aio_bh_poll(). */
void qemu_bh_schedule(QEMUBH *bh)
{
    if (bh->scheduled)
        return;
    bh->idle = 0;
    /* Make sure that idle & any writes needed by the callback are done
     * before the locations are read in the aio_bh_poll.
     */
    smp_wmb();
    bh->scheduled = 1;
    aio_notify(bh->ctx);
}
/* Object-iteration callback: if @object is a fully initialized IOThread,
 * ask its event loop to stop and wait for the thread to exit.  Always
 * returns 0 so iteration continues over the remaining objects. */
static int iothread_stop(Object *object, void *opaque)
{
    IOThread *iothread = (IOThread *)object_dynamic_cast(object, TYPE_IOTHREAD);

    if (iothread && iothread->ctx) {
        /* The loop polls 'stopping'; notify so it wakes up and re-checks. */
        iothread->stopping = true;
        aio_notify(iothread->ctx);
        qemu_thread_join(&iothread->thread);
    }
    return 0;
}
/* Tear down an IOThread instance: stop its event-loop thread (if one was
 * ever started), then release the init-synchronization primitives and the
 * AioContext reference. */
static void iothread_instance_finalize(Object *obj)
{
    IOThread *iothread = IOTHREAD(obj);

    if (iothread->ctx == NULL) {
        return;  /* initialization never completed; nothing to stop */
    }

    iothread->stopping = true;
    aio_notify(iothread->ctx);          /* wake the loop so it sees 'stopping' */
    qemu_thread_join(&iothread->thread);

    qemu_cond_destroy(&iothread->init_done_cond);
    qemu_mutex_destroy(&iothread->init_done_lock);
    aio_context_unref(iothread->ctx);
}
/* Thread body: create a GLib main context for @opaque's IOThread, attach
 * the AioContext's GSource to it, and notify the creator that
 * worker_context is ready.  Always returns NULL. */
static gpointer iothread_g_main_context_init(gpointer opaque)
{
    IOThread *iothread = opaque;
    AioContext *aio_ctx;
    GSource *gsrc;

    iothread->worker_context = g_main_context_new();

    aio_ctx = iothread_get_aio_context(iothread);
    gsrc = aio_get_g_source(aio_ctx);
    g_source_attach(gsrc, iothread->worker_context);
    g_source_unref(gsrc);  /* the main context now holds its own reference */

    aio_notify(iothread->ctx);  /* wake anyone waiting for worker_context */
    return NULL;
}
/* Schedule bottom half @bh for execution on its AioContext and kick the
 * event loop.  Idempotent while already scheduled.  bh->ctx is loaded into
 * a local before the full barrier so the aio_notify() target is fixed even
 * if the callback (running concurrently once scheduled is set) frees or
 * changes the bottom half. */
void qemu_bh_schedule(QEMUBH *bh)
{
    AioContext *ctx;

    if (bh->scheduled)
        return;
    ctx = bh->ctx;
    bh->idle = 0;
    /* Make sure that:
     * 1. idle & any writes needed by the callback are done before the
     *    locations are read in the aio_bh_poll.
     * 2. ctx is loaded before scheduled is set and the callback has a chance
     *    to execute.
     */
    smp_mb();
    bh->scheduled = 1;
    aio_notify(ctx);
}
/* RFifoLock contention callback; @opaque is the AioContext itself
 * (presumably wired up at rfifolock_init time — confirm at the call
 * site).  Waking the context lets a blocked owner drop out of aio_poll()
 * and hand the lock over. */
static void aio_rfifolock_cb(void *opaque)
{
    /* Kick owner thread in case they are blocked in aio_poll() */
    aio_notify(opaque);
}
/* Timer-list notify callback; @opaque is the AioContext.  Presumably
 * invoked when a timer deadline changes (verify against the
 * timerlistgroup registration); kicking the context forces its poll
 * timeout to be recomputed. */
static void aio_timerlist_notify(void *opaque)
{
    aio_notify(opaque);
}