static void *comp_handler_thread(void *arg)
{
    RdmaBackendDev *backend_dev = (RdmaBackendDev *)arg;
    int rc;
    struct ibv_cq *ev_cq;
    void *ev_ctx;
    int flags;
    GPollFD pfds[1];

    /* Change to non-blocking mode */
    flags = fcntl(backend_dev->channel->fd, F_GETFL);
    rc = fcntl(backend_dev->channel->fd, F_SETFL, flags | O_NONBLOCK);
    if (rc < 0) {
        rdma_error_report("Failed to change backend channel FD to non-blocking");
        return NULL;
    }

    pfds[0].fd = backend_dev->channel->fd;
    pfds[0].events = G_IO_IN | G_IO_HUP | G_IO_ERR;

    backend_dev->comp_thread.is_running = true;

    while (backend_dev->comp_thread.run) {
        do {
            rc = qemu_poll_ns(pfds, 1, THR_POLL_TO * (int64_t)SCALE_MS);
            if (!rc) {
                backend_dev->rdma_dev_res->stats.poll_cq_ppoll_to++;
            }
        } while (!rc && backend_dev->comp_thread.run);

        if (backend_dev->comp_thread.run) {
            rc = ibv_get_cq_event(backend_dev->channel, &ev_cq, &ev_ctx);
            if (unlikely(rc)) {
                rdma_error_report("ibv_get_cq_event fail, rc=%d, errno=%d",
                                  rc, errno);
                continue;
            }

            rc = ibv_req_notify_cq(ev_cq, 0);
            if (unlikely(rc)) {
                rdma_error_report("ibv_req_notify_cq fail, rc=%d, errno=%d",
                                  rc, errno);
            }

            backend_dev->rdma_dev_res->stats.poll_cq_from_bk++;
            rdma_poll_cq(backend_dev->rdma_dev_res, ev_cq);

            ibv_ack_cq_events(ev_cq, 1);
        }
    }

    backend_dev->comp_thread.is_running = false;

    qemu_thread_exit(0);

    return NULL;
}
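/*
 * For context, the sketch below shows how a completion-handler thread such as
 * comp_handler_thread() is typically started and stopped: spawn it with
 * qemu_thread_create() and stop it by clearing its run flag, which the thread
 * re-checks each time qemu_poll_ns() times out after THR_POLL_TO milliseconds.
 * The helper names and the comp_thread field layout used here are
 * illustrative assumptions, not the actual rdma_backend.c API.
 */
static void rdma_start_comp_thread(RdmaBackendDev *backend_dev)
{
    backend_dev->comp_thread.run = true;
    qemu_thread_create(&backend_dev->comp_thread.thread, "rdma_comp",
                       comp_handler_thread, backend_dev,
                       QEMU_THREAD_DETACHED);
}

static void rdma_stop_comp_thread(RdmaBackendDev *backend_dev)
{
    /* Ask the thread to exit; it notices the flag after its current poll */
    backend_dev->comp_thread.run = false;
    while (backend_dev->comp_thread.is_running) {
        g_usleep(10 * 1000); /* 10 ms back-off while waiting for the exit */
    }
}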
static int os_host_main_loop_wait(int64_t timeout)
{
    int ret;
    static int spin_counter;

    glib_pollfds_fill(&timeout);

    /* If the I/O thread is very busy or we are incorrectly busy waiting in
     * the I/O thread, this can lead to starvation of the BQL such that the
     * VCPU threads never run.  To make sure we can detect the latter case,
     * print a message to the screen.  If we run into this condition, create
     * a fake timeout in order to give the VCPU threads a chance to run.
     */
    if (!timeout && (spin_counter > MAX_MAIN_LOOP_SPIN)) {
        static bool notified;

        if (!notified) {
            fprintf(stderr,
                    "main-loop: WARNING: I/O thread spun for %d iterations\n",
                    MAX_MAIN_LOOP_SPIN);
            notified = true;
        }

        timeout = SCALE_MS;
    }

    if (timeout) {
        spin_counter = 0;
        qemu_mutex_unlock_iothread();
    } else {
        spin_counter++;
    }

    ret = qemu_poll_ns((GPollFD *)gpollfds->data, gpollfds->len, timeout);

    if (timeout) {
        qemu_mutex_lock_iothread();
    }

    glib_pollfds_poll();
    return ret;
}
static int os_host_main_loop_wait(int64_t timeout)
{
    GMainContext *context = g_main_context_default();
    int ret;

    g_main_context_acquire(context);

    glib_pollfds_fill(&timeout);

    qemu_mutex_unlock_iothread();
    replay_mutex_unlock();

    ret = qemu_poll_ns((GPollFD *)gpollfds->data, gpollfds->len, timeout);

    replay_mutex_lock();
    qemu_mutex_lock_iothread();

    glib_pollfds_poll();

    g_main_context_release(context);

    return ret;
}
static int os_host_main_loop_wait(int64_t timeout)
{
    GMainContext *context = g_main_context_default();
    GPollFD poll_fds[1024 * 2]; /* this is probably overkill */
    int select_ret = 0;
    int g_poll_ret, ret, i, n_poll_fds;
    PollingEntry *pe;
    WaitObjects *w = &wait_objects;
    gint poll_timeout;
    int64_t poll_timeout_ns;
    static struct timeval tv0;
    fd_set rfds, wfds, xfds;
    int nfds;

    /* XXX: need to suppress polling by better using win32 events */
    ret = 0;
    for (pe = first_polling_entry; pe != NULL; pe = pe->next) {
        ret |= pe->func(pe->opaque);
    }
    if (ret != 0) {
        return ret;
    }

    FD_ZERO(&rfds);
    FD_ZERO(&wfds);
    FD_ZERO(&xfds);
    nfds = pollfds_fill(gpollfds, &rfds, &wfds, &xfds);
    if (nfds >= 0) {
        select_ret = select(nfds + 1, &rfds, &wfds, &xfds, &tv0);
        if (select_ret != 0) {
            timeout = 0;
        }
        if (select_ret > 0) {
            pollfds_poll(gpollfds, nfds, &rfds, &wfds, &xfds);
        }
    }

    g_main_context_prepare(context, &max_priority);
    n_poll_fds = g_main_context_query(context, max_priority, &poll_timeout,
                                      poll_fds, ARRAY_SIZE(poll_fds));
    g_assert(n_poll_fds <= ARRAY_SIZE(poll_fds));

    for (i = 0; i < w->num; i++) {
        poll_fds[n_poll_fds + i].fd = (DWORD_PTR)w->events[i];
        poll_fds[n_poll_fds + i].events = G_IO_IN;
    }

    if (poll_timeout < 0) {
        poll_timeout_ns = -1;
    } else {
        poll_timeout_ns = (int64_t)poll_timeout * (int64_t)SCALE_MS;
    }

    poll_timeout_ns = qemu_soonest_timeout(poll_timeout_ns, timeout);

    qemu_mutex_unlock_iothread();

    g_poll_ret = qemu_poll_ns(poll_fds, n_poll_fds + w->num, poll_timeout_ns);

    qemu_mutex_lock_iothread();
    if (g_poll_ret > 0) {
        for (i = 0; i < w->num; i++) {
            w->revents[i] = poll_fds[n_poll_fds + i].revents;
        }
        for (i = 0; i < w->num; i++) {
            if (w->revents[i] && w->func[i]) {
                w->func[i](w->opaque[i]);
            }
        }
    }

    if (g_main_context_check(context, max_priority, poll_fds, n_poll_fds)) {
        g_main_context_dispatch(context);
    }

    return select_ret || g_poll_ret;
}
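/*
 * All of the functions above share the same shape: build a GPollFD array, drop
 * the big QEMU lock, block in qemu_poll_ns() with a nanosecond timeout, retake
 * the lock, and only then dispatch whatever became ready.  The sketch below is
 * a minimal, stand-alone illustration of that pattern; wait_for_fd() is a
 * hypothetical helper for this write-up, not an existing QEMU function.
 */
static int wait_for_fd(int fd, int64_t timeout_ns)
{
    GPollFD pfd = {
        .fd = fd,
        .events = G_IO_IN | G_IO_HUP | G_IO_ERR,
    };
    int ret;

    qemu_mutex_unlock_iothread(); /* never block in poll while holding the BQL */
    ret = qemu_poll_ns(&pfd, 1, timeout_ns);
    qemu_mutex_lock_iothread();

    if (ret > 0 && (pfd.revents & G_IO_IN)) {
        return 1;   /* fd is readable; the caller can service it now */
    }
    return ret;     /* 0 on timeout, negative on error */
}

/*
 * Example use: wait up to 100 ms for an eventfd to become readable.
 *
 *     if (wait_for_fd(evt_fd, 100 * SCALE_MS) > 0) {
 *         ... read and handle the event ...
 *     }
 */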