/*
 * Listener thread: accept TCP clients on s->sock one at a time and hand
 * each connection to process_client().
 *
 * ptr: the owning pl061bbv_state, as passed to qemu_thread_create().
 * Returns NULL (also passed to qemu_thread_exit()).
 *
 * Fix: the original retried accept() unconditionally, which busy-spins
 * at 100% CPU once the error is persistent (e.g. EBADF after the
 * listening socket is closed).  Only transient errors are retried now.
 */
static void *thread_func(void *ptr)
{
    pl061bbv_state *s = (pl061bbv_state *)ptr;
    socklen_t client_addr_len;

    if (s->sock < 0) {
        printf("%s: invalid socket\n", __PRETTY_FUNCTION__);
        qemu_thread_exit(NULL);
        return NULL;
    }

    printf("%s: thread started\n", __PRETTY_FUNCTION__);

    for (;;) {
        memset(&s->client_addr, 0, sizeof(struct sockaddr_in));
        client_addr_len = sizeof(struct sockaddr_in);
        s->client_sock = accept(s->sock, (struct sockaddr *)&s->client_addr,
                                &client_addr_len);
        if (s->client_sock < 0) {
            /* Retry transient failures; bail out on persistent ones. */
            if (errno == EINTR || errno == ECONNABORTED || errno == EAGAIN) {
                continue;
            }
            printf("%s: accept failed: %s\n", __PRETTY_FUNCTION__,
                   strerror(errno));
            break;
        }

        printf("%s: connection established\n", __PRETTY_FUNCTION__);

        /* One client at a time: serve it to completion, then clean up. */
        process_client(s);

        close(s->client_sock);
        s->client_sock = -1;

        printf("%s: connection closed\n", __PRETTY_FUNCTION__);
    }

    qemu_thread_exit(NULL);
    return NULL;
}
/*
 * Completion-handler thread for the RDMA backend device.
 *
 * Polls the ibv completion channel FD (switched to non-blocking below) and,
 * for each completion-channel event, re-arms CQ notification, drains the CQ
 * and acknowledges the event.  The thread runs until comp_thread.run is
 * cleared by whoever stops the backend; comp_thread.is_running mirrors the
 * thread's lifetime for that controller.
 *
 * arg: the RdmaBackendDev that owns the completion channel.
 * Returns NULL (the thread also exits via qemu_thread_exit()).
 */
static void *comp_handler_thread(void *arg)
{
    RdmaBackendDev *backend_dev = (RdmaBackendDev *)arg;
    int rc;
    struct ibv_cq *ev_cq;
    void *ev_ctx;
    int flags;
    GPollFD pfds[1];

    /* Change to non-blocking mode */
    flags = fcntl(backend_dev->channel->fd, F_GETFL);
    rc = fcntl(backend_dev->channel->fd, F_SETFL, flags | O_NONBLOCK);
    if (rc < 0) {
        rdma_error_report("Failed to change backend channel FD to non-blocking");
        return NULL;
    }

    pfds[0].fd = backend_dev->channel->fd;
    pfds[0].events = G_IO_IN | G_IO_HUP | G_IO_ERR;

    backend_dev->comp_thread.is_running = true;

    while (backend_dev->comp_thread.run) {
        /*
         * Poll with a finite timeout so a cleared comp_thread.run is
         * noticed within THR_POLL_TO ms even when the channel is idle.
         */
        do {
            rc = qemu_poll_ns(pfds, 1, THR_POLL_TO * (int64_t)SCALE_MS);
            if (!rc) {
                backend_dev->rdma_dev_res->stats.poll_cq_ppoll_to++;
            }
        } while (!rc && backend_dev->comp_thread.run);

        if (backend_dev->comp_thread.run) {
            /* FD is readable: fetch the CQ that raised the event. */
            rc = ibv_get_cq_event(backend_dev->channel, &ev_cq, &ev_ctx);
            if (unlikely(rc)) {
                rdma_error_report("ibv_get_cq_event fail, rc=%d, errno=%d", rc,
                                  errno);
                continue;
            }

            /*
             * Re-arm notification BEFORE draining the CQ so completions
             * arriving during the drain still raise a new event.
             */
            rc = ibv_req_notify_cq(ev_cq, 0);
            if (unlikely(rc)) {
                rdma_error_report("ibv_req_notify_cq fail, rc=%d, errno=%d", rc,
                                  errno);
            }

            backend_dev->rdma_dev_res->stats.poll_cq_from_bk++;
            rdma_poll_cq(backend_dev->rdma_dev_res, ev_cq);

            /* Each ibv_get_cq_event() must be acked or CQ teardown hangs. */
            ibv_ack_cq_events(ev_cq, 1);
        }
    }

    backend_dev->comp_thread.is_running = false;

    qemu_thread_exit(0);

    return NULL;
}
/*
 * Trampoline between the Win32 _beginthreadex() entry convention and a
 * POSIX-style start routine.  The QemuThreadData created by
 * qemu_thread_create() is stashed in the per-thread variable so that
 * qemu_thread_exit() can find it later.
 */
static unsigned __stdcall win32_start_routine(void *arg)
{
    QemuThreadData *data = (QemuThreadData *) arg;
    void *(*entry)(void *) = data->start_routine;
    void *entry_arg = data->arg;

    qemu_thread_data = data;

    /* qemu_thread_exit() never returns; abort() guards against it doing so. */
    qemu_thread_exit(entry(entry_arg));
    abort();
}
/*
 * Win32 thread entry trampoline: unpack the QemuThreadData built by
 * qemu_thread_create() and invoke the user's start routine.
 */
static unsigned __stdcall win32_start_routine(void *arg)
{
    QemuThreadData *data = (QemuThreadData *) arg;
    void *(*entry)(void *) = data->start_routine;
    void *entry_arg = data->arg;

    /*
     * A detached thread is never joined, so nobody else will free its
     * bookkeeping; release it now, before the start routine runs, and
     * record a NULL in TLS so qemu_thread_exit() knows there is no
     * joiner to hand a return value to.
     */
    if (data->mode == QEMU_THREAD_DETACHED) {
        g_free(data);
        data = NULL;
    }
    qemu_thread_data = data;

    /* qemu_thread_exit() does not return. */
    qemu_thread_exit(entry(entry_arg));
    abort();
}
/*
 * Win32 thread entry trampoline.  The heap-allocated QemuThreadData is
 * copied by value and freed immediately, so the start routine's lifetime
 * never depends on the creator's allocation.
 */
static unsigned __stdcall win32_start_routine(void *arg)
{
    struct QemuThreadData copy = *(struct QemuThreadData *) arg;
    QemuThread *self = copy.thread;

    free(arg);
    TlsSetValue(qemu_thread_tls_index, self);

    /*
     * Use DuplicateHandle instead of assigning thread->thread in the
     * creating thread to avoid races.  It's simpler this way than with
     * synchronization.
     */
    DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
                    GetCurrentProcess(), &self->thread, 0, FALSE,
                    DUPLICATE_SAME_ACCESS);

    qemu_thread_exit(copy.start_routine(copy.arg));
    abort();
}
static void xseg_request_handler(void *state) { BDRVArchipelagoState *s = (BDRVArchipelagoState *) state; void *psd = xseg_get_signal_desc(s->xseg, s->port); qemu_mutex_lock(&s->request_mutex); while (!s->stopping) { struct xseg_request *req; void *data; xseg_prepare_wait(s->xseg, s->srcport); req = xseg_receive(s->xseg, s->srcport, X_NONBLOCK); if (req) { AIORequestData *reqdata; ArchipelagoSegmentedRequest *segreq; xseg_get_req_data(s->xseg, req, (void **)&reqdata); switch (reqdata->op) { case ARCHIP_OP_READ: data = xseg_get_data(s->xseg, req); segreq = reqdata->segreq; segreq->count += req->serviced; qemu_iovec_from_buf(reqdata->aio_cb->qiov, reqdata->bufidx, data, req->serviced); xseg_put_request(s->xseg, req, s->srcport); if ((__sync_add_and_fetch(&segreq->ref, -1)) == 0) { if (!segreq->failed) { reqdata->aio_cb->ret = segreq->count; archipelago_finish_aiocb(reqdata); g_free(segreq); } else { g_free(segreq); g_free(reqdata); } } else { g_free(reqdata); } break; case ARCHIP_OP_WRITE: case ARCHIP_OP_FLUSH: segreq = reqdata->segreq; segreq->count += req->serviced; xseg_put_request(s->xseg, req, s->srcport); if ((__sync_add_and_fetch(&segreq->ref, -1)) == 0) { if (!segreq->failed) { reqdata->aio_cb->ret = segreq->count; archipelago_finish_aiocb(reqdata); g_free(segreq); } else { g_free(segreq); g_free(reqdata); } } else { g_free(reqdata); } break; case ARCHIP_OP_VOLINFO: s->is_signaled = true; qemu_cond_signal(&s->archip_cond); break; } } else { xseg_wait_signal(s->xseg, psd, 100000UL); } xseg_cancel_wait(s->xseg, s->srcport); } s->th_is_signaled = true; qemu_cond_signal(&s->request_cond); qemu_mutex_unlock(&s->request_mutex); qemu_thread_exit(NULL); }