/* Event loop for the kqueue (BSD/macOS) backend.
 * Repeatedly submits the pending change list and collects ready events,
 * dispatching each one, until the global run flag is cleared.
 * The same kqset.events array doubles as changelist (input) and
 * eventlist (output) for kevent(); the ordering of the grow/reset steps
 * below is load-bearing. */
void ph_nbio_emitter_run(struct ph_nbio_emitter *emitter, ph_thread_t *thread) {
  int n, i;
  int max_chunk;

  /* Cap on how many events we process per wakeup (config override). */
  max_chunk = ph_config_query_int("$.nbio.max_per_wakeup", 1024);

  while (ck_pr_load_int(&_ph_run_loop)) {
    /* Submit kqset.used pending changes and receive up to
     * MIN(kqset.size, max_chunk) ready events into the same buffer. */
    n = kevent(emitter->io_fd, emitter->kqset.events, emitter->kqset.used,
        emitter->kqset.events, MIN(emitter->kqset.size, max_chunk), NULL);

    if (n < 0 && errno != EINTR) {
      /* `Pe%d is phenom's printf extension for rendering errno */
      ph_panic("kevent: `Pe%d", errno);
    }
    if (n <= 0) {
      /* Interrupted or nothing ready; loop and retry.
       * NOTE(review): kqset.used is not reset here, so any changes are
       * re-submitted on the next iteration — presumably harmless for
       * EV_ADD-style changes; confirm against the change types used. */
      continue;
    }

    /* Enter an epoch so objects we touch are not reclaimed under us. */
    ph_thread_epoch_begin();

    for (i = 0; i < n; i++) {
      dispatch_kevent(emitter, thread, &emitter->kqset.events[i]);
    }

    /* If the batch nearly filled the set, grow it before we reuse it
     * as a changelist again, so future wakeups can drain more events. */
    if (n + 1 >= emitter->kqset.size) {
      grow_kq_set(&emitter->kqset);
    }
    /* All submitted changes were consumed by kevent() above. */
    emitter->kqset.used = 0;

    /* Dispatch handlers may have queued deferred jobs; run them now. */
    if (ph_job_have_deferred_items(thread)) {
      ph_job_pool_apply_deferred_items(thread);
    }

    ph_thread_epoch_end();
    ph_thread_epoch_poll();
  }

  dispose_kq_set(&emitter->kqset);
}
/* Event loop for the Solaris event-ports backend.
 * Blocks in port_getn() for up to $.nbio.max_sleep milliseconds, then
 * dispatches timer ticks, user wakeups and FD readiness events until the
 * global run flag is cleared.
 *
 * Fixes over the previous revision:
 *  - the event buffer allocation is now checked before use (it was
 *    dereferenced unconditionally by memset/port_getn);
 *  - max_chunk is clamped to a sane minimum so a bad config value
 *    cannot produce a zero/underflowed allocation size. */
void ph_nbio_emitter_run(struct ph_nbio_emitter *emitter, ph_thread_t *thread) {
  port_event_t *event;
  uint_t n, i, max_chunk, max_sleep;
  ph_job_t *job;
  ph_iomask_t mask;
  struct timespec ts;

  max_chunk = ph_config_query_int("$.nbio.max_per_wakeup", 1024);
  max_sleep = ph_config_query_int("$.nbio.max_sleep", 5000);

  /* Guard against a non-positive config value; we must allocate at
   * least one event slot, and port_getn() requires nget >= 1. */
  if (max_chunk < 1) {
    max_chunk = 1;
  }

  /* Convert milliseconds to a timespec for the port_getn timeout. */
  ts.tv_sec = max_sleep / 1000;
  ts.tv_nsec = (max_sleep - (ts.tv_sec * 1000)) * 1000000;

  event = malloc(max_chunk * sizeof(port_event_t));
  if (!event) {
    ph_panic("ph_nbio_emitter_run: unable to allocate %u port events",
        max_chunk);
  }

  while (ck_pr_load_int(&_ph_run_loop)) {
    /* nget is in/out: request at least 1 event; on return it holds the
     * number actually retrieved. */
    n = 1;
    memset(event, 0, sizeof(*event));

    if (port_getn(emitter->io_fd, event, max_chunk, &n, &ts)) {
      if (errno != EINTR && errno != ETIME) {
        /* `Pe%d is phenom's printf extension for rendering errno */
        ph_panic("port_getn: `Pe%d", errno);
      }
      n = 0;
    }

    if (!n) {
      /* Timed out or interrupted with nothing to do; still give the
       * epoch system a chance to reclaim garbage. */
      ph_thread_epoch_poll();
      continue;
    }

    for (i = 0; i < n; i++) {
      /* Epoch protects objects touched during dispatch from reclaim. */
      ph_thread_epoch_begin();

      switch (event[i].portev_source) {
        case PORT_SOURCE_TIMER:
          /* Refresh the cached time eagerly for the timer tick. */
          gettimeofday(&thread->now, NULL);
          thread->refresh_time = false;
          ph_nbio_emitter_timer_tick(emitter);
          break;

        case PORT_SOURCE_USER:
          /* Explicit wakeup; nothing to dispatch. */
          break;

        case PORT_SOURCE_FD:
          thread->refresh_time = true;
          job = event[i].portev_user;

          /* Translate poll(2) event bits into the phenom iomask. */
          switch (event[i].portev_events & (POLLIN|POLLOUT|POLLERR|POLLHUP)) {
            case POLLIN:
              mask = PH_IOMASK_READ;
              break;
            case POLLOUT:
              mask = PH_IOMASK_WRITE;
              break;
            case POLLIN|POLLOUT:
              mask = PH_IOMASK_READ|PH_IOMASK_WRITE;
              break;
            default:
              mask = PH_IOMASK_ERR;
          }
          /* Ports are one-shot: the association is gone after delivery,
           * so record that the kernel no longer tracks this job. */
          job->kmask = 0;
          ph_nbio_emitter_dispatch_immediate(emitter, job, mask);
          break;
      }

      /* Dispatch may have queued deferred jobs; run them inside the
       * epoch so their objects remain valid. */
      if (ph_job_have_deferred_items(thread)) {
        ph_job_pool_apply_deferred_items(thread);
      }

      ph_thread_epoch_end();
      ph_thread_epoch_poll();
    }
  }

  free(event);
}
/* Event loop for the Linux epoll backend.
 * Blocks in epoll_wait() for up to $.nbio.max_sleep milliseconds, then
 * dispatches FD readiness events until the global run flag is cleared.
 *
 * Fixes over the previous revision:
 *  - the event buffer allocation is now checked before it is handed to
 *    epoll_wait() (it was used unconditionally);
 *  - max_chunk is clamped to a sane minimum so a bad config value
 *    cannot produce a zero/underflowed allocation size (epoll_wait also
 *    requires maxevents > 0). */
void ph_nbio_emitter_run(struct ph_nbio_emitter *emitter, ph_thread_t *thread) {
  struct epoll_event *event;
  int n, i;
  int max_chunk, max_sleep;

  max_chunk = ph_config_query_int("$.nbio.max_per_wakeup", 1024);
  max_sleep = ph_config_query_int("$.nbio.max_sleep", 5000);

  /* Guard against a non-positive config value; we must allocate at
   * least one event slot. */
  if (max_chunk < 1) {
    max_chunk = 1;
  }

  event = malloc(max_chunk * sizeof(struct epoll_event));
  if (!event) {
    ph_panic("ph_nbio_emitter_run: unable to allocate %d epoll events",
        max_chunk);
  }

  while (ck_pr_load_int(&_ph_run_loop)) {
    n = epoll_wait(emitter->io_fd, event, max_chunk, max_sleep);
    /* Time may have advanced while we slept; mark it stale. */
    thread->refresh_time = true;

    if (n < 0) {
      if (errno != EINTR) {
        /* `Pe%d is phenom's printf extension for rendering errno */
        ph_log(PH_LOG_ERR, "epoll_wait: `Pe%d", errno);
      }
      /* Even with no events, let the collector and epoch system run. */
      ph_job_collector_emitter_call(emitter);
      ph_thread_epoch_poll();
      continue;
    }
    if (n == 0) {
      /* Timed out with nothing ready. */
      continue;
    }

    /* Enter an epoch so objects we touch are not reclaimed under us. */
    ph_thread_epoch_begin();

    for (i = 0; i < n; i++) {
      ph_iomask_t mask = 0;
      ph_job_t *job = event[i].data.ptr;

      if (job->mask == 0) {
        // Ignore: disabled for now
        continue;
      }

      /* Translate epoll event bits into the phenom iomask. */
      switch (event[i].events & (EPOLLIN|EPOLLOUT|EPOLLERR|EPOLLHUP)) {
        case EPOLLIN:
          mask = PH_IOMASK_READ;
          break;
        case EPOLLOUT:
          mask = PH_IOMASK_WRITE;
          break;
        case EPOLLIN|EPOLLOUT:
          mask = PH_IOMASK_READ|PH_IOMASK_WRITE;
          break;
        default:
          mask = PH_IOMASK_ERR;
      }
      // We can't just clear kmask completely because ONESHOT retains
      // the existence of the item; we need to know it is there so that
      // we can MOD it instead of ADD it later.
      job->kmask = DEFAULT_POLL_MASK;
      ph_nbio_emitter_dispatch_immediate(emitter, job, mask);

      /* Dispatch may have queued deferred jobs; run them inside the
       * epoch so their objects remain valid. */
      if (ph_job_have_deferred_items(thread)) {
        ph_job_pool_apply_deferred_items(thread);
      }
    }

    ph_thread_epoch_end();
    ph_job_collector_emitter_call(emitter);
    ph_thread_epoch_poll();
  }

  free(event);
}