static int eventer_kqueue_impl_loop() {
  struct timeval __dyna_sleep = { 0, 0 };
  KQUEUE_DECL;
  KQUEUE_SETUP;

  if(!kqs) {
    kqs = calloc(1, sizeof(*kqs));
    kqs_init(kqs);
  }
  pthread_setspecific(kqueue_setup_key, kqs);

  while(1) {
    struct timeval __now, __sleeptime;
    struct timespec __kqueue_sleeptime;
    int fd_cnt = 0;

    if(compare_timeval(eventer_max_sleeptime, __dyna_sleep) < 0)
      __dyna_sleep = eventer_max_sleeptime;

    __sleeptime = __dyna_sleep;

    eventer_dispatch_timed(&__now, &__sleeptime);

    if(compare_timeval(__sleeptime, __dyna_sleep) > 0)
      __sleeptime = __dyna_sleep;

    /* Handle recurrent events */
    eventer_dispatch_recurrent(&__now);

    /* If we're the master, we need to lock the master_kqs and make mods */
    if(master_kqs->__ke_vec_used) {
      struct timespec __zerotime = { 0, 0 };
      pthread_mutex_lock(&kqs_lock);
      fd_cnt = kevent(kqueue_fd,
                      master_kqs->__ke_vec, master_kqs->__ke_vec_used,
                      NULL, 0, &__zerotime);
      noitLT(eventer_deb, &__now, "debug: kevent(%d, [], %d) => %d\n",
             kqueue_fd, master_kqs->__ke_vec_used, fd_cnt);
      if(fd_cnt < 0) {
        noitLT(eventer_err, &__now, "kevent: %s\n", strerror(errno));
      }
      master_kqs->__ke_vec_used = 0;
      pthread_mutex_unlock(&kqs_lock);
    }

    /* Now we move on to our fd-based events */
    __kqueue_sleeptime.tv_sec = __sleeptime.tv_sec;
    __kqueue_sleeptime.tv_nsec = __sleeptime.tv_usec * 1000;
    fd_cnt = kevent(kqueue_fd, ke_vec, ke_vec_used,
                    ke_vec, ke_vec_a, &__kqueue_sleeptime);
    noitLT(eventer_deb, &__now, "debug: kevent(%d, [], %d) => %d\n",
           kqueue_fd, ke_vec_used, fd_cnt);
    ke_vec_used = 0;
    if(fd_cnt < 0) {
      noitLT(eventer_err, &__now, "kevent: %s\n", strerror(errno));
    }
    else if(fd_cnt == 0) {
      /* timeout */
      add_timeval(__dyna_sleep, __dyna_increment, &__dyna_sleep);
    }
    else {
      int idx;
      __dyna_sleep.tv_sec = __dyna_sleep.tv_usec = 0; /* reset */

      /* loop once to clear */
      for(idx = 0; idx < fd_cnt; idx++) {
        struct kevent *ke;
        ke = &ke_vec[idx];
        if(ke->flags & EV_ERROR) continue;
        masks[ke->ident] = 0;
      }
      /* Loop again to aggregate */
      for(idx = 0; idx < fd_cnt; idx++) {
        struct kevent *ke;
        ke = &ke_vec[idx];
        if(ke->flags & EV_ERROR) continue;
        if(ke->filter == EVFILT_READ) masks[ke->ident] |= EVENTER_READ;
        if(ke->filter == EVFILT_WRITE) masks[ke->ident] |= EVENTER_WRITE;
      }
      /* Loop a last time to process */
      for(idx = 0; idx < fd_cnt; idx++) {
        struct kevent *ke;
        eventer_t e;
        int fd;

        ke = &ke_vec[idx];
        if(ke->flags & EV_ERROR) {
          if(ke->data != EBADF && ke->data != ENOENT)
            noitLT(eventer_err, &__now, "error [%d]: %s\n",
                   (int)ke->ident, strerror(ke->data));
          continue;
        }
        assert((vpsized_int)ke->udata == (vpsized_int)ke->ident);
        fd = ke->ident;
        e = master_fds[fd].e;
        /* If we've seen this fd, don't callback twice */
        if(!masks[fd]) continue;
        /* It's possible that someone removed the event and freed it
         * before we got here.
         */
        if(e) eventer_kqueue_impl_trigger(e, masks[fd]);
        masks[fd] = 0; /* indicates we've processed this fd */
      }
    }
  }
  /* NOTREACHED */
  return 0;
}
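/*
 * For context: in this older variant, threads other than the loop thread do
 * not call kevent() themselves.  They append change records to
 * master_kqs->__ke_vec under kqs_lock, and the loop above flushes that
 * vector with a zero-timeout kevent() call.  The helper below is a
 * hypothetical sketch of that producer side: the field names mirror the
 * code above, but ke_vec_grow() and the exact signature are assumptions for
 * illustration, not the library's actual API.
 */
static void
master_kqs_enqueue_change(uintptr_t ident, short filter, u_short flags,
                          void *udata) {
  pthread_mutex_lock(&kqs_lock);
  ke_vec_grow(master_kqs, 1);   /* assumed helper: ensure vector capacity */
  EV_SET(&master_kqs->__ke_vec[master_kqs->__ke_vec_used++],
         ident, filter, flags, 0, 0, udata);
  pthread_mutex_unlock(&kqs_lock);
}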
static int eventer_kqueue_impl_loop() {
  struct timeval __dyna_sleep = { 0, 0 };
  KQUEUE_DECL;
  KQUEUE_SETUP(NULL);

  if(eventer_kqueue_impl_register_wakeup(kqs) == -1) {
    mtevFatal(mtev_error, "error in eventer_kqueue_impl_loop: "
              "could not eventer_kqueue_impl_register_wakeup\n");
  }

  while(1) {
    struct timeval __now, __sleeptime;
    struct timespec __kqueue_sleeptime;
    int fd_cnt = 0;

    if(compare_timeval(eventer_max_sleeptime, __dyna_sleep) < 0)
      __dyna_sleep = eventer_max_sleeptime;

    __sleeptime = __dyna_sleep;

    eventer_dispatch_timed(&__now, &__sleeptime);

    if(compare_timeval(__sleeptime, __dyna_sleep) > 0)
      __sleeptime = __dyna_sleep;

    /* Handle cross_thread dispatches */
    eventer_cross_thread_process();

    /* Handle recurrent events */
    eventer_dispatch_recurrent(&__now);

    /* Now we move on to our fd-based events */
    __kqueue_sleeptime.tv_sec = __sleeptime.tv_sec;
    __kqueue_sleeptime.tv_nsec = __sleeptime.tv_usec * 1000;
    fd_cnt = kevent(kqs->kqueue_fd, ke_vec, ke_vec_used,
                    ke_vec, ke_vec_a, &__kqueue_sleeptime);
    kqs->wakeup_notify = 0;
    if(fd_cnt > 0 || ke_vec_used)
      mtevLT(eventer_deb, &__now, "[t@%llx] kevent(%d, [...], %d) => %d\n",
             (vpsized_int)pthread_self(), kqs->kqueue_fd, ke_vec_used, fd_cnt);
    ke_vec_used = 0;
    if(fd_cnt < 0) {
      mtevLT(eventer_err, &__now, "kevent(s/%d): %s\n",
             kqs->kqueue_fd, strerror(errno));
    }
    else if(fd_cnt == 0 ||
            (fd_cnt == 1 && ke_vec[0].filter == EVFILT_USER)) {
      /* timeout */
      if(fd_cnt) eventer_kqueue_impl_register_wakeup(kqs);
      add_timeval(__dyna_sleep, __dyna_increment, &__dyna_sleep);
    }
    else {
      int idx;
      __dyna_sleep.tv_sec = __dyna_sleep.tv_usec = 0; /* reset */

      /* loop once to clear */
      for(idx = 0; idx < fd_cnt; idx++) {
        struct kevent *ke;
        ke = &ke_vec[idx];
        if(ke->flags & EV_ERROR) continue;
        if(ke->filter == EVFILT_USER) {
          eventer_kqueue_impl_register_wakeup(kqs);
          continue;
        }
        masks[ke->ident] = 0;
      }
      /* Loop again to aggregate */
      for(idx = 0; idx < fd_cnt; idx++) {
        struct kevent *ke;
        ke = &ke_vec[idx];
        if(ke->flags & EV_ERROR) continue;
        if(ke->filter == EVFILT_USER) continue;
        if(ke->filter == EVFILT_READ) masks[ke->ident] |= EVENTER_READ;
        if(ke->filter == EVFILT_WRITE) masks[ke->ident] |= EVENTER_WRITE;
      }
      /* Loop a last time to process */
      for(idx = 0; idx < fd_cnt; idx++) {
        struct kevent *ke;
        eventer_t e;
        int fd;

        ke = &ke_vec[idx];
        if(ke->filter == EVFILT_USER) continue;
        if(ke->flags & EV_ERROR) {
          if(ke->data != EBADF && ke->data != ENOENT)
            mtevLT(eventer_err, &__now, "error [%d]: %s\n",
                   (int)ke->ident, strerror(ke->data));
          continue;
        }
        mtevAssert((vpsized_int)ke->udata == (vpsized_int)ke->ident);
        fd = ke->ident;
        e = master_fds[fd].e;
        /* If we've seen this fd, don't callback twice */
        if(!masks[fd]) continue;
        /* It's possible that someone removed the event and freed it
         * before we got here.
         */
        if(e) eventer_kqueue_impl_trigger(e, masks[fd]);
        masks[fd] = 0; /* indicates we've processed this fd */
      }
    }
  }
  /* NOTREACHED */
  return 0;
}
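/*
 * The newer variant wakes a sleeping loop thread through EVFILT_USER instead
 * of a shared change vector.  The standalone program below is a minimal
 * sketch of the kqueue mechanics that the register_wakeup call depends on;
 * it is an illustration of the EVFILT_USER API, not the actual
 * eventer_kqueue_impl_register_wakeup implementation.  WAKEUP_IDENT is an
 * arbitrary identifier chosen for the example, and EV_ONESHOT is assumed
 * here only because the loop above re-registers the wakeup after each
 * delivery; the library's exact flags may differ.
 */
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <stdio.h>

#define WAKEUP_IDENT 0x1

int main(void) {
  struct kevent kev;
  int kq = kqueue();

  /* Register a one-shot user event; it must be re-added after each delivery,
   * mirroring the re-registration calls in the loop above. */
  EV_SET(&kev, WAKEUP_IDENT, EVFILT_USER, EV_ADD | EV_ONESHOT, 0, 0, NULL);
  kevent(kq, &kev, 1, NULL, 0, NULL);

  /* A waking thread would do this to interrupt a blocking kevent() call. */
  EV_SET(&kev, WAKEUP_IDENT, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
  kevent(kq, &kev, 1, NULL, 0, NULL);

  /* The loop thread observes the wakeup as an EVFILT_USER event. */
  if(kevent(kq, NULL, 0, &kev, 1, NULL) == 1 && kev.filter == EVFILT_USER)
    printf("woke up via EVFILT_USER\n");
  return 0;
}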