static int
evport_dispatch(struct event_base *base, struct timeval *tv)
{
	int i, res;
	struct evport_data *epdp = base->evbase;
	port_event_t pevtlist[EVENTS_PER_GETN];

	/*
	 * port_getn will block until it has at least nevents events. It will
	 * also return how many it's given us (which may be more than we asked
	 * for, as long as it's less than our maximum (EVENTS_PER_GETN)) in
	 * nevents.
	 */
	int nevents = 1;

	/*
	 * We have to convert a struct timeval to a struct timespec
	 * (only difference is nanoseconds vs. microseconds). If no time-based
	 * events are active, we should wait for I/O (and tv == NULL).
	 */
	struct timespec ts;
	struct timespec *ts_p = NULL;
	if (tv != NULL) {
		ts.tv_sec = tv->tv_sec;
		ts.tv_nsec = tv->tv_usec * 1000;
		ts_p = &ts;
	}

	/*
	 * Before doing anything else, we need to reassociate the events we hit
	 * last time which need reassociation. See comment at the end of the
	 * loop below.
	 */
	for (i = 0; i < EVENTS_PER_GETN; ++i) {
		struct fd_info *fdi = NULL;
		if (epdp->ed_pending[i] != -1) {
			fdi = &(epdp->ed_fds[epdp->ed_pending[i]]);
		}

		if (fdi != NULL && FDI_HAS_EVENTS(fdi)) {
			int fd = epdp->ed_pending[i];
			reassociate(epdp, fdi, fd);
			epdp->ed_pending[i] = -1;
		}
	}

	EVBASE_RELEASE_LOCK(base, EVTHREAD_WRITE, th_base_lock);

	res = port_getn(epdp->ed_port, pevtlist, EVENTS_PER_GETN,
	    (unsigned int *) &nevents, ts_p);

	EVBASE_ACQUIRE_LOCK(base, EVTHREAD_WRITE, th_base_lock);

	if (res == -1) {
		if (errno == EINTR || errno == EAGAIN) {
			evsig_process(base);
			return (0);
		} else if (errno == ETIME) {
			if (nevents == 0)
				return (0);
		} else {
			event_warn("port_getn");
			return (-1);
		}
	} else if (base->sig.evsig_caught) {
		evsig_process(base);
	}

	event_debug(("%s: port_getn reports %d events", __func__, nevents));

	for (i = 0; i < nevents; ++i) {
		struct fd_info *fdi;
		port_event_t *pevt = &pevtlist[i];
		int fd = (int) pevt->portev_object;

		check_evportop(epdp);
		check_event(pevt);
		epdp->ed_pending[i] = fd;

		/*
		 * Figure out what kind of event it was
		 * (because we have to pass this to the callback)
		 */
		res = 0;
		if (pevt->portev_events & POLLIN)
			res |= EV_READ;
		if (pevt->portev_events & POLLOUT)
			res |= EV_WRITE;

		EVUTIL_ASSERT(epdp->ed_nevents > fd);
		fdi = &(epdp->ed_fds[fd]);

		evmap_io_active(base, fd, res);
	} /* end of all events gotten */

	check_evportop(epdp);
	return (0);
}
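/*
 * A minimal standalone sketch (not from the original sources) of the
 * timeval-to-timespec conversion done above. The helper name is
 * hypothetical; the two structs differ only in their sub-second unit.
 */
#include <sys/time.h>
#include <time.h>

static void
example_tv_to_ts(const struct timeval *tv, struct timespec *ts)
{
	ts->tv_sec = tv->tv_sec;
	ts->tv_nsec = tv->tv_usec * 1000;	/* 1 usec == 1000 nsec */
}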
static int
epoll_dispatch(struct event_base *base, void *arg, struct timeval *tv)
{
	struct epollop *epollop = arg;
	struct epoll_event *events = epollop->events;
	struct evepoll *evep;
	int i, res, timeout = -1;

	if (tv != NULL)
		timeout = tv->tv_sec * 1000 + (tv->tv_usec + 999) / 1000;

	if (timeout > MAX_EPOLL_TIMEOUT_MSEC) {
		/* Linux kernels can wait forever if the timeout is too big;
		 * see comment on MAX_EPOLL_TIMEOUT_MSEC. */
		timeout = MAX_EPOLL_TIMEOUT_MSEC;
	}

	res = epoll_wait(epollop->epfd, events, epollop->nevents, timeout);

	if (res == -1) {
		if (errno != EINTR) {
			event_warn("epoll_wait");
			return (-1);
		}

		evsignal_process(base);
		return (0);
	} else if (base->sig.evsignal_caught) {
		evsignal_process(base);
	}

	event_debug(("%s: epoll_wait reports %d", __func__, res));

	for (i = 0; i < res; i++) {
		int what = events[i].events;
		struct event *evread = NULL, *evwrite = NULL;

		evep = (struct evepoll *)events[i].data.ptr;

		if (what & (EPOLLHUP|EPOLLERR)) {
			evread = evep->evread;
			evwrite = evep->evwrite;
		} else {
			if (what & EPOLLIN) {
				evread = evep->evread;
			}

			if (what & EPOLLOUT) {
				evwrite = evep->evwrite;
			}
		}

		if (!(evread||evwrite))
			continue;

		if (evread != NULL)
			event_active(evread, EV_READ, 1);
		if (evwrite != NULL)
			event_active(evwrite, EV_WRITE, 1);
	}

	return (0);
}
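/*
 * Sketch of the millisecond conversion and clamp above, with hypothetical
 * names (EXAMPLE_MAX_TIMEOUT_MSEC stands in for MAX_EPOLL_TIMEOUT_MSEC;
 * the 35-minute value is an assumption). Rounding the microseconds up
 * keeps a timeout with under 1 msec remaining from busy-looping, and the
 * clamp works around old Linux kernels treating over-large timeouts as
 * infinite.
 */
#include <sys/time.h>

#define EXAMPLE_MAX_TIMEOUT_MSEC (35 * 60 * 1000)

static int
example_tv_to_epoll_timeout(const struct timeval *tv)
{
	long msec;

	if (tv == NULL)
		return -1;	/* -1 makes epoll_wait() block indefinitely */

	msec = tv->tv_sec * 1000 + (tv->tv_usec + 999) / 1000;
	if (msec > EXAMPLE_MAX_TIMEOUT_MSEC)
		msec = EXAMPLE_MAX_TIMEOUT_MSEC;
	return (int)msec;
}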
/* Helper: set the signal handler for evsignal to handler in base, so that
 * we can restore the original handler when we clear the current one. */
int
_evsig_set_handler(struct event_base *base,
    int evsignal, void (__cdecl *handler)(int))
{
#ifdef _EVENT_HAVE_SIGACTION
	struct sigaction sa;
#else
	ev_sighandler_t sh;
#endif
	struct evsig_info *sig = &base->sig;
	void *p;

	/*
	 * resize saved signal handler array up to the highest signal number.
	 * a dynamic array is used to keep footprint on the low side.
	 */
	if (evsignal >= sig->sh_old_max) {
		int new_max = evsignal + 1;
		event_debug(("%s: evsignal (%d) >= sh_old_max (%d), resizing",
			    __func__, evsignal, sig->sh_old_max));
		p = mm_realloc(sig->sh_old, new_max * sizeof(*sig->sh_old));
		if (p == NULL) {
			event_warn("realloc");
			return (-1);
		}

		memset((char *)p + sig->sh_old_max * sizeof(*sig->sh_old),
		    0, (new_max - sig->sh_old_max) * sizeof(*sig->sh_old));

		sig->sh_old_max = new_max;
		sig->sh_old = p;
	}

	/* allocate space for previous handler out of dynamic array */
	sig->sh_old[evsignal] = mm_malloc(sizeof *sig->sh_old[evsignal]);
	if (sig->sh_old[evsignal] == NULL) {
		event_warn("malloc");
		return (-1);
	}

	/* save previous handler and setup new handler */
#ifdef _EVENT_HAVE_SIGACTION
	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = handler;
	sa.sa_flags |= SA_RESTART;
	sigfillset(&sa.sa_mask);

	if (sigaction(evsignal, &sa, sig->sh_old[evsignal]) == -1) {
		event_warn("sigaction");
		mm_free(sig->sh_old[evsignal]);
		sig->sh_old[evsignal] = NULL;
		return (-1);
	}
#else
	if ((sh = signal(evsignal, handler)) == SIG_ERR) {
		event_warn("signal");
		mm_free(sig->sh_old[evsignal]);
		sig->sh_old[evsignal] = NULL;
		return (-1);
	}
	*sig->sh_old[evsignal] = sh;
#endif

	return (0);
}
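/*
 * Standalone sketch of the sigaction branch above, with a hypothetical
 * helper name: install a handler with SA_RESTART and a full blocking
 * mask, saving the old disposition so a later sigaction(signo, old, NULL)
 * can restore it.
 */
#include <signal.h>
#include <string.h>

static int
example_set_handler(int signo, void (*handler)(int), struct sigaction *old)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = handler;
	sa.sa_flags = SA_RESTART;	/* restart interrupted syscalls */
	sigfillset(&sa.sa_mask);	/* block all signals in the handler */
	return sigaction(signo, &sa, old);
}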
static int
kq_dispatch(struct event_base *base, struct timeval *tv)
{
	struct kqop *kqop = base->evbase;
	struct kevent *events = kqop->events;
	struct kevent *changes;
	struct timespec ts, *ts_p = NULL;
	int i, n_changes, res;

	if (tv != NULL) {
		TIMEVAL_TO_TIMESPEC(tv, &ts);
		ts_p = &ts;
	}

	/* Build "changes" from "base->changes" */
	EVUTIL_ASSERT(kqop->changes);
	n_changes = kq_build_changes_list(&base->changelist, kqop);
	if (n_changes < 0)
		return -1;

	event_changelist_remove_all_(&base->changelist, base);

	/* steal the changes array in case some broken code tries to call
	 * dispatch twice at once. */
	changes = kqop->changes;
	kqop->changes = NULL;

	/* Make sure that 'events' is at least as long as the list of changes:
	 * otherwise errors in the changes can get reported as a -1 return
	 * value from kevent() rather than as EV_ERROR events in the events
	 * array.
	 *
	 * (We could instead handle -1 return values from kevent() by
	 * retrying with a smaller changes array or a larger events array,
	 * but this approach seems less risky for now.)
	 */
	if (kqop->events_size < n_changes) {
		int new_size = kqop->events_size;
		do {
			new_size *= 2;
		} while (new_size < n_changes);

		kq_grow_events(kqop, new_size);
		events = kqop->events;
	}

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	res = kevent(kqop->kq, changes, n_changes,
	    events, kqop->events_size, ts_p);

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	EVUTIL_ASSERT(kqop->changes == NULL);
	kqop->changes = changes;

	if (res == -1) {
		if (errno != EINTR) {
			event_warn("kevent");
			return (-1);
		}

		return (0);
	}

	event_debug(("%s: kevent reports %d", __func__, res));

	for (i = 0; i < res; i++) {
		int which = 0;

		if (events[i].flags & EV_ERROR) {
			switch (events[i].data) {

			/* Can occur on delete if we are not currently
			 * watching any events on this fd.  That can
			 * happen when the fd was closed and another
			 * file was opened with that fd. */
			case ENOENT:
			/* Can occur for reasons not fully understood
			 * on FreeBSD. */
			case EINVAL:
				continue;
#if defined(__FreeBSD__)
			/*
			 * This currently occurs if an FD is closed
			 * before the EV_DELETE makes it out via kevent().
			 * The FreeBSD capabilities code sees the blank
			 * capability set and rejects the request to
			 * modify an event.
			 *
			 * To be strictly correct - when an FD is closed,
			 * all the registered events are also removed.
			 * Queuing EV_DELETE to a closed FD is wrong.
			 * The event(s) should just be deleted from
			 * the pending changelist.
			 */
			case ENOTCAPABLE:
				continue;
#endif

			/* Can occur on a delete if the fd is closed. */
			case EBADF:
				/* XXXX On NetBSD, we can also get EBADF if we
				 * try to add the write side of a pipe, but
				 * the read side has already been closed.
				 * Other BSDs call this situation 'EPIPE'. It
				 * would be good if we had a way to report
				 * this situation. */
				continue;
			/* These two can occur on an add if the fd was one side
			 * of a pipe, and the other side was closed. */
			case EPERM:
			case EPIPE:
				/* Report read events, if we're listening for
				 * them, so that the user can learn about any
				 * add errors.  (If the operation was a
				 * delete, then udata should be cleared.) */
				if (events[i].udata) {
					/* The operation was an add:
					 * report the error as a read. */
					which |= EV_READ;
					break;
				} else {
					/* The operation was a del:
					 * report nothing. */
					continue;
				}

			/* Other errors shouldn't occur. */
			default:
				errno = events[i].data;
				return (-1);
			}
		} else if (events[i].filter == EVFILT_READ) {
			which |= EV_READ;
		} else if (events[i].filter == EVFILT_WRITE) {
			which |= EV_WRITE;
#ifdef EVFILT_USER
		} else if (events[i].filter == EVFILT_USER) {
			base->is_notify_pending = 0;
#endif
		}

		if (!which)
			continue;

		evmap_io_active_(base, events[i].ident, which | EV_ET);
	}

	if (res == kqop->events_size) {
		/* We used all the events space that we have. Maybe we should
		   make it bigger. */
		kq_grow_events(kqop, kqop->events_size * 2);
	}

	return (0);
}
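/*
 * Sketch of the doubling strategy kq_dispatch uses for its events array,
 * written as a generic helper with hypothetical names: keep doubling until
 * the requested minimum fits, so growth cost stays amortized-constant.
 * Assumes *size starts at 1 or more.
 */
#include <stdlib.h>

static int
example_grow_array(void **array, int *size, size_t elem_size, int min_size)
{
	int new_size = *size;
	void *p;

	do {
		new_size *= 2;
	} while (new_size < min_size);

	if ((p = realloc(*array, new_size * elem_size)) == NULL)
		return -1;
	*array = p;
	*size = new_size;
	return 0;
}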
int
epoll_dispatch(struct event_base *base, void *arg, struct timeval *tv)
{
	struct epollop *epollop = arg;
	struct epoll_event *events = epollop->events;
	struct evepoll *evep;
	int i, res, timeout;

	if (evsignal_deliver(&epollop->evsigmask) == -1)
		return (-1);

	timeout = tv->tv_sec * 1000 + (tv->tv_usec + 999) / 1000;

	benchmark_stop_sample();
	event_mutex_unlock(base);
	res = epoll_wait(epollop->epfd, events, epollop->nevents, timeout);
	event_mutex_lock(base);
	benchmark_start_sample();

	if (evsignal_recalc(&epollop->evsigmask) == -1)
		return (-1);

	if (res == -1) {
		if (errno != EINTR) {
			event_warn("epoll_wait");
			return (-1);
		}

		evsignal_process();
		return (0);
	} else if (evsignal_caught)
		evsignal_process();

	event_debug(("%s: epoll_wait reports %d", __func__, res));

	for (i = 0; i < res; i++) {
		int which = 0;
		int what = events[i].events;
		struct event *evread = NULL, *evwrite = NULL;

		evep = (struct evepoll *)events[i].data.ptr;

		if (what & EPOLLHUP)
			what |= EPOLLIN | EPOLLOUT;
		else if (what & EPOLLERR)
			what |= EPOLLIN | EPOLLOUT;

		if (what & EPOLLIN) {
			evread = evep->evread;
			which |= EV_READ;
		}

		if (what & EPOLLOUT) {
			evwrite = evep->evwrite;
			which |= EV_WRITE;
		}

		if (!which)
			continue;

		if (evread != NULL && !(evread->ev_events & EV_PERSIST))
			event_del(evread);
		if (evwrite != NULL && evwrite != evread &&
		    !(evwrite->ev_events & EV_PERSIST))
			event_del(evwrite);

		if (evread != NULL)
			event_active(evread, EV_READ, 1);
		if (evwrite != NULL)
			event_active(evwrite, EV_WRITE, 1);
	}

	return (0);
}
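/*
 * Sketch of the unlock-around-blocking-wait pattern used above, written
 * with plain pthreads (the original wraps this in event_mutex_unlock/
 * event_mutex_lock); names are hypothetical. The base lock is dropped
 * while the thread sleeps in the kernel so other threads can add or
 * delete events in the meantime.
 */
#include <pthread.h>
#include <poll.h>

static int
example_wait_unlocked(pthread_mutex_t *lock, struct pollfd *fds,
    nfds_t nfds, int timeout)
{
	int n;

	pthread_mutex_unlock(lock);	/* let other threads touch the base */
	n = poll(fds, nfds, timeout);
	pthread_mutex_lock(lock);	/* reacquire before shared state */
	return n;
}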
static int
poll_dispatch(struct event_base *base, struct timeval *tv)
{
	int res, i, j, nfds;
	long msec = -1;
	struct pollop *pop = base->evbase;
	struct pollfd *event_set;

	poll_check_ok(pop);

	nfds = pop->nfds;

#ifndef _EVENT_DISABLE_THREAD_SUPPORT
	if (base->th_base_lock) {
		/* If we're using this backend in a multithreaded setting,
		 * then we need to work on a copy of event_set, so that we can
		 * let other threads modify the main event_set while we're
		 * polling. If we're not multithreaded, then we'll skip the
		 * copy step here to save memory and time. */
		if (pop->realloc_copy) {
			struct pollfd *tmp = mm_realloc(pop->event_set_copy,
			    pop->event_count * sizeof(struct pollfd));
			if (tmp == NULL) {
				event_warn("realloc");
				return -1;
			}
			pop->event_set_copy = tmp;
			pop->realloc_copy = 0;
		}
		memcpy(pop->event_set_copy, pop->event_set,
		    sizeof(struct pollfd)*nfds);
		event_set = pop->event_set_copy;
	} else {
		event_set = pop->event_set;
	}
#else
	event_set = pop->event_set;
#endif

	if (tv != NULL) {
		msec = evutil_tv_to_msec(tv);
		if (msec < 0 || msec > INT_MAX)
			msec = INT_MAX;
	}

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	res = poll(event_set, nfds, msec);

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (res == -1) {
		if (errno != EINTR) {
			event_warn("poll");
			return (-1);
		}

		return (0);
	}

	event_debug(("%s: poll reports %d", __func__, res));

	if (res == 0 || nfds == 0)
		return (0);

	i = random() % nfds;
	for (j = 0; j < nfds; j++) {
		int what;
		if (++i == nfds)
			i = 0;
		what = event_set[i].revents;
		if (!what)
			continue;

		res = 0;

		/* If the file gets closed notify */
		if (what & (POLLHUP|POLLERR))
			what |= POLLIN|POLLOUT;
		if (what & POLLIN)
			res |= EV_READ;
		if (what & POLLOUT)
			res |= EV_WRITE;
		if (res == 0)
			continue;

		evmap_io_active(base, event_set[i].fd, res);
	}

	return (0);
}
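/*
 * Sketch of the randomized round-robin scan above, with hypothetical
 * names: starting at a random offset each pass keeps low-numbered slots
 * from always being serviced first, which matters when callbacks for
 * early slots could otherwise starve later ones.
 */
#include <stdlib.h>

static void
example_fair_scan(const int *ready, int n, void (*service)(int slot))
{
	int i, j;

	if (n <= 0)
		return;
	i = rand() % n;
	for (j = 0; j < n; j++) {
		if (++i >= n)
			i = 0;
		if (ready[i])
			service(i);
	}
}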
static int
epoll_dispatch(struct event_base *base, struct timeval *tv)
{
	struct epollop *epollop = base->evbase;
	struct epoll_event *events = epollop->events;
	int i, res;
	long timeout = -1;

#ifdef USING_TIMERFD
	if (epollop->timerfd >= 0) {
		struct itimerspec is;
		is.it_interval.tv_sec = 0;
		is.it_interval.tv_nsec = 0;
		if (tv == NULL) {
			/* No timeout; disarm the timer. */
			is.it_value.tv_sec = 0;
			is.it_value.tv_nsec = 0;
		} else {
			if (tv->tv_sec == 0 && tv->tv_usec == 0) {
				/* we need to exit immediately; timerfd can't
				 * do that. */
				timeout = 0;
			}
			is.it_value.tv_sec = tv->tv_sec;
			is.it_value.tv_nsec = tv->tv_usec * 1000;
		}
		/* TODO: we could avoid unnecessary syscalls here by only
		   calling timerfd_settime when the top timeout changes, or
		   when we're called with a different timeval. */
		if (timerfd_settime(epollop->timerfd, 0, &is, NULL) < 0) {
			event_warn("timerfd_settime");
		}
	} else
#endif
	if (tv != NULL) {
		timeout = evutil_tv_to_msec_(tv);
		if (timeout < 0 || timeout > MAX_EPOLL_TIMEOUT_MSEC) {
			/* Linux kernels can wait forever if the timeout is
			 * too big; see comment on MAX_EPOLL_TIMEOUT_MSEC. */
			timeout = MAX_EPOLL_TIMEOUT_MSEC;
		}
	}

	epoll_apply_changes(base);
	event_changelist_remove_all_(&base->changelist, base);

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	res = epoll_wait(epollop->epfd, events, epollop->nevents, timeout);

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (res == -1) {
		if (errno != EINTR) {
			event_warn("epoll_wait");
			return (-1);
		}

		return (0);
	}

	event_debug(("%s: epoll_wait reports %d", __func__, res));
	EVUTIL_ASSERT(res <= epollop->nevents);

	for (i = 0; i < res; i++) {
		int what = events[i].events;
		short ev = 0;
#ifdef USING_TIMERFD
		if (events[i].data.fd == epollop->timerfd)
			continue;
#endif

		if (what & (EPOLLHUP|EPOLLERR)) {
			ev = EV_READ | EV_WRITE;
		} else {
			if (what & EPOLLIN)
				ev |= EV_READ;
			if (what & EPOLLOUT)
				ev |= EV_WRITE;
			if (what & EPOLLRDHUP)
				ev |= EV_CLOSED;
		}

		if (!ev)
			continue;

		evmap_io_active_(base, events[i].data.fd, ev | EV_ET);
	}

	if (res == epollop->nevents && epollop->nevents < MAX_NEVENT) {
		/* We used all of the event space this time.  We should
		   be ready for more events next time. */
		int new_nevents = epollop->nevents * 2;
		struct epoll_event *new_events;

		new_events = mm_realloc(epollop->events,
		    new_nevents * sizeof(struct epoll_event));
		if (new_events) {
			epollop->events = new_events;
			epollop->nevents = new_nevents;
		}
	}

	return (0);
}
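/*
 * Minimal sketch of the timerfd technique above, assuming Linux and
 * <sys/timerfd.h>; all names are hypothetical. A timerfd registered with
 * epoll gives sub-millisecond timeout precision, where epoll_wait()'s
 * integer timeout only resolves whole milliseconds.
 */
#include <sys/epoll.h>
#include <sys/time.h>
#include <sys/timerfd.h>
#include <string.h>
#include <errno.h>

static int
example_arm_timerfd(int epfd, int tfd, const struct timeval *tv)
{
	struct itimerspec is;
	struct epoll_event epev;

	memset(&epev, 0, sizeof(epev));
	epev.events = EPOLLIN;
	epev.data.fd = tfd;
	/* Register the timerfd once; EEXIST means it is already there. */
	if (epoll_ctl(epfd, EPOLL_CTL_ADD, tfd, &epev) < 0 && errno != EEXIST)
		return -1;

	memset(&is, 0, sizeof(is));	/* one-shot: no interval */
	is.it_value.tv_sec = tv->tv_sec;
	is.it_value.tv_nsec = tv->tv_usec * 1000;
	return timerfd_settime(tfd, 0, &is, NULL);
}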
static void
test_evutil_log(void *ptr)
{
	evutil_socket_t fd = -1;
	char buf[128];

	event_set_log_callback(logfn);
	event_set_fatal_callback(fatalfn);
#define RESET() do {				\
		logsev = 0;			\
		if (logmsg) free(logmsg);	\
		logmsg = NULL;			\
	} while (0)
#define LOGEQ(sev,msg) do {			\
		tt_int_op(logsev,==,sev);	\
		tt_assert(logmsg != NULL);	\
		tt_str_op(logmsg,==,msg);	\
	} while (0)

#ifdef CAN_CHECK_ERR
	/* We need to disable these tests for now.  Previously, the logging
	 * module didn't enforce the requirement that a fatal callback
	 * actually exit.  Now, it exits no matter what, so if we want to
	 * reinstate these tests, we'll need to fork for each one. */
	check_error_logging(errx_fn, 2, _EVENT_LOG_ERR,
	    "Fatal error; too many kumquats (5)");
	RESET();
#endif

	event_warnx("Far too many %s (%d)", "wombats", 99);
	LOGEQ(_EVENT_LOG_WARN, "Far too many wombats (99)");
	RESET();

	event_msgx("Connecting lime to coconut");
	LOGEQ(_EVENT_LOG_MSG, "Connecting lime to coconut");
	RESET();

	event_debug(("A millisecond passed! We should log that!"));
#ifdef USE_DEBUG
	LOGEQ(_EVENT_LOG_DEBUG, "A millisecond passed! We should log that!");
#else
	tt_int_op(logsev,==,0);
	tt_ptr_op(logmsg,==,NULL);
#endif
	RESET();

	/* Try with an errno. */
	errno = ENOENT;
	event_warn("Couldn't open %s", "/bad/file");
	evutil_snprintf(buf, sizeof(buf),
	    "Couldn't open /bad/file: %s",strerror(ENOENT));
	LOGEQ(_EVENT_LOG_WARN,buf);
	RESET();

#ifdef CAN_CHECK_ERR
	evutil_snprintf(buf, sizeof(buf),
	    "Couldn't open /very/bad/file: %s",strerror(ENOENT));
	check_error_logging(err_fn, 5, _EVENT_LOG_ERR, buf);
	RESET();
#endif

	/* Try with a socket errno. */
	fd = socket(AF_INET, SOCK_STREAM, 0);
#ifdef WIN32
	evutil_snprintf(buf, sizeof(buf),
	    "Unhappy socket: %s",
	    evutil_socket_error_to_string(WSAEWOULDBLOCK));
	EVUTIL_SET_SOCKET_ERROR(WSAEWOULDBLOCK);
#else
	evutil_snprintf(buf, sizeof(buf),
	    "Unhappy socket: %s", strerror(EAGAIN));
	errno = EAGAIN;
#endif
	event_sock_warn(fd, "Unhappy socket");
	LOGEQ(_EVENT_LOG_WARN, buf);
	RESET();

#ifdef CAN_CHECK_ERR
	check_error_logging(sock_err_fn, 20, _EVENT_LOG_ERR, buf);
	RESET();
#endif

#undef RESET
#undef LOGEQ
end:
	if (logmsg)
		free(logmsg);
	if (fd >= 0)
		evutil_closesocket(fd);
}
int
kq_dispatch(struct event_base *base, void *arg, struct timeval *tv)
{
	struct kqop *kqop = arg;
	struct kevent *changes = kqop->changes;
	struct kevent *events = kqop->events;
	struct event *ev;
	struct timespec ts;
	int i, res;

	TIMEVAL_TO_TIMESPEC(tv, &ts);

	res = kevent(kqop->kq, changes, kqop->nchanges,
	    events, kqop->nevents, &ts);
	kqop->nchanges = 0;
	if (res == -1) {
		if (errno != EINTR) {
			event_warn("kevent");
			return (-1);
		}

		return (0);
	}

	event_debug(("%s: kevent reports %d", __func__, res));

	for (i = 0; i < res; i++) {
		int which = 0;

		if (events[i].flags & EV_ERROR) {
			/*
			 * Error messages that can happen, when a delete fails.
			 *   EBADF happens when the file descriptor has been
			 *   closed,
			 *   ENOENT when the file descriptor was closed and
			 *   then reopened.
			 * An error is also indicated when a callback deletes
			 * an event we are still processing.  In that case
			 * the data field is set to ENOENT.
			 */
			if (events[i].data == EBADF ||
			    events[i].data == ENOENT)
				continue;

			errno = events[i].data;
			return (-1);
		}

		ev = (struct event *)events[i].udata;

		if (events[i].filter == EVFILT_READ) {
			which |= EV_READ;
		} else if (events[i].filter == EVFILT_WRITE) {
			which |= EV_WRITE;
		} else if (events[i].filter == EVFILT_SIGNAL) {
			which |= EV_SIGNAL;
		}

		if (!which)
			continue;

		if (!(ev->ev_events & EV_PERSIST)) {
			ev->ev_flags &= ~EVLIST_X_KQINKERNEL;
			event_del(ev);
		}

		event_active(ev, which,
		    ev->ev_events & EV_SIGNAL ? events[i].data : 1);
	}

	return (0);
}
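/*
 * Sketch of how a change gets queued for a dispatch loop like the one
 * above, using the standard EV_SET macro; the surrounding names are
 * hypothetical. The udata pointer is how kq_dispatch recovers its
 * struct event when the filter fires.
 */
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

static void
example_queue_read(struct kevent *kev, int fd, void *ev)
{
	/* EV_ADD registers the fd; udata rides along with each report. */
	EV_SET(kev, fd, EVFILT_READ, EV_ADD, 0, 0, ev);
}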
static int
epoll_apply_one_change(struct event_base *base,
    struct epollop *epollop,
    const struct event_change *ch)
{
	struct epoll_event epev;
	int op, events = 0;
	int idx;

	idx = EPOLL_OP_TABLE_INDEX(ch);
	op = epoll_op_table[idx].op;
	events = epoll_op_table[idx].events;

	if (!events) {
		EVUTIL_ASSERT(op == 0);
		return 0;
	}

	if ((ch->read_change|ch->write_change) & EV_CHANGE_ET)
		events |= EPOLLET;

	memset(&epev, 0, sizeof(epev));
	epev.data.fd = ch->fd;
	epev.events = events;
	if (epoll_ctl(epollop->epfd, op, ch->fd, &epev) == 0) {
		event_debug((PRINT_CHANGES(op, epev.events, ch, "okay")));
		return 0;
	}

	switch (op) {
	case EPOLL_CTL_MOD:
		if (errno == ENOENT) {
			/* If a MOD operation fails with ENOENT, the
			 * fd was probably closed and re-opened.  We
			 * should retry the operation as an ADD.
			 */
			if (epoll_ctl(epollop->epfd, EPOLL_CTL_ADD,
				ch->fd, &epev) == -1) {
				event_warn("Epoll MOD(%d) on %d retried as ADD; that failed too",
				    (int)epev.events, ch->fd);
				return -1;
			} else {
				event_debug(("Epoll MOD(%d) on %d retried as ADD; succeeded.",
					(int)epev.events,
					ch->fd));
				return 0;
			}
		}
		break;
	case EPOLL_CTL_ADD:
		if (errno == EEXIST) {
			/* If an ADD operation fails with EEXIST,
			 * either the operation was redundant (as with a
			 * precautionary add), or we ran into a fun
			 * kernel bug where using dup*() to duplicate the
			 * same file into the same fd gives you the same epitem
			 * rather than a fresh one.  For the second case,
			 * we must retry with MOD. */
			if (epoll_ctl(epollop->epfd, EPOLL_CTL_MOD,
				ch->fd, &epev) == -1) {
				event_warn("Epoll ADD(%d) on %d retried as MOD; that failed too",
				    (int)epev.events, ch->fd);
				return -1;
			} else {
				event_debug(("Epoll ADD(%d) on %d retried as MOD; succeeded.",
					(int)epev.events,
					ch->fd));
				return 0;
			}
		}
		break;
	case EPOLL_CTL_DEL:
		if (errno == ENOENT || errno == EBADF || errno == EPERM) {
			/* If a delete fails with one of these errors,
			 * that's fine too: we closed the fd before we
			 * got around to calling epoll_dispatch. */
			event_debug(("Epoll DEL(%d) on fd %d gave %s: DEL was unnecessary.",
				(int)epev.events,
				ch->fd,
				strerror(errno)));
			return 0;
		}
		break;
	default:
		break;
	}

	event_warn(PRINT_CHANGES(op, epev.events, ch, "failed"));
	return -1;
}
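/*
 * Compact sketch of the ENOENT fallback above, with a hypothetical
 * wrapper name: when EPOLL_CTL_MOD fails because the fd was closed and
 * re-opened (so the kernel no longer knows it), retry as an ADD.
 */
#include <sys/epoll.h>
#include <errno.h>

static int
example_epoll_mod_or_add(int epfd, int fd, struct epoll_event *ev)
{
	if (epoll_ctl(epfd, EPOLL_CTL_MOD, fd, ev) == 0)
		return 0;
	if (errno == ENOENT)
		return epoll_ctl(epfd, EPOLL_CTL_ADD, fd, ev);
	return -1;
}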
static void
test_evutil_log(void *ptr)
{
	evutil_socket_t fd = -1;
	char buf[128];

	event_set_log_callback(logfn);
	event_set_fatal_callback(fatalfn);
#define RESET() do {				\
		logsev = exited = exitcode = 0;	\
		if (logmsg) free(logmsg);	\
		logmsg = NULL;			\
	} while (0)
#define LOGEQ(sev,msg) do {			\
		tt_int_op(logsev,==,sev);	\
		tt_assert(logmsg != NULL);	\
		tt_str_op(logmsg,==,msg);	\
	} while (0)

	event_errx(2, "Fatal error; too many kumquats (%d)", 5);
	LOGEQ(_EVENT_LOG_ERR, "Fatal error; too many kumquats (5)");
	tt_int_op(exitcode,==,2);
	RESET();

	event_warnx("Far too many %s (%d)", "wombats", 99);
	LOGEQ(_EVENT_LOG_WARN, "Far too many wombats (99)");
	tt_int_op(exited,==,0);
	RESET();

	event_msgx("Connecting lime to coconut");
	LOGEQ(_EVENT_LOG_MSG, "Connecting lime to coconut");
	tt_int_op(exited,==,0);
	RESET();

	event_debug(("A millisecond passed! We should log that!"));
#ifdef USE_DEBUG
	LOGEQ(_EVENT_LOG_DEBUG, "A millisecond passed! We should log that!");
#else
	tt_int_op(logsev,==,0);
	tt_ptr_op(logmsg,==,NULL);
#endif
	RESET();

	/* Try with an errno. */
	errno = ENOENT;
	event_warn("Couldn't open %s", "/bad/file");
	evutil_snprintf(buf, sizeof(buf),
	    "Couldn't open /bad/file: %s",strerror(ENOENT));
	LOGEQ(_EVENT_LOG_WARN,buf);
	tt_int_op(exited, ==, 0);
	RESET();

	errno = ENOENT;
	event_err(5,"Couldn't open %s", "/very/bad/file");
	evutil_snprintf(buf, sizeof(buf),
	    "Couldn't open /very/bad/file: %s",strerror(ENOENT));
	LOGEQ(_EVENT_LOG_ERR,buf);
	tt_int_op(exitcode, ==, 5);
	RESET();

	/* Try with a socket errno. */
	fd = socket(AF_INET, SOCK_STREAM, 0);
#ifdef WIN32
	evutil_snprintf(buf, sizeof(buf),
	    "Unhappy socket: %s",
	    evutil_socket_error_to_string(WSAEWOULDBLOCK));
	EVUTIL_SET_SOCKET_ERROR(WSAEWOULDBLOCK);
#else
	evutil_snprintf(buf, sizeof(buf),
	    "Unhappy socket: %s", strerror(EAGAIN));
	errno = EAGAIN;
#endif
	event_sock_warn(fd, "Unhappy socket");
	LOGEQ(_EVENT_LOG_WARN, buf);
	tt_int_op(exited,==,0);
	RESET();

#ifdef WIN32
	EVUTIL_SET_SOCKET_ERROR(WSAEWOULDBLOCK);
#else
	errno = EAGAIN;
#endif
	event_sock_err(200, fd, "Unhappy socket");
	LOGEQ(_EVENT_LOG_ERR, buf);
	tt_int_op(exitcode,==,200);
	RESET();

#undef RESET
#undef LOGEQ
end:
	if (logmsg)
		free(logmsg);
	if (fd >= 0)
		EVUTIL_CLOSESOCKET(fd);
}
static int
kq_dispatch(struct event_base *base, struct timeval *tv)
{
	struct kqop *kqop = base->evbase;
	struct kevent *events = kqop->events;
	struct kevent *changes;
	struct timespec ts, *ts_p = NULL;
	int i, n_changes, res;

	if (tv != NULL) {
		TIMEVAL_TO_TIMESPEC(tv, &ts);
		ts_p = &ts;
	}

	/* Build "changes" from "base->changes" */
	EVUTIL_ASSERT(kqop->changes);
	n_changes = kq_build_changes_list(&base->changelist, kqop);
	if (n_changes < 0)
		return -1;

	event_changelist_remove_all(&base->changelist, base);

	/* steal the changes array in case some broken code tries to call
	 * dispatch twice at once. */
	changes = kqop->changes;
	kqop->changes = NULL;

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	res = kevent(kqop->kq, changes, n_changes,
	    events, kqop->events_size, ts_p);

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	EVUTIL_ASSERT(kqop->changes == NULL);
	kqop->changes = changes;

	if (res == -1) {
		if (errno != EINTR) {
			event_warn("kevent");
			return (-1);
		}

		return (0);
	}

	event_debug(("%s: kevent reports %d", __func__, res));

	for (i = 0; i < res; i++) {
		int which = 0;

		if (events[i].flags & EV_ERROR) {
			/*
			 * Error messages that can happen, when a delete fails.
			 *   EBADF happens when the file descriptor has been
			 *   closed,
			 *   ENOENT when the file descriptor was closed and
			 *   then reopened.
			 *   EINVAL for some reasons not understood; EINVAL
			 *   should not be returned ever; but FreeBSD does :-\
			 * An error is also indicated when a callback deletes
			 * an event we are still processing.  In that case
			 * the data field is set to ENOENT.
			 */
			if (events[i].data == EBADF ||
			    events[i].data == EINVAL ||
			    events[i].data == ENOENT)
				continue;
			errno = events[i].data;
			return (-1);
		}

		if (events[i].filter == EVFILT_READ) {
			which |= EV_READ;
		} else if (events[i].filter == EVFILT_WRITE) {
			which |= EV_WRITE;
		} else if (events[i].filter == EVFILT_SIGNAL) {
			which |= EV_SIGNAL;
		}

		if (!which)
			continue;

		if (events[i].filter == EVFILT_SIGNAL) {
			evmap_signal_active(base, events[i].ident, 1);
		} else {
			evmap_io_active(base, events[i].ident, which | EV_ET);
		}
	}

	if (res == kqop->events_size) {
		struct kevent *newresult;
		int size = kqop->events_size;

		/* We used all the events space that we have. Maybe we should
		   make it bigger. */
		size *= 2;
		newresult = mm_realloc(kqop->events,
		    size * sizeof(struct kevent));
		if (newresult) {
			kqop->events = newresult;
			kqop->events_size = size;
		}
	}

	return (0);
}
static int
kq_dispatch(struct event_base *base, void *arg, struct timeval *tv)
{
	struct kqop *kqop = arg;
	struct kevent *changes = kqop->changes;
	struct kevent *events = kqop->events;
	struct event *ev;
	struct timespec ts, *ts_p = NULL;
	int i, res;

	if (tv != NULL) {
		TIMEVAL_TO_TIMESPEC(tv, &ts);
		ts_p = &ts;
	}

	res = kevent(kqop->kq, changes, kqop->nchanges,
	    events, kqop->nevents, ts_p);
	kqop->nchanges = 0;
	if (res == -1) {
		if (errno != EINTR) {
			event_warn("kevent");
			return (-1);
		}

		return (0);
	}

	event_debug(("%s: kevent reports %d", __func__, res));

	for (i = 0; i < res; i++) {
		int which = 0;

		if (events[i].flags & EV_ERROR) {
			/*
			 * Error messages that can happen, when a delete fails.
			 *   EBADF happens when the file descriptor has been
			 *   closed,
			 *   ENOENT when the file descriptor was closed and
			 *   then reopened.
			 *   EINVAL for some reasons not understood; EINVAL
			 *   should not be returned ever; but FreeBSD does :-\
			 * An error is also indicated when a callback deletes
			 * an event we are still processing.  In that case
			 * the data field is set to ENOENT.
			 */
			if (events[i].data == EBADF ||
			    events[i].data == EINVAL ||
			    events[i].data == ENOENT)
				continue;
			errno = events[i].data;
			return (-1);
		}

		if (events[i].filter == EVFILT_READ) {
			which |= EV_READ;
		} else if (events[i].filter == EVFILT_WRITE) {
			which |= EV_WRITE;
		} else if (events[i].filter == EVFILT_SIGNAL) {
			which |= EV_SIGNAL;
		}

		if (!which)
			continue;

		if (events[i].filter == EVFILT_SIGNAL) {
			struct event_list *head =
			    (struct event_list *)events[i].udata;
			TAILQ_FOREACH(ev, head, ev_signal_next) {
				event_active(ev, which, events[i].data);
			}
		} else {
			ev = (struct event *)events[i].udata;

			if (!(ev->ev_events & EV_PERSIST))
				ev->ev_flags &= ~EVLIST_X_KQINKERNEL;

			event_active(ev, which, 1);
		}
	}

	return (0);
}
int
poll_dispatch(struct event_base *base, void *arg, struct timeval *tv)
{
	int res, i, msec = -1, nfds;
	struct pollop *pop = arg;

	poll_check_ok(pop);

	if (tv != NULL)
		msec = tv->tv_sec * 1000 + (tv->tv_usec + 999) / 1000;

	nfds = pop->nfds;
	res = poll(pop->event_set, nfds, msec);

	if (res == -1) {
		if (errno != EINTR) {
			event_warn("poll");
			return (-1);
		}

		evsignal_process(base);
		return (0);
	} else if (base->sig.evsignal_caught) {
		evsignal_process(base);
	}

	event_debug(("%s: poll reports %d", __func__, res));

	if (res == 0)
		return (0);

	for (i = 0; i < nfds; i++) {
		int what = pop->event_set[i].revents;
		struct event *r_ev = NULL, *w_ev = NULL;
		if (!what)
			continue;

		res = 0;

		/* If the file gets closed notify */
		if (what & (POLLHUP|POLLERR))
			what |= POLLIN|POLLOUT;
		if (what & POLLIN) {
			res |= EV_READ;
			r_ev = pop->event_r_back[i];
		}
		if (what & POLLOUT) {
			res |= EV_WRITE;
			w_ev = pop->event_w_back[i];
		}
		if (res == 0)
			continue;

		if (r_ev && (res & r_ev->ev_events)) {
			event_active(r_ev, res & r_ev->ev_events, 1);
		}
		if (w_ev && w_ev != r_ev && (res & w_ev->ev_events)) {
			event_active(w_ev, res & w_ev->ev_events, 1);
		}
	}

	return (0);
}
static int
devpoll_dispatch(struct event_base *base, void *arg, struct timeval *tv)
{
	struct devpollop *devpollop = arg;
	struct pollfd *events = devpollop->events;
	struct dvpoll dvp;
	struct evdevpoll *evdp;
	int i, res, timeout = -1;

	if (devpollop->nchanges)
		devpoll_commit(devpollop);

	if (tv != NULL)
		timeout = tv->tv_sec * 1000 + (tv->tv_usec + 999) / 1000;

	dvp.dp_fds = devpollop->events;
	dvp.dp_nfds = devpollop->nevents;
	dvp.dp_timeout = timeout;

	res = ioctl(devpollop->dpfd, DP_POLL, &dvp);

	if (res == -1) {
		if (errno != EINTR) {
			event_warn("ioctl: DP_POLL");
			return (-1);
		}

		evsignal_process(base);
		return (0);
	} else if (base->sig.evsignal_caught) {
		evsignal_process(base);
	}

	event_debug(("%s: devpoll_wait reports %d", __func__, res));

	for (i = 0; i < res; i++) {
		int which = 0;
		int what = events[i].revents;
		struct event *evread = NULL, *evwrite = NULL;

		assert(events[i].fd < devpollop->nfds);
		evdp = &devpollop->fds[events[i].fd];

		if (what & POLLHUP)
			what |= POLLIN | POLLOUT;
		else if (what & POLLERR)
			what |= POLLIN | POLLOUT;

		if (what & POLLIN) {
			evread = evdp->evread;
			which |= EV_READ;
		}

		if (what & POLLOUT) {
			evwrite = evdp->evwrite;
			which |= EV_WRITE;
		}

		if (!which)
			continue;

		if (evread != NULL && !(evread->ev_events & EV_PERSIST))
			event_del(evread);
		if (evwrite != NULL && evwrite != evread &&
		    !(evwrite->ev_events & EV_PERSIST))
			event_del(evwrite);

		if (evread != NULL)
			event_active(evread, EV_READ, 1);
		if (evwrite != NULL)
			event_active(evwrite, EV_WRITE, 1);
	}

	return (0);
}
int
win32_dispatch(struct event_base *base, struct timeval *tv)
{
	struct win32op *win32op = base->evbase;
	int res = 0;
	unsigned j, i;
	int fd_count;
	SOCKET s;

	if (win32op->resize_out_sets) {
		size_t size = FD_SET_ALLOC_SIZE(win32op->num_fds_in_fd_sets);
		if (!(win32op->readset_out =
			mm_realloc(win32op->readset_out, size)))
			return (-1);
		if (!(win32op->exset_out =
			mm_realloc(win32op->exset_out, size)))
			return (-1);
		if (!(win32op->writeset_out =
			mm_realloc(win32op->writeset_out, size)))
			return (-1);
		win32op->resize_out_sets = 0;
	}

	fd_set_copy(win32op->readset_out, win32op->readset_in);
	fd_set_copy(win32op->exset_out, win32op->writeset_in);
	fd_set_copy(win32op->writeset_out, win32op->writeset_in);

	fd_count =
	    (win32op->readset_out->fd_count > win32op->writeset_out->fd_count) ?
	    win32op->readset_out->fd_count : win32op->writeset_out->fd_count;

	if (!fd_count) {
		long msec = tv ? evutil_tv_to_msec(tv) : LONG_MAX;
		/* Sleep's DWORD argument is unsigned long */
		if (msec < 0)
			msec = LONG_MAX;
		/* Windows doesn't like you to call select() with no
		 * sockets */
		Sleep(msec);
		return (0);
	}

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	res = select(fd_count,
		     (struct fd_set*)win32op->readset_out,
		     (struct fd_set*)win32op->writeset_out,
		     (struct fd_set*)win32op->exset_out, tv);

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	event_debug(("%s: select returned %d", __func__, res));

	if (res <= 0) {
		return res;
	}

	if (win32op->readset_out->fd_count) {
		i = rand() % win32op->readset_out->fd_count;
		for (j=0; j<win32op->readset_out->fd_count; ++j) {
			if (++i >= win32op->readset_out->fd_count)
				i = 0;
			s = win32op->readset_out->fd_array[i];
			evmap_io_active(base, s, EV_READ);
		}
	}
	if (win32op->exset_out->fd_count) {
		i = rand() % win32op->exset_out->fd_count;
		for (j=0; j<win32op->exset_out->fd_count; ++j) {
			if (++i >= win32op->exset_out->fd_count)
				i = 0;
			s = win32op->exset_out->fd_array[i];
			evmap_io_active(base, s, EV_WRITE);
		}
	}
	if (win32op->writeset_out->fd_count) {
		SOCKET s;
		i = rand() % win32op->writeset_out->fd_count;
		for (j=0; j<win32op->writeset_out->fd_count; ++j) {
			if (++i >= win32op->writeset_out->fd_count)
				i = 0;
			s = win32op->writeset_out->fd_array[i];
			evmap_io_active(base, s, EV_WRITE);
		}
	}

	return (0);
}
static int
epoll_dispatch(struct event_base *base, void *arg, struct timeval *tv)
{
	struct epollop *epollop = arg;
	struct epoll_event *events = epollop->events;
	struct evepoll *evep;
	int i, res, timeout = -1;

	if (tv != NULL)
		timeout = tv->tv_sec * 1000 + (tv->tv_usec + 999) / 1000;

	if (timeout > MAX_EPOLL_TIMEOUT_MSEC) {
		/* Linux kernels can wait forever if the timeout is too big;
		 * see comment on MAX_EPOLL_TIMEOUT_MSEC. */
		timeout = MAX_EPOLL_TIMEOUT_MSEC;
	}

	res = epoll_wait(epollop->epfd, events, epollop->nevents, timeout);

	if (res == -1) {
		if (errno != EINTR) {
			event_warn("epoll_wait");
			return (-1);
		}

		evsignal_process(base);
		return (0);
	} else if (base->sig.evsignal_caught) {
		evsignal_process(base);
	}

	event_debug(("%s: epoll_wait reports %d", __func__, res));

	for (i = 0; i < res; i++) {
		int what = events[i].events;
		struct event *evread = NULL, *evwrite = NULL;
		int fd = events[i].data.fd;

		if (fd < 0 || fd >= epollop->nfds)
			continue;
		evep = &epollop->fds[fd];

		if (what & (EPOLLHUP|EPOLLERR)) {
			evread = evep->evread;
			evwrite = evep->evwrite;
		} else {
			if (what & EPOLLIN) {
				evread = evep->evread;
			}

			if (what & EPOLLOUT) {
				evwrite = evep->evwrite;
			}
		}

		if (!(evread||evwrite))
			continue;

		if (evread != NULL)
			event_active(evread, EV_READ, 1);
		if (evwrite != NULL)
			event_active(evwrite, EV_WRITE, 1);
	}

	if (res == epollop->nevents && epollop->nevents < MAX_NEVENTS) {
		/* We used all of the event space this time.  We should
		   be ready for more events next time. */
		int new_nevents = epollop->nevents * 2;
		struct epoll_event *new_events;

		new_events = realloc(epollop->events,
		    new_nevents * sizeof(struct epoll_event));
		if (new_events) {
			epollop->events = new_events;
			epollop->nevents = new_nevents;
		}
	}

	return (0);
}
static int
select_dispatch(struct event_base *base, struct timeval *tv)
{
	int res=0, i, j, nfds;
	struct selectop *sop = base->evbase;

	check_selectop(sop);
	if (sop->resize_out_sets) {
		fd_set *readset_out=NULL, *writeset_out=NULL;
		size_t sz = sop->event_fdsz;
		if (!(readset_out = mm_realloc(sop->event_readset_out, sz)))
			return (-1);
		sop->event_readset_out = readset_out;
		if (!(writeset_out = mm_realloc(sop->event_writeset_out, sz))) {
			/* We don't free readset_out here, since it was
			 * already successfully reallocated. The next time
			 * we call select_dispatch, the realloc will be a
			 * no-op. */
			return (-1);
		}
		sop->event_writeset_out = writeset_out;
		sop->resize_out_sets = 0;
	}

	memcpy(sop->event_readset_out, sop->event_readset_in,
	       sop->event_fdsz);
	memcpy(sop->event_writeset_out, sop->event_writeset_in,
	       sop->event_fdsz);

	nfds = sop->event_fds+1;

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	res = select(nfds, sop->event_readset_out,
	    sop->event_writeset_out, NULL, tv);

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	check_selectop(sop);

	if (res == -1) {
		if (errno != EINTR) {
			event_warn("select");
			return (-1);
		}

		return (0);
	}

	event_debug(("%s: select reports %d", __func__, res));

	check_selectop(sop);
	i = random() % nfds;
	for (j = 0; j < nfds; ++j) {
		if (++i >= nfds)
			i = 0;
		res = 0;
		if (FD_ISSET(i, sop->event_readset_out))
			res |= EV_READ;
		if (FD_ISSET(i, sop->event_writeset_out))
			res |= EV_WRITE;

		if (res == 0)
			continue;

		evmap_io_active(base, i, res);
	}
	check_selectop(sop);

	return (0);
}
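/*
 * Sketch of the in/out fd_set split used above, with hypothetical names:
 * select() overwrites its fd_set arguments in place, so the registered
 * interest is kept in "in" sets and copied into scratch "out" sets before
 * every call (this sketch uses fixed-size fd_sets rather than the
 * dynamically sized ones above).
 */
#include <sys/select.h>
#include <string.h>

struct example_sets {
	fd_set read_in, write_in;	/* registered interest; never passed */
	fd_set read_out, write_out;	/* scratch copies handed to select() */
};

static int
example_select_once(struct example_sets *s, int maxfd, struct timeval *tv)
{
	memcpy(&s->read_out, &s->read_in, sizeof(fd_set));
	memcpy(&s->write_out, &s->write_in, sizeof(fd_set));
	return select(maxfd + 1, &s->read_out, &s->write_out, NULL, tv);
}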
static int
proxy_connect_none(const char *host, unsigned short port_, struct PHB *phb)
{
	struct sockaddr_in me;
	int fd = -1;

	if (phb->gai_cur == NULL) {
		int ret;
		char port[6];
		struct addrinfo hints;

		g_snprintf(port, sizeof(port), "%d", port_);

		memset(&hints, 0, sizeof(struct addrinfo));
		hints.ai_family = AF_UNSPEC;
		hints.ai_socktype = SOCK_STREAM;
		hints.ai_flags = AI_ADDRCONFIG | AI_NUMERICSERV;

		if (!(ret = getaddrinfo(host, port, &hints, &phb->gai))) {
			phb->gai_cur = phb->gai;
		} else {
			event_debug("gai(): %s\n", gai_strerror(ret));
		}
	}

	for (; phb->gai_cur; phb->gai_cur = phb->gai_cur->ai_next) {
		if ((fd = socket(phb->gai_cur->ai_family,
		                 phb->gai_cur->ai_socktype,
		                 phb->gai_cur->ai_protocol)) < 0) {
			event_debug("socket failed: %d\n", errno);
			continue;
		}

		sock_make_nonblocking(fd);

		if (global.conf->iface_out) {
			me.sin_family = AF_INET;
			me.sin_port = 0;
			me.sin_addr.s_addr = inet_addr(global.conf->iface_out);

			if (bind(fd, (struct sockaddr *) &me, sizeof(me)) != 0) {
				event_debug("bind( %d, \"%s\" ) failure\n",
				            fd, global.conf->iface_out);
			}
		}

		event_debug("proxy_connect_none( \"%s\", %d ) = %d\n",
		            host, port_, fd);

		if (connect(fd, phb->gai_cur->ai_addr,
		            phb->gai_cur->ai_addrlen) < 0 && !sockerr_again()) {
			event_debug("connect failed: %s\n", strerror(errno));
			closesocket(fd);
			fd = -1;
			continue;
		} else {
			phb->inpa = b_input_add(fd, B_EV_IO_WRITE,
			                        proxy_connected, phb);
			phb->fd = fd;
			break;
		}
	}

	if (fd < 0 && host) {
		phb_free(phb, TRUE);
	}

	return fd;
}
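/*
 * A compact sketch of the getaddrinfo()-walk pattern used above, assuming
 * POSIX sockets and a blocking connect for brevity (the original uses a
 * non-blocking connect and resumes from the current address on the next
 * call). The function name is hypothetical.
 */
#include <sys/socket.h>
#include <netdb.h>
#include <unistd.h>
#include <string.h>

static int
example_connect_any(const char *host, const char *port)
{
	struct addrinfo hints, *res, *ai;
	int fd = -1;

	memset(&hints, 0, sizeof(hints));
	hints.ai_family = AF_UNSPEC;	/* try IPv4 and IPv6 */
	hints.ai_socktype = SOCK_STREAM;
	if (getaddrinfo(host, port, &hints, &res) != 0)
		return -1;

	/* Walk the address list until one connects. */
	for (ai = res; ai != NULL; ai = ai->ai_next) {
		fd = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol);
		if (fd < 0)
			continue;
		if (connect(fd, ai->ai_addr, ai->ai_addrlen) == 0)
			break;		/* connected */
		close(fd);
		fd = -1;
	}
	freeaddrinfo(res);
	return fd;
}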