/*
 * Enlarges the allocated file descriptor array by the given factor
 * (which must be > 1) and zero-fills the newly added tail.  Returns 0
 * on success, -1 if realloc fails -- in which case the old array and
 * ed_nevents are left untouched.
 * NOTE(review): factor * oldsize is unchecked int arithmetic and could
 * overflow for pathologically large tables -- confirm callers bound it.
 */
static int
grow(struct evport_data *epdp, int factor)
{
	struct fd_info *enlarged;
	int oldsize = epdp->ed_nevents;
	int newsize = factor * oldsize;

	assert(factor > 1);
	check_evportop(epdp);

	/* Reallocate via a temporary so a failure keeps ed_fds valid. */
	enlarged = realloc(epdp->ed_fds, sizeof(struct fd_info) * newsize);
	if (enlarged == NULL)
		return -1;
	epdp->ed_fds = enlarged;

	/* The freshly grown portion must start out zeroed. */
	memset((char *)(epdp->ed_fds + oldsize), 0,
	    (newsize - oldsize) * sizeof(struct fd_info));
	epdp->ed_nevents = newsize;

	check_evportop(epdp);
	return 0;
}
/*
 * Adds the given events for the given fd: grows the fd-info table if
 * the fd falls outside it, records the events in the fd's entry, and
 * (re)associates the fd with the event port.
 * Returns 0 on success, -1 on failure.
 */
static int
evport_add(struct event_base *base, int fd, short old, short events, void *p)
{
	struct evport_data *evpd = base->evbase;
	struct fd_info *fdi;
	int factor = 1;
	(void)p;

	check_evportop(evpd);

	/*
	 * If necessary, grow the file descriptor info table: find the
	 * smallest power-of-two multiple of the current size that can
	 * hold this fd.
	 */
	while (fd >= factor * evpd->ed_nevents)
		factor *= 2;

	if (factor > 1 && grow(evpd, factor) == -1)
		return (-1);

	fdi = &evpd->ed_fds[fd];
	fdi->fdi_what |= events;

	return reassociate(evpd, fdi, fd);
}
/*
 * Adds the given events for the given fd.  The per-fd bookkeeping
 * struct is supplied by the caller through the opaque pointer p.
 * Returns 0 on success, -1 if the port association fails.
 */
static int
evport_add(struct event_base *base, int fd, short old, short events, void *p)
{
	struct fd_info *fdi = p;
	struct evport_data *evpd = base->evbase;

	check_evportop(evpd);

	/* Accumulate the requested events, then refresh the kernel
	 * association to match. */
	fdi->fdi_what |= events;
	return reassociate(evpd, fdi, fd);
}
static int evport_del(struct event_base *base, int fd, short old, short events, void *p) { struct evport_data *evpd = base->evbase; struct fd_info *fdi; int i; int associated = 1; (void)p; check_evportop(evpd); if (evpd->ed_nevents < fd) { return (-1); } for (i = 0; i < EVENTS_PER_GETN; ++i) { if (evpd->ed_pending[i] == fd) { associated = 0; break; } } fdi = &evpd->ed_fds[fd]; if (events & EV_READ) fdi->fdi_what &= ~EV_READ; if (events & EV_WRITE) fdi->fdi_what &= ~EV_WRITE; if (associated) { if (!FDI_HAS_EVENTS(fdi) && port_dissociate(evpd->ed_port, PORT_SOURCE_FD, fd) == -1) { /* * Ignore EBADFD error the fd could have been closed * before event_del() was called. */ if (errno != EBADFD) { event_warn("port_dissociate"); return (-1); } } else { if (FDI_HAS_EVENTS(fdi)) { return (reassociate(evpd, fdi, fd)); } } } else { if ((fdi->fdi_what & (EV_READ|EV_WRITE)) == 0) { evpd->ed_pending[i] = -1; } } return 0; }
/*
 * (Re)associates the given file descriptor with the event port, using
 * the OS-level event mask implied by the fd_info struct.  A mask of
 * zero is a no-op (nothing to watch).  Returns 0 on success, -1 if
 * port_associate() fails.
 */
static int
reassociate(struct evport_data *epdp, struct fd_info *fdip, int fd)
{
	int mask = FDI_TO_SYSEVENTS(fdip);

	if (mask != 0 &&
	    port_associate(epdp->ed_port, PORT_SOURCE_FD, fd,
		mask, NULL) == -1) {
		event_warn("port_associate");
		return (-1);
	}

	check_evportop(epdp);
	return (0);
}
static int evport_add (void *arg, struct event *ev) { struct evport_data *evpd = arg; struct fd_info *fdi; int factor; check_evportop (evpd); /* * Delegate, if it's not ours to handle. */ if (ev->ev_events & EV_SIGNAL) return (evsignal_add (ev) ); /* * If necessary, grow the file descriptor info table */ factor = 1; while (ev->ev_fd >= factor * evpd->ed_nevents) factor *= 2; if (factor > 1) { if (-1 == grow (evpd, factor) ) { return (-1); } } fdi = &evpd->ed_fds[ev->ev_fd]; if (ev->ev_events & EV_READ) fdi->fdi_revt = ev; if (ev->ev_events & EV_WRITE) fdi->fdi_wevt = ev; return reassociate (evpd, fdi, ev->ev_fd); }
static int evport_del(struct event_base *base, int fd, short old, short events, void *p) { struct evport_data *evpd = base->evbase; struct fd_info *fdi = p; int associated = ! fdi->pending_idx_plus_1; check_evportop(evpd); fdi->fdi_what &= ~(events &(EV_READ|EV_WRITE)); if (associated) { if (!FDI_HAS_EVENTS(fdi) && port_dissociate(evpd->ed_port, PORT_SOURCE_FD, fd) == -1) { /* * Ignore EBADFD error the fd could have been closed * before event_del() was called. */ if (errno != EBADFD) { event_warn("port_dissociate"); return (-1); } } else { if (FDI_HAS_EVENTS(fdi)) { return (reassociate(evpd, fdi, fd)); } } } else { if ((fdi->fdi_what & (EV_READ|EV_WRITE)) == 0) { const int i = fdi->pending_idx_plus_1 - 1; EVUTIL_ASSERT(evpd->ed_pending[i] == fd); evpd->ed_pending[i] = -1; fdi->pending_idx_plus_1 = 0; } } return 0; }
/*
 * Waits for events on the event port (dropping the base lock while
 * blocked in the kernel), then activates each delivered fd via evmap.
 * Event ports auto-dissociate an fd when delivering its event, so
 * delivered fds are recorded in ed_pending and re-associated at the
 * start of the next dispatch pass.  Returns 0 on success, -1 on error.
 */
static int
evport_dispatch(struct event_base *base, struct timeval *tv)
{
	int i, res;
	struct evport_data *epdp = base->evbase;
	port_event_t pevtlist[EVENTS_PER_GETN];

	/*
	 * port_getn will block until it has at least nevents events. It will
	 * also return how many it's given us (which may be more than we asked
	 * for, as long as it's less than our maximum (EVENTS_PER_GETN)) in
	 * nevents.
	 */
	int nevents = 1;

	/*
	 * We have to convert a struct timeval to a struct timespec
	 * (only difference is nanoseconds vs. microseconds). If no time-based
	 * events are active, we should wait for I/O (and tv == NULL).
	 */
	struct timespec ts;
	struct timespec *ts_p = NULL;
	if (tv != NULL) {
		ts.tv_sec = tv->tv_sec;
		ts.tv_nsec = tv->tv_usec * 1000;
		ts_p = &ts;
	}

	/*
	 * Before doing anything else, we need to reassociate the events we hit
	 * last time which need reassociation. See comment at the end of the
	 * loop below.
	 */
	for (i = 0; i < EVENTS_PER_GETN; ++i) {
		struct fd_info *fdi = NULL;
		if (epdp->ed_pending[i] != -1) {
			fdi = &(epdp->ed_fds[epdp->ed_pending[i]]);
		}

		if (fdi != NULL && FDI_HAS_EVENTS(fdi)) {
			int fd = epdp->ed_pending[i];
			reassociate(epdp, fdi, fd);
			epdp->ed_pending[i] = -1;
		}
	}

	/* Release the lock while blocked in the kernel so other threads
	 * can manipulate the event base. */
	EVBASE_RELEASE_LOCK(base, th_base_lock);

	res = port_getn(epdp->ed_port, pevtlist, EVENTS_PER_GETN,
	    (unsigned int *) &nevents, ts_p);

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (res == -1) {
		if (errno == EINTR || errno == EAGAIN) {
			return (0);
		} else if (errno == ETIME) {
			/* Timed out -- but ETIME with nevents > 0 means some
			 * events were still retrieved, so fall through and
			 * process them. */
			if (nevents == 0)
				return (0);
		} else {
			event_warn("port_getn");
			return (-1);
		}
	}

	event_debug(("%s: port_getn reports %d events", __func__, nevents));

	for (i = 0; i < nevents; ++i) {
		struct fd_info *fdi;
		port_event_t *pevt = &pevtlist[i];
		int fd = (int) pevt->portev_object;

		check_evportop(epdp);
		check_event(pevt);
		/* Remember this fd: delivery dissociated it, so the next
		 * dispatch pass must re-associate it if it still has
		 * events (see the loop above). */
		epdp->ed_pending[i] = fd;

		/*
		 * Figure out what kind of event it was
		 * (because we have to pass this to the callback)
		 */
		res = 0;
		if (pevt->portev_events & (POLLERR|POLLHUP)) {
			res = EV_READ | EV_WRITE;
		} else {
			if (pevt->portev_events & POLLIN)
				res |= EV_READ;
			if (pevt->portev_events & POLLOUT)
				res |= EV_WRITE;
		}

		/*
		 * Check for the error situations or a hangup situation
		 */
		if (pevt->portev_events & (POLLERR|POLLHUP|POLLNVAL))
			res |= EV_READ|EV_WRITE;

		EVUTIL_ASSERT(epdp->ed_nevents > fd);
		/* NOTE(review): fdi is computed but not used below --
		 * looks like a leftover; candidate for removal. */
		fdi = &(epdp->ed_fds[fd]);

		evmap_io_active(base, fd, res);
	} /* end of all events gotten */

	check_evportop(epdp);
	return (0);
}
static int evport_del (void *arg, struct event *ev) { struct evport_data *evpd = arg; struct fd_info *fdi; int i; int associated = 1; check_evportop (evpd); /* * Delegate, if it's not ours to handle */ if (ev->ev_events & EV_SIGNAL) { return (evsignal_del (ev) ); } if (evpd->ed_nevents < ev->ev_fd) { return (-1); } for (i = 0; i < EVENTS_PER_GETN; ++i) { if (evpd->ed_pending[i] == ev->ev_fd) { associated = 0; break; } } fdi = &evpd->ed_fds[ev->ev_fd]; if (ev->ev_events & EV_READ) fdi->fdi_revt = NULL; if (ev->ev_events & EV_WRITE) fdi->fdi_wevt = NULL; if (associated) { if (!FDI_HAS_EVENTS (fdi) && port_dissociate (evpd->ed_port, PORT_SOURCE_FD, ev->ev_fd) == -1) { /* * Ignre EBADFD error the fd could have been closed * before event_del() was called. */ if (errno != EBADFD) { event_warn ("port_dissociate"); return (-1); } } else { if (FDI_HAS_EVENTS (fdi) ) { return (reassociate (evpd, fdi, ev->ev_fd) ); } } } else { if (fdi->fdi_revt == NULL && fdi->fdi_wevt == NULL) { evpd->ed_pending[i] = -1; } } return 0; }
/*
 * Waits for events on the event port and activates each delivered
 * event (which causes its callback to run), interleaving signal
 * processing on interruption or when a signal was caught.  Event ports
 * auto-dissociate an fd when delivering its event, so delivered fds are
 * recorded in ed_pending and re-associated at the start of the next
 * dispatch pass.  Returns 0 on success, -1 on error.
 */
static int
evport_dispatch(struct event_base *base, void *arg, struct timeval *tv)
{
	int i, res;
	struct evport_data *epdp = arg;
	port_event_t pevtlist[EVENTS_PER_GETN];

	/*
	 * port_getn will block until it has at least nevents events. It will
	 * also return how many it's given us (which may be more than we asked
	 * for, as long as it's less than our maximum (EVENTS_PER_GETN)) in
	 * nevents.
	 */
	int nevents = 1;

	/*
	 * We have to convert a struct timeval to a struct timespec
	 * (only difference is nanoseconds vs. microseconds). If no time-based
	 * events are active, we should wait for I/O (and tv == NULL).
	 */
	struct timespec ts;
	struct timespec *ts_p = NULL;
	if (tv != NULL) {
		ts.tv_sec = tv->tv_sec;
		ts.tv_nsec = tv->tv_usec * 1000;
		ts_p = &ts;
	}

	/*
	 * Before doing anything else, we need to reassociate the events we hit
	 * last time which need reassociation. See comment at the end of the
	 * loop below.
	 */
	for (i = 0; i < EVENTS_PER_GETN; ++i) {
		struct fd_info *fdi = NULL;
		if (epdp->ed_pending[i] != -1) {
			fdi = &(epdp->ed_fds[epdp->ed_pending[i]]);
		}

		if (fdi != NULL && FDI_HAS_EVENTS(fdi)) {
			/* Recover the fd from whichever event pointer is
			 * still set in the slot. */
			int fd = FDI_HAS_READ(fdi) ?
			    fdi->fdi_revt->ev_fd : fdi->fdi_wevt->ev_fd;
			reassociate(epdp, fdi, fd);
			epdp->ed_pending[i] = -1;
		}
	}

	if ((res = port_getn(epdp->ed_port, pevtlist, EVENTS_PER_GETN,
		    (unsigned int *) &nevents, ts_p)) == -1) {
		if (errno == EINTR || errno == EAGAIN) {
			/* Interrupted -- possibly by a signal we manage;
			 * process any caught signals, then report no events. */
			evsignal_process(base);
			return (0);
		} else if (errno == ETIME) {
			/* Timed out -- but ETIME with nevents > 0 means some
			 * events were still retrieved; fall through and
			 * process them. */
			if (nevents == 0)
				return (0);
		} else {
			event_warn("port_getn");
			return (-1);
		}
	} else if (base->sig.evsignal_caught) {
		evsignal_process(base);
	}

	event_debug(("%s: port_getn reports %d events", __func__, nevents));

	for (i = 0; i < nevents; ++i) {
		struct event *ev;
		struct fd_info *fdi;
		port_event_t *pevt = &pevtlist[i];
		int fd = (int) pevt->portev_object;

		check_evportop(epdp);
		check_event(pevt);
		/* Remember this fd: delivery dissociated it, so the next
		 * dispatch pass must re-associate it if it still has
		 * events (see the loop above). */
		epdp->ed_pending[i] = fd;

		/*
		 * Figure out what kind of event it was
		 * (because we have to pass this to the callback)
		 */
		res = 0;
		if (pevt->portev_events & POLLIN)
			res |= EV_READ;
		if (pevt->portev_events & POLLOUT)
			res |= EV_WRITE;

		assert(epdp->ed_nevents > fd);
		fdi = &(epdp->ed_fds[fd]);

		/*
		 * We now check for each of the possible events (READ
		 * or WRITE). Then, we activate the event (which will
		 * cause its callback to be executed).
		 */
		if ((res & EV_READ) && ((ev = fdi->fdi_revt) != NULL)) {
			event_active(ev, res, 1);
		}

		if ((res & EV_WRITE) && ((ev = fdi->fdi_wevt) != NULL)) {
			event_active(ev, res, 1);
		}
	} /* end of all events gotten */

	check_evportop(epdp);
	return (0);
}