static int
do_fd_clear(struct event_base *base,
    struct win32op *op, struct idx_info *ent, int read)
{
	int i;
	struct win_fd_set *set = read ? op->readset_in : op->writeset_in;
	if (read) {
		i = ent->read_pos_plus1 - 1;
		ent->read_pos_plus1 = 0;
	} else {
		i = ent->write_pos_plus1 - 1;
		ent->write_pos_plus1 = 0;
	}
	if (i < 0)
		return (0);
	if (--set->fd_count != (unsigned)i) {
		struct idx_info *ent2;
		SOCKET s2;
		s2 = set->fd_array[i] = set->fd_array[set->fd_count];

		ent2 = evmap_io_get_fdinfo_(&base->io, s2);

		if (!ent2) /* This indicates a bug. */
			return (0);
		if (read)
			ent2->read_pos_plus1 = i+1;
		else
			ent2->write_pos_plus1 = i+1;
	}
	return (0);
}
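/*
 * Illustrative sketch only, not libevent code: a minimal, self-contained
 * version of the bookkeeping pattern do_fd_clear() relies on. Each tracked
 * item records its array position plus one (so zero means "not present"),
 * and removal is O(1): the last element is moved into the vacated slot and
 * that element's stored position is patched. All names below (remove_slot,
 * slot_info, pos_plus1) are hypothetical.
 */
#include <stddef.h>

struct slot_info {
	size_t pos_plus1;	/* 0 means "not stored in the array" */
};

static void
remove_slot(int *array, size_t *count, struct slot_info **infos,
    struct slot_info *ent)
{
	size_t i;

	if (ent->pos_plus1 == 0)
		return;			/* not present; nothing to do */
	i = ent->pos_plus1 - 1;
	ent->pos_plus1 = 0;

	if (--*count != i) {
		/* Move the last element into the vacated slot... */
		array[i] = array[*count];
		/* ...and patch the moved element's stored position, the same
		 * way do_fd_clear() patches read_pos_plus1/write_pos_plus1. */
		infos[array[i]]->pos_plus1 = i + 1;
	}
}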
static int
poll_del(struct event_base *base, int fd, short old, short events, void *idx_)
{
	struct pollop *pop = base->evbase;
	struct pollfd *pfd = NULL;
	struct pollidx *idx = idx_;
	int i;

	EVUTIL_ASSERT((events & EV_SIGNAL) == 0);
	if (!(events & (EV_READ|EV_WRITE)))
		return (0);

	poll_check_ok(pop);
	i = idx->idxplus1 - 1;
	if (i < 0)
		return (-1);

	/* Do we still want to read or write? */
	pfd = &pop->event_set[i];
	if (events & EV_READ)
		pfd->events &= ~POLLIN;
	if (events & EV_WRITE)
		pfd->events &= ~POLLOUT;
	poll_check_ok(pop);
	if (pfd->events)
		/* Another event cares about that fd. */
		return (0);

	/* Okay, so we aren't interested in that fd anymore. */
	idx->idxplus1 = 0;

	--pop->nfds;
	if (i != pop->nfds) {
		/*
		 * Shift the last pollfd down into the now-unoccupied
		 * position.
		 */
		memcpy(&pop->event_set[i], &pop->event_set[pop->nfds],
		       sizeof(struct pollfd));
		idx = evmap_io_get_fdinfo_(&base->io, pop->event_set[i].fd);
		EVUTIL_ASSERT(idx);
		EVUTIL_ASSERT(idx->idxplus1 == pop->nfds + 1);
		idx->idxplus1 = i + 1;
	}

	poll_check_ok(pop);
	return (0);
}
static int
evport_dispatch(struct event_base *base, struct timeval *tv)
{
	int i, res;
	struct evport_data *epdp = base->evbase;
	port_event_t *pevtlist = epdp->ed_pevtlist;

	/*
	 * port_getn will block until it has at least nevents events. It will
	 * also return how many it's given us (which may be more than we asked
	 * for, as long as it's less than our maximum (ed_maxevents)) in
	 * nevents.
	 */
	int nevents = 1;

	/*
	 * We have to convert a struct timeval to a struct timespec
	 * (only difference is nanoseconds vs. microseconds). If no time-based
	 * events are active, we should wait for I/O (and tv == NULL).
	 */
	struct timespec ts;
	struct timespec *ts_p = NULL;
	if (tv != NULL) {
		ts.tv_sec = tv->tv_sec;
		ts.tv_nsec = tv->tv_usec * 1000;
		ts_p = &ts;
	}

	/*
	 * Before doing anything else, we need to reassociate the events we hit
	 * last time which need reassociation. See comment at the end of the
	 * loop below.
	 */
	for (i = 0; i < epdp->ed_npending; ++i) {
		struct fd_info *fdi = NULL;
		const int fd = epdp->ed_pending[i];
		if (fd != -1) {
			/* We might have cleared out this event; we need
			 * to be sure that it's still set. */
			fdi = evmap_io_get_fdinfo_(&base->io, fd);
		}

		if (fdi != NULL && FDI_HAS_EVENTS(fdi)) {
			reassociate(epdp, fdi, fd);
			/* epdp->ed_pending[i] = -1; */
			fdi->pending_idx_plus_1 = 0;
		}
	}

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	res = port_getn(epdp->ed_port, pevtlist, epdp->ed_maxevents,
	    (unsigned int *) &nevents, ts_p);

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (res == -1) {
		if (errno == EINTR || errno == EAGAIN) {
			return (0);
		} else if (errno == ETIME) {
			if (nevents == 0)
				return (0);
		} else {
			event_warn("port_getn");
			return (-1);
		}
	}

	event_debug(("%s: port_getn reports %d events", __func__, nevents));

	for (i = 0; i < nevents; ++i) {
		port_event_t *pevt = &pevtlist[i];
		int fd = (int) pevt->portev_object;
		struct fd_info *fdi = pevt->portev_user;
		/*EVUTIL_ASSERT(evmap_io_get_fdinfo_(&base->io, fd) == fdi);*/

		check_evportop(epdp);
		check_event(pevt);
		epdp->ed_pending[i] = fd;
		fdi->pending_idx_plus_1 = i + 1;

		/*
		 * Figure out what kind of event it was
		 * (because we have to pass this to the callback).
		 */
		res = 0;
		if (pevt->portev_events & (POLLERR|POLLHUP)) {
			res = EV_READ | EV_WRITE;
		} else {
			if (pevt->portev_events & POLLIN)
				res |= EV_READ;
			if (pevt->portev_events & POLLOUT)
				res |= EV_WRITE;
		}

		/*
		 * Check for the error situations or a hangup situation.
		 */
		if (pevt->portev_events & (POLLERR|POLLHUP|POLLNVAL))
			res |= EV_READ|EV_WRITE;

		evmap_io_active_(base, fd, res);
	} /* end of all events gotten */
	epdp->ed_npending = nevents;

	if (nevents == epdp->ed_maxevents &&
	    epdp->ed_maxevents < MAX_EVENTS_PER_GETN) {
		/* We used all the space this time.  We should be ready
		 * for more events next time around. */
		grow(epdp, epdp->ed_maxevents * 2);
	}

	check_evportop(epdp);

	return (0);
}
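/*
 * Illustrative sketch only, not libevent code: a minimal wrapper showing
 * the same port_getn() timeout handling that evport_dispatch() performs
 * above: convert the struct timeval to a struct timespec, treat EINTR and
 * EAGAIN as "no events", and treat ETIME as success if any events were
 * collected before the timeout expired. The helper name get_port_events
 * and its signature are hypothetical.
 */
#include <errno.h>
#include <port.h>
#include <sys/time.h>

static int
get_port_events(int port, port_event_t *list, unsigned int max,
    const struct timeval *tv)
{
	struct timespec ts, *ts_p = NULL;
	unsigned int nget = 1;	/* block until at least one event arrives */

	if (tv != NULL) {
		/* timeval counts microseconds; timespec counts nanoseconds. */
		ts.tv_sec = tv->tv_sec;
		ts.tv_nsec = tv->tv_usec * 1000;
		ts_p = &ts;
	}

	if (port_getn(port, list, max, &nget, ts_p) == -1) {
		if (errno == EINTR || errno == EAGAIN)
			return 0;	/* interrupted: report no events */
		if (errno != ETIME)
			return -1;	/* genuine failure */
		/* ETIME: the timeout expired, but some events may have been
		 * collected anyway; fall through and report them. */
	}
	return (int)nget;
}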