Example #1
static int
devpoll_dispatch(struct event_base *base, struct timeval *tv)
{
	struct devpollop *devpollop = base->evbase;
	struct pollfd *events = devpollop->events;
	struct dvpoll dvp;
	int i, res, timeout = -1;

	if (devpollop->nchanges)
		devpoll_commit(devpollop);

	if (tv != NULL)
		timeout = tv->tv_sec * 1000 + (tv->tv_usec + 999) / 1000;

	dvp.dp_fds = devpollop->events;
	dvp.dp_nfds = devpollop->nevents;
	dvp.dp_timeout = timeout;

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	res = ioctl(devpollop->dpfd, DP_POLL, &dvp);

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (res == -1) {
		if (errno != EINTR) {
			event_warn("ioctl: DP_POLL");
			return (-1);
		}

		return (0);
	}

	event_debug(("%s: devpoll_wait reports %d", __func__, res));

	for (i = 0; i < res; i++) {
		int which = 0;
		int what = events[i].revents;

		if (what & POLLHUP)
			what |= POLLIN | POLLOUT;
		else if (what & POLLERR)
			what |= POLLIN | POLLOUT;

		if (what & POLLIN)
			which |= EV_READ;
		if (what & POLLOUT)
			which |= EV_WRITE;

		if (!which)
			continue;

		/* XXX(niels): not sure if this works for devpoll */
		evmap_io_active_(base, events[i].fd, which);
	}

	return (0);
}
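The timeout computation above deserves a second look: it rounds the microsecond part up rather than truncating, so a caller asking for any nonzero wait never gets an immediate-return poll by accident. A minimal standalone sketch of that conversion (tv_to_dp_timeout is our name for it, not something libevent exports):

#include <stdio.h>
#include <sys/time.h>

/* Sketch of the timeout conversion used above: microseconds are rounded
 * *up*, so a nonzero wait never collapses to a zero-millisecond poll.
 * NULL means "wait forever", which DP_POLL spells as -1. */
static int
tv_to_dp_timeout(const struct timeval *tv)
{
	if (tv == NULL)
		return -1;
	return (int)(tv->tv_sec * 1000 + (tv->tv_usec + 999) / 1000);
}

int
main(void)
{
	struct timeval tv = { 2, 1 };			/* 2 s + 1 us */
	printf("%d\n", tv_to_dp_timeout(&tv));		/* prints 2001 */
	printf("%d\n", tv_to_dp_timeout(NULL));		/* prints -1 */
	return 0;
}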
Example #2
static int
kq_dispatch(struct event_base *base, struct timeval *tv)
{
    struct kqop *kqop = base->evbase;
    struct kevent *events = kqop->events;
    struct kevent *changes;
    struct timespec ts, *ts_p = NULL;
    int i, n_changes, res;

    if (tv != NULL) {
        TIMEVAL_TO_TIMESPEC(tv, &ts);
        ts_p = &ts;
    }

    /* Build "changes" from "base->changes" */
    EVUTIL_ASSERT(kqop->changes);
    n_changes = kq_build_changes_list(&base->changelist, kqop);
    if (n_changes < 0)
        return -1;

    event_changelist_remove_all_(&base->changelist, base);

    /* steal the changes array in case some broken code tries to call
     * dispatch twice at once. */
    changes = kqop->changes;
    kqop->changes = NULL;

    /* Make sure that 'events' is at least as long as the list of changes:
     * otherwise errors in the changes can get reported as a -1 return
     * value from kevent() rather than as EV_ERROR events in the events
     * array.
     *
     * (We could instead handle -1 return values from kevent() by
     * retrying with a smaller changes array or a larger events array,
     * but this approach seems less risky for now.)
     */
    if (kqop->events_size < n_changes) {
        int new_size = kqop->events_size;
        do {
            new_size *= 2;
        } while (new_size < n_changes);

        kq_grow_events(kqop, new_size);
        events = kqop->events;
    }

    EVBASE_RELEASE_LOCK(base, th_base_lock);

    res = kevent(kqop->kq, changes, n_changes,
                 events, kqop->events_size, ts_p);

    EVBASE_ACQUIRE_LOCK(base, th_base_lock);

    EVUTIL_ASSERT(kqop->changes == NULL);
    kqop->changes = changes;

    if (res == -1) {
        if (errno != EINTR) {
            event_warn("kevent");
            return (-1);
        }

        return (0);
    }

    event_debug(("%s: kevent reports %d", __func__, res));

    for (i = 0; i < res; i++) {
        int which = 0;

        if (events[i].flags & EV_ERROR) {
            switch (events[i].data) {

            /* Can occur on delete if we are not currently
             * watching any events on this fd.  That can
             * happen when the fd was closed and another
             * file was opened with that fd. */
            case ENOENT:
            /* Can occur for reasons not fully understood
             * on FreeBSD. */
            case EINVAL:
                continue;

            /* Can occur on a delete if the fd is closed. */
            case EBADF:
                /* XXXX On NetBSD, we can also get EBADF if we
                 * try to add the write side of a pipe, but
                 * the read side has already been closed.
                 * Other BSDs call this situation 'EPIPE'. It
                 * would be good if we had a way to report
                 * this situation. */
                continue;
            /* These two can occur on an add if the fd was one side
             * of a pipe, and the other side was closed. */
            case EPERM:
            case EPIPE:
                /* Report read events, if we're listening for
                 * them, so that the user can learn about any
                 * add errors.  (If the operation was a
                 * delete, then udata should be cleared.) */
                if (events[i].udata) {
                    /* The operation was an add:
                     * report the error as a read. */
                    which |= EV_READ;
                    break;
                } else {
                    /* The operation was a del:
                     * report nothing. */
                    continue;
                }

            /* Other errors shouldn't occur. */
            default:
                errno = events[i].data;
                return (-1);
            }
        } else if (events[i].filter == EVFILT_READ) {
            which |= EV_READ;
        } else if (events[i].filter == EVFILT_WRITE) {
            which |= EV_WRITE;
        } else if (events[i].filter == EVFILT_SIGNAL) {
            which |= EV_SIGNAL;
#ifdef EVFILT_USER
        } else if (events[i].filter == EVFILT_USER) {
            base->is_notify_pending = 0;
#endif
        }

        if (!which)
            continue;

        if (events[i].filter == EVFILT_SIGNAL) {
            evmap_signal_active_(base, events[i].ident, 1);
        } else {
            evmap_io_active_(base, events[i].ident, which | EV_ET);
        }
    }

    if (res == kqop->events_size) {
        /* We used all the events space that we have. Maybe we should
           make it bigger. */
        kq_grow_events(kqop, kqop->events_size * 2);
    }

    return (0);
}
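Before calling kevent(), the function grows the output array until it can hold one entry per pending change, so per-change failures come back as EV_ERROR records instead of a bare -1 return. A minimal sketch of that grow-by-doubling step, under the same assumption the real code makes (the current capacity is never zero); the helper name and parameters are invented:

#include <stdlib.h>

/* Sketch of the doubling strategy used above to keep kqop->events at
 * least as large as the change list.  Names (grow_to_fit, buf, cap) are
 * ours; *cap is assumed to start at 1 or more, as events_size does. */
static void *
grow_to_fit(void *buf, size_t elem_size, int *cap, int needed)
{
	int new_cap = *cap;
	void *tmp;

	if (new_cap >= needed)
		return buf;			/* already large enough */
	do {
		new_cap *= 2;			/* double until it fits */
	} while (new_cap < needed);

	tmp = realloc(buf, (size_t)new_cap * elem_size);
	if (tmp == NULL)
		return NULL;			/* old buffer still valid */
	*cap = new_cap;
	return tmp;
}

Example #3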
int
win32_dispatch(struct event_base *base, struct timeval *tv)
{
	struct win32op *win32op = base->evbase;
	int res = 0;
	unsigned j, i;
	int fd_count;
	SOCKET s;

	if (win32op->resize_out_sets) {
		size_t size = FD_SET_ALLOC_SIZE(win32op->num_fds_in_fd_sets);
		if (!(win32op->readset_out = mm_realloc(win32op->readset_out, size)))
			return (-1);
		if (!(win32op->exset_out = mm_realloc(win32op->exset_out, size)))
			return (-1);
		if (!(win32op->writeset_out = mm_realloc(win32op->writeset_out, size)))
			return (-1);
		win32op->resize_out_sets = 0;
	}

	fd_set_copy(win32op->readset_out, win32op->readset_in);
	/* Winsock reports a failed non-blocking connect() through the
	 * exception set, so the exception set is seeded from the fds we
	 * are watching for write. */
	fd_set_copy(win32op->exset_out, win32op->writeset_in);
	fd_set_copy(win32op->writeset_out, win32op->writeset_in);

	fd_count =
	    (win32op->readset_out->fd_count > win32op->writeset_out->fd_count) ?
	    win32op->readset_out->fd_count : win32op->writeset_out->fd_count;

	if (!fd_count) {
		long msec = tv ? evutil_tv_to_msec_(tv) : LONG_MAX;
		/* Sleep's DWORD argument is unsigned long */
		if (msec < 0)
			msec = LONG_MAX;
		/* Windows doesn't like you to call select() with no sockets */
		Sleep(msec);
		return (0);
	}

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	res = select(fd_count,
		     (struct fd_set*)win32op->readset_out,
		     (struct fd_set*)win32op->writeset_out,
		     (struct fd_set*)win32op->exset_out, tv);

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	event_debug(("%s: select returned %d", __func__, res));

	if (res <= 0) {
		return res;
	}

	if (win32op->readset_out->fd_count) {
		i = evutil_weakrand_range_(&base->weakrand_seed,
		    win32op->readset_out->fd_count);
		for (j=0; j<win32op->readset_out->fd_count; ++j) {
			if (++i >= win32op->readset_out->fd_count)
				i = 0;
			s = win32op->readset_out->fd_array[i];
			evmap_io_active_(base, s, EV_READ);
		}
	}
	if (win32op->exset_out->fd_count) {
		i = evutil_weakrand_range_(&base->weakrand_seed,
		    win32op->exset_out->fd_count);
		for (j=0; j<win32op->exset_out->fd_count; ++j) {
			if (++i >= win32op->exset_out->fd_count)
				i = 0;
			s = win32op->exset_out->fd_array[i];
			evmap_io_active_(base, s, EV_WRITE);
		}
	}
	if (win32op->writeset_out->fd_count) {
		i = evutil_weakrand_range_(&base->weakrand_seed,
		    win32op->writeset_out->fd_count);
		for (j=0; j<win32op->writeset_out->fd_count; ++j) {
			if (++i >= win32op->writeset_out->fd_count)
				i = 0;
			s = win32op->writeset_out->fd_array[i];
			evmap_io_active_(base, s, EV_WRITE);
		}
	}
	return (0);
}
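All three loops above share one trick: pick a random starting index with evutil_weakrand_range_() and then walk the whole fd_array once with wrap-around, so sockets that happen to sit in low slots do not starve the rest. A self-contained sketch of the pattern (rand() substitutes for the libevent RNG; visit_from_random_start is an invented name):

#include <stdio.h>
#include <stdlib.h>

/* Start at a random slot and visit every entry exactly once, wrapping
 * around, so the same descriptor is not always serviced first when many
 * are ready at the same time. */
static void
visit_from_random_start(const int *fds, unsigned count)
{
	unsigned i, j;

	if (count == 0)
		return;
	i = (unsigned)rand() % count;		/* random starting slot */
	for (j = 0; j < count; ++j) {
		if (++i >= count)		/* wrap around */
			i = 0;
		printf("servicing fd %d\n", fds[i]);
	}
}

int
main(void)
{
	int ready[] = { 4, 7, 9, 12 };
	visit_from_random_start(ready, 4);
	return 0;
}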
Example #4
static int
epoll_dispatch(struct event_base *base, struct timeval *tv)
{
	struct epollop *epollop = base->evbase;
	struct epoll_event *events = epollop->events;
	int i, res;
	long timeout = -1;

#ifdef USING_TIMERFD
	if (epollop->timerfd >= 0) {
		struct itimerspec is;
		is.it_interval.tv_sec = 0;
		is.it_interval.tv_nsec = 0;
		if (tv == NULL) {
			/* No timeout; disarm the timer. */
			is.it_value.tv_sec = 0;
			is.it_value.tv_nsec = 0;
		} else {
			if (tv->tv_sec == 0 && tv->tv_usec == 0) {
				/* we need to exit immediately; timerfd can't
				 * do that. */
				timeout = 0;
			}
			is.it_value.tv_sec = tv->tv_sec;
			is.it_value.tv_nsec = tv->tv_usec * 1000;
		}
		/* TODO: we could avoid unnecessary syscalls here by only
		   calling timerfd_settime when the top timeout changes, or
		   when we're called with a different timeval.
		*/
		if (timerfd_settime(epollop->timerfd, 0, &is, NULL) < 0) {
			event_warn("timerfd_settime");
		}
	} else
#endif
	if (tv != NULL) {
		timeout = evutil_tv_to_msec_(tv);
		if (timeout < 0 || timeout > MAX_EPOLL_TIMEOUT_MSEC) {
			/* Linux kernels can wait forever if the timeout is
			 * too big; see comment on MAX_EPOLL_TIMEOUT_MSEC. */
			timeout = MAX_EPOLL_TIMEOUT_MSEC;
		}
	}

	epoll_apply_changes(base);
	event_changelist_remove_all_(&base->changelist, base);

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	res = epoll_wait(epollop->epfd, events, epollop->nevents, timeout);

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (res == -1) {
		if (errno != EINTR) {
			event_warn("epoll_wait");
			return (-1);
		}

		return (0);
	}

	event_debug(("%s: epoll_wait reports %d", __func__, res));
	EVUTIL_ASSERT(res <= epollop->nevents);

	for (i = 0; i < res; i++) {
		int what = events[i].events;
		short ev = 0;
#ifdef USING_TIMERFD
		if (events[i].data.fd == epollop->timerfd)
			continue;
#endif

		if (what & (EPOLLHUP|EPOLLERR)) {
			ev = EV_READ | EV_WRITE;
		} else {
			if (what & EPOLLIN)
				ev |= EV_READ;
			if (what & EPOLLOUT)
				ev |= EV_WRITE;
			if (what & EPOLLRDHUP)
				ev |= EV_CLOSED;
		}

		if (!ev)
			continue;

		evmap_io_active_(base, events[i].data.fd, ev | EV_ET);
	}

	if (res == epollop->nevents && epollop->nevents < MAX_NEVENT) {
		/* We used all of the event space this time.  We should
		   be ready for more events next time. */
		int new_nevents = epollop->nevents * 2;
		struct epoll_event *new_events;

		new_events = mm_realloc(epollop->events,
		    new_nevents * sizeof(struct epoll_event));
		if (new_events) {
			epollop->events = new_events;
			epollop->nevents = new_nevents;
		}
	}

	return (0);
}
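The loop above translates kernel readiness bits into libevent's EV_* flags: an error or hangup wakes both directions, otherwise EPOLLIN, EPOLLOUT, and EPOLLRDHUP map to read, write, and closed. A standalone sketch of just that translation (the function name is ours; the EV_* values are copied from event2/event.h so the snippet compiles on its own):

#include <sys/epoll.h>

/* EV_* values as defined in event2/event.h; include that header instead
 * in real code. */
#define EV_READ   0x02
#define EV_WRITE  0x04
#define EV_CLOSED 0x80

/* Mirror of the flag translation in the dispatch loop above. */
static short
epoll_events_to_ev(unsigned int what)
{
	short ev = 0;

	if (what & (EPOLLHUP | EPOLLERR))
		return EV_READ | EV_WRITE;
	if (what & EPOLLIN)
		ev |= EV_READ;
	if (what & EPOLLOUT)
		ev |= EV_WRITE;
	if (what & EPOLLRDHUP)
		ev |= EV_CLOSED;
	return ev;
}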
Example #5
static int
evport_dispatch(struct event_base *base, struct timeval *tv)
{
	int i, res;
	struct evport_data *epdp = base->evbase;
	port_event_t *pevtlist = epdp->ed_pevtlist;

	/*
	 * port_getn will block until it has at least nevents events. It will
	 * also return how many it's given us (which may be more than we asked
	 * for, as long as it's less than our maximum (ed_maxevents)) in
	 * nevents.
	 */
	int nevents = 1;

	/*
	 * We have to convert a struct timeval to a struct timespec
	 * (only difference is nanoseconds vs. microseconds). If no time-based
	 * events are active, we should wait for I/O (and tv == NULL).
	 */
	struct timespec ts;
	struct timespec *ts_p = NULL;
	if (tv != NULL) {
		ts.tv_sec = tv->tv_sec;
		ts.tv_nsec = tv->tv_usec * 1000;
		ts_p = &ts;
	}

	/*
	 * Before doing anything else, we need to reassociate the events we hit
	 * last time which need reassociation. See comment at the end of the
	 * loop below.
	 */
	for (i = 0; i < epdp->ed_npending; ++i) {
		struct fd_info *fdi = NULL;
		const int fd = epdp->ed_pending[i];
		if (fd != -1) {
			/* We might have cleared out this event; we need
			 * to be sure that it's still set. */
			fdi = evmap_io_get_fdinfo_(&base->io, fd);
		}

		if (fdi != NULL && FDI_HAS_EVENTS(fdi)) {
			reassociate(epdp, fdi, fd);
			/* epdp->ed_pending[i] = -1; */
			fdi->pending_idx_plus_1 = 0;
		}
	}

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	res = port_getn(epdp->ed_port, pevtlist, epdp->ed_maxevents,
	    (unsigned int *) &nevents, ts_p);

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (res == -1) {
		if (errno == EINTR || errno == EAGAIN) {
			return (0);
		} else if (errno == ETIME) {
			if (nevents == 0)
				return (0);
		} else {
			event_warn("port_getn");
			return (-1);
		}
	}

	event_debug(("%s: port_getn reports %d events", __func__, nevents));

	for (i = 0; i < nevents; ++i) {
		port_event_t *pevt = &pevtlist[i];
		int fd = (int) pevt->portev_object;
		struct fd_info *fdi = pevt->portev_user;
		/*EVUTIL_ASSERT(evmap_io_get_fdinfo_(&base->io, fd) == fdi);*/

		check_evportop(epdp);
		check_event(pevt);
		epdp->ed_pending[i] = fd;
		fdi->pending_idx_plus_1 = i + 1;

		/*
		 * Figure out what kind of event it was
		 * (because we have to pass this to the callback)
		 */
		res = 0;
		if (pevt->portev_events & (POLLERR|POLLHUP)) {
			res = EV_READ | EV_WRITE;
		} else {
			if (pevt->portev_events & POLLIN)
				res |= EV_READ;
			if (pevt->portev_events & POLLOUT)
				res |= EV_WRITE;
		}

		/*
		 * Check for the error situations or a hangup situation
		 */
		if (pevt->portev_events & (POLLERR|POLLHUP|POLLNVAL))
			res |= EV_READ|EV_WRITE;

		evmap_io_active_(base, fd, res);
	} /* end of all events gotten */
	epdp->ed_npending = nevents;

	if (nevents == epdp->ed_maxevents &&
	    epdp->ed_maxevents < MAX_EVENTS_PER_GETN) {
		/* we used all the space this time.  We should be ready
		 * for more events next time around. */
		grow(epdp, epdp->ed_maxevents * 2);
	}

	check_evportop(epdp);

	return (0);
}
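The timespec setup near the top is the one conversion every event-ports caller has to make: port_getn() takes nanoseconds where struct timeval carries microseconds, and a NULL pointer still means "block until something is ready". A tiny sketch of that conversion, with an invented helper name:

#include <stddef.h>
#include <sys/time.h>
#include <time.h>

/* Same value, nanoseconds instead of microseconds; NULL in, NULL out. */
static struct timespec *
tv_to_ts(const struct timeval *tv, struct timespec *ts)
{
	if (tv == NULL)
		return NULL;
	ts->tv_sec = tv->tv_sec;
	ts->tv_nsec = tv->tv_usec * 1000;
	return ts;
}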
Example #6
static int
poll_dispatch(struct event_base *base, struct timeval *tv)
{
	int res, i, j, nfds;
	long msec = -1;
	struct pollop *pop = base->evbase;
	struct pollfd *event_set;

	poll_check_ok(pop);

	nfds = pop->nfds;

#ifndef EVENT__DISABLE_THREAD_SUPPORT
	if (base->th_base_lock) {
		/* If we're using this backend in a multithreaded setting,
		 * then we need to work on a copy of event_set, so that we can
		 * let other threads modify the main event_set while we're
		 * polling. If we're not multithreaded, then we'll skip the
		 * copy step here to save memory and time. */
		if (pop->realloc_copy) {
			struct pollfd *tmp = mm_realloc(pop->event_set_copy,
			    pop->event_count * sizeof(struct pollfd));
			if (tmp == NULL) {
				event_warn("realloc");
				return -1;
			}
			pop->event_set_copy = tmp;
			pop->realloc_copy = 0;
		}
		memcpy(pop->event_set_copy, pop->event_set,
		    sizeof(struct pollfd)*nfds);
		event_set = pop->event_set_copy;
	} else {
		event_set = pop->event_set;
	}
#else
	event_set = pop->event_set;
#endif

	if (tv != NULL) {
		msec = evutil_tv_to_msec_(tv);
		if (msec < 0 || msec > INT_MAX)
			msec = INT_MAX;
	}

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	res = poll(event_set, nfds, msec);

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (res == -1) {
		if (errno != EINTR) {
			event_warn("poll");
			return (-1);
		}

		return (0);
	}

	event_debug(("%s: poll reports %d", __func__, res));

	if (res == 0 || nfds == 0)
		return (0);

	i = evutil_weakrand_range_(&base->weakrand_seed, nfds);
	for (j = 0; j < nfds; j++) {
		int what;
		if (++i == nfds)
			i = 0;
		what = event_set[i].revents;
		if (!what)
			continue;

		res = 0;

		/* If the file gets closed notify */
		if (what & (POLLHUP|POLLERR|POLLNVAL))
			what |= POLLIN|POLLOUT;
		if (what & POLLIN)
			res |= EV_READ;
		if (what & POLLOUT)
			res |= EV_WRITE;
		if (res == 0)
			continue;

		evmap_io_active_(base, event_set[i].fd, res);
	}

	return (0);
}
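This backend also shows why the lock dance matters: the fds are copied into event_set_copy precisely so that, once EVBASE_RELEASE_LOCK runs, other threads can modify the live set while this one is blocked in poll(). A minimal sketch of that release/poll/acquire bracket using a plain pthread mutex in place of the event_base lock (poll_outside_lock is an invented name; the real macros also compile away in single-threaded builds):

#include <poll.h>
#include <pthread.h>

/* Drop the base lock while the thread sleeps in the kernel so other
 * threads may add or delete events, then retake it before touching
 * shared state again. */
static int
poll_outside_lock(pthread_mutex_t *base_lock,
    struct pollfd *fds, nfds_t nfds, int timeout_ms)
{
	int res;

	pthread_mutex_unlock(base_lock);	/* let other threads in */
	res = poll(fds, nfds, timeout_ms);	/* may block for a while */
	pthread_mutex_lock(base_lock);		/* own shared state again */
	return res;
}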
Example #7
static int
select_dispatch(struct event_base *base, struct timeval *tv)
{
	int res=0, i, j, nfds;
	struct selectop *sop = base->evbase;

	check_selectop(sop);
	if (sop->resize_out_sets) {
		fd_set *readset_out=NULL, *writeset_out=NULL;
		size_t sz = sop->event_fdsz;
		if (!(readset_out = mm_realloc(sop->event_readset_out, sz)))
			return (-1);
		sop->event_readset_out = readset_out;
		if (!(writeset_out = mm_realloc(sop->event_writeset_out, sz))) {
			/* We don't free readset_out here, since it was
			 * already successfully reallocated. The next time
			 * we call select_dispatch, the realloc will be a
			 * no-op. */
			return (-1);
		}
		sop->event_writeset_out = writeset_out;
		sop->resize_out_sets = 0;
	}

	memcpy(sop->event_readset_out, sop->event_readset_in,
	       sop->event_fdsz);
	memcpy(sop->event_writeset_out, sop->event_writeset_in,
	       sop->event_fdsz);

	nfds = sop->event_fds+1;

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	res = select(nfds, sop->event_readset_out,
	    sop->event_writeset_out, NULL, tv);

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	check_selectop(sop);

	if (res == -1) {
		if (errno != EINTR) {
			event_warn("select");
			return (-1);
		}

		return (0);
	}

	event_debug(("%s: select reports %d", __func__, res));

	check_selectop(sop);
	i = evutil_weakrand_range_(&base->weakrand_seed, nfds);
	for (j = 0; j < nfds; ++j) {
		if (++i >= nfds)
			i = 0;
		res = 0;
		if (FD_ISSET(i, sop->event_readset_out))
			res |= EV_READ;
		if (FD_ISSET(i, sop->event_writeset_out))
			res |= EV_WRITE;

		if (res == 0)
			continue;

		evmap_io_active_(base, i, res);
	}
	check_selectop(sop);

	return (0);
}
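Finally, note the identical EINTR treatment in every example: a wait interrupted by a signal is reported as success with zero events, so the event loop simply comes back around, while any other errno is logged and aborts the loop. A small sketch of that convention (select_ignoring_eintr is an invented wrapper):

#include <errno.h>
#include <stdio.h>
#include <sys/select.h>

/* EINTR is not a failure, just "zero events this round"; any other
 * error is reported to the caller. */
static int
select_ignoring_eintr(int nfds, fd_set *rset, fd_set *wset,
    struct timeval *tv, int *n_ready)
{
	int res = select(nfds, rset, wset, NULL, tv);

	if (res == -1) {
		if (errno != EINTR) {
			perror("select");
			return -1;		/* real failure */
		}
		res = 0;			/* interrupted: no events */
	}
	*n_ready = res;
	return 0;
}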