Example #1
static int
epoll_dispatch(struct event_base *base, struct timeval *tv)
{
	struct epollop *epollop = base->evbase;
	struct epoll_event *events = epollop->events;
	int i, res;
	long timeout = -1;

	if (tv != NULL) {
		timeout = evutil_tv_to_msec(tv);
		if (timeout < 0 || timeout > MAX_EPOLL_TIMEOUT_MSEC) {
			/* Linux kernels can wait forever if the timeout is
			 * too big; see comment on MAX_EPOLL_TIMEOUT_MSEC. */
			timeout = MAX_EPOLL_TIMEOUT_MSEC;
		}
	}

	epoll_apply_changes(base);
	event_changelist_remove_all(&base->changelist, base);

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	res = epoll_wait(epollop->epfd, events, epollop->nevents, timeout);

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (res == -1) {
		if (errno != EINTR) {
			event_warn("epoll_wait");
			return (-1);
		}

		return (0);
	}

	event_debug(("%s: epoll_wait reports %d", __func__, res));
	EVUTIL_ASSERT(res <= epollop->nevents);

	for (i = 0; i < res; i++) {
		int what = events[i].events;
		short ev = 0;

		if (what & (EPOLLHUP|EPOLLERR)) {
			ev = EV_READ | EV_WRITE;
		} else {
			if (what & EPOLLIN)
				ev |= EV_READ;
			if (what & EPOLLOUT)
				ev |= EV_WRITE;
		}

		if (!ev)
			continue;

		evmap_io_active(base, events[i].data.fd, ev | EV_ET);
	}

	if (res == epollop->nevents && epollop->nevents < MAX_NEVENT) {
		/* We used all of the event space this time.  We should
		   be ready for more events next time. */
		int new_nevents = epollop->nevents * 2;
		struct epoll_event *new_events;

		new_events = mm_realloc(epollop->events,
		    new_nevents * sizeof(struct epoll_event));
		if (new_events) {
			epollop->events = new_events;
			epollop->nevents = new_nevents;
		}
	}

	return (0);
}
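For context, the function above is libevent's epoll dispatch backend, which the public event_base_dispatch() loop invokes on Linux each time it waits for I/O. Below is a minimal, hedged sketch of driving it through the public API; the stdin watcher and the on_stdin callback are illustrative and not part of the listing.

#include <event2/event.h>
#include <stdio.h>

/* Illustrative callback: fires whenever stdin becomes readable. */
static void
on_stdin(evutil_socket_t fd, short what, void *arg)
{
	(void)fd; (void)what; (void)arg;
	printf("stdin is readable\n");
}

int
main(void)
{
	struct event_base *base = event_base_new();
	struct event *ev;

	if (base == NULL)
		return 1;

	/* EV_PERSIST keeps the event registered after each callback. */
	ev = event_new(base, 0 /* stdin */, EV_READ | EV_PERSIST, on_stdin, NULL);
	event_add(ev, NULL);

	/* On Linux, each iteration of this loop ends up in epoll_dispatch(). */
	event_base_dispatch(base);

	event_free(ev);
	event_base_free(base);
	return 0;
}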
Example #2
static int
kq_dispatch(struct event_base *base, struct timeval *tv)
{
	struct kqop *kqop = base->evbase;
	struct kevent *events = kqop->events;
	struct kevent *changes;
	struct timespec ts, *ts_p = NULL;
	int i, n_changes, res;

	if (tv != NULL) {
		TIMEVAL_TO_TIMESPEC(tv, &ts);
		ts_p = &ts;
	}

	/* Build "changes" from "base->changes" */
	EVUTIL_ASSERT(kqop->changes);
	n_changes = kq_build_changes_list(&base->changelist, kqop);
	if (n_changes < 0)
		return -1;

	event_changelist_remove_all(&base->changelist, base);

	/* steal the changes array in case some broken code tries to call
	 * dispatch twice at once. */
	changes = kqop->changes;
	kqop->changes = NULL;

	/* Make sure that 'events' is at least as long as the list of changes:
	 * otherwise errors in the changes can get reported as a -1 return
	 * value from kevent() rather than as EV_ERROR events in the events
	 * array.
	 *
	 * (We could instead handle -1 return values from kevent() by
	 * retrying with a smaller changes array or a larger events array,
	 * but this approach seems less risky for now.)
	 */
	if (kqop->events_size < n_changes) {
		int new_size = kqop->events_size;
		do {
			new_size *= 2;
		} while (new_size < n_changes);

		kq_grow_events(kqop, new_size);
		events = kqop->events;
	}

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	res = kevent(kqop->kq, changes, n_changes,
	    events, kqop->events_size, ts_p);

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	EVUTIL_ASSERT(kqop->changes == NULL);
	kqop->changes = changes;

	if (res == -1) {
		if (errno != EINTR) {
			event_warn("kevent");
			return (-1);
		}

		return (0);
	}

	event_debug(("%s: kevent reports %d", __func__, res));

	for (i = 0; i < res; i++) {
		int which = 0;

		if (events[i].flags & EV_ERROR) {
			switch (events[i].data) {

			/* Can occur on delete if we are not currently
			 * watching any events on this fd.  That can
			 * happen when the fd was closed and another
			 * file was opened with that fd. */
			case ENOENT:
			/* Can occur for reasons not fully understood
			 * on FreeBSD. */
			case EINVAL:
				continue;

			/* Can occur on a delete if the fd is closed. */
			case EBADF:
				/* XXXX On NetBSD, we can also get EBADF if we
				 * try to add the write side of a pipe, but
				 * the read side has already been closed.
				 * Other BSDs call this situation 'EPIPE'. It
				 * would be good if we had a way to report
				 * this situation. */
				continue;
			/* These two can occur on an add if the fd was one side
			 * of a pipe, and the other side was closed. */
			case EPERM:
			case EPIPE:
				/* Report read events, if we're listening for
				 * them, so that the user can learn about any
				 * add errors.  (If the operation was a
				 * delete, then udata should be cleared.) */
				if (events[i].udata) {
					/* The operation was an add:
					 * report the error as a read. */
					which |= EV_READ;
					break;
				} else {
					/* The operation was a del:
					 * report nothing. */
					continue;
				}

			/* Other errors shouldn't occur. */
			default:
				errno = events[i].data;
				return (-1);
			}
		} else if (events[i].filter == EVFILT_READ) {
			which |= EV_READ;
		} else if (events[i].filter == EVFILT_WRITE) {
			which |= EV_WRITE;
		} else if (events[i].filter == EVFILT_SIGNAL) {
			which |= EV_SIGNAL;
		}

		if (!which)
			continue;

		if (events[i].filter == EVFILT_SIGNAL) {
			evmap_signal_active(base, events[i].ident, 1);
		} else {
			evmap_io_active(base, events[i].ident, which | EV_ET);
		}
	}

	if (res == kqop->events_size) {
		/* We used all the events space that we have. Maybe we should
		   make it bigger. */
		kq_grow_events(kqop, kqop->events_size * 2);
	}

	return (0);
}
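kq_grow_events() is not shown in this listing. Judging from the inlined mm_realloc() doubling at the end of Example #3, it presumably looks roughly like the sketch below; the exact signature and error handling are assumptions.

/* Hedged sketch: grow the kevent result array to new_size entries.
 * Mirrors the inlined growth logic in Example #3; the real helper in
 * libevent may differ in signature and return-value conventions. */
static int
kq_grow_events(struct kqop *kqop, size_t new_size)
{
	struct kevent *newresult;

	newresult = mm_realloc(kqop->events,
	    new_size * sizeof(struct kevent));
	if (newresult) {
		kqop->events = newresult;
		kqop->events_size = new_size;
		return (0);
	}
	return (-1);
}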
Example #3
static int
kq_dispatch(struct event_base *base, struct timeval *tv)
{
	struct kqop *kqop = base->evbase;
	struct kevent *events = kqop->events;
	struct kevent *changes;
	struct timespec ts, *ts_p = NULL;
	int i, n_changes, res;

	if (tv != NULL) {
		TIMEVAL_TO_TIMESPEC(tv, &ts);
		ts_p = &ts;
	}

	/* Build "changes" from "base->changes" */
	EVUTIL_ASSERT(kqop->changes);
	n_changes = kq_build_changes_list(&base->changelist, kqop);
	if (n_changes < 0)
		return -1;

	event_changelist_remove_all(&base->changelist, base);

	/* steal the changes array in case some broken code tries to call
	 * dispatch twice at once. */
	changes = kqop->changes;
	kqop->changes = NULL;

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	res = kevent(kqop->kq, changes, n_changes,
	    events, kqop->events_size, ts_p);

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	EVUTIL_ASSERT(kqop->changes == NULL);
	kqop->changes = changes;

	if (res == -1) {
		if (errno != EINTR) {
			event_warn("kevent");
			return (-1);
		}

		return (0);
	}

	event_debug(("%s: kevent reports %d", __func__, res));

	for (i = 0; i < res; i++) {
		int which = 0;

		if (events[i].flags & EV_ERROR) {
			/*
			 * Error messages that can happen when a delete fails:
			 *   EBADF happens when the file descriptor has been
			 *   closed,
			 *   ENOENT when the file descriptor was closed and
			 *   then reopened,
			 *   EINVAL for reasons not fully understood; EINVAL
			 *   should never be returned, but FreeBSD does :-\
			 * An error is also indicated when a callback deletes
			 * an event we are still processing.  In that case
			 * the data field is set to ENOENT.
			 */
			if (events[i].data == EBADF ||
			    events[i].data == EINVAL ||
			    events[i].data == ENOENT)
				continue;
			errno = events[i].data;
			return (-1);
		}

		if (events[i].filter == EVFILT_READ) {
			which |= EV_READ;
		} else if (events[i].filter == EVFILT_WRITE) {
			which |= EV_WRITE;
		} else if (events[i].filter == EVFILT_SIGNAL) {
			which |= EV_SIGNAL;
		}

		if (!which)
			continue;

		if (events[i].filter == EVFILT_SIGNAL) {
			evmap_signal_active(base, events[i].ident, 1);
		} else {
			evmap_io_active(base, events[i].ident, which | EV_ET);
		}
	}

	if (res == kqop->events_size) {
		struct kevent *newresult;
		int size = kqop->events_size;
		/* We used all the events space that we have. Maybe we should
		   make it bigger. */
		size *= 2;
		newresult = mm_realloc(kqop->events,
		    size * sizeof(struct kevent));
		if (newresult) {
			kqop->events = newresult;
			kqop->events_size = size;
		}
	}

	return (0);
}
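Which of these dispatch backends a given event_base ends up using is decided at runtime. A small, hedged check using the public configuration API is shown below; the method strings are the standard libevent backend names.

#include <event2/event.h>
#include <stdio.h>

int
main(void)
{
	/* Optionally steer backend selection, e.g. away from kqueue. */
	struct event_config *cfg = event_config_new();
	struct event_base *base;

	event_config_avoid_method(cfg, "kqueue");
	base = event_base_new_with_config(cfg);
	event_config_free(cfg);

	if (base == NULL)
		return 1;

	/* Prints e.g. "epoll" on Linux or "kqueue" on the BSDs/macOS
	 * (here something else, since kqueue was avoided above). */
	printf("backend: %s\n", event_base_get_method(base));

	event_base_free(base);
	return 0;
}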