Example #1
void EventMachine_t::_AddNewDescriptors()
{
	/* Avoid adding descriptors to the main descriptor list
	 * while we're actually traversing the list.
	 * Any descriptors that are added as a result of processing timers
	 * or acceptors should go on a temporary queue and then be added
	 * while we're not traversing the main list.
	 * Also, it (rarely) happens that a newly-created descriptor
	 * is immediately scheduled to close. It might be a good
	 * idea not to bother scheduling these for I/O but if
	 * we do that, we might bypass some important processing.
	 */

	for (size_t i = 0; i < NewDescriptors.size(); i++) {
		EventableDescriptor *ed = NewDescriptors[i];
		if (ed == NULL)
			throw std::runtime_error ("adding bad descriptor");

		#if HAVE_EPOLL
		if (bEpoll) {
			assert (epfd != -1);
			int e = epoll_ctl (epfd, EPOLL_CTL_ADD, ed->GetSocket(), ed->GetEpollEvent());
			if (e) {
				char buf [200];
				snprintf (buf, sizeof(buf)-1, "unable to add new descriptor: %s", strerror(errno));
				throw std::runtime_error (buf);
			}
		}
		#endif

		#if HAVE_KQUEUE
		/*
		if (bKqueue) {
			// INCOMPLETE. Some descriptors don't want to be readable.
			assert (kqfd != -1);
			struct kevent k;
			EV_SET (&k, ed->GetSocket(), EVFILT_READ, EV_ADD, 0, 0, ed);
			int t = kevent (kqfd, &k, 1, NULL, 0, NULL);
			assert (t == 0);
		}
		*/
		#endif

		Descriptors.push_back (ed);
	}
	NewDescriptors.clear();
}
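
The comment at the top of _AddNewDescriptors describes a two-phase pattern: descriptors created while the main list is being traversed (by timers or acceptors) are parked on NewDescriptors, and are only registered with epoll and appended to Descriptors between traversals. A minimal, self-contained sketch of that deferred-add pattern is shown below; the Reactor class and the use of plain file descriptors are hypothetical simplifications, not EventMachine's actual types.

// Sketch of the deferred-add pattern (hypothetical Reactor type, not EventMachine's API):
// additions requested while the main list is being traversed go on a pending queue
// and are registered with epoll only after the traversal finishes.
#include <sys/epoll.h>
#include <unistd.h>
#include <cerrno>
#include <cstring>
#include <stdexcept>
#include <vector>

class Reactor {
public:
	Reactor() : epfd (epoll_create (1024)) {
		if (epfd == -1)
			throw std::runtime_error (strerror (errno));
	}
	~Reactor() { close (epfd); }

	// Called from timers/acceptors; never touches the main list directly.
	void QueueDescriptor (int fd) { pending.push_back (fd); }

	// Called between traversals of the main list, mirroring _AddNewDescriptors.
	void FlushPending() {
		for (size_t i = 0; i < pending.size(); i++) {
			int fd = pending[i];
			epoll_event ev = {};
			ev.events = EPOLLIN;
			ev.data.fd = fd;
			if (epoll_ctl (epfd, EPOLL_CTL_ADD, fd, &ev))
				throw std::runtime_error (strerror (errno));
			descriptors.push_back (fd);
		}
		pending.clear();
	}

private:
	int epfd;
	std::vector<int> descriptors; // the "main" list
	std::vector<int> pending;     // the temporary queue
};

The same shape appears above: the loop over NewDescriptors does the epoll_ctl registration and the push_back onto Descriptors, and the queue is cleared once everything has been moved over.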
Example #2
bool EventMachine_t::_RunEpollOnce()
{
	#ifdef HAVE_EPOLL
	assert (epfd != -1);
	struct epoll_event ev [MaxEpollDescriptors];
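	// Wait up to 50 ms for ready descriptors; the bounded timeout keeps epoll_wait from blocking the reactor indefinitely.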
	int s = epoll_wait (epfd, ev, MaxEpollDescriptors, 50);
	if (s > 0) {
		for (int i=0; i < s; i++) {
			EventableDescriptor *ed = (EventableDescriptor*) ev[i].data.ptr;

			if (ev[i].events & (EPOLLERR | EPOLLHUP))
				ed->ScheduleClose (false);
			if (ev[i].events & EPOLLIN)
				ed->Read();
			if (ev[i].events & EPOLLOUT) {
				ed->Write();
				epoll_ctl (epfd, EPOLL_CTL_MOD, ed->GetSocket(), ed->GetEpollEvent());
				// Ignoring return value
			}
		}
	}
	else if (s < 0) {
		// epoll_wait can fail on error in a handful of ways.
		// If this happens, then wait for a little while to avoid busy-looping.
		// If the error was EINTR, we probably caught SIGCHLD or something,
		// so keep the wait short.
		timeval tv = {0, ((errno == EINTR) ? 5 : 50) * 1000};
		EmSelect (0, NULL, NULL, NULL, &tv);
	}

	{ // cleanup dying sockets
		// vector::pop_back works in constant time.
		// TODO, rip this out and only delete the descriptors we know have died,
		// rather than traversing the whole list.
		//  Modified 05Jan08 per suggestions by Chris Heath. It's possible that
		//  an EventableDescriptor will have a descriptor value of -1. That will
		//  happen if EventableDescriptor::Close was called on it. In that case,
		//  don't call epoll_ctl to remove the socket's filters from the epoll set.
		//  According to the epoll docs, this happens automatically when the
		//  descriptor is closed anyway. This is different from the case where
		//  the socket has already been closed but the descriptor in the ED object
		//  hasn't yet been set to INVALID_SOCKET.
		int i, j;
		int nSockets = Descriptors.size();
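		// Compact the vector in place: live descriptors are shifted down to index j, dying ones are torn down and deleted.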
		for (i=0, j=0; i < nSockets; i++) {
			EventableDescriptor *ed = Descriptors[i];
			assert (ed);
			if (ed->ShouldDelete()) {
				if (ed->GetSocket() != INVALID_SOCKET) {
					assert (bEpoll); // wouldn't be in this method otherwise.
					assert (epfd != -1);
					int e = epoll_ctl (epfd, EPOLL_CTL_DEL, ed->GetSocket(), ed->GetEpollEvent());
					// ENOENT or EBADF are not errors because the socket may be already closed when we get here.
					if (e && (errno != ENOENT) && (errno != EBADF)) {
						char buf [200];
						snprintf (buf, sizeof(buf)-1, "unable to delete epoll event: %s", strerror(errno));
						throw std::runtime_error (buf);
					}
				}

				ModifiedDescriptors.erase (ed);
				delete ed;
			}
			else
				Descriptors [j++] = ed;
		}
		while ((size_t)j < Descriptors.size())
			Descriptors.pop_back();

	}

	// TODO, heartbeats.
	// Added 14Sep07; its absence was noted by Brian Candler. But the comment was here, indicating
	// that this got thought about and not done when EPOLL was originally written. Was there a reason
	// not to do it, or was it an oversight? Certainly, running a heartbeat on 50,000 connections every
	// two seconds can get to be a real bear, especially if all we're doing is timing out dead ones.
	// Maybe there's a better way to do this. (Or maybe it's not that expensive after all.)
	//
	{ // dispatch heartbeats
		if (gCurrentLoopTime >= NextHeartbeatTime) {
			NextHeartbeatTime = gCurrentLoopTime + HeartbeatInterval;

			for (size_t i = 0; i < Descriptors.size(); i++) {
				EventableDescriptor *ed = Descriptors[i];
				assert (ed);
				ed->Heartbeat();
			}
		}
	}

	timeval tv = {0,0};
	EmSelect (0, NULL, NULL, NULL, &tv);

	return true;
	#else
	throw std::runtime_error ("epoll is not implemented on this platform");
	#endif
}
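
The dispatch section at the top of _RunEpollOnce reduces to a common shape: wait on the epoll set with a bounded timeout, then fan each ready descriptor out to error, read, and write handling in that order. A stripped-down sketch of just that shape follows; the handler functions are hypothetical placeholders, not EventMachine's API, and the cleanup and heartbeat passes from the real function are omitted.

// Stripped-down epoll dispatch loop (hypothetical handlers; illustrative only).
#include <sys/epoll.h>
#include <cerrno>
#include <cstdio>

static void handle_error (int fd) { fprintf (stderr, "error/hup on fd %d\n", fd); }
static void handle_read  (int fd) { (void) fd; /* drain the socket here */ }
static void handle_write (int fd) { (void) fd; /* flush the outbound buffer here */ }

bool run_epoll_once (int epfd)
{
	const int MaxEvents = 64;
	epoll_event ev [MaxEvents];

	// Wait up to 50 ms, as _RunEpollOnce does, so the loop never blocks indefinitely.
	int s = epoll_wait (epfd, ev, MaxEvents, 50);
	if (s < 0)
		return errno == EINTR; // a signal interrupted the wait; treat other errors as fatal here

	for (int i = 0; i < s; i++) {
		int fd = ev[i].data.fd;
		// Error/hangup first, then readability, then writability,
		// mirroring the order used in _RunEpollOnce.
		if (ev[i].events & (EPOLLERR | EPOLLHUP))
			handle_error (fd);
		if (ev[i].events & EPOLLIN)
			handle_read (fd);
		if (ev[i].events & EPOLLOUT)
			handle_write (fd);
	}
	return true;
}

The full function layers two more passes on top of this: the in-place compaction that removes descriptors whose ShouldDelete returns true, and the periodic heartbeat dispatch that runs once HeartbeatInterval has elapsed.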