bool EventMachine_t::_RunKqueueOnce()
{
	#ifdef HAVE_KQUEUE
	assert (kqfd != -1);
	const int maxKevents = 2000;
	struct kevent Karray [maxKevents];
	struct timespec ts = {0, 10000000};	// Too frequent. Use blocking_region

	int k = kevent (kqfd, NULL, 0, Karray, maxKevents, &ts);
	struct kevent *ke = Karray;
	while (k > 0) {
		EventableDescriptor *ed = (EventableDescriptor*) (ke->udata);
		assert (ed);

		if (ke->filter == EVFILT_READ)
			ed->Read();
		else if (ke->filter == EVFILT_WRITE)
			ed->Write();
		else
			cerr << "Discarding unknown kqueue event " << ke->filter << endl;

		--k;
		++ke;
	}

	{ // cleanup dying sockets
		// vector::pop_back works in constant time.
		// TODO, rip this out and only delete the descriptors we know have died,
		// rather than traversing the whole list.
		// In kqueue, closing a descriptor automatically removes its event filters.
		int i, j;
		int nSockets = Descriptors.size();
		for (i=0, j=0; i < nSockets; i++) {
			EventableDescriptor *ed = Descriptors[i];
			assert (ed);
			if (ed->ShouldDelete()) {
				ModifiedDescriptors.erase (ed);
				delete ed;
			}
			else
				Descriptors [j++] = ed;
		}
		while ((size_t)j < Descriptors.size())
			Descriptors.pop_back();
	}

	{ // dispatch heartbeats
		if (gCurrentLoopTime >= NextHeartbeatTime) {
			NextHeartbeatTime = gCurrentLoopTime + HeartbeatInterval;

			for (size_t i=0; i < Descriptors.size(); i++) {
				EventableDescriptor *ed = Descriptors[i];
				assert (ed);
				ed->Heartbeat();
			}
		}
	}

	// TODO, replace this with rb_thread_blocking_region for 1.9 builds.
	timeval tv = {0,0};
	EmSelect (0, NULL, NULL, NULL, &tv);

	return true;
	#else
	throw std::runtime_error ("kqueue is not implemented on this platform");
	#endif
}
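
/* For context: a minimal sketch of how a read filter might be armed for a
 * descriptor so that _RunKqueueOnce receives it back in ke->udata. This is
 * illustrative only; the helper name RegisterKqueueReader is hypothetical,
 * and it assumes <sys/event.h> is available under HAVE_KQUEUE. EV_SET stashes
 * the descriptor pointer in the kevent's udata field, which is exactly what
 * _RunKqueueOnce casts back out when the event fires.
 */
#ifdef HAVE_KQUEUE
static void RegisterKqueueReader (int kqfd, EventableDescriptor *ed)
{
	struct kevent k;
	// EV_ADD registers the filter; the final argument (udata) rides along
	// with every event delivered for this descriptor.
	EV_SET (&k, ed->GetSocket(), EVFILT_READ, EV_ADD, 0, 0, ed);
	if (kevent (kqfd, &k, 1, NULL, 0, NULL) == -1)
		throw std::runtime_error ("unable to register kqueue read filter");
}
#endif
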
bool EventMachine_t::_RunSelectOnce()
{
	// Crank the event machine once.
	// If there are no descriptors to process, then sleep
	// for a few hundred milliseconds to avoid busy-looping.
	// Return T/F to indicate whether we should continue.
	// This is based on a select loop. Alternatively, provide epoll
	// if we know we're running on a 2.6 kernel.
	// epoll will be effective if we provide it as an alternative;
	// however, it has the same problem interoperating with Ruby
	// threads that select does.

	//cerr << "X";

	/* This protection is now obsolete, because we will ALWAYS
	 * have at least one descriptor (the loop-breaker) to read.
	 */
	/*
	if (Descriptors.size() == 0) {
		#ifdef OS_UNIX
		timeval tv = {0, 200 * 1000};
		EmSelect (0, NULL, NULL, NULL, &tv);
		return true;
		#endif
		#ifdef OS_WIN32
		Sleep (200);
		return true;
		#endif
	}
	*/

	SelectData_t SelectData;
	/*
	fd_set fdreads, fdwrites;
	FD_ZERO (&fdreads);
	FD_ZERO (&fdwrites);
	int maxsocket = 0;
	*/

	// Always read the loop-breaker reader.
	// Changed 23Aug06, provisionally implemented for Windows with a UDP socket
	// running on localhost with a randomly-chosen port. (*Puke*)
	// Windows has a version of the Unix pipe() library function, but it doesn't
	// give you back descriptors that are selectable.
	FD_SET (LoopBreakerReader, &(SelectData.fdreads));
	if (SelectData.maxsocket < LoopBreakerReader)
		SelectData.maxsocket = LoopBreakerReader;

	// prepare the sockets for reading and writing
	size_t i;
	for (i = 0; i < Descriptors.size(); i++) {
		EventableDescriptor *ed = Descriptors[i];
		assert (ed);
		int sd = ed->GetSocket();
		assert (sd != INVALID_SOCKET);

		if (ed->SelectForRead())
			FD_SET (sd, &(SelectData.fdreads));
		if (ed->SelectForWrite())
			FD_SET (sd, &(SelectData.fdwrites));

		if (SelectData.maxsocket < sd)
			SelectData.maxsocket = sd;
	}

	{ // read and write the sockets
		//timeval tv = {1, 0};		// Solaris fails if the microseconds member is >= 1000000.
		//timeval tv = Quantum;
		SelectData.tv = Quantum;
		int s = SelectData._Select();
		//rb_thread_blocking_region(xxx,(void*)&SelectData,RB_UBF_DFL,0);
		//int s = EmSelect (SelectData.maxsocket+1, &(SelectData.fdreads), &(SelectData.fdwrites), NULL, &(SelectData.tv));
		//int s = SelectData.nSockets;
		if (s > 0) {
			/* Changed 01Jun07. We used to handle the loop-breaker right here.
			 * Now we do it AFTER all the regular descriptors. There's an
			 * incredibly important and subtle reason for this. Code on
			 * loop-breakers is sometimes used to cause the reactor core to
			 * cycle (for example, to allow outbound network buffers to drain).
			 * If a loop-breaker handler reschedules itself (say, after determining
			 * that the write buffers are still too full), then it will execute
			 * IMMEDIATELY if _ReadLoopBreaker is done here instead of after
			 * the other descriptors are processed. That defeats the whole purpose.
			 */
			for (i=0; i < Descriptors.size(); i++) {
				EventableDescriptor *ed = Descriptors[i];
				assert (ed);
				int sd = ed->GetSocket();
				assert (sd != INVALID_SOCKET);

				if (FD_ISSET (sd, &(SelectData.fdwrites)))
					ed->Write();
				if (FD_ISSET (sd, &(SelectData.fdreads)))
					ed->Read();
			}

			if (FD_ISSET (LoopBreakerReader, &(SelectData.fdreads)))
				_ReadLoopBreaker();
		}
		else if (s < 0) {
			// select can fail on error in a handful of ways.
			// If this happens, then wait for a little while to avoid busy-looping.
			// If the error was EINTR, we probably caught SIGCHLD or something,
			// so keep the wait short.
			timeval tv = {0, ((errno == EINTR) ? 5 : 50) * 1000};
			EmSelect (0, NULL, NULL, NULL, &tv);
		}
	}

	{ // dispatch heartbeats
		if (gCurrentLoopTime >= NextHeartbeatTime) {
			NextHeartbeatTime = gCurrentLoopTime + HeartbeatInterval;

			for (i=0; i < Descriptors.size(); i++) {
				EventableDescriptor *ed = Descriptors[i];
				assert (ed);
				ed->Heartbeat();
			}
		}
	}

	{ // cleanup dying sockets
		// vector::pop_back works in constant time.
		int i, j;
		int nSockets = Descriptors.size();
		for (i=0, j=0; i < nSockets; i++) {
			EventableDescriptor *ed = Descriptors[i];
			assert (ed);
			if (ed->ShouldDelete())
				delete ed;
			else
				Descriptors [j++] = ed;
		}
		while ((size_t)j < Descriptors.size())
			Descriptors.pop_back();
	}

	return true;
}
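
/* For reference: a minimal sketch of what the SelectData_t helper used above
 * could look like. This is an assumption reconstructed from how _RunSelectOnce
 * uses it (fdreads, fdwrites, maxsocket, tv, and _Select()); the real
 * definition lives elsewhere in the codebase, so this copy is disabled.
 */
#if 0
struct SelectData_t
{
	SelectData_t() {
		maxsocket = 0;
		FD_ZERO (&fdreads);
		FD_ZERO (&fdwrites);
	}

	// Wraps select() via EmSelect; returns the raw result so callers
	// can branch on <0 (error), 0 (timeout), or >0 (ready descriptors).
	int _Select() {
		return EmSelect (maxsocket+1, &fdreads, &fdwrites, NULL, &tv);
	}

	int maxsocket;
	fd_set fdreads, fdwrites;
	timeval tv;
};
#endif
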
bool EventMachine_t::_RunEpollOnce()
{
	#ifdef HAVE_EPOLL
	assert (epfd != -1);
	struct epoll_event ev [MaxEpollDescriptors];
	int s = epoll_wait (epfd, ev, MaxEpollDescriptors, 50);
	if (s > 0) {
		for (int i=0; i < s; i++) {
			EventableDescriptor *ed = (EventableDescriptor*) ev[i].data.ptr;

			if (ev[i].events & (EPOLLERR | EPOLLHUP))
				ed->ScheduleClose (false);
			if (ev[i].events & EPOLLIN)
				ed->Read();
			if (ev[i].events & EPOLLOUT) {
				ed->Write();
				epoll_ctl (epfd, EPOLL_CTL_MOD, ed->GetSocket(), ed->GetEpollEvent());
				// Ignoring return value
			}
		}
	}
	else if (s < 0) {
		// epoll_wait can fail on error in a handful of ways.
		// If this happens, then wait for a little while to avoid busy-looping.
		// If the error was EINTR, we probably caught SIGCHLD or something,
		// so keep the wait short.
		timeval tv = {0, ((errno == EINTR) ? 5 : 50) * 1000};
		EmSelect (0, NULL, NULL, NULL, &tv);
	}

	{ // cleanup dying sockets
		// vector::pop_back works in constant time.
		// TODO, rip this out and only delete the descriptors we know have died,
		// rather than traversing the whole list.
		// Modified 05Jan08 per suggestions by Chris Heath. It's possible that
		// an EventableDescriptor will have a descriptor value of -1. That will
		// happen if EventableDescriptor::Close was called on it. In that case,
		// don't call epoll_ctl to remove the socket's filters from the epoll set.
		// According to the epoll docs, this happens automatically when the
		// descriptor is closed anyway. This is different from the case where
		// the socket has already been closed but the descriptor in the ED object
		// hasn't yet been set to INVALID_SOCKET.
		int i, j;
		int nSockets = Descriptors.size();
		for (i=0, j=0; i < nSockets; i++) {
			EventableDescriptor *ed = Descriptors[i];
			assert (ed);
			if (ed->ShouldDelete()) {
				if (ed->GetSocket() != INVALID_SOCKET) {
					assert (bEpoll); // wouldn't be in this method otherwise.
					assert (epfd != -1);
					int e = epoll_ctl (epfd, EPOLL_CTL_DEL, ed->GetSocket(), ed->GetEpollEvent());
					// ENOENT and EBADF are not errors because the socket may already be closed when we get here.
					if (e && (errno != ENOENT) && (errno != EBADF)) {
						char buf [200];
						snprintf (buf, sizeof(buf)-1, "unable to delete epoll event: %s", strerror(errno));
						throw std::runtime_error (buf);
					}
				}

				ModifiedDescriptors.erase (ed);
				delete ed;
			}
			else
				Descriptors [j++] = ed;
		}
		while ((size_t)j < Descriptors.size())
			Descriptors.pop_back();
	}

	// TODO, heartbeats.
	// Added 14Sep07; their absence was noted by Brian Candler. But this comment was
	// already here, indicating that heartbeats got thought about and not done when
	// EPOLL was originally written. Was there a reason not to do it, or was it an
	// oversight? Certainly, running a heartbeat on 50,000 connections every two
	// seconds can get to be a real bear, especially if all we're doing is timing
	// out dead ones. Maybe there's a better way to do this. (Or maybe it's not
	// that expensive after all.)
	//
	{ // dispatch heartbeats
		if (gCurrentLoopTime >= NextHeartbeatTime) {
			NextHeartbeatTime = gCurrentLoopTime + HeartbeatInterval;

			for (size_t i=0; i < Descriptors.size(); i++) {
				EventableDescriptor *ed = Descriptors[i];
				assert (ed);
				ed->Heartbeat();
			}
		}
	}

	timeval tv = {0,0};
	EmSelect (0, NULL, NULL, NULL, &tv);

	return true;
	#else
	throw std::runtime_error ("epoll is not implemented on this platform");
	#endif
}
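
/* For context: a minimal sketch of how a descriptor would be added to the
 * epoll set so that _RunEpollOnce gets it back in ev[i].data.ptr. Illustrative
 * only; the helper name AddToEpoll is hypothetical, and it assumes the
 * epoll_event returned by GetEpollEvent() is owned and kept alive by the
 * descriptor, as the EPOLL_CTL_MOD/EPOLL_CTL_DEL calls above suggest.
 */
#ifdef HAVE_EPOLL
static void AddToEpoll (int epfd, EventableDescriptor *ed)
{
	struct epoll_event *ev = ed->GetEpollEvent();
	ev->events = EPOLLIN | EPOLLOUT;	// in practice, set per SelectForRead()/SelectForWrite()
	ev->data.ptr = ed;			// _RunEpollOnce casts this back to EventableDescriptor*
	if (epoll_ctl (epfd, EPOLL_CTL_ADD, ed->GetSocket(), ev) == -1) {
		char buf [200];
		snprintf (buf, sizeof(buf)-1, "unable to add epoll event: %s", strerror(errno));
		throw std::runtime_error (buf);
	}
}
#endif
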