extern "C" int evma_get_outbound_data_size (const char *binding) { if (!EventMachine) throw std::runtime_error ("not initialized"); EventableDescriptor *ed = dynamic_cast <EventableDescriptor*> (Bindable_t::GetObject (binding)); return ed ? ed->GetOutboundDataSize() : 0; }
void ConnectionDescriptor::CloseConnection (const unsigned long binding, bool after_writing)
{
	// TODO: This is something of a hack, or at least it's a static method of the wrong class.
	EventableDescriptor *ed = dynamic_cast <EventableDescriptor*> (Bindable_t::GetObject (binding));
	if (ed)
		ed->ScheduleClose (after_writing);
}
extern "C" void evma_stop_proxy (const unsigned long from) { ensure_eventmachine("evma_stop_proxy"); EventableDescriptor *ed = dynamic_cast <EventableDescriptor*> (Bindable_t::GetObject (from)); if (ed) ed->StopProxy(); }
extern "C" void evma_start_tls (const unsigned long binding) { ensure_eventmachine("evma_start_tls"); EventableDescriptor *ed = dynamic_cast <EventableDescriptor*> (Bindable_t::GetObject (binding)); if (ed) ed->StartTls(); }
extern "C" void evma_start_proxy (const unsigned long from, const unsigned long to, const unsigned long bufsize) { ensure_eventmachine("evma_start_proxy"); EventableDescriptor *ed = dynamic_cast <EventableDescriptor*> (Bindable_t::GetObject (from)); if (ed) ed->StartProxy(to, bufsize); }
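// Usage sketch (illustrative only, not part of the library): wiring two
// bindings together with the proxy calls above. The binding values are
// assumed to come from the usual acceptor/connector entry points; the names
// `client` and `backend` are hypothetical, and the buffer size is an
// arbitrary example value.
/*
void splice_connections (const unsigned long client, const unsigned long backend)
{
	// Relay everything read on `client` to `backend`.
	evma_start_proxy (client, backend, 16 * 1024, 0); // length 0: no byte limit

	// ... later, tear the relay down and resume normal event delivery:
	evma_stop_proxy (client);
}
*/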
extern "C" void evma_set_tls_parms (const unsigned long binding, const char *privatekey_filename, const char *certchain_filename, int verify_peer) { ensure_eventmachine("evma_set_tls_parms"); EventableDescriptor *ed = dynamic_cast <EventableDescriptor*> (Bindable_t::GetObject (binding)); if (ed) ed->SetTlsParms (privatekey_filename, certchain_filename, (verify_peer == 1 ? true : false)); }
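// Usage sketch (illustrative only): the expected call order for the two TLS
// entry points above. The certificate and key paths are hypothetical;
// verify_peer == 1 requests peer-certificate verification.
/*
void secure_binding (const unsigned long binding)
{
	evma_set_tls_parms (binding, "/etc/ssl/server.key", "/etc/ssl/server.crt", 1);
	evma_start_tls (binding); // must come after the parms are set
}
*/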
extern "C" void evma_close_connection (const unsigned long binding, int after_writing) { ensure_eventmachine("evma_close_connection"); EventableDescriptor *ed = dynamic_cast <EventableDescriptor*> (Bindable_t::GetObject (binding)); if (ed) ed->ScheduleClose (after_writing ? true : false); }
extern "C" int evma_is_paused (const unsigned long binding) { EventableDescriptor *cd = dynamic_cast <EventableDescriptor*> (Bindable_t::GetObject (binding)); if (cd) return cd->IsPaused() ? 1 : 0; return 0; }
extern "C" void evma_set_tls_parms (const char *binding, const char *privatekey_filename, const char *certchain_filename) { if (!EventMachine) throw std::runtime_error ("not initialized"); EventableDescriptor *ed = dynamic_cast <EventableDescriptor*> (Bindable_t::GetObject (binding)); if (ed) ed->SetTlsParms (privatekey_filename, certchain_filename); }
extern "C" X509 *evma_get_peer_cert (const unsigned long binding) { ensure_eventmachine("evma_get_peer_cert"); EventableDescriptor *ed = dynamic_cast <EventableDescriptor*> (Bindable_t::GetObject (binding)); if (ed) return ed->GetPeerCert(); return NULL; }
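// Usage sketch (illustrative only): inspecting the certificate returned by
// evma_get_peer_cert with stock OpenSSL calls. This assumes the caller owns
// the returned X509* and must release it with X509_free.
/*
#include <openssl/x509.h>
#include <cstdio>

void print_peer_subject (const unsigned long binding)
{
	X509 *cert = evma_get_peer_cert (binding);
	if (cert) {
		char buf [256];
		X509_NAME_oneline (X509_get_subject_name (cert), buf, sizeof(buf));
		printf ("peer subject: %s\n", buf);
		X509_free (cert);
	}
}
*/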
extern "C" int evma_report_connection_error_status (const unsigned long binding) { ensure_eventmachine("evma_report_connection_error_status"); EventableDescriptor *ed = dynamic_cast <EventableDescriptor*> (Bindable_t::GetObject (binding)); if (ed) return ed->ReportErrorStatus(); return -1; }
extern "C" int evma_send_data_to_connection (const unsigned long binding, const char *data, int data_length) { ensure_eventmachine("evma_send_data_to_connection"); EventableDescriptor *ed = dynamic_cast <EventableDescriptor*> (Bindable_t::GetObject (binding)); if (ed) return ed->SendOutboundData(data, data_length); return -1; }
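// Usage sketch (illustrative only): a typical write-then-close sequence using
// the calls above. evma_send_data_to_connection only queues the bytes;
// passing after_writing = 1 to evma_close_connection lets the queue drain
// before the socket is closed.
/*
void send_goodbye (const unsigned long binding)
{
	const char msg[] = "goodbye\r\n";
	if (evma_send_data_to_connection (binding, msg, sizeof(msg) - 1) < 0)
		return; // -1 means the binding was unknown (see above)
	evma_close_connection (binding, 1); // close after the outbound queue drains
}
*/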
extern "C" void evma_start_tls (const char *binding) { if (!EventMachine) throw std::runtime_error ("not initialized"); EventableDescriptor *ed = dynamic_cast <EventableDescriptor*> (Bindable_t::GetObject (binding)); if (ed) ed->StartTls(); }
extern "C" uint64_t evma_get_last_activity_time(const unsigned long from) { ensure_eventmachine("evma_get_last_activity_time"); EventableDescriptor *ed = dynamic_cast <EventableDescriptor*> (Bindable_t::GetObject (from)); if (ed) return ed->GetLastActivity(); else return 0; }
extern "C" unsigned long evma_proxied_bytes (const unsigned long from) { ensure_eventmachine("evma_proxied_bytes"); EventableDescriptor *ed = dynamic_cast <EventableDescriptor*> (Bindable_t::GetObject (from)); if (ed) return ed->GetProxiedBytes(); else return 0; }
extern "C" int evma_set_pending_connect_timeout (const unsigned long binding, float value) { ensure_eventmachine("evma_set_pending_connect_timeout"); EventableDescriptor *ed = dynamic_cast <EventableDescriptor*> (Bindable_t::GetObject (binding)); if (ed) { return ed->SetPendingConnectTimeout ((uint64_t)(value * 1000)); } else return 0; }
extern "C" float evma_get_comm_inactivity_timeout (const unsigned long binding) { ensure_eventmachine("evma_get_comm_inactivity_timeout"); EventableDescriptor *ed = dynamic_cast <EventableDescriptor*> (Bindable_t::GetObject (binding)); if (ed) { return ((float)ed->GetCommInactivityTimeout() / 1000); } else return 0.0; //Perhaps this should be an exception. Access to an unknown binding. }
extern "C" int evma_get_sockname (const unsigned long binding, struct sockaddr *sa) { ensure_eventmachine("evma_get_sockname"); EventableDescriptor *ed = dynamic_cast <EventableDescriptor*> (Bindable_t::GetObject (binding)); if (ed) { return ed->GetSockname (sa) ? 1 : 0; } else return 0; }
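// Usage sketch (illustrative only): recovering the local address of a
// binding. This assumes the binding is an IPv4 TCP socket, so the sockaddr
// can be viewed as a sockaddr_in; a real caller should check sa_family first.
/*
#include <arpa/inet.h>
#include <cstdio>

void print_local_address (const unsigned long binding)
{
	struct sockaddr_in sin;
	if (evma_get_sockname (binding, (struct sockaddr*)&sin))
		printf ("bound to %s:%d\n", inet_ntoa (sin.sin_addr), (int)ntohs (sin.sin_port));
}
*/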
extern "C" int evma_set_comm_inactivity_timeout (const unsigned long binding, float value) { ensure_eventmachine("evma_set_comm_inactivity_timeout"); EventableDescriptor *ed = dynamic_cast <EventableDescriptor*> (Bindable_t::GetObject (binding)); if (ed) { return ed->SetCommInactivityTimeout ((uint64_t)(value * 1000)); } else return 0; //Perhaps this should be an exception. Access to an unknown binding. }
extern "C" float evma_get_pending_connect_timeout (const unsigned long binding) { ensure_eventmachine("evma_get_pending_connect_timeout"); EventableDescriptor *ed = dynamic_cast <EventableDescriptor*> (Bindable_t::GetObject (binding)); if (ed) { return ((float)ed->GetPendingConnectTimeout() / 1000); } else return 0.0; }
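// Usage sketch (illustrative only): the timeout accessors above all take and
// return float seconds, while the descriptor stores uint64_t milliseconds;
// the *1000 and /1000 conversions above do the translation.
/*
void tune_timeouts (const unsigned long binding)
{
	evma_set_comm_inactivity_timeout (binding, 30.0f);    // stored as 30000 ms
	evma_set_pending_connect_timeout (binding, 12.5f);    // stored as 12500 ms
	float t = evma_get_pending_connect_timeout (binding); // reads back ~12.5
	(void)t;
}
*/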
extern "C" int evma_set_comm_inactivity_timeout (const char *binding, int *value) { if (!EventMachine) throw std::runtime_error ("not initialized"); EventableDescriptor *ed = dynamic_cast <EventableDescriptor*> (Bindable_t::GetObject (binding)); if (ed) { return ed->SetCommInactivityTimeout (value); } else return 0; //Perhaps this should be an exception. Access to an unknown binding. }
extern "C" int evma_get_sockname (const char *binding, struct sockaddr *sa) { if (!EventMachine) throw std::runtime_error ("not initialized"); EventableDescriptor *ed = dynamic_cast <EventableDescriptor*> (Bindable_t::GetObject (binding)); if (ed) { return ed->GetSockname (sa) ? 1 : 0; } else return 0; }
extern "C" int evma_get_subprocess_pid (const char *binding, pid_t *pid) { if (!EventMachine) throw std::runtime_error ("not initialized"); EventableDescriptor *ed = dynamic_cast <EventableDescriptor*> (Bindable_t::GetObject (binding)); if (ed) { return ed->GetSubprocessPid (pid) ? 1 : 0; } else return 0; }
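// Usage sketch (illustrative only): reaping a finished subprocess once its
// descriptor reports a pid. WNOHANG keeps the reactor thread from blocking.
/*
#include <sys/wait.h>

void try_reap (const unsigned long binding)
{
	pid_t pid = 0;
	if (evma_get_subprocess_pid (binding, &pid)) {
		int status = 0;
		waitpid (pid, &status, WNOHANG); // returns 0 if the child is still running
	}
}
*/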
void EventableDescriptor::StartProxy (const unsigned long to, const unsigned long bufsize, const unsigned long length)
{
	EventableDescriptor *ed = dynamic_cast <EventableDescriptor*> (Bindable_t::GetObject (to));
	if (ed) {
		StopProxy();
		ProxyTarget = ed;
		BytesToProxy = length;
		ed->SetProxiedFrom (this, bufsize);
		return;
	}
	throw std::runtime_error ("Tried to proxy to an invalid descriptor");
}
extern "C" int evma_get_file_descriptor (const unsigned long binding) { ensure_eventmachine("evma_get_file_descriptor"); EventableDescriptor *ed = dynamic_cast <EventableDescriptor*> (Bindable_t::GetObject (binding)); if (ed) return ed->GetSocket(); else #ifdef BUILD_FOR_RUBY rb_raise(rb_eRuntimeError, "invalid binding to get_fd"); #else throw std::runtime_error ("invalid binding to get_fd"); #endif }
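// Usage sketch (illustrative only): tweaking a socket option directly on the
// raw descriptor. Note that evma_get_file_descriptor raises/throws on an
// unknown binding rather than returning a sentinel value.
/*
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

void disable_nagle (const unsigned long binding)
{
	int fd = evma_get_file_descriptor (binding);
	int one = 1;
	setsockopt (fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
}
*/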
void EventMachine_t::_AddNewDescriptors()
{
	/* Avoid adding descriptors to the main descriptor list
	 * while we're actually traversing the list.
	 * Any descriptors that are added as a result of processing timers
	 * or acceptors should go on a temporary queue and then be added
	 * while we're not traversing the main list.
	 * Also, it (rarely) happens that a newly-created descriptor
	 * is immediately scheduled to close. It might be a good
	 * idea not to bother scheduling these for I/O but if
	 * we do that, we might bypass some important processing.
	 */

	for (size_t i = 0; i < NewDescriptors.size(); i++) {
		EventableDescriptor *ed = NewDescriptors[i];
		if (ed == NULL)
			throw std::runtime_error ("adding bad descriptor");

		#ifdef HAVE_EPOLL
		if (bEpoll) {
			assert (epfd != -1);
			int e = epoll_ctl (epfd, EPOLL_CTL_ADD, ed->GetSocket(), ed->GetEpollEvent());
			if (e) {
				char buf [200];
				snprintf (buf, sizeof(buf)-1, "unable to add new descriptor: %s", strerror(errno));
				throw std::runtime_error (buf);
			}
		}
		#endif

		#ifdef HAVE_KQUEUE
		/*
		if (bKqueue) {
			// INCOMPLETE. Some descriptors don't want to be readable.
			assert (kqfd != -1);
			struct kevent k;
			EV_SET (&k, ed->GetSocket(), EVFILT_READ, EV_ADD, 0, 0, ed);
			int t = kevent (kqfd, &k, 1, NULL, 0, NULL);
			assert (t == 0);
		}
		*/
		#endif

		Descriptors.push_back (ed);
	}
	NewDescriptors.clear();
}
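// Minimal sketch of the deferred-add pattern used above (illustrative only):
// callbacks that fire while the main list is being traversed push onto a side
// queue, and the queue is folded in between traversals, so indexes into the
// main list are never invalidated mid-loop.
/*
#include <vector>

std::vector<EventableDescriptor*> Main, Pending;

void DuringTraversal (EventableDescriptor *ed) {
	Pending.push_back (ed); // never touch Main here
}

void BetweenTraversals () {
	for (size_t i = 0; i < Pending.size(); i++)
		Main.push_back (Pending[i]);
	Pending.clear();
}
*/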
extern "C" int evma_get_outbound_data_size (const unsigned long binding) { ensure_eventmachine("evma_get_outbound_data_size"); EventableDescriptor *ed = dynamic_cast <EventableDescriptor*> (Bindable_t::GetObject (binding)); return ed ? ed->GetOutboundDataSize() : 0; }
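// Usage sketch (illustrative only): using the queue depth above as a
// backpressure signal before queueing more data. The 64KB threshold is an
// arbitrary example value.
/*
bool try_send (const unsigned long binding, const char *data, int len)
{
	if (evma_get_outbound_data_size (binding) > 64 * 1024)
		return false; // let the queue drain; retry from a timer or loop-break
	return evma_send_data_to_connection (binding, data, len) >= 0;
}
*/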
bool EventMachine_t::_RunSelectOnce()
{
	// Crank the event machine once.
	// If there are no descriptors to process, then sleep
	// for a few hundred mills to avoid busy-looping.
	// Return T/F to indicate whether we should continue.
	// This is based on a select loop. Alternately provide epoll
	// if we know we're running on a 2.6 kernel.
	// epoll will be effective if we provide it as an alternative,
	// however it has the same problem interoperating with Ruby
	// threads that select does.

	//cerr << "X";

	/* This protection is now obsolete, because we will ALWAYS
	 * have at least one descriptor (the loop-breaker) to read.
	 */
	/*
	if (Descriptors.size() == 0) {
		#ifdef OS_UNIX
		timeval tv = {0, 200 * 1000};
		EmSelect (0, NULL, NULL, NULL, &tv);
		return true;
		#endif
		#ifdef OS_WIN32
		Sleep (200);
		return true;
		#endif
	}
	*/

	SelectData_t SelectData;
	/*
	fd_set fdreads, fdwrites;
	FD_ZERO (&fdreads);
	FD_ZERO (&fdwrites);
	int maxsocket = 0;
	*/

	// Always read the loop-breaker reader.
	// Changed 23Aug06, provisionally implemented for Windows with a UDP socket
	// running on localhost with a randomly-chosen port. (*Puke*)
	// Windows has a version of the Unix pipe() library function, but it doesn't
	// give you back descriptors that are selectable.
	FD_SET (LoopBreakerReader, &(SelectData.fdreads));
	if (SelectData.maxsocket < LoopBreakerReader)
		SelectData.maxsocket = LoopBreakerReader;

	// prepare the sockets for reading and writing
	size_t i;
	for (i = 0; i < Descriptors.size(); i++) {
		EventableDescriptor *ed = Descriptors[i];
		assert (ed);
		int sd = ed->GetSocket();
		assert (sd != INVALID_SOCKET);

		if (ed->SelectForRead())
			FD_SET (sd, &(SelectData.fdreads));
		if (ed->SelectForWrite())
			FD_SET (sd, &(SelectData.fdwrites));

		if (SelectData.maxsocket < sd)
			SelectData.maxsocket = sd;
	}

	{ // read and write the sockets
		//timeval tv = {1, 0}; // Solaris fails if the microseconds member is >= 1000000.
		//timeval tv = Quantum;
		SelectData.tv = Quantum;
		int s = SelectData._Select();
		//rb_thread_blocking_region(xxx,(void*)&SelectData,RB_UBF_DFL,0);
		//int s = EmSelect (SelectData.maxsocket+1, &(SelectData.fdreads), &(SelectData.fdwrites), NULL, &(SelectData.tv));
		//int s = SelectData.nSockets;

		if (s > 0) {
			/* Changed 01Jun07. We used to handle the loop-breaker right here.
			 * Now we do it AFTER all the regular descriptors. There's an
			 * incredibly important and subtle reason for this. Code on
			 * loop-breakers is sometimes used to cause the reactor core to
			 * cycle (for example, to allow outbound network buffers to drain).
			 * If a loop-breaker handler reschedules itself (say, after determining
			 * that the write buffers are still too full), then it will execute
			 * IMMEDIATELY if _ReadLoopBreaker is done here instead of after
			 * the other descriptors are processed. That defeats the whole purpose.
			 */
			for (i = 0; i < Descriptors.size(); i++) {
				EventableDescriptor *ed = Descriptors[i];
				assert (ed);
				int sd = ed->GetSocket();
				assert (sd != INVALID_SOCKET);

				if (FD_ISSET (sd, &(SelectData.fdwrites)))
					ed->Write();
				if (FD_ISSET (sd, &(SelectData.fdreads)))
					ed->Read();
			}

			if (FD_ISSET (LoopBreakerReader, &(SelectData.fdreads)))
				_ReadLoopBreaker();
		}
		else if (s < 0) {
			// select can fail on error in a handful of ways.
			// If this happens, then wait for a little while to avoid busy-looping.
			// If the error was EINTR, we probably caught SIGCHLD or something,
			// so keep the wait short.
			timeval tv = {0, ((errno == EINTR) ? 5 : 50) * 1000};
			EmSelect (0, NULL, NULL, NULL, &tv);
		}
	}

	{ // dispatch heartbeats
		if (gCurrentLoopTime >= NextHeartbeatTime) {
			NextHeartbeatTime = gCurrentLoopTime + HeartbeatInterval;

			for (i = 0; i < Descriptors.size(); i++) {
				EventableDescriptor *ed = Descriptors[i];
				assert (ed);
				ed->Heartbeat();
			}
		}
	}

	{ // cleanup dying sockets
		// vector::pop_back works in constant time.
		int i, j;
		int nSockets = Descriptors.size();
		for (i = 0, j = 0; i < nSockets; i++) {
			EventableDescriptor *ed = Descriptors[i];
			assert (ed);
			if (ed->ShouldDelete())
				delete ed;
			else
				Descriptors[j++] = ed;
		}
		while ((size_t)j < Descriptors.size())
			Descriptors.pop_back();
	}

	return true;
}
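// Minimal sketch of the "cleanup dying sockets" idiom above (illustrative
// only): a stable in-place filter. Survivors are compacted toward the front
// and the tail is trimmed with pop_back, which runs in constant time, so the
// whole sweep is O(n).
/*
#include <vector>

void SweepDead (std::vector<EventableDescriptor*> &v)
{
	size_t j = 0;
	for (size_t i = 0; i < v.size(); i++) {
		if (v[i]->ShouldDelete())
			delete v[i];    // drop the dead descriptor
		else
			v[j++] = v[i];  // keep the survivor, preserving order
	}
	while (j < v.size())
		v.pop_back();
}
*/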
bool EventMachine_t::_RunKqueueOnce()
{
	#ifdef HAVE_KQUEUE
	assert (kqfd != -1);
	const int maxKevents = 2000;
	struct kevent Karray [maxKevents];
	struct timespec ts = {0, 10000000}; // Too frequent. Use blocking_region

	int k = kevent (kqfd, NULL, 0, Karray, maxKevents, &ts);
	struct kevent *ke = Karray;
	while (k > 0) {
		EventableDescriptor *ed = (EventableDescriptor*) (ke->udata);
		assert (ed);

		if (ke->filter == EVFILT_READ)
			ed->Read();
		else if (ke->filter == EVFILT_WRITE)
			ed->Write();
		else
			cerr << "Discarding unknown kqueue event " << ke->filter << endl;

		--k;
		++ke;
	}

	{ // cleanup dying sockets
		// vector::pop_back works in constant time.
		// TODO, rip this out and only delete the descriptors we know have died,
		// rather than traversing the whole list.
		// In kqueue, closing a descriptor automatically removes its event filters.
		int i, j;
		int nSockets = Descriptors.size();
		for (i = 0, j = 0; i < nSockets; i++) {
			EventableDescriptor *ed = Descriptors[i];
			assert (ed);
			if (ed->ShouldDelete()) {
				ModifiedDescriptors.erase (ed);
				delete ed;
			}
			else
				Descriptors[j++] = ed;
		}
		while ((size_t)j < Descriptors.size())
			Descriptors.pop_back();
	}

	{ // dispatch heartbeats
		if (gCurrentLoopTime >= NextHeartbeatTime) {
			NextHeartbeatTime = gCurrentLoopTime + HeartbeatInterval;

			for (size_t i = 0; i < Descriptors.size(); i++) {
				EventableDescriptor *ed = Descriptors[i];
				assert (ed);
				ed->Heartbeat();
			}
		}
	}

	// TODO, replace this with rb_thread_blocking_region for 1.9 builds.
	timeval tv = {0, 0};
	EmSelect (0, NULL, NULL, NULL, &tv);

	return true;
	#else
	throw std::runtime_error ("kqueue is not implemented on this platform");
	#endif
}
bool EventMachine_t::_RunEpollOnce()
{
	#ifdef HAVE_EPOLL
	assert (epfd != -1);
	struct epoll_event ev [MaxEpollDescriptors];
	int s = epoll_wait (epfd, ev, MaxEpollDescriptors, 50);

	if (s > 0) {
		for (int i = 0; i < s; i++) {
			EventableDescriptor *ed = (EventableDescriptor*) ev[i].data.ptr;

			if (ev[i].events & (EPOLLERR | EPOLLHUP))
				ed->ScheduleClose (false);
			if (ev[i].events & EPOLLIN)
				ed->Read();
			if (ev[i].events & EPOLLOUT) {
				ed->Write();
				epoll_ctl (epfd, EPOLL_CTL_MOD, ed->GetSocket(), ed->GetEpollEvent()); // Ignoring return value
			}
		}
	}
	else if (s < 0) {
		// epoll_wait can fail on error in a handful of ways.
		// If this happens, then wait for a little while to avoid busy-looping.
		// If the error was EINTR, we probably caught SIGCHLD or something,
		// so keep the wait short.
		timeval tv = {0, ((errno == EINTR) ? 5 : 50) * 1000};
		EmSelect (0, NULL, NULL, NULL, &tv);
	}

	{ // cleanup dying sockets
		// vector::pop_back works in constant time.
		// TODO, rip this out and only delete the descriptors we know have died,
		// rather than traversing the whole list.

		// Modified 05Jan08 per suggestions by Chris Heath. It's possible that
		// an EventableDescriptor will have a descriptor value of -1. That will
		// happen if EventableDescriptor::Close was called on it. In that case,
		// don't call epoll_ctl to remove the socket's filters from the epoll set.
		// According to the epoll docs, this happens automatically when the
		// descriptor is closed anyway. This is different from the case where
		// the socket has already been closed but the descriptor in the ED object
		// hasn't yet been set to INVALID_SOCKET.
		int i, j;
		int nSockets = Descriptors.size();
		for (i = 0, j = 0; i < nSockets; i++) {
			EventableDescriptor *ed = Descriptors[i];
			assert (ed);
			if (ed->ShouldDelete()) {
				if (ed->GetSocket() != INVALID_SOCKET) {
					assert (bEpoll); // wouldn't be in this method otherwise.
					assert (epfd != -1);
					int e = epoll_ctl (epfd, EPOLL_CTL_DEL, ed->GetSocket(), ed->GetEpollEvent());
					// ENOENT or EBADF are not errors because the socket may already be closed when we get here.
					if (e && (errno != ENOENT) && (errno != EBADF)) {
						char buf [200];
						snprintf (buf, sizeof(buf)-1, "unable to delete epoll event: %s", strerror(errno));
						throw std::runtime_error (buf);
					}
				}
				ModifiedDescriptors.erase (ed);
				delete ed;
			}
			else
				Descriptors[j++] = ed;
		}
		while ((size_t)j < Descriptors.size())
			Descriptors.pop_back();
	}

	{ // dispatch heartbeats
		// Added 14Sep07; its absence was noted by Brian Candler. A comment here
		// indicated that this was considered but not done when the epoll code was
		// originally written. Was there a reason not to do it, or was it an
		// oversight? Certainly, running a heartbeat on 50,000 connections every
		// two seconds can get to be a real bear, especially if all we're doing is
		// timing out dead ones. Maybe there's a better way to do this. (Or maybe
		// it's not that expensive after all.)
		if (gCurrentLoopTime >= NextHeartbeatTime) {
			NextHeartbeatTime = gCurrentLoopTime + HeartbeatInterval;

			for (size_t i = 0; i < Descriptors.size(); i++) {
				EventableDescriptor *ed = Descriptors[i];
				assert (ed);
				ed->Heartbeat();
			}
		}
	}

	timeval tv = {0, 0};
	EmSelect (0, NULL, NULL, NULL, &tv);

	return true;
	#else
	throw std::runtime_error ("epoll is not implemented on this platform");
	#endif
}