// Queue a request message on the backend's message queue and wake the
// backend thread through its wakeup eventfd.
static void send_request(asio_event_t* ev, int req)
{
  asio_backend_t* backend = ponyint_asio_get_backend();

  asio_msg_t* m = (asio_msg_t*)pony_alloc_msg(
    POOL_INDEX(sizeof(asio_msg_t)), 0);
  m->event = ev;
  m->flags = req;

  ponyint_messageq_push(&backend->q, (pony_msg_t*)m);

  // Nudge the backend thread out of its epoll_wait.
  eventfd_write(backend->wakeup, 1);
}
// Register an event with the epoll backend (older variant: signal() based,
// always edge-triggered). No-op for NULL or already-disposed/destroyed
// events.
void pony_asio_event_subscribe(asio_event_t* ev)
{
  if((ev == NULL) ||
    (ev->flags == ASIO_DISPOSABLE) ||
    (ev->flags == ASIO_DESTROYED))
    return;

  asio_backend_t* b = ponyint_asio_get_backend();

  // Noisy events keep the runtime alive while waiting for external input.
  // NOTE(review): this is incremented before the ASIO_SIGNAL early-return
  // below; a failed signal CAS leaves the noisy count raised — confirm
  // whether that is intended.
  if(ev->noisy)
    ponyint_asio_noisy_add();

  struct epoll_event ep;
  ep.data.ptr = ev;
  ep.events = EPOLLRDHUP | EPOLLET;  // edge-triggered, detect peer hangup

  if(ev->flags & ASIO_READ)
    ep.events |= EPOLLIN;

  if(ev->flags & ASIO_WRITE)
    ep.events |= EPOLLOUT;

  if(ev->flags & ASIO_TIMER)
  {
    // Timers are backed by a non-blocking timerfd; ev->nsec is the interval.
    // NOTE(review): timerfd_create's return value is unchecked — on failure
    // a -1 fd would flow into timer_set_nsec/epoll_ctl below.
    ev->fd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK);
    timer_set_nsec(ev->fd, ev->nsec);
    ep.events |= EPOLLIN;
  }

  if(ev->flags & ASIO_SIGNAL)
  {
    // For signal events, ev->nsec carries the signal number.
    int sig = (int)ev->nsec;
    asio_event_t* prev = NULL;

    // Claim the per-signal slot; only one event may own a given signal.
    if((sig < MAX_SIGNAL) &&
      atomic_compare_exchange_strong_explicit(&b->sighandlers[sig], &prev,
        ev, memory_order_release, memory_order_relaxed))
    {
      signal(sig, signal_handler);
      // The handler notifies us through this eventfd rather than the raw
      // signal, keeping epoll as the single wakeup mechanism.
      ev->fd = eventfd(0, EFD_NONBLOCK);
      ep.events |= EPOLLIN;
    } else {
      // Slot already taken (or signal out of range): do not register.
      return;
    }
  }

  epoll_ctl(b->epfd, EPOLL_CTL_ADD, ev->fd, &ep);
}
static void signal_handler(int sig) { if(sig >= MAX_SIGNAL) return; // Reset the signal handler. signal(sig, signal_handler); asio_backend_t* b = ponyint_asio_get_backend(); asio_event_t* ev = atomic_load_explicit(&b->sighandlers[sig], memory_order_acquire); if(ev == NULL) return; eventfd_write(ev->fd, 1); }
// Queue a request message for the backend thread and wake it through the
// wakeup eventfd (newer variant: thread-aware queue, optional dtrace IDs).
static void send_request(asio_event_t* ev, int req)
{
  asio_backend_t* backend = ponyint_asio_get_backend();
  pony_assert(backend != NULL);

  asio_msg_t* m = (asio_msg_t*)pony_alloc_msg(
    POOL_INDEX(sizeof(asio_msg_t)), 0);
  m->event = ev;
  m->flags = req;

  // Single-message push: the message is both first and last of the batch.
  ponyint_thread_messageq_push(&backend->q, (pony_msg_t*)m, (pony_msg_t*)m
#ifdef USE_DYNAMIC_TRACE
    , SPECIAL_THREADID_EPOLL, SPECIAL_THREADID_EPOLL
#endif
    );

  eventfd_write(backend->wakeup, 1);
}
// Remove an event from the epoll backend (older variant). Detaches the fd
// from epoll, releases timer/signal resources, marks the event disposable,
// and asks the backend thread to complete disposal.
void pony_asio_event_unsubscribe(asio_event_t* ev)
{
  // Already disposed (or never valid): nothing to do.
  if((ev == NULL) ||
    (ev->flags == ASIO_DISPOSABLE) ||
    (ev->flags == ASIO_DESTROYED))
    return;

  asio_backend_t* b = ponyint_asio_get_backend();

  // Balance the noisy count taken at subscribe time.
  if(ev->noisy)
  {
    ponyint_asio_noisy_remove();
    ev->noisy = false;
  }

  epoll_ctl(b->epfd, EPOLL_CTL_DEL, ev->fd, NULL);

  if(ev->flags & ASIO_TIMER)
  {
    // Timer events own their timerfd; close it.
    if(ev->fd != -1)
    {
      close(ev->fd);
      ev->fd = -1;
    }
  }

  if(ev->flags & ASIO_SIGNAL)
  {
    // ev->nsec carries the signal number for signal events.
    int sig = (int)ev->nsec;
    asio_event_t* prev = ev;

    // Release the per-signal slot only if this event still owns it.
    if((sig < MAX_SIGNAL) &&
      atomic_compare_exchange_strong_explicit(&b->sighandlers[sig], &prev,
        NULL, memory_order_release, memory_order_relaxed))
    {
      // Restore the default disposition and close the notification eventfd.
      signal(sig, SIG_DFL);
      close(ev->fd);
      ev->fd = -1;
    }
  }

  ev->flags = ASIO_DISPOSABLE;
  send_request(ev, ASIO_DISPOSABLE);
}
static void signal_handler(int sig) { if(sig >= MAX_SIGNAL) return; asio_backend_t* b = ponyint_asio_get_backend(); pony_assert(b != NULL); asio_event_t* ev = atomic_load_explicit(&b->sighandlers[sig], memory_order_acquire); #ifdef USE_VALGRIND ANNOTATE_HAPPENS_AFTER(&b->sighandlers[sig]); #endif if(ev == NULL) return; eventfd_write(ev->fd, 1); }
// Single function for resubscribing to both reads and writes for an event PONY_API void pony_asio_event_resubscribe(asio_event_t* ev) { // needs to be a valid event that is one shot enabled if((ev == NULL) || (ev->flags == ASIO_DISPOSABLE) || (ev->flags == ASIO_DESTROYED) || !(ev->flags & ASIO_ONESHOT)) { pony_assert(0); return; } asio_backend_t* b = ponyint_asio_get_backend(); pony_assert(b != NULL); struct epoll_event ep; ep.data.ptr = ev; ep.events = EPOLLONESHOT; bool something_to_resub = false; // if the event is supposed to be listening for write notifications // and it is currently not writeable if((ev->flags & ASIO_WRITE) && !ev->writeable) { something_to_resub = true; ep.events |= EPOLLOUT; } // if the event is supposed to be listening for read notifications // and it is currently not readable if((ev->flags & ASIO_READ) && !ev->readable) { something_to_resub = true; ep.events |= EPOLLRDHUP; ep.events |= EPOLLIN; } // only resubscribe if there is something to resubscribe to if (something_to_resub) epoll_ctl(b->epfd, EPOLL_CTL_MOD, ev->fd, &ep); }
// Remove an event from the epoll backend. Detaches the fd from epoll,
// releases timer/signal resources, updates the runtime's noisy-actor
// accounting, marks the event disposable, and asks the backend thread to
// complete disposal.
PONY_API void pony_asio_event_unsubscribe(asio_event_t* ev)
{
  if((ev == NULL) ||
    (ev->flags == ASIO_DISPOSABLE) ||
    (ev->flags == ASIO_DESTROYED))
  {
    pony_assert(0);
    return;
  }

  asio_backend_t* b = ponyint_asio_get_backend();
  pony_assert(b != NULL);

  if(ev->noisy)
  {
    uint64_t old_count = ponyint_asio_noisy_remove();
    // tell scheduler threads that asio has no noisy actors
    // if the old_count was 1
    if (old_count == 1)
    {
      ponyint_sched_unnoisy_asio(SPECIAL_THREADID_EPOLL);

      // maybe wake up a scheduler thread if they've all fallen asleep
      ponyint_sched_maybe_wakeup_if_all_asleep(-1);
    }

    ev->noisy = false;
  }

  epoll_ctl(b->epfd, EPOLL_CTL_DEL, ev->fd, NULL);

  if(ev->flags & ASIO_TIMER)
  {
    // Timer events own their timerfd; close it.
    if(ev->fd != -1)
    {
      close(ev->fd);
      ev->fd = -1;
    }
  }

  if(ev->flags & ASIO_SIGNAL)
  {
    // ev->nsec carries the signal number for signal events.
    int sig = (int)ev->nsec;
    asio_event_t* prev = ev;

#ifdef USE_VALGRIND
    ANNOTATE_HAPPENS_BEFORE(&b->sighandlers[sig]);
#endif

    // Release the per-signal slot only if this event still owns it.
    if((sig < MAX_SIGNAL) &&
      atomic_compare_exchange_strong_explicit(&b->sighandlers[sig], &prev,
        NULL, memory_order_release, memory_order_relaxed))
    {
      struct sigaction new_action;

#if !defined(USE_SCHEDULER_SCALING_PTHREADS)
      // Make sure we ignore signals related to scheduler sleeping/waking
      // as the default for those signals is termination
      if(sig == PONY_SCHED_SLEEP_WAKE_SIGNAL)
        new_action.sa_handler = empty_signal_handler;
      else
#endif
        new_action.sa_handler = SIG_DFL;

      sigemptyset (&new_action.sa_mask);

      // ask to restart interrupted syscalls to match `signal` behavior
      new_action.sa_flags = SA_RESTART;

      sigaction(sig, &new_action, NULL);

      // Close the notification eventfd created at subscribe time.
      close(ev->fd);
      ev->fd = -1;
    }
  }

  ev->flags = ASIO_DISPOSABLE;
  send_request(ev, ASIO_DISPOSABLE);
}
// Register an event with the epoll backend (newer variant: sigaction based,
// edge-triggered unless one-shot is requested). Asserts and returns for
// NULL or already-disposed/destroyed events.
PONY_API void pony_asio_event_subscribe(asio_event_t* ev)
{
  if((ev == NULL) ||
    (ev->flags == ASIO_DISPOSABLE) ||
    (ev->flags == ASIO_DESTROYED))
  {
    pony_assert(0);
    return;
  }

  asio_backend_t* b = ponyint_asio_get_backend();
  pony_assert(b != NULL);

  if(ev->noisy)
  {
    uint64_t old_count = ponyint_asio_noisy_add();
    // tell scheduler threads that asio has at least one noisy actor
    // if the old_count was 0
    if (old_count == 0)
      ponyint_sched_noisy_asio(SPECIAL_THREADID_EPOLL);
  }

  struct epoll_event ep;
  ep.data.ptr = ev;
  ep.events = EPOLLRDHUP;  // always detect peer hangup

  if(ev->flags & ASIO_READ)
    ep.events |= EPOLLIN;

  if(ev->flags & ASIO_WRITE)
    ep.events |= EPOLLOUT;

  if(ev->flags & ASIO_TIMER)
  {
    // Timers are backed by a non-blocking timerfd; ev->nsec is the interval.
    // NOTE(review): timerfd_create's return value is unchecked — on failure
    // a -1 fd would flow into timer_set_nsec/epoll_ctl below.
    ev->fd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK);
    timer_set_nsec(ev->fd, ev->nsec);
    ep.events |= EPOLLIN;
  }

  if(ev->flags & ASIO_SIGNAL)
  {
    // For signal events, ev->nsec carries the signal number.
    int sig = (int)ev->nsec;
    asio_event_t* prev = NULL;

#ifdef USE_VALGRIND
    ANNOTATE_HAPPENS_BEFORE(&b->sighandlers[sig]);
#endif

    // Claim the per-signal slot; only one event may own a given signal.
    if((sig < MAX_SIGNAL) &&
      atomic_compare_exchange_strong_explicit(&b->sighandlers[sig], &prev,
        ev, memory_order_release, memory_order_relaxed))
    {
      struct sigaction new_action;
      new_action.sa_handler = signal_handler;
      sigemptyset (&new_action.sa_mask);

      // ask to restart interrupted syscalls to match `signal` behavior
      new_action.sa_flags = SA_RESTART;

      sigaction(sig, &new_action, NULL);

      // The handler notifies us through this eventfd rather than the raw
      // signal, keeping epoll as the single wakeup mechanism.
      ev->fd = eventfd(0, EFD_NONBLOCK);
      ep.events |= EPOLLIN;
    } else {
      // Slot already taken (or signal out of range): do not register.
      // NOTE(review): if ev->noisy, the noisy count raised above is not
      // rolled back on this path — confirm whether that is intended.
      return;
    }
  }

  if(ev->flags & ASIO_ONESHOT)
  {
    ep.events |= EPOLLONESHOT;
  } else {
    // Only use edge triggering if one shot isn't enabled.
    // This is because of how the runtime gets notifications
    // from epoll in this ASIO thread and then notifies the
    // appropriate actor to read/write as necessary.

    // specifically, it seems there's an edge case/race condition
    // with edge triggering where if there is already data waiting
    // on the socket, then epoll might not be triggering immediately
    // when an edge triggered epoll request is made.
    ep.events |= EPOLLET;
  }

  epoll_ctl(b->epfd, EPOLL_CTL_ADD, ev->fd, &ep);
}