/* Deschedule an event from the kqueue backend.
 * Returns the event that was actually removed, or NULL if it was not
 * registered (or was already replaced in the master fd table). */
static eventer_t eventer_kqueue_impl_remove(eventer_t e) {
  eventer_t dead = NULL;

  /* Asynch events are owned by the job queues and may never reach here. */
  if(e->mask & EVENTER_ASYNCH) {
    mtevFatal(mtev_error, "error in eventer_kqueue_impl_remove: got unexpected EVENTER_ASYNCH mask\n");
  }

  if(e->mask & (EVENTER_READ | EVENTER_WRITE | EVENTER_EXCEPTION)) {
    /* fd-based: detach from the master table under the per-fd lock,
     * then queue the matching kqueue filter deletions. */
    ev_lock_state_t ls = acquire_master_fd(e->fd);
    mtevL(eventer_deb, "kqueue: remove(%d)\n", e->fd);
    if(master_fds[e->fd].e == e) {
      dead = e;
      master_fds[e->fd].e = NULL;
      if(e->mask & (EVENTER_READ | EVENTER_EXCEPTION))
        ke_change(e->fd, EVFILT_READ, EV_DELETE | EV_DISABLE, e);
      if(e->mask & (EVENTER_WRITE))
        ke_change(e->fd, EVFILT_WRITE, EV_DELETE | EV_DISABLE, e);
    }
    else {
      mtevL(eventer_deb, "kqueue: remove(%d) failed.\n", e->fd);
    }
    release_master_fd(e->fd, ls);
  }
  else if(e->mask & EVENTER_TIMER) {
    dead = eventer_remove_timed(e);
  }
  else if(e->mask & EVENTER_RECURRENT) {
    dead = eventer_remove_recurrent(e);
  }
  else {
    mtevFatal(mtev_error, "error in eventer_kqueue_impl_remove: got unknown mask (0x%04x)\n", e->mask);
  }
  return dead;
}
/* Deschedule an event from the Solaris event-ports backend.
 * Returns the removed event, or NULL if it was not the registered owner
 * of its fd slot. */
static eventer_t eventer_ports_impl_remove(eventer_t e) {
  eventer_t dead = NULL;

  /* Asynch events never belong to this backend. */
  if(e->mask & EVENTER_ASYNCH) {
    mtevFatal(mtev_error, "error in eventer_ports_impl_remove: got unexpected EVENTER_ASYNCH mask\n");
  }

  if(e->mask & (EVENTER_READ | EVENTER_WRITE | EVENTER_EXCEPTION)) {
    /* fd-based: detach under the per-fd lock and dissociate from the port. */
    ev_lock_state_t ls = acquire_master_fd(e->fd);
    if(master_fds[e->fd].e == e) {
      dead = e;
      master_fds[e->fd].e = NULL;
      alter_fd(e, 0);
    }
    release_master_fd(e->fd, ls);
  }
  else if(e->mask & EVENTER_TIMER) {
    dead = eventer_remove_timed(e);
  }
  else if(e->mask & EVENTER_RECURRENT) {
    dead = eventer_remove_recurrent(e);
  }
  else {
    mtevFatal(mtev_error, "error in eventer_ports_impl_remove: got unknown mask (0x%04x)\n", e->mask);
  }
  return dead;
}
/* Change the interest mask for an already-known event.
 * Timed events are delegated to the timed subsystem; fd events are
 * re-registered with epoll (ADD if the prior mask was zero, MOD otherwise,
 * with a one-shot retry the other way if the kernel disagrees about
 * registration state). */
static void eventer_epoll_impl_update(eventer_t e, int mask) {
  struct epoll_event _ev;
  int ctl_op = EPOLL_CTL_MOD;
  if(e->mask & EVENTER_TIMER) {
    eventer_update_timed(e,mask);
    return;
  }
  memset(&_ev, 0, sizeof(_ev));
  _ev.data.fd = e->fd;
  /* A zero prior mask means the fd is not currently registered with epoll. */
  if(e->mask == 0) ctl_op = EPOLL_CTL_ADD;
  e->mask = mask;
  if(e->mask & (EVENTER_READ | EVENTER_WRITE | EVENTER_EXCEPTION)) {
    struct epoll_spec *spec;
    spec = eventer_get_spec_for_event(e);
    if(e->mask & EVENTER_READ) _ev.events |= (EPOLLIN|EPOLLPRI);
    if(e->mask & EVENTER_WRITE) _ev.events |= (EPOLLOUT);
    if(e->mask & EVENTER_EXCEPTION) _ev.events |= (EPOLLERR|EPOLLHUP);
    mtevL(eventer_deb, "epoll_ctl(%d, %s, %d)\n", spec->epoll_fd,
          ctl_op == EPOLL_CTL_ADD ? "add" : "mod", e->fd);
    int epoll_rv = epoll_ctl(spec->epoll_fd, ctl_op, e->fd, &_ev);
    if(epoll_rv != 0 &&
       ((ctl_op == EPOLL_CTL_ADD && errno == EEXIST) ||
        (ctl_op == EPOLL_CTL_MOD && errno == ENOENT))) {
      /* Our bookkeeping disagreed with the kernel; try the other operation. */
      ctl_op = (ctl_op == EPOLL_CTL_ADD) ? EPOLL_CTL_MOD : EPOLL_CTL_ADD;
      epoll_rv = epoll_ctl(spec->epoll_fd, ctl_op, e->fd, &_ev);
    }
    /* Fix: previously a first-attempt failure with a non-retryable errno
     * (anything other than EEXIST-on-ADD / ENOENT-on-MOD) was silently
     * ignored, leaving the fd in an unknown registration state.  Any
     * remaining failure is now fatal, matching the other epoll paths. */
    if(epoll_rv != 0) {
      mtevFatal(mtev_error, "epoll_ctl(%d, %s, %d) -> %s\n",
                spec->epoll_fd, ctl_op == EPOLL_CTL_ADD ? "add" : "mod",
                e->fd, strerror(errno));
    }
  }
}
static void *eventer_epoll_spec_alloc() { struct epoll_spec *spec; spec = calloc(1, sizeof(*spec)); spec->epoll_fd = epoll_create(1024); if(spec->epoll_fd < 0) { mtevFatal(mtev_error, "error in eveter_epoll_spec_alloc... spec->epoll_fd < 0 (%d)\n", spec->epoll_fd); } spec->event_fd = -1; #if defined(EFD_NONBLOCK) && defined(EFD_CLOEXEC) spec->event_fd = eventfd(0, EFD_NONBLOCK|EFD_CLOEXEC); #elif defined(HAVE_SYS_EVENTFD_H) spec->event_fd = eventfd(0, 0); if(spec->event_fd >= 0) { int flags; if(((flags = fcntl(spec->event_fd, F_GETFL, 0)) == -1) || (fcntl(spec->event_fd, F_SETFL, flags|O_NONBLOCK) == -1)) { close(spec->event_fd); spec->event_fd = -1; } } if(spec->event_fd >= 0) { int flags; if(((flags = fcntl(spec->event_fd, F_GETFD, 0)) == -1) || (fcntl(spec->event_fd, F_SETFD, flags|FD_CLOEXEC) == -1)) { close(spec->event_fd); spec->event_fd = -1; } } #endif return spec; }
/* Asynch-job callback that ensures the cached 1024- or 2048-bit DH
 * parameters exist: load from disk if possible, otherwise generate and
 * persist them.  `cl` carries the requested bit size.  Runs its work only
 * in the EVENTER_ASYNCH_WORK phase. */
static int generate_dh_params(eventer_t e, int mask, void *cl, struct timeval *now) {
  int keybits = (int)(intptr_t)cl;
  if(mask != EVENTER_ASYNCH_WORK) return 0;
  if(keybits == 1024) {
    if(!dh1024_tmp) dh1024_tmp = load_dh_params(dh1024_file);
    if(!dh1024_tmp) {
      mtevL(mtev_notice, "Generating 1024 bit DH parameters.\n");
      dh1024_tmp = DH_generate_parameters(1024, 2, NULL, NULL);
      mtevL(mtev_notice, "Finished generating 1024 bit DH parameters.\n");
      save_dh_params(dh1024_tmp, dh1024_file);
    }
  }
  else if(keybits == 2048) {
    if(!dh2048_tmp) dh2048_tmp = load_dh_params(dh2048_file);
    if(!dh2048_tmp) {
      mtevL(mtev_notice, "Generating 2048 bit DH parameters.\n");
      dh2048_tmp = DH_generate_parameters(2048, 2, NULL, NULL);
      mtevL(mtev_notice, "Finished generating 2048 bit DH parameters.\n");
      save_dh_params(dh2048_tmp, dh2048_file);
    }
  }
  else {
    /* Only the two supported sizes may ever be requested. */
    mtevFatal(mtev_error, "Unexpected DH parameter request: %d\n", keybits);
  }
  return 0;
}
/* Register the websocket livestream log ops and the /livestream/ REST
 * websocket endpoint.  Fatal if the endpoint cannot be registered. */
void noit_websocket_handler_init() {
  mtev_register_logops("noit_websocket_livestream", &noit_websocket_logio_ops);
  int rval = mtev_http_rest_websocket_register("/livestream/", "^(.*)$",
                                               NOIT_WEBSOCKET_DATA_FEED_PROTOCOL,
                                               noit_websocket_msg_handler);
  if (rval == -1) {
    /* Fix: "Unabled" typo and missing trailing newline (every other
     * mtevFatal message in this codebase is newline-terminated). */
    mtevFatal(mtev_error, "Unable to register websocket handler for /livestream/\n");
  }
}
static void *eventer_ports_spec_alloc(void) { struct ports_spec *spec; spec = calloc(1, sizeof(*spec)); spec->port_fd = port_create(); if(spec->port_fd < 0) { mtevFatal(mtev_error, "error in eveter_ports_spec_alloc... spec->port_fd < 0 (%d)\n", spec->port_fd); } return spec; }
/* Detach e->fd from the event port.  ENOENT (never associated) and EBADFD
 * (fd already closed) are treated as benign; any other failure is fatal. */
static void alter_fd_dissociate(eventer_t e, int mask, struct ports_spec *spec) {
  int saved_errno, rv;
  errno = 0;
  rv = port_dissociate(spec->port_fd, PORT_SOURCE_FD, e->fd);
  saved_errno = errno; /* capture before any later call can clobber it */
  if (rv != -1) return;
  if(saved_errno == ENOENT || saved_errno == EBADFD) return; /* Fine */
  mtevFatal(mtev_error, "eventer port_dissociate failed(%d-%d): %d/%s\n",
            e->fd, spec->port_fd, saved_errno, strerror(saved_errno));
}
/* Allocate and initialize a per-loop kqueue spec.
 * Fatal on allocation or kqueue() failure.  Caller owns the result. */
static void *eventer_kqueue_spec_alloc() {
  struct kqueue_spec *spec;
  spec = calloc(1, sizeof(*spec));
  /* Fix: calloc was dereferenced without a NULL check. */
  if(spec == NULL) {
    mtevFatal(mtev_error, "error in eventer_kqueue_spec_alloc... out of memory\n");
  }
  spec->kqueue_fd = kqueue();
  if(spec->kqueue_fd == -1) {
    /* Fix: message previously misspelled "eveter_" and referred to the
     * wrong field ("spec->epoll_fd" in the kqueue backend). */
    mtevFatal(mtev_error, "error in eventer_kqueue_spec_alloc... spec->kqueue_fd == -1 (%d)\n",
              spec->kqueue_fd);
  }
  kqs_init(spec);
  pthread_mutex_init(&spec->lock, NULL);
  return spec;
}
/* Deschedule an event from the epoll backend.
 * Returns the event that was removed, or NULL if it was not the registered
 * owner of its fd slot (or was a timed/recurrent event not found there). */
static eventer_t eventer_epoll_impl_remove(eventer_t e) {
  struct epoll_spec *spec;
  eventer_t removed = NULL;
  /* Asynch events are owned by the job queues; reaching here is a bug. */
  if(e->mask & EVENTER_ASYNCH) {
    mtevFatal(mtev_error, "error in eventer_epoll_impl_remove: got unexpected EVENTER_ASYNCH mask\n");
  }
  if(e->mask & (EVENTER_READ | EVENTER_WRITE | EVENTER_EXCEPTION)) {
    ev_lock_state_t lockstate;
    struct epoll_event _ev;
    spec = eventer_get_spec_for_event(e);
    memset(&_ev, 0, sizeof(_ev));
    _ev.data.fd = e->fd;
    /* Clear the master-fd slot and the kernel registration under the
     * per-fd lock so no other thread can race a re-add. */
    lockstate = acquire_master_fd(e->fd);
    if(e == master_fds[e->fd].e) {
      removed = e;
      master_fds[e->fd].e = NULL;
      mtevL(eventer_deb, "epoll_ctl(%d, del, %d)\n", spec->epoll_fd, e->fd);
      if(epoll_ctl(spec->epoll_fd, EPOLL_CTL_DEL, e->fd, &_ev) != 0) {
        mtevL(mtev_error, "epoll_ctl(%d, EPOLL_CTL_DEL, %d) -> %s\n",
              spec->epoll_fd, e->fd, strerror(errno));
        /* ENOENT just means the kernel never had it; anything else is
         * an invariant violation. */
        if(errno != ENOENT) {
          mtevFatal(mtev_error, "errno != ENOENT: %d (%s)\n",
                    errno, strerror(errno));
        }
      }
    }
    release_master_fd(e->fd, lockstate);
  }
  else if(e->mask & EVENTER_TIMER) {
    removed = eventer_remove_timed(e);
  }
  else if(e->mask & EVENTER_RECURRENT) {
    removed = eventer_remove_recurrent(e);
  }
  else {
    mtevFatal(mtev_error, "error in eventer_epoll_impl_remove: got unknown mask (0x%04x)\n", e->mask);
  }
  return removed;
}
/* Widen a non-null int32/uint32/int64 metric sample to int64.
 * Any other type (including uint64, which may not fit) or a null value is
 * fatal.  mtevFatal does not return, so the fall-through is unreachable. */
static int64_t metric_value_int64(noit_metric_value_t *v) {
  if(!v->is_null) {
    if(v->type == METRIC_INT32) return (int64_t)v->value.v_int32;
    if(v->type == METRIC_UINT32) return (int64_t)v->value.v_uint32;
    if(v->type == METRIC_INT64) return v->value.v_int64;
  }
  mtevFatal(mtev_error, "failed metric_value_int64(%x, null: %d)\n", v->type, v->is_null);
}
/* Associate e->fd with the event port, translating the eventer mask into
 * poll(2)-style event bits.  The fd itself rides along as the user cookie.
 * Any association failure is fatal. */
static void alter_fd_associate(eventer_t e, int mask, struct ports_spec *spec) {
  int port_events = 0, saved_errno, rv;
  if(mask & EVENTER_READ) port_events |= POLLIN;
  if(mask & EVENTER_WRITE) port_events |= POLLOUT;
  if(mask & EVENTER_EXCEPTION) port_events |= POLLERR;
  errno = 0;
  rv = port_associate(spec->port_fd, PORT_SOURCE_FD, e->fd,
                      port_events, (void *)(intptr_t)e->fd);
  saved_errno = errno; /* capture before any later call can clobber it */
  if (rv == -1) {
    mtevFatal(mtev_error, "eventer port_associate failed(%d-%d): %d/%s\n",
              e->fd, spec->port_fd, saved_errno, strerror(saved_errno));
  }
}
/* Convert any non-null numeric metric sample to double.
 * Strings, absent values, and nulls are fatal.  mtevFatal does not return,
 * so the fall-through is unreachable. */
static double metric_value_double(noit_metric_value_t *v) {
  if(!v->is_null) {
    if(v->type == METRIC_INT32)  return (double)v->value.v_int32;
    if(v->type == METRIC_UINT32) return (double)v->value.v_uint32;
    if(v->type == METRIC_INT64)  return (double)v->value.v_int64;
    if(v->type == METRIC_UINT64) return (double)v->value.v_uint64;
    if(v->type == METRIC_DOUBLE) return v->value.v_double;
  }
  mtevFatal(mtev_error, "failed metric_value_double(%x, null: %d)\n", v->type, v->is_null);
}
/* Schedule an event with the epoll backend.
 * Asynch, recurrent, and timed events are delegated to their own
 * subsystems; fd events are registered with epoll and recorded in the
 * master fd table under the per-fd lock.  A failed EPOLL_CTL_ADD is fatal. */
static void eventer_epoll_impl_add(eventer_t e) {
  int rv;
  struct epoll_spec *spec;
  struct epoll_event _ev;
  ev_lock_state_t lockstate;
  /* An event with no mask is meaningless; that's a caller bug. */
  mtevAssert(e->mask);
  if(e->mask & EVENTER_ASYNCH) {
    eventer_add_asynch(NULL, e);
    return;
  }
  /* Recurrent delegation */
  if(e->mask & EVENTER_RECURRENT) {
    eventer_add_recurrent(e);
    return;
  }
  /* Timed events are simple */
  if(e->mask & EVENTER_TIMER) {
    eventer_add_timed(e);
    return;
  }
  spec = eventer_get_spec_for_event(e);
  /* file descriptor event: a pending "whence" would indicate a timed
   * event mislabelled with an fd mask. */
  mtevAssert(e->whence.tv_sec == 0 && e->whence.tv_usec == 0);
  memset(&_ev, 0, sizeof(_ev));
  _ev.data.fd = e->fd;
  if(e->mask & EVENTER_READ) _ev.events |= (EPOLLIN|EPOLLPRI);
  if(e->mask & EVENTER_WRITE) _ev.events |= (EPOLLOUT);
  if(e->mask & EVENTER_EXCEPTION) _ev.events |= (EPOLLERR|EPOLLHUP);
  /* Publish into the master table and register with the kernel under the
   * per-fd lock so removal cannot race us. */
  lockstate = acquire_master_fd(e->fd);
  master_fds[e->fd].e = e;
  mtevL(eventer_deb, "epoll_ctl(%d, add, %d)\n", spec->epoll_fd, e->fd);
  rv = epoll_ctl(spec->epoll_fd, EPOLL_CTL_ADD, e->fd, &_ev);
  if(rv != 0) {
    mtevFatal(mtev_error, "epoll_ctl(%d,add,%d,%x) -> %d (%d: %s)\n",
              spec->epoll_fd, e->fd, e->mask, rv, errno, strerror(errno));
  }
  release_master_fd(e->fd, lockstate);
}
/* Return 1 if a non-null numeric metric sample is negative, 0 otherwise.
 * Unsigned types are negative by definition never.  Strings, absent values,
 * and nulls are fatal (mtevFatal does not return). */
static int metric_value_is_negative(noit_metric_value_t *v) {
  if(!v->is_null) {
    if(v->type == METRIC_UINT32 || v->type == METRIC_UINT64) return 0;
    if(v->type == METRIC_INT32)  return (v->value.v_int32 < 0);
    if(v->type == METRIC_INT64)  return (v->value.v_int64 < 0);
    if(v->type == METRIC_DOUBLE) return (v->value.v_double < 0);
  }
  mtevFatal(mtev_error, "failed metric_value_is_negative(%x, null: %d)\n", v->type, v->is_null);
}
const char *strnstrn(const char *needle, int needle_len, const char *haystack, int haystack_len) { int i=0, j=0, compiled[KMPPATSIZE]; if(needle_len > KMPPATSIZE) { mtevFatal(mtev_error, "errorin strnstrn: needle_len (%d) < KMPPATSIZE (%d)\n", needle_len, KMPPATSIZE); } kmp_precompute(needle, needle_len, compiled); while (j < haystack_len) { while (i > -1 && needle[i] != haystack[j]) i = compiled[i]; i++; j++; if (i >= needle_len) { return haystack + j - i; } } return NULL; }
/* Change the interest mask for an already-registered event.
 * Timed events are delegated to the timed subsystem; fd events have their
 * epoll registration modified in place.  Fatal if EPOLL_CTL_MOD fails. */
static void eventer_epoll_impl_update(eventer_t e, int mask) {
  struct epoll_event _ev;
  if(e->mask & EVENTER_TIMER) {
    eventer_update_timed(e,mask);
    return;
  }
  memset(&_ev, 0, sizeof(_ev));
  _ev.data.fd = e->fd;
  e->mask = mask;
  /* Nothing to tell the kernel unless an fd-interest bit is set. */
  if(!(e->mask & (EVENTER_READ | EVENTER_WRITE | EVENTER_EXCEPTION))) return;
  struct epoll_spec *spec = eventer_get_spec_for_event(e);
  if(e->mask & EVENTER_READ) _ev.events |= (EPOLLIN|EPOLLPRI);
  if(e->mask & EVENTER_WRITE) _ev.events |= (EPOLLOUT);
  if(e->mask & EVENTER_EXCEPTION) _ev.events |= (EPOLLERR|EPOLLHUP);
  if(epoll_ctl(spec->epoll_fd, EPOLL_CTL_MOD, e->fd, &_ev) != 0) {
    mtevFatal(mtev_error, "epoll_ctl(%d, EPOLL_CTL_MOD, %d) -> %s\n",
              spec->epoll_fd, e->fd, strerror(errno));
  }
}
/* Deschedule whatever event owns `fd`, by fd alone.
 * Returns the event that was removed, or NULL if the slot was empty.
 * NOTE(review): master_fds[fd].e is tested before the per-fd lock is taken
 * and not re-checked afterwards — presumably callers guarantee no
 * concurrent removal of the same fd; confirm against the locking model. */
static eventer_t eventer_epoll_impl_remove_fd(int fd) {
  eventer_t eiq = NULL;
  ev_lock_state_t lockstate;
  if(master_fds[fd].e) {
    struct epoll_spec *spec;
    struct epoll_event _ev;
    memset(&_ev, 0, sizeof(_ev));
    _ev.data.fd = fd;
    lockstate = acquire_master_fd(fd);
    eiq = master_fds[fd].e;
    spec = eventer_get_spec_for_event(eiq);
    master_fds[fd].e = NULL;
    if(epoll_ctl(spec->epoll_fd, EPOLL_CTL_DEL, fd, &_ev) != 0) {
      mtevL(mtev_error, "epoll_ctl(%d, EPOLL_CTL_DEL, %d) -> %s\n",
            spec->epoll_fd, fd, strerror(errno));
      /* ENOENT means the kernel never had the fd; anything else is an
       * invariant violation. */
      if(errno != ENOENT) {
        mtevFatal(mtev_error, "errno != ENOENT: %d (%s)\n",
                  errno, strerror(errno));
      }
    }
    release_master_fd(fd, lockstate);
  }
  return eiq;
}
/* Fire an event's callback for the given mask and re-register (or tear
 * down) its epoll state according to the mask the callback returns.
 * Handles cross-thread triggers, recursive re-triggers from inside a
 * callback, re-adding floated events, and migrating an event to another
 * thread's epoll instance when the callback changed thr_owner. */
static void eventer_epoll_impl_trigger(eventer_t e, int mask) {
  struct epoll_spec *spec;
  struct timeval __now;
  int fd, newmask, needs_add = 0;
  const char *cbname;
  ev_lock_state_t lockstate;
  int cross_thread = mask & EVENTER_CROSS_THREAD_TRIGGER;
  uint64_t start, duration;
  /* Strip bookkeeping bits before handing the mask to the callback. */
  mask = mask & ~(EVENTER_RESERVED);
  fd = e->fd;
  if(cross_thread) {
    if(master_fds[fd].e != NULL) {
      mtevL(eventer_deb, "Attempting to trigger already-registered event fd: %d cross thread.\n", fd);
    }
    /* mtevAssert(master_fds[fd].e == NULL); */
  }
  if(!pthread_equal(pthread_self(), e->thr_owner)) {
    /* If we're triggering across threads, it can't be registered yet */
    if(master_fds[fd].e != NULL) {
      mtevL(eventer_deb, "Attempting to trigger already-registered event fd: %d cross thread.\n", fd);
    }
    /* mtevAssert(master_fds[fd].e == NULL); */
    /* Hand the trigger off to the owning thread's queue. */
    eventer_cross_thread_trigger(e,mask);
    return;
  }
  if(master_fds[fd].e == NULL) {
    lockstate = acquire_master_fd(fd);
    if (lockstate == EV_ALREADY_OWNED) {
      /* The incoming triggered event is already owned by this thread.
       * This means our floated event completed before the current
       * event handler even exited.  So it retriggered recursively
       * from inside the event handler.
       *
       * Treat this special case the same as a cross thread trigger
       * and just queue this event to be picked up on the next loop */
      eventer_cross_thread_trigger(e, mask);
      return;
    }
    /* If we are readding the event to the master list here, also do the
     * needful with the epoll_ctl.
     *
     * This can happen in cases where some event was floated and the float
     * completed so fast that we finished the job in the same thread
     * that it started in.  Since we `eventer_remove_fd` before we float,
     * the re-add here should replace the fd in the epoll_ctl. */
    master_fds[fd].e = e;
    e->mask = 0;
    struct epoll_event _ev;
    memset(&_ev, 0, sizeof(_ev));
    _ev.data.fd = fd;
    spec = eventer_get_spec_for_event(e);
    if(mask & EVENTER_READ) _ev.events |= (EPOLLIN|EPOLLPRI);
    if(mask & EVENTER_WRITE) _ev.events |= (EPOLLOUT);
    if(mask & EVENTER_EXCEPTION) _ev.events |= (EPOLLERR|EPOLLHUP);
    mtevL(eventer_deb, "epoll_ctl(%d, add, %d)\n", spec->epoll_fd, fd);
    if (epoll_ctl(spec->epoll_fd, EPOLL_CTL_ADD, fd, &_ev) != 0) {
      mtevL(mtev_error, "epoll_ctl(%d, add, %d, %d)\n", spec->epoll_fd, fd, errno);
    }
    release_master_fd(fd, lockstate);
  }
  if(e != master_fds[fd].e) {
    /* Stale trigger: another event has taken this fd slot since. */
    mtevL(mtev_error, "Incoming event: %p, does not match master list: %p\n", e, master_fds[fd].e);
    return;
  }
  lockstate = acquire_master_fd(fd);
  if(lockstate == EV_ALREADY_OWNED) {
    mtevL(eventer_deb, "Incoming event: %p already owned by this thread\n", e);
    return;
  }
  mtevAssert(lockstate == EV_OWNED);
  mtev_gettimeofday(&__now, NULL);
  cbname = eventer_name_for_callback_e(e->callback, e);
  spec = eventer_get_spec_for_event(e);
  mtevLT(eventer_deb, &__now, "epoll(%d): fire on %d/%x to %s(%p)\n",
         spec->epoll_fd, fd, mask, cbname?cbname:"???", e->callback);
  /* Run the callback inside an SMR epoch, bracketed by DTrace probes and
   * latency accounting. */
  mtev_memory_begin();
  LIBMTEV_EVENTER_CALLBACK_ENTRY((void *)e, (void *)e->callback, (char *)cbname, fd, e->mask, mask);
  start = mtev_gethrtime();
  newmask = e->callback(e, mask, e->closure, &__now);
  duration = mtev_gethrtime() - start;
  LIBMTEV_EVENTER_CALLBACK_RETURN((void *)e, (void *)e->callback, (char *)cbname, newmask);
  mtev_memory_end();
  stats_set_hist_intscale(eventer_callback_latency, duration, -9, 1);
  stats_set_hist_intscale(eventer_latency_handle_for_callback(e->callback), duration, -9, 1);
  if(newmask) {
    /* The callback wants to stay scheduled: rebuild the epoll interest
     * set from the returned mask. */
    struct epoll_event _ev;
    memset(&_ev, 0, sizeof(_ev));
    _ev.data.fd = fd;
    if(newmask & EVENTER_READ) _ev.events |= (EPOLLIN|EPOLLPRI);
    if(newmask & EVENTER_WRITE) _ev.events |= (EPOLLOUT);
    if(newmask & EVENTER_EXCEPTION) _ev.events |= (EPOLLERR|EPOLLHUP);
    if(master_fds[fd].e == NULL) {
      mtevL(mtev_debug, "eventer %s(%p) epoll asked to modify descheduled fd: %d\n",
            cbname?cbname:"???", e->callback, fd);
    }
    else {
      if(!pthread_equal(pthread_self(), e->thr_owner)) {
        /* The callback reassigned ownership: move the fd from our epoll
         * instance to the target thread's instance. */
        pthread_t tgt = e->thr_owner;
        e->thr_owner = pthread_self();
        spec = eventer_get_spec_for_event(e);
        if(e->mask != 0 && !needs_add) {
          mtevL(eventer_deb, "epoll_ctl(%d, del, %d)\n", spec->epoll_fd, fd);
          if(epoll_ctl(spec->epoll_fd, EPOLL_CTL_DEL, fd, &_ev) != 0) {
            mtevFatal(mtev_error,
                      "epoll_ctl(spec->epoll_fd, EPOLL_CTL_DEL, fd, &_ev) failed; "
                      "spec->epoll_fd: %d; fd: %d; errno: %d (%s)\n",
                      spec->epoll_fd, fd, errno, strerror(errno));
          }
        }
        e->thr_owner = tgt;
        spec = eventer_get_spec_for_event(e);
        mtevL(eventer_deb, "epoll_ctl(%d, add, %d)\n", spec->epoll_fd, fd);
        mtevAssert(epoll_ctl(spec->epoll_fd, EPOLL_CTL_ADD, fd, &_ev) == 0);
        mtevL(eventer_deb, "epoll(%d) moved event[%p] from t@%d to t@%d\n",
              spec->epoll_fd, e, (int)pthread_self(), (int)tgt);
      }
      else {
        int epoll_rv;
        /* ADD if not currently registered, MOD otherwise; retry the other
         * way if the kernel's view disagrees with ours. */
        int epoll_cmd = (e->mask == 0 || needs_add) ? EPOLL_CTL_ADD : EPOLL_CTL_MOD;
        spec = eventer_get_spec_for_event(e);
        mtevL(eventer_deb, "epoll_ctl(%d, %s, %d)\n", spec->epoll_fd,
              epoll_cmd == EPOLL_CTL_ADD ? "add" : "mod", fd);
        epoll_rv = epoll_ctl(spec->epoll_fd, epoll_cmd, fd, &_ev);
        if(epoll_rv != 0 &&
           ((epoll_cmd == EPOLL_CTL_ADD && errno == EEXIST) ||
            (epoll_cmd == EPOLL_CTL_MOD && errno == ENOENT))) {
          /* try the other way */
          epoll_cmd = (epoll_cmd == EPOLL_CTL_ADD) ? EPOLL_CTL_MOD : EPOLL_CTL_ADD;
          mtevL(eventer_deb, "retry epoll_ctl(%d, %s, %d)\n", spec->epoll_fd,
                epoll_cmd == EPOLL_CTL_ADD ? "add" : "mod", fd);
          epoll_rv = epoll_ctl(spec->epoll_fd, epoll_cmd, fd, &_ev);
        }
        if(epoll_rv != 0) {
          const char *cb_name = eventer_name_for_callback_e(e->callback, e);
          mtevFatal(mtev_error,
                    "epoll_ctl(spec->epoll_fd, %s, fd, &_ev) failed; "
                    "spec->epoll_fd: %d; fd: %d; errno: %d (%s); callback: %s\n",
                    epoll_cmd == EPOLL_CTL_ADD ? "EPOLL_CTL_ADD" : "EPOLL_CTL_MOD",
                    spec->epoll_fd, fd, errno, strerror(errno),
                    cb_name ? cb_name : "???");
        }
      }
    }
    /* Set our mask */
    e->mask = newmask;
  }
  else {
    /* see kqueue implementation for details on the next line */
    if(master_fds[fd].e == e) {
      /* if newmask == 0 the user has floated the connection.  If we get here
       * and they have not called `eventer_remove_fd` it is a misuse of mtev.
       *
       * Check if they are compliant with floats here and remove_fd if they
       * forgot to and warn in the log */
      spec = eventer_get_spec_for_event(e);
      struct epoll_event _ev;
      memset(&_ev, 0, sizeof(_ev));
      _ev.data.fd = fd;
      if (epoll_ctl(spec->epoll_fd, EPOLL_CTL_DEL, e->fd, &_ev) == 0) {
        mtevL(mtev_error, "WARNING: You forgot to 'eventer_remove_fd()' before returning a mask of zero.\n");
      }
      master_fds[fd].e = NULL;
    }
    eventer_free(e);
  }
  release_master_fd(fd, lockstate);
}
/* The kqueue backend's main event loop (never returns).
 * Each iteration: dispatch timed/cross-thread/recurrent work, then block in
 * kevent() (which simultaneously submits pending ke_vec changelist entries),
 * then process fd readiness in three passes: clear per-fd masks, aggregate
 * filters into masks, and finally fire callbacks once per fd. */
static int eventer_kqueue_impl_loop() {
  struct timeval __dyna_sleep = { 0, 0 };
  KQUEUE_DECL;
  KQUEUE_SETUP(NULL);
  /* Arm the EVFILT_USER wakeup so other threads can interrupt our sleep. */
  if(eventer_kqueue_impl_register_wakeup(kqs) == -1) {
    mtevFatal(mtev_error, "error in eventer_kqueue_impl_loop: could not eventer_kqueue_impl_register_wakeup\n");
  }
  while(1) {
    struct timeval __now, __sleeptime;
    struct timespec __kqueue_sleeptime;
    int fd_cnt = 0;
    /* __dyna_sleep grows on idle iterations (backoff) but is capped. */
    if(compare_timeval(eventer_max_sleeptime, __dyna_sleep) < 0)
      __dyna_sleep = eventer_max_sleeptime;
    __sleeptime = __dyna_sleep;
    /* NOTE(review): __now is read below without an explicit init here;
     * eventer_dispatch_timed appears to populate it — confirm. */
    eventer_dispatch_timed(&__now, &__sleeptime);
    /* Don't sleep longer than the idle backoff allows. */
    if(compare_timeval(__sleeptime, __dyna_sleep) > 0)
      __sleeptime = __dyna_sleep;
    /* Handle cross_thread dispatches */
    eventer_cross_thread_process();
    /* Handle recurrent events */
    eventer_dispatch_recurrent(&__now);
    /* Now we move on to our fd-based events */
    __kqueue_sleeptime.tv_sec = __sleeptime.tv_sec;
    __kqueue_sleeptime.tv_nsec = __sleeptime.tv_usec * 1000;
    /* One call both submits the pending changelist (ke_vec) and collects
     * ready events into the same vector. */
    fd_cnt = kevent(kqs->kqueue_fd, ke_vec, ke_vec_used, ke_vec, ke_vec_a,
                    &__kqueue_sleeptime);
    kqs->wakeup_notify = 0;
    if(fd_cnt > 0 || ke_vec_used)
      mtevLT(eventer_deb, &__now, "[t@%llx] kevent(%d, [...], %d) => %d\n",
             (vpsized_int)pthread_self(), kqs->kqueue_fd, ke_vec_used, fd_cnt);
    ke_vec_used = 0;
    if(fd_cnt < 0) {
      mtevLT(eventer_err, &__now, "kevent(s/%d): %s\n", kqs->kqueue_fd, strerror(errno));
    }
    else if(fd_cnt == 0 ||
            (fd_cnt == 1 && ke_vec[0].filter == EVFILT_USER)) {
      /* timeout: nothing (or only a wakeup) happened; back off the sleep. */
      if(fd_cnt) eventer_kqueue_impl_register_wakeup(kqs);
      add_timeval(__dyna_sleep, __dyna_increment, &__dyna_sleep);
    }
    else {
      int idx;
      __dyna_sleep.tv_sec = __dyna_sleep.tv_usec = 0; /* reset */
      /* loop once to clear the per-fd aggregation masks */
      for(idx = 0; idx < fd_cnt; idx++) {
        struct kevent *ke;
        ke = &ke_vec[idx];
        if(ke->flags & EV_ERROR) continue;
        if(ke->filter == EVFILT_USER) {
          /* EVFILT_USER is one-shot; re-arm the wakeup. */
          eventer_kqueue_impl_register_wakeup(kqs);
          continue;
        }
        masks[ke->ident] = 0;
      }
      /* Loop again to aggregate read/write filters per fd */
      for(idx = 0; idx < fd_cnt; idx++) {
        struct kevent *ke;
        ke = &ke_vec[idx];
        if(ke->flags & EV_ERROR) continue;
        if(ke->filter == EVFILT_USER) continue;
        if(ke->filter == EVFILT_READ) masks[ke->ident] |= EVENTER_READ;
        if(ke->filter == EVFILT_WRITE) masks[ke->ident] |= EVENTER_WRITE;
      }
      /* Loop a last time to process each fd exactly once */
      for(idx = 0; idx < fd_cnt; idx++) {
        struct kevent *ke;
        eventer_t e;
        int fd;
        ke = &ke_vec[idx];
        if(ke->filter == EVFILT_USER) continue;
        if(ke->flags & EV_ERROR) {
          /* EBADF/ENOENT just mean the fd went away under us. */
          if(ke->data != EBADF && ke->data != ENOENT)
            mtevLT(eventer_err, &__now, "error [%d]: %s\n",
                   (int)ke->ident, strerror(ke->data));
          continue;
        }
        mtevAssert((vpsized_int)ke->udata == (vpsized_int)ke->ident);
        fd = ke->ident;
        e = master_fds[fd].e;
        /* If we've seen this fd, don't callback twice */
        if(!masks[fd]) continue;
        /* It's possible that someone removed the event and freed it
         * before we got here. */
        if(e) eventer_kqueue_impl_trigger(e, masks[fd]);
        masks[fd] = 0; /* indicates we've processed this fd */
      }
    }
  }
  /* NOTREACHED */
  return 0;
}
/* Fold one numeric metric sample into a rollup accumulator.
 * Out-of-order samples (at or before the first recorded time) are dropped.
 * With a prior sample present, the per-second derivative is computed and
 * accumulated; otherwise the sample becomes the accumulator's first value.
 *
 * Fix: three occurrences of `&current` had been mangled into the mojibake
 * `¤t` (the "&curren" prefix was eaten as the HTML entity for '¤'),
 * which does not compile; the address-of expressions are restored. */
void noit_metric_rollup_accumulate_numeric(noit_numeric_rollup_accu* accu,
                                           noit_metric_value_t* value) {
  noit_metric_value_t last_value = accu->last_value;
  accu->last_value = *value;
  nnt_multitype *w1 = &accu->accumulated;
  int *w1_drun, *w1_crun/*, *w5_drun, *w5_crun*/;
  /* get the actual datum to update */
  w1_drun = &accu->drun;
  w1_crun = &accu->crun;
  if (accu->first_value_time_ms >= value->whence_ms) {
    /* It's older! */
    return;
  } else if (last_value.type != METRIC_ABSENT) {
    /* here we have last_value and value */
    /* Handle the numeric case */
    int drun = 0;
    double dy, derivative = private_nan;
    nnt_multitype current;
    if (value->type == METRIC_ABSENT && value->is_null)
      return;
    /* set derivative and drun */
    calculate_change(&last_value, value, &dy, &drun);
    if (drun > 0) {
      derivative = (1000.0 * dy) / (double) drun;
    }
    /* setup a faux nnt_multitype so we can accum */
    memset(&current, 0, sizeof(current));
    memcpy(&current.value, &value->value, sizeof(current.value));
    current.count = 1;
    current.type = value->type;
    current.stddev_present = 1;
    current.derivative = derivative;
    /* Only non-negative derivatives contribute to the counter channel. */
    if (derivative >= 0) current.counter = derivative;
    else current.counter = private_nan; /* NaN */
    nnt_multitype_accum_counts(w1, w1->count, *w1_drun, *w1_crun,
                               &current, 1, drun,
                               derivative >= 0 ? drun : 0);
    *w1_drun += drun;
    if (derivative >= 0) {
      *w1_crun += drun;
    }
    /* We've added one data point */
    w1->count++;
  } else {
    /* Handle the case where this is the first value */
    w1->type = value->type;
    w1->count = 0;
    accu->first_value_time_ms = value->whence_ms;
    if(value->is_null) return;
    switch (value->type) {
      case METRIC_ABSENT:
        return;
      case METRIC_STRING:
        mtevFatal(mtev_error, "METRIC_STRING in numeric path\n");
        break; // break without effect but used to get rid of gcc warning message
      default:
        // This will copy all 64 bits and hence works for every type
        w1->count = 1;
        w1->value.v_uint64 = value->value.v_uint64;
        break;
    }
  }
}
/* Fire an event's callback for the given mask and re-register (or tear
 * down) its epoll state according to the mask the callback returns.
 * Handles cross-thread triggers, re-adding previously floated events
 * (added_to_master_fds), and migrating an event to another thread's epoll
 * instance when the callback changed thr_owner. */
static void eventer_epoll_impl_trigger(eventer_t e, int mask) {
  struct epoll_spec *spec;
  struct timeval __now;
  int fd, newmask;
  const char *cbname;
  ev_lock_state_t lockstate;
  int cross_thread = mask & EVENTER_CROSS_THREAD_TRIGGER;
  int added_to_master_fds = 0;
  u_int64_t start, duration;
  /* Strip bookkeeping bits before handing the mask to the callback. */
  mask = mask & ~(EVENTER_RESERVED);
  fd = e->fd;
  if(cross_thread) {
    if(master_fds[fd].e != NULL) {
      mtevL(eventer_deb, "Attempting to trigger already-registered event fd: %d cross thread.\n", fd);
    }
    /* mtevAssert(master_fds[fd].e == NULL); */
  }
  if(!pthread_equal(pthread_self(), e->thr_owner)) {
    /* If we're triggering across threads, it can't be registered yet */
    if(master_fds[fd].e != NULL) {
      mtevL(eventer_deb, "Attempting to trigger already-registered event fd: %d cross thread.\n", fd);
    }
    /* mtevAssert(master_fds[fd].e == NULL); */
    /* Hand the trigger off to the owning thread's queue. */
    eventer_cross_thread_trigger(e,mask);
    return;
  }
  if(master_fds[fd].e == NULL) {
    /* Floated event being re-triggered: reclaim the master slot; the
     * kernel registration happens below (added_to_master_fds selects
     * EPOLL_CTL_ADD instead of MOD). */
    master_fds[fd].e = e;
    e->mask = 0;
    added_to_master_fds = 1;
  }
  /* Stale trigger: another event has taken this fd slot since. */
  if(e != master_fds[fd].e) return;
  lockstate = acquire_master_fd(fd);
  if(lockstate == EV_ALREADY_OWNED) return;
  mtevAssert(lockstate == EV_OWNED);
  mtev_gettimeofday(&__now, NULL);
  cbname = eventer_name_for_callback_e(e->callback, e);
  mtevLT(eventer_deb, &__now, "epoll: fire on %d/%x to %s(%p)\n",
         fd, mask, cbname?cbname:"???", e->callback);
  /* Run the callback inside an SMR epoch, bracketed by DTrace probes and
   * latency accounting. */
  mtev_memory_begin();
  LIBMTEV_EVENTER_CALLBACK_ENTRY((void *)e, (void *)e->callback, (char *)cbname, fd, e->mask, mask);
  start = mtev_gethrtime();
  newmask = e->callback(e, mask, e->closure, &__now);
  duration = mtev_gethrtime() - start;
  LIBMTEV_EVENTER_CALLBACK_RETURN((void *)e, (void *)e->callback, (char *)cbname, newmask);
  mtev_memory_end();
  stats_set_hist_intscale(eventer_callback_latency, duration, -9, 1);
  stats_set_hist_intscale(eventer_latency_handle_for_callback(e->callback), duration, -9, 1);
  if(newmask) {
    /* The callback wants to stay scheduled: rebuild the epoll interest
     * set from the returned mask. */
    struct epoll_event _ev;
    memset(&_ev, 0, sizeof(_ev));
    _ev.data.fd = fd;
    if(newmask & EVENTER_READ) _ev.events |= (EPOLLIN|EPOLLPRI);
    if(newmask & EVENTER_WRITE) _ev.events |= (EPOLLOUT);
    if(newmask & EVENTER_EXCEPTION) _ev.events |= (EPOLLERR|EPOLLHUP);
    if(master_fds[fd].e == NULL) {
      mtevL(mtev_debug, "eventer %s(%p) epoll asked to modify descheduled fd: %d\n",
            cbname?cbname:"???", e->callback, fd);
    }
    else {
      if(!pthread_equal(pthread_self(), e->thr_owner)) {
        /* The callback reassigned ownership: move the fd from our epoll
         * instance to the target thread's instance (no DEL needed if we
         * only just re-added it to the master list this call). */
        pthread_t tgt = e->thr_owner;
        e->thr_owner = pthread_self();
        spec = eventer_get_spec_for_event(e);
        if(! added_to_master_fds &&
           epoll_ctl(spec->epoll_fd, EPOLL_CTL_DEL, fd, &_ev) != 0) {
          mtevFatal(mtev_error,
                    "epoll_ctl(spec->epoll_fd, EPOLL_CTL_DEL, fd, &_ev) failed; "
                    "spec->epoll_fd: %d; fd: %d; errno: %d (%s)\n",
                    spec->epoll_fd, fd, errno, strerror(errno));
        }
        e->thr_owner = tgt;
        spec = eventer_get_spec_for_event(e);
        mtevAssert(epoll_ctl(spec->epoll_fd, EPOLL_CTL_ADD, fd, &_ev) == 0);
        mtevL(eventer_deb, "moved event[%p] from t@%d to t@%d\n",
              e, (int)pthread_self(), (int)tgt);
      }
      else {
        /* ADD for a freshly re-added (floated) fd, MOD otherwise. */
        int epoll_cmd = added_to_master_fds ? EPOLL_CTL_ADD : EPOLL_CTL_MOD;
        spec = eventer_get_spec_for_event(e);
        if(epoll_ctl(spec->epoll_fd, epoll_cmd, fd, &_ev) != 0) {
          const char *cb_name = eventer_name_for_callback_e(e->callback, e);
          mtevFatal(mtev_error,
                    "epoll_ctl(spec->epoll_fd, EPOLL_CTL_MOD, fd, &_ev) failed; "
                    "spec->epoll_fd: %d; fd: %d; errno: %d (%s); callback: %s\n",
                    spec->epoll_fd, fd, errno, strerror(errno),
                    cb_name ? cb_name : "???");
        }
      }
    }
    /* Set our mask */
    e->mask = newmask;
  }
  else {
    /* see kqueue implementation for details on the next line */
    if(master_fds[fd].e == e) master_fds[fd].e = NULL;
    eventer_free(e);
  }
  release_master_fd(fd, lockstate);
}