static void dns_ctx_handle_free(void *vh) {
  dns_ctx_handle_t *h = vh;
  free(h->ns);
  eventer_remove_fd(h->e->fd);
  eventer_free(h->e);
  h->e = NULL;
  if(h->timeout) {
    eventer_remove(h->timeout);
    eventer_free(h->timeout);
    h->timeout = NULL;
  }
  dns_close(h->ctx);
  dns_free(h->ctx);
  assert(h->timeout == NULL);
  free(h);
}
static void dns_module_eventer_dns_utm_fn(struct dns_ctx *ctx,
                                          int timeout, void *data) {
  dns_ctx_handle_t *h = data;
  eventer_t e = NULL, newe = NULL;
  if(ctx == NULL) {
    if(h && h->timeout) e = eventer_remove(h->timeout);
  }
  else {
    assert(h->ctx == ctx);
    if(h->timeout) e = eventer_remove(h->timeout);
    if(timeout > 0) {
      newe = eventer_alloc();
      newe->mask = EVENTER_TIMER;
      newe->callback = dns_module_invoke_timeouts;
      newe->closure = h;
      gettimeofday(&newe->whence, NULL);
      newe->whence.tv_sec += timeout;
    }
  }
  if(e) {
    eventer_free(e);
    if(h) dns_module_dns_ctx_release(h);
  }
  if(newe) {
    dns_module_dns_ctx_acquire(h);
    eventer_add(newe);
  }
  if(h) h->timeout = newe;
}
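/*
 * Illustrative sketch (not from the original source): the utm callback above
 * pairs dns_module_dns_ctx_acquire() with each new timer that holds a pointer
 * to the handle, and dns_module_dns_ctx_release() when such a timer is torn
 * down. A minimal refcount implementation consistent with that usage might
 * look like this; the `refcnt` field and the return semantics assumed for
 * noit_atomic_dec32 are assumptions, not the project's actual definitions.
 */
static void dns_ctx_acquire_sketch(dns_ctx_handle_t *h) {
  noit_atomic_inc32(&h->refcnt);     /* one reference per outstanding timer (field assumed) */
}
static void dns_ctx_release_sketch(dns_ctx_handle_t *h) {
  /* assumes noit_atomic_dec32 returns the post-decrement value */
  if(noit_atomic_dec32(&h->refcnt) == 0)
    dns_ctx_handle_free(h);          /* dropping the last reference frees the handle */
}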
void noit_poller_free_check(noit_check_t *checker) {
  noit_module_t *mod;
  if(checker->flags & NP_RUNNING) {
    recycle_check(checker);
    return;
  }
  mod = noit_module_lookup(checker->module);
  if(mod && mod->cleanup) mod->cleanup(mod, checker);
  if(checker->fire_event) {
    eventer_remove(checker->fire_event);
    free(checker->fire_event->closure);
    eventer_free(checker->fire_event);
    checker->fire_event = NULL;
  }
  if(checker->closure) free(checker->closure);
  if(checker->target) free(checker->target);
  if(checker->module) free(checker->module);
  if(checker->name) free(checker->name);
  if(checker->config) {
    noit_hash_destroy(checker->config, free, free);
    free(checker->config);
    checker->config = NULL;
  }
  free(checker);
}
static void eventer_ports_impl_trigger(eventer_t e, int mask) {
  ev_lock_state_t lockstate;
  const char *cbname;
  struct timeval __now;
  int fd, oldmask, newmask;

  fd = e->fd;
  if(e != master_fds[fd].e) return;
  lockstate = acquire_master_fd(fd);
  if(lockstate == EV_ALREADY_OWNED) return;
  assert(lockstate == EV_OWNED);
  gettimeofday(&__now, NULL);
  oldmask = e->mask;
  cbname = eventer_name_for_callback(e->callback);
  noitLT(eventer_deb, &__now, "ports: fire on %d/%x to %s(%p)\n",
         fd, mask, cbname?cbname:"???", e->callback);
  EVENTER_CALLBACK_ENTRY((void *)e->callback, (char *)cbname, fd, e->mask, mask);
  newmask = e->callback(e, mask, e->closure, &__now);
  EVENTER_CALLBACK_RETURN((void *)e->callback, (char *)cbname, newmask);
  if(newmask) {
    alter_fd(e, newmask);
    /* Set our mask */
    e->mask = newmask;
    noitLT(eventer_deb, &__now, "ports: complete on %d/(%x->%x) to %s(%p)\n",
           fd, mask, newmask, cbname?cbname:"???", e->callback);
  }
  else {
    noitLT(eventer_deb, &__now, "ports: complete on %d/none to %s(%p)\n",
           fd, cbname?cbname:"???", e->callback);
    /*
     * Long story long:
     * When integrating with a few external event systems, we find
     * it difficult to make their use of remove+add act as an update,
     * as it can be recurrent in a single handler call and you cannot
     * remove completely from the event system if you are going to
     * just update (otherwise the eventer_t in your call stack could
     * be stale). What we do is perform a superficial remove, marking
     * the mask as 0, but not eventer_remove_fd. Then on an add, if
     * we already have an event, we just update the mask (as we
     * have not yet returned to the eventer's loop).
     * This leaves us in a tricky situation: when a remove is called
     * and the add doesn't roll in, we return 0 (mask == 0) and hit
     * this spot. We have intended to remove the event, but it still
     * resides at master_fds[fd].e -- even after we free it.
     * So, in the event that we return 0 and
     * master_fds[fd].e == the event we're about to free... we NULL
     * it out.
     */
    if(master_fds[fd].e == e) master_fds[fd].e = NULL;
    eventer_free(e);
  }
  release_master_fd(fd, lockstate);
}
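/*
 * Illustrative sketch (hypothetical callback, not from the original source):
 * the "superficial remove" described in the comment above is what makes the
 * following remove+add-as-update pattern safe inside a handler. The remove
 * only marks the mask 0; the subsequent add sees the existing event and just
 * updates the mask, so the eventer_t on the call stack never goes stale.
 */
static int update_via_remove_add_sketch(eventer_t e, int mask, void *closure,
                                        struct timeval *now) {
  eventer_remove_fd(e->fd);               /* superficial: e->mask becomes 0 */
  e->mask = EVENTER_READ | EVENTER_EXCEPTION;
  eventer_add(e);                         /* same event: mask is just updated */
  return e->mask;                         /* non-zero, so the trigger keeps e alive */
}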
static void external_cleanup(noit_module_t *self, noit_check_t *check) {
  struct check_info *ci = (struct check_info *)check->closure;
  if(ci) {
    if(ci->timeout_event) {
      eventer_remove(ci->timeout_event);
      free(ci->timeout_event->closure);
      eventer_free(ci->timeout_event);
      ci->timeout_event = NULL;
    }
  }
}
static void dns_cache_utm_fn(struct dns_ctx *ctx, int timeout, void *data) {
  eventer_t e = NULL, newe = NULL;
  if(ctx == NULL) e = eventer_remove(dns_cache_timeout);
  else {
    if(timeout < 0) e = eventer_remove(dns_cache_timeout);
    else newe = eventer_in_s_us(dns_invoke_timeouts, dns_ctx, timeout, 0);
  }
  if(e) eventer_free(e);
  if(newe) eventer_add(newe);
  dns_cache_timeout = newe;
}
static void dns_cache_utm_fn(struct dns_ctx *ctx, int timeout, void *data) {
  eventer_t e = NULL, newe = NULL;
  if(ctx == NULL) e = eventer_remove(dns_cache_timeout);
  else {
    if(timeout < 0) e = eventer_remove(dns_cache_timeout);
    else {
      newe = eventer_alloc();
      newe->mask = EVENTER_TIMER;
      newe->callback = dns_invoke_timeouts;
      newe->closure = dns_ctx;
      gettimeofday(&newe->whence, NULL);
      newe->whence.tv_sec += timeout;
    }
  }
  if(e) eventer_free(e);
  if(newe) eventer_add(newe);
  dns_cache_timeout = newe;
}
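/*
 * Illustrative sketch (not from the original source): the two versions of
 * dns_cache_utm_fn above differ only in how the timer event is built; the
 * eventer_in_s_us() convenience call in the first version encapsulates the
 * alloc/mask/whence boilerplate spelled out in the second. A plausible shape
 * for such a helper, written against the same eventer fields used above:
 */
static eventer_t timer_in_s_us_sketch(eventer_func_t func, void *closure,
                                      unsigned long s, unsigned long us) {
  eventer_t e = eventer_alloc();
  e->mask = EVENTER_TIMER;
  e->callback = func;
  e->closure = closure;
  gettimeofday(&e->whence, NULL);      /* fire s seconds, us microseconds from now */
  e->whence.tv_sec += s;
  e->whence.tv_usec += us;
  if(e->whence.tv_usec >= 1000000) {   /* normalize the microsecond carry */
    e->whence.tv_sec++;
    e->whence.tv_usec -= 1000000;
  }
  return e;
}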
static void eventer_epoll_impl_trigger(eventer_t e, int mask) {
  struct timeval __now;
  int fd, newmask;
  const char *cbname;
  ev_lock_state_t lockstate;

  fd = e->fd;
  if(e != master_fds[fd].e) return;
  lockstate = acquire_master_fd(fd);
  if(lockstate == EV_ALREADY_OWNED) return;
  assert(lockstate == EV_OWNED);
  gettimeofday(&__now, NULL);
  cbname = eventer_name_for_callback_e(e->callback, e);
  noitLT(eventer_deb, &__now, "epoll: fire on %d/%x to %s(%p)\n",
         fd, mask, cbname?cbname:"???", e->callback);
  EVENTER_CALLBACK_ENTRY((void *)e->callback, (char *)cbname, fd, e->mask, mask);
  newmask = e->callback(e, mask, e->closure, &__now);
  EVENTER_CALLBACK_RETURN((void *)e->callback, (char *)cbname, newmask);
  if(newmask) {
    struct epoll_event _ev;
    memset(&_ev, 0, sizeof(_ev));
    _ev.data.fd = fd;
    if(newmask & EVENTER_READ) _ev.events |= (EPOLLIN|EPOLLPRI);
    if(newmask & EVENTER_WRITE) _ev.events |= (EPOLLOUT);
    if(newmask & EVENTER_EXCEPTION) _ev.events |= (EPOLLERR|EPOLLHUP);
    if(master_fds[fd].e == NULL) {
      noitL(noit_debug, "eventer %s(%p) epoll asked to modify descheduled fd: %d\n",
            cbname?cbname:"???", e->callback, fd);
    }
    else {
      assert(epoll_ctl(epoll_fd, EPOLL_CTL_MOD, fd, &_ev) == 0);
    }
    /* Set our mask */
    e->mask = newmask;
  }
  else {
    /* see kqueue implementation for details on the next line */
    if(master_fds[fd].e == e) master_fds[fd].e = NULL;
    eventer_free(e);
  }
  release_master_fd(fd, lockstate);
}
static int ssh2_connect_timeout(eventer_t e, int mask, void *closure,
                                struct timeval *now) {
  eventer_t fde;
  ssh2_check_info_t *ci = closure;
  noit_check_t *check = ci->check;
  ci->timeout_event = NULL; /* This is us; returning 0 will free this event */
  ci->error = strdup("ssh connect timeout");
  if(ci->synch_fd_event) {
    fde = ci->synch_fd_event;
    eventer_remove_fd(fde->fd);
    fde->opset->close(fde->fd, &mask, fde);
    eventer_free(fde);
    ci->synch_fd_event = NULL;
  }
  ssh2_log_results(ci->self, ci->check);
  ssh2_cleanup(ci->self, ci->check);
  check->flags &= ~NP_RUNNING;
  return 0;
}
static void eventer_dns_utm_fn(struct dns_ctx *ctx, int timeout, void *data) {
  dns_ctx_handle_t *h = data;
  eventer_t e = NULL, newe = NULL;
  if(ctx == NULL) e = eventer_remove(h->timeout);
  else {
    assert(h->ctx == ctx);
    if(timeout < 0) e = eventer_remove(h->timeout);
    else {
      newe = eventer_alloc();
      newe->mask = EVENTER_TIMER;
      newe->callback = dns_invoke_timeouts;
      newe->closure = h;
      gettimeofday(&newe->whence, NULL);
      newe->whence.tv_sec += timeout;
    }
  }
  if(e) eventer_free(e);
  if(newe) eventer_add(newe);
  h->timeout = newe;
}
static void noit_event_dispose(void *ev) {
  int mask;
  eventer_t *value = ev;
  eventer_t removed, e = *value;
  noitL(nldeb, "lua check cleanup: dropping (%p)->fd (%d)\n", e, e->fd);
  removed = eventer_remove(e);
  noitL(nldeb, "    remove from eventer system %s\n",
        removed ? "succeeded" : "failed");
  if(e->mask & (EVENTER_READ|EVENTER_WRITE|EVENTER_EXCEPTION)) {
    noitL(nldeb, "    closing down fd %d\n", e->fd);
    e->opset->close(e->fd, &mask, e);
  }
  if(e->closure) {
    struct nl_generic_cl *cl;
    cl = e->closure;
    if(cl->free) cl->free(cl);
  }
  eventer_free(e);
  free(ev);
}
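/*
 * Illustrative sketch (an assumption about the type, not the project's
 * actual definition): noit_event_dispose above relies only on the closure
 * beginning with a `free` function pointer, so any closure shaped like this
 * would satisfy it:
 */
struct nl_generic_cl_sketch {
  void (*free)(void *);   /* destructor invoked by noit_event_dispose */
};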
static void ssh2_cleanup(noit_module_t *self, noit_check_t *check) {
  ssh2_check_info_t *ci = check->closure;
  if(ci) {
    if(ci->timeout_event) {
      eventer_remove(ci->timeout_event);
      eventer_free(ci->timeout_event);
    }
    if(ci->session) {
      libssh2_session_disconnect(ci->session, "Bye!");
      libssh2_session_free(ci->session);
    }
    if(ci->methods.kex) free(ci->methods.kex);
    if(ci->methods.hostkey) free(ci->methods.hostkey);
    if(ci->methods.crypt_cs) free(ci->methods.crypt_cs);
    if(ci->methods.crypt_sc) free(ci->methods.crypt_sc);
    if(ci->methods.mac_cs) free(ci->methods.mac_cs);
    if(ci->methods.mac_sc) free(ci->methods.mac_sc);
    if(ci->methods.comp_cs) free(ci->methods.comp_cs);
    if(ci->methods.comp_sc) free(ci->methods.comp_sc);
    if(ci->error) free(ci->error);
    memset(ci, 0, sizeof(*ci));
  }
}
int noit_livestream_handler(eventer_t e, int mask, void *closure,
                            struct timeval *now) {
  eventer_t newe;
  pthread_t tid;
  int newmask = EVENTER_READ | EVENTER_EXCEPTION;
  acceptor_closure_t *ac = closure;
  noit_livestream_closure_t *jcl = ac->service_ctx;

  if(mask & EVENTER_EXCEPTION || (jcl && jcl->wants_shutdown)) {
 socket_error:
    /* Exceptions cause us to simply snip the connection */
    eventer_remove_fd(e->fd);
    e->opset->close(e->fd, &newmask, e);
    if(jcl) noit_livestream_closure_free(jcl);
    if(ac) acceptor_closure_free(ac);
    return 0;
  }

  if(!ac->service_ctx || !jcl->feed) {
    int len;
    jcl = ac->service_ctx = noit_livestream_closure_alloc();
    /* Setup logger to this channel */
    if(!jcl->period) {
      u_int32_t nperiod;
      len = e->opset->read(e->fd, &nperiod, sizeof(nperiod), &mask, e);
      if(len == -1 && errno == EAGAIN) return mask | EVENTER_EXCEPTION;
      if(len != sizeof(nperiod)) goto socket_error;
      jcl->period = ntohl(nperiod);
      if(!jcl->period) {
        noitL(noit_error, "period of 0 specified in livestream. not allowed.\n");
        goto socket_error;
      }
    }
    while(jcl->uuid_read < 36) {
      len = e->opset->read(e->fd, jcl->uuid_str + jcl->uuid_read,
                           36 - jcl->uuid_read, &mask, e);
      if(len == -1 && errno == EAGAIN) return mask | EVENTER_EXCEPTION;
      if(len == 0) goto socket_error;
      jcl->uuid_read += len;
    }
    jcl->uuid_str[36] = '\0';
    if(uuid_parse(jcl->uuid_str, jcl->uuid)) {
      noitL(noit_error, "bad uuid received in livestream handler '%s'\n",
            jcl->uuid_str);
      goto socket_error;
    }
    jcl->feed = malloc(32);
    snprintf(jcl->feed, 32, "livestream/%d", noit_atomic_inc32(&ls_counter));
    noit_log_stream_new(jcl->feed, "noit_livestream", jcl->feed, jcl, NULL);

    jcl->check = noit_check_watch(jcl->uuid, jcl->period);
    if(!jcl->check) {
      e->opset->close(e->fd, &newmask, e);
      return 0;
    }
    /* This check must be watched from the livestream */
    noit_check_transient_add_feed(jcl->check, jcl->feed);
    /* Note the check */
    noit_check_log_check(jcl->check);
    /* kick it off, if it isn't running already */
    if(!NOIT_CHECK_LIVE(jcl->check)) noit_check_activate(jcl->check);
  }

  eventer_remove_fd(e->fd);
  newe = eventer_alloc();
  memcpy(newe, e, sizeof(*e));
  if(pthread_create(&tid, NULL, noit_livestream_thread_main, newe) == 0)
    return 0;

  noit_check_transient_remove_feed(jcl->check, jcl->feed);
  noit_livestream_closure_free(jcl);
  /* Undo our dup */
  eventer_free(newe);
  /* Creating the thread failed, close it down and deschedule. */
  e->opset->close(e->fd, &newmask, e);
  return 0;
}
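/*
 * Illustrative sketch (hypothetical client side, not from the original
 * source): the handler above first reads a 4-byte period in network byte
 * order, then a 36-character uuid string. A client initiating a livestream
 * would therefore send:
 */
static int livestream_hello_sketch(int fd, u_int32_t period_ms,
                                   const char *uuid_str /* 36 chars */) {
  u_int32_t nperiod = htonl(period_ms);   /* period, network byte order */
  if(write(fd, &nperiod, sizeof(nperiod)) != sizeof(nperiod)) return -1;
  if(write(fd, uuid_str, 36) != 36) return -1;  /* unparsed uuid, no NUL */
  return 0;
}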
static int external_invoke(noit_module_t *self, noit_check_t *check,
                           noit_check_t *cause) {
  struct timeval when, p_int;
  external_closure_t *ecl;
  struct check_info *ci = (struct check_info *)check->closure;
  eventer_t newe;
  external_data_t *data;
  noit_hash_table check_attrs_hash = NOIT_HASH_EMPTY;
  int i, klen;
  noit_hash_iter iter = NOIT_HASH_ITER_ZERO;
  const char *name, *value;
  char interp_fmt[4096], interp_buff[4096];

  data = noit_module_get_userdata(self);

  check->flags |= NP_RUNNING;
  noitL(data->nldeb, "external_invoke(%p,%s)\n", self, check->target);

  /* remove a timeout if we still have one -- we should, unless someone
   * has set a lower timeout than the period.
   */
  if(ci->timeout_event) {
    eventer_remove(ci->timeout_event);
    free(ci->timeout_event->closure);
    eventer_free(ci->timeout_event);
    ci->timeout_event = NULL;
  }

  check_info_clean(ci);

  gettimeofday(&when, NULL);
  memcpy(&check->last_fire_time, &when, sizeof(when));

  /* Setup all our check bits */
  ci->check_no = noit_atomic_inc64(&data->check_no_seq);
  ci->check = check;

  /* We might want to extract metrics */
  if(noit_hash_retr_str(check->config,
                        "output_extract", strlen("output_extract"),
                        &value) != 0) {
    const char *error;
    int erroffset;
    ci->matcher = pcre_compile(value, 0, &error, &erroffset, NULL);
    if(!ci->matcher) {
      noitL(data->nlerr, "external pcre /%s/ failed @ %d: %s\n",
            value, erroffset, error);
    }
  }

  noit_check_make_attrs(check, &check_attrs_hash);

  /* Count the args */
  i = 1;
  while(1) {
    char argname[10];
    snprintf(argname, sizeof(argname), "arg%d", i);
    if(noit_hash_retr_str(check->config, argname, strlen(argname),
                          &value) == 0) break;
    i++;
  }
  ci->argcnt = i + 1; /* path, arg0, (i-1 more args) */
  ci->arglens = calloc(ci->argcnt, sizeof(*ci->arglens));
  ci->args = calloc(ci->argcnt, sizeof(*ci->args));

  /* Make the command */
  if(noit_hash_retr_str(check->config, "command", strlen("command"),
                        &value) == 0) {
    value = "/bin/true";
  }
  ci->args[0] = strdup(value);
  ci->arglens[0] = strlen(ci->args[0]) + 1;
  i = 0;
  while(1) {
    char argname[10];
    snprintf(argname, sizeof(argname), "arg%d", i);
    if(noit_hash_retr_str(check->config, argname, strlen(argname),
                          &value) == 0) {
      if(i == 0) {
        /* if we don't have arg0, make it the last element of the path */
        char *cp = ci->args[0] + strlen(ci->args[0]);
        while(cp > ci->args[0] && *(cp-1) != '/') cp--;
        value = cp;
      }
      else break; /* if we don't have argn, we're done */
    }
    noit_check_interpolate(interp_buff, sizeof(interp_buff), value,
                           &check_attrs_hash, check->config);
    ci->args[i+1] = strdup(interp_buff);
    ci->arglens[i+1] = strlen(ci->args[i+1]) + 1;
    i++;
  }

  /* Make the environment */
  memset(&iter, 0, sizeof(iter));
  ci->envcnt = 0;
  while(noit_hash_next_str(check->config, &iter, &name, &klen, &value))
    if(!strncasecmp(name, "env_", 4)) ci->envcnt++;
  memset(&iter, 0, sizeof(iter));
  ci->envlens = calloc(ci->envcnt, sizeof(*ci->envlens));
  ci->envs = calloc(ci->envcnt, sizeof(*ci->envs));
  ci->envcnt = 0;
  while(noit_hash_next_str(check->config, &iter, &name, &klen, &value))
    if(!strncasecmp(name, "env_", 4)) {
      snprintf(interp_fmt, sizeof(interp_fmt), "%s=%s", name+4, value);
      noit_check_interpolate(interp_buff, sizeof(interp_buff), interp_fmt,
                             &check_attrs_hash, check->config);
      ci->envs[ci->envcnt] = strdup(interp_buff);
      ci->envlens[ci->envcnt] = strlen(ci->envs[ci->envcnt]) + 1;
      ci->envcnt++;
    }

  noit_hash_destroy(&check_attrs_hash, NULL, NULL);

  noit_hash_store(&data->external_checks,
                  (const char *)&ci->check_no, sizeof(ci->check_no),
                  ci);

  /* Setup a timeout */
  newe = eventer_alloc();
  newe->mask = EVENTER_TIMER;
  gettimeofday(&when, NULL);
  p_int.tv_sec = check->timeout / 1000;
  p_int.tv_usec = (check->timeout % 1000) * 1000;
  add_timeval(when, p_int, &newe->whence);
  ecl = calloc(1, sizeof(*ecl));
  ecl->self = self;
  ecl->check = check;
  newe->closure = ecl;
  newe->callback = external_timeout;
  eventer_add(newe);
  ci->timeout_event = newe;

  /* Setup push */
  newe = eventer_alloc();
  newe->mask = EVENTER_ASYNCH;
  add_timeval(when, p_int, &newe->whence);
  ecl = calloc(1, sizeof(*ecl));
  ecl->self = self;
  ecl->check = check;
  newe->closure = ecl;
  newe->callback = external_enqueue;
  eventer_add(newe);
  return 0;
}
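/*
 * Illustrative sketch (hypothetical; the module's actual external_timeout is
 * not shown here): a timeout callback consistent with the conventions visible
 * above and in ssh2_connect_timeout -- clear ci->timeout_event because
 * returning 0 frees this event, log the result, and drop NP_RUNNING. The
 * closure free mirrors the allocation in external_invoke.
 */
static int external_timeout_sketch(eventer_t e, int mask, void *closure,
                                   struct timeval *now) {
  external_closure_t *ecl = closure;
  struct check_info *ci = (struct check_info *)ecl->check->closure;
  if(ci) ci->timeout_event = NULL;   /* this event; returning 0 frees it */
  external_log_results(ecl->self, ecl->check);
  ecl->check->flags &= ~NP_RUNNING;
  free(ecl);                         /* allocated by the invoke path */
  return 0;
}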
static void eventer_epoll_impl_trigger(eventer_t e, int mask) {
  struct epoll_spec *spec;
  struct timeval __now;
  int fd, newmask, needs_add = 0;
  const char *cbname;
  ev_lock_state_t lockstate;
  int cross_thread = mask & EVENTER_CROSS_THREAD_TRIGGER;
  uint64_t start, duration;

  mask = mask & ~(EVENTER_RESERVED);
  fd = e->fd;
  if(cross_thread) {
    if(master_fds[fd].e != NULL) {
      mtevL(eventer_deb, "Attempting to trigger already-registered event fd: %d cross thread.\n", fd);
    }
    /* mtevAssert(master_fds[fd].e == NULL); */
  }
  if(!pthread_equal(pthread_self(), e->thr_owner)) {
    /* If we're triggering across threads, it can't be registered yet */
    if(master_fds[fd].e != NULL) {
      mtevL(eventer_deb, "Attempting to trigger already-registered event fd: %d cross thread.\n", fd);
    }
    /* mtevAssert(master_fds[fd].e == NULL); */
    eventer_cross_thread_trigger(e,mask);
    return;
  }
  if(master_fds[fd].e == NULL) {
    lockstate = acquire_master_fd(fd);
    if (lockstate == EV_ALREADY_OWNED) {
      /* The incoming triggered event is already owned by this thread.
       * This means our floated event completed before the current
       * event handler even exited. So it retriggered recursively
       * from inside the event handler.
       *
       * Treat this special case the same as a cross thread trigger
       * and just queue this event to be picked up on the next loop
       */
      eventer_cross_thread_trigger(e, mask);
      return;
    }
    /*
     * If we are re-adding the event to the master list here, also do the
     * needful with the epoll_ctl.
     *
     * This can happen in cases where some event was floated and the float
     * completed so fast that we finished the job in the same thread
     * that it started in. Since we `eventer_remove_fd` before we float,
     * the re-add here should replace the fd in the epoll_ctl.
     */
    master_fds[fd].e = e;
    e->mask = 0;
    struct epoll_event _ev;
    memset(&_ev, 0, sizeof(_ev));
    _ev.data.fd = fd;
    spec = eventer_get_spec_for_event(e);
    if(mask & EVENTER_READ) _ev.events |= (EPOLLIN|EPOLLPRI);
    if(mask & EVENTER_WRITE) _ev.events |= (EPOLLOUT);
    if(mask & EVENTER_EXCEPTION) _ev.events |= (EPOLLERR|EPOLLHUP);
    mtevL(eventer_deb, "epoll_ctl(%d, add, %d)\n", spec->epoll_fd, fd);
    if (epoll_ctl(spec->epoll_fd, EPOLL_CTL_ADD, fd, &_ev) != 0) {
      mtevL(mtev_error, "epoll_ctl(%d, add, %d, %d)\n", spec->epoll_fd, fd, errno);
    }
    release_master_fd(fd, lockstate);
  }
  if(e != master_fds[fd].e) {
    mtevL(mtev_error, "Incoming event: %p, does not match master list: %p\n",
          e, master_fds[fd].e);
    return;
  }
  lockstate = acquire_master_fd(fd);
  if(lockstate == EV_ALREADY_OWNED) {
    mtevL(eventer_deb, "Incoming event: %p already owned by this thread\n", e);
    return;
  }
  mtevAssert(lockstate == EV_OWNED);

  mtev_gettimeofday(&__now, NULL);
  cbname = eventer_name_for_callback_e(e->callback, e);
  spec = eventer_get_spec_for_event(e);
  mtevLT(eventer_deb, &__now, "epoll(%d): fire on %d/%x to %s(%p)\n",
         spec->epoll_fd, fd, mask, cbname?cbname:"???", e->callback);
  mtev_memory_begin();
  LIBMTEV_EVENTER_CALLBACK_ENTRY((void *)e, (void *)e->callback,
                                 (char *)cbname, fd, e->mask, mask);
  start = mtev_gethrtime();
  newmask = e->callback(e, mask, e->closure, &__now);
  duration = mtev_gethrtime() - start;
  LIBMTEV_EVENTER_CALLBACK_RETURN((void *)e, (void *)e->callback,
                                  (char *)cbname, newmask);
  mtev_memory_end();
  stats_set_hist_intscale(eventer_callback_latency, duration, -9, 1);
  stats_set_hist_intscale(eventer_latency_handle_for_callback(e->callback),
                          duration, -9, 1);

  if(newmask) {
    struct epoll_event _ev;
    memset(&_ev, 0, sizeof(_ev));
    _ev.data.fd = fd;
    if(newmask & EVENTER_READ) _ev.events |= (EPOLLIN|EPOLLPRI);
    if(newmask & EVENTER_WRITE) _ev.events |= (EPOLLOUT);
    if(newmask & EVENTER_EXCEPTION) _ev.events |= (EPOLLERR|EPOLLHUP);
    if(master_fds[fd].e == NULL) {
      mtevL(mtev_debug, "eventer %s(%p) epoll asked to modify descheduled fd: %d\n",
            cbname?cbname:"???", e->callback, fd);
    }
    else {
      if(!pthread_equal(pthread_self(), e->thr_owner)) {
        pthread_t tgt = e->thr_owner;
        e->thr_owner = pthread_self();
        spec = eventer_get_spec_for_event(e);
        if(e->mask != 0 && !needs_add) {
          mtevL(eventer_deb, "epoll_ctl(%d, del, %d)\n", spec->epoll_fd, fd);
          if(epoll_ctl(spec->epoll_fd, EPOLL_CTL_DEL, fd, &_ev) != 0) {
            mtevFatal(mtev_error,
                      "epoll_ctl(spec->epoll_fd, EPOLL_CTL_DEL, fd, &_ev) failed; "
                      "spec->epoll_fd: %d; fd: %d; errno: %d (%s)\n",
                      spec->epoll_fd, fd, errno, strerror(errno));
          }
        }
        e->thr_owner = tgt;
        spec = eventer_get_spec_for_event(e);
        mtevL(eventer_deb, "epoll_ctl(%d, add, %d)\n", spec->epoll_fd, fd);
        mtevAssert(epoll_ctl(spec->epoll_fd, EPOLL_CTL_ADD, fd, &_ev) == 0);
        mtevL(eventer_deb, "epoll(%d) moved event[%p] from t@%d to t@%d\n",
              spec->epoll_fd, e, (int)pthread_self(), (int)tgt);
      }
      else {
        int epoll_rv;
        int epoll_cmd = (e->mask == 0 || needs_add) ? EPOLL_CTL_ADD : EPOLL_CTL_MOD;
        spec = eventer_get_spec_for_event(e);
        mtevL(eventer_deb, "epoll_ctl(%d, %s, %d)\n",
              spec->epoll_fd, epoll_cmd == EPOLL_CTL_ADD ? "add" : "mod", fd);
        epoll_rv = epoll_ctl(spec->epoll_fd, epoll_cmd, fd, &_ev);
        if(epoll_rv != 0 &&
           ((epoll_cmd == EPOLL_CTL_ADD && errno == EEXIST) ||
            (epoll_cmd == EPOLL_CTL_MOD && errno == ENOENT))) {
          /* try the other way */
          epoll_cmd = (epoll_cmd == EPOLL_CTL_ADD) ? EPOLL_CTL_MOD : EPOLL_CTL_ADD;
          mtevL(eventer_deb, "retry epoll_ctl(%d, %s, %d)\n",
                spec->epoll_fd, epoll_cmd == EPOLL_CTL_ADD ? "add" : "mod", fd);
          epoll_rv = epoll_ctl(spec->epoll_fd, epoll_cmd, fd, &_ev);
        }
        if(epoll_rv != 0) {
          const char *cb_name = eventer_name_for_callback_e(e->callback, e);
          mtevFatal(mtev_error,
                    "epoll_ctl(spec->epoll_fd, %s, fd, &_ev) failed; "
                    "spec->epoll_fd: %d; fd: %d; errno: %d (%s); callback: %s\n",
                    epoll_cmd == EPOLL_CTL_ADD ? "EPOLL_CTL_ADD" : "EPOLL_CTL_MOD",
                    spec->epoll_fd, fd, errno, strerror(errno),
                    cb_name ? cb_name : "???");
        }
      }
    }
    /* Set our mask */
    e->mask = newmask;
  }
  else {
    /* see kqueue implementation for details on the next line */
    if(master_fds[fd].e == e) {
      /* if newmask == 0 the user has floated the connection. If we get here
       * and they have not called `eventer_remove_fd` it is a misuse of mtev.
       *
       * Check if they are compliant with floats here and remove_fd if they
       * forgot to, and warn in the log */
      spec = eventer_get_spec_for_event(e);
      struct epoll_event _ev;
      memset(&_ev, 0, sizeof(_ev));
      _ev.data.fd = fd;
      if (epoll_ctl(spec->epoll_fd, EPOLL_CTL_DEL, e->fd, &_ev) == 0) {
        mtevL(mtev_error, "WARNING: You forgot to 'eventer_remove_fd()' before returning a mask of zero.\n");
      }
      master_fds[fd].e = NULL;
    }
    eventer_free(e);
  }
  release_master_fd(fd, lockstate);
}
static int external_handler(eventer_t e, int mask,
                            void *closure, struct timeval *now) {
  noit_module_t *self = (noit_module_t *)closure;
  external_data_t *data;

  data = noit_module_get_userdata(self);
  while(1) {
    int inlen, expectlen;
    noit_check_t *check;
    struct check_info *ci;
    void *vci;

    if(!data->cr) {
      struct external_response r;
      struct msghdr msg;
      struct iovec v[3];
      memset(&r, 0, sizeof(r));
      v[0].iov_base = (char *)&r.check_no;
      v[0].iov_len = sizeof(r.check_no);
      v[1].iov_base = (char *)&r.exit_code;
      v[1].iov_len = sizeof(r.exit_code);
      v[2].iov_base = (char *)&r.stdoutlen;
      v[2].iov_len = sizeof(r.stdoutlen);
      expectlen = v[0].iov_len + v[1].iov_len + v[2].iov_len;

      /* Make this into a recv'ble message so we can PEEK */
      memset(&msg, 0, sizeof(msg));
      msg.msg_iov = v;
      msg.msg_iovlen = 3;
      inlen = recvmsg(e->fd, &msg, MSG_PEEK);
      if(inlen == 0) goto widowed;
      if((inlen == -1 && errno == EAGAIN) ||
         (inlen > 0 && inlen < expectlen))
        return EVENTER_READ | EVENTER_EXCEPTION;
      if(inlen == -1)
        noitL(noit_error, "recvmsg() failed: %s\n", strerror(errno));
      assert(inlen == expectlen);
      while(-1 == (inlen = recvmsg(e->fd, &msg, 0)) && errno == EINTR);
      assert(inlen == expectlen);
      data->cr = calloc(1, sizeof(*data->cr));
      memcpy(data->cr, &r, sizeof(r));
      data->cr->stdoutbuff = malloc(data->cr->stdoutlen);
    }
    if(data->cr) {
      while(data->cr->stdoutlen_sofar < data->cr->stdoutlen) {
        while((inlen =
                 read(e->fd,
                      data->cr->stdoutbuff + data->cr->stdoutlen_sofar,
                      data->cr->stdoutlen - data->cr->stdoutlen_sofar)) == -1 &&
              errno == EINTR);
        if(inlen == -1 && errno == EAGAIN)
          return EVENTER_READ | EVENTER_EXCEPTION;
        if(inlen == 0) goto widowed;
        data->cr->stdoutlen_sofar += inlen;
      }
      assert(data->cr->stdoutbuff[data->cr->stdoutlen-1] == '\0');
      if(!data->cr->stderrbuff) {
        while((inlen = read(e->fd, &data->cr->stderrlen,
                            sizeof(data->cr->stderrlen))) == -1 &&
              errno == EINTR);
        if(inlen == -1 && errno == EAGAIN)
          return EVENTER_READ | EVENTER_EXCEPTION;
        if(inlen == 0) goto widowed;
        assert(inlen == sizeof(data->cr->stderrlen));
        data->cr->stderrbuff = malloc(data->cr->stderrlen);
      }
      while(data->cr->stderrlen_sofar < data->cr->stderrlen) {
        while((inlen =
                 read(e->fd,
                      data->cr->stderrbuff + data->cr->stderrlen_sofar,
                      data->cr->stderrlen - data->cr->stderrlen_sofar)) == -1 &&
              errno == EINTR);
        if(inlen == -1 && errno == EAGAIN)
          return EVENTER_READ | EVENTER_EXCEPTION;
        if(inlen == 0) goto widowed;
        data->cr->stderrlen_sofar += inlen;
      }
      assert(data->cr->stderrbuff[data->cr->stderrlen-1] == '\0');
    }
    assert(data->cr && data->cr->stdoutbuff && data->cr->stderrbuff);

    gettimeofday(now, NULL); /* set it, as we care about accuracy */

    /* Lookup data in check_no hash */
    if(noit_hash_retrieve(&data->external_checks,
                          (const char *)&data->cr->check_no,
                          sizeof(data->cr->check_no),
                          &vci) == 0)
      vci = NULL;
    ci = (struct check_info *)vci;

    /* We've seen it, it ain't coming again...
     * remove it, we'll free it ourselves */
    noit_hash_delete(&data->external_checks,
                     (const char *)&data->cr->check_no,
                     sizeof(data->cr->check_no), NULL, NULL);

    /* If there is no timeout_event, the check must have completed.
     * We have nothing to do. */
    if(!ci || !ci->timeout_event) {
      free(data->cr->stdoutbuff);
      free(data->cr->stderrbuff);
      free(data->cr);
      data->cr = NULL;
      continue;
    }

    ci->exit_code = data->cr->exit_code;
    ci->output = data->cr->stdoutbuff;
    ci->error = data->cr->stderrbuff;
    free(data->cr);
    data->cr = NULL;
    check = ci->check;
    external_log_results(self, check);
    eventer_remove(ci->timeout_event);
    free(ci->timeout_event->closure);
    eventer_free(ci->timeout_event);
    ci->timeout_event = NULL;
    check->flags &= ~NP_RUNNING;
  }

 widowed:
  noitL(noit_error, "external module terminated, must restart.\n");
  exit(1);
}
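/*
 * Illustrative sketch (hypothetical; the real worker side is not shown):
 * external_handler above expects, per result, a fixed header of
 * {check_no, exit_code, stdoutlen} followed by stdoutlen bytes of
 * NUL-terminated stdout, then stderrlen and NUL-terminated stderr. A writer
 * producing that framing might look like this; the exact integer widths
 * mirror nothing authoritative and are assumptions.
 */
static int write_external_result_sketch(int fd, int64_t check_no,
                                        int32_t exit_code,
                                        const char *out, const char *err) {
  u_int32_t stdoutlen = strlen(out) + 1;   /* include the trailing NUL */
  u_int32_t stderrlen = strlen(err) + 1;
  if(write(fd, &check_no, sizeof(check_no)) < 0) return -1;
  if(write(fd, &exit_code, sizeof(exit_code)) < 0) return -1;
  if(write(fd, &stdoutlen, sizeof(stdoutlen)) < 0) return -1;
  if(write(fd, out, stdoutlen) < 0) return -1;
  if(write(fd, &stderrlen, sizeof(stderrlen)) < 0) return -1;
  if(write(fd, err, stderrlen) < 0) return -1;
  return 0;
}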
static int ssh2_initiate(noit_module_t *self, noit_check_t *check,
                         noit_check_t *cause) {
  ssh2_check_info_t *ci = check->closure;
  struct timeval p_int, __now;
  int fd = -1, rv = -1;
  eventer_t e;
  union {
    struct sockaddr_in sin;
    struct sockaddr_in6 sin6;
  } sockaddr;
  socklen_t sockaddr_len;
  unsigned short ssh_port = DEFAULT_SSH_PORT;
  const char *port_str = NULL;

  /* We cannot be running */
  BAIL_ON_RUNNING_CHECK(check);
  check->flags |= NP_RUNNING;

  ci->self = self;
  ci->check = check;

  ci->timed_out = 1;
  if(ci->timeout_event) {
    eventer_remove(ci->timeout_event);
    free(ci->timeout_event->closure);
    eventer_free(ci->timeout_event);
    ci->timeout_event = NULL;
  }

  gettimeofday(&__now, NULL);
  memcpy(&check->last_fire_time, &__now, sizeof(__now));

  if(check->target_ip[0] == '\0') {
    ci->error = strdup("name resolution failure");
    goto fail;
  }

  /* Open a socket */
  fd = socket(check->target_family, NE_SOCK_CLOEXEC|SOCK_STREAM, 0);
  if(fd < 0) goto fail;

  /* Make it non-blocking */
  if(eventer_set_fd_nonblocking(fd)) goto fail;

  if(noit_hash_retr_str(check->config, "port", strlen("port"),
                        &port_str)) {
    ssh_port = (unsigned short)atoi(port_str);
  }

#define config_method(a) do { \
  const char *v; \
  if(noit_hash_retr_str(check->config, "method_" #a, strlen("method_" #a), \
                        &v)) \
    ci->methods.a = strdup(v); \
} while(0)
  config_method(kex);
  config_method(hostkey);
  config_method(crypt_cs);
  config_method(crypt_sc);
  config_method(mac_cs);
  config_method(mac_sc);
  config_method(comp_cs);
  config_method(comp_sc);

  memset(&sockaddr, 0, sizeof(sockaddr));
  sockaddr.sin6.sin6_family = check->target_family;
  if(check->target_family == AF_INET) {
    memcpy(&sockaddr.sin.sin_addr, &check->target_addr.addr,
           sizeof(sockaddr.sin.sin_addr));
    sockaddr.sin.sin_port = htons(ssh_port);
    sockaddr_len = sizeof(sockaddr.sin);
  }
  else {
    memcpy(&sockaddr.sin6.sin6_addr, &check->target_addr.addr6,
           sizeof(sockaddr.sin6.sin6_addr));
    sockaddr.sin6.sin6_port = htons(ssh_port);
    sockaddr_len = sizeof(sockaddr.sin6);
  }

  /* Initiate a connection */
  rv = connect(fd, (struct sockaddr *)&sockaddr, sockaddr_len);
  if(rv == -1 && errno != EINPROGRESS) goto fail;

  /* Register a handler for connection completion */
  e = eventer_alloc();
  e->fd = fd;
  e->mask = EVENTER_READ | EVENTER_WRITE | EVENTER_EXCEPTION;
  e->callback = ssh2_connect_complete;
  e->closure = ci;
  ci->synch_fd_event = e;
  eventer_add(e);

  e = eventer_alloc();
  e->mask = EVENTER_TIMER;
  e->callback = ssh2_connect_timeout;
  e->closure = ci;
  memcpy(&e->whence, &__now, sizeof(__now));
  p_int.tv_sec = check->timeout / 1000;
  p_int.tv_usec = (check->timeout % 1000) * 1000;
  add_timeval(e->whence, p_int, &e->whence);
  ci->timeout_event = e;
  eventer_add(e);
  return 0;

 fail:
  if(fd >= 0) close(fd);
  ssh2_log_results(ci->self, ci->check);
  ssh2_cleanup(ci->self, ci->check);
  check->flags &= ~NP_RUNNING;
  return -1;
}
static void eventer_ports_impl_trigger(eventer_t e, int mask) {
  ev_lock_state_t lockstate;
  const char *cbname;
  struct timeval __now;
  int fd, newmask;
  uint64_t start, duration;
  int cross_thread = mask & EVENTER_CROSS_THREAD_TRIGGER;

  mask = mask & ~(EVENTER_RESERVED);
  fd = e->fd;
  if(cross_thread) {
    if(master_fds[fd].e != NULL) {
      mtevL(eventer_deb, "Attempting to trigger already-registered event fd: %d cross thread.\n", fd);
    }
    /* mtevAssert(master_fds[fd].e == NULL); */
  }
  if(!pthread_equal(pthread_self(), e->thr_owner)) {
    /* If we're triggering across threads, it can't be registered yet */
    if(master_fds[fd].e != NULL) {
      mtevL(eventer_deb, "Attempting to trigger already-registered event fd: %d cross thread.\n", fd);
    }
    /* mtevAssert(master_fds[fd].e == NULL); */
    eventer_cross_thread_trigger(e,mask);
    return;
  }
  if(master_fds[fd].e == NULL) {
    lockstate = acquire_master_fd(fd);
    if (lockstate == EV_ALREADY_OWNED) {
      /* The incoming triggered event is already owned by this thread.
       * This means our floated event completed before the current
       * event handler even exited. So it retriggered recursively
       * from inside the event handler.
       *
       * Treat this special case the same as a cross thread trigger
       * and just queue this event to be picked up on the next loop
       */
      eventer_cross_thread_trigger(e, mask);
      return;
    }
    release_master_fd(fd, lockstate);
    master_fds[fd].e = e;
    e->mask = 0;
  }
  if(e != master_fds[fd].e) return;
  lockstate = acquire_master_fd(fd);
  if(lockstate == EV_ALREADY_OWNED) {
    mtevL(eventer_deb, "Incoming event: %p already owned by this thread\n", e);
    return;
  }
  mtevAssert(lockstate == EV_OWNED);

  eventer_mark_callback_time();
  eventer_gettimeofcallback(&__now, NULL);
  cbname = eventer_name_for_callback_e(e->callback, e);
  mtevL(eventer_deb, "ports: fire on %d/%x to %s(%p)\n",
        fd, mask, cbname?cbname:"???", e->callback);
  mtev_memory_begin();
  LIBMTEV_EVENTER_CALLBACK_ENTRY((void *)e, (void *)e->callback,
                                 (char *)cbname, fd, e->mask, mask);
  start = mtev_gethrtime();
  newmask = eventer_run_callback(e, mask, e->closure, &__now);
  duration = mtev_gethrtime() - start;
  LIBMTEV_EVENTER_CALLBACK_RETURN((void *)e, (void *)e->callback,
                                  (char *)cbname, newmask);
  mtev_memory_end();
  stats_set_hist_intscale(eventer_callback_latency, duration, -9, 1);
  stats_set_hist_intscale(eventer_latency_handle_for_callback(e->callback),
                          duration, -9, 1);

  if(newmask) {
    if(!pthread_equal(pthread_self(), e->thr_owner)) {
      pthread_t tgt = e->thr_owner;
      e->thr_owner = pthread_self();
      alter_fd(e, 0);
      e->thr_owner = tgt;
      alter_fd(e, newmask);
      mtevL(eventer_deb, "moved event[%p] from t@%d to t@%d\n",
            e, (int)pthread_self(), (int)tgt);
    }
    else {
      alter_fd(e, newmask);
      /* Set our mask */
      e->mask = newmask;
      mtevL(eventer_deb, "ports: complete on %d/(%x->%x) to %s(%p)\n",
            fd, mask, newmask, cbname?cbname:"???", e->callback);
    }
  }
  else {
    mtevL(eventer_deb, "ports: complete on %d/none to %s(%p)\n",
          fd, cbname?cbname:"???", e->callback);
    /*
     * Long story long:
     * When integrating with a few external event systems, we find
     * it difficult to make their use of remove+add act as an update,
     * as it can be recurrent in a single handler call and you cannot
     * remove completely from the event system if you are going to
     * just update (otherwise the eventer_t in your call stack could
     * be stale). What we do is perform a superficial remove, marking
     * the mask as 0, but not eventer_remove_fd. Then on an add, if
     * we already have an event, we just update the mask (as we
     * have not yet returned to the eventer's loop).
     * This leaves us in a tricky situation: when a remove is called
     * and the add doesn't roll in, we return 0 (mask == 0) and hit
     * this spot. We have intended to remove the event, but it still
     * resides at master_fds[fd].e -- even after we free it.
     * So, in the event that we return 0 and
     * master_fds[fd].e == the event we're about to free... we NULL
     * it out.
     */
    if(master_fds[fd].e == e) master_fds[fd].e = NULL;
    eventer_free(e);
  }
  release_master_fd(fd, lockstate);
}
static void eventer_epoll_impl_trigger(eventer_t e, int mask) {
  struct epoll_spec *spec;
  struct timeval __now;
  int fd, newmask;
  const char *cbname;
  ev_lock_state_t lockstate;
  int cross_thread = mask & EVENTER_CROSS_THREAD_TRIGGER;
  int added_to_master_fds = 0;
  u_int64_t start, duration;

  mask = mask & ~(EVENTER_RESERVED);
  fd = e->fd;
  if(cross_thread) {
    if(master_fds[fd].e != NULL) {
      mtevL(eventer_deb, "Attempting to trigger already-registered event fd: %d cross thread.\n", fd);
    }
    /* mtevAssert(master_fds[fd].e == NULL); */
  }
  if(!pthread_equal(pthread_self(), e->thr_owner)) {
    /* If we're triggering across threads, it can't be registered yet */
    if(master_fds[fd].e != NULL) {
      mtevL(eventer_deb, "Attempting to trigger already-registered event fd: %d cross thread.\n", fd);
    }
    /* mtevAssert(master_fds[fd].e == NULL); */
    eventer_cross_thread_trigger(e,mask);
    return;
  }
  if(master_fds[fd].e == NULL) {
    master_fds[fd].e = e;
    e->mask = 0;
    added_to_master_fds = 1;
  }
  if(e != master_fds[fd].e) return;
  lockstate = acquire_master_fd(fd);
  if(lockstate == EV_ALREADY_OWNED) return;
  mtevAssert(lockstate == EV_OWNED);
  mtev_gettimeofday(&__now, NULL);
  cbname = eventer_name_for_callback_e(e->callback, e);
  mtevLT(eventer_deb, &__now, "epoll: fire on %d/%x to %s(%p)\n",
         fd, mask, cbname?cbname:"???", e->callback);
  mtev_memory_begin();
  LIBMTEV_EVENTER_CALLBACK_ENTRY((void *)e, (void *)e->callback,
                                 (char *)cbname, fd, e->mask, mask);
  start = mtev_gethrtime();
  newmask = e->callback(e, mask, e->closure, &__now);
  duration = mtev_gethrtime() - start;
  LIBMTEV_EVENTER_CALLBACK_RETURN((void *)e, (void *)e->callback,
                                  (char *)cbname, newmask);
  mtev_memory_end();
  stats_set_hist_intscale(eventer_callback_latency, duration, -9, 1);
  stats_set_hist_intscale(eventer_latency_handle_for_callback(e->callback),
                          duration, -9, 1);
  if(newmask) {
    struct epoll_event _ev;
    memset(&_ev, 0, sizeof(_ev));
    _ev.data.fd = fd;
    if(newmask & EVENTER_READ) _ev.events |= (EPOLLIN|EPOLLPRI);
    if(newmask & EVENTER_WRITE) _ev.events |= (EPOLLOUT);
    if(newmask & EVENTER_EXCEPTION) _ev.events |= (EPOLLERR|EPOLLHUP);
    if(master_fds[fd].e == NULL) {
      mtevL(mtev_debug, "eventer %s(%p) epoll asked to modify descheduled fd: %d\n",
            cbname?cbname:"???", e->callback, fd);
    }
    else {
      if(!pthread_equal(pthread_self(), e->thr_owner)) {
        pthread_t tgt = e->thr_owner;
        e->thr_owner = pthread_self();
        spec = eventer_get_spec_for_event(e);
        if(!added_to_master_fds &&
           epoll_ctl(spec->epoll_fd, EPOLL_CTL_DEL, fd, &_ev) != 0) {
          mtevFatal(mtev_error,
                    "epoll_ctl(spec->epoll_fd, EPOLL_CTL_DEL, fd, &_ev) failed; "
                    "spec->epoll_fd: %d; fd: %d; errno: %d (%s)\n",
                    spec->epoll_fd, fd, errno, strerror(errno));
        }
        e->thr_owner = tgt;
        spec = eventer_get_spec_for_event(e);
        mtevAssert(epoll_ctl(spec->epoll_fd, EPOLL_CTL_ADD, fd, &_ev) == 0);
        mtevL(eventer_deb, "moved event[%p] from t@%d to t@%d\n",
              e, (int)pthread_self(), (int)tgt);
      }
      else {
        int epoll_cmd = added_to_master_fds ? EPOLL_CTL_ADD : EPOLL_CTL_MOD;
        spec = eventer_get_spec_for_event(e);
        if(epoll_ctl(spec->epoll_fd, epoll_cmd, fd, &_ev) != 0) {
          const char *cb_name = eventer_name_for_callback_e(e->callback, e);
          mtevFatal(mtev_error,
                    "epoll_ctl(spec->epoll_fd, EPOLL_CTL_MOD, fd, &_ev) failed; "
                    "spec->epoll_fd: %d; fd: %d; errno: %d (%s); callback: %s\n",
                    spec->epoll_fd, fd, errno, strerror(errno),
                    cb_name ? cb_name : "???");
        }
      }
    }
    /* Set our mask */
    e->mask = newmask;
  }
  else {
    /* see kqueue implementation for details on the next line */
    if(master_fds[fd].e == e) master_fds[fd].e = NULL;
    eventer_free(e);
  }
  release_master_fd(fd, lockstate);
}
static int ping_icmp_handler(eventer_t e, int mask,
                             void *closure, struct timeval *now,
                             u_int8_t family) {
  noit_module_t *self = (noit_module_t *)closure;
  ping_icmp_data_t *ping_data;
  struct check_info *data;
  char packet[1500];
  int packet_len = sizeof(packet);
  union {
    struct sockaddr_in in4;
    struct sockaddr_in6 in6;
  } from;
  unsigned int from_len;
  struct ping_payload *payload;

  if(family != AF_INET && family != AF_INET6) return EVENTER_READ;

  ping_data = noit_module_get_userdata(self);
  while(1) {
    struct ping_session_key k;
    int inlen;
    u_int8_t iphlen = 0;
    void *vcheck;
    noit_check_t *check;
    struct timeval tt, whence;

    from_len = sizeof(from);
    inlen = recvfrom(e->fd, packet, packet_len, 0,
                     (struct sockaddr *)&from, &from_len);
    mtev_gettimeofday(now, NULL); /* set it, as we care about accuracy */

    if(inlen < 0) {
      if(errno == EAGAIN || errno == EINTR) break;
      mtevLT(nldeb, now, "ping_icmp recvfrom: %s\n", strerror(errno));
      break;
    }

    if(family == AF_INET) {
      struct icmp *icp4;
      iphlen = ((struct ip *)packet)->ip_hl << 2;
      if((inlen-iphlen) != sizeof(struct icmp)+PING_PAYLOAD_LEN) {
        mtevLT(nldeb, now,
               "ping_icmp bad size: %d+%d\n", iphlen, inlen-iphlen);
        continue;
      }
      icp4 = (struct icmp *)(packet + iphlen);
      payload = (struct ping_payload *)(icp4 + 1);
      if(icp4->icmp_type != ICMP_ECHOREPLY) {
        mtevLT(nldeb, now, "ping_icmp bad type: %d\n", icp4->icmp_type);
        continue;
      }
      if(icp4->icmp_id != (((vpsized_uint)self) & 0xffff)) {
        mtevLT(nldeb, now,
               "ping_icmp not sent from this instance (%d:%d) vs. %lu\n",
               icp4->icmp_id, ntohs(icp4->icmp_seq),
               (unsigned long)(((vpsized_uint)self) & 0xffff));
        continue;
      }
    }
    else if(family == AF_INET6) {
      struct icmp6_hdr *icp6 = (struct icmp6_hdr *)packet;
      if((inlen) != sizeof(struct icmp6_hdr)+PING_PAYLOAD_LEN) {
        mtevLT(nldeb, now,
               "ping_icmp bad size: %d+%d\n", iphlen, inlen-iphlen);
        continue;
      }
      payload = (struct ping_payload *)(icp6+1);
      if(icp6->icmp6_type != ICMP6_ECHO_REPLY) {
        mtevLT(nldeb, now, "ping_icmp bad type: %d\n", icp6->icmp6_type);
        continue;
      }
      if(icp6->icmp6_id != (((vpsized_uint)self) & 0xffff)) {
        mtevLT(nldeb, now,
               "ping_icmp not sent from this instance (%d:%d) vs. %lu\n",
               icp6->icmp6_id, ntohs(icp6->icmp6_seq),
               (unsigned long)(((vpsized_uint)self) & 0xffff));
        continue;
      }
    }
    else {
      /* This should be unreachable */
      continue;
    }

    check = NULL;
    k.addr_of_check = payload->addr_of_check;
    uuid_copy(k.checkid, payload->checkid);
    if(mtev_hash_retrieve(ping_data->in_flight,
                          (const char *)&k, sizeof(k),
                          &vcheck))
      check = vcheck;

    /* make sure this check is from this generation! */
    if(!check) {
      char uuid_str[37];
      uuid_unparse_lower(payload->checkid, uuid_str);
      mtevLT(nldeb, now,
             "ping_icmp response for unknown check '%s'\n", uuid_str);
      continue;
    }
    if((check->generation & 0xffff) != payload->generation) {
      mtevLT(nldeb, now, "ping_icmp response in generation gap\n");
      continue;
    }
    data = (struct check_info *)check->closure;

    /* If there is no timeout_event, the check must have completed.
     * We have nothing to do. */
    if(!data->timeout_event) continue;

    /* Sanity check the payload */
    if(payload->check_no != data->check_no) continue;
    if(payload->check_pack_cnt != data->expected_count) continue;
    if(payload->check_pack_no >= data->expected_count) continue;

    whence.tv_sec = payload->tv_sec;
    whence.tv_usec = payload->tv_usec;
    sub_timeval(*now, whence, &tt);
    data->turnaround[payload->check_pack_no] =
      (float)tt.tv_sec + (float)tt.tv_usec / 1000000.0;
    if(ping_icmp_is_complete(self, check)) {
      ping_icmp_log_results(self, check);
      eventer_remove(data->timeout_event);
      free(data->timeout_event->closure);
      eventer_free(data->timeout_event);
      data->timeout_event = NULL;
      check->flags &= ~NP_RUNNING;
      k.addr_of_check = (vpsized_uint)check ^ random_num;
      uuid_copy(k.checkid, check->checkid);
      mtev_hash_delete(ping_data->in_flight,
                       (const char *)&k, sizeof(k), free, NULL);
    }
  }
  return EVENTER_READ;
}
static void dns_cb(struct dns_ctx *ctx, void *result, void *data) {
  int r = dns_status(ctx);
  int len, i;
  struct dns_check_info *ci = data;
  struct dns_parse p;
  struct dns_rr rr;
  unsigned nrr;
  unsigned char dn[DNS_MAXDN];
  const unsigned char *pkt, *cur, *end;
  char *result_str[MAX_RR] = { NULL };
  char *result_combined = NULL;

  /* If our ci isn't active, we must have timed out already */
  if(!__isactive_ci(ci)) {
    if(result) free(result);
    return;
  }

  ci->timed_out = 0;
  /* If we don't have a result, explode */
  if (!result) {
    ci->error = strdup(dns_strerror(r));
    goto cleanup;
  }

  /* Process the packet */
  pkt = result; end = pkt + r;
  cur = dns_payload(pkt);
  dns_getdn(pkt, &cur, end, dn, sizeof(dn));
  dns_initparse(&p, NULL, pkt, cur, end);
  p.dnsp_qcls = 0;
  p.dnsp_qtyp = 0;
  nrr = 0;

  while((r = dns_nextrr(&p, &rr)) > 0) {
    if (!dns_dnequal(dn, rr.dnsrr_dn)) continue;
    if ((ci->query_ctype == DNS_C_ANY || ci->query_ctype == rr.dnsrr_cls) &&
        (ci->query_rtype == DNS_T_ANY || ci->query_rtype == rr.dnsrr_typ))
      ++nrr;
    else if (rr.dnsrr_typ == DNS_T_CNAME && !nrr) {
      if (dns_getdn(pkt, &rr.dnsrr_dptr, end,
                    p.dnsp_dnbuf, sizeof(p.dnsp_dnbuf)) <= 0 ||
          rr.dnsrr_dptr != rr.dnsrr_dend) {
        ci->error = strdup("protocol error");
        break;
      }
      else {
        int32_t on = 1;
        /* This actually updates what we're looking for */
        dns_dntodn(p.dnsp_dnbuf, ci->dn, sizeof(dn));
        noit_stats_set_metric(ci->check, &ci->current, "cname",
                              METRIC_INT32, &on);

        /* Now follow the leader */
        noitL(nldeb, "%s. CNAME %s.\n",
              dns_dntosp(dn), dns_dntosp(p.dnsp_dnbuf));
        dns_dntodn(p.dnsp_dnbuf, dn, sizeof(dn));
        noitL(nldeb, " ---> '%s'\n", dns_dntosp(dn));
      }
    }
  }
  if (!r && !nrr) {
    ci->error = strdup("no data");
  }

  dns_rewind(&p, NULL);
  p.dnsp_qtyp = ci->query_rtype == DNS_T_ANY ? 0 : ci->query_rtype;
  p.dnsp_qcls = ci->query_ctype == DNS_C_ANY ? 0 : ci->query_ctype;
  while(dns_nextrr(&p, &rr) && ci->nrr < MAX_RR)
    decode_rr(ci, &p, &rr, &result_str[ci->nrr]);
  if(ci->sort)
    qsort(result_str, ci->nrr, sizeof(*result_str), cstring_cmp);

  /* calculate the length and allocate on the stack */
  len = 0;
  for(i=0; i<ci->nrr; i++) len += strlen(result_str[i]) + 2;
  result_combined = alloca(len);
  result_combined[0] = '\0';

  /* string it together */
  len = 0;
  for(i=0; i<ci->nrr; i++) {
    int slen;
    if(i) { memcpy(result_combined + len, ", ", 2); len += 2; }
    slen = strlen(result_str[i]);
    memcpy(result_combined + len, result_str[i], slen);
    len += slen;
    result_combined[len] = '\0';
    free(result_str[i]); /* free as we go */
  }
  noit_stats_set_metric(ci->check, &ci->current, "answer",
                        METRIC_STRING, result_combined);

 cleanup:
  if(result) free(result);
  if(ci->timeout_event) {
    eventer_t e = eventer_remove(ci->timeout_event);
    ci->timeout_event = NULL;
    if(e) eventer_free(e);
  }
  ci->check->flags &= ~NP_RUNNING;
  dns_check_log_results(ci);
  __deactivate_ci(ci);
}
static void eventer_kqueue_impl_trigger(eventer_t e, int mask) {
  ev_lock_state_t lockstate;
  struct timeval __now;
  int oldmask, newmask;
  const char *cbname;
  int fd;

  fd = e->fd;
  if(e != master_fds[fd].e) return;
  lockstate = acquire_master_fd(fd);
  if(lockstate == EV_ALREADY_OWNED) return;
  assert(lockstate == EV_OWNED);
  gettimeofday(&__now, NULL);
  /* We're going to lie to ourselves.  You'd think this should be:
   * oldmask = e->mask;  However, we just fired with masks[fd], so
   * kqueue is clearly looking for all of the events in masks[fd].
   * So, we combine them "just to be safe."
   */
  oldmask = e->mask | masks[fd];
  cbname = eventer_name_for_callback(e->callback);
  noitLT(eventer_deb, &__now, "kqueue: fire on %d/%x to %s(%p)\n",
         fd, masks[fd], cbname?cbname:"???", e->callback);
  newmask = e->callback(e, mask, e->closure, &__now);
  if(newmask) {
    /* toggle the read bits if needed */
    if(newmask & (EVENTER_READ | EVENTER_EXCEPTION)) {
      if(!(oldmask & (EVENTER_READ | EVENTER_EXCEPTION)))
        ke_change(fd, EVFILT_READ, EV_ADD | EV_ENABLE, e);
    }
    else if(oldmask & (EVENTER_READ | EVENTER_EXCEPTION))
      ke_change(fd, EVFILT_READ, EV_DELETE | EV_DISABLE, e);

    /* toggle the write bits if needed */
    if(newmask & EVENTER_WRITE) {
      if(!(oldmask & EVENTER_WRITE))
        ke_change(fd, EVFILT_WRITE, EV_ADD | EV_ENABLE, e);
    }
    else if(oldmask & EVENTER_WRITE)
      ke_change(fd, EVFILT_WRITE, EV_DELETE | EV_DISABLE, e);

    /* Set our mask */
    e->mask = newmask;
  }
  else {
    /*
     * Long story long:
     * When integrating with a few external event systems, we find
     * it difficult to make their use of remove+add act as an update,
     * as it can be recurrent in a single handler call and you cannot
     * remove completely from the event system if you are going to
     * just update (otherwise the eventer_t in your call stack could
     * be stale). What we do is perform a superficial remove, marking
     * the mask as 0, but not eventer_remove_fd. Then on an add, if
     * we already have an event, we just update the mask (as we
     * have not yet returned to the eventer's loop).
     * This leaves us in a tricky situation: when a remove is called
     * and the add doesn't roll in, we return 0 (mask == 0) and hit
     * this spot. We have intended to remove the event, but it still
     * resides at master_fds[fd].e -- even after we free it.
     * So, in the event that we return 0 and
     * master_fds[fd].e == the event we're about to free... we NULL
     * it out.
     */
    if(master_fds[fd].e == e) master_fds[fd].e = NULL;
    eventer_free(e);
  }
  release_master_fd(fd, lockstate);
}
int noit_jlog_handler(eventer_t e, int mask, void *closure,
                      struct timeval *now) {
  eventer_t newe;
  pthread_t tid;
  pthread_attr_t tattr;
  int newmask = EVENTER_READ | EVENTER_EXCEPTION;
  acceptor_closure_t *ac = closure;
  noit_jlog_closure_t *jcl = ac->service_ctx;
  char errbuff[256];
  const char *errstr = "unknown error";

  if(mask & EVENTER_EXCEPTION || (jcl && jcl->wants_shutdown)) {
    int len, nlen;
 socket_error:
    /* Exceptions cause us to simply snip the connection */
    len = strlen(errstr);
    nlen = htonl(0 - len);
    e->opset->write(e->fd, &nlen, sizeof(nlen), &newmask, e);
    e->opset->write(e->fd, errstr, strlen(errstr), &newmask, e);
    eventer_remove_fd(e->fd);
    e->opset->close(e->fd, &newmask, e);
    if(jcl) noit_jlog_closure_free(jcl);
    acceptor_closure_free(ac);
    return 0;
  }

  if(!ac->service_ctx) {
    noit_log_stream_t ls;
    const char *logname, *type;
    int first_attempt = 1;
    char path[PATH_MAX], subscriber[256], *sub;
    jcl = ac->service_ctx = noit_jlog_closure_alloc();
    if(!noit_hash_retr_str(ac->config,
                           "log_transit_feed_name",
                           strlen("log_transit_feed_name"),
                           &logname)) {
      errstr = "No 'log_transit_feed_name' specified in log_transit.";
      noitL(noit_error, "%s\n", errstr);
      goto socket_error;
    }
    ls = noit_log_stream_find(logname);
    if(!ls) {
      snprintf(errbuff, sizeof(errbuff),
               "Could not find log '%s' for log_transit.", logname);
      errstr = errbuff;
      noitL(noit_error, "%s\n", errstr);
      goto socket_error;
    }
    type = noit_log_stream_get_type(ls);
    if(!type || strcmp(type, "jlog")) {
      snprintf(errbuff, sizeof(errbuff),
               "Log '%s' for log_transit is not a jlog.", logname);
      errstr = errbuff;
      noitL(noit_error, "%s\n", errstr);
      goto socket_error;
    }
    if(ac->cmd == NOIT_JLOG_DATA_FEED) {
      if(!ac->remote_cn) {
        errstr = "jlog transit started to unidentified party.";
        noitL(noit_error, "%s\n", errstr);
        goto socket_error;
      }
      strlcpy(subscriber, ac->remote_cn, sizeof(subscriber));
      jcl->feed_stats = noit_jlog_feed_stats(subscriber);
    }
    else {
      jcl->feed_stats = noit_jlog_feed_stats("~");
      snprintf(subscriber, sizeof(subscriber),
               "~%07d", noit_atomic_inc32(&tmpfeedcounter));
    }
    jcl->subscriber = strdup(subscriber);

    strlcpy(path, noit_log_stream_get_path(ls), sizeof(path));
    sub = strchr(path, '(');
    if(sub) {
      char *esub = strchr(sub, ')');
      if(esub) {
        *esub = '\0';
        *sub++ = '\0';
      }
    }

    jcl->jlog = jlog_new(path);
    if(ac->cmd == NOIT_JLOG_DATA_TEMP_FEED) {
 add_sub:
      if(jlog_ctx_add_subscriber(jcl->jlog, jcl->subscriber,
                                 JLOG_END) == -1) {
        snprintf(errbuff, sizeof(errbuff),
                 "jlog reader[%s] error: %s", jcl->subscriber,
                 jlog_ctx_err_string(jcl->jlog));
        errstr = errbuff;
        noitL(noit_error, "%s\n", errstr);
      }
    }
    if(jlog_ctx_open_reader(jcl->jlog, jcl->subscriber) == -1) {
      if(sub && !strcmp(sub, "*")) {
        if(first_attempt) {
          jlog_ctx_close(jcl->jlog);
          jcl->jlog = jlog_new(path);
          first_attempt = 0;
          goto add_sub;
        }
      }
      snprintf(errbuff, sizeof(errbuff),
               "jlog reader[%s] error: %s", jcl->subscriber,
               jlog_ctx_err_string(jcl->jlog));
      errstr = errbuff;
      noitL(noit_error, "%s\n", errstr);
      goto socket_error;
    }
  }

  /* The jlog stuff is disk I/O and can block us.
   * We'll create a new thread to just handle this connection.
   */
  eventer_remove_fd(e->fd);
  newe = eventer_alloc();
  memcpy(newe, e, sizeof(*e));
  pthread_attr_init(&tattr);
  pthread_attr_setdetachstate(&tattr, PTHREAD_CREATE_DETACHED);
  gettimeofday(&jcl->feed_stats->last_connection, NULL);
  noit_atomic_inc32(&jcl->feed_stats->connections);
  if(pthread_create(&tid, &tattr, noit_jlog_thread_main, newe) == 0) {
    return 0;
  }

  /* Undo our dup */
  eventer_free(newe);
  /* Creating the thread failed, close it down and deschedule. */
  e->opset->close(e->fd, &newmask, e);
  return 0;
}
static void eventer_kqueue_impl_trigger(eventer_t e, int mask) {
  ev_lock_state_t lockstate;
  struct timeval __now;
  int oldmask, newmask;
  const char *cbname;
  int fd;
  u_int64_t start, duration;
  int cross_thread = mask & EVENTER_CROSS_THREAD_TRIGGER;

  mask = mask & ~(EVENTER_RESERVED);
  fd = e->fd;
  if(cross_thread) {
    if(master_fds[fd].e != NULL) {
      mtevL(eventer_deb, "Attempting to trigger already-registered event fd: %d cross thread.\n", fd);
    }
    /* mtevAssert(master_fds[fd].e == NULL); */
  }
  if(!pthread_equal(pthread_self(), e->thr_owner)) {
    /* If we're triggering across threads, it can't be registered yet */
    if(master_fds[fd].e != NULL) {
      mtevL(eventer_deb, "Attempting to trigger already-registered event fd: %d cross thread.\n", fd);
    }
    /* mtevAssert(master_fds[fd].e == NULL); */
    eventer_cross_thread_trigger(e,mask);
    return;
  }
  if(master_fds[fd].e == NULL) {
    master_fds[fd].e = e;
    e->mask = 0;
  }
  if(e != master_fds[fd].e) return;
  lockstate = acquire_master_fd(fd);
  if(lockstate == EV_ALREADY_OWNED) return;
  mtevAssert(lockstate == EV_OWNED);
  mtev_gettimeofday(&__now, NULL);
  /* We're going to lie to ourselves.  You'd think this should be:
   * oldmask = e->mask;  However, we just fired with masks[fd], so
   * kqueue is clearly looking for all of the events in masks[fd].
   * So, we combine them "just to be safe."
   */
  oldmask = e->mask | masks[fd];
  cbname = eventer_name_for_callback_e(e->callback, e);
  mtevLT(eventer_deb, &__now, "kqueue: fire on %d/%x to %s(%p)\n",
         fd, masks[fd], cbname?cbname:"???", e->callback);
  mtev_memory_begin();
  LIBMTEV_EVENTER_CALLBACK_ENTRY((void *)e, (void *)e->callback,
                                 (char *)cbname, fd, e->mask, mask);
  start = mtev_gethrtime();
  newmask = e->callback(e, mask, e->closure, &__now);
  duration = mtev_gethrtime() - start;
  LIBMTEV_EVENTER_CALLBACK_RETURN((void *)e, (void *)e->callback,
                                  (char *)cbname, newmask);
  mtev_memory_end();
  stats_set_hist_intscale(eventer_callback_latency, duration, -9, 1);
  stats_set_hist_intscale(eventer_latency_handle_for_callback(e->callback),
                          duration, -9, 1);
  if(newmask) {
    if(!pthread_equal(pthread_self(), e->thr_owner)) {
      pthread_t tgt = e->thr_owner;
      e->thr_owner = pthread_self();
      alter_kqueue_mask(e, oldmask, 0);
      e->thr_owner = tgt;
      mtevL(eventer_deb, "moved event[%p] from t@%llx to t@%llx\n",
            e, (vpsized_int)pthread_self(), (vpsized_int)tgt);
      if(newmask)
        eventer_cross_thread_trigger(e, newmask & ~(EVENTER_EXCEPTION));
    }
    else {
      if(master_fds[fd].e != e) {
        e = master_fds[fd].e;
        mtevL(eventer_deb, "%strigger complete [event switched] %d : %x->%x\n",
              cross_thread ? "[X]" : "", e->fd, master_fds[fd].e->mask, newmask);
      }
      else {
        mtevL(eventer_deb, "%strigger complete %d : %x->%x\n",
              cross_thread ? "[X]" : "", e->fd, oldmask, newmask);
      }
      alter_kqueue_mask(e, (e->mask == 0 || cross_thread) ? 0 : oldmask, newmask);
      /* Set our mask */
      e->mask = newmask;
    }
  }
  else {
    /*
     * Long story long:
     * When integrating with a few external event systems, we find
     * it difficult to make their use of remove+add act as an update,
     * as it can be recurrent in a single handler call and you cannot
     * remove completely from the event system if you are going to
     * just update (otherwise the eventer_t in your call stack could
     * be stale). What we do is perform a superficial remove, marking
     * the mask as 0, but not eventer_remove_fd. Then on an add, if
     * we already have an event, we just update the mask (as we
     * have not yet returned to the eventer's loop).
     * This leaves us in a tricky situation: when a remove is called
     * and the add doesn't roll in, we return 0 (mask == 0) and hit
     * this spot. We have intended to remove the event, but it still
     * resides at master_fds[fd].e -- even after we free it.
     * So, in the event that we return 0 and
     * master_fds[fd].e == the event we're about to free... we NULL
     * it out.
     */
    if(master_fds[fd].e == e) master_fds[fd].e = NULL;
    eventer_free(e);
  }
  release_master_fd(fd, lockstate);
}
void eventer_deref(eventer_t e) { eventer_free(e); }
static int noit_listener_acceptor(eventer_t e, int mask,
                                  void *closure, struct timeval *tv) {
  int conn, newmask = EVENTER_READ;
  socklen_t salen;
  listener_closure_t listener_closure = (listener_closure_t)closure;
  acceptor_closure_t *ac = NULL;

  if(mask & EVENTER_EXCEPTION) {
 socketfail:
    if(ac) acceptor_closure_free(ac);
    /* We don't shut down the socket, it's our listener! */
    return EVENTER_READ | EVENTER_WRITE | EVENTER_EXCEPTION;
  }

  do {
    ac = malloc(sizeof(*ac));
    memcpy(ac, listener_closure->dispatch_closure, sizeof(*ac));
    salen = sizeof(ac->remote);
    conn = e->opset->accept(e->fd, &ac->remote.remote_addr, &salen, &newmask, e);
    if(conn >= 0) {
      eventer_t newe;
      noitL(nldeb, "noit_listener[%s] accepted fd %d\n",
            eventer_name_for_callback(listener_closure->dispatch_callback),
            conn);
      if(eventer_set_fd_nonblocking(conn)) {
        close(conn);
        free(ac);
        goto accept_bail;
      }
      newe = eventer_alloc();
      newe->fd = conn;
      newe->mask = EVENTER_READ | EVENTER_WRITE | EVENTER_EXCEPTION;

      if(listener_closure->sslconfig->size) {
        const char *layer, *cert, *key, *ca, *ciphers, *crl;
        eventer_ssl_ctx_t *ctx;
        /* We have an SSL configuration.  While our socket accept is
         * complete, we now have to SSL_accept, which could require
         * several reads and writes and needs its own event callback.
         */
#define SSLCONFGET(var,name) do { \
  if(!noit_hash_retr_str(listener_closure->sslconfig, name, strlen(name), \
                         &var)) var = NULL; } while(0)
        SSLCONFGET(layer, "layer");
        SSLCONFGET(cert, "certificate_file");
        SSLCONFGET(key, "key_file");
        SSLCONFGET(ca, "ca_chain");
        SSLCONFGET(ciphers, "ciphers");
        ctx = eventer_ssl_ctx_new(SSL_SERVER, layer, cert, key, ca, ciphers);
        if(!ctx) {
          newe->opset->close(newe->fd, &newmask, e);
          eventer_free(newe);
          goto socketfail;
        }
        SSLCONFGET(crl, "crl");
        if(crl) {
          if(!eventer_ssl_use_crl(ctx, crl)) {
            noitL(noit_error, "Failed to load CRL from %s\n", crl);
            eventer_ssl_ctx_free(ctx);
            newe->opset->close(newe->fd, &newmask, e);
            eventer_free(newe);
            goto socketfail;
          }
        }
        eventer_ssl_ctx_set_verify(ctx, eventer_ssl_verify_cert,
                                   listener_closure->sslconfig);
        EVENTER_ATTACH_SSL(newe, ctx);
        newe->callback = noit_listener_accept_ssl;
        newe->closure = malloc(sizeof(*listener_closure));
        memcpy(newe->closure, listener_closure, sizeof(*listener_closure));
        ((listener_closure_t)newe->closure)->dispatch_closure = ac;
      }
      else {
        newe->callback = listener_closure->dispatch_callback;
        /* We must make a copy of the acceptor_closure_t for each new
         * connection. */
        newe->closure = ac;
      }
      eventer_add(newe);
    }
    else {
      if(errno == EAGAIN) {
        if(ac) acceptor_closure_free(ac);
      }
      else if(errno != EINTR) {
        noitL(noit_error, "accept socket error: %s\n", strerror(errno));
        goto socketfail;
      }
    }
  } while(conn >= 0);

 accept_bail:
  return newmask | EVENTER_EXCEPTION;
}