/* Register an event with the Solaris event-ports backend.
 * Asynch, recurrent, and timed events are handed off to the generic
 * eventer queues; only file-descriptor events are installed here. */
static void eventer_ports_impl_add(eventer_t e) {
  ev_lock_state_t ls;
  const char *name;

  mtevAssert(e->mask);
  name = eventer_name_for_callback_e(e->callback, e);

  /* Delegate the non-fd event classes first. */
  if(e->mask & EVENTER_ASYNCH) {
    mtevL(eventer_deb, "debug: eventer_add asynch (%s)\n", name ? name : "???");
    eventer_add_asynch(NULL, e);
    return;
  }
  /* Recurrent delegation */
  if(e->mask & EVENTER_RECURRENT) {
    mtevL(eventer_deb, "debug: eventer_add recurrent (%s)\n", name ? name : "???");
    eventer_add_recurrent(e);
    return;
  }
  /* Timed events are simple */
  if(e->mask & EVENTER_TIMER) {
    eventer_add_timed(e);
    return;
  }

  /* What remains is a file-descriptor event: publish it in the master
   * fd table and program the port, all under the per-fd lock. */
  mtevL(eventer_deb, "debug: eventer_add fd (%s,%d,0x%04x)\n",
        name ? name : "???", e->fd, e->mask);
  ls = acquire_master_fd(e->fd);
  /* fd events must not carry a timeout */
  mtevAssert(e->whence.tv_sec == 0 && e->whence.tv_usec == 0);
  master_fds[e->fd].e = e;
  alter_fd(e, e->mask);
  release_master_fd(e->fd, ls);
}
/* Install the child's watchdog heartbeat as a recurrent event so the
 * parent sees a tick on every pass through the event loop.
 * Requires the eventer to already be initialized.  Always returns 0. */
int noit_watchdog_child_eventer_heartbeat() {
  eventer_t hb;

  assert(__eventer);

  /* Set up our heartbeat tick */
  hb = eventer_alloc();
  hb->mask = EVENTER_RECURRENT;
  hb->callback = watchdog_tick;
  eventer_add_recurrent(hb);
  return 0;
}
/* Register an event with the epoll backend.  Asynch, recurrent, and
 * timed events are delegated to the generic eventer queues; only
 * file-descriptor events are added to this thread's epoll set. */
static void eventer_epoll_impl_add(eventer_t e) {
  struct epoll_spec *spec;
  struct epoll_event interest;
  ev_lock_state_t ls;
  int rv;

  mtevAssert(e->mask);

  /* Non-fd event classes are delegated elsewhere. */
  if(e->mask & EVENTER_ASYNCH)    { eventer_add_asynch(NULL, e); return; }
  /* Recurrent delegation */
  if(e->mask & EVENTER_RECURRENT) { eventer_add_recurrent(e);    return; }
  /* Timed events are simple */
  if(e->mask & EVENTER_TIMER)     { eventer_add_timed(e);        return; }

  spec = eventer_get_spec_for_event(e);

  /* file descriptor event: fd events must not carry a timeout */
  mtevAssert(e->whence.tv_sec == 0 && e->whence.tv_usec == 0);

  /* Translate the eventer mask into epoll interest bits. */
  memset(&interest, 0, sizeof(interest));
  interest.data.fd = e->fd;
  if(e->mask & EVENTER_READ)      interest.events |= (EPOLLIN|EPOLLPRI);
  if(e->mask & EVENTER_WRITE)     interest.events |= (EPOLLOUT);
  if(e->mask & EVENTER_EXCEPTION) interest.events |= (EPOLLERR|EPOLLHUP);

  ls = acquire_master_fd(e->fd);
  master_fds[e->fd].e = e;
  mtevL(eventer_deb, "epoll_ctl(%d, add, %d)\n", spec->epoll_fd, e->fd);
  rv = epoll_ctl(spec->epoll_fd, EPOLL_CTL_ADD, e->fd, &interest);
  if(rv != 0) {
    /* A failed registration leaves the loop blind to this fd; fatal. */
    mtevFatal(mtev_error, "epoll_ctl(%d,add,%d,%x) -> %d (%d: %s)\n",
              spec->epoll_fd, e->fd, e->mask, rv, errno, strerror(errno));
  }
  release_master_fd(e->fd, ls);
}
/* Register an event with the kqueue backend.  Asynch, recurrent, and
 * timed events are handed off to the generic eventer queues; fd events
 * are queued as kevent changes (read/exception -> EVFILT_READ,
 * write -> EVFILT_WRITE) under the per-fd lock. */
static void eventer_kqueue_impl_add(eventer_t e) {
  ev_lock_state_t ls;
  const char *name;

  mtevAssert(e->mask);
  /* fd events must be owned by a loop thread */
  mtevAssert(eventer_is_loop(e->thr_owner) >= 0);
  name = eventer_name_for_callback_e(e->callback, e);

  if(e->mask & EVENTER_ASYNCH) {
    mtevL(eventer_deb, "debug: eventer_add asynch (%s)\n", name ? name : "???");
    eventer_add_asynch(NULL, e);
    return;
  }
  /* Recurrent delegation */
  if(e->mask & EVENTER_RECURRENT) {
    mtevL(eventer_deb, "debug: eventer_add recurrent (%s)\n", name ? name : "???");
    eventer_add_recurrent(e);
    return;
  }
  /* Timed events are simple */
  if(e->mask & EVENTER_TIMER) {
    eventer_add_timed(e);
    return;
  }

  /* file descriptor event */
  mtevL(eventer_deb, "debug: eventer_add fd (%s,%d,0x%04x)\n",
        name ? name : "???", e->fd, e->mask);
  /* fd events must not carry a timeout */
  mtevAssert(e->whence.tv_sec == 0 && e->whence.tv_usec == 0);
  ls = acquire_master_fd(e->fd);
  master_fds[e->fd].e = e;
  if(e->mask & (EVENTER_READ | EVENTER_EXCEPTION))
    ke_change(e->fd, EVFILT_READ, EV_ADD | EV_ENABLE, e);
  if(e->mask & (EVENTER_WRITE))
    ke_change(e->fd, EVFILT_WRITE, EV_ADD | EV_ENABLE, e);
  release_master_fd(e->fd, ls);
}
/* Register an event with the epoll backend.  Asynch, recurrent, and
 * timed events are delegated to the generic eventer queues; only
 * file-descriptor events are installed into the epoll set here. */
static void eventer_epoll_impl_add(eventer_t e) {
  int rv;
  struct epoll_event _ev;
  ev_lock_state_t lockstate;
  assert(e->mask);

  if(e->mask & EVENTER_ASYNCH) {
    eventer_add_asynch(NULL, e);
    return;
  }
  /* Recurrent delegation */
  if(e->mask & EVENTER_RECURRENT) {
    eventer_add_recurrent(e);
    return;
  }
  /* Timed events are simple */
  if(e->mask & EVENTER_TIMER) {
    eventer_add_timed(e);
    return;
  }

  /* file descriptor event: fd events must not carry a timeout */
  assert(e->whence.tv_sec == 0 && e->whence.tv_usec == 0);
  memset(&_ev, 0, sizeof(_ev));
  _ev.data.fd = e->fd;
  if(e->mask & EVENTER_READ) _ev.events |= (EPOLLIN|EPOLLPRI);
  if(e->mask & EVENTER_WRITE) _ev.events |= (EPOLLOUT);
  if(e->mask & EVENTER_EXCEPTION) _ev.events |= (EPOLLERR|EPOLLHUP);

  lockstate = acquire_master_fd(e->fd);
  master_fds[e->fd].e = e;
  /* BUG FIX: epoll_ctl() used to be invoked *inside* assert(), so a
   * build with -DNDEBUG would compile the call away entirely and the
   * fd would never be registered with epoll.  Perform the syscall
   * unconditionally and assert only on its result. */
  rv = epoll_ctl(epoll_fd, EPOLL_CTL_ADD, e->fd, &_ev);
  assert(rv == 0);
  release_master_fd(e->fd, lockstate);
}
/* Child-process entry point: (re)load configuration, initialize the
 * eventer and all console/listener/REST subsystems, drop privileges,
 * and enter the event loop (which blocks forever).  The statement
 * order below is deliberate — e.g. config load precedes eventer init,
 * and privilege drop happens only after module load (presumably so
 * modules can perform privileged setup — TODO confirm). */
static int child_main() {
  eventer_t e;

  /* Load our config...
   * to ensure it is current w.r.t. to this child starting */
  if(noit_conf_load(config_file) == -1) {
    noitL(noit_error, "Cannot load config: '%s'\n", config_file);
    exit(2);
  }
  noit_log_reopen_all();
  /* SIGHUP triggers a config reload request, serviced by noitice_hup below. */
  signal(SIGHUP, request_conf_reload);

  /* initialize the eventer */
  if(eventer_init() == -1) {
    noitL(noit_stderr, "Cannot initialize eventer\n");
    exit(-1);
  }
  /* log rotation init requires eventer_init() to have run first */
  noit_conf_log_init_rotate(APPNAME, noit_false);

  /* Setup our heartbeat so the watchdog parent sees us as alive. */
  noit_watchdog_child_eventer_heartbeat();

  /* Recurrent event that notices pending SIGHUP-driven reloads. */
  e = eventer_alloc();
  e->mask = EVENTER_RECURRENT;
  e->callback = noitice_hup;
  eventer_add_recurrent(e);

  /* Initialize all of our listeners */
  noit_console_init(APPNAME);
  noit_console_conf_init();
  noit_console_conf_checks_init();
  noit_capabilities_listener_init();
  noit_jlog_listener_init();
  noit_http_rest_init();
  noit_check_rest_init();
  noit_filters_rest_init();
  noit_livestream_listener_init();
  noit_module_init();
  /* -M (strict_module_load) makes any module load failure fatal. */
  if(strict_module_load && noit_module_load_failures() > 0) {
    noitL(noit_stderr, "Failed to load some modules and -M given.\n");
    exit(2);
  }

  /* Drop privileges (chroot first, then setuid/setgid). */
  if(chrootpath && noit_security_chroot(chrootpath)) {
    noitL(noit_stderr, "Failed to chroot(), exiting.\n");
    exit(2);
  }
  if(noit_security_usergroup(droptouser, droptogroup, noit_false)) {
    noitL(noit_stderr, "Failed to drop privileges, exiting.\n");
    exit(2);
  }

  /* Prepare for launch... */
  noit_filters_init();
  noit_poller_init();
  noit_listener_init(APPNAME);

  /* Write our log out, and setup a watchdog to write it out on change. */
  noit_conf_write_log(NULL);
  noit_conf_coalesce_changes(10); /* 10 seconds of no changes before we write */
  noit_conf_watch_and_journal_watchdog(noit_conf_write_log, NULL);

  /* Service events forever; the return is never normally reached. */
  eventer_loop();
  return 0;
}