/* Main work routine of the threaded destination driver (runs in the worker
 * thread).  Depending on the current state it either (re)connects, drains
 * available queue items, or arms a throttle timer for the next attempt. */
static void
log_threaded_dest_driver_do_work(gpointer data)
{
  LogThrDestDriver *self = (LogThrDestDriver *)data;
  gint timeout_msec = 0;

  self->suspended = FALSE;
  log_threaded_dest_driver_stop_watches(self);

  if (!self->worker.connected)
    {
      /* no usable connection: (re)establish it before touching the queue */
      __connect(self);
    }
  else if (log_queue_check_items(self->queue, &timeout_msec,
                                 log_threaded_dest_driver_message_became_available_in_the_queue,
                                 self, NULL))
    {
      /* items are available right now: process them, then re-arm the
       * watches unless the insert path suspended the driver */
      log_threaded_dest_driver_do_insert(self);
      if (!self->suspended)
        log_threaded_dest_driver_start_watches(self);
    }
  else if (timeout_msec != 0)
    {
      /* nothing to do yet, but throttling reported when to check again:
       * switch from parallel-push notification to a one-shot timer */
      log_queue_reset_parallel_push(self->queue);
      iv_validate_now();
      self->timer_throttle.expires = iv_now;
      timespec_add_msec(&self->timer_throttle.expires, timeout_msec);
      iv_timer_register(&self->timer_throttle);
    }
}
/*
 * Update the suppress timer in a deferred manner, possibly batching the
 * results of multiple updates to the suppress timer. This is necessary as
 * suppress timer updates must run in the main thread, and updating it every
 * time a new message comes in would cause enormous latency in the fast
 * path. By collecting multiple updates the actual main-thread work is
 * amortized over many messages.
 *
 * msec == 0 means to turn off the suppress timer
 * msec > 0 to enable the timer with the specified timeout
 *
 * NOTE: suppress_lock must be held on entry; it is temporarily released
 * around the main_loop_call() below.
 */
static void
log_writer_update_suppress_timer(LogWriter *self, glong sec)
{
  gboolean invoke;
  struct timespec next_expires;

  iv_validate_now();

  /* we deliberately use nsec == 0 in order to increase the likelyhood that
   * we target the same second, in case only a fraction of a second has
   * passed between two updates. */
  if (sec)
    {
      next_expires.tv_nsec = 0;
      next_expires.tv_sec = iv_now.tv_sec + sec;
    }
  else
    {
      /* all-zero timespec encodes "suppress timer off" */
      next_expires.tv_sec = 0;
      next_expires.tv_nsec = 0;
    }

  /* only hand off to the main thread when the deadline actually changed
   * AND the previous hand-off has already been consumed (suppress_timer_updated) */
  invoke = ((next_expires.tv_sec != self->suppress_timer_expires.tv_sec) ||
            (next_expires.tv_nsec != self->suppress_timer_expires.tv_nsec)) &&
           self->suppress_timer_updated;
  self->suppress_timer_updated = FALSE;

  if (invoke)
    {
      self->suppress_timer_expires = next_expires;
      /* drop suppress_lock while calling into the main loop to avoid
       * deadlocking against the main thread; ref keeps self alive across
       * the asynchronous call */
      g_static_mutex_unlock(&self->suppress_lock);
      log_pipe_ref(&self->super);
      main_loop_call((void *(*)(void *)) log_writer_perform_suppress_timer_update, self, FALSE);
      g_static_mutex_lock(&self->suppress_lock);
    }
}
static void open_child_request(struct req *req) { int f; IV_POPEN_REQUEST_INIT(&req->popen_req); req->popen_req.file = "/usr/bin/vmstat"; req->argv[0] = "/usr/bin/vmstat"; req->argv[1] = "1"; req->argv[2] = NULL; req->popen_req.argv = req->argv; req->popen_req.type = "r"; f = iv_popen_request_submit(&req->popen_req); printf("submitted the popen request, fd is %d\n", f); IV_FD_INIT(&req->popen_fd); req->popen_fd.fd = f; req->popen_fd.cookie = req; req->popen_fd.handler_in = got_data; iv_fd_register(&req->popen_fd); IV_TIMER_INIT(&req->closeit); iv_validate_now(); req->closeit.expires = iv_now; req->closeit.expires.tv_sec += 5; req->closeit.cookie = req; req->closeit.handler = do_close; iv_timer_register(&req->closeit); }
/* Kick-event handler for a work pool thread: drains queued work items,
 * then either parks the thread on the idle list (with a 10s reaper timer)
 * or tears it down when the pool is shutting down. */
static void
iv_work_thread_got_event(void *_thr)
{
	struct work_pool_thread *thr = _thr;
	struct work_pool_priv *pool = thr->pool;
	uint32_t last_seq;

	mutex_lock(&pool->lock);

	thr->kicked = 0;

	/* if we were parked on the idle list, leave it and cancel the
	 * idle reaper timer */
	if (!iv_list_empty(&thr->list)) {
		iv_list_del_init(&thr->list);
		iv_timer_unregister(&thr->idle_timer);
	}

	/* snapshot the tail: only drain items queued up to this point, so a
	 * continuous stream of new work cannot starve the post-loop logic */
	last_seq = pool->seq_tail;
	while ((int32_t)(last_seq - pool->seq_head) > 0) {
		struct iv_work_item *work;

		pool->seq_head++;
		work = iv_container_of(pool->work_items.next,
				       struct iv_work_item, list);
		iv_list_del(&work->list);

		/* run the work item without holding the pool lock */
		mutex_unlock(&pool->lock);
		work->work(work->cookie);
		iv_invalidate_now();
		mutex_lock(&pool->lock);

		/* first completion since work_done was drained: notify the
		 * pool owner's event */
		if (iv_list_empty(&pool->work_done))
			iv_event_post(&pool->ev);
		iv_list_add_tail(&work->list, &pool->work_done);
	}

	if (pool->seq_head == pool->seq_tail) {
		if (!pool->shutting_down) {
			/* no work left: go idle, die in 10s unless re-kicked */
			iv_list_add(&thr->list, &pool->idle_threads);
			iv_validate_now();
			thr->idle_timer.expires = iv_now;
			thr->idle_timer.expires.tv_sec += 10;
			iv_timer_register(&thr->idle_timer);
		} else {
			__iv_work_thread_die(thr);
		}
	} else {
		/*
		 * If we're already at the maximum number of pool
		 * threads, and none of those threads were idle when
		 * more work arrived, then there may have been no
		 * kick sent for the new work item(s) (and no new
		 * pool thread started either), so if we're leaving
		 * with work items still pending, make sure we get
		 * called again, so that we don't deadlock.
		 */
		iv_event_post(&thr->kick);
	}

	mutex_unlock(&pool->lock);
}
/* Re-arm the follow timer so it fires follow_freq milliseconds from now. */
static void
poll_file_changes_rearm_timer(PollFileChanges *self)
{
  struct iv_timer *follow = &self->follow_timer;

  iv_validate_now();
  follow->expires = iv_now;
  timespec_add_msec(&follow->expires, self->follow_freq);
  iv_timer_register(follow);
}
/* Suspend the driver: arm timer_reopen to fire time_reopen seconds from
 * now, at which point a reconnect attempt is made. */
void
log_threaded_dest_driver_suspend(LogThrDestDriver *self)
{
  struct iv_timer *reopen = &self->timer_reopen;

  iv_validate_now();
  reopen->expires = iv_now;
  reopen->expires.tv_sec += self->time_reopen;
  iv_timer_register(reopen);
}
/* Read available bytes from @fd into the reader buffer, refresh the
 * keepalive deadline, and deserialise as many complete LSAs as possible.
 * Returns 0 on success (or EAGAIN), -1 on EOF, hard read error, or a
 * malformed / oversized LSA. */
int dgp_reader_read(struct dgp_reader *dr, int fd)
{
	int ret;
	int off;

	/* retry reads interrupted by signals */
	do {
		ret = read(fd, dr->buf + dr->bytes,
			   sizeof(dr->buf) - dr->bytes);
	} while (ret < 0 && errno == EINTR);

	if (ret <= 0) {
		if (ret < 0) {
			if (errno == EAGAIN)
				return 0;
			perror("dgp_reader_read");
		}
		/* ret == 0 is EOF: also treated as connection loss */
		return -1;
	}

	dr->bytes += ret;

	/* received data counts as peer liveness: push the keepalive
	 * deadline out by KEEPALIVE_TIMEOUT seconds */
	iv_timer_unregister(&dr->keepalive_timeout);
	iv_validate_now();
	dr->keepalive_timeout.expires = iv_now;
	timespec_add_ms(&dr->keepalive_timeout.expires,
			1000 * KEEPALIVE_TIMEOUT, 1000 * KEEPALIVE_TIMEOUT);
	iv_timer_register(&dr->keepalive_timeout);

	off = 0;
	while (off < dr->bytes) {
		int len;
		struct lsa *lsa;

		len = lsa_deserialise(&lsa, dr->buf + off, dr->bytes - off);
		if (len < 0)
			return -1;

		if (len == 0) {
			/* incomplete LSA; if the buffer is completely full
			 * and we are still at offset 0 it can never
			 * complete -> protocol error */
			if (off == 0 && dr->bytes == sizeof(dr->buf))
				return -1;
			break;
		}

		if (lsa != NULL) {
			/* only feed the RIB when peering with a remote */
			if (dr->remoteid != NULL)
				adj_rib_in_add_lsa(&dr->adj_rib_in, lsa);
			lsa_put(lsa);
		}

		off += len;
	}

	/* keep the unconsumed tail at the front of the buffer */
	dr->bytes -= off;
	memmove(dr->buf, dr->buf + off, dr->bytes);

	return 0;
}
/* Not yet reaped: schedule the next reap callback at half of the
 * configured reap interval (time_reap is seconds, the timer wants msec). */
static void
affile_dw_arm_reaper(AFFileDestWriter *self)
{
  glong half_interval_msec = self->owner->time_reap * 1000 / 2;

  iv_validate_now();
  self->reap_timer.expires = iv_now;
  timespec_add_msec(&self->reap_timer.expires, half_interval_msec);
  iv_timer_register(&self->reap_timer);
}
/* Recompute all watches of the reader based on flow-control state and the
 * protocol's preparation verdict; also (re)arms the idle timeout. */
static void
log_reader_update_watches(LogReader *self)
{
  GIOCondition io_cond;
  gint idle_timeout = -1;

  main_loop_assert_main_thread();

  log_reader_stop_idle_timer(self);

  if (!log_reader_is_opened(self))
    return;

  log_reader_start_watches_if_stopped(self);

  /* flow control: when the window is full, wait until we are woken up */
  if (!log_source_free_to_send(&self->super))
    {
      log_reader_suspend_until_awoken(self);
      return;
    }

  LogProtoPrepareAction action = log_proto_server_prepare(self->proto, &io_cond, &idle_timeout);

  if (idle_timeout > 0)
    {
      iv_validate_now();
      self->idle_timer.expires = iv_now;
      self->idle_timer.expires.tv_sec += idle_timeout;
      iv_timer_register(&self->idle_timer);
    }

  if (self->immediate_check)
    {
      log_reader_force_check_in_next_poll(self);
      return;
    }

  if (action == LPPA_POLL_IO)
    poll_events_update_watches(self->poll_events, io_cond);
  else if (action == LPPA_FORCE_SCHEDULE_FETCH)
    log_reader_force_check_in_next_poll(self);
  else if (action == LPPA_SUSPEND)
    log_reader_suspend_until_awoken(self);
  else
    g_assert_not_reached();
}
/* (Re)arm the suspend timer with @handler and @timeout_msec; a previously
 * pending registration is cancelled first so the timer is never double
 * registered. */
void
log_writer_arm_suspend_timer(LogWriter *self, void (*handler)(void *), gint timeout_msec)
{
  struct iv_timer *suspend = &self->suspend_timer;

  if (iv_timer_registered(suspend))
    iv_timer_unregister(suspend);

  iv_validate_now();
  suspend->handler = handler;
  suspend->expires = iv_now;
  timespec_add_msec(&suspend->expires, timeout_msec);
  iv_timer_register(suspend);
}
int main() { long long nsec; iv_init(); #ifdef USE_SIGNAL signal(SIGALRM, got_signal_timeout); alarm(5); #else IV_TIMER_INIT(&timeout); iv_validate_now(); timeout.expires = iv_now; timeout.expires.tv_sec += 5; timeout.handler = got_timer_timeout; iv_timer_register(&timeout); #endif IV_SIGNAL_INIT(&is); is.signum = SIGUSR1; is.handler = got_sig; iv_signal_register(&is); iv_validate_now(); tim_start = iv_now; raise(SIGUSR1); iv_main(); iv_deinit(); nsec = 1000000000ULL * (tim_end.tv_sec - tim_start.tv_sec) + (tim_end.tv_nsec - tim_start.tv_nsec); printf("%s: %d in %ld nsec => %d/sec\n", iv_poll_method_name(), sig_received, (long)nsec, (int)(1000000000ULL * sig_received / nsec)); return 0; }
/* Self-posting raw event handler: keeps re-posting the event until the
 * 'die' flag is raised, then records the end time and unregisters. */
static void got_ev(void *_dummy)
{
	ev_received++;

	if (die) {
		iv_validate_now();
		tim_end = iv_now;
		iv_event_raw_unregister(&ev);
		return;
	}

	iv_event_raw_post(&ev);
}
/* Self-raising signal handler: keeps re-raising SIGUSR1 until the 'die'
 * flag is set, then records the end time and unregisters. */
static void got_sig(void *_dummy)
{
	sig_received++;

	if (die) {
		iv_validate_now();
		tim_end = iv_now;
		iv_signal_unregister(&is);
		return;
	}

	raise(SIGUSR1);
}
int main() { long long nsec; iv_init(); #ifdef USE_SIGNAL signal(SIGALRM, got_signal_timeout); alarm(5); #else IV_TIMER_INIT(&timeout); iv_validate_now(); timeout.expires = iv_now; timeout.expires.tv_sec += 5; timeout.handler = got_timer_timeout; iv_timer_register(&timeout); #endif IV_EVENT_RAW_INIT(&ev); ev.handler = got_ev; iv_event_raw_register(&ev); iv_validate_now(); tim_start = iv_now; iv_event_raw_post(&ev); iv_main(); iv_deinit(); nsec = 1000000000ULL * (tim_end.tv_sec - tim_start.tv_sec) + (tim_end.tv_nsec - tim_start.tv_nsec); printf("%s: %d in %ld nsec => %d/sec\n", iv_poll_method_name(), ev_received, (long)nsec, (int)(1000000000ULL * ev_received / nsec)); return 0; }
/* Stage 0 handler: tear down the raw event and hand control to a timer
 * that fires one second from now. */
static void gotev0(void *_x)
{
	printf("got ev0\n");

	iv_event_raw_unregister(&ev0);

	IV_TIMER_INIT(&ev1);
	ev1.handler = gotev1;
	iv_validate_now();
	ev1.expires = iv_now;
	ev1.expires.tv_sec++;
	iv_timer_register(&ev1);
}
/* Arm (or re-arm) the reconnect timer to fire time_reopen seconds from
 * now; any pending registration is cancelled first. Main thread only. */
static void
afsocket_dd_start_reconnect_timer(AFSocketDestDriver *self)
{
  struct iv_timer *reconnect = &self->reconnect_timer;

  main_loop_assert_main_thread();

  if (iv_timer_registered(reconnect))
    iv_timer_unregister(reconnect);

  iv_validate_now();
  reconnect->expires = iv_now;
  timespec_add_msec(&reconnect->expires, self->time_reopen * 1000);
  iv_timer_register(reconnect);
}
/* Update the expire time of this timer to the current time plus @sec. Can
 * be invoked from any threads. */
void
ml_batched_timer_postpone(MlBatchedTimer *self, glong sec)
{
  struct timespec deadline;

  iv_validate_now();

  /* nsec is deliberately left at zero so that two updates issued within
   * the same wall-clock second are likely to produce identical deadlines,
   * allowing the batched timer to coalesce them */
  deadline.tv_sec = iv_now.tv_sec + sec;
  deadline.tv_nsec = 0;

  ml_batched_timer_update(self, &deadline);
}
/* Recompute the trigger timer: when flow control blocks sending, stop all
 * watches; otherwise restart the timer trigger_freq seconds from now. */
static void
trigger_source_update_watches(TriggerSource *self)
{
  struct iv_timer *trigger = &self->trigger_timer;

  if (!log_source_free_to_send(&self->super))
    {
      trigger_source_stop_watches(self);
      return;
    }

  iv_validate_now();
  trigger_source_stop_watches(self);
  trigger->expires = iv_now;
  trigger->expires.tv_sec += self->options->trigger_freq;
  trigger_source_start_watches(self);
}
/* Initialize the trigger source and arm its first trigger timer
 * trigger_freq seconds from now.  Returns FALSE if the base class
 * initialization fails. */
static gboolean
trigger_source_init(LogPipe *s)
{
  TriggerSource *self = (TriggerSource *) s;
  struct iv_timer *trigger = &self->trigger_timer;

  if (!log_source_init(s))
    return FALSE;

  iv_validate_now();
  trigger->expires = iv_now;
  trigger->expires.tv_sec += self->options->trigger_freq;
  trigger_source_start_watches(self);

  return TRUE;
}
/* Postpone the next MARK message to now + mark_freq seconds, or disable
 * MARK generation entirely when mark_freq <= 0.  next_mark_target is
 * shared state, so every access must hold internal_mark_target_lock. */
void
afinter_postpone_mark(gint mark_freq)
{
  if (mark_freq > 0)
    {
      iv_validate_now();
      g_static_mutex_lock(&internal_mark_target_lock);
      next_mark_target = iv_now;
      next_mark_target.tv_sec += mark_freq;
      g_static_mutex_unlock(&internal_mark_target_lock);
    }
  else
    {
      /* FIX: the disable path previously wrote next_mark_target without
       * holding the lock, racing against the locked enable path above */
      g_static_mutex_lock(&internal_mark_target_lock);
      next_mark_target.tv_sec = -1;
      g_static_mutex_unlock(&internal_mark_target_lock);
    }
}
/* Child thread entry point: set up its own event loop and event source,
 * record the benchmark start time, kick the parent, then run the loop. */
static void thr_child(void *_dummy)
{
	iv_init();

	IV_EVENT_INIT(&ev_child);
	ev_child.handler = got_ev_child;
	iv_event_register(&ev_child);

	iv_validate_now();
	tim_start = iv_now;

	iv_event_post(&ev_parent);

	iv_main();

	iv_deinit();
}
static void main_loop_exit_initiate(void) { if (main_loop_is_terminating()) return; msg_notice("syslog-ng shutting down", evt_tag_str("version", SYSLOG_NG_VERSION)); IV_TIMER_INIT(&main_loop_exit_timer); iv_validate_now(); main_loop_exit_timer.expires = iv_now; main_loop_exit_timer.handler = main_loop_exit_timer_elapsed; timespec_add_msec(&main_loop_exit_timer.expires, 100); iv_timer_register(&main_loop_exit_timer); __main_loop_is_terminating = TRUE; }
/* Parent-side event handler of the ping-pong benchmark; 'die' acts as a
 * small state machine: 0 = keep bouncing, 1 = signal last round, 2 = must
 * never be observed here, 3 = finish and record the end time. */
static void got_ev_parent(void *_dummy)
{
	ev_received++;

	switch (die) {
	case 0:
		iv_event_post(&ev_child);
		break;
	case 1:
		die = 2;
		iv_event_post(&ev_child);
		break;
	case 2:
		iv_fatal("iv_event_bench: entered invalid state");
		break;
	case 3:
		iv_validate_now();
		tim_end = iv_now;
		iv_event_unregister(&ev_parent);
		break;
	}
}
/* Initialize the DB parser: reuse a previously persisted PatternDB when
 * available and the on-disk pattern file is unchanged, otherwise (re)load
 * the database from file.  Also arms a 1-second tick timer.  Returns TRUE
 * when a database instance is available. */
static gboolean
log_db_parser_init(LogPipe *s)
{
  LogDBParser *self = (LogDBParser *) s;
  GlobalConfig *cfg = log_pipe_get_config(s);

  self->db = cfg_persist_config_fetch(cfg, log_db_parser_format_persist_name(self));
  if (self->db)
    {
      struct stat st;

      if (stat(self->db_file, &st) < 0)
        {
          /* keep using the persisted database; we just cannot detect
           * future file changes */
          msg_error("Error stating pattern database file, no automatic reload will be performed",
                    evt_tag_str("error", g_strerror(errno)),
                    NULL);
        }
      else if (self->db_file_inode != st.st_ino || self->db_file_mtime != st.st_mtime)
        {
          /* the pattern file changed since the persisted instance was
           * built: construct a fresh database and remember the new
           * inode/mtime for later comparisons.
           * NOTE(review): the fetched self->db is overwritten here without
           * an explicit free -- verify ownership semantics of
           * cfg_persist_config_fetch() to rule out a leak */
          self->db = pattern_db_new();
          log_db_parser_reload_database(self);
          self->db_file_inode = st.st_ino;
          self->db_file_mtime = st.st_mtime;
        }
    }
  else
    {
      /* nothing persisted: build the database from scratch */
      self->db = pattern_db_new();
      log_db_parser_reload_database(self);
    }
  if (self->db)
    pattern_db_set_emit_func(self->db, log_db_parser_emit, self);

  /* periodic tick, first firing at the next whole second */
  iv_validate_now();
  IV_TIMER_INIT(&self->tick);
  self->tick.cookie = self;
  self->tick.handler = log_db_parser_timer_tick;
  self->tick.expires = iv_now;
  self->tick.expires.tv_sec++;
  self->tick.expires.tv_nsec = 0;
  iv_timer_register(&self->tick);
  return self->db != NULL;
}
/* Test driver: registers a task and a timer whose expiry lies one second
 * in the past, then runs the loop; exit status reflects 'success' as set
 * by the handlers.
 * FIX: the timer was initialized and configured but iv_timer_register()
 * was never called, so handler_timer could never run and the test could
 * not pass. */
int main()
{
	alarm(5);

	iv_init();

	IV_TASK_INIT(&task);
	task.handler = handler_task;
	iv_task_register(&task);

	IV_TIMER_INIT(&timer);
	iv_validate_now();
	timer.expires = iv_now;
	timer.expires.tv_sec--;	/* deliberately already expired */
	timer.handler = handler_timer;
	iv_timer_register(&timer);

	iv_main();

	iv_deinit();

	return !success;
}
/* Compute into @to the time remaining until the earliest registered timer
 * expires.  Returns nonzero when that timer is already due (remaining
 * time <= 0), zero otherwise.  With no timers pending, reports a one-hour
 * timeout and returns zero. */
int iv_get_soonest_timeout(struct iv_state *st, struct timespec *to)
{
	struct iv_timer_ *soonest;

	if (!st->num_timers) {
		to->tv_sec = 3600;
		to->tv_nsec = 0;
		return 0;
	}

	/* node 1 is the root of the timer heap: the earliest deadline */
	soonest = *get_node(st, 1);

	iv_validate_now();
	to->tv_sec = soonest->expires.tv_sec - st->time.tv_sec;
	to->tv_nsec = soonest->expires.tv_nsec - st->time.tv_nsec;
	if (to->tv_nsec < 0) {
		to->tv_sec--;
		to->tv_nsec += 1000000000;
	}

	return (to->tv_sec < 0 || (to->tv_sec == 0 && to->tv_nsec == 0)) ? 1 : 0;
}
void write_stats_to_log_handler(void *arg) { int idx; struct iface_util_by_time info; char timestamp[TIMESTAMP_SIZE]; get_timestamp(timestamp, sizeof(timestamp)); log_write("=========================================================================\n"); log_write("TIMESTAMP: %s\n", timestamp); for( idx = 0; idx < ifaces_cnt; idx++ ) { log_write("Interface %s:\n", iface_info_instances[idx].iface_name); get_util_for_util_last_10_sec(&info, &iface_info_instances[idx]); log_write("\tUtilization for last 10 sec: "); log_write(" RX: %.5f Mbps,", info.rx_mbytes_per_sec); log_write(" TX: %.5f Mbps\n", info.tx_mbytes_per_sec); get_util_for_util_last_30_sec(&info, &iface_info_instances[idx]); log_write("\tUtilization for last 30 sec: "); log_write(" RX: %.5f Mbps,", info.rx_mbytes_per_sec); log_write(" TX: %.5f Mbps\n", info.tx_mbytes_per_sec); get_util_for_util_last_60_sec(&info, &iface_info_instances[idx]); log_write("\tUtilization for last 60 sec: "); log_write(" RX: %.5f Mbps,", info.rx_mbytes_per_sec); log_write(" TX: %.5f Mbps\n", info.tx_mbytes_per_sec); log_write("\n"); } log_write("=========================================================================\n"); iv_validate_now(); log_timer.expires = iv_now; log_timer.expires.tv_sec += WRITE_STATS_PERIOD; iv_timer_register(&log_timer); }
/* Exercise journald fd polling: queue a task that adds mock entries, poll
 * the journald fd, and stop the loop via a one-second timer; asserts that
 * the poll callback fired. */
void
__test_fd_handling(Journald *journald)
{
  gint fd = journald_get_fd(journald);
  struct iv_task add_entry_task;
  struct iv_fd fd_to_poll;
  struct iv_timer stop_timer;

  journald_process(journald);

  task_called = FALSE;
  poll_triggered = FALSE;

  IV_TASK_INIT(&add_entry_task);
  add_entry_task.cookie = journald;
  add_entry_task.handler = add_mock_entries;

  IV_FD_INIT(&fd_to_poll);
  fd_to_poll.fd = fd;
  fd_to_poll.cookie = journald;
  fd_to_poll.handler_in = handle_new_entry;

  IV_TIMER_INIT(&stop_timer);
  stop_timer.cookie = NULL;
  stop_timer.handler = stop_timer_expired;
  iv_validate_now();
  stop_timer.expires = iv_now;
  stop_timer.expires.tv_sec++;

  iv_task_register(&add_entry_task);
  iv_fd_register(&fd_to_poll);
  iv_timer_register(&stop_timer);

  iv_main();

  assert_true(poll_triggered, ASSERTION_ERROR("Poll event isn't triggered"));
}
void dgp_reader_register(struct dgp_reader *dr) { dr->bytes = 0; if (dr->remoteid != NULL) { dr->adj_rib_in.myid = dr->myid; dr->adj_rib_in.remoteid = dr->remoteid; adj_rib_in_init(&dr->adj_rib_in); dr->to_loc.dest = dr->rib; rib_listener_to_loc_init(&dr->to_loc); adj_rib_in_listener_register(&dr->adj_rib_in, &dr->to_loc.rl); } IV_TIMER_INIT(&dr->keepalive_timeout); iv_validate_now(); dr->keepalive_timeout.expires = iv_now; timespec_add_ms(&dr->keepalive_timeout.expires, 1000 * KEEPALIVE_TIMEOUT, 1000 * KEEPALIVE_TIMEOUT); dr->keepalive_timeout.cookie = dr; dr->keepalive_timeout.handler = dgp_reader_keepalive_timeout; iv_timer_register(&dr->keepalive_timeout); }
/* Recompute the reader's I/O watches and follow timer.  If we can proceed
 * without waiting for I/O (immediate check requested, data buffered, or
 * flow control blocks us), all fd callbacks are disabled and either the
 * restart task is scheduled or the reader is suspended.  Otherwise the fd
 * handlers (socket-like sources) or the follow timer (file-like sources)
 * are armed.  Must run on the main thread. */
static void
log_reader_update_watches(LogReader *self)
{
  gint fd;
  GIOCondition cond;
  gboolean free_to_send;

  main_loop_assert_main_thread();

  self->suspended = FALSE;
  free_to_send = log_source_free_to_send(&self->super);
  if (!free_to_send ||
      self->immediate_check ||
      log_proto_prepare(self->proto, &fd, &cond))
    {
      /* we disable all I/O related callbacks here because we either know
       * that we can continue (e.g. immediate_check == TRUE) or we know
       * that we can't continue even if data would be available (e.g.
       * free_to_send == FALSE) */

      self->immediate_check = FALSE;
      if (iv_fd_registered(&self->fd_watch))
        {
          iv_fd_set_handler_in(&self->fd_watch, NULL);
          iv_fd_set_handler_out(&self->fd_watch, NULL);

          /* we disable the error handler too, as it might be
           * triggered even when we don't want to read data
           * (e.g. log_source_free_to_send() is FALSE).
           *
           * And at least on Linux, it may happen that EPOLLERR is
           * set, while there's still data in the socket buffer.  Thus
           * in reaction to an EPOLLERR, we could possibly send
           * further messages without validating that
           * log_source_free_to_send() would allow us to, potentially
           * overflowing our window (and causing a failed assertion in
           * log_source_queue()). */
          iv_fd_set_handler_err(&self->fd_watch, NULL);
        }

      if (iv_timer_registered(&self->follow_timer))
        iv_timer_unregister(&self->follow_timer);

      if (free_to_send)
        {
          /* data is already buffered: start working on it immediately,
           * without waiting for I/O events */
          if (!iv_task_registered(&self->restart_task))
            {
              iv_task_register(&self->restart_task);
            }
        }
      else
        {
          /* flow-controlled: wait until the window opens up again */
          self->suspended = TRUE;
        }
      return;
    }

  if (iv_fd_registered(&self->fd_watch))
    {
      /* this branch is executed when our fd is connected to a non-file
       * source (e.g. TCP, UDP socket). We set up I/O callbacks here.
       * files cannot be polled using epoll, as it causes an I/O error
       * (thus abort in ivykis). */
      if (cond & G_IO_IN)
        iv_fd_set_handler_in(&self->fd_watch, log_reader_io_process_input);
      else
        iv_fd_set_handler_in(&self->fd_watch, NULL);

      if (cond & G_IO_OUT)
        iv_fd_set_handler_out(&self->fd_watch, log_reader_io_process_input);
      else
        iv_fd_set_handler_out(&self->fd_watch, NULL);

      /* watch for errors only while at least one data direction is armed */
      if (cond & (G_IO_IN + G_IO_OUT))
        iv_fd_set_handler_err(&self->fd_watch, log_reader_io_process_input);
      else
        iv_fd_set_handler_err(&self->fd_watch, NULL);
    }
  else
    {
      /* file-like source: poll for changes with the follow timer instead */
      if (self->options->follow_freq > 0)
        {
          if (iv_timer_registered(&self->follow_timer))
            iv_timer_unregister(&self->follow_timer);
          iv_validate_now();
          self->follow_timer.expires = iv_now;
          timespec_add_msec(&self->follow_timer.expires, self->options->follow_freq);
          iv_timer_register(&self->follow_timer);
        }
      else
        {
          /* NOTE: we don't need to unregister the timer here as follow_freq
           * never changes during runtime, thus if ever it was registered that
           * also means that we go into the if branch above. */
        }
    }
}