/* Recompute which events this reader waits on, based on flow-control and
 * protocol buffer state.  Must be called from the main loop thread. */
static void
log_reader_update_watches(LogReader *self)
{
  GIOCondition io_cond;
  gboolean buffered_line;

  main_loop_assert_main_thread();

  if (!log_reader_is_opened(self))
    return;

  log_reader_start_watches_if_stopped(self);

  /* flow control: stay fully suspended while the window is closed; the
   * wakeup callback re-runs this function when space opens up */
  if (!log_source_free_to_send(&self->super))
    {
      log_reader_suspend_until_awoken(self);
      return;
    }

  buffered_line = log_proto_server_prepare(self->proto, &io_cond);

  /* data already buffered (or an explicit re-check was requested): do not
   * wait for I/O, schedule an immediate fetch instead */
  if (self->immediate_check || buffered_line)
    {
      log_reader_force_check_in_next_poll(self);
      return;
    }

  poll_events_update_watches(self->poll_events, io_cond);
}
/* Parse one raw input line into a LogMessage, apply source tags and queue
 * it downstream.  Returns whether the flow-control window still has room. */
static gboolean
log_reader_handle_line(LogReader *self, const guchar *line, gint length, GSockAddr *saddr)
{
  LogPathOptions path_options = LOG_PATH_OPTIONS_INIT;
  LogMessage *msg;
  gint i;

  msg_debug("Incoming log entry",
            evt_tag_printf("line", "%.*s", length, line),
            NULL);

  /* use the current time to get the time zone offset */
  msg = log_msg_new((gchar *) line, length, saddr, &self->options->parse_options);

  /* fall back to the connection's peer address when the message itself
   * carries no sender address */
  if (msg->saddr == NULL && self->peer_addr != NULL)
    {
      msg->saddr = g_sockaddr_ref(self->peer_addr);
    }

  /* source-configured tags plus the implicit source group tag */
  if (self->options->tags != NULL)
    {
      for (i = 0; i < self->options->tags->len; i++)
        log_msg_set_tag_by_id(msg, g_array_index(self->options->tags, LogTagId, i));
    }
  log_msg_set_tag_by_id(msg, self->super.options->source_group_tag);

  log_pipe_queue(&self->super.super, msg, &path_options);
  return log_source_free_to_send(&self->super);
}
/* Timer callback: emit a MARK message unless a real message arrived in the
 * meantime (which pushes next_mark_target forward).  Main thread only. */
static void
afinter_source_mark(gpointer s)
{
  AFInterSource *self = (AFInterSource *) s;
  LogPathOptions path_options = LOG_PATH_OPTIONS_INIT;
  struct timespec mark_target;

  main_loop_assert_main_thread();

  /* snapshot the shared target under the lock */
  g_static_mutex_lock(&internal_mark_target_lock);
  mark_target = next_mark_target;
  g_static_mutex_unlock(&internal_mark_target_lock);

  if (log_source_free_to_send(&self->super) &&
      mark_target.tv_sec <= self->mark_timer.expires.tv_sec)
    {
      /* the internal_mark_target has not been overwritten by an incoming
       * message in afinter_postpone_mark (there was no msg in the meantime)
       * -> the mark msg can be sent */
      LogMessage *mark_msg = log_msg_new_mark();
      path_options.ack_needed = FALSE;
      log_pipe_queue(&self->super.super, mark_msg, &path_options);
      /* the next_mark_target will be increased in afinter_postpone_mark */
    }

  afinter_source_update_watches(self);
}
/* GSource prepare(): decide whether dispatch can run right away (TRUE) or
 * what poll events / timeout to wait for (FALSE). */
static gboolean
log_reader_fd_prepare(GSource *source, gint *timeout)
{
  LogReaderWatch *self = (LogReaderWatch *) source;
  GIOCondition wanted_cond;

  self->pollfd.revents = 0;
  self->pollfd.events = G_IO_ERR;

  /* never indicate readability if flow control prevents us from sending messages */
  if (!log_source_free_to_send(&self->reader->super))
    return FALSE;

  /* the protocol layer may already hold a complete line in its buffer */
  if (log_proto_prepare(self->proto, &self->pollfd.fd, &wanted_cond, timeout))
    return TRUE;

  /* one-shot immediate re-check: poll with a zero timeout */
  if (self->reader->immediate_check)
    {
      *timeout = 0;
      self->reader->immediate_check = FALSE;
      return FALSE;
    }

  /* followed files are re-checked periodically instead of being polled */
  if (self->reader->options->follow_freq > 0)
    {
      *timeout = self->reader->options->follow_freq;
      return FALSE;
    }

  self->pollfd.events |= wanted_cond;
  return FALSE;
}
/* Reconcile the internal source's watches (post event, MARK timer, restart
 * task) with the current flow-control state.  The code is carefully ordered
 * against concurrent senders that take internal_msg_lock; see the race
 * analyses in the inline comments before reordering anything. */
static void
afinter_source_update_watches(AFInterSource *self)
{
  if (!log_source_free_to_send(&self->super))
    {
      /* ok, we go to sleep now. let's disable the post event by setting
       * current_internal_source to NULL. Messages get accumulated into
       * internal_msg_queue. */
      g_static_mutex_lock(&internal_msg_lock);
      current_internal_source = NULL;
      g_static_mutex_unlock(&internal_msg_lock);

      /* Possible race:
       *
       * After the check log_source_free_to_send() above, the destination
       * may actually write out a message, thus by the time we get here, the
       * window may have space again. This is taken care of by the fact
       * that the wakeup is running in the main thread, which we do too. So
       * the wakeup is either completely performed before we entered this
       * function, or after we return.
       *
       * In case it happened earlier, the check above will find that we have
       * window space, in case it's going to be happening afterwards, we
       * will be woken up by the schedule_wakeup event (which calls
       * update_watches again).
       */

      /* MARK events also get disabled */
      afinter_source_stop_watches(self);
    }
  else
    {
      /* ok we can send our stuff. make sure we wake up */
      afinter_source_stop_watches(self);
      self->mark_timer.expires = next_mark_target;
      afinter_source_start_watches(self);

      /* Possible race:
       *
       * Our current_internal_source pointer is set to NULL here (in case
       * we're just waking up). In case the sender submits a message, it'll
       * not trigger the self->post (since the pointer is NULL). This is
       * taken care of by the queue-length check in the locked region below.
       * If the queue has elements, we need to wake up, because we may have
       * lost a wakeup call. If it happens after the locked region, that
       * doesn't matter, in that case we already pointed
       * current_internal_source to ourselves, thus the post event will also
       * be triggered.
       */
      g_static_mutex_lock(&internal_msg_lock);
      if (internal_msg_queue && g_queue_get_length(internal_msg_queue) > 0)
        iv_task_register(&self->restart_task);
      current_internal_source = self;
      g_static_mutex_unlock(&internal_msg_lock);
    }
}
/* Recompute the reader's event registrations (poll watches, idle timer,
 * forced fetch) from flow-control state and the protocol's prepare action.
 * Must run on the main loop thread. */
static void
log_reader_update_watches(LogReader *self)
{
  GIOCondition io_cond;
  gint idle_timeout = -1;

  main_loop_assert_main_thread();

  log_reader_stop_idle_timer(self);

  if (!log_reader_is_opened(self))
    return;

  log_reader_start_watches_if_stopped(self);

  /* flow control closed the window: wait to be explicitly woken up */
  if (!log_source_free_to_send(&self->super))
    {
      log_reader_suspend_until_awoken(self);
      return;
    }

  LogProtoPrepareAction action =
    log_proto_server_prepare(self->proto, &io_cond, &idle_timeout);

  /* arm the idle timer when the protocol requests an idle disconnect window */
  if (idle_timeout > 0)
    {
      iv_validate_now();
      self->idle_timer.expires = iv_now;
      self->idle_timer.expires.tv_sec += idle_timeout;
      iv_timer_register(&self->idle_timer);
    }

  /* an explicit re-check overrides whatever the protocol asked for */
  if (self->immediate_check)
    {
      log_reader_force_check_in_next_poll(self);
      return;
    }

  if (action == LPPA_POLL_IO)
    poll_events_update_watches(self->poll_events, io_cond);
  else if (action == LPPA_FORCE_SCHEDULE_FETCH)
    log_reader_force_check_in_next_poll(self);
  else if (action == LPPA_SUSPEND)
    log_reader_suspend_until_awoken(self);
  else
    g_assert_not_reached();
}
/* Convert the current journal entry into a LogMessage and post it.
 * Returns whether the flow-control window still has room for more. */
static gboolean
__handle_message(JournalReader *self)
{
  LogMessage *msg = log_msg_new_empty();

  msg->pri = self->options->default_pri;

  /* __handle_data() fills the message NV pairs from each journal field */
  gpointer args[] = { msg, self->options };
  journald_foreach_data(self->journal, __handle_data, args);

  __set_message_timestamp(self, msg);
  log_source_post(&self->super, msg);

  return log_source_free_to_send(&self->super);
}
/* Re-arm (or stop) the trigger timer depending on flow-control state. */
static void
trigger_source_update_watches (TriggerSource *self)
{
  /* window full: silence the timer until we are woken up again */
  if (!log_source_free_to_send (&self->super))
    {
      trigger_source_stop_watches (self);
      return;
    }

  iv_validate_now ();
  trigger_source_stop_watches (self);

  /* schedule the next trigger trigger_freq seconds from now */
  self->trigger_timer.expires = iv_now;
  self->trigger_timer.expires.tv_sec += self->options->trigger_freq;
  trigger_source_start_watches (self);
}
/* Drain queued internal messages into the pipeline for as long as the
 * flow-control window allows, then refresh the source's watches. */
static void
afinter_source_post(gpointer s)
{
  AFInterSource *self = (AFInterSource *) s;
  LogPathOptions path_options = LOG_PATH_OPTIONS_INIT;

  for (;;)
    {
      LogMessage *msg;

      /* stop as soon as the window closes */
      if (!log_source_free_to_send(&self->super))
        break;

      /* pop under the lock; senders push from other threads */
      g_static_mutex_lock(&internal_msg_lock);
      msg = g_queue_pop_head(internal_msg_queue);
      g_static_mutex_unlock(&internal_msg_lock);

      /* queue drained */
      if (!msg)
        break;

      log_pipe_queue(&self->super.super, msg, &path_options);
    }

  afinter_source_update_watches(self);
}
/* Timer callback: emit the configured trigger message (if flow control
 * allows) and re-arm the watches.  Main thread only. */
static void
trigger_source_triggered (gpointer s)
{
  TriggerSource *self = (TriggerSource *) s;
  LogPathOptions path_options = LOG_PATH_OPTIONS_INIT;

  main_loop_assert_main_thread ();

  if (log_source_free_to_send (&self->super))
    {
      /* generated traffic does not need end-to-end acknowledgement */
      LogMessage *msg = log_msg_new_internal (LOG_INFO, self->options->message);
      path_options.ack_needed = FALSE;
      log_pipe_queue (&self->super.super, msg, &path_options);
    }

  trigger_source_update_watches (self);
}
/* Build a LogMessage from one raw line plus transport aux data and post it.
 * Returns whether the flow-control window still has room. */
static gboolean
log_reader_handle_line(LogReader *self, const guchar *line, gint length, LogTransportAuxData *aux)
{
  LogMessage *msg;

  msg_debug("Incoming log entry",
            evt_tag_printf("line", "%.*s", length, line));

  /* use the current time to get the time zone offset; prefer the sender
   * address supplied by the transport, falling back to the reader's peer */
  msg = log_msg_new((gchar *) line, length,
                    aux->peer_addr ? aux->peer_addr : self->peer_addr,
                    &self->options->parse_options);

  log_msg_refcache_start_producer(msg);

  /* copy transport-provided name-value pairs onto the message */
  log_transport_aux_data_foreach(aux, _add_aux_nvpair, msg);

  log_source_post(&self->super, msg);
  log_msg_refcache_stop();

  return log_source_free_to_send(&self->super);
}
/* GSource dispatch(): fetch pending log data unless flow control closed the
 * window since check() ran.  Returning FALSE removes the source. */
static gboolean
log_reader_fd_dispatch(GSource *source, GSourceFunc callback, gpointer user_data)
{
  LogReaderWatch *self = (LogReaderWatch *) source;

  /* The window status can change between check() and dispatch()
   * because multiple tcp connections can have messages ready
   * from the same source at check() time, but the queue may
   * fill before we dispatch() them all */
  if (!log_source_free_to_send(&self->reader->super))
    return TRUE;

  return log_reader_fetch_log(self->reader, self->proto) ? TRUE : FALSE;
}
/* Refresh the journal reader's poll registrations according to the current
 * flow-control state.  Main thread only. */
static void
_update_watches(JournalReader *self)
{
  main_loop_assert_main_thread();

  _start_watches_if_stopped(self);

  /* window closed: park until a wakeup re-runs this function */
  if (!log_source_free_to_send(&self->super))
    {
      _suspend_until_awoken(self);
      return;
    }

  /* an explicit re-check was requested: fetch without waiting for I/O */
  if (self->immediate_check)
    {
      _force_check_in_next_poll(self);
      return;
    }

  poll_events_update_watches(self->poll_events, G_IO_IN);
}
/* Parse one raw line into a LogMessage (under refcache producer scope) and
 * queue it.  Returns whether the flow-control window still has room. */
static gboolean
log_reader_handle_line(LogReader *self, const guchar *line, gint length, GSockAddr *saddr)
{
  LogPathOptions path_options = LOG_PATH_OPTIONS_INIT;
  LogMessage *msg;

  msg_debug("Incoming log entry",
            evt_tag_printf("line", "%.*s", length, line),
            NULL);

  /* use the current time to get the time zone offset */
  msg = log_msg_new((gchar *) line, length, saddr, &self->options->parse_options);
  log_msg_refcache_start_producer(msg);

  /* fall back to the connection's peer address when the message has none */
  if (msg->saddr == NULL && self->peer_addr != NULL)
    {
      msg->saddr = g_sockaddr_ref(self->peer_addr);
    }

  log_pipe_queue(&self->super.super, msg, &path_options);
  log_msg_refcache_stop();

  return log_source_free_to_send(&self->super);
}
/* Recompute ivykis registrations for this reader: either schedule an
 * immediate restart task (data buffered / flow-controlled / forced check),
 * set fd handlers for socket-like sources, or arm the follow timer for
 * followed files.  Must run on the main loop thread.
 *
 * Fix: the error-handler condition used arithmetic `G_IO_IN + G_IO_OUT`
 * instead of the bitwise OR; it only worked because the two flags happen
 * not to share bits.  Replaced with `G_IO_IN | G_IO_OUT` (same value,
 * correct idiom for flag masks). */
static void
log_reader_update_watches(LogReader *self)
{
  gint fd;
  GIOCondition cond;
  gboolean free_to_send;

  main_loop_assert_main_thread();

  self->suspended = FALSE;
  free_to_send = log_source_free_to_send(&self->super);
  if (!free_to_send ||
      self->immediate_check ||
      log_proto_prepare(self->proto, &fd, &cond))
    {
      /* we disable all I/O related callbacks here because we either know
       * that we can continue (e.g. immediate_check == TRUE) or we know
       * that we can't continue even if data would be available (e.g.
       * free_to_send == FALSE) */

      self->immediate_check = FALSE;
      if (iv_fd_registered(&self->fd_watch))
        {
          iv_fd_set_handler_in(&self->fd_watch, NULL);
          iv_fd_set_handler_out(&self->fd_watch, NULL);

          /* we disable the error handler too, as it might be
           * triggered even when we don't want to read data
           * (e.g. log_source_free_to_send() is FALSE).
           *
           * And at least on Linux, it may happen that EPOLLERR is
           * set, while there's still data in the socket buffer.  Thus
           * in reaction to an EPOLLERR, we could possibly send
           * further messages without validating the
           * log_source_free_to_send() would allow us to, potentially
           * overflowing our window (and causing a failed assertion in
           * log_source_queue().
           */
          iv_fd_set_handler_err(&self->fd_watch, NULL);
        }

      if (iv_timer_registered(&self->follow_timer))
        iv_timer_unregister(&self->follow_timer);

      if (free_to_send)
        {
          /* we have data in our input buffer, we need to start working
           * on it immediately, without waiting for I/O events */
          if (!iv_task_registered(&self->restart_task))
            {
              iv_task_register(&self->restart_task);
            }
        }
      else
        {
          self->suspended = TRUE;
        }
      return;
    }

  if (iv_fd_registered(&self->fd_watch))
    {
      /* this branch is executed when our fd is connected to a non-file
       * source (e.g. TCP, UDP socket). We set up I/O callbacks here.
       * files cannot be polled using epoll, as it causes an I/O error
       * (thus abort in ivykis).
       */
      if (cond & G_IO_IN)
        iv_fd_set_handler_in(&self->fd_watch, log_reader_io_process_input);
      else
        iv_fd_set_handler_in(&self->fd_watch, NULL);

      if (cond & G_IO_OUT)
        iv_fd_set_handler_out(&self->fd_watch, log_reader_io_process_input);
      else
        iv_fd_set_handler_out(&self->fd_watch, NULL);

      /* bitwise OR (was arithmetic `+`): enable the error handler iff we
       * are interested in either direction */
      if (cond & (G_IO_IN | G_IO_OUT))
        iv_fd_set_handler_err(&self->fd_watch, log_reader_io_process_input);
      else
        iv_fd_set_handler_err(&self->fd_watch, NULL);
    }
  else
    {
      if (self->options->follow_freq > 0)
        {
          if (iv_timer_registered(&self->follow_timer))
            iv_timer_unregister(&self->follow_timer);
          iv_validate_now();
          self->follow_timer.expires = iv_now;
          timespec_add_msec(&self->follow_timer.expires, self->options->follow_freq);
          iv_timer_register(&self->follow_timer);
        }
      else
        {
          /* NOTE: we don't need to unregister the timer here as follow_freq
           * never changes during runtime, thus if ever it was registered that
           * also means that we go into the if branch above. */
        }
    }
}
/* LogPipe queue() implementation for sources: finalize a received message
 * (timestamps, $HOST/$PROGRAM overrides, tags, stats) and forward it down
 * the pipeline.  The per-statement order matters (stats are taken before
 * forwarding, the msg context is cleared after). */
static void
log_source_queue(LogPipe *s, LogMessage *msg, const LogPathOptions *path_options, gpointer user_data)
{
  LogSource *self = (LogSource *) s;
  gint i;

  msg_set_context(msg);

  /* use the receive timestamp when the message had none or the config
   * says not to keep the sender's stamp */
  if (msg->timestamps[LM_TS_STAMP].tv_sec == -1 || !self->options->keep_timestamp)
    msg->timestamps[LM_TS_STAMP] = msg->timestamps[LM_TS_RECVD];
  g_assert(msg->timestamps[LM_TS_STAMP].zone_offset != -1);

  /* $HOST setup */
  log_source_mangle_hostname(self, msg);

  /* $PROGRAM override */
  if (self->options->program_override)
    {
      /* override_len is computed lazily on first use (-1 == not yet known) */
      if (self->options->program_override_len < 0)
        self->options->program_override_len = strlen(self->options->program_override);
      log_msg_set_value(msg, LM_V_PROGRAM, self->options->program_override, self->options->program_override_len);
    }

  /* $HOST override */
  if (self->options->host_override)
    {
      if (self->options->host_override_len < 0)
        self->options->host_override_len = strlen(self->options->host_override);
      log_msg_set_value(msg, LM_V_HOST, self->options->host_override, self->options->host_override_len);
    }

  /* source specific tags */
  if (self->options->tags)
    {
      for (i = 0; i < self->options->tags->len; i++)
        {
          log_msg_set_tag_by_id(msg, g_array_index(self->options->tags, LogTagId, i));
        }
    }
  log_msg_set_tag_by_id(msg, self->options->source_group_tag);

  /* stats counters (per-host at level 2, per-sender/per-program at level 3) */
  if (stats_check_level(2))
    {
      stats_lock();
      stats_register_and_increment_dynamic_counter(2, SCS_HOST | SCS_SOURCE, NULL, log_msg_get_value(msg, LM_V_HOST, NULL), msg->timestamps[LM_TS_RECVD].tv_sec);
      if (stats_check_level(3))
        {
          stats_register_and_increment_dynamic_counter(3, SCS_SENDER | SCS_SOURCE, NULL, log_msg_get_value(msg, LM_V_HOST_FROM, NULL), msg->timestamps[LM_TS_RECVD].tv_sec);
          stats_register_and_increment_dynamic_counter(3, SCS_PROGRAM | SCS_SOURCE, NULL, log_msg_get_value(msg, LM_V_PROGRAM, NULL), msg->timestamps[LM_TS_RECVD].tv_sec);
        }
      stats_unlock();
    }
  stats_syslog_process_message_pri(msg->pri);

  /* message setup finished, send it out */
  stats_counter_inc(self->recvd_messages);
  stats_counter_set(self->last_message_seen, msg->timestamps[LM_TS_RECVD].tv_sec);
  log_pipe_forward_msg(s, msg, path_options);
  msg_set_context(NULL);

  /* threaded sources briefly sleep when the window just filled, giving the
   * consumer a chance to drain before the next message arrives */
  if (accurate_nanosleep && self->threaded && self->window_full_sleep_nsec > 0 && !log_source_free_to_send(self))
    {
      struct timespec ts;

      /* wait one 0.1msec in the hope that the buffer clears up */
      ts.tv_sec = 0;
      ts.tv_nsec = self->window_full_sleep_nsec;
      nanosleep(&ts, NULL);
    }
}
/* LogPipe queue() implementation for sources (ack-tracker variant):
 * finalize the message, account one slot of the flow-control window via an
 * atomic decrement, attach the source's ack callback and forward.  The
 * window accounting and ack wiring are order-sensitive; do not reorder. */
static void
log_source_queue(LogPipe *s, LogMessage *msg, const LogPathOptions *path_options, gpointer user_data)
{
  LogSource *self = (LogSource *) s;
  LogPathOptions local_options = *path_options;
  gint old_window_size;
  gint i;

  msg_set_context(msg);

  /* use the receive timestamp when the message had none or the config
   * says not to keep the sender's stamp */
  if (msg->timestamps[LM_TS_STAMP].tv_sec == -1 || !self->options->keep_timestamp)
    msg->timestamps[LM_TS_STAMP] = msg->timestamps[LM_TS_RECVD];
  g_assert(msg->timestamps[LM_TS_STAMP].zone_offset != -1);

  /* register the message with the ack tracker before any forwarding */
  ack_tracker_track_msg(self->ack_tracker, msg);

  /* $HOST setup */
  log_source_mangle_hostname(self, msg);

  /* $PROGRAM override */
  if (self->options->program_override)
    {
      /* override_len is computed lazily on first use (-1 == not yet known) */
      if (self->options->program_override_len < 0)
        self->options->program_override_len = strlen(self->options->program_override);
      log_msg_set_value(msg, LM_V_PROGRAM, self->options->program_override, self->options->program_override_len);
    }

  /* $HOST override */
  if (self->options->host_override)
    {
      if (self->options->host_override_len < 0)
        self->options->host_override_len = strlen(self->options->host_override);
      log_msg_set_value(msg, LM_V_HOST, self->options->host_override, self->options->host_override_len);
    }

  /* source specific tags */
  if (self->options->tags)
    {
      for (i = 0; i < self->options->tags->len; i++)
        {
          log_msg_set_tag_by_id(msg, g_array_index(self->options->tags, LogTagId, i));
        }
    }
  log_msg_set_tag_by_id(msg, self->options->source_group_tag);

  /* stats counters (per-host at level 2, per-sender/per-program at level 3) */
  if (stats_check_level(2))
    {
      stats_lock();
      stats_register_and_increment_dynamic_counter(2, SCS_HOST | SCS_SOURCE, NULL, log_msg_get_value(msg, LM_V_HOST, NULL), msg->timestamps[LM_TS_RECVD].tv_sec);
      if (stats_check_level(3))
        {
          stats_register_and_increment_dynamic_counter(3, SCS_SENDER | SCS_SOURCE, NULL, log_msg_get_value(msg, LM_V_HOST_FROM, NULL), msg->timestamps[LM_TS_RECVD].tv_sec);
          stats_register_and_increment_dynamic_counter(3, SCS_PROGRAM | SCS_SOURCE, NULL, log_msg_get_value(msg, LM_V_PROGRAM, NULL), msg->timestamps[LM_TS_RECVD].tv_sec);
        }
      stats_unlock();
    }
  stats_syslog_process_message_pri(msg->pri);

  /* message setup finished, send it out */

  /* NOTE: we start by enabling flow-control, thus we need an acknowledgement */
  local_options.ack_needed = TRUE;
  log_msg_ref(msg);
  log_msg_add_ack(msg, &local_options);
  msg->ack_func = log_source_msg_ack;

  /* consume one slot of the window; the ack path gives it back */
  old_window_size = g_atomic_counter_exchange_and_add(&self->window_size, -1);

  /*
   * NOTE: this assertion validates that the source is not overflowing its
   * own flow-control window size, decreased above, by the atomic statement.
   *
   * If the _old_ value is zero, that means that the decrement operation
   * above has decreased the value to -1.
   */
  g_assert(old_window_size > 0);

  stats_counter_inc(self->recvd_messages);
  stats_counter_set(self->last_message_seen, msg->timestamps[LM_TS_RECVD].tv_sec);
  log_pipe_forward_msg(s, msg, &local_options);

  msg_set_context(NULL);

  /* threaded sources briefly sleep when the window just filled, giving the
   * consumer a chance to drain before the next message arrives */
  if (accurate_nanosleep && self->threaded && self->window_full_sleep_nsec > 0 && !log_source_free_to_send(self))
    {
      struct timespec ts;

      /* wait one 0.1msec in the hope that the buffer clears up */
      ts.tv_sec = 0;
      ts.tv_nsec = self->window_full_sleep_nsec;
      nanosleep(&ts, NULL);
    }
}
/* GSource check(): decide whether dispatch should run.  For followed files
 * (follow_freq > 0) this compares the read position against the file size
 * and detects rotation/truncation via stat(); for socket-like fds it just
 * inspects the poll results.  Notifications (NC_FILE_EOF/NC_FILE_MOVED) are
 * raised as side effects; the branch order is significant. */
static gboolean
log_reader_fd_check(GSource *source)
{
  LogReaderWatch *self = (LogReaderWatch *) source;

  /* flow control closed the window: never report readiness */
  if (!log_source_free_to_send(&self->reader->super))
    return FALSE;

  if (self->reader->options->follow_freq > 0)
    {
      struct stat st, followed_st;
      off_t pos = -1;
      gint fd = log_proto_get_fd(self->reader->proto);

      /* FIXME: this is a _HUGE_ layering violation... */
      if (fd >= 0)
        {
          pos = lseek(fd, 0, SEEK_CUR);
          if (pos == (off_t) -1)
            {
              msg_error("Error invoking seek on followed file",
                        evt_tag_errno("error", errno),
                        NULL);
              return FALSE;
            }

          if (fstat(fd, &st) < 0)
            {
              /* ESTALE on NFS means the file we hold open was replaced */
              if (errno == ESTALE)
                {
                  msg_trace("log_reader_fd_check file moved ESTALE",
                            evt_tag_str("follow_filename",self->reader->follow_filename),
                            NULL);
                  log_pipe_notify(self->reader->control, &self->reader->super.super, NC_FILE_MOVED, self);
                  return FALSE;
                }
              else
                {
                  msg_error("Error invoking fstat() on followed file",
                            evt_tag_errno("error", errno),
                            NULL);
                  return FALSE;
                }
            }

          msg_trace("log_reader_fd_check",
                    evt_tag_int("pos", pos),
                    evt_tag_int("size", st.st_size),
                    NULL);

          /* pos < size: unread data; pos == size: EOF; pos > size: the
           * file was truncated behind us, reopen it */
          if (pos < st.st_size)
            {
              return TRUE;
            }
          else if (pos == st.st_size)
            {
              log_pipe_notify(self->reader->control, &self->reader->super.super, NC_FILE_EOF, self);
            }
          else if (pos > st.st_size)
            {
              /* reopen the file */
              log_pipe_notify(self->reader->control, &self->reader->super.super, NC_FILE_MOVED, self);
            }
        }

      /* at EOF (or no fd): check whether the followed name now refers to a
       * different (rotated) file */
      if (self->reader->follow_filename && stat(self->reader->follow_filename, &followed_st) != -1)
        {
          if (fd < 0 || (st.st_ino != followed_st.st_ino && followed_st.st_size > 0))
            {
              msg_trace("log_reader_fd_check file moved eof",
                        evt_tag_int("pos", pos),
                        evt_tag_int("size", followed_st.st_size),
                        evt_tag_str("follow_filename",self->reader->follow_filename),
                        NULL);
              /* file was moved and we are at EOF, follow the new file */
              log_pipe_notify(self->reader->control, &self->reader->super.super, NC_FILE_MOVED, self);
            }
        }
      else if (self->reader->follow_filename)
        {
          /* the followed file does not exist (yet); log at most once per
           * follow_freq interval to avoid flooding */
          GTimeVal now;

          g_source_get_current_time(source, &now);
          if (g_time_val_diff(&now, &self->last_follow_freq_check) > self->reader->options->follow_freq * 1000)
            {
              msg_verbose("Follow mode file still does not exist",
                          evt_tag_str("filename", self->reader->follow_filename),
                          NULL);
              self->last_follow_freq_check = now;
            }
        }
      return FALSE;
    }

  return !!(self->pollfd.revents & (G_IO_IN | G_IO_OUT | G_IO_ERR | G_IO_HUP));
}