/* move items from the per-thread input queue to the lock-protected "wait" queue
 *
 * Caller must hold self->super.lock (hence "_unlocked" = "lock already taken"
 * convention).  Drops the oldest messages from the input FIFO first if moving
 * the whole FIFO would push the overall queue past qoverflow_size, updating
 * the dropped_messages counter and acking/freeing each dropped node.
 */
static void
log_queue_fifo_move_input_unlocked(LogQueueFifo *self, gint thread_id)
{
  gint queue_len;

  /* since we're in the input thread, queue_len will be racy. It can
   * increase due to log_queue_fifo_push_head() and can also decrease as
   * items are removed from the output queue using log_queue_pop_head().
   *
   * The only reason we're using it here is to check for qoverflow
   * overflows, however the only side-effect of the race (if lost) is that
   * we would lose a couple of message too many or add some more messages to
   * qoverflow than permitted by the user. Since if flow-control is used,
   * the fifo size should be sized larger than the potential window sizes,
   * otherwise we can lose messages anyway, this is not deemed a cost to
   * justify proper locking in this case.
   */

  queue_len = log_queue_fifo_get_length(&self->super);
  if (queue_len + self->qoverflow_input[thread_id].len > self->qoverflow_size)
    {
      /* slow path, the input thread's queue would overflow the queue, let's drop some messages */

      LogPathOptions path_options = LOG_PATH_OPTIONS_INIT;
      gint i;
      gint n;

      /* NOTE: MAX is needed here to ensure that the lost race on queue_len
       * doesn't result in n < 0 */
      n = self->qoverflow_input[thread_id].len - MAX(0, (self->qoverflow_size - queue_len));

      /* drop the n oldest entries from the head of the input FIFO */
      for (i = 0; i < n; i++)
        {
          LogMessageQueueNode *node = iv_list_entry(self->qoverflow_input[thread_id].items.next, LogMessageQueueNode, list);
          LogMessage *msg = node->msg;

          iv_list_del(&node->list);
          self->qoverflow_input[thread_id].len--;
          /* preserve the ack requirement of the dropped node so log_msg_drop()
           * can notify flow-control about the lost message */
          path_options.ack_needed = node->ack_needed;
          stats_counter_inc(self->super.dropped_messages);
          log_msg_free_queue_node(node);
          log_msg_drop(msg, &path_options);
        }
      msg_debug("Destination queue full, dropping messages",
                evt_tag_int("queue_len", queue_len),
                evt_tag_int("log_fifo_size", self->qoverflow_size),
                evt_tag_int("count", n),
                evt_tag_str("persist_name", self->super.persist_name),
                NULL);
    }
  /* account for every surviving input item, then splice the whole input
   * FIFO onto the tail of the wait queue in one O(1) list operation */
  stats_counter_add(self->super.stored_messages, self->qoverflow_input[thread_id].len);
  iv_list_splice_tail_init(&self->qoverflow_input[thread_id].items, &self->qoverflow_wait);
  self->qoverflow_wait_len += self->qoverflow_input[thread_id].len;
  self->qoverflow_input[thread_id].len = 0;
}
gboolean log_queue_fifo_is_empty_racy(LogQueue *s) { LogQueueFifo *self = (LogQueueFifo *) s; gboolean has_message_in_queue = FALSE; g_static_mutex_lock(&self->super.lock); if (log_queue_fifo_get_length(s) > 0) { has_message_in_queue = TRUE; } else { gint i; for (i = 0; i < log_queue_max_threads && !has_message_in_queue; i++) { has_message_in_queue |= self->qoverflow_input[i].finish_cb_registered; } } g_static_mutex_unlock(&self->super.lock); return !has_message_in_queue; }
/**
 * Assumed to be called from one of the input threads. If the thread_id
 * cannot be determined, the item is put directly in the wait queue.
 *
 * Puts the message to the queue, and logs an error if it caused the
 * queue to be full.
 *
 * It attempts to put the item to the per-thread input queue.
 *
 * NOTE: It consumes the reference passed by the caller.
 **/
static void
log_queue_fifo_push_tail(LogQueue *s, LogMessage *msg, const LogPathOptions *path_options)
{
  LogQueueFifo *self = (LogQueueFifo *) s;
  gint thread_id;
  LogMessageQueueNode *node;

  thread_id = main_loop_io_worker_thread_id();

  g_assert(thread_id < 0 || log_queue_max_threads > thread_id);

  /* NOTE: we don't use high-water marks for now, as log_fetch_limit
   * limits the number of items placed on the per-thread input queue
   * anyway, and any sane number decreased the performance measurably.
   *
   * This means that per-thread input queues contain _all_ items that
   * a single poll iteration produces. And once the reader is finished
   * (either because the input is depleted or because of
   * log_fetch_limit / window_size) the whole bunch is propagated to
   * the "wait" queue.
   */
  if (thread_id >= 0)
    {
      /* fastpath, use per-thread input FIFOs */
      if (!self->qoverflow_input[thread_id].finish_cb_registered)
        {
          /* this is the first item in the input FIFO, register a finish
           * callback to make sure it gets moved to the wait_queue if the
           * input thread finishes */

          main_loop_io_worker_register_finish_callback(&self->qoverflow_input[thread_id].cb);
          self->qoverflow_input[thread_id].finish_cb_registered = TRUE;
        }
      node = log_msg_alloc_queue_node(msg, path_options);
      iv_list_add_tail(&node->list, &self->qoverflow_input[thread_id].items);
      self->qoverflow_input[thread_id].len++;
      log_msg_unref(msg);
      return;
    }

  /* slow path: no thread_id could be determined (thread_id < 0), put the
   * item directly on the wait_queue under the lock.
   *
   * NOTE(review): the original code additionally called
   * log_queue_fifo_move_input_unlocked() guarded by "if (thread_id >= 0)"
   * here; that branch was unreachable because the fastpath above returns
   * for every thread_id >= 0, so it has been removed. */
  g_static_mutex_lock(&self->super.lock);
  if (log_queue_fifo_get_length(s) < self->qoverflow_size)
    {
      node = log_msg_alloc_queue_node(msg, path_options);

      iv_list_add_tail(&node->list, &self->qoverflow_wait);
      self->qoverflow_wait_len++;
      log_queue_push_notify(&self->super);
      stats_counter_inc(self->super.stored_messages);

      g_static_mutex_unlock(&self->super.lock);
      /* node holds its own reference, drop the caller's */
      log_msg_unref(msg);
    }
  else
    {
      stats_counter_inc(self->super.dropped_messages);
      g_static_mutex_unlock(&self->super.lock);
      /* drop outside the lock; ack/NACK flow-control as requested by the caller */
      log_msg_drop(msg, path_options);

      msg_debug("Destination queue full, dropping message",
                evt_tag_int("queue_len", log_queue_fifo_get_length(&self->super)),
                evt_tag_int("log_fifo_size", self->qoverflow_size),
                NULL);
    }
  return;
}
/* NOTE: this is inherently racy, can only be called if log processing is
 * suspended (e.g. reload time)
 *
 * Keep the queue across a reload if it still holds unsent messages, or if
 * the backlog (messages sent but not yet acknowledged) is non-empty;
 * discarding it in either case would lose messages.
 *
 * NOTE(review): SOURCE contained two definitions of this static function,
 * which is a redefinition error; they are merged here, keeping the variant
 * that also considers qbacklog_len (a superset of the other's behavior).
 */
static gboolean
log_queue_fifo_keep_on_reload(LogQueue *s)
{
  LogQueueFifo *self = (LogQueueFifo *) s;

  return log_queue_fifo_get_length(s) > 0 || self->qbacklog_len > 0;
}