static void
_push_tail(LogQueue *s, LogMessage *msg, const LogPathOptions *path_options)
{
  LogQueueDisk *self = (LogQueueDisk *) s;
  LogPathOptions local_options = *path_options;

  g_static_mutex_lock(&self->super.lock);
  if (self->push_tail)
    {
      if (self->push_tail(self, msg, &local_options, path_options))
        {
          /* the implementation accepted the message: notify any waiting
           * reader, account it, and ack using the (possibly modified)
           * local copy of the path options */
          log_queue_push_notify(&self->super);
          stats_counter_inc(self->super.stored_messages);
          log_msg_ack(msg, &local_options, AT_PROCESSED);
          log_msg_unref(msg);
          g_static_mutex_unlock(&self->super.lock);
          return;
        }
    }

  /* the implementation rejected the message (e.g. the disk buffer is
   * full): suspend the sender if flow control is requested, otherwise
   * drop the message */
  stats_counter_inc(self->super.dropped_messages);
  if (path_options->flow_control_requested)
    log_msg_ack(msg, path_options, AT_SUSPENDED);
  else
    log_msg_drop(msg, path_options, AT_PROCESSED);
  g_static_mutex_unlock(&self->super.lock);
}
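/* Illustrative sketch, not part of the original source: a minimal
 * push_tail hook as a LogQueueDisk implementation might supply it,
 * matching the function pointer dispatched above. qdisk_write_message()
 * is a hypothetical helper standing in for serializing the message into
 * the on-disk buffer. Returning TRUE makes _push_tail() notify readers,
 * bump stored_messages and ack the message; FALSE sends it down the
 * drop-or-suspend path. Implementations may also rewrite local_options
 * to influence how the subsequent log_msg_ack() call behaves. */
static gboolean
_example_disk_push_tail(LogQueueDisk *self, LogMessage *msg,
                        LogPathOptions *local_options,
                        const LogPathOptions *path_options)
{
  /* hypothetical: returns FALSE when the disk buffer has no room left */
  if (!qdisk_write_message(self, msg))
    return FALSE;

  return TRUE;
}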
/* Move items from the per-thread input queue to the lock-protected
 * "wait" queue, grabbing the queue lock first. This is registered as a
 * callback to be invoked when the input worker thread finishes its
 * job. */
static gpointer
log_queue_fifo_move_input(gpointer user_data)
{
  LogQueueFifo *self = (LogQueueFifo *) user_data;
  gint thread_id;

  thread_id = main_loop_io_worker_thread_id();
  g_assert(thread_id >= 0);

  g_static_mutex_lock(&self->super.lock);
  log_queue_fifo_move_input_unlocked(self, thread_id);
  log_queue_push_notify(&self->super);
  g_static_mutex_unlock(&self->super.lock);

  self->qoverflow_input[thread_id].finish_cb_registered = FALSE;
  return NULL;
}
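/* Sketch of how the finish callback above might be wired up at queue
 * construction time; this is an assumption reconstructed from the usage
 * in this file, not a verbatim excerpt. Each per-thread slot's cb points
 * at log_queue_fifo_move_input() with the queue itself as user_data, so
 * the fastpath in push_tail only has to register the pre-initialized
 * callback. */
static void
_example_init_input_slots(LogQueueFifo *self)
{
  gint i;

  for (i = 0; i < log_queue_max_threads; i++)
    {
      INIT_IV_LIST_HEAD(&self->qoverflow_input[i].items);
      self->qoverflow_input[i].cb.user_data = self;
      self->qoverflow_input[i].cb.func = log_queue_fifo_move_input;
    }
}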
/**
 * Puts the message to the queue. Assumed to be called from one of the
 * input threads: it first attempts to put the item on the per-thread
 * input queue.
 *
 * If the thread_id cannot be determined, the item is put directly in
 * the wait queue; in that case, if the queue is already full, the
 * message is dropped and a debug message is emitted.
 *
 * NOTE: It consumes the reference passed by the caller.
 **/
static void
log_queue_fifo_push_tail(LogQueue *s, LogMessage *msg, const LogPathOptions *path_options)
{
  LogQueueFifo *self = (LogQueueFifo *) s;
  gint thread_id;
  LogMessageQueueNode *node;

  thread_id = main_loop_io_worker_thread_id();
  g_assert(thread_id < 0 || log_queue_max_threads > thread_id);

  /* NOTE: we don't use high-water marks for now, as log_fetch_limit
   * limits the number of items placed on the per-thread input queue
   * anyway, and any sane number decreased the performance measurably.
   *
   * This means that per-thread input queues contain _all_ items that
   * a single poll iteration produces. And once the reader is finished
   * (either because the input is depleted or because of
   * log_fetch_limit / window_size) the whole bunch is propagated to
   * the "wait" queue. */

  if (thread_id >= 0)
    {
      /* fastpath: use per-thread input FIFOs */
      if (!self->qoverflow_input[thread_id].finish_cb_registered)
        {
          /* this is the first item in the input FIFO, register a finish
           * callback to make sure it gets moved to the wait_queue when
           * the input thread finishes */
          main_loop_io_worker_register_finish_callback(&self->qoverflow_input[thread_id].cb);
          self->qoverflow_input[thread_id].finish_cb_registered = TRUE;
        }
      node = log_msg_alloc_queue_node(msg, path_options);
      iv_list_add_tail(&node->list, &self->qoverflow_input[thread_id].items);
      self->qoverflow_input[thread_id].len++;
      log_msg_unref(msg);
      return;
    }

  /* slow path: no thread_id, put the pending item directly on the
   * wait_queue (the fastpath above has already returned for any valid
   * thread_id) */
  g_static_mutex_lock(&self->super.lock);

  if (log_queue_fifo_get_length(s) < self->qoverflow_size)
    {
      node = log_msg_alloc_queue_node(msg, path_options);
      iv_list_add_tail(&node->list, &self->qoverflow_wait);
      self->qoverflow_wait_len++;
      log_queue_push_notify(&self->super);
      stats_counter_inc(self->super.stored_messages);
      g_static_mutex_unlock(&self->super.lock);
      log_msg_unref(msg);
    }
  else
    {
      stats_counter_inc(self->super.dropped_messages);
      g_static_mutex_unlock(&self->super.lock);
      log_msg_drop(msg, path_options);

      msg_debug("Destination queue full, dropping message",
                evt_tag_int("queue_len", log_queue_fifo_get_length(&self->super)),
                evt_tag_int("log_fifo_size", self->qoverflow_size),
                NULL);
    }
}
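/* For reference, a sketch of the per-thread input slot that push_tail
 * and move_input manipulate above; the field set is an assumption
 * reconstructed from the accesses in this file, not a verbatim
 * definition. */
typedef struct
{
  struct iv_list_head items;          /* intrusive list of LogMessageQueueNode */
  MainLoopIOWorkerFinishCallback cb;  /* invokes log_queue_fifo_move_input() */
  guint16 len;                        /* number of nodes queued on this slot */
  guint16 finish_cb_registered;       /* TRUE while cb is armed for this poll iteration */
} ExampleInputSlot;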