/*
 * Move the last rewind_count entries of the backlog back to the head of
 * the output queue so they get delivered again.
 *
 * NOTE(review): an earlier comment claimed the rewind must also drop the
 * ack/ref taken when pop_head moved the message to the backlog; no such
 * call is visible in this function — confirm against the pop_head/ack
 * handling elsewhere in this file.
 */
static void
log_queue_fifo_rewind_backlog(LogQueue *s, guint rewind_count)
{
  LogQueueFifo *self = (LogQueueFifo *) s;

  /* never rewind more entries than the backlog actually holds */
  if (rewind_count > self->qbacklog_len)
    rewind_count = self->qbacklog_len;

  while (rewind_count-- > 0)
    {
      /* take the most recently backlogged node (the list tail) ... */
      LogMessageQueueNode *tail_node = iv_list_entry(self->qbacklog.prev, LogMessageQueueNode, list);

      /* ... and re-queue it at the front of the output queue; walking
       * the backlog tail-first keeps the original message order */
      iv_list_del_init(&tail_node->list);
      iv_list_add(&tail_node->list, &self->qoverflow_output);

      self->qbacklog_len--;
      self->qoverflow_output_len++;
      stats_counter_inc(self->super.stored_messages);
    }
}
/*
 * Event handler run in a pool worker thread when it is kicked (new work
 * posted, or shutdown requested).  Drains the work items queued so far,
 * then either parks the thread on the idle list with a 10-second idle
 * timer, dies (on shutdown), or re-kicks itself if work is still pending.
 */
static void iv_work_thread_got_event(void *_thr)
{
	struct work_pool_thread *thr = _thr;
	struct work_pool_priv *pool = thr->pool;
	uint32_t last_seq;

	mutex_lock(&pool->lock);

	thr->kicked = 0;

	/* If we were parked on the idle list, remove ourselves and cancel
	 * the pending idle-expiry timer: we are about to run work. */
	if (!iv_list_empty(&thr->list)) {
		iv_list_del_init(&thr->list);
		iv_timer_unregister(&thr->idle_timer);
	}

	/* Snapshot the tail sequence number and only drain items queued
	 * before this pass started (presumably to bound one invocation;
	 * confirm against the pool's kick logic).  The cast to int32_t of
	 * the difference makes the comparison robust against sequence
	 * number wraparound. */
	last_seq = pool->seq_tail;
	while ((int32_t)(last_seq - pool->seq_head) > 0) {
		struct iv_work_item *work;

		pool->seq_head++;
		work = iv_container_of(pool->work_items.next, struct iv_work_item, list);
		iv_list_del(&work->list);

		/* Run the work callback without holding the pool lock, so
		 * other threads can queue/dequeue work concurrently. */
		mutex_unlock(&pool->lock);
		work->work(work->cookie);
		iv_invalidate_now();
		mutex_lock(&pool->lock);

		/* First completion since work_done was last drained: post
		 * the pool's event so the owner collects the results. */
		if (iv_list_empty(&pool->work_done))
			iv_event_post(&pool->ev);
		iv_list_add_tail(&work->list, &pool->work_done);
	}

	if (pool->seq_head == pool->seq_tail) {
		if (!pool->shutting_down) {
			/* Nothing left to do: park on the idle list and
			 * arrange to expire after 10 seconds of idleness. */
			iv_list_add(&thr->list, &pool->idle_threads);
			iv_validate_now();
			thr->idle_timer.expires = iv_now;
			thr->idle_timer.expires.tv_sec += 10;
			iv_timer_register(&thr->idle_timer);
		} else {
			__iv_work_thread_die(thr);
		}
	} else {
		/*
		 * If we're already at the maximum number of pool
		 * threads, and none of those threads were idle when
		 * more work arrived, then there may have been no
		 * kick sent for the new work item(s) (and no new
		 * pool thread started either), so if we're leaving
		 * with work items still pending, make sure we get
		 * called again, so that we don't deadlock.
		 */
		iv_event_post(&thr->kick);
	}

	mutex_unlock(&pool->lock);
}
/*
 * Put an item back to the front of the queue.
 *
 * This is assumed to be called only from the output thread.
 *
 * NOTE: It consumes the reference passed by the caller.
 */
static void
log_queue_fifo_push_head(LogQueue *s, LogMessage *msg, const LogPathOptions *path_options)
{
  LogQueueFifo *self = (LogQueueFifo *) s;

  /* No limit checks and no drops when re-queueing "in front": this path
   * runs when an item already taken for processing could not be
   * delivered, so it must go back unconditionally. */
  LogMessageQueueNode *requeued = log_msg_alloc_dynamic_queue_node(msg, path_options);

  iv_list_add(&requeued->list, &self->qoverflow_output);
  self->qoverflow_output_len++;
  stats_counter_inc(self->super.stored_messages);

  /* drop the reference the caller handed us (see NOTE above) */
  log_msg_unref(msg);
}