/*
 * Can only run from the output thread.
 */
static void
log_queue_fifo_ack_backlog(LogQueue *s, gint n)
{
  LogQueueFifo *self = (LogQueueFifo *) s;
  LogMessage *msg;
  LogPathOptions path_options = LOG_PATH_OPTIONS_INIT;
  gint i;

  log_queue_assert_output_thread(s);

  for (i = 0; i < n && self->qbacklog_len > 0; i++)
    {
      LogMessageQueueNode *node;

      node = iv_list_entry(self->qbacklog.next, LogMessageQueueNode, list);
      msg = node->msg;
      path_options.ack_needed = node->ack_needed;

      iv_list_del(&node->list);
      log_msg_free_queue_node(node);
      self->qbacklog_len--;

      log_msg_ack(msg, &path_options);
      log_msg_unref(msg);
    }
}

/*
 * log_queue_fifo_rewind_backlog:
 *
 * Move items from our backlog back to the qoverflow queue. Note that this
 * function does not care about the qoverflow size limit: the backlog has to
 * be put somewhere. The backlog is emptied here; it will be filled again as
 * the items are sent once more.
 *
 * NOTE: this is assumed to be called from the output thread.
 */
static void
log_queue_fifo_rewind_backlog(LogQueue *s)
{
  LogQueueFifo *self = (LogQueueFifo *) s;

  log_queue_assert_output_thread(s);

  iv_list_splice_tail_init(&self->qbacklog, &self->qoverflow_output);
  self->qoverflow_output_len += self->qbacklog_len;
  stats_counter_add(self->super.stored_messages, self->qbacklog_len);
  self->qbacklog_len = 0;
}
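
/*
 * Illustrative sketch only, not part of the queue implementation: the error
 * path of a hypothetical destination driver. Messages that were popped with
 * push_to_backlog=TRUE but not yet acked are still sitting on the backlog;
 * rewinding puts them back onto the queue so that the next delivery attempt
 * sends them again. The sketch assumes a generic log_queue_rewind_backlog()
 * wrapper that dispatches to log_queue_fifo_rewind_backlog() above.
 */
static void
example_on_write_error(LogQueue *queue)
{
  /* re-queue everything that was delivered but not yet confirmed; it will be
   * popped again once the connection is re-established and the queue is
   * drained anew */
  log_queue_rewind_backlog(queue);
}
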
/*
 * Put an item back to the front of the queue.
 *
 * This is assumed to be called only from the output thread.
 */
static void
log_queue_fifo_push_head(LogQueue *s, LogMessage *msg, const LogPathOptions *path_options)
{
  LogQueueFifo *self = (LogQueueFifo *) s;
  LogMessageQueueNode *node;

  /* we don't check limits when putting items back in front: this normally
   * happens when we have already started processing an item but could not
   * deliver it in the end. No checks, no drops either. */

  log_queue_assert_output_thread(s);

  node = log_msg_alloc_dynamic_queue_node(msg, path_options);
  iv_list_add(&node->list, &self->qoverflow_output);
  self->qoverflow_output_len++;

  /* the queue node took its own reference to msg in
   * log_msg_alloc_dynamic_queue_node(), so drop the reference we were
   * handed: push_head consumes the caller's reference */
  log_msg_unref(msg);

  stats_counter_inc(self->super.stored_messages);
}
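
/*
 * Illustrative sketch only, not part of the queue implementation: push_head
 * is the "undo" of a pop. A hypothetical driver that pops a message without
 * putting it on the backlog and then finds it cannot deliver it returns the
 * message to the front of the queue, so ordering is preserved. The sketch
 * assumes generic log_queue_pop_head()/log_queue_push_head() wrappers that
 * dispatch to the methods in this file.
 */
static void
example_undo_pop(LogQueue *queue)
{
  LogMessage *msg;
  LogPathOptions path_options = LOG_PATH_OPTIONS_INIT;

  /* pop without push_to_backlog: we own the reference returned in msg */
  if (!log_queue_pop_head(queue, &msg, &path_options, FALSE, FALSE))
    return;

  /* ... a delivery attempt would happen here; assume it did not succeed ... */

  /* return the message to the front of the queue; push_head consumes the
   * reference we got from pop_head */
  log_queue_push_head(queue, msg, &path_options);
}
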
/*
 * Can only run from the output thread.
 *
 * NOTE: this returns a reference which the caller must take care to free.
 */
static gboolean
log_queue_fifo_pop_head(LogQueue *s, LogMessage **msg, LogPathOptions *path_options, gboolean push_to_backlog, gboolean ignore_throttle)
{
  LogQueueFifo *self = (LogQueueFifo *) s;
  LogMessageQueueNode *node;

  log_queue_assert_output_thread(s);

  if (!ignore_throttle && self->super.throttle && self->super.throttle_buckets == 0)
    {
      return FALSE;
    }
    
  if (self->qoverflow_output_len == 0)
    {
      /* slow path, output queue is empty, get some elements from the wait queue */
      g_static_mutex_lock(&self->super.lock);
      iv_list_splice_tail_init(&self->qoverflow_wait, &self->qoverflow_output);
      self->qoverflow_output_len = self->qoverflow_wait_len;
      self->qoverflow_wait_len = 0;
      g_static_mutex_unlock(&self->super.lock);
    }

  if (self->qoverflow_output_len > 0)
    {
      node = iv_list_entry(self->qoverflow_output.next, LogMessageQueueNode, list);

      *msg = node->msg;
      path_options->ack_needed = node->ack_needed;
      self->qoverflow_output_len--;
      if (!push_to_backlog)
        {
          iv_list_del(&node->list);
          log_msg_free_queue_node(node);
        }
      else
        {
          iv_list_del_init(&node->list);
        }
    }
  else
    {
      /* no items on either the wait queue or the output queue.
       *
       * NOTE: the input queues may still contain items even in this case,
       * but we don't touch them here; they'll be migrated to the wait
       * queue once the input threads finish their processing (or the
       * high watermark is reached). Also, we hold no lock on them, so
       * there is no way to touch them safely from here.
       */
      return FALSE;
    }
  stats_counter_dec(self->super.stored_messages);

  if (push_to_backlog)
    {
      log_msg_ref(*msg);
      iv_list_add_tail(&node->list, &self->qbacklog);
      self->qbacklog_len++;
    }
  if (!ignore_throttle && self->super.throttle_buckets > 0)
    {
      self->super.throttle_buckets--;
    }

  return TRUE;
}
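
/*
 * Illustrative sketch only, not part of the queue implementation: a typical
 * output-thread cycle driving the methods above through generic log_queue_*
 * wrappers (assumed to have the same signatures as the methods they dispatch
 * to). example_send() is a hypothetical stand-in for a destination driver's
 * I/O; here it is only a stub.
 */
static gboolean
example_send(LogMessage *msg G_GNUC_UNUSED)
{
  /* hypothetical placeholder: pretend every message is written successfully */
  return TRUE;
}

static void
example_output_cycle(LogQueue *queue)
{
  LogMessage *msg;
  LogPathOptions path_options = LOG_PATH_OPTIONS_INIT;
  gint sent = 0;

  /* push_to_backlog=TRUE: each popped message stays on the backlog until the
   * peer confirms it; we still own the extra reference returned to us */
  while (log_queue_pop_head(queue, &msg, &path_options, TRUE, FALSE))
    {
      gboolean success = example_send(msg);

      log_msg_unref(msg);
      if (!success)
        {
          /* delivery failed: put the unconfirmed messages back in front of
           * the queue (cf. example_on_write_error() above) */
          log_queue_rewind_backlog(queue);
          return;
        }
      sent++;
    }

  /* assume the peer confirmed the batch once every message was written: drop
   * it from the backlog, which also propagates the ACKs towards the sources */
  if (sent > 0)
    log_queue_ack_backlog(queue, sent);
}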