Example no. 1
/* main thread **************************************************************/
static void iv_work_event(void *_pool)
{
	struct work_pool_priv *pool = _pool;
	struct iv_list_head items;

	mutex_lock(&pool->lock);
	__iv_list_steal_elements(&pool->work_done, &items);
	mutex_unlock(&pool->lock);

	while (!iv_list_empty(&items)) {
		struct iv_work_item *work;

		work = iv_container_of(items.next, struct iv_work_item, list);
		iv_list_del(&work->list);

		work->completion(work->cookie);
	}

	if (pool->shutting_down) {
		mutex_lock(&pool->lock);
		if (!pool->started_threads && iv_list_empty(&pool->work_done)) {
			mutex_unlock(&pool->lock);
			mutex_destroy(&pool->lock);
			iv_event_unregister(&pool->ev);
			free(pool);
			return;
		}
		mutex_unlock(&pool->lock);
	}
}
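
To place this handler in context, here is a minimal usage sketch of the public iv_work interface, assuming the documented iv_work(3) calls (IV_WORK_POOL_INIT(), iv_work_pool_create(), IV_WORK_ITEM_INIT(), iv_work_pool_submit_work(), iv_work_pool_put()); the completion callback set here is what iv_work_event() above ends up invoking on the submitting thread. Error handling and shutdown ordering are simplified.

#include <stdio.h>
#include <iv.h>
#include <iv_work.h>

static struct iv_work_pool pool;
static struct iv_work_item item;

/* runs on one of the pool's worker threads */
static void do_work(void *cookie)
{
	printf("heavy lifting on a worker thread\n");
}

/* runs back on the submitting thread, dispatched via the pool's iv_event */
static void work_done(void *cookie)
{
	printf("completion on the submitting thread\n");

	iv_work_pool_put(&pool);	/* start asynchronous pool teardown */
	iv_quit();
}

int main(void)
{
	iv_init();

	IV_WORK_POOL_INIT(&pool);
	pool.max_threads = 4;
	if (iv_work_pool_create(&pool) < 0)
		return 1;

	IV_WORK_ITEM_INIT(&item);
	item.work = do_work;
	item.completion = work_done;
	iv_work_pool_submit_work(&pool, &item);

	iv_main();
	iv_deinit();

	return 0;
}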
Example no. 2
/*
 * Can only run from the output thread.
 */
static void
log_queue_fifo_ack_backlog(LogQueue *s, gint n)
{
  LogQueueFifo *self = (LogQueueFifo *) s;
  LogMessage *msg;
  LogPathOptions path_options = LOG_PATH_OPTIONS_INIT;
  gint i;

  log_queue_assert_output_thread(s);

  for (i = 0; i < n && self->qbacklog_len > 0; i++)
    {
      LogMessageQueueNode *node;

      node = iv_list_entry(self->qbacklog.next, LogMessageQueueNode, list);
      msg = node->msg;
      path_options.ack_needed = node->ack_needed;

      iv_list_del(&node->list);
      log_msg_free_queue_node(node);
      self->qbacklog_len--;

      log_msg_ack(msg, &path_options);
      log_msg_unref(msg);
    }
}
Example no. 3
static void iv_work_thread_got_event(void *_thr)
{
	struct work_pool_thread *thr = _thr;
	struct work_pool_priv *pool = thr->pool;
	uint32_t last_seq;

	mutex_lock(&pool->lock);

	thr->kicked = 0;

	if (!iv_list_empty(&thr->list)) {
		iv_list_del_init(&thr->list);
		iv_timer_unregister(&thr->idle_timer);
	}

	last_seq = pool->seq_tail;
	while ((int32_t)(last_seq - pool->seq_head) > 0) {
		struct iv_work_item *work;

		pool->seq_head++;
		work = iv_container_of(pool->work_items.next,
				       struct iv_work_item, list);
		iv_list_del(&work->list);

		mutex_unlock(&pool->lock);
		work->work(work->cookie);
		iv_invalidate_now();
		mutex_lock(&pool->lock);

		if (iv_list_empty(&pool->work_done))
			iv_event_post(&pool->ev);
		iv_list_add_tail(&work->list, &pool->work_done);
	}

	if (pool->seq_head == pool->seq_tail) {
		if (!pool->shutting_down) {
			iv_list_add(&thr->list, &pool->idle_threads);
			iv_validate_now();
			thr->idle_timer.expires = iv_now;
			thr->idle_timer.expires.tv_sec += 10;
			iv_timer_register(&thr->idle_timer);
		} else {
			__iv_work_thread_die(thr);
		}
	} else {
		/*
		 * If we're already at the maximum number of pool
		 * threads, and none of those threads were idle when
		 * more work arrived, then there may have been no
		 * kick sent for the new work item(s) (and no new
		 * pool thread started either), so if we're leaving
		 * with work items still pending, make sure we get
		 * called again, so that we don't deadlock.
		 */
		iv_event_post(&thr->kick);
	}

	mutex_unlock(&pool->lock);
}
Example no. 4
/*
 * Can only run from the output thread.
 *
 * NOTE: this returns a reference which the caller must take care to free.
 */
static LogMessage *
log_queue_fifo_pop_head(LogQueue *s, LogPathOptions *path_options)
{
  LogQueueFifo *self = (LogQueueFifo *) s;
  LogMessageQueueNode *node;
  LogMessage *msg = NULL;

  if (self->qoverflow_output_len == 0)
    {
      /* slow path, output queue is empty, get some elements from the wait queue */
      g_static_mutex_lock(&self->super.lock);
      iv_list_splice_tail_init(&self->qoverflow_wait, &self->qoverflow_output);
      self->qoverflow_output_len = self->qoverflow_wait_len;
      self->qoverflow_wait_len = 0;
      g_static_mutex_unlock(&self->super.lock);
    }

  if (self->qoverflow_output_len > 0)
    {
      node = iv_list_entry(self->qoverflow_output.next, LogMessageQueueNode, list);

      msg = node->msg;
      path_options->ack_needed = node->ack_needed;
      self->qoverflow_output_len--;
      if (!self->super.use_backlog)
        {
          iv_list_del(&node->list);
          log_msg_free_queue_node(node);
        }
      else
        {
          iv_list_del_init(&node->list);
        }
    }
  else
    {
      /* no items on either the wait queue or the output queue.
       *
       * NOTE: the input queues may contain items even in this case,
       * however we don't touch them here; they'll be migrated to the
       * wait_queue once the input threads finish their processing (or
       * the high watermark is reached). Also, they are unlocked, so
       * there is no way to touch them safely.
       */
      return NULL;
    }
  stats_counter_dec(self->super.stored_messages);

  if (self->super.use_backlog)
    {
      log_msg_ref(msg);
      iv_list_add_tail(&node->list, &self->qbacklog);
      self->qbacklog_len++;
    }

  return msg;
}
Example no. 5
/* move items from the per-thread input queue to the lock-protected "wait" queue */
static void
log_queue_fifo_move_input_unlocked(LogQueueFifo *self, gint thread_id)
{
  gint queue_len;

  /* since we're in the input thread, queue_len will be racy. It can
   * increase due to log_queue_fifo_push_head() and can also decrease as
   * items are removed from the output queue using log_queue_pop_head().
   *
   * The only reason we're using it here is to check for qoverflow
   * overflows; the only side-effect of losing the race is that we drop a
   * couple of messages too many, or admit a few more messages into
   * qoverflow than the user permitted.  Since, when flow-control is used,
   * the fifo should be sized larger than the potential window sizes
   * (otherwise we can lose messages anyway), this is not deemed a cost
   * that justifies proper locking here.
   */

  queue_len = log_queue_fifo_get_length(&self->super);
  if (queue_len + self->qoverflow_input[thread_id].len > self->qoverflow_size)
    {
      /* slow path: appending the input thread's queue would overflow the fifo, so drop some messages */

      LogPathOptions path_options = LOG_PATH_OPTIONS_INIT;
      gint i;
      gint n;

      /* NOTE: MAX is needed here to ensure that the lost race on queue_len
       * doesn't result in n < 0 */
      n = self->qoverflow_input[thread_id].len - MAX(0, (self->qoverflow_size - queue_len));

      for (i = 0; i < n; i++)
        {
          LogMessageQueueNode *node = iv_list_entry(self->qoverflow_input[thread_id].items.next, LogMessageQueueNode, list);
          LogMessage *msg = node->msg;

          iv_list_del(&node->list);
          self->qoverflow_input[thread_id].len--;
          path_options.ack_needed = node->ack_needed;
          stats_counter_inc(self->super.dropped_messages);
          log_msg_free_queue_node(node);
          log_msg_drop(msg, &path_options);
        }
      msg_debug("Destination queue full, dropping messages",
                evt_tag_int("queue_len", queue_len),
                evt_tag_int("log_fifo_size", self->qoverflow_size),
                evt_tag_int("count", n),
                evt_tag_str("persist_name", self->super.persist_name),
                NULL);
    }
  stats_counter_add(self->super.stored_messages, self->qoverflow_input[thread_id].len);
  iv_list_splice_tail_init(&self->qoverflow_input[thread_id].items, &self->qoverflow_wait);
  self->qoverflow_wait_len += self->qoverflow_input[thread_id].len;
  self->qoverflow_input[thread_id].len = 0;
}
Example no. 6
static void
log_queue_fifo_free_queue(struct iv_list_head *q)
{
  while (!iv_list_empty(q))
    {
      LogMessageQueueNode *node;
      LogPathOptions path_options = LOG_PATH_OPTIONS_INIT;
      LogMessage *msg;

      node = iv_list_entry(q->next, LogMessageQueueNode, list);
      iv_list_del(&node->list);

      path_options.ack_needed = node->ack_needed;
      msg = node->msg;
      log_msg_free_queue_node(node);
      log_msg_ack(msg, &path_options);
      log_msg_unref(msg);
    }
}
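
Since several of these examples revolve around the same intrusive-list drain pattern, here is a small self-contained sketch of it with a toy node type. struct item is hypothetical and INIT_IV_LIST_HEAD() is assumed from iv_list.h; the remaining calls all appear in the examples above.

#include <stdio.h>
#include <stdlib.h>
#include <iv_list.h>

struct item {
	int value;
	struct iv_list_head list;	/* linkage used by iv_list_entry() */
};

int main(void)
{
	struct iv_list_head queue;
	int i;

	INIT_IV_LIST_HEAD(&queue);

	/* enqueue a few items at the tail, as the producers above do */
	for (i = 0; i < 3; i++) {
		struct item *it = malloc(sizeof(*it));

		it->value = i;
		iv_list_add_tail(&it->list, &queue);
	}

	/* drain the list head-first, mirroring log_queue_fifo_free_queue() */
	while (!iv_list_empty(&queue)) {
		struct item *it = iv_list_entry(queue.next, struct item, list);

		iv_list_del(&it->list);
		printf("popped %d\n", it->value);
		free(it);
	}

	return 0;
}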
Example no. 7
static void iv_work_thread_idle_timeout(void *_thr)
{
	struct work_pool_thread *thr = _thr;
	struct work_pool_priv *pool = thr->pool;

	pthread_mutex_lock(&pool->lock);
	
	if (thr->kicked) {
		thr->idle_timer.expires = iv_now;
		thr->idle_timer.expires.tv_sec += 10;
		iv_timer_register(&thr->idle_timer);

		pthread_mutex_unlock(&pool->lock);

		return;
	}

	iv_list_del(&thr->list);
	__iv_work_thread_die(thr);

	pthread_mutex_unlock(&pool->lock);
}
Example no. 8
/*
 * Can only run from the output thread.
 */
static void
log_queue_fifo_ack_backlog(LogQueue *s, gint rewind_count)
{
  LogQueueFifo *self = (LogQueueFifo *) s;
  LogMessage *msg;
  LogPathOptions path_options = LOG_PATH_OPTIONS_INIT;
  gint pos;

  for (pos = 0; pos < rewind_count && self->qbacklog_len > 0; pos++)
    {
      LogMessageQueueNode *node;
      node = iv_list_entry(self->qbacklog.next, LogMessageQueueNode, list);
      msg = node->msg;

      iv_list_del(&node->list);
      self->qbacklog_len--;
      path_options.ack_needed = node->ack_needed;
      log_msg_ack(msg, &path_options, AT_PROCESSED);
      log_msg_free_queue_node(node);
      log_msg_unref(msg);
    }
}
Example no. 9
void iv_fd_unregister(struct iv_fd *_fd)
{
	struct iv_state *st = iv_get_state();
	struct iv_fd_ *fd = (struct iv_fd_ *)_fd;

	if (!fd->registered) {
		iv_fatal("iv_fd_unregister: called with fd which is "
			 "not registered");
	}
	fd->registered = 0;

	iv_list_del(&fd->list_active);

	notify_fd(st, fd);
	if (method->unregister_fd != NULL)
		method->unregister_fd(st, fd);

	st->numobjs--;
	st->numfds--;

	if (st->handled_fd == fd)
		st->handled_fd = NULL;
}
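
For completeness, a hedged sketch of the registration side that this unregister call closes out, using the documented iv_fd(3) interface (IV_FD_INIT(), iv_fd_register(), handler_in); watching stdin is purely illustrative.

#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <iv.h>

static struct iv_fd stdin_fd;

/* called by the event loop when fd 0 becomes readable */
static void stdin_readable(void *cookie)
{
	char buf[256];
	ssize_t len;

	len = read(stdin_fd.fd, buf, sizeof(buf));
	if (len < 0 && errno == EAGAIN)
		return;

	if (len <= 0) {
		/* EOF or error: stop watching the descriptor and leave the loop */
		iv_fd_unregister(&stdin_fd);
		iv_quit();
		return;
	}

	printf("read %zd bytes\n", len);
}

int main(void)
{
	iv_init();

	IV_FD_INIT(&stdin_fd);
	stdin_fd.fd = 0;
	stdin_fd.handler_in = stdin_readable;
	iv_fd_register(&stdin_fd);	/* also puts the fd into non-blocking mode */

	iv_main();
	iv_deinit();

	return 0;
}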