Example #1
static void iv_work_thread_got_event(void *_thr)
{
	struct work_pool_thread *thr = _thr;
	struct work_pool_priv *pool = thr->pool;
	uint32_t last_seq;

	mutex_lock(&pool->lock);

	thr->kicked = 0;

	if (!iv_list_empty(&thr->list)) {
		iv_list_del_init(&thr->list);
		iv_timer_unregister(&thr->idle_timer);
	}

	/*
	 * Drain every item queued so far; the cast keeps the sequence
	 * comparison correct across uint32_t wraparound.
	 */
	last_seq = pool->seq_tail;
	while ((int32_t)(last_seq - pool->seq_head) > 0) {
		struct iv_work_item *work;

		pool->seq_head++;
		work = iv_container_of(pool->work_items.next,
				       struct iv_work_item, list);
		iv_list_del(&work->list);

		/* Drop the pool lock while the work callback runs. */
		mutex_unlock(&pool->lock);
		work->work(work->cookie);
		iv_invalidate_now();
		mutex_lock(&pool->lock);

		if (iv_list_empty(&pool->work_done))
			iv_event_post(&pool->ev);
		iv_list_add_tail(&work->list, &pool->work_done);
	}

	if (pool->seq_head == pool->seq_tail) {
		if (!pool->shutting_down) {
			iv_list_add(&thr->list, &pool->idle_threads);
			iv_validate_now();
			thr->idle_timer.expires = iv_now;
			thr->idle_timer.expires.tv_sec += 10;
			iv_timer_register(&thr->idle_timer);
		} else {
			__iv_work_thread_die(thr);
		}
	} else {
		/*
		 * If we're already at the maximum number of pool
		 * threads, and none of those threads were idle when
		 * more work arrived, then there may have been no
		 * kick sent for the new work item(s) (and no new
		 * pool thread started either), so if we're leaving
		 * with work items still pending, make sure we get
		 * called again, so that we don't deadlock.
		 */
		iv_event_post(&thr->kick);
	}

	mutex_unlock(&pool->lock);
}
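
The drain loop above compares sequence numbers as (int32_t)(last_seq - pool->seq_head) > 0 rather than last_seq > pool->seq_head, which keeps the test correct when the 32-bit counters wrap around. A minimal standalone sketch of that idiom (the seq_after name is illustrative, not from ivykis):

#include <stdint.h>
#include <stdio.h>

/* Nonzero if sequence a is "after" sequence b, even across uint32_t
 * wraparound, as long as the two are less than 2^31 apart. */
static int seq_after(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

int main(void)
{
	printf("%d\n", seq_after(5, 2));		/* 1 */
	printf("%d\n", seq_after(2, 5));		/* 0 */
	printf("%d\n", seq_after(3, UINT32_MAX - 1));	/* 1: wrapped */
	return 0;
}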
Example #2
void iv_fd_make_ready(struct iv_list_head *active, struct iv_fd_ *fd, int bands)
{
	/*
	 * Clear ->ready_bands only when the fd is first put on the
	 * active list; later calls in the same poll round just OR in
	 * additional bands.
	 */
	if (iv_list_empty(&fd->list_active)) {
		fd->ready_bands = 0;
		iv_list_add_tail(&fd->list_active, active);
	}
	fd->ready_bands |= bands;
}
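
The iv_list_empty() check before iv_list_add_tail() is the standard ivykis idiom for "is this node already on a list": a node initialized with INIT_IV_LIST_HEAD() (or unlinked with iv_list_del_init()) points to itself, so emptiness doubles as a membership flag, and the guard prevents a corrupting double insert. A self-contained sketch, assuming ivykis installs iv_list.h as a public header:

#include <stdio.h>
#include <iv_list.h>

struct item {
	int			value;
	struct iv_list_head	list;
};

int main(void)
{
	struct iv_list_head active;
	struct iv_list_head *pos;
	struct item a = { .value = 1 };

	INIT_IV_LIST_HEAD(&active);
	INIT_IV_LIST_HEAD(&a.list);	/* self-pointing: "not on a list" */

	/* Guarded insert, as in iv_fd_make_ready() above. */
	if (iv_list_empty(&a.list))
		iv_list_add_tail(&a.list, &active);
	if (iv_list_empty(&a.list))
		iv_list_add_tail(&a.list, &active);	/* skipped */

	iv_list_for_each (pos, &active) {
		struct item *it = iv_container_of(pos, struct item, list);
		printf("%d\n", it->value);	/* printed once */
	}
	return 0;
}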
Example #3
/*
 * Can only run from the output thread.
 *
 * NOTE: this returns a reference which the caller must take care to free.
 */
static LogMessage *
log_queue_fifo_pop_head(LogQueue *s, LogPathOptions *path_options)
{
  LogQueueFifo *self = (LogQueueFifo *) s;
  LogMessageQueueNode *node;
  LogMessage *msg = NULL;

  if (self->qoverflow_output_len == 0)
    {
      /* slow path, output queue is empty, get some elements from the wait queue */
      g_static_mutex_lock(&self->super.lock);
      iv_list_splice_tail_init(&self->qoverflow_wait, &self->qoverflow_output);
      self->qoverflow_output_len = self->qoverflow_wait_len;
      self->qoverflow_wait_len = 0;
      g_static_mutex_unlock(&self->super.lock);
    }

  if (self->qoverflow_output_len > 0)
    {
      node = iv_list_entry(self->qoverflow_output.next, LogMessageQueueNode, list);

      msg = node->msg;
      path_options->ack_needed = node->ack_needed;
      self->qoverflow_output_len--;
      if (!self->super.use_backlog)
        {
          iv_list_del(&node->list);
          log_msg_free_queue_node(node);
        }
      else
        {
          iv_list_del_init(&node->list);
        }
    }
  else
    {
      /* no items on either the wait queue or the output queue.
       *
       * NOTE: the input queues may contain items even in this case,
       * however we don't touch them here, they'll be migrated to the
       * wait_queue once the input threads finish their processing (or
       * the high watermark is reached). Also, they are unlocked, so
       * no way to touch them safely.
       */
      return NULL;
    }
  stats_counter_dec(self->super.stored_messages);

  if (self->super.use_backlog)
    {
      log_msg_ref(msg);
      iv_list_add_tail(&node->list, &self->qbacklog);
      self->qbacklog_len++;
    }

  return msg;
}
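
Two list details above are easy to miss: iv_list_splice_tail_init() moves an entire list onto the tail of another in O(1) and leaves the source empty (which is why qoverflow_wait_len can simply be zeroed), and the backlog branch uses iv_list_del_init() instead of iv_list_del() so the node stays in a well-defined state and can be relinked onto qbacklog. A sketch of both, again assuming the public iv_list.h:

#include <assert.h>
#include <iv_list.h>

struct node {
	int			id;
	struct iv_list_head	list;
};

int main(void)
{
	struct iv_list_head wait, output, backlog;
	struct node n = { .id = 42 };

	INIT_IV_LIST_HEAD(&wait);
	INIT_IV_LIST_HEAD(&output);
	INIT_IV_LIST_HEAD(&backlog);
	INIT_IV_LIST_HEAD(&n.list);

	iv_list_add_tail(&n.list, &wait);

	/* O(1) bulk move: everything on "wait" moves to the tail of
	 * "output" and "wait" is reinitialized to empty. */
	iv_list_splice_tail_init(&wait, &output);
	assert(iv_list_empty(&wait));
	assert(!iv_list_empty(&output));

	/* del_init leaves the node self-pointing, so relinking it
	 * (as the backlog branch does) is well-defined. */
	iv_list_del_init(&n.list);
	iv_list_add_tail(&n.list, &backlog);
	assert(!iv_list_empty(&backlog));
	return 0;
}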
Example #4
void iv_task_register(struct iv_task *_t)
{
	struct iv_state *st = iv_get_state();
	struct iv_task_ *t = (struct iv_task_ *)_t;

	if (!iv_list_empty(&t->list))
		iv_fatal("iv_task_register: called with task still on a list");

	st->numobjs++;
	iv_list_add_tail(&t->list, &st->tasks);
}
Example #5
static void iv_fd_kqueue_notify_fd(struct iv_state *st, struct iv_fd_ *fd)
{
	/*
	 * Unlink unconditionally, then queue the fd for a kqueue
	 * update only if its registered bands are stale.
	 */
	iv_list_del_init(&fd->list_notify);
	if (fd->registered_bands != fd->wanted_bands)
		iv_list_add_tail(&fd->list_notify, &st->u.kqueue.notify);
}
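
The "unlink unconditionally, re-add only if needed" shape makes the function idempotent: however often it is called, the fd ends up on the notify list at most once, and only while its registered and wanted bands differ. A tiny sketch of the idiom under the same iv_list.h assumption (requeue_if_dirty is a made-up name):

#include <iv_list.h>

/* Idempotent (re)queue: iv_list_del_init() is safe even when the
 * node is not currently on any list. */
static void requeue_if_dirty(struct iv_list_head *node,
			     struct iv_list_head *notify, int dirty)
{
	iv_list_del_init(node);
	if (dirty)
		iv_list_add_tail(node, notify);
}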
Example #6
static int iv_fd_port_poll(struct iv_state *st,
                           struct iv_list_head *active,
                           const struct timespec *abs)
{
    struct timespec _rel;
    struct timespec *rel;
    int run_timers;
    int run_events;
    unsigned int nget;
    port_event_t pe[PORTEV_NUM];
    int ret;
    int i;

    iv_fd_port_upload(st);

    rel = to_relative(st, &_rel, abs);

    run_timers = 0;
    if (rel != NULL && rel->tv_sec == 0 && rel->tv_nsec == 0)
        run_timers = 1;

    run_events = 0;

poll_more:
    nget = 1;

    /*
     * If we get EINTR from port_getn(), no events are returned
     * and nget will not have been updated, but if we get ETIME,
     * events may be returned, and nget will be set to the number
     * of events in the array, and we need to process those
     * events as usual.
     */
    ret = port_getn(st->u.port.port_fd, pe, PORTEV_NUM, &nget, rel);

    __iv_invalidate_now(st);

    if (ret < 0 && errno != ETIME) {
        if (errno == EINTR)
            return run_timers;

        iv_fatal("iv_fd_port_poll: got error %d[%s]", errno,
                 strerror(errno));
    }

    if (ret < 0 && errno == ETIME)
        run_timers = 1;

    for (i = 0; i < nget; i++) {
        int source;

        source = pe[i].portev_source;
        if (source == PORT_SOURCE_FD) {
            int revents;
            struct iv_fd_ *fd;

            revents = pe[i].portev_events;
            fd = pe[i].portev_user;

            if (revents & (POLLIN | POLLERR | POLLHUP))
                iv_fd_make_ready(active, fd, MASKIN);

            if (revents & (POLLOUT | POLLERR | POLLHUP))
                iv_fd_make_ready(active, fd, MASKOUT);

            if (revents & (POLLERR | POLLHUP))
                iv_fd_make_ready(active, fd, MASKERR);

            fd->registered_bands = 0;

            iv_list_del_init(&fd->list_notify);
            if (fd->wanted_bands) {
                iv_list_add_tail(&fd->list_notify,
                                 &st->u.port.notify);
            }
        } else if (source == PORT_SOURCE_TIMER) {
            run_timers = 1;
        } else if (source == PORT_SOURCE_USER) {
            run_events = 1;
        } else {
            iv_fatal("iv_fd_port_poll: received event "
                     "from unknown source %d", source);
        }
    }

    if (nget == PORTEV_NUM) {
        run_timers = 1;
        rel = &_rel;
        rel->tv_sec = 0;
        rel->tv_nsec = 0;
        goto poll_more;
    }

    if (run_events)
        iv_event_run_pending_events();

    return run_timers;
}
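
The port_getn() handling above encodes a Solaris event-port contract worth stating on its own: EINTR means no events were returned and nget was not updated (so its preset value must not be trusted), ETIME may still return events with nget set, and success fills nget with the count. Distilled into a hedged skeleton (Solaris/illumos only; the dispatch step is left as a comment):

#include <errno.h>
#include <time.h>
#include <port.h>

/* Returns the number of events processed, or -1 on fatal error. */
static int poll_once(int port_fd, port_event_t *pe, unsigned int max,
		     struct timespec *rel)
{
	unsigned int nget = 1;	/* block until at least one event */
	unsigned int i;

	if (port_getn(port_fd, pe, max, &nget, rel) < 0) {
		if (errno == EINTR)
			return 0;	/* nget is stale: bail out */
		if (errno != ETIME)
			return -1;
		/* ETIME: the timeout expired, but nget may be > 0. */
	}

	for (i = 0; i < nget; i++) {
		/* dispatch pe[i] according to pe[i].portev_source */
	}
	return (int)nget;
}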
Example #7
/**
 * Assumed to be called from one of the input threads. It first tries to
 * put the item on the caller's per-thread input queue; if the thread_id
 * cannot be determined, the item is put directly on the wait queue.
 *
 * If the queue is full, the message is dropped and a debug message is
 * logged.
 *
 * NOTE: It consumes the reference passed by the caller.
 **/
static void
log_queue_fifo_push_tail(LogQueue *s, LogMessage *msg, const LogPathOptions *path_options)
{
  LogQueueFifo *self = (LogQueueFifo *) s;
  gint thread_id;
  LogMessageQueueNode *node;

  thread_id = main_loop_io_worker_thread_id();

  g_assert(thread_id < 0 || log_queue_max_threads > thread_id);

  /* NOTE: we don't use high-water marks for now, as log_fetch_limit
   * already limits the number of items placed on the per-thread input
   * queue, and any sane limit measurably decreased performance.
   *
   * This means that per-thread input queues contain _all_ items that
   * a single poll iteration produces. And once the reader is finished
   * (either because the input is depleted or because of
   * log_fetch_limit / window_size) the whole bunch is propagated to
   * the "wait" queue.
   */

  if (thread_id >= 0)
    {
      /* fastpath, use per-thread input FIFOs */
      if (!self->qoverflow_input[thread_id].finish_cb_registered)
        {
          /* this is the first item in the input FIFO, register a finish
           * callback to make sure it gets moved to the wait_queue if the
           * input thread finishes */

          main_loop_io_worker_register_finish_callback(&self->qoverflow_input[thread_id].cb);
          self->qoverflow_input[thread_id].finish_cb_registered = TRUE;
        }

      node = log_msg_alloc_queue_node(msg, path_options);
      iv_list_add_tail(&node->list, &self->qoverflow_input[thread_id].items);
      self->qoverflow_input[thread_id].len++;
      log_msg_unref(msg);
      return;
    }

  /* slow path, put the pending item and the whole input queue to the wait_queue */

  g_static_mutex_lock(&self->super.lock);
  
  /* never reached here: the fast path above already returned for
   * thread_id >= 0 */
  if (thread_id >= 0)
    log_queue_fifo_move_input_unlocked(self, thread_id);
  
  if (log_queue_fifo_get_length(s) < self->qoverflow_size)
    {
      node = log_msg_alloc_queue_node(msg, path_options);

      iv_list_add_tail(&node->list, &self->qoverflow_wait);
      self->qoverflow_wait_len++;
      log_queue_push_notify(&self->super);

      stats_counter_inc(self->super.stored_messages);
      g_static_mutex_unlock(&self->super.lock);

      log_msg_unref(msg);
    }
  else
    {
      stats_counter_inc(self->super.dropped_messages);
      g_static_mutex_unlock(&self->super.lock);
      log_msg_drop(msg, path_options);

      msg_debug("Destination queue full, dropping message",
                evt_tag_int("queue_len", log_queue_fifo_get_length(&self->super)),
                evt_tag_int("log_fifo_size", self->qoverflow_size),
                NULL);
    }
  return;
}
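
The fast path in log_queue_fifo_push_tail() is lock-free only because every input thread owns its own qoverflow_input[thread_id] FIFO; the lock is paid once per batch, when a finish callback (or the slow path) splices the whole private list onto the shared wait queue. A simplified model of that pattern with pthreads and iv_list.h (all names here are illustrative, not syslog-ng's; the real code keeps one input list per thread):

#include <pthread.h>
#include <iv_list.h>

struct batched_queue {
	pthread_mutex_t		lock;
	struct iv_list_head	wait;	/* shared, protected by lock */
	struct iv_list_head	input;	/* owned by one producer thread */
};

/* Fast path: runs only on the owning input thread, takes no lock. */
static void push_fast(struct batched_queue *q, struct iv_list_head *node)
{
	iv_list_add_tail(node, &q->input);
}

/* Batch flush: one lock acquisition moves the whole private FIFO,
 * mirroring what the finish callback does for each input queue. */
static void flush_input(struct batched_queue *q)
{
	pthread_mutex_lock(&q->lock);
	iv_list_splice_tail_init(&q->input, &q->wait);
	pthread_mutex_unlock(&q->lock);
}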