Example 1
static void iv_work_thread_got_event(void *_thr)
{
	struct work_pool_thread *thr = _thr;
	struct work_pool_priv *pool = thr->pool;
	uint32_t last_seq;

	mutex_lock(&pool->lock);

	thr->kicked = 0;

	if (!iv_list_empty(&thr->list)) {
		iv_list_del_init(&thr->list);
		iv_timer_unregister(&thr->idle_timer);
	}

	last_seq = pool->seq_tail;
	while ((int32_t)(last_seq - pool->seq_head) > 0) {
		struct iv_work_item *work;

		pool->seq_head++;
		work = iv_container_of(pool->work_items.next,
				       struct iv_work_item, list);
		iv_list_del(&work->list);

		mutex_unlock(&pool->lock);
		work->work(work->cookie);
		iv_invalidate_now();
		mutex_lock(&pool->lock);

		if (iv_list_empty(&pool->work_done))
			iv_event_post(&pool->ev);
		iv_list_add_tail(&work->list, &pool->work_done);
	}

	if (pool->seq_head == pool->seq_tail) {
		if (!pool->shutting_down) {
			iv_list_add(&thr->list, &pool->idle_threads);
			iv_validate_now();
			thr->idle_timer.expires = iv_now;
			thr->idle_timer.expires.tv_sec += 10;
			iv_timer_register(&thr->idle_timer);
		} else {
			__iv_work_thread_die(thr);
		}
	} else {
		/*
		 * If we're already at the maximum number of pool
		 * threads, and none of those threads were idle when
		 * more work arrived, then there may have been no
		 * kick sent for the new work item(s) (and no new
		 * pool thread started either), so if we're leaving
		 * with work items still pending, make sure we get
		 * called again, so that we don't deadlock.
		 */
		iv_event_post(&thr->kick);
	}

	mutex_unlock(&pool->lock);
}
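All of the examples in this collection revolve around the same primitive: iv_event_post() wakes up the thread that registered the corresponding struct iv_event, and it may be called from any thread. Below is a minimal, self-contained sketch of that pattern; the names wakeup, got_wakeup and poster are hypothetical and not taken from the examples here.

#include <stdio.h>
#include <pthread.h>

#include <iv.h>
#include <iv_event.h>

static struct iv_event wakeup;

static void got_wakeup(void *_dummy)
{
	/* Runs in the thread that called iv_event_register(). */
	printf("woken up by the poster thread\n");

	/*
	 * Once the event is unregistered nothing is left for the loop
	 * to watch, so iv_main() returns; the iv_event_bench examples
	 * in this collection exit the same way.
	 */
	iv_event_unregister(&wakeup);
}

static void *poster(void *_dummy)
{
	/* iv_event_post() may be called from any thread. */
	iv_event_post(&wakeup);

	return NULL;
}

int main(void)
{
	pthread_t thr;

	iv_init();

	IV_EVENT_INIT(&wakeup);
	wakeup.cookie = NULL;
	wakeup.handler = got_wakeup;
	iv_event_register(&wakeup);

	pthread_create(&thr, NULL, poster, NULL);

	iv_main();

	pthread_join(thr, NULL);
	iv_deinit();

	return 0;
}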
Example 2
static void got_ev_child(void *_dummy)
{
	if (die == 2) {
		die = 3;
		iv_event_post(&ev_parent);
		iv_event_unregister(&ev_child);
	} else {
		iv_event_post(&ev_parent);
	}
}
Example 3
static void iv_work_thread(void *_thr)
{
	struct work_pool_thread *thr = _thr;
	struct work_pool_priv *pool = thr->pool;

	iv_init();

	INIT_IV_LIST_HEAD(&thr->list);

	thr->kicked = 0;

	IV_EVENT_INIT(&thr->kick);
	thr->kick.cookie = thr;
	thr->kick.handler = iv_work_thread_got_event;
	iv_event_register(&thr->kick);

	IV_TIMER_INIT(&thr->idle_timer);
	thr->idle_timer.cookie = thr;
	thr->idle_timer.handler = iv_work_thread_idle_timeout;

	if (pool->thread_start != NULL)
		pool->thread_start(pool->cookie);

	iv_event_post(&thr->kick);

	iv_main();

	iv_deinit();
}
Example 4
static void
afinter_source_wakeup(LogSource *s)
{
  AFInterSource *self = (AFInterSource *) s;

  iv_event_post(&self->schedule_wakeup);
}
Example 5
static void
log_threaded_dest_driver_stop_thread(gpointer s)
{
  LogThrDestDriver *self = (LogThrDestDriver *) s;

  iv_event_post(&self->shutdown_event);
}
Example 6
static void
log_reader_window_empty(LogSource *s)
{
  LogReader *self = (LogReader *) s;
  if (self->super.super.flags & PIF_INITIALIZED)
    iv_event_post(&self->last_msg_sent_event);
}
Example 7
static void
_reader_wakeup(LogSource *s)
{
    JournalReader *self = (JournalReader *) s;

    if (self->super.super.flags & PIF_INITIALIZED)
        iv_event_post(&self->schedule_wakeup);
}
Example 8
static void got_ev_parent(void *_dummy)
{
	ev_received++;

	if (die == 0) {
		iv_event_post(&ev_child);
	} else if (die == 1) {
		die = 2;
		iv_event_post(&ev_child);
	} else if (die == 2) {
		iv_fatal("iv_event_bench: entered invalid state");
	} else if (die == 3) {
		iv_validate_now();
		tim_end = iv_now;
		iv_event_unregister(&ev_parent);
	}
}
Example 9
void
afinter_message_posted(LogMessage *msg)
{
  g_static_mutex_lock(&internal_msg_lock);
  if (!internal_msg_queue)
    {
      internal_msg_queue = g_queue_new();
    }
  g_queue_push_tail(internal_msg_queue, msg);
  if (current_internal_source)
    iv_event_post(&current_internal_source->post);
  g_static_mutex_unlock(&internal_msg_lock);
}
Example 10
static void thr_child(void *_dummy)
{
	iv_init();

	IV_EVENT_INIT(&ev_child);
	ev_child.handler = got_ev_child;
	iv_event_register(&ev_child);

	iv_validate_now();
	tim_start = iv_now;

	iv_event_post(&ev_parent);

	iv_main();

	iv_deinit();
}
Example 11
/* worker thread ************************************************************/
static void __iv_work_thread_die(struct work_pool_thread *thr)
{
	struct work_pool_priv *pool = thr->pool;

	if (thr->kicked)
		iv_fatal("__iv_work_thread_die: called on kicked thread");

	iv_event_unregister(&thr->kick);
	free(thr);

	pool->started_threads--;

	if (pool->thread_stop != NULL)
		pool->thread_stop(pool->cookie);

	if (pool->shutting_down && !pool->started_threads)
		iv_event_post(&pool->ev);
}
Example 12
/* NOTE: may be running in the destination's thread, thus proper locking must be used */
static void
log_reader_wakeup(LogSource *s)
{
  LogReader *self = (LogReader *) s;

  /*
   * We might get called even after this LogReader has been
   * deinitialized, in which case we must not do anything (since the
   * iv_event triggered here is not registered).
   *
   * This happens when log_writer_deinit() flushes its output queue
   * after the reader which produced the message has already been
   * deinited. Since init/deinit calls are made in the main thread, no
   * locking is needed.
   *
   */

  if (self->super.super.flags & PIF_INITIALIZED)
    iv_event_post(&self->schedule_wakeup);
}
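The comment above spells out the lifetime rule that several of these examples enforce with the PIF_INITIALIZED check: iv_event_post() must only be called on an event that is currently registered. A stripped-down sketch of that guard follows; MySource and its functions are hypothetical, and, as the comment explains, the unlocked flag is only safe because init/deinit run in the main thread and are ordered with respect to the final wakeup.

#include <iv.h>
#include <iv_event.h>

typedef struct _MySource
{
  struct iv_event wakeup;
  int initialized;
} MySource;

static void
my_source_got_wakeup(void *s)
{
  /* runs in the thread that registered the event */
}

void
my_source_init(MySource *self)
{
  IV_EVENT_INIT(&self->wakeup);
  self->wakeup.cookie = self;
  self->wakeup.handler = my_source_got_wakeup;
  iv_event_register(&self->wakeup);

  self->initialized = 1;
}

void
my_source_deinit(MySource *self)
{
  self->initialized = 0;
  iv_event_unregister(&self->wakeup);
}

/* may be called from another thread, e.g. by a writer flushing its queue;
 * posting is skipped once the source has been deinited, since the event
 * is no longer registered by then */
void
my_source_wakeup(MySource *self)
{
  if (self->initialized)
    iv_event_post(&self->wakeup);
}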
Example 13
void
main_loop_reload_config(void)
{
  iv_event_post(&reload_config_requested);
  return;
}
Example 14
void
main_loop_exit(void)
{
  iv_event_post(&exit_requested);
  return;
}
Example 15
/* NOTE: runs in the source thread */
static void
log_writer_schedule_update_watches(LogWriter *self)
{
  iv_event_post(&self->queue_filled);
}
Example 16
static void
log_threaded_dest_driver_message_became_available_in_the_queue(gpointer user_data)
{
  LogThrDestDriver *self = (LogThrDestDriver *) user_data;
  iv_event_post(&self->wake_up_event);
}