Example #1
/*
 * Update the suppress timer in a deferred manner, possibly batching the
 * results of multiple updates to the suppress timer.  This is necessary as
 * suppress timer updates must run in the main thread, and updating it every
 * time a new message comes in would cause enormous latency in the fast
 * path.  By collecting multiple updates the overhead is drastically
 * reduced.
 *
 * sec == 0 means to turn off the suppress timer
 * sec >  0 to enable the timer with the specified timeout in seconds
 *
 * NOTE: suppress_lock must be held.
 */
static void
log_writer_update_suppress_timer(LogWriter *self, glong sec)
{
  gboolean invoke;
  struct timespec next_expires;

  iv_validate_now();

  /* we deliberately use nsec == 0 in order to increase the likelihood that
   * we target the same second, in case only a fraction of a second has
   * passed between two updates.  */
  if (sec)
    {
      next_expires.tv_nsec = 0;
      next_expires.tv_sec = iv_now.tv_sec + sec;
    }
  else
    {
      next_expires.tv_sec = 0;
      next_expires.tv_nsec = 0;
    }
  /* last update was finished, we need to invoke the updater again */
  invoke = ((next_expires.tv_sec != self->suppress_timer_expires.tv_sec) ||
            (next_expires.tv_nsec != self->suppress_timer_expires.tv_nsec)) &&
           self->suppress_timer_updated;
  self->suppress_timer_updated = FALSE;

  if (invoke)
    {
      self->suppress_timer_expires = next_expires;
      g_static_mutex_unlock(&self->suppress_lock);
      log_pipe_ref(&self->super);
      main_loop_call((void *(*)(void *)) log_writer_perform_suppress_timer_update, self, FALSE);
      g_static_mutex_lock(&self->suppress_lock);
    }

}
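The NOTE about suppress_lock matters in practice: the function drops and reacquires the lock around main_loop_call(), so the caller must already hold it and cannot assume the lock was held continuously across the call. A minimal caller sketch (the helper name is hypothetical, not taken from the code above) could look like this:

/* Hypothetical caller: take suppress_lock, ask for a deferred timer update,
 * then release the lock.  The lock is briefly dropped inside
 * log_writer_update_suppress_timer() around the main_loop_call(). */
static void
log_writer_example_arm_suppress(LogWriter *self, glong timeout_sec)
{
  g_static_mutex_lock(&self->suppress_lock);
  log_writer_update_suppress_timer(self, timeout_sec);  /* 0 disarms the timer */
  g_static_mutex_unlock(&self->suppress_lock);
}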
Example #2
void
log_reader_reopen(LogPipe *s, LogProto *proto, LogPipe *control, LogReaderOptions *options, gint stats_level, gint stats_source, const gchar *stats_id, const gchar *stats_instance, gboolean immediate_check)
{
  LogReader *self = (LogReader *) s;
  gpointer args[] = { s, proto };
  log_source_deinit(s);

  main_loop_call((MainLoopTaskFunc) log_reader_reopen_deferred, args, TRUE);

  if (!main_loop_is_main_thread())
    {
      g_static_mutex_lock(&self->pending_proto_lock);
      while (self->pending_proto_present)
        {
          g_cond_wait(self->pending_proto_cond, g_static_mutex_get_mutex(&self->pending_proto_lock));
        }
      g_static_mutex_unlock(&self->pending_proto_lock);
    }
  if (immediate_check)
    {
      log_reader_set_immediate_check(&self->super.super);
    }
  log_reader_set_options(s, control, options, stats_level, stats_source, stats_id, stats_instance);
  log_reader_set_follow_filename(s, stats_instance);
  log_source_init(s);
}
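When reopen is called from a worker thread, both variants post the actual work to the main thread and then block on pending_proto_cond until the deferred task has consumed the pending state. The stand-alone sketch below shows just that hand-off with illustrative names and plain GLib primitives; it is not the syslog-ng implementation. The queueing side marks a request pending, the worker waits while the flag is set, and the main thread clears the flag and signals.

#include <glib.h>

typedef struct _PendingChange
{
  GStaticMutex lock;
  GCond *cond;          /* created with g_cond_new() at init time */
  gboolean pending;
} PendingChange;

/* queueing side: mark a change as pending before handing it to the main thread */
static void
pending_change_post(PendingChange *pc)
{
  g_static_mutex_lock(&pc->lock);
  pc->pending = TRUE;
  g_static_mutex_unlock(&pc->lock);
}

/* worker side: wait until the main thread has picked up the change and
 * cleared the flag */
static void
pending_change_wait(PendingChange *pc)
{
  g_static_mutex_lock(&pc->lock);
  while (pc->pending)
    g_cond_wait(pc->cond, g_static_mutex_get_mutex(&pc->lock));
  g_static_mutex_unlock(&pc->lock);
}

/* main-thread side: clear the flag and wake the waiting worker */
static void
pending_change_complete(PendingChange *pc)
{
  g_static_mutex_lock(&pc->lock);
  pc->pending = FALSE;
  g_cond_signal(pc->cond);
  g_static_mutex_unlock(&pc->lock);
}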
Example #3
void
log_reader_reopen(LogReader *self, LogProtoServer *proto, PollEvents *poll_events)
{
  gpointer args[] = { self, proto, poll_events };

  main_loop_call((MainLoopTaskFunc) log_reader_reopen_deferred, args, TRUE);

  if (!main_loop_is_main_thread())
    {
      g_static_mutex_lock(&self->pending_proto_lock);
      while (self->pending_proto_present)
        {
          g_cond_wait(self->pending_proto_cond, g_static_mutex_get_mutex(&self->pending_proto_lock));
        }
      g_static_mutex_unlock(&self->pending_proto_lock);
    }
}
/*
 * Update the timer in a deferred manner, possibly batching the results of
 * multiple updates to the underlying ivykis timer.  This is necessary as
 * suppress timer updates must run in the main thread, and updating it every
 * time a new message comes in would cause enormous latency in the fast
 * path.  By collecting multiple updates the overhead is drastically
 * reduced.
 */
static void
ml_batched_timer_update(MlBatchedTimer *self, struct timespec *next_expires)
{

  /* NOTE: this check is racy as self->expires might be updated in a
   * different thread without holding a lock.
   *
   * When we lose the race, that means that another thread has already
   * updated the expires field, but we see the old value.  In this case two
   * things may happen:
   *
   *   1) we skip an update because of the race
   *
   *      We're going to skip the update if the other set the "expires" field to
   *      the same value we intended to set it.  This is not an issue, it doesn't
   *      matter whether we or the other thread updates the timer.
   *
   *   2) we perform an update because of the race
   *
   *      In this case, the other thread has updated the field, but we still
   *      see the old value, thus we decide another update is due.  We go
   *      into the locked path, which will sort things out.
   *
   * In both cases we are fine.
   */

  if (ml_batched_timer_expiration_changed(self, next_expires))
    {
      g_static_mutex_lock(&self->lock);

      /* check if we've lost the race */
      if (ml_batched_timer_expiration_changed(self, next_expires))
        {
          /* we need to update the timer */
          self->expires = *next_expires;
          self->ref_cookie(self->cookie);
          g_static_mutex_unlock(&self->lock);
          main_loop_call((MainLoopTaskFunc) ml_batched_timer_perform_update, self, FALSE);
        }
      else
        g_static_mutex_unlock(&self->lock);
    }
}
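Callers of ml_batched_timer_update() supply an absolute expiry time. A hypothetical caller, mirroring the expiry computation from Example #1 (the helper name is illustrative only), might look like this:

/* Hypothetical caller: postpone the batched timer by 'sec' seconds from now.
 * nsec is kept at 0 so that several updates within the same second collapse
 * into a single main-thread call. */
static void
ml_batched_timer_example_postpone(MlBatchedTimer *self, glong sec)
{
  struct timespec next_expires;

  iv_validate_now();
  next_expires.tv_sec = iv_now.tv_sec + sec;
  next_expires.tv_nsec = 0;
  ml_batched_timer_update(self, &next_expires);
}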
/*
 * Update the timer in a deferred manner, possibly batching the results of
 * multiple updates to the underlying ivykis timer.  This is necessary as
 * suppress timer updates must run in the main thread, and updating it every
 * time a new message comes in would cause enormous latency in the fast
 * path.  By collecting multiple updates the overhead is drastically
 * reduced.
 */
static void
ml_batched_timer_update(MlBatchedTimer *self, struct timespec *next_expires)
{
  gboolean invoke;

  /* last update was finished, we need to invoke the updater again */
  invoke = ((next_expires->tv_sec != self->expires.tv_sec) ||
            (next_expires->tv_nsec != self->expires.tv_nsec)) &&
           self->updated;
  self->updated = FALSE;

  if (invoke)
    {
      self->expires = *next_expires;
      g_static_mutex_unlock(&self->lock);
      self->ref_cookie(self->cookie);
      main_loop_call((MainLoopTaskFunc) ml_batched_timer_perform_update, self, FALSE);
      g_static_mutex_lock(&self->lock);
    }
}