Example #1
0
/*
 * gtk+ does not provide a recursive lock, so calling GDK_THREADS_ENTER
 * while the current thread already holds the GDK lock is fatal.  The
 * lock is already held whenever we are entered from the mainloop, so
 * detect that case (the thread owning the default main context) and
 * skip acquisition.  Sadly there is no glibmm binding for this yet.
 *
 * Passing force == true takes the lock even when this thread owns the
 * mainloop; that is useful e.g. for idle handlers.
 */
ToolkitLock::ToolkitLock( bool force ) :
  m_locked(false)
{
  // Owning the default main context (NULL) means the mainloop already
  // holds the GDK lock on this thread.
  const bool mainloop_owner = g_main_context_is_owner (NULL);
  if (mainloop_owner && !force)
    return;
  GDK_THREADS_ENTER();
  m_locked = true;
}
Example #2
0
/*
 * Delivers received data to the component's registered I/O callback.
 *
 * This must be called with the agent lock *held*.  Fast path: if the
 * calling thread owns the component's GMainContext, the agent lock is
 * temporarily dropped and the user callback is invoked directly.  Slow
 * path: otherwise the data is copied and queued for delivery from an
 * idle handler attached to that context.
 */
void
nice_component_emit_io_callback (NiceComponent *component,
    const guint8 *buf, gsize buf_len)
{
  NiceAgent *agent;
  guint stream_id, component_id;
  NiceAgentRecvFunc io_callback;
  gpointer io_user_data;

  g_assert (component != NULL);
  g_assert (buf != NULL);
  g_assert (buf_len > 0);

  agent = component->agent;
  stream_id = component->stream->id;
  component_id = component->id;

  /* Snapshot the callback/user-data pair under io_mutex so a concurrent
   * change of the callback cannot tear the pair. */
  g_mutex_lock (&component->io_mutex);
  io_callback = component->io_callback;
  io_user_data = component->io_user_data;
  g_mutex_unlock (&component->io_mutex);

  /* Allow this to be called with a NULL io_callback, since the caller can’t
   * lock io_mutex to check beforehand. */
  if (io_callback == NULL)
    return;

  g_assert (NICE_IS_AGENT (agent));
  g_assert (stream_id > 0);
  g_assert (component_id > 0);
  /* NOTE(review): redundant — the early return above already guarantees
   * io_callback is non-NULL here. */
  g_assert (io_callback != NULL);

  /* Only allocate a closure if the callback is being deferred to an idle
   * handler. */
  if (g_main_context_is_owner (component->ctx)) {
    /* Thread owns the main context, so invoke the callback directly.
     * The agent lock is released across the user callback (and retaken
     * afterwards) so user code cannot deadlock against it. */
    agent_unlock_and_emit (agent);
    io_callback (agent, stream_id,
        component_id, buf_len, (gchar *) buf, io_user_data);
    agent_lock ();
  } else {
    IOCallbackData *data;

    g_mutex_lock (&component->io_mutex);

    /* Slow path: Current thread doesn’t own the Component’s context at the
     * moment, so schedule the callback in an idle handler. */
    data = io_callback_data_new (buf, buf_len);
    g_queue_push_tail (&component->pending_io_messages,
        data);  /* transfer ownership */

    nice_debug ("%s: **WARNING: SLOW PATH**", G_STRFUNC);

    nice_component_schedule_io_callback (component);

    g_mutex_unlock (&component->io_mutex);
  }
}
Example #3
0
/*
 * GIOChannel watch callback: drains pending inotify events from the
 * monitor's inotify fd and dispatches each one to inotify_event_process().
 *
 * Must run on the thread owning the monitor's main context (asserted).
 * Always returns TRUE so the watch stays installed.
 */
static gboolean inotify_cb(GIOChannel *source, GIOCondition condition, gpointer data)
{
	struct inotify_monitor *im = (struct inotify_monitor *)data;
	/* inotify(7): the read buffer must be suitably aligned for
	 * struct inotify_event, otherwise the casts below are UB on
	 * alignment-sensitive targets. */
	char event_buffer[INOTIFY_BUFFER_SIZE]
		__attribute__ ((aligned(__alignof__(struct inotify_event))));
	ssize_t len;

	assert(g_main_context_is_owner(access_monitor_get_main_context(im->monitor)));

	if ((len = read (im->inotify_fd, event_buffer, INOTIFY_BUFFER_SIZE)) > 0)  {
		char *p;

		p = event_buffer;
		/* Walk the variable-length event records; stop early if a
		 * (corrupt) record header or name would run past the data
		 * actually read. */
		while (p + sizeof(struct inotify_event) <= event_buffer + len) {
			struct inotify_event *event = (struct inotify_event *) p;

			if (p + sizeof(struct inotify_event) + event->len > event_buffer + len)
				break;

			inotify_event_process(im, event);

			p += sizeof(struct inotify_event) + event->len;
		}
	}

	return TRUE;
}
Example #4
0
/*
 * Runs @func with @data on @context, choosing the cheapest safe
 * dispatch strategy:
 *   1. fast path  — calling thread already owns the context: call inline;
 *   2. acquire    — context is idle: acquire it, call, release;
 *   3. shunt      — context is owned elsewhere: post an idle source to
 *                   the owning loop and block until the call completes.
 *
 * If @context is NULL, the thread-default context is used, falling back
 * to a Rust-private context.  Returns TRUE on success, FALSE if @func
 * is NULL or the shunted call did not report RUST_CALL_RETURNED.
 */
gboolean
grustna_call (RustFunc func, gpointer data, GMainContext *context)
{
  gboolean thread_default_context = FALSE;

  g_return_val_if_fail (func != NULL, FALSE);

  if (context == NULL)
    {
      context = g_main_context_get_thread_default ();
      if (context == NULL)
        context = get_rust_thread_context ();
      else
        thread_default_context = TRUE;
    }

  /* This code is based on g_main_context_invoke_full() */

  if (g_main_context_is_owner (context))
    {
      /* Fastest path: the caller is in the same thread where some code
       * is supposedly driving the loop context affine to this call. */
      func (data, context);
      return TRUE;
    }

  if (g_main_context_acquire (context))
    {
      /* Here, we get to exclusively use the desired loop context
       * that is not (yet) driven by an event loop.
       * This is perfectly OK for non-async functions on objects affine
       * to this context, and matches the behavior of GIO-style async calls
       * that rely on the thread-default context to be eventually driven
       * in order to complete. */

      /* Only push the context as thread-default if it was not the
       * caller's thread-default already. */
      if (!thread_default_context)
        g_main_context_push_thread_default (context);

      func (data, context);

      if (!thread_default_context)
        g_main_context_pop_thread_default (context);

      g_main_context_release (context);

      /* Unblock a potentially waiting
       * grustna_main_loop_run_thread_local() */
      g_cond_broadcast (&rust_context_released_cond);

      return TRUE;
    }
  else
    {
      /* Shunt the call to the loop thread
       * and wait for it to complete. */

      RustCallData *call_data;
      RustCallStatus status;
      GSource *idle;

      call_data = g_slice_new0 (RustCallData);
      call_data->func = func;
      call_data->param = data;
      call_data->context = g_main_context_ref (context);
      /* NOTE(review): 3 refs — presumably one each for this caller, the
       * idle source callback, and the call minder; confirm against
       * call_data_unref() call sites. */
      call_data->ref_count = 3;
      call_data->minder_backoff = 1 * G_TIME_SPAN_MILLISECOND;
      call_data->status = RUST_CALL_PENDING;

      /* Post the call to the owning loop via an idle source; the source
       * is kept in call_data so it can be referenced later. */
      idle = g_idle_source_new ();
      g_source_set_priority (idle, G_PRIORITY_DEFAULT);
      g_source_set_callback (idle, loop_callback, call_data, NULL);
      g_source_attach (idle, context);
      call_data->source = idle;

      g_cond_init (&call_data->return_cond);

      add_call_minder (call_data);

      /* Block until the loop thread flips status away from PENDING and
       * signals return_cond; both are protected by call_mutex. */
      g_mutex_lock (&call_mutex);
      while ((status = call_data->status) == RUST_CALL_PENDING)
        g_cond_wait (&call_data->return_cond, &call_mutex);
      g_mutex_unlock (&call_mutex);

      call_data_unref (call_data);

      return status == RUST_CALL_RETURNED;
    }
}
/**
 * @brief Unlocks GDK mutex if necessary.
 *
 * @see maybe_lock_gdk
 */
static inline void
maybe_unlock_gdk(void)
{
	if (!g_main_context_is_owner(g_main_context_default()))
		gdk_threads_leave();
}