Example #1
/* Find the on-disk location of HEADER: write a throwaway source file that
 * includes it, preprocess that file, and pull the quoted path out of the
 * resulting include line. */
char *findhdrloc(const char *header)
{
    char *includeline;
    const char *cname = FILE_C_NAME;
    const char *iname = FILE_I_NAME;

    TRACE(("Temp source: %s\n", cname));
    create_src(cname, header);

    TRACE(("Expanded source: %s\n", iname));
    run_cc(cname, iname);

    includeline = getincludeline(iname, header);
    if (includeline == NULL)
        PANIC(("No include line for %s in '%s'!\n", header, iname));
    else {
        char *start, *end;

        /* The include line quotes the header's full path: extract the text
         * between the first pair of double quotes. */
        start = strchr(includeline, '\"');
        if (start == NULL)
            PANIC(("Malformed line: '%s'\n", includeline));

        end = strchr(++start, '\"');
        if (end == NULL)
            PANIC(("Malformed line: '%s'\n", includeline));
        *end = '\0';

        /* Clean up the temporary source and expanded files before handing
         * back the extracted path. */
        remove(cname);
        remove(iname);

        return start;
    }
}
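
The same technique can be sketched as a self-contained program: hand the preprocessor a one-line #include and read the header's path back out of the first linemarker that mentions it. This is only a minimal sketch of the idea, not the project's own code; the helpers create_src, run_cc and getincludeline are replaced here by a single popen of "cc -E", and it assumes a POSIX shell with a cc driver on the PATH.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(int argc, char **argv)
{
    const char *header = (argc > 1) ? argv[1] : "stdio.h";
    char cmd[512], line[1024];
    FILE *pipe;

    /* Feed a one-line include to the preprocessor and read its output. */
    snprintf(cmd, sizeof cmd,
             "printf '#include <%s>\\n' | cc -E -x c - 2>/dev/null", header);
    pipe = popen(cmd, "r");
    if (pipe == NULL) {
        perror("popen");
        return EXIT_FAILURE;
    }

    /* Linemarkers look like:  # 1 "/usr/include/stdio.h" 1 3 4  */
    while (fgets(line, sizeof line, pipe) != NULL) {
        char *start, *end;

        if (line[0] != '#' || strstr(line, header) == NULL)
            continue;

        start = strchr(line, '\"');
        if (start == NULL)
            continue;
        end = strchr(++start, '\"');
        if (end == NULL)
            continue;
        *end = '\0';

        printf("%s is at %s\n", header, start);
        break;
    }
    pclose(pipe);
    return EXIT_SUCCESS;
}

Running it as "./findhdrloc time.h" should print the path the preprocessor actually resolved time.h to.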
Example #2
static void
empathy_audio_src_init (EmpathyGstAudioSrc *obj)
{
  EmpathyGstAudioSrcPrivate *priv = EMPATHY_GST_AUDIO_SRC_GET_PRIVATE (obj);
  GstPad *ghost, *src;

  obj->priv = priv;
  g_mutex_init (&priv->lock);

  priv->volume = 1.0;

  priv->src = create_src ();
  if (priv->src == NULL)
    return;

  if (GST_IS_STREAM_VOLUME (priv->src))
    {
      gdouble volume;
      gboolean mute;

      priv->have_stream_volume = TRUE;
      /* We can't do a bidirectional bind as the ::notify comes from another
       * thread; for the other bits of empathy it's simplest if it comes from
       * the main thread. */
      g_object_bind_property (obj, "volume", priv->src, "volume",
        G_BINDING_DEFAULT);
      g_object_bind_property (obj, "mute", priv->src, "mute",
        G_BINDING_DEFAULT);

      /* sync and callback for bouncing */
      g_object_get (priv->src, "volume", &volume, NULL);
      g_object_set (obj, "volume", volume, NULL);

      g_object_get (priv->src, "mute", &mute, NULL);
      g_object_set (obj, "mute", mute, NULL);

      g_signal_connect (priv->src, "notify::volume",
        G_CALLBACK (empathy_audio_src_volume_changed), obj);
      g_signal_connect (priv->src, "notify::mute",
        G_CALLBACK (empathy_audio_src_volume_changed), obj);
    }
  else
    {
      g_message ("No stream volume available :(, mute will work though");
      priv->have_stream_volume = FALSE;
    }

  gst_bin_add (GST_BIN (obj), priv->src);

  priv->volume_element = gst_element_factory_make ("volume", NULL);
  gst_bin_add (GST_BIN (obj), priv->volume_element);

  {
    GstElement *capsfilter;
    GstCaps *caps;

    /* Explicitly state what format we want from pulsesrc. This pushes resampling
     * and format conversion as early as possible, lowering the amount of data
     * transferred and thus improving performance. When moving to GStreamer
     * 0.11/1.0, this should change so that we actually request what the encoder
     * wants downstream. */
    caps = gst_caps_new_simple ("audio/x-raw-int",
        "channels", G_TYPE_INT, 1,
        "width", G_TYPE_INT, 16,
        "depth", G_TYPE_INT, 16,
        "rate", G_TYPE_INT, 32000,
        NULL);
    capsfilter = gst_element_factory_make ("capsfilter", NULL);
    g_object_set (G_OBJECT (capsfilter), "caps", caps, NULL);
    gst_bin_add (GST_BIN (obj), capsfilter);
    gst_element_link (priv->src, capsfilter);
    gst_element_link (capsfilter, priv->volume_element);
  }

  src = gst_element_get_static_pad (priv->volume_element, "src");

  ghost = gst_ghost_pad_new ("src", src);
  gst_element_add_pad (GST_ELEMENT (obj), ghost);

  gst_object_unref (G_OBJECT (src));

  /* Listen to changes to GstPulseSrc:source-output-index so we know when
   * it's no longer PA_INVALID_INDEX (starting for the first time) or if it
   * changes (READY->NULL->READY...) */
  g_signal_connect (priv->src, "notify::source-output-index",
      G_CALLBACK (empathy_audio_src_source_output_index_notify),
      obj);

  priv->mic_monitor = empathy_mic_monitor_new ();
  g_signal_connect (priv->mic_monitor, "microphone-changed",
      G_CALLBACK (empathy_audio_src_microphone_changed_cb), obj);

  priv->source_idx = PA_INVALID_INDEX;
}
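
The structural pattern this init function relies on is worth isolating: src ! capsfilter ! volume is wrapped in a bin, and the last element's src pad is exposed through a ghost pad so the bin links like a single element. Below is a minimal self-contained sketch of that pattern; it assumes GStreamer 1.0 (hence audio/x-raw S16LE caps instead of the 0.10-era audio/x-raw-int above) and uses a stock audiotestsrc where Empathy's create_src () would build a pulsesrc.

/* Minimal sketch (GStreamer 1.0): wrap src ! capsfilter ! volume in a bin
 * and expose the chain through a ghost "src" pad, as the Empathy code does. */
#include <gst/gst.h>

int main (int argc, char **argv)
{
  GstElement *bin, *src, *capsfilter, *volume, *pipeline, *sink;
  GstCaps *caps;
  GstPad *pad, *ghost;

  gst_init (&argc, &argv);

  bin = gst_bin_new ("audiosrc-bin");
  src = gst_element_factory_make ("audiotestsrc", NULL);
  capsfilter = gst_element_factory_make ("capsfilter", NULL);
  volume = gst_element_factory_make ("volume", NULL);

  /* Fix the format early, mirroring the capsfilter above. */
  caps = gst_caps_new_simple ("audio/x-raw",
      "format", G_TYPE_STRING, "S16LE",
      "channels", G_TYPE_INT, 1,
      "rate", G_TYPE_INT, 32000,
      NULL);
  g_object_set (capsfilter, "caps", caps, NULL);
  gst_caps_unref (caps);

  gst_bin_add_many (GST_BIN (bin), src, capsfilter, volume, NULL);
  gst_element_link_many (src, capsfilter, volume, NULL);

  /* Expose the volume element's src pad as the bin's own "src" pad. */
  pad = gst_element_get_static_pad (volume, "src");
  ghost = gst_ghost_pad_new ("src", pad);
  gst_element_add_pad (GST_ELEMENT (bin), ghost);
  gst_object_unref (pad);

  /* The bin now links like any other element. */
  pipeline = gst_pipeline_new (NULL);
  sink = gst_element_factory_make ("autoaudiosink", NULL);
  gst_bin_add_many (GST_BIN (pipeline), bin, sink, NULL);
  gst_element_link (bin, sink);

  gst_element_set_state (pipeline, GST_STATE_PLAYING);
  g_usleep (2 * G_USEC_PER_SEC);
  gst_element_set_state (pipeline, GST_STATE_NULL);
  gst_object_unref (pipeline);
  return 0;
}

Build with: gcc sketch.c $(pkg-config --cflags --libs gstreamer-1.0)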