Code example #1
File: rb-player-gst.c Project: dignan/control
static gboolean
construct_pipeline (RBPlayerGst *mp, GError **error)
{
	GstElement *sink;

	mp->priv->playbin = gst_element_factory_make ("playbin2", NULL);
	if (mp->priv->playbin == NULL) {
		g_set_error (error,
			     RB_PLAYER_ERROR,
			     RB_PLAYER_ERROR_GENERAL,
			     _("Failed to create playbin2 element; check your GStreamer installation"));
		return FALSE;
	}
	g_signal_connect_object (G_OBJECT (mp->priv->playbin),
				 "about-to-finish",
				 G_CALLBACK (about_to_finish_cb),
				 mp, 0);
	g_signal_connect_object (G_OBJECT (mp->priv->playbin),
				 "deep-notify::volume",
				 G_CALLBACK (volume_notify_cb),
				 mp, 0);
	g_signal_connect_object (G_OBJECT (mp->priv->playbin),
				 "notify::source",
				 G_CALLBACK (source_notify_cb),
				 mp, 0);
	if (mp->priv->buffer_size != 0) {
		g_object_set (mp->priv->playbin, "buffer-size", mp->priv->buffer_size * 1024, NULL);
	}

	gst_bus_add_watch (gst_element_get_bus (mp->priv->playbin),
			   (GstBusFunc) bus_cb,
			   mp);

	/* let plugins add bits to playbin */
	g_object_notify (G_OBJECT (mp), "playbin");
	g_object_notify (G_OBJECT (mp), "bus");

	/* Use gconfaudiosink for audio if there's no audio sink yet */
	g_object_get (mp->priv->playbin, "audio-sink", &mp->priv->audio_sink, NULL);
	if (mp->priv->audio_sink == NULL) {
		mp->priv->audio_sink = gst_element_factory_make ("gconfaudiosink", NULL);
		if (mp->priv->audio_sink == NULL) {
			/* fall back to autoaudiosink */
			rb_debug ("falling back to autoaudiosink");
			mp->priv->audio_sink = gst_element_factory_make ("autoaudiosink", NULL);
		} else {
			rb_debug ("using gconfaudiosink");
		}

		if (mp->priv->audio_sink != NULL) {
			/* set the profile property on the gconfaudiosink to "music and movies" */
			if (g_object_class_find_property (G_OBJECT_GET_CLASS (mp->priv->audio_sink), "profile")) {
				rb_debug ("setting profile property on audio sink");
				g_object_set (mp->priv->audio_sink, "profile", 1, NULL);
			}

			g_object_set (mp->priv->playbin, "audio-sink", mp->priv->audio_sink, NULL);
		}
	} else {
		rb_debug ("existing audio sink found");
		g_object_unref (mp->priv->audio_sink);
	}

	{
		GstPad *pad;
		GList *l;
		GstElement *queue;
		GstPad *ghostpad;

		/* setup filterbin */
		mp->priv->filterbin = rb_gst_create_filter_bin ();

		/* set up the sinkbin with its tee element */
		mp->priv->sinkbin = gst_bin_new (NULL);
		mp->priv->tee = gst_element_factory_make ("tee", NULL);
		queue = gst_element_factory_make ("queue", NULL);

		/* link it all together and insert */
		gst_bin_add_many (GST_BIN (mp->priv->sinkbin), mp->priv->filterbin, mp->priv->tee, queue, mp->priv->audio_sink, NULL);
		gst_element_link_many (mp->priv->filterbin, mp->priv->tee, queue, mp->priv->audio_sink, NULL);

		pad = gst_element_get_pad (mp->priv->filterbin, "sink");
		ghostpad = gst_ghost_pad_new ("sink", pad);
		gst_element_add_pad (mp->priv->sinkbin, ghostpad);
		gst_object_unref (pad);

		g_object_set (G_OBJECT (mp->priv->playbin), "audio-sink", mp->priv->sinkbin, NULL);

		/* add any tees and filters that were waiting for us */
		for (l = mp->priv->waiting_tees; l != NULL; l = g_list_next (l)) {
			rb_player_gst_tee_add_tee (RB_PLAYER_GST_TEE (mp), GST_ELEMENT (l->data));
		}
		g_list_free (mp->priv->waiting_tees);
		mp->priv->waiting_tees = NULL;

		for (l = mp->priv->waiting_filters; l != NULL; l = g_list_next (l)) {
			rb_player_gst_filter_add_filter (RB_PLAYER_GST_FILTER(mp), GST_ELEMENT (l->data));
		}
		g_list_free (mp->priv->waiting_filters);
		mp->priv->waiting_filters = NULL;
	}

	/* Use fakesink for video if there's no video sink yet */
	g_object_get (mp->priv->playbin, "video-sink", &sink, NULL);
	if (sink == NULL) {
		sink = gst_element_factory_make ("fakesink", NULL);
		g_object_set (mp->priv->playbin, "video-sink", sink, NULL);
	} else {
		g_object_unref (sink);
	}

	if (mp->priv->cur_volume > 1.0)
		mp->priv->cur_volume = 1.0;
	if (mp->priv->cur_volume < 0.0)
		mp->priv->cur_volume = 0.0;

	rb_debug ("pipeline construction complete");
	return TRUE;
}
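A point worth pulling out of this example: the real audio sink is wrapped in a bin (filterbin ! tee ! queue ! sink) whose input is exposed through a ghost pad, so the whole chain can be handed to playbin as its audio-sink. Below is a minimal sketch of that pattern under GStreamer 1.0 naming (gst_element_get_static_pad replaces the gst_element_get_pad call used above); the element choices are placeholders:

static GstElement *
make_wrapped_sink (void)
{
  GstElement *bin = gst_bin_new ("sinkbin");
  GstElement *queue = gst_element_factory_make ("queue", NULL);
  GstElement *sink = gst_element_factory_make ("autoaudiosink", NULL);
  GstPad *pad;

  gst_bin_add_many (GST_BIN (bin), queue, sink, NULL);
  gst_element_link (queue, sink);

  /* expose the queue's sink pad as the bin's own "sink" pad */
  pad = gst_element_get_static_pad (queue, "sink");
  gst_element_add_pad (bin, gst_ghost_pad_new ("sink", pad));
  gst_object_unref (pad);

  return bin;   /* usable as g_object_set (playbin, "audio-sink", bin, NULL) */
}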
Code example #2
File: gstavvidenc.c Project: krieger-od/gst-ffmpeg
static gboolean
gst_ffmpegvidenc_set_format (GstVideoEncoder * encoder,
    GstVideoCodecState * state)
{
  GstCaps *other_caps;
  GstCaps *allowed_caps;
  GstCaps *icaps;
  GstVideoCodecState *output_format;
  enum PixelFormat pix_fmt;
  GstFFMpegVidEnc *ffmpegenc = (GstFFMpegVidEnc *) encoder;
  GstFFMpegVidEncClass *oclass =
      (GstFFMpegVidEncClass *) G_OBJECT_GET_CLASS (ffmpegenc);

  /* close old session */
  if (ffmpegenc->opened) {
    gst_ffmpeg_avcodec_close (ffmpegenc->context);
    ffmpegenc->opened = FALSE;
    if (avcodec_get_context_defaults3 (ffmpegenc->context,
            oclass->in_plugin) < 0) {
      GST_DEBUG_OBJECT (ffmpegenc, "Failed to set context defaults");
      return FALSE;
    }
  }

  /* if we set it in _getcaps we should set it also in _link */
  ffmpegenc->context->strict_std_compliance = ffmpegenc->compliance;

  /* user defined properties */
  ffmpegenc->context->bit_rate = ffmpegenc->bitrate;
  ffmpegenc->context->bit_rate_tolerance = ffmpegenc->bitrate;
  ffmpegenc->context->gop_size = ffmpegenc->gop_size;
  ffmpegenc->context->me_method = ffmpegenc->me_method;
  GST_DEBUG_OBJECT (ffmpegenc, "Setting avcontext to bitrate %d, gop_size %d",
      ffmpegenc->bitrate, ffmpegenc->gop_size);

  /* RTP payload used for GOB production (for Asterisk) */
  if (ffmpegenc->rtp_payload_size) {
    ffmpegenc->context->rtp_payload_size = ffmpegenc->rtp_payload_size;
  }

  /* additional avcodec settings */
  /* first fill in the majority by copying over */
  gst_ffmpeg_cfg_fill_context (ffmpegenc, ffmpegenc->context);

  /* then handle some special cases */
  ffmpegenc->context->lmin = (ffmpegenc->lmin * FF_QP2LAMBDA + 0.5);
  ffmpegenc->context->lmax = (ffmpegenc->lmax * FF_QP2LAMBDA + 0.5);

  if (ffmpegenc->interlaced) {
    ffmpegenc->context->flags |=
        CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME;
    ffmpegenc->picture->interlaced_frame = TRUE;
    /* if this is not the case, a filter element should be used to swap fields */
    ffmpegenc->picture->top_field_first = TRUE;
  }

  /* some other defaults */
  ffmpegenc->context->rc_strategy = 2;
  ffmpegenc->context->b_frame_strategy = 0;
  ffmpegenc->context->coder_type = 0;
  ffmpegenc->context->context_model = 0;
  ffmpegenc->context->scenechange_threshold = 0;
  ffmpegenc->context->inter_threshold = 0;

  /* and last but not least the pass; CBR, 2-pass, etc */
  ffmpegenc->context->flags |= ffmpegenc->pass;
  switch (ffmpegenc->pass) {
      /* some additional action depends on type of pass */
    case CODEC_FLAG_QSCALE:
      ffmpegenc->context->global_quality
          = ffmpegenc->picture->quality = FF_QP2LAMBDA * ffmpegenc->quantizer;
      break;
    case CODEC_FLAG_PASS1:     /* need to prepare a stats file */
      /* we don't close when changing caps, fingers crossed */
      if (!ffmpegenc->file)
        ffmpegenc->file = g_fopen (ffmpegenc->filename, "w");
      if (!ffmpegenc->file)
        goto open_file_err;
      break;
    case CODEC_FLAG_PASS2:
    {                           /* need to read the whole stats file ! */
      gsize size;

      if (!g_file_get_contents (ffmpegenc->filename,
              &ffmpegenc->context->stats_in, &size, NULL))
        goto file_read_err;

      break;
    }
    default:
      break;
  }

  GST_DEBUG_OBJECT (ffmpegenc, "Extracting common video information");
  /* fetch pix_fmt, fps, par, width, height... */
  gst_ffmpeg_videoinfo_to_context (&state->info, ffmpegenc->context);

  if ((oclass->in_plugin->id == AV_CODEC_ID_MPEG4)
      && (ffmpegenc->context->time_base.den > 65535)) {
    /* MPEG4 Standards do not support time_base denominator greater than
     * (1<<16) - 1 . We therefore scale them down.
     * Agreed, it will not be the exact framerate... but the difference
     * shouldn't be that noticeable */
    ffmpegenc->context->time_base.num =
        (gint) gst_util_uint64_scale_int (ffmpegenc->context->time_base.num,
        65535, ffmpegenc->context->time_base.den);
    ffmpegenc->context->time_base.den = 65535;
    GST_LOG_OBJECT (ffmpegenc, "MPEG4 : scaled down framerate to %d / %d",
        ffmpegenc->context->time_base.den, ffmpegenc->context->time_base.num);
  }

  pix_fmt = ffmpegenc->context->pix_fmt;

  /* max-key-interval may need the framerate set above */
  if (ffmpegenc->max_key_interval) {
    AVCodecContext *ctx;

    /* override gop-size */
    ctx = ffmpegenc->context;
    ctx->gop_size = (ffmpegenc->max_key_interval < 0) ?
        (-ffmpegenc->max_key_interval
        * (ctx->time_base.den * ctx->ticks_per_frame / ctx->time_base.num))
        : ffmpegenc->max_key_interval;
  }

  /* open codec */
  if (gst_ffmpeg_avcodec_open (ffmpegenc->context, oclass->in_plugin) < 0)
    goto open_codec_fail;

  /* second pass stats buffer no longer needed */
  if (ffmpegenc->context->stats_in)
    g_free (ffmpegenc->context->stats_in);

  /* is the colourspace correct? */
  if (pix_fmt != ffmpegenc->context->pix_fmt)
    goto pix_fmt_err;

  /* we may have failed mapping caps to a pixfmt,
   * and quite some codecs do not make up their own mind about that
   * in any case, _NONE can never work out later on */
  if (pix_fmt == PIX_FMT_NONE)
    goto bad_input_fmt;

  /* some codecs support more than one format, first auto-choose one */
  GST_DEBUG_OBJECT (ffmpegenc, "picking an output format ...");
  allowed_caps = gst_pad_get_allowed_caps (GST_VIDEO_ENCODER_SRC_PAD (encoder));
  if (!allowed_caps) {
    GST_DEBUG_OBJECT (ffmpegenc, "... but no peer, using template caps");
    /* we need to copy because get_allowed_caps returns a ref, and
     * get_pad_template_caps doesn't */
    allowed_caps =
        gst_pad_get_pad_template_caps (GST_VIDEO_ENCODER_SRC_PAD (encoder));
  }
  GST_DEBUG_OBJECT (ffmpegenc, "chose caps %" GST_PTR_FORMAT, allowed_caps);
  gst_ffmpeg_caps_with_codecid (oclass->in_plugin->id,
      oclass->in_plugin->type, allowed_caps, ffmpegenc->context);

  /* try to set this caps on the other side */
  other_caps = gst_ffmpeg_codecid_to_caps (oclass->in_plugin->id,
      ffmpegenc->context, TRUE);

  if (!other_caps) {
    gst_caps_unref (allowed_caps);
    goto unsupported_codec;
  }

  icaps = gst_caps_intersect (allowed_caps, other_caps);
  gst_caps_unref (allowed_caps);
  gst_caps_unref (other_caps);
  if (gst_caps_is_empty (icaps)) {
    gst_caps_unref (icaps);
    return FALSE;
  }
  icaps = gst_caps_truncate (icaps);

  /* Store input state and set output state */
  if (ffmpegenc->input_state)
    gst_video_codec_state_unref (ffmpegenc->input_state);
  ffmpegenc->input_state = gst_video_codec_state_ref (state);

  output_format = gst_video_encoder_set_output_state (encoder, icaps, state);
  gst_video_codec_state_unref (output_format);

  /* success! */
  ffmpegenc->opened = TRUE;

  return TRUE;

  /* ERRORS */
open_file_err:
  {
    GST_ELEMENT_ERROR (ffmpegenc, RESOURCE, OPEN_WRITE,
        (("Could not open file \"%s\" for writing."), ffmpegenc->filename),
        GST_ERROR_SYSTEM);
    return FALSE;
  }
file_read_err:
  {
    GST_ELEMENT_ERROR (ffmpegenc, RESOURCE, READ,
        (("Could not get contents of file \"%s\"."), ffmpegenc->filename),
        GST_ERROR_SYSTEM);
    return FALSE;
  }

open_codec_fail:
  {
    gst_ffmpeg_avcodec_close (ffmpegenc->context);
    if (avcodec_get_context_defaults3 (ffmpegenc->context,
            oclass->in_plugin) < 0)
      GST_DEBUG_OBJECT (ffmpegenc, "Failed to set context defaults");
    if (ffmpegenc->context->stats_in)
      g_free (ffmpegenc->context->stats_in);
    GST_DEBUG_OBJECT (ffmpegenc, "avenc_%s: Failed to open libav codec",
        oclass->in_plugin->name);
    return FALSE;
  }

pix_fmt_err:
  {
    gst_ffmpeg_avcodec_close (ffmpegenc->context);
    if (avcodec_get_context_defaults3 (ffmpegenc->context,
            oclass->in_plugin) < 0)
      GST_DEBUG_OBJECT (ffmpegenc, "Failed to set context defaults");
    GST_DEBUG_OBJECT (ffmpegenc,
        "avenc_%s: AV wants different colourspace (%d given, %d wanted)",
        oclass->in_plugin->name, pix_fmt, ffmpegenc->context->pix_fmt);
    return FALSE;
  }

bad_input_fmt:
  {
    GST_DEBUG_OBJECT (ffmpegenc, "avenc_%s: Failed to determine input format",
        oclass->in_plugin->name);
    return FALSE;
  }

unsupported_codec:
  {
    gst_ffmpeg_avcodec_close (ffmpegenc->context);
    if (avcodec_get_context_defaults3 (ffmpegenc->context,
            oclass->in_plugin) < 0)
      GST_DEBUG_OBJECT (ffmpegenc, "Failed to set context defaults");
    GST_DEBUG ("Unsupported codec - no caps found");
    return FALSE;
  }
}
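The caps-selection step near the end is a common encoder pattern: ask downstream what it accepts, intersect that with what the codec can produce, and truncate to the first structure. A condensed sketch assuming GStreamer 1.0 reference semantics; codec_caps stands in for the caps derived from the codec context:

static GstCaps *
pick_output_caps (GstPad *srcpad, GstCaps *codec_caps)
{
  GstCaps *allowed = gst_pad_get_allowed_caps (srcpad);
  GstCaps *icaps;

  if (allowed == NULL)                /* no peer yet: fall back to the template */
    allowed = gst_pad_get_pad_template_caps (srcpad);

  icaps = gst_caps_intersect (allowed, codec_caps);
  gst_caps_unref (allowed);

  if (gst_caps_is_empty (icaps)) {
    gst_caps_unref (icaps);
    return NULL;                      /* negotiation failed */
  }

  /* takes ownership and returns caps holding only the first structure */
  return gst_caps_truncate (icaps);
}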
Code example #3
File: gstavvidenc.c Project: krieger-od/gst-ffmpeg
static GstFlowReturn
gst_ffmpegvidenc_flush_buffers (GstFFMpegVidEnc * ffmpegenc, gboolean send)
{
  GstVideoCodecFrame *frame;
  GstFlowReturn flow_ret = GST_FLOW_OK;
  GstBuffer *outbuf;
  gint ret_size;

  GST_DEBUG_OBJECT (ffmpegenc, "flushing buffers with sending %d", send);

  /* no need to empty codec if there is none */
  if (!ffmpegenc->opened)
    goto done;

  while ((frame =
          gst_video_encoder_get_oldest_frame (GST_VIDEO_ENCODER (ffmpegenc)))) {

    ffmpegenc_setup_working_buf (ffmpegenc);

    ret_size = avcodec_encode_video (ffmpegenc->context,
        ffmpegenc->working_buf, ffmpegenc->working_buf_size, NULL);

    if (ret_size < 0) {         /* there should be something, notify and give up */
#ifndef GST_DISABLE_GST_DEBUG
      GstFFMpegVidEncClass *oclass =
          (GstFFMpegVidEncClass *) (G_OBJECT_GET_CLASS (ffmpegenc));
      GST_WARNING_OBJECT (ffmpegenc,
          "avenc_%s: failed to flush buffer", oclass->in_plugin->name);
#endif /* GST_DISABLE_GST_DEBUG */
      gst_video_codec_frame_unref (frame);
      break;
    }

    /* save stats info if there is some as well as a stats file */
    if (ffmpegenc->file && ffmpegenc->context->stats_out)
      if (fprintf (ffmpegenc->file, "%s", ffmpegenc->context->stats_out) < 0)
        GST_ELEMENT_ERROR (ffmpegenc, RESOURCE, WRITE,
            (("Could not write to file \"%s\"."), ffmpegenc->filename),
            GST_ERROR_SYSTEM);

    if (send) {
      if (gst_video_encoder_allocate_output_frame (GST_VIDEO_ENCODER
              (ffmpegenc), frame, ret_size) != GST_FLOW_OK) {
#ifndef GST_DISABLE_GST_DEBUG
        GstFFMpegVidEncClass *oclass =
            (GstFFMpegVidEncClass *) (G_OBJECT_GET_CLASS (ffmpegenc));
        GST_WARNING_OBJECT (ffmpegenc,
            "avenc_%s: failed to allocate buffer", oclass->in_plugin->name);
#endif /* GST_DISABLE_GST_DEBUG */
        gst_video_codec_frame_unref (frame);
        break;
      }
      outbuf = frame->output_buffer;
      gst_buffer_fill (outbuf, 0, ffmpegenc->working_buf, ret_size);

      if (ffmpegenc->context->coded_frame->key_frame)
        GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (frame);

      flow_ret =
          gst_video_encoder_finish_frame (GST_VIDEO_ENCODER (ffmpegenc), frame);
    } else {
      gst_video_codec_frame_unref (frame);
    }
  }

done:

  return flow_ret;
}
Code example #4
File: gstautovideosink.c Project: PeterXu/gst-mobile
static GstElement *
gst_auto_video_sink_find_best (GstAutoVideoSink * sink)
{
  GList *list, *item;
  GstElement *choice = NULL;
  GstMessage *message = NULL;
  GSList *errors = NULL;
  GstBus *bus = gst_bus_new ();
  GstPad *el_pad = NULL;
  GstCaps *el_caps = NULL;
  gboolean no_match = TRUE;

  list = gst_registry_feature_filter (gst_registry_get (),
      (GstPluginFeatureFilter) gst_auto_video_sink_factory_filter, FALSE, sink);
  list = g_list_sort (list, (GCompareFunc) gst_auto_video_sink_compare_ranks);

  GST_LOG_OBJECT (sink, "Trying to find usable video devices ...");

  for (item = list; item != NULL; item = item->next) {
    GstElementFactory *f = GST_ELEMENT_FACTORY (item->data);
    GstElement *el;

    if ((el = gst_auto_video_sink_create_element_with_pretty_name (sink, f))) {
      GstStateChangeReturn ret;

      GST_DEBUG_OBJECT (sink, "Testing %s", GST_OBJECT_NAME (f));

      /* If autovideosink has been provided with filter caps,
       * accept only sinks that match with the filter caps */
      if (sink->filter_caps) {
        el_pad = gst_element_get_static_pad (GST_ELEMENT (el), "sink");
        el_caps = gst_pad_query_caps (el_pad, NULL);
        gst_object_unref (el_pad);
        GST_DEBUG_OBJECT (sink,
            "Checking caps: %" GST_PTR_FORMAT " vs. %" GST_PTR_FORMAT,
            sink->filter_caps, el_caps);
        no_match = !gst_caps_can_intersect (sink->filter_caps, el_caps);
        gst_caps_unref (el_caps);

        if (no_match) {
          GST_DEBUG_OBJECT (sink, "Incompatible caps");
          gst_object_unref (el);
          continue;
        } else {
          GST_DEBUG_OBJECT (sink, "Found compatible caps");
        }
      }

      gst_element_set_bus (el, bus);
      ret = gst_element_set_state (el, GST_STATE_READY);
      if (ret == GST_STATE_CHANGE_SUCCESS) {
        GST_DEBUG_OBJECT (sink, "This worked!");
        choice = el;
        break;
      }

      /* collect all error messages */
      while ((message = gst_bus_pop_filtered (bus, GST_MESSAGE_ERROR))) {
        GST_DEBUG_OBJECT (sink, "error message %" GST_PTR_FORMAT, message);
        errors = g_slist_append (errors, message);
      }

      gst_element_set_state (el, GST_STATE_NULL);
      gst_object_unref (el);
    }
  }

  GST_DEBUG_OBJECT (sink, "done trying");
  if (!choice) {
    if (errors) {
      /* FIXME: we forward the first error for now; but later on it might make
       * sense to actually analyse them */
      gst_message_ref (GST_MESSAGE (errors->data));
      GST_DEBUG_OBJECT (sink, "reposting message %p", errors->data);
      gst_element_post_message (GST_ELEMENT (sink), GST_MESSAGE (errors->data));
    } else {
      /* send warning message to application and use a fakesink */
      GST_ELEMENT_WARNING (sink, RESOURCE, NOT_FOUND, (NULL),
          ("Failed to find a usable video sink"));
      choice = gst_element_factory_make ("fakesink", "fake-video-sink");
      if (g_object_class_find_property (G_OBJECT_GET_CLASS (choice), "sync"))
        g_object_set (choice, "sync", TRUE, NULL);
      gst_element_set_state (choice, GST_STATE_READY);
    }
  }
  gst_object_unref (bus);
  gst_plugin_feature_list_free (list);
  g_slist_foreach (errors, (GFunc) gst_mini_object_unref, NULL);
  g_slist_free (errors);

  return choice;
}
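The ranked candidate list at the top comes from a registry query. Here is that query in isolation; the filter callback is an assumption modelled on what gst_auto_video_sink_factory_filter presumably checks (element factories whose klass metadata marks them as video sinks):

#include <string.h>
#include <gst/gst.h>

static gboolean
is_video_sink_factory (GstPluginFeature *feature, gpointer user_data)
{
  const gchar *klass;

  if (!GST_IS_ELEMENT_FACTORY (feature))
    return FALSE;
  klass = gst_element_factory_get_metadata (GST_ELEMENT_FACTORY (feature),
      GST_ELEMENT_METADATA_KLASS);
  return klass != NULL && strstr (klass, "Sink") != NULL &&
      strstr (klass, "Video") != NULL;
}

static GList *
list_video_sinks_by_rank (void)
{
  GList *list = gst_registry_feature_filter (gst_registry_get (),
      is_video_sink_factory, FALSE, NULL);
  /* highest-rank factories first, as the example expects */
  return g_list_sort (list, gst_plugin_feature_rank_compare_func);
}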
Code example #5
void QGstreamerVideoWindow::setAutopaintColorKey(bool enabled)
{
    if (m_videoSink && g_object_class_find_property(G_OBJECT_GET_CLASS(m_videoSink), "autopaint-colorkey"))
        g_object_set(G_OBJECT(m_videoSink), "autopaint-colorkey", enabled, NULL);
}
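This five-line example is the recurring idiom on this page in its purest form: probe the class for the property before setting it, so the code keeps working with sink variants that lack it. Condensed into a reusable helper (hypothetical name):

static void
set_bool_prop_if_present (gpointer object, const gchar *name, gboolean value)
{
  if (g_object_class_find_property (G_OBJECT_GET_CLASS (object), name) != NULL)
    g_object_set (object, name, value, NULL);
}

/* e.g. set_bool_prop_if_present (m_videoSink, "autopaint-colorkey", TRUE); */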
Code example #6
File: gstdshowvideodec.cpp Project: zsx/ossbuild
static gboolean
gst_dshowvideodec_create_graph_and_filters (GstDshowVideoDec * vdec)
{
  HRESULT hres = S_FALSE;
  GstDshowVideoDecClass *klass =
      (GstDshowVideoDecClass *) G_OBJECT_GET_CLASS (vdec);
  IBaseFilter *srcfilter = NULL;
  IBaseFilter *sinkfilter = NULL;
  gboolean ret = FALSE;

  /* create the filter graph manager object */
  hres = CoCreateInstance (CLSID_FilterGraph, NULL, CLSCTX_INPROC,
      IID_IFilterGraph, (LPVOID *) & vdec->filtergraph);
  if (hres != S_OK || !vdec->filtergraph) {
    GST_ELEMENT_ERROR (vdec, STREAM, FAILED, ("Can't create an instance "
            "of the directshow graph manager (error=%d)", hres), (NULL));
    goto error;
  }

  hres = vdec->filtergraph->QueryInterface(IID_IMediaFilter,
      (void **) &vdec->mediafilter);
  if (hres != S_OK || !vdec->mediafilter) {
    GST_ELEMENT_ERROR (vdec, STREAM, FAILED,
        ("Can't get IMediacontrol interface "
            "from the graph manager (error=%d)", hres), (NULL));
    goto error;
  }

  /* create fake src filter */
  vdec->fakesrc = new FakeSrc();
  /* Created with a refcount of zero, so increment that */
  vdec->fakesrc->AddRef();

  hres = vdec->fakesrc->QueryInterface(IID_IBaseFilter,
      (void **) &srcfilter);
  if (FAILED (hres)) {
    GST_WARNING_OBJECT (vdec, "Failed to QI fakesrc to IBaseFilter");
    goto error;
  }

  /* search a decoder filter and create it */
  vdec->decfilter = gst_dshow_find_filter (
          klass->entry->input_majortype,
          klass->entry->input_subtype,
          klass->entry->output_majortype,
          klass->entry->output_subtype,
          klass->entry->preferred_filters);
  if (vdec->decfilter == NULL) {
    GST_ELEMENT_ERROR (vdec, STREAM, FAILED, ("Can't create an instance "
            "of the decoder filter"), (NULL));
    goto error;
  }

  /* create fake sink filter */
  vdec->fakesink = new VideoFakeSink(vdec);
  /* Created with a refcount of zero, so increment that */
  vdec->fakesink->AddRef();

  hres = vdec->fakesink->QueryInterface(IID_IBaseFilter,
      (void **) &sinkfilter);
  if (FAILED (hres)) {
    GST_WARNING_OBJECT (vdec, "Failed to QI fakesink to IBaseFilter");
    goto error;
  }

  /* add filters to the graph */
  hres = vdec->filtergraph->AddFilter (srcfilter, L"src");
  if (hres != S_OK) {
    GST_ELEMENT_ERROR (vdec, STREAM, FAILED, ("Can't add fakesrc filter "
            "to the graph (error=%d)", hres), (NULL));
    goto error;
  }

  hres = vdec->filtergraph->AddFilter(vdec->decfilter, L"decoder");
  if (hres != S_OK) {
    GST_ELEMENT_ERROR (vdec, STREAM, FAILED, ("Can't add decoder filter "
            "to the graph (error=%d)", hres), (NULL));
    goto error;
  }

  hres = vdec->filtergraph->AddFilter(sinkfilter, L"sink");
  if (hres != S_OK) {
    GST_ELEMENT_ERROR (vdec, STREAM, FAILED, ("Can't add fakesink filter "
            "to the graph (error=%d)", hres), (NULL));
    goto error;
  }

  vdec->setup = TRUE;

  ret = TRUE;

done:
  if (srcfilter)
    srcfilter->Release();
  if (sinkfilter)
    sinkfilter->Release();
  return ret;

error:
  if (vdec->fakesrc) {
    vdec->fakesrc->Release();
    vdec->fakesrc = NULL;
  }
  if (vdec->decfilter) {
    vdec->decfilter->Release();
    vdec->decfilter = NULL;
  }
  if (vdec->fakesink) {
    vdec->fakesink->Release();
    vdec->fakesink = NULL;
  }
  if (vdec->mediafilter) {
    vdec->mediafilter->Release();
    vdec->mediafilter = NULL;
  }
  if (vdec->filtergraph) {
    vdec->filtergraph->Release();
    vdec->filtergraph = NULL;
  }

  goto done;
}
Code example #7
void eServiceMP3Record::handleUridecNotifySource(GObject *object, GParamSpec *unused, gpointer user_data)
{
	GstElement *source = NULL;
	eServiceMP3Record *_this = (eServiceMP3Record*)user_data;
	g_object_get(object, "source", &source, NULL);
	if (source)
	{
		if (g_object_class_find_property(G_OBJECT_GET_CLASS(source), "ssl-strict") != 0)
		{
			g_object_set(G_OBJECT(source), "ssl-strict", FALSE, NULL);
		}
		if (g_object_class_find_property(G_OBJECT_GET_CLASS(source), "user-agent") != 0 && !_this->m_useragent.empty())
		{
			g_object_set(G_OBJECT(source), "user-agent", _this->m_useragent.c_str(), NULL);
		}
		if (g_object_class_find_property(G_OBJECT_GET_CLASS(source), "extra-headers") != 0 && !_this->m_extra_headers.empty())
		{
#if GST_VERSION_MAJOR < 1
			GstStructure *extras = gst_structure_empty_new("extras");
#else
			GstStructure *extras = gst_structure_new_empty("extras");
#endif
			size_t pos = 0;
			while (pos != std::string::npos)
			{
				std::string name, value;
				size_t start = pos;
				size_t len = std::string::npos;
				pos = _this->m_extra_headers.find('=', pos);
				if (pos != std::string::npos)
				{
					len = pos - start;
					pos++;
					name = _this->m_extra_headers.substr(start, len);
					start = pos;
					len = std::string::npos;
					pos = _this->m_extra_headers.find('&', pos);
					if (pos != std::string::npos)
					{
						len = pos - start;
						pos++;
					}
					value = _this->m_extra_headers.substr(start, len);
				}
				if (!name.empty() && !value.empty())
				{
					GValue header;
					eDebug("[eServiceMP3Record] handleUridecNotifySource setting extra-header '%s:%s'", name.c_str(), value.c_str());
					memset(&header, 0, sizeof(GValue));
					g_value_init(&header, G_TYPE_STRING);
					g_value_set_string(&header, value.c_str());
					gst_structure_set_value(extras, name.c_str(), &header);
					g_value_unset(&header); /* gst_structure_set_value() copies, so release our copy */
				}
				else
				{
					eDebug("[eServiceMP3Record] handleUridecNotifySource invalid header format %s", _this->m_extra_headers.c_str());
					break;
				}
			}
			if (gst_structure_n_fields(extras) > 0)
			{
				g_object_set(G_OBJECT(source), "extra-headers", extras, NULL);
			}
			gst_structure_free(extras);
		}
		gst_object_unref(source);
	}
}
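When the headers are fixed at compile time the parsing loop is unnecessary: the structure can be built directly and set on the source. This assumes a source such as souphttpsrc that exposes an "extra-headers" GstStructure property, which is exactly what the g_object_class_find_property() check above guards:

static void
set_fixed_extra_headers (GstElement *source)
{
  GstStructure *extras = gst_structure_new ("extras",
      "Icy-MetaData", G_TYPE_STRING, "1",
      NULL);

  if (g_object_class_find_property (G_OBJECT_GET_CLASS (source),
          "extra-headers") != NULL)
    g_object_set (source, "extra-headers", extras, NULL);

  /* the property setter stored a copy, so free our structure */
  gst_structure_free (extras);
}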
Code example #8
File: dom-configurator.c Project: yixiaoyang/log4g
static void
parse_property(Log4gConfigurator *base, xmlNodePtr node, gpointer object)
{
	GParamSpec *spec = NULL;
	xmlChar *name = xmlGetProp(node, (const xmlChar *)"name");
	xmlChar *value = xmlGetProp(node, (const xmlChar *)"value");
	if (!name) {
		log4g_log_error(Q_("properties must have a `name' attribute"));
		goto exit;
	}
	if (!value) {
		log4g_log_error(Q_("properties must have a "
					"`value' attribute"));
		goto exit;
	}
	spec = g_object_class_find_property(G_OBJECT_GET_CLASS(object),
			(gchar *)name);
	if (!spec) {
		log4g_log_error(Q_("object does not have the property `%s'"),
				(gchar *)name);
		goto exit;
	}
	if (G_TYPE_STRING == spec->value_type) {
		g_object_set(object, (const gchar *)name, (const gchar *)value, NULL);
	} else if (G_TYPE_BOOLEAN == spec->value_type) {
		if (!g_ascii_strcasecmp((const gchar *)value, "true")) {
			g_object_set(object, (const gchar *)name, TRUE, NULL);
		} else if (!g_ascii_strcasecmp((const gchar *)value, "false")) {
			g_object_set(object, (const gchar *)name, FALSE, NULL);
		} else {
			log4g_log_error(Q_("%s: not a boolean value "
						"(true|false)"), value);
			goto exit;
		}
	} else if (G_TYPE_CHAR == spec->value_type) {
		if (1 != strlen((const char *)value)) {
			log4g_log_error(Q_("%s: not a char"), value);
			goto exit;
		}
		g_object_set(object, (const gchar *)name, *value, NULL);
	} else if (G_TYPE_UCHAR == spec->value_type) {
		if (1 != strlen((const char *)value)) {
			log4g_log_error(Q_("%s: not an uchar"), value);
			goto exit;
		}
		g_object_set(object, (const gchar *)name, *value, NULL);
	} else if (G_TYPE_INT == spec->value_type) {
		gint64 i;
		errno = 0;
		i = g_ascii_strtoll((const char *)value, NULL, 10);
		if (errno) {
			log4g_log_error(Q_("%s: not an int"), value);
			goto exit;
		}
		g_object_set(object, (const char *)name, (gint)i, NULL);
	} else if (G_TYPE_UINT == spec->value_type) {
		guint64 i;
		errno = 0;
		i = g_ascii_strtoull((const char *)value, NULL, 10);
		if (errno) {
			log4g_log_error(Q_("%s: not an uint"), value);
			goto exit;
		}
		g_object_set(object, (const char *)name, (guint)i, NULL);
	} else if (G_TYPE_LONG == spec->value_type) {
		gint64 i;
		errno = 0;
		i = g_ascii_strtoll((const char *)value, NULL, 10);
		if (errno) {
			log4g_log_error(Q_("%s: not a long"), value);
			goto exit;
		}
		g_object_set(object, (const char *)name, (glong)i, NULL);
	} else if (G_TYPE_ULONG == spec->value_type) {
		guint64 i;
		errno = 0;
		i = g_ascii_strtoull((const char *)value, NULL, 10);
		if (errno) {
			log4g_log_error(Q_("%s: not an ulong"), value);
			goto exit;
		}
		g_object_set(object, (const char *)name, (gulong)i, NULL);
	} else if (G_TYPE_INT64 == spec->value_type) {
		gint64 i;
		errno = 0;
		i = g_ascii_strtoll((const char *)value, NULL, 10);
		if (errno) {
			log4g_log_error(Q_("%s: not an int64"), value);
			goto exit;
		}
		g_object_set(object, (const char *)name, i, NULL);
	} else if (G_TYPE_UINT64 == spec->value_type) {
		guint64 i;
		errno = 0;
		i = g_ascii_strtoull((const char *)value, NULL, 10);
		if (errno) {
			log4g_log_error(Q_("%s: not an uint64"), value);
			goto exit;
		}
		g_object_set(object, (const char *)name, i, NULL);
	} else if (G_TYPE_FLOAT == spec->value_type) {
		gdouble d;
		errno = 0;
		d = g_ascii_strtod((gchar *)value, NULL);
		if (errno) {
			log4g_log_error(Q_("%s: not a float"), value);
			goto exit;
		}
		g_object_set(object, (const gchar *)name, (gfloat)d, NULL);
	} else if (G_TYPE_DOUBLE == spec->value_type) {
		gdouble d;
		errno = 0;
		d = g_ascii_strtod((gchar *)value, NULL);
		if (errno) {
			log4g_log_error(Q_("%s: not a double"), value);
			goto exit;
		}
		g_object_set(object, (const gchar *)name, d, NULL);
	} else if (G_TYPE_OBJECT == spec->value_type) {
		struct Private *priv = GET_PRIVATE(base);
		GObject *o = g_hash_table_lookup(priv->objects,
				(const gchar *)value);
		if (!o) {
			log4g_log_error(Q_("%s: undefined object"), value);
			goto exit;
		}
		g_object_set(object, (const gchar *)name, o, NULL);
	} else {
		log4g_log_warn(Q_("%s: property cannot be set via DOM "
					"configuration"), name);
		goto exit;
	}
exit:
	/* note: the GParamSpec returned by g_object_class_find_property()
	 * is owned by the class and must not be unreffed here */
	if (name) {
		xmlFree(name);
	}
	if (value) {
		xmlFree(value);
	}
}
Code example #9
/**
 * gst_wrapper_camera_bin_src_construct_pipeline:
 * @bcamsrc: camerasrc object
 *
 * This function creates and links the elements of the camerasrc bin
 * videosrc ! cspconv ! capsfilter ! crop ! scale ! capsfilter ! tee name=t !
 *    t. ! ... (viewfinder pad)
 *    t. ! output-selector name=outsel
 *        outsel. ! (image pad)
 *        outsel. ! (video pad)
 *
 * Returns: TRUE, if elements were successfully created, FALSE otherwise
 */
static gboolean
gst_wrapper_camera_bin_src_construct_pipeline (GstBaseCameraBinSrc * bcamsrc)
{
  GstWrapperCameraBinSrc *self = GST_WRAPPER_CAMERA_BIN_SRC (bcamsrc);
  GstBin *cbin = GST_BIN (bcamsrc);
  GstElement *tee;
  gboolean ret = FALSE;
  GstElement *videoscale;
  GstPad *vf_pad;
  GstPad *tee_capture_pad;
  GstPad *src_caps_src_pad;

  if (!self->elements_created) {

    GST_DEBUG_OBJECT (self, "constructing pipeline");

    /* Add application set or default video src element */
    if (!(self->src_vid_src = gst_camerabin_setup_default_element (cbin,
                self->app_vid_src, "autovideosrc", DEFAULT_VIDEOSRC,
                "camerasrc-real-src"))) {
      self->src_vid_src = NULL;
      goto done;
    } else {
      if (!gst_camerabin_add_element (cbin, self->src_vid_src)) {
        goto done;
      }
    }
    /* we lost the reference */
    self->app_vid_src = NULL;

    /* we listen for changes to max-zoom in the video src so that
     * we can proxy them to the basecamerasrc property */
    if (g_object_class_find_property (G_OBJECT_GET_CLASS (bcamsrc), "max-zoom")) {
      g_signal_connect (G_OBJECT (self->src_vid_src), "notify::max-zoom",
          (GCallback) gst_wrapper_camera_bin_src_max_zoom_cb, bcamsrc);
    }

    /* add a buffer probe to the src element to drop EOS from READY->NULL */
    {
      GstPad *pad;
      pad = gst_element_get_static_pad (self->src_vid_src, "src");

      self->src_event_probe_id = gst_pad_add_event_probe (pad,
          (GCallback) gst_wrapper_camera_src_src_event_probe, self);
      gst_object_unref (pad);
    }

    if (!gst_camerabin_create_and_add_element (cbin, "ffmpegcolorspace",
            "src-colorspace"))
      goto done;

    if (!(self->src_filter =
            gst_camerabin_create_and_add_element (cbin, "capsfilter",
                "src-capsfilter")))
      goto done;

    /* attach to notify::caps on the first capsfilter and use a callback
     * to recalculate the zoom properties when these caps change and to
     * propagate the caps to the second capsfilter */
    src_caps_src_pad = gst_element_get_static_pad (self->src_filter, "src");
    g_signal_connect (src_caps_src_pad, "notify::caps",
        G_CALLBACK (gst_wrapper_camera_bin_src_caps_cb), self);
    gst_object_unref (src_caps_src_pad);

    if (!(self->src_zoom_crop =
            gst_camerabin_create_and_add_element (cbin, "videocrop",
                "zoom-crop")))
      goto done;
    if (!(self->src_zoom_scale =
            gst_camerabin_create_and_add_element (cbin, "videoscale",
                "zoom-scale")))
      goto done;
    if (!(self->src_zoom_filter =
            gst_camerabin_create_and_add_element (cbin, "capsfilter",
                "zoom-capsfilter")))
      goto done;

    if (!(tee =
            gst_camerabin_create_and_add_element (cbin, "tee",
                "camerasrc-tee")))
      goto done;

    /* viewfinder pad */
    vf_pad = gst_element_get_request_pad (tee, "src%d");
    g_object_set (tee, "alloc-pad", vf_pad, NULL);
    gst_object_unref (vf_pad);

    /* the viewfinder should always work, so we add some converters to it */
    if (!gst_camerabin_create_and_add_element (cbin, "ffmpegcolorspace",
            "viewfinder-colorspace"))
      goto done;
    if (!(videoscale =
            gst_camerabin_create_and_add_element (cbin, "videoscale",
                "viewfinder-scale")))
      goto done;

    /* image/video pad from tee */
    tee_capture_pad = gst_element_get_request_pad (tee, "src%d");

    self->output_selector =
        gst_element_factory_make ("output-selector", "outsel");
    g_object_set (self->output_selector, "pad-negotiation-mode", 0, NULL);
    gst_bin_add (GST_BIN (self), self->output_selector);
    {
      GstPad *pad = gst_element_get_static_pad (self->output_selector, "sink");

      /* check return TODO */
      gst_pad_link (tee_capture_pad, pad);
      gst_object_unref (pad);
    }
    gst_object_unref (tee_capture_pad);

    /* Create the 2 output pads for video and image */
    self->outsel_vidpad =
        gst_element_get_request_pad (self->output_selector, "src%d");
    self->outsel_imgpad =
        gst_element_get_request_pad (self->output_selector, "src%d");

    g_assert (self->outsel_vidpad != NULL);
    g_assert (self->outsel_imgpad != NULL);

    gst_pad_add_buffer_probe (self->outsel_imgpad,
        G_CALLBACK (gst_wrapper_camera_bin_src_imgsrc_probe), self);
    gst_pad_add_buffer_probe (self->outsel_vidpad,
        G_CALLBACK (gst_wrapper_camera_bin_src_vidsrc_probe), self);
    gst_ghost_pad_set_target (GST_GHOST_PAD (self->imgsrc),
        self->outsel_imgpad);
    gst_ghost_pad_set_target (GST_GHOST_PAD (self->vidsrc),
        self->outsel_vidpad);

    if (bcamsrc->mode == MODE_IMAGE) {
      g_object_set (self->output_selector, "active-pad", self->outsel_imgpad,
          NULL);
    } else {
      g_object_set (self->output_selector, "active-pad", self->outsel_vidpad,
          NULL);
    }

    /* hook-up the vf ghostpad */
    vf_pad = gst_element_get_static_pad (videoscale, "src");
    gst_ghost_pad_set_target (GST_GHOST_PAD (self->vfsrc), vf_pad);
    gst_object_unref (vf_pad);

    gst_pad_set_active (self->vfsrc, TRUE);
    gst_pad_set_active (self->imgsrc, TRUE);    /* XXX ??? */
    gst_pad_set_active (self->vidsrc, TRUE);    /* XXX ??? */
  }
  ret = TRUE;
  self->elements_created = TRUE;
done:
  return ret;
}
Code example #10
File: gstdebugutils.c Project: Kurento/gstreamer
static gchar *
debug_dump_get_object_params (GObject * object,
    GstDebugGraphDetails details, const char *const *ignored_propnames)
{
  gchar *param_name = NULL;
  GParamSpec **properties, *property;
  GValue value = { 0, };
  guint i, number_of_properties;
  gchar *tmp, *value_str;
  const gchar *ellipses;

  /* get paramspecs and show non-default properties */
  properties =
      g_object_class_list_properties (G_OBJECT_GET_CLASS (object),
      &number_of_properties);
  if (properties) {
    for (i = 0; i < number_of_properties; i++) {
      gint j;
      gboolean ignore = FALSE;
      property = properties[i];

      /* skip some properties */
      if (!(property->flags & G_PARAM_READABLE))
        continue;
      if (!strcmp (property->name, "name"))
        continue;

      if (ignored_propnames)
        for (j = 0; ignored_propnames[j]; j++)
          if (!g_strcmp0 (ignored_propnames[j], property->name))
            ignore = TRUE;

      if (ignore)
        continue;

      g_value_init (&value, property->value_type);
      g_object_get_property (G_OBJECT (object), property->name, &value);
      if (!(g_param_value_defaults (property, &value))) {
        tmp = g_strdup_value_contents (&value);
        value_str = g_strescape (tmp, NULL);
        g_free (tmp);

        /* too long, ellipsize */
        if (!(details & GST_DEBUG_GRAPH_SHOW_FULL_PARAMS) &&
            strlen (value_str) > PARAM_MAX_LENGTH)
          ellipses = "…";
        else
          ellipses = "";

        if (param_name)
          tmp = param_name;
        else
          tmp = (char *) "";

        if (details & GST_DEBUG_GRAPH_SHOW_FULL_PARAMS) {
          param_name = g_strdup_printf ("%s\\n%s=%s", tmp, property->name,
              value_str);
        } else {
          param_name = g_strdup_printf ("%s\\n%s=%."
              G_STRINGIFY (PARAM_MAX_LENGTH) "s%s", tmp, property->name,
              value_str, ellipses);
        }

        if (tmp[0] != '\0')
          g_free (tmp);

        g_free (value_str);
      }
      g_value_unset (&value);
    }
    g_free (properties);
  }
  return param_name;
}
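Stripped of the ellipsizing and escaping, the core of the function is: list the class's properties, read every readable one, and keep those that differ from their defaults. A distilled sketch that prints instead of building a string:

static void
print_non_default_props (GObject *object)
{
  guint n, i;
  GParamSpec **props =
      g_object_class_list_properties (G_OBJECT_GET_CLASS (object), &n);

  for (i = 0; i < n; i++) {
    GValue value = G_VALUE_INIT;

    if (!(props[i]->flags & G_PARAM_READABLE))
      continue;

    g_value_init (&value, props[i]->value_type);
    g_object_get_property (object, props[i]->name, &value);
    if (!g_param_value_defaults (props[i], &value)) {
      gchar *s = g_strdup_value_contents (&value);
      g_print ("%s=%s\n", props[i]->name, s);
      g_free (s);
    }
    g_value_unset (&value);
  }
  g_free (props);
}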
Code example #11
GeglBuffer *
gegl_operation_context_get_target (GeglOperationContext *context,
                                   const gchar          *padname)
{
  GeglBuffer          *output;
  const GeglRectangle *result;
  const Babl          *format;
  GeglNode            *node;
  GeglOperation       *operation;

#if 0
  g_return_val_if_fail (GEGL_IS_OPERATION_CONTEXT (context), NULL);
#endif

  operation = context->operation;
  node = operation->node; /* <ick */
  format = gegl_operation_get_format (operation, padname);

  if (format == NULL)
    {
      g_warning ("no format for %s presuming RGBA float\n",
                 gegl_node_get_debug_name (node));
      format = babl_format ("RGBA float");
    }
  g_assert (format != NULL);
  g_assert (!strcmp (padname, "output"));

  result = &context->result_rect;

  if (result->width == 0 ||
      result->height == 0)
    {
      output = g_object_ref (emptybuf());
    }
  else if (node->dont_cache == FALSE &&
      ! GEGL_OPERATION_CLASS (G_OBJECT_GET_CLASS (operation))->no_cache)
    {
      GeglBuffer    *cache;
      cache = GEGL_BUFFER (gegl_node_get_cache (node));

      /* Only use the cache if the result is within the cache
       * extent. This is certainly not optimal. My gut feeling is that
       * the current caching mechanism needs to be redesigned
       */
      if (gegl_rectangle_contains (gegl_buffer_get_extent (cache), result))
        {
          output = g_object_ref (cache);
        }
      else
        {
          output = gegl_buffer_new_ram (result, format);
        }
    }
  else
    {
      output = gegl_buffer_new_ram (result, format);
    }

  gegl_operation_context_take_object (context, padname, G_OBJECT (output));

  return output;
}
Code example #12
static void
impl_activate (PeasActivatable *activatable)
{
	RBVisualizerPlugin *pi = RB_VISUALIZER_PLUGIN (activatable);
	RBDisplayPageGroup *page_group;
	RhythmDBEntry *entry;
	GSimpleAction *fullscreen;
	RBShell *shell;
	GMenu *menu;

	g_object_get (pi, "object", &shell, NULL);

	pi->settings = g_settings_new ("org.gnome.rhythmbox.plugins.visualizer");
	g_signal_connect_object (pi->settings, "changed", G_CALLBACK (settings_changed_cb), pi, 0);

	/* create UI actions and menus and stuff */
	fullscreen = g_simple_action_new_stateful ("visualizer-toggle", G_VARIANT_TYPE_BOOLEAN, g_variant_new_boolean (FALSE));
	menu = rb_visualizer_create_popup_menu ("app.visualizer-toggle");
	g_object_ref_sink (menu);

	/* create visualizer page */
	pi->page = rb_visualizer_page_new (G_OBJECT (pi), shell, fullscreen, G_MENU_MODEL (menu));
	g_signal_connect_object (pi->page, "start", G_CALLBACK (start_visualizer_cb), pi, 0);
	g_signal_connect_object (pi->page, "stop", G_CALLBACK (stop_visualizer_cb), pi, 0);

	/* don't do anything if we couldn't create a video sink (clutter is broken, etc.) */
	g_object_get (pi->page, "sink", &pi->sink, NULL);
	if (pi->sink == NULL) {
		g_object_unref (shell);
		return;
	}

	/* prepare style stuff for fullscreen display */
	rb_visualizer_fullscreen_load_style (G_OBJECT (pi));

	/* add the visualizer page to the UI */
	page_group = rb_display_page_group_get_by_id ("display");
	if (page_group == NULL) {
		page_group = rb_display_page_group_new (G_OBJECT (shell),
							"display",
							_("Display"),
							RB_DISPLAY_PAGE_GROUP_CATEGORY_TOOLS);
		rb_shell_append_display_page (shell, RB_DISPLAY_PAGE (page_group), NULL);
	}
	g_object_set (pi->page, "visibility", FALSE, NULL);

	rb_shell_append_display_page (shell, RB_DISPLAY_PAGE (pi->page), RB_DISPLAY_PAGE (page_group));

	/* get player objects */
	g_object_get (shell, "shell-player", &pi->shell_player, NULL);
	g_object_get (pi->shell_player, "player", &pi->player, NULL);

	/* only show the page in the page tree when playing something */
	g_signal_connect_object (pi->shell_player, "playing-song-changed", G_CALLBACK (playing_song_changed_cb), pi, 0);
	entry = rb_shell_player_get_playing_entry (pi->shell_player);
	playing_song_changed_cb (pi->shell_player, entry, pi);
	if (entry != NULL) {
		rhythmdb_entry_unref (entry);
	}

	/* figure out how to insert the visualizer into the playback pipeline */
	if (g_object_class_find_property (G_OBJECT_GET_CLASS (pi->player), "playbin")) {

		rb_debug ("using playbin-based visualization");
		pi->playbin_notify_id = g_signal_connect_object (pi->player,
								 "notify::playbin",
								 G_CALLBACK (playbin_notify_cb),
								 pi,
								 0);
		g_object_get (pi->player, "playbin", &pi->playbin, NULL);
		if (pi->playbin != NULL) {
			mutate_playbin (pi, pi->playbin);
		}
	} else if (RB_IS_PLAYER_GST_TEE (pi->player)) {
		rb_debug ("using tee-based visualization");
	} else {
		g_warning ("unknown player backend type");
		g_object_unref (pi->player);
		pi->player = NULL;
	}

	g_object_unref (shell);
}
Code example #13
/**
 * gom_command_builder_build_update:
 * @builder: A #GomCommandBuilder.
 * @resource: a #GomResource
 *
 * Builds a new #GomCommand that will update the contents stored for @resource
 * in the underlying database.
 *
 * Returns: (transfer full): A #GomCommand.
 */
GomCommand *
gom_command_builder_build_update (GomCommandBuilder *builder,
                                  GomResource       *resource)
{
   GomCommandBuilderPrivate *priv;
   GomResourceClass *klass;
   GomCommand *command = NULL;
   GParamSpec **pspecs = NULL;
   gboolean did_pspec = FALSE;
   GString *str = NULL;
   guint n_pspecs = 0;
   guint i = 0;
   guint idx = 0;

   g_return_val_if_fail(GOM_IS_COMMAND_BUILDER(builder), NULL);

   priv = builder->priv;

   klass = g_type_class_ref(priv->resource_type);

   pspecs = g_object_class_list_properties(G_OBJECT_CLASS(klass), &n_pspecs);

   str = g_string_new("UPDATE ");
   g_string_append_printf(str, "%s SET ", klass->table);

   for (i = 0; i < n_pspecs; i++) {
      if (do_prop_on_insert(pspecs[i], klass, priv->resource_type)) {
         if (did_pspec) {
            g_string_append(str, ", ");
         }
         g_string_append_printf(str, "'%s' = ?", pspecs[i]->name);
         did_pspec = TRUE;
      }
   }

   g_string_append_printf(str, " WHERE '%s'.'%s' = ?;",
                          klass->table, klass->primary_key);

   command = g_object_new(GOM_TYPE_COMMAND,
                          "adapter", priv->adapter,
                          "sql", str->str,
                          NULL);

   for (i = 0; i < n_pspecs; i++) {
      if (do_prop_on_insert(pspecs[i], klass, priv->resource_type)) {
         GValue value = { 0 };

         resource_get_property(G_OBJECT(resource), pspecs[i]->name, &value);
         gom_command_set_param(command, idx++, &value);
         g_value_unset(&value);
      }
   }

   {
      GParamSpec *pspec;
      GValue value = { 0 };

      pspec = g_object_class_find_property(G_OBJECT_GET_CLASS(resource),
                                           klass->primary_key);
      g_assert(pspec);

      g_value_init(&value, pspec->value_type);
      g_object_get_property(G_OBJECT(resource), pspec->name, &value);
      gom_command_set_param(command, idx++, &value);
      g_value_unset(&value);
   }

   g_type_class_unref(klass);

   if (str) {
      g_string_free(str, TRUE);
   }

   g_free(pspecs);

   return command;
}
Code example #14
static void
gegl_node_set_props (GeglNode *node,
                     va_list   var_args)
{
  const char *property_name;

  g_object_freeze_notify (G_OBJECT (node));

  property_name = va_arg (var_args, gchar *);
  while (property_name)
    {
      GValue      value = { 0, };
      GParamSpec *pspec = NULL;
      gchar      *error = NULL;

      if (!strcmp (property_name, "name"))
        {
          pspec = g_object_class_find_property (
            G_OBJECT_GET_CLASS (G_OBJECT (node)), property_name);

          g_value_init (&value, G_PARAM_SPEC_VALUE_TYPE (pspec));
          G_VALUE_COLLECT (&value, var_args, 0, &error);
          if (error)
            {
              g_warning ("%s: %s", G_STRFUNC, error);
              g_free (error);
              g_value_unset (&value);
              break;
            }
          g_object_set_property (G_OBJECT (node), property_name, &value);
          g_value_unset (&value);
        }
      else
        {
          if (node->operation)
            {
              pspec = g_object_class_find_property (
                G_OBJECT_GET_CLASS (G_OBJECT (node->operation)), property_name);
            }
          if (!pspec)
            {
              g_warning ("%s:%s has no property named: '%s'",
                         G_STRFUNC,
                         gegl_node_get_debug_name (node), property_name);
              break;
            }
          if (!(pspec->flags & G_PARAM_WRITABLE))
            {
              g_warning ("%s: property (%s of operation class '%s' is not writable",
                         G_STRFUNC,
                         pspec->name,
                         G_OBJECT_TYPE_NAME (node->operation));
              break;
            }

          g_value_init (&value, G_PARAM_SPEC_VALUE_TYPE (pspec));
          G_VALUE_COLLECT (&value, var_args, 0, &error);
          if (error)
            {
              g_warning ("%s: %s", G_STRFUNC, error);
              g_free (error);
              g_value_unset (&value);
              break;
            }
          g_object_set_property (G_OBJECT (node->operation), property_name, &value);
          g_value_unset (&value);
        }
      property_name = va_arg (var_args, gchar *);
    }
  g_object_thaw_notify (G_OBJECT (node));
}
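The vararg machinery is the reusable part here: look up the GParamSpec, init a GValue of the pspec's value type, and let G_VALUE_COLLECT pull one argument off the va_list. A single-property sketch with the error handling trimmed to the essentials:

#include <glib-object.h>
#include <gobject/gvaluecollector.h>   /* G_VALUE_COLLECT */

static void
object_set_one (GObject *object, const gchar *name, ...)
{
  GParamSpec *pspec =
      g_object_class_find_property (G_OBJECT_GET_CLASS (object), name);
  GValue value = G_VALUE_INIT;
  gchar *error = NULL;
  va_list args;

  if (pspec == NULL || !(pspec->flags & G_PARAM_WRITABLE))
    return;

  va_start (args, name);
  g_value_init (&value, G_PARAM_SPEC_VALUE_TYPE (pspec));
  G_VALUE_COLLECT (&value, args, 0, &error);
  va_end (args);

  if (error != NULL) {
    g_warning ("%s", error);
    g_free (error);
  } else {
    g_object_set_property (object, name, &value);
  }
  g_value_unset (&value);
}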
Code example #15
File: gstdshowvideodec.cpp Project: zsx/ossbuild
HRESULT VideoFakeSink::DoRenderSample(IMediaSample *pMediaSample)
{
  gboolean in_seg = FALSE;
  gint64 clip_start = 0, clip_stop = 0;
  GstDshowVideoDecClass *klass =
      (GstDshowVideoDecClass *) G_OBJECT_GET_CLASS (mDec);
  GstBuffer *buf = NULL;
  GstClockTime start, stop;

  if(pMediaSample)
  {
    BYTE *pBuffer = NULL;
    LONGLONG lStart = 0, lStop = 0;
    long size = pMediaSample->GetActualDataLength();

    pMediaSample->GetPointer(&pBuffer);
    pMediaSample->GetTime(&lStart, &lStop);

    start = lStart * 100;
    stop = lStop * 100;
    /* check if this buffer is in our current segment */
    in_seg = gst_segment_clip (mDec->segment, GST_FORMAT_TIME,
        start, stop, &clip_start, &clip_stop);

    /* if the buffer is out of segment do not push it downstream */
    if (!in_seg) {
      GST_DEBUG_OBJECT (mDec,
        "buffer is out of segment, start %" GST_TIME_FORMAT " stop %"
        GST_TIME_FORMAT, GST_TIME_ARGS (start), GST_TIME_ARGS (stop));
      goto done;
    }

    /* buffer is in our segment, allocate a new out buffer and clip its
     * timestamps */
    mDec->last_ret = gst_pad_alloc_buffer (mDec->srcpad, 
        GST_BUFFER_OFFSET_NONE,
        size, 
        GST_PAD_CAPS (mDec->srcpad), &buf);
    if (!buf) {
      GST_WARNING_OBJECT (mDec,
          "cannot allocate a new GstBuffer");
      goto done;
    }

    /* set buffer properties */
    GST_BUFFER_TIMESTAMP (buf) = clip_start;
    GST_BUFFER_DURATION (buf) = clip_stop - clip_start;

    if (strstr (klass->entry->srccaps, "rgb")) {
      /* For RGB the DirectShow decoder returns a bottom-up BITMAP,
       * so copy the lines in reverse order. There is probably a way
       * to get top-down video frames from the decoder...
       */
      gint line = 0;
      guint stride = mDec->width * 4;

      for (; line < mDec->height; line++) {
        memcpy (GST_BUFFER_DATA (buf) + (line * stride),
            pBuffer + (size - ((line + 1) * (stride))), stride);
      }
    } else {
      memcpy (GST_BUFFER_DATA (buf), pBuffer, MIN ((unsigned int)size, GST_BUFFER_SIZE (buf)));
    }

    GST_LOG_OBJECT (mDec,
        "push_buffer (size %d)=> pts %" GST_TIME_FORMAT " stop %" GST_TIME_FORMAT
        " duration %" GST_TIME_FORMAT, size,
        GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
        GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf) + GST_BUFFER_DURATION (buf)),
        GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));

    /* push the buffer downstream */
    mDec->last_ret = gst_pad_push (mDec->srcpad, buf);
  }
done:

  return S_OK;
}
Code example #16
File: nemo-directory.c Project: daschuer/nemo
GList *
nemo_directory_get_file_list (NemoDirectory *directory)
{
	return NEMO_DIRECTORY_CLASS (G_OBJECT_GET_CLASS (directory))->get_file_list (directory);
}
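This one-liner is standard GObject virtual-method dispatch: G_OBJECT_GET_CLASS() fetches the instance's class struct, the NEMO_DIRECTORY_CLASS() macro casts it, and the function pointer stored in it is called. A self-contained sketch of the same machinery with a hypothetical MyDir type (not Nemo's actual boilerplate):

#include <glib-object.h>

#define MY_TYPE_DIR (my_dir_get_type ())
G_DECLARE_DERIVABLE_TYPE (MyDir, my_dir, MY, DIR, GObject)

struct _MyDirClass {
  GObjectClass parent_class;
  gboolean (* is_editable) (MyDir *self);   /* the vfunc */
};

G_DEFINE_TYPE (MyDir, my_dir, G_TYPE_OBJECT)

static gboolean
my_dir_real_is_editable (MyDir *self)
{
  return TRUE;                              /* base-class default */
}

static void
my_dir_class_init (MyDirClass *klass)
{
  klass->is_editable = my_dir_real_is_editable;  /* subclasses may override */
}

static void
my_dir_init (MyDir *self)
{
}

/* public wrapper: same shape as the nemo_directory_* one-liners */
gboolean
my_dir_is_editable (MyDir *self)
{
  return MY_DIR_GET_CLASS (self)->is_editable (self);
}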
Code example #17
File: gstdshowvideodec.cpp Project: zsx/ossbuild
static gboolean
gst_dshowvideodec_sink_setcaps (GstPad * pad, GstCaps * caps)
{
  gboolean ret = FALSE;
  HRESULT hres;
  GstStructure *s = gst_caps_get_structure (caps, 0);
  GstDshowVideoDec *vdec = (GstDshowVideoDec *) gst_pad_get_parent (pad);
  GstDshowVideoDecClass *klass =
      (GstDshowVideoDecClass *) G_OBJECT_GET_CLASS (vdec);
  GstBuffer *extradata = NULL;
  const GValue *v = NULL;
  guint size = 0;
  GstCaps *caps_out;
  AM_MEDIA_TYPE output_mediatype, input_mediatype;
  VIDEOINFOHEADER *input_vheader = NULL, *output_vheader = NULL;
  CComPtr<IPin> output_pin;
  CComPtr<IPin> input_pin;
  IBaseFilter *srcfilter = NULL;
  IBaseFilter *sinkfilter = NULL;
  const GValue *fps, *par;

  /* read data */
  if (!gst_structure_get_int (s, "width", &vdec->width) ||
      !gst_structure_get_int (s, "height", &vdec->height)) {
    GST_ELEMENT_ERROR (vdec, CORE, NEGOTIATION,
        ("error getting video width or height from caps"), (NULL));
    goto end;
  }
  fps = gst_structure_get_value (s, "framerate");
  if (fps) {
    vdec->fps_n = gst_value_get_fraction_numerator (fps);
    vdec->fps_d = gst_value_get_fraction_denominator (fps);
  }
  else {
    /* Invent a sane default framerate; the timestamps matter
     * more anyway. */
    vdec->fps_n = 25;
    vdec->fps_d = 1;
  }

  par = gst_structure_get_value (s, "pixel-aspect-ratio");
  if (par) {
    vdec->par_n = gst_value_get_fraction_numerator (par);
    vdec->par_d = gst_value_get_fraction_denominator (par);
  }
  else {
    vdec->par_n = vdec->par_d = 1;
  }

  if ((v = gst_structure_get_value (s, "codec_data")))
    extradata = gst_value_get_buffer (v);

  /* define the input type format */
  memset (&input_mediatype, 0, sizeof (AM_MEDIA_TYPE));
  input_mediatype.majortype = klass->entry->input_majortype;
  input_mediatype.subtype = klass->entry->input_subtype;
  input_mediatype.bFixedSizeSamples = FALSE;
  input_mediatype.bTemporalCompression = TRUE;

  if (strstr (klass->entry->sinkcaps, "video/mpeg, mpegversion= (int) 1")) {
    size =
        sizeof (MPEG1VIDEOINFO) + (extradata ? GST_BUFFER_SIZE (extradata) -
        1 : 0);
    input_vheader = (VIDEOINFOHEADER *)g_malloc0 (size);

    input_vheader->bmiHeader.biSize = sizeof (BITMAPINFOHEADER);
    if (extradata) {
      MPEG1VIDEOINFO *mpeg_info = (MPEG1VIDEOINFO *) input_vheader;

      memcpy (mpeg_info->bSequenceHeader,
          GST_BUFFER_DATA (extradata), GST_BUFFER_SIZE (extradata));
      mpeg_info->cbSequenceHeader = GST_BUFFER_SIZE (extradata);
    }
    input_mediatype.formattype = FORMAT_MPEGVideo;
  } else {
    size =
        sizeof (VIDEOINFOHEADER) +
        (extradata ? GST_BUFFER_SIZE (extradata) : 0);
    input_vheader = (VIDEOINFOHEADER *)g_malloc0 (size);

    input_vheader->bmiHeader.biSize = sizeof (BITMAPINFOHEADER);
    if (extradata) {            /* Codec data is appended after our header */
      memcpy (((guchar *) input_vheader) + sizeof (VIDEOINFOHEADER),
          GST_BUFFER_DATA (extradata), GST_BUFFER_SIZE (extradata));
      input_vheader->bmiHeader.biSize += GST_BUFFER_SIZE (extradata);
    }
    input_mediatype.formattype = FORMAT_VideoInfo;
  }
  input_vheader->rcSource.top = input_vheader->rcSource.left = 0;
  input_vheader->rcSource.right = vdec->width;
  input_vheader->rcSource.bottom = vdec->height;
  input_vheader->rcTarget = input_vheader->rcSource;
  input_vheader->bmiHeader.biWidth = vdec->width;
  input_vheader->bmiHeader.biHeight = vdec->height;
  input_vheader->bmiHeader.biPlanes = 1;
  input_vheader->bmiHeader.biBitCount = 16;
  input_vheader->bmiHeader.biCompression = klass->entry->format;
  input_vheader->bmiHeader.biSizeImage =
      (vdec->width * vdec->height) * (input_vheader->bmiHeader.biBitCount / 8);

  input_mediatype.cbFormat = size;
  input_mediatype.pbFormat = (BYTE *) input_vheader;
  input_mediatype.lSampleSize = input_vheader->bmiHeader.biSizeImage;

  vdec->fakesrc->GetOutputPin()->SetMediaType(&input_mediatype);

  /* set the sample size for fakesrc filter to the output buffer size */
  vdec->fakesrc->GetOutputPin()->SetSampleSize(input_mediatype.lSampleSize);

  /* connect our fake src to decoder */
  hres = vdec->fakesrc->QueryInterface(IID_IBaseFilter,
      (void **) &srcfilter);
  if (FAILED (hres)) {
    GST_ELEMENT_ERROR (vdec, CORE, NEGOTIATION,
      ("Can't QT fakesrc to IBaseFilter: %x", hres), (NULL));
    goto end;
  }

  output_pin = gst_dshow_get_pin_from_filter (srcfilter, PINDIR_OUTPUT);
  if (!output_pin) {
    GST_ELEMENT_ERROR (vdec, CORE, NEGOTIATION,
        ("Can't get output pin from our directshow fakesrc filter"), (NULL));
    goto end;
  }
  input_pin = gst_dshow_get_pin_from_filter (vdec->decfilter, PINDIR_INPUT);
  if (!input_pin) {
    GST_ELEMENT_ERROR (vdec, CORE, NEGOTIATION,
        ("Can't get input pin from decoder filter"), (NULL));
    goto end;
  }

  hres = vdec->filtergraph->ConnectDirect (output_pin, input_pin, NULL);
  if (hres != S_OK) {
    GST_ELEMENT_ERROR (vdec, CORE, NEGOTIATION,
        ("Can't connect fakesrc with decoder (error=%x)", hres), (NULL));
    goto end;
  }

  /* get decoder output video format */
  if (!gst_dshowvideodec_get_filter_output_format (vdec,
          klass->entry->output_subtype, &output_vheader, &size)) {
    GST_ELEMENT_ERROR (vdec, CORE, NEGOTIATION,
        ("Can't get decoder output video format"), (NULL));
    goto end;
  }

  memset (&output_mediatype, 0, sizeof (AM_MEDIA_TYPE));
  output_mediatype.majortype = klass->entry->output_majortype;
  output_mediatype.subtype = klass->entry->output_subtype;
  output_mediatype.bFixedSizeSamples = TRUE;
  output_mediatype.bTemporalCompression = FALSE;
  output_mediatype.lSampleSize = output_vheader->bmiHeader.biSizeImage;
  output_mediatype.formattype = FORMAT_VideoInfo;
  output_mediatype.cbFormat = size;
  output_mediatype.pbFormat = (BYTE *) output_vheader;

  vdec->fakesink->SetMediaType (&output_mediatype);

  /* connect decoder to our fake sink */
  output_pin = gst_dshow_get_pin_from_filter (vdec->decfilter, PINDIR_OUTPUT);
  if (!output_pin) {
    GST_ELEMENT_ERROR (vdec, CORE, NEGOTIATION,
        ("Can't get output pin from our decoder filter"), (NULL));
    goto end;
  }

  hres = vdec->fakesink->QueryInterface(IID_IBaseFilter,
      (void **) &sinkfilter);
  if (FAILED (hres)) {
    GST_ELEMENT_ERROR (vdec, CORE, NEGOTIATION,
      ("Can't QT fakesink to IBaseFilter: %x", hres), (NULL));
    goto end;
  }

  input_pin = gst_dshow_get_pin_from_filter (sinkfilter, PINDIR_INPUT);
  if (!input_pin) {
    GST_ELEMENT_ERROR (vdec, CORE, NEGOTIATION,
        ("Can't get input pin from our directshow fakesink filter"), (NULL));
    goto end;
  }

  hres = vdec->filtergraph->ConnectDirect(output_pin, input_pin,
      &output_mediatype);
  if (hres != S_OK) {
    GST_ELEMENT_ERROR (vdec, CORE, NEGOTIATION,
        ("Can't connect decoder with fakesink (error=%x)", hres), (NULL));
    goto end;
  }

  /* negotiate output */
  caps_out = gst_caps_from_string (klass->entry->srccaps);
  gst_caps_set_simple (caps_out,
      "width", G_TYPE_INT, vdec->width,
      "height", G_TYPE_INT, vdec->height, NULL);

  if (vdec->fps_n && vdec->fps_d) {
      gst_caps_set_simple (caps_out,
          "framerate", GST_TYPE_FRACTION, vdec->fps_n, vdec->fps_d, NULL);
  }

  gst_caps_set_simple (caps_out, 
      "pixel-aspect-ratio", GST_TYPE_FRACTION, vdec->par_n, vdec->par_d, NULL);

  if (!gst_pad_set_caps (vdec->srcpad, caps_out)) {
    gst_caps_unref (caps_out);
    GST_ELEMENT_ERROR (vdec, CORE, NEGOTIATION,
        ("Failed to negotiate output"), (NULL));
    goto end;
  }
  gst_caps_unref (caps_out);

  hres = vdec->mediafilter->Run (-1);
  if (hres != S_OK) {
    GST_ELEMENT_ERROR (vdec, CORE, NEGOTIATION,
        ("Can't run the directshow graph (error=%d)", hres), (NULL));
    goto end;
  }

  ret = TRUE;
end:
  gst_object_unref (vdec);
  if (input_vheader)
    g_free (input_vheader);
  if (srcfilter)
    srcfilter->Release();
  if (sinkfilter)
    sinkfilter->Release();
  return ret;
}
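
A note on the shape of the function above: every QueryInterface() that succeeds is balanced by a Release() under the single end: label, so all exit paths drop their COM references. A minimal sketch of that pattern, assuming an IUnknown *unknown is in scope:

IBaseFilter *filter = NULL;
HRESULT hr = unknown->QueryInterface (IID_IBaseFilter, (void **) &filter);
if (FAILED (hr))
  goto end;
/* ... use filter ... */
end:
  if (filter)
    filter->Release ();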
Code Example #18
File: nemo-directory.c Project: daschuer/nemo
gboolean
nemo_directory_is_editable (NemoDirectory *directory)
{
	return NEMO_DIRECTORY_CLASS (G_OBJECT_GET_CLASS (directory))->is_editable (directory);
}
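
Example #18 is pure GObject vtable dispatch: G_OBJECT_GET_CLASS() resolves is_editable on whatever NemoDirectory subclass the instance happens to be. For illustration, a hedged sketch of the other side of that contract; ExampleDirectory and its class_init are hypothetical, not Nemo API:

static gboolean
example_directory_is_editable (NemoDirectory *directory)
{
	/* a virtual directory (search results, say) is typically read-only */
	return FALSE;
}

static void
example_directory_class_init (ExampleDirectoryClass *klass)
{
	NEMO_DIRECTORY_CLASS (klass)->is_editable = example_directory_is_editable;
}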
Code Example #19
File: mediamanager.c Project: psunkari/spicebird
gboolean
purple_media_manager_create_output_window(PurpleMediaManager *manager,
		PurpleMedia *media, const gchar *session_id,
		const gchar *participant)
{
#ifdef USE_VV
	GList *iter;

	g_return_val_if_fail(PURPLE_IS_MEDIA(media), FALSE);

	iter = manager->priv->output_windows;
	for(; iter; iter = g_list_next(iter)) {
		PurpleMediaOutputWindow *ow = iter->data;

		if (ow->sink == NULL && ow->media == media &&
				((participant != NULL &&
				ow->participant != NULL &&
				!strcmp(participant, ow->participant)) ||
				(participant == ow->participant)) &&
				!strcmp(session_id, ow->session_id)) {
			GstBus *bus;
			GstElement *queue, *colorspace;
			GstElement *tee = purple_media_get_tee(media,
					session_id, participant);

			if (tee == NULL)
				continue;

			queue = gst_element_factory_make(
					"queue", NULL);
			colorspace = gst_element_factory_make(
					"ffmpegcolorspace", NULL);
			ow->sink = purple_media_manager_get_element(
					manager, PURPLE_MEDIA_RECV_VIDEO,
					ow->media, ow->session_id,
					ow->participant);

			if (participant == NULL) {
				/* aka this is a preview sink */
				GObjectClass *klass =
						G_OBJECT_GET_CLASS(ow->sink);
				if (g_object_class_find_property(klass,
						"sync"))
					g_object_set(G_OBJECT(ow->sink),
							"sync", "FALSE", NULL);
				if (g_object_class_find_property(klass,
						"async"))
					g_object_set(G_OBJECT(ow->sink),
							"async", FALSE, NULL);
			}

			gst_bin_add_many(GST_BIN(GST_ELEMENT_PARENT(tee)),
					queue, colorspace, ow->sink, NULL);

			bus = gst_pipeline_get_bus(GST_PIPELINE(
					manager->priv->pipeline));
			g_signal_connect(bus, "sync-message::element",
					G_CALLBACK(window_id_cb), ow);
			gst_object_unref(bus);

			gst_element_set_state(ow->sink, GST_STATE_PLAYING);
			gst_element_set_state(colorspace, GST_STATE_PLAYING);
			gst_element_set_state(queue, GST_STATE_PLAYING);
			gst_element_link(colorspace, ow->sink);
			gst_element_link(queue, colorspace);
			gst_element_link(tee, queue);
		}
	}
	return TRUE;
#else
	return FALSE;
#endif
}
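
The attachment order in the loop above is deliberate: the new branch elements are brought to PLAYING first, then linked sink-first back towards the tee, so buffers never flow into a half-built branch of a running pipeline. The same hot-plug pattern in isolation (a sketch; the sink element name is an assumption):

GstElement *queue = gst_element_factory_make ("queue", NULL);
GstElement *sink = gst_element_factory_make ("autovideosink", NULL);

gst_bin_add_many (GST_BIN (GST_ELEMENT_PARENT (tee)), queue, sink, NULL);
gst_element_set_state (sink, GST_STATE_PLAYING);
gst_element_set_state (queue, GST_STATE_PLAYING);
gst_element_link (queue, sink);   /* link the downstream end first */
gst_element_link (tee, queue);    /* open the data tap last */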
Code Example #20
File: main.c Project: sinoory/webv8
static gboolean parseOptionEntryCallback(const gchar *optionNameFull, const gchar *value, WebKitSettings *webSettings, GError **error)
{
    if (strlen(optionNameFull) <= 2) {
        g_set_error(error, G_OPTION_ERROR, G_OPTION_ERROR_FAILED, "Invalid option %s", optionNameFull);
        return FALSE;
    }

    /* We have two -- in option name so remove them. */
    const gchar *optionName = optionNameFull + 2;
    GParamSpec *spec = g_object_class_find_property(G_OBJECT_GET_CLASS(webSettings), optionName);
    if (!spec) {
        g_set_error(error, G_OPTION_ERROR, G_OPTION_ERROR_FAILED, "Cannot find web settings for option %s", optionNameFull);
        return FALSE;
    }

    switch (G_PARAM_SPEC_VALUE_TYPE(spec)) {
    case G_TYPE_BOOLEAN: {
        gboolean propertyValue = !(value && g_ascii_strcasecmp(value, "true") && strcmp(value, "1"));
        g_object_set(G_OBJECT(webSettings), optionName, propertyValue, NULL);
        break;
    }
    case G_TYPE_STRING:
        g_object_set(G_OBJECT(webSettings), optionName, value, NULL);
        break;
    case G_TYPE_INT: {
        glong propertyValue;
        gchar *end;

        errno = 0;
        propertyValue = g_ascii_strtoll(value, &end, 0);
        if (errno == ERANGE || propertyValue > G_MAXINT || propertyValue < G_MININT) {
            g_set_error(error, G_OPTION_ERROR, G_OPTION_ERROR_BAD_VALUE, "Integer value '%s' for %s out of range", value, optionNameFull);
            return FALSE;
        }
        if (errno || value == end) {
            g_set_error(error, G_OPTION_ERROR, G_OPTION_ERROR_BAD_VALUE, "Cannot parse integer value '%s' for %s", value, optionNameFull);
            return FALSE;
        }
        g_object_set(G_OBJECT(webSettings), optionName, propertyValue, NULL);
        break;
    }
    case G_TYPE_FLOAT: {
        gdouble propertyValue;
        gchar *end;

        errno = 0;
        propertyValue = g_ascii_strtod(value, &end);
        if (errno == ERANGE || propertyValue > G_MAXFLOAT || propertyValue < G_MINFLOAT) {
            g_set_error(error, G_OPTION_ERROR, G_OPTION_ERROR_BAD_VALUE, "Float value '%s' for %s out of range", value, optionNameFull);
            return FALSE;
        }
        if (errno || value == end) {
            g_set_error(error, G_OPTION_ERROR, G_OPTION_ERROR_BAD_VALUE, "Cannot parse float value '%s' for %s", value, optionNameFull);
            return FALSE;
        }
        g_object_set(G_OBJECT(webSettings), optionName, propertyValue, NULL);
        break;
    }
    default:
        g_assert_not_reached();
    }

    return TRUE;
}
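
For context, a callback with this shape is normally registered through a GOptionEntry of type G_OPTION_ARG_CALLBACK on a group whose user data is the WebKitSettings instance. A hedged sketch; the option name, group name, and the surrounding GOptionContext *context are assumptions for illustration:

static const GOptionEntry entries[] = {
    { "enable-javascript", 0, G_OPTION_FLAG_OPTIONAL_ARG, G_OPTION_ARG_CALLBACK,
      (gpointer) parseOptionEntryCallback, "Toggle a boolean web setting", "VALUE" },
    { NULL }
};

GOptionGroup *group = g_option_group_new ("websettings", "WebKit settings",
    "Show WebKit settings", webSettings, NULL);
g_option_group_add_entries (group, entries);
g_option_context_add_group (context, group);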
Code Example #21
bool GstEnginePipeline::Init() {
  // Here we create all the parts of the gstreamer pipeline - from the source
  // to the sink.  The parts of the pipeline are split up into bins:
  //   uri decode bin -> audio bin
  // The uri decode bin is a gstreamer builtin that automatically picks the
  // right type of source and decoder for the URI.

  // The audio bin gets created here and contains:
  //   queue ! audioconvert ! <caps32>
  //         ! ( rgvolume ! rglimiter ! audioconvert2 ) ! tee
  // rgvolume and rglimiter are only created when replaygain is enabled.

  // After the tee the pipeline splits.  One split is converted to 16-bit int
  // samples for the scope, the other is kept as float32 and sent to the
  // speaker.
  //   tee1 ! probe_queue ! probe_converter ! <caps16> ! probe_sink
  //   tee2 ! audio_queue ! equalizer_preamp ! equalizer ! volume ! audioscale
  //        ! convert ! audiosink

  gst_segment_init(&last_decodebin_segment_, GST_FORMAT_TIME);

  // Audio bin
  audiobin_ = gst_bin_new("audiobin");
  gst_bin_add(GST_BIN(pipeline_), audiobin_);

  // Create the sink
  if (!(audiosink_ = engine_->CreateElement(sink_, audiobin_))) return false;

  if (g_object_class_find_property(G_OBJECT_GET_CLASS(audiosink_), "device") &&
      !device_.toString().isEmpty()) {
    switch (device_.type()) {
      case QVariant::Int:
        g_object_set(G_OBJECT(audiosink_), "device", device_.toInt(), nullptr);
        break;
      case QVariant::String:
        g_object_set(G_OBJECT(audiosink_), "device",
                     device_.toString().toUtf8().constData(), nullptr);
        break;

#ifdef Q_OS_WIN32
      case QVariant::ByteArray: {
        GUID guid = QUuid(device_.toByteArray());
        g_object_set(G_OBJECT(audiosink_), "device", &guid, nullptr);
        break;
      }
#endif  // Q_OS_WIN32

      default:
        qLog(Warning) << "Unknown device type" << device_;
        break;
    }
  }

  // Create all the other elements
  GstElement* tee, *probe_queue, *probe_converter, *probe_sink, *audio_queue,
      *convert;

  queue_ = engine_->CreateElement("queue2", audiobin_);
  audioconvert_ = engine_->CreateElement("audioconvert", audiobin_);
  tee = engine_->CreateElement("tee", audiobin_);

  probe_queue = engine_->CreateElement("queue", audiobin_);
  probe_converter = engine_->CreateElement("audioconvert", audiobin_);
  probe_sink = engine_->CreateElement("fakesink", audiobin_);

  audio_queue = engine_->CreateElement("queue", audiobin_);
  equalizer_preamp_ = engine_->CreateElement("volume", audiobin_);
  equalizer_ = engine_->CreateElement("equalizer-nbands", audiobin_);
  stereo_panorama_ = engine_->CreateElement("audiopanorama", audiobin_);
  volume_ = engine_->CreateElement("volume", audiobin_);
  audioscale_ = engine_->CreateElement("audioresample", audiobin_);
  convert = engine_->CreateElement("audioconvert", audiobin_);

  if (!queue_ || !audioconvert_ || !tee || !probe_queue || !probe_converter ||
      !probe_sink || !audio_queue || !equalizer_preamp_ || !equalizer_ ||
      !stereo_panorama_ || !volume_ || !audioscale_ || !convert) {
    return false;
  }

  // Create the replaygain elements if it's enabled.  event_probe is the
  // audioconvert element we attach the probe to, which will change depending
  // on whether replaygain is enabled.  convert_sink is the element after the
  // first audioconvert, which again will change.
  GstElement* event_probe = audioconvert_;
  GstElement* convert_sink = tee;

  if (rg_enabled_) {
    rgvolume_ = engine_->CreateElement("rgvolume", audiobin_);
    rglimiter_ = engine_->CreateElement("rglimiter", audiobin_);
    audioconvert2_ = engine_->CreateElement("audioconvert", audiobin_);
    event_probe = audioconvert2_;
    convert_sink = rgvolume_;

    if (!rgvolume_ || !rglimiter_ || !audioconvert2_) {
      return false;
    }

    // Set replaygain settings
    g_object_set(G_OBJECT(rgvolume_), "album-mode", rg_mode_, nullptr);
    g_object_set(G_OBJECT(rgvolume_), "pre-amp", double(rg_preamp_), nullptr);
    g_object_set(G_OBJECT(rglimiter_), "enabled", int(rg_compression_),
                 nullptr);
  }

  // Create a pad on the outside of the audiobin and connect it to the pad of
  // the first element.
  GstPad* pad = gst_element_get_static_pad(queue_, "sink");
  gst_element_add_pad(audiobin_, gst_ghost_pad_new("sink", pad));
  gst_object_unref(pad);

  // Add a data probe on the src pad of the audioconvert element for our scope.
  // We do it here because we want pre-equalized and pre-volume samples
  // so that our visualizations are not affected by them.
  pad = gst_element_get_static_pad(event_probe, "src");
  gst_pad_add_probe(pad, GST_PAD_PROBE_TYPE_EVENT_UPSTREAM,
                    &EventHandoffCallback, this, NULL);
  gst_object_unref(pad);

  // Configure the fakesink properly
  g_object_set(G_OBJECT(probe_sink), "sync", TRUE, nullptr);

  // Set the equalizer bands
  g_object_set(G_OBJECT(equalizer_), "num-bands", 10, nullptr);

  int last_band_frequency = 0;
  for (int i = 0; i < kEqBandCount; ++i) {
    GstObject* band = GST_OBJECT(
        gst_child_proxy_get_child_by_index(GST_CHILD_PROXY(equalizer_), i));

    const float frequency = kEqBandFrequencies[i];
    const float bandwidth = frequency - last_band_frequency;
    last_band_frequency = frequency;

    g_object_set(G_OBJECT(band), "freq", frequency, "bandwidth", bandwidth,
                 "gain", 0.0f, nullptr);
    g_object_unref(G_OBJECT(band));
  }

  // Set the stereo balance.
  g_object_set(G_OBJECT(stereo_panorama_), "panorama", stereo_balance_,
               nullptr);

  // Set the buffer duration.  We set this on this queue instead of the
  // decode bin (in ReplaceDecodeBin()) because setting it on the decode bin
  // only affects network sources.
  // Disable the default buffer and byte limits, so we only buffer based on
  // time.
  g_object_set(G_OBJECT(queue_), "max-size-buffers", 0, nullptr);
  g_object_set(G_OBJECT(queue_), "max-size-bytes", 0, nullptr);
  g_object_set(G_OBJECT(queue_), "max-size-time", buffer_duration_nanosec_,
               nullptr);
  g_object_set(G_OBJECT(queue_), "low-percent", buffer_min_fill_, nullptr);

  if (buffer_duration_nanosec_ > 0) {
    g_object_set(G_OBJECT(queue_), "use-buffering", true, nullptr);
  }

  gst_element_link_many(queue_, audioconvert_, convert_sink, nullptr);

  // Link the elements with special caps
  // The scope path through the tee gets 16-bit ints.
  GstCaps* caps16 = gst_caps_new_simple("audio/x-raw", "format", G_TYPE_STRING,
                                        "S16LE", NULL);
  gst_element_link_filtered(probe_converter, probe_sink, caps16);
  gst_caps_unref(caps16);

  // Link the outputs of tee to the queues on each path.
  gst_pad_link(gst_element_get_request_pad(tee, "src_%u"),
               gst_element_get_static_pad(probe_queue, "sink"));
  gst_pad_link(gst_element_get_request_pad(tee, "src_%u"),
               gst_element_get_static_pad(audio_queue, "sink"));

  // Link replaygain elements if enabled.
  if (rg_enabled_) {
    gst_element_link_many(rgvolume_, rglimiter_, audioconvert2_, tee, nullptr);
  }

  // Link everything else.
  gst_element_link(probe_queue, probe_converter);
  gst_element_link_many(audio_queue, equalizer_preamp_, equalizer_,
                        stereo_panorama_, volume_, audioscale_, convert,
                        nullptr);

  // add caps for mono, but only if requested
  if (mono_playback_) {
    GstCaps* capsmono = gst_caps_new_simple("audio/x-raw", "channels",
                                          G_TYPE_INT, 1, nullptr);
    gst_element_link_filtered(convert, audiosink_, capsmono);
    gst_caps_unref(capsmono);
  } else {
    gst_element_link(convert, audiosink_);
  }

  // Add probes and handlers.
  gst_pad_add_probe(gst_element_get_static_pad(probe_converter, "src"),
                    GST_PAD_PROBE_TYPE_BUFFER, HandoffCallback, this, nullptr);
  gst_bus_set_sync_handler(gst_pipeline_get_bus(GST_PIPELINE(pipeline_)),
                           BusCallbackSync, this, nullptr);
  bus_cb_id_ = gst_bus_add_watch(gst_pipeline_get_bus(GST_PIPELINE(pipeline_)),
                                 BusCallback, this);

  MaybeLinkDecodeToAudio();

  return true;
}
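
One detail worth flagging in Init(): the pads obtained for the tee fan-out are never unreffed, which leaks a reference per pad. The same step with the references released, reusing tee and probe_queue from the code above:

GstPad* src = gst_element_get_request_pad(tee, "src_%u");
GstPad* sink = gst_element_get_static_pad(probe_queue, "sink");
gst_pad_link(src, sink);
// Drop our refs; the tee keeps the request pad alive until
// gst_element_release_request_pad() is called on it.
gst_object_unref(src);
gst_object_unref(sink);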
Code Example #22
File: gstavvidenc.c Project: GStreamer/gst-libav
static GstFlowReturn
gst_ffmpegvidenc_handle_frame (GstVideoEncoder * encoder,
                               GstVideoCodecFrame * frame)
{
    GstFFMpegVidEnc *ffmpegenc = (GstFFMpegVidEnc *) encoder;
    GstBuffer *outbuf;
    gint ret = 0, c;
    GstVideoInfo *info = &ffmpegenc->input_state->info;
    AVPacket *pkt;
    int have_data = 0;
    BufferInfo *buffer_info;

    if (ffmpegenc->interlaced) {
        ffmpegenc->picture->interlaced_frame = TRUE;
        /* if this is not the case, a filter element should be used to swap fields */
        ffmpegenc->picture->top_field_first =
            GST_BUFFER_FLAG_IS_SET (frame->input_buffer, GST_VIDEO_BUFFER_FLAG_TFF);
    }

    if (GST_VIDEO_CODEC_FRAME_IS_FORCE_KEYFRAME (frame))
        ffmpegenc->picture->pict_type = AV_PICTURE_TYPE_I;

    buffer_info = g_slice_new0 (BufferInfo);
    buffer_info->buffer = gst_buffer_ref (frame->input_buffer);

    if (!gst_video_frame_map (&buffer_info->vframe, info, frame->input_buffer,
                              GST_MAP_READ)) {
        GST_ERROR_OBJECT (encoder, "Failed to map input buffer");
        gst_buffer_unref (buffer_info->buffer);
        g_slice_free (BufferInfo, buffer_info);
        gst_video_codec_frame_unref (frame);
        return GST_FLOW_ERROR;
    }

    /* Fill avpicture */
    ffmpegenc->picture->buf[0] =
        av_buffer_create (NULL, 0, buffer_info_free, buffer_info, 0);
    for (c = 0; c < AV_NUM_DATA_POINTERS; c++) {
        if (c < GST_VIDEO_INFO_N_COMPONENTS (info)) {
            ffmpegenc->picture->data[c] =
                GST_VIDEO_FRAME_PLANE_DATA (&buffer_info->vframe, c);
            ffmpegenc->picture->linesize[c] =
                GST_VIDEO_FRAME_COMP_STRIDE (&buffer_info->vframe, c);
        } else {
            ffmpegenc->picture->data[c] = NULL;
            ffmpegenc->picture->linesize[c] = 0;
        }
    }

    ffmpegenc->picture->format = ffmpegenc->context->pix_fmt;
    ffmpegenc->picture->width = GST_VIDEO_FRAME_WIDTH (&buffer_info->vframe);
    ffmpegenc->picture->height = GST_VIDEO_FRAME_HEIGHT (&buffer_info->vframe);

    ffmpegenc->picture->pts =
        gst_ffmpeg_time_gst_to_ff (frame->pts /
                                   ffmpegenc->context->ticks_per_frame, ffmpegenc->context->time_base);

    have_data = 0;
    pkt = g_slice_new0 (AVPacket);

    ret =
        avcodec_encode_video2 (ffmpegenc->context, pkt, ffmpegenc->picture,
                               &have_data);

    av_frame_unref (ffmpegenc->picture);

    if (ret < 0 || !have_data)
        g_slice_free (AVPacket, pkt);

    if (ret < 0)
        goto encode_fail;

    /* Encoder needs more data */
    if (!have_data) {
        gst_video_codec_frame_unref (frame);
        return GST_FLOW_OK;
    }

    /* save stats info if there is some as well as a stats file */
    if (ffmpegenc->file && ffmpegenc->context->stats_out)
        if (fprintf (ffmpegenc->file, "%s", ffmpegenc->context->stats_out) < 0)
            GST_ELEMENT_ERROR (ffmpegenc, RESOURCE, WRITE,
                               (("Could not write to file \"%s\"."), ffmpegenc->filename),
                               GST_ERROR_SYSTEM);

    gst_video_codec_frame_unref (frame);

    /* Get oldest frame */
    frame = gst_video_encoder_get_oldest_frame (encoder);

    outbuf =
        gst_buffer_new_wrapped_full (GST_MEMORY_FLAG_READONLY, pkt->data,
                                     pkt->size, 0, pkt->size, pkt, gst_ffmpegvidenc_free_avpacket);
    frame->output_buffer = outbuf;

    if (pkt->flags & AV_PKT_FLAG_KEY)
        GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (frame);
    else
        GST_VIDEO_CODEC_FRAME_UNSET_SYNC_POINT (frame);

    return gst_video_encoder_finish_frame (encoder, frame);

    /* ERRORS */
encode_fail:
    {
#ifndef GST_DISABLE_GST_DEBUG
        GstFFMpegVidEncClass *oclass =
            (GstFFMpegVidEncClass *) (G_OBJECT_GET_CLASS (ffmpegenc));
        GST_ERROR_OBJECT (ffmpegenc,
                          "avenc_%s: failed to encode buffer", oclass->in_plugin->name);
#endif /* GST_DISABLE_GST_DEBUG */
        /* avoid frame (and ts etc) piling up */
        return gst_video_encoder_finish_frame (encoder, frame);
    }
}
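
The packet handling above follows the avcodec_encode_video2() contract from the ffmpeg 2.x/3.x era: a negative return is a hard error, and *got_packet says whether a packet came out (encoders with delay may swallow several frames first). In miniature, with ctx and picture assumed to be a configured AVCodecContext and input AVFrame:

AVPacket pkt;
av_init_packet (&pkt);
pkt.data = NULL;          /* let the encoder allocate the payload */
pkt.size = 0;
int got_packet = 0;

int err = avcodec_encode_video2 (ctx, &pkt, picture, &got_packet);
if (err < 0) {
    /* hard encoding error */
} else if (got_packet) {
    /* consume pkt.data / pkt.size, then drop it
     * (av_packet_unref on newer ffmpeg, av_free_packet before that) */
    av_packet_unref (&pkt);
}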
Code Example #23
File: clutter-container.c Project: gramozeka/GSB-NEW
/**
 * clutter_container_child_set:
 * @container: a #ClutterContainer
 * @actor: a #ClutterActor that is a child of @container.
 * @first_prop: name of the first property to be set.
 * @...: value for the first property, followed optionally by more name/value
 * pairs terminated with NULL.
 *
 * Sets container specific properties on the child of a container.
 *
 * Since: 0.8
 */
void
clutter_container_child_set (ClutterContainer *container,
                             ClutterActor     *actor,
                             const gchar      *first_prop,
                             ...)
{
  GObjectClass *klass;
  const gchar *name;
  va_list var_args;
  
  g_return_if_fail (CLUTTER_IS_CONTAINER (container));
  g_return_if_fail (CLUTTER_IS_ACTOR (actor));

  klass = G_OBJECT_GET_CLASS (container);

  va_start (var_args, first_prop);

  name = first_prop;
  while (name)
    {
      GValue value = { 0, };
      gchar *error = NULL;
      GParamSpec *pspec;
    
      pspec = clutter_container_class_find_child_property (klass, name);
      if (!pspec)
        {
          g_warning ("%s: Containers of type '%s' have no child "
                     "property named '%s'",
                     G_STRLOC, G_OBJECT_TYPE_NAME (container), name);
          break;
        }

      if (!(pspec->flags & G_PARAM_WRITABLE))
        {
          g_warning ("%s: Child property '%s' of the container '%s' "
                     "is not writable",
                     G_STRLOC, pspec->name, G_OBJECT_TYPE_NAME (container));
          break;
        }

#if GLIB_CHECK_VERSION (2, 23, 2)
      G_VALUE_COLLECT_INIT (&value, G_PARAM_SPEC_VALUE_TYPE (pspec),
                            var_args, 0,
                            &error);
#else
      g_value_init (&value, G_PARAM_SPEC_VALUE_TYPE (pspec));
      G_VALUE_COLLECT (&value, var_args, 0, &error);
#endif /* GLIB_CHECK_VERSION (2, 23, 2) */

      if (error)
        {
          /* we intentionally leak the GValue because it might
           * be in an undefined state and calling g_value_unset()
           * on it might crash
           */
          g_warning ("%s: %s", G_STRLOC, error);
          g_free (error);
          break;
        }

      container_set_child_property (container, actor, &value, pspec);

      g_value_unset (&value);

      name = va_arg (var_args, gchar*);
    }

  va_end (var_args);
}
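
A typical call site, for illustration only: which child properties exist depends entirely on the container, so the "x-align" name below is hypothetical:

clutter_container_child_set (container, actor,
                             "x-align", 0.5,
                             NULL);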
Code Example #24
File: gstavvidenc.c Project: GStreamer/gst-libav
static GstFlowReturn
gst_ffmpegvidenc_flush_buffers (GstFFMpegVidEnc * ffmpegenc, gboolean send)
{
    GstVideoCodecFrame *frame;
    GstFlowReturn flow_ret = GST_FLOW_OK;
    GstBuffer *outbuf;
    gint ret;
    AVPacket *pkt;
    int have_data = 0;

    GST_DEBUG_OBJECT (ffmpegenc, "flushing buffers with sending %d", send);

    /* no need to empty codec if there is none */
    if (!ffmpegenc->opened)
        goto done;

    while ((frame =
                gst_video_encoder_get_oldest_frame (GST_VIDEO_ENCODER (ffmpegenc)))) {
        pkt = g_slice_new0 (AVPacket);
        have_data = 0;

        ret = avcodec_encode_video2 (ffmpegenc->context, pkt, NULL, &have_data);

        if (ret < 0) {              /* there should be something, notify and give up */
#ifndef GST_DISABLE_GST_DEBUG
            GstFFMpegVidEncClass *oclass =
                (GstFFMpegVidEncClass *) (G_OBJECT_GET_CLASS (ffmpegenc));
            GST_WARNING_OBJECT (ffmpegenc,
                                "avenc_%s: failed to flush buffer", oclass->in_plugin->name);
#endif /* GST_DISABLE_GST_DEBUG */
            g_slice_free (AVPacket, pkt);
            gst_video_codec_frame_unref (frame);
            break;
        }

        /* save stats info if there is some as well as a stats file */
        if (ffmpegenc->file && ffmpegenc->context->stats_out)
            if (fprintf (ffmpegenc->file, "%s", ffmpegenc->context->stats_out) < 0)
                GST_ELEMENT_ERROR (ffmpegenc, RESOURCE, WRITE,
                                   (("Could not write to file \"%s\"."), ffmpegenc->filename),
                                   GST_ERROR_SYSTEM);

        if (send && have_data) {
            outbuf =
                gst_buffer_new_wrapped_full (GST_MEMORY_FLAG_READONLY, pkt->data,
                                             pkt->size, 0, pkt->size, pkt, gst_ffmpegvidenc_free_avpacket);
            frame->output_buffer = outbuf;

            if (pkt->flags & AV_PKT_FLAG_KEY)
                GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (frame);
            else
                GST_VIDEO_CODEC_FRAME_UNSET_SYNC_POINT (frame);

            flow_ret =
                gst_video_encoder_finish_frame (GST_VIDEO_ENCODER (ffmpegenc), frame);
        } else {
            /* no frame attached, so will be skipped and removed from frame list */
            gst_video_encoder_finish_frame (GST_VIDEO_ENCODER (ffmpegenc), frame);
        }
    }

done:

    return flow_ret;
}
Code Example #25
/**
 * gimp_config_deserialize_properties:
 * @config: a #GimpConfig.
 * @scanner: a #GScanner.
 * @nest_level: the nest level
 *
 * This function uses the @scanner to configure the properties of @config.
 *
 * Return value: %TRUE on success, %FALSE otherwise.
 *
 * Since: GIMP 2.4
 **/
gboolean
gimp_config_deserialize_properties (GimpConfig *config,
                                    GScanner   *scanner,
                                    gint        nest_level)
{
  GObjectClass  *klass;
  GParamSpec   **property_specs;
  guint          n_property_specs;
  guint          i;
  guint          scope_id;
  guint          old_scope_id;
  GTokenType     token;
  GTokenType     next;

  g_return_val_if_fail (GIMP_IS_CONFIG (config), FALSE);

  klass = G_OBJECT_GET_CLASS (config);
  property_specs = g_object_class_list_properties (klass, &n_property_specs);

  if (! property_specs)
    return TRUE;

  scope_id = g_type_qname (G_TYPE_FROM_INSTANCE (config));
  old_scope_id = g_scanner_set_scope (scanner, scope_id);

  for (i = 0; i < n_property_specs; i++)
    {
      GParamSpec *prop_spec = property_specs[i];

      if (prop_spec->flags & GIMP_CONFIG_PARAM_SERIALIZE)
        {
          g_scanner_scope_add_symbol (scanner, scope_id,
                                      prop_spec->name, prop_spec);
        }
    }

  g_free (property_specs);

  g_object_freeze_notify (G_OBJECT (config));

  token = G_TOKEN_LEFT_PAREN;

  while (TRUE)
    {
      next = g_scanner_peek_next_token (scanner);

      if (G_UNLIKELY (next != token &&
                      ! (token == G_TOKEN_SYMBOL &&
                         next  == G_TOKEN_IDENTIFIER)))
        {
          break;
        }

      token = g_scanner_get_next_token (scanner);

      switch (token)
        {
        case G_TOKEN_LEFT_PAREN:
          token = G_TOKEN_SYMBOL;
          break;

        case G_TOKEN_IDENTIFIER:
          token = gimp_config_skip_unknown_property (scanner);
          break;

        case G_TOKEN_SYMBOL:
          token = gimp_config_deserialize_property (config,
                                                    scanner, nest_level);
          break;

        case G_TOKEN_RIGHT_PAREN:
          token = G_TOKEN_LEFT_PAREN;
          break;

        default: /* do nothing */
          break;
        }
    }

  g_scanner_set_scope (scanner, old_scope_id);

  g_object_thaw_notify (G_OBJECT (config));

  if (token == G_TOKEN_NONE)
    return FALSE;

  return gimp_config_deserialize_return (scanner, token, nest_level);
}
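
The token loop above consumes GIMP's parenthesised rc format, in which every serialized property is a (name value ...) form. Roughly, with illustrative property names:

(undo-levels 5)
(show-tooltips yes)
(default-threshold 15)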
Code Example #26
/**
 * clutter_layout_manager_child_get:
 * @manager: a #ClutterLayoutManager
 * @container: a #ClutterContainer using @manager
 * @actor: a #ClutterActor child of @container
 * @first_property: the name of the first property
 * @Varargs: a list of property name and return location for the value pairs
 *
 * Retrieves the values for a list of properties out of the
 * #ClutterLayoutMeta created by @manager and attached to the
 * child of a @container
 *
 * Since: 1.2
 */
void
clutter_layout_manager_child_get (ClutterLayoutManager *manager,
                                  ClutterContainer     *container,
                                  ClutterActor         *actor,
                                  const gchar          *first_property,
                                  ...)
{
  ClutterLayoutMeta *meta;
  GObjectClass *klass;
  const gchar *pname;
  va_list var_args;

  g_return_if_fail (CLUTTER_IS_LAYOUT_MANAGER (manager));
  g_return_if_fail (CLUTTER_IS_CONTAINER (container));
  g_return_if_fail (CLUTTER_IS_ACTOR (actor));
  g_return_if_fail (first_property != NULL);

  meta = get_child_meta (manager, container, actor);
  if (meta == NULL)
    {
      g_warning ("Layout managers of type '%s' do not support "
                 "layout metadata",
                 g_type_name (G_OBJECT_TYPE (manager)));
      return;
    }

  klass = G_OBJECT_GET_CLASS (meta);

  va_start (var_args, first_property);

  pname = first_property;
  while (pname)
    {
      GValue value = { 0, };
      GParamSpec *pspec;
      gchar *error;
      gboolean res;

      pspec = g_object_class_find_property (klass, pname);
      if (pspec == NULL)
        {
          g_warning ("%s: Layout managers of type '%s' have no layout "
                     "property named '%s'",
                     G_STRLOC, G_OBJECT_TYPE_NAME (manager), pname);
          break;
        }

      g_value_init (&value, G_PARAM_SPEC_VALUE_TYPE (pspec));

      res = layout_get_property_internal (manager, G_OBJECT (meta),
                                          pspec,
                                          &value);
      if (!res)
        {
          g_value_unset (&value);
          break;
        }

      G_VALUE_LCOPY (&value, var_args, 0, &error);
      if (error)
        {
          g_warning ("%s: %s", G_STRLOC, error);
          g_free (error);
          g_value_unset (&value);
          break;
        }

      g_value_unset (&value);

      pname = va_arg (var_args, gchar*);
    }

  va_end (var_args);
}
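
And the matching read side, again with a hypothetical property name; each value comes back through an out argument:

gdouble x_align = 0.0;

clutter_layout_manager_child_get (manager, container, actor,
                                  "x-align", &x_align,
                                  NULL);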
Code Example #27
File: gstavvidenc.c Project: krieger-od/gst-ffmpeg
static GstFlowReturn
gst_ffmpegvidenc_handle_frame (GstVideoEncoder * encoder,
    GstVideoCodecFrame * frame)
{
  GstFFMpegVidEnc *ffmpegenc = (GstFFMpegVidEnc *) encoder;
  GstBuffer *outbuf;
  gint ret_size = 0, c;
  GstVideoInfo *info = &ffmpegenc->input_state->info;
  GstVideoFrame vframe;

  if (GST_VIDEO_CODEC_FRAME_IS_FORCE_KEYFRAME (frame))
    ffmpegenc->picture->pict_type = AV_PICTURE_TYPE_I;

  if (!gst_video_frame_map (&vframe, info, frame->input_buffer, GST_MAP_READ)) {
    GST_ERROR_OBJECT (encoder, "Failed to map input buffer");
    return GST_FLOW_ERROR;
  }

  /* Fill avpicture */
  for (c = 0; c < AV_NUM_DATA_POINTERS; c++) {
    if (c < GST_VIDEO_INFO_N_COMPONENTS (info)) {
      ffmpegenc->picture->data[c] = GST_VIDEO_FRAME_PLANE_DATA (&vframe, c);
      ffmpegenc->picture->linesize[c] =
          GST_VIDEO_FRAME_COMP_STRIDE (&vframe, c);
    } else {
      ffmpegenc->picture->data[c] = NULL;
      ffmpegenc->picture->linesize[c] = 0;
    }
  }

  ffmpegenc->picture->pts =
      gst_ffmpeg_time_gst_to_ff (frame->pts /
      ffmpegenc->context->ticks_per_frame, ffmpegenc->context->time_base);

  ffmpegenc_setup_working_buf (ffmpegenc);

  ret_size = avcodec_encode_video (ffmpegenc->context,
      ffmpegenc->working_buf, ffmpegenc->working_buf_size, ffmpegenc->picture);

  gst_video_frame_unmap (&vframe);

  if (ret_size < 0)
    goto encode_fail;

  /* Encoder needs more data */
  if (!ret_size)
    return GST_FLOW_OK;

  /* save stats info if there is some as well as a stats file */
  if (ffmpegenc->file && ffmpegenc->context->stats_out)
    if (fprintf (ffmpegenc->file, "%s", ffmpegenc->context->stats_out) < 0)
      GST_ELEMENT_ERROR (ffmpegenc, RESOURCE, WRITE,
          (("Could not write to file \"%s\"."), ffmpegenc->filename),
          GST_ERROR_SYSTEM);

  gst_video_codec_frame_unref (frame);

  /* Get oldest frame */
  frame = gst_video_encoder_get_oldest_frame (encoder);

  /* Allocate output buffer */
  if (gst_video_encoder_allocate_output_frame (encoder, frame,
          ret_size) != GST_FLOW_OK) {
    gst_video_codec_frame_unref (frame);
    goto alloc_fail;
  }

  outbuf = frame->output_buffer;
  gst_buffer_fill (outbuf, 0, ffmpegenc->working_buf, ret_size);

  /* buggy codec may not set coded_frame */
  if (ffmpegenc->context->coded_frame) {
    if (ffmpegenc->context->coded_frame->key_frame)
      GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (frame);
  } else
    GST_WARNING_OBJECT (ffmpegenc, "codec did not provide keyframe info");

  /* Reset frame type */
  if (ffmpegenc->picture->pict_type)
    ffmpegenc->picture->pict_type = 0;

  return gst_video_encoder_finish_frame (encoder, frame);

  /* ERRORS */
encode_fail:
  {
#ifndef GST_DISABLE_GST_DEBUG
    GstFFMpegVidEncClass *oclass =
        (GstFFMpegVidEncClass *) (G_OBJECT_GET_CLASS (ffmpegenc));
    GST_ERROR_OBJECT (ffmpegenc,
        "avenc_%s: failed to encode buffer", oclass->in_plugin->name);
#endif /* GST_DISABLE_GST_DEBUG */
    return GST_FLOW_OK;
  }
alloc_fail:
  {
#ifndef GST_DISABLE_GST_DEBUG
    GstFFMpegVidEncClass *oclass =
        (GstFFMpegVidEncClass *) (G_OBJECT_GET_CLASS (ffmpegenc));
    GST_ERROR_OBJECT (ffmpegenc,
        "avenc_%s: failed to allocate buffer", oclass->in_plugin->name);
#endif /* GST_DISABLE_GST_DEBUG */
    return GST_FLOW_ERROR;
  }
}
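
This is the same handler as example #22, but built on the pre-AVPacket API: avcodec_encode_video() writes into a caller-supplied working buffer and returns the byte count, with 0 meaning the encoder wants more input. Sketched with assumed names (ctx, working_buf, working_buf_size, picture):

int n = avcodec_encode_video (ctx, working_buf, working_buf_size, picture);
if (n < 0) {
  /* hard error */
} else if (n == 0) {
  /* encoder delay: no output yet */
} else {
  /* n bytes of encoded data are now in working_buf */
}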
Code Example #28
File: misc.c Project: svn2github/GtkAda
AdaGObjectClass ada_gobject_class_from_object(GObject* object) {
   return ADA_CLASS_FROM_C_CLASS(G_OBJECT_GET_CLASS(object));
}
Code Example #29
static void
print_element_properties_info (GstElement * element)
{
  GParamSpec **property_specs;
  guint num_properties, i;
  gboolean readable;
  gboolean first_flag;

  property_specs = g_object_class_list_properties
      (G_OBJECT_GET_CLASS (element), &num_properties);
  n_print ("\n");
  n_print ("Element Properties:\n");

  for (i = 0; i < num_properties; i++) {
    GValue value = { 0, };
    GParamSpec *param = property_specs[i];

    readable = FALSE;

    g_value_init (&value, param->value_type);

    n_print ("  %-20s: %s\n", g_param_spec_get_name (param),
        g_param_spec_get_blurb (param));

    first_flag = TRUE;
    n_print ("%-23.23s flags: ", "");
    if (param->flags & G_PARAM_READABLE) {
      g_object_get_property (G_OBJECT (element), param->name, &value);
      readable = TRUE;
      if (!first_flag)
        g_print (", ");
      else
        first_flag = FALSE;
      g_print (_("readable"));
    }
    if (param->flags & G_PARAM_WRITABLE) {
      if (!first_flag)
        g_print (", ");
      else
        first_flag = FALSE;
      g_print (_("writable"));
    }
    if (param->flags & GST_PARAM_CONTROLLABLE) {
      if (!first_flag)
        g_print (", ");
      else
        first_flag = FALSE;
      g_print (_("controllable"));
    }
    n_print ("\n");

    switch (G_VALUE_TYPE (&value)) {
      case G_TYPE_STRING:
      {
        GParamSpecString *pstring = G_PARAM_SPEC_STRING (param);

        n_print ("%-23.23s String. ", "");

        if (pstring->default_value == NULL)
          g_print ("Default: null ");
        else
          g_print ("Default: \"%s\" ", pstring->default_value);

        if (readable) {
          const char *string_val = g_value_get_string (&value);

          if (string_val == NULL)
            g_print ("Current: null");
          else
            g_print ("Current: \"%s\"", string_val);
        }
        break;
      }
      case G_TYPE_BOOLEAN:
      {
        GParamSpecBoolean *pboolean = G_PARAM_SPEC_BOOLEAN (param);

        n_print ("%-23.23s Boolean. ", "");
        g_print ("Default: %s ", (pboolean->default_value ? "true" : "false"));
        if (readable)
          g_print ("Current: %s",
              (g_value_get_boolean (&value) ? "true" : "false"));
        break;
      }
      case G_TYPE_ULONG:
      {
        GParamSpecULong *pulong = G_PARAM_SPEC_ULONG (param);

        n_print ("%-23.23s Unsigned Long. ", "");
        g_print ("Range: %lu - %lu Default: %lu ",
            pulong->minimum, pulong->maximum, pulong->default_value);
        if (readable)
          g_print ("Current: %lu", g_value_get_ulong (&value));
        break;
      }
      case G_TYPE_LONG:
      {
        GParamSpecLong *plong = G_PARAM_SPEC_LONG (param);

        n_print ("%-23.23s Long. ", "");
        g_print ("Range: %ld - %ld Default: %ld ",
            plong->minimum, plong->maximum, plong->default_value);
        if (readable)
          g_print ("Current: %ld", g_value_get_long (&value));
        break;
      }
      case G_TYPE_UINT:
      {
        GParamSpecUInt *puint = G_PARAM_SPEC_UINT (param);

        n_print ("%-23.23s Unsigned Integer. ", "");
        g_print ("Range: %u - %u Default: %u ",
            puint->minimum, puint->maximum, puint->default_value);
        if (readable)
          g_print ("Current: %u", g_value_get_uint (&value));
        break;
      }
      case G_TYPE_INT:
      {
        GParamSpecInt *pint = G_PARAM_SPEC_INT (param);

        n_print ("%-23.23s Integer. ", "");
        g_print ("Range: %d - %d Default: %d ",
            pint->minimum, pint->maximum, pint->default_value);
        if (readable)
          g_print ("Current: %d", g_value_get_int (&value));
        break;
      }
      case G_TYPE_UINT64:
      {
        GParamSpecUInt64 *puint64 = G_PARAM_SPEC_UINT64 (param);

        n_print ("%-23.23s Unsigned Integer64. ", "");
        g_print ("Range: %" G_GUINT64_FORMAT " - %" G_GUINT64_FORMAT
            " Default: %" G_GUINT64_FORMAT " ",
            puint64->minimum, puint64->maximum, puint64->default_value);
        if (readable)
          g_print ("Current: %" G_GUINT64_FORMAT, g_value_get_uint64 (&value));
        break;
      }
      case G_TYPE_INT64:
      {
        GParamSpecInt64 *pint64 = G_PARAM_SPEC_INT64 (param);

        n_print ("%-23.23s Integer64. ", "");
        g_print ("Range: %" G_GINT64_FORMAT " - %" G_GINT64_FORMAT
            " Default: %" G_GINT64_FORMAT " ",
            pint64->minimum, pint64->maximum, pint64->default_value);
        if (readable)
          g_print ("Current: %" G_GINT64_FORMAT, g_value_get_int64 (&value));
        break;
      }
      case G_TYPE_FLOAT:
      {
        GParamSpecFloat *pfloat = G_PARAM_SPEC_FLOAT (param);

        n_print ("%-23.23s Float. ", "");
        g_print ("Range: %15.7g - %15.7g Default: %15.7g ",
            pfloat->minimum, pfloat->maximum, pfloat->default_value);
        if (readable)
          g_print ("Current: %15.7g", g_value_get_float (&value));
        break;
      }
      case G_TYPE_DOUBLE:
      {
        GParamSpecDouble *pdouble = G_PARAM_SPEC_DOUBLE (param);

        n_print ("%-23.23s Double. ", "");
        g_print ("Range: %15.7g - %15.7g Default: %15.7g ",
            pdouble->minimum, pdouble->maximum, pdouble->default_value);
        if (readable)
          g_print ("Current: %15.7g", g_value_get_double (&value));
        break;
      }
      default:
        if (param->value_type == GST_TYPE_CAPS) {
          const GstCaps *caps = gst_value_get_caps (&value);

          if (!caps)
            n_print ("%-23.23s Caps (NULL)", "");
          else {
            print_caps (caps, "                           ");
          }
        } else if (G_IS_PARAM_SPEC_ENUM (param)) {
          GParamSpecEnum *penum = G_PARAM_SPEC_ENUM (param);
          GEnumValue *values;
          guint j = 0;
          gint enum_value;
          const gchar *def_val_nick = "", *cur_val_nick = "";

          values = G_ENUM_CLASS (g_type_class_ref (param->value_type))->values;
          enum_value = g_value_get_enum (&value);

          while (values[j].value_name) {
            if (values[j].value == enum_value)
              cur_val_nick = values[j].value_nick;
            if (values[j].value == penum->default_value)
              def_val_nick = values[j].value_nick;
            j++;
          }

          n_print
              ("%-23.23s Enum \"%s\" Default: %d, \"%s\" Current: %d, \"%s\"",
              "", g_type_name (G_VALUE_TYPE (&value)), penum->default_value,
              def_val_nick, enum_value, cur_val_nick);

          j = 0;
          while (values[j].value_name) {
            g_print ("\n");
            if (_name)
              g_print ("%s", _name);
            g_print ("%-23.23s    (%d): %-16s - %s", "",
                values[j].value, values[j].value_nick, values[j].value_name);
            j++;
          }
          /* g_type_class_unref (ec); */
        } else if (G_IS_PARAM_SPEC_FLAGS (param)) {
          GParamSpecFlags *pflags = G_PARAM_SPEC_FLAGS (param);
          GFlagsValue *values;
          guint j = 0;
          gint flags_value;
          GString *cur_flags = NULL, *def_flags = NULL;

          values = G_FLAGS_CLASS (g_type_class_ref (param->value_type))->values;
          flags_value = g_value_get_flags (&value);

          while (values[j].value_name) {
            if (values[j].value & flags_value) {
              if (cur_flags) {
                g_string_append_printf (cur_flags, " | %s",
                    values[j].value_nick);
              } else {
                cur_flags = g_string_new (values[j].value_nick);
              }
            }
            if (values[j].value & pflags->default_value) {
              if (def_flags) {
                g_string_append_printf (def_flags, " | %s",
                    values[j].value_nick);
              } else {
                def_flags = g_string_new (values[j].value_nick);
              }
            }
            j++;
          }

          n_print
              ("%-23.23s Flags \"%s\" Default: 0x%08x, \"%s\" Current: 0x%08x, \"%s\"",
              "", g_type_name (G_VALUE_TYPE (&value)), pflags->default_value,
              (def_flags ? def_flags->str : "(none)"), flags_value,
              (cur_flags ? cur_flags->str : "(none)"));

          j = 0;
          while (values[j].value_name) {
            g_print ("\n");
            if (_name)
              g_print ("%s", _name);
            g_print ("%-23.23s    (0x%08x): %-16s - %s", "",
                values[j].value, values[j].value_nick, values[j].value_name);
            j++;
          }

          if (cur_flags)
            g_string_free (cur_flags, TRUE);
          if (def_flags)
            g_string_free (def_flags, TRUE);
        } else if (G_IS_PARAM_SPEC_OBJECT (param)) {
          n_print ("%-23.23s Object of type \"%s\"", "",
              g_type_name (param->value_type));
        } else if (G_IS_PARAM_SPEC_BOXED (param)) {
          n_print ("%-23.23s Boxed pointer of type \"%s\"", "",
              g_type_name (param->value_type));
        } else if (G_IS_PARAM_SPEC_POINTER (param)) {
          if (param->value_type != G_TYPE_POINTER) {
            n_print ("%-23.23s Pointer of type \"%s\".", "",
                g_type_name (param->value_type));
          } else {
            n_print ("%-23.23s Pointer.", "");
          }
        } else if (param->value_type == G_TYPE_VALUE_ARRAY) {
          n_print ("%-23.23s Array of GValues", "");
        } else if (GST_IS_PARAM_SPEC_FRACTION (param)) {
          GstParamSpecFraction *pfraction = GST_PARAM_SPEC_FRACTION (param);

          n_print ("%-23.23s Fraction. ", "");

          g_print ("Range: %d/%d - %d/%d Default: %d/%d ",
              pfraction->min_num, pfraction->min_den,
              pfraction->max_num, pfraction->max_den,
              pfraction->def_num, pfraction->def_den);
          if (readable)
            g_print ("Current: %d/%d",
                gst_value_get_fraction_numerator (&value),
                gst_value_get_fraction_denominator (&value));

        } else {
          n_print ("%-23.23s Unknown type %ld \"%s\"", "", param->value_type,
              g_type_name (param->value_type));
        }
        break;
    }
    if (!readable)
      g_print (" Write only\n");
    else
      g_print ("\n");

    g_value_reset (&value);
  }
  if (num_properties == 0)
    n_print ("  none\n");

  g_free (property_specs);
}
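
A quick way to exercise this printer (it is essentially what gst-inspect does per element), assuming GStreamer has been initialised first:

gst_init (NULL, NULL);

GstElement *element = gst_element_factory_make ("identity", NULL);
if (element) {
  print_element_properties_info (element);
  gst_object_unref (element);
}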
Code Example #30
File: gstavauddec.c Project: pexip/gst-libav
static GstFlowReturn
gst_ffmpegauddec_handle_frame (GstAudioDecoder * decoder, GstBuffer * inbuf)
{
  GstFFMpegAudDec *ffmpegdec;
  GstFFMpegAudDecClass *oclass;
  guint8 *data, *bdata;
  GstMapInfo map;
  gint size, bsize, len, have_data;
  GstFlowReturn ret = GST_FLOW_OK;
  gboolean do_padding, is_header;

  ffmpegdec = (GstFFMpegAudDec *) decoder;

  if (G_UNLIKELY (!ffmpegdec->opened))
    goto not_negotiated;

  if (inbuf == NULL) {
    gst_ffmpegauddec_drain (ffmpegdec);
    return GST_FLOW_OK;
  }

  inbuf = gst_buffer_ref (inbuf);
  is_header = GST_BUFFER_FLAG_IS_SET (inbuf, GST_BUFFER_FLAG_HEADER);

  oclass = (GstFFMpegAudDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));

  GST_LOG_OBJECT (ffmpegdec,
      "Received new data of size %" G_GSIZE_FORMAT ", offset:%" G_GUINT64_FORMAT
      ", ts:%" GST_TIME_FORMAT ", dur:%" GST_TIME_FORMAT,
      gst_buffer_get_size (inbuf), GST_BUFFER_OFFSET (inbuf),
      GST_TIME_ARGS (GST_BUFFER_PTS (inbuf)),
      GST_TIME_ARGS (GST_BUFFER_DURATION (inbuf)));

  /* workarounds, functions write to buffers:
   *  libavcodec/svq1.c:svq1_decode_frame writes to the given buffer.
   *  libavcodec/svq3.c:svq3_decode_slice_header too.
   * ffmpeg devs know about it and will fix it (they said). */
  if (oclass->in_plugin->id == AV_CODEC_ID_SVQ1 ||
      oclass->in_plugin->id == AV_CODEC_ID_SVQ3) {
    inbuf = gst_buffer_make_writable (inbuf);
  }

  gst_buffer_map (inbuf, &map, GST_MAP_READ);

  bdata = map.data;
  bsize = map.size;

  if (bsize > 0 && (!GST_MEMORY_IS_ZERO_PADDED (map.memory)
          || (map.maxsize - map.size) < FF_INPUT_BUFFER_PADDING_SIZE)) {
    /* add padding */
    if (ffmpegdec->padded_size < bsize + FF_INPUT_BUFFER_PADDING_SIZE) {
      ffmpegdec->padded_size = bsize + FF_INPUT_BUFFER_PADDING_SIZE;
      ffmpegdec->padded = g_realloc (ffmpegdec->padded, ffmpegdec->padded_size);
      GST_LOG_OBJECT (ffmpegdec, "resized padding buffer to %d",
          ffmpegdec->padded_size);
    }
    GST_CAT_TRACE_OBJECT (CAT_PERFORMANCE, ffmpegdec,
        "Copy input to add padding");
    memcpy (ffmpegdec->padded, bdata, bsize);
    memset (ffmpegdec->padded + bsize, 0, FF_INPUT_BUFFER_PADDING_SIZE);

    bdata = ffmpegdec->padded;
    do_padding = TRUE;
  } else {
    do_padding = FALSE;
  }

  do {
    guint8 tmp_padding[FF_INPUT_BUFFER_PADDING_SIZE];

    data = bdata;
    size = bsize;

    if (do_padding) {
      /* add temporary padding */
      GST_CAT_TRACE_OBJECT (CAT_PERFORMANCE, ffmpegdec,
          "Add temporary input padding");
      memcpy (tmp_padding, data + size, FF_INPUT_BUFFER_PADDING_SIZE);
      memset (data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
    }

    /* decode a frame of audio now */
    len = gst_ffmpegauddec_frame (ffmpegdec, data, size, &have_data, &ret);

    if (do_padding) {
      memcpy (data + size, tmp_padding, FF_INPUT_BUFFER_PADDING_SIZE);
    }

    if (ret != GST_FLOW_OK) {
      GST_LOG_OBJECT (ffmpegdec, "breaking because of flow ret %s",
          gst_flow_get_name (ret));
      /* bad flow return, make sure we discard all data and exit */
      bsize = 0;
      break;
    }

    if (len == 0 && have_data == 0) {
      /* nothing was decoded, this could be because no data was available or
       * because we were skipping frames.
       * If we have no context we must exit and wait for more data, we keep the
       * data we tried. */
      GST_LOG_OBJECT (ffmpegdec, "Decoding didn't return any data, breaking");
      break;
    } else if (len < 0) {
      /* a decoding error happened, we must break and try again with next data. */
      GST_LOG_OBJECT (ffmpegdec, "Decoding error, breaking");
      bsize = 0;
      break;
    }
    /* prepare for the next round, for codecs with a context we did this
     * already when using the parser. */
    bsize -= len;
    bdata += len;

    do_padding = TRUE;

    GST_LOG_OBJECT (ffmpegdec, "Before (while bsize>0).  bsize:%d , bdata:%p",
        bsize, bdata);
  } while (bsize > 0);

  gst_buffer_unmap (inbuf, &map);
  gst_buffer_unref (inbuf);

  if (ffmpegdec->outbuf)
    ret =
        gst_audio_decoder_finish_frame (GST_AUDIO_DECODER (ffmpegdec),
        ffmpegdec->outbuf, 1);
  else if (len < 0 || is_header)
    ret =
        gst_audio_decoder_finish_frame (GST_AUDIO_DECODER (ffmpegdec), NULL, 1);
  ffmpegdec->outbuf = NULL;

  if (bsize > 0) {
    GST_DEBUG_OBJECT (ffmpegdec, "Dropping %d bytes of data", bsize);
  }

  return ret;

  /* ERRORS */
not_negotiated:
  {
    oclass = (GstFFMpegAudDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
    GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION, (NULL),
        ("avdec_%s: input format was not set before data start",
            oclass->in_plugin->name));
    return GST_FLOW_NOT_NEGOTIATED;
  }
}
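
The copy-and-pad dance in this decoder exists because ffmpeg requires FF_INPUT_BUFFER_PADDING_SIZE zeroed bytes past the end of every input buffer, since its bitstream readers may over-read. Reduced to its essence:

/* give ffmpeg the padding it demands before decoding from `data' */
guint8 *padded = g_malloc (size + FF_INPUT_BUFFER_PADDING_SIZE);
memcpy (padded, data, size);
memset (padded + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
/* ... decode from `padded' ... */
g_free (padded);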