Example #1
//
// decode buffer
//
IplImage * CvCapture_GStreamer::retrieveFrame(int)
{
    if(!buffer)
        return 0;

    if(!frame) {
        gint height, width;
        GstCaps *buff_caps = gst_buffer_get_caps(buffer);
        assert(gst_caps_get_size(buff_caps) == 1);
        GstStructure* structure = gst_caps_get_structure(buff_caps, 0);

        if(!gst_structure_get_int(structure, "width", &width) ||
                !gst_structure_get_int(structure, "height", &height)) {
            gst_caps_unref(buff_caps);
            return 0;
        }

        frame = cvCreateImageHeader(cvSize(width, height), IPL_DEPTH_8U, 3);
        gst_caps_unref(buff_caps);
    }

    // no need to memcpy, just use gstreamer's buffer :-)
    frame->imageData = (char *)GST_BUFFER_DATA(buffer);
    //memcpy (frame->imageData, GST_BUFFER_DATA(buffer), GST_BUFFER_SIZE (buffer));
    //gst_buffer_unref(buffer);
    //buffer = 0;
    return frame;
}
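A note on the zero-copy choice above: frame->imageData is pointed directly at the GstBuffer's data, so the buffer must stay alive (and unmodified) for as long as the returned IplImage is in use; the commented-out memcpy/unref lines show the copying alternative, which pays for one copy per frame but gives the IplImage an independent lifetime.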
Example #2
static void
swfdec_video_decoder_gst_decode (SwfdecVideoDecoder *dec, SwfdecBuffer *buffer)
{
  SwfdecVideoDecoderGst *player = SWFDEC_VIDEO_DECODER_GST (dec);
#define SWFDEC_ALIGN(x, n) (((x) + (n) - 1) & (~((n) - 1)))
  GstBuffer *buf;
  GstCaps *caps;
  GstStructure *structure;

  buf = swfdec_gst_buffer_new (swfdec_buffer_ref (buffer));
  if (!swfdec_gst_decoder_push (&player->dec, buf)) {
    swfdec_video_decoder_error (dec, "failed to push buffer");
    return;
  }

  buf = swfdec_gst_decoder_pull (&player->dec);
  if (buf == NULL) {
    SWFDEC_ERROR ("failed to pull decoded buffer. Broken stream?");
    return;
  } else {
    if (player->last)
      gst_buffer_unref (player->last);
    player->last = buf;
  }

  while ((buf = swfdec_gst_decoder_pull (&player->dec))) {
    SWFDEC_ERROR ("too many output buffers!");
    gst_buffer_unref (buf);
  }
  caps = gst_buffer_get_caps (player->last);
  if (caps == NULL) {
    swfdec_video_decoder_error (dec, "no caps on decoded buffer");
    return;
  }
  structure = gst_caps_get_structure (caps, 0);
  if (!gst_structure_get_int (structure, "width", (int *) &dec->width) ||
      !gst_structure_get_int (structure, "height", (int *) &dec->height)) {
    gst_caps_unref (caps);
    swfdec_video_decoder_error (dec, "invalid caps on decoded buffer");
    return;
  }
  gst_caps_unref (caps);
  buf = player->last;
  switch (swfdec_video_codec_get_format (dec->codec)) {
    case SWFDEC_VIDEO_FORMAT_RGBA:
      dec->plane[0] = buf->data;
      dec->rowstride[0] = dec->width * 4;
      break;
    case SWFDEC_VIDEO_FORMAT_I420:
      dec->plane[0] = buf->data;
      dec->rowstride[0] = SWFDEC_ALIGN (dec->width, 4);
      dec->plane[1] = dec->plane[0] + dec->rowstride[0] * SWFDEC_ALIGN (dec->height, 2);
      dec->rowstride[1] = SWFDEC_ALIGN (dec->width, 8) / 2;
      dec->plane[2] = dec->plane[1] + dec->rowstride[1] * SWFDEC_ALIGN (dec->height, 2) / 2;
      dec->rowstride[2] = dec->rowstride[1];
      g_assert (dec->plane[2] + dec->rowstride[2] * SWFDEC_ALIGN (dec->height, 2) / 2 == dec->plane[0] + buf->size);
      break;
    default:
      g_return_if_reached ();
  }
#undef SWFDEC_ALIGN
}
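To make the SWFDEC_ALIGN arithmetic concrete with a hypothetical 100x50 I420 frame: rowstride[0] = SWFDEC_ALIGN(100, 4) = 100, so the Y plane takes 100 * SWFDEC_ALIGN(50, 2) = 5000 bytes; rowstride[1] = SWFDEC_ALIGN(100, 8) / 2 = 52, so each chroma plane takes 52 * 50 / 2 = 1300 bytes; the final g_assert then expects buf->size == 5000 + 1300 + 1300 = 7600.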
Example #3
static GstFlowReturn
gst_test_reverse_negotiation_sink_render (GstBaseSink * bsink,
    GstBuffer * buffer)
{
  GstTestReverseNegotiationSink *sink =
      GST_TEST_REVERSE_NEGOTIATION_SINK (bsink);
  GstCaps *caps = gst_buffer_get_caps (buffer);
  GstVideoFormat fmt;
  gint width, height;

  fail_unless (caps != NULL);
  fail_unless (gst_video_format_parse_caps (caps, &fmt, &width, &height));

  sink->nbuffers++;

  /* The third buffer still has the old size
   * because ffmpegcolorspace can't convert
   * the frame sizes
   */
  if (sink->nbuffers > 3) {
    fail_unless_equals_int (width, 512);
    fail_unless_equals_int (height, 128);
  }

  gst_caps_unref (caps);

  return GST_FLOW_OK;
}
Example #4
static GstFlowReturn
gst_nle_source_push_still_picture (GstNleSource * nlesrc, GstNleSrcItem * item,
    GstBuffer * buf)
{
  GstCaps *bcaps, *ncaps;
  guint64 buf_dur;
  gint i, n_bufs;
  GstFlowReturn ret = GST_FLOW_OK;

  buf_dur = GST_SECOND * nlesrc->fps_d / nlesrc->fps_n;
  n_bufs = item->duration / buf_dur;

  bcaps = gst_buffer_get_caps (buf);
  ncaps = gst_caps_make_writable (bcaps);
  gst_caps_set_simple (ncaps, "pixel-aspect-ratio", GST_TYPE_FRACTION,
      1, 1, NULL);
  gst_buffer_set_caps (buf, ncaps);
  gst_caps_unref (ncaps);

  nlesrc->video_seek_done = TRUE;
  for (i = 0; i < n_bufs; i++) {
    GstBuffer *new_buf;

    new_buf = gst_buffer_copy (buf);
    GST_BUFFER_TIMESTAMP (new_buf) = item->start + buf_dur * i;
    GST_BUFFER_DURATION (new_buf) = buf_dur;
    ret = gst_nle_source_push_buffer (nlesrc, new_buf, FALSE);
    if (ret <= GST_FLOW_UNEXPECTED) {
      break;
    }
  }

  gst_buffer_unref (buf);
  return ret;
}
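For example, with nlesrc->fps_n = 25 and nlesrc->fps_d = 1, buf_dur is GST_SECOND / 25 = 40 ms, so a still picture with item->duration of 2 seconds is pushed as n_bufs = 50 timestamped copies of the same frame.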
Example #5
/*!
  Returns audio format for a buffer.
  If the buffer doesn't have a valid audio format, an empty QAudioFormat is returned.
*/
QAudioFormat QGstUtils::audioFormatForBuffer(GstBuffer *buffer)
{
    GstCaps* caps = gst_buffer_get_caps(buffer);
    if (!caps)
        return QAudioFormat();

    QAudioFormat format = QGstUtils::audioFormatForCaps(caps);
    gst_caps_unref(caps);
    return format;
}
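A minimal usage sketch for the helper above, assuming a GStreamer 0.10 appsink (the callback name on_new_buffer is hypothetical):

static GstFlowReturn on_new_buffer(GstAppSink *sink, gpointer /*userData*/)
{
    GstBuffer *buffer = gst_app_sink_pull_buffer(sink);
    if (!buffer)
        return GST_FLOW_ERROR;

    // ask the helper to translate the buffer caps into a QAudioFormat
    QAudioFormat format = QGstUtils::audioFormatForBuffer(buffer);
    if (format.isValid()) {
        // consume GST_BUFFER_DATA(buffer) / GST_BUFFER_SIZE(buffer) according to 'format'
    }

    gst_buffer_unref(buffer);
    return GST_FLOW_OK;
}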
Example #6
/* chain function
 * this function does the actual processing
 */
static GstFlowReturn gst_throttle_chain(GstPad * pad, GstBuffer * buf)
{
	GstThrottle * filter = GST_THROTTLE(GST_OBJECT_PARENT(pad));
	
	if (filter->printOnly)
	{
		GstCaps * caps = gst_buffer_get_caps(buf);
		gchar * capsStr = gst_caps_to_string(caps);
		gst_caps_unref(caps);
		GST_LOG_OBJECT(filter, "ts: %" GST_TIME_FORMAT " %sof type %s",
			GST_TIME_ARGS(buf->timestamp),
			GST_BUFFER_IS_DISCONT(buf) ? "and discontinuity " : "",
			capsStr
		);
		g_free(capsStr);
		
		GstFlowReturn ret = gst_pad_push(filter->srcpad, buf);
		GST_TRACE_OBJECT(filter, "ts: %" GST_TIME_FORMAT " processed with status %d", GST_TIME_ARGS(buf->timestamp), ret);
		return ret;
	}
	
	if (filter->clock == NULL)
	{
		return gst_pad_push(filter->srcpad, buf);
	}
	
	GstClockTime realTs = gst_clock_get_time(filter->clock);
	
	if (filter->haveStartTime)
	{
		const char * discont = GST_BUFFER_IS_DISCONT(buf) ? " with discontinuity" : "";
		
		GstClockTime expectedRealTs = filter->streamStartRealTime + buf->timestamp;
		gboolean early = realTs < expectedRealTs;
		if (early)
		{
			GstClockID cid = gst_clock_new_single_shot_id(filter->clock, expectedRealTs);
			GST_TRACE_OBJECT(filter, "ts: %" GST_TIME_FORMAT " %s, waiting for %" G_GUINT64_FORMAT " ms", GST_TIME_ARGS(buf->timestamp), discont, (expectedRealTs - realTs)/1000000);
			gst_clock_id_wait(cid, NULL);
			gst_clock_id_unref(cid);
		}
		else
		{
			GST_TRACE_OBJECT(filter, "ts: %" GST_TIME_FORMAT " %s, pad on time", GST_TIME_ARGS(buf->timestamp), discont);
		}
	}
	else
	{
		filter->streamStartRealTime = realTs - buf->timestamp;
		filter->haveStartTime = TRUE;
	}
	
	return gst_pad_push(filter->srcpad, buf);
}
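In short: the first buffer pins streamStartRealTime = realTs - buf->timestamp; for a later buffer with timestamp 2 s, expectedRealTs is streamStartRealTime + 2 s, and if the clock has not reached that point yet the chain function blocks on a single-shot clock ID for the remaining (expectedRealTs - realTs) nanoseconds before pushing downstream.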
Example #7
GstFlowReturn AudioFileReader::handleBuffer(GstAppSink* sink)
{
    GstBuffer* buffer = gst_app_sink_pull_buffer(sink);
    if (!buffer)
        return GST_FLOW_ERROR;

    GstCaps* caps = gst_buffer_get_caps(buffer);
    GstStructure* structure = gst_caps_get_structure(caps, 0);

    gint channels = 0;
    if (!gst_structure_get_int(structure, "channels", &channels) || !channels) {
        gst_caps_unref(caps);
        gst_buffer_unref(buffer);
        return GST_FLOW_ERROR;
    }

    gint sampleRate = 0;
    if (!gst_structure_get_int(structure, "rate", &sampleRate) || !sampleRate) {
        gst_caps_unref(caps);
        gst_buffer_unref(buffer);
        return GST_FLOW_ERROR;
    }

    gint width = 0;
    if (!gst_structure_get_int(structure, "width", &width) || !width) {
        gst_caps_unref(caps);
        gst_buffer_unref(buffer);
        return GST_FLOW_ERROR;
    }

    GstClockTime duration = (static_cast<guint64>(GST_BUFFER_SIZE(buffer)) * 8 * GST_SECOND) / (sampleRate * channels * width);
    int frames = GST_CLOCK_TIME_TO_FRAMES(duration, sampleRate);

    // Check the first audio channel. The buffer is supposed to store
    // data of a single channel anyway.
    GstAudioChannelPosition* positions = gst_audio_get_channel_positions(structure);
    if (!positions) {
        gst_caps_unref(caps);
        gst_buffer_unref(buffer);
        return GST_FLOW_ERROR;
    }

    switch (positions[0]) {
    case GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT:
        gst_buffer_list_iterator_add(m_frontLeftBuffersIterator, buffer);
        m_channelSize += frames;
        break;
    case GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT:
        gst_buffer_list_iterator_add(m_frontRightBuffersIterator, buffer);
        break;
    default:
        gst_buffer_unref(buffer);
        break;
    }

    g_free(positions);
    gst_caps_unref(caps);
    return GST_FLOW_OK;
}
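The duration arithmetic in handleBuffer simply inverts bytes-per-second: for a hypothetical 4096-byte buffer of mono (channels = 1), 32-bit (width = 32) audio at 44100 Hz, duration = 4096 * 8 * GST_SECOND / (44100 * 32) ≈ 23.2 ms, which GST_CLOCK_TIME_TO_FRAMES turns back into 4096 / 4 = 1024 frames.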
Example #8
void QGstreamerAudioProbeControl::bufferProbed(GstBuffer* buffer)
{
    GstCaps* caps = gst_buffer_get_caps(buffer);
    if (!caps)
        return;

    QAudioFormat format = QGstUtils::audioFormatForCaps(caps);
    gst_caps_unref(caps);
    if (!format.isValid())
        return;

    QAudioBuffer audioBuffer = QAudioBuffer(QByteArray((const char*)buffer->data, buffer->size), format);

    {
        QMutexLocker locker(&m_bufferMutex);
        m_pendingBuffer = audioBuffer;
        QMetaObject::invokeMethod(this, "bufferProbed", Qt::QueuedConnection);
    }
}
Example #9
PassRefPtr<ImageGStreamer> ImageGStreamer::createImage(GstBuffer* buffer)
{
    int width = 0, height = 0;
    GstCaps* caps = gst_buffer_get_caps(buffer);
    GstVideoFormat format;
    if (!gst_video_format_parse_caps(caps, &format, &width, &height)) {
        gst_caps_unref(caps);
        return 0;
    }

    gst_caps_unref(caps);

    cairo_format_t cairoFormat;
    if (format == GST_VIDEO_FORMAT_ARGB || format == GST_VIDEO_FORMAT_BGRA)
        cairoFormat = CAIRO_FORMAT_ARGB32;
    else
        cairoFormat = CAIRO_FORMAT_RGB24;

    return adoptRef(new ImageGStreamer(buffer, IntSize(width, height), cairoFormat));
}
Example #10
/*
 Saves photos
*/
void
acam_webcam_take_photo (GstElement *element, GstBuffer *buffer, GstPad *pad, acam_webcam_device_s *acam_webcam_device)
{
	GstCaps *caps;
	const GstStructure *structure;
	int width, height, stride;
	GdkPixbuf *pixbuf;
	const int bits_per_pixel = 8;

	time_t now;
	struct tm *d;
	char li[30];
	gchar *filename;
	gchar *home_directory;

	time (&now);
	d = localtime (&now);
	strftime (li, 30, "%d-%m-%Y-%H-%M-%S", d);

	/* Create the photo filename */
	filename = g_strdup_printf("%s/dev%d-%s.jpg", acam_webcam_device->photo_directory, acam_webcam_device->dev_num + 1, li);
	
	g_print (" Taking photo | device %d | %s\n", acam_webcam_device->dev_num + 1, filename);
	
	caps = gst_buffer_get_caps (buffer);
	structure = gst_caps_get_structure (caps, 0);
	gst_structure_get_int (structure, "width", &width);
	gst_structure_get_int (structure, "height", &height);
	gst_caps_unref (caps);

	stride = buffer->size / height;
	pixbuf = gdk_pixbuf_new_from_data (GST_BUFFER_DATA (buffer), GDK_COLORSPACE_RGB,
		                             FALSE, bits_per_pixel, width, height, stride,
		                             NULL, NULL);

	gdk_pixbuf_save (pixbuf, filename, "jpeg", NULL, NULL);
	g_free (filename);

	g_object_unref (G_OBJECT (pixbuf));

	g_signal_handler_disconnect (G_OBJECT (acam_webcam_device->photo_sink), acam_webcam_device->photo_handler_signal_id);
}
Example #11
static gboolean query_data(struct input_handle* ih) {
  GstBuffer *preroll;
  GstCaps *src_caps;
  GstStructure *s;
  int i;

  ih->n_channels = 0;
  ih->sample_rate = 0;
  ih->channel_positions = NULL;

  preroll = gst_app_sink_pull_preroll(GST_APP_SINK(ih->appsink));
  src_caps = gst_buffer_get_caps(preroll);

  s = gst_caps_get_structure(src_caps, 0);
  gst_structure_get_int(s, "rate", &(ih->sample_rate));
  gst_structure_get_int(s, "channels", &(ih->n_channels));
  if (!ih->sample_rate || !ih->n_channels) {
    gst_caps_unref(src_caps);
    gst_buffer_unref(preroll);
    return FALSE;
  }

  ih->channel_positions = gst_audio_get_channel_positions(s);
  if (verbose) {
    if (ih->channel_positions) {
      for (i = 0; i < ih->n_channels; ++i) {
        printf("Channel %d: %d\n", i, ih->channel_positions[i]);
      }
    }
    g_print ("%d channels @ %d Hz\n", ih->n_channels, ih->sample_rate);
  }

  gst_caps_unref(src_caps);
  gst_buffer_unref(preroll);

  return TRUE;
}
Example #12
std::auto_ptr<GnashImage>
VideoDecoderGst::pop()
{
    GstBuffer * buffer = swfdec_gst_decoder_pull (&_decoder);

    if (!buffer) {
        return std::auto_ptr<GnashImage>();
    }
  
    GstCaps* caps = gst_buffer_get_caps(buffer);

    assert(gst_caps_get_size(caps) == 1);
  
    GstStructure* structure = gst_caps_get_structure (caps, 0);

    gst_structure_get_int (structure, "width", &_width);
    gst_structure_get_int (structure, "height", &_height);

    gst_caps_unref(caps);
  
    std::auto_ptr<GnashImage> ret(new gnashGstBuffer(buffer, _width, _height));
  
    return ret;
}
Example #13
/*
 * Test if the parser pushes clean data properly.
 */
void
gst_parser_test_run (GstParserTest * test, GstCaps ** out_caps)
{
  buffer_verify_data_s vdata = { 0, 0, 0, NULL, 0, NULL, FALSE };
  GstElement *element;
  GstBuffer *buffer = NULL;
  GstCaps *src_caps;
  guint i, j, k;
  guint frames = 0, size = 0;

  element = setup_element (test->factory, test->sink_template, NULL,
      test->src_template, test->src_caps);

  /* push some setup headers */
  for (j = 0; j < G_N_ELEMENTS (test->headers) && test->headers[j].data; j++) {
    buffer = buffer_new (test->headers[j].data, test->headers[j].size);
    fail_unless_equals_int (gst_pad_push (srcpad, buffer), GST_FLOW_OK);
  }

  for (j = 0; j < 3; j++) {
    for (i = 0; i < test->series[j].num; i++) {
      /* sanity enforcing */
      for (k = 0; k < MAX (1, test->series[j].fpb); k++) {
        if (!k)
          buffer = buffer_new (test->series[j].data, test->series[j].size);
        else {
          GstCaps *caps = gst_buffer_get_caps (buffer);

          buffer = gst_buffer_join (buffer,
              buffer_new (test->series[j].data, test->series[j].size));
          if (caps) {
            gst_buffer_set_caps (buffer, caps);
            gst_caps_unref (caps);
          }
        }
      }
      fail_unless_equals_int (gst_pad_push (srcpad, buffer), GST_FLOW_OK);
      if (j == 0)
        vdata.buffers_before_offset_skip++;
      else if (j == 1)
        vdata.offset_skip_amount += test->series[j].size * test->series[j].fpb;
      if (j != 1) {
        frames += test->series[j].fpb;
        size += test->series[j].size * test->series[j].fpb;
      }
    }
  }
  gst_pad_push_event (srcpad, gst_event_new_eos ());

  if (G_LIKELY (test->framed))
    fail_unless_equals_int (g_list_length (buffers) - test->discard, frames);

  /* if all frames are identical, do extended test,
   * otherwise only verify total data size */
  if (test->series[0].data && (!test->series[2].size ||
          (test->series[0].size == test->series[2].size && test->series[2].data
              && !memcmp (test->series[0].data, test->series[2].data,
                  test->series[0].size)))) {
    vdata.data_to_verify = test->series[0].data;
    vdata.data_to_verify_size = test->series[0].size;
    vdata.caps = test->sink_caps;
    vdata.discard = test->discard;
    vdata.no_metadata = test->no_metadata;
    g_list_foreach (buffers, buffer_verify_data, &vdata);
  } else {
    guint datasum = 0;

    g_list_foreach (buffers, buffer_count_size, &datasum);
    size -= test->dropped;
    fail_unless_equals_int (datasum, size);
  }

  src_caps = gst_pad_get_negotiated_caps (sinkpad);
  GST_LOG ("output caps: %" GST_PTR_FORMAT, src_caps);

  if (test->sink_caps) {
    GST_LOG ("%" GST_PTR_FORMAT " = %" GST_PTR_FORMAT " ?", src_caps,
        test->sink_caps);
    fail_unless (gst_caps_is_equal (src_caps, test->sink_caps));
  }

  if (out_caps)
    *out_caps = src_caps;
  else
    gst_caps_unref (src_caps);

  cleanup_element (element);
}
Example #14
static void
bp_vis_pcm_handoff (GstElement *sink, GstBuffer *buffer, GstPad *pad, gpointer userdata)
{
    BansheePlayer *player = (BansheePlayer*)userdata;
    GstStructure *structure;
    gint channels, wanted_size;
    gfloat *data;
    BansheePlayerVisDataCallback vis_data_cb;
    
    g_return_if_fail (IS_BANSHEE_PLAYER (player));
    
    vis_data_cb = player->vis_data_cb;

    if (vis_data_cb == NULL) {
        return;
    }

    if (player->vis_thawing) {
        // Flush our buffers out.
        gst_adapter_clear (player->vis_buffer);
        memset (player->vis_fft_sample_buffer, 0, sizeof(gfloat) * SLICE_SIZE);

        player->vis_thawing = FALSE;
    }
    
    GstCaps *caps = gst_buffer_get_caps (buffer);
    structure = gst_caps_get_structure (caps, 0);
    gst_structure_get_int (structure, "channels", &channels);
    gst_caps_unref (caps);
    
    wanted_size = channels * SLICE_SIZE * sizeof (gfloat);

    gst_adapter_push (player->vis_buffer, gst_buffer_copy (buffer));
    
    while ((data = (gfloat *)gst_adapter_peek (player->vis_buffer, wanted_size)) != NULL) {
        gfloat *deinterlaced = g_malloc (wanted_size);
        gfloat *specbuf = g_new (gfloat, SLICE_SIZE * 2);

        gint i, j;

        memcpy (specbuf, player->vis_fft_sample_buffer, SLICE_SIZE * sizeof(gfloat));
        
        for (i = 0; i < SLICE_SIZE; i++) {
            gfloat avg = 0.0f;

            for (j = 0; j < channels; j++) {
                gfloat sample = data[i * channels + j];

                deinterlaced[j * SLICE_SIZE + i] = sample;
                avg += sample;
            }

            avg /= channels;
            specbuf[i + SLICE_SIZE] = avg;
        }

        memcpy (player->vis_fft_sample_buffer, &specbuf[SLICE_SIZE], SLICE_SIZE * sizeof(gfloat));

        gst_fft_f32_window (player->vis_fft, specbuf, GST_FFT_WINDOW_HAMMING);
        gst_fft_f32_fft (player->vis_fft, specbuf, player->vis_fft_buffer);

        for (i = 0; i < SLICE_SIZE; i++) {
            gfloat val;

            GstFFTF32Complex cplx = player->vis_fft_buffer[i];

            val = cplx.r * cplx.r + cplx.i * cplx.i;
            val /= SLICE_SIZE * SLICE_SIZE;
            val = 10.0f * log10f(val);

            val = (val + 60.0f) / 60.0f;
            if (val < 0.0f)
                val = 0.0f;

            specbuf[i] = val;
        }

        vis_data_cb (player, channels, SLICE_SIZE, deinterlaced, SLICE_SIZE, specbuf);
        
        g_free (deinterlaced);
        g_free (specbuf);

        gst_adapter_flush (player->vis_buffer, wanted_size);
    }
}
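The spectrum normalization at the end maps bin power onto a 0..1 range: val = 10 * log10(power) is the bin level in dB, so a full-scale bin (0 dB) becomes (0 + 60) / 60 = 1.0, a -30 dB bin becomes 0.5, and anything at or below -60 dB is clamped to 0.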
Example #15
    void FarsightChannel::OnFakeSinkHandoff(GstElement *fakesink, GstBuffer *buffer, GstPad *pad, gpointer user_data)
    {
        FarsightChannel* self = (FarsightChannel*)user_data;

        static GStaticMutex mutex = G_STATIC_MUTEX_INIT;
        g_static_mutex_lock (&mutex);
        gst_buffer_ref(buffer);

        int rate = 0;
        int channels = 0;
        int width = 0;
        GstCaps *caps;
        GstStructure *structure;
        caps = gst_buffer_get_caps(buffer);
        structure = gst_caps_get_structure(caps, 0);
        gst_structure_get_int(structure, "rate", &rate);
        gst_structure_get_int(structure, "channels", &channels);
        gst_structure_get_int(structure, "width", &width);
        gst_caps_unref(caps);

        if (GST_BUFFER_FLAG_IS_SET(buffer, GST_BUFFER_FLAG_PREROLL))
        {
            LogInfo("Drop fakesink buffer: Preroll audio data packet.");
            gst_buffer_unref(buffer);
            g_static_mutex_unlock (&mutex);
            return;
        }
        if (GST_BUFFER_FLAG_IS_SET(buffer, GST_BUFFER_FLAG_GAP))
        {
            LogInfo("Drop fakesink buffer: Caps audio data packet.");
            gst_buffer_unref(buffer);
            g_static_mutex_unlock (&mutex);
            return;
        }
        if (GST_BUFFER_DURATION(buffer) == 0)
        {
            LogInfo("Drop fakesink buffer: Got audio data packet with 0 duration");
            gst_buffer_unref(buffer);
            g_static_mutex_unlock (&mutex);
            return;
        }
        if (GST_BUFFER_IS_DISCONT(buffer))
        {
            LogInfo("Drop fakesink buffer: Got disconnect audio data packet.");
            gst_buffer_unref(buffer);
            g_static_mutex_unlock (&mutex);
            return;
        }

        if (GST_BUFFER_OFFSET_IS_VALID(buffer))
        {
            guint64 offset = GST_BUFFER_OFFSET (buffer);
        }
        if (GST_BUFFER_OFFSET_END_IS_VALID(buffer))
        {
            guint64 offset = GST_BUFFER_OFFSET_END(buffer);
        }

        u8* data = GST_BUFFER_DATA(buffer);
        u32 size = GST_BUFFER_SIZE(buffer);

        self->HandleAudioData(data, size, rate, width, channels);
        gst_buffer_unref(buffer);
        g_static_mutex_unlock (&mutex);
    }
Example #16
void MediaPlayerPrivate::paint(GraphicsContext* context, const IntRect& rect)
{
    if (context->paintingDisabled())
        return;

    if (!m_player->visible())
        return;
    if (!m_buffer)
        return;

    int width = 0, height = 0;
    int pixelAspectRatioNumerator = 0;
    int pixelAspectRatioDenominator = 0;
    double doublePixelAspectRatioNumerator = 0;
    double doublePixelAspectRatioDenominator = 0;
    double displayWidth;
    double displayHeight;
    double scale, gapHeight, gapWidth;

    GstCaps *caps = gst_buffer_get_caps(m_buffer);

    if (!gst_video_format_parse_caps(caps, NULL, &width, &height) ||
        !gst_video_parse_caps_pixel_aspect_ratio(caps, &pixelAspectRatioNumerator, &pixelAspectRatioDenominator)) {
      gst_caps_unref(caps);
      return;
    }

    displayWidth = width;
    displayHeight = height;
    doublePixelAspectRatioNumerator = pixelAspectRatioNumerator;
    doublePixelAspectRatioDenominator = pixelAspectRatioDenominator;

    cairo_t* cr = context->platformContext();
    cairo_surface_t* src = cairo_image_surface_create_for_data(GST_BUFFER_DATA(m_buffer),
                                                               CAIRO_FORMAT_RGB24,
                                                               width, height,
                                                               4 * width);

    cairo_save(cr);
    cairo_set_operator(cr, CAIRO_OPERATOR_SOURCE);

    displayWidth *= doublePixelAspectRatioNumerator / doublePixelAspectRatioDenominator;
    displayHeight *= doublePixelAspectRatioDenominator / doublePixelAspectRatioNumerator;

    scale = MIN (rect.width () / displayWidth, rect.height () / displayHeight);
    displayWidth *= scale;
    displayHeight *= scale;

    // Calculate gap between border and picture
    gapWidth = (rect.width() - displayWidth) / 2.0;
    gapHeight = (rect.height() - displayHeight) / 2.0;

    // paint the rectangle on the context and draw the surface inside.
    cairo_translate(cr, rect.x() + gapWidth, rect.y() + gapHeight);
    cairo_rectangle(cr, 0, 0, rect.width(), rect.height());
    cairo_scale(cr, doublePixelAspectRatioNumerator / doublePixelAspectRatioDenominator,
                doublePixelAspectRatioDenominator / doublePixelAspectRatioNumerator);
    cairo_scale(cr, scale, scale);
    cairo_set_source_surface(cr, src, 0, 0);
    cairo_fill(cr);
    cairo_restore(cr);

    cairo_surface_destroy(src);
    gst_caps_unref(caps);
}
Example #17
FrameSource::FrameStatus GStreamerBaseFrameSourceImpl::fetch(vx_image image, vx_uint32 /*timeout*/)
{
    if (end)
    {
        close();
        return FrameSource::CLOSED;
    }

    handleGStreamerMessages();

    if (gst_app_sink_is_eos(GST_APP_SINK(sink)))
    {
        close();
        return FrameSource::CLOSED;
    }

    if ((lastFrameTimestamp.toc()/1000.0) > Application::get().getSourceDefaultTimeout())
    {
        close();
        return FrameSource::CLOSED;
    }

    lastFrameTimestamp.tic();

#if GST_VERSION_MAJOR == 0
    std::unique_ptr<GstBuffer, GStreamerObjectDeleter> bufferHolder(
        gst_app_sink_pull_buffer(GST_APP_SINK(sink)));
    GstBuffer* buffer = bufferHolder.get();
#else
    std::unique_ptr<GstSample, GStreamerObjectDeleter> sample(gst_app_sink_pull_sample(GST_APP_SINK(sink)));

    if (!sample)
    {
        close();
        return FrameSource::CLOSED;
    }

    GstBuffer* buffer = gst_sample_get_buffer(sample.get());
#endif

    gint          width;
    gint          height;

#if GST_VERSION_MAJOR == 0
    std::unique_ptr<GstCaps, GStreamerObjectDeleter> bufferCapsHolder(gst_buffer_get_caps(buffer));
    GstCaps* bufferCaps = bufferCapsHolder.get();
#else
    GstCaps* bufferCaps = gst_sample_get_caps(sample.get());
#endif
    // bail out if there are no caps
    assert(gst_caps_get_size(bufferCaps) == 1);
    GstStructure* structure = gst_caps_get_structure(bufferCaps, 0);

    // bail out if width or height are 0
    if (!gst_structure_get_int(structure, "width", &width) ||
            !gst_structure_get_int(structure, "height", &height))
    {
        close();
        return FrameSource::CLOSED;
    }

    int depth = 3;
#if GST_VERSION_MAJOR > 0
    depth = 0;
    const gchar* name = gst_structure_get_name(structure);
    const gchar* format = gst_structure_get_string(structure, "format");

    if (!name || !format)
    {
        close();
        return FrameSource::CLOSED;
    }

    // we support 2 types of data:
    //     video/x-raw, format=RGB   -> 8bit, 3 channels
    //     video/x-raw, format=GRAY8 -> 8bit, 1 channel
    if (strcasecmp(name, "video/x-raw") == 0)
    {
        if (strcasecmp(format, "RGB") == 0)
        {
            depth = 3;
        }
        else if(strcasecmp(format, "GRAY8") == 0)
        {
            depth = 1;
        }
    }
#endif
    if (depth == 0)
    {
        close();
        return FrameSource::CLOSED;
    }

    vx_imagepatch_addressing_t decodedImageAddr;
    decodedImageAddr.dim_x = width;
    decodedImageAddr.dim_y = height;
    decodedImageAddr.stride_x = depth;
    // GStreamer uses a stride equal to the row width in bytes, rounded up to the nearest multiple of 4
    decodedImageAddr.stride_y = ((width*depth+3)/4)*4;
    decodedImageAddr.scale_x = 1;
    decodedImageAddr.scale_y = 1;
    vx_image decodedImage = NULL;
    vx_df_image_e vx_type_map[5] = { VX_DF_IMAGE_VIRT, VX_DF_IMAGE_U8,
                                     VX_DF_IMAGE_VIRT, VX_DF_IMAGE_RGB, VX_DF_IMAGE_RGBX };

    // fetch image width and height
    vx_uint32 actual_width, actual_height;
    vx_df_image_e actual_format;
    NVXIO_SAFE_CALL( vxQueryImage(image, VX_IMAGE_ATTRIBUTE_WIDTH, (void *)&actual_width, sizeof(actual_width)) );
    NVXIO_SAFE_CALL( vxQueryImage(image, VX_IMAGE_ATTRIBUTE_HEIGHT, (void *)&actual_height, sizeof(actual_height)) );
    NVXIO_SAFE_CALL( vxQueryImage(image, VX_IMAGE_ATTRIBUTE_FORMAT, (void *)&actual_format, sizeof(actual_format)) );
    bool needScale = width != (int)configuration.frameWidth || height != (int)configuration.frameHeight;

    // configured and actual image sizes must be the same!
    if ((actual_height != configuration.frameHeight) ||
            (actual_width != configuration.frameWidth) ||
            (actual_format != configuration.format))
    {
        close();

        NVXIO_THROW_EXCEPTION("Actual image [ " << actual_width << " x " << actual_height <<
                              " ] does not equal configuration one [ " << configuration.frameWidth
                              << " x " << configuration.frameHeight << " ]");
    }

    // we assume that the decoded image will have no more than 3 channels per pixel
    if (!devMem)
    {
        NVXIO_ASSERT( cudaSuccess == cudaMallocPitch(&devMem, &devMemPitch, width * 3, height) );
    }

    // check if decoded image format has changed
    if (scaledImage)
    {
        vx_df_image_e scaled_format;
        NVXIO_SAFE_CALL( vxQueryImage(scaledImage, VX_IMAGE_ATTRIBUTE_FORMAT, (void *)&scaled_format, sizeof(scaled_format)) );

        if (scaled_format != vx_type_map[depth])
        {
            vxReleaseImage(&scaledImage);
            scaledImage = NULL;
        }
    }

    if (needScale && !scaledImage)
    {
        scaledImage = vxCreateImage(vxContext, configuration.frameWidth,
                                    configuration.frameHeight, vx_type_map[depth]);
        NVXIO_CHECK_REFERENCE( scaledImage );
    }

#if GST_VERSION_MAJOR == 0
    bool needConvert = configuration.format != VX_DF_IMAGE_RGB;
    void * decodedPtr = GST_BUFFER_DATA(buffer);
#else
    GstMapInfo info;

    gboolean success = gst_buffer_map(buffer, &info, (GstMapFlags)GST_MAP_READ);
    if (!success)
    {
        printf("GStreamer: unable to map buffer\n");
        close();
        return FrameSource::CLOSED;
    }

    bool needConvert = configuration.format != vx_type_map[depth];
    void * decodedPtr = info.data;
#endif

    if (!needConvert && !needScale)
    {
        decodedImage = vxCreateImageFromHandle(vxContext, vx_type_map[depth], &decodedImageAddr,
                                               &decodedPtr, VX_IMPORT_TYPE_HOST);
        NVXIO_CHECK_REFERENCE( decodedImage );
        NVXIO_SAFE_CALL( nvxuCopyImage(vxContext, decodedImage, image) );
    }
    else
    {
        // 1. upload decoded image to CUDA buffer
        NVXIO_ASSERT( cudaSuccess == cudaMemcpy2D(devMem, devMemPitch,
                                                  decodedPtr, decodedImageAddr.stride_y,
                                                  decodedImageAddr.dim_x * depth, decodedImageAddr.dim_y,
                                                  cudaMemcpyHostToDevice) );

        // 2. create vx_image wrapper for decoded buffer
        decodedImageAddr.stride_y = static_cast<vx_int32>(devMemPitch);
        decodedImage = vxCreateImageFromHandle(vxContext, vx_type_map[depth], &decodedImageAddr,
                                               &devMem, NVX_IMPORT_TYPE_CUDA);
        NVXIO_CHECK_REFERENCE( decodedImage );

        if (needScale)
        {
            // 3. scale image
            NVXIO_SAFE_CALL( vxuScaleImage(vxContext, decodedImage, scaledImage, VX_INTERPOLATION_TYPE_BILINEAR) );

            // 4. convert to dst image
            NVXIO_SAFE_CALL( vxuColorConvert(vxContext, scaledImage, image) );
        }
        else
        {
            // 3. convert to dst image
            NVXIO_SAFE_CALL( vxuColorConvert(vxContext, decodedImage, image) );
        }
    }

#if GST_VERSION_MAJOR != 0
    gst_buffer_unmap(buffer, &info);
#endif

    NVXIO_SAFE_CALL( vxReleaseImage(&decodedImage) );

    return FrameSource::OK;
}
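The stride formula ((width*depth+3)/4)*4 rounds each row up to a multiple of 4 bytes: for example, width = 101 with depth = 3 gives 303 bytes of pixel data per row, padded to stride_y = 304.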
Example #18
//
// decode buffer
//
static IplImage *icvRetrieveFrame_GStreamer(CvCapture_GStreamer *cap)
{
	if(!cap->buffer)
		return 0;

//	printf("getting buffercaps\n");

	GstCaps* caps = gst_buffer_get_caps(cap->buffer);

	assert(gst_caps_get_size(caps) == 1);

	GstStructure* structure = gst_caps_get_structure(caps, 0);

	gint bpp, endianness, redmask, greenmask, bluemask;

	if(!gst_structure_get_int(structure, "bpp", &bpp) ||
	   !gst_structure_get_int(structure, "endianness", &endianness) ||
	   !gst_structure_get_int(structure, "red_mask", &redmask) ||
	   !gst_structure_get_int(structure, "green_mask", &greenmask) ||
	   !gst_structure_get_int(structure, "blue_mask", &bluemask)) {
		gchar *capsStr = gst_caps_to_string(caps);
		printf("missing essential information in buffer caps, %s\n", capsStr);
		g_free(capsStr);
		gst_caps_unref(caps);
		return 0;
	}

	gchar *capsStr = gst_caps_to_string(caps);
	printf("buffer has %d bpp, endianness %d, rgb %x %x %x, %s\n", bpp, endianness, redmask, greenmask, bluemask, capsStr);
	g_free(capsStr);

	if(!redmask || !greenmask || !bluemask) {
		gst_caps_unref(caps);
		return 0;
	}

	if(!cap->frame) {
		gint height, width;

		if(!gst_structure_get_int(structure, "width", &width) ||
		   !gst_structure_get_int(structure, "height", &height))
			return 0;

//		printf("creating frame %dx%d\n", width, height);

		cap->frame = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 3);
	}

	gst_caps_unref(caps);

	unsigned char *data = GST_BUFFER_DATA(cap->buffer);

	printf("generating shifts\n");

	IplImage *frame = cap->frame;
	unsigned nbyte = bpp >> 3;
	unsigned redshift, blueshift, greenshift;
	unsigned mask = redmask;
	for(redshift = 0, mask = redmask; (mask & 1) == 0; mask >>= 1, redshift++)
		;
	for(greenshift = 0, mask = greenmask; (mask & 1) == 0; mask >>= 1, greenshift++)
		;
	for(blueshift = 0, mask = bluemask; (mask & 1) == 0; mask >>= 1, blueshift++)
		;

	printf("shifts: %u %u %u\n", redshift, greenshift, blueshift);

	for(int r = 0; r < frame->height; r++) {
		for(int c = 0; c < frame->width; c++, data += nbyte) {
			int at = r * frame->widthStep + c * 3;
			frame->imageData[at] = ((*((gint *)data)) & redmask) >> redshift;
			frame->imageData[at+1] = ((*((gint *)data)) & greenmask) >> greenshift;
			frame->imageData[at+2] = ((*((gint *)data)) & bluemask) >> blueshift;
		}
	}

//	printf("converted buffer\n");

	gst_buffer_unref(cap->buffer);
	cap->buffer = 0;

	return cap->frame;
}
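The shift loops locate the least significant set bit of each mask: with typical caps of red_mask = 0xff0000, green_mask = 0xff00 and blue_mask = 0xff, they yield redshift = 16, greenshift = 8 and blueshift = 0, so each channel is isolated with (pixel & mask) >> shift.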
Example #19
void GStreamerWrapper::newAudioSinkPrerollCallback( GstBuffer* audioSinkBuffer )
{
	std::lock_guard< std::mutex > lock( m_AudioMutex );

	if ( m_cAudioBuffer == NULL )
	{
		m_iAudioBufferSize = GST_BUFFER_SIZE( audioSinkBuffer );
		m_cAudioBuffer = new unsigned char[m_iAudioBufferSize];

		////////////////////////////////////////////////////////////////////////// AUDIO DATA

		/*
			Note: For some reason, with this version of GStreamer the only way to retrieve the audio metadata
			is to read the caps from the audio appsink buffer and via a GstStructure we can retrieve the needed
			values from the caps. After lots of research I also found another possibility by using GstAudioInfo
			but this struct is not available in this version.

			If a later version of GStreamer is ever compiled in a valid way so it can be used with Visual Studio
			it would definitely be a good idea to retrieve the audio information somewhere else in the code.
			But this piece of code does it well for now.
		*/

		// Get Audio metadata
		// http://gstreamer.freedesktop.org/data/doc/gstreamer/head/pwg/html/section-types-definitions.html
		GstCaps* audioCaps = gst_buffer_get_caps( audioSinkBuffer );
		GstStructure* gstStructure = gst_caps_get_structure( audioCaps, 0 );

		// Is audio data signed or not?
		gboolean isAudioSigned;
		gst_structure_get_boolean( gstStructure, "signed", &isAudioSigned );
		m_bIsAudioSigned = isAudioSigned;

		// Number of channels
		gst_structure_get_int( gstStructure, "channels", &m_iNumAudioChannels );
		// Audio sample rate
		gst_structure_get_int( gstStructure, "rate", &m_iAudioSampleRate );
		// Audio width
		gst_structure_get_int( gstStructure, "width", &m_iAudioWidth );

		// Calculate the per-channel sample count: total buffer size divided by
		// the channel count and the sample width in bytes
		m_iAudioDecodeBufferSize = m_iAudioBufferSize / m_iNumAudioChannels / ( m_iAudioWidth / 8 );

		// Audio endianness
		gint audioEndianness;
		gst_structure_get_int( gstStructure, "endianness",  &audioEndianness );
		m_AudioEndianness = (Endianness)audioEndianness;

		gst_caps_unref( audioCaps );
	}
	else
	{
		// The Audio Buffer size may change during runtime so we keep track if the buffer changes
		// If so, delete the old buffer and re-allocate it with the respective new buffer size
		int bufferSize = GST_BUFFER_SIZE( audioSinkBuffer );
		if ( m_iAudioBufferSize != bufferSize )
		{
			// Allocate the audio data array according to the audio appsink buffer size
			m_iAudioBufferSize = bufferSize;
			delete [] m_cAudioBuffer;
			m_cAudioBuffer = NULL;

			m_cAudioBuffer = new unsigned char[m_iAudioBufferSize];
		}
	}

	// Copy the audio appsink buffer data to our unsigned char array
	memcpy( (unsigned char *)m_cAudioBuffer, (unsigned char *)GST_BUFFER_DATA( audioSinkBuffer ), GST_BUFFER_SIZE( audioSinkBuffer ) );
}
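For reference, the GstAudioInfo route mentioned in the comment above does exist in GStreamer 1.x. A minimal sketch, assuming 1.x (where caps typically come from gst_sample_get_caps() rather than gst_buffer_get_caps(), and the helper name read_audio_format is hypothetical):

#include <gst/audio/audio.h>

static gboolean read_audio_format(GstCaps *caps, gint *channels, gint *rate, gint *width)
{
    GstAudioInfo info;

    // parses rate, channels, sample format etc. out of the caps in one call
    if (!gst_audio_info_from_caps(&info, caps))
        return FALSE;

    *channels = GST_AUDIO_INFO_CHANNELS(&info);
    *rate     = GST_AUDIO_INFO_RATE(&info);
    *width    = GST_AUDIO_INFO_WIDTH(&info);  /* bits per sample */
    return TRUE;
}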
Example #20
/*!
 * \brief CvCapture_GStreamer::retrieveFrame
 * \return IplImage pointer. [Transfer Full]
 *  Retrieve the previously grabbed buffer and wrap it in an IplImage structure
 */
IplImage * CvCapture_GStreamer::retrieveFrame(int)
{
    if(!buffer)
        return 0;

    //construct a frame header if we did not have any yet
    if(!frame)
    {
        gint height, width;

        //reuse the caps ptr
        if (buffer_caps)
            gst_caps_unref(buffer_caps);

#if GST_VERSION_MAJOR == 0
        buffer_caps = gst_buffer_get_caps(buffer);
#else
        // gst_sample_get_caps() returns a borrowed reference; take our own,
        // since the reuse path above unrefs buffer_caps
        buffer_caps = gst_caps_ref(gst_sample_get_caps(sample));
#endif
        // bail out if there are no caps
        assert(gst_caps_get_size(buffer_caps) == 1);
        GstStructure* structure = gst_caps_get_structure(buffer_caps, 0);

        // bail out if width or height are 0
        if(!gst_structure_get_int(structure, "width", &width) ||
                !gst_structure_get_int(structure, "height", &height))
        {
            return 0;
        }


        int depth = 3;
#if GST_VERSION_MAJOR > 0
        depth = 0;
        const gchar* name = gst_structure_get_name(structure);
        const gchar* format = gst_structure_get_string(structure, "format");

        if (!name || !format)
            return 0;

        // we support 3 types of data:
        //     video/x-raw, format=BGR   -> 8bit, 3 channels
        //     video/x-raw, format=GRAY8 -> 8bit, 1 channel
        //     video/x-bayer             -> 8bit, 1 channel
        // bayer data is never decoded, the user is responsible for that
        // everything is 8 bit, so we just test the caps for bit depth

        if (strcasecmp(name, "video/x-raw") == 0)
        {
            if (strcasecmp(format, "BGR") == 0) {
                depth = 3;
            }
            else if(strcasecmp(format, "GRAY8") == 0){
                depth = 1;
            }
        }
        else if (strcasecmp(name, "video/x-bayer") == 0)
        {
            depth = 1;
        }
#endif
        if (depth > 0) {
            frame = cvCreateImageHeader(cvSize(width, height), IPL_DEPTH_8U, depth);
        }else{
            return 0;
        }
    }

    // gstreamer expects us to handle the memory at this point
    // so we can just wrap the raw buffer and be done with it
#if GST_VERSION_MAJOR == 0
    frame->imageData = (char *)GST_BUFFER_DATA(buffer);
#else
    // the data ptr in GstMapInfo is only valid for the lifetime of the mapinfo object.
    // TODO: check if reusing the mapinfo object is ok.

    gboolean success = gst_buffer_map(buffer, info, (GstMapFlags)GST_MAP_READ);
    if (!success){
        //something weird went wrong here. abort. abort.
        //fprintf(stderr,"GStreamer: unable to map buffer");
        return 0;
    }
    frame->imageData = (char*)info->data;
    gst_buffer_unmap(buffer, info);
#endif

    return frame;
}