GstFlowReturn frame_handler(GstSample * sample, GStreamerFramesReceiver * pClass)
{
	GstBuffer * buffer = gst_sample_get_buffer(sample);

	GstMapInfo info;
	if (!gst_buffer_map(buffer, &info, GST_MAP_READ))
		return GST_FLOW_ERROR;

	if (pClass)
	{
		if (pClass->InputFrameWidth() == 0)
		{
			int width, height;
			PixelFormat pixelFormat;
			GstCaps *caps = gst_sample_get_caps(sample);
			ExtractImageParams(caps, width, height, pixelFormat);
			pClass->InputFrameWidth()  = width;
			pClass->InputFrameHeight() = height;
			pClass->InputPixelFormat() = pixelFormat;
		}

		pClass->CopyFrameData(info.data, info.size);
	}

	gst_buffer_unmap(buffer, &info);

	return GST_FLOW_OK;
}
static GstSample *
totem_gst_tag_list_get_cover_real (GstTagList *tag_list)
{
  GstSample *cover_sample = NULL;
  guint i;

  for (i = 0; ; i++) {
    GstSample *sample;
    GstCaps *caps;
    const GstStructure *caps_struct;
    int type = GST_TAG_IMAGE_TYPE_UNDEFINED;

    if (!gst_tag_list_get_sample_index (tag_list, GST_TAG_IMAGE, i, &sample))
      break;

    caps = gst_sample_get_caps (sample);
    caps_struct = gst_caps_get_structure (caps, 0);
    gst_structure_get_enum (caps_struct,
			    "image-type",
			    GST_TYPE_TAG_IMAGE_TYPE,
			    &type);
    if (type == GST_TAG_IMAGE_TYPE_UNDEFINED) {
      if (cover_sample == NULL) {
	/* take a ref here since we will continue and unref below */
	cover_sample = gst_sample_ref (sample);
      }
    } else if (type == GST_TAG_IMAGE_TYPE_FRONT_COVER) {
      /* drop any undefined image kept earlier; the front cover wins */
      if (cover_sample != NULL)
	gst_sample_unref (cover_sample);
      cover_sample = sample;
      break;
    }
    gst_sample_unref (sample);
  }

  return cover_sample;
}
Example 3
bool MediaImpl::_videoPull()
{
//  qDebug() << "video pull" << endl;

  GstSample *sample = NULL;
  GstStructure *structure = NULL;
  GstCaps* caps = NULL;
  GstBuffer *buffer = NULL;

  // Retrieve the sample
  sample = queue_input_buf.get();

  if (sample == NULL)
  {
    // Either means we are not playing or we have reached EOS.
    return false;
  }
  else
  {
    caps = gst_sample_get_caps(sample);
    structure = gst_caps_get_structure(caps, 0);
    buffer = gst_sample_get_buffer(sample);

    int width  = 640;
    int height = 480;
    int bpp    = 32;
    int depth  = 32;

    gst_structure_get_int(structure, "width",  &width);
    gst_structure_get_int(structure, "height", &height);
    // TODO: use gst_video_info_from_caps if we want to support many different formats
    // otherwise, since we set the caps ourselves, we can assume bpp is 32 and depth too.

    _width = width;
    _height = height;
    int size = _width * _height;

//    video->resize(width, height);

//        qDebug() << gst_structure_to_string(capsStruct) << endl;
//        qDebug() << width << "x" << height << "=" << width*height << "(" << width*height*4 << "," << width*height*3 << ")" << endl;
//        qDebug() << "bpp: " << bpp << " depth: " << depth << endl;
//        qDebug() << "Buffer size: " << GST_BUFFER_SIZE(buffer) << endl;

    GstMapInfo map; 
    if (gst_buffer_map(buffer, &map, GST_MAP_READ))
    { 
      // For debugging:
      //gst_util_dump_mem(map.data, map.size)
      _data = map.data;
      gst_buffer_unmap(buffer, &map); 
      if(this->_frame != NULL)
        queue_output_buf.put(this->_frame);
      _frame = sample;
    } 

    return true;
  }
}
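The TODO above suggests gst_video_info_from_caps; here is a minimal sketch of that approach, reusing the caps variable already retrieved in the function (assumes <gst/video/video.h> is included):

    GstVideoInfo vinfo;
    if (gst_video_info_from_caps(&vinfo, caps)) {
      int width  = GST_VIDEO_INFO_WIDTH(&vinfo);
      int height = GST_VIDEO_INFO_HEIGHT(&vinfo);
      /* first-plane row stride; covers padded layouts that width * 4 would miss */
      int stride = GST_VIDEO_INFO_PLANE_STRIDE(&vinfo, 0);
    }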
Example 4
/*!
  Returns audio format for a sample.
  If the buffer doesn't have a valid audio format, an empty QAudioFormat is returned.
*/
QAudioFormat QGstUtils::audioFormatForSample(GstSample *sample)
{
    GstCaps* caps = gst_sample_get_caps(sample);
    if (!caps)
        return QAudioFormat();

    return QGstUtils::audioFormatForCaps(caps);
}
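A hedged usage sketch for the helper above; "appsink" stands for an already-configured GstAppSink and is not part of the original code:

    GstSample *sample = gst_app_sink_pull_sample(appsink); /* appsink is an assumption */
    if (sample) {
        QAudioFormat format = QGstUtils::audioFormatForSample(sample);
        if (format.isValid())
            qDebug() << "sample rate:" << format.sampleRate();
        gst_sample_unref(sample);
    }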
Example 5
/*
  This function will be called in a separate thread when our appsink
  says there is data for us. user_data has to be defined
  when calling g_signal_connect. It can be used to pass objects etc.
  from your other function to the callback.
*/
GstFlowReturn callback (GstElement* sink, void* user_data)
{
    GstSample* sample = NULL;
    /* Retrieve the buffer */
    g_signal_emit_by_name(sink, "pull-sample", &sample, NULL);

    if (sample)
    {
        // we have a valid sample
        // do things with the image here
        static guint framecount = 0;
        int pixel_data = -1;

        GstBuffer* buffer = gst_sample_get_buffer(sample);
        GstMapInfo info; // contains the actual image
        if (gst_buffer_map(buffer, &info, GST_MAP_READ))
        {
            GstVideoInfo* video_info = gst_video_info_new();
            if (!gst_video_info_from_caps(video_info, gst_sample_get_caps(sample)))
            {
                // Could not parse video info (should not happen)
                g_warning("Failed to parse video info");
                gst_video_info_free(video_info);
                gst_buffer_unmap(buffer, &info);
                gst_sample_unref(sample);
                return GST_FLOW_ERROR;
            }

            /* Get a pointer to the image data */
            unsigned char* data = info.data;

            /* Get the pixel value of the center pixel */
            int stride = video_info->finfo->bits / 8;
            unsigned int pixel_offset = video_info->width / 2 * stride +
                video_info->width * video_info->height / 2 * stride;

            // this is only one pixel
            // when dealing with formats like BGRx
            // pixel_data will have to consist out of
            // pixel_offset   => B
            // pixel_offset+1 => G
            // pixel_offset+2 => R
            // pixel_offset+3 => x
            pixel_data = info.data[pixel_offset];

            gst_buffer_unmap(buffer, &info);
            gst_video_info_free(video_info);
        }

        GstClockTime timestamp = GST_BUFFER_PTS(buffer);
        g_print("Captured frame %d, Pixel Value=%03d Timestamp=%" GST_TIME_FORMAT "            \r",
                framecount, pixel_data,
                GST_TIME_ARGS(timestamp));
        framecount++;


        // delete our reference so that gstreamer can handle the sample
        gst_sample_unref (sample);
    }
    return GST_FLOW_OK;
}
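The header comment above mentions g_signal_connect; a minimal wiring sketch, where "sink" is an assumed appsink element and user_data whatever the callback expects (appsink only emits signals when explicitly enabled):

    /* "sink" and user_data are assumptions, not part of the example above */
    g_object_set(sink, "emit-signals", TRUE, NULL);
    g_signal_connect(sink, "new-sample", G_CALLBACK(callback), user_data);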
Example 6
static GdkPixbuf *
gst_thumbnailer_cover_from_tags (GstTagList   *tags,
                                 GCancellable *cancellable)
{
  GstSample          *cover = NULL;
  guint               i;
  GstSample          *sample;
  GstCaps            *caps;
  const GstStructure *caps_struct;
  gint                type;
  GstBuffer          *buffer;
  GdkPixbuf          *pixbuf = NULL;

  for (i = 0; ; i++)
    {
      if (g_cancellable_is_cancelled (cancellable))
        break;

      /* look for image in the tags */
      if (!gst_tag_list_get_sample_index (tags, GST_TAG_IMAGE, i, &sample))
        break;

      caps = gst_sample_get_caps (sample);
      caps_struct = gst_caps_get_structure (caps, 0);
      gst_structure_get_enum (caps_struct,
                              "image-type",
                              GST_TYPE_TAG_IMAGE_TYPE,
                              &type);

      if (type == GST_TAG_IMAGE_TYPE_FRONT_COVER)
        {
          /* found the cover */
          cover = sample;
          break;
        }

      gst_sample_unref (sample);
    }

  if (cover == NULL
      && !g_cancellable_is_cancelled (cancellable))
    {
      /* look for preview image */
      gst_tag_list_get_sample_index (tags, GST_TAG_PREVIEW_IMAGE, 0, &cover);
    }

  if (cover != NULL)
    {
      /* create image */
      buffer = gst_sample_get_buffer (cover);
      pixbuf = gst_thumbnailer_buffer_to_pixbuf (buffer);
      gst_sample_unref (cover);
    }

  return pixbuf;
}
Example 7
static GstVideoInfo getVideoInfo(GstSample * sample){
    GstCaps *caps = gst_sample_get_caps(sample);
    GstVideoInfo vinfo;
    gst_video_info_init(&vinfo);
    if(caps){
        if(!gst_video_info_from_caps(&vinfo, caps)){
            ofLogError() << "couldn't parse sample caps";
        }
    }else{
        ofLogError() << "couldn't get sample caps";
    }
    return vinfo;
}
Example 8
/* main context */
static gboolean display_frame(gpointer video_decoder)
{
    SpiceGstDecoder *decoder = (SpiceGstDecoder*)video_decoder;
    SpiceGstFrame *gstframe;
    GstCaps *caps;
    gint width, height;
    GstStructure *s;
    GstBuffer *buffer;
    GstMapInfo mapinfo;

    g_mutex_lock(&decoder->queues_mutex);
    decoder->timer_id = 0;
    gstframe = g_queue_pop_head(decoder->display_queue);
    g_mutex_unlock(&decoder->queues_mutex);
    /* If the queue is empty we don't even need to reschedule */
    g_return_val_if_fail(gstframe, G_SOURCE_REMOVE);

    if (!gstframe->sample) {
        spice_warning("got a frame without a sample!");
        goto error;
    }

    caps = gst_sample_get_caps(gstframe->sample);
    if (!caps) {
        spice_warning("GStreamer error: could not get the caps of the sample");
        goto error;
    }

    s = gst_caps_get_structure(caps, 0);
    if (!gst_structure_get_int(s, "width", &width) ||
        !gst_structure_get_int(s, "height", &height)) {
        spice_warning("GStreamer error: could not get the size of the frame");
        goto error;
    }

    buffer = gst_sample_get_buffer(gstframe->sample);
    if (!gst_buffer_map(buffer, &mapinfo, GST_MAP_READ)) {
        spice_warning("GStreamer error: could not map the buffer");
        goto error;
    }

    stream_display_frame(decoder->base.stream, gstframe->frame,
                         width, height, mapinfo.data);
    gst_buffer_unmap(buffer, &mapinfo);

 error:
    free_gst_frame(gstframe);
    schedule_frame(decoder);
    return G_SOURCE_REMOVE;
}
GstFlowReturn GStreamerImageStream::on_new_preroll(GstAppSink *appsink, GStreamerImageStream *user_data)
{
    // get the sample from appsink

    GstSample *sample = gst_app_sink_pull_preroll(appsink);

    // get sample info

    GstCaps *caps = gst_sample_get_caps(sample);
    GstStructure *structure = gst_caps_get_structure(caps, 0);

    int width = 0;
    int height = 0;

    gst_structure_get_int(structure, "width", &width);
    gst_structure_get_int(structure, "height", &height);

    if (width<=0 || height<=0)
    {
        OSG_NOTICE<<"Error: video size invalid width="<<width<<", height="<<height<<std::endl;
        gst_sample_unref(sample);
        return GST_FLOW_ERROR;
    }

    if (user_data->_width != width || user_data->_height != height)
    {
        user_data->_width = width;
        user_data->_height = height;


        int row_width = width*3;
        if ((row_width%4)!=0)
        {
            row_width += (4-(row_width%4));
        }

        // if buffer previously assigned free it before allocating new buffer.
        if (user_data->_internal_buffer) free(user_data->_internal_buffer);

        // allocate buffer
        user_data->_internal_buffer = (unsigned char*)malloc(sizeof(unsigned char)*row_width*height);

        // assign buffer to image
        user_data->setImage(user_data->_width, user_data->_height, 1, GL_RGB, GL_RGB, GL_UNSIGNED_BYTE, user_data->_internal_buffer, osg::Image::NO_DELETE, 4);
    }

    // clean resources
    gst_sample_unref(sample);

    return GST_FLOW_OK;
}
Example 10
void MediaPlayer::drawVideoFrame(QPainter &p, const QRect &rect)
{
    QMutexLocker m( &m_lastVideoSampleMutex );

    if ( !m_lastVideoSample )
        return;

    // get the snapshot buffer format now. We set the caps on the appsink so
    // that it can only be an rgb buffer.
    GstCaps *caps = gst_sample_get_caps( m_lastVideoSample );

    if ( !caps )
    {
        reportError( "could not get caps for the new video sample" );
        return;
    }

    GstStructure * structure = gst_caps_get_structure( caps, 0 );

    // We need to get the final caps on the buffer to get the size
    int width = 0;
    int height = 0;

    gst_structure_get_int( structure, "width", &width );
    gst_structure_get_int( structure, "height", &height );

    if ( !width || !height )
    {
        reportError( "could not get video height and width" );
        return;
    }

    // Create pixmap from buffer and save, gstreamer video buffers have a stride that
    // is rounded up to the nearest multiple of 4
    GstBuffer *buffer = gst_sample_get_buffer( m_lastVideoSample );
    GstMapInfo map;

    if ( !gst_buffer_map( buffer, &map, GST_MAP_READ ) )
    {
        reportError( "could not map video buffer" );
        return;
    }

    p.drawImage( rect, QImage( map.data, width, height, GST_ROUND_UP_4 (width * 4), QImage::Format_RGB32 ), QRect( 0, 0, width, height ) );

    // And clean up
    gst_buffer_unmap( buffer, &map );
}
Handle<Value> gvalue_to_v8(const GValue *gv) {
	switch(G_VALUE_TYPE(gv)) {
		case G_TYPE_STRING:
			return gchararray_to_v8(gv);
		case G_TYPE_BOOLEAN:
			return Nan::New<Boolean>(g_value_get_boolean(gv));
		case G_TYPE_INT:
			return Nan::New<Number>(g_value_get_int(gv));
		case G_TYPE_UINT:
			return Nan::New<Number>(g_value_get_uint(gv));
		case G_TYPE_FLOAT:
			return Nan::New<Number>(g_value_get_float(gv));
		case G_TYPE_DOUBLE:
			return Nan::New<Number>(g_value_get_double(gv));
	}

	if(GST_VALUE_HOLDS_ARRAY(gv)) {
		return gstvaluearray_to_v8(gv);
	} else if(GST_VALUE_HOLDS_BUFFER(gv)) {
		GstBuffer *buf = gst_value_get_buffer(gv);
		return gstbuffer_to_v8(buf);
	} else if(GST_VALUE_HOLDS_SAMPLE(gv)) {
		GstSample *sample = gst_value_get_sample(gv);
		Local<Object> caps = Nan::New<Object>();
		GstCaps *gcaps = gst_sample_get_caps(sample);
		if (gcaps) {
			const GstStructure *structure = gst_caps_get_structure(gcaps,0);
			if (structure) gst_structure_to_v8(caps, structure);
		}

		Local<Object> result = Nan::New<Object>();
		result->Set(Nan::New("buf").ToLocalChecked(), gstsample_to_v8(sample));
		result->Set(Nan::New("caps").ToLocalChecked(), caps);
		return result;
	}

	//printf("Value is of unhandled type %s\n", G_VALUE_TYPE_NAME(gv));

	/* Attempt to transform it into a GValue of type STRING */
	if(g_value_type_transformable (G_VALUE_TYPE(gv), G_TYPE_STRING)) {
		GValue b = G_VALUE_INIT;
		g_value_init(&b, G_TYPE_STRING);
		g_value_transform(gv, &b);
		return gchararray_to_v8(&b);
	}

	return Nan::Undefined();
}
Example 12
GstFlowReturn MediaImpl::gstNewSampleCallback(GstElement*, MediaImpl *p)
{
    // Make it thread-safe.
    p->lockMutex();

    // Get next frame (NULL when the pipeline is stopped or at EOS).
    GstSample *sample = gst_app_sink_pull_sample(GST_APP_SINK(p->_appsink0));
    if (sample == NULL)
    {
        p->unlockMutex();
        return gst_app_sink_is_eos(GST_APP_SINK(p->_appsink0)) ? GST_FLOW_EOS : GST_FLOW_ERROR;
    }

    // Unref last frame.
    p->_freeCurrentSample();

    // Set current frame.
    p->_currentFrameSample = sample;

    // For live sources, video dimensions have not been set, because
    // gstPadAddedCallback is never called. Fix dimensions from first sample /
    // caps we receive.
    if (p->_isSharedMemorySource && ( p->_padHandlerData.width == -1 ||
                                      p->_padHandlerData.height == -1)) {
        GstCaps *caps = gst_sample_get_caps(sample);
        GstStructure *structure;
        structure = gst_caps_get_structure(caps, 0);
        gst_structure_get_int(structure, "width",  &p->_padHandlerData.width);
        gst_structure_get_int(structure, "height", &p->_padHandlerData.height);
        // g_print("Size is %u x %u\n", _padHandlerData.width, _padHandlerData.height);
    }

    // Try to retrieve data bits of frame.
    GstMapInfo& map = p->_mapInfo;
    GstBuffer *buffer = gst_sample_get_buffer( sample );
    if (gst_buffer_map(buffer, &map, GST_MAP_READ))
    {
        p->_currentFrameBuffer = buffer;
        // For debugging:
        //gst_util_dump_mem(map.data, map.size)

        // Retrieve data from map info.
        p->_data = map.data;

        // Bits have changed.
        p->_bitsChanged = true;
    }

    p->unlockMutex();

    return GST_FLOW_OK;
}
PassRefPtr<BitmapTexture> MediaPlayerPrivateGStreamerBase::updateTexture(TextureMapper* textureMapper)
{
    WTF::GMutexLocker<GMutex> lock(m_sampleMutex);
    if (!GST_IS_SAMPLE(m_sample.get()))
        return nullptr;

    GstCaps* caps = gst_sample_get_caps(m_sample.get());
    if (!caps)
        return nullptr;

    GstVideoInfo videoInfo;
    gst_video_info_init(&videoInfo);
    if (!gst_video_info_from_caps(&videoInfo, caps))
        return nullptr;

    IntSize size = IntSize(GST_VIDEO_INFO_WIDTH(&videoInfo), GST_VIDEO_INFO_HEIGHT(&videoInfo));
    RefPtr<BitmapTexture> texture = textureMapper->acquireTextureFromPool(size, GST_VIDEO_INFO_HAS_ALPHA(&videoInfo) ? BitmapTexture::SupportsAlpha : BitmapTexture::NoFlag);
    GstBuffer* buffer = gst_sample_get_buffer(m_sample.get());

#if GST_CHECK_VERSION(1, 1, 0)
    GstVideoGLTextureUploadMeta* meta;
    if ((meta = gst_buffer_get_video_gl_texture_upload_meta(buffer))) {
        if (meta->n_textures == 1) { // BGRx & BGRA formats use only one texture.
            const BitmapTextureGL* textureGL = static_cast<const BitmapTextureGL*>(texture.get());
            guint ids[4] = { textureGL->id(), 0, 0, 0 };

            if (gst_video_gl_texture_upload_meta_upload(meta, ids))
                return texture;
        }
    }
#endif

    // Right now the TextureMapper only supports chromas with one plane
    ASSERT(GST_VIDEO_INFO_N_PLANES(&videoInfo) == 1);

    GstVideoFrame videoFrame;
    if (!gst_video_frame_map(&videoFrame, &videoInfo, buffer, GST_MAP_READ))
        return nullptr;

    int stride = GST_VIDEO_FRAME_PLANE_STRIDE(&videoFrame, 0);
    const void* srcData = GST_VIDEO_FRAME_PLANE_DATA(&videoFrame, 0);
    texture->updateContents(srcData, WebCore::IntRect(WebCore::IntPoint(0, 0), size), WebCore::IntPoint(0, 0), stride, BitmapTexture::UpdateCannotModifyOriginalImageData);
    gst_video_frame_unmap(&videoFrame);

    return texture;
}
Example 14
/*
 * Write camera frame to Object Detection process
 */
GstFlowReturn VideoSender::newBufferOBCB(GstAppSink *sink, gpointer user_data)
{
  qDebug() << "In" << __FUNCTION__;

  VideoSender *vs = static_cast<VideoSender *>(user_data);

  // Get new video sample
  GstSample *sample = gst_app_sink_pull_sample(sink);
  if (sample == NULL) {
    qWarning("%s: Failed to get new sample", __FUNCTION__);
    return GST_FLOW_OK;
  }

  if (!vs->ODprocessReady) {
    qDebug() << "ODprocess not ready yet, not sending frame";
    gst_sample_unref(sample);
    return GST_FLOW_OK;
  }

  GstCaps *caps = gst_sample_get_caps(sample);
  if (caps == NULL) {
    qWarning("%s: Failed to get caps of the sample", __FUNCTION__);
    gst_sample_unref(sample);
    return GST_FLOW_OK;
  }

  gint width, height;
  GstStructure *gststruct = gst_caps_get_structure(caps, 0);
  gst_structure_get_int(gststruct,"width", &width);
  gst_structure_get_int(gststruct,"height", &height);

  GstBuffer *buffer = gst_sample_get_buffer(sample);
  GstMapInfo map;
  if (gst_buffer_map(buffer, &map, GST_MAP_READ)) {

    vs->ODdata[OB_VIDEO_PARAM_WD3] = width >> 3;
    vs->ODdata[OB_VIDEO_PARAM_HD3] = height >> 3;
    vs->ODdata[OB_VIDEO_PARAM_BPP] = map.size * 8 / (width * height);

    if (vs->ODprocess) {
      vs->ODprocessReady = false;
      vs->ODprocess->write((const char *)vs->ODdata, sizeof(vs->ODdata));
      vs->ODprocess->write((const char *)map.data, map.size);
    }
    gst_buffer_unmap(buffer, &map);
  } else {
    qWarning("%s: Failed to map sample buffer", __FUNCTION__);
  }

  gst_sample_unref(sample);
  return GST_FLOW_OK;
}
Example 15
static void
check_unsync_v24 (const GstTagList * tags, const gchar * file)
{
  const GValue *val;
  GstSample *sample;
  GstBuffer *buf;
  gchar *album = NULL;
  gchar *title = NULL;
  gchar *artist = NULL;
  GstMapInfo map;

  fail_unless (gst_tag_list_get_string (tags, GST_TAG_TITLE, &title));
  fail_unless (title != NULL);
  fail_unless_equals_string (title, "Starlight");
  g_free (title);

  fail_unless (gst_tag_list_get_string (tags, GST_TAG_ALBUM, &album));
  fail_unless (album != NULL);
  fail_unless_equals_string (album, "L'albumRockVol.4 CD1");
  g_free (album);

  fail_unless (gst_tag_list_get_string (tags, GST_TAG_ARTIST, &artist));
  fail_unless (artist != NULL);
  fail_unless_equals_string (artist, "Muse");
  g_free (artist);

  val = gst_tag_list_get_value_index (tags, GST_TAG_IMAGE, 0);
  fail_unless (val != NULL);
  fail_unless (GST_VALUE_HOLDS_SAMPLE (val));
  sample = gst_value_get_sample (val);
  fail_unless (sample != NULL);
  fail_unless (gst_sample_get_caps (sample) != NULL);
  buf = gst_sample_get_buffer (sample);
  fail_unless (buf != NULL);
  gst_buffer_map (buf, &map, GST_MAP_READ);
  fail_unless_equals_int (map.size, 38022);
  /* check for jpeg start/end markers */
  fail_unless_equals_int (map.data[0], 0xff);
  fail_unless_equals_int (map.data[1], 0xd8);
  fail_unless_equals_int (map.data[38020], 0xff);
  fail_unless_equals_int (map.data[38021], 0xd9);
  gst_buffer_unmap (buf, &map);
}
Example 16
// FIXME: Use gst_app_src_push_sample() instead when we switch to the appropriate GStreamer version.
static GstFlowReturn pushSample(GstAppSrc* appsrc, GstSample* sample)
{
    g_return_val_if_fail(GST_IS_SAMPLE(sample), GST_FLOW_ERROR);

    GstCaps* caps = gst_sample_get_caps(sample);
    if (caps)
        gst_app_src_set_caps(appsrc, caps);
    else
        GST_WARNING_OBJECT(appsrc, "received sample without caps");

    GstBuffer* buffer = gst_sample_get_buffer(sample);
    if (UNLIKELY(!buffer)) {
        GST_WARNING_OBJECT(appsrc, "received sample without buffer");
        return GST_FLOW_OK;
    }

    // gst_app_src_push_buffer() steals the reference, we need an additional one.
    return gst_app_src_push_buffer(appsrc, gst_buffer_ref(buffer));
}
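As the FIXME above notes, newer GStreamer versions (1.6 and later, to the best of my knowledge) ship this helper as gst_app_src_push_sample(), which likewise sets the caps from the sample and pushes a reference to its buffer, so the function reduces to:

    GstFlowReturn ret = gst_app_src_push_sample(appsrc, sample);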
static void
print_tag_foreach (const GstTagList * tags, const gchar * tag,
    gpointer user_data)
{
  GValue val = { 0, };
  gchar *str;
  guint depth = GPOINTER_TO_UINT (user_data);

  if (!gst_tag_list_copy_value (&val, tags, tag))
    return;

  if (G_VALUE_HOLDS_STRING (&val)) {
    str = g_value_dup_string (&val);
  } else if (G_VALUE_TYPE (&val) == GST_TYPE_SAMPLE) {
    GstSample *sample = gst_value_get_sample (&val);
    GstBuffer *img = gst_sample_get_buffer (sample);
    GstCaps *caps = gst_sample_get_caps (sample);

    if (img) {
      if (caps) {
        gchar *caps_str;

        caps_str = gst_caps_to_string (caps);
        str = g_strdup_printf ("buffer of %" G_GSIZE_FORMAT " bytes, "
            "type: %s", gst_buffer_get_size (img), caps_str);
        g_free (caps_str);
      } else {
        str = g_strdup_printf ("buffer of %" G_GSIZE_FORMAT " bytes",
            gst_buffer_get_size (img));
      }
    } else {
      str = g_strdup ("NULL buffer");
    }
  } else {
    str = gst_value_serialize (&val);
  }

  g_print ("%*s%s: %s\n", 2 * depth, " ", gst_tag_get_nick (tag), str);
  g_free (str);

  g_value_unset (&val);
}
Example 18
/*!
 * \brief OpenIMAJCapGStreamer::getImage
 * \return pointer to image bytes
 *  Retrieves the previously grabbed buffer and returns it
 */
unsigned char* OpenIMAJCapGStreamer::getImage()
{
    if(!buffer)
        return 0;
    
    //construct a frame header if we did not have any yet
    if(!frame)
    {
        gint height, width;
        
        //reuse the caps ptr
        if (buffer_caps)
            gst_caps_unref(buffer_caps);
        
        buffer_caps = gst_sample_get_caps(sample);

        // bail out if there are no caps
        if (!buffer_caps)
            return 0;
        GstStructure* structure = gst_caps_get_structure(buffer_caps, 0);
        
        // bail out if width or height are 0
        if(!gst_structure_get_int(structure, "width", &width) ||
           !gst_structure_get_int(structure, "height", &height))
        {
            return 0;
        }
    }
    
    // gstreamer expects us to handle the memory at this point
    // so we can just wrap the raw buffer and be done with it
    // the data ptr in GstMapInfo is only valid throughout the mapinfo object's life.
    // TODO: check if reusing the mapinfo object is ok.
    gboolean success = gst_buffer_map(buffer, info, (GstMapFlags)GST_MAP_READ);
    if (!success) {
        //something weird went wrong here. abort. abort.
        //fprintf(stderr,"GStreamer: unable to map buffer");
        return 0;
    }
    frame = (unsigned char*)info->data;
    gst_buffer_unmap(buffer,info);
    
    return frame;
}
GstFlowReturn AudioSourceProviderGStreamer::handleAudioBuffer(GstAppSink* sink)
{
    if (!m_client)
        return GST_FLOW_OK;

    // Pull a buffer from appsink and store it the appropriate buffer
    // list for the audio channel it represents.
    GRefPtr<GstSample> sample = adoptGRef(gst_app_sink_pull_sample(sink));
    if (!sample)
        return gst_app_sink_is_eos(sink) ? GST_FLOW_EOS : GST_FLOW_ERROR;

    GstBuffer* buffer = gst_sample_get_buffer(sample.get());
    if (!buffer)
        return GST_FLOW_ERROR;

    GstCaps* caps = gst_sample_get_caps(sample.get());
    if (!caps)
        return GST_FLOW_ERROR;

    GstAudioInfo info;
    gst_audio_info_from_caps(&info, caps);

    WTF::GMutexLocker<GMutex> lock(m_adapterMutex);

    // Check the first audio channel. The buffer is supposed to store
    // data of a single channel anyway.
    switch (GST_AUDIO_INFO_POSITION(&info, 0)) {
    case GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT:
    case GST_AUDIO_CHANNEL_POSITION_MONO:
        gst_adapter_push(m_frontLeftAdapter, gst_buffer_ref(buffer));
        break;
    case GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT:
        gst_adapter_push(m_frontRightAdapter, gst_buffer_ref(buffer));
        break;
    default:
        break;
    }

    return GST_FLOW_OK;
}
void MediaPlayerPrivateGStreamerBase::paintToTextureMapper(TextureMapper* textureMapper, const FloatRect& targetRect, const TransformationMatrix& matrix, float opacity)
{
    if (!m_player->visible())
        return;

    if (m_usingFallbackVideoSink) {
        if (RefPtr<BitmapTexture> texture = updateTexture(textureMapper))
            textureMapper->drawTexture(*texture.get(), targetRect, matrix, opacity);
        return;
    }

#if USE(GSTREAMER_GL)
    if (!GST_IS_SAMPLE(m_sample.get()))
        return;

    GstCaps* caps = gst_sample_get_caps(m_sample.get());
    if (!caps)
        return;

    GstVideoInfo videoInfo;
    gst_video_info_init(&videoInfo);
    if (!gst_video_info_from_caps(&videoInfo, caps))
        return;

    GstBuffer* buffer = gst_sample_get_buffer(m_sample.get());
    GstVideoFrame videoFrame;
    if (!gst_video_frame_map(&videoFrame, &videoInfo, buffer, static_cast<GstMapFlags>(GST_MAP_READ | GST_MAP_GL)))
        return;

    unsigned textureID = *reinterpret_cast<unsigned*>(videoFrame.data[0]);
    BitmapTexture::Flags flags = BitmapTexture::NoFlag;
    if (GST_VIDEO_INFO_HAS_ALPHA(&videoInfo))
        flags |= BitmapTexture::SupportsAlpha;

    IntSize size = IntSize(GST_VIDEO_INFO_WIDTH(&videoInfo), GST_VIDEO_INFO_HEIGHT(&videoInfo));
    TextureMapperGL* textureMapperGL = reinterpret_cast<TextureMapperGL*>(textureMapper);
    textureMapperGL->drawTexture(textureID, flags, size, targetRect, matrix, opacity);
    gst_video_frame_unmap(&videoFrame);
#endif
}
Example 21
GstFlowReturn AudioFileReader::handleSample(GstAppSink* sink)
{
    GstSample* sample = gst_app_sink_pull_sample(sink);
    if (!sample)
        return GST_FLOW_ERROR;

    GstBuffer* buffer = gst_sample_get_buffer(sample);
    if (!buffer) {
        gst_sample_unref(sample);
        return GST_FLOW_ERROR;
    }

    GstCaps* caps = gst_sample_get_caps(sample);
    if (!caps) {
        gst_sample_unref(sample);
        return GST_FLOW_ERROR;
    }

    GstAudioInfo info;
    gst_audio_info_from_caps(&info, caps);
    int frames = gst_buffer_get_size(buffer) / info.bpf;

    // Check the first audio channel. The buffer is supposed to store
    // data of a single channel anyway.
    switch (GST_AUDIO_INFO_POSITION(&info, 0)) {
    case GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT:
    case GST_AUDIO_CHANNEL_POSITION_MONO:
        gst_buffer_list_add(m_frontLeftBuffers, gst_buffer_ref(buffer));
        m_channelSize += frames;
        break;
    case GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT:
        gst_buffer_list_add(m_frontRightBuffers, gst_buffer_ref(buffer));
        break;
    default:
        break;
    }

    gst_sample_unref(sample);
    return GST_FLOW_OK;
}
Example 22
int OpenIMAJCapGStreamer::getBands() {
    if(!buffer)
        return 0;

    if (!buffer_caps)
        buffer_caps = gst_sample_get_caps(sample);
    
    
    GstStructure* structure = gst_caps_get_structure(buffer_caps, 0);
    const gchar* name = gst_structure_get_name(structure);
    const gchar* format = gst_structure_get_string(structure, "format");

    if (strcasecmp(name, "video/x-raw") == 0 && format) {
        if (strcasecmp(format, "BGR") == 0) {
            return 3;
        } else if (strcasecmp(format, "GRAY8") == 0) {
            return 1;
        }
    } else if (strcasecmp(name, "video/x-bayer") == 0) {
        // bayer caps carry no "format" field and are single-band
        return 1;
    }
    return 0;
}
Example 23
gboolean gub_blit_image(GUBGraphicContext *gcontext, GstSample *sample, void *texture_native_ptr)
{
    GstBuffer *buffer = NULL;
    GstCaps *caps = NULL;
    GstVideoInfo video_info;

    if (!gub_graphic_backend || !gub_graphic_backend->copy_texture) {
        return FALSE;
    }

    buffer = gst_sample_get_buffer(sample);
    if (!buffer) {
        gub_log("Sample contains no buffer");
        return FALSE;
    }

    caps = gst_sample_get_caps(sample);
    if (!caps || !gst_video_info_from_caps(&video_info, caps)) {
        gub_log("Sample contains no valid caps");
        return FALSE;
    }

    gub_graphic_backend->copy_texture(gcontext, &video_info, buffer, texture_native_ptr);

    return TRUE;
}
Example 24
GstBuffer* AudioLiveInputPipeline::pullNewBuffer(GstAppSink* sink)
{
    GstSample* sample = gst_app_sink_pull_sample(sink);
    if (!sample)
        return nullptr;

    GstBuffer* buffer = gst_sample_get_buffer(sample);
    if (!buffer) {
        gst_sample_unref(sample);
        return nullptr;
    }

    GstCaps* caps = gst_sample_get_caps(sample);
    if (!caps) {
        gst_sample_unref(sample);
        return nullptr;
    }

    GstAudioInfo info;
    gst_audio_info_from_caps(&info, caps);

    // Check the first audio channel.
    // The buffer is supposed to store data of a single channel anyway.
    switch (GST_AUDIO_INFO_POSITION(&info, 0)) {
    case GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT:
    case GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT:
        // Transferring ownership of buffer to the calling object.
        gst_buffer_ref(buffer);
        break;
    default:
        buffer = nullptr;
        break;
    }

    gst_sample_unref(sample);
    return buffer;
}
// Returns the size of the video
FloatSize MediaPlayerPrivateGStreamerBase::naturalSize() const
{
    if (!hasVideo())
        return FloatSize();

    if (!m_videoSize.isEmpty())
        return m_videoSize;

    WTF::GMutexLocker<GMutex> lock(m_sampleMutex);
    if (!GST_IS_SAMPLE(m_sample.get()))
        return FloatSize();

    GstCaps* caps = gst_sample_get_caps(m_sample.get());
    if (!caps)
        return FloatSize();


    // TODO: handle possible clean aperture data. See
    // https://bugzilla.gnome.org/show_bug.cgi?id=596571
    // TODO: handle possible transformation matrix. See
    // https://bugzilla.gnome.org/show_bug.cgi?id=596326

    // Get the video PAR and original size, if this fails the
    // video-sink has likely not yet negotiated its caps.
    int pixelAspectRatioNumerator, pixelAspectRatioDenominator, stride;
    IntSize originalSize;
    GstVideoFormat format;
    if (!getVideoSizeAndFormatFromCaps(caps, originalSize, format, pixelAspectRatioNumerator, pixelAspectRatioDenominator, stride))
        return FloatSize();

    LOG_MEDIA_MESSAGE("Original video size: %dx%d", originalSize.width(), originalSize.height());
    LOG_MEDIA_MESSAGE("Pixel aspect ratio: %d/%d", pixelAspectRatioNumerator, pixelAspectRatioDenominator);

    // Calculate DAR based on PAR and video size.
    int displayWidth = originalSize.width() * pixelAspectRatioNumerator;
    int displayHeight = originalSize.height() * pixelAspectRatioDenominator;

    // Divide display width and height by their GCD to avoid possible overflows.
    int displayAspectRatioGCD = greatestCommonDivisor(displayWidth, displayHeight);
    displayWidth /= displayAspectRatioGCD;
    displayHeight /= displayAspectRatioGCD;

    // Apply DAR to original video size. This is the same behavior as in xvimagesink's setcaps function.
    guint64 width = 0, height = 0;
    if (!(originalSize.height() % displayHeight)) {
        LOG_MEDIA_MESSAGE("Keeping video original height");
        width = gst_util_uint64_scale_int(originalSize.height(), displayWidth, displayHeight);
        height = static_cast<guint64>(originalSize.height());
    } else if (!(originalSize.width() % displayWidth)) {
        LOG_MEDIA_MESSAGE("Keeping video original width");
        height = gst_util_uint64_scale_int(originalSize.width(), displayHeight, displayWidth);
        width = static_cast<guint64>(originalSize.width());
    } else {
        LOG_MEDIA_MESSAGE("Approximating while keeping original video height");
        width = gst_util_uint64_scale_int(originalSize.height(), displayWidth, displayHeight);
        height = static_cast<guint64>(originalSize.height());
    }

    LOG_MEDIA_MESSAGE("Natural size: %" G_GUINT64_FORMAT "x%" G_GUINT64_FORMAT, width, height);
    m_videoSize = FloatSize(static_cast<int>(width), static_cast<int>(height));
    return m_videoSize;
}
Example 26
static void need_data_callback (GstAppSrc *src, guint length, gpointer user_data)
{
        EncoderStream *stream = (EncoderStream *)user_data;
        gint current_position;
        GstBuffer *buffer;
        GstPad *pad;
        GstEvent *event;

        current_position = (stream->current_position + 1) % SOURCE_RING_SIZE;
        for (;;) {
                if (stream->state != NULL) {
                        stream->state->last_heartbeat = gst_clock_get_time (stream->system_clock);
                }
                /* ensure next buffer isn't the current buffer */
                if ((current_position == stream->source->current_position) || stream->source->current_position == -1) {
                        if ((current_position == stream->source->current_position) && stream->source->eos) {
                                GstFlowReturn ret;

                                ret = gst_app_src_end_of_stream (src);
                                GST_INFO ("EOS of source %s, tell encoder %s, return %s", stream->source->name, stream->name, gst_flow_get_name (ret));
                                break;
                        }
                        GST_DEBUG ("waiting %s source ready", stream->name);
                        g_usleep (50000); /* waiting 50 ms */
                        continue;
                }

                /* first buffer, set caps. */
                if (stream->current_position == -1) {
                        GstCaps *caps;
                        gchar *caps_str;

                        caps = gst_sample_get_caps (stream->source->ring[current_position]);
                        gst_app_src_set_caps (src, caps);
                        caps_str = gst_caps_to_string (caps);
                        if (!g_str_has_prefix (caps_str, "video")) {
                                /* force key unit is only used for video streams */
                                stream->encoder = NULL;
                        }
                        GST_INFO ("set stream %s caps: %s", stream->name, caps_str);
                        g_free (caps_str);
                }

                buffer = gst_sample_get_buffer (stream->source->ring[current_position]);
                GST_DEBUG ("%s encoder position %d; timestamp %" GST_TIME_FORMAT " source position %d",
                        stream->name,   
                        stream->current_position,
                        GST_TIME_ARGS (GST_BUFFER_PTS (buffer)),
                        stream->source->current_position);

                /* force key unit? */
                if ((stream->encoder != NULL) && (stream->encoder->segment_duration != 0)) {
                        if (stream->encoder->duration_accumulation >= stream->encoder->segment_duration) {
                                GstClockTime running_time;

                                stream->encoder->last_segment_duration = stream->encoder->duration_accumulation;
                                running_time = GST_BUFFER_PTS (buffer);
                                pad = gst_element_get_static_pad ((GstElement *)src, "src");
                                event = gst_video_event_new_downstream_force_key_unit (running_time,
                                                                                       running_time,
                                                                                       running_time,
                                                                                       TRUE,
                                                                                       stream->encoder->force_key_count);
                                gst_pad_push_event (pad, event);
                                stream->encoder->force_key_count++;
                                stream->encoder->duration_accumulation = 0;
                        }
                        stream->encoder->duration_accumulation += GST_BUFFER_DURATION (buffer);
                }

                /* push buffer */
                if (gst_app_src_push_buffer (src, gst_buffer_ref (buffer)) != GST_FLOW_OK) {
                        GST_ERROR ("%s, gst_app_src_push_buffer failure.", stream->name);
                }

                if (stream->state != NULL) {
                        stream->state->current_timestamp = GST_BUFFER_PTS (buffer);
                }

                break;
        }
        stream->current_position = current_position;
}
static void
send_tag (const GstTagList * list, const gchar * tag, gpointer data)
{
  InsanityGstPipelineTest *ptest = INSANITY_GST_PIPELINE_TEST (data);
  gint i, count;
  GValue string_value = { 0 };
  char label[48];

  count = gst_tag_list_get_tag_size (list, tag);
  g_value_init (&string_value, G_TYPE_STRING);

  ptest->priv->tag_count++;

  for (i = 0; i < count; i++) {
    gchar *str;

    if (gst_tag_get_type (tag) == G_TYPE_STRING) {
      if (!gst_tag_list_get_string_index (list, tag, i, &str))
        g_assert_not_reached ();
    } else if (gst_tag_get_type (tag) == GST_TYPE_SAMPLE) {
      GstSample *img;

      img = gst_value_get_sample (gst_tag_list_get_value_index (list, tag, i));
      if (img) {
        GstBuffer *buffer;
        GstCaps *caps;
        gchar *caps_str;

        buffer = gst_sample_get_buffer (img);
        caps = gst_sample_get_caps (img);

        caps_str = caps ? gst_caps_to_string (caps) : g_strdup ("unknown");
        str = g_strdup_printf ("sample of %" G_GSIZE_FORMAT " bytes, type: %s",
            gst_buffer_get_size (buffer), caps_str);
        g_free (caps_str);
      } else {
        str = g_strdup ("NULL sample");
      }
    } else if (gst_tag_get_type (tag) == GST_TYPE_DATE_TIME) {
      GstDateTime *dt = NULL;

      gst_tag_list_get_date_time_index (list, tag, i, &dt);
      str = gst_date_time_to_iso8601_string (dt);
      gst_date_time_unref (dt);
    } else {
      str =
          g_strdup_value_contents (gst_tag_list_get_value_index (list, tag, i));
    }

    if (i == 0) {
      g_value_set_string (&string_value, gst_tag_get_nick (tag));
      snprintf (label, sizeof (label), "tags.%u.id", ptest->priv->tag_count);
      insanity_test_set_extra_info (INSANITY_TEST (ptest), label,
          &string_value);
      g_value_reset (&string_value);
    }
    g_value_set_string (&string_value, str);
    if (count > 1)
      snprintf (label, sizeof (label), "tags.%u.value.%u",
          ptest->priv->tag_count, i);
    else
      snprintf (label, sizeof (label), "tags.%u.value", ptest->priv->tag_count);
    insanity_test_set_extra_info (INSANITY_TEST (ptest), label, &string_value);
    g_value_reset (&string_value);

    g_free (str);
  }
}
Example 28
/**
 * _owr_image_renderer_pull_bmp_image:
 * @image_renderer:
 *
 * Returns: (transfer full):
 */
GBytes * _owr_image_renderer_pull_bmp_image(OwrImageRenderer *image_renderer)
{
    GstCaps *caps;
    GstSample *sample;
    GstBuffer *buf = NULL;
    GstMapInfo info;
    GstStructure *s;
    guint bufsize, total_size, src_rowsize, dest_rowsize, image_width, image_height;
    guint8 *image_data, *src_data, *srcpos, *destpos;
    gboolean ret, disabled = FALSE;

    g_return_val_if_fail(OWR_IS_IMAGE_RENDERER(image_renderer), NULL);

    if (!image_renderer->priv->appsink)
        return NULL;

    sample = gst_app_sink_pull_sample(GST_APP_SINK(image_renderer->priv->appsink));
    if (!sample)
        return NULL;

    buf = gst_sample_get_buffer(sample);
    if (!buf) {
        gst_sample_unref(sample);
        return NULL;
    }

    caps = gst_sample_get_caps(sample);
    s = gst_caps_get_structure(caps, 0);
    ret = gst_structure_get_int(s, "width", (gint *)&image_width);
    ret &= gst_structure_get_int(s, "height", (gint *)&image_height);
    if (!ret) {
        g_critical("%s Could not get bmp video dimensions from configured caps on appsink",
            __FUNCTION__);
        image_width = 0;
        image_height = 0;
    }

    if (!gst_buffer_map(buf, &info, GST_MAP_READ))
        g_assert_not_reached();

    g_assert(info.data);

    g_object_get(image_renderer, "disabled", &disabled, NULL);
    bufsize = (guint) info.size;
    total_size = BMP_HEADER_SIZE + bufsize;
    image_data = disabled ? g_malloc0(total_size) : g_malloc(total_size);
    if (!image_data) {
        g_critical("%s Allocate mem failed (g_malloc(total_size))", __FUNCTION__);
        return NULL;
    }
    fill_bmp_header(image_data, image_width, image_height);

    src_rowsize = DIB_BITS_PER_PIXEL * image_width / 8;
    dest_rowsize = ((DIB_BITS_PER_PIXEL * image_width  + 31) / 32) * 4;
    src_data = info.data;
    destpos = image_data + total_size;

    if (!disabled) {
        for (srcpos = src_data; srcpos < src_data + bufsize; srcpos += src_rowsize) {
            destpos -= dest_rowsize;
            memcpy(destpos, srcpos, src_rowsize);
        }
    }

    gst_buffer_unmap(buf, &info);
    gst_sample_unref(sample);

    return g_bytes_new_take(image_data, total_size);
}
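A hedged usage sketch for the function above; "renderer" stands for an assumed OwrImageRenderer instance. Per the (transfer full) annotation, the caller owns the returned GBytes:

    GBytes *bmp = _owr_image_renderer_pull_bmp_image(renderer); /* renderer is an assumption */
    if (bmp) {
        gsize size;
        const guint8 *data = g_bytes_get_data(bmp, &size);
        /* data/size now describe a complete BMP image; drop the reference when done */
        g_bytes_unref(bmp);
    }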
Example 29
gboolean
_gst_playbin_get_current_frame (GstElement          *playbin,
				int                  video_fps_n,
				int                  video_fps_d,
				FrameReadyCallback   cb,
				gpointer             user_data)
{
	ScreenshotData *data;
	GstCaps        *to_caps;
	GstSample      *sample;
	GstCaps        *sample_caps;
	GstStructure   *s;
	int             outwidth;
	int             outheight;

	data = g_new0 (ScreenshotData, 1);
	data->cb = cb;
	data->user_data = user_data;

	/* our desired output format (RGB24) */
	to_caps = gst_caps_new_simple ("video/x-raw",
				       "format", G_TYPE_STRING, "RGB",
				       /* Note: we don't ask for a specific width/height here, so that
				        * videoscale can adjust dimensions from a non-1/1 pixel aspect
				        * ratio to a 1/1 pixel-aspect-ratio. We also don't ask for a
				        * specific framerate, because the input framerate won't
				        * necessarily match the output framerate if there's a deinterlacer
				        * in the pipeline. */
				       "pixel-aspect-ratio", GST_TYPE_FRACTION, 1, 1,
				       NULL);

	/* get frame */
	sample = NULL;
	g_signal_emit_by_name (playbin, "convert-sample", to_caps, &sample);
	gst_caps_unref (to_caps);

	if (sample == NULL) {
		g_warning ("Could not take screenshot: %s", "failed to retrieve or convert video frame");
		screenshot_data_finalize (data);
		return FALSE;
	}

	sample_caps = gst_sample_get_caps (sample);
	if (sample_caps == NULL) {
		g_warning ("Could not take screenshot: %s", "no caps on output buffer");
		gst_sample_unref (sample);
		screenshot_data_finalize (data);
		return FALSE;
	}

	s = gst_caps_get_structure (sample_caps, 0);
	gst_structure_get_int (s, "width", &outwidth);
	gst_structure_get_int (s, "height", &outheight);
	if ((outwidth > 0) && (outheight > 0)) {
		GstMemory  *memory;
		GstMapInfo  info;

		memory = gst_buffer_get_memory (gst_sample_get_buffer (sample), 0);
		gst_memory_map (memory, &info, GST_MAP_READ);
		data->pixbuf = gdk_pixbuf_new_from_data (info.data,
							 GDK_COLORSPACE_RGB,
							 FALSE,
							 8,
							 outwidth,
							 outheight,
							 GST_ROUND_UP_4 (outwidth * 3),
							 destroy_pixbuf,
							 sample);

		gst_memory_unmap (memory, &info);
	}

	if (data->pixbuf == NULL)
		g_warning ("Could not take screenshot: %s", "could not create pixbuf");

	screenshot_data_finalize (data);

	return TRUE;
}
/* The main drawing function. */
static void
DrawGLScene (GstSample * sample)
{
  GstVideoFrame v_frame;
  GstVideoInfo v_info;
  guint texture = 0;
  GstBuffer *buf = gst_sample_get_buffer (sample);
  GstCaps *caps = gst_sample_get_caps (sample);

#ifdef WIN32
  if (!wglGetCurrentContext ())
    return;
#else
  if (!glXGetCurrentContext ())
    return;
#endif

  gst_video_info_from_caps (&v_info, caps);

  if (!gst_video_frame_map (&v_frame, &v_info, buf, GST_MAP_READ | GST_MAP_GL)) {
    g_warning ("Failed to map the video buffer");
    return;
  }

  texture = *(guint *) v_frame.data[0];

  glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);  // Clear The Screen And The Depth Buffer
  glLoadIdentity ();            // Reset The View

  glTranslatef (-0.4f, 0.0f, 0.0f);     // Move Left 1.5 Units And Into The Screen 6.0

  glRotatef (rtri, 0.0f, 1.0f, 0.0f);   // Rotate The Triangle On The Y axis 
  // draw a triangle (in smooth coloring mode)
  glBegin (GL_POLYGON);         // start drawing a polygon
  glColor3f (1.0f, 0.0f, 0.0f); // Set The Color To Red
  glVertex3f (0.0f, 0.4f, 0.0f);        // Top
  glColor3f (0.0f, 1.0f, 0.0f); // Set The Color To Green
  glVertex3f (0.4f, -0.4f, 0.0f);       // Bottom Right
  glColor3f (0.0f, 0.0f, 1.0f); // Set The Color To Blue
  glVertex3f (-0.4f, -0.4f, 0.0f);      // Bottom Left  
  glEnd ();                     // we're done with the polygon (smooth color interpolation)

  glEnable (GL_TEXTURE_2D);
  glBindTexture (GL_TEXTURE_2D, texture);
  glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
  glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
  glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
  glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
  glTexEnvi (GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);

  glLoadIdentity ();            // make sure we're no longer rotated.
  glTranslatef (0.5f, 0.0f, 0.0f);      // Move Right 3 Units, and back into the screen 6.0

  glRotatef (rquad, 1.0f, 0.0f, 0.0f);  // Rotate The Quad On The X axis 
  // draw a square (quadrilateral)
  glColor3f (0.4f, 0.4f, 1.0f); // set color to a blue shade.
  glBegin (GL_QUADS);           // start drawing a polygon (4 sided)
  glTexCoord3f (0.0f, 1.0f, 0.0f);
  glVertex3f (-0.4f, 0.4f, 0.0f);       // Top Left
  glTexCoord3f (1.0f, 1.0f, 0.0f);
  glVertex3f (0.4f, 0.4f, 0.0f);        // Top Right
  glTexCoord3f (1.0f, 0.0f, 0.0f);
  glVertex3f (0.4f, -0.4f, 0.0f);       // Bottom Right
  glTexCoord3f (0.0f, 0.0f, 0.0f);
  glVertex3f (-0.4f, -0.4f, 0.0f);      // Bottom Left  
  glEnd ();                     // done with the polygon

  glBindTexture (GL_TEXTURE_2D, 0);

  rtri += 1.0f;                 // Increase The Rotation Variable For The Triangle
  rquad -= 1.0f;                // Decrease The Rotation Variable For The Quad 

  // swap buffers to display, since we're double buffered.
  SDL_GL_SwapBuffers ();

  gst_video_frame_unmap (&v_frame);
}