Example #1
void ApplicationSink::Priv::setCallbacks(ApplicationSink *self)
{
    if (m_appsink) {
        if (self) {
            static GstAppSinkCallbacks callbacks = { &eos, &new_preroll,
                                                     &new_buffer, &new_buffer_list };
            gst_app_sink_set_callbacks(appSink(), &callbacks, self, NULL);
        } else {
            static GstAppSinkCallbacks callbacks = { &eos_noop, &new_preroll_noop,
                                                     &new_buffer_noop, &new_buffer_list_noop };
            gst_app_sink_set_callbacks(appSink(), &callbacks, NULL, NULL);
        }
    }
}
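Example #1 targets the GStreamer 0.10 callback layout (eos, new_preroll, new_buffer, new_buffer_list); in GStreamer 1.0 the buffer callbacks were replaced by a single new_sample. As a point of reference for the examples below, here is a minimal sketch of a 1.0 new_sample handler (the function name is illustrative, not taken from any example on this page):

static GstFlowReturn
on_new_sample (GstAppSink *appsink, gpointer user_data)
{
    /* Pull the sample that triggered this callback. */
    GstSample *sample = gst_app_sink_pull_sample (appsink);
    if (!sample)
        return GST_FLOW_ERROR;

    /* Access the wrapped buffer; map or copy its data here. */
    GstBuffer *buffer = gst_sample_get_buffer (sample);
    (void) buffer;

    /* Release the sample, or the appsink queue fills up. */
    gst_sample_unref (sample);
    return GST_FLOW_OK;
}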
Example #2
void AudioFileReader::handleNewDeinterleavePad(GstPad* pad)
{
    // A new pad for a planar channel was added in deinterleave. Plug
    // in an appsink so we can pull the data from each
    // channel. Pipeline looks like:
    // ... deinterleave ! queue ! appsink.
    GstElement* queue = gst_element_factory_make("queue", 0);
    GstElement* sink = gst_element_factory_make("appsink", 0);

    GstAppSinkCallbacks callbacks = { }; // zero-initialize every field, including any a newer GStreamer adds
    callbacks.new_sample = onAppsinkPullRequiredCallback;
    gst_app_sink_set_callbacks(GST_APP_SINK(sink), &callbacks, this, 0);

    g_object_set(sink, "sync", FALSE, NULL);

    gst_bin_add_many(GST_BIN(m_pipeline), queue, sink, NULL);

    GstPad* sinkPad = gst_element_get_static_pad(queue, "sink");
    gst_pad_link_full(pad, sinkPad, GST_PAD_LINK_CHECK_NOTHING);
    gst_object_unref(GST_OBJECT(sinkPad));

    gst_element_link_pads_full(queue, "src", sink, "sink", GST_PAD_LINK_CHECK_NOTHING);

    gst_element_sync_state_with_parent(queue);
    gst_element_sync_state_with_parent(sink);
}
Example #3
void AudioSourceProviderGStreamer::handleNewDeinterleavePad(GstPad* pad)
{
    m_deinterleaveSourcePads++;

    if (m_deinterleaveSourcePads > 2) {
        g_warning("The AudioSourceProvider supports only mono and stereo audio. Silencing out this new channel.");
        GstElement* queue = gst_element_factory_make("queue", 0);
        GstElement* sink = gst_element_factory_make("fakesink", 0);
        g_object_set(sink, "async", FALSE, nullptr);
        gst_bin_add_many(GST_BIN(m_audioSinkBin.get()), queue, sink, nullptr);

        GRefPtr<GstPad> sinkPad = adoptGRef(gst_element_get_static_pad(queue, "sink"));
        gst_pad_link_full(pad, sinkPad.get(), GST_PAD_LINK_CHECK_NOTHING);

        GQuark quark = g_quark_from_static_string("peer");
        g_object_set_qdata(G_OBJECT(pad), quark, sinkPad.get());
        gst_element_link_pads_full(queue, "src", sink, "sink", GST_PAD_LINK_CHECK_NOTHING);
        gst_element_sync_state_with_parent(queue);
        gst_element_sync_state_with_parent(sink);
        return;
    }

    // A new pad for a planar channel was added in deinterleave. Plug
    // in an appsink so we can pull the data from each
    // channel. Pipeline looks like:
    // ... deinterleave ! queue ! appsink.
    GstElement* queue = gst_element_factory_make("queue", 0);
    GstElement* sink = gst_element_factory_make("appsink", 0);

    GstAppSinkCallbacks callbacks = { }; // zero-initialize every field, including any a newer GStreamer adds
    callbacks.new_sample = onAppsinkNewBufferCallback;
    gst_app_sink_set_callbacks(GST_APP_SINK(sink), &callbacks, this, 0);

    g_object_set(sink, "async", FALSE, nullptr);

    GRefPtr<GstCaps> caps = adoptGRef(gst_caps_new_simple("audio/x-raw", "rate", G_TYPE_INT, static_cast<int>(gSampleBitRate),
        "channels", G_TYPE_INT, 1,
        "format", G_TYPE_STRING, GST_AUDIO_NE(F32),
        "layout", G_TYPE_STRING, "interleaved", nullptr));

    gst_app_sink_set_caps(GST_APP_SINK(sink), caps.get());

    gst_bin_add_many(GST_BIN(m_audioSinkBin.get()), queue, sink, nullptr);

    GRefPtr<GstPad> sinkPad = adoptGRef(gst_element_get_static_pad(queue, "sink"));
    gst_pad_link_full(pad, sinkPad.get(), GST_PAD_LINK_CHECK_NOTHING);

    GQuark quark = g_quark_from_static_string("peer");
    g_object_set_qdata(G_OBJECT(pad), quark, sinkPad.get());

    gst_element_link_pads_full(queue, "src", sink, "sink", GST_PAD_LINK_CHECK_NOTHING);

    sinkPad = adoptGRef(gst_element_get_static_pad(sink, "sink"));
    gst_pad_add_probe(sinkPad.get(), GST_PAD_PROBE_TYPE_EVENT_FLUSH, onAppsinkFlushCallback, this, nullptr);

    gst_element_sync_state_with_parent(queue);
    gst_element_sync_state_with_parent(sink);
}
Example #4
static gboolean create_pipeline(SpiceGstDecoder *decoder)
{
    gchar *desc;
    gboolean auto_enabled;
    guint opt;
    GstAppSinkCallbacks appsink_cbs = { NULL };
    GError *err = NULL;
    GstBus *bus;

    auto_enabled = (g_getenv("SPICE_GSTVIDEO_AUTO") != NULL);
    if (auto_enabled || !VALID_VIDEO_CODEC_TYPE(decoder->base.codec_type)) {
        SPICE_DEBUG("Trying %s for codec type %d %s",
                    gst_opts[0].dec_name, decoder->base.codec_type,
                    (auto_enabled) ? "(SPICE_GSTVIDEO_AUTO is set)" : "");
        opt = 0;
    } else {
        opt = decoder->base.codec_type;
    }

    /* - We schedule the frame display ourselves so set sync=false on appsink
     *   so the pipeline decodes them as fast as possible. This will also
     *   minimize the risk of frames getting lost when we rebuild the
     *   pipeline.
     * - Set max-bytes=0 on appsrc so it does not drop frames that may be
     *   needed by those that follow.
     */
    desc = g_strdup_printf("appsrc name=src is-live=true format=time max-bytes=0 block=true "
                           "%s ! %s ! videoconvert ! appsink name=sink "
                           "caps=video/x-raw,format=BGRx sync=false drop=false",
                           gst_opts[opt].dec_caps, gst_opts[opt].dec_name);
    SPICE_DEBUG("GStreamer pipeline: %s", desc);

    decoder->pipeline = gst_parse_launch_full(desc, NULL, GST_PARSE_FLAG_FATAL_ERRORS, &err);
    g_free(desc);
    if (!decoder->pipeline) {
        spice_warning("GStreamer error: %s", err->message);
        g_clear_error(&err);
        return FALSE;
    }

    decoder->appsrc = GST_APP_SRC(gst_bin_get_by_name(GST_BIN(decoder->pipeline), "src"));
    decoder->appsink = GST_APP_SINK(gst_bin_get_by_name(GST_BIN(decoder->pipeline), "sink"));

    appsink_cbs.new_sample = new_sample;
    gst_app_sink_set_callbacks(decoder->appsink, &appsink_cbs, decoder, NULL);
    bus = gst_pipeline_get_bus(GST_PIPELINE(decoder->pipeline));
    gst_bus_add_watch(bus, handle_pipeline_message, decoder);
    gst_object_unref(bus);

    decoder->clock = gst_pipeline_get_clock(GST_PIPELINE(decoder->pipeline));

    if (gst_element_set_state(decoder->pipeline, GST_STATE_PLAYING) == GST_STATE_CHANGE_FAILURE) {
        SPICE_DEBUG("GStreamer error: Unable to set the pipeline to the playing state.");
        free_pipeline(decoder);
        return FALSE;
    }

    return TRUE;
}
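Note the initializer in Example #4: GstAppSinkCallbacks appsink_cbs = { NULL } zero-initializes every member in C, so the eos and new_preroll slots are safely NULL before new_sample alone is assigned. A C99 designated initializer states the same intent more explicitly (an equivalent sketch, reusing the names from the example):

GstAppSinkCallbacks appsink_cbs = { .new_sample = new_sample };
gst_app_sink_set_callbacks(decoder->appsink, &appsink_cbs, decoder, NULL);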
Example #5
bool ofGstUtils::startPipeline() {
    gstData.pipeline=gstPipeline;

    // pause the pipeline
    if(gst_element_set_state(GST_ELEMENT(gstPipeline), GST_STATE_PAUSED) ==
            GST_STATE_CHANGE_FAILURE) {
//		ofLog(OF_LOG_ERROR, "GStreamer: unable to set pipeline to paused\n");

        return false;
    }

    bool ret = false;

    if(!bIsStream) {
        ofGstDataLock(&gstData);
        ret = allocate();
        ofGstDataUnlock(&gstData);

    } else {
        ret = true;
    }

    if(gstSink) {
        // set the appsink to not emit signals; we use callbacks instead,
        // or polling when in frame-by-frame mode
        g_object_set (G_OBJECT (gstSink), "emit-signals", FALSE, "sync", !bFrameByFrame, (void*)NULL);
        /*g_signal_connect (gstSink, "new-buffer", G_CALLBACK (on_new_buffer_from_source), &gstData);
        g_signal_connect (gstSink, "new-preroll", G_CALLBACK (on_new_preroll_from_source), &gstData);*/

        if(!bFrameByFrame) {
            //		printf("SET CALLBACKS\n");
            GstAppSinkCallbacks gstCallbacks;
            gstCallbacks.eos = &on_eos_from_source;
            gstCallbacks.new_preroll = &on_new_preroll_from_source;
            gstCallbacks.new_buffer = &on_new_buffer_from_source;

            gst_app_sink_set_callbacks(GST_APP_SINK(gstSink), &gstCallbacks, &gstData, NULL);
        }

    }


    setSpeed(1.0);

    return ret;
}
Example #6
bool ofGstUtils::startPipeline(){

	bPaused 			= true;
	speed 				= 1.0f;

	// pause the pipeline
	if(gst_element_set_state(GST_ELEMENT(gstPipeline), GST_STATE_PAUSED) ==	GST_STATE_CHANGE_FAILURE) {
		ofLog(OF_LOG_ERROR, "GStreamer: unable to set pipeline to paused\n");

		return false;
	}

	// wait for paused state to query the duration
	if(!isStream){
		GstState state = GST_STATE_PAUSED;
		if(gst_element_get_state(gstPipeline,&state,NULL,2*GST_SECOND)==GST_STATE_CHANGE_FAILURE){
			return false;
		}
	}

	bLoaded = true;


	if(isAppSink){
		// set the appsink to not emit signals, we are using callbacks instead
		// and frameByFrame to get buffers by polling instead of callback
		g_object_set (G_OBJECT (gstSink), "emit-signals", FALSE, "sync", !bFrameByFrame, (void*)NULL);

		if(!bFrameByFrame){
			GstAppSinkCallbacks gstCallbacks;
			gstCallbacks.eos = &on_eos_from_source;
			gstCallbacks.new_preroll = &on_new_preroll_from_source;
			gstCallbacks.new_buffer = &on_new_buffer_from_source;

			gst_app_sink_set_callbacks(GST_APP_SINK(gstSink), &gstCallbacks, this, NULL);
		}

	}

	setSpeed(1.0);

	ofAddListener(ofEvents.update,this,&ofGstUtils::update);

	return true;
}
Example #7
void GstAppSinkPipeline::Initialize(std::string pipelineString)
{	
	GstPipelineWrapper::InitializePipelineWithString(pipelineString);
	
	// setup appsink
	appsink = GstPipelineWrapper::GetElementByName(APPSINK_NAME);
	GstAppSinkCallbacks appsinkCallbacks;
	appsinkCallbacks.new_preroll	= &GstAppSinkPipeline::NewPrerollCallback;
	appsinkCallbacks.new_sample		= &GstAppSinkPipeline::NewSampleCallback;
	appsinkCallbacks.eos			= &GstAppSinkPipeline::EndOfStreamCallback; 
	
//	std::cout << pipelineString << std::endl;
	
	gst_app_sink_set_drop			(GST_APP_SINK(appsink), true);
	gst_app_sink_set_max_buffers	(GST_APP_SINK(appsink), 1);
	//gst_app_sink_set_emit_signals	(GST_APP_SINK(appsink), true);
	gst_app_sink_set_callbacks		(GST_APP_SINK(appsink), &appsinkCallbacks, this, (GDestroyNotify)GstAppSinkPipeline::DestroyCallback);		
}
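Example #7 is the only one on this page that passes a non-NULL GDestroyNotify as the last argument of gst_app_sink_set_callbacks(). GStreamer invokes that notifier once the user_data pointer is no longer needed, e.g. when the callbacks are replaced or the appsink is disposed. A minimal sketch of such a handler (the body is a placeholder, not the class's actual implementation):

static void
destroy_notify (gpointer user_data)
{
    /* user_data is the pointer handed to gst_app_sink_set_callbacks();
       release whatever it owns here. */
}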
Example #8
GstElement *MediaPlayer::createVideoSink()
{
    GstElement * sink = createElement("appsink", "videosink");

    if ( !sink )
        return 0;

    // Set the caps - so far we only want our image to be RGB
    GstCaps *sinkCaps = gst_caps_new_simple( "video/x-raw", "format", G_TYPE_STRING, "BGRA", NULL );
    gst_app_sink_set_caps( GST_APP_SINK( sink ), sinkCaps );
    gst_caps_unref(sinkCaps);

    // Set up the callbacks
    GstAppSinkCallbacks callbacks = { 0, 0, 0, 0, 0 };
    callbacks.new_sample = cb_new_sample;

    gst_app_sink_set_callbacks( GST_APP_SINK(sink), &callbacks, this, NULL );
    return sink;
}
Example #9
void Eyrie::record() {
	QVariant ret;
	if(recbin != NULL) {
		qDebug() << "Ending recording";
		gst_element_set_state(recbin, GST_STATE_NULL);
		recbin = NULL;
		QMetaObject::invokeMethod(parent(), "reset", Q_RETURN_ARG(QVariant, ret));
		return;
	}
	qDebug() << "Starting recording";
	QMetaObject::invokeMethod(parent(), "setStatus", Q_RETURN_ARG(QVariant, ret), Q_ARG(QVariant, ""));
	GError *err = NULL;
	// gst_parse_launch() creates the pipeline itself; building one with
	// gst_pipeline_new() first would only leak it.
	recbin = gst_parse_launch("autoaudiosrc ! level ! tee name=t   t. ! queue ! audioconvert ! audioresample ! appsink name=asink caps=audio/x-raw-float,channels=1,rate=11025,width=32,endianness=1234  t. ! queue ! audioconvert ! monoscope ! videobalance saturation=0 ! videoflip method=6 ! ffmpegcolorspace ! xvimagesink name=overlay", &err);
	sink = gst_bin_get_by_name(GST_BIN(recbin), "asink");
	GstAppSinkCallbacks appsink_cbs = { NULL, NULL, on_buffer, NULL };
	gst_app_sink_set_callbacks(GST_APP_SINK(sink), &appsink_cbs, this, NULL);
	overlay = gst_bin_get_by_name(GST_BIN(recbin), "overlay");
	gst_x_overlay_set_xwindow_id(GST_X_OVERLAY(overlay), view->effectiveWinId());
	gst_x_overlay_set_render_rectangle(GST_X_OVERLAY(overlay), 655, 140, 100, 200);
	gst_element_set_state(recbin, GST_STATE_PLAYING);
	attempts = 0;
	timer->start(10000);
}
Example #10
bool ofGstUtils::startPipeline(){

	bPaused 			= true;
	speed 				= 1.0f;


	if(gst_element_set_state (GST_ELEMENT(gstPipeline), GST_STATE_READY) ==	GST_STATE_CHANGE_FAILURE) {
		gLogManager.log( "GStreamer: unable to set pipeline to ready\n",ELL_WARNING);

		return false;
	}
	if(gst_element_get_state (GST_ELEMENT(gstPipeline), NULL, NULL, 10 * GST_SECOND)==GST_STATE_CHANGE_FAILURE){
		gLogManager.log( "GStreamer: unable to get pipeline to ready\n",ELL_WARNING);
		return false;
	}

	// pause the pipeline
	if(gst_element_set_state(GST_ELEMENT(gstPipeline), GST_STATE_PAUSED) ==	GST_STATE_CHANGE_FAILURE) {
		gLogManager.log("GStreamer: unable to set pipeline to paused\n",ELL_WARNING);

		return false;
	}

	// wait for paused state to query the duration
	if(!isStream){
		GstState state = GST_STATE_PAUSED;
		if(gst_element_get_state(gstPipeline,&state,NULL,2*GST_SECOND)==GST_STATE_CHANGE_FAILURE){
			gLogManager.log("GStreamer: unable to get pipeline to paused\n",ELL_WARNING);
			return false;
		}
		bPlaying = true;
		bLoaded = true;
	}



	if(isAppSink){
		gLogManager.log("attaching callbacks",ELL_INFO);
		// set the appsink to not emit signals, we are using callbacks instead
		// and frameByFrame to get buffers by polling instead of callback
		g_object_set (G_OBJECT (gstSink), "emit-signals", FALSE, "sync", !bFrameByFrame, (void*)NULL);

		if(!bFrameByFrame){
			GstAppSinkCallbacks gstCallbacks;
			gstCallbacks.eos = &on_eos_from_source;
			gstCallbacks.new_preroll = &on_new_preroll_from_source;
#if GST_VERSION_MAJOR==0
			gstCallbacks.new_buffer = &on_new_buffer_from_source;
#else
			gstCallbacks.new_sample = &on_new_buffer_from_source;
#endif

			gst_app_sink_set_callbacks(GST_APP_SINK(gstSink), &gstCallbacks, this, NULL);
		}

	}

	if(!isStream){
		setSpeed(1.0);
	}

	//ofAddListener(ofEvents().update,this,&ofGstUtils::update);

	return true;
}
Example #11
static void
link_sinkpad_cb (GstPad * pad, GstPad * peer, gpointer user_data)
{
  KmsRecorderEndpoint *self = KMS_RECORDER_ENDPOINT (user_data);
  KmsSinkPadData *sinkdata;
  GstAppSinkCallbacks callbacks = { NULL };     /* zero-initialize every callback slot */
  GstElement *appsink, *appsrc;
  KmsRecordingProfile profile;
  DataEvtProbe *data;
  KmsMediaType type;
  GstPad *target;
  gchar *id, *key;

  target = gst_ghost_pad_get_target (GST_GHOST_PAD (pad));
  if (target == NULL) {
    GST_ERROR_OBJECT (pad, "No target pad set");
    return;
  }

  key = g_object_get_data (G_OBJECT (target), KMS_PAD_IDENTIFIER_KEY);

  if (key == NULL) {
    GST_ERROR_OBJECT (pad, "No identifier assigned");
    g_object_unref (target);
    return;
  }

  KMS_ELEMENT_LOCK (KMS_ELEMENT (self));

  sinkdata = g_hash_table_lookup (self->priv->sink_pad_data, key);
  if (sinkdata == NULL) {
    GST_ERROR_OBJECT (self, "Invalid pad %" GST_PTR_FORMAT " connected %"
        GST_PTR_FORMAT, pad, peer);
    goto end;
  }

  switch (sinkdata->type) {
    case KMS_ELEMENT_PAD_TYPE_AUDIO:
      type = KMS_MEDIA_TYPE_AUDIO;
      break;
    case KMS_ELEMENT_PAD_TYPE_VIDEO:
      type = KMS_MEDIA_TYPE_VIDEO;
      break;
    default:
      GST_ERROR_OBJECT (self, "Invalid pad %" GST_PTR_FORMAT " connected %"
          GST_PTR_FORMAT, pad, peer);
      goto end;
  }

  profile = self->priv->profile;

  GST_DEBUG_OBJECT (pad, "linked to %" GST_PTR_FORMAT, peer);

  id = gst_pad_get_name (pad);

  appsrc = kms_base_media_muxer_add_src (self->priv->mux, type, id);

  if (appsrc == NULL) {
    GST_ERROR_OBJECT (self, "Can not get appsrc for pad %" GST_PTR_FORMAT, pad);
    KMS_ELEMENT_UNLOCK (KMS_ELEMENT (self));
    g_object_unref (target);
    g_free (id);

    return;
  }

  g_hash_table_insert (self->priv->srcs, id, g_object_ref (appsrc));

  if (sinkdata->sink_probe != 0UL) {
    gst_pad_remove_probe (target, sinkdata->sink_probe);
  }

  callbacks.eos = recv_eos;
  callbacks.new_preroll = NULL;
  callbacks.new_sample = recv_sample;

  appsink = gst_pad_get_parent_element (target);
  gst_app_sink_set_callbacks (GST_APP_SINK (appsink), &callbacks, appsrc, NULL);
  g_object_unref (appsink);

  data = data_evt_probe_new (appsrc, profile);
  sinkdata->sink_probe =
      gst_pad_add_probe (target, GST_PAD_PROBE_TYPE_EVENT_DOWNSTREAM,
      configure_pipeline_capabilities, data,
      (GDestroyNotify) data_evt_probe_destroy);

end:
  KMS_ELEMENT_UNLOCK (KMS_ELEMENT (self));

  g_clear_object (&target);
}
Example #12
static void
gst_nle_source_pad_added_cb (GstElement * element, GstPad * pad,
    GstNleSource * nlesrc)
{
  GstCaps *caps;
  const GstStructure *s;
  const gchar *mime;
  GstElement *appsink = NULL;
  GstPad *sink_pad;
  GstAppSinkCallbacks appsink_cbs;
  GstNleSrcItem *item;

  item = (GstNleSrcItem *) g_list_nth_data (nlesrc->queue, nlesrc->index);

  caps = gst_pad_get_caps_reffed (pad);
  s = gst_caps_get_structure (caps, 0);
  mime = gst_structure_get_name (s);
  GST_DEBUG_OBJECT (nlesrc, "Found mime type: %s", mime);

  if (g_strrstr (mime, "video") && !nlesrc->video_linked) {
    appsink = gst_element_factory_make ("appsink", NULL);
    memset (&appsink_cbs, 0, sizeof (appsink_cbs));
    appsink_cbs.eos = gst_nle_source_on_video_eos;
    appsink_cbs.new_preroll = gst_nle_source_on_preroll_buffer;
    appsink_cbs.new_buffer = gst_nle_source_on_video_buffer;
    nlesrc->video_linked = TRUE;
    if (!nlesrc->video_srcpad_added) {
      gst_pad_set_active (nlesrc->video_srcpad, TRUE);
      gst_element_add_pad (GST_ELEMENT (nlesrc),
          gst_object_ref (nlesrc->video_srcpad));
      nlesrc->video_srcpad_added = TRUE;
    }
    gst_pad_add_event_probe (GST_BASE_SINK_PAD (GST_BASE_SINK (appsink)),
        (GCallback) gst_nle_source_video_pad_probe_cb, nlesrc);
    nlesrc->video_eos = FALSE;
  } else if (g_strrstr (mime, "audio") && nlesrc->with_audio
      && !nlesrc->audio_linked && (item ? item->rate == 1.0 : TRUE)) {
    appsink = gst_element_factory_make ("appsink", NULL);
    memset (&appsink_cbs, 0, sizeof (appsink_cbs));
    appsink_cbs.eos = gst_nle_source_on_audio_eos;
    appsink_cbs.new_preroll = gst_nle_source_on_preroll_buffer;
    appsink_cbs.new_buffer = gst_nle_source_on_audio_buffer;
    nlesrc->audio_linked = TRUE;
    if (!nlesrc->audio_srcpad_added) {
      gst_pad_set_active (nlesrc->audio_srcpad, TRUE);
      gst_element_add_pad (GST_ELEMENT (nlesrc),
          gst_object_ref (nlesrc->audio_srcpad));
      nlesrc->audio_srcpad_added = TRUE;
    }
    gst_pad_add_event_probe (GST_BASE_SINK_PAD (GST_BASE_SINK (appsink)),
        (GCallback) gst_nle_source_audio_pad_probe_cb, nlesrc);
    nlesrc->audio_eos = FALSE;
  }
  if (appsink != NULL) {
    g_object_set (appsink, "sync", FALSE, NULL);
    gst_app_sink_set_callbacks (GST_APP_SINK (appsink), &appsink_cbs, nlesrc,
        NULL);
    gst_bin_add (GST_BIN (nlesrc->decoder), appsink);
    sink_pad = gst_element_get_static_pad (appsink, "sink");
    gst_pad_link (pad, sink_pad);
    gst_element_sync_state_with_parent (appsink);
    gst_object_unref (sink_pad);
  }
}
Example #13
bool VideoSender::enableSending(bool enable)
{
  GstElement *sink;
#define USE_TEE 0
#if USE_TEE
  GstElement *ob;
#endif
  GError *error = NULL;

  qDebug() << "In" << __FUNCTION__ << ", Enable:" << enable;

  // Disable video sending
  if (enable == false) {
    qDebug() << "Stopping video encoding";
    if (pipeline) {
      gst_element_set_state(pipeline, GST_STATE_NULL);
    }

    qDebug() << "Deleting pipeline";
    if (pipeline) {
      gst_object_unref(GST_OBJECT(pipeline));
      pipeline = NULL;
    }
    encoder = NULL;

    ODdata[OB_VIDEO_PARAM_CONTINUE] = 0;
    if (ODprocess) {
      ODprocess->write((const char *)ODdata, sizeof(ODdata));
    }

    return true;
  }

  if (pipeline) {
    // Do nothing as the pipeline has already been created and is
    // probably running
    qCritical("Pipeline exists already, doing nothing");
    return true;
  }

  // Initialisation. We don't pass command line arguments here
  if (!gst_init_check(NULL, NULL, NULL)) {
    qCritical("Failed to init GST");
    return false;
  }

  if (!hardware) {
    qCritical("No hardware plugin");
    return false;
  }

  QString pipelineString = "";
  pipelineString.append(videoSource + " name=source");
  pipelineString.append(" ! ");
  pipelineString.append("capsfilter caps=\"video/x-raw,format=(string)I420,framerate=(fraction)30/1,");
  switch(quality) {
  default:
  case 0:
    pipelineString.append("width=(int)320,height=(int)240");
    break;
  case 1:
    pipelineString.append("width=(int)640,height=(int)480");
    break;
  case 2:
    pipelineString.append("width=(int)800,height=(int)600");
    break;
  }

  pipelineString.append("\"");

#if USE_TEE
  pipelineString.append(" ! ");
  pipelineString.append("tee name=scripttee");
  // FIXME: does this cause latency?
  pipelineString.append(" ! ");
  pipelineString.append("queue");
#endif
  pipelineString.append(" ! ");
  pipelineString.append(hardware->getEncodingPipeline());
  pipelineString.append(" ! ");
  pipelineString.append("rtph264pay name=rtppay config-interval=1 mtu=500");
  pipelineString.append(" ! ");
  pipelineString.append("appsink name=sink sync=false max-buffers=1 drop=true");
#if USE_TEE
  // Tee (branch) frames for external components
  pipelineString.append(" scripttee. ");
  // TODO: downscale to 320x240?
  pipelineString.append(" ! ");
  pipelineString.append("appsink name=ob sync=false max-buffers=1 drop=true");
#endif
  qDebug() << "Using pipeline:" << pipelineString;

  // Create encoding video pipeline
  pipeline = gst_parse_launch(pipelineString.toUtf8(), &error);
  if (!pipeline) {
    qCritical("Failed to parse pipeline: %s", error->message);
    g_error_free(error);
    return false;
  }

  encoder = gst_bin_get_by_name(GST_BIN(pipeline), "encoder");
  if (!encoder) {
    qCritical("Failed to get encoder");
    return false;
  }

  // Assuming here that X86 uses x264enc
  if (hardware->getHardwareName() == "generic_x86") {
    g_object_set(G_OBJECT(encoder), "speed-preset", 1, NULL); // ultrafast
    g_object_set(G_OBJECT(encoder), "tune", 0x00000004, NULL); // zerolatency
  }

  if (hardware->getHardwareName() == "tegrak1" ||
      hardware->getHardwareName() == "tegrax1") {
    //g_object_set(G_OBJECT(encoder), "input-buffers", 2, NULL); // not valid on 1.0
    //g_object_set(G_OBJECT(encoder), "output-buffers", 2, NULL); // not valid on 1.0
    //g_object_set(G_OBJECT(encoder), "quality-level", 0, NULL);
    //g_object_set(G_OBJECT(encoder), "rc-mode", 0, NULL);
  }

  if (hardware->getHardwareName() == "tegrax2") {
    g_object_set(G_OBJECT(encoder), "preset-level", 0, NULL); // 0 == UltraFastPreset for high perf
  }

  setBitrate(bitrate);

  {
    GstElement *source;
    source = gst_bin_get_by_name(GST_BIN(pipeline), "source");
    if (!source) {
      qCritical("Failed to get source");
      return false;
    }

    g_object_set(G_OBJECT(source), "do-timestamp", true, NULL);

    if (videoSource == "videotestsrc") {
      g_object_set(G_OBJECT(source), "is-live", true, NULL);
    } else if (videoSource == "v4l2src") {
      //g_object_set(G_OBJECT(source), "always-copy", false, NULL);

      const char *camera = "/dev/video0";
      QByteArray env_camera = qgetenv("PLECO_SLAVE_CAMERA");
      if (!env_camera.isNull()) {
        camera = env_camera.data();
      }
      g_object_set(G_OBJECT(source), "device", camera, NULL);
    }

    if (hardware->getHardwareName() == "tegrak1" ||
        hardware->getHardwareName() == "tegrax1") {
      g_object_set(G_OBJECT(source), "io-mode", 1, NULL);
    }
  }


  sink = gst_bin_get_by_name(GST_BIN(pipeline), "sink");
  if (!sink) {
    qCritical("Failed to get sink");
    return false;
  }

  // Set appsink callbacks
  GstAppSinkCallbacks appSinkCallbacks;
  appSinkCallbacks.eos             = NULL;
  appSinkCallbacks.new_preroll     = NULL;
  appSinkCallbacks.new_sample      = &newBufferCB;

  gst_app_sink_set_callbacks(GST_APP_SINK(sink), &appSinkCallbacks, this, NULL);
#if USE_TEE
  // Callbacks for the OB process appsink
  ob = gst_bin_get_by_name(GST_BIN(pipeline), "ob");
  if (!ob) {
    qCritical("Failed to get ob appsink");
    return false;
  }

  // Set appsink callbacks
  GstAppSinkCallbacks obCallbacks;
  obCallbacks.eos             = NULL;
  obCallbacks.new_preroll     = NULL;
  obCallbacks.new_sample      = &newBufferOBCB;

  gst_app_sink_set_callbacks(GST_APP_SINK(ob), &obCallbacks, this, NULL);
#endif
  // Start running 
  gst_element_set_state(GST_ELEMENT(pipeline), GST_STATE_PLAYING);

  launchObjectDetection();

  return true;
}
Example #14
bool GStreamerWrapper::open( std::string strFilename, bool bGenerateVideoBuffer, bool bGenerateAudioBuffer )
{
	if( m_bFileIsOpen )
	{
		stop();
		close();
	}

	// init property variables
	m_iNumVideoStreams = 0;
	m_iNumAudioStreams = 0;
	m_iCurrentVideoStream = 0;
	m_iCurrentAudioStream = 0;
	m_iWidth = m_iHeight = 0;
	m_iCurrentFrameNumber = 0;		// set to invalid, as it is not decoded yet
	m_dCurrentTimeInMs = 0;			// set to invalid, as it is not decoded yet
	m_bIsAudioSigned = false;
	m_bIsNewVideoFrame = false;
	m_iNumAudioChannels = 0;
	m_iAudioSampleRate = 0;
	m_iAudioBufferSize = 0;
	m_iAudioWidth = 0;
	m_AudioEndianness = LITTLE_ENDIAN;
	m_fFps = 0;
	m_dDurationInMs = 0;
	m_iNumberOfFrames = 0;

	m_fVolume = 1.0f;
	m_fSpeed = 1.0f;
	m_PlayDirection = FORWARD;
	m_CurrentPlayState = NOT_INITIALIZED;
	m_LoopMode = LOOP;
	m_strFilename = strFilename;

#ifdef THREADED_MESSAGE_HANDLER
		m_MsgHandlingThread = std::thread( std::bind( threadedMessageHandler, this ) );
#endif


	////////////////////////////////////////////////////////////////////////// PIPELINE
	// Init main pipeline --> playbin2
	m_GstPipeline = gst_element_factory_make( "playbin2", "pipeline" );

	// Check and re-arrange filename string
	if ( strFilename.find( "file:/", 0 ) == std::string::npos &&
		 strFilename.find( "file:///", 0 ) == std::string::npos &&
		 strFilename.find( "http://", 0 ) == std::string::npos )
	{
		strFilename = "file:/" + strFilename;
	}

	// Open Uri
	g_object_set( m_GstPipeline, "uri", strFilename.c_str(), NULL );


	////////////////////////////////////////////////////////////////////////// VIDEO SINK
	// Extract and Config Video Sink
	if ( bGenerateVideoBuffer )
	{
		// Create the video appsink and configure it
		m_GstVideoSink = gst_element_factory_make( "appsink", "videosink" );
		gst_base_sink_set_sync( GST_BASE_SINK( m_GstVideoSink ), true );
		gst_app_sink_set_max_buffers( GST_APP_SINK( m_GstVideoSink ), 8 );
		gst_app_sink_set_drop( GST_APP_SINK( m_GstVideoSink ),true );
		gst_base_sink_set_max_lateness( GST_BASE_SINK( m_GstVideoSink ), -1);

		// Set some fix caps for the video sink
		// It would seem that GStreamer then tries to transform any incoming video stream according to these caps
		GstCaps* caps = gst_caps_new_simple( "video/x-raw-rgb",
			"bpp", G_TYPE_INT, 24,
			"depth", G_TYPE_INT, 24,
			"endianness",G_TYPE_INT,4321,
			"red_mask",G_TYPE_INT,0xff0000,
			"green_mask",G_TYPE_INT,0x00ff00,
			"blue_mask",G_TYPE_INT,0x0000ff,
			"alpha_mask",G_TYPE_INT,0x000000ff,
			NULL );


		gst_app_sink_set_caps( GST_APP_SINK( m_GstVideoSink ), caps );
		gst_caps_unref( caps );

		// Set the configured video appsink to the main pipeline
		g_object_set( m_GstPipeline, "video-sink", m_GstVideoSink, (void*)NULL );
		// Tell the video appsink that it should not emit signals as the buffer retrieving is handled via callback methods
		g_object_set( m_GstVideoSink, "emit-signals", false, "sync", true, (void*)NULL );

		// Set Video Sink callback methods
		m_GstVideoSinkCallbacks.eos = &GStreamerWrapper::onEosFromVideoSource;
		m_GstVideoSinkCallbacks.new_preroll = &GStreamerWrapper::onNewPrerollFromVideoSource;
		m_GstVideoSinkCallbacks.new_buffer = &GStreamerWrapper::onNewBufferFromVideoSource;
		gst_app_sink_set_callbacks( GST_APP_SINK( m_GstVideoSink ), &m_GstVideoSinkCallbacks, this, NULL );
	}
	else
	{
#if defined _WIN32 // Use direct show as playback plugin if on Windows; Needed for features like play direction and playback speed to work correctly
		GstElement* videoSink = gst_element_factory_make( "directdrawsink", NULL );
		g_object_set( m_GstPipeline, "video-sink", videoSink, NULL );
#elif defined LINUX
		GstElement* videoSink = gst_element_factory_make( "xvimagesink", NULL );    //possible alternatives: ximagesink (no (gpu) fancy stuff) or better: cluttersink
		g_object_set( m_GstPipeline, "video-sink", videoSink, NULL );
#else // Use Mac OSX plugin otherwise
		GstElement* videoSink = gst_element_factory_make( "osxvideosink", NULL );
		g_object_set( m_GstPipeline, "video-sink", videoSink, NULL );
#endif
	}

	////////////////////////////////////////////////////////////////////////// AUDIO SINK
	// Extract and config Audio Sink
	if ( bGenerateAudioBuffer )
	{
		// Create and configure audio appsink
		m_GstAudioSink = gst_element_factory_make( "appsink", "audiosink" );
		gst_base_sink_set_sync( GST_BASE_SINK( m_GstAudioSink ), true );
		// Set the configured audio appsink to the main pipeline
		g_object_set( m_GstPipeline, "audio-sink", m_GstAudioSink, (void*)NULL );
		// Tell the audio appsink that it should not emit signals as the buffer retrieving is handled via callback methods
		g_object_set( m_GstAudioSink, "emit-signals", false, "sync", true, (void*)NULL );

		// Set Audio Sink callback methods
		m_GstAudioSinkCallbacks.eos = &GStreamerWrapper::onEosFromAudioSource;
		m_GstAudioSinkCallbacks.new_preroll = &GStreamerWrapper::onNewPrerollFromAudioSource;
		m_GstAudioSinkCallbacks.new_buffer = &GStreamerWrapper::onNewBufferFromAudioSource;
		gst_app_sink_set_callbacks( GST_APP_SINK( m_GstAudioSink ), &m_GstAudioSinkCallbacks, this, NULL );
	}
	else
	{
#if defined _WIN32 // Use direct sound plugin if on Windows; Needed for features like play direction and playback speed to work correctly
		GstElement* audioSink = gst_element_factory_make( "directsoundsink", NULL );
		g_object_set ( m_GstPipeline, "audio-sink", audioSink, NULL );
#elif defined LINUX
		GstElement* audioSink = gst_element_factory_make( "pulsesink", NULL );  //alternative: alsasink
		g_object_set ( m_GstPipeline, "audio-sink", audioSink, NULL );
#else // Use Mac OS X plugin otherwise
		GstElement* audioSink = gst_element_factory_make( "osxaudiosink", NULL );
		g_object_set ( m_GstPipeline,"audio-sink", audioSink, NULL );
#endif
	}

	////////////////////////////////////////////////////////////////////////// BUS
	// Set GstBus
	m_GstBus = gst_pipeline_get_bus( GST_PIPELINE( m_GstPipeline ) );

	if ( m_GstPipeline != NULL )
	{
//just add this callback for threaded message handling
#ifdef THREADED_MESSAGE_HANDLER
		gst_bus_add_watch (m_GstBus, onHandleGstMessages, this );
#endif
		// We need to stream the file a little bit in order to be able to retrieve information from it
		gst_element_set_state( m_GstPipeline, GST_STATE_READY );
		gst_element_set_state( m_GstPipeline, GST_STATE_PAUSED );

		// For some reason this is needed in order to gather video information such as size, framerate etc ...
		GstState state;
		gst_element_get_state( m_GstPipeline, &state, NULL, 2 * GST_SECOND );
		m_CurrentPlayState = OPENED;
	}

	// Retrieve and store all relevant Media Information
	retrieveVideoInfo();

	if( !hasVideo() && !hasAudio() )	// is a valid multimedia file?
	{
		close();
		return false;
	}

	// Print Media Info
	printMediaFileInfo();

	// TODO: Check if everything was initialized correctly
	// A file has been opened
	m_bFileIsOpen = true;

	return true;
}
Example #15
bool ofGstUtils::startPipeline(){

	bPaused 			= true;
	speed 				= 1.0f;


	if(gst_element_set_state (GST_ELEMENT(gstPipeline), GST_STATE_READY) ==	GST_STATE_CHANGE_FAILURE) {
		ofLogError("ofGstUtils") << "startPipeline(): unable to set pipeline to ready";
		return false;
	}
	if(gst_element_get_state (GST_ELEMENT(gstPipeline), NULL, NULL, 10 * GST_SECOND)==GST_STATE_CHANGE_FAILURE){
		ofLogError("ofGstUtils") << "startPipeline(): unable to get pipeline ready status";
		return false;
	}

	// pause the pipeline
	if(gst_element_set_state(GST_ELEMENT(gstPipeline), GST_STATE_PAUSED) ==	GST_STATE_CHANGE_FAILURE) {
		ofLogError("ofGstUtils") << "startPipeline(): unable to pause pipeline";
		return false;
	}

	// wait for paused state to query the duration
	if(!isStream){
		GstState state = GST_STATE_PAUSED;
		if(gst_element_get_state(gstPipeline,&state,NULL,2*GST_SECOND)==GST_STATE_CHANGE_FAILURE){
			ofLogError("ofGstUtils") << "startPipeline(): unable to get pipeline paused state";
			return false;
		}
		bPlaying = true;
		bLoaded = true;
	}

	bus = gst_pipeline_get_bus (GST_PIPELINE(gstPipeline));

	if(bus){
		gst_bus_add_watch (bus, (GstBusFunc) busFunction, this);
	}



	if(isAppSink){
		ofLogVerbose("ofGstUtils") << "startPipeline(): attaching callbacks";
		// set the appsink to not emit signals, we are using callbacks instead
		// and frameByFrame to get buffers by polling instead of callback
		g_object_set (G_OBJECT (gstSink), "emit-signals", FALSE, "sync", !bFrameByFrame, (void*)NULL);
		//gst_app_sink_set_drop(GST_APP_SINK(gstSink),1);
		//gst_app_sink_set_max_buffers(GST_APP_SINK(gstSink),2);

		if(!bFrameByFrame){
			GstAppSinkCallbacks gstCallbacks;
			gstCallbacks.eos = &on_eos_from_source;
			gstCallbacks.new_preroll = &on_new_preroll_from_source;
#if GST_VERSION_MAJOR==0
			gstCallbacks.new_buffer = &on_new_buffer_from_source;
#else
			gstCallbacks.new_sample = &on_new_buffer_from_source;
#endif

			gst_app_sink_set_callbacks(GST_APP_SINK(gstSink), &gstCallbacks, this, NULL);
		}

	}

	if(!isStream){
		setSpeed(1.0);
	}


	return true;
}
Example #16
bool GStreamerFramesReceiver::LoadVideo(char * URL)
{
	GstStateChangeReturn res;

	/* Initialize GStreamer */
	gst_init(NULL, NULL);

	/* Build the pipeline */
	GError *error = NULL;
	char * init_str = g_strdup_printf("rtspsrc location=%s latency=1000 drop-on-latency=false ! queue ! rtph264depay ! queue2 ! avdec_h264 ! queue2 ! appsink name=mysink", URL);
	pipeline = gst_parse_launch(init_str, &error);
	g_free(init_str);
	
	if (error)
	{
		gchar * message = g_strdup_printf("Unable to build pipeline: %s", error -> message);
		g_clear_error(&error);
		g_free(message);
		return false;
	}

	sink = gst_bin_get_by_name(GST_BIN(pipeline), "mysink");

	/* Instruct appsink to drop old buffers when the maximum amount of queued buffers is reached. */
	gst_app_sink_set_drop(GST_APP_SINK(sink), true);

	/* Set the maximum amount of buffers that can be queued in appsink.
	 * After this amount of buffers are queued in appsink, any more buffers
	 * will block upstream elements until a sample is pulled from appsink.
	 */
	gst_app_sink_set_max_buffers(GST_APP_SINK(sink), 1);		// number of queued received buffers in appsink before updating the frame
	g_object_set(G_OBJECT(sink), "sync", TRUE, NULL);

	// Registering callbacks to appsink element
	GstAppSinkCallbacks callbacks = { on_eos, new_preroll, new_buffer, NULL };
	gst_app_sink_set_callbacks(GST_APP_SINK(sink), &callbacks, this, NULL);

	res = gst_element_set_state (pipeline, GST_STATE_PLAYING);

	if (res == GST_STATE_CHANGE_FAILURE)
	{
		g_printerr ("Unable to set the pipeline to the playing state.\n");
		gst_object_unref (pipeline);
		pipeline = NULL;
		return false;
	}
	else if (res == GST_STATE_CHANGE_NO_PREROLL)
	{
		g_print ("live sources not supported yet\n");
		gst_object_unref (pipeline);
		pipeline = NULL;
		return false;
	}
	else if (res == GST_STATE_CHANGE_ASYNC)
	{
		// can happen when buffering occurs
		GstState current, pending;
		res = gst_element_get_state(GST_ELEMENT(pipeline), &current, &pending, GST_CLOCK_TIME_NONE);
		if(res == GST_STATE_CHANGE_FAILURE || res == GST_STATE_CHANGE_ASYNC)
		{
			g_printerr ("Unable to set the pipeline to the playing state.\n");
			gst_object_unref (pipeline);
			pipeline = NULL;
			return false;
		}
	}

	bool isFrameOK = false;

	/* get the preroll buffer from appsink; this blocks until appsink really prerolls */
	GstSample * sample;
	g_signal_emit_by_name (sink, "pull-preroll", &sample, NULL);

	if (sample)
	{
		/* get the snapshot buffer format now. We set the caps on the appsink so
		 * that it can only be an rgb buffer. The only thing we have not specified
		 * on the caps is the height, which is dependent on the pixel-aspect-ratio
		 * of the source material
		 */
		GstCaps *caps = gst_sample_get_caps(sample);
		int width, height;
		PixelFormat pixelFormat;
		isFrameOK = ExtractImageParams(caps, width, height, pixelFormat);
		gst_sample_unref (sample);
	}

	if (!isFrameOK)
	{
		g_printerr ("Unable to get the snapshot buffer format.\n");
		gst_object_unref (pipeline);
		pipeline = NULL;
		return false;
	}
	
	mainLoopThread = g_thread_new("mainLoopThread", MainLoopThreadFunction, this);

	return true;
}
Example #17
/*
 *      PsychGSCreateMovie() -- Create a movie object.
 *
 *      This function tries to open a moviefile (with or without audio/video tracks)
 *      and create an associated movie object for it.
 *
 *      win = Pointer to window record of associated onscreen window.
 *      moviename = char* with the name of the moviefile.
 *      preloadSecs = How many seconds of the movie should be preloaded/prefetched into RAM at movie open time?
 *      moviehandle = handle to the new movie.
 */
void PsychGSCreateMovie(PsychWindowRecordType *win, const char* moviename, double preloadSecs, int* moviehandle)
{
    GstCaps                     *colorcaps;
    GstElement			*theMovie = NULL;
    GMainLoop			*MovieContext = NULL;
    GstBus			*bus = NULL;
    GstFormat			fmt;
    GstElement      *videosink;
    gint64			length_format;
    GstPad			*pad, *peerpad;
    const GstCaps		*caps;
    GstStructure		*str;
    gint			width,height;
    gint			rate1, rate2;
    int				i, slotid;
    GError			*error = NULL;
    char			movieLocation[FILENAME_MAX];
    psych_bool			trueValue = TRUE;
    char			msgerr[10000];
    char			errdesc[1000];
    psych_bool			printErrors;

    // Suppress output of error-messages if moviehandle == -1000. That means we
    // run in our own Posix-Thread, not in the Matlab-Thread. Printing via Matlabs
    // printing facilities would likely cause a terrible crash.
    printErrors = (*moviehandle == -1000) ? FALSE : TRUE;
    
    // Set movie handle to "failed" initially:
    *moviehandle = -1;

    // We start GStreamer only on first invocation.
    if (firsttime) {        
        // Initialize GStreamer: The routine is defined in PsychVideoCaptureSupportGStreamer.c
		PsychGSCheckInit("movie playback");
        firsttime = FALSE;
    }

    if (win && !PsychIsOnscreenWindow(win)) {
        if (printErrors) PsychErrorExitMsg(PsychError_user, "Provided windowPtr is not an onscreen window."); else return;
    }

    if (NULL == moviename) {
        if (printErrors) PsychErrorExitMsg(PsychError_internal, "NULL-Ptr instead of moviename passed!"); else return;
    }

    if (numMovieRecords >= PSYCH_MAX_MOVIES) {
        *moviehandle = -2;
        if (printErrors) PsychErrorExitMsg(PsychError_user, "Allowed maximum number of simultaneously open movies exceeded!"); else return;
    }

    // Search first free slot in movieRecordBANK:
    for (i=0; (i < PSYCH_MAX_MOVIES) && (movieRecordBANK[i].theMovie); i++);
    if (i>=PSYCH_MAX_MOVIES) {
        *moviehandle = -2;
        if (printErrors) PsychErrorExitMsg(PsychError_user, "Allowed maximum number of simultaneously open movies exceeded!"); else return;
    }

    // Slot slotid will contain the movie record for our new movie object:
    slotid=i;

    // Zero-out new record in moviebank:
    memset(&movieRecordBANK[slotid], 0, sizeof(PsychMovieRecordType));
    
    // Create name-string for moviename: If an URI qualifier is at the beginning,
    // we're fine and just pass the URI as-is. Otherwise we add the file:// URI prefix.
    if (strstr(moviename, "://") || ((strstr(moviename, "v4l") == moviename) && strstr(moviename, "//"))) {
	snprintf(movieLocation, sizeof(movieLocation)-1, "%s", moviename);
    } else {
	snprintf(movieLocation, sizeof(movieLocation)-1, "file:///%s", moviename);
    }
    strncpy(movieRecordBANK[slotid].movieLocation, movieLocation, FILENAME_MAX);
    strncpy(movieRecordBANK[slotid].movieName, moviename, FILENAME_MAX);

    // Create movie playback pipeline:
    theMovie = gst_element_factory_make ("playbin2", "ptbmovieplaybackpipeline");

    // Assign name of movie to play:
    g_object_set(G_OBJECT(theMovie), "uri", movieLocation, NULL);

    // Connect callback to about-to-finish signal: Signal is emitted as soon as
    // end of current playback iteration is approaching. The callback checks if
    // looped playback is requested. If so, it schedules a new playback iteration.
    g_signal_connect(G_OBJECT(theMovie), "about-to-finish", G_CALLBACK(PsychMovieAboutToFinishCB), &(movieRecordBANK[slotid]));

    // Assign message context, message bus and message callback for
    // the pipeline to report events and state changes, errors etc.:    
    MovieContext = g_main_loop_new (NULL, FALSE);
    movieRecordBANK[slotid].MovieContext = MovieContext;
    bus = gst_pipeline_get_bus(GST_PIPELINE(theMovie));
    // Didn't work: g_signal_connect (G_OBJECT(bus), "message::error", G_CALLBACK(PsychMessageErrorCB), NULL);
    //              g_signal_connect (G_OBJECT(bus), "message::warning", G_CALLBACK(PsychMessageErrorCB), NULL);
    gst_bus_add_watch(bus, PsychMovieBusCallback, &(movieRecordBANK[slotid]));
    gst_object_unref(bus);

    // Assign an appsink named "ptbsink0" as destination video-sink for
    // all video content. This allows us to get hold of the video frame buffers for
    // converting them into PTB OpenGL textures:
    videosink = gst_element_factory_make ("appsink", "ptbsink0");
    if (!videosink) {
	printf("PTB-ERROR: Failed to create video-sink appsink ptbsink!\n");
	PsychGSProcessMovieContext(movieRecordBANK[slotid].MovieContext, TRUE);
	PsychErrorExitMsg(PsychError_system, "Opening the movie failed. Reason hopefully given above.");
    };

    movieRecordBANK[slotid].videosink = videosink;

    // Our OpenGL texture creation routine needs GL_BGRA8 data in GL_UNSIGNED_INT_8_8_8_8_REV
    // format, but the pipeline usually delivers YUV data in planar format. Therefore
    // need to perform colorspace/colorformat conversion. We build a little videobin
    // which consists of a ffmpegcolorspace converter plugin connected to our appsink
    // plugin which will deliver video data to us for conversion into textures.
    // The "sink" pad of the converter plugin is connected as the "sink" pad of our
    // videobin, and the videobin is connected to the video-sink output of the pipeline,
    // thereby receiving decoded video data. We place a videocaps filter in between the
    // converter and the appsink to enforce a color format conversion to the "colorcaps"
    // we need. colorcaps define the needed data format for efficient conversion into
    // a RGBA8 texture:
    colorcaps = gst_caps_new_simple (   "video/x-raw-rgb",
					"bpp", G_TYPE_INT, 32,
					"depth", G_TYPE_INT, 32,
					"alpha_mask", G_TYPE_INT, 0x000000FF,
					"red_mask", G_TYPE_INT,   0x0000FF00,
					"green_mask", G_TYPE_INT, 0x00FF0000,
					"blue_mask", G_TYPE_INT,  0xFF000000,
					NULL);

    /*
    // Old style method: Only left here for documentation to show how one can create
    // video sub-pipelines via bin's and connect them to each other via ghostpads: 

    GstElement *videobin = gst_bin_new ("video_output_bin");
    GstElement *videocon = gst_element_factory_make ("ffmpegcolorspace", "color_converter");
    gst_bin_add_many(GST_BIN(videobin), videocon, videosink, NULL);

    GstPad *ghostpad = gst_ghost_pad_new("Video_Ghostsink", gst_element_get_pad(videocon, "sink"));
    gst_element_add_pad(videobin, ghostpad);

    gst_element_link_filtered(videocon, videosink, colorcaps);

    // Assign our special videobin as video-sink of the pipeline:
    g_object_set(G_OBJECT(theMovie), "video-sink", videobin, NULL);
    */

    // New style method: Leaves the freedom of choice of color converter (if any)
    // to the auto-plugger.

    // Assign 'colorcaps' as caps to our videosink. This marks the videosink so
    // that it can only receive video image data in the format defined by colorcaps,
    // i.e., a format that is easy to consume for OpenGL's texture creation on std.
    // gpu's. It is the job of the video pipeline's autoplugger to plug in proper
    // color & format conversion plugins to satisfy videosink's needs.
    gst_app_sink_set_caps(GST_APP_SINK(videosink), colorcaps);

    // Assign our special appsink 'videosink' as video-sink of the pipeline:
    g_object_set(G_OBJECT(theMovie), "video-sink", videosink, NULL);
    gst_caps_unref(colorcaps);

    // Get the pad from the final sink for probing width x height of movie frames and nominal framerate of movie:	
    pad = gst_element_get_pad(videosink, "sink");

    PsychGSProcessMovieContext(movieRecordBANK[slotid].MovieContext, FALSE);

    // Should we preroll / preload?	
    if ((preloadSecs > 0) || (preloadSecs == -1)) {
	// Preload / Preroll the pipeline:
	if (!PsychMoviePipelineSetState(theMovie, GST_STATE_PAUSED, 30.0)) {
		PsychGSProcessMovieContext(movieRecordBANK[slotid].MovieContext, TRUE);
		PsychErrorExitMsg(PsychError_user, "In OpenMovie: Opening the movie failed. Reason given above.");
	}
    } else {
	// Ready the pipeline:
	if (!PsychMoviePipelineSetState(theMovie, GST_STATE_READY, 30.0)) {
		PsychGSProcessMovieContext(movieRecordBANK[slotid].MovieContext, TRUE);
		PsychErrorExitMsg(PsychError_user, "In OpenMovie: Opening the movie failed. Reason given above.");
	}    
    }

    // Query number of available video and audio tracks in movie:
    g_object_get (G_OBJECT(theMovie),
               "n-video", &movieRecordBANK[slotid].nrVideoTracks,
               "n-audio", &movieRecordBANK[slotid].nrAudioTracks,
                NULL);

    // We need a valid onscreen window handle for real video playback:
    if ((NULL == win) && (movieRecordBANK[slotid].nrVideoTracks > 0)) {
        if (printErrors) PsychErrorExitMsg(PsychError_user, "No windowPtr to an onscreen window provided. Must do so for movies with videotrack!"); else return;
    }
 
    PsychGSProcessMovieContext(movieRecordBANK[slotid].MovieContext, FALSE);

    PsychInitMutex(&movieRecordBANK[slotid].mutex);
    PsychInitCondition(&movieRecordBANK[slotid].condition, NULL);

    if (oldstyle) {
	// Install the probe callback for reception of video frames from engine at the sink-pad itself:
	gst_pad_add_buffer_probe(pad, G_CALLBACK(PsychHaveVideoDataCallback), &(movieRecordBANK[slotid]));
    } else {
	// Install callbacks used by the videosink (appsink) to announce various events:
	gst_app_sink_set_callbacks(GST_APP_SINK(videosink), &videosinkCallbacks, &(movieRecordBANK[slotid]), PsychDestroyNotifyCallback);
    }

    // Drop frames if the callback can't pull buffers fast enough:
    // together with the max queue length of 1 below, this allows us to
    // maintain audio-video sync by dropping frames if needed.
    gst_app_sink_set_drop(GST_APP_SINK(videosink), TRUE);

    // Only allow one queued buffer before dropping:
    gst_app_sink_set_max_buffers(GST_APP_SINK(videosink), 1);

    // Assign harmless initial settings for fps and frame size:
    rate1 = 0;
    rate2 = 1;
    width = height = 0;

    // Videotrack available?
    if (movieRecordBANK[slotid].nrVideoTracks > 0) {
	// Yes: Query size and framerate of movie:
	peerpad = gst_pad_get_peer(pad);
	caps=gst_pad_get_negotiated_caps(peerpad);
	if (caps) {
		str=gst_caps_get_structure(caps,0);

		/* Get some data about the frame */
		rate1 = 1; rate2 = 1;
		gst_structure_get_fraction(str, "pixel-aspect-ratio", &rate1, &rate2);
		movieRecordBANK[slotid].aspectRatio = (double) rate1 / (double) rate2;
		gst_structure_get_int(str,"width",&width);
		gst_structure_get_int(str,"height",&height);
		rate1 = 0; rate2 = 1;
		gst_structure_get_fraction(str, "framerate", &rate1, &rate2);

	 } else {
		printf("PTB-DEBUG: No frame info available after preroll.\n");	
	 }
    }

    if (strstr(moviename, "v4l2:")) {
	// Special case: The "movie" is actually a video4linux2 live source.
	// Need to make up parameters for now, so it works as a "movie":
	rate1 = 30; width = 640; height = 480;
	movieRecordBANK[slotid].nrVideoTracks = 1;

	// Ugliness at its best ;-)
	if (strstr(moviename, "320")) { width = 320; height = 240; };
    }

    // Release the pad:
    gst_object_unref(pad);

    // Assign new record in moviebank:
    movieRecordBANK[slotid].theMovie = theMovie;
    movieRecordBANK[slotid].loopflag = 0;
    movieRecordBANK[slotid].frameAvail = 0;
    movieRecordBANK[slotid].imageBuffer = NULL;

    *moviehandle = slotid;

    // Increase counter:
    numMovieRecords++;

    // Compute basic movie properties - Duration and fps as well as image size:
    
    // Retrieve duration in seconds:
    fmt = GST_FORMAT_TIME;
    if (gst_element_query_duration(theMovie, &fmt, &length_format)) {
	// This returns nsecs, so convert to seconds:
    	movieRecordBANK[slotid].movieduration = (double) length_format / (double) 1e9;
	//printf("PTB-DEBUG: Duration of movie %i [%s] is %lf seconds.\n", slotid, moviename, movieRecordBANK[slotid].movieduration);
    } else {
	movieRecordBANK[slotid].movieduration = DBL_MAX;
	printf("PTB-WARNING: Could not query duration of movie %i [%s] in seconds. Returning infinity.\n", slotid, moviename);
    }

    // Assign expected framerate, assuming a linear spacing between frames:
    movieRecordBANK[slotid].fps = (double) rate1 / (double) rate2;
    //printf("PTB-DEBUG: Framerate fps of movie %i [%s] is %lf fps.\n", slotid, moviename, movieRecordBANK[slotid].fps);

    // Compute framecount from fps and duration:
    movieRecordBANK[slotid].nrframes = (int)(movieRecordBANK[slotid].fps * movieRecordBANK[slotid].movieduration + 0.5);
    //printf("PTB-DEBUG: Number of frames in movie %i [%s] is %i.\n", slotid, moviename, movieRecordBANK[slotid].nrframes);

    // Define size of images in movie:
    movieRecordBANK[slotid].width = width;
    movieRecordBANK[slotid].height = height;

    // Ready to rock!
    return;
}
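The videosinkCallbacks struct passed to gst_app_sink_set_callbacks() above is defined outside this excerpt. Under the GStreamer 0.10 API this code targets, it would be a file-scope struct along these lines (a sketch with hypothetical callback names standing in for the real handlers defined elsewhere in the file):

/* Sketch (assumption): file-scope callbacks referenced by the call above. */
static GstAppSinkCallbacks videosinkCallbacks = {
    PsychEOSCallback,          /* eos         -- hypothetical name */
    PsychNewPrerollCallback,   /* new_preroll -- hypothetical name */
    PsychNewBufferCallback,    /* new_buffer  -- hypothetical name */
    NULL                       /* new_buffer_list (0.10 only)      */
};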
Example #18
QString Chromaprinter::CreateFingerprint() {
  Q_ASSERT(QThread::currentThread() != qApp->thread());

  buffer_.open(QIODevice::WriteOnly);

  GMainContext* context = g_main_context_new();
  g_main_context_push_thread_default(context);
  event_loop_ = g_main_loop_new(context, FALSE);

  pipeline_ = gst_pipeline_new("pipeline");
  GstElement* src = CreateElement("filesrc", pipeline_);
  GstElement* decode = CreateElement("decodebin2", pipeline_);
  GstElement* convert = CreateElement("audioconvert", pipeline_);
  GstElement* resample = CreateElement("audioresample", pipeline_);
  GstElement* sink = CreateElement("appsink", pipeline_);

  if (!src || !decode || !convert || !resample || !sink) {
    return QString();
  }

  convert_element_ = convert;

  // Connect the elements
  gst_element_link_many(src, decode, nullptr);
  gst_element_link_many(convert, resample, nullptr);

  // Chromaprint expects mono 16-bit ints at a sample rate of 11025Hz.
  GstCaps* caps = gst_caps_new_simple(
      "audio/x-raw-int", "width", G_TYPE_INT, 16, "channels", G_TYPE_INT,
      kDecodeChannels, "rate", G_TYPE_INT, kDecodeRate, nullptr);
  gst_element_link_filtered(resample, sink, caps);
  gst_caps_unref(caps);

  GstAppSinkCallbacks callbacks;
  memset(&callbacks, 0, sizeof(callbacks));
  callbacks.new_buffer = NewBufferCallback;
  gst_app_sink_set_callbacks(reinterpret_cast<GstAppSink*>(sink), &callbacks,
                             this, nullptr);
  g_object_set(G_OBJECT(sink), "sync", FALSE, nullptr);
  g_object_set(G_OBJECT(sink), "emit-signals", TRUE, nullptr);

  // Set the filename
  g_object_set(src, "location", filename_.toUtf8().constData(), nullptr);

  // Connect signals
  CHECKED_GCONNECT(decode, "new-decoded-pad", &NewPadCallback, this);
  gst_bus_set_sync_handler(gst_pipeline_get_bus(GST_PIPELINE(pipeline_)),
                           BusCallbackSync, this);
  guint bus_callback_id = gst_bus_add_watch(
      gst_pipeline_get_bus(GST_PIPELINE(pipeline_)), BusCallback, this);

  QTime time;
  time.start();

  // Start playing
  gst_element_set_state(pipeline_, GST_STATE_PLAYING);

  g_main_loop_run(event_loop_);
  g_main_loop_unref(event_loop_);
  g_main_context_unref(context);

  int decode_time = time.restart();

  buffer_.close();
  QByteArray data = buffer_.data();

  ChromaprintContext* chromaprint =
      chromaprint_new(CHROMAPRINT_ALGORITHM_DEFAULT);
  chromaprint_start(chromaprint, kDecodeRate, kDecodeChannels);
  chromaprint_feed(chromaprint, reinterpret_cast<void*>(data.data()),
                   data.size() / 2);
  chromaprint_finish(chromaprint);

  void* fprint = nullptr;
  int size = 0;
  int ret = chromaprint_get_raw_fingerprint(chromaprint, &fprint, &size);
  QByteArray fingerprint;
  if (ret == 1) {
    void* encoded = nullptr;
    int encoded_size = 0;
    chromaprint_encode_fingerprint(fprint, size, CHROMAPRINT_ALGORITHM_DEFAULT,
                                   &encoded, &encoded_size, 1);

    fingerprint.append(reinterpret_cast<char*>(encoded), encoded_size);

    chromaprint_dealloc(fprint);
    chromaprint_dealloc(encoded);
  }
  chromaprint_free(chromaprint);
  int codegen_time = time.elapsed();

  qLog(Debug) << "Decode time:" << decode_time
              << "Codegen time:" << codegen_time;

  // Cleanup
  callbacks.new_buffer = nullptr;
  gst_app_sink_set_callbacks(reinterpret_cast<GstAppSink*>(sink), &callbacks,
                             this, nullptr);
  gst_bus_set_sync_handler(gst_pipeline_get_bus(GST_PIPELINE(pipeline_)),
                           nullptr, nullptr);
  g_source_remove(bus_callback_id);
  gst_element_set_state(pipeline_, GST_STATE_NULL);
  gst_object_unref(pipeline_);

  return fingerprint;
}
Example #19
// init
bool gstCamera::init( gstCameraSrc src )
{
	GError* err = NULL;
	printf(LOG_GSTREAMER "gstCamera attempting to initialize with %s\n", gstCameraSrcToString(src));

	// build pipeline string
	if( !buildLaunchStr(src) )
	{
		printf(LOG_GSTREAMER "gstCamera failed to build pipeline string\n");
		return false;
	}

	// launch pipeline
	mPipeline = gst_parse_launch(mLaunchStr.c_str(), &err);

	if( err != NULL )
	{
		printf(LOG_GSTREAMER "gstCamera failed to create pipeline\n");
		printf(LOG_GSTREAMER "   (%s)\n", err->message);
		g_error_free(err);
		return false;
	}

	GstPipeline* pipeline = GST_PIPELINE(mPipeline);

	if( !pipeline )
	{
		printf(LOG_GSTREAMER "gstreamer failed to cast GstElement into GstPipeline\n");
		return false;
	}	

	// retrieve pipeline bus
	/*GstBus**/ mBus = gst_pipeline_get_bus(pipeline);

	if( !mBus )
	{
		printf(LOG_GSTREAMER "gstreamer failed to retrieve GstBus from pipeline\n");
		return false;
	}

	// add watch for messages (disabled when we poll the bus ourselves, instead of gmainloop)
	//gst_bus_add_watch(mBus, (GstBusFunc)gst_message_print, NULL);

	// get the appsink
	GstElement* appsinkElement = gst_bin_get_by_name(GST_BIN(pipeline), "mysink");
	GstAppSink* appsink = GST_APP_SINK(appsinkElement);

	if( !appsinkElement || !appsink)
	{
		printf(LOG_GSTREAMER "gstreamer failed to retrieve AppSink element from pipeline\n");
		return false;
	}
	
	mAppSink = appsink;
	
	// setup callbacks
	GstAppSinkCallbacks cb;
	memset(&cb, 0, sizeof(GstAppSinkCallbacks));
	
	cb.eos         = onEOS;
	cb.new_preroll = onPreroll;
	cb.new_sample  = onBuffer;
	
	gst_app_sink_set_callbacks(mAppSink, &cb, (void*)this, NULL);
	
	return true;
}
Example #20
/**
 * Set up the Gstreamer pipeline. Appsrc gets raw frames, and appsink takes
 * encoded frames.
 *
 * The pipeline looks like this:
 *
 * <pre>
 *  .--------.   .-----------.   .----------.
 *  | appsrc |   |  x264enc  |   | appsink  |
 *  |   .----|   |----.  .---|   |----.     |
 *  |   |src |-->|sink|  |src|-->|sink|-----+-->handoff
 *  |   '----|   |----'  '---|   |----'     |   handler
 *  '--------'   '-----------'   '----------'
 * </pre>
 */
static int pipeline_init(struct videnc_state *st, const struct vidsz *size)
{
	GstAppSrc *source;
	GstAppSink *sink;
	GstBus *bus;
	GError* gerror = NULL;
	char pipeline[1024];
	GstStateChangeReturn ret;
	int err = 0;

	if (!st || !size)
		return EINVAL;

	re_printf("    ~~~~ pipeline_init (%d x %d)\n", size->w, size->h);

	snprintf(pipeline, sizeof(pipeline),
	 "appsrc name=source is-live=TRUE block=TRUE "
	 "do-timestamp=TRUE max-bytes=1000000 ! "
	 "videoparse width=%d height=%d format=i420 framerate=%d/1 ! "
	 "x264enc byte-stream=TRUE rc-lookahead=0 "
	 "sync-lookahead=0 bitrate=%d ! "
	 "appsink name=sink emit-signals=TRUE drop=TRUE",
	 size->w, size->h,
	 st->encoder.fps, st->encoder.bitrate / 1000 /* kbit/s */);

	re_printf(
		  "------------------------------------------------\n"
		  "%s\n"
		  "------------------------------------------------\n"
		  ,
		  pipeline);

	/* Initialize pipeline. */
	st->streamer.pipeline = gst_parse_launch(pipeline, &gerror);

	if (gerror) {
		warning("gst_video: launch error: %d: %s: %s\n",
			gerror->code, gerror->message, pipeline);
		err = gerror->code;
		g_error_free(gerror);
		return err;
	}

	/* Configure appsource */
	source = GST_APP_SRC(gst_bin_get_by_name(
				 GST_BIN(st->streamer.pipeline), "source"));
	gst_app_src_set_callbacks(source, &(st->streamer.appsrcCallbacks),
			  st, (GDestroyNotify)appsrc_destroy_notify_cb);

	/* Configure appsink. */
	sink = GST_APP_SINK(gst_bin_get_by_name(
				GST_BIN(st->streamer.pipeline), "sink"));
	gst_app_sink_set_callbacks(sink, &(st->streamer.appsinkCallbacks),
			   st, (GDestroyNotify)appsink_destroy_notify_cb);
	gst_object_unref(GST_OBJECT(sink));

	/* Bus watch */
	bus = gst_pipeline_get_bus(GST_PIPELINE(st->streamer.pipeline));
	gst_bus_set_sync_handler(bus, (GstBusSyncHandler)bus_sync_handler_cb,
				 st, (GDestroyNotify)bus_destroy_notify_cb);
	gst_object_unref(GST_OBJECT(bus));

	/* Set start values of locks */
	pthread_mutex_lock(&st->streamer.wait.mutex);
	st->streamer.wait.flag = 0;
	pthread_mutex_unlock(&st->streamer.wait.mutex);

	pthread_mutex_lock(&st->streamer.eos.mutex);
	st->streamer.eos.flag = 0;
	pthread_mutex_unlock(&st->streamer.eos.mutex);

	/* Start pipeline */
	re_printf("    ~~~~ pipeline_init -- starting pipeline\n");

	ret = gst_element_set_state(st->streamer.pipeline, GST_STATE_PLAYING);
	if (GST_STATE_CHANGE_FAILURE == ret) {
		g_warning("set state returned GST_STATE_CHANGE_FAILURE\n");
		err = EPROTO;
		goto out;
	}

	st->streamer.source = source;

	/* Mark pipeline as working */
	st->streamer.valid = true;

	re_printf("    ~~~~ pipeline_init OK (source=%p, sink=%p)\n",
		  source, sink);

 out:
	return err;
}
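Once the pipeline is PLAYING, the encode path is driven by pushing raw I420 frames into the appsrc; the encoded H.264 then comes back through the appsink callbacks. A minimal sketch of the push side (push_frame is a hypothetical helper; the size math assumes I420 at 12 bits per pixel):

#include <errno.h>
#include <gst/app/gstappsrc.h>

/* Hypothetical helper: wrap one raw I420 frame in a GstBuffer and hand it
 * to the appsrc. Returns 0 on success, EPIPE if the pipeline refused it. */
static int push_frame(GstAppSrc *source, const guint8 *frame,
		      int width, int height)
{
	gsize size = (gsize)width * height * 3 / 2;	/* I420 layout */
	GstBuffer *buf = gst_buffer_new_allocate(NULL, size, NULL);

	gst_buffer_fill(buf, 0, frame, size);

	/* gst_app_src_push_buffer() takes ownership, even on failure */
	if (gst_app_src_push_buffer(source, buf) != GST_FLOW_OK)
		return EPIPE;

	return 0;
}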
Example #21
// ----------------------------------------------------------------------------
// Handle the "pad-added" message
void
GStreamerImportFileHandle::OnPadAdded(GstPad *pad)
{
   // Retrieve the stream caps...skip stream if unavailable
   GstCaps *caps = gst_pad_get_current_caps(pad);
   if (!caps)
   {
      WARN(mPipeline, ("OnPadAdded: unable to retrieve stream caps"));
      return;
   }

   // Get the caps structure...no need to release
   GstStructure *str = gst_caps_get_structure(caps, 0);
   if (!str)
   {
      WARN(mPipeline, ("OnPadAdded: unable to retrieve caps structure"));
      gst_caps_unref(caps);
      return;
   }

   // Only accept audio streams...no need to release
   const gchar *name = gst_structure_get_name(str);
   if (!g_strrstr(name, "audio"))
   {
      WARN(mPipeline, ("OnPadAdded: bypassing '%s' stream", name));
      gst_caps_unref(caps);
      return;
   }

   // Allocate a new stream context
   GStreamContext *c = g_new0(GStreamContext, 1);
   if (!c)
   {
      WARN(mPipeline, ("OnPadAdded: unable to allocate stream context"));
      gst_caps_unref(caps);
      return;
   }

   // Set initial state
   c->mUse = true;

   // Always add it to the context list to keep the number of contexts
   // in sync with the number of streams
   g_mutex_lock(&mStreamsLock);
   g_ptr_array_add(mStreams, c);
   g_mutex_unlock(&mStreamsLock);

   // Need pointer to context during pad removal (pad-remove signal)
   SETCTX(pad, c);

   // Save the stream's start time and duration
   gst_pad_query_position(pad, GST_FORMAT_TIME, &c->mPosition);
   gst_pad_query_duration(pad, GST_FORMAT_TIME, &c->mDuration);

   // Retrieve the number of channels and validate
   gint channels = -1;
   gst_structure_get_int(str, "channels", &channels);
   if (channels <= 0)
   {
      WARN(mPipeline, ("OnPadAdded: channel count is invalid %d", channels));
      gst_caps_unref(caps);
      return;
   }
   c->mNumChannels = channels;

   // Retrieve the sample rate and validate
   gint rate = -1;
   gst_structure_get_int(str, "rate", &rate);
   if (rate <= 0)
   {
      WARN(mPipeline, ("OnPadAdded: sample rate is invalid %d", rate));
      gst_caps_unref(caps);
      return;
   }
   c->mSampleRate = (double) rate;

   c->mType = g_strdup(name);
   if (c->mType == NULL)
   {
      WARN(mPipeline, ("OnPadAdded: unable to allocate audio type"));
      gst_caps_unref(caps);
      return;
   }

   // Done with capabilities
   gst_caps_unref(caps);

   // Create audioconvert element
   c->mConv = gst_element_factory_make("audioconvert", NULL);
   if (!c->mConv)
   {
      WARN(mPipeline, ("OnPadAdded: failed to create audioconvert element"));
      return;
   }

   // Create appsink element
   c->mSink = gst_element_factory_make("appsink", NULL);
   if (!c->mSink)
   {
      WARN(mPipeline, ("OnPadAdded: failed to create appsink element"));
      return;
   }
   SETCTX(c->mSink, c);

   // Set the appsink callbacks and add the context pointer
   gst_app_sink_set_callbacks(GST_APP_SINK(c->mSink), &AppSinkCallbacks, this, NULL);

   // Set the capabilities that we desire
   caps = gst_static_caps_get(&supportedCaps);
   if (!caps)
   {
      WARN(mPipeline, ("OnPadAdded: failed to create static caps"));
      return;
   }
   gst_app_sink_set_caps(GST_APP_SINK(c->mSink), caps);
   gst_caps_unref(caps);

   // Do not sync to the clock...process as quickly as possible
   gst_base_sink_set_sync(GST_BASE_SINK(c->mSink), FALSE);

   // Don't drop buffers...allow queue to build unfettered
   gst_app_sink_set_drop(GST_APP_SINK(c->mSink), FALSE);

   // Add both elements to the pipeline
   gst_bin_add_many(GST_BIN(mPipeline), c->mConv, c->mSink, NULL);

   // Link them together
   if (!gst_element_link(c->mConv, c->mSink))
   {
      WARN(mPipeline, ("OnPadAdded: failed to link autioconvert and appsink"));
      return;
   }

   // Link the new src pad to the audioconvert sink pad
   GstPadLinkReturn ret = GST_PAD_LINK_OK;
   GstPad *convsink = gst_element_get_static_pad(c->mConv, "sink");
   if (convsink)
   {
      ret = gst_pad_link(pad, convsink);
      gst_object_unref(convsink);
   }
   if (!convsink || ret != GST_PAD_LINK_OK)
   {
      WARN(mPipeline, ("OnPadAdded: failed to link uridecodebin to audioconvert - %d", ret));
      return;
   }

   // Synchronize audioconvert state with parent
   if (!gst_element_sync_state_with_parent(c->mConv))
   {
      WARN(mPipeline, ("OnPadAdded: unable to sync audioconvert state"));
      return;
   }

   // Synchronize appsink state with parent
   if (!gst_element_sync_state_with_parent(c->mSink))
   {
      WARN(mPipeline, ("OnPadAdded: unable to sync appaink state"));
      return;
   }

   return;
}
Example #22
// ----------------------------------------------------------------------------
// Import streams
int
GStreamerImportFileHandle::Import(TrackFactory *trackFactory,
                                  Track ***outTracks,
                                  int *outNumTracks,
                                  Tags *tags)
{
   // Save track factory pointer
   mTrackFactory = trackFactory;

   // Create the progress dialog
   CreateProgress();

   // Block streams that are to be bypassed
   g_mutex_lock(&mStreamsLock);
   bool haveStreams = false;
   for (guint i = 0; i < mStreams->len; i++)
   {
      GStreamContext *c = (GStreamContext *) g_ptr_array_index(mStreams, i);

      // Did the user choose to skip this stream?
      if (!c->mUse)
      {
         // Get the audioconvert sink pad and unlink
         GstPad *convsink = gst_element_get_static_pad(c->mConv, "sink");
         GstPad *convpeer = gst_pad_get_peer(convsink);
         gst_pad_unlink(convpeer, convsink);
         gst_object_unref(convpeer);

         // Set bitbucket callbacks so the prerolled sample won't get processed
         // when we change the state to PLAYING
         gst_app_sink_set_callbacks(GST_APP_SINK(c->mSink), &AppSinkBitBucket, this, NULL);

         // Set state to playing for conv and sink so EOS gets processed
         gst_element_set_state(c->mConv, GST_STATE_PLAYING);
         gst_element_set_state(c->mSink, GST_STATE_PLAYING);

         // Send an EOS event to the pad to force the elements to drain
         gst_pad_send_event(convsink, gst_event_new_eos());

         // Resync state with pipeline
         gst_element_sync_state_with_parent(c->mConv);
         gst_element_sync_state_with_parent(c->mSink);

         // Done with the pad
         gst_object_unref(convsink);

         // Unlink audioconvert and appsink
         gst_element_unlink(c->mConv, c->mSink);

         // Remove them from the bin
         gst_bin_remove_many(GST_BIN(mPipeline), c->mConv, c->mSink, NULL);

         // All done with them
         c->mConv = NULL;
         c->mSink = NULL;

         continue;
      }

      // We have a stream to process
      haveStreams = true;
   }
   g_mutex_unlock(&mStreamsLock);

   // Can't do much if we don't have any streams to process
   if (!haveStreams)
   {
      wxMessageBox(wxT("File doesn't contain any audio streams."),
                   wxT("GStreamer Importer"));
      return eProgressFailed;
   }

   // Get the ball rolling...
   GstStateChangeReturn state = gst_element_set_state(mPipeline, GST_STATE_PLAYING);
   if (state == GST_STATE_CHANGE_FAILURE)
   {
      wxMessageBox(wxT("Unable to import file, state change failed."),
                   wxT("GStreamer Importer"));
      return eProgressFailed;
   }

   // Get the duration of the stream
   gint64 duration;
   gst_element_query_duration(mPipeline, GST_FORMAT_TIME, &duration);

   // Handle bus messages and update progress while the file is importing
   bool success = true;
   int updateResult = eProgressSuccess;
   while (ProcessBusMessage(success) && success && updateResult == eProgressSuccess)
   {
      gint64 position;

      // Update progress indicator and give user chance to abort
      if (gst_element_query_position(mPipeline, GST_FORMAT_TIME, &position))
      {
         updateResult = mProgress->Update((wxLongLong_t) position,
                                          (wxLongLong_t) duration);
      }
   }

   // Disable pipeline
   gst_element_set_state(mPipeline, GST_STATE_NULL);

   // Something bad happened
   if (!success || updateResult == eProgressFailed || updateResult == eProgressCancelled)
   {
      return updateResult;
   }

   // Grab the streams lock
   g_mutex_lock(&mStreamsLock);

   // Count the total number of tracks collected
   *outNumTracks = 0;
   for (guint s = 0; s < mStreams->len; s++)
   {
      GStreamContext *c = (GStreamContext*)g_ptr_array_index(mStreams, s);
      if (c->mChannels)
      {
         *outNumTracks += c->mNumChannels;
      }
   }

   // Create new tracks
   *outTracks = new Track *[*outNumTracks];

   // Copy audio from mChannels to the newly created tracks (destroying mChannels in the process)
   int trackindex = 0;
   for (guint s = 0; s < mStreams->len; s++)
   {
      GStreamContext *c = (GStreamContext*)g_ptr_array_index(mStreams, s);
      if (c->mChannels)
      {
         for (int ch = 0; ch < c->mNumChannels; ch++)
         {
            c->mChannels[ch]->Flush();
            (*outTracks)[trackindex++] = c->mChannels[ch];
         }

         delete [] c->mChannels;
         c->mChannels = NULL;
      }
   }
   g_mutex_unlock(&mStreamsLock);

   // Set any tags found in the stream
   *tags = mTags;

   return updateResult;
}
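The AppSinkBitBucket table installed above is what lets a rejected stream drain without reaching the import path. A sketch of what such a bit bucket likely looks like: pull every preroll and sample and drop it on the floor (the callback names are ours):

static GstFlowReturn DropNewPreroll(GstAppSink *appsink, gpointer data)
{
   GstSample *sample = gst_app_sink_pull_preroll(appsink);
   if (sample)
      gst_sample_unref(sample);
   return GST_FLOW_OK;
}

static GstFlowReturn DropNewSample(GstAppSink *appsink, gpointer data)
{
   GstSample *sample = gst_app_sink_pull_sample(appsink);
   if (sample)
      gst_sample_unref(sample);
   return GST_FLOW_OK;
}

static GstAppSinkCallbacks AppSinkBitBucket =
{
   NULL,             // eos: still observed through the bus
   DropNewPreroll,   // new_preroll
   DropNewSample     // new_sample
};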
Example #23
// A call to this callback indicates that this interface module will be
// a talker. Any talker initialization can be done in this function.
void openavbIntfMpeg2tsGstTxInitCB(media_q_t *pMediaQ)
{
	AVB_TRACE_ENTRY(AVB_TRACE_INTF_DETAIL);

	if (pMediaQ)
	{
		pvt_data_t *pPvtData = pMediaQ->pPvtIntfInfo;
		if (!pPvtData)
		{
			AVB_LOG_ERROR("Private interface module data not allocated.");
			return;
		}

		pPvtData->pipe = (GstElement*)NULL;
		pPvtData->appsink = (GstAppSink*)NULL;
		pPvtData->appsrc = (GstAppSrc*)NULL;
		pPvtData->bus = (GstBus*)NULL;
		pPvtData->nWaiting = 0;

		GError *error = NULL;
		pPvtData->pipe = gst_parse_launch(pPvtData->pPipelineStr, &error);
		if (error)
		{
			AVB_LOGF_ERROR("Error creating pipeline: %s", error->message);
			return;
		}

		AVB_LOGF_INFO("Pipeline: %s", pPvtData->pPipelineStr);
		pPvtData->appsink = GST_APP_SINK(gst_bin_get_by_name(GST_BIN(pPvtData->pipe), APPSINK_NAME));
		if (!pPvtData->appsink)
		{
			AVB_LOG_ERROR("Failed to find appsink element");
			return;
		}

		// create bus
		pPvtData->bus = gst_pipeline_get_bus(GST_PIPELINE(pPvtData->pipe));
		if (!pPvtData->bus)
		{
			AVB_LOG_ERROR("Failed to create bus");
			return;
		}

		/* add callback for bus messages */
		gst_bus_add_watch(pPvtData->bus, (GstBusFunc)bus_message, pMediaQ);

		// Setup callback function to handle new buffers delivered to sink
		GstAppSinkCallbacks cbfns;
		memset(&cbfns, 0, sizeof(GstAppSinkCallbacks));

		gst_al_set_callback(&cbfns, sinkNewBufferSample);

		gst_app_sink_set_callbacks(pPvtData->appsink, &cbfns, (gpointer)(pMediaQ), NULL);

		// Set most capabilities in pipeline (config), not code

		// Don't drop buffers
		g_object_set(pPvtData->appsink, "drop", 0, NULL);

		// Start playing
		gst_element_set_state(pPvtData->pipe, GST_STATE_PLAYING);
	}

	AVB_TRACE_EXIT(AVB_TRACE_INTF_DETAIL);
}
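gst_al_set_callback() is an abstraction-layer helper that hides the GStreamer 0.10/1.0 rename of the per-buffer callback. A plausible sketch, assuming the shim does nothing beyond assigning the right struct field:

/* Hypothetical shim: the per-buffer callback moved from .new_buffer (0.10)
 * to .new_sample (1.0) but kept the same shape. */
static void gst_al_set_callback(GstAppSinkCallbacks *cbfns,
	GstFlowReturn (*fn)(GstAppSink *sink, gpointer user_data))
{
#if GST_VERSION_MAJOR == 0
	cbfns->new_buffer = fn;
#else
	cbfns->new_sample = fn;
#endif
}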
Example #24
bool ofGstUtils::startPipeline(){

	bPaused 			= true;
	speed 				= 1.0f;


	GstBus * bus = gst_pipeline_get_bus (GST_PIPELINE(gstPipeline));

	if(bus){
		busWatchID = gst_bus_add_watch (bus, (GstBusFunc) busFunction, this);
		gst_object_unref(bus);	// only unref a bus we actually got
	}

	if(isAppSink){
		ofLogVerbose("ofGstUtils") << "startPipeline(): attaching callbacks";
		// set the appsink to not emit signals, we are using callbacks instead
		// and frameByFrame to get buffers by polling instead of callback
		g_object_set (G_OBJECT (gstSink), "emit-signals", FALSE, "sync", !bFrameByFrame, (void*)NULL);
		// gst_app_sink_set_drop(GST_APP_SINK(gstSink),1);
		// gst_app_sink_set_max_buffers(GST_APP_SINK(gstSink),2);

		if(!bFrameByFrame){
			GstAppSinkCallbacks gstCallbacks = {};	// zero-init so callbacks we don't set stay NULL
			gstCallbacks.eos = &on_eos_from_source;
			gstCallbacks.new_preroll = &on_new_preroll_from_source;
#if GST_VERSION_MAJOR==0
			gstCallbacks.new_buffer = &on_new_buffer_from_source;
#else
			gstCallbacks.new_sample = &on_new_buffer_from_source;
#endif

			gst_app_sink_set_callbacks(GST_APP_SINK(gstSink), &gstCallbacks, this, NULL);
		}
	}

	// pause the pipeline
	//GstState targetState;
	GstState state;
	auto ret = gst_element_set_state(GST_ELEMENT(gstPipeline), GST_STATE_PAUSED);
    switch (ret) {
		case GST_STATE_CHANGE_FAILURE:
			ofLogError("ofGstUtils") << "startPipeline(): unable to pause pipeline";
			return false;
			break;
		case GST_STATE_CHANGE_NO_PREROLL:
			ofLogVerbose() << "Pipeline is live and does not need PREROLL waiting PLAY";
			gst_element_set_state(GST_ELEMENT(gstPipeline), GST_STATE_PLAYING);
			if(isAppSink){
				gst_app_sink_set_max_buffers(GST_APP_SINK(gstSink),1);
				gst_app_sink_set_drop (GST_APP_SINK(gstSink),true);
			}
			break;
		case GST_STATE_CHANGE_ASYNC:
			ofLogVerbose() << "Pipeline is PREROLLING";
			//targetState = GST_STATE_PAUSED;
			if(!isStream && gst_element_get_state(gstPipeline,&state,NULL,5*GST_SECOND)!=GST_STATE_CHANGE_SUCCESS){
				ofLogError("ofGstUtils") << "startPipeline(): unable to pause pipeline after 5s";
				return false;
			}else{
				ofLogVerbose() << "Pipeline is PREROLLED";
			}
			break;
		case GST_STATE_CHANGE_SUCCESS:
			ofLogVerbose() << "Pipeline is PREROLLED";
			break;
    }

	// wait for paused state to query the duration
	if(!isStream){
		bPlaying = true;
		bLoaded = true;
	}

	return true;
}
Example #25
/* callback called when demux creates a src pad.
 * We will create an AppSink to get the data
 */
static void
on_demuxNewPad (GstElement * demux, GstPad * pad, gpointer user_data)
{
  GstAdaptiveDemuxTestEnginePrivate *priv =
      (GstAdaptiveDemuxTestEnginePrivate *) user_data;
  GstElement *pipeline;
  GstElement *sink;
  gboolean ret;
  gchar *name;
  GstPad *appsink_pad;
  GstAppSinkCallbacks appSinkCallbacks;
  GstAdaptiveDemuxTestOutputStream *stream;
  GObjectClass *gobject_class;

  fail_unless (priv != NULL);
  name = gst_pad_get_name (pad);

  stream = g_slice_new0 (GstAdaptiveDemuxTestOutputStream);
  GST_DEBUG ("created pad %p", pad);

  sink = gst_element_factory_make ("appsink", name);
  g_free (name);
  fail_unless (sink != NULL);

  GST_TEST_LOCK (priv);

  /* register the AppSink pointer in the test output data */
  gst_object_ref (sink);
  stream->appsink = GST_APP_SINK (sink);

  appSinkCallbacks.eos = on_appSinkEOS;
  appSinkCallbacks.new_preroll = NULL;
  appSinkCallbacks.new_sample = on_appSinkNewSample;

  gst_app_sink_set_callbacks (GST_APP_SINK (sink), &appSinkCallbacks, priv,
      NULL);
  appsink_pad = gst_element_get_static_pad (sink, "sink");
  gst_pad_add_probe (appsink_pad,
      GST_PAD_PROBE_TYPE_EVENT_DOWNSTREAM | GST_PAD_PROBE_TYPE_EVENT_FLUSH,
      (GstPadProbeCallback) on_appsink_event, priv, NULL);
  gst_object_unref (appsink_pad);

  gst_pad_add_probe (pad, GST_PAD_PROBE_TYPE_BUFFER,
      (GstPadProbeCallback) on_demux_sent_data, priv, NULL);
  gst_pad_add_probe (pad, GST_PAD_PROBE_TYPE_EVENT_DOWNSTREAM |
      GST_PAD_PROBE_TYPE_EVENT_FLUSH,
      (GstPadProbeCallback) on_demux_sent_event, priv, NULL);
  gobject_class = G_OBJECT_GET_CLASS (sink);
  if (g_object_class_find_property (gobject_class, "sync")) {
    GST_DEBUG ("Setting sync=FALSE on AppSink");
    g_object_set (G_OBJECT (sink), "sync", FALSE, NULL);
  }
  stream->pad = gst_object_ref (pad);


  g_ptr_array_add (priv->engine.output_streams, stream);
  GST_TEST_UNLOCK (priv);

  pipeline = GST_ELEMENT (gst_element_get_parent (demux));
  fail_unless (pipeline != NULL);
  ret = gst_bin_add (GST_BIN (pipeline), sink);
  fail_unless_equals_int (ret, TRUE);
  gst_object_unref (pipeline);
  ret = gst_element_link (demux, sink);
  fail_unless_equals_int (ret, TRUE);
  ret = gst_element_sync_state_with_parent (sink);
  fail_unless_equals_int (ret, TRUE);
  GST_TEST_LOCK (priv);
  if (priv->callbacks->demux_pad_added) {
    priv->callbacks->demux_pad_added (&priv->engine, stream, priv->user_data);
  }
  GST_TEST_UNLOCK (priv);
}
Example #26
gboolean streamer_run(guint in_framerate, guint out_width, guint out_height, const gchar* out_fname) {
	gboolean tcp = !g_strcmp0(out_fname, "tcp"),
			toapp = !g_strcmp0(out_fname, "app"),
			res;
	/* positional order is eos, new_preroll, new_sample: despite its name,
	 * on_new_preroll is bound to the new_sample slot here */
	static GstAppSinkCallbacks callbacks = {
		.eos = on_eos,
		.new_preroll = NULL,
		.new_sample = on_new_preroll
	};

	app.outfile = fopen("out.jpeg", "wb");

	g_cond_init(&app.cond);
	app.loop = g_main_loop_new(NULL, FALSE);
	app.in_width = 0;
	app.in_height = 0;
	app.out_width = out_width;
	app.out_height = out_height;
	app.in_framerate = in_framerate;
	app.resize = FALSE;
	app.have_data = FALSE;

	app.pipeline = gst_pipeline_new("video-streamer");
	app.source = gst_element_factory_make("appsrc", "app-src");
	app.scale = gst_element_factory_make("videoscale", "video-scale");
	app.capsfilter = gst_element_factory_make("capsfilter", "caps-filter");
	app.conv = gst_element_factory_make("jpegenc", "jpeg-converter");
	if(toapp) {
		app.sink = gst_element_factory_make("appsink", "app-sink");
		gst_app_sink_set_callbacks(GST_APP_SINK(app.sink), &callbacks, NULL, NULL);
		// gst_app_sink_set_drop(GST_APP_SINK(app.sink), TRUE);
		// g_object_set (G_OBJECT (app.sink), "caps",
  // 			gst_caps_new_simple ("video/x-raw",
		// 				 "format", G_TYPE_STRING, "RGB",
		// 				 "width", G_TYPE_INT, out_width,
		// 				 "height", G_TYPE_INT, out_height,
		// 				 "framerate", GST_TYPE_FRACTION, in_framerate, 1,
		// 				 NULL), NULL);
	} else if(tcp) {
		app.sink = gst_element_factory_make("tcpserversink", "tcp-sink");
	} else {
		app.mux = gst_element_factory_make("avimux", "avi-muxer");
		g_assert(app.mux);
		app.sink = gst_element_factory_make("filesink", "file-sink");
	}

	g_assert(app.pipeline);
	g_assert(app.source);
	g_assert(app.scale);
	g_assert(app.capsfilter);
	g_assert(app.conv);
	g_assert(app.sink);

	app.bus = gst_pipeline_get_bus(GST_PIPELINE(app.pipeline));
	app.bus_watch_id = gst_bus_add_watch(app.bus, bus_call, app.loop);
	gst_object_unref(app.bus);

	g_object_set(G_OBJECT(app.source),
		"stream-type", 0,
		"format", GST_FORMAT_TIME, NULL);
	g_signal_connect(app.source, "need-data", G_CALLBACK(cb_need_data), NULL);
	//g_signal_connect(source, "enough-data", G_CALLBACK(stop_feed), NULL);

	g_object_set(G_OBJECT(app.source), "caps",
		gst_caps_new_simple("video/x-raw",
		"format", G_TYPE_STRING, "RGB",
		"width", G_TYPE_INT, out_width,
		"height", G_TYPE_INT, out_height,
		"framerate", GST_TYPE_FRACTION, in_framerate, 1,
		NULL), NULL);
	if(tcp || toapp) {
		gst_bin_add_many(GST_BIN(app.pipeline),
			app.source, app.scale, app.capsfilter, app.conv, app.sink, NULL);
	} else {
		g_object_set(G_OBJECT(app.sink), "location", out_fname, NULL);
		gst_bin_add_many(GST_BIN(app.pipeline),
			app.source, app.scale, app.capsfilter, app.conv,  app.mux, app.sink, NULL);
	}

	g_object_set(G_OBJECT(app.capsfilter), "caps",
		gst_caps_new_simple("video/x-raw",
		"format", G_TYPE_STRING, "RGB",
		"width", G_TYPE_INT, out_width,
		"height", G_TYPE_INT, out_height,
		"framerate", GST_TYPE_FRACTION, in_framerate, 1,
		NULL), NULL);
	if(tcp || toapp)
		res = gst_element_link_many(app.source, app.scale, app.capsfilter,
			app.conv, app.sink, NULL);
	else
		res = gst_element_link_many(app.source, app.scale, app.capsfilter,
			app.conv, app.mux, app.sink, NULL);

	if(!res) {
		g_printerr("ERROR: linking failed\n");
		return FALSE;
	}

	gst_element_set_state(app.pipeline, GST_STATE_PLAYING);

	g_debug("Running...\n");

	if ((app.m_loop_thread = g_thread_new("mainloop", (GThreadFunc)main_loop_run, NULL)) == NULL){
		g_printerr("ERROR: cannot start loop thread\n");
		return FALSE;
	}

	return TRUE;
}
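cb_need_data, connected to the appsrc's need-data signal above, is where frames enter the pipeline. A hedged sketch of the usual pattern: allocate a buffer, timestamp it from the configured framerate, and push it through the push-buffer action signal (frame generation itself is stubbed out):

static void cb_need_data(GstElement *appsrc, guint unused_size,
		gpointer user_data) {
	static GstClockTime timestamp = 0;
	gsize size = app.out_width * app.out_height * 3;	/* RGB, 8 bits/ch */
	GstBuffer *buffer = gst_buffer_new_allocate(NULL, size, NULL);
	GstFlowReturn ret;

	/* fill the buffer with the next frame here (omitted) */

	GST_BUFFER_PTS(buffer) = timestamp;
	GST_BUFFER_DURATION(buffer) =
		gst_util_uint64_scale_int(1, GST_SECOND, app.in_framerate);
	timestamp += GST_BUFFER_DURATION(buffer);

	/* the action signal takes its own reference, so drop ours afterwards */
	g_signal_emit_by_name(appsrc, "push-buffer", buffer, &ret);
	gst_buffer_unref(buffer);

	if (ret != GST_FLOW_OK)
		g_main_loop_quit(app.loop);
}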
Example #27
static gint create_encoder_pipeline (Encoder *encoder)
{
        GstElement *pipeline, *element;
        Bin *bin;
        Link *link;
        GSList *bins, *links, *elements;
        GstElementFactory *element_factory;
        GType type;
        EncoderStream *stream;
        GstAppSrcCallbacks callbacks = {
                need_data_callback,
                NULL,
                NULL
        };
        GstAppSinkCallbacks encoder_appsink_callbacks = {
                NULL,
                NULL,
                new_sample_callback
        };
        GstCaps *caps;
        GstBus *bus;
 
        pipeline = gst_pipeline_new (NULL);

        /* add the elements to the pipeline first. */
        bins = encoder->bins;
        while (bins != NULL) {
                bin = bins->data;
                elements = bin->elements;
                while (elements != NULL) {
                        element = elements->data;
                        if (!gst_bin_add (GST_BIN (pipeline), element)) {
                                GST_ERROR ("add element %s to bin %s error.", gst_element_get_name (element), bin->name);
                                return 1;
                        }
                        elements = g_slist_next (elements);
                }
                bins = g_slist_next (bins);
        }

        /* then link the elements. */
        bins = encoder->bins;
        while (bins != NULL) {
                bin = bins->data;
                element = bin->first;
                element_factory = gst_element_get_factory (element);
                type = gst_element_factory_get_element_type (element_factory);
                stream = NULL;
                if (g_strcmp0 ("GstAppSrc", g_type_name (type)) == 0) {
                        GST_INFO ("Encoder appsrc found.");
                        stream = encoder_get_stream (encoder, bin->name);
                        gst_app_src_set_callbacks (GST_APP_SRC (element), &callbacks, stream, NULL);
                }
                element = bin->last;
                element_factory = gst_element_get_factory (element);
                type = gst_element_factory_get_element_type (element_factory);
                if ((g_strcmp0 ("GstAppSink", g_type_name (type)) == 0) ||
                    (g_strcmp0 ("GstHlsSink", g_type_name (type)) == 0) ||
                    (g_strcmp0 ("GstFileSink", g_type_name (type)) == 0)) {
                        GstPad *pad;

                        if (g_strcmp0 ("GstAppSink", g_type_name (type)) == 0) {
                                GST_INFO ("Encoder appsink found.");
                                gst_app_sink_set_callbacks (GST_APP_SINK (element), &encoder_appsink_callbacks, encoder, NULL);
                        }
                        pad = gst_element_get_static_pad (element, "sink");
                        gst_pad_add_probe (pad, GST_PAD_PROBE_TYPE_EVENT_DOWNSTREAM, encoder_appsink_event_probe, encoder, NULL);
                }
                links = bin->links;
                while (links != NULL) {
                        link = links->data;
                        GST_INFO ("link element: %s -> %s", link->src_name, link->sink_name);
                        if (link->caps != NULL) {
                                caps = gst_caps_from_string (link->caps);
                                gst_element_link_filtered (link->src, link->sink, caps);
                                gst_caps_unref (caps);

                        } else {
                                gst_element_link (link->src, link->sink);
                        }
                        links = g_slist_next (links);
                }
                bins = g_slist_next (bins);
        }
        bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
        gst_bus_add_watch (bus, bus_callback, encoder);
        g_object_unref (bus);
        encoder->pipeline = pipeline;

        return 0;
}
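new_sample_callback, registered on the encoder appsink above, is where encoded output leaves the pipeline. A reduced sketch; encoder_output_write() is a hypothetical stand-in for whatever the encoder actually does with the bytes:

static GstFlowReturn new_sample_callback (GstAppSink *appsink, gpointer user_data)
{
        Encoder *encoder = (Encoder *)user_data;
        GstSample *sample;
        GstBuffer *buffer;
        GstMapInfo map;

        sample = gst_app_sink_pull_sample (appsink);
        if (sample == NULL) {
                return GST_FLOW_ERROR;
        }

        buffer = gst_sample_get_buffer (sample);
        if (gst_buffer_map (buffer, &map, GST_MAP_READ)) {
                /* hypothetical: append the encoded bytes to the encoder's
                   output buffer for the streaming side to consume */
                encoder_output_write (encoder, map.data, map.size);
                gst_buffer_unmap (buffer, &map);
        }

        gst_sample_unref (sample);
        return GST_FLOW_OK;
}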
Example #28
void ofxGstRTPServer::play(){
	// pass the pipeline to the gstUtils so it starts everything
	gst.setPipelineWithSink(pipelineStr,"",true);

	// get the rtp and rtcp elements from the pipeline so we can read their properties
	// during execution
	rtpbin = gst.getGstElementByName("rtpbin");
	vRTPsink = gst.getGstElementByName("vrtpsink");
	vRTPCsink = gst.getGstElementByName("vrtcpsink");
	vRTPCsrc = gst.getGstElementByName("vrtcpsrc");

	aRTPsink = gst.getGstElementByName("artpsink");
	aRTPCsink = gst.getGstElementByName("artcpsink");
	aRTPCsrc = gst.getGstElementByName("artcpsrc");

	dRTPsink = gst.getGstElementByName("drtpsink");
	dRTPCsink = gst.getGstElementByName("drtcpsink");
	dRTPCsrc = gst.getGstElementByName("drtcpsrc");

	oRTPsink = gst.getGstElementByName("ortpsink");
	oRTPCsink = gst.getGstElementByName("ortcpsink");
	oRTPCsrc = gst.getGstElementByName("ortcpsrc");

	vEncoder = gst.getGstElementByName("vencoder");
	dEncoder = gst.getGstElementByName("dencoder");
	aEncoder = gst.getGstElementByName("aencoder");
	appSrcVideoRGB = gst.getGstElementByName("appsrcvideo");
	appSrcDepth = gst.getGstElementByName("appsrcdepth");
	appSrcOsc = gst.getGstElementByName("appsrcosc");

#if ENABLE_ECHO_CANCEL
	if(echoCancel && audioChannelReady){
		appSrcAudio = gst.getGstElementByName("audioechosrc");
		if(appSrcAudio){
			gst_app_src_set_stream_type((GstAppSrc*)appSrcAudio,GST_APP_STREAM_TYPE_STREAM);
		}

		#ifdef TARGET_LINUX
			gstAudioIn.setPipelineWithSink("pulsesrc stream-properties=\"props,media.role=phone\" name=audiocapture ! audio/x-raw,format=S16LE,rate=44100,channels=1 ! audioresample ! audioconvert ! audio/x-raw,format=S16LE,rate=32000,channels=1 ! appsink name=audioechosink");
			volume = gstAudioIn.getGstElementByName("audiocapture");
		#elif defined(TARGET_OSX)
			// for osx we specify the output format since osxaudiosrc doesn't report the formats supported by the hw
			// FIXME: we should detect the format somehow and set it automatically
			gstAudioIn.setPipelineWithSink("osxaudiosrc name=audiocapture ! audio/x-raw,rate=44100,channels=1 ! volume name=volume ! audioresample ! audioconvert ! audio/x-raw,format=S16LE,rate=32000,channels=1 ! appsink name=audioechosink");
			volume = gstAudioIn.getGstElementByName("volume");
		#endif

		appSinkAudio = gstAudioIn.getGstElementByName("audioechosink");
		audiocapture = gstAudioIn.getGstElementByName("audiocapture");

		// set callbacks to receive audio data
		GstAppSinkCallbacks gstCallbacks;
		gstCallbacks.eos = &on_eos_from_audio;
		gstCallbacks.new_preroll = &on_new_preroll_from_audio;
		gstCallbacks.new_sample = &on_new_buffer_from_audio;
		gst_app_sink_set_callbacks(GST_APP_SINK(appSinkAudio), &gstCallbacks, this, NULL);
		gst_app_sink_set_emit_signals(GST_APP_SINK(appSinkAudio),0);
	}
#endif

#if ENABLE_NAT_TRANSVERSAL
	if(videoStream){
		g_object_set(G_OBJECT(vRTPsink),"agent",videoStream->getAgent(),"stream",videoStream->getStreamID(),"component",1,NULL);
		g_object_set(G_OBJECT(vRTPCsink),"agent",videoStream->getAgent(),"stream",videoStream->getStreamID(),"component",2,NULL);
		g_object_set(G_OBJECT(vRTPCsrc),"agent",videoStream->getAgent(),"stream",videoStream->getStreamID(),"component",3,NULL);
	}
	if(depthStream){
		g_object_set(G_OBJECT(dRTPsink),"agent",depthStream->getAgent(),"stream",depthStream->getStreamID(),"component",1,NULL);
		g_object_set(G_OBJECT(dRTPCsink),"agent",depthStream->getAgent(),"stream",depthStream->getStreamID(),"component",2,NULL);
		g_object_set(G_OBJECT(dRTPCsrc),"agent",depthStream->getAgent(),"stream",depthStream->getStreamID(),"component",3,NULL);
	}
	if(audioStream){
		g_object_set(G_OBJECT(aRTPsink),"agent",audioStream->getAgent(),"stream",audioStream->getStreamID(),"component",1,NULL);
		g_object_set(G_OBJECT(aRTPCsink),"agent",audioStream->getAgent(),"stream",audioStream->getStreamID(),"component",2,NULL);
		g_object_set(G_OBJECT(aRTPCsrc),"agent",audioStream->getAgent(),"stream",audioStream->getStreamID(),"component",3,NULL);
	}
	if(oscStream){
		g_object_set(G_OBJECT(oRTPsink),"agent",oscStream->getAgent(),"stream",oscStream->getStreamID(),"component",1,NULL);
		g_object_set(G_OBJECT(oRTPCsink),"agent",oscStream->getAgent(),"stream",oscStream->getStreamID(),"component",2,NULL);
		g_object_set(G_OBJECT(oRTPCsrc),"agent",oscStream->getAgent(),"stream",oscStream->getStreamID(),"component",3,NULL);
	}
#endif


	if(appSrcVideoRGB) gst_app_src_set_stream_type((GstAppSrc*)appSrcVideoRGB,GST_APP_STREAM_TYPE_STREAM);
	if(appSrcDepth) gst_app_src_set_stream_type((GstAppSrc*)appSrcDepth,GST_APP_STREAM_TYPE_STREAM);
	if(appSrcOsc) gst_app_src_set_stream_type((GstAppSrc*)appSrcOsc,GST_APP_STREAM_TYPE_STREAM);

	g_signal_connect(rtpbin,"on-new-ssrc",G_CALLBACK(&ofxGstRTPServer::on_new_ssrc_handler),this);

#if ENABLE_ECHO_CANCEL
	if(echoCancel && audioChannelReady){
		gstAudioIn.startPipeline();
		gstAudioIn.play();
	}
#endif

	gst.startPipeline();
	gst.play();

	ofAddListener(ofEvents().update,this,&ofxGstRTPServer::update);
}