static gboolean
gst_rtp_mp4g_pay_setcaps (GstRTPBasePayload * payload, GstCaps * caps)
{
  GstRtpMP4GPay *rtpmp4gpay;
  GstStructure *structure;
  const GValue *codec_data;
  const gchar *media_type = NULL;
  gboolean res;

  rtpmp4gpay = GST_RTP_MP4G_PAY (payload);

  structure = gst_caps_get_structure (caps, 0);

  codec_data = gst_structure_get_value (structure, "codec_data");
  if (codec_data) {
    GST_LOG_OBJECT (rtpmp4gpay, "got codec_data");
    if (G_VALUE_TYPE (codec_data) == GST_TYPE_BUFFER) {
      GstBuffer *buffer;
      const gchar *name;

      buffer = gst_value_get_buffer (codec_data);
      GST_LOG_OBJECT (rtpmp4gpay, "configuring codec_data");

      name = gst_structure_get_name (structure);

      /* parse buffer */
      if (!strcmp (name, "audio/mpeg")) {
        res = gst_rtp_mp4g_pay_parse_audio_config (rtpmp4gpay, buffer);
        media_type = "audio";
      } else if (!strcmp (name, "video/mpeg")) {
        res = gst_rtp_mp4g_pay_parse_video_config (rtpmp4gpay, buffer);
        media_type = "video";
      } else {
        res = FALSE;
      }
      if (!res)
        goto config_failed;

      /* now we can configure the buffer */
      if (rtpmp4gpay->config)
        gst_buffer_unref (rtpmp4gpay->config);

      rtpmp4gpay->config = gst_buffer_copy (buffer);
    }
  }
  if (media_type == NULL)
    goto config_failed;

  gst_rtp_base_payload_set_options (payload, media_type, TRUE, "MPEG4-GENERIC",
      rtpmp4gpay->rate);

  res = gst_rtp_mp4g_pay_new_caps (rtpmp4gpay);

  return res;

  /* ERRORS */
config_failed:
  {
    GST_DEBUG_OBJECT (rtpmp4gpay, "failed to parse config");
    return FALSE;
  }
}
Example #2
static gboolean
bus_call (GstBus     *bus,
          GstMessage *msg,
          gpointer    data)
{
  GMainLoop *loop = (GMainLoop *) data;

  switch (GST_MESSAGE_TYPE (msg)) 
  {

    case GST_MESSAGE_EOS:
    {
      gst_element_set_state (pipeline, GST_STATE_PAUSED);
      break;
    }

    case GST_MESSAGE_ERROR: 
    {
      gchar  *debug;
      GError *error;

      gst_message_parse_error (msg, &error, &debug);
      g_free (debug);

      g_printerr ("Error: %s\n", error->message);
      g_error_free (error);
                 
      g_main_loop_quit (loop);
      break;
    }
    
    case GST_MESSAGE_ELEMENT: 
    {
        if(message_has_structure(msg))
        {
          const GstStructure *structure;
          const gchar *name;
          const gchar *timestamp;

          structure = gst_message_get_structure(msg);
          name = gst_structure_get_name(structure);
          timestamp = gst_structure_get_string(structure,"timestamp");

          g_printf ("name =%s timestamp=%s\n", name, timestamp);

          if (timestamps == NULL)
          { /* create the hash which will contain the timestamps */
              timestamps = g_hash_table_new_full(g_str_hash,g_str_equal,g_free,g_free);
          }

          { /* insert the camera_transition_xx/timestamp coming from the component */
            gpointer value;
            value = g_hash_table_lookup(timestamps,name);

            /* Just insert new keys, do not overwrite */
            if (value == NULL)
            {
              g_hash_table_insert(timestamps, g_strdup(name), g_strdup(timestamp));
            }
          }

          /* Check if we are done */
          if (endpoint_reached ())
          {
              g_main_loop_quit (loop);
          }
        }

        break;
    }
    
    default:
      break;
  }

  return TRUE;
}
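For context, a handler like this is attached to the pipeline's bus before the
main loop runs; a minimal sketch (assuming the same `pipeline` and `loop`
objects used by the handler above):

  GstBus *bus;

  bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
  gst_bus_add_watch (bus, bus_call, loop);
  gst_object_unref (bus);

  g_main_loop_run (loop);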
Example #3
static GstFlowReturn
gst_amc_video_enc_handle_output_frame (GstAmcVideoEnc * self,
    GstAmcBuffer * buf, const GstAmcBufferInfo * buffer_info,
    GstVideoCodecFrame * frame)
{
  GstFlowReturn flow_ret = GST_FLOW_OK;
  GstVideoEncoder *encoder = GST_VIDEO_ENCODER_CAST (self);

  /* The BUFFER_FLAG_CODEC_CONFIG logic is borrowed from
   * gst-omx. see *_handle_output_frame in
   * gstomxvideoenc.c and gstomxh264enc.c */
  if ((buffer_info->flags & BUFFER_FLAG_CODEC_CONFIG)
      && buffer_info->size > 0) {
    GstStructure *s;
    GstVideoCodecState *state;

    state = gst_video_encoder_get_output_state (encoder);
    s = gst_caps_get_structure (state->caps, 0);
    if (!strcmp (gst_structure_get_name (s), "video/x-h264")) {
      gst_video_codec_state_unref (state);

      if (buffer_info->size > 4 &&
          GST_READ_UINT32_BE (buf->data + buffer_info->offset) == 0x00000001) {
        GList *l = NULL;
        GstBuffer *hdrs;

        GST_DEBUG_OBJECT (self, "got codecconfig in byte-stream format");

        hdrs = gst_buffer_new_and_alloc (buffer_info->size);
        gst_buffer_fill (hdrs, 0, buf->data + buffer_info->offset,
            buffer_info->size);

        l = g_list_append (l, hdrs);
        gst_video_encoder_set_headers (encoder, l);
      }
    } else {
      GstBuffer *codec_data;

      GST_DEBUG_OBJECT (self, "Handling codec data");

      codec_data = gst_buffer_new_and_alloc (buffer_info->size);
      gst_buffer_fill (codec_data, 0, buf->data + buffer_info->offset,
          buffer_info->size);
      state->codec_data = codec_data;
      gst_video_codec_state_unref (state);

      if (!gst_video_encoder_negotiate (encoder)) {
        gst_video_codec_frame_unref (frame);
        return GST_FLOW_NOT_NEGOTIATED;
      }

      return GST_FLOW_OK;
    }
  }

  if (buffer_info->size > 0) {
    GstBuffer *out_buf;
    GstPad *srcpad;

    srcpad = GST_VIDEO_ENCODER_SRC_PAD (encoder);
    out_buf =
        gst_video_encoder_allocate_output_buffer (encoder, buffer_info->size);
    gst_buffer_fill (out_buf, 0, buf->data + buffer_info->offset,
        buffer_info->size);

    GST_BUFFER_PTS (out_buf) =
        gst_util_uint64_scale (buffer_info->presentation_time_us, GST_USECOND,
        1);

    if (frame) {
      frame->output_buffer = out_buf;
      flow_ret = gst_video_encoder_finish_frame (encoder, frame);
    } else {
      /* This sometimes happens at EOS or if the input is not properly framed,
       * let's handle it gracefully by allocating a new buffer for the current
       * caps and filling it
       */

      GST_ERROR_OBJECT (self, "No corresponding frame found");
      flow_ret = gst_pad_push (srcpad, out_buf);
    }
  } else if (frame) {
    flow_ret = gst_video_encoder_finish_frame (encoder, frame);
  }

  return flow_ret;
}
Example #4
static gboolean gst_avdtp_sink_init_mp3_pkt_conf(
		GstAvdtpSink *self, GstCaps *caps,
		mpeg_capabilities_t *pkt)
{
	const GValue *value = NULL;
	gint rate, layer;
	const gchar *name;
	GstStructure *structure = gst_caps_get_structure(caps, 0);

	name = gst_structure_get_name(structure);

	if (!(IS_MPEG_AUDIO(name))) {
		GST_ERROR_OBJECT(self, "Unexpected format %s, "
				"was expecting mp3", name);
		return FALSE;
	}

	/* layer */
	value = gst_structure_get_value(structure, "layer");
	layer = g_value_get_int(value);
	if (layer == 1)
		pkt->layer = BT_MPEG_LAYER_1;
	else if (layer == 2)
		pkt->layer = BT_MPEG_LAYER_2;
	else if (layer == 3)
		pkt->layer = BT_MPEG_LAYER_3;
	else {
		GST_ERROR_OBJECT(self, "Unexpected layer: %d", layer);
		return FALSE;
	}

	/* crc */
	if (self->mp3_using_crc != -1)
		pkt->crc = self->mp3_using_crc;
	else {
		GST_ERROR_OBJECT(self, "No info about crc was received, "
				" can't proceed");
		return FALSE;
	}

	/* channel mode */
	if (self->channel_mode != -1)
		pkt->channel_mode = self->channel_mode;
	else {
		GST_ERROR_OBJECT(self, "No info about channel mode "
				"received, can't proceed");
		return FALSE;
	}

	/* mpf - we will only use the mandatory one */
	pkt->mpf = 0;

	value = gst_structure_get_value(structure, "rate");
	rate = g_value_get_int(value);
	if (rate == 44100)
		pkt->frequency = BT_MPEG_SAMPLING_FREQ_44100;
	else if (rate == 48000)
		pkt->frequency = BT_MPEG_SAMPLING_FREQ_48000;
	else if (rate == 32000)
		pkt->frequency = BT_MPEG_SAMPLING_FREQ_32000;
	else if (rate == 24000)
		pkt->frequency = BT_MPEG_SAMPLING_FREQ_24000;
	else if (rate == 22050)
		pkt->frequency = BT_MPEG_SAMPLING_FREQ_22050;
	else if (rate == 16000)
		pkt->frequency = BT_MPEG_SAMPLING_FREQ_16000;
	else {
		GST_ERROR_OBJECT(self, "Invalid rate while setting caps");
		return FALSE;
	}

	/* vbr - we always report vbr, since we have no way to know */
	pkt->bitrate = 0x8000;

	return TRUE;
}
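Note that the g_value_get_int() calls above assume the "layer" and "rate"
fields are present in the caps; gst_structure_get_value() returns NULL for a
missing field and the code would crash. A more defensive variant (a sketch,
not the original BlueZ code) reads and checks the field in one step:

	gint layer = 0;

	if (!gst_structure_get_int(structure, "layer", &layer)) {
		GST_ERROR_OBJECT(self, "Caps have no 'layer' field");
		return FALSE;
	}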
Example #5
static GstCaps *
gst_avdtp_src_getcaps (GstBaseSrc * bsrc, GstCaps * filter)
{
  GstAvdtpSrc *avdtpsrc = GST_AVDTP_SRC (bsrc);
  GstCaps *caps = NULL, *ret = NULL;

  if (avdtpsrc->dev_caps) {
    const GValue *value;
    const char *format;
    int rate;
    GstStructure *structure = gst_caps_get_structure (avdtpsrc->dev_caps, 0);

    format = gst_structure_get_name (structure);

    if (g_str_equal (format, "audio/x-sbc")) {
      /* FIXME: we can return a fixed payload type once we
       * are in PLAYING */
      caps = gst_caps_new_simple ("application/x-rtp",
          "media", G_TYPE_STRING, "audio",
          "payload", GST_TYPE_INT_RANGE, 96, 127,
          "encoding-name", G_TYPE_STRING, "SBC", NULL);
    } else if (g_str_equal (format, "audio/mpeg")) {
      caps = gst_caps_new_simple ("application/x-rtp",
          "media", G_TYPE_STRING, "audio",
          "payload", GST_TYPE_INT_RANGE, 96, 127,
          "encoding-name", G_TYPE_STRING, "MP4A-LATM", NULL);

      value = gst_structure_get_value (structure, "mpegversion");
      if (!value || !G_VALUE_HOLDS_INT (value)) {
        GST_ERROR_OBJECT (avdtpsrc, "Failed to get mpegversion");
        gst_caps_unref (caps);
        return NULL;
      }
      gst_caps_set_simple (caps, "mpegversion", G_TYPE_INT,
          g_value_get_int (value), NULL);

      value = gst_structure_get_value (structure, "channels");
      if (!value || !G_VALUE_HOLDS_INT (value)) {
        GST_ERROR_OBJECT (avdtpsrc, "Failed to get channels");
        gst_caps_unref (caps);
        return NULL;
      }
      gst_caps_set_simple (caps, "channels", G_TYPE_INT,
          g_value_get_int (value), NULL);

      value = gst_structure_get_value (structure, "base-profile");
      if (!value || !G_VALUE_HOLDS_STRING (value)) {
        GST_ERROR_OBJECT (avdtpsrc, "Failed to get base-profile");
        gst_caps_unref (caps);
        return NULL;
      }
      gst_caps_set_simple (caps, "base-profile", G_TYPE_STRING,
          g_value_get_string (value), NULL);

    } else {
      GST_ERROR_OBJECT (avdtpsrc,
          "Only SBC and MPEG-2/4 are supported at the moment");
      /* caps was never set in this branch; bail out before the
       * rate handling below dereferences NULL */
      return NULL;
    }

    value = gst_structure_get_value (structure, "rate");
    if (!value || !G_VALUE_HOLDS_INT (value)) {
      GST_ERROR_OBJECT (avdtpsrc, "Failed to get sample rate");
      gst_caps_unref (caps);
      return NULL;
    }
    rate = g_value_get_int (value);

    gst_caps_set_simple (caps, "clock-rate", G_TYPE_INT, rate, NULL);

    if (filter) {
      ret = gst_caps_intersect_full (filter, caps, GST_CAPS_INTERSECT_FIRST);
      gst_caps_unref (caps);
    } else
      ret = caps;
  } else {
    GST_DEBUG_OBJECT (avdtpsrc, "device not open, using template caps");
    ret = GST_BASE_SRC_CLASS (parent_class)->get_caps (bsrc, filter);
  }

  return ret;
}
Example #6
// ----------------------------------------------------------------------------
// Handle the "pad-added" message
void
GStreamerImportFileHandle::OnPadAdded(GstPad *pad)
{
   // Retrieve the stream caps...skip stream if unavailable
   GstCaps *caps = gst_pad_get_current_caps(pad);
   if (!caps)
   {
      WARN(mPipeline, ("OnPadAdded: unable to retrieve stream caps"));
      return;
   }

   // Get the caps structure...no need to release
   GstStructure *str = gst_caps_get_structure(caps, 0);
   if (!str)
   {
      WARN(mPipeline, ("OnPadAdded: unable to retrieve caps structure"));
      gst_caps_unref(caps);
      return;
   }

   // Only accept audio streams...no need to release
   const gchar *name = gst_structure_get_name(str);
   if (!g_strrstr(name, "audio"))
   {
      WARN(mPipeline, ("OnPadAdded: bypassing '%s' stream", name));
      gst_caps_unref(caps);
      return;
   }

   // Allocate a new stream context
   GStreamContext *c = g_new0(GStreamContext, 1);
   if (!c)
   {
      WARN(mPipeline, ("OnPadAdded: unable to allocate stream context"));
      gst_caps_unref(caps);
      return;
   }

   // Set initial state
   c->mUse = true;

   // Always add it to the context list to keep the number of contexts
   // in sync with the number of streams
   g_mutex_lock(&mStreamsLock);
   g_ptr_array_add(mStreams, c);
   g_mutex_unlock(&mStreamsLock);

   // Need pointer to context during pad removal (pad-remove signal)
   SETCTX(pad, c);

   // Save the stream's start time and duration
   gst_pad_query_position(pad, GST_FORMAT_TIME, &c->mPosition);
   gst_pad_query_duration(pad, GST_FORMAT_TIME, &c->mDuration);

   // Retrieve the number of channels and validate
   gint channels = -1;
   gst_structure_get_int(str, "channels", &channels);
   if (channels <= 0)
   {
      WARN(mPipeline, ("OnPadAdded: channel count is invalid %d", channels));
      gst_caps_unref(caps);
      return;
   }
   c->mNumChannels = channels;

   // Retrieve the sample rate and validate
   gint rate = -1;
   gst_structure_get_int(str, "rate", &rate);
   if (rate <= 0)
   {
      WARN(mPipeline, ("OnPadAdded: sample rate is invalid %d", rate));
      gst_caps_unref(caps);
      return;
   }
   c->mSampleRate = (double) rate;

   c->mType = g_strdup(name);
   if (c->mType == NULL)
   {
      WARN(mPipeline, ("OnPadAdded: unable to allocate audio type"));
      gst_caps_unref(caps);
      return;
   }

   // Done with capabilities
   gst_caps_unref(caps);

   // Create audioconvert element
   c->mConv = gst_element_factory_make("audioconvert", NULL);
   if (!c->mConv)
   {
      WARN(mPipeline, ("OnPadAdded: failed to create audioconvert element"));
      return;
   }

   // Create appsink element
   c->mSink = gst_element_factory_make("appsink", NULL);
   if (!c->mSink)
   {
      WARN(mPipeline, ("OnPadAdded: failed to create appsink element"));
      return;
   }
   SETCTX(c->mSink, c);

   // Set the appsink callbacks and add the context pointer
   gst_app_sink_set_callbacks(GST_APP_SINK(c->mSink), &AppSinkCallbacks, this, NULL);

   // Set the capabilities that we desire
   caps = gst_static_caps_get(&supportedCaps);
   if (!caps)
   {
      WARN(mPipeline, ("OnPadAdded: failed to create static caps"));
      return;
   }
   gst_app_sink_set_caps(GST_APP_SINK(c->mSink), caps);
   gst_caps_unref(caps);

   // Do not sync to the clock...process as quickly as possible
   gst_base_sink_set_sync(GST_BASE_SINK(c->mSink), FALSE);

   // Don't drop buffers...allow queue to build unfettered
   gst_app_sink_set_drop(GST_APP_SINK(c->mSink), FALSE);

   // Add both elements to the pipeline
   gst_bin_add_many(GST_BIN(mPipeline), c->mConv, c->mSink, NULL);

   // Link them together
   if (!gst_element_link(c->mConv, c->mSink))
   {
      WARN(mPipeline, ("OnPadAdded: failed to link autioconvert and appsink"));
      return;
   }

   // Link the audioconvert sink pad to the src pad
   GstPadLinkReturn ret = GST_PAD_LINK_OK;
   GstPad *convsink = gst_element_get_static_pad(c->mConv, "sink");
   if (convsink)
   {
      ret = gst_pad_link(pad, convsink);
      gst_object_unref(convsink);
   }
   if (!convsink || ret != GST_PAD_LINK_OK)
   {
      WARN(mPipeline, ("OnPadAdded: failed to link uridecodebin to audioconvert - %d", ret));
      return;
   }

   // Synchronize audioconvert state with parent
   if (!gst_element_sync_state_with_parent(c->mConv))
   {
      WARN(mPipeline, ("OnPadAdded: unable to sync audioconvert state"));
      return;
   }

   // Synchronize appsink state with parent
   if (!gst_element_sync_state_with_parent(c->mSink))
   {
      WARN(mPipeline, ("OnPadAdded: unable to sync appaink state"));
      return;
   }

   return;
}
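The AppSinkCallbacks structure passed to gst_app_sink_set_callbacks() above is
defined elsewhere in the importer; it would look roughly like the following
sketch (handlers stubbed out here with NULL, whereas the real ones deliver
samples to the import code):

   static GstAppSinkCallbacks AppSinkCallbacks =
   {
      NULL,    // eos: called when the stream ends
      NULL,    // new_preroll: called with the preroll sample
      NULL     // new_sample: called for each decoded sample
   };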
Example #7
void PlaybackPipeline::attachTrack(RefPtr<SourceBufferPrivateGStreamer> sourceBufferPrivate, RefPtr<TrackPrivateBase> trackPrivate, GstStructure* structure, GstCaps* caps)
{
    WebKitMediaSrc* webKitMediaSrc = m_webKitMediaSrc.get();

    GST_OBJECT_LOCK(webKitMediaSrc);
    Stream* stream = getStreamBySourceBufferPrivate(webKitMediaSrc, sourceBufferPrivate.get());
    GST_OBJECT_UNLOCK(webKitMediaSrc);

    ASSERT(stream);

    GST_OBJECT_LOCK(webKitMediaSrc);
    unsigned padId = stream->parent->priv->numberOfPads;
    stream->parent->priv->numberOfPads++;
    GST_OBJECT_UNLOCK(webKitMediaSrc);

    const gchar* mediaType = gst_structure_get_name(structure);

    GST_DEBUG_OBJECT(webKitMediaSrc, "Configured track %s: appsrc=%s, padId=%u, mediaType=%s", trackPrivate->id().string().utf8().data(), GST_ELEMENT_NAME(stream->appsrc), padId, mediaType);

    GUniquePtr<gchar> parserBinName(g_strdup_printf("streamparser%u", padId));

    if (!g_strcmp0(mediaType, "video/x-h264")) {
        GRefPtr<GstCaps> filterCaps = adoptGRef(gst_caps_new_simple("video/x-h264", "alignment", G_TYPE_STRING, "au", nullptr));
        GstElement* capsfilter = gst_element_factory_make("capsfilter", nullptr);
        g_object_set(capsfilter, "caps", filterCaps.get(), nullptr);

        stream->parser = gst_bin_new(parserBinName.get());

        GstElement* parser = gst_element_factory_make("h264parse", nullptr);
        gst_bin_add_many(GST_BIN(stream->parser), parser, capsfilter, nullptr);
        gst_element_link_pads(parser, "src", capsfilter, "sink");

        GRefPtr<GstPad> pad = adoptGRef(gst_element_get_static_pad(parser, "sink"));
        gst_element_add_pad(stream->parser, gst_ghost_pad_new("sink", pad.get()));

        pad = adoptGRef(gst_element_get_static_pad(capsfilter, "src"));
        gst_element_add_pad(stream->parser, gst_ghost_pad_new("src", pad.get()));
    } else if (!g_strcmp0(mediaType, "video/x-h265")) {
        GRefPtr<GstCaps> filterCaps = adoptGRef(gst_caps_new_simple("video/x-h265", "alignment", G_TYPE_STRING, "au", nullptr));
        GstElement* capsfilter = gst_element_factory_make("capsfilter", nullptr);
        g_object_set(capsfilter, "caps", filterCaps.get(), nullptr);

        stream->parser = gst_bin_new(parserBinName.get());

        GstElement* parser = gst_element_factory_make("h265parse", nullptr);
        gst_bin_add_many(GST_BIN(stream->parser), parser, capsfilter, nullptr);
        gst_element_link_pads(parser, "src", capsfilter, "sink");

        GRefPtr<GstPad> pad = adoptGRef(gst_element_get_static_pad(parser, "sink"));
        gst_element_add_pad(stream->parser, gst_ghost_pad_new("sink", pad.get()));

        pad = adoptGRef(gst_element_get_static_pad(capsfilter, "src"));
        gst_element_add_pad(stream->parser, gst_ghost_pad_new("src", pad.get()));
    } else if (!g_strcmp0(mediaType, "audio/mpeg")) {
        gint mpegversion = -1;
        gst_structure_get_int(structure, "mpegversion", &mpegversion);

        GstElement* parser = nullptr;
        if (mpegversion == 1)
            parser = gst_element_factory_make("mpegaudioparse", nullptr);
        else if (mpegversion == 2 || mpegversion == 4)
            parser = gst_element_factory_make("aacparse", nullptr);
        else
            ASSERT_NOT_REACHED();

        stream->parser = gst_bin_new(parserBinName.get());
        gst_bin_add(GST_BIN(stream->parser), parser);

        GRefPtr<GstPad> pad = adoptGRef(gst_element_get_static_pad(parser, "sink"));
        gst_element_add_pad(stream->parser, gst_ghost_pad_new("sink", pad.get()));

        pad = adoptGRef(gst_element_get_static_pad(parser, "src"));
        gst_element_add_pad(stream->parser, gst_ghost_pad_new("src", pad.get()));
    } else if (!g_strcmp0(mediaType, "video/x-vp9"))
        stream->parser = nullptr;
    else {
        GST_ERROR_OBJECT(stream->parent, "Unsupported media format: %s", mediaType);
        return;
    }

    GST_OBJECT_LOCK(webKitMediaSrc);
    stream->type = Unknown;
    GST_OBJECT_UNLOCK(webKitMediaSrc);

    GRefPtr<GstPad> sourcePad;
    if (stream->parser) {
        gst_bin_add(GST_BIN(stream->parent), stream->parser);
        gst_element_sync_state_with_parent(stream->parser);

        GRefPtr<GstPad> sinkPad = adoptGRef(gst_element_get_static_pad(stream->parser, "sink"));
        sourcePad = adoptGRef(gst_element_get_static_pad(stream->appsrc, "src"));
        gst_pad_link(sourcePad.get(), sinkPad.get());
        sourcePad = adoptGRef(gst_element_get_static_pad(stream->parser, "src"));
    } else {
        GST_DEBUG_OBJECT(m_webKitMediaSrc.get(), "Stream of type %s doesn't require a parser bin", mediaType);
        sourcePad = adoptGRef(gst_element_get_static_pad(stream->appsrc, "src"));
    }
    ASSERT(sourcePad);

    // FIXME: Is padId the best way to identify the Stream? What about trackId?
    g_object_set_data(G_OBJECT(sourcePad.get()), "padId", GINT_TO_POINTER(padId));
    webKitMediaSrcLinkParser(sourcePad.get(), caps, stream);

    ASSERT(stream->parent->priv->mediaPlayerPrivate);
    int signal = -1;

    GST_OBJECT_LOCK(webKitMediaSrc);
    if (g_str_has_prefix(mediaType, "audio")) {
        stream->type = Audio;
        stream->parent->priv->numberOfAudioStreams++;
        signal = SIGNAL_AUDIO_CHANGED;
        stream->audioTrack = RefPtr<WebCore::AudioTrackPrivateGStreamer>(static_cast<WebCore::AudioTrackPrivateGStreamer*>(trackPrivate.get()));
    } else if (g_str_has_prefix(mediaType, "video")) {
        stream->type = Video;
        stream->parent->priv->numberOfVideoStreams++;
        signal = SIGNAL_VIDEO_CHANGED;
        stream->videoTrack = RefPtr<WebCore::VideoTrackPrivateGStreamer>(static_cast<WebCore::VideoTrackPrivateGStreamer*>(trackPrivate.get()));
    } else if (g_str_has_prefix(mediaType, "text")) {
        stream->type = Text;
        stream->parent->priv->numberOfTextStreams++;
        signal = SIGNAL_TEXT_CHANGED;

        // FIXME: Support text tracks.
    }
    GST_OBJECT_UNLOCK(webKitMediaSrc);

    if (signal != -1)
        g_signal_emit(G_OBJECT(stream->parent), webKitMediaSrcSignals[signal], 0, nullptr);
}
Example #8
//static 
void MediaParserGst::cb_pad_added(GstElement* /* element */, GstPad* new_pad,
                                  gpointer data)
{
    MediaParserGst* parser = static_cast<MediaParserGst*>(data);

    GstCaps* caps = gst_pad_get_caps(new_pad);    
    print_caps(caps);    
    
    GstStructure* str = gst_caps_get_structure (caps, 0);
    if (!str) {
        log_error(_("MediaParserGst: couldn't get structure name."));
        parser->link_to_fakesink(new_pad);
        return;    
    } 
    
    const gchar* caps_name = gst_structure_get_name (str);
    
    bool media_type_audio;

    if (std::equal(caps_name, caps_name+5, "audio")) {
        media_type_audio = true;
    } else if (std::equal(caps_name, caps_name+5, "video")) {
        media_type_audio = false;
    } else {
        log_error(_("MediaParserGst: ignoring stream of type %s."),
                  caps_name);
        parser->link_to_fakesink(new_pad);
        return;
    }    
    
    gboolean parsed = false;
    gboolean framed = false;
    
    gst_structure_get_boolean(str, "parsed", &parsed);
    gst_structure_get_boolean(str, "framed", &framed);
    
    bool already_parsed = parsed || framed;
    
    GstPad* final_pad = 0;
    
    if (already_parsed) {
        final_pad = new_pad;
    } else {
        // We'll try to find a parser, so that we will eventually receive
        // timestamped buffers, on which the MediaParser system relies.
        GstElementFactory* parserfactory = swfdec_gst_get_parser_factory (caps);

        if (!parserfactory) {
            log_error(_("MediaParserGst: Failed to find a parser (media: %s)."),
                      caps_name);
            parser->link_to_fakesink(new_pad);
            return;
        }

        GstElement* parserel = gst_element_factory_create (parserfactory, NULL);
        gst_object_unref (parserfactory);
        if (!parserel) {
            log_error(_("MediaParserGst: Failed to find a parser. We'll continue, "
                        "but either audio or video will not work!"));
            parser->link_to_fakesink(new_pad);
            return;
        }
                     
        gboolean success = gst_bin_add(GST_BIN(parser->_bin), parserel);
        if (!success) {
            gst_object_unref(parserel);
            log_error(_("MediaParserGst: couldn't add parser."));
            parser->link_to_fakesink(new_pad);
            return;
        }
        
        GstPad* sinkpad = gst_element_get_static_pad (parserel, "sink");
        assert(sinkpad);
        
        GstPadLinkReturn ret = gst_pad_link(new_pad, sinkpad);
        
        gst_object_unref(GST_OBJECT(sinkpad));

        if (!GST_PAD_LINK_SUCCESSFUL(ret)) {
            log_error(_("MediaParserGst: couldn't link parser."));
            parser->link_to_fakesink(new_pad);
            return;
        }
        
        final_pad = gst_element_get_static_pad (parserel, "src");
    } 

    if (media_type_audio) {
      
        parser->_audiosink = swfdec_gst_connect_sinkpad_by_pad (final_pad, caps);
        if (!parser->_audiosink) {
            log_error(_("MediaParserGst: couldn't link \"fake\" sink."));
            return;        
        }

        gst_pad_set_chain_function(parser->_audiosink,
                MediaParserGst::cb_chain_func_audio);
        
        g_object_set_data(G_OBJECT (parser->_audiosink), "mediaparser-obj",
                parser);
        
        LOG_ONCE(
            log_unimpl("MediaParserGst won't set codec, sampleRate, "
                "sampleSize, stereo and duration in AudioInfo"); 
        );
        AudioInfo* audioinfo = new AudioInfo(0, 0, 0, false, 0,
                CODEC_TYPE_CUSTOM);
        audioinfo->extra.reset(new ExtraInfoGst(caps));

        parser->_audioInfo.reset(audioinfo);
        log_debug(_("MediaParserGst: Linked audio source (type: %s)"), caps_name); 

    } else {
Example #9
static void
empathy_audio_src_handle_message (GstBin *bin, GstMessage *message)
{
  EmpathyGstAudioSrc *self = EMPATHY_GST_AUDIO_SRC (bin);
  EmpathyGstAudioSrcPrivate *priv = EMPATHY_GST_AUDIO_SRC_GET_PRIVATE (self);

  if  (GST_MESSAGE_TYPE (message) == GST_MESSAGE_ELEMENT &&
        GST_MESSAGE_SRC (message) == GST_OBJECT (priv->level))
    {
      const GstStructure *s;
      const gchar *name;
      const GValue *list;
      guint i, len;
      gdouble peak = -G_MAXDOUBLE;
      gdouble rms = -G_MAXDOUBLE;

      s = gst_message_get_structure (message);
      name = gst_structure_get_name (s);

      if (g_strcmp0 ("level", name) != 0)
        goto out;

      list = gst_structure_get_value (s, "peak");
      len = gst_value_list_get_size (list);

      for (i = 0; i < len; i++)
        {
          const GValue *value;
          gdouble db;

          value = gst_value_list_get_value (list, i);
          db = g_value_get_double (value);
          peak = MAX (db, peak);
        }

      list = gst_structure_get_value (s, "rms");
      len = gst_value_list_get_size (list);

      for (i = 0; i < len; i++)
        {
          const GValue *value;
          gdouble db;

          value = gst_value_list_get_value (list, i);
          db = g_value_get_double (value);
          rms = MAX (db, rms);
        }

      g_mutex_lock (priv->lock);

      priv->peak_level = peak;
      priv->rms_level = rms;
      if (priv->idle_id == 0)
        priv->idle_id = g_idle_add (empathy_audio_src_levels_updated, self);

      g_mutex_unlock (priv->lock);
    }

out:
  GST_BIN_CLASS (empathy_audio_src_parent_class)->handle_message (bin,
      message);
}
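These "level" messages are only posted if the level element in the source bin
is configured to emit them, e.g. (a sketch; the property is called "message"
in GStreamer 0.10, which this code targets, and "post-messages" in 1.0):

  GstElement *level;

  level = gst_element_factory_make ("level", NULL);
  g_object_set (level, "message", TRUE, NULL);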
Example #10
bool tcam_gst_raw_only_has_mono (const GstCaps* caps)
{
    if (caps == nullptr)
    {
        return false;
    }

    auto correct_format = [] (const char* str)
    {
        if (str == nullptr)
        {
            return false;
        }

        const static std::vector<std::string> formats = {"GRAY8", "GRAY16_LE", "GRAY16_BE"};

        if (std::find(formats.begin(), formats.end(), str) == formats.end())
        {
            return false;
        }
        return true;
    };

    for (unsigned int i = 0; i < gst_caps_get_size(caps); ++i)
    {
        GstStructure* struc = gst_caps_get_structure(caps, i);

        if (strcmp("video/x-raw", gst_structure_get_name(struc)) == 0)
        {
            if (gst_structure_has_field(struc, "format"))
            {
                if (gst_structure_get_field_type(struc, "format") == G_TYPE_STRING)
                {
                    if (!correct_format(gst_structure_get_string(struc, "format")))
                    {
                        return false;
                    }
                }
                else if (gst_structure_get_field_type(struc, "format") == GST_TYPE_LIST)
                {
                    auto vec = gst_list_to_vector(gst_structure_get_value(struc, "format"));

                    for (const auto& fmt : vec)
                    {
                        if (!correct_format(fmt.c_str()))
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    tcam_error("Cannot handle format type in GstStructure.");
                }
            }
            else
            {
                // since raw can be anything
                // do not assume it is gray but color
                return false;
            }
        }
        else
        {
            return false;
        }
    }

    return true;
}
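A quick usage sketch, with caps built from a string literal for illustration:

    GstCaps* caps = gst_caps_from_string("video/x-raw,format=GRAY8");
    bool mono_only = tcam_gst_raw_only_has_mono(caps);    // true
    gst_caps_unref(caps);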
Example #11
/*
  Given the pad in this direction and the given caps, what caps are allowed on
  the other pad in this element ?
*/
static GstCaps *
gst_cenc_decrypt_transform_caps (GstBaseTransform * base,
    GstPadDirection direction, GstCaps * caps, GstCaps * filter)
{
  GstCaps *res = NULL;
  gint i, j;

  g_return_val_if_fail (direction != GST_PAD_UNKNOWN, NULL);

  GST_DEBUG_OBJECT (base, "direction: %s   caps: %" GST_PTR_FORMAT "   filter:"
      " %" GST_PTR_FORMAT, (direction == GST_PAD_SRC) ? "Src" : "Sink",
      caps, filter);

  if(direction == GST_PAD_SRC && gst_caps_is_any (caps)){
    res = gst_pad_get_pad_template_caps (GST_BASE_TRANSFORM_SINK_PAD (base));
    goto filter;
  }
  
  res = gst_caps_new_empty ();

  for (i = 0; i < gst_caps_get_size (caps); ++i) {
    GstStructure *in = gst_caps_get_structure (caps, i);
    GstStructure *out = NULL;

    if (direction == GST_PAD_SINK) {
      gint n_fields;

      if (!gst_structure_has_field (in, "original-media-type"))
        continue;

      out = gst_structure_copy (in);
      n_fields = gst_structure_n_fields (in);

      gst_structure_set_name (out,
          gst_structure_get_string (out, "original-media-type"));

      /* filter out the DRM related fields from the down-stream caps */
      for(j=n_fields-1; j>=0; --j){
          const gchar *field_name;

          field_name = gst_structure_nth_field_name (in, j);

          if( g_str_has_prefix(field_name, "protection-system") ||
              g_str_has_prefix(field_name, "original-media-type") ){
              gst_structure_remove_field (out, field_name);
          }
      }
      gst_cenc_decrypt_append_if_not_duplicate(res, out);
    } else {                    /* GST_PAD_SRC */
      gint n_fields;
      GstStructure *tmp = NULL;
      guint p;
      tmp = gst_structure_copy (in);
      gst_cenc_remove_codec_fields (tmp);
      for(p=0; gst_cenc_decrypt_protection_ids[p]; ++p){
        /* filter out the audio/video related fields from the down-stream 
           caps, because they are not relevant to the input caps of this 
           element and they can cause caps negotiation failures with 
           adaptive bitrate streams */
        out = gst_structure_copy (tmp);
        gst_structure_set (out,
                           "protection-system", G_TYPE_STRING, gst_cenc_decrypt_protection_ids[p],
                           "original-media-type", G_TYPE_STRING, gst_structure_get_name (in),
                           NULL);
        gst_structure_set_name (out, "application/x-cenc");
        gst_cenc_decrypt_append_if_not_duplicate(res, out);
      }
      gst_structure_free (tmp);
    }
  }
  if(direction == GST_PAD_SINK && gst_caps_get_size (res)==0){
    gst_caps_unref (res);
    res = gst_caps_new_any ();
  }
 filter:
  if (filter) {
    GstCaps *intersection;

    GST_DEBUG_OBJECT (base, "Using filter caps %" GST_PTR_FORMAT, filter);
    intersection =
      gst_caps_intersect_full (res, filter, GST_CAPS_INTERSECT_FIRST);
    gst_caps_unref (res);
    res = intersection;
  }

  GST_DEBUG_OBJECT (base, "returning %" GST_PTR_FORMAT, res);
  return res;
}
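To illustrate the mapping (derived from the code above, not from any element
documentation):

/* SINK direction:
 *   application/x-cenc, original-media-type=(string)video/x-h264,
 *       protection-system=(string)<uuid>
 * becomes
 *   video/x-h264
 * with the protection-system/original-media-type fields stripped.
 * SRC direction: each input structure is wrapped into one
 * application/x-cenc structure per entry in
 * gst_cenc_decrypt_protection_ids, with the codec fields removed. */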
Example #12
GstCaps* tcam_gst_find_largest_caps (const GstCaps* incoming)
{
    /**
     * find_largest_caps tries to find the largest caps
     * according to the following rules:
     *
     * 1. determine the preferred format
     *       prefer bayer 8-bit over everything else
     *       if bayer 8-bit does not exist order according to the following list:
     *       color formats like BGR
     *       formats like MJPEG
     *       GRAY16
     *       GRAY8
     *       bayer12/16
     *
     * 2. find the largest resolution
     * 3. for the format with the largest resolution take the highest framerate
     */

    std::vector<uint32_t> format_fourccs = index_format_fourccs(incoming);

    uint32_t preferred_fourcc = find_preferred_format(format_fourccs);

    if(!g_strcmp0(gst_caps_to_string(incoming), "EMPTY"))
    {
        return nullptr;
    }

    int largest_index = -1;
    int largest_width = -1;
    int largest_height = -1;

    for (guint i = 0; i < gst_caps_get_size(incoming); ++i)
    {
        GstStructure* struc = gst_caps_get_structure(incoming, i);

        const char* format = gst_structure_get_string(struc, "format");

        uint32_t fourcc = tcam_fourcc_from_gst_1_0_caps_string(gst_structure_get_name(struc), format);

        // TODO: what about video/x-raw, format={GRAY8, GRAY16_LE}
        if (fourcc != preferred_fourcc)
        {
            continue;
        }

        int width = -1;
        int height = -1;
        bool new_width = false;
        bool new_height = false;

        // will fail if width is a range so we only handle
        // halfway fixated caps
        if (gst_structure_get_int(struc, "width", &width))
        {
            if (largest_width <= width)
            {
                largest_width = width;
                new_width = true;
            }
        }
        else
        {
            tcam_warning("Field 'width' does not have the type 'int'");
        }

        if (gst_structure_get_int(struc, "height", &height))
        {
            if (largest_height <= height)
            {
                largest_height = height;
                new_height = true;
            }
        }
        else
        {
            tcam_warning("Field 'height' does not have the type 'int'");
        }

        if (new_width && new_height)
        {
            largest_index = i;
        }
    }

    GstCaps* largest_caps = gst_caps_copy_nth(incoming, largest_index);

    tcam_info("Fixating assumed largest caps: %s", gst_caps_to_string(largest_caps));

    if (!tcam_gst_fixate_caps(largest_caps))
    {
        tcam_error("Cannot fixate largest caps. Returning NULL");
        return nullptr;
    }

    GstStructure* s = gst_caps_get_structure(largest_caps, 0);

    int h;
    gst_structure_get_int(s, "height", &h);
    int w;
    gst_structure_get_int(s, "width", &w);

    int num;
    int den;
    gst_structure_get_fraction(s, "framerate", &num, &den);

    GstCaps* fixated_caps = gst_caps_new_simple (gst_structure_get_name(s),
                                                 "framerate", GST_TYPE_FRACTION, num, den,
                                                 "width", G_TYPE_INT, w,
                                                 "height", G_TYPE_INT, h,
                                                 NULL);

    if (gst_structure_has_field(s, "format"))
    {
        gst_caps_set_value(fixated_caps, "format", gst_structure_get_value(s, "format"));
    }

    // largest_caps (and the structure s borrowed from it) is no longer
    // needed; unref it instead of leaking it
    gst_caps_unref(largest_caps);

    return fixated_caps;
}
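Usage sketch (the source pad is hypothetical; any caps source works):

    GstCaps* available = gst_pad_query_caps(pad, nullptr);
    GstCaps* largest = tcam_gst_find_largest_caps(available);
    gst_caps_unref(available);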
Example #13
/**
 * Helper function to get a list of all available fourccs in caps
 */
std::vector<uint32_t> index_format_fourccs (const GstCaps* caps)
{
    std::vector<uint32_t> ret;

    /*
    gst_caps_is_empty acts erratically, so we work around the issue with gst_caps_to_string:

    --------
    (gdb) print (char*)gst_caps_to_string (caps)
    $4 = 0x555555a8f120 "EMPTY"
    (gdb) print (int)gst_caps_is_empty (caps)
    (process:5873): GStreamer-CRITICAL (recursed) **: gst_caps_is_empty: assertion 'GST_IS_CAPS (caps)' failed
    --------

    */
    if (!caps || (!g_strcmp0(gst_caps_to_string(caps), "EMPTY")) || gst_caps_is_any(caps))
    {
        return ret;
    }

    for (guint i = 0; i < gst_caps_get_size(caps); ++i)
    {
        GstStructure* struc = gst_caps_get_structure(caps, i);
        std::string format_string;
        std::vector<std::string> vec;

        if (gst_structure_get_field_type(struc, "format") == GST_TYPE_LIST)
        {
            vec = gst_list_to_vector(gst_structure_get_value(struc, "format"));
        }
        else if (gst_structure_get_field_type(struc, "format") == G_TYPE_STRING)
        {
            vec.push_back(gst_structure_get_string(struc, "format"));
        }

        // code in the helper func is needed whether vec is empty or not
        // prevents code duplication
        auto helper_func = [&ret] (const char* name, const char* fmt)
            {
                uint32_t fourcc = tcam_fourcc_from_gst_1_0_caps_string(name,
                                                                       fmt);
                if (fourcc != 0)
                {
                    ret.push_back(fourcc);
                }
            };

        const char* name = gst_structure_get_name(struc);

        if (!vec.empty())
        {
            for (const auto& fmt : vec)
            {
                helper_func(name, fmt.c_str());
            }
        }
        else
        {
            // this will be the case for things like image/jpeg
            // such caps will have no format field and thus vec.empty() == true
            helper_func(name, "");
        }
    }

    // remove duplicate entries
    // probably never enough entries to make switch to std::set a good alternative
    sort( ret.begin(), ret.end() );
    ret.erase( unique( ret.begin(), ret.end() ), ret.end() );

    return ret;
}
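Example #14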
/* if element caps already in list, will make sure Transform elements have
 * priority and replace old ones */
static GList *
create_codec_cap_list (GstElementFactory *factory,
                       GstPadDirection direction,
                       GList *list,
                       GstCaps *rtp_caps)
{
  const GList *pads = factory->staticpadtemplates;
  gint i;


  /* Let us look at each pad for stuff to add*/
  while (pads)
  {
    GstCaps *caps = NULL;
    GstStaticPadTemplate *padtemplate = NULL;

    padtemplate = (GstStaticPadTemplate *) (pads->data);
    pads = g_list_next (pads);

    if (padtemplate->direction != direction)
      continue;

    if (GST_PAD_TEMPLATE_PRESENCE (padtemplate) != GST_PAD_ALWAYS) {
      continue;
    }

    caps = gst_static_caps_get (&padtemplate->static_caps);
    /*
      DEBUG ("%s caps are %s", gst_plugin_feature_get_name (GST_PLUGIN_FEATURE
      (factory)), gst_caps_to_string (caps));
    */

    /* skips caps ANY */
    if (!caps || gst_caps_is_any (caps))
    {
      goto done;
    }

    /* let us add one entry to the list per media type */
    for (i = 0; i < gst_caps_get_size (caps); i++)
    {
      CodecCap *entry = NULL;
      GList *found_item = NULL;
      GstStructure *structure = gst_caps_get_structure (caps, i);
      GstCaps *cur_caps =
          gst_caps_new_full (gst_structure_copy (structure), NULL);

      /* FIXME fix this in gstreamer! The rtpdepay element is bogus, it claims to
       * be a depayloader yet has application/x-rtp on both sides and does
       * absolutely nothing */
      /* Let's check if media caps are really media caps, this is to deal with
       * weird elements such as rtpdepay that claim to be depayloaders but have
       * application/x-rtp on src and sink pads */
      const gchar *name = gst_structure_get_name (structure);
      if (g_ascii_strcasecmp (name, "application/x-rtp") == 0)
      {
        GST_DEBUG ("skipping %s",
            gst_plugin_feature_get_name (GST_PLUGIN_FEATURE (factory)));
        continue;
      }

      /* let's check if this caps is already in the list, if so let's replace
       * that CodecCap list instead of creating a new one */
      /* we need to compare both media caps and rtp caps */
      found_item = g_list_find_custom (list, cur_caps,
          (GCompareFunc)compare_media_caps);
      if (found_item)
      {
        entry = (CodecCap *)found_item->data;
        /* if RTP caps exist and don't match nullify entry */
        if (rtp_caps && compare_rtp_caps (found_item->data, rtp_caps))
        {
          entry = NULL;
        }
      }

      if (!entry)
      {
        entry = g_slice_new0 (CodecCap);

        entry->caps = cur_caps;
        if (rtp_caps)
        {
          entry->rtp_caps = rtp_caps;
          gst_caps_ref (rtp_caps);
        }
        list = g_list_append (list, entry);
        entry->element_list1 = g_list_prepend (NULL,
          g_list_prepend (NULL, factory));
        gst_object_ref (factory);
      }
      else
      {
        GstCaps *newcaps;

        entry->element_list1->data =
          g_list_append (entry->element_list1->data, factory);
        gst_object_ref (factory);

        if (rtp_caps) {
          if (entry->rtp_caps) {
            GstCaps *new_rtp_caps;
            new_rtp_caps = gst_caps_union (rtp_caps, entry->rtp_caps);
            gst_caps_unref (entry->rtp_caps);
            entry->rtp_caps = new_rtp_caps;
          } else {
            entry->rtp_caps = rtp_caps;
            /* This shouldn't happen, its we're looking at rtp elements
             * or we're not */
            g_assert_not_reached ();
          }
          gst_caps_unref (rtp_caps);
        }

        newcaps = gst_caps_union (cur_caps, entry->caps);
        gst_caps_unref (entry->caps);
        gst_caps_unref (cur_caps);
        entry->caps = newcaps;

      }
    }
  done:
    if (caps != NULL) {
      gst_caps_unref (caps);
    }

  }

  return list;
}
Example #15
QAudioFormat QGstUtils::audioFormatForCaps(const GstCaps *caps)
{
    QAudioFormat format;
#if GST_CHECK_VERSION(1,0,0)
    GstAudioInfo info;
    if (gst_audio_info_from_caps(&info, caps)) {
        for (int i = 0; i < lengthOf(qt_audioLookup); ++i) {
            if (qt_audioLookup[i].format != info.finfo->format)
                continue;

            format.setSampleType(qt_audioLookup[i].sampleType);
            format.setByteOrder(qt_audioLookup[i].byteOrder);
            format.setSampleSize(qt_audioLookup[i].sampleSize);
            format.setSampleRate(info.rate);
            format.setChannelCount(info.channels);
            format.setCodec(QStringLiteral("audio/pcm"));

            return format;
        }
    }
#else
    const GstStructure *structure = gst_caps_get_structure(caps, 0);

    if (qstrcmp(gst_structure_get_name(structure), "audio/x-raw-int") == 0) {

        format.setCodec("audio/pcm");

        int endianness = 0;
        gst_structure_get_int(structure, "endianness", &endianness);
        if (endianness == 1234)
            format.setByteOrder(QAudioFormat::LittleEndian);
        else if (endianness == 4321)
            format.setByteOrder(QAudioFormat::BigEndian);

        gboolean isSigned = FALSE;
        gst_structure_get_boolean(structure, "signed", &isSigned);
        if (isSigned)
            format.setSampleType(QAudioFormat::SignedInt);
        else
            format.setSampleType(QAudioFormat::UnSignedInt);

        // Number of bits allocated per sample.
        int width = 0;
        gst_structure_get_int(structure, "width", &width);

        // The number of bits used per sample. This must be less than or equal to the width.
        int depth = 0;
        gst_structure_get_int(structure, "depth", &depth);

        if (width != depth) {
            // Unsupported sample layout.
            return QAudioFormat();
        }
        format.setSampleSize(width);

        int rate = 0;
        gst_structure_get_int(structure, "rate", &rate);
        format.setSampleRate(rate);

        int channels = 0;
        gst_structure_get_int(structure, "channels", &channels);
        format.setChannelCount(channels);

    } else if (qstrcmp(gst_structure_get_name(structure), "audio/x-raw-float") == 0) {

        format.setCodec("audio/pcm");

        int endianness = 0;
        gst_structure_get_int(structure, "endianness", &endianness);
        if (endianness == 1234)
            format.setByteOrder(QAudioFormat::LittleEndian);
        else if (endianness == 4321)
            format.setByteOrder(QAudioFormat::BigEndian);

        format.setSampleType(QAudioFormat::Float);

        int width = 0;
        gst_structure_get_int(structure, "width", &width);

        format.setSampleSize(width);

        int rate = 0;
        gst_structure_get_int(structure, "rate", &rate);
        format.setSampleRate(rate);

        int channels = 0;
        gst_structure_get_int(structure, "channels", &channels);
        format.setChannelCount(channels);

    } else {
        return QAudioFormat();
    }
#endif
    return format;
}
/* Parses a set of caps and tags in st and populates a GstDiscovererStreamInfo
 * structure (parent, if !NULL, otherwise it allocates one)
 */
static GstDiscovererStreamInfo *
collect_information (GstDiscoverer * dc, const GstStructure * st,
    GstDiscovererStreamInfo * parent)
{
  GstCaps *caps;
  GstStructure *caps_st, *tags_st;
  const gchar *name;
  int tmp, tmp2;
  guint utmp;
  gboolean btmp;

  if (!st || !gst_structure_id_has_field (st, _CAPS_QUARK)) {
    GST_WARNING ("Couldn't find caps !");
    if (parent)
      return parent;
    else
      return (GstDiscovererStreamInfo *)
          gst_mini_object_new (GST_TYPE_DISCOVERER_STREAM_INFO);
  }

  gst_structure_id_get (st, _CAPS_QUARK, GST_TYPE_CAPS, &caps, NULL);
  caps_st = gst_caps_get_structure (caps, 0);
  name = gst_structure_get_name (caps_st);

  if (g_str_has_prefix (name, "audio/")) {
    GstDiscovererAudioInfo *info;

    if (parent)
      info = (GstDiscovererAudioInfo *) parent;
    else {
      info = (GstDiscovererAudioInfo *)
          gst_mini_object_new (GST_TYPE_DISCOVERER_AUDIO_INFO);
      info->parent.caps = caps;
    }

    if (gst_structure_get_int (caps_st, "rate", &tmp))
      info->sample_rate = (guint) tmp;

    if (gst_structure_get_int (caps_st, "channels", &tmp))
      info->channels = (guint) tmp;

    if (gst_structure_get_int (caps_st, "depth", &tmp))
      info->depth = (guint) tmp;

    if (gst_structure_id_has_field (st, _TAGS_QUARK)) {
      gst_structure_id_get (st, _TAGS_QUARK,
          GST_TYPE_STRUCTURE, &tags_st, NULL);
      if (gst_structure_get_uint (tags_st, GST_TAG_BITRATE, &utmp) ||
          gst_structure_get_uint (tags_st, GST_TAG_NOMINAL_BITRATE, &utmp))
        info->bitrate = utmp;

      if (gst_structure_get_uint (tags_st, GST_TAG_MAXIMUM_BITRATE, &utmp))
        info->max_bitrate = utmp;

      /* FIXME: Is it worth it to remove the tags we've parsed? */
      info->parent.tags = gst_tag_list_merge (info->parent.tags,
          (GstTagList *) tags_st, GST_TAG_MERGE_REPLACE);

      gst_structure_free (tags_st);
    }

    return (GstDiscovererStreamInfo *) info;

  } else if (g_str_has_prefix (name, "video/") ||
      g_str_has_prefix (name, "image/")) {
    GstDiscovererVideoInfo *info;
    GstVideoFormat format;

    if (parent)
      info = (GstDiscovererVideoInfo *) parent;
    else {
      info = (GstDiscovererVideoInfo *)
          gst_mini_object_new (GST_TYPE_DISCOVERER_VIDEO_INFO);
      info->parent.caps = caps;
    }

    if (gst_video_format_parse_caps (caps, &format, &tmp, &tmp2)) {
      info->width = (guint) tmp;
      info->height = (guint) tmp2;
    }

    if (gst_structure_get_int (caps_st, "depth", &tmp))
      info->depth = (guint) tmp;

    if (gst_video_parse_caps_pixel_aspect_ratio (caps, &tmp, &tmp2)) {
      info->par_num = tmp;
      info->par_denom = tmp2;
    }

    if (gst_video_parse_caps_framerate (caps, &tmp, &tmp2)) {
      info->framerate_num = tmp;
      info->framerate_denom = tmp2;
    }

    if (gst_video_format_parse_caps_interlaced (caps, &btmp))
      info->interlaced = btmp;

    if (gst_structure_id_has_field (st, _TAGS_QUARK)) {
      gst_structure_id_get (st, _TAGS_QUARK,
          GST_TYPE_STRUCTURE, &tags_st, NULL);
      if (gst_structure_get_uint (tags_st, GST_TAG_BITRATE, &utmp) ||
          gst_structure_get_uint (tags_st, GST_TAG_NOMINAL_BITRATE, &utmp))
        info->bitrate = utmp;

      if (gst_structure_get_uint (tags_st, GST_TAG_MAXIMUM_BITRATE, &utmp))
        info->max_bitrate = utmp;

      /* FIXME: Is it worth it to remove the tags we've parsed? */
      info->parent.tags = gst_tag_list_merge (info->parent.tags,
          (GstTagList *) tags_st, GST_TAG_MERGE_REPLACE);
      gst_structure_free (tags_st);
    }

    return (GstDiscovererStreamInfo *) info;

  } else {
    /* None of the above - populate what information we can */
    GstDiscovererStreamInfo *info;

    if (parent)
      info = parent;
    else {
      info = (GstDiscovererStreamInfo *)
          gst_mini_object_new (GST_TYPE_DISCOVERER_STREAM_INFO);
      info->caps = caps;
    }

    if (gst_structure_id_get (st, _TAGS_QUARK,
            GST_TYPE_STRUCTURE, &tags_st, NULL)) {
      info->tags = gst_tag_list_merge (info->tags, (GstTagList *) tags_st,
          GST_TAG_MERGE_REPLACE);
      gst_structure_free (tags_st);
    }

    return info;
  }

}
Example #17
// Obtains a list of supported extensions from typefind factories
// TODO: improve the list. It is obviously incomplete.
wxArrayString
GStreamerImportPlugin::GetSupportedExtensions()
{
   // We refresh the extensions each time this is called in case the
   // user had installed additional gstreamer plugins while Audacity
   // was active.
   mExtensions.Empty();

   // Gather extensions from all factories that support audio
   GList *factories = gst_type_find_factory_get_list();
   for (GList *list = factories; list != NULL; list = g_list_next(list))
   {
      GstTypeFindFactory *factory = GST_TYPE_FIND_FACTORY(list->data);

      // We need the capabilities to determine if it handles audio
      GstCaps *caps = gst_type_find_factory_get_caps(factory);
      if (!caps)
      {
         continue;
      }

      // Check each structure in the caps for audio
      for (guint c = 0, clen = gst_caps_get_size(caps); c < clen; c++)
      {
         // Bypass if it isn't for audio
         GstStructure *str = gst_caps_get_structure(caps, c);
         if (!g_str_has_prefix(gst_structure_get_name(str), "audio"))
         {
            continue;
         }

         // This factory can handle audio, so get the extensions
         const gchar *const *extensions = gst_type_find_factory_get_extensions(factory);
         if (!extensions)
         {
            continue;
         }

         // Add each extension to the list
         for (guint i = 0; extensions[i] != NULL; i++)
         {
            wxString extension = wxString::FromUTF8(extensions[i]);
            if (mExtensions.Index(extension.c_str(), false) == wxNOT_FOUND)
            {
               mExtensions.Add(extension);
            }
         }
      }
   }
   gst_plugin_feature_list_free(factories);

   // Get them in a decent order
   mExtensions.Sort();

   // Log it for debugging
   wxString extensions = wxT("Extensions:");
   for (size_t i = 0; i < mExtensions.GetCount(); i++)
   {
      extensions = extensions + wxT(" ") + mExtensions[i];
   }
   wxLogMessage(wxT("%s"), extensions.c_str());

   return mExtensions;
}
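Example #18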
/* Called when pipeline is pre-rolled */
static void
discoverer_collect (GstDiscoverer * dc)
{
  GST_DEBUG ("Collecting information");

  /* Stop the timeout handler if present */
  if (dc->priv->timeoutid) {
    g_source_remove (dc->priv->timeoutid);
    dc->priv->timeoutid = 0;
  }

  if (dc->priv->streams) {
    /* FIXME : Make this querying optional */
    if (TRUE) {
      GstElement *pipeline = (GstElement *) dc->priv->pipeline;
      GstFormat format = GST_FORMAT_TIME;
      gint64 dur;

      GST_DEBUG ("Attempting to query duration");

      if (gst_element_query_duration (pipeline, &format, &dur)) {
        if (format == GST_FORMAT_TIME) {
          GST_DEBUG ("Got duration %" GST_TIME_FORMAT, GST_TIME_ARGS (dur));
          dc->priv->current_info->duration = (guint64) dur;
        }
      }

      if (dc->priv->seeking_query) {
        if (gst_element_query (pipeline, dc->priv->seeking_query)) {
          gboolean seekable;

          gst_query_parse_seeking (dc->priv->seeking_query, &format,
              &seekable, NULL, NULL);
          if (format == GST_FORMAT_TIME) {
            GST_DEBUG ("Got seekable %d", seekable);
            dc->priv->current_info->seekable = seekable;
          }
        }
      }
    }

    if (dc->priv->current_topology)
      dc->priv->current_info->stream_info = parse_stream_topology (dc,
          dc->priv->current_topology, NULL);

    /*
     * Images need some special handling. They do not have a duration, have
     * caps named image/<foo> (the exception being MJPEG video which is also
     * type image/jpeg), and should consist of precisely one stream (actually
     * initially there are 2, the image and raw stream, but we squash these
     * while parsing the stream topology). At some point, if we find that these
     * conditions are not sufficient, we can count the number of decoders and
     * parsers in the chain, and if there's more than one decoder, or any
     * parser at all, we should not mark this as an image.
     */
    if (dc->priv->current_info->duration == 0 &&
        dc->priv->current_info->stream_info != NULL &&
        dc->priv->current_info->stream_info->next == NULL) {
      GstStructure *st =
          gst_caps_get_structure (dc->priv->current_info->stream_info->caps, 0);

      if (g_str_has_prefix (gst_structure_get_name (st), "image/"))
        ((GstDiscovererVideoInfo *) dc->priv->current_info->
            stream_info)->is_image = TRUE;
    }
  }

  if (dc->priv->async) {
    GST_DEBUG ("Emitting 'discoverered'");
    g_signal_emit (dc, gst_discoverer_signals[SIGNAL_DISCOVERED], 0,
        dc->priv->current_info, dc->priv->current_error);
    /* Clients get a copy of current_info since it is a boxed type */
    gst_discoverer_info_unref (dc->priv->current_info);
  }
}
Example #19
static gboolean
gst_aravis_set_caps (GstBaseSrc *src, GstCaps *caps)
{
    GstAravis* gst_aravis = GST_ARAVIS(src);
    GstStructure *structure;
    ArvPixelFormat pixel_format;
    int height, width;
    int bpp, depth;
    const GValue *frame_rate;
    const char *caps_string;
    unsigned int i;
    guint32 fourcc;

    GST_LOG_OBJECT (gst_aravis, "Requested caps = %" GST_PTR_FORMAT, caps);

    arv_camera_stop_acquisition (gst_aravis->camera);

    if (gst_aravis->stream != NULL)
        g_object_unref (gst_aravis->stream);

    structure = gst_caps_get_structure (caps, 0);

    gst_structure_get_int (structure, "width", &width);
    gst_structure_get_int (structure, "height", &height);
    frame_rate = gst_structure_get_value (structure, "framerate");
    gst_structure_get_fourcc (structure, "format", &fourcc);
    gst_structure_get_int (structure, "bpp", &bpp);
    gst_structure_get_int (structure, "depth", &depth);

    pixel_format = arv_pixel_format_from_gst_caps (gst_structure_get_name (structure), bpp, depth, fourcc);

    arv_camera_set_region (gst_aravis->camera, 0, 0, width, height);
    arv_camera_set_binning (gst_aravis->camera, gst_aravis->h_binning, gst_aravis->v_binning);
    arv_camera_set_pixel_format (gst_aravis->camera, pixel_format);

    if (frame_rate != NULL) {
        double dbl_frame_rate;

        dbl_frame_rate = (double) gst_value_get_fraction_numerator (frame_rate) /
                         (double) gst_value_get_fraction_denominator (frame_rate);

        GST_DEBUG_OBJECT (gst_aravis, "Frame rate = %g Hz", dbl_frame_rate);
        arv_camera_set_frame_rate (gst_aravis->camera, dbl_frame_rate);

        if (dbl_frame_rate > 0.0)
            gst_aravis->buffer_timeout_us = MAX (GST_ARAVIS_BUFFER_TIMEOUT_DEFAULT,
                                                 3e6 / dbl_frame_rate);
        else
            gst_aravis->buffer_timeout_us = GST_ARAVIS_BUFFER_TIMEOUT_DEFAULT;
    } else
        gst_aravis->buffer_timeout_us = GST_ARAVIS_BUFFER_TIMEOUT_DEFAULT;

    GST_DEBUG_OBJECT (gst_aravis, "Buffer timeout = %Ld µs", gst_aravis->buffer_timeout_us);

    GST_DEBUG_OBJECT (gst_aravis, "Actual frame rate = %g Hz", arv_camera_get_frame_rate (gst_aravis->camera));

    GST_DEBUG_OBJECT (gst_aravis, "Gain       = %d", gst_aravis->gain);
    arv_camera_set_gain (gst_aravis->camera, gst_aravis->gain);
    GST_DEBUG_OBJECT (gst_aravis, "Actual gain       = %d", arv_camera_get_gain (gst_aravis->camera));

    GST_DEBUG_OBJECT (gst_aravis, "Exposure   = %g µs", gst_aravis->exposure_time_us);
    arv_camera_set_exposure_time (gst_aravis->camera, gst_aravis->exposure_time_us);
    GST_DEBUG_OBJECT (gst_aravis, "Actual exposure   = %g µs", arv_camera_get_exposure_time (gst_aravis->camera));

    if (gst_aravis->fixed_caps != NULL)
        gst_caps_unref (gst_aravis->fixed_caps);

    caps_string = arv_pixel_format_to_gst_caps_string (pixel_format);
    if (caps_string != NULL) {
        GstStructure *structure;
        GstCaps *caps;

        caps = gst_caps_new_empty ();
        structure = gst_structure_from_string (caps_string, NULL);
        gst_structure_set (structure,
                           "width", G_TYPE_INT, width,
                           "height", G_TYPE_INT, height,
                           NULL);

        if (frame_rate != NULL)
            gst_structure_set_value (structure, "framerate", frame_rate);

        gst_caps_append_structure (caps, structure);

        gst_aravis->fixed_caps = caps;
    } else
        gst_aravis->fixed_caps = NULL;

    gst_aravis->payload = arv_camera_get_payload (gst_aravis->camera);
    gst_aravis->stream = arv_camera_create_stream (gst_aravis->camera, NULL, NULL);

    for (i = 0; i < GST_ARAVIS_N_BUFFERS; i++)
        arv_stream_push_buffer (gst_aravis->stream,
                                arv_buffer_new (gst_aravis->payload, NULL));

    GST_LOG_OBJECT (gst_aravis, "Start acquisition");
    arv_camera_start_acquisition (gst_aravis->camera);

    gst_aravis->timestamp_offset = 0;
    gst_aravis->last_timestamp = 0;

    return TRUE;
}
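/* The buffer-timeout rule used above, factored out as a hedged sketch:
 * wait up to three frame periods, but never less than the default.
 * Assumes the same GST_ARAVIS_BUFFER_TIMEOUT_DEFAULT constant. */
static guint64
gst_aravis_compute_buffer_timeout_us (double frame_rate_hz)
{
    if (frame_rate_hz > 0.0)
        return MAX (GST_ARAVIS_BUFFER_TIMEOUT_DEFAULT,
                    (guint64) (3e6 / frame_rate_hz));

    return GST_ARAVIS_BUFFER_TIMEOUT_DEFAULT;
}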
Exemplo n.º 20
static gboolean
gst_real_audio_dec_setcaps (GstPad * pad, GstCaps * caps)
{
  GstRealAudioDec *dec = GST_REAL_AUDIO_DEC (GST_PAD_PARENT (pad));
  GstStructure *s = gst_caps_get_structure (caps, 0);
  gint version, flavor, channels, rate, leaf_size, packet_size, width, height;
  guint16 res = 0;
  RAInit data;
  gboolean bres;
  const GValue *v;
  GstBuffer *buf = NULL;
  const gchar *name = gst_structure_get_name (s);

  if (!strcmp (name, "audio/x-sipro")) {
    version = GST_REAL_AUDIO_DEC_VERSION_SIPR;
  } else {
    if (!gst_structure_get_int (s, "raversion", &version))
      goto missing_keys;
  }

  if (!gst_structure_get_int (s, "flavor", &flavor) ||
      !gst_structure_get_int (s, "channels", &channels) ||
      !gst_structure_get_int (s, "width", &width) ||
      !gst_structure_get_int (s, "rate", &rate) ||
      !gst_structure_get_int (s, "height", &height) ||
      !gst_structure_get_int (s, "leaf_size", &leaf_size) ||
      !gst_structure_get_int (s, "packet_size", &packet_size))
    goto missing_keys;

  if ((v = gst_structure_get_value (s, "codec_data")))
    buf = g_value_peek_pointer (v);

  GST_LOG_OBJECT (dec, "opening code for version %d", version);

  /* first close existing decoder */
  close_library (dec, &dec->lib);

  if (!open_library (dec, version, &dec->lib))
    goto could_not_open;

  /* we have the module, now initialize with the caps data */
  data.samplerate = rate;
  data.width = width;
  data.channels = channels;
  data.quality = 100;
  data.leaf_size = leaf_size;
  data.packet_size = packet_size;
  data.datalen = buf ? GST_BUFFER_SIZE (buf) : 0;
  data.data = buf ? GST_BUFFER_DATA (buf) : NULL;

  if ((res = dec->lib.RAInitDecoder (dec->lib.context, &data))) {
    GST_WARNING_OBJECT (dec, "RAInitDecoder() failed");
    goto could_not_initialize;
  }

  if (dec->lib.RASetPwd) {
    dec->lib.RASetPwd (dec->lib.context, dec->pwd ? dec->pwd : DEFAULT_PWD);
  }

  if ((res = dec->lib.RASetFlavor (dec->lib.context, flavor))) {
    GST_WARNING_OBJECT (dec, "RASetFlavor(%d) failed", flavor);
    goto could_not_initialize;
  }

  caps = gst_caps_new_simple ("audio/x-raw-int",
      "endianness", G_TYPE_INT, G_BYTE_ORDER,
      "width", G_TYPE_INT, width,
      "depth", G_TYPE_INT, width,
      "rate", G_TYPE_INT, rate,
      "channels", G_TYPE_INT, channels, "signed", G_TYPE_BOOLEAN, TRUE, NULL);
  bres = gst_pad_set_caps (GST_PAD (dec->src), caps);
  gst_caps_unref (caps);
  if (!bres)
    goto could_not_set_caps;

  dec->width = width;
  dec->height = height;
  dec->leaf_size = leaf_size;

  GST_LOG_OBJECT (dec, "opened module");

  return TRUE;

missing_keys:
  {
    GST_DEBUG_OBJECT (dec, "Could not find all necessary keys in structure.");
    return FALSE;
  }
could_not_open:
  {
    GST_DEBUG_OBJECT (dec, "Could not find decoder");
    return FALSE;
  }
could_not_initialize:
  {
    close_library (dec, &dec->lib);
    GST_WARNING_OBJECT (dec, "Initialization of REAL driver failed (%i).", res);
    return FALSE;
  }
could_not_set_caps:
  {
    /* should normally not fail */
    close_library (dec, &dec->lib);
    GST_DEBUG_OBJECT (dec, "Could not convince peer to accept caps.");
    return FALSE;
  }
}
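/* Illustrative only: caps of the shape this setcaps handler expects.
 * The field names come from the checks above; the concrete values are
 * made up for the example. */
static GstCaps *
make_example_ra_caps (void)
{
  return gst_caps_new_simple ("audio/x-pn-realaudio",
      "raversion", G_TYPE_INT, 8,
      "flavor", G_TYPE_INT, 0,
      "channels", G_TYPE_INT, 2,
      "width", G_TYPE_INT, 16,
      "rate", G_TYPE_INT, 44100,
      "height", G_TYPE_INT, 16,
      "leaf_size", G_TYPE_INT, 96,
      "packet_size", G_TYPE_INT, 384, NULL);
}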
Exemplo n.º 21
static gboolean gst_avdtp_sink_init_sbc_pkt_conf(GstAvdtpSink *sink,
					GstCaps *caps,
					sbc_capabilities_t *pkt)
{
	sbc_capabilities_t *cfg;
	const GValue *value = NULL;
	const char *pref, *name;
	gint rate, subbands, blocks;
	GstStructure *structure = gst_caps_get_structure(caps, 0);

	cfg = (void *) gst_avdtp_find_caps(sink, BT_A2DP_SBC_SINK);
	name = gst_structure_get_name(structure);

	if (!(IS_SBC(name))) {
		GST_ERROR_OBJECT(sink, "Unexpected format %s, "
				"was expecting sbc", name);
		return FALSE;
	}

	value = gst_structure_get_value(structure, "rate");
	rate = g_value_get_int(value);
	if (rate == 44100)
		cfg->frequency = BT_SBC_SAMPLING_FREQ_44100;
	else if (rate == 48000)
		cfg->frequency = BT_SBC_SAMPLING_FREQ_48000;
	else if (rate == 32000)
		cfg->frequency = BT_SBC_SAMPLING_FREQ_32000;
	else if (rate == 16000)
		cfg->frequency = BT_SBC_SAMPLING_FREQ_16000;
	else {
		GST_ERROR_OBJECT(sink, "Invalid rate while setting caps");
		return FALSE;
	}

	value = gst_structure_get_value(structure, "mode");
	pref = g_value_get_string(value);
	if (strcmp(pref, "mono") == 0)
		cfg->channel_mode = BT_A2DP_CHANNEL_MODE_MONO;
	else if (strcmp(pref, "dual") == 0)
		cfg->channel_mode = BT_A2DP_CHANNEL_MODE_DUAL_CHANNEL;
	else if (strcmp(pref, "stereo") == 0)
		cfg->channel_mode = BT_A2DP_CHANNEL_MODE_STEREO;
	else if (strcmp(pref, "joint") == 0)
		cfg->channel_mode = BT_A2DP_CHANNEL_MODE_JOINT_STEREO;
	else {
		GST_ERROR_OBJECT(sink, "Invalid mode %s", pref);
		return FALSE;
	}

	value = gst_structure_get_value(structure, "allocation");
	pref = g_value_get_string(value);
	if (strcmp(pref, "loudness") == 0)
		cfg->allocation_method = BT_A2DP_ALLOCATION_LOUDNESS;
	else if (strcmp(pref, "snr") == 0)
		cfg->allocation_method = BT_A2DP_ALLOCATION_SNR;
	else {
		GST_ERROR_OBJECT(sink, "Invalid allocation: %s", pref);
		return FALSE;
	}

	value = gst_structure_get_value(structure, "subbands");
	subbands = g_value_get_int(value);
	if (subbands == 8)
		cfg->subbands = BT_A2DP_SUBBANDS_8;
	else if (subbands == 4)
		cfg->subbands = BT_A2DP_SUBBANDS_4;
	else {
		GST_ERROR_OBJECT(sink, "Invalid subbands %d", subbands);
		return FALSE;
	}

	value = gst_structure_get_value(structure, "blocks");
	blocks = g_value_get_int(value);
	if (blocks == 16)
		cfg->block_length = BT_A2DP_BLOCK_LENGTH_16;
	else if (blocks == 12)
		cfg->block_length = BT_A2DP_BLOCK_LENGTH_12;
	else if (blocks == 8)
		cfg->block_length = BT_A2DP_BLOCK_LENGTH_8;
	else if (blocks == 4)
		cfg->block_length = BT_A2DP_BLOCK_LENGTH_4;
	else {
		GST_ERROR_OBJECT(sink, "Invalid blocks %d", blocks);
		return FALSE;
	}

	value = gst_structure_get_value(structure, "bitpool");
	cfg->max_bitpool = cfg->min_bitpool = g_value_get_int(value);

	memcpy(pkt, cfg, sizeof(*pkt));

	return TRUE;
}
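/* Hedged sketch: the rate-to-frequency mapping above rewritten as a
 * lookup table. Assumes the BT_SBC_SAMPLING_FREQ_* constants fit the
 * uint8_t frequency field of sbc_capabilities_t. */
static gboolean sbc_rate_to_frequency(gint rate, guint8 *frequency)
{
	static const struct {
		gint rate;
		guint8 freq;
	} map[] = {
		{ 16000, BT_SBC_SAMPLING_FREQ_16000 },
		{ 32000, BT_SBC_SAMPLING_FREQ_32000 },
		{ 44100, BT_SBC_SAMPLING_FREQ_44100 },
		{ 48000, BT_SBC_SAMPLING_FREQ_48000 },
	};
	guint i;

	for (i = 0; i < G_N_ELEMENTS(map); i++) {
		if (map[i].rate == rate) {
			*frequency = map[i].freq;
			return TRUE;
		}
	}

	return FALSE;
}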
Exemplo n.º 22
/**
 * main:
 **/
int
main (int argc, gchar **argv)
{
	GOptionContext *context;
	guint i;
	guint len;
	gchar **codecs = NULL;
	gint xid = 0;
	const gchar *suffix;
	gchar *resource;
	_cleanup_error_free_ GError *error = NULL;
	_cleanup_object_unref_ GDBusProxy *proxy = NULL;
	_cleanup_ptrarray_unref_ GPtrArray *array = NULL;
	_cleanup_strv_free_ gchar **resources = NULL;
	_cleanup_variant_unref_ GVariant *value = NULL;

	const GOptionEntry options[] = {
		{ "transient-for", '\0', 0, G_OPTION_ARG_INT, &xid, "The XID of the parent window", NULL },
		{ G_OPTION_REMAINING, '\0', 0, G_OPTION_ARG_FILENAME_ARRAY, &codecs, "GStreamer install infos", NULL },
		{ NULL }
	};

#if (GLIB_MAJOR_VERSION == 2 && GLIB_MINOR_VERSION < 35)
	g_type_init ();
#endif

	gst_init (&argc, &argv);

	context = g_option_context_new ("Install missing codecs");
	g_option_context_add_main_entries (context, options, NULL);

	if (!g_option_context_parse (context, &argc, &argv, &error)) {
		g_print ("%s\nRun '%s --help' to see a full list of available command line options.\n",
			 error->message, argv[0]);
		return GST_INSTALL_PLUGINS_ERROR;
	}
	if (codecs == NULL) {
		g_print ("Missing codecs information\n");
		g_print ("Run 'with --help' to see a full list of available command line options.\n");
		return GST_INSTALL_PLUGINS_ERROR;
	}

	/* this is our parent window */
	g_message ("PackageKit: xid = %i", xid);

	/* get proxy */
	proxy = g_dbus_proxy_new_for_bus_sync (G_BUS_TYPE_SESSION,
					       G_DBUS_PROXY_FLAGS_DO_NOT_LOAD_PROPERTIES |
					       G_DBUS_PROXY_FLAGS_DO_NOT_CONNECT_SIGNALS,
					       NULL,
					       "org.freedesktop.PackageKit",
					       "/org/freedesktop/PackageKit",
					       "org.freedesktop.PackageKit.Modify",
					       NULL,
					       &error);
	if (proxy == NULL) {
		g_warning ("Cannot connect to PackageKit session service: %s",
			   error->message);
		return GST_INSTALL_PLUGINS_ERROR;
	}

	/* use a ()(64bit) suffix for 64 bit */
	suffix = pk_gst_get_arch_suffix ();

	array = g_ptr_array_new_with_free_func (g_free);
	len = g_strv_length (codecs);

	/* process argv */
	for (i = 0; i < len; i++) {
		PkGstCodecInfo *info;
		gchar *type;
		const gchar *gstreamer_version;

		info = pk_gst_parse_codec (codecs[i]);
		if (info == NULL) {
			g_message ("skipping %s", codecs[i]);
			continue;
		}

		/* gstreamer1 is the provide name used for the
		 * first version of the new release */
		if (g_strcmp0 (info->gstreamer_version, "1.0") == 0)
			gstreamer_version = "1";
		else
			gstreamer_version = info->gstreamer_version;

		g_message ("PackageKit: Codec nice name: %s", info->codec_name);
		if (info->structure != NULL) {
			_cleanup_free_ gchar *s = NULL;
			s = pk_gst_structure_to_provide (info->structure);
			type = g_strdup_printf ("gstreamer%s(%s-%s)%s%s",
						gstreamer_version,
						info->type_name,
						gst_structure_get_name (info->structure),
						s, suffix);
			g_message ("PackageKit: structure: %s", type);
		} else {
			type = g_strdup_printf ("gstreamer%s(%s)%s",
						gstreamer_version,
						info->type_name,
						suffix);
			g_message ("PackageKit: non-structure: %s", type);
		}

		/* "encode" */
		resource = g_strdup_printf ("%s|%s", info->codec_name, type);
		g_ptr_array_add (array, resource);

		/* free codec structure */
		pk_gst_codec_free (info);
	}

	/* nothing parsed */
	if (array->len == 0) {
		g_message ("no codec lines could be parsed");
		return GST_INSTALL_PLUGINS_ERROR;
	}

	/* convert to a GStrv */
	resources = pk_ptr_array_to_strv (array);

	/* invoke the method */
	value = g_dbus_proxy_call_sync (proxy,
					"InstallGStreamerResources",
					g_variant_new ("(u^a&ss)",
						       xid,
						       resources,
						       "hide-finished"),
					G_DBUS_CALL_FLAGS_NONE,
					60 * 60 * 1000, /* 1 hour */
					NULL,
					&error);
	if (value == NULL) {
		/* use the error string to return a good GStreamer exit code */
		g_message ("PackageKit: Did not install codec: %s", error->message);
		if (g_strrstr (error->message, "did not agree to search") != NULL)
			return GST_INSTALL_PLUGINS_USER_ABORT;
		if (g_strrstr (error->message, "not all codecs were installed") != NULL)
			return GST_INSTALL_PLUGINS_PARTIAL_SUCCESS;
		return GST_INSTALL_PLUGINS_NOT_FOUND;
	}
	return GST_INSTALL_PLUGINS_SUCCESS;
}
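/* Illustrative only: one encoded resource line as produced by the
 * "%s|%s" pattern above. The codec name and provide string are made-up
 * examples of the gstreamer1(...) format with the ()(64bit) suffix. */
static gchar *
example_resource_line (void)
{
	return g_strdup ("MPEG-4 AAC decoder|"
			 "gstreamer1(decoder-audio/mpeg)(mpegversion=4)()(64bit)");
}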
Exemplo n.º 23
static gboolean
gst_dvd_spu_subpic_event (GstPad * pad, GstObject * parent, GstEvent * event)
{
  GstDVDSpu *dvdspu = (GstDVDSpu *) parent;
  gboolean res = TRUE;

  /* Some events on the subpicture sink pad just get ignored, like 
   * FLUSH_START */
  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_CAPS:
    {
      GstCaps *caps;

      gst_event_parse_caps (event, &caps);
      res = gst_dvd_spu_subpic_set_caps (dvdspu, pad, caps);
      gst_event_unref (event);
      break;
    }
    case GST_EVENT_CUSTOM_DOWNSTREAM:
    case GST_EVENT_CUSTOM_DOWNSTREAM_STICKY:
    case GST_EVENT_CUSTOM_DOWNSTREAM_OOB:
    {
      const GstStructure *structure = gst_event_get_structure (event);
      const gchar *name = gst_structure_get_name (structure);
      gboolean need_push;

      if (!g_str_has_prefix (name, "application/x-gst-dvd")) {
        res = gst_pad_event_default (pad, parent, event);
        break;
      }

      DVD_SPU_LOCK (dvdspu);
      if (GST_EVENT_IS_SERIALIZED (event)) {
        SpuPacket *spu_packet = g_new0 (SpuPacket, 1);
        GST_DEBUG_OBJECT (dvdspu,
            "Enqueueing DVD event on subpicture pad for later");
        spu_packet->event = event;
        g_queue_push_tail (dvdspu->pending_spus, spu_packet);
      } else {
        gst_dvd_spu_handle_dvd_event (dvdspu, event);
      }

      /* If the handle_dvd_event generated a pending frame, we
       * need to synchronise with the video pad's stream lock and push it.
       * This requires some dancing to preserve locking order and handle
       * flushes correctly */
      need_push = (dvdspu->pending_frame != NULL);
      DVD_SPU_UNLOCK (dvdspu);
      if (need_push) {
        GstBuffer *to_push = NULL;
        gboolean flushing;

        GST_LOG_OBJECT (dvdspu, "Going for stream lock");
        GST_PAD_STREAM_LOCK (dvdspu->videosinkpad);
        GST_LOG_OBJECT (dvdspu, "Got stream lock");
        GST_OBJECT_LOCK (dvdspu->videosinkpad);
        flushing = GST_PAD_IS_FLUSHING (dvdspu->videosinkpad);
        GST_OBJECT_UNLOCK (dvdspu->videosinkpad);

        DVD_SPU_LOCK (dvdspu);
        if (dvdspu->pending_frame == NULL || flushing) {
          /* Got flushed while waiting for the stream lock */
          DVD_SPU_UNLOCK (dvdspu);
        } else {
          to_push = dvdspu->pending_frame;
          dvdspu->pending_frame = NULL;

          DVD_SPU_UNLOCK (dvdspu);
          gst_pad_push (dvdspu->srcpad, to_push);
        }
        GST_LOG_OBJECT (dvdspu, "Dropping stream lock");
        GST_PAD_STREAM_UNLOCK (dvdspu->videosinkpad);
      }

      break;
    }
    case GST_EVENT_SEGMENT:
    {
      GstSegment seg;

      gst_event_copy_segment (event, &seg);

      /* Only print updates if they have an end time (don't print start_time
       * updates) */
      GST_DEBUG_OBJECT (dvdspu, "subpic pad Segment: %" GST_SEGMENT_FORMAT,
          &seg);

      DVD_SPU_LOCK (dvdspu);

      dvdspu->subp_seg = seg;
      GST_LOG_OBJECT (dvdspu, "Subpicture segment now: %" GST_SEGMENT_FORMAT,
          &dvdspu->subp_seg);
      DVD_SPU_UNLOCK (dvdspu);

      gst_event_unref (event);
      break;
    }
    case GST_EVENT_GAP:
    {
      GstClockTime timestamp, duration;
      gst_event_parse_gap (event, &timestamp, &duration);
      if (GST_CLOCK_TIME_IS_VALID (duration))
        timestamp += duration;

      DVD_SPU_LOCK (dvdspu);
      dvdspu->subp_seg.position = timestamp;
      GST_LOG_OBJECT (dvdspu, "Received GAP. Segment now: %" GST_SEGMENT_FORMAT,
          &dvdspu->subp_seg);
      DVD_SPU_UNLOCK (dvdspu);

      gst_event_unref (event);
      break;
    }
    case GST_EVENT_FLUSH_START:
      gst_event_unref (event);
      goto done;
    case GST_EVENT_FLUSH_STOP:
      GST_DEBUG_OBJECT (dvdspu, "Have flush-stop event on SPU pad");
      DVD_SPU_LOCK (dvdspu);
      gst_segment_init (&dvdspu->subp_seg, GST_FORMAT_UNDEFINED);
      gst_dvd_spu_flush_spu_info (dvdspu, TRUE);
      DVD_SPU_UNLOCK (dvdspu);

      /* We don't forward flushes on the spu pad */
      gst_event_unref (event);
      goto done;
    case GST_EVENT_EOS:
      /* drop EOS on the subtitle pad, it means there are no more subtitles,
       * video might still continue, though */
      gst_event_unref (event);
      goto done;
    default:
      res = gst_pad_event_default (pad, parent, event);
      break;
  }

done:

  return res;
}
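/* Hedged sketch: building the kind of custom downstream event the
 * handler above reacts to (structure name prefixed with
 * "application/x-gst-dvd"). The "event" field value is illustrative. */
static GstEvent *
make_example_dvd_event (void)
{
  GstStructure *s = gst_structure_new ("application/x-gst-dvd",
      "event", G_TYPE_STRING, "dvd-spu-highlight", NULL);

  /* gst_event_new_custom () takes ownership of the structure */
  return gst_event_new_custom (GST_EVENT_CUSTOM_DOWNSTREAM, s);
}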
Exemplo n.º 24
static gboolean
bus_message (GstBus * bus, GstMessage * message, App * app)
{
    gchar *sourceName;
    GstObject *source;
    gchar *string;
    GstState current_state;

    if (!message)
        return FALSE;
    source = GST_MESSAGE_SRC (message);
    if (!GST_IS_OBJECT (source))
        return FALSE;
    sourceName = gst_object_get_name (source);

    if (gst_message_get_structure (message))
        string = gst_structure_to_string (gst_message_get_structure (message));
    else
        string = g_strdup (GST_MESSAGE_TYPE_NAME (message));
    GST_DEBUG("gst_message from %s: %s", sourceName, string);
    g_free (string);

    switch (GST_MESSAGE_TYPE (message)) {
    case GST_MESSAGE_ERROR:
    {
        GError *gerror;
        gchar *debug;

        gst_message_parse_error (message, &gerror, &debug);
        gst_object_default_error (GST_MESSAGE_SRC (message), gerror, debug);
        g_error_free (gerror);
        g_free (debug);

        g_main_loop_quit (app->loop);
        break;
    }
    case GST_MESSAGE_WARNING:
    {
        GError *gerror;
        gchar *debug;

        gst_message_parse_warning (message, &gerror, &debug);
        gst_object_default_error (GST_MESSAGE_SRC (message), gerror, debug);
        g_error_free (gerror);
        g_free (debug);

//       g_main_loop_quit (app->loop);
        break;
    }
    case GST_MESSAGE_EOS:
        g_message ("received EOS");
        g_main_loop_quit (app->loop);
        break;
    case GST_MESSAGE_ASYNC_DONE:
        break;
    case GST_MESSAGE_ELEMENT:
    {
        const GstStructure *msgstruct = gst_message_get_structure (message);
        if (msgstruct) {
            const gchar *eventname = gst_structure_get_name (msgstruct);
            if (!strcmp (eventname, "seekable"))
                app->is_seekable = TRUE;
        }
        break;
    }
    case GST_MESSAGE_STATE_CHANGED:
    {
        GstState old_state, new_state;
        GstStateChange transition;
        if (GST_MESSAGE_SRC (message) != GST_OBJECT (app->tsdemux))
            break;

        gst_message_parse_state_changed (message, &old_state, &new_state, NULL);
        transition = (GstStateChange) GST_STATE_TRANSITION (old_state, new_state);

        switch (transition) {
        case GST_STATE_CHANGE_NULL_TO_READY:
            break;
        case GST_STATE_CHANGE_READY_TO_PAUSED:
            break;
        case GST_STATE_CHANGE_PAUSED_TO_PLAYING:
        {

        }
        break;
        case GST_STATE_CHANGE_PLAYING_TO_PAUSED:
            break;
        case GST_STATE_CHANGE_PAUSED_TO_READY:
            break;
        case GST_STATE_CHANGE_READY_TO_NULL:
            break;
        }
        break;
    }
    case GST_MESSAGE_SEGMENT_DONE:
    {
        GST_DEBUG ("GST_MESSAGE_SEGMENT_DONE!!!");
        do_seek (app);
        break;
    }
    default:
        break;
    }
    gst_element_get_state (app->pipeline, &current_state, NULL, 0);
    if (app->current_segment == 0 && app->segment_count /*&& app->is_seekable*/
            && current_state == GST_STATE_PLAYING)
        do_seek (app);
    GST_DEBUG_BIN_TO_DOT_FILE(GST_BIN(app->pipeline),GST_DEBUG_GRAPH_SHOW_ALL,"bdremux_pipelinegraph_message");
    return TRUE;
}
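/* Minimal usage sketch for the handler above: attach it as a watch on
 * the pipeline's bus. Assumes the same App type and that app->pipeline
 * is already set up. */
static void
attach_bus_watch (App * app)
{
    GstBus *bus = gst_element_get_bus (app->pipeline);

    gst_bus_add_watch (bus, (GstBusFunc) bus_message, app);
    gst_object_unref (bus);
}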
Exemplo n.º 25
static gboolean
gst_visual_gl_src_query (GstPad * pad, GstQuery * query)
{
  gboolean res;
  GstVisualGL *visual;

  visual = GST_VISUAL_GL (gst_pad_get_parent (pad));

  switch (GST_QUERY_TYPE (query)) {
    case GST_QUERY_LATENCY:
    {
      /* We need to send the query upstream and add the returned latency to our
       * own */
      GstClockTime min_latency, max_latency;
      gboolean us_live;
      GstClockTime our_latency;
      guint max_samples;

      if ((res = gst_pad_peer_query (visual->sinkpad, query))) {
        gst_query_parse_latency (query, &us_live, &min_latency, &max_latency);

        GST_DEBUG_OBJECT (visual, "Peer latency: min %"
            GST_TIME_FORMAT " max %" GST_TIME_FORMAT,
            GST_TIME_ARGS (min_latency), GST_TIME_ARGS (max_latency));

        /* the max samples we must buffer */
        max_samples = MAX (VISUAL_SAMPLES, visual->spf);
        our_latency =
            gst_util_uint64_scale_int (max_samples, GST_SECOND, visual->rate);

        GST_DEBUG_OBJECT (visual, "Our latency: %" GST_TIME_FORMAT,
            GST_TIME_ARGS (our_latency));

        /* we add some latency but only if we need to buffer more than what
         * upstream gives us */
        min_latency += our_latency;
        if (max_latency != -1)
          max_latency += our_latency;

        GST_DEBUG_OBJECT (visual, "Calculated total latency : min %"
            GST_TIME_FORMAT " max %" GST_TIME_FORMAT,
            GST_TIME_ARGS (min_latency), GST_TIME_ARGS (max_latency));

        gst_query_set_latency (query, TRUE, min_latency, max_latency);
      }
      break;
    }
    case GST_QUERY_CUSTOM:
    {
      GstStructure *structure = gst_query_get_structure (query);
      gchar *name = gst_element_get_name (visual);

      res = g_strcmp0 (name, gst_structure_get_name (structure)) == 0;
      g_free (name);

      if (!res)
        res = gst_pad_query_default (pad, query);
      break;
    }
    default:
      res = gst_pad_peer_query (visual->sinkpad, query);
      break;
  }

  gst_object_unref (visual);

  return res;
}
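/* The latency rule from the LATENCY branch above, factored out as a
 * sketch: we must be able to buffer max (VISUAL_SAMPLES, spf) samples,
 * expressed as a GstClockTime at the given sample rate. */
static GstClockTime
visual_gl_own_latency (guint spf, gint rate)
{
  guint max_samples = MAX (VISUAL_SAMPLES, spf);

  return gst_util_uint64_scale_int (max_samples, GST_SECOND, rate);
}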
Exemplo n.º 26
static gboolean
gst_wavenc_sink_setcaps (GstPad * pad, GstCaps * caps)
{
    GstWavEnc *wavenc;
    GstStructure *structure;
    const gchar *name;
    gint chans, rate, width;

    wavenc = GST_WAVENC (gst_pad_get_parent (pad));

    if (wavenc->sent_header) {
        GST_WARNING_OBJECT (wavenc, "cannot change format in middle of stream");
        goto fail;
    }

    GST_DEBUG_OBJECT (wavenc, "got caps: %" GST_PTR_FORMAT, caps);

    structure = gst_caps_get_structure (caps, 0);
    name = gst_structure_get_name (structure);

    if (!gst_structure_get_int (structure, "channels", &chans) ||
            !gst_structure_get_int (structure, "rate", &rate)) {
        GST_WARNING_OBJECT (wavenc, "caps incomplete");
        goto fail;
    }

    if (strcmp (name, "audio/x-raw-int") == 0) {
        if (!gst_structure_get_int (structure, "width", &width)) {
            GST_WARNING_OBJECT (wavenc, "caps incomplete");
            goto fail;
        }
        wavenc->format = GST_RIFF_WAVE_FORMAT_PCM;
        wavenc->width = width;
    } else if (strcmp (name, "audio/x-raw-float") == 0) {
        if (!gst_structure_get_int (structure, "width", &width)) {
            GST_WARNING_OBJECT (wavenc, "caps incomplete");
            goto fail;
        }
        wavenc->format = GST_RIFF_WAVE_FORMAT_IEEE_FLOAT;
        wavenc->width = width;
    } else if (strcmp (name, "audio/x-alaw") == 0) {
        wavenc->format = GST_RIFF_WAVE_FORMAT_ALAW;
        wavenc->width = 8;
    } else if (strcmp (name, "audio/x-mulaw") == 0) {
        wavenc->format = GST_RIFF_WAVE_FORMAT_MULAW;
        wavenc->width = 8;
    } else {
        GST_WARNING_OBJECT (wavenc, "Unsupported format %s", name);
        goto fail;
    }

    wavenc->channels = chans;
    wavenc->rate = rate;

    GST_LOG_OBJECT (wavenc,
                    "accepted caps: format=0x%04x chans=%u width=%u rate=%u",
                    wavenc->format, wavenc->channels, wavenc->width, wavenc->rate);

    gst_object_unref (wavenc);
    return TRUE;

fail:
    gst_object_unref (wavenc);
    return FALSE;
}
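/* Illustrative only: caps this setcaps handler accepts for 16-bit PCM,
 * matching the 0.10-era audio/x-raw-int branch above. */
static GstCaps *
make_example_pcm16_caps (void)
{
    return gst_caps_new_simple ("audio/x-raw-int",
                                "channels", G_TYPE_INT, 2,
                                "rate", G_TYPE_INT, 44100,
                                "width", G_TYPE_INT, 16, NULL);
}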
Exemplo n.º 27
static GstAmcFormat *
create_amc_format (GstAmcVideoEnc * encoder, GstVideoCodecState * input_state,
    GstCaps * src_caps)
{
  GstAmcVideoEncClass *klass;
  GstStructure *s;
  const gchar *name;
  const gchar *mime = NULL;
  const gchar *profile_string = NULL;
  const gchar *level_string = NULL;
  struct
  {
    const gchar *key;
    gint id;
  } amc_profile = {
  NULL, -1};
  struct
  {
    const gchar *key;
    gint id;
  } amc_level = {
  NULL, -1};
  gint color_format;
  gint stride, slice_height;
  GstAmcFormat *format = NULL;
  GstVideoInfo *info = &input_state->info;
  GError *err = NULL;

  klass = GST_AMC_VIDEO_ENC_GET_CLASS (encoder);
  s = gst_caps_get_structure (src_caps, 0);
  if (!s)
    return NULL;

  name = gst_structure_get_name (s);
  profile_string = gst_structure_get_string (s, "profile");
  level_string = gst_structure_get_string (s, "level");

  if (strcmp (name, "video/mpeg") == 0) {
    gint mpegversion;

    if (!gst_structure_get_int (s, "mpegversion", &mpegversion))
      return NULL;

    if (mpegversion == 4) {
      mime = "video/mp4v-es";

      if (profile_string) {
        amc_profile.key = "profile";    /* named profile ? */
        amc_profile.id = gst_amc_mpeg4_profile_from_string (profile_string);
      }

      if (level_string) {
        amc_level.key = "level";        /* named level ? */
        amc_level.id = gst_amc_mpeg4_level_from_string (level_string);
      }
    } else if ( /* mpegversion == 1 || */ mpegversion == 2)
      mime = "video/mpeg2";
  } else if (strcmp (name, "video/x-h263") == 0) {
    mime = "video/3gpp";
  } else if (strcmp (name, "video/x-h264") == 0) {
    mime = "video/avc";

    if (profile_string) {
      amc_profile.key = "profile";      /* named profile ? */
      amc_profile.id = gst_amc_avc_profile_from_string (profile_string);
    }

    if (level_string) {
      amc_level.key = "level";  /* named level ? */
      amc_level.id = gst_amc_avc_level_from_string (level_string);
    }
  } else if (strcmp (name, "video/x-vp8") == 0) {
    mime = "video/x-vnd.on2.vp8";
  } else {
    GST_ERROR_OBJECT (encoder, "Failed to convert caps(%s/...) to any mime",
        name);
    return NULL;
  }

  format = gst_amc_format_new_video (mime, info->width, info->height, &err);
  if (!format) {
    GST_ERROR_OBJECT (encoder, "Failed to create a \"%s,%dx%d\" MediaFormat",
        mime, info->width, info->height);
    GST_ELEMENT_ERROR_FROM_ERROR (encoder, err);
    return NULL;
  }

  color_format =
      gst_amc_video_format_to_color_format (klass->codec_info,
      mime, info->finfo->format);
  if (color_format == -1)
    goto video_format_failed_to_convert;

  gst_amc_format_set_int (format, "bitrate", encoder->bitrate, &err);
  if (err)
    GST_ELEMENT_WARNING_FROM_ERROR (encoder, err);
  gst_amc_format_set_int (format, "color-format", color_format, &err);
  if (err)
    GST_ELEMENT_WARNING_FROM_ERROR (encoder, err);
  stride = GST_ROUND_UP_4 (info->width);        /* safe (?) */
  gst_amc_format_set_int (format, "stride", stride, &err);
  if (err)
    GST_ELEMENT_WARNING_FROM_ERROR (encoder, err);
  slice_height = info->height;
  gst_amc_format_set_int (format, "slice-height", slice_height, &err);
  if (err)
    GST_ELEMENT_WARNING_FROM_ERROR (encoder, err);

  if (profile_string) {
    if (amc_profile.id == -1)
      goto unsupported_profile;

    /* FIXME: Set to any value in AVCProfile* leads to
     * codec configuration fail */
    /* gst_amc_format_set_int (format, amc_profile.key, 0x40); */
  }

  if (level_string) {
    if (amc_level.id == -1)
      goto unsupported_level;

    /* gst_amc_format_set_int (format, amc_level.key, amc_level.id); */
  }

  gst_amc_format_set_int (format, "i-frame-interval", encoder->i_frame_int,
      &err);
  if (err)
    GST_ELEMENT_WARNING_FROM_ERROR (encoder, err);

  if (info->fps_d)
    gst_amc_format_set_float (format, "frame-rate",
        ((gfloat) info->fps_n) / info->fps_d, &err);
  if (err)
    GST_ELEMENT_WARNING_FROM_ERROR (encoder, err);

  encoder->format = info->finfo->format;
  if (!gst_amc_color_format_info_set (&encoder->color_format_info,
          klass->codec_info, mime, color_format, info->width, info->height,
          stride, slice_height, 0, 0, 0, 0))
    goto color_format_info_failed_to_set;

  GST_DEBUG_OBJECT (encoder,
      "Color format info: {color_format=%d, width=%d, height=%d, "
      "stride=%d, slice-height=%d, crop-left=%d, crop-top=%d, "
      "crop-right=%d, crop-bottom=%d, frame-size=%d}",
      encoder->color_format_info.color_format, encoder->color_format_info.width,
      encoder->color_format_info.height, encoder->color_format_info.stride,
      encoder->color_format_info.slice_height,
      encoder->color_format_info.crop_left, encoder->color_format_info.crop_top,
      encoder->color_format_info.crop_right,
      encoder->color_format_info.crop_bottom,
      encoder->color_format_info.frame_size);

  return format;

video_format_failed_to_convert:
  GST_ERROR_OBJECT (encoder, "Failed to convert video format");
  gst_amc_format_free (format);
  return NULL;

color_format_info_failed_to_set:
  GST_ERROR_OBJECT (encoder, "Failed to set up GstAmcColorFormatInfo");
  gst_amc_format_free (format);
  return NULL;

unsupported_profile:
  GST_ERROR_OBJECT (encoder, "Unsupport profile '%s'", profile_string);
  gst_amc_format_free (format);
  return NULL;

unsupported_level:
  GST_ERROR_OBJECT (encoder, "Unsupport level '%s'", level_string);
  gst_amc_format_free (format);
  return NULL;
}
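/* Hedged sketch: the fixed caps-name to Android MIME mapping used above,
 * as a table. The mpegversion-dependent video/mpeg cases are left out
 * for brevity. */
static const gchar *
caps_name_to_amc_mime (const gchar * name)
{
  static const struct
  {
    const gchar *caps_name;
    const gchar *mime;
  } map[] = {
    {"video/x-h263", "video/3gpp"},
    {"video/x-h264", "video/avc"},
    {"video/x-vp8", "video/x-vnd.on2.vp8"},
  };
  guint i;

  for (i = 0; i < G_N_ELEMENTS (map); i++)
    if (strcmp (name, map[i].caps_name) == 0)
      return map[i].mime;

  return NULL;
}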
Exemplo n.º 28
/*!
 * \brief CvCapture_GStreamer::retrieveFrame
 * \return IplImage pointer. [Transfer Full]
 *  Retrieve the previously grabbed buffer, and wrap it in an IplImage structure
 */
IplImage * CvCapture_GStreamer::retrieveFrame(int)
{
    if(!buffer)
        return 0;

    //construct a frame header if we did not have any yet
    if(!frame)
    {
        gint height, width;

        //reuse the caps ptr
        if (buffer_caps)
            gst_caps_unref(buffer_caps);

#if GST_VERSION_MAJOR == 0
        buffer_caps = gst_buffer_get_caps(buffer);
#else
        buffer_caps = gst_sample_get_caps(sample);
#endif
        // bail out if no caps
        assert(gst_caps_get_size(buffer_caps) == 1);
        GstStructure* structure = gst_caps_get_structure(buffer_caps, 0);

        // bail out if width or height are 0
        if(!gst_structure_get_int(structure, "width", &width) ||
                !gst_structure_get_int(structure, "height", &height))
        {
            return 0;
        }


        int depth = 3;
#if GST_VERSION_MAJOR > 0
        depth = 0;
        const gchar* name = gst_structure_get_name(structure);
        const gchar* format = gst_structure_get_string(structure, "format");

        if (!name || !format)
            return 0;

        // we support 3 types of data:
        //     video/x-raw, format=BGR   -> 8bit, 3 channels
        //     video/x-raw, format=GRAY8 -> 8bit, 1 channel
        //     video/x-bayer             -> 8bit, 1 channel
        // bayer data is never decoded, the user is responsible for that
        // everything is 8 bit, so we just test the caps for bit depth

        if (strcasecmp(name, "video/x-raw") == 0)
        {
            if (strcasecmp(format, "BGR") == 0) {
                depth = 3;
            }
            else if(strcasecmp(format, "GRAY8") == 0){
                depth = 1;
            }
        }
        else if (strcasecmp(name, "video/x-bayer") == 0)
        {
            depth = 1;
        }
#endif
        if (depth > 0) {
            frame = cvCreateImageHeader(cvSize(width, height), IPL_DEPTH_8U, depth);
        }else{
            return 0;
        }
    }

    // gstreamer expects us to handle the memory at this point
    // so we can just wrap the raw buffer and be done with it
#if GST_VERSION_MAJOR == 0
    frame->imageData = (char *)GST_BUFFER_DATA(buffer);
#else
    // the data ptr in a GstMapInfo is only valid for the lifetime of the mapping.
    // TODO: check if reusing the mapinfo object is ok.

    gboolean success = gst_buffer_map(buffer,info, (GstMapFlags)GST_MAP_READ);
    if (!success){
        //something weird went wrong here. abort. abort.
        //fprintf(stderr,"GStreamer: unable to map buffer");
        return 0;
    }
    frame->imageData = (char*)info->data;
    gst_buffer_unmap(buffer,info);
#endif

    return frame;
}
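/* Minimal 1.x sketch of the map/unmap pattern used above: map the buffer
 * for reading, copy out what is needed, and unmap before the pointer can
 * dangle. Purely illustrative, not part of CvCapture_GStreamer. */
static gboolean copy_buffer_bytes(GstBuffer *buffer, guint8 *dst, gsize dst_size)
{
    GstMapInfo map;

    if (!gst_buffer_map(buffer, &map, GST_MAP_READ))
        return FALSE;

    memcpy(dst, map.data, MIN(map.size, dst_size));
    gst_buffer_unmap(buffer, &map);

    return TRUE;
}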
Exemplo n.º 29
static GstFlowReturn
vorbis_handle_identification_packet (GstVorbisDec * vd)
{
  GstCaps *caps;
  const GstAudioChannelPosition *pos = NULL;
  gint width = GST_VORBIS_DEC_DEFAULT_SAMPLE_WIDTH;

  switch (vd->vi.channels) {
    case 1:
    case 2:
      /* nothing */
      break;
    case 3:
    case 4:
    case 5:
    case 6:
    case 7:
    case 8:
      pos = gst_vorbis_channel_positions[vd->vi.channels - 1];
      break;
    default:{
      gint i;
      GstAudioChannelPosition *posn =
          g_new (GstAudioChannelPosition, vd->vi.channels);

      GST_ELEMENT_WARNING (GST_ELEMENT (vd), STREAM, DECODE,
          (NULL), ("Using NONE channel layout for more than 8 channels"));

      for (i = 0; i < vd->vi.channels; i++)
        posn[i] = GST_AUDIO_CHANNEL_POSITION_NONE;

      pos = posn;
    }
  }

  /* negotiate width with downstream */
  caps = gst_pad_get_allowed_caps (vd->srcpad);
  if (caps) {
    if (!gst_caps_is_empty (caps)) {
      GstStructure *s;

      s = gst_caps_get_structure (caps, 0);
      /* template ensures 16 or 32 */
      gst_structure_get_int (s, "width", &width);

      GST_INFO_OBJECT (vd, "using %s with %d channels and %d bit audio depth",
          gst_structure_get_name (s), vd->vi.channels, width);
    }
    gst_caps_unref (caps);
  }
  vd->width = width >> 3;

  /* select a copy_samples function, this way we can have specialized versions
   * for mono/stereo and avoid the depth switch in tremor case */
  vd->copy_samples = get_copy_sample_func (vd->vi.channels, vd->width);

  caps = gst_caps_copy (gst_pad_get_pad_template_caps (vd->srcpad));
  gst_caps_set_simple (caps, "rate", G_TYPE_INT, vd->vi.rate,
      "channels", G_TYPE_INT, vd->vi.channels,
      "width", G_TYPE_INT, width, NULL);

  if (pos) {
    gst_audio_set_channel_positions (gst_caps_get_structure (caps, 0), pos);
  }

  if (vd->vi.channels > 8) {
    g_free ((GstAudioChannelPosition *) pos);
  }

  gst_pad_set_caps (vd->srcpad, caps);
  gst_caps_unref (caps);

  return GST_FLOW_OK;
}
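/* Sketch of the >8-channel fallback above as a helper: every position is
 * set to NONE and the caller owns (and must g_free) the array. */
static GstAudioChannelPosition *
make_none_channel_positions (gint channels)
{
  GstAudioChannelPosition *pos = g_new (GstAudioChannelPosition, channels);
  gint i;

  for (i = 0; i < channels; i++)
    pos[i] = GST_AUDIO_CHANNEL_POSITION_NONE;

  return pos;
}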
Exemplo n.º 30
void eServiceMP3::gstBusCall(GstMessage *msg)
{
	if (!msg)
		return;
	gchar *sourceName;
	GstObject *source;
	source = GST_MESSAGE_SRC(msg);
	if (!GST_IS_OBJECT(source))
		return;
	sourceName = gst_object_get_name(source);
#if 0
	gchar *string;
	if (gst_message_get_structure(msg))
		string = gst_structure_to_string(gst_message_get_structure(msg));
	else
		string = g_strdup(GST_MESSAGE_TYPE_NAME(msg));
	eDebug("eTsRemoteSource::gst_message from %s: %s", sourceName, string);
	g_free(string);
#endif
	switch (GST_MESSAGE_TYPE (msg))
	{
		case GST_MESSAGE_EOS:
			m_event((iPlayableService*)this, evEOF);
			break;
		case GST_MESSAGE_STATE_CHANGED:
		{
			if(GST_MESSAGE_SRC(msg) != GST_OBJECT(m_gst_playbin))
				break;

			GstState old_state, new_state;
			gst_message_parse_state_changed(msg, &old_state, &new_state, NULL);
		
			if(old_state == new_state)
				break;
	
			eDebug("eServiceMP3::state transition %s -> %s", gst_element_state_get_name(old_state), gst_element_state_get_name(new_state));
	
			GstStateChange transition = (GstStateChange)GST_STATE_TRANSITION(old_state, new_state);
	
			switch(transition)
			{
				case GST_STATE_CHANGE_NULL_TO_READY:
				{
				}	break;
				case GST_STATE_CHANGE_READY_TO_PAUSED:
				{
					GstElement *subsink = gst_bin_get_by_name(GST_BIN(m_gst_playbin), "subtitle_sink");
					if (subsink)
					{
#ifdef GSTREAMER_SUBTITLE_SYNC_MODE_BUG
						/* 
						 * HACK: disable sync mode for now, gstreamer suffers from a bug causing sparse streams to lose sync, after pause/resume / skip
						 * see: https://bugzilla.gnome.org/show_bug.cgi?id=619434
						 * Side effect of using sync=false is that we receive subtitle buffers (far) ahead of their
						 * display time.
						 * Not too far ahead for subtitles contained in the media container.
						 * But for external srt files, we could receive all subtitles at once.
						 * And not just once, but after each pause/resume / skip.
						 * So as soon as gstreamer has been fixed to keep sync in sparse streams, sync needs to be re-enabled.
						 */
						g_object_set (G_OBJECT (subsink), "sync", FALSE, NULL);
#endif
#if 0
						/* we should not use ts-offset to sync with the decoder time, we have to do our own decoder timekeeping */
						g_object_set (G_OBJECT (subsink), "ts-offset", -2L * GST_SECOND, NULL);
						/* late buffers probably will not occur very often */
						g_object_set (G_OBJECT (subsink), "max-lateness", 0L, NULL);
						/* avoid prerolling (it might not be a good idea to preroll a sparse stream) */
						g_object_set (G_OBJECT (subsink), "async", TRUE, NULL);
#endif
						eDebug("eServiceMP3::subsink properties set!");
						gst_object_unref(subsink);
					}
					setAC3Delay(ac3_delay);
					setPCMDelay(pcm_delay);
				}	break;
				case GST_STATE_CHANGE_PAUSED_TO_PLAYING:
				{
					if ( m_sourceinfo.is_streaming && m_streamingsrc_timeout )
						m_streamingsrc_timeout->stop();
				}	break;
				case GST_STATE_CHANGE_PLAYING_TO_PAUSED:
				{
				}	break;
				case GST_STATE_CHANGE_PAUSED_TO_READY:
				{
				}	break;
				case GST_STATE_CHANGE_READY_TO_NULL:
				{
				}	break;
			}
			break;
		}
		case GST_MESSAGE_ERROR:
		{
			gchar *debug;
			GError *err;
			gst_message_parse_error (msg, &err, &debug);
			g_free (debug);
			eWarning("Gstreamer error: %s (%i) from %s", err->message, err->code, sourceName );
			if ( err->domain == GST_STREAM_ERROR )
			{
				if ( err->code == GST_STREAM_ERROR_CODEC_NOT_FOUND )
				{
					if ( g_strrstr(sourceName, "videosink") )
						m_event((iPlayableService*)this, evUser+11);
					else if ( g_strrstr(sourceName, "audiosink") )
						m_event((iPlayableService*)this, evUser+10);
				}
			}
			g_error_free(err);
			break;
		}
		case GST_MESSAGE_INFO:
		{
			gchar *debug;
			GError *inf;
	
			gst_message_parse_info (msg, &inf, &debug);
			g_free (debug);
			if ( inf->domain == GST_STREAM_ERROR && inf->code == GST_STREAM_ERROR_DECODE )
			{
				if ( g_strrstr(sourceName, "videosink") )
					m_event((iPlayableService*)this, evUser+14);
			}
			g_error_free(inf);
			break;
		}
		case GST_MESSAGE_TAG:
		{
			GstTagList *tags, *result;
			gst_message_parse_tag(msg, &tags);
	
			result = gst_tag_list_merge(m_stream_tags, tags, GST_TAG_MERGE_REPLACE);
			if (result)
			{
				if (m_stream_tags)
					gst_tag_list_free(m_stream_tags);
				m_stream_tags = result;
			}
	
			const GValue *gv_image = gst_tag_list_get_value_index(tags, GST_TAG_IMAGE, 0);
			if ( gv_image )
			{
				GstBuffer *buf_image;
				buf_image = gst_value_get_buffer (gv_image);
				int fd = open("/tmp/.id3coverart", O_CREAT|O_WRONLY|O_TRUNC, 0644);
				int ret = write(fd, GST_BUFFER_DATA(buf_image), GST_BUFFER_SIZE(buf_image));
				close(fd);
				eDebug("eServiceMP3::/tmp/.id3coverart %d bytes written ", ret);
				m_event((iPlayableService*)this, evUser+13);
			}
			gst_tag_list_free(tags);
			m_event((iPlayableService*)this, evUpdatedInfo);
			break;
		}
		case GST_MESSAGE_ASYNC_DONE:
		{
			if(GST_MESSAGE_SRC(msg) != GST_OBJECT(m_gst_playbin))
				break;

			GstTagList *tags;
			gint i, active_idx, n_video = 0, n_audio = 0, n_text = 0;

			g_object_get (m_gst_playbin, "n-video", &n_video, NULL);
			g_object_get (m_gst_playbin, "n-audio", &n_audio, NULL);
			g_object_get (m_gst_playbin, "n-text", &n_text, NULL);

			eDebug("eServiceMP3::async-done - %d video, %d audio, %d subtitle", n_video, n_audio, n_text);

			if ( n_video + n_audio <= 0 )
				stop();

			active_idx = 0;

			m_audioStreams.clear();
			m_subtitleStreams.clear();

			for (i = 0; i < n_audio; i++)
			{
				audioStream audio;
				gchar *g_codec, *g_lang;
				GstPad* pad = 0;
				g_signal_emit_by_name (m_gst_playbin, "get-audio-pad", i, &pad);
				GstCaps* caps = gst_pad_get_negotiated_caps(pad);
				if (!caps)
					continue;
				GstStructure* str = gst_caps_get_structure(caps, 0);
				const gchar *g_type = gst_structure_get_name(str);
				eDebug("AUDIO STRUCT=%s", g_type);
				audio.type = gstCheckAudioPad(str);
				g_codec = g_strdup(g_type);
				g_lang = g_strdup_printf ("und");
				g_signal_emit_by_name (m_gst_playbin, "get-audio-tags", i, &tags);
				if ( tags && gst_is_tag_list(tags) )
				{
					gst_tag_list_get_string(tags, GST_TAG_AUDIO_CODEC, &g_codec);
					gst_tag_list_get_string(tags, GST_TAG_LANGUAGE_CODE, &g_lang);
					gst_tag_list_free(tags);
				}
				audio.language_code = std::string(g_lang);
				audio.codec = std::string(g_codec);
				eDebug("eServiceMP3::audio stream=%i codec=%s language=%s", i, g_codec, g_lang);
				m_audioStreams.push_back(audio);
				g_free (g_lang);
				g_free (g_codec);
				gst_caps_unref(caps);
			}

			for (i = 0; i < n_text; i++)
			{	
				gchar *g_codec = NULL, *g_lang = NULL;
				g_signal_emit_by_name (m_gst_playbin, "get-text-tags", i, &tags);
				subtitleStream subs;

				g_lang = g_strdup_printf ("und");
				if ( tags && gst_is_tag_list(tags) )
				{
					gst_tag_list_get_string(tags, GST_TAG_LANGUAGE_CODE, &g_lang);
					gst_tag_list_get_string(tags, GST_TAG_SUBTITLE_CODEC, &g_codec);
					gst_tag_list_free(tags);
				}

				subs.language_code = std::string(g_lang);
				eDebug("eServiceMP3::subtitle stream=%i language=%s codec=%s", i, g_lang, g_codec);
				
				GstPad* pad = 0;
				g_signal_emit_by_name (m_gst_playbin, "get-text-pad", i, &pad);
				if ( pad )
					g_signal_connect (G_OBJECT (pad), "notify::caps", G_CALLBACK (gstTextpadHasCAPS), this);
				subs.type = getSubtitleType(pad, g_codec);

				m_subtitleStreams.push_back(subs);
				g_free (g_lang);
			}
			m_event((iPlayableService*)this, evUpdatedInfo);

			if ( m_errorInfo.missing_codec != "" )
			{
				if ( m_errorInfo.missing_codec.find("video/") == 0 || ( m_errorInfo.missing_codec.find("audio/") == 0 && getNumberOfTracks() == 0 ) )
					m_event((iPlayableService*)this, evUser+12);
			}
			break;
		}
		case GST_MESSAGE_ELEMENT:
		{
			if (const GstStructure *msgstruct = gst_message_get_structure(msg))
			{
				if ( gst_is_missing_plugin_message(msg) )
				{
					GstCaps *caps;
					gst_structure_get (msgstruct, "detail", GST_TYPE_CAPS, &caps, NULL);
					gchar *codec_str = gst_caps_to_string(caps);
					std::string codec = codec_str ? codec_str : "";
					g_free(codec_str);
					gchar *description = gst_missing_plugin_message_get_description(msg);
					if ( description )
					{
						eDebug("eServiceMP3::m_errorInfo.missing_codec = %s", codec.c_str());
						m_errorInfo.error_message = "GStreamer plugin " + (std::string)description + " not available!\n";
						m_errorInfo.missing_codec = codec.substr(0,(codec.find_first_of(',')));
						g_free(description);
					}
					gst_caps_unref(caps);
				}
				else
				{
					const gchar *eventname = gst_structure_get_name(msgstruct);
					if ( eventname )
					{
						if (!strcmp(eventname, "eventSizeChanged") || !strcmp(eventname, "eventSizeAvail"))
						{
							gst_structure_get_int (msgstruct, "aspect_ratio", &m_aspect);
							gst_structure_get_int (msgstruct, "width", &m_width);
							gst_structure_get_int (msgstruct, "height", &m_height);
							if (strstr(eventname, "Changed"))
								m_event((iPlayableService*)this, evVideoSizeChanged);
						}
						else if (!strcmp(eventname, "eventFrameRateChanged") || !strcmp(eventname, "eventFrameRateAvail"))
						{
							gst_structure_get_int (msgstruct, "frame_rate", &m_framerate);
							if (strstr(eventname, "Changed"))
								m_event((iPlayableService*)this, evVideoFramerateChanged);
						}
						else if (!strcmp(eventname, "eventProgressiveChanged") || !strcmp(eventname, "eventProgressiveAvail"))
						{
							gst_structure_get_int (msgstruct, "progressive", &m_progressive);
							if (strstr(eventname, "Changed"))
								m_event((iPlayableService*)this, evVideoProgressiveChanged);
						}
					}
				}
			}
			break;
		}
		case GST_MESSAGE_BUFFERING:
		{
			GstBufferingMode mode;
			gst_message_parse_buffering(msg, &(m_bufferInfo.bufferPercent));
			gst_message_parse_buffering_stats(msg, &mode, &(m_bufferInfo.avgInRate), &(m_bufferInfo.avgOutRate), &(m_bufferInfo.bufferingLeft));
			m_event((iPlayableService*)this, evBuffering);
			break;
		}
		case GST_MESSAGE_STREAM_STATUS:
		{
			GstStreamStatusType type;
			GstElement *owner;
			gst_message_parse_stream_status (msg, &type, &owner);
			if ( type == GST_STREAM_STATUS_TYPE_CREATE && m_sourceinfo.is_streaming )
			{
				if ( GST_IS_PAD(source) )
					owner = gst_pad_get_parent_element(GST_PAD(source));
				else if ( GST_IS_ELEMENT(source) )
					owner = GST_ELEMENT(source);
				else
					owner = 0;
				if ( owner )
				{
					GstElementFactory *factory = gst_element_get_factory(GST_ELEMENT(owner));
					const gchar *name = gst_plugin_feature_get_name(GST_PLUGIN_FEATURE(factory));
					if (!strcmp(name, "souphttpsrc"))
					{
						m_streamingsrc_timeout->start(HTTP_TIMEOUT*1000, true);
						g_object_set (G_OBJECT (owner), "timeout", HTTP_TIMEOUT, NULL);
						eDebug("eServiceMP3::GST_STREAM_STATUS_TYPE_CREATE -> setting timeout on %s to %is", name, HTTP_TIMEOUT);
					}
					
				}
				if ( GST_IS_PAD(source) )
					gst_object_unref(owner);
			}
			break;
		}
		default:
			break;
	}
	g_free (sourceName);
}
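/* Hedged sketch: the missing-plugin branch above reduced to the two
 * pbutils calls that matter. Assumes gst/pbutils/missing-plugins.h is
 * included; returns an empty string when the message does not apply. */
static std::string missingPluginDescription(GstMessage *msg)
{
	std::string result;
	if (gst_is_missing_plugin_message(msg))
	{
		gchar *description = gst_missing_plugin_message_get_description(msg);
		if (description)
		{
			result = description;
			g_free(description);
		}
	}
	return result;
}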