static gboolean
gst_musepack_stream_init (GstMusepackDec * musepackdec)
{
    mpc_streaminfo i;
    GstTagList *tags;
    GstCaps *caps;

    /* set up reading */
    gst_musepack_init_reader (musepackdec->r, musepackdec);

#ifdef MPC_IS_OLD_API
    /* streaminfo */
    mpc_streaminfo_init (&i);
    if (mpc_streaminfo_read (&i, musepackdec->r) < 0) {
        GST_ELEMENT_ERROR (musepackdec, STREAM, WRONG_TYPE, (NULL), (NULL));
        return FALSE;
    }

    /* decoding */
    mpc_decoder_setup (musepackdec->d, musepackdec->r);
    mpc_decoder_scale_output (musepackdec->d, 1.0);
    if (!mpc_decoder_initialize (musepackdec->d, &i)) {
        GST_ELEMENT_ERROR (musepackdec, STREAM, WRONG_TYPE, (NULL), (NULL));
        return FALSE;
    }
#else
    musepackdec->d = mpc_demux_init (musepackdec->r);
    if (!musepackdec->d) {
        GST_ELEMENT_ERROR (musepackdec, STREAM, WRONG_TYPE, (NULL), (NULL));
        return FALSE;
    }

    mpc_demux_get_info (musepackdec->d, &i);
#endif

    /* capsnego */
    caps = gst_caps_from_string (BASE_CAPS);
    gst_caps_set_simple (caps,
                         "endianness", G_TYPE_INT, G_BYTE_ORDER,
                         "channels", G_TYPE_INT, i.channels,
                         "rate", G_TYPE_INT, i.sample_freq, NULL);
    gst_pad_use_fixed_caps (musepackdec->srcpad);
    if (!gst_pad_set_caps (musepackdec->srcpad, caps)) {
        GST_ELEMENT_ERROR (musepackdec, CORE, NEGOTIATION, (NULL), (NULL));
        gst_caps_unref (caps);
        return FALSE;
    }
    gst_caps_unref (caps);

    g_atomic_int_set (&musepackdec->bps, 4 * i.channels);
    g_atomic_int_set (&musepackdec->rate, i.sample_freq);

    gst_segment_set_last_stop (&musepackdec->segment, GST_FORMAT_DEFAULT, 0);
    gst_segment_set_duration (&musepackdec->segment, GST_FORMAT_DEFAULT,
                              mpc_streaminfo_get_length_samples (&i));

    /* send basic tags */
    tags = gst_tag_list_new ();
    gst_tag_list_add (tags, GST_TAG_MERGE_REPLACE,
                      GST_TAG_AUDIO_CODEC, "Musepack", NULL);

    if (i.encoder[0] != '\0' && i.encoder_version > 0) {
        gst_tag_list_add (tags, GST_TAG_MERGE_REPLACE,
                          GST_TAG_ENCODER, i.encoder,
                          GST_TAG_ENCODER_VERSION, i.encoder_version, NULL);
    }

    if (i.bitrate > 0) {
        gst_tag_list_add (tags, GST_TAG_MERGE_REPLACE,
                          GST_TAG_BITRATE, i.bitrate, NULL);
    } else if (i.average_bitrate > 0.0) {
        gst_tag_list_add (tags, GST_TAG_MERGE_REPLACE,
                          GST_TAG_BITRATE, (guint) i.average_bitrate, NULL);
    }

    if (i.gain_title != 0 || i.gain_album != 0) {
        gst_tag_list_add (tags, GST_TAG_MERGE_REPLACE,
                          GST_TAG_TRACK_GAIN, (gdouble) i.gain_title / 100.0,
                          GST_TAG_ALBUM_GAIN, (gdouble) i.gain_album / 100.0, NULL);
    }

    if (i.peak_title != 0 && i.peak_title != 32767 &&
            i.peak_album != 0 && i.peak_album != 32767) {
        gst_tag_list_add (tags, GST_TAG_MERGE_REPLACE,
                          GST_TAG_TRACK_PEAK, (gdouble) i.peak_title / 32767.0,
                          GST_TAG_ALBUM_PEAK, (gdouble) i.peak_album / 32767.0, NULL);
    }

    GST_LOG_OBJECT (musepackdec, "Posting tags: %" GST_PTR_FORMAT, tags);
    gst_element_found_tags (GST_ELEMENT (musepackdec), tags);

    return TRUE;
}
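BASE_CAPS is referenced above but defined elsewhere in the plugin. Since bps is set to 4 bytes per channel and the decoder output is scaled floats, it is presumably 32-bit raw float audio, along these lines (an assumption, not taken from this listing):

/* Hypothetical definition of the BASE_CAPS used above. */
#define BASE_CAPS "audio/x-raw-float, width = (int) 32"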
static GstPadLinkReturn
gst_hermes_colorspace_link (GstPad * pad, const GstCaps * caps)
{
  GstHermesColorspace *space;
  GstPad *otherpad;
  GstStructure *structure;
  GstPadLinkReturn link_ret;
  int width = 0, height = 0;
  double fps = 0.0;
  int i;

  space = GST_HERMES_COLORSPACE (gst_pad_get_parent (pad));
  otherpad = (pad == space->sinkpad) ? space->srcpad : space->sinkpad;

  link_ret = gst_pad_try_set_caps (otherpad, caps);
  if (link_ret == GST_PAD_LINK_OK) {
    space->passthru = TRUE;
    return link_ret;
  }

  structure = gst_caps_get_structure (caps, 0);

  for (i = 0; i < G_N_ELEMENTS (gst_hermes_colorspace_formats); i++) {
    GstCaps *icaps;
    GstCaps *fcaps;

    fcaps =
        gst_caps_copy (gst_static_caps_get (&gst_hermes_colorspace_formats
            [i].caps));

    icaps = gst_caps_intersect (caps, fcaps);
    gst_caps_free (fcaps);
    if (!gst_caps_is_empty (icaps)) {
      gst_caps_free (icaps);
      break;
    }
    gst_caps_free (icaps);
  }
  if (i == G_N_ELEMENTS (gst_hermes_colorspace_formats)) {
    g_assert_not_reached ();
    return GST_PAD_LINK_REFUSED;
  }

  gst_structure_get_int (structure, "width", &width);
  gst_structure_get_int (structure, "height", &height);
  gst_structure_get_double (structure, "framerate", &fps);

  GST_INFO ("size: %dx%d", space->width, space->height);

  if (gst_pad_is_negotiated (otherpad)) {
    GstCaps *othercaps;

    othercaps = gst_caps_copy (gst_pad_get_negotiated_caps (otherpad));

    gst_caps_set_simple (othercaps,
        "width", G_TYPE_INT, width,
        "height", G_TYPE_INT, height, "framerate", G_TYPE_DOUBLE, fps, NULL);

    link_ret = gst_pad_try_set_caps (otherpad, othercaps);
    if (link_ret != GST_PAD_LINK_OK) {
      return link_ret;
    }
  }

  if (pad == space->srcpad) {
    space->src_format_index = i;
    gst_hermes_colorspace_structure_to_hermes_format (&space->src_format,
        structure);
  } else {
    space->sink_format_index = i;
    gst_hermes_colorspace_structure_to_hermes_format (&space->sink_format,
        structure);
  }

  space->sink_stride = width * (space->sink_format.bits / 8);
  space->src_stride = width * (space->src_format.bits / 8);
  space->sink_size = space->sink_stride * height;
  space->src_size = space->src_stride * height;
  space->width = width;
  space->height = height;
  space->fps = fps;

  if (gst_pad_is_negotiated (otherpad)) {
    if (!Hermes_ConverterRequest (space->h_handle, &space->sink_format,
            &space->src_format)) {
      g_warning ("Hermes: could not get converter\n");
      return GST_PAD_LINK_REFUSED;
    }
    g_print ("inited\n");
  }

  return GST_PAD_LINK_OK;
}
static void
gst_soup_http_src_got_headers_cb (SoupMessage * msg, GstSoupHTTPSrc * src)
{
  const char *value;
  GstTagList *tag_list;
  GstBaseSrc *basesrc;
  guint64 newsize;
  GHashTable *params = NULL;

  GST_DEBUG_OBJECT (src, "got headers:");
  soup_message_headers_foreach (msg->response_headers,
      gst_soup_http_src_headers_foreach, src);

  if (msg->status_code == 407 && src->proxy_id && src->proxy_pw)
    return;

  if (src->automatic_redirect && SOUP_STATUS_IS_REDIRECTION (msg->status_code)) {
    GST_DEBUG_OBJECT (src, "%u redirect to \"%s\"", msg->status_code,
        soup_message_headers_get_one (msg->response_headers, "Location"));
    return;
  }

  if (msg->status_code == SOUP_STATUS_UNAUTHORIZED)
    return;

  src->session_io_status = GST_SOUP_HTTP_SRC_SESSION_IO_STATUS_RUNNING;

  /* Parse Content-Length. */
  if (soup_message_headers_get_encoding (msg->response_headers) ==
      SOUP_ENCODING_CONTENT_LENGTH) {
    newsize = src->request_position +
        soup_message_headers_get_content_length (msg->response_headers);
    if (!src->have_size || (src->content_size != newsize)) {
      src->content_size = newsize;
      src->have_size = TRUE;
      src->seekable = TRUE;
      GST_DEBUG_OBJECT (src, "size = %" G_GUINT64_FORMAT, src->content_size);

      basesrc = GST_BASE_SRC_CAST (src);
      gst_segment_set_duration (&basesrc->segment, GST_FORMAT_BYTES,
          src->content_size);
      gst_element_post_message (GST_ELEMENT (src),
          gst_message_new_duration (GST_OBJECT (src), GST_FORMAT_BYTES,
              src->content_size));
    }
  }

  /* Icecast stuff */
  tag_list = gst_tag_list_new ();

  if ((value =
          soup_message_headers_get_one (msg->response_headers,
              "icy-metaint")) != NULL) {
    gint icy_metaint = atoi (value);

    GST_DEBUG_OBJECT (src, "icy-metaint: %s (parsed: %d)", value, icy_metaint);
    if (icy_metaint > 0) {
      if (src->src_caps)
        gst_caps_unref (src->src_caps);

      src->src_caps = gst_caps_new_simple ("application/x-icy",
          "metadata-interval", G_TYPE_INT, icy_metaint, NULL);
    }
  }
  if ((value =
          soup_message_headers_get_content_type (msg->response_headers,
              &params)) != NULL) {
    GST_DEBUG_OBJECT (src, "Content-Type: %s", value);
    if (g_ascii_strcasecmp (value, "audio/L16") == 0) {
      gint channels = 2;
      gint rate = 44100;
      char *param;

      if (src->src_caps)
        gst_caps_unref (src->src_caps);

      param = g_hash_table_lookup (params, "channels");
      if (param != NULL)
        channels = atol (param);

      param = g_hash_table_lookup (params, "rate");
      if (param != NULL)
        rate = atol (param);

      src->src_caps = gst_caps_new_simple ("audio/x-raw-int",
          "channels", G_TYPE_INT, channels,
          "rate", G_TYPE_INT, rate,
          "width", G_TYPE_INT, 16,
          "depth", G_TYPE_INT, 16,
          "signed", G_TYPE_BOOLEAN, TRUE,
          "endianness", G_TYPE_INT, G_BIG_ENDIAN, NULL);
    } else {
      /* Set the Content-Type field on the caps */
      if (src->src_caps)
        gst_caps_set_simple (src->src_caps, "content-type", G_TYPE_STRING,
            value, NULL);
    }
  }

  if (params != NULL)
    g_hash_table_destroy (params);

  if ((value =
          soup_message_headers_get_one (msg->response_headers,
              "icy-name")) != NULL) {
    g_free (src->iradio_name);
    src->iradio_name = gst_soup_http_src_unicodify (value);
    if (src->iradio_name) {
      g_object_notify (G_OBJECT (src), "iradio-name");
      gst_tag_list_add (tag_list, GST_TAG_MERGE_REPLACE, GST_TAG_ORGANIZATION,
          src->iradio_name, NULL);
    }
  }
  if ((value =
          soup_message_headers_get_one (msg->response_headers,
              "icy-genre")) != NULL) {
    g_free (src->iradio_genre);
    src->iradio_genre = gst_soup_http_src_unicodify (value);
    if (src->iradio_genre) {
      g_object_notify (G_OBJECT (src), "iradio-genre");
      gst_tag_list_add (tag_list, GST_TAG_MERGE_REPLACE, GST_TAG_GENRE,
          src->iradio_genre, NULL);
    }
  }
  if ((value = soup_message_headers_get_one (msg->response_headers, "icy-url"))
      != NULL) {
    g_free (src->iradio_url);
    src->iradio_url = gst_soup_http_src_unicodify (value);
    if (src->iradio_url) {
      g_object_notify (G_OBJECT (src), "iradio-url");
      gst_tag_list_add (tag_list, GST_TAG_MERGE_REPLACE, GST_TAG_LOCATION,
          src->iradio_url, NULL);
    }
  }
  if (!gst_tag_list_is_empty (tag_list)) {
    GST_DEBUG_OBJECT (src,
        "calling gst_element_found_tags with %" GST_PTR_FORMAT, tag_list);
    gst_element_found_tags (GST_ELEMENT_CAST (src), tag_list);
  } else {
    gst_tag_list_free (tag_list);
  }

  /* Handle HTTP errors. */
  gst_soup_http_src_parse_status (msg, src);

  /* Check if Range header was respected. */
  if (src->ret == GST_FLOW_CUSTOM_ERROR &&
      src->read_position && msg->status_code != SOUP_STATUS_PARTIAL_CONTENT) {
    src->seekable = FALSE;
    GST_ELEMENT_ERROR (src, RESOURCE, SEEK,
        (_("Server does not support seeking.")),
        ("Server does not accept Range HTTP header, URL: %s", src->location));
    src->ret = GST_FLOW_ERROR;
  }
}
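gst_soup_http_src_unicodify() is called above but not shown here. It is plausibly a thin wrapper around gst_tag_freeform_string_to_utf8() that honours the usual tag-encoding environment variables; a sketch under that assumption:

static char *
gst_soup_http_src_unicodify (const char *str)
{
  /* Try the ICY/tag encodings from the environment before falling back to
   * the library defaults. */
  const gchar *env_vars[] = { "GST_ICY_TAG_ENCODING",
    "GST_TAG_ENCODING", NULL
  };

  return gst_tag_freeform_string_to_utf8 (str, -1, env_vars);
}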
static void
run_test (const GstCaps * caps, gint src_width, gint src_height,
    gint dest_width, gint dest_height, gint method,
    GCallback src_handoff, gpointer src_handoff_user_data,
    GCallback sink_handoff, gpointer sink_handoff_user_data)
{
  GstElement *pipeline;
  GstElement *src, *capsfilter1, *identity, *scale, *capsfilter2, *sink;
  GstBus *bus;
  GMainLoop *loop;
  GstCaps *copy;
  guint n_buffers = 0;
  OnMessageUserData omud = { NULL, };

  pipeline = gst_element_factory_make ("pipeline", "pipeline");
  fail_unless (pipeline != NULL);

  src = gst_element_factory_make ("videotestsrc", "src");
  fail_unless (src != NULL);
  g_object_set (G_OBJECT (src), "num-buffers", 5, NULL);

  capsfilter1 = gst_element_factory_make ("capsfilter", "filter1");
  fail_unless (capsfilter1 != NULL);
  copy = gst_caps_copy (caps);
  gst_caps_set_simple (copy, "width", G_TYPE_INT, src_width, "height",
      G_TYPE_INT, src_height, "framerate", GST_TYPE_FRACTION, 30, 1, NULL);
  g_object_set (G_OBJECT (capsfilter1), "caps", copy, NULL);
  gst_caps_unref (copy);

  identity = gst_element_factory_make ("identity", "identity");
  fail_unless (identity != NULL);
  if (src_handoff) {
    g_object_set (G_OBJECT (identity), "signal-handoffs", TRUE, NULL);
    g_signal_connect (identity, "handoff", G_CALLBACK (src_handoff),
        src_handoff_user_data);
  }

  scale = gst_element_factory_make ("videoscale", "scale");
  fail_unless (scale != NULL);
  g_object_set (G_OBJECT (scale), "method", method, NULL);

  capsfilter2 = gst_element_factory_make ("capsfilter", "filter2");
  fail_unless (capsfilter2 != NULL);
  copy = gst_caps_copy (caps);
  gst_caps_set_simple (copy, "width", G_TYPE_INT, dest_width, "height",
      G_TYPE_INT, dest_height, NULL);
  g_object_set (G_OBJECT (capsfilter2), "caps", copy, NULL);
  gst_caps_unref (copy);

  sink = gst_element_factory_make ("fakesink", "sink");
  fail_unless (sink != NULL);
  g_object_set (G_OBJECT (sink), "signal-handoffs", TRUE, "async", FALSE, NULL);
  g_signal_connect (sink, "handoff", G_CALLBACK (on_sink_handoff), &n_buffers);
  if (sink_handoff) {
    g_signal_connect (sink, "handoff", G_CALLBACK (sink_handoff),
        sink_handoff_user_data);
  }

  gst_bin_add_many (GST_BIN (pipeline), src, capsfilter1, identity, scale,
      capsfilter2, sink, NULL);
  fail_unless (gst_element_link_many (src, capsfilter1, identity, scale,
          capsfilter2, sink, NULL));

  loop = g_main_loop_new (NULL, FALSE);

  bus = gst_element_get_bus (pipeline);
  fail_unless (bus != NULL);
  gst_bus_add_signal_watch (bus);

  omud.loop = loop;
  omud.eos = FALSE;

  g_signal_connect (bus, "message", (GCallback) on_message_cb, &omud);

  gst_object_unref (bus);

  fail_unless (gst_element_set_state (pipeline,
          GST_STATE_PLAYING) == GST_STATE_CHANGE_SUCCESS);

  g_main_loop_run (loop);

  fail_unless (gst_element_set_state (pipeline,
          GST_STATE_NULL) == GST_STATE_CHANGE_SUCCESS);

  fail_unless (omud.eos == TRUE);
  fail_unless (n_buffers == 5);

  gst_object_unref (pipeline);
  g_main_loop_unref (loop);
}
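A hypothetical caller of the run_test() helper above, in the usual Check style; the caps string, sizes and method value are assumptions, not taken from the original suite:

GST_START_TEST (test_downscale_640x480_to_320x240)
{
  GstCaps *caps =
      gst_caps_from_string ("video/x-raw-yuv, format=(fourcc)I420");

  /* method 0 is nearest-neighbour scaling in videoscale */
  run_test (caps, 640, 480, 320, 240, 0, NULL, NULL, NULL, NULL);

  gst_caps_unref (caps);
}
GST_END_TEST;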
static gboolean
gst_vtdec_negotiate (GstVideoDecoder * decoder)
{
  GstVideoCodecState *output_state = NULL;
  GstCaps *peercaps = NULL, *caps = NULL, *templcaps = NULL, *prevcaps = NULL;
  GstVideoFormat format;
  GstStructure *structure;
  const gchar *s;
  GstVtdec *vtdec;
  OSStatus err = noErr;
  GstCapsFeatures *features = NULL;
  gboolean output_textures = FALSE;

  vtdec = GST_VTDEC (decoder);
  if (vtdec->session)
    gst_vtdec_push_frames_if_needed (vtdec, TRUE, FALSE);

  output_state = gst_video_decoder_get_output_state (GST_VIDEO_DECODER (vtdec));
  if (output_state) {
    prevcaps = gst_caps_ref (output_state->caps);
    gst_video_codec_state_unref (output_state);
  }

  peercaps = gst_pad_peer_query_caps (GST_VIDEO_DECODER_SRC_PAD (vtdec), NULL);
  if (prevcaps && gst_caps_can_intersect (prevcaps, peercaps)) {
    /* The hardware decoder can become (temporarily) unavailable across
     * VTDecompressionSessionCreate/Destroy calls. So if the currently configured
     * caps are still accepted by downstream we keep them so we don't have to
     * destroy and recreate the session.
     */
    GST_INFO_OBJECT (vtdec,
        "current and peer caps are compatible, keeping current caps");
    caps = gst_caps_ref (prevcaps);
  } else {
    templcaps =
        gst_pad_get_pad_template_caps (GST_VIDEO_DECODER_SRC_PAD (decoder));
    caps =
        gst_caps_intersect_full (peercaps, templcaps, GST_CAPS_INTERSECT_FIRST);
    gst_caps_unref (templcaps);
  }
  gst_caps_unref (peercaps);

  caps = gst_caps_truncate (gst_caps_make_writable (caps));
  structure = gst_caps_get_structure (caps, 0);
  s = gst_structure_get_string (structure, "format");
  format = gst_video_format_from_string (s);
  features = gst_caps_get_features (caps, 0);
  if (features)
    features = gst_caps_features_copy (features);

  output_state = gst_video_decoder_set_output_state (GST_VIDEO_DECODER (vtdec),
      format, vtdec->video_info.width, vtdec->video_info.height,
      vtdec->input_state);
  output_state->caps = gst_video_info_to_caps (&output_state->info);
  if (features) {
    gst_caps_set_features (output_state->caps, 0, features);
    output_textures =
        gst_caps_features_contains (features,
        GST_CAPS_FEATURE_MEMORY_GL_MEMORY);
    if (output_textures)
      gst_caps_set_simple (output_state->caps, "texture-target", G_TYPE_STRING,
#if !HAVE_IOS
          GST_GL_TEXTURE_TARGET_RECTANGLE_STR,
#else
          GST_GL_TEXTURE_TARGET_2D_STR,
#endif
          NULL);
  }
  gst_caps_unref (caps);

  if (!prevcaps || !gst_caps_is_equal (prevcaps, output_state->caps)) {
    gboolean renegotiating = vtdec->session != NULL;

    GST_INFO_OBJECT (vtdec,
        "negotiated output format %" GST_PTR_FORMAT " previous %"
        GST_PTR_FORMAT, output_state->caps, prevcaps);

    if (vtdec->session)
      gst_vtdec_invalidate_session (vtdec);

    err = gst_vtdec_create_session (vtdec, format, TRUE);
    if (err == noErr) {
      GST_INFO_OBJECT (vtdec, "using hardware decoder");
    } else if (err == kVTVideoDecoderNotAvailableNowErr && renegotiating) {
      GST_WARNING_OBJECT (vtdec, "hw decoder not available anymore");
      err = gst_vtdec_create_session (vtdec, format, FALSE);
    }

    if (err != noErr) {
      GST_ELEMENT_ERROR (vtdec, RESOURCE, FAILED, (NULL),
          ("VTDecompressionSessionCreate returned %d", (int) err));
    }
  }

  if (vtdec->texture_cache != NULL && !output_textures) {
    gst_video_texture_cache_free (vtdec->texture_cache);
    vtdec->texture_cache = NULL;
  }

  if (err == noErr && output_textures) {
    /* call this regardless of whether caps have changed or not since a new
     * local context could have become available
     */
    gst_gl_context_helper_ensure_context (vtdec->ctxh);

    GST_INFO_OBJECT (vtdec, "pushing textures, context %p old context %p",
        vtdec->ctxh->context,
        vtdec->texture_cache ? vtdec->texture_cache->ctx : NULL);

    if (vtdec->texture_cache
        && vtdec->texture_cache->ctx != vtdec->ctxh->context) {
      gst_video_texture_cache_free (vtdec->texture_cache);
      vtdec->texture_cache = NULL;
    }
    if (!vtdec->texture_cache)
      setup_texture_cache (vtdec, vtdec->ctxh->context);
  }

  if (prevcaps)
    gst_caps_unref (prevcaps);

  if (err != noErr)
    return FALSE;

  return GST_VIDEO_DECODER_CLASS (gst_vtdec_parent_class)->negotiate (decoder);
}
/*
 * owr_local_media_source_request_source
 *
 * The beginning of a media source chain in the pipeline looks like this:
 *                                                             +------------+
 *                                                         /---+ inter*sink |
 * +--------+    +--------+   +------------+   +-----+    /    +------------+
 * | source +----+ scale? +---+ capsfilter +---+ tee +---/
 * +--------+    +--------+   +------------+   +-----+   \
 *                                                        \    +------------+
 *                                                         \---+ inter*sink |
 *                                                             +------------+
 *
 * For each newly requested pad a new inter*sink is added to the tee.
 * Note that this is a completely independent pipeline, and the complete
 * pipeline is only created once for a specific media source.
 *
 * Then for each newly requested pad another bin with a inter*src is
 * created, which is then going to be part of the transport agent
 * pipeline. The ghostpad of it is what we return here.
 *
 * +-----------+   +-------------------------------+   +----------+
 * | inter*src +---+ converters/queues/capsfilters +---+ ghostpad |
 * +-----------+   +-------------------------------+   +----------+
 *
 */
static GstElement *owr_local_media_source_request_source(OwrMediaSource *media_source, GstCaps *caps)
{
    OwrLocalMediaSource *local_source;
    OwrLocalMediaSourcePrivate *priv;
    GstElement *source_element = NULL;
    GstElement *source_pipeline;
    GHashTable *event_data;
    GValue *value;
#if defined(__linux__) && !defined(__ANDROID__)
    gchar *tmp;
#endif

    g_assert(media_source);
    local_source = OWR_LOCAL_MEDIA_SOURCE(media_source);
    priv = local_source->priv;

    /* only create the source bin for this media source once */
    if ((source_pipeline = _owr_media_source_get_source_bin(media_source)))
        GST_DEBUG_OBJECT(media_source, "Re-using existing source element/bin");
    else {
        OwrMediaType media_type = OWR_MEDIA_TYPE_UNKNOWN;
        OwrSourceType source_type = OWR_SOURCE_TYPE_UNKNOWN;
        GstElement *source, *source_process = NULL, *capsfilter = NULL, *tee;
        GstPad *sinkpad, *source_pad;
        GEnumClass *media_enum_class, *source_enum_class;
        GEnumValue *media_enum_value, *source_enum_value;
        gchar *bin_name;
        GstCaps *source_caps;
        GstBus *bus;
        GSource *bus_source;

        event_data = _owr_value_table_new();
        value = _owr_value_table_add(event_data, "start_time", G_TYPE_INT64);
        g_value_set_int64(value, g_get_monotonic_time());

        g_object_get(media_source, "media-type", &media_type, "type", &source_type, NULL);

        media_enum_class = G_ENUM_CLASS(g_type_class_ref(OWR_TYPE_MEDIA_TYPE));
        source_enum_class = G_ENUM_CLASS(g_type_class_ref(OWR_TYPE_SOURCE_TYPE));
        media_enum_value = g_enum_get_value(media_enum_class, media_type);
        source_enum_value = g_enum_get_value(source_enum_class, source_type);

        bin_name = g_strdup_printf("local-%s-%s-source-bin-%u",
            media_enum_value ? media_enum_value->value_nick : "unknown",
            source_enum_value ? source_enum_value->value_nick : "unknown",
            g_atomic_int_add(&unique_bin_id, 1));

        g_type_class_unref(media_enum_class);
        g_type_class_unref(source_enum_class);

        source_pipeline = gst_pipeline_new(bin_name);
        gst_pipeline_use_clock(GST_PIPELINE(source_pipeline), gst_system_clock_obtain());
        gst_element_set_base_time(source_pipeline, _owr_get_base_time());
        gst_element_set_start_time(source_pipeline, GST_CLOCK_TIME_NONE);
        g_free(bin_name);
        bin_name = NULL;

#ifdef OWR_DEBUG
        g_signal_connect(source_pipeline, "deep-notify", G_CALLBACK(_owr_deep_notify), NULL);
#endif

        bus = gst_pipeline_get_bus(GST_PIPELINE(source_pipeline));
        bus_source = gst_bus_create_watch(bus);
        g_source_set_callback(bus_source, (GSourceFunc) bus_call, media_source, NULL);
        g_source_attach(bus_source, _owr_get_main_context());
        g_source_unref(bus_source);

        GST_DEBUG_OBJECT(local_source, "media_type: %d, type: %d", media_type, source_type);

        if (media_type == OWR_MEDIA_TYPE_UNKNOWN || source_type == OWR_SOURCE_TYPE_UNKNOWN) {
            GST_ERROR_OBJECT(local_source,
                "Cannot connect source with unknown type or media type to other component");
            goto done;
        }

        switch (media_type) {
        case OWR_MEDIA_TYPE_AUDIO:
            {
            switch (source_type) {
            case OWR_SOURCE_TYPE_CAPTURE:
                CREATE_ELEMENT(source, AUDIO_SRC, "audio-source");
#if !defined(__APPLE__) || !TARGET_IPHONE_SIMULATOR
/*
    Default values for buffer-time and latency-time on android are 200ms and 20ms.
    The minimum latency-time that can be used on Android is 20ms, and using
    a 40ms buffer-time with a 20ms latency-time causes crackling audio.
    So let's just stick with the defaults.
*/
#if !defined(__ANDROID__)
                g_object_set(source, "buffer-time", G_GINT64_CONSTANT(40000),
                    "latency-time", G_GINT64_CONSTANT(10000), NULL);
#endif
                if (priv->device_index > -1) {
#ifdef __APPLE__
                    g_object_set(source, "device", priv->device_index, NULL);
#elif defined(__linux__) && !defined(__ANDROID__)
                    tmp = g_strdup_printf("%d", priv->device_index);
                    g_object_set(source, "device", tmp, NULL);
                    g_free(tmp);
#endif
                }
#endif
                break;
            case OWR_SOURCE_TYPE_TEST:
                CREATE_ELEMENT(source, "audiotestsrc", "audio-source");
                g_object_set(source, "is-live", TRUE, NULL);
                break;
            case OWR_SOURCE_TYPE_UNKNOWN:
            default:
                g_assert_not_reached();
                goto done;
            }

            break;
            }
        case OWR_MEDIA_TYPE_VIDEO:
        {
            GstPad *srcpad;
            GstCaps *device_caps;

            switch (source_type) {
            case OWR_SOURCE_TYPE_CAPTURE:
                CREATE_ELEMENT(source, VIDEO_SRC, "video-source");
                if (priv->device_index > -1) {
#if defined(__APPLE__) && !TARGET_IPHONE_SIMULATOR
                    g_object_set(source, "device-index", priv->device_index, NULL);
#elif defined(__ANDROID__)
                    g_object_set(source, "cam-index", priv->device_index, NULL);
#elif defined(__linux__)
                    tmp = g_strdup_printf("/dev/video%d", priv->device_index);
                    g_object_set(source, "device", tmp, NULL);
                    g_free(tmp);
#endif
                }
                break;
            case OWR_SOURCE_TYPE_TEST: {
                GstElement *src, *time;
                GstPad *srcpad;

                source = gst_bin_new("video-source");

                CREATE_ELEMENT(src, "videotestsrc", "videotestsrc");
                g_object_set(src, "is-live", TRUE, NULL);
                gst_bin_add(GST_BIN(source), src);

                time = gst_element_factory_make("timeoverlay", "timeoverlay");
                if (time) {
                    g_object_set(time, "font-desc", "Sans 60", NULL);
                    gst_bin_add(GST_BIN(source), time);
                    gst_element_link(src, time);
                    srcpad = gst_element_get_static_pad(time, "src");
                } else
                    srcpad = gst_element_get_static_pad(src, "src");

                gst_element_add_pad(source, gst_ghost_pad_new("src", srcpad));
                gst_object_unref(srcpad);

                break;
            }
            case OWR_SOURCE_TYPE_UNKNOWN:
            default:
                g_assert_not_reached();
                goto done;
            }

            /* First try to see if we can just get the format we want directly */

            source_caps = gst_caps_new_empty();
#if GST_CHECK_VERSION(1, 5, 0)
            gst_caps_foreach(caps, fix_video_caps_framerate, source_caps);
#else
            _owr_gst_caps_foreach(caps, fix_video_caps_framerate, source_caps);
#endif
            /* Now see what the device can really produce */
            srcpad = gst_element_get_static_pad(source, "src");
            gst_element_set_state(source, GST_STATE_READY);
            device_caps = gst_pad_query_caps(srcpad, source_caps);

            if (gst_caps_is_empty(device_caps)) {
                /* Let's see if it works when we drop format constraints (which can be dealt with downstream) */
                GstCaps *tmp = source_caps;
                source_caps = gst_caps_new_empty();
#if GST_CHECK_VERSION(1, 5, 0)
                gst_caps_foreach(tmp, fix_video_caps_format, source_caps);
#else
                _owr_gst_caps_foreach(tmp, fix_video_caps_format, source_caps);
#endif
                gst_caps_unref(tmp);

                gst_caps_unref(device_caps);
                device_caps = gst_pad_query_caps(srcpad, source_caps);

                if (gst_caps_is_empty(device_caps)) {
                    /* Accepting any format didn't work, we're going to hope that scaling fixes it */
                    CREATE_ELEMENT(source_process, "videoscale", "video-source-scale");
                    gst_bin_add(GST_BIN(source_pipeline), source_process);
                }
            }

            gst_caps_unref(device_caps);
            gst_object_unref(srcpad);

#if defined(__APPLE__) && TARGET_OS_IPHONE && !TARGET_IPHONE_SIMULATOR
            /* Force NV12 on iOS, else the source can negotiate BGRA.
             * ercolorspace can do NV12 -> BGRA and NV12 -> I420, which is
             * what is needed for Bowser */
            gst_caps_set_simple(source_caps, "format", G_TYPE_STRING, "NV12", NULL);
#endif

            CREATE_ELEMENT(capsfilter, "capsfilter", "video-source-capsfilter");
            g_object_set(capsfilter, "caps", source_caps, NULL);
            gst_caps_unref(source_caps);
            gst_bin_add(GST_BIN(source_pipeline), capsfilter);

            break;
        }
        case OWR_MEDIA_TYPE_UNKNOWN:
        default:
            g_assert_not_reached();
            goto done;
        }
        g_assert(source);

        source_pad = gst_element_get_static_pad(source, "src");
        g_signal_connect(source_pad, "notify::caps", G_CALLBACK(on_caps), media_source);
        gst_object_unref(source_pad);

        CREATE_ELEMENT(tee, "tee", "source-tee");
        g_object_set(tee, "allow-not-linked", TRUE, NULL);

        gst_bin_add_many(GST_BIN(source_pipeline), source, tee, NULL);

        /* Many sources don't like reconfiguration and it's pointless
         * here anyway right now. No need to reconfigure whenever something
         * is added to the tee or removed.
         * We will have to implement reconfiguration differently later by
         * selecting the best caps based on all consumers.
         */
        sinkpad = gst_element_get_static_pad(tee, "sink");
        gst_pad_add_probe(sinkpad, GST_PAD_PROBE_TYPE_EVENT_UPSTREAM, drop_reconfigure_event, NULL, NULL);
        gst_object_unref(sinkpad);

        if (!source)
            GST_ERROR_OBJECT(media_source, "Failed to create source element!");

        if (capsfilter) {
            LINK_ELEMENTS(capsfilter, tee);
            if (source_process) {
                LINK_ELEMENTS(source_process, capsfilter);
                LINK_ELEMENTS(source, source_process);
            } else
                LINK_ELEMENTS(source, capsfilter);
        } else if (source_process) {
            LINK_ELEMENTS(source_process, tee);
            LINK_ELEMENTS(source, source_process);
        } else
            LINK_ELEMENTS(source, tee);

        gst_element_sync_state_with_parent(tee);
        if (capsfilter)
            gst_element_sync_state_with_parent(capsfilter);
        if (source_process)
            gst_element_sync_state_with_parent(source_process);
        gst_element_sync_state_with_parent(source);

        _owr_media_source_set_source_bin(media_source, source_pipeline);
        _owr_media_source_set_source_tee(media_source, tee);
        if (gst_element_set_state(source_pipeline, GST_STATE_PLAYING) == GST_STATE_CHANGE_FAILURE) {
            GST_ERROR("Failed to set local source pipeline %s to playing", GST_OBJECT_NAME(source_pipeline));
            /* FIXME: We should handle this and don't expose the source */
        }

        value = _owr_value_table_add(event_data, "end_time", G_TYPE_INT64);
        g_value_set_int64(value, g_get_monotonic_time());
        OWR_POST_EVENT(media_source, LOCAL_SOURCE_STARTED, event_data);

        g_signal_connect(tee, "pad-removed", G_CALLBACK(tee_pad_removed_cb), media_source);
    }
    gst_object_unref(source_pipeline);

    source_element = OWR_MEDIA_SOURCE_CLASS(owr_local_media_source_parent_class)->request_source(media_source, caps);

done:
    return source_element;
}
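drop_reconfigure_event() is installed as a pad probe above but is not part of this listing; a minimal sketch of a probe that serves the stated purpose, i.e. keeping upstream reconfigure events from tee consumers out of the source pipeline:

static GstPadProbeReturn
drop_reconfigure_event (GstPad * pad, GstPadProbeInfo * info,
    gpointer user_data)
{
  /* Swallow RECONFIGURE so adding or removing consumers on the tee cannot
   * force the whole source pipeline to renegotiate. */
  if (GST_EVENT_TYPE (GST_PAD_PROBE_INFO_EVENT (info)) ==
      GST_EVENT_RECONFIGURE)
    return GST_PAD_PROBE_DROP;

  return GST_PAD_PROBE_OK;
}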
static gboolean
gst_opus_dec_negotiate (GstOpusDec * dec, const GstAudioChannelPosition * pos)
{
  GstCaps *caps = gst_pad_get_allowed_caps (GST_AUDIO_DECODER_SRC_PAD (dec));
  GstStructure *s;
  GstAudioInfo info;

  if (caps) {
    gint rate = dec->sample_rate, channels = dec->n_channels;
    GstCaps *constraint, *inter;

    constraint = gst_caps_from_string ("audio/x-raw");
    if (dec->n_channels <= 2) { /* including 0 */
      gst_caps_set_simple (constraint, "channels", GST_TYPE_INT_RANGE, 1, 2,
          NULL);
    } else {
      gst_caps_set_simple (constraint, "channels", G_TYPE_INT, dec->n_channels,
          NULL);
    }

    inter = gst_caps_intersect (caps, constraint);
    gst_caps_unref (constraint);

    if (gst_caps_is_empty (inter)) {
      GST_DEBUG_OBJECT (dec, "Empty intersection, failed to negotiate");
      gst_caps_unref (inter);
      gst_caps_unref (caps);
      return FALSE;
    }

    inter = gst_caps_truncate (inter);
    s = gst_caps_get_structure (inter, 0);
    rate = dec->sample_rate > 0 ? dec->sample_rate : 48000;
    gst_structure_fixate_field_nearest_int (s, "rate", rate);
    gst_structure_get_int (s, "rate", &rate);
    channels = dec->n_channels > 0 ? dec->n_channels : 2;
    gst_structure_fixate_field_nearest_int (s, "channels", channels);
    gst_structure_get_int (s, "channels", &channels);

    gst_caps_unref (inter);

    dec->sample_rate = rate;
    dec->n_channels = channels;
    gst_caps_unref (caps);
  }

  if (dec->n_channels == 0) {
    GST_DEBUG_OBJECT (dec, "Using a default of 2 channels");
    dec->n_channels = 2;
    pos = NULL;
  }

  if (dec->sample_rate == 0) {
    GST_DEBUG_OBJECT (dec, "Using a default of 48kHz sample rate");
    dec->sample_rate = 48000;
  }

  GST_INFO_OBJECT (dec, "Negotiated %d channels, %d Hz", dec->n_channels,
      dec->sample_rate);

  /* pass valid order to audio info */
  if (pos) {
    memcpy (dec->opus_pos, pos, sizeof (pos[0]) * dec->n_channels);
    gst_audio_channel_positions_to_valid_order (dec->opus_pos, dec->n_channels);
  }

  /* set up source format */
  gst_audio_info_init (&info);
  gst_audio_info_set_format (&info, GST_AUDIO_FORMAT_S16,
      dec->sample_rate, dec->n_channels, pos ? dec->opus_pos : NULL);
  gst_audio_decoder_set_output_format (GST_AUDIO_DECODER (dec), &info);

  /* but we still need the opus order for later reordering */
  if (pos) {
    memcpy (dec->opus_pos, pos, sizeof (pos[0]) * dec->n_channels);
  } else {
    dec->opus_pos[0] = GST_AUDIO_CHANNEL_POSITION_INVALID;
  }

  dec->info = info;

  return TRUE;
}
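A sketch (an assumption, not shown in this listing) of how the saved Opus channel order is used later: decoded S16 buffers come out in Opus order and are shuffled into the negotiated order from dec->info before being pushed downstream:

static void
gst_opus_dec_reorder_sketch (GstOpusDec * dec, GstBuffer * outbuf)
{
  /* Only reorder when a valid Opus order was stored during negotiation. */
  if (dec->opus_pos[0] != GST_AUDIO_CHANNEL_POSITION_INVALID)
    gst_audio_buffer_reorder_channels (outbuf, GST_AUDIO_FORMAT_S16,
        dec->n_channels, dec->opus_pos, dec->info.position);
}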
/**
 * update_aspect_filter:
 * @self: camerasrc object
 * @new_caps: new caps of the next buffers arriving at the view finder sink element
 *
 * Updates aspect ratio capsfilter to maintain aspect ratio, if we need to
 * scale frames for showing them in view finder.
 */
static void
update_aspect_filter (GstWrapperCameraBinSrc * self, GstCaps * new_caps)
{
  // XXX why not instead add a preserve-aspect-ratio property to videoscale?
#if 0
  if (camera->flags & GST_CAMERABIN_FLAG_VIEWFINDER_SCALE) {
    GstCaps *sink_caps, *ar_caps;
    GstStructure *st;
    gint in_w = 0, in_h = 0, sink_w = 0, sink_h = 0, target_w = 0, target_h = 0;
    gdouble ratio_w, ratio_h;
    GstPad *sink_pad;
    const GValue *range;

    sink_pad = gst_element_get_static_pad (camera->view_sink, "sink");

    if (sink_pad) {
      sink_caps = gst_pad_get_caps (sink_pad);
      gst_object_unref (sink_pad);
      if (sink_caps) {
        if (!gst_caps_is_any (sink_caps)) {
          GST_DEBUG_OBJECT (camera, "sink element caps %" GST_PTR_FORMAT,
              sink_caps);
          /* Get maximum resolution that view finder sink accepts */
          st = gst_caps_get_structure (sink_caps, 0);
          if (gst_structure_has_field_typed (st, "width", GST_TYPE_INT_RANGE)) {
            range = gst_structure_get_value (st, "width");
            sink_w = gst_value_get_int_range_max (range);
          }
          if (gst_structure_has_field_typed (st, "height", GST_TYPE_INT_RANGE)) {
            range = gst_structure_get_value (st, "height");
            sink_h = gst_value_get_int_range_max (range);
          }
          GST_DEBUG_OBJECT (camera, "sink element accepts max %dx%d", sink_w,
              sink_h);

          /* Get incoming frames' resolution */
          if (sink_h && sink_w) {
            st = gst_caps_get_structure (new_caps, 0);
            gst_structure_get_int (st, "width", &in_w);
            gst_structure_get_int (st, "height", &in_h);
            GST_DEBUG_OBJECT (camera, "new caps with %dx%d", in_w, in_h);
          }
        }
        gst_caps_unref (sink_caps);
      }
    }

    /* If we get bigger frames than view finder sink accepts, then we scale.
       If we scale we need to adjust aspect ratio capsfilter caps in order
       to maintain aspect ratio while scaling. */
    if (in_w && in_h && (in_w > sink_w || in_h > sink_h)) {
      ratio_w = (gdouble) sink_w / in_w;
      ratio_h = (gdouble) sink_h / in_h;

      if (ratio_w < ratio_h) {
        target_w = sink_w;
        target_h = (gint) (ratio_w * in_h);
      } else {
        target_w = (gint) (ratio_h * in_w);
        target_h = sink_h;
      }

      GST_DEBUG_OBJECT (camera, "setting %dx%d filter to maintain aspect ratio",
          target_w, target_h);
      ar_caps = gst_caps_copy (new_caps);
      gst_caps_set_simple (ar_caps, "width", G_TYPE_INT, target_w, "height",
          G_TYPE_INT, target_h, NULL);
    } else {
      GST_DEBUG_OBJECT (camera, "no scaling");
      ar_caps = new_caps;
    }

    GST_DEBUG_OBJECT (camera, "aspect ratio filter caps %" GST_PTR_FORMAT,
        ar_caps);
    g_object_set (G_OBJECT (camera->aspect_filter), "caps", ar_caps, NULL);
    if (ar_caps != new_caps)
      gst_caps_unref (ar_caps);
  }
#endif
}
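The aspect-ratio math from the disabled block above, extracted into a standalone helper for clarity (a sketch, not part of the element): scale in_w x in_h down to fit within max_w x max_h while preserving the aspect ratio.

static void
fit_within (gint in_w, gint in_h, gint max_w, gint max_h,
    gint * target_w, gint * target_h)
{
  gdouble ratio_w = (gdouble) max_w / in_w;
  gdouble ratio_h = (gdouble) max_h / in_h;

  if (ratio_w < ratio_h) {
    /* width is the limiting dimension */
    *target_w = max_w;
    *target_h = (gint) (ratio_w * in_h);
  } else {
    /* height is the limiting dimension */
    *target_w = (gint) (ratio_h * in_w);
    *target_h = max_h;
  }
}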
static GstFlowReturn
gst_speex_enc_chain (GstPad * pad, GstBuffer * buf)
{
  GstSpeexEnc *enc;
  GstFlowReturn ret = GST_FLOW_OK;

  enc = GST_SPEEX_ENC (GST_PAD_PARENT (pad));

  if (!enc->setup)
    goto not_setup;

  if (!enc->header_sent) {
    /* Speex streams begin with two headers; the initial header (with
       most of the codec setup parameters) which is mandated by the Ogg
       bitstream spec.  The second header holds any comment fields.
       We merely need to make the headers, then pass them to libspeex 
       one at a time; libspeex handles the additional Ogg bitstream 
       constraints */
    GstBuffer *buf1, *buf2;
    GstCaps *caps;
    guchar *data;
    gint data_len;

    /* create header buffer */
    data = (guint8 *) speex_header_to_packet (&enc->header, &data_len);
    buf1 = gst_speex_enc_buffer_from_data (enc, data, data_len, 0);
    free (data);

    /* create comment buffer */
    buf2 = gst_speex_enc_create_metadata_buffer (enc);

    /* mark and put on caps */
    caps = gst_pad_get_caps (enc->srcpad);
    caps = gst_speex_enc_set_header_on_caps (caps, buf1, buf2);

    gst_caps_set_simple (caps,
        "rate", G_TYPE_INT, enc->rate,
        "channels", G_TYPE_INT, enc->channels, NULL);

    /* negotiate with these caps */
    GST_DEBUG_OBJECT (enc, "here are the caps: %" GST_PTR_FORMAT, caps);
    gst_pad_set_caps (enc->srcpad, caps);

    gst_buffer_set_caps (buf1, caps);
    gst_buffer_set_caps (buf2, caps);
    gst_caps_unref (caps);

    /* push out buffers */
    ret = gst_speex_enc_push_buffer (enc, buf1);

    if (ret != GST_FLOW_OK) {
      gst_buffer_unref (buf2);
      goto done;
    }

    ret = gst_speex_enc_push_buffer (enc, buf2);

    if (ret != GST_FLOW_OK)
      goto done;

    speex_bits_reset (&enc->bits);

    enc->header_sent = TRUE;
  }

  /* Save the timestamp of the first buffer. This will be later
   * used as offset for all following buffers */
  if (enc->start_ts == GST_CLOCK_TIME_NONE) {
    if (GST_BUFFER_TIMESTAMP_IS_VALID (buf)) {
      enc->start_ts = GST_BUFFER_TIMESTAMP (buf);
      enc->granulepos_offset = gst_util_uint64_scale
          (GST_BUFFER_TIMESTAMP (buf), enc->rate, GST_SECOND);
    } else {
      enc->start_ts = 0;
      enc->granulepos_offset = 0;
    }
  }

  /* Check if we have a continuous stream; if not, drop some samples, drop
   * the whole buffer, or insert silence samples */
  if (enc->next_ts != GST_CLOCK_TIME_NONE &&
      GST_BUFFER_TIMESTAMP (buf) < enc->next_ts) {
    guint64 diff = enc->next_ts - GST_BUFFER_TIMESTAMP (buf);
    guint64 diff_bytes;

    GST_WARNING_OBJECT (enc, "Buffer is older than previous "
        "timestamp + duration (%" GST_TIME_FORMAT "< %" GST_TIME_FORMAT
        "), cannot handle. Clipping buffer.",
        GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
        GST_TIME_ARGS (enc->next_ts));

    diff_bytes = GST_CLOCK_TIME_TO_FRAMES (diff, enc->rate) * enc->channels * 2;
    if (diff_bytes >= GST_BUFFER_SIZE (buf)) {
      gst_buffer_unref (buf);
      return GST_FLOW_OK;
    }
    buf = gst_buffer_make_metadata_writable (buf);
    GST_BUFFER_DATA (buf) += diff_bytes;
    GST_BUFFER_SIZE (buf) -= diff_bytes;

    GST_BUFFER_TIMESTAMP (buf) += diff;
    if (GST_BUFFER_DURATION_IS_VALID (buf))
      GST_BUFFER_DURATION (buf) -= diff;
  }

  if (enc->next_ts != GST_CLOCK_TIME_NONE
      && GST_BUFFER_TIMESTAMP_IS_VALID (buf)) {
    guint64 max_diff =
        gst_util_uint64_scale (enc->frame_size, GST_SECOND, enc->rate);

    if (GST_BUFFER_TIMESTAMP (buf) != enc->next_ts &&
        GST_BUFFER_TIMESTAMP (buf) - enc->next_ts > max_diff) {
      GST_WARNING_OBJECT (enc,
          "Discontinuity detected: %" G_GUINT64_FORMAT " > %" G_GUINT64_FORMAT,
          GST_BUFFER_TIMESTAMP (buf) - enc->next_ts, max_diff);

      gst_speex_enc_encode (enc, TRUE);

      enc->frameno_out = 0;
      enc->start_ts = GST_BUFFER_TIMESTAMP (buf);
      enc->granulepos_offset = gst_util_uint64_scale
          (GST_BUFFER_TIMESTAMP (buf), enc->rate, GST_SECOND);
    }
  }

  if (GST_BUFFER_TIMESTAMP_IS_VALID (buf)
      && GST_BUFFER_DURATION_IS_VALID (buf))
    enc->next_ts = GST_BUFFER_TIMESTAMP (buf) + GST_BUFFER_DURATION (buf);
  else
    enc->next_ts = GST_CLOCK_TIME_NONE;

  GST_DEBUG_OBJECT (enc, "received buffer of %u bytes", GST_BUFFER_SIZE (buf));

  /* push buffer to adapter */
  gst_adapter_push (enc->adapter, buf);
  buf = NULL;

  ret = gst_speex_enc_encode (enc, FALSE);

done:

  if (buf)
    gst_buffer_unref (buf);

  return ret;

  /* ERRORS */
not_setup:
  {
    GST_ELEMENT_ERROR (enc, CORE, NEGOTIATION, (NULL),
        ("encoder not initialized (input is not audio?)"));
    ret = GST_FLOW_NOT_NEGOTIATED;
    goto done;
  }

}
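A sketch (an assumption; the real gst_speex_enc_push_buffer() is not shown here) of how the start_ts and granulepos_offset saved above would be applied to outgoing buffers: the granulepos counts samples since the start of the stream, and timestamps are derived from the outgoing frame counter:

static void
gst_speex_enc_stamp_sketch (GstSpeexEnc * enc, GstBuffer * outbuf)
{
  GST_BUFFER_TIMESTAMP (outbuf) = enc->start_ts +
      gst_util_uint64_scale ((guint64) enc->frameno_out * enc->frame_size,
      GST_SECOND, enc->rate);
  GST_BUFFER_DURATION (outbuf) =
      gst_util_uint64_scale (enc->frame_size, GST_SECOND, enc->rate);
  GST_BUFFER_OFFSET_END (outbuf) = enc->granulepos_offset +
      (guint64) (enc->frameno_out + 1) * enc->frame_size;
  enc->frameno_out++;
}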
static gboolean
gst_vdp_vpp_sink_setcaps (GstPad * pad, GstCaps * caps)
{
  GstVdpVideoPostProcess *vpp =
      GST_VDP_VIDEO_POST_PROCESS (gst_pad_get_parent (pad));
  GstStructure *structure;
  GstCaps *video_caps = NULL;
  gboolean res = FALSE;

  GstCaps *allowed_caps, *output_caps, *src_caps;

  /* check if the input is non native */
  structure = gst_caps_get_structure (caps, 0);
  if (gst_structure_has_name (structure, "video/x-raw-yuv")) {
    if (!gst_structure_get_fourcc (structure, "format", &vpp->fourcc))
      goto done;
    vpp->native_input = FALSE;
    video_caps = gst_vdp_yuv_to_video_caps (caps);
    if (!video_caps)
      goto done;

    if (!vpp->vpool)
      vpp->vpool = gst_vdp_video_buffer_pool_new (vpp->device);

    gst_vdp_buffer_pool_set_caps (vpp->vpool, video_caps);

  } else {
    vpp->native_input = TRUE;
    video_caps = gst_caps_ref (caps);

    if (vpp->vpool) {
      g_object_unref (vpp->vpool);
      vpp->vpool = NULL;
    }
  }

  structure = gst_caps_get_structure (video_caps, 0);
  if (!gst_structure_get_int (structure, "width", &vpp->width) ||
      !gst_structure_get_int (structure, "height", &vpp->height) ||
      !gst_structure_get_int (structure, "chroma-type",
          (gint *) & vpp->chroma_type))
    goto done;


  /* get interlaced flag */
  gst_structure_get_boolean (structure, "interlaced", &vpp->interlaced);

  /* extract par */
  if (gst_structure_has_field_typed (structure, "pixel-aspect-ratio",
          GST_TYPE_FRACTION)) {
    gst_structure_get_fraction (structure, "pixel-aspect-ratio", &vpp->par_n,
        &vpp->par_d);
    vpp->got_par = TRUE;
  } else
    vpp->got_par = FALSE;

  allowed_caps = gst_pad_get_allowed_caps (vpp->srcpad);
  if (G_UNLIKELY (!allowed_caps))
    goto null_allowed_caps;
  if (G_UNLIKELY (gst_caps_is_empty (allowed_caps)))
    goto empty_allowed_caps;
  GST_DEBUG ("allowed_caps: %" GST_PTR_FORMAT, allowed_caps);

  output_caps = gst_vdp_video_to_output_caps (video_caps);
  src_caps = gst_caps_intersect (output_caps, allowed_caps);
  gst_caps_unref (allowed_caps);
  gst_caps_unref (output_caps);

  if (gst_caps_is_empty (src_caps))
    goto not_negotiated;

  gst_pad_fixate_caps (vpp->srcpad, src_caps);


  if (gst_vdp_vpp_is_interlaced (vpp)) {
    gint fps_n, fps_d;

    if (gst_structure_get_fraction (structure, "framerate", &fps_n, &fps_d)) {
      gst_fraction_double (&fps_n, &fps_d);
      gst_caps_set_simple (src_caps, "framerate", GST_TYPE_FRACTION, fps_n,
          fps_d, NULL);
      vpp->field_duration = gst_util_uint64_scale (GST_SECOND, fps_d, fps_n);
    }

    gst_caps_set_simple (src_caps, "interlaced", G_TYPE_BOOLEAN, FALSE, NULL);
  }

  GST_DEBUG ("src_caps: %" GST_PTR_FORMAT, src_caps);

  res = gst_pad_set_caps (vpp->srcpad, src_caps);
  gst_caps_unref (src_caps);

done:
  gst_object_unref (vpp);
  if (video_caps)
    gst_caps_unref (video_caps);

  return res;

null_allowed_caps:
  GST_ERROR_OBJECT (vpp, "Got null from gst_pad_get_allowed_caps");
  goto done;

empty_allowed_caps:
  GST_ERROR_OBJECT (vpp, "Got EMPTY caps from gst_pad_get_allowed_caps");

  gst_caps_unref (allowed_caps);
  goto done;

not_negotiated:
  gst_caps_unref (src_caps);
  GST_ERROR_OBJECT (vpp, "Couldn't find suitable output format");
  goto done;
}
/**
 * adapt_image_capture:
 * @self: camerasrc object
 * @in_caps: caps object that describes incoming image format
 *
 * Adjust capsfilters and crop according to the image capture caps if necessary.
 * The captured image format from video source might be different from
 * what application requested, so we can try to fix that in camerabin.
 *
 */
static void
adapt_image_capture (GstWrapperCameraBinSrc * self, GstCaps * in_caps)
{
  GstBaseCameraSrc *bcamsrc = GST_BASE_CAMERA_SRC (self);
  GstStructure *in_st, *new_st, *req_st;
  gint in_width = 0, in_height = 0, req_width = 0, req_height = 0, crop = 0;
  gdouble ratio_w, ratio_h;
  GstCaps *filter_caps = NULL;

  GST_LOG_OBJECT (self, "in caps: %" GST_PTR_FORMAT, in_caps);
  GST_LOG_OBJECT (self, "requested caps: %" GST_PTR_FORMAT,
      self->image_capture_caps);

  in_st = gst_caps_get_structure (in_caps, 0);
  gst_structure_get_int (in_st, "width", &in_width);
  gst_structure_get_int (in_st, "height", &in_height);

  req_st = gst_caps_get_structure (self->image_capture_caps, 0);
  gst_structure_get_int (req_st, "width", &req_width);
  gst_structure_get_int (req_st, "height", &req_height);

  GST_INFO_OBJECT (self, "we requested %dx%d, and got %dx%d", req_width,
      req_height, in_width, in_height);

  new_st = gst_structure_copy (req_st);
  /* If new fields have been added, we need to copy them */
  gst_structure_foreach (in_st, copy_missing_fields, new_st);

  gst_structure_set (new_st, "width", G_TYPE_INT, in_width, "height",
      G_TYPE_INT, in_height, NULL);

  GST_LOG_OBJECT (self, "new image capture caps: %" GST_PTR_FORMAT, new_st);

  /* Crop if requested aspect ratio differs from incoming frame aspect ratio */
  if (self->src_zoom_crop) {

    ratio_w = (gdouble) in_width / req_width;
    ratio_h = (gdouble) in_height / req_height;

    if (ratio_w < ratio_h) {
      crop = in_height - (req_height * ratio_w);
      self->base_crop_top = crop / 2;
      self->base_crop_bottom = crop / 2;
    } else {
      crop = in_width - (req_width * ratio_h);
      self->base_crop_left = crop / 2;
      self->base_crop_right = crop / 2;
    }

    GST_INFO_OBJECT (self,
        "setting base crop: left:%d, right:%d, top:%d, bottom:%d",
        self->base_crop_left, self->base_crop_right, self->base_crop_top,
        self->base_crop_bottom);
    g_object_set (G_OBJECT (self->src_zoom_crop),
        "top", self->base_crop_top,
        "bottom", self->base_crop_bottom,
        "left", self->base_crop_left, "right", self->base_crop_right, NULL);
  }

  /* Update capsfilters */
  if (self->image_capture_caps) {
    gst_caps_unref (self->image_capture_caps);
  }
  self->image_capture_caps = gst_caps_new_full (new_st, NULL);
  set_capsfilter_caps (self, self->image_capture_caps);

  /* Adjust the capsfilter before crop and videoscale elements if necessary */
  if (in_width == bcamsrc->width && in_height == bcamsrc->height) {
    GST_DEBUG_OBJECT (self, "no adaptation with resolution needed");
  } else {
    GST_DEBUG_OBJECT (self,
        "changing %" GST_PTR_FORMAT " from %dx%d to %dx%d", self->src_filter,
        bcamsrc->width, bcamsrc->height, in_width, in_height);
    /* Apply the width and height to filter caps */
    g_object_get (G_OBJECT (self->src_filter), "caps", &filter_caps, NULL);
    filter_caps = gst_caps_make_writable (filter_caps);
    gst_caps_set_simple (filter_caps, "width", G_TYPE_INT, in_width, "height",
        G_TYPE_INT, in_height, NULL);
    g_object_set (G_OBJECT (self->src_filter), "caps", filter_caps, NULL);
    gst_caps_unref (filter_caps);
  }
}
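A worked example of the crop math above, with hypothetical numbers: if 4:3 1600x1200 was requested but the source delivers 16:9 1920x1080, then ratio_w = 1920/1600 = 1.2 and ratio_h = 1080/1200 = 0.9, so the else branch runs: crop = 1920 - 1600 * 0.9 = 480, and 240 pixels are cropped from each side, leaving a 1440x1080 frame, which is 4:3 again.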
static GstCaps *
caps_from_video_stream_info (GstDiscovererStreamInfo *info)
{
        GstCaps *temp = gst_discoverer_stream_info_get_caps (info);
        GstCaps *caps = gst_caps_copy (temp);
        const GstDiscovererVideoInfo *video_info =
                GST_DISCOVERER_VIDEO_INFO (info);
        const GstTagList *stream_tag_list;
        guint n, d, data;
        gboolean value;

        gst_caps_unref (temp);

        data = gst_discoverer_video_info_get_height (video_info);
        if (data)
                gst_caps_set_simple (caps, "height", G_TYPE_INT, data, NULL);

        data = gst_discoverer_video_info_get_width (video_info);
        if (data)
                gst_caps_set_simple (caps, "width", G_TYPE_INT, data, NULL);

        data = gst_discoverer_video_info_get_depth (video_info);
        if (data)
                gst_caps_set_simple (caps, "depth", G_TYPE_INT, data, NULL);

        n = gst_discoverer_video_info_get_framerate_num (video_info);
        d = gst_discoverer_video_info_get_framerate_denom (video_info);
        if (n && d)
                gst_caps_set_simple (caps,
                                     "framerate",
                                     GST_TYPE_FRACTION, n, d,
                                     NULL);

        n = gst_discoverer_video_info_get_par_num (video_info);
        d = gst_discoverer_video_info_get_par_denom (video_info);
        if (n && d)
                gst_caps_set_simple (caps,
                                     "pixel-aspect-ratio",
                                     GST_TYPE_FRACTION, n, d,
                                     NULL);

        value = gst_discoverer_video_info_is_interlaced (video_info);
        if (value)
                gst_caps_set_simple
                        (caps, "interlaced", G_TYPE_BOOLEAN, value, NULL);

        stream_tag_list = gst_discoverer_stream_info_get_tags (info);
        if (stream_tag_list) {
                guint bitrate;
                if (gst_tag_list_get_uint (stream_tag_list, "bitrate", &bitrate))
                        gst_caps_set_simple
                             (caps, "bitrate", G_TYPE_INT, (int) bitrate, NULL);

                if (gst_tag_list_get_uint (stream_tag_list,
                                           "maximum-bitrate",
                                           &bitrate))
                        gst_caps_set_simple (caps,
                                             "maximum-bitrate",
                                             G_TYPE_INT,
                                             (int) bitrate,
                                             NULL);
        }

        return caps;
}
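A hypothetical caller of the helper above: discover a URI and build caps for its first video stream (the function name and error handling here are assumptions):

static GstCaps *
video_caps_for_uri (const gchar * uri)
{
  GError *error = NULL;
  GstDiscoverer *disc = gst_discoverer_new (5 * GST_SECOND, &error);
  GstDiscovererInfo *info;
  GList *streams;
  GstCaps *caps = NULL;

  if (!disc) {
    g_clear_error (&error);
    return NULL;
  }

  info = gst_discoverer_discover_uri (disc, uri, &error);
  if (info) {
    streams = gst_discoverer_info_get_video_streams (info);
    if (streams)
      caps = caps_from_video_stream_info (streams->data);
    gst_discoverer_stream_info_list_free (streams);
    g_object_unref (info);
  }

  g_clear_error (&error);
  g_object_unref (disc);
  return caps;
}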
static GstCaps *
gst_tinyalsa_sink_getcaps (GstBaseSink * bsink, GstCaps * filter)
{
  GstTinyalsaSink *sink = GST_TINYALSA_SINK (bsink);
  GstCaps *caps = NULL;
  GValue formats = { 0, };
  GValue format = { 0, };
  struct pcm_params *params = NULL;
  struct pcm_mask *mask;
  int rate_min, rate_max, channels_min, channels_max;
  guint16 m;

  GST_DEBUG_OBJECT (sink, "Querying caps");

  GST_OBJECT_LOCK (sink);

  if (sink->cached_caps) {
    GST_DEBUG_OBJECT (sink, "Returning cached caps");
    caps = gst_caps_ref (sink->cached_caps);
    goto done;
  }

  if (sink->pcm) {
    /* We can't query the device while it's open, so return current caps */
    caps = gst_pad_get_current_caps (GST_BASE_SINK_PAD (bsink));
    goto done;
  }

  params = pcm_params_get (sink->card, sink->device, PCM_OUT);
  if (!params) {
    GST_ERROR_OBJECT (sink, "Could not get PCM params");
    goto done;
  }

  mask = pcm_params_get_mask (params, PCM_PARAM_FORMAT);
  m = (mask->bits[1] << 8) | mask->bits[0];

  if (!(m & SNDRV_PCM_FORMAT_ANY)) {
    GST_ERROR_OBJECT (sink, "Could not find any supported format");
    goto done;
  }

  caps = gst_caps_new_empty_simple ("audio/x-raw");

  g_value_init (&formats, GST_TYPE_LIST);
  g_value_init (&format, G_TYPE_STRING);

  if (m & (1 << SNDRV_PCM_FORMAT_S8)) {
    g_value_set_static_string (&format, "S8");
    gst_value_list_prepend_value (&formats, &format);
  }
  if (m & (1 << SNDRV_PCM_FORMAT_S16_LE)) {
    g_value_set_static_string (&format, "S16LE");
    gst_value_list_prepend_value (&formats, &format);
  }
  if (m & (1 << SNDRV_PCM_FORMAT_S24_LE)) {
    g_value_set_static_string (&format, "S24_32LE");
    gst_value_list_prepend_value (&formats, &format);
  }
  if (m & (1 << SNDRV_PCM_FORMAT_S32_LE)) {
    g_value_set_static_string (&format, "S32LE");
    gst_value_list_prepend_value (&formats, &format);
  }

  gst_caps_set_value (caps, "format", &formats);

  g_value_unset (&format);
  g_value_unset (&formats);

  /* This is a bit of a lie, since the device likely only supports some
   * standard rates in this range. We should probably filter the range down
   * to those standard rates, but even that isn't guaranteed to be accurate.
   */
  rate_min = pcm_params_get_min (params, PCM_PARAM_RATE);
  rate_max = pcm_params_get_max (params, PCM_PARAM_RATE);

  if (rate_min == rate_max)
    gst_caps_set_simple (caps, "rate", G_TYPE_INT, rate_min, NULL);
  else
    gst_caps_set_simple (caps, "rate", GST_TYPE_INT_RANGE, rate_min, rate_max,
        NULL);

  channels_min = pcm_params_get_min (params, PCM_PARAM_CHANNELS);
  channels_max = pcm_params_get_max (params, PCM_PARAM_CHANNELS);

  if (channels_min == channels_max)
    gst_caps_set_simple (caps, "channels", G_TYPE_INT, channels_min, NULL);
  else
    gst_caps_set_simple (caps, "channels", GST_TYPE_INT_RANGE, channels_min,
        channels_max, NULL);

  gst_caps_replace (&sink->cached_caps, caps);

done:
  GST_OBJECT_UNLOCK (sink);

  GST_DEBUG_OBJECT (sink, "Got caps %" GST_PTR_FORMAT, caps);

  if (caps && filter) {
    GstCaps *intersection =
        gst_caps_intersect_full (filter, caps, GST_CAPS_INTERSECT_FIRST);

    gst_caps_unref (caps);
    caps = intersection;
  }

  if (params)
    pcm_params_free (params);

  return caps;
}
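One way to tighten the rate range as the comment above suggests (a sketch, not part of the element): replace the reported min/max range with the list of standard rates that fall inside it.

static GstCaps *
restrict_to_standard_rates (GstCaps * caps, int rate_min, int rate_max)
{
  static const int rates[] = { 8000, 11025, 16000, 22050, 32000, 44100,
    48000, 88200, 96000, 192000
  };
  GValue list = G_VALUE_INIT;
  GValue rate = G_VALUE_INIT;
  guint i;

  g_value_init (&list, GST_TYPE_LIST);
  g_value_init (&rate, G_TYPE_INT);

  for (i = 0; i < G_N_ELEMENTS (rates); i++) {
    if (rates[i] >= rate_min && rates[i] <= rate_max) {
      g_value_set_int (&rate, rates[i]);
      gst_value_list_append_value (&list, &rate);
    }
  }

  /* If nothing matched, keep the original range rather than emptying caps. */
  if (gst_value_list_get_size (&list) > 0) {
    caps = gst_caps_make_writable (caps);
    gst_caps_set_value (caps, "rate", &list);
  }

  g_value_unset (&rate);
  g_value_unset (&list);

  return caps;
}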
static gboolean
gst_jpegenc_setcaps (GstPad * pad, GstCaps * caps)
{
  GstJpegEnc *enc = GST_JPEGENC (gst_pad_get_parent (pad));
  GstVideoFormat format;
  gint width, height;
  gint fps_num, fps_den;
  gint par_num, par_den;
  gint i;
  GstCaps *othercaps;
  gboolean ret;

  /* get info from caps */
  if (!gst_video_format_parse_caps (caps, &format, &width, &height))
    goto refuse_caps;
  /* optional; pass along if present */
  fps_num = fps_den = -1;
  par_num = par_den = -1;
  gst_video_parse_caps_framerate (caps, &fps_num, &fps_den);
  gst_video_parse_caps_pixel_aspect_ratio (caps, &par_num, &par_den);

  if (width == enc->width && height == enc->height && enc->format == format
      && fps_num == enc->fps_num && fps_den == enc->fps_den
      && par_num == enc->par_num && par_den == enc->par_den)
    return TRUE;

  /* store input description */
  enc->format = format;
  enc->width = width;
  enc->height = height;
  enc->fps_num = fps_num;
  enc->fps_den = fps_den;
  enc->par_num = par_num;
  enc->par_den = par_den;

  /* prepare a cached image description; any alpha channel in the input is
   * disregarded in encoding */
  if (gst_video_format_is_gray (format))
    enc->channels = 1;
  else
    enc->channels = 3;
  enc->h_max_samp = 0;
  enc->v_max_samp = 0;
  for (i = 0; i < enc->channels; ++i) {
    enc->cwidth[i] = gst_video_format_get_component_width (format, i, width);
    enc->cheight[i] = gst_video_format_get_component_height (format, i, height);
    enc->offset[i] = gst_video_format_get_component_offset (format, i, width,
        height);
    enc->stride[i] = gst_video_format_get_row_stride (format, i, width);
    enc->inc[i] = gst_video_format_get_pixel_stride (format, i);
    enc->h_samp[i] = GST_ROUND_UP_4 (width) / enc->cwidth[i];
    enc->h_max_samp = MAX (enc->h_max_samp, enc->h_samp[i]);
    enc->v_samp[i] = GST_ROUND_UP_4 (height) / enc->cheight[i];
    enc->v_max_samp = MAX (enc->v_max_samp, enc->v_samp[i]);
  }
  /* samp should only be 1, 2 or 4 */
  g_assert (enc->h_max_samp <= 4);
  g_assert (enc->v_max_samp <= 4);
  /* now invert */
  /* maximum is invariant, as one of the components should have samp 1 */
  for (i = 0; i < enc->channels; ++i) {
    enc->h_samp[i] = enc->h_max_samp / enc->h_samp[i];
    enc->v_samp[i] = enc->v_max_samp / enc->v_samp[i];
  }
  enc->planar = (enc->inc[0] == 1 && enc->inc[1] == 1 && enc->inc[2] == 1);

  othercaps = gst_caps_copy (gst_pad_get_pad_template_caps (enc->srcpad));
  gst_caps_set_simple (othercaps,
      "width", G_TYPE_INT, enc->width, "height", G_TYPE_INT, enc->height, NULL);
  if (enc->fps_den > 0)
    gst_caps_set_simple (othercaps,
        "framerate", GST_TYPE_FRACTION, enc->fps_num, enc->fps_den, NULL);
  if (enc->par_den > 0)
    gst_caps_set_simple (othercaps,
        "pixel-aspect-ratio", GST_TYPE_FRACTION, enc->par_num, enc->par_den,
        NULL);

  ret = gst_pad_set_caps (enc->srcpad, othercaps);
  gst_caps_unref (othercaps);

  if (ret)
    gst_jpegenc_resync (enc);

  gst_object_unref (enc);

  return ret;

  /* ERRORS */
refuse_caps:
  {
    GST_WARNING_OBJECT (enc, "refused caps %" GST_PTR_FORMAT, caps);
    gst_object_unref (enc);
    return FALSE;
  }
}
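
To see what the sampling-factor computation and inversion produce, here is a standalone arithmetic sketch for a hypothetical 640-pixel-wide I420 frame (component widths hardcoded rather than queried from GStreamer):

#include <stdio.h>

#define ROUND_UP_4(x) (((x) + 3) & ~3)

int
main (void)
{
  /* hypothetical I420 frame: chroma planes are half-width */
  int width = 640;
  int cwidth[3] = { 640, 320, 320 };
  int h_samp[3], h_max = 0, i;

  for (i = 0; i < 3; i++) {
    h_samp[i] = ROUND_UP_4 (width) / cwidth[i];   /* {1, 2, 2} */
    if (h_samp[i] > h_max)
      h_max = h_samp[i];
  }
  for (i = 0; i < 3; i++)
    h_samp[i] = h_max / h_samp[i];                /* {2, 1, 1} */

  for (i = 0; i < 3; i++)
    printf ("component %d: h_samp %d\n", i, h_samp[i]);
  return 0;
}
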
static GstCaps *
gst_fbdevsink_getcaps (GstBaseSink * bsink, GstCaps * filter)
{
  GstFBDEVSink *fbdevsink;
  GstVideoFormat format;
  GstCaps *caps;
  uint32_t rmask;
  uint32_t gmask;
  uint32_t bmask;
  uint32_t tmask;
  int endianness, depth, bpp;

  fbdevsink = GST_FBDEVSINK (bsink);

  caps = gst_static_pad_template_get_caps (&sink_template);

  /* FIXME: locking */
  if (!fbdevsink->framebuffer)
    goto done;

  bpp = fbdevsink->varinfo.bits_per_pixel;

  rmask = ((1 << fbdevsink->varinfo.red.length) - 1)
      << fbdevsink->varinfo.red.offset;
  gmask = ((1 << fbdevsink->varinfo.green.length) - 1)
      << fbdevsink->varinfo.green.offset;
  bmask = ((1 << fbdevsink->varinfo.blue.length) - 1)
      << fbdevsink->varinfo.blue.offset;
  tmask = ((1 << fbdevsink->varinfo.transp.length) - 1)
      << fbdevsink->varinfo.transp.offset;

  depth = fbdevsink->varinfo.red.length + fbdevsink->varinfo.green.length
      + fbdevsink->varinfo.blue.length;

  switch (fbdevsink->varinfo.bits_per_pixel) {
    case 32:
      /* swap endianness of masks */
      rmask = GUINT32_SWAP_LE_BE (rmask);
      gmask = GUINT32_SWAP_LE_BE (gmask);
      bmask = GUINT32_SWAP_LE_BE (bmask);
      tmask = GUINT32_SWAP_LE_BE (tmask);
      depth += fbdevsink->varinfo.transp.length;
      endianness = G_BIG_ENDIAN;
      break;
    case 24:{
      /* swap red and blue masks */
      tmask = rmask;
      rmask = bmask;
      bmask = tmask;
      tmask = 0;
      endianness = G_BIG_ENDIAN;
      break;
    }
    case 15:
    case 16:
      tmask = 0;
      endianness = G_LITTLE_ENDIAN;
      break;
    default:
      goto unsupported_bpp;
  }

  format = gst_video_format_from_masks (depth, bpp, endianness, rmask, gmask,
      bmask, tmask);

  if (format == GST_VIDEO_FORMAT_UNKNOWN)
    goto unknown_format;

  caps = gst_caps_make_writable (caps);
  gst_caps_set_simple (caps, "format", G_TYPE_STRING,
      gst_video_format_to_string (format), NULL);

done:

  if (filter != NULL) {
    GstCaps *icaps;

    icaps = gst_caps_intersect (caps, filter);
    gst_caps_unref (caps);
    caps = icaps;
  }

  return caps;

/* ERRORS */
unsupported_bpp:
  {
    GST_WARNING_OBJECT (bsink, "unsupported bit depth: %d", bpp);
    gst_caps_unref (caps);
    return NULL;
  }
unknown_format:
  {
    GST_WARNING_OBJECT (bsink, "could not map fbdev format to GstVideoFormat: "
        "depth=%u, bpp=%u, endianness=%u, rmask=0x%08x, gmask=0x%08x, "
        "bmask=0x%08x, tmask=0x%08x", depth, bpp, endianness, rmask, gmask,
        bmask, tmask);
    gst_caps_unref (caps);
    return NULL;
  }
}
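
The mask arithmetic above is easy to verify for a hypothetical RGB565 framebuffer; a standalone sketch with fb_var_screeninfo-style length/offset pairs:

#include <stdio.h>
#include <stdint.h>

int
main (void)
{
  /* hypothetical RGB565 layout: length/offset pairs as they would
   * appear in fb_var_screeninfo */
  struct { int length, offset; } red = { 5, 11 }, green = { 6, 5 },
      blue = { 5, 0 };

  uint32_t rmask = ((1u << red.length) - 1) << red.offset;
  uint32_t gmask = ((1u << green.length) - 1) << green.offset;
  uint32_t bmask = ((1u << blue.length) - 1) << blue.offset;

  /* prints rmask=0xf800 gmask=0x07e0 bmask=0x001f */
  printf ("rmask=0x%04x gmask=0x%04x bmask=0x%04x\n",
      (unsigned) rmask, (unsigned) gmask, (unsigned) bmask);
  return 0;
}
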
static BOOL tsmf_gstreamer_set_format(ITSMFDecoder* decoder, TS_AM_MEDIA_TYPE* media_type)
{
	TSMFGstreamerDecoder* mdecoder = (TSMFGstreamerDecoder*) decoder;

	if (!mdecoder)
		return FALSE;

	DEBUG_TSMF("");

	switch (media_type->MajorType)
	{
		case TSMF_MAJOR_TYPE_VIDEO:
			mdecoder->media_type = TSMF_MAJOR_TYPE_VIDEO;
			break;
		case TSMF_MAJOR_TYPE_AUDIO:
			mdecoder->media_type = TSMF_MAJOR_TYPE_AUDIO;
			break;
		default:
			return FALSE;
	}

	switch (media_type->SubType)
	{
		case TSMF_SUB_TYPE_WVC1:
			mdecoder->gst_caps = gst_caps_new_simple("video/x-wmv",
								 "width", G_TYPE_INT, media_type->Width,
								 "height", G_TYPE_INT, media_type->Height,
								 "wmvversion", G_TYPE_INT, 3,
								 "format", G_TYPE_STRING, "WVC1",
								 NULL);
			break;
		case TSMF_SUB_TYPE_MP4S:
			mdecoder->gst_caps =  gst_caps_new_simple("video/x-divx",
								  "divxversion", G_TYPE_INT, 5,
								  "bitrate", G_TYPE_UINT, media_type->BitRate,
								  "width", G_TYPE_INT, media_type->Width,
								  "height", G_TYPE_INT, media_type->Height,
								  NULL);
			break;
		case TSMF_SUB_TYPE_MP42:
			mdecoder->gst_caps =  gst_caps_new_simple("video/x-msmpeg",
								  "msmpegversion", G_TYPE_INT, 42,
								  "bitrate", G_TYPE_UINT, media_type->BitRate,
								  "width", G_TYPE_INT, media_type->Width,
								  "height", G_TYPE_INT, media_type->Height,
								  NULL);
			break;
		case TSMF_SUB_TYPE_MP43:
			mdecoder->gst_caps =  gst_caps_new_simple("video/x-msmpeg",
								  "bitrate", G_TYPE_UINT, media_type->BitRate,
								  "width", G_TYPE_INT, media_type->Width,
								  "height", G_TYPE_INT, media_type->Height,
								  NULL);
			break;
		case TSMF_SUB_TYPE_WMA9:
			mdecoder->gst_caps =  gst_caps_new_simple("audio/x-wma",
								  "wmaversion", G_TYPE_INT, 3,
								  "rate", G_TYPE_INT, media_type->SamplesPerSecond.Numerator,
								  "channels", G_TYPE_INT, media_type->Channels,
								  "bitrate", G_TYPE_INT, media_type->BitRate,
								  "depth", G_TYPE_INT, media_type->BitsPerSample,
								  "width", G_TYPE_INT, media_type->BitsPerSample,
								  "block_align", G_TYPE_INT, media_type->BlockAlign,
								  NULL);
			break;
		case TSMF_SUB_TYPE_WMA2:
			mdecoder->gst_caps =  gst_caps_new_simple("audio/x-wma",
								  "wmaversion", G_TYPE_INT, 2,
								  "rate", G_TYPE_INT, media_type->SamplesPerSecond.Numerator,
								  "channels", G_TYPE_INT, media_type->Channels,
								  "bitrate", G_TYPE_INT, media_type->BitRate,
								  "depth", G_TYPE_INT, media_type->BitsPerSample,
								  "width", G_TYPE_INT, media_type->BitsPerSample,
								  "block_align", G_TYPE_INT, media_type->BlockAlign,
								  NULL);
			break;
		case TSMF_SUB_TYPE_MP3:
			mdecoder->gst_caps =  gst_caps_new_simple("audio/mpeg",
								  "mpegversion", G_TYPE_INT, 1,
								  "layer", G_TYPE_INT, 3,
								  "rate", G_TYPE_INT, media_type->SamplesPerSecond.Numerator,
								  "channels", G_TYPE_INT, media_type->Channels,
								  NULL);
			break;
		case TSMF_SUB_TYPE_WMV1:
			mdecoder->gst_caps = gst_caps_new_simple("video/x-wmv",
								 "bitrate", G_TYPE_UINT, media_type->BitRate,
								 "width", G_TYPE_INT, media_type->Width,
								 "height", G_TYPE_INT, media_type->Height,
								 "wmvversion", G_TYPE_INT, 1,
								 NULL);
			break;
		case TSMF_SUB_TYPE_WMV2:
			mdecoder->gst_caps = gst_caps_new_simple("video/x-wmv",
								 "width", G_TYPE_INT, media_type->Width,
								 "height", G_TYPE_INT, media_type->Height,
								 "wmvversion", G_TYPE_INT, 2,
								 NULL);
			break;
		case TSMF_SUB_TYPE_WMV3:
			mdecoder->gst_caps = gst_caps_new_simple("video/x-wmv",
								 "bitrate", G_TYPE_UINT, media_type->BitRate,
								 "width", G_TYPE_INT, media_type->Width,
								 "height", G_TYPE_INT, media_type->Height,
								 "wmvversion", G_TYPE_INT, 3,
								 NULL);
			break;
		case TSMF_SUB_TYPE_AVC1:
		case TSMF_SUB_TYPE_H264:
			mdecoder->gst_caps = gst_caps_new_simple("video/x-h264",
								 "width", G_TYPE_INT, media_type->Width,
								 "height", G_TYPE_INT, media_type->Height,
								 NULL);
			break;
		case TSMF_SUB_TYPE_AC3:
			mdecoder->gst_caps =  gst_caps_new_simple("audio/x-ac3",
								  "rate", G_TYPE_INT, media_type->SamplesPerSecond.Numerator,
								  "channels", G_TYPE_INT, media_type->Channels,
								  NULL);
			break;
		case TSMF_SUB_TYPE_AAC:

			/* For AAC the pFormat is a HEAACWAVEINFO struct, and the codec data
			   is at the end of it. See
			   http://msdn.microsoft.com/en-us/library/dd757806.aspx */
			/* guard against a short buffer before skipping the
			   HEAACWAVEINFO fields */
			if (media_type->ExtraData && media_type->ExtraDataSize >= 12)
			{
				media_type->ExtraData += 12;
				media_type->ExtraDataSize -= 12;
			}

			mdecoder->gst_caps = gst_caps_new_simple("audio/mpeg",
								 "rate", G_TYPE_INT, media_type->SamplesPerSecond.Numerator,
								 "channels", G_TYPE_INT, media_type->Channels,
								 "mpegversion", G_TYPE_INT, 4,
								 NULL);
			break;
		case TSMF_SUB_TYPE_MP1A:
			mdecoder->gst_caps =  gst_caps_new_simple("audio/mpeg",
								  "mpegversion", G_TYPE_INT, 1,
								  "channels", G_TYPE_INT, media_type->Channels,
								  NULL);
			break;
		case TSMF_SUB_TYPE_MP1V:
			mdecoder->gst_caps = gst_caps_new_simple("video/mpeg",
								 "mpegversion", G_TYPE_INT, 1,
								 "width", G_TYPE_INT, media_type->Width,
								 "height", G_TYPE_INT, media_type->Height,
								 "systemstream", G_TYPE_BOOLEAN, FALSE,
								 NULL);
			break;
		case TSMF_SUB_TYPE_YUY2:
#if GST_VERSION_MAJOR > 0
			mdecoder->gst_caps = gst_caps_new_simple("video/x-raw",
								 "format", G_TYPE_STRING, "YUY2",
								 "width", G_TYPE_INT, media_type->Width,
								 "height", G_TYPE_INT, media_type->Height,
								 NULL);
#else
			mdecoder->gst_caps = gst_caps_new_simple("video/x-raw-yuv",
								 "format", G_TYPE_STRING, "YUY2",
								 "width", G_TYPE_INT, media_type->Width,
								 "height", G_TYPE_INT, media_type->Height,
								 NULL);
#endif
			break;
		case TSMF_SUB_TYPE_MP2V:
			mdecoder->gst_caps = gst_caps_new_simple("video/mpeg",
								 "mpegversion", G_TYPE_INT, 2,
								 "systemstream", G_TYPE_BOOLEAN, FALSE,
								 NULL);
			break;
		case TSMF_SUB_TYPE_MP2A:
			mdecoder->gst_caps =  gst_caps_new_simple("audio/mpeg",
								  "mpegversion", G_TYPE_INT, 2,
								  "rate", G_TYPE_INT, media_type->SamplesPerSecond.Numerator,
								  "channels", G_TYPE_INT, media_type->Channels,
								  NULL);
			break;
		default:
			WLog_ERR(TAG, "unknown format:(%d).", media_type->SubType);
			return FALSE;
	}

	if (media_type->ExtraDataSize > 0)
	{
		GstBuffer *buffer;
		DEBUG_TSMF("Extra data available (%d)", media_type->ExtraDataSize);
		buffer = tsmf_get_buffer_from_data(media_type->ExtraData, media_type->ExtraDataSize);

		if (!buffer)
		{
			WLog_ERR(TAG, "could not allocate GstBuffer!");
			return FALSE;
		}

		gst_caps_set_simple(mdecoder->gst_caps, "codec_data", GST_TYPE_BUFFER, buffer, NULL);
	}

	DEBUG_TSMF("%p format '%s'", mdecoder, gst_caps_to_string(mdecoder->gst_caps));
	tsmf_platform_set_format(mdecoder);

	/* Create the pipeline... */
	if (!tsmf_gstreamer_pipeline_build(mdecoder))
		return FALSE;

	return TRUE;
}
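
The codec_data attachment at the end mirrors a common GStreamer pattern; a minimal standalone sketch (GStreamer 1.x assumed, placeholder bytes rather than a real codec header):

#include <gst/gst.h>

/* Minimal sketch: attach out-of-band codec data to caps the way the
 * switch above does. Assumes gst_init() has already run; the caps
 * media type and dimensions are illustrative. */
static GstCaps *
make_caps_with_codec_data (const guint8 * extradata, gsize size)
{
  GstBuffer *buf = gst_buffer_new_allocate (NULL, size, NULL);
  GstCaps *caps = gst_caps_new_simple ("video/x-h264",
      "width", G_TYPE_INT, 1280, "height", G_TYPE_INT, 720, NULL);

  gst_buffer_fill (buf, 0, extradata, size);
  gst_caps_set_simple (caps, "codec_data", GST_TYPE_BUFFER, buf, NULL);
  gst_buffer_unref (buf);
  return caps;
}
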
static gboolean
gst_dshowvideodec_sink_setcaps (GstPad * pad, GstCaps * caps)
{
  gboolean ret = FALSE;
  HRESULT hres;
  GstStructure *s = gst_caps_get_structure (caps, 0);
  GstDshowVideoDec *vdec = (GstDshowVideoDec *) gst_pad_get_parent (pad);
  GstDshowVideoDecClass *klass =
      (GstDshowVideoDecClass *) G_OBJECT_GET_CLASS (vdec);
  GstBuffer *extradata = NULL;
  const GValue *v = NULL;
  guint size = 0;
  GstCaps *caps_out;
  AM_MEDIA_TYPE output_mediatype, input_mediatype;
  VIDEOINFOHEADER *input_vheader = NULL, *output_vheader = NULL;
  CComPtr<IPin> output_pin;
  CComPtr<IPin> input_pin;
  IBaseFilter *srcfilter = NULL;
  IBaseFilter *sinkfilter = NULL;
  const GValue *fps, *par;

  /* read data */
  if (!gst_structure_get_int (s, "width", &vdec->width) ||
      !gst_structure_get_int (s, "height", &vdec->height)) {
    GST_ELEMENT_ERROR (vdec, CORE, NEGOTIATION,
        ("error getting video width or height from caps"), (NULL));
    goto end;
  }
  fps = gst_structure_get_value (s, "framerate");
  if (fps) {
    vdec->fps_n = gst_value_get_fraction_numerator (fps);
    vdec->fps_d = gst_value_get_fraction_denominator (fps);
  } else {
    /* Invent a sane default framerate; the timestamps matter
     * more anyway. */
    vdec->fps_n = 25;
    vdec->fps_d = 1;
  }

  par = gst_structure_get_value (s, "pixel-aspect-ratio");
  if (par) {
    vdec->par_n = gst_value_get_fraction_numerator (par);
    vdec->par_d = gst_value_get_fraction_denominator (par);
  } else {
    vdec->par_n = vdec->par_d = 1;
  }

  if ((v = gst_structure_get_value (s, "codec_data")))
    extradata = gst_value_get_buffer (v);

  /* define the input type format */
  memset (&input_mediatype, 0, sizeof (AM_MEDIA_TYPE));
  input_mediatype.majortype = klass->entry->input_majortype;
  input_mediatype.subtype = klass->entry->input_subtype;
  input_mediatype.bFixedSizeSamples = FALSE;
  input_mediatype.bTemporalCompression = TRUE;

  if (strstr (klass->entry->sinkcaps, "video/mpeg, mpegversion= (int) 1")) {
    size =
        sizeof (MPEG1VIDEOINFO) + (extradata ? GST_BUFFER_SIZE (extradata) -
        1 : 0);
    input_vheader = (VIDEOINFOHEADER *)g_malloc0 (size);

    input_vheader->bmiHeader.biSize = sizeof (BITMAPINFOHEADER);
    if (extradata) {
      MPEG1VIDEOINFO *mpeg_info = (MPEG1VIDEOINFO *) input_vheader;

      memcpy (mpeg_info->bSequenceHeader,
          GST_BUFFER_DATA (extradata), GST_BUFFER_SIZE (extradata));
      mpeg_info->cbSequenceHeader = GST_BUFFER_SIZE (extradata);
    }
    input_mediatype.formattype = FORMAT_MPEGVideo;
  } else {
    size =
        sizeof (VIDEOINFOHEADER) +
        (extradata ? GST_BUFFER_SIZE (extradata) : 0);
    input_vheader = (VIDEOINFOHEADER *)g_malloc0 (size);

    input_vheader->bmiHeader.biSize = sizeof (BITMAPINFOHEADER);
    if (extradata) {            /* Codec data is appended after our header */
      memcpy (((guchar *) input_vheader) + sizeof (VIDEOINFOHEADER),
          GST_BUFFER_DATA (extradata), GST_BUFFER_SIZE (extradata));
      input_vheader->bmiHeader.biSize += GST_BUFFER_SIZE (extradata);
    }
    input_mediatype.formattype = FORMAT_VideoInfo;
  }
  input_vheader->rcSource.top = input_vheader->rcSource.left = 0;
  input_vheader->rcSource.right = vdec->width;
  input_vheader->rcSource.bottom = vdec->height;
  input_vheader->rcTarget = input_vheader->rcSource;
  input_vheader->bmiHeader.biWidth = vdec->width;
  input_vheader->bmiHeader.biHeight = vdec->height;
  input_vheader->bmiHeader.biPlanes = 1;
  input_vheader->bmiHeader.biBitCount = 16;
  input_vheader->bmiHeader.biCompression = klass->entry->format;
  input_vheader->bmiHeader.biSizeImage =
      (vdec->width * vdec->height) * (input_vheader->bmiHeader.biBitCount / 8);

  input_mediatype.cbFormat = size;
  input_mediatype.pbFormat = (BYTE *) input_vheader;
  input_mediatype.lSampleSize = input_vheader->bmiHeader.biSizeImage;

  vdec->fakesrc->GetOutputPin()->SetMediaType(&input_mediatype);

  /* set the sample size for fakesrc filter to the output buffer size */
  vdec->fakesrc->GetOutputPin()->SetSampleSize(input_mediatype.lSampleSize);

  /* connect our fake src to decoder */
  hres = vdec->fakesrc->QueryInterface(IID_IBaseFilter,
      (void **) &srcfilter);
  if (FAILED (hres)) {
    GST_ELEMENT_ERROR (vdec, CORE, NEGOTIATION,
      ("Can't QT fakesrc to IBaseFilter: %x", hres), (NULL));
    goto end;
  }

  output_pin = gst_dshow_get_pin_from_filter (srcfilter, PINDIR_OUTPUT);
  if (!output_pin) {
    GST_ELEMENT_ERROR (vdec, CORE, NEGOTIATION,
        ("Can't get output pin from our directshow fakesrc filter"), (NULL));
    goto end;
  }
  input_pin = gst_dshow_get_pin_from_filter (vdec->decfilter, PINDIR_INPUT);
  if (!input_pin) {
    GST_ELEMENT_ERROR (vdec, CORE, NEGOTIATION,
        ("Can't get input pin from decoder filter"), (NULL));
    goto end;
  }

  hres = vdec->filtergraph->ConnectDirect (output_pin, input_pin, NULL);
  if (hres != S_OK) {
    GST_ELEMENT_ERROR (vdec, CORE, NEGOTIATION,
        ("Can't connect fakesrc with decoder (error=%x)", hres), (NULL));
    goto end;
  }

  /* get decoder output video format */
  if (!gst_dshowvideodec_get_filter_output_format (vdec,
          klass->entry->output_subtype, &output_vheader, &size)) {
    GST_ELEMENT_ERROR (vdec, CORE, NEGOTIATION,
        ("Can't get decoder output video format"), (NULL));
    goto end;
  }

  memset (&output_mediatype, 0, sizeof (AM_MEDIA_TYPE));
  output_mediatype.majortype = klass->entry->output_majortype;
  output_mediatype.subtype = klass->entry->output_subtype;
  output_mediatype.bFixedSizeSamples = TRUE;
  output_mediatype.bTemporalCompression = FALSE;
  output_mediatype.lSampleSize = output_vheader->bmiHeader.biSizeImage;
  output_mediatype.formattype = FORMAT_VideoInfo;
  output_mediatype.cbFormat = size;
  output_mediatype.pbFormat = (BYTE *) output_vheader;

  vdec->fakesink->SetMediaType (&output_mediatype);

  /* connect decoder to our fake sink */
  output_pin = gst_dshow_get_pin_from_filter (vdec->decfilter, PINDIR_OUTPUT);
  if (!output_pin) {
    GST_ELEMENT_ERROR (vdec, CORE, NEGOTIATION,
        ("Can't get output pin from our decoder filter"), (NULL));
    goto end;
  }

  hres = vdec->fakesink->QueryInterface(IID_IBaseFilter,
      (void **) &sinkfilter);
  if (FAILED (hres)) {
    GST_ELEMENT_ERROR (vdec, CORE, NEGOTIATION,
      ("Can't QT fakesink to IBaseFilter: %x", hres), (NULL));
    goto end;
  }

  input_pin = gst_dshow_get_pin_from_filter (sinkfilter, PINDIR_INPUT);
  if (!input_pin) {
    GST_ELEMENT_ERROR (vdec, CORE, NEGOTIATION,
        ("Can't get input pin from our directshow fakesink filter"), (NULL));
    goto end;
  }

  hres = vdec->filtergraph->ConnectDirect(output_pin, input_pin,
      &output_mediatype);
  if (hres != S_OK) {
    GST_ELEMENT_ERROR (vdec, CORE, NEGOTIATION,
        ("Can't connect decoder with fakesink (error=%x)", hres), (NULL));
    goto end;
  }

  /* negotiate output */
  caps_out = gst_caps_from_string (klass->entry->srccaps);
  gst_caps_set_simple (caps_out,
      "width", G_TYPE_INT, vdec->width,
      "height", G_TYPE_INT, vdec->height, NULL);

  if (vdec->fps_n && vdec->fps_d) {
      gst_caps_set_simple (caps_out,
          "framerate", GST_TYPE_FRACTION, vdec->fps_n, vdec->fps_d, NULL);
  }

  gst_caps_set_simple (caps_out, 
      "pixel-aspect-ratio", GST_TYPE_FRACTION, vdec->par_n, vdec->par_d, NULL);

  if (!gst_pad_set_caps (vdec->srcpad, caps_out)) {
    gst_caps_unref (caps_out);
    GST_ELEMENT_ERROR (vdec, CORE, NEGOTIATION,
        ("Failed to negotiate output"), (NULL));
    goto end;
  }
  gst_caps_unref (caps_out);

  hres = vdec->mediafilter->Run (-1);
  if (hres != S_OK) {
    GST_ELEMENT_ERROR (vdec, CORE, NEGOTIATION,
        ("Can't run the directshow graph (error=%d)", hres), (NULL));
    goto end;
  }

  ret = TRUE;
end:
  gst_object_unref (vdec);
  if (input_vheader)
    g_free (input_vheader);
  if (srcfilter)
    srcfilter->Release();
  if (sinkfilter)
    sinkfilter->Release();
  return ret;
}
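
The input_vheader sizing above packs the fixed header and the trailing codec data into one allocation; a generic sketch of that layout, with my_header_t as a hypothetical stand-in for VIDEOINFOHEADER:

#include <stdlib.h>
#include <string.h>

/* hypothetical stand-in for VIDEOINFOHEADER */
typedef struct { unsigned bi_size; /* ... */ } my_header_t;

static my_header_t *
alloc_header_with_extradata (const unsigned char *extra, size_t extra_size,
    size_t * total_size)
{
  /* one block: fixed header immediately followed by the codec data */
  size_t size = sizeof (my_header_t) + extra_size;
  my_header_t *hdr = calloc (1, size);

  if (!hdr)
    return NULL;
  hdr->bi_size = (unsigned) size;
  if (extra_size)
    memcpy ((unsigned char *) hdr + sizeof (my_header_t), extra, extra_size);
  *total_size = size;
  return hdr;
}
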
bool GstEnginePipeline::Init() {
  // Here we create all the parts of the gstreamer pipeline - from the source
  // to the sink.  The parts of the pipeline are split up into bins:
  //   uri decode bin -> audio bin
  // The uri decode bin is a gstreamer builtin that automatically picks the
  // right type of source and decoder for the URI.

  // The audio bin gets created here and contains:
  //   queue ! audioconvert ! <caps32>
  //         ! ( rgvolume ! rglimiter ! audioconvert2 ) ! tee
  // rgvolume and rglimiter are only created when replaygain is enabled.

  // After the tee the pipeline splits.  One split is converted to 16-bit int
  // samples for the scope, the other is kept as float32 and sent to the
  // speaker.
  //   tee1 ! probe_queue ! probe_converter ! <caps16> ! probe_sink
  //   tee2 ! audio_queue ! equalizer_preamp ! equalizer ! volume ! audioscale
  //        ! convert ! audiosink

  // Audio bin
  audiobin_ = gst_bin_new("audiobin");
  gst_bin_add(GST_BIN(pipeline_), audiobin_);

  // Create the sink
  if (!(audiosink_ = engine_->CreateElement(sink_, audiobin_))) return false;

  if (g_object_class_find_property(G_OBJECT_GET_CLASS(audiosink_), "device") &&
      !device_.toString().isEmpty()) {
    switch (device_.type()) {
      case QVariant::Int:
        g_object_set(G_OBJECT(audiosink_),
                     "device", device_.toInt(),
                     nullptr);
        break;
      case QVariant::String:
        g_object_set(G_OBJECT(audiosink_),
                     "device", device_.toString().toUtf8().constData(),
                     nullptr);
        break;

      #ifdef Q_OS_WIN32
      case QVariant::ByteArray: {
        GUID guid = QUuid(device_.toByteArray());
        g_object_set(G_OBJECT(audiosink_),
                     "device", &guid,
                     nullptr);
        break;
      }
      #endif  // Q_OS_WIN32

      default:
        qLog(Warning) << "Unknown device type" << device_;
        break;
    }
  }

  // Create all the other elements
  GstElement* tee, *probe_queue, *probe_converter, *probe_sink, *audio_queue,
      *convert;

  queue_ = engine_->CreateElement("queue2", audiobin_);
  audioconvert_ = engine_->CreateElement("audioconvert", audiobin_);
  tee = engine_->CreateElement("tee", audiobin_);

  probe_queue = engine_->CreateElement("queue", audiobin_);
  probe_converter = engine_->CreateElement("audioconvert", audiobin_);
  probe_sink = engine_->CreateElement("fakesink", audiobin_);

  audio_queue = engine_->CreateElement("queue", audiobin_);
  equalizer_preamp_ = engine_->CreateElement("volume", audiobin_);
  equalizer_ = engine_->CreateElement("equalizer-nbands", audiobin_);
  stereo_panorama_ = engine_->CreateElement("audiopanorama", audiobin_);
  volume_ = engine_->CreateElement("volume", audiobin_);
  audioscale_ = engine_->CreateElement("audioresample", audiobin_);
  convert = engine_->CreateElement("audioconvert", audiobin_);

  if (!queue_ || !audioconvert_ || !tee || !probe_queue || !probe_converter ||
      !probe_sink || !audio_queue || !equalizer_preamp_ || !equalizer_ ||
      !stereo_panorama_ || !volume_ || !audioscale_ || !convert) {
    return false;
  }

  // Create the replaygain elements if it's enabled.  event_probe is the
  // audioconvert element we attach the probe to, which will change depending
  // on whether replaygain is enabled.  convert_sink is the element after the
  // first audioconvert, which again will change.
  GstElement* event_probe = audioconvert_;
  GstElement* convert_sink = tee;

  if (rg_enabled_) {
    rgvolume_ = engine_->CreateElement("rgvolume", audiobin_);
    rglimiter_ = engine_->CreateElement("rglimiter", audiobin_);
    audioconvert2_ = engine_->CreateElement("audioconvert", audiobin_);
    event_probe = audioconvert2_;
    convert_sink = rgvolume_;

    if (!rgvolume_ || !rglimiter_ || !audioconvert2_) {
      return false;
    }

    // Set replaygain settings
    g_object_set(G_OBJECT(rgvolume_), "album-mode", rg_mode_, nullptr);
    g_object_set(G_OBJECT(rgvolume_), "pre-amp", double(rg_preamp_), nullptr);
    g_object_set(G_OBJECT(rglimiter_), "enabled", int(rg_compression_),
                 nullptr);
  }

  // Create a pad on the outside of the audiobin and connect it to the pad of
  // the first element.
  GstPad* pad = gst_element_get_static_pad(queue_, "sink");
  gst_element_add_pad(audiobin_, gst_ghost_pad_new("sink", pad));
  gst_object_unref(pad);

  // Add a data probe on the src pad of the audioconvert element for our scope.
  // We do it here because we want pre-equalized and pre-volume samples
  // so that our visualizations are not affected by them.
  pad = gst_element_get_static_pad(event_probe, "src");
  gst_pad_add_event_probe(pad, G_CALLBACK(EventHandoffCallback), this);
  gst_object_unref(pad);

  // Configure the fakesink properly
  g_object_set(G_OBJECT(probe_sink), "sync", TRUE, nullptr);

  // Set the equalizer bands
  g_object_set(G_OBJECT(equalizer_), "num-bands", 10, nullptr);

  int last_band_frequency = 0;
  for (int i = 0; i < kEqBandCount; ++i) {
    GstObject* band =
        gst_child_proxy_get_child_by_index(GST_CHILD_PROXY(equalizer_), i);

    const float frequency = kEqBandFrequencies[i];
    const float bandwidth = frequency - last_band_frequency;
    last_band_frequency = frequency;

    g_object_set(G_OBJECT(band), "freq", frequency, "bandwidth", bandwidth,
                 "gain", 0.0f, nullptr);
    g_object_unref(G_OBJECT(band));
  }

  // Set the stereo balance.
  g_object_set(G_OBJECT(stereo_panorama_), "panorama", stereo_balance_,
               nullptr);

  // Set the buffer duration.  We set this on this queue instead of the
  // decode bin (in ReplaceDecodeBin()) because setting it on the decode bin
  // only affects network sources.
  // Disable the default buffer and byte limits, so we only buffer based on
  // time.
  g_object_set(G_OBJECT(queue_), "max-size-buffers", 0, nullptr);
  g_object_set(G_OBJECT(queue_), "max-size-bytes", 0, nullptr);
  g_object_set(G_OBJECT(queue_), "max-size-time", buffer_duration_nanosec_,
               nullptr);
  g_object_set(G_OBJECT(queue_), "low-percent", buffer_min_fill_, nullptr);

  if (buffer_duration_nanosec_ > 0) {
    g_object_set(G_OBJECT(queue_), "use-buffering", true, nullptr);
  }

  gst_element_link(queue_, audioconvert_);

  // Create the caps to put in each path in the tee.  The scope path gets 16-bit
  // ints and the audiosink path gets float32.
  GstCaps* caps16 =
      gst_caps_new_simple("audio/x-raw-int", "width", G_TYPE_INT, 16, "signed",
                          G_TYPE_BOOLEAN, true, nullptr);
  GstCaps* caps32 = gst_caps_new_simple("audio/x-raw-float", "width",
                                        G_TYPE_INT, 32, nullptr);
  if (mono_playback_) {
    gst_caps_set_simple(caps32, "channels", G_TYPE_INT, 1, nullptr);
  }

  // Link the elements with special caps
  gst_element_link_filtered(probe_converter, probe_sink, caps16);
  gst_element_link_filtered(audioconvert_, convert_sink, caps32);
  gst_caps_unref(caps16);
  gst_caps_unref(caps32);

  // Link the outputs of tee to the queues on each path.
  gst_pad_link(gst_element_get_request_pad(tee, "src%d"),
               gst_element_get_static_pad(probe_queue, "sink"));
  gst_pad_link(gst_element_get_request_pad(tee, "src%d"),
               gst_element_get_static_pad(audio_queue, "sink"));

  // Link replaygain elements if enabled.
  if (rg_enabled_) {
    gst_element_link_many(rgvolume_, rglimiter_, audioconvert2_, tee, nullptr);
  }

  // Link everything else.
  gst_element_link(probe_queue, probe_converter);
  gst_element_link_many(audio_queue, equalizer_preamp_, equalizer_,
                        stereo_panorama_, volume_, audioscale_, convert,
                        audiosink_, nullptr);

  // Add probes and handlers.
  gst_pad_add_buffer_probe(gst_element_get_static_pad(probe_converter, "src"),
                           G_CALLBACK(HandoffCallback), this);
  gst_bus_set_sync_handler(gst_pipeline_get_bus(GST_PIPELINE(pipeline_)),
                           BusCallbackSync, this);
  bus_cb_id_ = gst_bus_add_watch(gst_pipeline_get_bus(GST_PIPELINE(pipeline_)),
                                 BusCallback, this);

  MaybeLinkDecodeToAudio();

  return true;
}
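
The comments at the top of Init() describe the audiobin topology; as a rough cross-check, here is a hedged gst_parse_launch() sketch of the same shape (equalizer and replaygain stages omitted, sink and URI hardcoded for the example):

#include <gst/gst.h>

int
main (int argc, char **argv)
{
  GError *error = NULL;
  GstElement *pipeline;

  gst_init (&argc, &argv);

  /* rough equivalent of the audiobin wiring: uridecodebin feeding a tee,
   * one branch for the scope probe, one for playback */
  pipeline = gst_parse_launch (
      "uridecodebin uri=file:///tmp/test.ogg ! queue2 ! audioconvert ! tee name=t "
      "t. ! queue ! audioconvert ! fakesink sync=true "
      "t. ! queue ! volume ! audioresample ! audioconvert ! autoaudiosink",
      &error);
  if (!pipeline) {
    g_printerr ("parse error: %s\n", error->message);
    g_clear_error (&error);
    return 1;
  }

  gst_element_set_state (pipeline, GST_STATE_PLAYING);
  /* ... run a main loop here, then clean up ... */
  gst_element_set_state (pipeline, GST_STATE_NULL);
  gst_object_unref (pipeline);
  return 0;
}
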
/* This function normalizes all of the cached r,g,b data and 
 * finally pushes a monster buffer with all of our output.
 */
static void 
gst_moodbar_finish (GstMoodbar *mood)
{
  GstBuffer *buf;
  guchar *data;
  guint line;
  guint output_width;

  if (mood->max_width == 0
        || mood->numframes <= mood->max_width)
    output_width = mood->numframes;
  else
    output_width = mood->max_width;

  normalize (mood->r, mood->numframes);
  normalize (mood->g, mood->numframes);
  normalize (mood->b, mood->numframes);

  buf = gst_buffer_new_and_alloc (output_width * mood->height * 3 *
      sizeof (guchar));
  if (!buf)
    return;
  /* Don't set the timestamp, duration, etc. since it's irrelevant */
  GST_BUFFER_OFFSET (buf) = 0;
  data = (guchar *) GST_BUFFER_DATA (buf);

  gdouble r, g, b;
  guint i, j, n;
  guint start, end;

  for (line = 0; line < mood->height; ++line) {
    for (i = 0; i < output_width; ++i) {
      r = 0.f;
      g = 0.f;
      b = 0.f;
      start = i * mood->numframes / output_width;
      end = (i + 1) * mood->numframes / output_width;
      if (start == end)
        end = start + 1;

      for (j = start; j < end; j++) {
        r += mood->r[j] * 255.f;
        g += mood->g[j] * 255.f;
        b += mood->b[j] * 255.f;
      }

      n = end - start;

      *(data++) = (guchar) (r / ((gdouble) n));
      *(data++) = (guchar) (g / ((gdouble) n));
      *(data++) = (guchar) (b / ((gdouble) n));
    }
  }

  { /* Now we (finally) know the width of the image we're pushing */
    GstCaps *padcaps = gst_pad_get_caps (mood->srcpad);
    GstCaps *caps = gst_caps_copy (padcaps);
    gboolean res;

    gst_caps_unref (padcaps);
    gst_caps_set_simple (caps, "width", G_TYPE_INT, output_width,
        "height", G_TYPE_INT, mood->height, NULL);
    res = gst_pad_set_caps (mood->srcpad, caps);
    if (res)
      gst_buffer_set_caps (buf, caps);
    gst_caps_unref (caps);
    if (!res) {
      gst_buffer_unref (buf);
      return;
    }
  }

  gst_pad_push (mood->srcpad, buf);
}
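
normalize() is not part of this excerpt; a plausible implementation, assuming the r/g/b arrays hold non-negative gdoubles to be scaled into [0, 1]:

#include <glib.h>

/* hypothetical: scale data so the largest entry becomes 1.0 */
static void
normalize (gdouble * data, guint n)
{
  gdouble max = 0.0;
  guint i;

  for (i = 0; i < n; i++)
    if (data[i] > max)
      max = data[i];
  if (max <= 0.0)
    return;
  for (i = 0; i < n; i++)
    data[i] /= max;
}
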
GstCaps * _owr_payload_create_rtp_caps(OwrPayload *payload)
{
    OwrPayloadPrivate *priv;
    OwrMediaType media_type;
    GstCaps *caps = NULL;
    GEnumClass *enum_class;
    GEnumValue *enum_value;
    gchar *encoding_name;
    gboolean ccm_fir = FALSE, nack_pli = FALSE;

    g_return_val_if_fail(payload, NULL);
    priv = payload->priv;

    switch (priv->codec_type) {
    case OWR_CODEC_TYPE_PCMU:
        encoding_name = "PCMU";
        break;

    case OWR_CODEC_TYPE_PCMA:
        encoding_name = "PCMA";
        break;

    case OWR_CODEC_TYPE_OPUS:
        encoding_name = "X-GST-OPUS-DRAFT-SPITTKA-00";
        break;

    case OWR_CODEC_TYPE_H264:
        encoding_name = "H264";
        break;

    case OWR_CODEC_TYPE_VP8:
        encoding_name = "VP8-DRAFT-IETF-01";
        break;

    default:
        g_return_val_if_reached(NULL);
    }

    caps = gst_caps_new_simple("application/x-rtp",
        "encoding-name", G_TYPE_STRING, encoding_name,
        "payload", G_TYPE_INT, priv->payload_type,
        "clock-rate", G_TYPE_INT, priv->clock_rate,
        NULL);

    g_object_get(payload, "media-type", &media_type, NULL);
    enum_class = G_ENUM_CLASS(g_type_class_ref(OWR_TYPE_MEDIA_TYPE));
    enum_value = g_enum_get_value(enum_class, media_type);
    if (enum_value)
        gst_caps_set_simple(caps, "media", G_TYPE_STRING, enum_value->value_nick, NULL);
    g_type_class_unref(enum_class);

    if (OWR_IS_VIDEO_PAYLOAD(payload)) {
        g_object_get(OWR_VIDEO_PAYLOAD(payload), "ccm-fir", &ccm_fir, "nack-pli", &nack_pli, NULL);
        gst_caps_set_simple(caps, "rtcp-fb-ccm-fir", G_TYPE_BOOLEAN, ccm_fir, "rtcp-fb-nack-pli",
            G_TYPE_BOOLEAN, nack_pli, NULL);
    } else if (media_type == OWR_MEDIA_TYPE_AUDIO) {
        guint channels = 0;
        if (OWR_IS_AUDIO_PAYLOAD(payload))
            g_object_get(OWR_AUDIO_PAYLOAD(payload), "channels", &channels, NULL);
        if (channels > 0) {
            gst_caps_set_simple(caps,
                "channels", G_TYPE_INT, channels,
                NULL);
        }
    }

    return caps;
}
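
As a concrete illustration, for a VP8 payload the function above would yield caps along these lines; the payload type and clock rate below are made-up example values:

#include <gst/gst.h>

int
main (int argc, char **argv)
{
  GstCaps *caps;
  gchar *s;

  gst_init (&argc, &argv);
  caps = gst_caps_new_simple ("application/x-rtp",
      "encoding-name", G_TYPE_STRING, "VP8-DRAFT-IETF-01",
      "payload", G_TYPE_INT, 96,
      "clock-rate", G_TYPE_INT, 90000,
      "media", G_TYPE_STRING, "video",
      NULL);

  s = gst_caps_to_string (caps);
  g_print ("%s\n", s);
  g_free (s);
  gst_caps_unref (caps);
  return 0;
}
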
static gboolean
gst_vaapidecode_update_src_caps (GstVaapiDecode * decode)
{
  GstVideoDecoder *const vdec = GST_VIDEO_DECODER (decode);
  GstPad *const srcpad = GST_VIDEO_DECODER_SRC_PAD (vdec);
  GstCaps *allowed;
  GstVideoCodecState *state, *ref_state;
  GstVaapiCapsFeature feature;
  GstCapsFeatures *features;
  GstCaps *allocation_caps;
  GstVideoInfo *vi;
  GstVideoFormat format;
  GstClockTime latency;
  gint fps_d, fps_n;
  guint width, height;
  const gchar *format_str, *feature_str;

  if (!decode->input_state)
    return FALSE;

  ref_state = decode->input_state;

  format = GST_VIDEO_INFO_FORMAT (&decode->decoded_info);
  allowed = gst_vaapidecode_get_allowed_srcpad_caps (decode);
  feature = gst_vaapi_find_preferred_caps_feature (srcpad, allowed, &format);
  gst_caps_unref (allowed);

  if (feature == GST_VAAPI_CAPS_FEATURE_NOT_NEGOTIATED)
    return FALSE;

#if (!USE_GLX && !USE_EGL)
  /* This is a very pathological situation. Should not happen. */
  if (feature == GST_VAAPI_CAPS_FEATURE_GL_TEXTURE_UPLOAD_META)
    return FALSE;
#endif

  if ((feature == GST_VAAPI_CAPS_FEATURE_SYSTEM_MEMORY ||
          feature == GST_VAAPI_CAPS_FEATURE_VAAPI_SURFACE)
      && format != GST_VIDEO_INFO_FORMAT (&decode->decoded_info)) {
    GST_FIXME_OBJECT (decode, "validate if driver can convert from %s to %s",
        gst_video_format_to_string (GST_VIDEO_INFO_FORMAT
            (&decode->decoded_info)), gst_video_format_to_string (format));
  }

  width = decode->display_width;
  height = decode->display_height;

  if (!width || !height) {
    width = GST_VIDEO_INFO_WIDTH (&ref_state->info);
    height = GST_VIDEO_INFO_HEIGHT (&ref_state->info);
  }

  state = gst_video_decoder_set_output_state (vdec, format, width, height,
      ref_state);
  if (!state)
    return FALSE;

  if (GST_VIDEO_INFO_WIDTH (&state->info) == 0
      || GST_VIDEO_INFO_HEIGHT (&state->info) == 0) {
    gst_video_codec_state_unref (state);
    return FALSE;
  }

  vi = &state->info;
  state->caps = gst_video_info_to_caps (vi);

  switch (feature) {
    case GST_VAAPI_CAPS_FEATURE_GL_TEXTURE_UPLOAD_META:
    case GST_VAAPI_CAPS_FEATURE_VAAPI_SURFACE:{
      GstStructure *structure = gst_caps_get_structure (state->caps, 0);

      /* Remove chroma-site and colorimetry from src caps,
       * which is unnecessary on downstream if using VASurface
       */
      gst_structure_remove_fields (structure, "chroma-site", "colorimetry",
          NULL);

      feature_str = gst_vaapi_caps_feature_to_string (feature);
      features = gst_caps_features_new (feature_str, NULL);
      gst_caps_set_features (state->caps, 0, features);
      break;
    }
    default:
      break;
  }

  /* Allocation query is different from pad's caps */
  allocation_caps = NULL;
  if (GST_VIDEO_INFO_WIDTH (&decode->decoded_info) != width
      || GST_VIDEO_INFO_HEIGHT (&decode->decoded_info) != height) {
    allocation_caps = gst_caps_copy (state->caps);
    format_str = gst_video_format_to_string (format);
    gst_caps_set_simple (allocation_caps,
        "width", G_TYPE_INT, GST_VIDEO_INFO_WIDTH (&decode->decoded_info),
        "height", G_TYPE_INT, GST_VIDEO_INFO_HEIGHT (&decode->decoded_info),
        "format", G_TYPE_STRING, format_str, NULL);
    GST_INFO_OBJECT (decode, "new alloc caps = %" GST_PTR_FORMAT,
        allocation_caps);
  }
  gst_caps_replace (&state->allocation_caps, allocation_caps);
  if (allocation_caps)
    gst_caps_unref (allocation_caps);

  GST_INFO_OBJECT (decode, "new src caps = %" GST_PTR_FORMAT, state->caps);
  gst_caps_replace (&decode->srcpad_caps, state->caps);
  gst_video_codec_state_unref (state);

  fps_n = GST_VIDEO_INFO_FPS_N (vi);
  fps_d = GST_VIDEO_INFO_FPS_D (vi);
  if (fps_n <= 0 || fps_d <= 0) {
    GST_DEBUG_OBJECT (decode, "forcing 25/1 framerate for latency calculation");
    fps_n = 25;
    fps_d = 1;
  }

  /* For parsing/preparation purposes we'd need at least 1 frame
   * latency in general, with perfectly known unit boundaries (NALU,
   * AU), and up to 2 frames when we need to wait for the second frame
   * start to determine the first frame is complete */
  latency = gst_util_uint64_scale (2 * GST_SECOND, fps_d, fps_n);
  gst_video_decoder_set_latency (vdec, latency, latency);

  return TRUE;
}
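
The two-frame latency at the end is easy to sanity-check numerically; at 30/1 fps it comes out to about 66.7 ms:

#include <gst/gst.h>

int
main (void)
{
  gint fps_n = 30, fps_d = 1;   /* example framerate */
  GstClockTime latency = gst_util_uint64_scale (2 * GST_SECOND, fps_d, fps_n);

  /* prints 0:00:00.066666666 for 30/1 */
  g_print ("latency = %" GST_TIME_FORMAT "\n", GST_TIME_ARGS (latency));
  return 0;
}
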
static void
gst_h264_parse_update_src_caps (GstH264Parse * h264parse, GstCaps * caps)
{
  GstH264ParamsSPS *sps;
  GstCaps *sink_caps;
  gboolean modified = FALSE;
  GstBuffer *buf = NULL;

  if (G_UNLIKELY (!GST_PAD_CAPS (GST_BASE_PARSE_SRC_PAD (h264parse))))
    modified = TRUE;
  else if (G_UNLIKELY (!h264parse->update_caps))
    return;

  /* if this is being called from the first _setcaps call, caps on the sinkpad
   * aren't set yet and so they need to be passed as an argument */
  if (caps)
    sink_caps = caps;
  else
    sink_caps = GST_PAD_CAPS (GST_BASE_PARSE_SINK_PAD (h264parse));

  /* carry over input caps as much as possible; override with our own stuff */
  if (sink_caps)
    gst_caps_ref (sink_caps);
  else
    sink_caps = gst_caps_new_simple ("video/x-h264", NULL);

  sps = h264parse->params->sps;
  GST_DEBUG_OBJECT (h264parse, "sps: %p", sps);

  /* only codec-data for nice-and-clean au aligned packetized avc format */
  if (h264parse->format == GST_H264_PARSE_FORMAT_AVC &&
      h264parse->align == GST_H264_PARSE_ALIGN_AU) {
    buf = gst_h264_parse_make_codec_data (h264parse);
    if (buf && h264parse->codec_data) {
      if (GST_BUFFER_SIZE (buf) != GST_BUFFER_SIZE (h264parse->codec_data) ||
          memcmp (GST_BUFFER_DATA (buf),
              GST_BUFFER_DATA (h264parse->codec_data), GST_BUFFER_SIZE (buf)))
        modified = TRUE;
    } else {
      if (h264parse->codec_data)
        buf = gst_buffer_ref (h264parse->codec_data);
      modified = TRUE;
    }
  }

  caps = NULL;
  if (G_UNLIKELY (!sps)) {
    caps = gst_caps_copy (sink_caps);
  } else if (G_UNLIKELY (h264parse->width != sps->width ||
          h264parse->height != sps->height || h264parse->fps_num != sps->fps_num
          || h264parse->fps_den != sps->fps_den || modified)) {
    caps = gst_caps_copy (sink_caps);
    /* sps should give this */
    gst_caps_set_simple (caps, "width", G_TYPE_INT, sps->width,
        "height", G_TYPE_INT, sps->height, NULL);
    h264parse->height = sps->height;
    h264parse->width = sps->width;
    /* but not necessarily or reliably this */
    if ((!h264parse->fps_num || !h264parse->fps_den) &&
        sps->fps_num > 0 && sps->fps_den > 0) {
      gst_caps_set_simple (caps, "framerate",
          GST_TYPE_FRACTION, sps->fps_num, sps->fps_den, NULL);
      h264parse->fps_num = sps->fps_num;
      h264parse->fps_den = sps->fps_den;
      gst_base_parse_set_frame_rate (GST_BASE_PARSE (h264parse),
          h264parse->fps_num, h264parse->fps_den, 0, 0);
    }
  }

  if (caps) {
    gst_caps_set_simple (caps, "parsed", G_TYPE_BOOLEAN, TRUE,
        "stream-format", G_TYPE_STRING,
        gst_h264_parse_get_string (h264parse, TRUE, h264parse->format),
        "alignment", G_TYPE_STRING,
        gst_h264_parse_get_string (h264parse, FALSE, h264parse->align), NULL);
    if (buf) {
      gst_caps_set_simple (caps, "codec_data", GST_TYPE_BUFFER, buf, NULL);
      gst_buffer_replace (&h264parse->codec_data, buf);
      gst_buffer_unref (buf);
      buf = NULL;
    } else {
      GstStructure *s;
      /* remove any left-over codec-data hanging around */
      s = gst_caps_get_structure (caps, 0);
      gst_structure_remove_field (s, "codec_data");
    }
    gst_pad_set_caps (GST_BASE_PARSE_SRC_PAD (h264parse), caps);
    gst_caps_unref (caps);
  }

  gst_caps_unref (sink_caps);
  if (buf)
    gst_buffer_unref (buf);
}
static void
gst_mpegv_parse_update_src_caps (GstMpegvParse * mpvparse)
{
  GstCaps *caps = NULL;

  /* only update if no src caps yet or explicitly triggered */
  if (G_LIKELY (gst_pad_has_current_caps (GST_BASE_PARSE_SRC_PAD (mpvparse)) &&
          !mpvparse->update_caps))
    return;

  /* carry over input caps as much as possible; override with our own stuff */
  caps = gst_pad_get_current_caps (GST_BASE_PARSE_SINK_PAD (mpvparse));
  if (caps) {
    caps = gst_caps_make_writable (caps);
  } else {
    caps = gst_caps_new_empty_simple ("video/mpeg");
  }

  /* typically we don't output buffers until we have properly parsed some
   * config data, so we should at least know about version.
   * If not, it means it has been requested not to drop data, and
   * upstream and/or app must know what they are doing ... */
  gst_caps_set_simple (caps,
      "mpegversion", G_TYPE_INT, (mpvparse->config_flags & FLAG_MPEG2) ? 2 : 1,
      NULL);

  gst_caps_set_simple (caps, "systemstream", G_TYPE_BOOLEAN, FALSE,
      "parsed", G_TYPE_BOOLEAN, TRUE, NULL);

  if (mpvparse->sequencehdr.width > 0 && mpvparse->sequencehdr.height > 0) {
    gst_caps_set_simple (caps, "width", G_TYPE_INT, mpvparse->sequencehdr.width,
        "height", G_TYPE_INT, mpvparse->sequencehdr.height, NULL);
  }

  /* perhaps we have a framerate */
  if (mpvparse->fps_num > 0 && mpvparse->fps_den > 0) {
    gint fps_num = mpvparse->fps_num;
    gint fps_den = mpvparse->fps_den;
    GstClockTime latency = gst_util_uint64_scale (GST_SECOND, fps_den, fps_num);

    gst_caps_set_simple (caps, "framerate",
        GST_TYPE_FRACTION, fps_num, fps_den, NULL);
    gst_base_parse_set_frame_rate (GST_BASE_PARSE (mpvparse),
        fps_num, fps_den, 0, 0);
    gst_base_parse_set_latency (GST_BASE_PARSE (mpvparse), latency, latency);
  }

  /* or pixel-aspect-ratio */
  if (mpvparse->sequencehdr.par_w > 0 && mpvparse->sequencehdr.par_h > 0) {
    gst_caps_set_simple (caps, "pixel-aspect-ratio", GST_TYPE_FRACTION,
        mpvparse->sequencehdr.par_w, mpvparse->sequencehdr.par_h, NULL);
  }

  if (mpvparse->config != NULL) {
    gst_caps_set_simple (caps, "codec_data",
        GST_TYPE_BUFFER, mpvparse->config, NULL);
  }

  if (mpvparse->config_flags & FLAG_SEQUENCE_EXT) {
    const guint profile_c = mpvparse->sequenceext.profile;
    const guint level_c = mpvparse->sequenceext.level;
    const gchar *profile = NULL, *level = NULL;
    /*
     * Profile indication - 1 => High, 2 => Spatially Scalable,
     *                      3 => SNR Scalable, 4 => Main, 5 => Simple
     * 4:2:2 and Multi-view have profile = 0, with the escape bit set to 1
     */
    const gchar *const profiles[] =
        { "high", "spatial", "snr", "main", "simple" };
    /*
     * Level indication - 4 => High, 6 => High-1440, 8 => Main, 10 => Low,
     *                    except in the case of profile = 0
     */
    const gchar *const levels[] = { "high", "high-1440", "main", "low" };

    if (profile_c > 0 && profile_c < 6)
      profile = profiles[profile_c - 1];

    if ((level_c > 3) && (level_c < 11) && (level_c % 2 == 0))
      level = levels[(level_c >> 1) - 2];

    if (profile_c == 8) {
      /* Non-hierarchical profile; fall-throughs are intentional, and the
       * !level guards keep a later case from clobbering an earlier
       * assignment */
      switch (level_c) {
        case 2:
          level = levels[0];
        case 5:
          if (!level)
            level = levels[2];
          profile = "4:2:2";
          break;
        case 10:
          level = levels[0];
        case 11:
          if (!level)
            level = levels[1];
        case 13:
          if (!level)
            level = levels[2];
        case 14:
          if (!level)
            level = levels[3];
          profile = "multiview";
          break;
        default:
          break;
      }
    }

    /* FIXME does it make sense to expose profile/level in the caps ? */

    GST_DEBUG_OBJECT (mpvparse, "profile:'%s' level:'%s'", profile, level);

    if (profile)
      gst_caps_set_simple (caps, "profile", G_TYPE_STRING, profile, NULL);
    else
      GST_DEBUG_OBJECT (mpvparse, "Invalid profile - %u", profile_c);

    if (level)
      gst_caps_set_simple (caps, "level", G_TYPE_STRING, level, NULL);
    else
      GST_DEBUG_OBJECT (mpvparse, "Invalid level - %u", level_c);

    gst_caps_set_simple (caps, "interlace-mode",
        G_TYPE_STRING,
        (mpvparse->sequenceext.progressive ? "progressive" : "mixed"), NULL);
  }

  gst_pad_set_caps (GST_BASE_PARSE_SRC_PAD (mpvparse), caps);
  gst_caps_unref (caps);

  mpvparse->update_caps = FALSE;
}
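
With the fall-throughs guarded as above, the profile/level lookup can be spot-checked in isolation; a standalone sketch reproducing just the tables:

#include <stdio.h>

int
main (void)
{
  const char *const profiles[] = { "high", "spatial", "snr", "main", "simple" };
  const char *const levels[] = { "high", "high-1440", "main", "low" };
  unsigned profile_c = 4, level_c = 8;  /* Main profile @ Main level */
  const char *profile = NULL, *level = NULL;

  if (profile_c > 0 && profile_c < 6)
    profile = profiles[profile_c - 1];
  if (level_c > 3 && level_c < 11 && level_c % 2 == 0)
    level = levels[(level_c >> 1) - 2];

  printf ("profile=%s level=%s\n", profile, level);  /* main / main */
  return 0;
}
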
static void
test_basic (const gchar * elem_name, int count, check_cb cb)
{
  GstElement *rtpmux = NULL;
  GstPad *reqpad1 = NULL;
  GstPad *reqpad2 = NULL;
  GstPad *src1 = NULL;
  GstPad *src2 = NULL;
  GstPad *sink = NULL;
  GstBuffer *inbuf = NULL;
  GstCaps *src1caps = NULL;
  GstCaps *src2caps = NULL;
  GstCaps *sinkcaps = NULL;
  GstCaps *caps;
  int i;

  rtpmux = gst_check_setup_element (elem_name);

  reqpad1 = gst_element_get_request_pad (rtpmux, "sink_1");
  fail_unless (reqpad1 != NULL);
  reqpad2 = gst_element_get_request_pad (rtpmux, "sink_2");
  fail_unless (reqpad2 != NULL);
  sink = gst_check_setup_sink_pad_by_name (rtpmux, &sinktemplate, "src");

  src1 = gst_pad_new_from_static_template (&srctemplate, "src");
  src2 = gst_pad_new_from_static_template (&srctemplate, "src");
  fail_unless (gst_pad_link (src1, reqpad1) == GST_PAD_LINK_OK);
  fail_unless (gst_pad_link (src2, reqpad2) == GST_PAD_LINK_OK);
  gst_pad_set_getcaps_function (src1, getcaps_func);
  gst_pad_set_getcaps_function (src2, getcaps_func);
  gst_pad_set_getcaps_function (sink, getcaps_func);
  gst_pad_set_setcaps_function (sink, setcaps_func);
  g_object_set_data (G_OBJECT (src1), "caps", &src1caps);
  g_object_set_data (G_OBJECT (src2), "caps", &src2caps);
  g_object_set_data (G_OBJECT (sink), "caps", &sinkcaps);

  src1caps = gst_caps_new_simple ("application/x-rtp",
      "clock-rate", G_TYPE_INT, 1, "ssrc", G_TYPE_UINT, 11, NULL);
  src2caps = gst_caps_new_simple ("application/x-rtp",
      "clock-rate", G_TYPE_INT, 2, "ssrc", G_TYPE_UINT, 12, NULL);
  sinkcaps = gst_caps_new_simple ("application/x-rtp",
      "clock-rate", G_TYPE_INT, 3, "ssrc", G_TYPE_UINT, 13, NULL);

  caps = gst_pad_peer_get_caps (src1);
  fail_unless (gst_caps_is_empty (caps));
  gst_caps_unref (caps);

  gst_caps_set_simple (src2caps, "clock-rate", G_TYPE_INT, 3, NULL);
  caps = gst_pad_peer_get_caps (src1);
  fail_unless (gst_caps_is_equal (caps, sinkcaps));
  gst_caps_unref (caps);

  g_object_set (rtpmux, "seqnum-offset", 100, "timestamp-offset", 1000,
      "ssrc", 55, NULL);

  fail_unless (gst_element_set_state (rtpmux,
          GST_STATE_PLAYING) == GST_STATE_CHANGE_SUCCESS);
  gst_pad_set_active (sink, TRUE);
  gst_pad_set_active (src1, TRUE);
  gst_pad_set_active (src2, TRUE);

  gst_caps_set_simple (sinkcaps,
      "payload", G_TYPE_INT, 98, "seqnum-base", G_TYPE_UINT, 100,
      "clock-base", G_TYPE_UINT, 1000, "ssrc", G_TYPE_UINT, 66, NULL);
  caps = gst_caps_new_simple ("application/x-rtp",
      "payload", G_TYPE_INT, 98, "clock-rate", G_TYPE_INT, 3,
      "seqnum-base", G_TYPE_UINT, 56, "clock-base", G_TYPE_UINT, 57,
      "ssrc", G_TYPE_UINT, 66, NULL);
  fail_unless (gst_pad_set_caps (src1, caps));

  for (i = 0; i < count; i++) {
    inbuf = gst_rtp_buffer_new_allocate (10, 0, 0);
    gst_buffer_set_caps (inbuf, caps);
    gst_rtp_buffer_set_version (inbuf, 2);
    gst_rtp_buffer_set_payload_type (inbuf, 98);
    gst_rtp_buffer_set_ssrc (inbuf, 44);
    gst_rtp_buffer_set_timestamp (inbuf, 200 + i);
    gst_rtp_buffer_set_seq (inbuf, 2000 + i);
    fail_unless (gst_pad_push (src1, inbuf) == GST_FLOW_OK);

    cb (src2, i);

    g_list_foreach (buffers, (GFunc) gst_buffer_unref, NULL);
    g_list_free (buffers);
    buffers = NULL;
  }


  gst_pad_set_active (sink, FALSE);
  gst_pad_set_active (src1, FALSE);
  gst_pad_set_active (src2, FALSE);
  fail_unless (gst_element_set_state (rtpmux,
          GST_STATE_NULL) == GST_STATE_CHANGE_SUCCESS);
  gst_check_teardown_pad_by_name (rtpmux, "src");
  gst_object_unref (reqpad1);
  gst_object_unref (reqpad2);
  gst_check_teardown_pad_by_name (rtpmux, "sink_1");
  gst_check_teardown_pad_by_name (rtpmux, "sink_2");
  gst_element_release_request_pad (rtpmux, reqpad1);
  gst_element_release_request_pad (rtpmux, reqpad2);

  gst_caps_unref (caps);
  gst_caps_replace (&src1caps, NULL);
  gst_caps_replace (&src2caps, NULL);
  gst_caps_replace (&sinkcaps, NULL);

  gst_check_teardown_element (rtpmux);
}
static gboolean
gst_rtp_dtmf_src_negotiate (GstBaseSrc * basesrc)
{
  GstCaps *srccaps, *peercaps;
  GstRTPDTMFSrc *dtmfsrc = GST_RTP_DTMF_SRC (basesrc);
  gboolean ret;

  /* fill in the defaults; these properties cannot be negotiated */
  srccaps = gst_caps_new_simple ("application/x-rtp",
      "media", G_TYPE_STRING, "audio",
      "encoding-name", G_TYPE_STRING, "TELEPHONE-EVENT", NULL);

  /* the peer caps can override some of the defaults */
  peercaps = gst_pad_peer_query_caps (GST_BASE_SRC_PAD (basesrc), NULL);
  if (peercaps == NULL) {
    /* no peer caps, just add the other properties */
    gst_caps_set_simple (srccaps,
        "payload", G_TYPE_INT, dtmfsrc->pt,
        "ssrc", G_TYPE_UINT, dtmfsrc->current_ssrc,
        "timestamp-offset", G_TYPE_UINT, dtmfsrc->ts_base,
        "clock-rate", G_TYPE_INT, dtmfsrc->clock_rate,
        "seqnum-offset", G_TYPE_UINT, dtmfsrc->seqnum_base, NULL);

    GST_DEBUG_OBJECT (dtmfsrc, "no peer caps: %" GST_PTR_FORMAT, srccaps);
  } else {
    GstCaps *temp;
    GstStructure *s;
    const GValue *value;
    gint pt;
    gint clock_rate;

    /* peer provides caps we can use to fixate, intersect. This always returns a
     * writable caps. */
    temp = gst_caps_intersect (srccaps, peercaps);
    gst_caps_unref (srccaps);
    gst_caps_unref (peercaps);

    if (!temp) {
      GST_DEBUG_OBJECT (dtmfsrc, "Could not get intersection with peer caps");
      return FALSE;
    }

    if (gst_caps_is_empty (temp)) {
      GST_DEBUG_OBJECT (dtmfsrc, "Intersection with peer caps is empty");
      gst_caps_unref (temp);
      return FALSE;
    }

    /* now fixate, start by taking the first caps */
    temp = gst_caps_truncate (temp);
    temp = gst_caps_make_writable (temp);
    srccaps = temp;

    /* get first structure */
    s = gst_caps_get_structure (srccaps, 0);

    if (gst_structure_get_int (s, "payload", &pt)) {
      /* use peer pt */
      dtmfsrc->pt = pt;
      GST_LOG_OBJECT (dtmfsrc, "using peer pt %d", pt);
    } else {
      if (gst_structure_has_field (s, "payload")) {
        /* can only fixate if there is a field */
        gst_structure_fixate_field_nearest_int (s, "payload", dtmfsrc->pt);
        gst_structure_get_int (s, "payload", &pt);
        GST_LOG_OBJECT (dtmfsrc, "using peer pt %d", pt);
      } else {
        /* no pt field, use the internal pt */
        pt = dtmfsrc->pt;
        gst_structure_set (s, "payload", G_TYPE_INT, pt, NULL);
        GST_LOG_OBJECT (dtmfsrc, "using internal pt %d", pt);
      }
    }

    if (gst_structure_get_int (s, "clock-rate", &clock_rate)) {
      dtmfsrc->clock_rate = clock_rate;
      GST_LOG_OBJECT (dtmfsrc, "using clock-rate from caps %d",
          dtmfsrc->clock_rate);
    } else {
      GST_LOG_OBJECT (dtmfsrc, "using existing clock-rate %d",
          dtmfsrc->clock_rate);
    }
    gst_structure_set (s, "clock-rate", G_TYPE_INT, dtmfsrc->clock_rate, NULL);


    if (gst_structure_has_field_typed (s, "ssrc", G_TYPE_UINT)) {
      value = gst_structure_get_value (s, "ssrc");
      dtmfsrc->current_ssrc = g_value_get_uint (value);
      GST_LOG_OBJECT (dtmfsrc, "using peer ssrc %08x", dtmfsrc->current_ssrc);
    } else {
      /* FIXME, fixate_nearest_uint would be even better */
      gst_structure_set (s, "ssrc", G_TYPE_UINT, dtmfsrc->current_ssrc, NULL);
      GST_LOG_OBJECT (dtmfsrc, "using internal ssrc %08x",
          dtmfsrc->current_ssrc);
    }

    if (gst_structure_has_field_typed (s, "timestamp-offset", G_TYPE_UINT)) {
      value = gst_structure_get_value (s, "timestamp-offset");
      dtmfsrc->ts_base = g_value_get_uint (value);
      GST_LOG_OBJECT (dtmfsrc, "using peer timestamp-offset %u",
          dtmfsrc->ts_base);
    } else {
      /* FIXME, fixate_nearest_uint would be even better */
      gst_structure_set (s, "timestamp-offset", G_TYPE_UINT, dtmfsrc->ts_base,
          NULL);
      GST_LOG_OBJECT (dtmfsrc, "using internal timestamp-offset %u",
          dtmfsrc->ts_base);
    }
    if (gst_structure_has_field_typed (s, "seqnum-offset", G_TYPE_UINT)) {
      value = gst_structure_get_value (s, "seqnum-offset");
      dtmfsrc->seqnum_base = g_value_get_uint (value);
      GST_LOG_OBJECT (dtmfsrc, "using peer seqnum-offset %u",
          dtmfsrc->seqnum_base);
    } else {
      /* FIXME, fixate_nearest_uint would be even better */
      gst_structure_set (s, "seqnum-offset", G_TYPE_UINT, dtmfsrc->seqnum_base,
          NULL);
      GST_LOG_OBJECT (dtmfsrc, "using internal seqnum-offset %u",
          dtmfsrc->seqnum_base);
    }

    if (gst_structure_has_field_typed (s, "ptime", G_TYPE_UINT)) {
      value = gst_structure_get_value (s, "ptime");
      dtmfsrc->ptime = g_value_get_uint (value);
      GST_LOG_OBJECT (dtmfsrc, "using peer ptime %u", dtmfsrc->ptime);
    } else if (gst_structure_has_field_typed (s, "maxptime", G_TYPE_UINT)) {
      value = gst_structure_get_value (s, "maxptime");
      dtmfsrc->ptime = g_value_get_uint (value);
      GST_LOG_OBJECT (dtmfsrc, "using peer maxptime as ptime %u",
          dtmfsrc->ptime);
    } else {
      /* FIXME, fixate_nearest_uint would be even better */
      gst_structure_set (s, "ptime", G_TYPE_UINT, dtmfsrc->ptime, NULL);
      GST_LOG_OBJECT (dtmfsrc, "using internal ptime %u", dtmfsrc->ptime);
    }


    GST_DEBUG_OBJECT (dtmfsrc, "with peer caps: %" GST_PTR_FORMAT, srccaps);
  }

  ret = gst_pad_set_caps (GST_BASE_SRC_PAD (basesrc), srccaps);
  gst_caps_unref (srccaps);

  dtmfsrc->dirty = FALSE;

  return ret;

}
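
The payload fixation path relies on gst_structure_fixate_field_nearest_int(); a small standalone sketch of its effect on a ranged caps field (field values are illustrative):

#include <gst/gst.h>

int
main (int argc, char **argv)
{
  GstCaps *caps;
  GstStructure *s;
  gint pt = 0;
  gchar *str;

  gst_init (&argc, &argv);

  caps = gst_caps_from_string ("application/x-rtp, payload=(int)[ 96, 127 ]");
  s = gst_caps_get_structure (caps, 0);

  /* snap the range to the value closest to our preferred payload type */
  gst_structure_fixate_field_nearest_int (s, "payload", 100);
  gst_structure_get_int (s, "payload", &pt);

  str = gst_caps_to_string (caps);
  g_print ("fixated pt=%d caps=%s\n", pt, str);
  g_free (str);
  gst_caps_unref (caps);
  return 0;
}
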
static void
gst_h263_parse_set_src_caps (GstH263Parse * h263parse,
    const H263Params * params)
{
  GstStructure *st;
  GstCaps *caps, *sink_caps;
  gint fr_num, fr_denom;

  g_assert (h263parse->state == PASSTHROUGH || h263parse->state == GOT_HEADER);

  caps = GST_PAD_CAPS (GST_BASE_PARSE_SINK_PAD (h263parse));
  if (caps) {
    caps = gst_caps_copy (caps);
  } else {
    caps = gst_caps_new_simple ("video/x-h263",
        "variant", G_TYPE_STRING, "itu", NULL);
  }
  gst_caps_set_simple (caps, "parsed", G_TYPE_BOOLEAN, TRUE, NULL);

  sink_caps = GST_PAD_CAPS (GST_BASE_PARSE_SINK_PAD (h263parse));
  if (sink_caps && (st = gst_caps_get_structure (sink_caps, 0)) &&
      gst_structure_get_fraction (st, "framerate", &fr_num, &fr_denom)) {
    /* Got it in caps - nothing more to do */
    GST_DEBUG_OBJECT (h263parse, "sink caps override framerate from headers");
  } else {
    /* Caps didn't have the framerate - get it from params */
    gst_h263_parse_get_framerate (params, &fr_num, &fr_denom);
  }
  gst_caps_set_simple (caps, "framerate", GST_TYPE_FRACTION, fr_num, fr_denom,
      NULL);

  if (params->width && params->height)
    gst_caps_set_simple (caps, "width", G_TYPE_INT, params->width,
        "height", G_TYPE_INT, params->height, NULL);

  if (h263parse->state == GOT_HEADER) {
    gst_caps_set_simple (caps,
        "annex-d", G_TYPE_BOOLEAN, (params->features & H263_OPTION_UMV_MODE),
        "annex-e", G_TYPE_BOOLEAN, (params->features & H263_OPTION_SAC_MODE),
        "annex-f", G_TYPE_BOOLEAN, (params->features & H263_OPTION_AP_MODE),
        "annex-g", G_TYPE_BOOLEAN, (params->features & H263_OPTION_PB_MODE),
        "annex-i", G_TYPE_BOOLEAN, (params->features & H263_OPTION_AIC_MODE),
        "annex-j", G_TYPE_BOOLEAN, (params->features & H263_OPTION_DF_MODE),
        "annex-k", G_TYPE_BOOLEAN, (params->features & H263_OPTION_SS_MODE),
        "annex-m", G_TYPE_BOOLEAN, (params->type == PICTURE_IMPROVED_PB),
        "annex-n", G_TYPE_BOOLEAN, (params->features & H263_OPTION_RPS_MODE),
        "annex-q", G_TYPE_BOOLEAN, (params->features & H263_OPTION_RRU_MODE),
        "annex-r", G_TYPE_BOOLEAN, (params->features & H263_OPTION_ISD_MODE),
        "annex-s", G_TYPE_BOOLEAN, (params->features & H263_OPTION_AIV_MODE),
        "annex-t", G_TYPE_BOOLEAN, (params->features & H263_OPTION_MQ_MODE),
        "annex-u", G_TYPE_BOOLEAN, (params->features & H263_OPTION_ERPS_MODE),
        "annex-v", G_TYPE_BOOLEAN, (params->features & H263_OPTION_DPS_MODE),
        NULL);

    h263parse->profile = gst_h263_parse_get_profile (params);
    if (h263parse->profile != -1)
      gst_caps_set_simple (caps, "profile", G_TYPE_UINT, h263parse->profile,
          NULL);

    h263parse->level = gst_h263_parse_get_level (params, h263parse->profile,
        h263parse->bitrate, fr_num, fr_denom);
    if (h263parse->level != -1)
      gst_caps_set_simple (caps, "level", G_TYPE_UINT, h263parse->level, NULL);
  }

  gst_pad_set_caps (GST_BASE_PARSE_SRC_PAD (GST_BASE_PARSE (h263parse)), caps);
  gst_caps_unref (caps);
}
static gboolean
gst_jasper_enc_set_src_caps (GstJasperEnc * enc)
{
    GstCaps *caps;
    guint32 fourcc;
    gboolean ret;
    GstCaps *peercaps;

    peercaps = gst_pad_peer_get_caps (enc->srcpad);
    if (peercaps) {
        guint i, n;

        n = gst_caps_get_size (peercaps);
        for (i = 0; i < n; i++) {
            GstStructure *s = gst_caps_get_structure (peercaps, i);
            const gchar *name = gst_structure_get_name (s);

            if (!strcmp (name, "image/x-j2c")) {
                enc->mode = GST_JP2ENC_MODE_J2C;
                break;
            } else if (!strcmp (name, "image/x-jpc")) {
                enc->mode = GST_JP2ENC_MODE_JPC;
                break;
            } else if (!strcmp (name, "image/jp2")) {
                enc->mode = GST_JP2ENC_MODE_JP2;
                break;
            }
        }
        gst_caps_unref (peercaps);
    }

    /* enumerated colourspace */
    if (gst_video_format_is_rgb (enc->format)) {
        fourcc = GST_MAKE_FOURCC ('s', 'R', 'G', 'B');
    } else {
        fourcc = GST_MAKE_FOURCC ('s', 'Y', 'U', 'V');
    }

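    /* Build the output caps for the selected stream format */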
    switch (enc->mode) {
    case GST_JP2ENC_MODE_J2C:
        caps =
            gst_caps_new_simple ("image/x-j2c", "width", G_TYPE_INT, enc->width,
                                 "height", G_TYPE_INT, enc->height, "fourcc", GST_TYPE_FOURCC, fourcc,
                                 NULL);
        break;
    case GST_JP2ENC_MODE_JPC:
        caps =
            gst_caps_new_simple ("image/x-jpc", "width", G_TYPE_INT, enc->width,
                                 "height", G_TYPE_INT, enc->height, "fourcc", GST_TYPE_FOURCC, fourcc,
                                 NULL);
        break;
    case GST_JP2ENC_MODE_JP2:
        caps = gst_caps_new_simple ("image/jp2", "width", G_TYPE_INT, enc->width,
                                    "height", G_TYPE_INT, enc->height, "fourcc", GST_TYPE_FOURCC, fourcc,
                                    NULL);
        break;
    default:
        g_assert_not_reached ();
    }

    if (enc->fps_den > 0)
        gst_caps_set_simple (caps,
                             "framerate", GST_TYPE_FRACTION, enc->fps_num, enc->fps_den, NULL);
    if (enc->par_den > 0)
        gst_caps_set_simple (caps,
                             "pixel-aspect-ratio", GST_TYPE_FRACTION, enc->par_num, enc->par_den,
                             NULL);

    ret = gst_pad_set_caps (enc->srcpad, caps);
    gst_caps_unref (caps);

    return ret;
}
static GstFlowReturn
gst_gdk_pixbuf_flush (GstGdkPixbuf * filter)
{
    GstBuffer *outbuf;
    GdkPixbuf *pixbuf;
    int y;
    guint8 *out_pix;
    guint8 *in_pix;
    int in_rowstride;
    GstFlowReturn ret;
    GstCaps *caps = NULL;
    gint n_channels;

    pixbuf = gdk_pixbuf_loader_get_pixbuf (filter->pixbuf_loader);
    if (pixbuf == NULL)
        goto no_pixbuf;

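    /* First decoded pixbuf: take over its geometry and negotiate
     * src caps based on the number of channels */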
    if (filter->image_size == 0) {
        filter->width = gdk_pixbuf_get_width (pixbuf);
        filter->height = gdk_pixbuf_get_height (pixbuf);
        filter->rowstride = gdk_pixbuf_get_rowstride (pixbuf);
        filter->image_size = filter->rowstride * filter->height;

        n_channels = gdk_pixbuf_get_n_channels (pixbuf);
        switch (n_channels) {
        case 3:
            caps = gst_caps_from_string (GST_VIDEO_CAPS_RGB);
            break;
        case 4:
            caps = gst_caps_from_string (GST_VIDEO_CAPS_RGBA);
            break;
        default:
            goto channels_not_supported;
        }

        gst_caps_set_simple (caps,
                             "width", G_TYPE_INT, filter->width,
                             "height", G_TYPE_INT, filter->height,
                             "framerate", GST_TYPE_FRACTION, filter->framerate_numerator,
                             filter->framerate_denominator, NULL);

        GST_DEBUG ("Set size to %dx%d", filter->width, filter->height);
        gst_pad_set_caps (filter->srcpad, caps);
        gst_caps_unref (caps);
    }

    ret = gst_pad_alloc_buffer_and_set_caps (filter->srcpad,
            GST_BUFFER_OFFSET_NONE,
            filter->image_size, GST_PAD_CAPS (filter->srcpad), &outbuf);

    if (ret != GST_FLOW_OK)
        goto no_buffer;

    GST_BUFFER_TIMESTAMP (outbuf) = filter->last_timestamp;
    GST_BUFFER_DURATION (outbuf) = GST_CLOCK_TIME_NONE;

    in_pix = gdk_pixbuf_get_pixels (pixbuf);
    in_rowstride = gdk_pixbuf_get_rowstride (pixbuf);
    out_pix = GST_BUFFER_DATA (outbuf);

    /* FIXME, last line might not have rowstride pixels */
    for (y = 0; y < filter->height; y++) {
        memcpy (out_pix, in_pix, filter->rowstride);
        in_pix += in_rowstride;
        out_pix += filter->rowstride;
    }

    GST_DEBUG ("pushing... %d bytes", GST_BUFFER_SIZE (outbuf));
    ret = gst_pad_push (filter->srcpad, outbuf);

    if (ret != GST_FLOW_OK)
        GST_DEBUG_OBJECT (filter, "flow: %s", gst_flow_get_name (ret));

    return ret;

    /* ERRORS */
no_pixbuf:
    {
        GST_ELEMENT_ERROR (filter, STREAM, DECODE, (NULL), ("error getting pixbuf"));
        return GST_FLOW_ERROR;
    }
channels_not_supported:
    {
        GST_ELEMENT_ERROR (filter, STREAM, DECODE, (NULL),
                           ("%d channels not supported", n_channels));
        return GST_FLOW_ERROR;
    }
no_buffer:
    {
        GST_DEBUG ("Failed to create outbuffer - %s", gst_flow_get_name (ret));
        return ret;
    }
}
static GstFlowReturn
gst_flxdec_chain (GstPad * pad, GstObject * parent, GstBuffer * buf)
{
  GstCaps *caps;
  guint avail;
  GstFlowReturn res = GST_FLOW_OK;

  GstFlxDec *flxdec;
  FlxHeader *flxh;

  g_return_val_if_fail (buf != NULL, GST_FLOW_ERROR);
  flxdec = (GstFlxDec *) parent;
  g_return_val_if_fail (flxdec != NULL, GST_FLOW_ERROR);

  gst_adapter_push (flxdec->adapter, buf);
  avail = gst_adapter_available (flxdec->adapter);

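  /* Wait until the adapter holds the complete fixed-size FLX header
   * before parsing it */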
  if (flxdec->state == GST_FLXDEC_READ_HEADER) {
    if (avail >= FlxHeaderSize) {
      const guint8 *data = gst_adapter_map (flxdec->adapter, FlxHeaderSize);
      GstCaps *templ;

      memcpy ((gchar *) &flxdec->hdr, data, FlxHeaderSize);
      FLX_HDR_FIX_ENDIANNESS (&(flxdec->hdr));
      gst_adapter_unmap (flxdec->adapter);
      gst_adapter_flush (flxdec->adapter, FlxHeaderSize);

      flxh = &flxdec->hdr;

      /* check header */
      if (flxh->type != FLX_MAGICHDR_FLI &&
          flxh->type != FLX_MAGICHDR_FLC && flxh->type != FLX_MAGICHDR_FLX)
        goto wrong_type;

      GST_LOG ("size      :  %d", flxh->size);
      GST_LOG ("frames    :  %d", flxh->frames);
      GST_LOG ("width     :  %d", flxh->width);
      GST_LOG ("height    :  %d", flxh->height);
      GST_LOG ("depth     :  %d", flxh->depth);
      GST_LOG ("speed     :  %d", flxh->speed);

      flxdec->next_time = 0;

      if (flxh->type == FLX_MAGICHDR_FLI) {
        flxdec->frame_time = JIFFIE * flxh->speed;
      } else if (flxh->speed == 0) {
        flxdec->frame_time = GST_SECOND / 70;
      } else {
        flxdec->frame_time = flxh->speed * GST_MSECOND;
      }

      flxdec->duration = flxh->frames * flxdec->frame_time;
      GST_LOG ("duration   :  %" GST_TIME_FORMAT,
          GST_TIME_ARGS (flxdec->duration));

      templ = gst_pad_get_pad_template_caps (flxdec->srcpad);
      caps = gst_caps_copy (templ);
      gst_caps_unref (templ);
      gst_caps_set_simple (caps,
          "width", G_TYPE_INT, flxh->width,
          "height", G_TYPE_INT, flxh->height,
          "framerate", GST_TYPE_FRACTION, (gint) GST_MSECOND,
          (gint) flxdec->frame_time / 1000, NULL);

      gst_pad_set_caps (flxdec->srcpad, caps);
      gst_caps_unref (caps);

      if (flxh->depth <= 8)
        flxdec->converter =
            flx_colorspace_converter_new (flxh->width, flxh->height);

      if (flxh->type == FLX_MAGICHDR_FLC || flxh->type == FLX_MAGICHDR_FLX) {
        GST_LOG ("(FLC) aspect_dx :  %d", flxh->aspect_dx);
        GST_LOG ("(FLC) aspect_dy :  %d", flxh->aspect_dy);
        GST_LOG ("(FLC) oframe1   :  0x%08x", flxh->oframe1);
        GST_LOG ("(FLC) oframe2   :  0x%08x", flxh->oframe2);
      }

      flxdec->size = (flxh->width * flxh->height);

      /* create delta and output frame */
      flxdec->frame_data = g_malloc (flxdec->size);
      flxdec->delta_data = g_malloc (flxdec->size);

      flxdec->state = GST_FLXDEC_PLAYING;
    }
  } else if (flxdec->state == GST_FLXDEC_PLAYING) {
    GstBuffer *out;

    /* while we have enough data in the adapter */
    while (avail >= FlxFrameChunkSize && res == GST_FLOW_OK) {
      FlxFrameChunk flxfh;
      guchar *chunk;
      const guint8 *data;
      GstMapInfo map;

      chunk = NULL;
      data = gst_adapter_map (flxdec->adapter, FlxFrameChunkSize);
      memcpy (&flxfh, data, FlxFrameChunkSize);
      FLX_FRAME_CHUNK_FIX_ENDIANNESS (&flxfh);
      gst_adapter_unmap (flxdec->adapter);

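      /* Dispatch on the chunk type: frame chunks are decoded and
       * pushed downstream, anything else is skipped */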
      switch (flxfh.id) {
        case FLX_FRAME_TYPE:
          /* check if we have the complete frame */
          if (avail < flxfh.size)
            goto need_more_data;

          /* flush header */
          gst_adapter_flush (flxdec->adapter, FlxFrameChunkSize);

          chunk = gst_adapter_take (flxdec->adapter,
              flxfh.size - FlxFrameChunkSize);
          FLX_FRAME_TYPE_FIX_ENDIANNESS ((FlxFrameType *) chunk);
          if (((FlxFrameType *) chunk)->chunks == 0)
            break;

          /* create a 32-bit output frame */
          out = gst_buffer_new_and_alloc (flxdec->size * 4);

          /* decode chunks */
          flx_decode_chunks (flxdec,
              ((FlxFrameType *) chunk)->chunks,
              chunk + FlxFrameTypeSize, flxdec->frame_data);

          /* save copy of the current frame for possible delta. */
          memcpy (flxdec->delta_data, flxdec->frame_data, flxdec->size);

          gst_buffer_map (out, &map, GST_MAP_WRITE);
          /* convert current frame. */
          flx_colorspace_convert (flxdec->converter, flxdec->frame_data,
              map.data);
          gst_buffer_unmap (out, &map);

          GST_BUFFER_TIMESTAMP (out) = flxdec->next_time;
          flxdec->next_time += flxdec->frame_time;

          res = gst_pad_push (flxdec->srcpad, out);
          break;
        default:
          /* check if we have the complete frame */
          if (avail < flxfh.size)
            goto need_more_data;

          gst_adapter_flush (flxdec->adapter, flxfh.size);
          break;
      }

      g_free (chunk);

      avail = gst_adapter_available (flxdec->adapter);
    }
  }
need_more_data:
  return res;

  /* ERRORS */
wrong_type:
  {
    GST_ELEMENT_ERROR (flxdec, STREAM, WRONG_TYPE, (NULL),
        ("not a flx file (type %x)", flxh->type));
    return GST_FLOW_ERROR;
  }
}
static GstFlowReturn
gst_real_audio_demux_parse_header (GstRealAudioDemux * demux)
{
  const guint8 *data;
  gchar *codec_name = NULL;
  GstCaps *caps = NULL;
  guint avail;

  g_assert (demux->ra_version == 4 || demux->ra_version == 3);

  avail = gst_adapter_available (demux->adapter);
  if (avail < 16)
    return GST_FLOW_OK;

  if (!gst_real_audio_demux_get_data_offset_from_header (demux))
    return GST_FLOW_ERROR;      /* shouldn't happen */

  GST_DEBUG_OBJECT (demux, "data_offset  = %u", demux->data_offset);

  if (avail + 6 < demux->data_offset) {
    GST_DEBUG_OBJECT (demux, "Need %u bytes, but only %u available now",
        demux->data_offset - 6, avail);
    return GST_FLOW_OK;
  }

  data = gst_adapter_peek (demux->adapter, demux->data_offset - 6);
  g_assert (data);

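  /* Parse the version-specific header fields; v3 streams use fixed
   * parameters, v4 carries them in the header */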
  switch (demux->ra_version) {
    case 3:
      demux->fourcc = GST_RM_AUD_14_4;
      demux->packet_size = 20;
      demux->sample_rate = 8000;
      demux->channels = 1;
      demux->sample_width = 16;
      demux->flavour = 1;
      demux->leaf_size = 0;
      demux->height = 0;
      break;
    case 4:
      demux->flavour = GST_READ_UINT16_BE (data + 16);
      /* demux->frame_size = GST_READ_UINT32_BE (data + 36); */
      demux->leaf_size = GST_READ_UINT16_BE (data + 38);
      demux->height = GST_READ_UINT16_BE (data + 34);
      demux->packet_size = GST_READ_UINT32_BE (data + 18);
      demux->sample_rate = GST_READ_UINT16_BE (data + 42);
      demux->sample_width = GST_READ_UINT16_BE (data + 46);
      demux->channels = GST_READ_UINT16_BE (data + 48);
      demux->fourcc = GST_READ_UINT32_LE (data + 56);
      demux->pending_tags = gst_rm_utils_read_tags (data + 63,
          demux->data_offset - 63, gst_rm_utils_read_string8);
      break;
    default:
      g_assert_not_reached ();
#if 0
    case 5:
      demux->flavour = GST_READ_UINT16_BE (data + 16);
      /* demux->frame_size = GST_READ_UINT32_BE (data + 36); */
      demux->leaf_size = GST_READ_UINT16_BE (data + 38);
      demux->height = GST_READ_UINT16_BE (data + 34);

      demux->sample_rate = GST_READ_UINT16_BE (data + 48);
      demux->sample_width = GST_READ_UINT16_BE (data + 52);
      demux->n_channels = GST_READ_UINT16_BE (data + 54);
      demux->fourcc = RMDEMUX_FOURCC_GET (data + 60);
      break;
#endif
  }

  GST_INFO_OBJECT (demux, "packet_size  = %u", demux->packet_size);
  GST_INFO_OBJECT (demux, "sample_rate  = %u", demux->sample_rate);
  GST_INFO_OBJECT (demux, "sample_width = %u", demux->sample_width);
  GST_INFO_OBJECT (demux, "channels     = %u", demux->channels);
  GST_INFO_OBJECT (demux, "fourcc       = '%" GST_FOURCC_FORMAT "' (%08X)",
      GST_FOURCC_ARGS (demux->fourcc), demux->fourcc);

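  /* Map the codec fourcc to output caps and, where known, a byte
   * rate for duration estimation */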
  switch (demux->fourcc) {
    case GST_RM_AUD_14_4:
      caps = gst_caps_new_simple ("audio/x-pn-realaudio", "raversion",
          G_TYPE_INT, 1, NULL);
      demux->byterate_num = 1000;
      demux->byterate_denom = 1;
      break;

    case GST_RM_AUD_28_8:
      /* FIXME: needs descrambling */
      caps = gst_caps_new_simple ("audio/x-pn-realaudio", "raversion",
          G_TYPE_INT, 2, NULL);
      break;

    case GST_RM_AUD_DNET:
      caps = gst_caps_new_simple ("audio/x-ac3", "rate", G_TYPE_INT,
          demux->sample_rate, NULL);
      if (demux->packet_size == 0 || demux->sample_rate == 0)
        goto broken_file;
      demux->byterate_num = demux->packet_size * demux->sample_rate;
      demux->byterate_denom = 1536;
      break;

      /* Sipro/ACELP.NET Voice Codec (MIME unknown) */
    case GST_RM_AUD_SIPR:
      caps = gst_caps_new_simple ("audio/x-sipro", NULL);
      break;

    default:
      GST_WARNING_OBJECT (demux, "unknown fourcc %08X", demux->fourcc);
      break;
  }

  if (caps == NULL)
    goto unknown_fourcc;

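  /* These stream properties are needed by the RealAudio decoders
   * downstream */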
  gst_caps_set_simple (caps,
      "flavor", G_TYPE_INT, demux->flavour,
      "rate", G_TYPE_INT, demux->sample_rate,
      "channels", G_TYPE_INT, demux->channels,
      "width", G_TYPE_INT, demux->sample_width,
      "leaf_size", G_TYPE_INT, demux->leaf_size,
      "packet_size", G_TYPE_INT, demux->packet_size,
      "height", G_TYPE_INT, demux->height, NULL);

  GST_INFO_OBJECT (demux, "Adding source pad, caps %" GST_PTR_FORMAT, caps);
  demux->srcpad = gst_pad_new_from_static_template (&src_template, "src");
  gst_pad_use_fixed_caps (demux->srcpad);
  gst_pad_set_caps (demux->srcpad, caps);
  codec_name = gst_pb_utils_get_codec_description (caps);
  gst_caps_unref (caps);
  gst_pad_set_event_function (demux->srcpad,
      GST_DEBUG_FUNCPTR (gst_real_audio_demux_src_event));
  gst_pad_set_query_function (demux->srcpad,
      GST_DEBUG_FUNCPTR (gst_real_audio_demux_src_query));
  gst_pad_set_active (demux->srcpad, TRUE);
  gst_element_add_pad (GST_ELEMENT (demux), demux->srcpad);

  if (demux->byterate_num > 0 && demux->byterate_denom > 0) {
    GstFormat bformat = GST_FORMAT_BYTES;
    gint64 size_bytes = 0;

    GST_INFO_OBJECT (demux, "byte rate = %u/%u = %u bytes/sec",
        demux->byterate_num, demux->byterate_denom,
        demux->byterate_num / demux->byterate_denom);

    if (gst_pad_query_peer_duration (demux->sinkpad, &bformat, &size_bytes)) {
      demux->duration =
          gst_real_demux_get_timestamp_from_offset (demux, size_bytes);
      demux->upstream_size = size_bytes;
      GST_INFO_OBJECT (demux, "upstream_size = %" G_GUINT64_FORMAT,
          demux->upstream_size);
      GST_INFO_OBJECT (demux, "duration      = %" GST_TIME_FORMAT,
          GST_TIME_ARGS (demux->duration));
    }
  }

  if (codec_name) {
    if (demux->pending_tags == NULL)
      demux->pending_tags = gst_tag_list_new ();

    gst_tag_list_add (demux->pending_tags, GST_TAG_MERGE_REPLACE,
        GST_TAG_AUDIO_CODEC, codec_name, NULL);
    g_free (codec_name);
  }

  gst_adapter_flush (demux->adapter, demux->data_offset - 6);

  demux->state = REAL_AUDIO_DEMUX_STATE_DATA;
  demux->need_newsegment = TRUE;

  return GST_FLOW_OK;

/* ERRORS */
unknown_fourcc:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (demux), STREAM, DECODE, (NULL),
        ("Unknown fourcc '%" GST_FOURCC_FORMAT "'",
            GST_FOURCC_ARGS (demux->fourcc)));
    return GST_FLOW_ERROR;
  }
broken_file:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (demux), STREAM, DECODE, (NULL),
        ("Broken file - invalid sample_rate or other header value"));
    return GST_FLOW_ERROR;
  }
}