/* Returns true when a registered GStreamer element factory exists for every
 * individual structure in aCaps (container plus each codec stream). */
bool GStreamerFormatHelper::HaveElementsToProcessCaps(GstCaps* aCaps) {
  NS_ASSERTION(sLoadOK, "GStreamer library not linked");

  GList* factories = GetFactories();

  /* aCaps holds [containerCaps, codecCaps1, codecCaps2, ...]; check each
   * structure on its own because a distinct element must handle each one. */
  unsigned int numStructures = gst_caps_get_size(aCaps);
  for (unsigned int idx = 0; idx < numStructures; idx++) {
    GstStructure* structure = gst_caps_get_structure(aCaps, idx);
    GstCaps* single =
        gst_caps_new_full(gst_structure_copy(structure), nullptr);

    bool haveFactory = false;
    for (GList* node = factories; node != nullptr; node = node->next) {
      if (SupportsCaps(GST_ELEMENT_FACTORY_CAST(node->data), single)) {
        haveFactory = true;
        break;
      }
    }

    gst_caps_unref(single);

    if (!haveFactory) {
      return false;
    }
  }

  return true;
}
/* Refresh self->dev_caps from the device's advertised A2DP capabilities:
 * SBC is mandatory (first structure), MPEG1/2 is appended when present.
 * Always returns TRUE. */
static gboolean gst_avdtp_sink_update_caps(GstAvdtpSink *self)
{
	sbc_capabilities_t *sbc_caps;
	mpeg_capabilities_t *mpeg_caps;
	GstStructure *sbc_st;
	GstStructure *mpeg_st;
	gchar *caps_str;

	GST_LOG_OBJECT(self, "updating device caps");

	sbc_caps = (void *) gst_avdtp_find_caps(self, BT_A2DP_SBC_SINK);
	mpeg_caps = (void *) gst_avdtp_find_caps(self, BT_A2DP_MPEG12_SINK);

	sbc_st = gst_avdtp_sink_parse_sbc_caps(self, sbc_caps);
	mpeg_st = gst_avdtp_sink_parse_mpeg_caps(self, mpeg_caps);

	/* drop any previously cached device caps before rebuilding */
	if (self->dev_caps != NULL)
		gst_caps_unref(self->dev_caps);

	self->dev_caps = gst_caps_new_full(sbc_st, NULL);
	if (mpeg_st != NULL)
		gst_caps_append_structure(self->dev_caps, mpeg_st);

	caps_str = gst_caps_to_string(self->dev_caps);
	GST_DEBUG_OBJECT(self, "Device capabilities: %s", caps_str);
	g_free(caps_str);

	return TRUE;
}
/**
 * gst_dvbsub_overlay_intersect_by_feature:
 *
 * Creates a new #GstCaps based on the following filtering rule.
 *
 * For each individual caps contained in given caps, if the
 * caps uses the given caps feature, keep a version of the caps
 * with the feature and an another one without. Otherwise, intersect
 * the caps with the given filter.
 *
 * Returns: the new #GstCaps
 */
static GstCaps *
gst_dvbsub_overlay_intersect_by_feature (GstCaps * caps,
    const gchar * feature, GstCaps * filter)
{
  int i, caps_size;
  GstCaps *new_caps;

  new_caps = gst_caps_new_empty ();
  caps_size = gst_caps_get_size (caps);
  for (i = 0; i < caps_size; i++) {
    GstStructure *caps_structure = gst_caps_get_structure (caps, i);
    /* copy the features: gst_caps_set_features () below takes ownership */
    GstCapsFeatures *caps_features =
        gst_caps_features_copy (gst_caps_get_features (caps, i));
    GstCaps *filtered_caps;
    /* single-structure caps holding just this entry */
    GstCaps *simple_caps =
        gst_caps_new_full (gst_structure_copy (caps_structure), NULL);
    gst_caps_set_features (simple_caps, 0, caps_features);

    if (gst_caps_features_contains (caps_features, feature)) {
      /* keep a copy that still carries the feature ... */
      gst_caps_append (new_caps, gst_caps_copy (simple_caps));
      /* ... then strip the feature for the second variant.  This mutates
       * the features now owned by simple_caps; the copy appended above
       * already carries its own copy of the features. */
      gst_caps_features_remove (caps_features, feature);
      filtered_caps = gst_caps_ref (simple_caps);
    } else {
      /* entry without the feature: just intersect it with the filter */
      filtered_caps = gst_caps_intersect_full (simple_caps, filter,
          GST_CAPS_INTERSECT_FIRST);
    }

    gst_caps_unref (simple_caps);
    gst_caps_append (new_caps, filtered_caps);
  }

  return new_caps;
}
GstCaps * fs_codec_to_gst_caps (const FsCodec *codec) { GstCaps *caps; GstStructure *structure; GList *item; if (codec == NULL) return NULL; structure = gst_structure_new ("application/x-rtp", NULL); if (codec->encoding_name) { gchar *encoding_name = g_ascii_strup (codec->encoding_name, -1); if (!g_ascii_strcasecmp (encoding_name, "H263-N800")) { g_free (encoding_name); encoding_name = g_strdup ("H263-1998"); } gst_structure_set (structure, "encoding-name", G_TYPE_STRING, encoding_name, NULL); g_free (encoding_name); } if (codec->clock_rate) gst_structure_set (structure, "clock-rate", G_TYPE_INT, codec->clock_rate, NULL); if (fs_media_type_to_string (codec->media_type)) gst_structure_set (structure, "media", G_TYPE_STRING, fs_media_type_to_string (codec->media_type), NULL); if (codec->id >= 0 && codec->id < 128) gst_structure_set (structure, "payload", G_TYPE_INT, codec->id, NULL); if (codec->channels) gst_structure_set (structure, "channels", G_TYPE_INT, codec->channels, NULL); for (item = codec->optional_params; item; item = g_list_next (item)) { FsCodecParameter *param = item->data; gchar *lower_name = g_ascii_strdown (param->name, -1); gst_structure_set (structure, lower_name, G_TYPE_STRING, param->value, NULL); g_free (lower_name); } caps = gst_caps_new_full (structure, NULL); return caps; }
/* GstStructure has no intersection API, so wrap each structure in a
 * one-structure caps (thanks for the idea, tpm!) and let the caps
 * intersection test answer the question. */
static gboolean
structure_can_intersect (const GstStructure *st1, const GstStructure *st2)
{
  GstCaps *wrapped1;
  GstCaps *wrapped2;
  gboolean intersects;

  wrapped1 = gst_caps_new_full (gst_structure_copy (st1), NULL);
  wrapped2 = gst_caps_new_full (gst_structure_copy (st2), NULL);

  intersects = gst_caps_can_intersect (wrapped1, wrapped2);

  gst_caps_unref (wrapped1);
  gst_caps_unref (wrapped2);

  return intersects;
}
int main (int argc, char *argv[]) { xmlDocPtr doc; xmlNodePtr parent; GstCaps *caps; gst_init (&argc, &argv); doc = xmlNewDoc ((const xmlChar *) "1.0"); doc->xmlRootNode = xmlNewDocNode (doc, NULL, (const xmlChar *) "Capabilities", NULL); /* for (i = 0; i<100000; i++) { caps = gst_caps_intersect (gst_static_caps_get (rawcaps3), GST_CAPS_GET (rawcaps4)); gst_caps_unref (caps); } */ caps = gst_caps_intersect (gst_static_caps_get (&sinkcaps), gst_static_caps_get (&mp1parsecaps)); parent = xmlNewChild (doc->xmlRootNode, NULL, (const xmlChar *) "Capabilities1", NULL); gst_caps_save_thyself (caps, parent); caps = gst_caps_intersect (gst_static_caps_get (&rawcaps), gst_static_caps_get (&rawcaps2)); parent = xmlNewChild (doc->xmlRootNode, NULL, (const xmlChar *) "Capabilities2", NULL); gst_caps_save_thyself (caps, parent); caps = gst_caps_intersect (gst_static_caps_get (&rawcaps3), gst_static_caps_get (&rawcaps4)); parent = xmlNewChild (doc->xmlRootNode, NULL, (const xmlChar *) "Capabilities3", NULL); gst_caps_save_thyself (caps, parent); caps = gst_caps_intersect (gst_static_caps_get (&rawcaps3), gst_static_caps_get (&rawcaps5)); parent = xmlNewChild (doc->xmlRootNode, NULL, (const xmlChar *) "Capabilities4", NULL); gst_caps_save_thyself (caps, parent); caps = gst_caps_intersect (gst_static_caps_get (&rawcaps6), gst_caps_new_full (gst_structure_copy (gst_caps_get_structure (gst_static_caps_get (&rawcaps6), 0)), NULL)); parent = xmlNewChild (doc->xmlRootNode, NULL, (const xmlChar *) "Capabilities5", NULL); gst_caps_save_thyself (caps, parent); caps = gst_caps_intersect (gst_static_caps_get (&rawcaps7), gst_static_caps_get (&rawcaps8)); g_print ("intersection: %s\n", gst_caps_to_string (caps)); xmlDocDump (stdout, doc); return 0; }
/* Tear down any existing pipeline and build a fresh one for the current
 * track and job action (sizing vs. imaging), then start it PLAYING.
 * Returns TRUE on success; on failure sets @error, unrefs the partially
 * built pipeline and returns FALSE. */
static gboolean
brasero_transcode_create_pipeline (BraseroTranscode *transcode,
				   GError **error)
{
	gchar *uri;
	gboolean keep_dts;
	GstElement *decode;
	GstElement *source;
	GstBus *bus = NULL;
	GstCaps *filtercaps;
	GValue *value = NULL;
	GstElement *pipeline;
	GstElement *sink = NULL;
	BraseroJobAction action;
	GstElement *filter = NULL;
	GstElement *volume = NULL;
	GstElement *convert = NULL;
	BraseroTrack *track = NULL;
	GstElement *resample = NULL;
	BraseroTranscodePrivate *priv;

	priv = BRASERO_TRANSCODE_PRIVATE (transcode);

	BRASERO_JOB_LOG (transcode, "Creating new pipeline");

	priv->set_active_state = 0;

	/* free the possible current pipeline and create a new one */
	if (priv->pipeline) {
		gst_element_set_state (priv->pipeline, GST_STATE_NULL);
		gst_object_unref (G_OBJECT (priv->pipeline));
		priv->link = NULL;
		priv->sink = NULL;
		priv->source = NULL;
		priv->convert = NULL;
		priv->pipeline = NULL;
	}

	/* create three types of pipeline according to the needs: (possibly adding grvolume)
	 * - filesrc ! decodebin ! audioconvert ! fakesink (find size) and filesrc!mp3parse!fakesink for mp3s
	 * - filesrc ! decodebin ! audioresample ! audioconvert ! audio/x-raw,format=S16BE,rate=44100 ! filesink
	 * - filesrc ! decodebin ! audioresample ! audioconvert ! audio/x-raw,format=S16BE,rate=44100 ! fdsink */
	pipeline = gst_pipeline_new (NULL);
	bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
	gst_bus_add_watch (bus,
			   (GstBusFunc) brasero_transcode_bus_messages,
			   transcode);
	gst_object_unref (bus);

	/* source: build an URI source element for the current track */
	brasero_job_get_current_track (BRASERO_JOB (transcode), &track);
	uri = brasero_track_stream_get_source (BRASERO_TRACK_STREAM (track), TRUE);
	source = gst_element_make_from_uri (GST_URI_SRC, uri, NULL, NULL);
	g_free (uri);

	if (source == NULL) {
		g_set_error (error,
			     BRASERO_BURN_ERROR,
			     BRASERO_BURN_ERROR_GENERAL,
			     /* Translators: %s is the name of the object (as in
			      * GObject) from the Gstreamer library that could
			      * not be created */
			     _("%s element could not be created"),
			     "\"Source\"");
		goto error;
	}

	gst_bin_add (GST_BIN (pipeline), source);
	g_object_set (source,
		      "typefind", FALSE,
		      NULL);

	/* sink: choose fakesink / filesink / fdsink based on the job action */
	brasero_job_get_action (BRASERO_JOB (transcode), &action);
	switch (action) {
	case BRASERO_JOB_ACTION_SIZE:
		/* mp3 sizing uses its own dedicated pipeline; the function
		 * called below takes over ownership of pipeline and source */
		if (priv->mp3_size_pipeline)
			return brasero_transcode_create_pipeline_size_mp3 (transcode, pipeline, source, error);

		sink = gst_element_factory_make ("fakesink", NULL);
		break;

	case BRASERO_JOB_ACTION_IMAGE:
		volume = brasero_transcode_create_volume (transcode, track);

		if (brasero_job_get_fd_out (BRASERO_JOB (transcode), NULL) != BRASERO_BURN_OK) {
			gchar *output;

			brasero_job_get_image_output (BRASERO_JOB (transcode),
						      &output,
						      NULL);
			sink = gst_element_factory_make ("filesink", NULL);
			g_object_set (sink,
				      "location", output,
				      NULL);
			g_free (output);
		}
		else {
			int fd;

			brasero_job_get_fd_out (BRASERO_JOB (transcode), &fd);
			sink = gst_element_factory_make ("fdsink", NULL);
			g_object_set (sink,
				      "fd", fd,
				      NULL);
		}
		break;

	default:
		goto error;
	}

	if (!sink) {
		g_set_error (error,
			     BRASERO_BURN_ERROR,
			     BRASERO_BURN_ERROR_GENERAL,
			     _("%s element could not be created"),
			     "\"Sink\"");
		goto error;
	}

	gst_bin_add (GST_BIN (pipeline), sink);
	g_object_set (sink,
		      "sync", FALSE,
		      NULL);

	/* check whether the session asked to keep DTS streams untouched */
	brasero_job_tag_lookup (BRASERO_JOB (transcode),
				BRASERO_SESSION_STREAM_AUDIO_FORMAT,
				&value);
	if (value)
		keep_dts = (g_value_get_int (value) & BRASERO_AUDIO_FORMAT_DTS) != 0;
	else
		keep_dts = FALSE;

	if (keep_dts
	&&  action == BRASERO_JOB_ACTION_IMAGE
	&& (brasero_track_stream_get_format (BRASERO_TRACK_STREAM (track)) & BRASERO_AUDIO_FORMAT_DTS) != 0) {
		GstElement *wavparse;
		GstPad *sinkpad;

		BRASERO_JOB_LOG (transcode, "DTS wav pipeline");

		/* FIXME: volume normalization won't work here. We'd need to
		 * reencode it afterwards otherwise. */

		/* This is a special case. This is DTS wav. So we only decode wav. */
		wavparse = gst_element_factory_make ("wavparse", NULL);
		if (wavparse == NULL) {
			g_set_error (error,
				     BRASERO_BURN_ERROR,
				     BRASERO_BURN_ERROR_GENERAL,
				     _("%s element could not be created"),
				     "\"Wavparse\"");
			goto error;
		}
		gst_bin_add (GST_BIN (pipeline), wavparse);

		if (!gst_element_link_many (source, wavparse, sink, NULL)) {
			g_set_error (error,
				     BRASERO_BURN_ERROR,
				     BRASERO_BURN_ERROR_GENERAL,
				     _("Impossible to link plugin pads"));
			goto error;
		}

		/* This is an ugly workaround for the lack of accuracy with
		 * gstreamer. Yet this is unfortunately a necessary evil. */
		/* FIXME: this does not look like it makes sense... (tpm) */
		priv->pos = 0;
		priv->size = 0;
		sinkpad = gst_element_get_static_pad (sink, "sink");
		priv->probe = gst_pad_add_probe (sinkpad, GST_PAD_PROBE_TYPE_BUFFER,
						 brasero_transcode_buffer_handler,
						 transcode, NULL);
		gst_object_unref (sinkpad);

		priv->link = NULL;
		priv->sink = sink;
		priv->decode = NULL;
		priv->source = source;
		priv->convert = NULL;
		priv->pipeline = pipeline;

		gst_element_set_state (pipeline, GST_STATE_PLAYING);
		return TRUE;
	}

	/* audioconvert */
	convert = gst_element_factory_make ("audioconvert", NULL);
	if (convert == NULL) {
		g_set_error (error,
			     BRASERO_BURN_ERROR,
			     BRASERO_BURN_ERROR_GENERAL,
			     _("%s element could not be created"),
			     "\"Audioconvert\"");
		goto error;
	}
	gst_bin_add (GST_BIN (pipeline), convert);

	if (action == BRASERO_JOB_ACTION_IMAGE) {
		BraseroStreamFormat session_format;
		BraseroTrackType *output_type;

		output_type = brasero_track_type_new ();
		brasero_job_get_output_type (BRASERO_JOB (transcode), output_type);
		session_format = brasero_track_type_get_stream_format (output_type);
		brasero_track_type_free (output_type);

		/* audioresample */
		resample = gst_element_factory_make ("audioresample", NULL);
		if (resample == NULL) {
			g_set_error (error,
				     BRASERO_BURN_ERROR,
				     BRASERO_BURN_ERROR_GENERAL,
				     _("%s element could not be created"),
				     "\"Audioresample\"");
			goto error;
		}
		gst_bin_add (GST_BIN (pipeline), resample);

		/* filter: forces the raw 44100 Hz stereo S16 output format */
		filter = gst_element_factory_make ("capsfilter", NULL);
		if (!filter) {
			g_set_error (error,
				     BRASERO_BURN_ERROR,
				     BRASERO_BURN_ERROR_GENERAL,
				     _("%s element could not be created"),
				     "\"Filter\"");
			goto error;
		}
		gst_bin_add (GST_BIN (pipeline), filter);
		filtercaps = gst_caps_new_full (gst_structure_new ("audio/x-raw",
								   /* NOTE: we use little endianness only for libburn which requires little */
								   "format", G_TYPE_STRING, (session_format & BRASERO_AUDIO_FORMAT_RAW_LITTLE_ENDIAN) != 0 ? "S16LE" : "S16BE",
								   "channels", G_TYPE_INT, 2,
								   "rate", G_TYPE_INT, 44100,
								   NULL),
						NULL);
		g_object_set (GST_OBJECT (filter), "caps", filtercaps, NULL);
		gst_caps_unref (filtercaps);
	}

	/* decode */
	decode = gst_element_factory_make ("decodebin", NULL);
	if (decode == NULL) {
		g_set_error (error,
			     BRASERO_BURN_ERROR,
			     BRASERO_BURN_ERROR_GENERAL,
			     _("%s element could not be created"),
			     "\"Decodebin\"");
		goto error;
	}
	gst_bin_add (GST_BIN (pipeline), decode);

	if (action == BRASERO_JOB_ACTION_IMAGE) {
		GstPad *sinkpad;
		gboolean res;

		if (!gst_element_link (source, decode)) {
			BRASERO_JOB_LOG (transcode, "Impossible to link plugin pads");
			g_set_error (error,
				     BRASERO_BURN_ERROR,
				     BRASERO_BURN_ERROR_GENERAL,
				     _("Impossible to link plugin pads"));
			goto error;
		}
		/* decodebin's dynamic pads get linked to resample in the
		 * pad-added callback below */
		priv->link = resample;
		g_signal_connect (G_OBJECT (decode),
				  "pad-added",
				  G_CALLBACK (brasero_transcode_new_decoded_pad_cb),
				  transcode);

		if (volume) {
			gst_bin_add (GST_BIN (pipeline), volume);
			res = gst_element_link_many (resample,
						     volume,
						     convert,
						     filter,
						     sink,
						     NULL);
		}
		else
			res = gst_element_link_many (resample,
						     convert,
						     filter,
						     sink,
						     NULL);

		if (!res) {
			BRASERO_JOB_LOG (transcode, "Impossible to link plugin pads");
			g_set_error (error,
				     BRASERO_BURN_ERROR,
				     BRASERO_BURN_ERROR_GENERAL,
				     _("Impossible to link plugin pads"));
			goto error;
		}

		/* This is an ugly workaround for the lack of accuracy with
		 * gstreamer. Yet this is unfortunately a necessary evil. */
		/* FIXME: this does not look like it makes sense... (tpm) */
		priv->pos = 0;
		priv->size = 0;
		sinkpad = gst_element_get_static_pad (sink, "sink");
		priv->probe = gst_pad_add_probe (sinkpad, GST_PAD_PROBE_TYPE_BUFFER,
						 brasero_transcode_buffer_handler,
						 transcode, NULL);
		gst_object_unref (sinkpad);
	}
	else {
		if (!gst_element_link (source, decode)
		||  !gst_element_link (convert, sink)) {
			BRASERO_JOB_LOG (transcode, "Impossible to link plugin pads");
			g_set_error (error,
				     BRASERO_BURN_ERROR,
				     BRASERO_BURN_ERROR_GENERAL,
				     _("Impossible to link plugin pads"));
			goto error;
		}

		priv->link = convert;
		g_signal_connect (G_OBJECT (decode),
				  "pad-added",
				  G_CALLBACK (brasero_transcode_new_decoded_pad_cb),
				  transcode);
	}

	priv->sink = sink;
	priv->decode = decode;
	priv->source = source;
	priv->convert = convert;
	priv->pipeline = pipeline;

	gst_element_set_state (pipeline, GST_STATE_PLAYING);
	return TRUE;

error:
	if (error && (*error))
		BRASERO_JOB_LOG (transcode,
				 "can't create object : %s \n",
				 (*error)->message);

	/* elements already added to the bin are destroyed with the pipeline */
	gst_object_unref (GST_OBJECT (pipeline));
	return FALSE;
}
/**
 * adapt_image_capture:
 * @self: camerasrc object
 * @in_caps: caps object that describes incoming image format
 *
 * Adjust capsfilters and crop according image capture caps if necessary.
 * The captured image format from video source might be different from
 * what application requested, so we can try to fix that in camerabin.
 *
 */
static void
adapt_image_capture (GstWrapperCameraBinSrc * self, GstCaps * in_caps)
{
  GstBaseCameraSrc *bcamsrc = GST_BASE_CAMERA_SRC (self);
  GstStructure *in_st, *new_st, *req_st;
  gint in_width = 0, in_height = 0, req_width = 0, req_height = 0, crop = 0;
  gdouble ratio_w, ratio_h;
  GstCaps *filter_caps = NULL;

  GST_LOG_OBJECT (self, "in caps: %" GST_PTR_FORMAT, in_caps);
  GST_LOG_OBJECT (self, "requested caps: %" GST_PTR_FORMAT,
      self->image_capture_caps);

  /* compare the incoming and the requested resolutions */
  in_st = gst_caps_get_structure (in_caps, 0);
  gst_structure_get_int (in_st, "width", &in_width);
  gst_structure_get_int (in_st, "height", &in_height);

  req_st = gst_caps_get_structure (self->image_capture_caps, 0);
  gst_structure_get_int (req_st, "width", &req_width);
  gst_structure_get_int (req_st, "height", &req_height);

  GST_INFO_OBJECT (self, "we requested %dx%d, and got %dx%d", req_width,
      req_height, in_width, in_height);

  new_st = gst_structure_copy (req_st);
  /* If new fields have been added, we need to copy them */
  gst_structure_foreach (in_st, copy_missing_fields, new_st);

  gst_structure_set (new_st, "width", G_TYPE_INT, in_width, "height",
      G_TYPE_INT, in_height, NULL);

  GST_LOG_OBJECT (self, "new image capture caps: %" GST_PTR_FORMAT, new_st);

  /* Crop if requested aspect ratio differs from incoming frame aspect ratio */
  if (self->src_zoom_crop) {

    ratio_w = (gdouble) in_width / req_width;
    ratio_h = (gdouble) in_height / req_height;

    if (ratio_w < ratio_h) {
      crop = in_height - (req_height * ratio_w);
      self->base_crop_top = crop / 2;
      self->base_crop_bottom = crop / 2;
    } else {
      crop = in_width - (req_width * ratio_h);
      self->base_crop_left = crop / 2;
      /* BUGFIX: this previously used "+=", accumulating the crop amount on
       * every capture; both sides must get exactly half the excess width,
       * mirroring the top/bottom assignments in the branch above. */
      self->base_crop_right = crop / 2;
    }

    GST_INFO_OBJECT (self,
        "setting base crop: left:%d, right:%d, top:%d, bottom:%d",
        self->base_crop_left, self->base_crop_right, self->base_crop_top,
        self->base_crop_bottom);
    g_object_set (G_OBJECT (self->src_zoom_crop),
        "top", self->base_crop_top,
        "bottom", self->base_crop_bottom,
        "left", self->base_crop_left, "right", self->base_crop_right, NULL);
  }

  /* Update capsfilters */
  if (self->image_capture_caps) {
    gst_caps_unref (self->image_capture_caps);
  }
  self->image_capture_caps = gst_caps_new_full (new_st, NULL);
  set_capsfilter_caps (self, self->image_capture_caps);

  /* Adjust the capsfilter before crop and videoscale elements if necessary */
  if (in_width == bcamsrc->width && in_height == bcamsrc->height) {
    GST_DEBUG_OBJECT (self, "no adaptation with resolution needed");
  } else {
    GST_DEBUG_OBJECT (self,
        "changing %" GST_PTR_FORMAT " from %dx%d to %dx%d", self->src_filter,
        bcamsrc->width, bcamsrc->height, in_width, in_height);
    /* Apply the width and height to filter caps */
    g_object_get (G_OBJECT (self->src_filter), "caps", &filter_caps, NULL);
    filter_caps = gst_caps_make_writable (filter_caps);
    gst_caps_set_simple (filter_caps, "width", G_TYPE_INT, in_width, "height",
        G_TYPE_INT, in_height, NULL);
    g_object_set (G_OBJECT (self->src_filter), "caps", filter_caps, NULL);
    gst_caps_unref (filter_caps);
  }
}
/* Build, configure and start the camerabin2 pipeline from the file-scope
 * option variables (videosrc_name, vfsink_name, image_width, ... —
 * presumably set by the option parser elsewhere in this file).
 * Returns TRUE when the pipeline reached PLAYING, FALSE after running
 * cleanup_pipeline () on any failure. */
static gboolean
setup_pipeline (void)
{
  gboolean res = TRUE;
  GstBus *bus;
  GstElement *sink = NULL, *ipp = NULL;
  GstEncodingProfile *prof = NULL;

  camerabin = gst_element_factory_make ("camerabin2", NULL);
  if (NULL == camerabin) {
    g_warning ("can't create camerabin element\n");
    goto error;
  }

  bus = gst_pipeline_get_bus (GST_PIPELINE (camerabin));
  /* Add sync handler for time critical messages that need to be handled fast */
  gst_bus_set_sync_handler (bus, sync_bus_callback, NULL);
  /* Handle normal messages asynchronously */
  gst_bus_add_watch (bus, bus_callback, NULL);
  gst_object_unref (bus);

  GST_INFO_OBJECT (camerabin, "camerabin2 created");

  /* optional custom camera source wrapper */
  if (videosrc_name) {
    GstElement *wrapper;
    GstElement *videosrc;

    if (wrappersrc_name)
      wrapper = gst_element_factory_make (wrappersrc_name, NULL);
    else
      wrapper = gst_element_factory_make ("wrappercamerabinsrc", NULL);

    if (setup_pipeline_element (wrapper, "video-src", videosrc_name, NULL)) {
      g_object_set (camerabin, "camera-src", wrapper, NULL);
    } else {
      GST_WARNING ("Failed to set videosrc to %s", videosrc_name);
    }

    /* NOTE(review): g_object_get returns a new ref on videosrc that is
     * never released here — looks like a leak; confirm against upstream */
    g_object_get (wrapper, "video-src", &videosrc, NULL);
    if (videosrc && videodevice_name &&
        g_object_class_find_property (G_OBJECT_GET_CLASS (videosrc),
            "device")) {
      g_object_set (videosrc, "device", videodevice_name, NULL);
    }
  }

  /* configure used elements */
  res &= setup_pipeline_element (camerabin, "audio-src", audiosrc_name, NULL);
  res &=
      setup_pipeline_element (camerabin, "viewfinder-sink", vfsink_name, &sink);
  res &=
      setup_pipeline_element (camerabin, "viewfinder-filter",
      viewfinder_filter, NULL);

  /* optional image post-processing bin */
  if (imagepp_name) {
    ipp = create_ipp_bin ();
    if (ipp)
      g_object_set (camerabin, "image-filter", ipp, NULL);
    else
      GST_WARNING ("Could not create ipp elements");
  }

  prof = load_encoding_profile ();
  if (prof)
    g_object_set (G_OBJECT (camerabin), "video-profile", prof, NULL);

  GST_INFO_OBJECT (camerabin, "elements created");

  if (sink)
    g_object_set (sink, "sync", TRUE, NULL);

  GST_INFO_OBJECT (camerabin, "elements configured");

  /* configure a resolution and framerate */
  if (image_width > 0 && image_height > 0) {
    if (mode == MODE_VIDEO) {
      GstCaps *caps = NULL;
      /* offer both yuv and rgb structures at the requested resolution,
       * with the framerate only when one was given */
      if (view_framerate_num > 0)
        caps = gst_caps_new_full (gst_structure_new ("video/x-raw-yuv",
                "width", G_TYPE_INT, image_width,
                "height", G_TYPE_INT, image_height,
                "framerate", GST_TYPE_FRACTION, view_framerate_num,
                view_framerate_den, NULL),
            gst_structure_new ("video/x-raw-rgb",
                "width", G_TYPE_INT, image_width,
                "height", G_TYPE_INT, image_height,
                "framerate", GST_TYPE_FRACTION, view_framerate_num,
                view_framerate_den, NULL), NULL);
      else
        caps = gst_caps_new_full (gst_structure_new ("video/x-raw-yuv",
                "width", G_TYPE_INT, image_width,
                "height", G_TYPE_INT, image_height, NULL),
            gst_structure_new ("video/x-raw-rgb",
                "width", G_TYPE_INT, image_width,
                "height", G_TYPE_INT, image_height, NULL), NULL);

      g_object_set (camerabin, "video-capture-caps", caps, NULL);
      gst_caps_unref (caps);
    } else {
      GstCaps *caps = gst_caps_new_full (gst_structure_new ("video/x-raw-yuv",
              "width", G_TYPE_INT, image_width,
              "height", G_TYPE_INT, image_height, NULL),
          gst_structure_new ("video/x-raw-rgb",
              "width", G_TYPE_INT, image_width,
              "height", G_TYPE_INT, image_height, NULL), NULL);

      g_object_set (camerabin, "image-capture-caps", caps, NULL);
      gst_caps_unref (caps);
    }
  }

  set_camerabin2_caps_from_string ();

  if (GST_STATE_CHANGE_FAILURE ==
      gst_element_set_state (camerabin, GST_STATE_READY)) {
    g_warning ("can't set camerabin to ready\n");
    goto error;
  }
  GST_INFO_OBJECT (camerabin, "camera ready");

  if (GST_STATE_CHANGE_FAILURE ==
      gst_element_set_state (camerabin, GST_STATE_PLAYING)) {
    g_warning ("can't set camerabin to playing\n");
    goto error;
  }

  GST_INFO_OBJECT (camerabin, "camera started");
  return TRUE;
error:
  cleanup_pipeline ();
  return FALSE;
}
/* Sink pad setcaps handler: (re)builds the libav encoder context from the
 * element properties and the negotiated input caps, opens the codec, and
 * negotiates/fixates the source pad caps.  Returns FALSE (with the codec
 * closed) whenever the encoder cannot be set up for these caps. */
static gboolean
gst_ffmpegenc_setcaps (GstPad * pad, GstCaps * caps)
{
  GstCaps *other_caps;
  GstCaps *allowed_caps;
  GstCaps *icaps;
  enum PixelFormat pix_fmt;
  GstFFMpegEnc *ffmpegenc = (GstFFMpegEnc *) GST_PAD_PARENT (pad);
  GstFFMpegEncClass *oclass =
      (GstFFMpegEncClass *) G_OBJECT_GET_CLASS (ffmpegenc);

  /* close old session */
  if (ffmpegenc->opened) {
    gst_ffmpeg_avcodec_close (ffmpegenc->context);
    ffmpegenc->opened = FALSE;
  }

  /* set defaults */
  avcodec_get_context_defaults (ffmpegenc->context);

  /* if we set it in _getcaps we should set it also in _link */
  ffmpegenc->context->strict_std_compliance = -1;

  /* user defined properties */
  ffmpegenc->context->bit_rate = ffmpegenc->bitrate;
  ffmpegenc->context->bit_rate_tolerance = ffmpegenc->bitrate;
  ffmpegenc->context->gop_size = ffmpegenc->gop_size;
  ffmpegenc->context->me_method = ffmpegenc->me_method;
  GST_DEBUG_OBJECT (ffmpegenc, "Setting avcontext to bitrate %lu, gop_size %d",
      ffmpegenc->bitrate, ffmpegenc->gop_size);

  /* RTP payload used for GOB production (for Asterisk) */
  if (ffmpegenc->rtp_payload_size) {
    ffmpegenc->context->rtp_payload_size = ffmpegenc->rtp_payload_size;
  }

  /* additional avcodec settings */
  /* first fill in the majority by copying over */
  gst_ffmpeg_cfg_fill_context (ffmpegenc, ffmpegenc->context);

  /* then handle some special cases */
  ffmpegenc->context->lmin = (ffmpegenc->lmin * FF_QP2LAMBDA + 0.5);
  ffmpegenc->context->lmax = (ffmpegenc->lmax * FF_QP2LAMBDA + 0.5);

  if (ffmpegenc->interlaced) {
    ffmpegenc->context->flags |=
        CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME;
    ffmpegenc->picture->interlaced_frame = TRUE;
    /* if this is not the case, a filter element should be used to swap fields */
    ffmpegenc->picture->top_field_first = TRUE;
  }

  /* some other defaults */
  ffmpegenc->context->rc_strategy = 2;
  ffmpegenc->context->b_frame_strategy = 0;
  ffmpegenc->context->coder_type = 0;
  ffmpegenc->context->context_model = 0;
  ffmpegenc->context->scenechange_threshold = 0;
  ffmpegenc->context->inter_threshold = 0;

  /* and last but not least the pass; CBR, 2-pass, etc */
  ffmpegenc->context->flags |= ffmpegenc->pass;
  switch (ffmpegenc->pass) {
      /* some additional action depends on type of pass */
    case CODEC_FLAG_QSCALE:
      ffmpegenc->context->global_quality
          = ffmpegenc->picture->quality = FF_QP2LAMBDA * ffmpegenc->quantizer;
      break;
    case CODEC_FLAG_PASS1:     /* need to prepare a stats file */
      /* we don't close when changing caps, fingers crossed */
      if (!ffmpegenc->file)
        ffmpegenc->file = g_fopen (ffmpegenc->filename, "w");
      if (!ffmpegenc->file) {
        GST_ELEMENT_ERROR (ffmpegenc, RESOURCE, OPEN_WRITE,
            (("Could not open file \"%s\" for writing."), ffmpegenc->filename),
            GST_ERROR_SYSTEM);
        return FALSE;
      }
      break;
    case CODEC_FLAG_PASS2:
    {                           /* need to read the whole stats file ! */
      gsize size;

      if (!g_file_get_contents (ffmpegenc->filename,
              &ffmpegenc->context->stats_in, &size, NULL)) {
        GST_ELEMENT_ERROR (ffmpegenc, RESOURCE, READ,
            (("Could not get contents of file \"%s\"."), ffmpegenc->filename),
            GST_ERROR_SYSTEM);
        return FALSE;
      }
      break;
    }
    default:
      break;
  }

  /* fetch pix_fmt and so on */
  gst_ffmpeg_caps_with_codectype (oclass->in_plugin->type, caps,
      ffmpegenc->context);
  if (!ffmpegenc->context->time_base.den) {
    /* no framerate in the caps: fall back to 25 fps */
    ffmpegenc->context->time_base.den = 25;
    ffmpegenc->context->time_base.num = 1;
    ffmpegenc->context->ticks_per_frame = 1;
  } else if ((oclass->in_plugin->id == CODEC_ID_MPEG4)
      && (ffmpegenc->context->time_base.den > 65535)) {
    /* MPEG4 Standards do not support time_base denominator greater than
     * (1<<16) - 1 . We therefore scale them down.
     * Agreed, it will not be the exact framerate... but the difference
     * shouldn't be that noticeable */
    ffmpegenc->context->time_base.num =
        (gint) gst_util_uint64_scale_int (ffmpegenc->context->time_base.num,
        65535, ffmpegenc->context->time_base.den);
    ffmpegenc->context->time_base.den = 65535;
    GST_LOG_OBJECT (ffmpegenc, "MPEG4 : scaled down framerate to %d / %d",
        ffmpegenc->context->time_base.den, ffmpegenc->context->time_base.num);
  }

  /* remember the pixel format mapped from the caps so we can detect below
   * whether opening the codec changed it */
  pix_fmt = ffmpegenc->context->pix_fmt;

  /* max-key-interval may need the framerate set above */
  if (ffmpegenc->max_key_interval) {
    AVCodecContext *ctx;

    /* override gop-size */
    ctx = ffmpegenc->context;
    ctx->gop_size = (ffmpegenc->max_key_interval < 0) ?
        (-ffmpegenc->max_key_interval
        * (ctx->time_base.den * ctx->ticks_per_frame / ctx->time_base.num))
        : ffmpegenc->max_key_interval;
  }

  /* open codec */
  if (gst_ffmpeg_avcodec_open (ffmpegenc->context, oclass->in_plugin) < 0) {
    if (ffmpegenc->context->priv_data)
      gst_ffmpeg_avcodec_close (ffmpegenc->context);
    if (ffmpegenc->context->stats_in)
      g_free (ffmpegenc->context->stats_in);
    GST_DEBUG_OBJECT (ffmpegenc, "ffenc_%s: Failed to open FFMPEG codec",
        oclass->in_plugin->name);
    return FALSE;
  }

  /* second pass stats buffer no longer needed */
  /* NOTE(review): stats_in is freed here but not set to NULL; if a later
   * close/free path also releases it this would double-free — confirm who
   * owns stats_in after avcodec open */
  if (ffmpegenc->context->stats_in)
    g_free (ffmpegenc->context->stats_in);

  /* is the colourspace correct? */
  if (pix_fmt != ffmpegenc->context->pix_fmt) {
    gst_ffmpeg_avcodec_close (ffmpegenc->context);
    GST_DEBUG_OBJECT (ffmpegenc,
        "ffenc_%s: AV wants different colourspace (%d given, %d wanted)",
        oclass->in_plugin->name, pix_fmt, ffmpegenc->context->pix_fmt);
    return FALSE;
  }
  /* we may have failed mapping caps to a pixfmt,
   * and quite some codecs do not make up their own mind about that
   * in any case, _NONE can never work out later on */
  if (oclass->in_plugin->type == CODEC_TYPE_VIDEO && pix_fmt == PIX_FMT_NONE) {
    GST_DEBUG_OBJECT (ffmpegenc, "ffenc_%s: Failed to determine input format",
        oclass->in_plugin->name);
    return FALSE;
  }

  /* some codecs support more than one format, first auto-choose one */
  GST_DEBUG_OBJECT (ffmpegenc, "picking an output format ...");
  allowed_caps = gst_pad_get_allowed_caps (ffmpegenc->srcpad);
  if (!allowed_caps) {
    GST_DEBUG_OBJECT (ffmpegenc, "... but no peer, using template caps");
    /* we need to copy because get_allowed_caps returns a ref, and
     * get_pad_template_caps doesn't */
    allowed_caps =
        gst_caps_copy (gst_pad_get_pad_template_caps (ffmpegenc->srcpad));
  }
  GST_DEBUG_OBJECT (ffmpegenc, "chose caps %" GST_PTR_FORMAT, allowed_caps);
  gst_ffmpeg_caps_with_codecid (oclass->in_plugin->id,
      oclass->in_plugin->type, allowed_caps, ffmpegenc->context);

  /* try to set this caps on the other side */
  other_caps = gst_ffmpeg_codecid_to_caps (oclass->in_plugin->id,
      ffmpegenc->context, TRUE);

  if (!other_caps) {
    gst_ffmpeg_avcodec_close (ffmpegenc->context);
    GST_DEBUG ("Unsupported codec - no caps found");
    return FALSE;
  }

  icaps = gst_caps_intersect (allowed_caps, other_caps);
  gst_caps_unref (allowed_caps);
  gst_caps_unref (other_caps);
  if (gst_caps_is_empty (icaps)) {
    gst_caps_unref (icaps);
    return FALSE;
  }

  /* fixate to the first structure when several remain */
  if (gst_caps_get_size (icaps) > 1) {
    GstCaps *newcaps;

    newcaps =
        gst_caps_new_full (gst_structure_copy (gst_caps_get_structure (icaps,
                0)), NULL);
    gst_caps_unref (icaps);
    icaps = newcaps;
  }

  if (!gst_pad_set_caps (ffmpegenc->srcpad, icaps)) {
    gst_ffmpeg_avcodec_close (ffmpegenc->context);
    gst_caps_unref (icaps);
    return FALSE;
  }
  gst_caps_unref (icaps);

  /* success! */
  ffmpegenc->opened = TRUE;

  return TRUE;
}
/* Query the downstream peer of @pad and pick the most capable caps feature
 * it supports: VAAPI surface > GL texture upload meta > system memory.
 * When @out_format_ptr is non-NULL it receives the negotiated video format
 * (for the GL texture upload case, the format advertised downstream).
 * Returns GST_VAAPI_CAPS_FEATURE_NOT_NEGOTIATED when there is no peer. */
GstVaapiCapsFeature
gst_vaapi_find_preferred_caps_feature (GstPad * pad, GstVideoFormat format,
    GstVideoFormat * out_format_ptr)
{
  GstVaapiCapsFeature feature = GST_VAAPI_CAPS_FEATURE_SYSTEM_MEMORY;
  guint i, num_structures;
  GstCaps *caps = NULL;
  GstCaps *gl_texture_upload_caps = NULL;
  GstCaps *sysmem_caps = NULL;
  GstCaps *vaapi_caps = NULL;
  GstCaps *out_caps, *templ;
  GstVideoFormat out_format;

  templ = gst_pad_get_pad_template_caps (pad);
  out_caps = gst_pad_peer_query_caps (pad, templ);
  gst_caps_unref (templ);
  if (!out_caps) {
    feature = GST_VAAPI_CAPS_FEATURE_NOT_NEGOTIATED;
    goto cleanup;
  }

  /* ENCODED has no concrete format; fall back to I420 as reference */
  out_format = format == GST_VIDEO_FORMAT_ENCODED ?
      GST_VIDEO_FORMAT_I420 : format;

  /* build one reference caps per candidate feature */
  gl_texture_upload_caps = new_gl_texture_upload_meta_caps ();
  if (!gl_texture_upload_caps)
    goto cleanup;

  vaapi_caps =
      gst_vaapi_video_format_new_template_caps_with_features (out_format,
      GST_CAPS_FEATURE_MEMORY_VAAPI_SURFACE);
  if (!vaapi_caps)
    goto cleanup;

  sysmem_caps =
      gst_vaapi_video_format_new_template_caps_with_features (out_format,
      GST_CAPS_FEATURE_MEMORY_SYSTEM_MEMORY);
  if (!sysmem_caps)
    goto cleanup;

  /* walk the peer caps, upgrading `feature` whenever an entry matches a
   * more capable reference caps */
  num_structures = gst_caps_get_size (out_caps);
  for (i = 0; i < num_structures; i++) {
    GstCapsFeatures *const features = gst_caps_get_features (out_caps, i);
    GstStructure *const structure = gst_caps_get_structure (out_caps, i);

#if GST_CHECK_VERSION(1,3,0)
    /* Skip ANY features, we need an exact match for correct evaluation */
    if (gst_caps_features_is_any (features))
      continue;
#endif

    /* single-entry caps carrying this structure and its features */
    caps = gst_caps_new_full (gst_structure_copy (structure), NULL);
    if (!caps)
      continue;
    gst_caps_set_features (caps, 0, gst_caps_features_copy (features));
    if (gst_caps_can_intersect (caps, vaapi_caps) &&
        feature < GST_VAAPI_CAPS_FEATURE_VAAPI_SURFACE)
      feature = GST_VAAPI_CAPS_FEATURE_VAAPI_SURFACE;
    else if (gst_caps_can_intersect (caps, gl_texture_upload_caps) &&
        feature < GST_VAAPI_CAPS_FEATURE_GL_TEXTURE_UPLOAD_META)
      feature = GST_VAAPI_CAPS_FEATURE_GL_TEXTURE_UPLOAD_META;
    else if (gst_caps_can_intersect (caps, sysmem_caps) &&
        feature < GST_VAAPI_CAPS_FEATURE_SYSTEM_MEMORY)
      feature = GST_VAAPI_CAPS_FEATURE_SYSTEM_MEMORY;
    gst_caps_replace (&caps, NULL);

#if GST_CHECK_VERSION(1,3,0)
    /* Stop at the first match, the caps should already be sorted out
       by preference order from downstream elements */
    if (feature != GST_VAAPI_CAPS_FEATURE_SYSTEM_MEMORY)
      break;
#endif
  }

  if (out_format_ptr) {
    if (feature == GST_VAAPI_CAPS_FEATURE_GL_TEXTURE_UPLOAD_META) {
      GstStructure *structure;
      gchar *format_str;

      /* extract the format string the downstream GL path expects */
      out_format = GST_VIDEO_FORMAT_UNKNOWN;
      do {
        caps = gst_caps_intersect_full (out_caps, gl_texture_upload_caps,
            GST_CAPS_INTERSECT_FIRST);
        if (!caps)
          break;
        structure = gst_caps_get_structure (caps, 0);
        if (!structure)
          break;
        if (!gst_structure_get (structure, "format", G_TYPE_STRING,
                &format_str, NULL))
          break;
        out_format = gst_video_format_from_string (format_str);
        g_free (format_str);
      } while (0);
      /* NOTE(review): relies on GST_VIDEO_FORMAT_UNKNOWN being 0; on this
       * cleanup path *out_format_ptr is left unset — confirm callers
       * initialize it */
      if (!out_format)
        goto cleanup;
    }
    *out_format_ptr = out_format;
  }

cleanup:
  gst_caps_replace (&gl_texture_upload_caps, NULL);
  gst_caps_replace (&sysmem_caps, NULL);
  gst_caps_replace (&vaapi_caps, NULL);
  gst_caps_replace (&caps, NULL);
  gst_caps_replace (&out_caps, NULL);
  return feature;
}
int main (int argc, char **argv) { GError *err = NULL; gchar *outputuri = NULL; gchar *format = NULL; gchar *aformat = NULL; gchar *vformat = NULL; gboolean allmissing = FALSE; gboolean listcodecs = FALSE; GOptionEntry options[] = { { "silent", 's', 0, G_OPTION_ARG_NONE, &silent, "Don't output the information structure", NULL }, { "outputuri", 'o', 0, G_OPTION_ARG_STRING, &outputuri, "URI to encode to", "URI (<protocol>://<location>)" }, { "format", 'f', 0, G_OPTION_ARG_STRING, &format, "Container format", "<GstCaps>" }, { "vformat", 'v', 0, G_OPTION_ARG_STRING, &vformat, "Video format", "<GstCaps>" }, { "aformat", 'a', 0, G_OPTION_ARG_STRING, &aformat, "Audio format", "<GstCaps>" }, { "allmissing", 'm', 0, G_OPTION_ARG_NONE, &allmissing, "encode to all matching format/codec that aren't specified", NULL }, { "list-codecs", 'l', 0, G_OPTION_ARG_NONE, &listcodecs, "list all available codecs and container formats", NULL }, {NULL} }; GOptionContext *ctx; GstEncodingProfile *prof; gchar *inputuri; ctx = g_option_context_new ("- encode URIs with GstProfile and encodebin"); g_option_context_add_main_entries (ctx, options, NULL); g_option_context_add_group (ctx, gst_init_get_option_group ()); if (!g_option_context_parse (ctx, &argc, &argv, &err)) { g_print ("Error initializing: %s\n", err->message); exit (1); } if (listcodecs) { list_codecs (); g_option_context_free (ctx); exit (0); } if (outputuri == NULL || argc != 2) { g_print ("%s", g_option_context_get_help (ctx, TRUE, NULL)); g_option_context_free (ctx); exit (-1); } g_option_context_free (ctx); /* Fixup outputuri to be a URI */ inputuri = ensure_uri (argv[1]); outputuri = ensure_uri (outputuri); if (allmissing) { GList *muxers; GstCaps *formats = NULL; GstCaps *vformats = NULL; GstCaps *aformats = NULL; guint f, v, a, flen, vlen, alen; if (!format) formats = gst_caps_list_container_formats (GST_RANK_NONE); else formats = gst_caps_from_string (format); if (!vformat) vformats = gst_caps_list_video_encoding_formats 
(GST_RANK_NONE); else vformats = gst_caps_from_string (vformat); if (!aformat) aformats = gst_caps_list_audio_encoding_formats (GST_RANK_NONE); else aformats = gst_caps_from_string (aformat); muxers = gst_element_factory_list_get_elements (GST_ELEMENT_FACTORY_TYPE_MUXER, GST_RANK_NONE); flen = gst_caps_get_size (formats); for (f = 0; f < flen; f++) { GstCaps *container = gst_caps_new_full (gst_caps_steal_structure (formats, 0), NULL); GstCaps *compatv = gst_caps_list_compatible_codecs (container, vformats, muxers); GstCaps *compata = gst_caps_list_compatible_codecs (container, aformats, muxers); vlen = gst_caps_get_size (compatv); alen = gst_caps_get_size (compata); for (v = 0; v < vlen; v++) { GstCaps *vcodec = gst_caps_new_full (gst_structure_copy (gst_caps_get_structure (compatv, v)), NULL); for (a = 0; a < alen; a++) { GstCaps *acodec = gst_caps_new_full (gst_structure_copy (gst_caps_get_structure (compata, a)), NULL); prof = create_profile ((GstCaps *) container, (GstCaps *) vcodec, (GstCaps *) acodec); if (G_UNLIKELY (prof == NULL)) { g_print ("Wrong arguments\n"); break; } outputuri = ensure_uri (generate_filename (container, vcodec, acodec)); transcode_file (inputuri, outputuri, prof); gst_encoding_profile_unref (prof); gst_caps_unref (acodec); } gst_caps_unref (vcodec); } gst_caps_unref (container); } } else { /* Create the profile */ prof = create_profile_from_string (format, vformat, aformat); if (G_UNLIKELY (prof == NULL)) { g_print ("Encoding arguments are not valid !\n"); return 1; } /* Transcode file */ transcode_file (inputuri, outputuri, prof); /* cleanup */ gst_encoding_profile_unref (prof); } return 0; }
/* creates/returns a list of CodecCap based on given filter function and caps */ static GList * get_plugins_filtered_from_caps (FilterFunc filter, GstCaps *caps, GstPadDirection direction) { GList *walk, *result; GstElementFactory *factory; GList *list = NULL; gboolean is_valid; GstCaps *matched_caps = NULL; result = gst_registry_get_feature_list (gst_registry_get_default (), GST_TYPE_ELEMENT_FACTORY); result = g_list_sort (result, (GCompareFunc) compare_ranks); walk = result; while (walk) { factory = GST_ELEMENT_FACTORY (walk->data); is_valid = FALSE; if (!filter (factory)) { goto next; } if (caps) { if (check_caps_compatibility (factory, caps, &matched_caps)) { is_valid = TRUE; } } if (is_valid || !caps) { if (!matched_caps) { list = create_codec_cap_list (factory, direction, list, NULL); } else { gint i; for (i = 0; i < gst_caps_get_size (matched_caps); i++) { GstStructure *structure = gst_caps_get_structure (matched_caps, i); GstCaps *cur_caps = gst_caps_new_full (gst_structure_copy (structure), NULL); list = create_codec_cap_list (factory, direction, list, cur_caps); gst_caps_unref (cur_caps); } gst_caps_unref (matched_caps); } } next: walk = g_list_next (walk); } /* walk = result; while (walk) { factory = GST_ELEMENT_FACTORY (walk->data); DEBUG ("new refcnt is %d", GST_OBJECT_REFCOUNT_VALUE (GST_OBJECT (factory))); walk = g_list_next (walk); } */ gst_plugin_feature_list_free (result); return list; }
/* if element caps already in list, will make sure Transform elements have
 * priority and replace old ones
 *
 * Walks the always-present static pad templates of @factory in @direction
 * and adds (or merges into) a CodecCap entry per media type found in the
 * template caps. @rtp_caps, if given, is the matching RTP caps for this
 * factory; ownership of @rtp_caps stays with the caller.
 *
 * Fix: the "application/x-rtp" skip check is now performed BEFORE a
 * per-structure caps copy is created — the old code allocated cur_caps and
 * then leaked it on the `continue` path.
 */
static GList *
create_codec_cap_list (GstElementFactory *factory,
    GstPadDirection direction,
    GList *list,
    GstCaps *rtp_caps)
{
  const GList *pads = factory->staticpadtemplates;
  gint i;

  /* Let us look at each pad for stuff to add */
  while (pads) {
    GstCaps *caps = NULL;
    GstStaticPadTemplate *padtemplate = NULL;

    padtemplate = (GstStaticPadTemplate *) (pads->data);
    pads = g_list_next (pads);

    if (padtemplate->direction != direction)
      continue;

    if (GST_PAD_TEMPLATE_PRESENCE (padtemplate) != GST_PAD_ALWAYS) {
      continue;
    }

    caps = gst_static_caps_get (&padtemplate->static_caps);

    /* skips caps ANY */
    if (!caps || gst_caps_is_any (caps)) {
      goto done;
    }

    /* let us add one entry to the list per media type */
    for (i = 0; i < gst_caps_get_size (caps); i++) {
      CodecCap *entry = NULL;
      GList *found_item = NULL;
      GstStructure *structure = gst_caps_get_structure (caps, i);
      GstCaps *cur_caps;
      const gchar *name;

      /* FIXME fix this in gstreamer! The rtpdepay element is bogus, it
       * claims to be a depayloader yet has application/x-rtp on both sides
       * and does absolutely nothing */
      /* Let's check if media caps are really media caps, this is to deal
       * with wierd elements such as rtpdepay that says it's a depayloader
       * but has application/x-rtp on src and sink pads */
      name = gst_structure_get_name (structure);
      if (g_ascii_strcasecmp (name, "application/x-rtp") == 0) {
        GST_DEBUG ("skipping %s",
            gst_plugin_feature_get_name (GST_PLUGIN_FEATURE (factory)));
        continue;
      }

      cur_caps = gst_caps_new_full (gst_structure_copy (structure), NULL);

      /* let's check if this caps is already in the list, if so let's replace
       * that CodecCap list instead of creating a new one */
      /* we need to compare both media caps and rtp caps */
      found_item = g_list_find_custom (list, cur_caps,
          (GCompareFunc) compare_media_caps);
      if (found_item) {
        entry = (CodecCap *) found_item->data;
        /* if RTP caps exist and don't match nullify entry */
        if (rtp_caps && compare_rtp_caps (found_item->data, rtp_caps)) {
          entry = NULL;
        }
      }

      if (!entry) {
        /* new entry: takes ownership of cur_caps and refs rtp_caps */
        entry = g_slice_new0 (CodecCap);

        entry->caps = cur_caps;
        if (rtp_caps) {
          entry->rtp_caps = rtp_caps;
          gst_caps_ref (rtp_caps);
        }
        list = g_list_append (list, entry);
        entry->element_list1 = g_list_prepend (NULL,
            g_list_prepend (NULL, factory));
        gst_object_ref (factory);
      } else {
        GstCaps *newcaps;

        entry->element_list1->data =
            g_list_append (entry->element_list1->data, factory);
        gst_object_ref (factory);

        if (rtp_caps) {
          if (entry->rtp_caps) {
            GstCaps *new_rtp_caps;
            new_rtp_caps = gst_caps_union (rtp_caps, entry->rtp_caps);
            gst_caps_unref (entry->rtp_caps);
            entry->rtp_caps = new_rtp_caps;
          } else {
            entry->rtp_caps = rtp_caps;
            /* This shouldn't happen, its we're looking at rtp elements
             * or we're not */
            g_assert_not_reached ();
          }
          /* NOTE(review): this unref of caller-owned rtp_caps looks
           * unbalanced (no matching ref was taken in this branch) —
           * preserved as-is, but verify against the callers */
          gst_caps_unref (rtp_caps);
        }

        newcaps = gst_caps_union (cur_caps, entry->caps);
        gst_caps_unref (entry->caps);
        gst_caps_unref (cur_caps);
        entry->caps = newcaps;
      }
    }

  done:
    if (caps != NULL) {
      gst_caps_unref (caps);
    }
  }

  return list;
}
/* CAPS query handler for both pads of the convert bin.
 *
 * Proxies the query to the peer of the opposite pad, widening the filter
 * with the converter's own caps (since we can convert to them even if
 * downstream rejects the filter directly), then merges the converter caps
 * into the result when the peer returned raw caps.
 *
 * Holds the bin lock for the whole computation; releases the parent ref
 * taken by gst_pad_get_parent() before returning. Caller owns the returned
 * caps. */
static GstCaps *
gst_play_sink_convert_bin_getcaps (GstPad * pad, GstCaps * filter)
{
  GstPlaySinkConvertBin *self =
      GST_PLAY_SINK_CONVERT_BIN (gst_pad_get_parent (pad));
  GstCaps *ret;
  GstPad *otherpad, *peer;

  GST_PLAY_SINK_CONVERT_BIN_LOCK (self);
  /* queries are answered by looking through the element: src <-> sink */
  if (pad == self->srcpad) {
    otherpad = self->sinkpad;
  } else if (pad == self->sinkpad) {
    otherpad = self->srcpad;
  } else {
    GST_ERROR_OBJECT (pad, "Not one of our pads");
    otherpad = NULL;
  }

  if (otherpad) {
    peer = gst_pad_get_peer (otherpad);
    if (peer) {
      GstCaps *peer_caps;
      GstCaps *downstream_filter = NULL;

      /* Add all the caps that we can convert to to the filter caps,
       * otherwise downstream might just return EMPTY caps because
       * it doesn't handle the filter caps but we could still convert
       * to these caps */
      if (filter) {
        guint i, n;

        downstream_filter = gst_caps_new_empty ();

        /* Intersect raw video caps in the filter caps with the converter
         * caps. This makes sure that we don't accept raw video that we
         * can't handle, e.g. because of caps features */
        n = gst_caps_get_size (filter);
        for (i = 0; i < n; i++) {
          GstStructure *s;
          GstCaps *tmp, *tmp2;

          /* copied structure: ownership passes to tmp or downstream_filter */
          s = gst_structure_copy (gst_caps_get_structure (filter, i));
          if (gst_structure_has_name (s,
                  self->audio ? "audio/x-raw" : "video/x-raw")) {
            tmp = gst_caps_new_full (s, NULL);
            tmp2 = gst_caps_intersect (tmp, self->converter_caps);
            gst_caps_append (downstream_filter, tmp2);
            gst_caps_unref (tmp);
          } else {
            gst_caps_append_structure (downstream_filter, s);
          }
        }
        /* also accept everything we could convert to */
        downstream_filter =
            gst_caps_merge (downstream_filter,
            gst_caps_ref (self->converter_caps));
      }

      peer_caps = gst_pad_query_caps (peer, downstream_filter);
      if (downstream_filter)
        gst_caps_unref (downstream_filter);
      gst_object_unref (peer);
      if (self->converter_caps && is_raw_caps (peer_caps, self->audio)) {
        GstCaps *converter_caps = gst_caps_ref (self->converter_caps);
        GstCapsFeatures *cf;
        GstStructure *s;
        guint i, n;

        ret = gst_caps_make_writable (peer_caps);

        /* Filter out ANY capsfeatures from the converter caps. We can't
         * convert to ANY capsfeatures, they are only there so that we
         * can passthrough whatever downstream can support... but we
         * definitely don't want to return them here */
        n = gst_caps_get_size (converter_caps);
        for (i = 0; i < n; i++) {
          s = gst_caps_get_structure (converter_caps, i);
          cf = gst_caps_get_features (converter_caps, i);
          if (cf && gst_caps_features_is_any (cf))
            continue;
          ret =
              gst_caps_merge_structure_full (ret, gst_structure_copy (s),
              (cf ? gst_caps_features_copy (cf) : NULL));
        }
        gst_caps_unref (converter_caps);
      } else {
        ret = peer_caps;
      }
    } else {
      /* unlinked: we can offer everything we can convert to */
      ret = gst_caps_ref (self->converter_caps);
    }
    GST_PLAY_SINK_CONVERT_BIN_FILTER_CAPS (filter, ret);
  } else {
    /* not one of our pads: just echo the filter (or ANY) */
    ret = filter ? gst_caps_ref (filter) : gst_caps_new_any ();
  }
  GST_PLAY_SINK_CONVERT_BIN_UNLOCK (self);

  gst_object_unref (self);

  GST_DEBUG_OBJECT (pad, "Returning caps %" GST_PTR_FORMAT, ret);

  return ret;
}
/* creates/returns a list of CodecCap based on given filter function and caps */ static GList * get_plugins_filtered_from_caps (FilterFunc filter, GstCaps *caps, GstPadDirection direction) { GList *walk, *result; GList *list = NULL; GstCaps *matched_caps = NULL; result = gst_registry_get_feature_list (gst_registry_get (), GST_TYPE_ELEMENT_FACTORY); result = g_list_sort (result, (GCompareFunc) compare_ranks); for (walk = result; walk; walk = walk->next) { GstElementFactory *factory = GST_ELEMENT_FACTORY (walk->data); /* Ignore unranked plugins */ if (gst_plugin_feature_get_rank (GST_PLUGIN_FEATURE (factory)) == GST_RANK_NONE) continue; if (!filter (factory)) continue; if (caps && !check_caps_compatibility (factory, caps, &matched_caps)) continue; if (!matched_caps) { list = create_codec_cap_list (factory, direction, list, NULL); } else { gint i; GPtrArray *capslist = g_ptr_array_new_with_free_func ( (GDestroyNotify) gst_caps_unref); while (gst_caps_get_size (matched_caps) > 0) { GstCaps *stolencaps = gst_caps_new_full ( gst_caps_steal_structure (matched_caps, 0), NULL); gboolean got_match = FALSE; for (i = 0; i < capslist->len; i++) { GstCaps *intersect = gst_caps_intersect (stolencaps, g_ptr_array_index (capslist, i)); if (gst_caps_is_empty (intersect)) { gst_caps_unref (intersect); } else { got_match = TRUE; gst_caps_unref (g_ptr_array_index (capslist, i)); g_ptr_array_index (capslist, i) = intersect; } } if (got_match) gst_caps_unref (stolencaps); else g_ptr_array_add (capslist, stolencaps); } gst_caps_unref (matched_caps); for (i = 0; i < capslist->len; i++) list = create_codec_cap_list (factory, direction, list, g_ptr_array_index (capslist, i)); g_ptr_array_unref (capslist); } } gst_plugin_feature_list_free (result); return list; }
/* Maps an XviD colorspace constant (XVID_CSP_*) plus a frame size to the
 * equivalent GStreamer 0.10 raw caps (video/x-raw-rgb or video/x-raw-yuv).
 *
 * For an unrecognized csp the structure stays NULL and gst_caps_new_full()
 * terminates immediately, yielding empty caps. Caller owns the returned
 * caps. */
GstCaps *
gst_xvid_csp_to_caps (gint csp, gint w, gint h)
{
  GstStructure *structure = NULL;

  switch (csp) {
    case XVID_CSP_RGB555:
    case XVID_CSP_RGB565:
    case XVID_CSP_BGR:
    case XVID_CSP_ABGR:
    case XVID_CSP_BGRA:
#ifdef XVID_CSP_ARGB
    case XVID_CSP_ARGB:
#endif
    case XVID_CSP_RGBA:{
      gint r_mask = 0, b_mask = 0, g_mask = 0,
          endianness = 0, bpp = 0, depth = 0;

      /* per-format channel masks, endianness, depth and bits-per-pixel */
      switch (csp) {
        case XVID_CSP_RGB555:
          r_mask = GST_VIDEO_RED_MASK_15_INT;
          g_mask = GST_VIDEO_GREEN_MASK_15_INT;
          b_mask = GST_VIDEO_BLUE_MASK_15_INT;
          endianness = G_BYTE_ORDER;
          depth = 15;
          bpp = 16;
          break;
        case XVID_CSP_RGB565:
          r_mask = GST_VIDEO_RED_MASK_16_INT;
          g_mask = GST_VIDEO_GREEN_MASK_16_INT;
          b_mask = GST_VIDEO_BLUE_MASK_16_INT;
          endianness = G_BYTE_ORDER;
          depth = 16;
          bpp = 16;
          break;
        case XVID_CSP_BGR:
          r_mask = 0x0000ff;
          g_mask = 0x00ff00;
          b_mask = 0xff0000;
          endianness = G_BIG_ENDIAN;
          depth = 24;
          bpp = 24;
          break;
        case XVID_CSP_ABGR:
          r_mask = 0x000000ff;
          g_mask = 0x0000ff00;
          b_mask = 0x00ff0000;
          endianness = G_BIG_ENDIAN;
          depth = 24;
          bpp = 32;
          break;
        case XVID_CSP_BGRA:
          r_mask = 0x0000ff00;
          g_mask = 0x00ff0000;
          b_mask = 0xff000000;
          endianness = G_BIG_ENDIAN;
          depth = 24;
          bpp = 32;
          break;
#ifdef XVID_CSP_ARGB
        case XVID_CSP_ARGB:
          r_mask = 0x00ff0000;
          g_mask = 0x0000ff00;
          b_mask = 0x000000ff;
          endianness = G_BIG_ENDIAN;
          depth = 24;
          bpp = 32;
          break;
#endif
        case XVID_CSP_RGBA:
          r_mask = 0xff000000;
          g_mask = 0x00ff0000;
          b_mask = 0x0000ff00;
          endianness = G_BIG_ENDIAN;
          depth = 24;
          bpp = 32;
          break;
      }

      structure = gst_structure_new ("video/x-raw-rgb",
          "width", G_TYPE_INT, w,
          "height", G_TYPE_INT, h,
          "depth", G_TYPE_INT, depth,
          "bpp", G_TYPE_INT, bpp,
          "endianness", G_TYPE_INT, endianness,
          "red_mask", G_TYPE_INT, r_mask,
          "green_mask", G_TYPE_INT, g_mask,
          "blue_mask", G_TYPE_INT, b_mask, NULL);
      break;
    }

    case XVID_CSP_YUY2:
    case XVID_CSP_YVYU:
    case XVID_CSP_UYVY:
    case XVID_CSP_I420:
    case XVID_CSP_YV12:{
      guint32 fourcc = 0;

      /* packed/planar YUV variants map to a fourcc */
      switch (csp) {
        case XVID_CSP_YUY2:
          fourcc = GST_MAKE_FOURCC ('Y', 'U', 'Y', '2');
          break;
        case XVID_CSP_YVYU:
          fourcc = GST_MAKE_FOURCC ('Y', 'V', 'Y', 'U');
          break;
        case XVID_CSP_UYVY:
          fourcc = GST_MAKE_FOURCC ('U', 'Y', 'V', 'Y');
          break;
        case XVID_CSP_I420:
          fourcc = GST_MAKE_FOURCC ('I', '4', '2', '0');
          break;
        case XVID_CSP_YV12:
          fourcc = GST_MAKE_FOURCC ('Y', 'V', '1', '2');
          break;
      }

      structure = gst_structure_new ("video/x-raw-yuv",
          "width", G_TYPE_INT, w,
          "height", G_TYPE_INT, h,
          "format", GST_TYPE_FOURCC, fourcc, NULL);
      break;
    }
  }

  return gst_caps_new_full (structure, NULL);
}
/**
 * gst_vaapi_find_preferred_caps_feature:
 * @pad: the pad whose downstream peer is queried
 * @allowed_caps: caps to intersect the peer caps against
 * @out_format_ptr: (inout) (allow-none): on input the preferred format hint,
 *   on output the format negotiated from the chosen caps structure
 *
 * Queries the peer of @pad with no filter, intersects the result with
 * @allowed_caps, and walks the structures in downstream preference order to
 * find the strongest supported caps feature (VAAPI surface > DMABuf >
 * GLTextureUpload meta > system memory).
 *
 * Returns: the selected #GstVaapiCapsFeature, or
 * %GST_VAAPI_CAPS_FEATURE_NOT_NEGOTIATED on failure.
 */
GstVaapiCapsFeature
gst_vaapi_find_preferred_caps_feature (GstPad * pad, GstCaps * allowed_caps,
    GstVideoFormat * out_format_ptr)
{
  GstVaapiCapsFeature feature = GST_VAAPI_CAPS_FEATURE_NOT_NEGOTIATED;
  guint i, j, num_structures;
  GstCaps *peer_caps, *out_caps = NULL, *caps = NULL;
  /* candidate features listed from most to least preferred */
  static const guint feature_list[] = { GST_VAAPI_CAPS_FEATURE_VAAPI_SURFACE,
    GST_VAAPI_CAPS_FEATURE_DMABUF,
    GST_VAAPI_CAPS_FEATURE_GL_TEXTURE_UPLOAD_META,
    GST_VAAPI_CAPS_FEATURE_SYSTEM_MEMORY,
  };

  /* query with no filter */
  peer_caps = gst_pad_peer_query_caps (pad, NULL);
  if (!peer_caps)
    goto cleanup;
  if (gst_caps_is_empty (peer_caps))
    goto cleanup;

  /* filter against our allowed caps */
  out_caps = gst_caps_intersect_full (allowed_caps, peer_caps,
      GST_CAPS_INTERSECT_FIRST);

  /* default feature */
  feature = GST_VAAPI_CAPS_FEATURE_SYSTEM_MEMORY;

  /* if downstream requests caps ANY, system memory is preferred */
  if (gst_caps_is_any (peer_caps))
    goto find_format;

  num_structures = gst_caps_get_size (out_caps);
  for (i = 0; i < num_structures; i++) {
    GstCapsFeatures *const features = gst_caps_get_features (out_caps, i);
    GstStructure *const structure = gst_caps_get_structure (out_caps, i);

    /* Skip ANY features, we need an exact match for correct evaluation */
    if (gst_caps_features_is_any (features))
      continue;

    /* build a single-structure caps (with features) for this candidate;
     * on loop exit, `caps` holds the structure that selected `feature` */
    gst_caps_replace (&caps, NULL);
    caps = gst_caps_new_full (gst_structure_copy (structure), NULL);
    if (!caps)
      continue;
    gst_caps_set_features (caps, 0, gst_caps_features_copy (features));

    for (j = 0; j < G_N_ELEMENTS (feature_list); j++) {
      if (gst_vaapi_caps_feature_contains (caps, feature_list[j])
          && feature < feature_list[j]) {
        feature = feature_list[j];
        break;
      }
    }

    /* Stop at the first match, the caps should already be sorted out
     * by preference order from downstream elements */
    if (feature != GST_VAAPI_CAPS_FEATURE_SYSTEM_MEMORY)
      break;
  }

  if (!caps)
    goto cleanup;

find_format:
  if (out_format_ptr) {
    GstVideoFormat out_format;
    GstStructure *structure = NULL;
    const GValue *format_list;
    GstCapsFeatures *features;

    /* if the best feature is SystemMemory, we should choose the
     * video/x-raw caps in the filtered peer caps set. If not, use
     * the first caps, which is the preferred by downstream. */
    if (feature == GST_VAAPI_CAPS_FEATURE_SYSTEM_MEMORY) {
      gst_caps_replace (&caps, out_caps);
      num_structures = gst_caps_get_size (caps);
      for (i = 0; i < num_structures; i++) {
        structure = gst_caps_get_structure (caps, i);
        features = gst_caps_get_features (caps, i);
        if (!gst_caps_features_is_any (features)
            && gst_caps_features_contains (features,
                gst_vaapi_caps_feature_to_string
                (GST_VAAPI_CAPS_FEATURE_SYSTEM_MEMORY)))
          break;
      }
    } else {
      structure = gst_caps_get_structure (caps, 0);
    }
    if (!structure)
      goto cleanup;
    format_list = gst_structure_get_value (structure, "format");
    if (!format_list)
      goto cleanup;
    /* *out_format_ptr is read here as the preferred-format hint */
    out_format = gst_vaapi_find_preferred_format (format_list, *out_format_ptr);
    if (out_format == GST_VIDEO_FORMAT_UNKNOWN)
      goto cleanup;

    *out_format_ptr = out_format;
  }

cleanup:
  /* gst_caps_replace(&x, NULL) unrefs and NULLs; safe on already-NULL ptrs */
  gst_caps_replace (&caps, NULL);
  gst_caps_replace (&out_caps, NULL);
  gst_caps_replace (&peer_caps, NULL);
  return feature;
}