/* Build Xiph-style (length-prefixed) codec extradata for Theora from the
 * "streamheader" array in @in_struc and store it in @ctx.  Each header is
 * serialized as a 16-bit big-endian length followed by its payload.
 * Does nothing if the caps have no "streamheader" field. */
static void get_theora_extradata(AVCodecContext *ctx, GstStructure *in_struc)
{
    const GValue *array;
    const GValue *value;
    GstBuffer *buf;
    size_t size = 0;
    uint8_t *p;

    array = gst_structure_get_value(in_struc, "streamheader");
    if (!array)
        return;

    /* first pass: total payload size (2 length bytes per header) */
    for (unsigned i = 0; i < gst_value_array_get_size(array); i++) {
        value = gst_value_array_get_value(array, i);
        buf = gst_value_get_buffer(value);
        size += buf->size + 2;
    }

    /* libavcodec requires padding bytes after extradata */
    p = av_mallocz(size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!p)
        return; /* OOM: leave extradata unset instead of dereferencing NULL */
    ctx->extradata = p;

    /* second pass: serialize each header as <be16 size><payload> */
    for (unsigned i = 0; i < gst_value_array_get_size(array); i++) {
        value = gst_value_array_get_value(array, i);
        buf = gst_value_get_buffer(value);
        AV_WB16(p, buf->size);
        p += 2;
        memcpy(p, buf->data, buf->size);
        p += buf->size;
    }
    ctx->extradata_size = p - ctx->extradata;
}
/* Sink pad setcaps handler: parse the Opus "streamheader" array from the
 * caps.  Header 0 is the OpusHead packet; header 1 (comments) is currently
 * ignored; any further headers are collected into dec->extra_headers.
 *
 * Returns FALSE when the OpusHead packet fails to parse (the original
 * returned TRUE unconditionally, hiding the failure from the caller). */
static gboolean
opus_dec_sink_setcaps (GstPad * pad, GstCaps * caps)
{
  GstOpusDec *dec = GST_OPUS_DEC (gst_pad_get_parent (pad));
  gboolean ret = TRUE;
  GstStructure *s;
  const GValue *streamheader;

  s = gst_caps_get_structure (caps, 0);
  if ((streamheader = gst_structure_get_value (s, "streamheader")) &&
      G_VALUE_HOLDS (streamheader, GST_TYPE_ARRAY) &&
      gst_value_array_get_size (streamheader) >= 2) {
    const GValue *header;
    GstBuffer *buf;
    GstFlowReturn res = GST_FLOW_OK;

    header = gst_value_array_get_value (streamheader, 0);
    if (header && G_VALUE_HOLDS (header, GST_TYPE_BUFFER)) {
      buf = gst_value_get_buffer (header);
      res = opus_dec_chain_parse_header (dec, buf);
      if (res != GST_FLOW_OK) {
        /* report the failure instead of pretending the caps were accepted */
        ret = FALSE;
        goto done;
      }
      gst_buffer_replace (&dec->streamheader, buf);
    }
#if 0
    vorbiscomment = gst_value_array_get_value (streamheader, 1);
    if (vorbiscomment && G_VALUE_HOLDS (vorbiscomment, GST_TYPE_BUFFER)) {
      buf = gst_value_get_buffer (vorbiscomment);
      res = opus_dec_chain_parse_comments (dec, buf);
      if (res != GST_FLOW_OK)
        goto done;
      gst_buffer_replace (&dec->vorbiscomment, buf);
    }
#endif

    /* drop any previously collected extra headers */
    g_list_foreach (dec->extra_headers, (GFunc) gst_mini_object_unref, NULL);
    g_list_free (dec->extra_headers);
    dec->extra_headers = NULL;

    if (gst_value_array_get_size (streamheader) > 2) {
      gint i, n;

      n = gst_value_array_get_size (streamheader);
      for (i = 2; i < n; i++) {
        header = gst_value_array_get_value (streamheader, i);
        buf = gst_value_get_buffer (header);
        dec->extra_headers =
            g_list_prepend (dec->extra_headers, gst_buffer_ref (buf));
      }
    }
  }

done:
  gst_object_unref (dec);
  return ret;
}
/* Store copies of the caps' "streamheader" buffers so they can be re-sent
 * to newly connected HTTP clients.  Always accepts the caps. */
static gboolean
gst_soup_http_client_sink_set_caps (GstBaseSink * sink, GstCaps * caps)
{
  GstSoupHttpClientSink *souphttpsink = GST_SOUP_HTTP_CLIENT_SINK (sink);
  GstStructure *structure;
  const GValue *headers;

  GST_DEBUG_OBJECT (souphttpsink, "new stream headers set");

  structure = gst_caps_get_structure (caps, 0);
  headers = gst_structure_get_value (structure, "streamheader");
  if (headers != NULL) {
    guint idx, count;

    /* throw away the previous header list before rebuilding it */
    g_list_free_full (souphttpsink->streamheader_buffers,
        (GDestroyNotify) gst_buffer_unref);
    souphttpsink->streamheader_buffers = NULL;

    count = gst_value_array_get_size (headers);
    for (idx = 0; idx < count; idx++) {
      const GValue *entry = gst_value_array_get_value (headers, idx);
      GstBuffer *header_buf = GST_BUFFER (gst_value_get_buffer (entry));

      souphttpsink->streamheader_buffers =
          g_list_append (souphttpsink->streamheader_buffers,
          gst_buffer_ref (header_buf));
    }
  }

  return TRUE;
}
/* GstStructureForeachFunc: append the value(s) under @field_id as extra
 * header(s).  GST_TYPE_ARRAY and GST_TYPE_LIST values are expanded element
 * by element; any other value is forwarded as a single header.
 * Returns FALSE as soon as one append fails. */
static gboolean
_append_extra_headers (GQuark field_id, const GValue * value,
    gpointer user_data)
{
  GType type = G_VALUE_TYPE (value);

  if (type == GST_TYPE_ARRAY) {
    guint idx, count = gst_value_array_get_size (value);

    for (idx = 0; idx < count; idx++) {
      const GValue *item = gst_value_array_get_value (value, idx);

      if (!_append_extra_header (field_id, item, user_data))
        return FALSE;
    }
    return TRUE;
  }

  if (type == GST_TYPE_LIST) {
    guint idx, count = gst_value_list_get_size (value);

    for (idx = 0; idx < count; idx++) {
      const GValue *item = gst_value_list_get_value (value, idx);

      if (!_append_extra_header (field_id, item, user_data))
        return FALSE;
    }
    return TRUE;
  }

  return _append_extra_header (field_id, value, user_data);
}
/**
 * gst_buffer_pool_config_add_option:
 * @config: a #GstBufferPool configuration
 * @option: an option to add
 *
 * Enabled the option in @config. This will instruct the @bufferpool to enable
 * the specified option on the buffers that it allocates.
 *
 * The supported options by @pool can be retrieved with gst_buffer_pool_get_options().
 */
void
gst_buffer_pool_config_add_option (GstStructure * config, const gchar * option)
{
  const GValue *value;
  GValue option_value = { 0, };

  g_return_if_fail (config != NULL);

  value = gst_structure_id_get_value (config, GST_QUARK (OPTIONS));
  if (value == NULL) {
    /* no options array yet: create one and re-fetch the stored copy */
    GValue new_array_val = { 0, };

    g_value_init (&new_array_val, GST_TYPE_ARRAY);
    gst_structure_id_take_value (config, GST_QUARK (OPTIONS), &new_array_val);
    value = gst_structure_id_get_value (config, GST_QUARK (OPTIONS));
  } else {
    /* bail out early if the option is already enabled */
    guint idx, count = gst_value_array_get_size (value);

    for (idx = 0; idx < count; ++idx) {
      const GValue *entry = gst_value_array_get_value (value, idx);

      if (g_str_equal (option, g_value_get_string (entry)))
        return;
    }
  }

  g_value_init (&option_value, G_TYPE_STRING);
  g_value_set_string (&option_value, option);
  gst_value_array_append_value ((GValue *) value, &option_value);
  g_value_unset (&option_value);
}
static JsonNode * snra_json_value_to_node (const GValue *value) { JsonNode *n = NULL; if (GST_VALUE_HOLDS_STRUCTURE (value)) { const GstStructure *s = gst_value_get_structure (value); n = snra_json_from_gst_structure (s); } else if (GST_VALUE_HOLDS_ARRAY (value)) { guint count = gst_value_array_get_size (value); guint i; JsonArray *arr = json_array_sized_new (count); for (i = 0; i < count; i++) { const GValue *sub_val = gst_value_array_get_value (value, i); JsonNode *tmp = snra_json_value_to_node (sub_val); if (tmp) json_array_add_element (arr, tmp); } n = json_node_new (JSON_NODE_ARRAY); json_node_take_array (n, arr); } else { n = json_node_new (JSON_NODE_VALUE); json_node_set_value (n, value); } return n; }
/* GstStructureForeachFunc: insert one structure field into the global
 * treestore under @user_data (the parent row).  Arrays get a parent row
 * named after the field with one child per element; nested structures
 * recurse; scalar values are rendered as "name: value". */
static gboolean
insert_field (GQuark field_id, const GValue * val, gpointer user_data)
{
  GtkTreeIter *parent_iter = user_data;
  GtkTreeIter iter;
  const gchar *field_name = g_quark_to_string (field_id);

  gtk_tree_store_append (treestore, &iter, parent_iter);

  if (G_VALUE_TYPE (val) == GST_TYPE_ARRAY) {
    guint idx, count = gst_value_array_get_size (val);
    GtkTreeIter child_iter;

    /* the field name becomes the parent row, one child row per element */
    gtk_tree_store_set (treestore, &iter, 0, field_name, -1);

    for (idx = 0; idx < count; idx++) {
      const GValue *element = gst_value_array_get_value (val, idx);

      gtk_tree_store_append (treestore, &child_iter, &iter);

      if (G_VALUE_TYPE (element) == GST_TYPE_STRUCTURE) {
        const GstStructure *sub = gst_value_get_structure (element);

        gtk_tree_store_set (treestore, &child_iter, 0,
            gst_structure_get_name (sub), -1);
        gst_structure_foreach (sub, insert_field, &child_iter);
      } else {
        gchar *text = g_value_to_string (element);

        gtk_tree_store_set (treestore, &child_iter, 0, text, -1);
        g_free (text);
      }
    }
  } else if (G_VALUE_TYPE (val) == GST_TYPE_STRUCTURE) {
    const GstStructure *sub = gst_value_get_structure (val);
    gchar *row_text = g_strdup_printf ("%s: %s", field_name,
        gst_structure_get_name (sub));

    gtk_tree_store_set (treestore, &iter, 0, row_text, -1);
    g_free (row_text);
    gst_structure_foreach (sub, insert_field, &iter);
  } else {
    gchar *text = g_value_to_string (val);
    gchar *row_text = g_strdup_printf ("%s: %s", field_name, text);

    gtk_tree_store_set (treestore, &iter, 0, row_text, -1);
    g_free (text);
    g_free (row_text);
  }

  return TRUE;
}
/* Test that intersecting a fixed channel layout (an array) with a list of
 * candidate layouts yields the matching layout, and yields nothing when no
 * candidate matches. */
void test_channel_layout_value_intersect()
{
  GValue layout = { 0, };
  GValue list = { 0, };
  GValue res = { 0, };

  xmlfile = "test_channel_layout_value_intersect";
  std_log(LOG_FILENAME_LINE, "Test Started test_channel_layout_value_intersect");

  /* build a list with two candidate layouts: R/L and L/R */
  g_value_init (&list, GST_TYPE_LIST);
  init_value_to_channel_layout (&layout, GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT,
      GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT);
  gst_value_list_append_value (&list, &layout);
  g_value_unset (&layout);
  init_value_to_channel_layout (&layout, GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT,
      GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT);
  gst_value_list_append_value (&list, &layout);
  g_value_unset (&layout);

  init_value_to_channel_layout (&layout, GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT,
      GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT);
  /* we should get the second layout in the list, as it matches the input */
  fail_unless (gst_value_intersect (&res, &layout, &list));
  g_value_unset (&layout);
  fail_unless (GST_VALUE_HOLDS_ARRAY (&res));
  fail_unless_equals_int (gst_value_array_get_size (&res), 2);
  fail_unless_equals_int (g_value_get_enum (gst_value_array_get_value (&res,
              0)), GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT);
  fail_unless_equals_int (g_value_get_enum (gst_value_array_get_value (&res,
              1)), GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT);
  g_value_unset (&res);

  /* this (with rear position) should not yield any results */
  init_value_to_channel_layout (&layout, GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT,
      GST_AUDIO_CHANNEL_POSITION_REAR_RIGHT);
  fail_if (gst_value_intersect (&res, &layout, &list));
  g_value_unset (&layout);
  g_value_unset (&list);

  std_log(LOG_FILENAME_LINE, "Test Successful");
  create_xml(0);
}
/* GstAudioDecoder::set_format vfunc: feed the FLAC stream headers from the
 * caps' "streamheader" array into the decoder's adapter, then let libFLAC
 * process all metadata before any audio frames arrive.
 *
 * Returns FALSE when the caps carry no usable "streamheader" array or when
 * one of its entries is not a buffer. */
static gboolean
gst_flac_dec_set_format (GstAudioDecoder * dec, GstCaps * caps)
{
  const GValue *headers;
  GstFlacDec *flacdec;
  GstStructure *s;
  guint i, num;

  flacdec = GST_FLAC_DEC (dec);

  GST_LOG_OBJECT (dec, "sink caps: %" GST_PTR_FORMAT, caps);

  s = gst_caps_get_structure (caps, 0);

  headers = gst_structure_get_value (s, "streamheader");
  if (headers == NULL || !GST_VALUE_HOLDS_ARRAY (headers)) {
    GST_WARNING_OBJECT (dec, "no 'streamheader' field in input caps, try "
        "adding a flacparse element upstream");
    return FALSE;
  }

  if (gst_adapter_available (flacdec->adapter) > 0) {
    GST_WARNING_OBJECT (dec, "unexpected data left in adapter");
    gst_adapter_clear (flacdec->adapter);
  }

  num = gst_value_array_get_size (headers);
  for (i = 0; i < num; ++i) {
    const GValue *header_val;
    GstBuffer *header_buf;

    header_val = gst_value_array_get_value (headers, i);
    if (header_val == NULL || !GST_VALUE_HOLDS_BUFFER (header_val))
      return FALSE;

    /* dup'd reference: the adapter takes ownership of it */
    header_buf = g_value_dup_boxed (header_val);
    GST_INFO_OBJECT (dec, "pushing header buffer of %" G_GSIZE_FORMAT " bytes "
        "into adapter", gst_buffer_get_size (header_buf));
    gst_adapter_push (flacdec->adapter, header_buf);
  }

  GST_DEBUG_OBJECT (dec, "Processing headers and metadata");
  if (!FLAC__stream_decoder_process_until_end_of_metadata (flacdec->decoder)) {
    GST_WARNING_OBJECT (dec, "process_until_end_of_metadata failed");
    if (FLAC__stream_decoder_get_state (flacdec->decoder) ==
        FLAC__STREAM_DECODER_ABORTED) {
      GST_WARNING_OBJECT (flacdec, "Read callback caused internal abort");
      /* allow recovery */
      gst_adapter_clear (flacdec->adapter);
      FLAC__stream_decoder_flush (flacdec->decoder);
      gst_flac_dec_handle_decoder_error (flacdec, TRUE);
    }
  }

  GST_INFO_OBJECT (dec, "headers and metadata are now processed");
  return TRUE;
}
/* GstAudioDecoder::set_format vfunc: parse the OpusHead and comment header
 * buffers from the caps' "streamheader" array, when present.
 *
 * Returns FALSE when a header buffer fails to parse (the original returned
 * TRUE unconditionally, hiding parse failures from the base class). */
static gboolean
gst_opus_dec_set_format (GstAudioDecoder * bdec, GstCaps * caps)
{
  GstOpusDec *dec = GST_OPUS_DEC (bdec);
  gboolean ret = TRUE;
  GstStructure *s;
  const GValue *streamheader;

  GST_DEBUG_OBJECT (dec, "set_format: %" GST_PTR_FORMAT, caps);

  s = gst_caps_get_structure (caps, 0);
  if ((streamheader = gst_structure_get_value (s, "streamheader")) &&
      G_VALUE_HOLDS (streamheader, GST_TYPE_ARRAY) &&
      gst_value_array_get_size (streamheader) >= 2) {
    const GValue *header, *vorbiscomment;
    GstBuffer *buf;
    GstFlowReturn res = GST_FLOW_OK;

    header = gst_value_array_get_value (streamheader, 0);
    if (header && G_VALUE_HOLDS (header, GST_TYPE_BUFFER)) {
      buf = gst_value_get_buffer (header);
      res = gst_opus_dec_parse_header (dec, buf);
      if (res != GST_FLOW_OK) {
        /* propagate the failure instead of returning TRUE */
        ret = FALSE;
        goto done;
      }
      gst_buffer_replace (&dec->streamheader, buf);
    }

    vorbiscomment = gst_value_array_get_value (streamheader, 1);
    if (vorbiscomment && G_VALUE_HOLDS (vorbiscomment, GST_TYPE_BUFFER)) {
      buf = gst_value_get_buffer (vorbiscomment);
      res = gst_opus_dec_parse_comments (dec, buf);
      if (res != GST_FLOW_OK) {
        ret = FALSE;
        goto done;
      }
      gst_buffer_replace (&dec->vorbiscomment, buf);
    }
  }

done:
  return ret;
}
/* Convert a GstValueArray GValue into a v8 Array, converting each element
 * recursively via gvalue_to_v8().  Throws a TypeError and returns undefined
 * if @gv does not hold a GstValueArray. */
Handle<Value> gstvaluearray_to_v8(const GValue *gv) {
	if (!GST_VALUE_HOLDS_ARRAY(gv)) {
		Nan::ThrowTypeError("not a GstValueArray");
		return Nan::Undefined();
	}

	const int size = gst_value_array_get_size(gv);
	Handle<Array> array = Nan::New<Array>(size);

	for (int i = 0; i < size; i++) {
		const GValue *element = gst_value_array_get_value(gv, i);
		array->Set(Nan::New<Number>(i), gvalue_to_v8(element));
	}

	return array;
}
/* Map a GstValueArray onto a newly allocated Ruby Array, converting each
 * element with GVAL2RVAL. */
static VALUE
value_array_gvalue2rvalue(const GValue *value)
{
    guint idx;
    guint count = gst_value_array_get_size(value);
    VALUE ary = rb_ary_new2(count);

    for (idx = 0; idx < count; idx++) {
        const GValue *element = gst_value_array_get_value(value, idx);

        rb_ary_push(ary, GVAL2RVAL(element));
    }

    return ary;
}
/* Feed the Vorbis header packets stored in the current sink caps'
 * "streamheader" array to the header parser.
 *
 * Returns GST_FLOW_OK on success, GST_FLOW_NOT_NEGOTIATED when the array is
 * missing/too short, contains a NULL buffer, or a header fails to parse. */
static GstFlowReturn
vorbis_dec_handle_header_caps (GstVorbisDec * vd)
{
  GstFlowReturn result = GST_FLOW_OK;
  GstCaps *caps;
  GstStructure *s = NULL;
  const GValue *array = NULL;

  caps = gst_pad_get_current_caps (GST_AUDIO_DECODER_SINK_PAD (vd));
  if (caps)
    s = gst_caps_get_structure (caps, 0);
  if (s)
    array = gst_structure_get_value (s, "streamheader");
  /* caps can be unreffed here: @array points into the structure, which the
   * caps keep alive only as long as something else holds the caps —
   * NOTE(review): presumably the pad still holds a ref; confirm */
  if (caps)
    gst_caps_unref (caps);

  if (array && (gst_value_array_get_size (array) >= MIN_NUM_HEADERS)) {
    const GValue *value = NULL;
    GstBuffer *buf = NULL;
    gint i = 0;

    /* NOTE(review): gint i compared against the guint array size — fine for
     * realistic header counts, but a signed/unsigned mismatch */
    while (result == GST_FLOW_OK && i < gst_value_array_get_size (array)) {
      value = gst_value_array_get_value (array, i);
      buf = gst_value_get_buffer (value);
      if (!buf)
        goto null_buffer;
      result = vorbis_dec_handle_header_buffer (vd, buf);
      i++;
    }
  } else
    goto array_error;

done:
  /* collapse every failure into NOT_NEGOTIATED for the caller */
  return (result != GST_FLOW_OK ? GST_FLOW_NOT_NEGOTIATED : GST_FLOW_OK);

  /* ERRORS */
array_error:
  {
    GST_WARNING_OBJECT (vd, "streamheader array not found");
    result = GST_FLOW_ERROR;
    goto done;
  }
null_buffer:
  {
    GST_WARNING_OBJECT (vd, "streamheader with null buffer received");
    result = GST_FLOW_ERROR;
    goto done;
  }
}
/**
 * gst_buffer_pool_config_get_option:
 * @config: a #GstBufferPool configuration
 * @index: position in the option array to read
 *
 * Parse an available @config and get the option
 * at @index of the options API array.
 *
 * Returns: the option at @index, or %NULL if no options are configured or
 * @index is out of range.
 */
const gchar *
gst_buffer_pool_config_get_option (GstStructure * config, guint index)
{
  const GValue *value;
  const gchar *ret = NULL;

  /* NULL, not 0, is the idiomatic failure value for a pointer return */
  g_return_val_if_fail (config != NULL, NULL);

  value = gst_structure_id_get_value (config, GST_QUARK (OPTIONS));
  if (value) {
    const GValue *option_value;

    option_value = gst_value_array_get_value (value, index);
    if (option_value)
      ret = g_value_get_string (option_value);
  }
  return ret;
}
EXPORT_C #endif void gst_mixer_message_parse_volume_changed (GstMessage * message, GstMixerTrack ** track, gint ** volumes, gint * num_channels) { const GstStructure *s; g_return_if_fail (gst_mixer_message_is_mixer_message (message)); g_return_if_fail (GST_MIXER_MESSAGE_HAS_TYPE (message, VOLUME_CHANGED)); s = gst_message_get_structure (message); if (track) { const GValue *v = gst_structure_get_value (s, "track"); g_return_if_fail (v != NULL); *track = (GstMixerTrack *) g_value_get_object (v); g_return_if_fail (GST_IS_MIXER_TRACK (*track)); } if (volumes || num_channels) { gint n_chans, i; const GValue *v = gst_structure_get_value (s, "volumes"); g_return_if_fail (v != NULL); g_return_if_fail (GST_VALUE_HOLDS_ARRAY (v)); n_chans = gst_value_array_get_size (v); if (num_channels) *num_channels = n_chans; if (volumes) { *volumes = g_new (gint, n_chans); for (i = 0; i < n_chans; i++) { const GValue *e = gst_value_array_get_value (v, i); g_return_if_fail (e != NULL && G_VALUE_HOLDS_INT (e)); (*volumes)[i] = g_value_get_int (e); } } } }
static PyObject * gi_gst_array_from_value (const GValue * value) { PyObject *list, *array_type, *array; gint i; list = PyList_New (gst_value_array_get_size (value)); for (i = 0; i < gst_value_array_get_size (value); i++) { const GValue *v = gst_value_array_get_value (value, i); PyList_SET_ITEM (list, i, pyg_value_as_pyobject (v, TRUE)); } array_type = gi_gst_get_type ("ValueArray"); array = PyObject_CallFunction (array_type, "N", list); Py_DECREF (array_type); return array; }
/**
 * gst_buffer_pool_config_has_option:
 * @config: a #GstBufferPool configuration
 * @option: an option
 *
 * Check if @config contains @option
 *
 * Returns: TRUE if the options array contains @option.
 */
gboolean
gst_buffer_pool_config_has_option (GstStructure * config, const gchar * option)
{
  const GValue *options;
  guint idx, count;

  g_return_val_if_fail (config != NULL, 0);

  options = gst_structure_id_get_value (config, GST_QUARK (OPTIONS));
  if (options == NULL)
    return FALSE;

  count = gst_value_array_get_size (options);
  for (idx = 0; idx < count; ++idx) {
    const GValue *entry = gst_value_array_get_value (options, idx);

    if (g_str_equal (option, g_value_get_string (entry)))
      return TRUE;
  }
  return FALSE;
}
/* GstBaseSink::set_caps vfunc: cache the "streamheader" buffers from the
 * caps so they can be written at the start of each new file.  Always
 * accepts the caps. */
static gboolean
gst_multi_file_sink_set_caps (GstBaseSink * sink, GstCaps * caps)
{
  GstMultiFileSink *multifilesink;
  GstStructure *structure;

  multifilesink = GST_MULTI_FILE_SINK (sink);
  structure = gst_caps_get_structure (caps, 0);

  if (structure) {
    const GValue *value;

    value = gst_structure_get_value (structure, "streamheader");

    /* value is NULL when the caps have no "streamheader" field; guard it
     * before GST_VALUE_HOLDS_ARRAY(), which inspects the GValue's type */
    if (value != NULL && GST_VALUE_HOLDS_ARRAY (value)) {
      int i;

      /* release the previously cached headers */
      if (multifilesink->streamheaders) {
        for (i = 0; i < multifilesink->n_streamheaders; i++) {
          gst_buffer_unref (multifilesink->streamheaders[i]);
        }
        g_free (multifilesink->streamheaders);
      }

      multifilesink->n_streamheaders = gst_value_array_get_size (value);
      multifilesink->streamheaders =
          g_malloc (sizeof (GstBuffer *) * multifilesink->n_streamheaders);

      for (i = 0; i < multifilesink->n_streamheaders; i++) {
        multifilesink->streamheaders[i] =
            gst_buffer_ref (gst_value_get_buffer (gst_value_array_get_value
                (value, i)));
      }
    }
  }

  return TRUE;
}
/* Parse one caps structure into the device's format table: fixate width and
 * height, then add one entry per framerate found in the "framerate" field
 * (fraction, range, array or list). */
static void
lgm_device_parse_structure (GstStructure * s, GHashTable * table)
{
  gint width, height;
  const GValue *val;
  gchar *struct_str;

  struct_str = gst_structure_to_string (s);
  GST_DEBUG ("Parsing structure: %s\n", struct_str);
  g_free (struct_str);

  width = lgm_device_fixate_int_value (gst_structure_get_value (s, "width"));
  height = lgm_device_fixate_int_value (gst_structure_get_value (s, "height"));

  val = gst_structure_get_value (s, "framerate");
  if (val == NULL) {
    /* no framerate field at all: the original code crashed here by passing
     * NULL to G_VALUE_TYPE(); skip the structure instead */
    return;
  }
  if (G_VALUE_TYPE (val) == GST_TYPE_FRACTION) {
    lgm_device_add_format_from_fps_val (table, width, height, val);
  } else if (G_VALUE_TYPE (val) == GST_TYPE_FRACTION_RANGE) {
    /* For sources returning template caps or ranges set framerate to 0/0 */
    lgm_device_add_format (table, width, height, 0, 0);
  } else if (G_VALUE_TYPE (val) == GST_TYPE_ARRAY) {
    guint n, len;

    len = gst_value_array_get_size (val);
    for (n = 0; n < len; n++) {
      const GValue *kid = gst_value_array_get_value (val, n);
      lgm_device_add_format_from_fps_val (table, width, height, kid);
    }
  } else if (G_VALUE_TYPE (val) == GST_TYPE_LIST) {
    guint n, len;

    len = gst_value_list_get_size (val);
    for (n = 0; n < len; n++) {
      const GValue *kid = gst_value_list_get_value (val, n);
      lgm_device_add_format_from_fps_val (table, width, height, kid);
    }
  }
}
/* Fixate an integer-valued caps field to a single int: the minimum of an
 * int range, the first element of an array/list, or the plain value. */
static int
lgm_device_fixate_int_value (const GValue * val)
{
  int ret;

  if (G_VALUE_TYPE (val) == GST_TYPE_INT_RANGE) {
    ret = gst_value_get_int_range_min (val);
  } else if (G_VALUE_TYPE (val) == GST_TYPE_ARRAY) {
    const GValue *kid = gst_value_array_get_value (val, 0);
    ret = g_value_get_int (kid);
  } else if (G_VALUE_TYPE (val) == GST_TYPE_LIST) {
    const GValue *kid = gst_value_list_get_value (val, 0);
    ret = g_value_get_int (kid);
  } else {
    ret = g_value_get_int (val);
  }

  /* For sources returning template caps set width and height to 0 */
  /* NOTE(review): this maps a literal value of 1 (a template range minimum)
   * to 0 — a genuine 1-pixel dimension would also be zeroed; confirm that
   * is intended */
  if (ret == 1) {
    ret = 0;
  }

  return ret;
}
/* GstAudioDecoder::set_format vfunc: reset the decoder if the caps really
 * changed, then either parse OpusHead/comment headers from the caps'
 * "streamheader" array or fall back to channel/rate fields with a default
 * stereo mapping.
 *
 * Returns FALSE when a header buffer fails to parse (the original returned
 * TRUE unconditionally, hiding the failure from the base class). */
static gboolean
gst_opus_dec_set_format (GstAudioDecoder * bdec, GstCaps * caps)
{
  GstOpusDec *dec = GST_OPUS_DEC (bdec);
  gboolean ret = TRUE;
  GstStructure *s;
  const GValue *streamheader;
  GstCaps *old_caps;

  GST_DEBUG_OBJECT (dec, "set_format: %" GST_PTR_FORMAT, caps);

  /* only reset when the caps actually differ from the current ones */
  if ((old_caps = gst_pad_get_current_caps (GST_AUDIO_DECODER_SINK_PAD (bdec)))) {
    if (gst_caps_is_equal (caps, old_caps)) {
      gst_caps_unref (old_caps);
      GST_DEBUG_OBJECT (dec, "caps didn't change");
      goto done;
    }

    GST_DEBUG_OBJECT (dec, "caps have changed, resetting decoder");
    gst_opus_dec_reset (dec);
    gst_caps_unref (old_caps);
  }

  s = gst_caps_get_structure (caps, 0);
  if ((streamheader = gst_structure_get_value (s, "streamheader")) &&
      G_VALUE_HOLDS (streamheader, GST_TYPE_ARRAY) &&
      gst_value_array_get_size (streamheader) >= 2) {
    const GValue *header, *vorbiscomment;
    GstBuffer *buf;
    GstFlowReturn res = GST_FLOW_OK;

    header = gst_value_array_get_value (streamheader, 0);
    if (header && G_VALUE_HOLDS (header, GST_TYPE_BUFFER)) {
      buf = gst_value_get_buffer (header);
      res = gst_opus_dec_parse_header (dec, buf);
      if (res != GST_FLOW_OK) {
        /* propagate the failure instead of returning TRUE */
        ret = FALSE;
        goto done;
      }
      gst_buffer_replace (&dec->streamheader, buf);
    }

    vorbiscomment = gst_value_array_get_value (streamheader, 1);
    if (vorbiscomment && G_VALUE_HOLDS (vorbiscomment, GST_TYPE_BUFFER)) {
      buf = gst_value_get_buffer (vorbiscomment);
      res = gst_opus_dec_parse_comments (dec, buf);
      if (res != GST_FLOW_OK) {
        ret = FALSE;
        goto done;
      }
      gst_buffer_replace (&dec->vorbiscomment, buf);
    }
  } else {
    /* defaults if not in the caps */
    dec->n_channels = 2;
    dec->sample_rate = 48000;

    gst_structure_get_int (s, "channels", &dec->n_channels);
    gst_structure_get_int (s, "rate", &dec->sample_rate);

    /* default stereo mapping */
    dec->channel_mapping_family = 0;
    dec->channel_mapping[0] = 0;
    dec->channel_mapping[1] = 1;
    dec->n_streams = 1;
    dec->n_stereo_streams = 1;

    gst_opus_dec_negotiate (dec, NULL);
  }

done:
  return ret;
}
/* Sink pad setcaps handler for interleave.  The first caps set on any sink
 * pad define the common input format; later caps must be a subset of those.
 * Also picks up an optional one-entry "channel-positions" array for this
 * pad's channel and pushes the combined src caps downstream. */
static gboolean
gst_interleave_sink_setcaps (GstPad * pad, GstCaps * caps)
{
  GstInterleave *self;

  g_return_val_if_fail (GST_IS_INTERLEAVE_PAD (pad), FALSE);

  self = GST_INTERLEAVE (gst_pad_get_parent (pad));

  /* First caps that are set on a sink pad are used as output caps */
  /* TODO: handle caps changes */
  if (self->sinkcaps && !gst_caps_is_subset (caps, self->sinkcaps)) {
    goto cannot_change_caps;
  } else {
    GstCaps *srccaps;
    GstStructure *s;
    gboolean res;

    s = gst_caps_get_structure (caps, 0);

    if (!gst_structure_get_int (s, "width", &self->width))
      goto no_width;

    if (!gst_structure_get_int (s, "rate", &self->rate))
      goto no_rate;

    gst_interleave_set_process_function (self);

    if (gst_structure_has_field (s, "channel-positions")) {
      const GValue *pos_array;

      pos_array = gst_structure_get_value (s, "channel-positions");
      /* a sink pad carries exactly one channel, so only a one-element
       * position array is meaningful here */
      if (GST_VALUE_HOLDS_ARRAY (pos_array)
          && gst_value_array_get_size (pos_array) == 1) {
        const GValue *pos = gst_value_array_get_value (pos_array, 0);
        GValue *apos = g_value_array_get_nth (self->input_channel_positions,
            GST_INTERLEAVE_PAD_CAST (pad)->channel);

        g_value_set_enum (apos, g_value_get_enum (pos));
      }
    }

    /* src caps: same format, but with the total channel count and the
     * combined channel positions */
    srccaps = gst_caps_copy (caps);
    s = gst_caps_get_structure (srccaps, 0);

    gst_structure_set (s, "channels", G_TYPE_INT, self->channels, NULL);
    gst_interleave_set_channel_positions (self, s);

    res = gst_pad_set_caps (self->src, srccaps);
    gst_caps_unref (srccaps);

    if (!res)
      goto src_did_not_accept;
  }

  if (!self->sinkcaps) {
    GstCaps *sinkcaps = gst_caps_copy (caps);
    GstStructure *s = gst_caps_get_structure (sinkcaps, 0);

    /* positions differ per pad, so strip them from the stored reference
     * caps used for the subset check above */
    gst_structure_remove_field (s, "channel-positions");

    gst_caps_replace (&self->sinkcaps, sinkcaps);
    gst_caps_unref (sinkcaps);
  }

  gst_object_unref (self);

  return TRUE;

  /* ERRORS */
cannot_change_caps:
  {
    GST_DEBUG_OBJECT (self, "caps of %" GST_PTR_FORMAT " already set, can't "
        "change", self->sinkcaps);
    gst_object_unref (self);
    return FALSE;
  }
src_did_not_accept:
  {
    GST_DEBUG_OBJECT (self, "src did not accept setcaps()");
    gst_object_unref (self);
    return FALSE;
  }
no_width:
  {
    GST_WARNING_OBJECT (self, "caps did not have width: %" GST_PTR_FORMAT,
        caps);
    gst_object_unref (self);
    return FALSE;
  }
no_rate:
  {
    GST_WARNING_OBJECT (self, "caps did not have rate: %" GST_PTR_FORMAT,
        caps);
    gst_object_unref (self);
    return FALSE;
  }
}
/* GstAudioDecoder::set_format vfunc for the Android MediaCodec audio
 * decoder: restart the codec on a real format change, translate the caps
 * into an amc format (including csd-N codec-data blobs taken from
 * "codec_data" or "streamheader"), then configure and start the codec.
 *
 * Returns FALSE on any unrecoverable configuration error. */
static gboolean
gst_amc_audio_dec_set_format (GstAudioDecoder * decoder, GstCaps * caps)
{
  GstAmcAudioDec *self;
  GstStructure *s;
  GstAmcFormat *format;
  const gchar *mime;
  gboolean is_format_change = FALSE;
  gboolean needs_disable = FALSE;
  gchar *format_string;
  gint rate, channels;
  GError *err = NULL;

  self = GST_AMC_AUDIO_DEC (decoder);

  GST_DEBUG_OBJECT (self, "Setting new caps %" GST_PTR_FORMAT, caps);

  /* Check if the caps change is a real format change or if only irrelevant
   * parts of the caps have changed or nothing at all. */
  is_format_change |= (!self->input_caps
      || !gst_caps_is_equal (self->input_caps, caps));

  needs_disable = self->started;

  /* If the component is not started and a real format change happens
   * we have to restart the component. If no real format change
   * happened we can just exit here. */
  if (needs_disable && !is_format_change) {
    /* Framerate or something minor changed */
    self->input_caps_changed = TRUE;
    GST_DEBUG_OBJECT (self,
        "Already running and caps did not change the format");
    return TRUE;
  }

  if (needs_disable && is_format_change) {
    /* full stop/close/open cycle; the stream lock is dropped around stop()
     * so the streaming thread can exit */
    gst_amc_audio_dec_drain (self);
    GST_AUDIO_DECODER_STREAM_UNLOCK (self);
    gst_amc_audio_dec_stop (GST_AUDIO_DECODER (self));
    GST_AUDIO_DECODER_STREAM_LOCK (self);
    gst_amc_audio_dec_close (GST_AUDIO_DECODER (self));
    if (!gst_amc_audio_dec_open (GST_AUDIO_DECODER (self))) {
      GST_ERROR_OBJECT (self, "Failed to open codec again");
      return FALSE;
    }

    if (!gst_amc_audio_dec_start (GST_AUDIO_DECODER (self))) {
      GST_ERROR_OBJECT (self, "Failed to start codec again");
    }
  }
  /* srcpad task is not running at this point */

  mime = caps_to_mime (caps);
  if (!mime) {
    GST_ERROR_OBJECT (self, "Failed to convert caps to mime");
    return FALSE;
  }

  s = gst_caps_get_structure (caps, 0);
  if (!gst_structure_get_int (s, "rate", &rate) ||
      !gst_structure_get_int (s, "channels", &channels)) {
    GST_ERROR_OBJECT (self, "Failed to get rate/channels");
    return FALSE;
  }

  format = gst_amc_format_new_audio (mime, rate, channels, &err);
  if (!format) {
    GST_ELEMENT_ERROR_FROM_ERROR (self, err);
    return FALSE;
  }

  /* FIXME: These buffers needs to be valid until the codec is stopped again */
  /* codec_datas stores raw g_memdup()'d byte blobs, so they must be freed
   * with g_free() — the previous (GFunc) gst_buffer_unref cast treated them
   * as GstBuffers and corrupted memory */
  g_list_foreach (self->codec_datas, (GFunc) g_free, NULL);
  g_list_free (self->codec_datas);
  self->codec_datas = NULL;

  if (gst_structure_has_field (s, "codec_data")) {
    const GValue *h = gst_structure_get_value (s, "codec_data");
    GstBuffer *codec_data = gst_value_get_buffer (h);
    GstMapInfo minfo;
    guint8 *data;

    gst_buffer_map (codec_data, &minfo, GST_MAP_READ);
    data = g_memdup (minfo.data, minfo.size);
    self->codec_datas = g_list_prepend (self->codec_datas, data);
    gst_amc_format_set_buffer (format, "csd-0", data, minfo.size, &err);
    if (err)
      GST_ELEMENT_WARNING_FROM_ERROR (self, err);
    gst_buffer_unmap (codec_data, &minfo);
  } else if (gst_structure_has_field (s, "streamheader")) {
    const GValue *sh = gst_structure_get_value (s, "streamheader");
    gint nsheaders = gst_value_array_get_size (sh);
    GstBuffer *buf;
    const GValue *h;
    gint i, j;
    gchar *fname;
    GstMapInfo minfo;
    guint8 *data;

    for (i = 0, j = 0; i < nsheaders; i++) {
      h = gst_value_array_get_value (sh, i);
      buf = gst_value_get_buffer (h);

      if (strcmp (mime, "audio/vorbis") == 0) {
        guint8 header_type;

        gst_buffer_extract (buf, 0, &header_type, 1);

        /* Only use the identification and setup packets */
        if (header_type != 0x01 && header_type != 0x05)
          continue;
      }

      fname = g_strdup_printf ("csd-%d", j);
      gst_buffer_map (buf, &minfo, GST_MAP_READ);
      data = g_memdup (minfo.data, minfo.size);
      self->codec_datas = g_list_prepend (self->codec_datas, data);
      gst_amc_format_set_buffer (format, fname, data, minfo.size, &err);
      if (err)
        GST_ELEMENT_WARNING_FROM_ERROR (self, err);
      gst_buffer_unmap (buf, &minfo);
      g_free (fname);
      j++;
    }
  }

  format_string = gst_amc_format_to_string (format, &err);
  if (err)
    GST_ELEMENT_WARNING_FROM_ERROR (self, err);
  GST_DEBUG_OBJECT (self, "Configuring codec with format: %s",
      GST_STR_NULL (format_string));
  g_free (format_string);

  if (!gst_amc_codec_configure (self->codec, format, 0, &err)) {
    GST_ERROR_OBJECT (self, "Failed to configure codec");
    GST_ELEMENT_ERROR_FROM_ERROR (self, err);
    gst_amc_format_free (format);       /* don't leak the format on failure */
    return FALSE;
  }

  gst_amc_format_free (format);

  if (!gst_amc_codec_start (self->codec, &err)) {
    GST_ERROR_OBJECT (self, "Failed to start codec");
    GST_ELEMENT_ERROR_FROM_ERROR (self, err);
    return FALSE;
  }

  self->spf = -1;
  /* TODO: Implement for other codecs too */
  if (gst_structure_has_name (s, "audio/mpeg")) {
    gint mpegversion = -1;

    gst_structure_get_int (s, "mpegversion", &mpegversion);
    if (mpegversion == 1) {
      gint layer = -1, mpegaudioversion = -1;

      gst_structure_get_int (s, "layer", &layer);
      gst_structure_get_int (s, "mpegaudioversion", &mpegaudioversion);
      if (layer == 1)
        self->spf = 384;
      else if (layer == 2)
        self->spf = 1152;
      else if (layer == 3 && mpegaudioversion != -1)
        self->spf = (mpegaudioversion == 1 ? 1152 : 576);
    }
  }

  self->started = TRUE;
  self->input_caps_changed = TRUE;

  /* Start the srcpad loop again */
  self->flushing = FALSE;
  self->downstream_flow_ret = GST_FLOW_OK;
  gst_pad_start_task (GST_AUDIO_DECODER_SRC_PAD (self),
      (GstTaskFunction) gst_amc_audio_dec_loop, decoder, NULL);

  return TRUE;
}
/* GstAudioDecoder::set_format vfunc: reset the decoder when the caps really
 * changed, then configure it either from the OpusHead/comment buffers in the
 * caps' "streamheader" array or, failing that, from the Opus caps fields
 * via gst_codec_utils_opus_parse_caps().
 *
 * Returns FALSE when header or caps parsing fails, or when negotiation
 * fails. */
static gboolean
gst_opus_dec_set_format (GstAudioDecoder * bdec, GstCaps * caps)
{
  GstOpusDec *dec = GST_OPUS_DEC (bdec);
  gboolean ret = TRUE;
  GstStructure *s;
  const GValue *streamheader;
  GstCaps *old_caps;

  GST_DEBUG_OBJECT (dec, "set_format: %" GST_PTR_FORMAT, caps);

  /* only reset when the caps actually differ from the current ones */
  if ((old_caps = gst_pad_get_current_caps (GST_AUDIO_DECODER_SINK_PAD (bdec)))) {
    if (gst_caps_is_equal (caps, old_caps)) {
      gst_caps_unref (old_caps);
      GST_DEBUG_OBJECT (dec, "caps didn't change");
      goto done;
    }

    GST_DEBUG_OBJECT (dec, "caps have changed, resetting decoder");
    gst_opus_dec_reset (dec);
    gst_caps_unref (old_caps);
  }

  s = gst_caps_get_structure (caps, 0);
  if ((streamheader = gst_structure_get_value (s, "streamheader")) &&
      G_VALUE_HOLDS (streamheader, GST_TYPE_ARRAY) &&
      gst_value_array_get_size (streamheader) >= 2) {
    const GValue *header, *vorbiscomment;
    GstBuffer *buf;
    GstFlowReturn res = GST_FLOW_OK;

    /* header 0: the OpusHead packet */
    header = gst_value_array_get_value (streamheader, 0);
    if (header && G_VALUE_HOLDS (header, GST_TYPE_BUFFER)) {
      buf = gst_value_get_buffer (header);
      res = gst_opus_dec_parse_header (dec, buf);
      if (res != GST_FLOW_OK) {
        ret = FALSE;
        goto done;
      }
      gst_buffer_replace (&dec->streamheader, buf);
    }

    /* header 1: the comment packet */
    vorbiscomment = gst_value_array_get_value (streamheader, 1);
    if (vorbiscomment && G_VALUE_HOLDS (vorbiscomment, GST_TYPE_BUFFER)) {
      buf = gst_value_get_buffer (vorbiscomment);
      res = gst_opus_dec_parse_comments (dec, buf);
      if (res != GST_FLOW_OK) {
        ret = FALSE;
        goto done;
      }
      gst_buffer_replace (&dec->vorbiscomment, buf);
    }
  } else {
    const GstAudioChannelPosition *posn = NULL;

    /* no in-band headers: derive everything from the caps fields */
    if (!gst_codec_utils_opus_parse_caps (caps, &dec->sample_rate,
            &dec->n_channels, &dec->channel_mapping_family, &dec->n_streams,
            &dec->n_stereo_streams, dec->channel_mapping)) {
      ret = FALSE;
      goto done;
    }

    /* mapping family 1 uses the predefined Vorbis channel orders */
    if (dec->channel_mapping_family == 1 && dec->n_channels <= 8)
      posn = gst_opus_channel_positions[dec->n_channels - 1];

    if (!gst_opus_dec_negotiate (dec, posn))
      return FALSE;
  }

done:
  return ret;
}
/* Parse the "player-clients" array out of a JSON player-info response and
 * replace client->player_info with the freshly parsed GArray, emitting
 * SIGNAL_PLAYER_INFO_CHANGED on success. On any malformed entry the partially
 * built array is discarded via free_player_info().
 *
 * Fix: the SoupBuffer returned by soup_message_body_flatten() was never
 * released (not even on the early "invalid chunk" return), leaking the
 * flattened response body on every invocation. All exits now funnel through
 * a single soup_buffer_free() at the end. */
static void
handle_player_info (G_GNUC_UNUSED SoupSession *session, SoupMessage *msg,
    SnraClient *client)
{
  SoupBuffer *buffer;

  /* Ignore anything that is not a 2xx response */
  if (msg->status_code < 200 || msg->status_code >= 300)
    return;

  buffer = soup_message_body_flatten (msg->response_body);
  if (json_parser_load_from_data (client->json, buffer->data, buffer->length,
          NULL)) {
    const GValue *v1;
    GArray *player_info = NULL;
    gsize i;
    JsonNode *root = json_parser_get_root (client->json);
    GstStructure *s1 = snra_json_to_gst_structure (root);

    if (s1 == NULL)
      goto done;                /* Invalid chunk */

    v1 = gst_structure_get_value (s1, "player-clients");
    if (!GST_VALUE_HOLDS_ARRAY (v1))
      goto failed;

    player_info = g_array_sized_new (TRUE, TRUE, sizeof (SnraPlayerInfo),
        gst_value_array_get_size (v1));

    for (i = 0; i < gst_value_array_get_size (v1); i++) {
      SnraPlayerInfo info;
      const GValue *v2 = gst_value_array_get_value (v1, i);
      const GstStructure *s2;
      gint64 client_id;

      if (!GST_VALUE_HOLDS_STRUCTURE (v2))
        goto failed;
      s2 = gst_value_get_structure (v2);

      /* Each entry must provide client-id, enabled, volume and host */
      if (!snra_json_structure_get_int64 (s2, "client-id", &client_id))
        goto failed;
      info.id = client_id;
      if (!snra_json_structure_get_boolean (s2, "enabled", &info.enabled))
        goto failed;
      if (!snra_json_structure_get_double (s2, "volume", &info.volume))
        goto failed;
      if (!(info.host = g_strdup (gst_structure_get_string (s2, "host"))))
        goto failed;
      g_array_append_val (player_info, info);
    }

    /* Swap in the new info; ownership moves to the client object */
    free_player_info (client->player_info);
    client->player_info = player_info;
    player_info = NULL;

    g_signal_emit (client, signals[SIGNAL_PLAYER_INFO_CHANGED], 0);

  failed:
    if (player_info)
      free_player_info (player_info);
    gst_structure_free (s1);
  }

done:
  soup_buffer_free (buffer);
}
/* * cheese_camera_device_get_highest_framerate: * @framerate: a #GValue holding a framerate cap * @numerator: destination to store the numerator of the highest rate * @denominator: destination to store the denominator of the highest rate * * Get the numerator and denominator for the highest framerate stored in * a framerate cap. * * Note this function does not handle framerate ranges, if @framerate * contains a range it will return 0/0 as framerate */ static void cheese_camera_device_get_highest_framerate (const GValue *framerate, gint *numerator, gint *denominator) { *numerator = 0; *denominator = 0; if (GST_VALUE_HOLDS_FRACTION (framerate)) { *numerator = gst_value_get_fraction_numerator (framerate); *denominator = gst_value_get_fraction_denominator (framerate); } else if (GST_VALUE_HOLDS_ARRAY (framerate)) { float curr, highest = 0; guint i, size = gst_value_array_get_size (framerate); for (i = 0; i < size; i++) { const GValue *val = gst_value_array_get_value (framerate, i); if (!GST_VALUE_HOLDS_FRACTION (val) || gst_value_get_fraction_denominator (val) == 0) { continue; } curr = (float)gst_value_get_fraction_numerator (val) / (float)gst_value_get_fraction_denominator (val); if (curr > highest && curr <= CHEESE_MAXIMUM_RATE) { highest = curr; *numerator = gst_value_get_fraction_numerator (val); *denominator = gst_value_get_fraction_denominator (val); } } } else if (GST_VALUE_HOLDS_LIST (framerate)) { float curr, highest = 0; guint i, size = gst_value_list_get_size (framerate); for (i = 0; i < size; i++) { const GValue *val = gst_value_list_get_value(framerate, i); if (!GST_VALUE_HOLDS_FRACTION (val) || gst_value_get_fraction_denominator (val) == 0) { continue; } curr = (float)gst_value_get_fraction_numerator (val) / (float)gst_value_get_fraction_denominator (val); if (curr > highest && curr <= CHEESE_MAXIMUM_RATE) { highest = curr; *numerator = gst_value_get_fraction_numerator (val); *denominator = gst_value_get_fraction_denominator (val); } } } }
/* Configure the uniaudio decoder for new input caps.
 *
 * Drains and closes any existing decoder handle, looks up a suitable codec
 * for the caps, creates a fresh handle, then pushes each relevant caps field
 * (rate, channels, bitrate, block align, WMA version, stream format, sample
 * depth) into the codec via set_parameter(). Codec data is taken either from
 * a "codec_data" buffer or assembled from the "streamheader" buffer array
 * (Vorbis-style headers) and must stay alive for the lifetime of the handle,
 * so it is kept in imx_audio_uniaudio_dec->codec_data.
 *
 * Fixes over the previous revision:
 *  - stream-format mapping compared "raw" where "adts" was intended and
 *    used non-chained ifs, so "adts" was never recognized and "adif" was
 *    immediately clobbered to STREAM_UNKNOW by the trailing else;
 *  - bitrate debug message wrongly said "channel count";
 *  - unknown sample format returned FALSE without closing the codec handle;
 *  - a UNIA_CODEC_DATA failure would have returned with the codec-data
 *    buffer still mapped and referenced.
 *
 * Returns TRUE on success, FALSE on any configuration error (the codec
 * handle is closed on every error path). */
static gboolean gst_imx_audio_uniaudio_dec_set_format(GstAudioDecoder *dec, GstCaps *caps)
{
	UniACodecParameter parameter;
	UniACodecMemoryOps memory_ops;
	GstImxAudioUniaudioDec *imx_audio_uniaudio_dec = GST_IMX_AUDIO_UNIAUDIO_DEC(dec);

/* Push one parameter (taken from the local "parameter" union) into the codec;
 * on failure, log, close the handle, and bail out of set_format. */
#define UNIA_SET_PARAMETER(PARAM_ID, DESC) \
	do \
	{ \
		if (imx_audio_uniaudio_dec->codec->set_parameter(imx_audio_uniaudio_dec->handle, (PARAM_ID), &parameter) != ACODEC_SUCCESS) \
		{ \
			GST_ERROR_OBJECT(dec, "setting %s parameter failed: %s", (DESC), imx_audio_uniaudio_dec->codec->get_last_error(imx_audio_uniaudio_dec->handle)); \
			gst_imx_audio_uniaudio_dec_close_handle(imx_audio_uniaudio_dec); \
			return FALSE; \
		} \
	} \
	while (0)

/* Same as UNIA_SET_PARAMETER, but with an explicitly supplied value pointer
 * instead of the local "parameter" union. */
#define UNIA_SET_PARAMETER_EX(PARAM_ID, DESC, VALUE) \
	do \
	{ \
		if (imx_audio_uniaudio_dec->codec->set_parameter(imx_audio_uniaudio_dec->handle, (PARAM_ID), ((UniACodecParameter *)(VALUE))) != ACODEC_SUCCESS) \
		{ \
			GST_ERROR_OBJECT(dec, "setting %s parameter failed: %s", (DESC), imx_audio_uniaudio_dec->codec->get_last_error(imx_audio_uniaudio_dec->handle)); \
			gst_imx_audio_uniaudio_dec_close_handle(imx_audio_uniaudio_dec); \
			return FALSE; \
		} \
	} \
	while (0)

	if (imx_audio_uniaudio_dec->handle != NULL)
	{
		/* drain old decoder handle */
		gst_imx_audio_uniaudio_dec_handle_frame(dec, NULL);
		gst_imx_audio_uniaudio_dec_close_handle(imx_audio_uniaudio_dec);
	}

	if ((imx_audio_uniaudio_dec->codec = gst_imx_audio_uniaudio_codec_table_get_codec(caps)) == NULL)
	{
		GST_ERROR_OBJECT(dec, "found no suitable codec for caps %" GST_PTR_FORMAT, (gpointer)caps);
		return FALSE;
	}

	/* Let the codec allocate through our GLib-backed memory callbacks */
	memory_ops.Calloc  = gst_imx_audio_uniaudio_dec_calloc;
	memory_ops.Malloc  = gst_imx_audio_uniaudio_dec_malloc;
	memory_ops.Free    = gst_imx_audio_uniaudio_dec_free;
	memory_ops.ReAlloc = gst_imx_audio_uniaudio_dec_realloc;

	if ((imx_audio_uniaudio_dec->handle = imx_audio_uniaudio_dec->codec->create_codec(&memory_ops)) == NULL)
	{
		GST_ERROR_OBJECT(dec, "creating codec handle for caps %" GST_PTR_FORMAT " failed", (gpointer)caps);
		return FALSE;
	}

	/* Get configuration parameters from caps */
	{
		int samplerate, channels, bitrate, block_align, wmaversion;
		gchar const *stream_format, *sample_format;
		GValue const *value;
		gboolean framed, is_vorbis;
		GstBuffer *codec_data = NULL;
		GstStructure *structure = gst_caps_get_structure(caps, 0);

		imx_audio_uniaudio_dec->skip_header_counter = 0;

		/* Vorbis input is always treated as framed */
		is_vorbis = (g_strcmp0(gst_structure_get_name(structure), "audio/x-vorbis") == 0);
		parameter.framed = is_vorbis || (gst_structure_get_boolean(structure, "framed", &framed) && framed) || (gst_structure_get_boolean(structure, "parsed", &framed) && framed);
		GST_DEBUG_OBJECT(dec, "input is framed: %d", parameter.framed);
		UNIA_SET_PARAMETER(UNIA_FRAMED, "framed");

		if (gst_structure_get_int(structure, "rate", &samplerate))
		{
			GST_DEBUG_OBJECT(dec, "input caps sample rate: %d Hz", samplerate);
			parameter.samplerate = samplerate;
			UNIA_SET_PARAMETER(UNIA_SAMPLERATE, "sample rate");
		}

		if (gst_structure_get_int(structure, "channels", &channels))
		{
			CHAN_TABLE table;

			GST_DEBUG_OBJECT(dec, "input caps channel count: %d", channels);
			parameter.channels = channels;
			UNIA_SET_PARAMETER(UNIA_CHANNEL, "channel");

			/* Also hand the codec our channel layout table so its output
			 * channel order can be mapped to GStreamer positions */
			memset(&table, 0, sizeof(table));
			table.size = CHANNEL_MAPS_SIZE;
			memcpy(&table.channel_table, uniaudio_channel_maps, sizeof(uniaudio_channel_maps));
			UNIA_SET_PARAMETER_EX(UNIA_CHAN_MAP_TABLE, "channel map", &table);
		}

		if (gst_structure_get_int(structure, "bitrate", &bitrate))
		{
			/* BUGFIX: debug line used to say "input caps channel count" */
			GST_DEBUG_OBJECT(dec, "input caps bitrate: %d", bitrate);
			parameter.bitrate = bitrate;
			UNIA_SET_PARAMETER(UNIA_BITRATE, "bitrate");
		}

		if (gst_structure_get_int(structure, "block_align", &block_align))
		{
			GST_DEBUG_OBJECT(dec, "block alignment: %d", block_align);
			parameter.blockalign = block_align;
			UNIA_SET_PARAMETER(UNIA_WMA_BlOCKALIGN, "blockalign");
		}

		if (gst_structure_get_int(structure, "wmaversion", &wmaversion))
		{
			GST_DEBUG_OBJECT(dec, "WMA version: %d", wmaversion);
			parameter.version = wmaversion;
			UNIA_SET_PARAMETER(UNIA_WMA_VERSION, "wmaversion");
		}

		if ((stream_format = gst_structure_get_string(structure, "stream-format")) != NULL)
		{
			GST_DEBUG_OBJECT(dec, "input caps stream format: %s", stream_format);

			/* BUGFIX: these used to be three independent ifs, the first of
			 * which compared against "raw" instead of "adts", so "adts" was
			 * never recognized and "adif" was clobbered by the final else. */
			if (g_strcmp0(stream_format, "adts") == 0)
				parameter.stream_type = STREAM_ADTS;
			else if (g_strcmp0(stream_format, "adif") == 0)
				parameter.stream_type = STREAM_ADIF;
			else if (g_strcmp0(stream_format, "raw") == 0)
				parameter.stream_type = STREAM_RAW;
			else
				parameter.stream_type = STREAM_UNKNOW;
			UNIA_SET_PARAMETER(UNIA_STREAM_TYPE, "stream type");
		}

		if ((sample_format = gst_structure_get_string(structure, "format")) != NULL)
		{
			GstAudioFormat fmt;
			GstAudioFormatInfo const * fmtinfo;

			GST_DEBUG_OBJECT(dec, "input caps stream sample format: %s", sample_format);

			if ((fmt = gst_audio_format_from_string(sample_format)) == GST_AUDIO_FORMAT_UNKNOWN)
			{
				GST_ERROR_OBJECT(dec, "format is unknown, cannot continue");
				/* BUGFIX: close the handle created above instead of leaking it */
				gst_imx_audio_uniaudio_dec_close_handle(imx_audio_uniaudio_dec);
				return FALSE;
			}

			fmtinfo = gst_audio_format_get_info(fmt);
			g_assert(fmtinfo != NULL);

			parameter.depth = GST_AUDIO_FORMAT_INFO_DEPTH(fmtinfo);
			UNIA_SET_PARAMETER(UNIA_DEPTH, "depth");
		}

		/* Handle codec data, either directly from a codec_data caps,
		 * or assemble it from a list of buffers specified by the
		 * streamheader caps (typically used by Vorbis audio) */

		/* Cleanup old codec data first */
		if (imx_audio_uniaudio_dec->codec_data != NULL)
		{
			gst_buffer_unref(imx_audio_uniaudio_dec->codec_data);
			imx_audio_uniaudio_dec->codec_data = NULL;
		}

		/* Check if either codec_data or streamheader caps exist */
		if ((value = gst_structure_get_value(structure, "codec_data")) != NULL)
		{
			/* codec_data caps exist - simply make a copy of its buffer
			 * (this makes sure we own that buffer properly) */
			GstBuffer *caps_buffer;
			GST_DEBUG_OBJECT(dec, "reading codec_data value");
			caps_buffer = gst_value_get_buffer(value);
			g_assert(caps_buffer != NULL);
			codec_data = gst_buffer_copy(caps_buffer);
		}
		else if ((value = gst_structure_get_value(structure, "streamheader")) != NULL)
		{
			/* streamheader caps exist, which are a list of buffers;
			 * these buffers need to be concatenated and then given as
			 * one consecutive codec data buffer to the decoder */
			guint i, num_buffers = gst_value_array_get_size(value);
			GstAdapter *streamheader_adapter = gst_adapter_new();

			GST_DEBUG_OBJECT(dec, "reading streamheader value (%u headers)", num_buffers);

			imx_audio_uniaudio_dec->num_vorbis_headers = num_buffers;

			/* Use the GstAdapter to stitch these buffers together */
			for (i = 0; i < num_buffers; ++i)
			{
				GValue const *array_value = gst_value_array_get_value(value, i);
				GstBuffer *buf = gst_value_get_buffer(array_value);
				GST_DEBUG_OBJECT(dec, "add streamheader buffer #%u with %" G_GSIZE_FORMAT " byte", i, gst_buffer_get_size(buf));
				gst_adapter_push(streamheader_adapter, gst_buffer_copy(buf));
			}
			codec_data = gst_adapter_take_buffer(streamheader_adapter, gst_adapter_available(streamheader_adapter));
			g_object_unref(G_OBJECT(streamheader_adapter));
		}

		/* At this point, if either codec_data or streamheader caps were found,
		 * the codec_data pointer will refer to a valid non-empty buffer with
		 * codec data inside. This buffer is owned by this audio decoder object,
		 * and must be kept around for as long as the decoder needs to be ran,
		 * since the set_parameter call below does *not* copy the codec data
		 * bytes into some internal buffer. Instead, the uniaudio decoder plugin
		 * expects the caller to keep the buffer valid. */
		if ((codec_data != NULL) && (gst_buffer_get_size(codec_data) != 0))
		{
			GstMapInfo map;
			gst_buffer_map(codec_data, &map, GST_MAP_READ);
			parameter.codecData.size = map.size;
			parameter.codecData.buf = (char *)(map.data);

			/* BUGFIX: explicit error handling here instead of the
			 * UNIA_SET_PARAMETER macro, which would have returned while the
			 * buffer was still mapped and referenced */
			if (imx_audio_uniaudio_dec->codec->set_parameter(imx_audio_uniaudio_dec->handle, UNIA_CODEC_DATA, &parameter) != ACODEC_SUCCESS)
			{
				GST_ERROR_OBJECT(dec, "setting %s parameter failed: %s", "codec data", imx_audio_uniaudio_dec->codec->get_last_error(imx_audio_uniaudio_dec->handle));
				gst_buffer_unmap(codec_data, &map);
				gst_buffer_unref(codec_data);
				gst_imx_audio_uniaudio_dec_close_handle(imx_audio_uniaudio_dec);
				return FALSE;
			}
			gst_buffer_unmap(codec_data, &map);

			imx_audio_uniaudio_dec->codec_data = codec_data;

			GST_DEBUG_OBJECT(dec, "codec data: %" G_GUINT32_FORMAT " byte", (guint32)(parameter.codecData.size));
		}
	}

	GST_DEBUG_OBJECT(dec, "decoder configured");

	imx_audio_uniaudio_dec->has_audioinfo_set = FALSE;

#undef UNIA_SET_PARAMETER
#undef UNIA_SET_PARAMETER_EX

	return TRUE;
}