/* this function does the actual processing */
static GstFlowReturn
gst_freeverb_transform (GstBaseTransform * base, GstBuffer * inbuf,
    GstBuffer * outbuf)
{
  GstFreeverb *filter = GST_FREEVERB (base);
  GstMapInfo in_map, out_map;
  GstClockTime stream_ts;
  guint nframes;

  /* convert the buffer timestamp to stream time for controller sync */
  stream_ts = GST_BUFFER_TIMESTAMP (inbuf);
  stream_ts =
      gst_segment_to_stream_time (&base->segment, GST_FORMAT_TIME, stream_ts);

  gst_buffer_map (inbuf, &in_map, GST_MAP_READ);
  gst_buffer_map (outbuf, &out_map, GST_MAP_WRITE);

  /* output frame count: factor 2 because the output is stereo */
  nframes = out_map.size / (2 * GST_AUDIO_INFO_BPS (&filter->info));

  GST_DEBUG_OBJECT (filter, "processing %u samples at %" GST_TIME_FORMAT,
      nframes, GST_TIME_ARGS (stream_ts));

  if (GST_CLOCK_TIME_IS_VALID (stream_ts))
    gst_object_sync_values (GST_OBJECT (filter), stream_ts);

  /* a discontinuity invalidates the drained state of the reverb tail */
  if (G_UNLIKELY (GST_BUFFER_FLAG_IS_SET (inbuf, GST_BUFFER_FLAG_DISCONT)))
    filter->drained = FALSE;

  if (G_UNLIKELY (GST_BUFFER_FLAG_IS_SET (inbuf, GST_BUFFER_FLAG_GAP))) {
    /* silence in and the tail has already died out: just emit silence */
    if (filter->drained)
      memset (out_map.data, 0, out_map.size);
  } else {
    /* real data: the tail will need draining again afterwards */
    filter->drained = FALSE;
  }

  /* run the reverb until it reports that the tail is fully drained */
  if (!filter->drained) {
    filter->drained =
        filter->process (filter, in_map.data, out_map.data, nframes);
  }

  /* drained output carries no signal: mark it as a gap for downstream */
  if (filter->drained)
    GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_GAP);

  gst_buffer_unmap (inbuf, &in_map);
  gst_buffer_unmap (outbuf, &out_map);

  return GST_FLOW_OK;
}
/* this function does the actual processing */
static GstFlowReturn
gst_audio_panorama_transform (GstBaseTransform * base, GstBuffer * inbuf,
    GstBuffer * outbuf)
{
  GstAudioPanorama *filter = GST_AUDIO_PANORAMA (base);
  GstClockTime ts, stream_ts;
  GstMapInfo in_map, out_map;

  ts = GST_BUFFER_TIMESTAMP (inbuf);
  stream_ts = gst_segment_to_stream_time (&base->segment, GST_FORMAT_TIME, ts);

  GST_DEBUG_OBJECT (filter, "sync to %" GST_TIME_FORMAT, GST_TIME_ARGS (ts));

  /* sync controlled properties to the stream time of this buffer */
  if (GST_CLOCK_TIME_IS_VALID (stream_ts))
    gst_object_sync_values (GST_OBJECT (filter), stream_ts);

  gst_buffer_map (inbuf, &in_map, GST_MAP_READ);
  gst_buffer_map (outbuf, &out_map, GST_MAP_WRITE);

  if (G_UNLIKELY (GST_BUFFER_FLAG_IS_SET (inbuf, GST_BUFFER_FLAG_GAP))) {
    /* gap buffers carry no usable data: forward the flag, write silence */
    GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_GAP);
    memset (out_map.data, 0, out_map.size);
  } else {
    /* output always stereo, input mono or stereo,
     * and info describes input format */
    guint frames = out_map.size / (2 * GST_AUDIO_INFO_BPS (&filter->info));

    filter->process (filter, in_map.data, out_map.data, frames);
  }

  gst_buffer_unmap (inbuf, &in_map);
  gst_buffer_unmap (outbuf, &out_map);

  return GST_FLOW_OK;
}
static void gst_openal_sink_parse_spec (GstOpenALSink * sink, const GstAudioRingBufferSpec * spec) { ALuint format = AL_NONE; GST_DEBUG_OBJECT (sink, "looking up format for type %d, gst-format %d, and %d channels", spec->type, GST_AUDIO_INFO_FORMAT (&spec->info), GST_AUDIO_INFO_CHANNELS (&spec->info)); /* Don't need to verify supported formats, since the probed caps will only * report what was detected and we shouldn't get anything different */ switch (spec->type) { case GST_AUDIO_RING_BUFFER_FORMAT_TYPE_RAW: switch (GST_AUDIO_INFO_FORMAT (&spec->info)) { case GST_AUDIO_FORMAT_U8: switch (GST_AUDIO_INFO_CHANNELS (&spec->info)) { case 1: format = AL_FORMAT_MONO8; break; case 2: format = AL_FORMAT_STEREO8; break; case 4: format = AL_FORMAT_QUAD8; break; case 6: format = AL_FORMAT_51CHN8; break; case 7: format = AL_FORMAT_61CHN8; break; case 8: format = AL_FORMAT_71CHN8; break; default: break; } break; case GST_AUDIO_FORMAT_S16: switch (GST_AUDIO_INFO_CHANNELS (&spec->info)) { case 1: format = AL_FORMAT_MONO16; break; case 2: format = AL_FORMAT_STEREO16; break; case 4: format = AL_FORMAT_QUAD16; break; case 6: format = AL_FORMAT_51CHN16; break; case 7: format = AL_FORMAT_61CHN16; break; case 8: format = AL_FORMAT_71CHN16; break; default: break; } break; case GST_AUDIO_FORMAT_F32: switch (GST_AUDIO_INFO_CHANNELS (&spec->info)) { case 1: format = AL_FORMAT_MONO_FLOAT32; break; case 2: format = AL_FORMAT_STEREO_FLOAT32; break; case 4: format = AL_FORMAT_QUAD32; break; case 6: format = AL_FORMAT_51CHN32; break; case 7: format = AL_FORMAT_61CHN32; break; case 8: format = AL_FORMAT_71CHN32; break; default: break; } break; case GST_AUDIO_FORMAT_F64: switch (GST_AUDIO_INFO_CHANNELS (&spec->info)) { case 1: format = AL_FORMAT_MONO_DOUBLE_EXT; break; case 2: format = AL_FORMAT_STEREO_DOUBLE_EXT; break; default: break; } break; default: break; } break; case GST_AUDIO_RING_BUFFER_FORMAT_TYPE_IMA_ADPCM: switch (GST_AUDIO_INFO_CHANNELS (&spec->info)) { case 1: format = 
AL_FORMAT_MONO_IMA4; break; case 2: format = AL_FORMAT_STEREO_IMA4; break; default: break; } break; case GST_AUDIO_RING_BUFFER_FORMAT_TYPE_A_LAW: switch (GST_AUDIO_INFO_CHANNELS (&spec->info)) { case 1: format = AL_FORMAT_MONO_ALAW_EXT; break; case 2: format = AL_FORMAT_STEREO_ALAW_EXT; break; default: break; } break; case GST_AUDIO_RING_BUFFER_FORMAT_TYPE_MU_LAW: switch (GST_AUDIO_INFO_CHANNELS (&spec->info)) { case 1: format = AL_FORMAT_MONO_MULAW; break; case 2: format = AL_FORMAT_STEREO_MULAW; break; case 4: format = AL_FORMAT_QUAD_MULAW; break; case 6: format = AL_FORMAT_51CHN_MULAW; break; case 7: format = AL_FORMAT_61CHN_MULAW; break; case 8: format = AL_FORMAT_71CHN_MULAW; break; default: break; } break; default: break; } sink->bytes_per_sample = GST_AUDIO_INFO_BPS (&spec->info); sink->rate = GST_AUDIO_INFO_RATE (&spec->info); sink->channels = GST_AUDIO_INFO_CHANNELS (&spec->info); sink->format = format; sink->buffer_count = spec->segtotal; sink->buffer_length = spec->segsize; }
/* Feed one buffer of raw audio (or NULL to drain) into the FDK AAC encoder
 * and push any produced frame downstream.
 *
 * enc:   the audio encoder base class instance
 * inbuf: raw input buffer; NULL signals end-of-stream draining
 * Returns GST_FLOW_OK on success (including "no output yet"), or
 * GST_FLOW_ERROR / the result of gst_audio_encoder_finish_frame().
 *
 * Cleanup is goto-based: the `out:` label unmaps/unrefs whatever `inbuf`
 * and `outbuf` still own, so every early exit must leave those pointers
 * accurate (NULL once ownership has been handed off). */
static GstFlowReturn
gst_fdkaacenc_handle_frame (GstAudioEncoder * enc, GstBuffer * inbuf)
{
  GstFdkAacEnc *self = GST_FDKAACENC (enc);
  GstFlowReturn ret = GST_FLOW_OK;
  GstAudioInfo *info;
  GstMapInfo imap, omap;
  GstBuffer *outbuf;
  AACENC_BufDesc in_desc = { 0 };
  AACENC_BufDesc out_desc = { 0 };
  AACENC_InArgs in_args = { 0 };
  AACENC_OutArgs out_args = { 0 };
  gint in_id = IN_AUDIO_DATA, out_id = OUT_BITSTREAM_DATA;
  gint in_sizes, out_sizes;
  gint in_el_sizes, out_el_sizes;
  AACENC_ERROR err;

  info = gst_audio_encoder_get_audio_info (enc);

  if (inbuf) {
    if (self->need_reorder) {
      /* encoder wants its own channel order: reorder in a writable copy;
       * the copy must be unreffed by us (see cleanup below) */
      inbuf = gst_buffer_copy (inbuf);
      gst_buffer_map (inbuf, &imap, GST_MAP_READWRITE);
      gst_audio_reorder_channels (imap.data, imap.size,
          GST_AUDIO_INFO_FORMAT (info), GST_AUDIO_INFO_CHANNELS (info),
          &GST_AUDIO_INFO_POSITION (info, 0), self->aac_positions);
    } else {
      gst_buffer_map (inbuf, &imap, GST_MAP_READ);
    }

    in_args.numInSamples = imap.size / GST_AUDIO_INFO_BPS (info);
    in_sizes = imap.size;
    in_el_sizes = GST_AUDIO_INFO_BPS (info);
    in_desc.numBufs = 1;
  } else {
    /* NULL input: ask the encoder to flush its internal buffer (-1) */
    in_args.numInSamples = -1;
    in_sizes = 0;
    in_el_sizes = 0;
    in_desc.numBufs = 0;
  }

  /* NOTE(review): when inbuf is NULL, imap is uninitialized; only the
   * address of imap.data is taken here and numBufs is 0, so the encoder
   * presumably never dereferences it — confirm against the FDK API */
  in_desc.bufferIdentifiers = &in_id;
  in_desc.bufs = (void *) &imap.data;
  in_desc.bufSizes = &in_sizes;
  in_desc.bufElSizes = &in_el_sizes;

  outbuf = gst_audio_encoder_allocate_output_buffer (enc, self->outbuf_size);
  if (!outbuf) {
    ret = GST_FLOW_ERROR;
    goto out;
  }

  gst_buffer_map (outbuf, &omap, GST_MAP_WRITE);
  out_sizes = omap.size;
  out_el_sizes = 1;             /* bitstream output is byte-granular */
  out_desc.bufferIdentifiers = &out_id;
  out_desc.numBufs = 1;
  out_desc.bufs = (void *) &omap.data;
  out_desc.bufSizes = &out_sizes;
  out_desc.bufElSizes = &out_el_sizes;

  err = aacEncEncode (self->enc, &in_desc, &out_desc, &in_args, &out_args);
  /* EOF while draining is the normal termination, not an error */
  if (err == AACENC_ENCODE_EOF && !inbuf)
    goto out;
  else if (err != AACENC_OK) {
    GST_ERROR_OBJECT (self, "Failed to encode data: %d", err);
    ret = GST_FLOW_ERROR;
    goto out;
  }

  /* input fully consumed: release it now and NULL the pointer so the
   * shared cleanup below does not touch it again */
  if (inbuf) {
    gst_buffer_unmap (inbuf, &imap);
    if (self->need_reorder)
      gst_buffer_unref (inbuf);
    inbuf = NULL;
  }

  /* encoder may buffer internally and produce nothing this round */
  if (!out_args.numOutBytes)
    goto out;

  /* must unmap before resizing/pushing; finish_frame takes ownership,
   * so drop our outbuf reference by NULLing it */
  gst_buffer_unmap (outbuf, &omap);
  gst_buffer_set_size (outbuf, out_args.numOutBytes);
  ret = gst_audio_encoder_finish_frame (enc, outbuf, self->samples_per_frame);
  outbuf = NULL;

out:
  if (outbuf) {
    gst_buffer_unmap (outbuf, &omap);
    gst_buffer_unref (outbuf);
  }
  if (inbuf) {
    gst_buffer_unmap (inbuf, &imap);
    if (self->need_reorder)
      gst_buffer_unref (inbuf);
  }
  return ret;
}
/* Cache the ring buffer parameters and translate the negotiated audio
 * format to an OpenAL capture format (AL_NONE when unsupported). */
static void
gst_openal_src_parse_spec (GstOpenalSrc * openalsrc,
    const GstAudioRingBufferSpec * spec)
{
  ALuint format = AL_NONE;

  GST_DEBUG_OBJECT (openalsrc,
      "looking up format for type %d, gst-format %d, and %d channels",
      spec->type, GST_AUDIO_INFO_FORMAT (&spec->info),
      GST_AUDIO_INFO_CHANNELS (&spec->info));

  /* every supported capture format is mono; anything else stays AL_NONE */
  if (GST_AUDIO_INFO_CHANNELS (&spec->info) == 1) {
    switch (spec->type) {
      case GST_AUDIO_RING_BUFFER_FORMAT_TYPE_RAW:
        switch (GST_AUDIO_INFO_FORMAT (&spec->info)) {
          case GST_AUDIO_FORMAT_U8:
            format = AL_FORMAT_MONO8;
            break;
          case GST_AUDIO_FORMAT_U16:
          case GST_AUDIO_FORMAT_S16:
            /* NOTE(review): U16 is mapped to the signed MONO16 format —
             * presumably a conversion happens elsewhere; verify */
            format = AL_FORMAT_MONO16;
            break;
          case GST_AUDIO_FORMAT_F32:
            format = AL_FORMAT_MONO_FLOAT32;
            break;
          case GST_AUDIO_FORMAT_F64:
            format = AL_FORMAT_MONO_DOUBLE_EXT;
            break;
          default:
            break;
        }
        break;
      case GST_AUDIO_RING_BUFFER_FORMAT_TYPE_IMA_ADPCM:
        format = AL_FORMAT_MONO_IMA4;
        break;
      case GST_AUDIO_RING_BUFFER_FORMAT_TYPE_A_LAW:
        format = AL_FORMAT_MONO_ALAW_EXT;
        break;
      case GST_AUDIO_RING_BUFFER_FORMAT_TYPE_MU_LAW:
        format = AL_FORMAT_MONO_MULAW;
        break;
      default:
        break;
    }
  }

  openalsrc->bytes_per_sample = GST_AUDIO_INFO_BPS (&spec->info);
  openalsrc->rate = GST_AUDIO_INFO_RATE (&spec->info);
  openalsrc->buffer_length = spec->segsize;
  openalsrc->format = format;
}
/* In-place "transform": analyse the buffer without modifying it.
 *
 * Accumulates cumulative squares and peaks per channel, subdividing the
 * buffer so that a message is posted every interval_frames frames even
 * when a single buffer spans several intervals.  Stateful: updates
 * filter->CS[], peak[], last_peak[], decay_peak*[] and num_frames. */
static GstFlowReturn
gst_level_transform_ip (GstBaseTransform * trans, GstBuffer * in)
{
  GstLevel *filter;
  GstMapInfo map;
  guint8 *in_data;
  gsize in_size;
  gdouble CS;
  guint i;
  guint num_frames;
  guint num_int_samples = 0;    /* number of interleaved samples
                                 * ie. total count for all channels combined */
  guint block_size, block_int_size;     /* we subdivide buffers to not skip message
                                         * intervals */
  GstClockTimeDiff falloff_time;
  gint channels, rate, bps;

  filter = GST_LEVEL (trans);

  channels = GST_AUDIO_INFO_CHANNELS (&filter->info);
  bps = GST_AUDIO_INFO_BPS (&filter->info);
  rate = GST_AUDIO_INFO_RATE (&filter->info);

  gst_buffer_map (in, &map, GST_MAP_READ);
  in_data = map.data;
  in_size = map.size;

  num_int_samples = in_size / bps;

  /* NOTE(review): the log message says "sample frames" but this is the
   * interleaved sample count (frames * channels) */
  GST_LOG_OBJECT (filter, "analyzing %u sample frames at ts %" GST_TIME_FORMAT,
      num_int_samples, GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (in)));

  /* NOTE(review): if this check fires the buffer stays mapped — the early
   * return skips gst_buffer_unmap below; confirm acceptable */
  g_return_val_if_fail (num_int_samples % channels == 0, GST_FLOW_ERROR);

  /* a discontinuity restarts the measurement interval at this timestamp */
  if (GST_BUFFER_FLAG_IS_SET (in, GST_BUFFER_FLAG_DISCONT)) {
    filter->message_ts = GST_BUFFER_TIMESTAMP (in);
    filter->num_frames = 0;
  }
  if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (filter->message_ts))) {
    filter->message_ts = GST_BUFFER_TIMESTAMP (in);
  }

  num_frames = num_int_samples / channels;
  while (num_frames > 0) {
    /* process at most up to the end of the current message interval */
    block_size = filter->interval_frames - filter->num_frames;
    block_size = MIN (block_size, num_frames);
    block_int_size = block_size * channels;

    for (i = 0; i < channels; ++i) {
      if (!GST_BUFFER_FLAG_IS_SET (in, GST_BUFFER_FLAG_GAP)) {
        /* per-channel analysis over the interleaved block (stride = bps*i) */
        filter->process (in_data + (bps * i), block_int_size, channels,
            &CS, &filter->peak[i]);
        GST_LOG_OBJECT (filter,
            "[%d]: cumulative squares %lf, over %d samples/%d channels",
            i, CS, block_int_size, channels);
        filter->CS[i] += CS;
      } else {
        /* gap buffers are silence: peak is zero, CS unchanged */
        filter->peak[i] = 0.0;
      }

      /* NOTE(review): ages by the *remaining* frame count, not block_size —
       * matches the existing behavior; verify intent before changing */
      filter->decay_peak_age[i] += GST_FRAMES_TO_CLOCK_TIME (num_frames, rate);
      GST_LOG_OBJECT (filter,
          "[%d]: peak %f, last peak %f, decay peak %f, age %" GST_TIME_FORMAT,
          i, filter->peak[i], filter->last_peak[i], filter->decay_peak[i],
          GST_TIME_ARGS (filter->decay_peak_age[i]));

      /* update running peak */
      if (filter->peak[i] > filter->last_peak[i])
        filter->last_peak[i] = filter->peak[i];

      /* make decay peak fall off if too old */
      falloff_time =
          GST_CLOCK_DIFF (gst_gdouble_to_guint64 (filter->decay_peak_ttl),
          filter->decay_peak_age[i]);
      if (falloff_time > 0) {
        gdouble falloff_dB;
        gdouble falloff;
        gdouble length;         /* length of falloff time in seconds */

        length = (gdouble) falloff_time / (gdouble) GST_SECOND;
        /* exponential decay: falloff dB/s beyond the ttl, as linear factor */
        falloff_dB = filter->decay_peak_falloff * length;
        falloff = pow (10, falloff_dB / -20.0);

        GST_LOG_OBJECT (filter,
            "falloff: current %f, base %f, interval %" GST_TIME_FORMAT
            ", dB falloff %f, factor %e",
            filter->decay_peak[i], filter->decay_peak_base[i],
            GST_TIME_ARGS (falloff_time), falloff_dB, falloff);
        filter->decay_peak[i] = filter->decay_peak_base[i] * falloff;
        GST_LOG_OBJECT (filter,
            "peak is %" GST_TIME_FORMAT " old, decayed with factor %e to %f",
            GST_TIME_ARGS (filter->decay_peak_age[i]), falloff,
            filter->decay_peak[i]);
      } else {
        GST_LOG_OBJECT (filter, "peak not old enough, not decaying");
      }

      /* if the peak of this run is higher, the decay peak gets reset */
      if (filter->peak[i] >= filter->decay_peak[i]) {
        GST_LOG_OBJECT (filter, "new peak, %f", filter->peak[i]);
        filter->decay_peak[i] = filter->peak[i];
        filter->decay_peak_base[i] = filter->peak[i];
        filter->decay_peak_age[i] = G_GINT64_CONSTANT (0);
      }
    }

    /* advance past the processed block */
    in_data += block_size * bps * channels;
    filter->num_frames += block_size;
    num_frames -= block_size;

    /* do we need to message ? */
    if (filter->num_frames >= filter->interval_frames) {
      gst_level_post_message (filter);
    }
  }

  gst_buffer_unmap (in, &map);

  return GST_FLOW_OK;
}
/* Compute per-channel RMS over one audio buffer and build a
 * "videoframe-audiolevel" element message carrying running-time, duration
 * and an "rms" GValueArray (one double per channel).
 *
 * Stateful: accumulates into self->CS[] (reset to 0 per channel after the
 * message is built) and advances self->total_frames.
 * Returns the new message, or NULL if the sample count is not a multiple
 * of the channel count (g_return_val_if_fail). */
static GstMessage *
update_rms_from_buffer (GstVideoFrameAudioLevel * self, GstBuffer * inbuf)
{
  GstMapInfo map;
  guint8 *in_data;
  gsize in_size;
  gdouble CS;
  guint i;
  guint num_frames, frames;
  guint num_int_samples = 0;    /* number of interleaved samples
                                 * ie. total count for all channels combined */
  gint channels, rate, bps;
  GValue v = G_VALUE_INIT;
  GValue va = G_VALUE_INIT;
  GValueArray *a;
  GstStructure *s;
  GstMessage *msg;
  GstClockTime duration, running_time;

  channels = GST_AUDIO_INFO_CHANNELS (&self->ainfo);
  bps = GST_AUDIO_INFO_BPS (&self->ainfo);
  rate = GST_AUDIO_INFO_RATE (&self->ainfo);

  gst_buffer_map (inbuf, &map, GST_MAP_READ);
  in_data = map.data;
  in_size = map.size;

  num_int_samples = in_size / bps;

  GST_LOG_OBJECT (self, "analyzing %u sample frames at ts %" GST_TIME_FORMAT,
      num_int_samples, GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (inbuf)));

  /* NOTE(review): failure here returns with the buffer still mapped —
   * the unmap at the end is skipped; confirm acceptable */
  g_return_val_if_fail (num_int_samples % channels == 0, NULL);

  num_frames = num_int_samples / channels;
  frames = num_frames;
  duration = GST_FRAMES_TO_CLOCK_TIME (frames, rate);
  if (num_frames > 0) {
    for (i = 0; i < channels; ++i) {
      /* per-channel cumulative squares over the interleaved data
       * (channel offset = bps * i) */
      self->process (in_data + (bps * i), num_int_samples, channels, &CS);
      GST_LOG_OBJECT (self,
          "[%d]: cumulative squares %lf, over %d samples/%d channels",
          i, CS, num_int_samples, channels);
      self->CS[i] += CS;
    }
    /* in_data is not read again after this advance */
    in_data += num_frames * bps;
    self->total_frames += num_frames;
  }
  /* running time at the END of this buffer (total_frames already bumped) */
  running_time =
      self->first_time + gst_util_uint64_scale (self->total_frames,
      GST_SECOND, rate);

  a = g_value_array_new (channels);
  s = gst_structure_new ("videoframe-audiolevel",
      "running-time", G_TYPE_UINT64, running_time,
      "duration", G_TYPE_UINT64, duration, NULL);
  g_value_init (&v, G_TYPE_DOUBLE);
  g_value_init (&va, G_TYPE_VALUE_ARRAY);
  for (i = 0; i < channels; i++) {
    gdouble rms;
    if (frames == 0 || self->CS[i] == 0) {
      rms = 0;                  /* empty buffer */
    } else {
      rms = sqrt (self->CS[i] / frames);
    }
    /* accumulator is consumed per message */
    self->CS[i] = 0.0;
    g_value_set_double (&v, rms);
    g_value_array_append (a, &v);
  }
  /* va takes ownership of the array; the structure takes ownership of va */
  g_value_take_boxed (&va, a);
  gst_structure_take_value (s, "rms", &va);
  msg = gst_message_new_element (GST_OBJECT (self), s);
  gst_buffer_unmap (inbuf, &map);
  return msg;
}