/* gst_aubio_tempo_transform_ip:
 * In-place transform: feeds incoming samples, hop_size at a time, to the
 * aubio tempo tracker.  On each detected beat it computes the bpm from the
 * distance to the previous beat, optionally prints it, logs it, and posts
 * an element message.
 *
 * NOTE(review): only the first nsamples interleaved values of the buffer
 * are read, so for multi-channel input this analyzes a prefix of the data
 * rather than a single deinterleaved channel — confirm intended behavior.
 */
static GstFlowReturn
gst_aubio_tempo_transform_ip (GstBaseTransform * trans, GstBuffer * buf)
{
  guint j;
  GstAubioTempo *filter = GST_AUBIOTEMPO (trans);
  GstAudioFilter *audiofilter = GST_AUDIO_FILTER (trans);

  /* 4 == sizeof (smpl_t); number of sample frames in this buffer */
  guint nsamples = GST_BUFFER_SIZE (buf) / (4 * audiofilter->format.channels);

  /* block loop */
  for (j = 0; j < nsamples; j++) {
    /* copy input to ibuf */
    fvec_write_sample (filter->ibuf, ((smpl_t *) GST_BUFFER_DATA (buf))[j],
        filter->pos);

    if (filter->pos == filter->hop_size - 1) {
      aubio_tempo_do (filter->t, filter->ibuf, filter->out);
      if (filter->out->data[0] > 0.) {
        gdouble now = GST_BUFFER_OFFSET (buf);

        /* correction of inside buffer time.
         * FIX: do the subtraction in signed arithmetic — with an unsigned
         * j the expression (j - hop_size + 1) wrapped around to a huge
         * positive value whenever j < hop_size - 1 (i.e. when pos was
         * carried over from the previous buffer), instead of yielding the
         * intended negative offset. */
        now += (smpl_t) ((gint) j - (gint) filter->hop_size + 1);
        /* correction of float period */
        now += (filter->out->data[0] - 1.) * (smpl_t) filter->hop_size;

        if (filter->last_beat != -1 && now > filter->last_beat) {
          /* frames between beats -> nanoseconds -> beats per minute */
          filter->bpm =
              60. / (GST_FRAMES_TO_CLOCK_TIME (now - filter->last_beat,
                  audiofilter->format.rate)) * 1.e+9;
        } else {
          filter->bpm = 0.;
        }

        if (filter->silent == FALSE) {
          g_print ("beat: %f ", GST_FRAMES_TO_CLOCK_TIME (now,
                  audiofilter->format.rate) * 1.e-9);
          g_print ("| bpm: %f\n", filter->bpm);
        }

        GST_LOG_OBJECT (filter, "beat %" GST_TIME_FORMAT ", bpm %3.2f",
            GST_TIME_ARGS (now), filter->bpm);

        if (filter->message) {
          GstMessage *m = gst_aubio_tempo_message_new (filter, now);
          gst_element_post_message (GST_ELEMENT (filter), m);
        }

        filter->last_beat = now;
      }

      filter->pos = -1;         /* so it will be zero next j loop */
    }
    filter->pos++;
  }

  return GST_FLOW_OK;
}
/* Accumulate raw S16 interleaved audio into the adapter; once at least 135
 * seconds have been collected, compute the OFA fingerprint (exactly once). */
static GstFlowReturn
gst_ofa_transform_ip (GstBaseTransform * trans, GstBuffer * buf)
{
  GstOFA *ofa = GST_OFA (trans);
  GstAudioFilter *ofa_filter = GST_AUDIO_FILTER (ofa);
  gint rate = ofa_filter->format.rate;
  gint channels = ofa_filter->format.channels;
  guint64 available_frames;
  GstClockTime buffered;

  g_return_val_if_fail (rate > 0 && channels > 0, GST_FLOW_NOT_NEGOTIATED);

  /* nothing to do once recording has been switched off */
  if (!ofa->record)
    return GST_FLOW_OK;

  gst_adapter_push (ofa->adapter, gst_buffer_copy (buf));

  /* 2 bytes per sample (S16), samples interleaved across channels */
  available_frames = gst_adapter_available (ofa->adapter) / (channels * 2);
  buffered = GST_FRAMES_TO_CLOCK_TIME (available_frames, rate);

  if (buffered >= 135 * GST_SECOND && ofa->fingerprint == NULL)
    create_fingerprint (ofa);

  return GST_FLOW_OK;
}
/* this tests that the output is a perfect stream if the input is */ static void test_perfect_stream_instance (int inrate, int outrate, int samples, int numbuffers) { GstElement *audioresample; GstBuffer *inbuffer, *outbuffer; GstCaps *caps; guint64 offset = 0; int i, j; GstMapInfo map; gint16 *p; audioresample = setup_audioresample (2, 0x3, inrate, outrate, GST_AUDIO_NE (S16)); caps = gst_pad_get_current_caps (mysrcpad); fail_unless (gst_caps_is_fixed (caps)); fail_unless (gst_element_set_state (audioresample, GST_STATE_PLAYING) == GST_STATE_CHANGE_SUCCESS, "could not set to playing"); for (j = 1; j <= numbuffers; ++j) { inbuffer = gst_buffer_new_and_alloc (samples * 4); GST_BUFFER_DURATION (inbuffer) = GST_FRAMES_TO_CLOCK_TIME (samples, inrate); GST_BUFFER_TIMESTAMP (inbuffer) = GST_BUFFER_DURATION (inbuffer) * (j - 1); GST_BUFFER_OFFSET (inbuffer) = offset; offset += samples; GST_BUFFER_OFFSET_END (inbuffer) = offset; gst_buffer_map (inbuffer, &map, GST_MAP_WRITE); p = (gint16 *) map.data; /* create a 16 bit signed ramp */ for (i = 0; i < samples; ++i) { *p = -32767 + i * (65535 / samples); ++p; *p = -32767 + i * (65535 / samples); ++p; } gst_buffer_unmap (inbuffer, &map); /* pushing gives away my reference ... */ fail_unless (gst_pad_push (mysrcpad, inbuffer) == GST_FLOW_OK); /* ... but it ends up being collected on the global buffer list */ fail_unless_equals_int (g_list_length (buffers), j); } /* FIXME: we should make audioresample handle eos by flushing out the last * samples, which will give us one more, small, buffer */ fail_if ((outbuffer = (GstBuffer *) buffers->data) == NULL); ASSERT_BUFFER_REFCOUNT (outbuffer, "outbuffer", 1); fail_unless_perfect_stream (); /* cleanup */ gst_caps_unref (caps); cleanup_audioresample (audioresample); }
/* run_fft_pipeline:
 * Pushes a single 2048-sample buffer through audioresample at the given
 * @quality and lets the caller-supplied callbacks validate the result in
 * the frequency domain.
 *
 * @init:         fills the input buffer with a test signal
 * @compare_ffts: asserts that input and output spectra match
 */
static void
run_fft_pipeline (int inrate, int outrate, int quality, int width,
    const gchar * format, void (*init) (GstBuffer *),
    void (*compare_ffts) (GstBuffer *, GstBuffer *))
{
  GstElement *audioresample;
  GstBuffer *inbuffer, *outbuffer;
  GstCaps *caps;
  const int nsamples = 2048;

  /* mono (1 channel), no channel mask */
  audioresample = setup_audioresample (1, 0, inrate, outrate, format);
  fail_unless (audioresample != NULL);
  g_object_set (audioresample, "quality", quality, NULL);
  caps = gst_pad_get_current_caps (mysrcpad);
  fail_unless (gst_caps_is_fixed (caps));

  fail_unless (gst_element_set_state (audioresample,
          GST_STATE_PLAYING) == GST_STATE_CHANGE_SUCCESS,
      "could not set to playing");

  /* @width is in bits per sample */
  inbuffer = gst_buffer_new_and_alloc (nsamples * width / 8);
  GST_BUFFER_DURATION (inbuffer) = GST_FRAMES_TO_CLOCK_TIME (nsamples, inrate);
  GST_BUFFER_TIMESTAMP (inbuffer) = 0;
  gst_pad_set_caps (mysrcpad, caps);

  (*init) (inbuffer);

  /* keep an extra ref so inbuffer survives the push for compare_ffts */
  gst_buffer_ref (inbuffer);

  /* pushing gives away my reference ... */
  fail_unless (gst_pad_push (mysrcpad, inbuffer) == GST_FLOW_OK);
  /* ... but it ends up being collected on the global buffer list */
  fail_unless_equals_int (g_list_length (buffers), 1);

  /* retrieve out buffer */
  fail_if ((outbuffer = (GstBuffer *) buffers->data) == NULL);
  fail_unless (gst_element_set_state (audioresample,
          GST_STATE_NULL) == GST_STATE_CHANGE_SUCCESS, "could not set to null");

  /* passthrough case (in == out): drop our extra ref so the refcounts
   * balance before comparing */
  if (inbuffer == outbuffer)
    gst_buffer_unref (inbuffer);

  (*compare_ffts) (inbuffer, outbuffer);

  /* cleanup */
  gst_caps_unref (caps);
  cleanup_audioresample (audioresample);
}
/* Recompute level->interval_frames from the configured message interval and
 * the negotiated sample rate, clamping to a minimum of one frame. */
static void
gst_level_recalc_interval_frames (GstLevel * level)
{
  guint rate = GST_AUDIO_INFO_RATE (&level->info);
  GstClockTime interval = level->interval;
  guint frames = GST_CLOCK_TIME_TO_FRAMES (interval, rate);

  if (frames == 0) {
    /* the interval is shorter than a single frame at this sample rate */
    GST_WARNING_OBJECT (level, "interval %" GST_TIME_FORMAT " is too small, "
        "should be at least %" GST_TIME_FORMAT " for sample rate %u",
        GST_TIME_ARGS (interval),
        GST_TIME_ARGS (GST_FRAMES_TO_CLOCK_TIME (1, rate)), rate);
    frames = 1;
  }

  level->interval_frames = frames;

  GST_INFO_OBJECT (level, "interval_frames now %u for interval "
      "%" GST_TIME_FORMAT " and sample rate %u", frames,
      GST_TIME_ARGS (interval), rate);
}
/* Feed incoming samples to the aubio pitch detector, one hop_size block at
 * a time, and report the detected pitch on stdout (unless silent) and in
 * the debug log. */
static GstFlowReturn
gst_aubio_pitch_transform_ip (GstBaseTransform * trans, GstBuffer * buf)
{
  GstAubioPitch *filter = GST_AUBIO_PITCH (trans);
  GstAudioFilter *audiofilter = GST_AUDIO_FILTER (trans);
  smpl_t *input = (smpl_t *) GST_BUFFER_DATA (buf);
  gint nsamples = GST_BUFFER_SIZE (buf) / (4 * audiofilter->format.channels);
  uint j;

  /* block loop */
  for (j = 0; j < nsamples; j++) {
    /* copy input to ibuf */
    fvec_write_sample (filter->ibuf, input[j], filter->pos);

    if (filter->pos == filter->hop_size - 1) {
      smpl_t pitch;
      GstClockTime now;

      aubio_pitch_do (filter->t, filter->ibuf, filter->obuf);
      pitch = filter->obuf->data[0];

      /* buffer timestamp plus the offset of this hop inside the buffer */
      now = GST_BUFFER_TIMESTAMP (buf)
          + GST_FRAMES_TO_CLOCK_TIME (j, audiofilter->format.rate);

      if (filter->silent == FALSE) {
        g_print ("%" GST_TIME_FORMAT "\tpitch: %.3f\n",
            GST_TIME_ARGS (now), pitch);
      }

      GST_LOG_OBJECT (filter, "pitch %" GST_TIME_FORMAT ", freq %3.2f",
          GST_TIME_ARGS (now), pitch);

      filter->pos = -1;         /* so it will be zero next j loop */
    }
    filter->pos++;
  }

  return GST_FLOW_OK;
}
/**
 * gst_audio_info_convert:
 * @info: a #GstAudioInfo
 * @src_fmt: #GstFormat of the @src_val
 * @src_val: value to convert
 * @dest_fmt: #GstFormat of the @dest_val
 * @dest_val: pointer to destination value
 *
 * Converts among various #GstFormat types. This function handles
 * GST_FORMAT_BYTES, GST_FORMAT_TIME, and GST_FORMAT_DEFAULT. For
 * raw audio, GST_FORMAT_DEFAULT corresponds to audio frames. This
 * function can be used to handle pad queries of the type GST_QUERY_CONVERT.
 *
 * Returns: TRUE if the conversion was successful.
 */
gboolean
gst_audio_info_convert (const GstAudioInfo * info,
    GstFormat src_fmt, gint64 src_val, GstFormat dest_fmt, gint64 * dest_val)
{
  gboolean res = TRUE;
  gint bpf, rate;

  GST_DEBUG ("converting value %" G_GINT64_FORMAT " from %s (%d) to %s (%d)",
      src_val, gst_format_get_name (src_fmt), src_fmt,
      gst_format_get_name (dest_fmt), dest_fmt);

  /* trivial cases: same format, or unknown (-1) input value */
  if (src_fmt == dest_fmt || src_val == -1) {
    *dest_val = src_val;
    goto done;
  }

  /* get important info */
  bpf = GST_AUDIO_INFO_BPF (info);
  rate = GST_AUDIO_INFO_RATE (info);

  if (bpf == 0 || rate == 0) {
    GST_DEBUG ("no rate or bpf configured");
    res = FALSE;
    goto done;
  }

  /* assume failure; flip back to TRUE on every handled combination */
  res = FALSE;

  switch (src_fmt) {
    case GST_FORMAT_BYTES:
      if (dest_fmt == GST_FORMAT_TIME) {
        *dest_val = GST_FRAMES_TO_CLOCK_TIME (src_val / bpf, rate);
        res = TRUE;
      } else if (dest_fmt == GST_FORMAT_DEFAULT) {
        *dest_val = src_val / bpf;
        res = TRUE;
      }
      break;
    case GST_FORMAT_DEFAULT:
      if (dest_fmt == GST_FORMAT_TIME) {
        *dest_val = GST_FRAMES_TO_CLOCK_TIME (src_val, rate);
        res = TRUE;
      } else if (dest_fmt == GST_FORMAT_BYTES) {
        *dest_val = src_val * bpf;
        res = TRUE;
      }
      break;
    case GST_FORMAT_TIME:
      if (dest_fmt == GST_FORMAT_DEFAULT) {
        *dest_val = GST_CLOCK_TIME_TO_FRAMES (src_val, rate);
        res = TRUE;
      } else if (dest_fmt == GST_FORMAT_BYTES) {
        *dest_val = GST_CLOCK_TIME_TO_FRAMES (src_val, rate);
        *dest_val *= bpf;
        res = TRUE;
      }
      break;
    default:
      break;
  }

done:
  GST_DEBUG ("ret=%d result %" G_GINT64_FORMAT, res, res ? *dest_val : -1);

  return res;
}
/* Post a level message covering the frames accumulated so far: per-channel
 * RMS, peak and decay-peak values in dB.  Resets the per-interval state and
 * advances the message timestamp by the covered duration. */
static void
gst_level_post_message (GstLevel * filter)
{
  guint c;
  gint channels, rate;
  gint frames = filter->num_frames;
  GstClockTime duration;

  channels = GST_AUDIO_INFO_CHANNELS (&filter->info);
  rate = GST_AUDIO_INFO_RATE (&filter->info);
  duration = GST_FRAMES_TO_CLOCK_TIME (frames, rate);

  if (filter->post_messages) {
    GstMessage *m =
        gst_level_message_new (filter, filter->message_ts, duration);

    GST_LOG_OBJECT (filter,
        "message: ts %" GST_TIME_FORMAT ", duration %" GST_TIME_FORMAT
        ", num_frames %d", GST_TIME_ARGS (filter->message_ts),
        GST_TIME_ARGS (duration), frames);

    for (c = 0; c < channels; ++c) {
      gdouble rms, rms_dB, peak_dB, decay_dB;

      rms = sqrt (filter->CS[c] / frames);
      GST_LOG_OBJECT (filter,
          "message: channel %d, CS %f, RMS %f", c, filter->CS[c], rms);
      GST_LOG_OBJECT (filter,
          "message: last_peak: %f, decay_peak: %f",
          filter->last_peak[c], filter->decay_peak[c]);

      /* RMS values are calculated in amplitude, so 20 * log 10 */
      rms_dB = 20 * log10 (rms + EPSILON);
      /* peak values are square sums, ie. power, so 10 * log 10 */
      peak_dB = 10 * log10 (filter->last_peak[c] + EPSILON);
      decay_dB = 10 * log10 (filter->decay_peak[c] + EPSILON);

      if (filter->decay_peak[c] < filter->last_peak[c]) {
        /* this can happen in certain cases, for example when
         * the last peak is between decay_peak and decay_peak_base */
        GST_DEBUG_OBJECT (filter,
            "message: decay peak dB %f smaller than last peak dB %f, copying",
            decay_dB, peak_dB);
        filter->decay_peak[c] = filter->last_peak[c];
      }

      GST_LOG_OBJECT (filter,
          "message: RMS %f dB, peak %f dB, decay %f dB",
          rms_dB, peak_dB, decay_dB);

      gst_level_message_append_channel (m, rms_dB, peak_dB, decay_dB);

      /* reset cumulative and normal peak */
      filter->CS[c] = 0.0;
      filter->last_peak[c] = 0.0;
    }

    gst_element_post_message (GST_ELEMENT (filter), m);
  }

  filter->num_frames -= frames;
  filter->message_ts += duration;
}
/* gst_level_transform_ip:
 * In-place analysis pass: accumulates per-channel cumulative squares and
 * peaks over the mapped buffer, subdividing it into blocks so that a
 * message is posted exactly every interval_frames frames.
 */
static GstFlowReturn
gst_level_transform_ip (GstBaseTransform * trans, GstBuffer * in)
{
  GstLevel *filter;
  GstMapInfo map;
  guint8 *in_data;
  gsize in_size;
  gdouble CS;
  guint i;
  guint num_frames;
  guint num_int_samples = 0;    /* number of interleaved samples
                                 * ie. total count for all channels combined */
  guint block_size, block_int_size;     /* we subdivide buffers to not skip message
                                         * intervals */
  GstClockTimeDiff falloff_time;
  gint channels, rate, bps;

  filter = GST_LEVEL (trans);

  channels = GST_AUDIO_INFO_CHANNELS (&filter->info);
  bps = GST_AUDIO_INFO_BPS (&filter->info);
  rate = GST_AUDIO_INFO_RATE (&filter->info);

  gst_buffer_map (in, &map, GST_MAP_READ);
  in_data = map.data;
  in_size = map.size;

  num_int_samples = in_size / bps;

  GST_LOG_OBJECT (filter, "analyzing %u sample frames at ts %" GST_TIME_FORMAT,
      num_int_samples, GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (in)));

  g_return_val_if_fail (num_int_samples % channels == 0, GST_FLOW_ERROR);

  /* a discontinuity restarts the message interval at the buffer timestamp */
  if (GST_BUFFER_FLAG_IS_SET (in, GST_BUFFER_FLAG_DISCONT)) {
    filter->message_ts = GST_BUFFER_TIMESTAMP (in);
    filter->num_frames = 0;
  }
  if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (filter->message_ts))) {
    filter->message_ts = GST_BUFFER_TIMESTAMP (in);
  }

  num_frames = num_int_samples / channels;
  while (num_frames > 0) {
    /* process at most up to the next message boundary */
    block_size = filter->interval_frames - filter->num_frames;
    block_size = MIN (block_size, num_frames);
    block_int_size = block_size * channels;

    for (i = 0; i < channels; ++i) {
      /* GAP buffers carry silence: skip the math, report a zero peak */
      if (!GST_BUFFER_FLAG_IS_SET (in, GST_BUFFER_FLAG_GAP)) {
        filter->process (in_data + (bps * i), block_int_size, channels,
            &CS, &filter->peak[i]);
        GST_LOG_OBJECT (filter,
            "[%d]: cumulative squares %lf, over %d samples/%d channels",
            i, CS, block_int_size, channels);
        filter->CS[i] += CS;
      } else {
        filter->peak[i] = 0.0;
      }

      /* NOTE(review): this ages the decay peak by the REMAINING frame count
       * (num_frames), not by block_size, on every block iteration — looks
       * like it over-ages when a buffer spans multiple intervals; confirm
       * against upstream gstlevel.c before changing. */
      filter->decay_peak_age[i] += GST_FRAMES_TO_CLOCK_TIME (num_frames, rate);
      GST_LOG_OBJECT (filter,
          "[%d]: peak %f, last peak %f, decay peak %f, age %" GST_TIME_FORMAT,
          i, filter->peak[i], filter->last_peak[i], filter->decay_peak[i],
          GST_TIME_ARGS (filter->decay_peak_age[i]));

      /* update running peak */
      if (filter->peak[i] > filter->last_peak[i])
        filter->last_peak[i] = filter->peak[i];

      /* make decay peak fall off if too old */
      falloff_time =
          GST_CLOCK_DIFF (gst_gdouble_to_guint64 (filter->decay_peak_ttl),
          filter->decay_peak_age[i]);
      if (falloff_time > 0) {
        gdouble falloff_dB;
        gdouble falloff;
        gdouble length;         /* length of falloff time in seconds */

        length = (gdouble) falloff_time / (gdouble) GST_SECOND;
        falloff_dB = filter->decay_peak_falloff * length;
        /* dB falloff -> linear attenuation factor */
        falloff = pow (10, falloff_dB / -20.0);

        GST_LOG_OBJECT (filter,
            "falloff: current %f, base %f, interval %" GST_TIME_FORMAT
            ", dB falloff %f, factor %e",
            filter->decay_peak[i], filter->decay_peak_base[i],
            GST_TIME_ARGS (falloff_time), falloff_dB, falloff);
        filter->decay_peak[i] = filter->decay_peak_base[i] * falloff;
        GST_LOG_OBJECT (filter,
            "peak is %" GST_TIME_FORMAT " old, decayed with factor %e to %f",
            GST_TIME_ARGS (filter->decay_peak_age[i]), falloff,
            filter->decay_peak[i]);
      } else {
        GST_LOG_OBJECT (filter, "peak not old enough, not decaying");
      }

      /* if the peak of this run is higher, the decay peak gets reset */
      if (filter->peak[i] >= filter->decay_peak[i]) {
        GST_LOG_OBJECT (filter, "new peak, %f", filter->peak[i]);
        filter->decay_peak[i] = filter->peak[i];
        filter->decay_peak_base[i] = filter->peak[i];
        filter->decay_peak_age[i] = G_GINT64_CONSTANT (0);
      }
    }

    /* advance past this block (bps is bytes per sample, per channel) */
    in_data += block_size * bps * channels;

    filter->num_frames += block_size;
    num_frames -= block_size;

    /* do we need to message ? */
    if (filter->num_frames >= filter->interval_frames) {
      gst_level_post_message (filter);
    }
  }

  gst_buffer_unmap (in, &map);

  return GST_FLOW_OK;
}
/* update_rms_from_buffer:
 * Accumulates cumulative squares over @inbuf, then builds and returns a
 * "videoframe-audiolevel" element message carrying running-time, duration
 * and per-channel RMS values.  Resets self->CS[] for the next period.
 * Returns NULL (via g_return_val_if_fail) if the sample count is not a
 * multiple of the channel count.
 */
static GstMessage *
update_rms_from_buffer (GstVideoFrameAudioLevel * self, GstBuffer * inbuf)
{
  GstMapInfo map;
  guint8 *in_data;
  gsize in_size;
  gdouble CS;
  guint i;
  guint num_frames, frames;
  guint num_int_samples = 0;    /* number of interleaved samples
                                 * ie. total count for all channels combined */
  gint channels, rate, bps;
  GValue v = G_VALUE_INIT;
  GValue va = G_VALUE_INIT;
  GValueArray *a;
  GstStructure *s;
  GstMessage *msg;
  GstClockTime duration, running_time;

  channels = GST_AUDIO_INFO_CHANNELS (&self->ainfo);
  bps = GST_AUDIO_INFO_BPS (&self->ainfo);
  rate = GST_AUDIO_INFO_RATE (&self->ainfo);

  gst_buffer_map (inbuf, &map, GST_MAP_READ);
  in_data = map.data;
  in_size = map.size;

  num_int_samples = in_size / bps;

  GST_LOG_OBJECT (self, "analyzing %u sample frames at ts %" GST_TIME_FORMAT,
      num_int_samples, GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (inbuf)));

  g_return_val_if_fail (num_int_samples % channels == 0, NULL);

  num_frames = num_int_samples / channels;
  frames = num_frames;
  duration = GST_FRAMES_TO_CLOCK_TIME (frames, rate);
  if (num_frames > 0) {
    for (i = 0; i < channels; ++i) {
      /* process one channel's samples starting at its interleave offset */
      self->process (in_data + (bps * i), num_int_samples, channels, &CS);
      GST_LOG_OBJECT (self,
          "[%d]: cumulative squares %lf, over %d samples/%d channels",
          i, CS, num_int_samples, channels);
      self->CS[i] += CS;
    }
    /* NOTE(review): dead store — in_data is not read again after this */
    in_data += num_frames * bps;
    self->total_frames += num_frames;
  }
  /* running time = stream start plus total frames converted to time */
  running_time =
      self->first_time + gst_util_uint64_scale (self->total_frames, GST_SECOND,
      rate);

  a = g_value_array_new (channels);
  s = gst_structure_new ("videoframe-audiolevel", "running-time", G_TYPE_UINT64,
      running_time, "duration", G_TYPE_UINT64, duration, NULL);
  g_value_init (&v, G_TYPE_DOUBLE);
  g_value_init (&va, G_TYPE_VALUE_ARRAY);
  for (i = 0; i < channels; i++) {
    gdouble rms;
    if (frames == 0 || self->CS[i] == 0) {
      rms = 0;                  /* empty buffer */
    } else {
      rms = sqrt (self->CS[i] / frames);
    }
    /* reset the accumulator for the next measurement period */
    self->CS[i] = 0.0;
    g_value_set_double (&v, rms);
    g_value_array_append (a, &v);
  }
  /* va takes ownership of the array; the structure takes ownership of va */
  g_value_take_boxed (&va, a);
  gst_structure_take_value (s, "rms", &va);
  msg = gst_message_new_element (GST_OBJECT (self), s);
  gst_buffer_unmap (inbuf, &map);
  return msg;
}
/******************************************************************************
 * gst_tiaudenc1_encode_thread
 *     Call the audio codec to process a full input buffer.
 *
 *     Runs as a dedicated thread: pulls raw audio from the circular buffer,
 *     encodes it with the DMAI Aenc1 codec, and pushes encoded transport
 *     buffers to the source pad.  Uses Rendezvous objects to hand-shake with
 *     the main thread on startup, drain and shutdown — the ordering of the
 *     Rendezvous/codec-stop calls at the bottom is load-bearing (see the
 *     comments there).
 ******************************************************************************/
static void* gst_tiaudenc1_encode_thread(void *arg)
{
    /* hold a ref on the element for the lifetime of the thread */
    GstTIAudenc1 *audenc1 = GST_TIAUDENC1(gst_object_ref(arg));
    void *threadRet = GstTIThreadSuccess;
    Buffer_Handle hDstBuf;
    Int32 encDataConsumed;
    GstBuffer *encDataWindow = NULL;
    GstClockTime encDataTime;
    Buffer_Handle hEncDataWindow;
    GstBuffer *outBuf;
    GstClockTime sampleDuration;
    guint sampleRate;
    guint numSamples;
    Int bufIdx;
    Int ret;

    GST_LOG("starting audenc encode thread\n");

    /* Initialize codec engine */
    ret = gst_tiaudenc1_codec_start(audenc1);

    /* Notify main thread that it is ok to continue initialization */
    Rendezvous_meet(audenc1->waitOnEncodeThread);
    Rendezvous_reset(audenc1->waitOnEncodeThread);

    if (ret == FALSE) {
        GST_ELEMENT_ERROR(audenc1, RESOURCE, FAILED,
        ("Failed to start codec\n"), (NULL));
        goto thread_exit;
    }

    while (TRUE) {

        /* Obtain a raw data frame */
        encDataWindow  = gst_ticircbuffer_get_data(audenc1->circBuf);
        encDataTime    = GST_BUFFER_TIMESTAMP(encDataWindow);
        hEncDataWindow = GST_TIDMAIBUFFERTRANSPORT_DMAIBUF(encDataWindow);

        /* Check if there is enough encoded data to be sent to the codec.
         * The last frame of data may not be sufficient to meet the codec
         * requirements for the amount of input data.  If so just throw
         * away the last bit of data rather than filling with bogus
         * data.
         */
        if (GST_BUFFER_SIZE(encDataWindow) <
            Aenc1_getInBufSize(audenc1->hAe)) {
            GST_LOG("Not enough audio data remains\n");
            if (!audenc1->drainingEOS) {
                goto thread_failure;
            }
            goto thread_exit;
        }

        /* Obtain a free output buffer for the encoded data */
        if (!(hDstBuf = gst_tidmaibuftab_get_buf(audenc1->hOutBufTab))) {
            GST_ELEMENT_ERROR(audenc1, RESOURCE, READ,
                ("Failed to get a free contiguous buffer from BufTab\n"),
                (NULL));
            goto thread_exit;
        }

        /* Invoke the audio encoder */
        GST_LOG("Invoking the audio encoder at 0x%08lx with %u bytes\n",
            (unsigned long)Buffer_getUserPtr(hEncDataWindow),
            GST_BUFFER_SIZE(encDataWindow));
        ret = Aenc1_process(audenc1->hAe, hEncDataWindow, hDstBuf);
        encDataConsumed = Buffer_getNumBytesUsed(hEncDataWindow);

        if (ret < 0) {
            GST_ELEMENT_ERROR(audenc1, STREAM, ENCODE,
            ("Failed to encode audio buffer\n"), (NULL));
            goto thread_failure;
        }

        /* If no encoded data was used we cannot find the next frame */
        if (ret == Dmai_EBITERROR && encDataConsumed == 0) {
            GST_ELEMENT_ERROR(audenc1, STREAM, ENCODE,
            ("Fatal bit error\n"), (NULL));
            goto thread_failure;
        }

        if (ret > 0) {
            GST_LOG("Aenc1_process returned success code %d\n", ret);
        }

        /* Derive the output duration from the consumed input bytes.
         * NOTE(review): the divisor assumes 2 bytes (16-bit) per sample per
         * channel — confirm against the negotiated caps. */
        sampleRate     = audenc1->samplefreq;
        numSamples     = encDataConsumed / (2 * audenc1->channels) ;
        sampleDuration = GST_FRAMES_TO_CLOCK_TIME(numSamples, sampleRate);

        /* Release the reference buffer, and tell the circular buffer how much
         * data was consumed.
         */
        ret = gst_ticircbuffer_data_consumed(audenc1->circBuf, encDataWindow,
                  encDataConsumed);
        encDataWindow = NULL;

        if (!ret) {
            goto thread_failure;
        }

        /* Set the source pad capabilities based on the encoded frame
         * properties.
         */
        gst_tiaudenc1_set_source_caps(audenc1);

        /* Create a DMAI transport buffer object to carry a DMAI buffer to
         * the source pad.  The transport buffer knows how to release the
         * buffer for re-use in this element when the source pad calls
         * gst_buffer_unref().
         */
        outBuf = gst_tidmaibuffertransport_new(hDstBuf, audenc1->hOutBufTab,
            NULL, NULL);
        gst_buffer_set_data(outBuf, GST_BUFFER_DATA(outBuf),
            Buffer_getNumBytesUsed(hDstBuf));
        gst_buffer_set_caps(outBuf, GST_PAD_CAPS(audenc1->srcpad));

        /* Set timestamp on output buffer */
        if (audenc1->genTimeStamps) {
            GST_BUFFER_DURATION(outBuf)  = sampleDuration;
            GST_BUFFER_TIMESTAMP(outBuf) = encDataTime;
        }
        else {
            GST_BUFFER_TIMESTAMP(outBuf) = GST_CLOCK_TIME_NONE;
        }

        /* Tell circular buffer how much time we consumed */
        gst_ticircbuffer_time_consumed(audenc1->circBuf, sampleDuration);

        /* Push the transport buffer to the source pad */
        GST_LOG("pushing buffer to source pad with timestamp : %"
                GST_TIME_FORMAT ", duration: %" GST_TIME_FORMAT,
                GST_TIME_ARGS (GST_BUFFER_TIMESTAMP(outBuf)),
                GST_TIME_ARGS (GST_BUFFER_DURATION(outBuf)));

        if (gst_pad_push(audenc1->srcpad, outBuf) != GST_FLOW_OK) {
            GST_DEBUG("push to source pad failed\n");
            goto thread_failure;
        }

        /* Release buffers no longer in use by the codec */
        Buffer_freeUseMask(hDstBuf, gst_tidmaibuffer_CODEC_FREE);
    }

thread_failure:
    gst_tithread_set_status(audenc1, TIThread_CODEC_ABORTED);
    gst_ticircbuffer_consumer_aborted(audenc1->circBuf);
    threadRet = GstTIThreadFailure;

thread_exit:

    /* Re-claim any buffers owned by the codec */
    bufIdx = BufTab_getNumBufs(GST_TIDMAIBUFTAB_BUFTAB(audenc1->hOutBufTab));

    while (bufIdx-- > 0) {
        Buffer_Handle hBuf = BufTab_getBuf(
            GST_TIDMAIBUFTAB_BUFTAB(audenc1->hOutBufTab), bufIdx);
        Buffer_freeUseMask(hBuf, gst_tidmaibuffer_CODEC_FREE);
    }

    /* Release the last buffer we retrieved from the circular buffer */
    if (encDataWindow) {
        gst_ticircbuffer_data_consumed(audenc1->circBuf, encDataWindow, 0);
    }

    /* We have to wait to shut down this thread until we can guarantee that
     * no more input buffers will be queued into the circular buffer
     * (we're about to delete it).
     */
    Rendezvous_meet(audenc1->waitOnEncodeThread);
    Rendezvous_reset(audenc1->waitOnEncodeThread);

    /* Notify main thread that we are done draining before we shutdown the
     * codec, or we will hang.  We proceed in this order so the EOS event gets
     * propagated downstream before we attempt to shut down the codec.  The
     * codec-shutdown process will block until all BufTab buffers have been
     * released, and downstream-elements may hang on to buffers until
     * they get the EOS.
     */
    Rendezvous_force(audenc1->waitOnEncodeDrain);

    /* Shut down the codec engine */
    if (gst_tiaudenc1_codec_stop(audenc1) < 0) {
        GST_ERROR("failed to stop codec\n");
        GST_ELEMENT_ERROR(audenc1, RESOURCE, FAILED,
            ("Failed to stop codec\n"), (NULL));
    }

    gst_object_unref(audenc1);

    GST_LOG("exit audio encode_thread (%d)\n", (int)threadRet);
    return threadRet;
}