/* Sink-pad event handler.
 *
 * On EOS, walk the saved buffer list backwards to locate the last
 * non-silent sample (ignoring the final end_skip interval), flush all
 * buffered data up to that point plus post_silence, and record the
 * resulting sound duration.  All events are then forwarded downstream
 * via the default handler.
 */
static gboolean
audio_trim_event (GstPad * pad, GstEvent *event)
{
  if (GST_EVENT_TYPE (event) == GST_EVENT_EOS) {
    AudioTrim *filter = AUDIO_TRIM (GST_OBJECT_PARENT (pad));
    GList *node = g_list_last (filter->buffers);

    g_debug ("Got EOS");
    if (node) {
      /* Search no further than the stream end minus the trailing skip. */
      gint64 search_limit =
          GST_BUFFER_OFFSET_END ((GstBuffer *) node->data)
          - time_to_sample (filter, filter->end_skip);

      filter->accumulator = 0.0;

      /* Scan the buffered tail of the stream from newest to oldest. */
      for (; node != NULL; node = g_list_previous (node)) {
        gint64 sound_end =
            find_not_silence_rev (filter, node->data, search_limit);

        if (sound_end == GST_BUFFER_OFFSET_NONE)
          continue;             /* whole buffer silent; keep looking */

        /* Keep post_silence worth of samples after the last sound. */
        sound_end += time_to_sample (filter, filter->post_silence);
        send_buffers_before (filter, sound_end);
        filter->sound_duration =
            sample_to_time (filter, sound_end - filter->ref_time);
        break;
      }
    }
  }
  return gst_pad_event_default (pad, event);
}
/* chain function
 * this function does the actual processing
 *
 * Runs a small state machine over incoming buffers:
 *   NOT_STARTED    -> record the reference time (first offset + start_skip),
 *                     optionally push a 1-sample empty buffer downstream.
 *   START_SKIP     -> drop data until ref_time is reached.
 *   START_SILENCE  -> buffer data, keeping at most pre_silence worth,
 *                     until the first non-silent sample is found; then
 *                     flush everything from clip_start onwards.
 *   NOT_SILENCE    -> pass data through, holding back at most
 *                     max_silence_duration of buffered audio.
 *
 * Ownership: incoming buf is consumed on every path (pushed, saved,
 * or unref'd).  Returns the first non-OK flow result from downstream.
 *
 * Fixes vs. previous revision:
 *  - START_SKIP drop path now clears buf after unref; previously the
 *    while(buf) loop re-entered the switch with a freed buffer
 *    (use-after-free / endless loop).
 *  - GST_BUFFER_SIZE(first) uses sizeof(gfloat) instead of a magic 4,
 *    matching the allocation above it.
 */
static GstFlowReturn
audio_trim_chain (GstPad * pad, GstBuffer * buf)
{
  AudioTrim *filter;

  /* Upstream must provide sample offsets; the trimming logic is
   * entirely offset-driven. */
  g_assert(GST_BUFFER_OFFSET(buf) != GST_BUFFER_OFFSET_NONE);
  g_assert(GST_BUFFER_OFFSET_END(buf) != GST_BUFFER_OFFSET_NONE);
  filter = AUDIO_TRIM (GST_OBJECT_PARENT (pad));

  /* A state transition may leave a remainder of the buffer to process,
   * so loop until the current buffer is fully consumed. */
  while(buf) {
    g_assert(GST_IS_BUFFER(buf));
    switch(filter->trim_state) {
    case AUDIO_TRIM_NOT_STARTED:
      /* Everything before ref_time is unconditionally skipped. */
      filter->ref_time = (GST_BUFFER_OFFSET(buf)
                          + time_to_sample(filter, filter->start_skip));
      if (filter->empty_start_packet) {
        GstFlowReturn ret;
        GstBuffer *first;

        /* Push a single zero sample so downstream sees the stream
         * start immediately, with caps/timestamps copied from buf. */
        first = gst_buffer_new_and_alloc (sizeof(gfloat));
        *(gfloat*)GST_BUFFER_DATA(first) = 0.0;
        GST_BUFFER_SIZE(first) = sizeof(gfloat);
        GST_BUFFER_OFFSET(first) = GST_BUFFER_OFFSET(buf);
        GST_BUFFER_OFFSET_END(first) = GST_BUFFER_OFFSET(buf);
        GST_BUFFER_TIMESTAMP(first) = GST_BUFFER_TIMESTAMP(buf);
        GST_BUFFER_DURATION(first) = 0;
        GST_BUFFER_CAPS(first) = gst_caps_ref(GST_BUFFER_CAPS(buf));
        ret = gst_pad_push(filter->srcpad, first);
        if (ret != GST_FLOW_OK) {
          gst_buffer_unref(buf);
          return ret;
        }
      }
      filter->trim_state = AUDIO_TRIM_START_SKIP;
      break;

    case AUDIO_TRIM_START_SKIP:
      if (GST_BUFFER_OFFSET_END(buf) <= filter->ref_time) {
        /* Ignore buffer completely */
        gst_buffer_unref(buf);
        buf = NULL;  /* FIX: without this the loop reused a freed buffer */
      } else {
        /* Keep only the part of the buffer past ref_time. */
        GstBuffer *tail = buffer_tail(filter, buf, filter->ref_time);
        gst_buffer_unref(buf);
        buf = tail;
        filter->trim_state = AUDIO_TRIM_START_SILENCE;
      }
      break;

    case AUDIO_TRIM_START_SILENCE:
      {
        guint64 offset = find_not_silence(filter, buf);
        if (offset == GST_BUFFER_OFFSET_NONE) {
          /* Still silent: keep at most pre_silence of history,
           * dropping the oldest buffers. */
          while(filter->buffered > filter->pre_silence) {
            GstBuffer *old = filter->buffers->data;
            filter->buffered -= GST_BUFFER_DURATION(old);
            gst_buffer_unref(old);
            filter->buffers =
              g_list_delete_link(filter->buffers, filter->buffers);
          }
          save_buffer(filter, buf);
          buf = NULL;
        } else {
          GstBuffer *head;
          GstBuffer *tail;
          GstFlowReturn ret;
          gint64 clip_start;

          /* Sound found: start the clip pre_silence before it. */
          clip_start = offset - time_to_sample(filter, filter->pre_silence);
          ret = send_buffers_after(filter, clip_start);
          if (ret != GST_FLOW_OK) {
            gst_buffer_unref(buf);
            return ret;
          }
          /* Leading slice of this buffer: [clip_start, offset). */
          head = buffer_slice(filter, buf, clip_start, offset);
          if (head) {
            ret = gst_pad_push(filter->srcpad, head);
            if (ret != GST_FLOW_OK) {
              gst_buffer_unref(buf);
              return ret;
            }
          }
          tail = buffer_tail(filter, buf, offset);
          filter->sound_duration =
            sample_to_time(filter, GST_BUFFER_OFFSET_END(buf) - clip_start);
          filter->ref_time = clip_start;
          gst_buffer_unref(buf);
          buf = tail;  /* remainder is re-processed as NOT_SILENCE */
          filter->trim_state = AUDIO_TRIM_NOT_SILENCE;
          g_debug("Got sound");
        }
      }
      break;

    case AUDIO_TRIM_NOT_SILENCE:
      {
        GstFlowReturn ret;
        filter->sound_duration += GST_BUFFER_DURATION(buf);
        /* Flush buffered audio beyond max_silence_duration downstream;
         * gst_pad_push takes ownership of old. */
        while(filter->buffered > filter->max_silence_duration) {
          GstBuffer *old = filter->buffers->data;
          filter->buffered -= GST_BUFFER_DURATION(old);
          filter->buffers =
            g_list_delete_link(filter->buffers, filter->buffers);
          ret = gst_pad_push(filter->srcpad, old);
          if (ret != GST_FLOW_OK) {
            gst_buffer_unref(buf);
            return ret;
          }
        }
        save_buffer(filter, buf);
        buf = NULL;
      }
      break;

    default:
      gst_buffer_unref(buf);
      buf = NULL;
    }
  }
  return GST_FLOW_OK;
}
// reported by everwanna:
// av out of sync because:
// audio track 0 without stss, seek to the exact time.
// video track 1 with stss, seek to the nearest key frame time.
//
// fixed:
// first pass we get the new aligned times for traks with an stss present
// second pass is for traks without an stss
//
// Aligns a [start, end] seek range (in moov timescale units) to sample
// boundaries for every trak, writing the chosen sample indices into
// trak_sample_start[] / trak_sample_end[].  When a chosen sample is not a
// sync sample (is_ss_), it backtracks to the previous keyframe and snaps
// start/end to that keyframe's time, so that the second pass re-aligns the
// remaining traks to the adjusted times.
// Returns 0 when the aligned range is empty (end != 0 and start >= end),
// 1 otherwise.
static int get_aligned_start_and_end(mp4_context_t const* mp4_context,
                                     int64_t start, int64_t end,
                                     unsigned int* trak_sample_start,
                                     unsigned int* trak_sample_end)
{
  unsigned int pass;
  moov_t const* moov = mp4_context->moov;
  long moov_time_scale = moov->mvhd_->timescale_;

  // Two passes: adjustments made for one trak (keyframe snapping mutates
  // start/end) are propagated to the others on the second pass.
  for(pass = 0; pass != 2; ++pass) {
    unsigned int i;
    for(i = 0; i != moov->tracks_; ++i) {
      trak_t const* trak = moov->traks_[i];
      long trak_time_scale = trak->mdia_->mdhd_->timescale_;

      // Empty trak: nothing to align.
      if(trak->samples_size_ == 0) {
        trak_sample_start[i] = 0;
        trak_sample_end[i] = 0;
        continue;
      }

      // get start
      {
        unsigned int sample_index =
          time_to_sample(trak,
            moov_time_to_trak_time(start, moov_time_scale, trak_time_scale));

        // backtrack to nearest keyframe
        if(!trak->samples_[sample_index].is_ss_) {
          while(sample_index && !trak->samples_[sample_index].is_ss_) {
            --sample_index;
          }
          // Snap the shared start time to this keyframe so other traks
          // align to it on the next pass.
          start = trak_time_to_moov_time(
            trak->samples_[sample_index].pts_,
            moov_time_scale, trak_time_scale);
        }
        trak_sample_start[i] = sample_index;
        // MP4_INFO("ts=%"PRId64" (moov time)\n", start);
        MP4_INFO("ts=%.2f (seconds)\n",
          trak->samples_[sample_index].pts_ / (float)trak_time_scale);
      }
      // get end
      {
        unsigned int sample_index;
        if(end == 0) {
          // The default is till-the-end of the track
          // NOTE(review): this reads trak->samples_[samples_size_] below;
          // assumes samples_ is allocated with a sentinel entry one past
          // the last real sample — confirm against the moov parser.
          sample_index = trak->samples_size_;
        } else {
          sample_index =
            time_to_sample(trak,
              moov_time_to_trak_time(end, moov_time_scale, trak_time_scale));
        }
        // backtrack to nearest keyframe
        if(!trak->samples_[sample_index].is_ss_) {
          while(sample_index && !trak->samples_[sample_index].is_ss_) {
            --sample_index;
          }
          end = trak_time_to_moov_time(
            trak->samples_[sample_index].pts_,
            moov_time_scale, trak_time_scale);
        }
        trak_sample_end[i] = sample_index;
        // MP4_INFO("te=%"PRId64" (moov time)\n", end);
        MP4_INFO("te=%.2f (seconds)\n",
          trak->samples_[sample_index].pts_ / (float)trak_time_scale);
      }
    }
  }

  MP4_INFO("final start=%"PRId64"\n", start);
  MP4_INFO("final end=%"PRId64"\n", end);

  // Reject an empty/inverted range (end == 0 means "to end of track").
  if(end && start >= end) {
    return 0;
  }
  return 1;
}