// Frees the specified buffer list.
void hb_buffer_close( hb_buffer_t ** _b )
{
    hb_buffer_t * b = *_b;

    while( b )
    {
        hb_buffer_t * next = b->next;
        hb_fifo_t *buffer_pool = size_to_pool( b->alloc );

        b->next = NULL;

        if( buffer_pool && b->data && !hb_fifo_is_full( buffer_pool ) )
        {
            hb_fifo_push_head( buffer_pool, b );
            b = next;
            continue;
        }
        // either the pool is full or this size doesn't use a pool
        // free the buf
        if( b->data )
        {
            free( b->data );
            hb_lock(buffers.lock);
            buffers.allocated -= b->alloc;
            hb_unlock(buffers.lock);
        }
        free( b );
        b = next;
    }
    *_b = NULL;
}
/***********************************************************************
 * SyncAudio
 ***********************************************************************
 *
 **********************************************************************/
static int syncAudioWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
                          hb_buffer_t ** buf_out )
{
    hb_work_private_t * pv = w->private_data;
    hb_job_t          * job = pv->job;
    hb_sync_audio_t   * sync = &pv->type.audio;
    hb_buffer_t       * buf;
    int64_t start;

    *buf_out = NULL;
    buf = *buf_in;
    *buf_in = NULL;
    /* if the next buffer is an eof send it downstream */
    if ( buf->size <= 0 )
    {
        hb_buffer_close( &buf );
        *buf_out = hb_buffer_init( 0 );
        pv->common->first_pts[sync->index+1] = INT64_MAX - 1;
        return HB_WORK_DONE;
    }

    /* Wait till we can determine the initial pts of all streams */
    if( pv->common->pts_offset == INT64_MIN )
    {
        pv->common->first_pts[sync->index+1] = buf->s.start;
        hb_lock( pv->common->mutex );
        while( pv->common->pts_offset == INT64_MIN )
        {
            // Full fifos will make us wait forever, so get the
            // pts offset from the available streams if full
            if (hb_fifo_is_full(w->fifo_in))
            {
                getPtsOffset( w );
                hb_cond_broadcast( pv->common->next_frame );
            }
            else if ( checkPtsOffset( w ) )
                hb_cond_broadcast( pv->common->next_frame );
            else
                hb_cond_timedwait( pv->common->next_frame, pv->common->mutex, 200 );
        }
        hb_unlock( pv->common->mutex );
    }

    /* Wait for start frame if doing point-to-point */
    hb_lock( pv->common->mutex );
    start = buf->s.start - pv->common->audio_pts_slip;
    while ( !pv->common->start_found )
    {
        if ( pv->common->audio_pts_thresh < 0 )
        {
            // I would initialize this in hb_sync_init, but
            // job->pts_to_start can be modified by reader
            // after hb_sync_init is called.
            pv->common->audio_pts_thresh = job->pts_to_start;
        }
        if ( buf->s.start < pv->common->audio_pts_thresh )
        {
            hb_buffer_close( &buf );
            hb_unlock( pv->common->mutex );
            return HB_WORK_OK;
        }
        while ( !pv->common->start_found &&
                buf->s.start >= pv->common->audio_pts_thresh )
        {
            hb_cond_timedwait( pv->common->next_frame, pv->common->mutex, 10 );
            // There is an unfortunate unavoidable deadlock that can occur.
            // Since we need to wait for a specific frame in syncVideoWork,
            // syncAudioWork can be stalled indefinitely. The video decoder
            // often drops several of the initial frames after starting
            // because they require references that have not been decoded yet.
            // This allows a lot of audio to be queued in the fifo and the
            // audio fifo fills before we get a single video frame. So we
            // must drop some audio to unplug the pipeline and allow the first
            // video frame to be decoded.
            if ( hb_fifo_is_full(w->fifo_in) )
            {
                hb_buffer_t *tmp;
                tmp = buf = hb_fifo_get( w->fifo_in );
                while ( tmp )
                {
                    tmp = hb_fifo_get( w->fifo_in );
                    if ( tmp )
                    {
                        hb_buffer_close( &buf );
                        buf = tmp;
                    }
                }
            }
        }
        start = buf->s.start - pv->common->audio_pts_slip;
    }
    if ( start < 0 )
    {
        hb_buffer_close( &buf );
        hb_unlock( pv->common->mutex );
        return HB_WORK_OK;
    }
    hb_unlock( pv->common->mutex );

    if( job->frame_to_stop && pv->common->count_frames >= job->frame_to_stop )
    {
        hb_buffer_close( &buf );
        *buf_out = hb_buffer_init( 0 );
        return HB_WORK_DONE;
    }

    if( job->pts_to_stop && sync->next_start >= job->pts_to_stop )
    {
        hb_buffer_close( &buf );
        *buf_out = hb_buffer_init( 0 );
        return HB_WORK_DONE;
    }

    // audio time went backwards.
    // If our output clock is more than a half frame ahead of the
    // input clock drop this frame to move closer to sync.
    // Otherwise drop frames until the input clock matches the output clock.
    if ( sync->next_start - start > 90*15 )
    {
        // Discard data that's in the past.
        if ( sync->first_drop == 0 )
        {
            sync->first_drop = start;
        }
        ++sync->drop_count;
        hb_buffer_close( &buf );
        return HB_WORK_OK;
    }
    if ( sync->first_drop )
    {
        // we were dropping old data but input buf time is now current
        hb_log( "sync: audio 0x%x time went backwards %d ms, dropped %d frames "
                "(start %"PRId64", next %"PRId64")", w->audio->id,
                (int)( sync->next_start - sync->first_drop ) / 90,
                sync->drop_count, sync->first_drop, (int64_t)sync->next_start );
        sync->first_drop = 0;
        sync->drop_count = 0;
    }
    if ( start - sync->next_start >= (90 * 70) )
    {
        if ( start - sync->next_start > (90000LL * 60) )
        {
            // there's a gap of more than a minute between the last
            // frame and this. assume we got a corrupted timestamp
            // and just drop the next buf.
            hb_log( "sync: %d minute time gap in audio 0x%x - dropping buf"
                    " start %"PRId64", next %"PRId64,
                    (int)((start - sync->next_start) / (90000*60)),
                    w->audio->id, start, (int64_t)sync->next_start );
            hb_buffer_close( &buf );
            return HB_WORK_OK;
        }
        /*
         * there's a gap of at least 70ms between the last
         * frame we processed & the next. Fill it with silence.
         * Or in the case of DCA, skip some frames from the
         * other streams.
         */
        if ( sync->drop_video_to_sync )
        {
            hb_log( "sync: audio gap %d ms. Skipping frames. Audio 0x%x"
                    " start %"PRId64", next %"PRId64,
                    (int)((start - sync->next_start) / 90),
                    w->audio->id, start, (int64_t)sync->next_start );
            hb_lock( pv->common->mutex );
            pv->common->audio_pts_slip += (start - sync->next_start);
            pv->common->video_pts_slip += (start - sync->next_start);
            hb_unlock( pv->common->mutex );
            *buf_out = OutputAudioFrame( w->audio, buf, sync );
            return HB_WORK_OK;
        }
        hb_log( "sync: adding %d ms of silence to audio 0x%x"
                " start %"PRId64", next %"PRId64,
                (int)((start - sync->next_start) / 90),
                w->audio->id, start, (int64_t)sync->next_start );
        InsertSilence( w, start - sync->next_start );
    }

    /*
     * When we get here we've taken care of all the dups and gaps in the
     * audio stream and are ready to inject the next input frame into
     * the output stream.
     */
    *buf_out = OutputAudioFrame( w->audio, buf, sync );
    return HB_WORK_OK;
}
/***********************************************************************
 * syncVideoWork
 ***********************************************************************
 *
 **********************************************************************/
int syncVideoWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
                   hb_buffer_t ** buf_out )
{
    hb_buffer_t * cur, * next, * sub = NULL;
    hb_work_private_t * pv = w->private_data;
    hb_job_t          * job = pv->job;
    hb_subtitle_t     * subtitle;
    hb_sync_video_t   * sync = &pv->type.video;
    int i;
    int64_t next_start;

    *buf_out = NULL;
    next = *buf_in;
    *buf_in = NULL;

    /* Wait till we can determine the initial pts of all streams */
    if( next->size != 0 && pv->common->pts_offset == INT64_MIN )
    {
        pv->common->first_pts[0] = next->s.start;
        hb_lock( pv->common->mutex );
        while( pv->common->pts_offset == INT64_MIN )
        {
            // Full fifos will make us wait forever, so get the
            // pts offset from the available streams if full
            if ( hb_fifo_is_full( job->fifo_raw ) )
            {
                getPtsOffset( w );
                hb_cond_broadcast( pv->common->next_frame );
            }
            else if ( checkPtsOffset( w ) )
                hb_cond_broadcast( pv->common->next_frame );
            else
                hb_cond_timedwait( pv->common->next_frame, pv->common->mutex, 200 );
        }
        hb_unlock( pv->common->mutex );
    }

    hb_lock( pv->common->mutex );
    next_start = next->s.start - pv->common->video_pts_slip;
    hb_unlock( pv->common->mutex );

    /* Wait for start of point-to-point encoding */
    if( !pv->common->start_found )
    {
        hb_sync_video_t * sync = &pv->type.video;

        if( next->size == 0 )
        {
            *buf_out = next;
            pv->common->start_found = 1;
            pv->common->first_pts[0] = INT64_MAX - 1;
            hb_cond_broadcast( pv->common->next_frame );

            /*
             * Push through any subtitle EOFs in case they
             * were not synced through.
             */
            for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
            {
                subtitle = hb_list_item( job->list_subtitle, i );
                if( subtitle->config.dest == PASSTHRUSUB )
                {
                    hb_fifo_push( subtitle->fifo_out, hb_buffer_init( 0 ) );
                }
            }
            return HB_WORK_DONE;
        }
        if ( pv->common->count_frames < job->frame_to_start ||
             next->s.start < job->pts_to_start )
        {
            // Flush any subtitles that have pts prior to the
            // current frame
            for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
            {
                subtitle = hb_list_item( job->list_subtitle, i );
                while( ( sub = hb_fifo_see( subtitle->fifo_raw ) ) )
                {
                    if ( sub->s.start > next->s.start )
                        break;
                    sub = hb_fifo_get( subtitle->fifo_raw );
                    hb_buffer_close( &sub );
                }
            }
            hb_lock( pv->common->mutex );
            // Tell the audio threads what must be dropped
            pv->common->audio_pts_thresh = next_start + pv->common->video_pts_slip;
            hb_cond_broadcast( pv->common->next_frame );
            hb_unlock( pv->common->mutex );

            UpdateSearchState( w, next_start );
            hb_buffer_close( &next );

            return HB_WORK_OK;
        }
        hb_lock( pv->common->mutex );
        // Start point found: fold the elapsed time into the pts slip so
        // output timestamps restart at zero, and wake the audio threads.
        pv->common->audio_pts_thresh = 0;
        pv->common->audio_pts_slip += next_start;
        pv->common->video_pts_slip += next_start;
        next_start = 0;
        pv->common->start_found = 1;
        pv->common->count_frames = 0;
        hb_cond_broadcast( pv->common->next_frame );
        hb_unlock( pv->common->mutex );
        sync->st_first = 0;
    }

    if( !sync->cur )
    {
        sync->cur = next;
        if (next->size == 0)
        {
            /* we got an end-of-stream as our first video packet?
             * Feed it downstream & signal that we're done.
             */
            *buf_out = next;
            pv->common->start_found = 1;
            pv->common->first_pts[0] = INT64_MAX - 1;
            hb_cond_broadcast( pv->common->next_frame );

            /*
             * Push through any subtitle EOFs in case they
             * were not synced through.
             */
            for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
            {
                subtitle = hb_list_item( job->list_subtitle, i );
                if( subtitle->config.dest == PASSTHRUSUB )
                {
                    hb_fifo_push( subtitle->fifo_out, hb_buffer_init( 0 ) );
                }
            }
            return HB_WORK_DONE;
        }
        return HB_WORK_OK;
    }
    cur = sync->cur;
    /* At this point we have a frame to process. Let's check
        1) if we will be able to push into the fifo ahead
        2) if the next frame is there already, since we need it to
           compute the duration of the current frame */
    if( next->size == 0 )
    {
        hb_buffer_close( &next );

        pv->common->first_pts[0] = INT64_MAX - 1;
        cur->s.start = sync->next_start;
        cur->s.stop = cur->s.start + 90000. / ((double)job->vrate / (double)job->vrate_base);
        sync->next_start += cur->s.stop - cur->s.start;

        /* Make sure last frame is reflected in frame count */
        pv->common->count_frames++;

        /* Push the frame to the renderer */
        *buf_out = cur;
        sync->cur = NULL;

        /* we got an end-of-stream. Feed it downstream & signal that
         * we're done. Note that this means we drop the final frame of
         * video (we don't know its duration). On DVDs the final frame
         * is often strange and dropping it seems to be a good idea. */
        (*buf_out)->next = hb_buffer_init( 0 );

        /*
         * Push through any subtitle EOFs in case they were not synced through.
         */
        for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
        {
            subtitle = hb_list_item( job->list_subtitle, i );
            if( subtitle->config.dest == PASSTHRUSUB )
            {
                hb_fifo_push( subtitle->fifo_out, hb_buffer_init( 0 ) );
            }
        }
        pv->common->start_found = 1;
        hb_cond_broadcast( pv->common->next_frame );
        return HB_WORK_DONE;
    }

    /* Check for end of point-to-point frame encoding */
    if( job->frame_to_stop && pv->common->count_frames > job->frame_to_stop )
    {
        // Drop an empty buffer into our output to ensure that things
        // get flushed all the way out.
        hb_buffer_close( &sync->cur );
        hb_buffer_close( &next );
        *buf_out = hb_buffer_init( 0 );
        hb_log( "sync: reached %d frames, exiting early",
                pv->common->count_frames );

        /*
         * Push through any subtitle EOFs in case they were not synced through.
         */
        for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
        {
            subtitle = hb_list_item( job->list_subtitle, i );
            if( subtitle->config.dest == PASSTHRUSUB )
            {
                hb_fifo_push( subtitle->fifo_out, hb_buffer_init( 0 ) );
            }
        }
        return HB_WORK_DONE;
    }

    /* Check for end of point-to-point pts encoding */
    if( job->pts_to_stop && sync->next_start >= job->pts_to_stop )
    {
        // Drop an empty buffer into our output to ensure that things
        // get flushed all the way out.
        hb_log( "sync: reached pts %"PRId64", exiting early", cur->s.start );
        hb_buffer_close( &sync->cur );
        hb_buffer_close( &next );
        *buf_out = hb_buffer_init( 0 );

        /*
         * Push through any subtitle EOFs in case they were not synced through.
         */
        for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
        {
            subtitle = hb_list_item( job->list_subtitle, i );
            if( subtitle->config.dest == PASSTHRUSUB )
            {
                hb_fifo_push( subtitle->fifo_out, hb_buffer_init( 0 ) );
            }
        }
        return HB_WORK_DONE;
    }

    if( sync->first_frame )
    {
        /* This is our first frame */
        if ( cur->s.start > 0 )
        {
            /*
             * The first pts from a dvd should always be zero but
             * can be non-zero with a transport or program stream since
             * we're not guaranteed to start on an IDR frame. If we get
             * a non-zero initial PTS extend its duration so it behaves
             * as if it started at zero so that our audio timing will
             * be in sync.
             */
            hb_log( "sync: first pts is %"PRId64, cur->s.start );
            cur->s.start = 0;
        }
        sync->first_frame = 0;
    }

    /*
     * since the first frame is always 0 and the upstream reader code
     * is taking care of adjusting for pts discontinuities, we just have
     * to deal with the next frame's start being in the past. This can
     * happen when the PTS is adjusted after data loss but video frame
     * reordering causes some frames with the old clock to appear after
     * the clock change. This creates frames that overlap in time which
     * looks to us like time going backward. The downstream muxing code
     * can deal with overlaps of up to a frame time but anything larger
     * we handle by dropping frames here.
     */
    if ( next_start - cur->s.start <= 0 )
    {
        if ( sync->first_drop == 0 )
        {
            sync->first_drop = next_start;
        }
        ++sync->drop_count;
        if ( next->s.new_chap )
        {
            // don't drop a chapter mark when we drop the buffer
            sync->chap_mark = next->s.new_chap;
        }
        hb_buffer_close( &next );
        return HB_WORK_OK;
    }
    if ( sync->first_drop )
    {
        hb_log( "sync: video time didn't advance - dropped %d frames "
                "(delta %d ms, current %"PRId64", next %"PRId64", dur %d)",
                sync->drop_count, (int)( cur->s.start - sync->first_drop ) / 90,
                cur->s.start, next_start, (int)( next_start - cur->s.start ) );
        sync->first_drop = 0;
        sync->drop_count = 0;
    }

    /*
     * Track the video sequence number locally so that we can sync the audio
     * to it using the sequence number as well as the PTS.
     */
    sync->video_sequence = cur->sequence;

    /* Process subtitles that apply to this video frame */
    // NOTE: There is no logic in either subtitle-sync algorithm that waits
    //       for the subtitle-decoder if it is lagging behind the video-decoder.
    //
    //       Therefore there is the implicit assumption that the subtitle-decoder
    //       is always faster than the video-decoder. This assumption is definitely
    //       incorrect in some cases where the SSA subtitle decoder is used.
    for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
    {
        int64_t sub_start, sub_stop, duration;

        subtitle = hb_list_item( job->list_subtitle, i );

        // Sanitize subtitle start and stop times, then pass to
        // muxer or renderer filter.
        while ( ( sub = hb_fifo_see( subtitle->fifo_raw ) ) != NULL )
        {
            hb_lock( pv->common->mutex );
            sub_start = sub->s.start - pv->common->video_pts_slip;
            hb_unlock( pv->common->mutex );

            if (sub->s.stop == -1)
            {
                if (subtitle->config.dest != RENDERSUB &&
                    hb_fifo_size( subtitle->fifo_raw ) < 2)
                {
                    // For passthru subs, we want to wait for the
                    // next subtitle so that we can fill in the stop time.
                    // This way the muxer can compute the duration of
                    // the subtitle.
                    //
                    // For render subs, we need to ensure that they
                    // get to the renderer before the associated video
                    // that they are to be applied to. It is the
                    // responsibility of the renderer to handle
                    // stop == -1.
                    break;
                }
            }

            sub = hb_fifo_get( subtitle->fifo_raw );
            if ( sub->s.stop == -1 )
            {
                hb_buffer_t *next;
                next = hb_fifo_see( subtitle->fifo_raw );
                if (next != NULL)
                    sub->s.stop = next->s.start;
            }
            // Need to re-write subtitle timestamps to account
            // for any slippage.
            sub_stop = -1;
            if ( sub->s.stop != -1 )
            {
                duration = sub->s.stop - sub->s.start;
                sub_stop = sub_start + duration;
            }

            sub->s.start = sub_start;
            sub->s.stop = sub_stop;

            hb_fifo_push( subtitle->fifo_out, sub );
        }
    }

    /*
     * Adjust the pts of the current frame so that it's contiguous
     * with the previous frame. The start time of the current frame
     * has to be the end time of the previous frame and the stop
     * time has to be the start of the next frame. We don't
     * make any adjustments to the source timestamps other than removing
     * the clock offsets (which also removes pts discontinuities).
     * This means we automatically encode at the source's frame rate.
     * MP2 uses an implicit duration (frames end when the next frame
     * starts) but more advanced containers like MP4 use an explicit
     * duration. Since we're looking ahead one frame we set the
     * explicit stop time from the start time of the next frame.
     */
    *buf_out = cur;
    int64_t duration = next_start - cur->s.start;
    sync->cur = cur = next;
    cur->sub = NULL;
    cur->s.start -= pv->common->video_pts_slip;
    cur->s.stop -= pv->common->video_pts_slip;
    sync->pts_skip = 0;
    if ( duration <= 0 )
    {
        hb_log( "sync: invalid video duration %"PRId64", start %"PRId64", next %"PRId64"",
                duration, cur->s.start, next_start );
    }

    (*buf_out)->s.start = sync->next_start;
    sync->next_start += duration;
    (*buf_out)->s.stop = sync->next_start;

    if ( sync->chap_mark )
    {
        // we have a pending chapter mark from a recent drop - put it on this
        // buffer (this may make it one frame late but we can't do any better).
        (*buf_out)->s.new_chap = sync->chap_mark;
        sync->chap_mark = 0;
    }

    /* Update UI */
    UpdateState( w );

    return HB_WORK_OK;
}