void hb_fifo_close( hb_fifo_t ** _f )
{
    hb_fifo_t   * f = *_f;
    hb_buffer_t * b;

    hb_deep_log( 2, "fifo_close: trashing %d buffer(s)", hb_fifo_size( f ) );
    while( ( b = hb_fifo_get( f ) ) )
    {
        hb_buffer_close( &b );
    }

    hb_lock_close( &f->lock );
    hb_cond_close( &f->cond_empty );
    hb_cond_close( &f->cond_full );
    free( f );

    *_f = NULL;
}
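
/*
 * Overview of syncVideoWork (below): it is the video side of the sync
 * work object.  It waits until the initial pts of every stream is known,
 * drops or passes frames around the point-to-point start/stop boundaries,
 * retimes subtitles that ride along with the video, and hands each frame
 * downstream with a contiguous start/stop derived from the arrival of the
 * following frame.  Timestamps are handled in a "slipped" clock, i.e. with
 * pv->common->video_pts_slip subtracted, so output timestamps are rebased
 * (for example to start near zero after a point-to-point start).
 */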
/***********************************************************************
 * syncVideoWork
 ***********************************************************************
 *
 **********************************************************************/
int syncVideoWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
                   hb_buffer_t ** buf_out )
{
    hb_buffer_t * cur, * next, * sub = NULL;
    hb_work_private_t * pv = w->private_data;
    hb_job_t * job = pv->job;
    hb_subtitle_t * subtitle;
    hb_sync_video_t * sync = &pv->type.video;
    int i;
    int64_t next_start;

    *buf_out = NULL;
    next = *buf_in;
    *buf_in = NULL;

    /* Wait till we can determine the initial pts of all streams */
    if( next->size != 0 && pv->common->pts_offset == INT64_MIN )
    {
        pv->common->first_pts[0] = next->s.start;
        hb_lock( pv->common->mutex );
        while( pv->common->pts_offset == INT64_MIN )
        {
            // Full fifos will make us wait forever, so get the
            // pts offset from the available streams if full
            if ( hb_fifo_is_full( job->fifo_raw ) )
            {
                getPtsOffset( w );
                hb_cond_broadcast( pv->common->next_frame );
            }
            else if ( checkPtsOffset( w ) )
                hb_cond_broadcast( pv->common->next_frame );
            else
                hb_cond_timedwait( pv->common->next_frame, pv->common->mutex, 200 );
        }
        hb_unlock( pv->common->mutex );
    }

    hb_lock( pv->common->mutex );
    next_start = next->s.start - pv->common->video_pts_slip;
    hb_unlock( pv->common->mutex );

    /* Wait for start of point-to-point encoding */
    if( !pv->common->start_found )
    {
        hb_sync_video_t * sync = &pv->type.video;

        if( next->size == 0 )
        {
            *buf_out = next;
            pv->common->start_found = 1;
            pv->common->first_pts[0] = INT64_MAX - 1;
            hb_cond_broadcast( pv->common->next_frame );

            /*
             * Push through any subtitle EOFs in case they
             * were not synced through.
             */
            for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
            {
                subtitle = hb_list_item( job->list_subtitle, i );
                if( subtitle->config.dest == PASSTHRUSUB )
                {
                    hb_fifo_push( subtitle->fifo_out, hb_buffer_init( 0 ) );
                }
            }
            return HB_WORK_DONE;
        }
        if ( pv->common->count_frames < job->frame_to_start ||
             next->s.start < job->pts_to_start )
        {
            // Flush any subtitles that have pts prior to the
            // current frame
            for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
            {
                subtitle = hb_list_item( job->list_subtitle, i );
                while( ( sub = hb_fifo_see( subtitle->fifo_raw ) ) )
                {
                    if ( sub->s.start > next->s.start )
                        break;
                    sub = hb_fifo_get( subtitle->fifo_raw );
                    hb_buffer_close( &sub );
                }
            }

            hb_lock( pv->common->mutex );
            // Tell the audio threads what must be dropped
            pv->common->audio_pts_thresh = next_start + pv->common->video_pts_slip;
            hb_cond_broadcast( pv->common->next_frame );
            hb_unlock( pv->common->mutex );

            UpdateSearchState( w, next_start );
            hb_buffer_close( &next );
            return HB_WORK_OK;
        }

        hb_lock( pv->common->mutex );
        pv->common->audio_pts_thresh = 0;
        pv->common->audio_pts_slip += next_start;
        pv->common->video_pts_slip += next_start;
        next_start = 0;
        pv->common->start_found = 1;
        pv->common->count_frames = 0;
        hb_cond_broadcast( pv->common->next_frame );
        hb_unlock( pv->common->mutex );
        sync->st_first = 0;
    }
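
    /* sync->cur caches the previous video frame: a frame's duration (and
     * therefore its stop time) is only known once the following frame
     * arrives, so the very first frame is just stashed and we return
     * until the lookahead frame shows up. */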
    if( !sync->cur )
    {
        sync->cur = next;
        if (next->size == 0)
        {
            /* we got an end-of-stream as our first video packet?
             * Feed it downstream & signal that we're done.
             */
            *buf_out = next;
            pv->common->start_found = 1;
            pv->common->first_pts[0] = INT64_MAX - 1;
            hb_cond_broadcast( pv->common->next_frame );

            /*
             * Push through any subtitle EOFs in case they
             * were not synced through.
             */
            for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
            {
                subtitle = hb_list_item( job->list_subtitle, i );
                if( subtitle->config.dest == PASSTHRUSUB )
                {
                    hb_fifo_push( subtitle->fifo_out, hb_buffer_init( 0 ) );
                }
            }
            return HB_WORK_DONE;
        }
        return HB_WORK_OK;
    }
    cur = sync->cur;

    /* At this point we have a frame to process. Let's check
        1) if we will be able to push into the fifo ahead
        2) if the next frame is there already, since we need it to
           compute the duration of the current frame */
    if( next->size == 0 )
    {
        hb_buffer_close( &next );
        pv->common->first_pts[0] = INT64_MAX - 1;

        cur->s.start = sync->next_start;
        cur->s.stop = cur->s.start + 90000. / ((double)job->vrate / (double)job->vrate_base);
        sync->next_start += cur->s.stop - cur->s.start;

        /* Make sure last frame is reflected in frame count */
        pv->common->count_frames++;

        /* Push the frame to the renderer */
        *buf_out = cur;
        sync->cur = NULL;

        /* we got an end-of-stream. Feed it downstream & signal that
         * we're done. Note that this means we drop the final frame of
         * video (we don't know its duration). On DVDs the final frame
         * is often strange and dropping it seems to be a good idea.
         */
        (*buf_out)->next = hb_buffer_init( 0 );

        /*
         * Push through any subtitle EOFs in case they were not synced through.
         */
        for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
        {
            subtitle = hb_list_item( job->list_subtitle, i );
            if( subtitle->config.dest == PASSTHRUSUB )
            {
                hb_fifo_push( subtitle->fifo_out, hb_buffer_init( 0 ) );
            }
        }
        pv->common->start_found = 1;
        hb_cond_broadcast( pv->common->next_frame );
        return HB_WORK_DONE;
    }

    /* Check for end of point-to-point frame encoding */
    if( job->frame_to_stop && pv->common->count_frames > job->frame_to_stop )
    {
        // Drop an empty buffer into our output to ensure that things
        // get flushed all the way out.
        hb_buffer_close( &sync->cur );
        hb_buffer_close( &next );
        *buf_out = hb_buffer_init( 0 );
        hb_log( "sync: reached %d frames, exiting early",
                pv->common->count_frames );

        /*
         * Push through any subtitle EOFs in case they were not synced through.
         */
        for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
        {
            subtitle = hb_list_item( job->list_subtitle, i );
            if( subtitle->config.dest == PASSTHRUSUB )
            {
                hb_fifo_push( subtitle->fifo_out, hb_buffer_init( 0 ) );
            }
        }
        return HB_WORK_DONE;
    }

    /* Check for end of point-to-point pts encoding */
    if( job->pts_to_stop && sync->next_start >= job->pts_to_stop )
    {
        // Drop an empty buffer into our output to ensure that things
        // get flushed all the way out.
        hb_log( "sync: reached pts %"PRId64", exiting early", cur->s.start );
        hb_buffer_close( &sync->cur );
        hb_buffer_close( &next );
        *buf_out = hb_buffer_init( 0 );

        /*
         * Push through any subtitle EOFs in case they were not synced through.
         */
        for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
        {
            subtitle = hb_list_item( job->list_subtitle, i );
            if( subtitle->config.dest == PASSTHRUSUB )
            {
                hb_fifo_push( subtitle->fifo_out, hb_buffer_init( 0 ) );
            }
        }
        return HB_WORK_DONE;
    }
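
    /* From here on, cur is the frame being emitted and next is the
     * lookahead frame: EOF and the point-to-point stop conditions have
     * already been handled above, and next_start is next's start time
     * expressed in the slipped (output) clock. */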
    if( sync->first_frame )
    {
        /* This is our first frame */
        if ( cur->s.start > 0 )
        {
            /*
             * The first pts from a dvd should always be zero but
             * can be non-zero with a transport or program stream since
             * we're not guaranteed to start on an IDR frame. If we get
             * a non-zero initial PTS extend its duration so it behaves
             * as if it started at zero so that our audio timing will
             * be in sync.
             */
            hb_log( "sync: first pts is %"PRId64, cur->s.start );
            cur->s.start = 0;
        }
        sync->first_frame = 0;
    }

    /*
     * since the first frame is always 0 and the upstream reader code
     * is taking care of adjusting for pts discontinuities, we just have
     * to deal with the next frame's start being in the past. This can
     * happen when the PTS is adjusted after data loss but video frame
     * reordering causes some frames with the old clock to appear after
     * the clock change. This creates frames that overlap in time which
     * looks to us like time going backward. The downstream muxing code
     * can deal with overlaps of up to a frame time but anything larger
     * we handle by dropping frames here.
     */
    if ( next_start - cur->s.start <= 0 )
    {
        if ( sync->first_drop == 0 )
        {
            sync->first_drop = next_start;
        }
        ++sync->drop_count;
        if ( next->s.new_chap )
        {
            // don't drop a chapter mark when we drop the buffer
            sync->chap_mark = next->s.new_chap;
        }
        hb_buffer_close( &next );
        return HB_WORK_OK;
    }
    if ( sync->first_drop )
    {
        hb_log( "sync: video time didn't advance - dropped %d frames "
                "(delta %d ms, current %"PRId64", next %"PRId64", dur %d)",
                sync->drop_count, (int)( cur->s.start - sync->first_drop ) / 90,
                cur->s.start, next_start, (int)( next_start - cur->s.start ) );
        sync->first_drop = 0;
        sync->drop_count = 0;
    }

    /*
     * Track the video sequence number locally so that we can sync the audio
     * to it using the sequence number as well as the PTS.
     */
    sync->video_sequence = cur->sequence;

    /* Process subtitles that apply to this video frame */
    // NOTE: There is no logic in either subtitle-sync algorithm that waits
    // for the subtitle-decoder if it is lagging behind the video-decoder.
    //
    // Therefore there is the implicit assumption that the subtitle-decoder
    // is always faster than the video-decoder. This assumption is definitely
    // incorrect in some cases where the SSA subtitle decoder is used.
    for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
    {
        int64_t sub_start, sub_stop, duration;

        subtitle = hb_list_item( job->list_subtitle, i );

        // Sanitize subtitle start and stop times, then pass to
        // muxer or renderer filter.
        while ( ( sub = hb_fifo_see( subtitle->fifo_raw ) ) != NULL )
        {
            hb_lock( pv->common->mutex );
            sub_start = sub->s.start - pv->common->video_pts_slip;
            hb_unlock( pv->common->mutex );

            if (sub->s.stop == -1)
            {
                if (subtitle->config.dest != RENDERSUB &&
                    hb_fifo_size( subtitle->fifo_raw ) < 2)
                {
                    // For passthru subs, we want to wait for the
                    // next subtitle so that we can fill in the stop time.
                    // This way the muxer can compute the duration of
                    // the subtitle.
                    //
                    // For render subs, we need to ensure that they
                    // get to the renderer before the associated video
                    // that they are to be applied to. It is the
                    // responsibility of the renderer to handle
                    // stop == -1.
                    break;
                }
            }

            sub = hb_fifo_get( subtitle->fifo_raw );
            if ( sub->s.stop == -1 )
            {
                hb_buffer_t *next;
                next = hb_fifo_see( subtitle->fifo_raw );
                if (next != NULL)
                    sub->s.stop = next->s.start;
            }

            // Need to re-write subtitle timestamps to account
            // for any slippage.
            sub_stop = -1;
            if ( sub->s.stop != -1 )
            {
                duration = sub->s.stop - sub->s.start;
                sub_stop = sub_start + duration;
            }

            sub->s.start = sub_start;
            sub->s.stop = sub_stop;

            hb_fifo_push( subtitle->fifo_out, sub );
        }
    }
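
    /* Worked example of the timestamp arithmetic below (illustrative
     * numbers, not taken from a real stream): at 29.97 fps with contiguous
     * input, next_start - cur->s.start is 3003 ticks of the 90 kHz clock,
     * so cur goes out spanning [sync->next_start, sync->next_start + 3003)
     * and sync->next_start advances by 3003; next then becomes the new
     * cached frame with the pts slip removed from its timestamps. */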

    /*
     * Adjust the pts of the current frame so that it's contiguous
     * with the previous frame. The start time of the current frame
     * has to be the end time of the previous frame and the stop
     * time has to be the start of the next frame.  We don't
     * make any adjustments to the source timestamps other than removing
     * the clock offsets (which also removes pts discontinuities).
     * This means we automatically encode at the source's frame rate.
     * MP2 uses an implicit duration (frames end when the next frame
     * starts) but more advanced containers like MP4 use an explicit
     * duration. Since we're looking ahead one frame we set the
     * explicit stop time from the start time of the next frame.
     */
    *buf_out = cur;
    int64_t duration = next_start - cur->s.start;
    sync->cur = cur = next;
    cur->sub = NULL;
    cur->s.start -= pv->common->video_pts_slip;
    cur->s.stop -= pv->common->video_pts_slip;
    sync->pts_skip = 0;
    if ( duration <= 0 )
    {
        hb_log( "sync: invalid video duration %"PRId64", start %"PRId64", next %"PRId64"",
                duration, cur->s.start, next_start );
    }

    (*buf_out)->s.start = sync->next_start;
    sync->next_start += duration;
    (*buf_out)->s.stop = sync->next_start;

    if ( sync->chap_mark )
    {
        // we have a pending chapter mark from a recent drop - put it on this
        // buffer (this may make it one frame late but we can't do any better).
        (*buf_out)->s.new_chap = sync->chap_mark;
        sync->chap_mark = 0;
    }

    /* Update UI */
    UpdateState( w );

    return HB_WORK_OK;
}