Example #1
void hb_buffer_pool_free( void )
{
    int i;
    int count;
    int64_t freed = 0;
    hb_buffer_t *b;

    hb_lock(buffers.lock);

    for( i = 10; i < 26; ++i)
    {
        count = 0;
        while( ( b = hb_fifo_get(buffers.pool[i]) ) )
        {
            freed += b->alloc;
            if( b->data )
            {
                free( b->data );
            }
            free( b );
            count++;
        }
        if ( count )
        {
            hb_deep_log( 2, "Freed %d buffers of size %d", count,
                    buffers.pool[i]->buffer_size);
        }
    }

    hb_deep_log( 2, "Allocated %"PRId64" bytes of buffers on this pass and Freed %"PRId64" bytes, "
           "%"PRId64" bytes leaked", buffers.allocated, freed, buffers.allocated - freed);
    buffers.allocated = 0;
    hb_unlock(buffers.lock);
}
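Note on the loop bounds: indices 10 through 25 appear to correspond to libhb's power-of-two buffer pools, from 2^10 (1 KiB) up to 2^25 (32 MiB). As a rough sketch only — the real mapping is done by size_to_pool() in fifo.c, which is not shown in these examples — the pool index for a given allocation could be derived like this (helper name is hypothetical):

// Hypothetical sketch: map a requested allocation size to a power-of-two
// pool index in [10, 25], mirroring the loop bounds in hb_buffer_pool_free().
static int sketch_pool_index( int alloc )
{
    int i;
    for( i = 10; i < 26; ++i )
    {
        if( alloc <= (1 << i) )
            return i;
    }
    return -1; /* too large for any pool; the caller allocates directly */
}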
Example #2
// Removes a list of packets from the specified FIFO that were stored as a single element.
hb_buffer_t *hb_fifo_get_list_element( hb_fifo_t *fifo )
{
    hb_buffer_t *container = hb_fifo_get( fifo );
    if( container == NULL )
    {
        // Nothing queued; hand an empty list back to the caller.
        return NULL;
    }
    // XXX: Using an arbitrary hb_buffer_t pointer (other than 'next')
    //      to carry the list inside a single "container" buffer
    hb_buffer_t *buffer_list = container->sub;
    hb_buffer_close( &container );
    
    return buffer_list;
}
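For context, the push side of this pattern would wrap a buffer list in a throwaway container whose sub pointer carries the list, so the whole list occupies a single fifo slot. A minimal sketch, assuming only the hb_buffer_init() and hb_fifo_push() calls that appear elsewhere in these examples (the actual HandBrake helper, if any, may differ):

// Hypothetical sketch of the push side matching hb_fifo_get_list_element():
// the list rides in the container's 'sub' pointer.
static void sketch_fifo_push_list_element( hb_fifo_t *fifo,
                                           hb_buffer_t *buffer_list )
{
    hb_buffer_t *container = hb_buffer_init( 0 );
    if( container != NULL )
    {
        container->sub = buffer_list;
        hb_fifo_push( fifo, container );
    }
}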
Example #3
void hb_fifo_flush( hb_fifo_t * f )
{
    hb_buffer_t * b;

    while( ( b = hb_fifo_get( f ) ) )
    {
        hb_buffer_close( &b );
    }
    hb_lock( f->lock );
    hb_cond_signal( f->cond_empty );
    hb_cond_signal( f->cond_full );
    hb_unlock( f->lock );
}
Example #4
void hb_fifo_close( hb_fifo_t ** _f )
{
    hb_fifo_t   * f = *_f;
    hb_buffer_t * b;

    hb_deep_log( 2, "fifo_close: trashing %d buffer(s)", hb_fifo_size( f ) );
    while( ( b = hb_fifo_get( f ) ) )
    {
        hb_buffer_close( &b );
    }

    hb_lock_close( &f->lock );
    hb_cond_close( &f->cond_empty );
    hb_cond_close( &f->cond_full );
    free( f );

    *_f = NULL;
}
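Passing the fifo by double pointer lets hb_fifo_close() clear the caller's handle, so a stale pointer cannot be used after the fifo is freed. A hypothetical caller-side sketch (hb_fifo_close() itself does not check for NULL, so the guard lives in the wrapper):

// Hypothetical sketch: guarded teardown around hb_fifo_close(). After the
// call, *f is NULL, so calling this again on the same handle is a no-op.
static void sketch_fifo_teardown( hb_fifo_t **f )
{
    if( f != NULL && *f != NULL )
    {
        hb_fifo_close( f ); /* trashes queued buffers, frees the fifo, NULLs *f */
    }
}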
Example #5
/***********************************************************************
 * syncAudioWork
 ***********************************************************************
 *
 **********************************************************************/
static int syncAudioWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
                       hb_buffer_t ** buf_out )
{
    hb_work_private_t * pv = w->private_data;
    hb_job_t        * job = pv->job;
    hb_sync_audio_t * sync = &pv->type.audio;
    hb_buffer_t     * buf;
    int64_t start;

    *buf_out = NULL;
    buf = *buf_in;
    *buf_in = NULL;
    /* If the next buffer is an EOF, send it downstream */
    if ( buf->size <= 0 )
    {
        hb_buffer_close( &buf );
        *buf_out = hb_buffer_init( 0 );
        pv->common->first_pts[sync->index+1] = INT64_MAX - 1;
        return HB_WORK_DONE;
    }

    /* Wait till we can determine the initial pts of all streams */
    if( pv->common->pts_offset == INT64_MIN )
    {
        pv->common->first_pts[sync->index+1] = buf->s.start;
        hb_lock( pv->common->mutex );
        while( pv->common->pts_offset == INT64_MIN )
        {
            // Full fifos will make us wait forever, so get the
            // pts offset from the available streams if full
            if (hb_fifo_is_full(w->fifo_in))
            {
                getPtsOffset( w );
                hb_cond_broadcast( pv->common->next_frame );
            }
            else if ( checkPtsOffset( w ) )
                hb_cond_broadcast( pv->common->next_frame );
            else
                hb_cond_timedwait( pv->common->next_frame, pv->common->mutex, 200 );
        }
        hb_unlock( pv->common->mutex );
    }

    /* Wait for start frame if doing point-to-point */
    hb_lock( pv->common->mutex );
    start = buf->s.start - pv->common->audio_pts_slip;
    while ( !pv->common->start_found )
    {
        if ( pv->common->audio_pts_thresh < 0 )
        {
            // I would initialize this in hb_sync_init, but 
            // job->pts_to_start can be modified by reader 
            // after hb_sync_init is called.
            pv->common->audio_pts_thresh = job->pts_to_start;
        }
        if ( buf->s.start < pv->common->audio_pts_thresh )
        {
            hb_buffer_close( &buf );
            hb_unlock( pv->common->mutex );
            return HB_WORK_OK;
        }
        while ( !pv->common->start_found && 
                buf->s.start >= pv->common->audio_pts_thresh )
        {
            hb_cond_timedwait( pv->common->next_frame, pv->common->mutex, 10 );
            // There is an unfortunate unavoidable deadlock that can occur.
            // Since we need to wait for a specific frame in syncVideoWork,
            // syncAudioWork can be stalled indefinitely.  The video decoder
            // often drops multiple of the initial frames after starting
            // because they require references that have not been decoded yet.
            // This allows a lot of audio to be queued in the fifo and the
            // audio fifo fills before we get a single video frame.  So we
            // must drop some audio to unplug the pipeline and allow the first
            // video frame to be decoded.
            if ( hb_fifo_is_full(w->fifo_in) )
            {
                hb_buffer_t *tmp;
                // Also release the buffer currently held; only the newest
                // audio buffer pulled from the fifo is kept.
                hb_buffer_close( &buf );
                tmp = buf = hb_fifo_get( w->fifo_in );
                while ( tmp )
                {
                    tmp = hb_fifo_get( w->fifo_in );
                    if ( tmp )
                    {
                        hb_buffer_close( &buf );
                        buf = tmp;
                    }
                }
            }
        }
        start = buf->s.start - pv->common->audio_pts_slip;
    }
    if ( start < 0 )
    {
        hb_buffer_close( &buf );
        hb_unlock( pv->common->mutex );
        return HB_WORK_OK;
    }
    hb_unlock( pv->common->mutex );

    if( job->frame_to_stop && pv->common->count_frames >= job->frame_to_stop )
    {
        hb_buffer_close( &buf );
        *buf_out = hb_buffer_init( 0 );
        return HB_WORK_DONE;
    }

    if( job->pts_to_stop && sync->next_start >= job->pts_to_stop )
    {
        hb_buffer_close( &buf );
        *buf_out = hb_buffer_init( 0 );
        return HB_WORK_DONE;
    }

    // Audio time went backwards.
    // If our output clock is more than a half frame ahead of the
    // input clock, drop this frame to move closer to sync.
    // Otherwise, drop frames until the input clock matches the output clock.
    if ( sync->next_start - start > 90*15 )
    {
        // Discard data that's in the past.
        if ( sync->first_drop == 0 )
        {
            sync->first_drop = start;
        }
        ++sync->drop_count;
        hb_buffer_close( &buf );
        return HB_WORK_OK;
    }
    if ( sync->first_drop )
    {
        // we were dropping old data but input buf time is now current
        hb_log( "sync: audio 0x%x time went backwards %d ms, dropped %d frames "
                "(start %"PRId64", next %"PRId64")", w->audio->id,
                (int)( sync->next_start - sync->first_drop ) / 90,
                sync->drop_count, sync->first_drop, (int64_t)sync->next_start );
        sync->first_drop = 0;
        sync->drop_count = 0;
    }
    if ( start - sync->next_start >= (90 * 70) )
    {
        if ( start - sync->next_start > (90000LL * 60) )
        {
            // there's a gap of more than a minute between the last
            // frame and this. assume we got a corrupted timestamp
            // and just drop the next buf.
            hb_log( "sync: %d minute time gap in audio 0x%x - dropping buf"
                    "  start %"PRId64", next %"PRId64,
                    (int)((start - sync->next_start) / (90000*60)),
                    w->audio->id, start, (int64_t)sync->next_start );
            hb_buffer_close( &buf );
            return HB_WORK_OK;
        }
        /*
         * there's a gap of at least 70ms between the last
         * frame we processed & the next. Fill it with silence.
         * Or in the case of DCA, skip some frames from the
         * other streams.
         */
        if ( sync->drop_video_to_sync )
        {
            hb_log( "sync: audio gap %d ms. Skipping frames. Audio 0x%x"
                    "  start %"PRId64", next %"PRId64,
                    (int)((start - sync->next_start) / 90),
                    w->audio->id, start, (int64_t)sync->next_start );
            hb_lock( pv->common->mutex );
            pv->common->audio_pts_slip += (start - sync->next_start);
            pv->common->video_pts_slip += (start - sync->next_start);
            hb_unlock( pv->common->mutex );
            *buf_out = OutputAudioFrame( w->audio, buf, sync );
            return HB_WORK_OK;
        }
        hb_log( "sync: adding %d ms of silence to audio 0x%x"
                "  start %"PRId64", next %"PRId64,
                (int)((start - sync->next_start) / 90),
                w->audio->id, start, (int64_t)sync->next_start );
        InsertSilence( w, start - sync->next_start );
    }

    /*
     * When we get here we've taken care of all the dups and gaps in the
     * audio stream and are ready to inject the next input frame into
     * the output stream.
     */
    *buf_out = OutputAudioFrame( w->audio, buf, sync );
    return HB_WORK_OK;
}
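All of the timestamp arithmetic in syncAudioWork() is in the MPEG 90 kHz clock that libhb uses for buffer start/stop times: 90 ticks per millisecond, 90000 per second. A self-contained sketch of the conversions behind the thresholds above (helper names are hypothetical):

#include <stdint.h>

// 90 kHz clock conversions. The magic numbers in syncAudioWork() decode as:
//   90 * 15      -> 15 ms : output clock ahead of input; drop this frame
//   90 * 70      -> 70 ms : gap filled with silence (or frames skipped)
//   90000LL * 60 -> 60 s  : gap assumed to be a corrupted timestamp
static int64_t sketch_ms_to_90khz( int64_t ms )    { return ms * 90; }
static int64_t sketch_90khz_to_ms( int64_t ticks ) { return ticks / 90; }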
Example #6
/***********************************************************************
 * syncVideoWork
 ***********************************************************************
 *
 **********************************************************************/
int syncVideoWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
              hb_buffer_t ** buf_out )
{
    hb_buffer_t * cur, * next, * sub = NULL;
    hb_work_private_t * pv = w->private_data;
    hb_job_t          * job = pv->job;
    hb_subtitle_t     * subtitle;
    hb_sync_video_t   * sync = &pv->type.video;
    int i;
    int64_t next_start;

    *buf_out = NULL;
    next = *buf_in;
    *buf_in = NULL;

    /* Wait till we can determine the initial pts of all streams */
    if( next->size != 0 && pv->common->pts_offset == INT64_MIN )
    {
        pv->common->first_pts[0] = next->s.start;
        hb_lock( pv->common->mutex );
        while( pv->common->pts_offset == INT64_MIN )
        {
            // Full fifos will make us wait forever, so get the
            // pts offset from the available streams if full
            if ( hb_fifo_is_full( job->fifo_raw ) )
            {
                getPtsOffset( w );
                hb_cond_broadcast( pv->common->next_frame );
            }
            else if ( checkPtsOffset( w ) )
                hb_cond_broadcast( pv->common->next_frame );
            else
                hb_cond_timedwait( pv->common->next_frame, pv->common->mutex, 200 );
        }
        hb_unlock( pv->common->mutex );
    }

    hb_lock( pv->common->mutex );
    next_start = next->s.start - pv->common->video_pts_slip;
    hb_unlock( pv->common->mutex );

    /* Wait for start of point-to-point encoding */
    if( !pv->common->start_found )
    {
        if( next->size == 0 )
        {
            *buf_out = next;
            pv->common->start_found = 1;
            pv->common->first_pts[0] = INT64_MAX - 1;
            hb_cond_broadcast( pv->common->next_frame );

            /*
             * Push through any subtitle EOFs in case they 
             * were not synced through.
             */
            for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
            {
                subtitle = hb_list_item( job->list_subtitle, i );
                if( subtitle->config.dest == PASSTHRUSUB )
                {
                    hb_fifo_push( subtitle->fifo_out, hb_buffer_init( 0 ) );
                }
            }
            return HB_WORK_DONE;
        }
        if ( pv->common->count_frames < job->frame_to_start ||
             next->s.start < job->pts_to_start )
        {
            // Flush any subtitles that have pts prior to the
            // current frame
            for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
            {
                subtitle = hb_list_item( job->list_subtitle, i );
                while( ( sub = hb_fifo_see( subtitle->fifo_raw ) ) )
                {
                    if ( sub->s.start > next->s.start )
                        break;
                    sub = hb_fifo_get( subtitle->fifo_raw );
                    hb_buffer_close( &sub );
                }
            }
            hb_lock( pv->common->mutex );
            // Tell the audio threads what must be dropped
            pv->common->audio_pts_thresh = next_start + pv->common->video_pts_slip;
            hb_cond_broadcast( pv->common->next_frame );
            hb_unlock( pv->common->mutex );

            UpdateSearchState( w, next_start );
            hb_buffer_close( &next );

            return HB_WORK_OK;
        }
        hb_lock( pv->common->mutex );
        pv->common->audio_pts_thresh = 0;
        pv->common->audio_pts_slip += next_start;
        pv->common->video_pts_slip += next_start;
        next_start = 0;
        pv->common->start_found = 1;
        pv->common->count_frames = 0;
        hb_cond_broadcast( pv->common->next_frame );
        hb_unlock( pv->common->mutex );
        sync->st_first = 0;
    }

    if( !sync->cur )
    {
        sync->cur = next;
        if (next->size == 0)
        {
            /* we got an end-of-stream as our first video packet? 
             * Feed it downstream & signal that we're done. 
             */
            *buf_out = next;

            pv->common->start_found = 1;
            pv->common->first_pts[0] = INT64_MAX - 1;
            hb_cond_broadcast( pv->common->next_frame );

            /*
             * Push through any subtitle EOFs in case they 
             * were not synced through.
             */
            for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
            {
                subtitle = hb_list_item( job->list_subtitle, i );
                if( subtitle->config.dest == PASSTHRUSUB )
                {
                    hb_fifo_push( subtitle->fifo_out, hb_buffer_init( 0 ) );
                }
            }
            return HB_WORK_DONE;
        }
        return HB_WORK_OK;
    }
    cur = sync->cur;
    /* At this point we have a frame to process. Let's check
        1) if we will be able to push into the fifo ahead
        2) if the next frame is there already, since we need it to
           compute the duration of the current frame*/
    if( next->size == 0 )
    {
        hb_buffer_close( &next );

        pv->common->first_pts[0] = INT64_MAX - 1;
        cur->s.start = sync->next_start;
        cur->s.stop = cur->s.start + 90000. / ((double)job->vrate / (double)job->vrate_base);
        sync->next_start += cur->s.stop - cur->s.start;

        /* Make sure last frame is reflected in frame count */
        pv->common->count_frames++;

        /* Push the frame to the renderer */
        *buf_out = cur;
        sync->cur = NULL;

        /* we got an end-of-stream. Feed it downstream & signal that
         * we're done. Note that this means we drop the final frame of
         * video (we don't know its duration). On DVDs the final frame
         * is often strange and dropping it seems to be a good idea. */
        (*buf_out)->next = hb_buffer_init( 0 );

        /*
         * Push through any subtitle EOFs in case they were not synced through.
         */
        for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
        {
            subtitle = hb_list_item( job->list_subtitle, i );
            if( subtitle->config.dest == PASSTHRUSUB )
            {
                hb_fifo_push( subtitle->fifo_out, hb_buffer_init( 0 ) );
            }
        }
        pv->common->start_found = 1;
        hb_cond_broadcast( pv->common->next_frame );
        return HB_WORK_DONE;
    }

    /* Check for end of point-to-point frame encoding */
    if( job->frame_to_stop && pv->common->count_frames > job->frame_to_stop )
    {
        // Drop an empty buffer into our output to ensure that things
        // get flushed all the way out.
        hb_buffer_close( &sync->cur );
        hb_buffer_close( &next );
        *buf_out = hb_buffer_init( 0 );
        hb_log( "sync: reached %d frames, exiting early",
                pv->common->count_frames );

        /*
         * Push through any subtitle EOFs in case they were not synced through.
         */
        for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
        {
            subtitle = hb_list_item( job->list_subtitle, i );
            if( subtitle->config.dest == PASSTHRUSUB )
            {
                hb_fifo_push( subtitle->fifo_out, hb_buffer_init( 0 ) );
            }
        }
        return HB_WORK_DONE;
    }

    /* Check for end of point-to-point pts encoding */
    if( job->pts_to_stop && sync->next_start >= job->pts_to_stop )
    {
        // Drop an empty buffer into our output to ensure that things
        // get flushed all the way out.
        hb_log( "sync: reached pts %"PRId64", exiting early", cur->s.start );
        hb_buffer_close( &sync->cur );
        hb_buffer_close( &next );
        *buf_out = hb_buffer_init( 0 );

        /*
         * Push through any subtitle EOFs in case they were not synced through.
         */
        for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
        {
            subtitle = hb_list_item( job->list_subtitle, i );
            if( subtitle->config.dest == PASSTHRUSUB )
            {
                hb_fifo_push( subtitle->fifo_out, hb_buffer_init( 0 ) );
            }
        }
        return HB_WORK_DONE;
    }

    if( sync->first_frame )
    {
        /* This is our first frame */
        if ( cur->s.start > 0 )
        {
            /*
             * The first pts from a dvd should always be zero but
             * can be non-zero with a transport or program stream since
             * we're not guaranteed to start on an IDR frame. If we get
             * a non-zero initial PTS extend its duration so it behaves
             * as if it started at zero so that our audio timing will
             * be in sync.
             */
            hb_log( "sync: first pts is %"PRId64, cur->s.start );
            cur->s.start = 0;
        }
        sync->first_frame = 0;
    }

    /*
     * since the first frame is always 0 and the upstream reader code
     * is taking care of adjusting for pts discontinuities, we just have
     * to deal with the next frame's start being in the past. This can
     * happen when the PTS is adjusted after data loss but video frame
     * reordering causes some frames with the old clock to appear after
     * the clock change. This creates frames that overlap in time which
     * looks to us like time going backward. The downstream muxing code
     * can deal with overlaps of up to a frame time but anything larger
     * we handle by dropping frames here.
     */
    if ( next_start - cur->s.start <= 0 )
    {
        if ( sync->first_drop == 0 )
        {
            sync->first_drop = next_start;
        }
        ++sync->drop_count;
        if ( next->s.new_chap )
        {
            // don't drop a chapter mark when we drop the buffer
            sync->chap_mark = next->s.new_chap;
        }
        hb_buffer_close( &next );
        return HB_WORK_OK;
    }
    if ( sync->first_drop )
    {
        hb_log( "sync: video time didn't advance - dropped %d frames "
                "(delta %d ms, current %"PRId64", next %"PRId64", dur %d)",
                sync->drop_count, (int)( cur->s.start - sync->first_drop ) / 90,
                cur->s.start, next_start, (int)( next_start - cur->s.start ) );
        sync->first_drop = 0;
        sync->drop_count = 0;
    }

    /*
     * Track the video sequence number locally so that we can sync the audio
     * to it using the sequence number as well as the PTS.
     */
    sync->video_sequence = cur->sequence;
    
    /* Process subtitles that apply to this video frame */
    // NOTE: There is no logic in either subtitle-sync algorithm that waits
    // for the subtitle-decoder if it is lagging behind the video-decoder.
    //       
    // Therefore there is the implicit assumption that the subtitle-decoder 
    // is always faster than the video-decoder. This assumption is definitely 
    // incorrect in some cases where the SSA subtitle decoder is used.

    for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
    {
        int64_t sub_start, sub_stop, duration;

        subtitle = hb_list_item( job->list_subtitle, i );
        
        // Sanitize subtitle start and stop times, then pass to 
        // muxer or renderer filter.
        while ( ( sub = hb_fifo_see( subtitle->fifo_raw ) ) != NULL )
        {
            hb_lock( pv->common->mutex );
            sub_start = sub->s.start - pv->common->video_pts_slip;
            hb_unlock( pv->common->mutex );

            if (sub->s.stop == -1)
            {
                if (subtitle->config.dest != RENDERSUB &&
                    hb_fifo_size( subtitle->fifo_raw ) < 2)
                {
                    // For passthru subs, we want to wait for the
                    // next subtitle so that we can fill in the stop time.
                    // This way the muxer can compute the duration of
                    // the subtitle.
                    //
                    // For render subs, we need to ensure that they
                    // get to the renderer before the associated video
                    // that they are to be applied to.  It is the 
                    // responsibility of the renderer to handle
                    // stop == -1.
                    break;
                }
            }

            sub = hb_fifo_get( subtitle->fifo_raw );
            if ( sub->s.stop == -1 )
            {
                // Borrow the stop time from the following subtitle's start,
                // if one is already queued.
                hb_buffer_t *next_sub = hb_fifo_see( subtitle->fifo_raw );
                if ( next_sub != NULL )
                    sub->s.stop = next_sub->s.start;
            }
            // Need to re-write subtitle timestamps to account
            // for any slippage.
            sub_stop = -1;
            if ( sub->s.stop != -1 )
            {
                duration = sub->s.stop - sub->s.start;
                sub_stop = sub_start + duration;
            }

            sub->s.start = sub_start;
            sub->s.stop = sub_stop;

            hb_fifo_push( subtitle->fifo_out, sub );
        }
    }

    /*
     * Adjust the pts of the current frame so that it's contiguous
     * with the previous frame. The start time of the current frame
     * has to be the end time of the previous frame and the stop
     * time has to be the start of the next frame.  We don't
     * make any adjustments to the source timestamps other than removing
     * the clock offsets (which also removes pts discontinuities).
     * This means we automatically encode at the source's frame rate.
     * MP2 uses an implicit duration (frames end when the next frame
     * starts) but more advanced containers like MP4 use an explicit
     * duration. Since we're looking ahead one frame we set the
     * explicit stop time from the start time of the next frame.
     */
    *buf_out = cur;
    int64_t duration = next_start - cur->s.start;
    sync->cur = cur = next;
    cur->sub = NULL;
    cur->s.start -= pv->common->video_pts_slip;
    cur->s.stop -= pv->common->video_pts_slip;
    sync->pts_skip = 0;
    if ( duration <= 0 )
    {
        hb_log( "sync: invalid video duration %"PRId64", start %"PRId64", next %"PRId64"",
                duration, cur->s.start, next_start );
    }

    (*buf_out)->s.start = sync->next_start;
    sync->next_start += duration;
    (*buf_out)->s.stop = sync->next_start;

    if ( sync->chap_mark )
    {
        // we have a pending chapter mark from a recent drop - put it on this
        // buffer (this may make it one frame late but we can't do any better).
        (*buf_out)->s.new_chap = sync->chap_mark;
        sync->chap_mark = 0;
    }

    /* Update UI */
    UpdateState( w );

    return HB_WORK_OK;
}
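The retiming at the end of syncVideoWork() is the key step: each output frame starts where the previous one stopped, and its duration comes from the lookahead frame's source start time. A small hypothetical illustration of just that arithmetic, independent of libhb types:

#include <stdint.h>

// Hypothetical sketch of the lookahead retiming: the output clock advances by
// the gap between the current and next source start times, which becomes the
// emitted frame's [start, stop) interval.
typedef struct { int64_t start; int64_t stop; } sketch_times_t;

static sketch_times_t sketch_retime( int64_t *out_clock,
                                     int64_t cur_src_start,
                                     int64_t next_src_start )
{
    sketch_times_t t;
    int64_t duration = next_src_start - cur_src_start; /* from lookahead */
    t.start = *out_clock;
    *out_clock += duration;
    t.stop  = *out_clock;
    return t;
}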
Example #7
hb_buffer_t * hb_buffer_init( int size )
{
    hb_buffer_t * b;
    // Certain libraries (hrm ffmpeg) expect buffers passed to them to
    // end on certain alignments (ffmpeg is 8). So allocate some extra bytes.
    // Note that we can't simply align the end of our buffer because
    // sometimes we feed data to these libraries starting from arbitrary
    // points within the buffer.
    int alloc = size + 16;
    hb_fifo_t *buffer_pool = size_to_pool( alloc );

    if( buffer_pool )
    {
        b = hb_fifo_get( buffer_pool );

        if( b )
        {
            /*
             * Zero the contents of the buffer, would be nice if we
             * didn't have to do this.
             */
            uint8_t *data = b->data;
            memset( b, 0, sizeof(hb_buffer_t) );
            b->alloc = buffer_pool->buffer_size;
            b->size = size;
            b->data = data;
            return( b );
        }
    }

    /*
     * No existing buffers, create a new one
     */
    if( !( b = calloc( 1, sizeof( hb_buffer_t ) ) ) )
    {
        hb_log( "out of memory" );
        return NULL;
    }

    b->size  = size;
    b->alloc  = buffer_pool ? buffer_pool->buffer_size : alloc;

    if (size)
    {
#if defined( SYS_DARWIN ) || defined( SYS_FREEBSD ) || defined( SYS_MINGW )
        b->data  = malloc( b->alloc );
#elif defined( SYS_CYGWIN )
        /* FIXME */
        b->data  = malloc( b->alloc + 17 );
#else
        b->data  = memalign( 16, b->alloc );
#endif
        if( !b->data )
        {
            hb_log( "out of memory" );
            free( b );
            return NULL;
        }
        hb_lock(buffers.lock);
        buffers.allocated += b->alloc;
        hb_unlock(buffers.lock);
    }
    return b;
}
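A minimal usage sketch for the allocator above, using only hb_buffer_init() and hb_buffer_close() as they appear in these examples (helper name is hypothetical; hb_buffer_t comes from the libhb headers):

#include <stdint.h>
#include <string.h>

// Hypothetical sketch: allocate a payload buffer, copy data into it, and let
// the consumer release it later with hb_buffer_close(), which may return the
// allocation to a pool rather than freeing it.
static hb_buffer_t * sketch_make_packet( const uint8_t *src, int size )
{
    hb_buffer_t *b = hb_buffer_init( size );
    if( b == NULL )
        return NULL;                  /* hb_buffer_init already logged it */
    if( size > 0 )
        memcpy( b->data, src, size ); /* b->alloc is at least size + 16 */
    return b;
}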