Example #1
static int is_subtitle( hb_work_private_t *r, int id )
{
    int i;
    hb_subtitle_t *sub;

    for( i = 0; ( sub = hb_list_item( r->title->list_subtitle, i ) ); ++i )
    {
        if ( sub->id == id )
        {
            return 1;
        }
    }
    return 0;
}
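
The loop above leans on hb_list_item() returning NULL once the index runs off the end of the list, so no separate hb_list_count() call is needed. A minimal sketch of the same lookup idiom written against a generic element type (the hb_id_item_t type and find_by_id() helper are hypothetical; only the hb_list_* calls come from the examples on this page):

// Sketch only: any list whose elements start with an int id can be searched this way.
typedef struct { int id; } hb_id_item_t;

static void * find_by_id( hb_list_t * list, int id )
{
    int            i;
    hb_id_item_t * item;

    // hb_list_item() returns NULL for an out-of-range index, which ends the loop.
    for( i = 0; ( item = hb_list_item( list, i ) ); ++i )
    {
        if( item->id == id )
            return item;
    }
    return NULL;
}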
Example #2
File: opencl.c  Project: Doluci/HandBrake
void hb_opencl_info_print()
{
    /*
     * Note: this function should not log any warnings or errors.
     * Its only purpose is to list OpenCL-capable devices, so let's initialize
     * only what we absolutely need here, rather than calling library_open().
     */
    hb_opencl_library_t ocl, *opencl = &ocl;
    if ((opencl->library          = (void*)HB_OCL_DLOPEN)                                     == NULL ||
        (opencl->clGetDeviceIDs   = (void*)HB_OCL_DLSYM(opencl->library, "clGetDeviceIDs"  )) == NULL ||
        (opencl->clGetDeviceInfo  = (void*)HB_OCL_DLSYM(opencl->library, "clGetDeviceInfo" )) == NULL ||
        (opencl->clGetPlatformIDs = (void*)HB_OCL_DLSYM(opencl->library, "clGetPlatformIDs")) == NULL)
    {
        // zero or insufficient OpenCL support
        hb_log("OpenCL: library not available");
        goto end;
    }

    int i, idx;
    hb_list_t *device_list;
    hb_opencl_device_t *device;
    if ((device_list = hb_opencl_devices_list_get(opencl, CL_DEVICE_TYPE_ALL)) != NULL)
    {
        for (i = 0, idx = 1; i < hb_list_count(device_list); i++)
        {
            if ((device = hb_list_item(device_list, i)) != NULL)
            {
                // don't list CPU devices (always unsupported)
                if (!(device->type & CL_DEVICE_TYPE_CPU))
                {
                    hb_log("OpenCL device #%d: %s %s", idx++, device->vendor, device->name);
                    hb_log(" - OpenCL version: %s", device->version + 7 /* strlen("OpenCL ") */);
                    hb_log(" - driver version: %s", device->driver);
                    hb_log(" - device type:    %s%s",
                           device->type & CL_DEVICE_TYPE_CPU         ? "CPU"         :
                           device->type & CL_DEVICE_TYPE_GPU         ? "GPU"         :
                           device->type & CL_DEVICE_TYPE_CUSTOM      ? "Custom"      :
                           device->type & CL_DEVICE_TYPE_ACCELERATOR ? "Accelerator" : "Unknown",
                           device->type & CL_DEVICE_TYPE_DEFAULT     ? " (default)"  : "");
                    hb_log(" - supported:      %s",
                           hb_opencl_device_is_supported(device) ? "YES" : "no");
                }
            }
        }
        hb_opencl_devices_list_close(&device_list);
    }

end:
    hb_opencl_library_close(&opencl);
}
Example #3
File: reader.c  Project: rdp/HandBrake
static int is_audio( hb_reader_t *r, int id )
{
    int i;
    hb_audio_t *audio;

    for( i = 0; ( audio = hb_list_item( r->title->list_audio, i ) ); ++i )
    {
        if ( audio->id == id )
        {
            return 1;
        }
    }
    return 0;
}
Example #4
/***********************************************************************
 * reader_init
 ***********************************************************************
 *
 **********************************************************************/
static int64_t chapter_end_pts(hb_title_t * title, int chapter_end )
{
    hb_chapter_t * chapter;
    int64_t        duration;
    int            ii;

    duration = 0;
    for (ii = 0; ii < chapter_end; ii++)
    {
        chapter = hb_list_item(title->list_chapter, ii);
        duration += chapter->duration;
    }
    return duration;
}
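
chapter_end_pts() simply accumulates the duration of every chapter before chapter_end, so the return value is the timestamp (in 90 kHz ticks, the clock used elsewhere in these examples) at which that chapter finishes. A hedged usage sketch (title, job and the logging call are borrowed from the surrounding examples; the variable names are illustrative):

    // Sketch: work out the PTS window covered by the requested chapter range.
    int64_t start_pts = chapter_end_pts( title, job->chapter_start - 1 );
    int64_t stop_pts  = chapter_end_pts( title, job->chapter_end );
    hb_deep_log( 2, "chapter range spans %"PRId64" ticks (%.2f s)",
                 stop_pts - start_pts, ( stop_pts - start_pts ) / 90000. );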
Example #5
static int hb_qsv_filter_pre_work( hb_filter_object_t * filter,
                               hb_buffer_t ** buf_in,
                               hb_buffer_t ** buf_out ){
    hb_filter_private_t * pv = filter->private_data;
    hb_buffer_t * in = *buf_in;
    hb_buffer_t * out = *buf_out;
    int sts = 0;

    av_qsv_context* qsv = pv->job->qsv;

    if(!in->qsv_details.filter_details)
        in->qsv_details.filter_details = pv;

    if ( in->size <= 0 )
    {
        *buf_out = in;
        *buf_in = NULL;
        return HB_FILTER_DONE;
    }

    while(1){
        int ret = filter_pre_init(qsv,pv);
        if(ret >= 2)
            av_qsv_sleep(1);
        else
            break;
    }

    pv->pre.in  = in;
    pv->pre.out = in;

    sts = pre_process_frame(in, qsv, pv);

    if(sts){
        hb_list_add(pv->list,out);
    }

    if( hb_list_count(pv->list) ){
        *buf_out = hb_list_item(pv->list,0);
        hb_list_rem(pv->list,*buf_out);
        *buf_in = NULL;
    }
    else{
        *buf_in = NULL;
        *buf_out = in;
    }

    return HB_FILTER_OK;
}
Example #6
File: batch.c  Project: GTRsdk/HandBrake
/***********************************************************************
 * hb_batch_close
 ***********************************************************************
 * Closes and frees everything
 **********************************************************************/
void hb_batch_close( hb_batch_t ** _d )
{
    hb_batch_t * d = *_d;
    char       * filename;

    while ( ( filename = hb_list_item( d->list_file, 0 ) ) )
    {
        hb_list_rem( d->list_file, filename );
        free( filename );
    }
    hb_list_close( &d->list_file );
    free( d->path );
    free( d );
    *_d = NULL;
}
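
hb_batch_close() is the usual teardown shape for a list that owns heap-allocated items: keep taking item 0, remove it from the list, free it, and finally close the now-empty list. A generic sketch of the same idiom (free_list_items() is a hypothetical name, not a HandBrake function):

static void free_list_items( hb_list_t ** _list )
{
    void * item;

    if( _list == NULL || *_list == NULL )
        return;

    // Always take index 0; the remaining items shift down after hb_list_rem().
    while( ( item = hb_list_item( *_list, 0 ) ) )
    {
        hb_list_rem( *_list, item );
        free( item );
    }
    hb_list_close( _list );
}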
Example #7
File: opencl.c  Project: Doluci/HandBrake
static void hb_opencl_devices_list_close(hb_list_t **_list)
{
    if (_list != NULL)
    {
        hb_list_t *list = *_list;
        hb_opencl_device_t *device;
        while (list != NULL && hb_list_count(list) > 0)
        {
            if ((device = hb_list_item(list, 0)) != NULL)
            {
                hb_list_rem(list, device);
                free(device);
            }
        }
    }
    hb_list_close(_list);
}
Example #8
File: bd.c  Project: ming-hai/HandBrake
/***********************************************************************
 * hb_bd_main_feature
 **********************************************************************/
int hb_bd_main_feature( hb_bd_t * d, hb_list_t * list_title )
{
    int longest = 0;
    int ii;
    uint64_t longest_duration = 0;
    int highest_rank = 0;
    int most_chapters = 0;
    int rank[8] = {0, 1, 3, 2, 6, 5, 7, 4};
    BLURAY_TITLE_INFO * ti;

    for ( ii = 0; ii < hb_list_count( list_title ); ii++ )
    {
        hb_title_t * title = hb_list_item( list_title, ii );
        ti = d->title_info[title->index - 1];
        if ( ti ) 
        {
            BLURAY_STREAM_INFO * bdvideo = &ti->clips[0].video_streams[0];
            if ( title->duration > longest_duration * 0.7 && bdvideo->format < 8 )
            {
                if (highest_rank < rank[bdvideo->format] ||
                    ( title->duration > longest_duration &&
                          highest_rank == rank[bdvideo->format]))
                {
                    longest = title->index;
                    longest_duration = title->duration;
                    highest_rank = rank[bdvideo->format];
                    most_chapters = ti->chapter_count;
                }
                else if (highest_rank == rank[bdvideo->format] &&
                         title->duration == longest_duration &&
                         ti->chapter_count > most_chapters)
                {
                    longest = title->index;
                    most_chapters = ti->chapter_count;
                }
            }
        }
        else if ( title->duration > longest_duration )
        {
            longest_duration = title->duration;
            longest = title->index;
        }
    }
    return longest;
}
Example #9
void encx265Close(hb_work_object_t *w)
{
    hb_work_private_t *pv = w->private_data;

    if (pv->delayed_chapters != NULL)
    {
        struct chapter_s *item;
        while ((item = hb_list_item(pv->delayed_chapters, 0)) != NULL)
        {
            hb_list_rem(pv->delayed_chapters, item);
            free(item);
        }
        hb_list_close(&pv->delayed_chapters);
    }

    x265_param_free(pv->param);
    x265_encoder_close(pv->x265);
    free(pv);
    w->private_data = NULL;
}
Example #10
static int check_ff_audio( hb_list_t *list_audio, hb_audio_t *ff_audio )
{
    int i;

    for( i = 0; i < hb_list_count( list_audio ); i++ )
    {
        hb_audio_t * audio = hb_list_item( list_audio, i );

        if ( audio == ff_audio )
            break;

        if ( audio->config.in.codec == HB_ACODEC_FFMPEG && 
             audio->id == ff_audio->id )
        {
            hb_list_add( audio->priv.ff_audio_list, ff_audio );
            return 1;
        }
    }
    return 0;
}
Example #11
/**
 * Iterates through job list and calls do_job for each job.
 * @param _work Handle work object.
 */
static void work_func( void * _work )
{
    hb_work_t  * work = _work;
    hb_job_t   * job;

    hb_log( "%d job(s) to process", hb_list_count( work->jobs ) );

    while( !*work->die && ( job = hb_list_item( work->jobs, 0 ) ) )
    {
        hb_list_rem( work->jobs, job );
        job->die = work->die;
        *(work->current_job) = job;
        InitWorkState( job->h );
        do_job( job );
        *(work->current_job) = NULL;
    }

    *(work->error) = HB_ERROR_NONE;

    free( work );
}
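
work_func() drains its queue with the same take-item-0 loop, but re-checks the shared *work->die flag on every pass so a cancel request stops processing between jobs rather than only after the queue is empty. A sketch of that loop shape for an arbitrary queue (drain_queue() and process_one are hypothetical):

static void drain_queue( hb_list_t * queue, volatile int * stop,
                         void (*process_one)( void * item ) )
{
    void * item;

    // Stop when the queue is empty or when the owner raises the flag.
    while( !*stop && ( item = hb_list_item( queue, 0 ) ) )
    {
        hb_list_rem( queue, item );   // take ownership before processing
        process_one( item );
    }
}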
Example #12
mfxStatus MFX_CDECL qsv_Submit(mfxHDL pthis, const mfxHDL *in, mfxU32 in_num, const mfxHDL *out, mfxU32 out_num, mfxThreadTask *task){
    mfxStatus sts = MFX_ERR_NONE;

    qsv_filter_t *plugin = pthis;

    mfxFrameSurface1 *surface_in = (mfxFrameSurface1 *)in[0];
    mfxFrameSurface1 *surface_out = (mfxFrameSurface1 *)out[0];
    mfxFrameSurface1 *real_surface_in = surface_in;
    mfxFrameSurface1 *real_surface_out = surface_out;

    sts = plugin->core->GetRealSurface(plugin->core->pthis, surface_in, &real_surface_in);
    AV_QSV_CHECK_RESULT(sts, MFX_ERR_NONE, MFX_ERR_MEMORY_ALLOC);

    sts = plugin->core->GetRealSurface(plugin->core->pthis, surface_out, &real_surface_out);
    AV_QSV_CHECK_RESULT(sts, MFX_ERR_NONE, MFX_ERR_MEMORY_ALLOC);

    int task_idx = get_free_task(plugin->tasks);

    if (task_idx == -1)
    {
        return MFX_WRN_DEVICE_BUSY;
    }

    plugin->core->IncreaseReference(plugin->core->pthis, &(real_surface_in->Data));
    plugin->core->IncreaseReference(plugin->core->pthis, &(real_surface_out->Data));

    // to preserve timing if other filters are used in-between
    surface_out->Data.TimeStamp  = surface_in->Data.TimeStamp;
    surface_out->Data.FrameOrder = surface_in->Data.FrameOrder;

    qsv_filter_task_t *current_task = hb_list_item(plugin->tasks,task_idx);
    current_task->in    = real_surface_in;
    current_task->out   = real_surface_out;
    current_task->busy  = 1;
    current_task->pv    = plugin->pv;

    *task = (mfxThreadTask)current_task;

    return sts;
}
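
get_free_task() is not part of this excerpt; judging from its use here, it returns the index of an idle entry in plugin->tasks, or -1 when every task is still busy, which makes qsv_Submit() return MFX_WRN_DEVICE_BUSY. A hedged sketch of what such a helper could look like (the real HandBrake implementation may differ):

// Sketch only: find a task slot whose busy flag is clear.
static int get_free_task( hb_list_t * tasks )
{
    int i;

    for( i = 0; i < hb_list_count( tasks ); i++ )
    {
        qsv_filter_task_t * task = hb_list_item( tasks, i );
        if( task != NULL && !task->busy )
            return i;
    }
    return -1;   // all slots busy
}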
Example #13
mfxStatus plugin_close(qsv_filter_t* plugin){
    int i = 0;
    mfxStatus sts = MFX_ERR_NONE;

    if(!plugin->is_init_done) return sts;

    mfxExtOpaqueSurfaceAlloc* plugin_opaque_alloc = NULL;

    plugin_opaque_alloc = (mfxExtOpaqueSurfaceAlloc*) get_ext_buffer(plugin->videoparam->ExtParam,
                                    plugin->videoparam->NumExtParam, MFX_EXTBUFF_OPAQUE_SURFACE_ALLOCATION);

    if(!plugin_opaque_alloc || !plugin_opaque_alloc->In.Surfaces || !plugin_opaque_alloc->Out.Surfaces)
        return MFX_ERR_INVALID_VIDEO_PARAM;

    sts = plugin->core->UnmapOpaqueSurface(plugin->core->pthis, plugin_opaque_alloc->In.NumSurface,
            plugin_opaque_alloc->In.Type, plugin_opaque_alloc->In.Surfaces);
    AV_QSV_CHECK_RESULT(sts, MFX_ERR_NONE, sts);


    sts = plugin->core->UnmapOpaqueSurface(plugin->core->pthis, plugin_opaque_alloc->Out.NumSurface,
            plugin_opaque_alloc->Out.Type, plugin_opaque_alloc->Out.Surfaces);
    AV_QSV_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    if(plugin->tasks){
        for(i=hb_list_count(plugin->tasks);i>0;i--){
            qsv_filter_task_t *task = hb_list_item(plugin->tasks,i-1);
            hb_list_rem(plugin->tasks,task);
            free(task);
        }
        hb_list_close(&plugin->tasks);
    }

    plugin->is_init_done = 0;

    return sts;
}
Example #14
File: opencl.c  Project: Doluci/HandBrake
int hb_opencl_available()
{
    static int opencl_available = -1;
    if (opencl_available >= 0)
    {
        return opencl_available;
    }
    opencl_available = 0;

    /*
     * Check whether we can load the OpenCL library, then check devices and make
     * sure we support running OpenCL code on at least one of them.
     */
    hb_opencl_library_t *opencl;
    if ((opencl = hb_opencl_library_init()) != NULL)
    {
        int i;
        hb_list_t *device_list;
        hb_opencl_device_t *device;
        if ((device_list = hb_opencl_devices_list_get(opencl, CL_DEVICE_TYPE_ALL)) != NULL)
        {
            for (i = 0; i < hb_list_count(device_list); i++)
            {
                if ((device = hb_list_item(device_list, i)) != NULL &&
                    (hb_opencl_device_is_supported(device)))
                {
                    opencl_available = 1;
                    break;
                }
            }
            hb_opencl_devices_list_close(&device_list);
        }
        hb_opencl_library_close(&opencl);
    }
    return opencl_available;
}
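
hb_opencl_available() memoizes its answer in a function-local static, so the comparatively expensive library load and device probe runs at most once per process and later calls return the cached value immediately. A minimal sketch of the same pattern (probe_hardware() is a hypothetical stand-in for the OpenCL probe):

extern int probe_hardware( void );    // hypothetical expensive check

static int feature_available( void )
{
    static int cached = -1;           // -1: not probed yet, 0: no, 1: yes
    if( cached >= 0 )
    {
        return cached;
    }
    cached = probe_hardware() ? 1 : 0;
    return cached;
}

As in the original, nothing guards the first call against concurrency; two threads probing at once would simply each run the probe and store the same result.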
Example #15
/***********************************************************************
 * hb_work_sync_init
 ***********************************************************************
 * Initialize the work object
 **********************************************************************/
hb_work_object_t * hb_sync_init( hb_job_t * job )
{
    hb_title_t        * title = job->title;
    hb_chapter_t      * chapter;
    int                 i;
    uint64_t            duration;
    hb_work_private_t * pv;
    hb_sync_video_t   * sync;
    hb_work_object_t  * w;
    hb_work_object_t  * ret = NULL;

    pv = calloc( 1, sizeof( hb_work_private_t ) );
    sync = &pv->type.video;
    pv->common = calloc( 1, sizeof( hb_sync_common_t ) );
    pv->common->ref++;
    pv->common->mutex = hb_lock_init();
    pv->common->audio_pts_thresh = -1;
    pv->common->next_frame = hb_cond_init();
    pv->common->pts_count = 1;
    if ( job->frame_to_start || job->pts_to_start )
    {
        pv->common->start_found = 0;
    }
    else
    {
        pv->common->start_found = 1;
    }

    ret = w = hb_get_work( WORK_SYNC_VIDEO );
    w->private_data = pv;
    w->fifo_in = job->fifo_raw;

    // When doing subtitle indepth scan, the pipeline ends at sync
    if ( !job->indepth_scan )
        w->fifo_out = job->fifo_sync;
    else
        w->fifo_out = NULL;

    pv->job            = job;
    pv->common->pts_offset   = INT64_MIN;
    sync->first_frame = 1;

    if( job->pass == 2 )
    {
        /* We already have an accurate frame count from pass 1 */
        hb_interjob_t * interjob = hb_interjob_get( job->h );
        sync->count_frames_max = interjob->frame_count;
    }
    else
    {
        /* Calculate how many video frames we are expecting */
        if ( job->pts_to_stop )
        {
            duration = job->pts_to_stop + 90000;
        }
        else if( job->frame_to_stop )
        {
            /* Set the duration to a rough estimate */
            duration = ( job->frame_to_stop / ( title->rate / title->rate_base ) ) * 90000;
        }
        else
        {
            duration = 0;
            for( i = job->chapter_start; i <= job->chapter_end; i++ )
            {
                chapter   = hb_list_item( job->list_chapter, i - 1 );
                duration += chapter->duration;
            }
        }
        sync->count_frames_max = duration * title->rate / title->rate_base / 90000;
    }

    hb_log( "sync: expecting %d video frames", sync->count_frames_max );

    /* Initialize libsamplerate for every audio track we have */
    if ( ! job->indepth_scan )
    {
        for( i = 0; i < hb_list_count( job->list_audio ); i++ )
        {
            InitAudio( job, pv->common, i );
        }
    }
    pv->common->first_pts = malloc( sizeof(int64_t) * pv->common->pts_count );
    for ( i = 0; i < pv->common->pts_count; i++ )
        pv->common->first_pts[i] = INT64_MAX;

    return ret;
}
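
The frame-count estimate above mixes two clocks: duration is counted in 90 kHz ticks, while title->rate / title->rate_base is the frame rate (the scan example later on this page maps 23.976 fps to a rate_base of 1126125, which implies a 27 MHz rate), so count_frames_max = duration * rate / rate_base / 90000. A worked example with hypothetical numbers, a two-hour 23.976 fps title:

    duration         = 2 * 3600 * 90000 = 648,000,000 ticks
    count_frames_max = 648,000,000 * 27,000,000 / 1,126,125 / 90,000
                     ≈ 172,627 frames   (7200 s * 23.976 fps)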
Example #16
static hb_buffer_t* nal_encode(hb_work_object_t *w,
                               x265_picture *pic_out,
                               x265_nal *nal, uint32_t nnal)
{
    hb_work_private_t *pv = w->private_data;
    hb_job_t *job         = pv->job;
    hb_buffer_t *buf      = NULL;
    int i;

    if (nnal <= 0)
    {
        return NULL;
    }

    buf = hb_video_buffer_init(job->width, job->height);
    if (buf == NULL)
    {
        return NULL;
    }

    buf->size = 0;
    // copy the bitstream data
    for (i = 0; i < nnal; i++)
    {
        memcpy(buf->data + buf->size, nal[i].payload, nal[i].sizeBytes);
        buf->size += nal[i].sizeBytes;
    }

    // use the pts to get the original frame's duration.
    buf->s.duration     = get_frame_duration(pv, pic_out->pts);
    buf->s.stop         = pic_out->pts + buf->s.duration;
    buf->s.start        = pic_out->pts;
    buf->s.renderOffset = pic_out->dts;
    if (w->config->h264.init_delay == 0 && pic_out->dts < 0)
    {
        w->config->h264.init_delay -= pic_out->dts;
    }

    switch (pic_out->sliceType)
    {
        case X265_TYPE_IDR:
            buf->s.frametype = HB_FRAME_IDR;
            break;
        case X265_TYPE_I:
            buf->s.frametype = HB_FRAME_I;
            break;
        case X265_TYPE_P:
            buf->s.frametype = HB_FRAME_P;
            break;
        case X265_TYPE_B:
            buf->s.frametype = HB_FRAME_B;
            break;
        case X265_TYPE_BREF:
            buf->s.frametype = HB_FRAME_BREF;
            break;
        default:
            buf->s.frametype = 0;
            break;
    }

    if (pv->next_chapter_pts != AV_NOPTS_VALUE &&
        pv->next_chapter_pts <= pic_out->pts   &&
        pic_out->sliceType   == X265_TYPE_IDR)
    {
        // we're no longer looking for this chapter
        pv->next_chapter_pts = AV_NOPTS_VALUE;

        // get the chapter index from the list
        struct chapter_s *item = hb_list_item(pv->delayed_chapters, 0);
        if (item != NULL)
        {
            // we're done with this chapter
            hb_list_rem(pv->delayed_chapters, item);
            buf->s.new_chap = item->index;
            free(item);

            // we may still have another pending chapter
            item = hb_list_item(pv->delayed_chapters, 0);
            if (item != NULL)
            {
                // we're looking for this one now
                // we still need it, don't remove it
                pv->next_chapter_pts = item->start;
            }
        }
    }

    // discard empty buffers (no video)
    if (buf->size <= 0)
    {
        hb_buffer_close(&buf);
    }
    return buf;
}
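
The init_delay adjustment above fires at most once: while the muxer's delay is still zero and the first decode timestamp comes out negative (as it does when B-frames reorder the stream), subtracting the negative DTS turns init_delay into the shift needed to keep all timestamps non-negative. A hypothetical worked example: if the first packet has pts = 0 and dts = -6006 (two frame durations at 29.97 fps on the 90 kHz clock), init_delay becomes 0 - (-6006) = 6006.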
Example #17
File: reader.c  Project: rdp/HandBrake
/***********************************************************************
 * ReaderFunc
 ***********************************************************************
 *
 **********************************************************************/
static void ReaderFunc( void * _r )
{
    hb_reader_t  * r = _r;
    hb_fifo_t   ** fifos;
    hb_buffer_t  * buf;
    hb_list_t    * list;
    int            n;
    int            chapter = -1;
    int            chapter_end = r->job->chapter_end;

    if ( r->title->type == HB_BD_TYPE )
    {
        if ( !( r->bd = hb_bd_init( r->title->path ) ) )
            return;
    }
    else if ( r->title->type == HB_DVD_TYPE )
    {
        if ( !( r->dvd = hb_dvd_init( r->title->path ) ) )
            return;
    }
    else if ( r->title->type == HB_STREAM_TYPE ||
              r->title->type == HB_FF_STREAM_TYPE )
    {
        if ( !( r->stream = hb_stream_open( r->title->path, r->title ) ) )
            return;
    }
    else
    {
        // Unknown type, should never happen
        return;
    }

    if (r->bd)
    {
        if( !hb_bd_start( r->bd, r->title ) )
        {
            hb_bd_close( &r->bd );
            return;
        }
        if ( r->job->start_at_preview )
        {
            // XXX code from DecodePreviews - should go into its own routine
            hb_bd_seek( r->bd, (float)r->job->start_at_preview /
                         ( r->job->seek_points ? ( r->job->seek_points + 1.0 ) : 11.0 ) );
        }
        else if ( r->job->pts_to_start )
        {
            // Note, bd seeks always put us to an i-frame.  no need
            // to start decoding early using r->pts_to_start
            hb_bd_seek_pts( r->bd, r->job->pts_to_start );
            r->job->pts_to_start = 0;
            r->start_found = 1;
        }
        else
        {
            hb_bd_seek_chapter( r->bd, r->job->chapter_start );
        }
        if (r->job->angle > 1)
        {
            hb_bd_set_angle( r->bd, r->job->angle - 1 );
        }
    }
    else if (r->dvd)
    {
        /*
         * XXX this code is a temporary hack that should go away if/when
         *     chapter merging goes away in libhb/dvd.c
         * map the start and end chapter numbers to on-media chapter
         * numbers since chapter merging could cause the handbrake numbers
         * to diverge from the media numbers and, if our chapter_end is after
         * a media chapter that got merged, we'll stop ripping too early.
         */
        int start = r->job->chapter_start;
        hb_chapter_t *chap = hb_list_item( r->title->list_chapter, chapter_end - 1 );

        chapter_end = chap->index;
        if (start > 1)
        {
           chap = hb_list_item( r->title->list_chapter, start - 1 );
           start = chap->index;
        }
        /* end chapter mapping XXX */

        if( !hb_dvd_start( r->dvd, r->title, start ) )
        {
            hb_dvd_close( &r->dvd );
            return;
        }
        if (r->job->angle)
        {
            hb_dvd_set_angle( r->dvd, r->job->angle );
        }

        if ( r->job->start_at_preview )
        {
            // XXX code from DecodePreviews - should go into its own routine
            hb_dvd_seek( r->dvd, (float)r->job->start_at_preview /
                         ( r->job->seek_points ? ( r->job->seek_points + 1.0 ) : 11.0 ) );
        }
    }
    else if ( r->stream && r->job->start_at_preview )
    {
        
        // XXX code from DecodePreviews - should go into its own routine
        hb_stream_seek( r->stream, (float)( r->job->start_at_preview - 1 ) /
                        ( r->job->seek_points ? ( r->job->seek_points + 1.0 ) : 11.0 ) );

    } 
    else if ( r->stream && r->job->pts_to_start )
    {
        int64_t pts_to_start = r->job->pts_to_start;
        
        // Find out what the first timestamp of the stream is
        // and then seek to the appropriate offset from it
        if ( ( buf = hb_stream_read( r->stream ) ) )
        {
            if ( buf->start > 0 )
            {
                pts_to_start += buf->start;
                r->pts_to_start += buf->start;
                r->job->pts_to_start += buf->start;
            }
        }
        
        if ( hb_stream_seek_ts( r->stream, pts_to_start ) >= 0 )
        {
            // Seek takes us to the nearest I-frame before the timestamp
            // that we want.  So we will retrieve the start time of the
            // first packet we get, subtract that from pts_to_start, and
            // inspect the rest of the frames in sync.
            r->start_found = 2;
            r->job->pts_to_start = pts_to_start;
        }
    } 
    else if( r->stream )
    {
        /*
         * Standard stream, seek to the starting chapter, if set, and track the
         * end chapter so that we end at the right time.
         */
        int start = r->job->chapter_start;
        hb_chapter_t *chap = hb_list_item( r->title->list_chapter, chapter_end - 1 );
        
        chapter_end = chap->index;
        if (start > 1)
        {
            chap = hb_list_item( r->title->list_chapter, start - 1 );
            start = chap->index;
        }
        
        /*
         * Seek to the start chapter.
         */
        hb_stream_seek_chapter( r->stream, start );
    }

    list  = hb_list_init();

    while( !*r->die && !r->job->done )
    {
        if (r->bd)
            chapter = hb_bd_chapter( r->bd );
        else if (r->dvd)
            chapter = hb_dvd_chapter( r->dvd );
        else if (r->stream)
            chapter = hb_stream_chapter( r->stream );

        if( chapter < 0 )
        {
            hb_log( "reader: end of the title reached" );
            break;
        }
        if( chapter > chapter_end )
        {
            hb_log( "reader: end of chapter %d (media %d) reached at media chapter %d",
                    r->job->chapter_end, chapter_end, chapter );
            break;
        }

        if (r->bd)
        {
          if( (buf = hb_bd_read( r->bd )) == NULL )
          {
              break;
          }
        }
        else if (r->dvd)
        {
          if( (buf = hb_dvd_read( r->dvd )) == NULL )
          {
              break;
          }
        }
        else if (r->stream)
        {
          if ( (buf = hb_stream_read( r->stream )) == NULL )
          {
            break;
          }
          if ( r->start_found == 2 )
          {
            // We will inspect the timestamps of each frame in sync
            // to skip from this seek point to the timestamp we
            // want to start at.
            if ( buf->start > 0 && buf->start < r->job->pts_to_start )
            {
                r->job->pts_to_start -= buf->start;
            }
            else if ( buf->start >= r->job->pts_to_start )
            {
                r->job->pts_to_start = 0;
                r->start_found = 1;
            }
          }
        }

        if( r->job->indepth_scan )
        {
            /*
             * Need to update the progress during a subtitle scan
             */
            hb_state_t state;

#define p state.param.working

            state.state = HB_STATE_WORKING;
            p.progress = (double)chapter / (double)r->job->chapter_end;
            if( p.progress > 1.0 )
            {
                p.progress = 1.0;
            }
            p.rate_avg = 0.0;
            p.hours    = -1;
            p.minutes  = -1;
            p.seconds  = -1;
            hb_set_state( r->job->h, &state );
        }

        (hb_demux[r->title->demuxer])( buf, list, &r->demux );

        while( ( buf = hb_list_item( list, 0 ) ) )
        {
            hb_list_rem( list, buf );
            fifos = GetFifoForId( r->job, buf->id );

            if ( fifos && ! r->saw_video && !r->job->indepth_scan )
            {
                // The first data packet with a PTS from an audio or video stream
                // that we're decoding defines 'time zero'. Discard packets until
                // we get one.
                if ( buf->start != -1 && buf->renderOffset != -1 &&
                     ( buf->id == r->title->video_id || is_audio( r, buf->id ) ) )
                {
                    // force a new scr offset computation
                    r->scr_changes = r->demux.scr_changes - 1;
                    // create a stream state if we don't have one so the
                    // offset will get computed correctly.
                    id_to_st( r, buf, 1 );
                    r->saw_video = 1;
                    hb_log( "reader: first SCR %"PRId64" id 0x%x DTS %"PRId64,
                            r->demux.last_scr, buf->id, buf->renderOffset );
                }
                else
                {
                    fifos = NULL;
                }
            }
            if( fifos )
            {
                if ( buf->renderOffset != -1 )
                {
                    if ( r->scr_changes != r->demux.scr_changes )
                    {
                        // This is the first audio or video packet after an SCR
                        // change. Compute a new scr offset that would make this
                        // packet follow the last of this stream with the 
                        // correct average spacing.
                        stream_timing_t *st = id_to_st( r, buf, 0 );

                        // if this is the video stream and we don't have
                        // audio yet or this is an audio stream
                        // generate a new scr
                        if ( st->is_audio ||
                             ( st == r->stream_timing && !r->saw_audio ) )
                        {
                            new_scr_offset( r, buf );
                        }
                        else
                        {
                            // defer the scr change until we get some
                            // audio since audio has a timestamp per
                            // frame but video & subtitles don't. Clear
                            // the timestamps so the decoder will generate
                            // them from the frame durations.
                            buf->start = -1;
                            buf->renderOffset = -1;
                        }
                    }
                }
                if ( buf->start != -1 )
                {
                    int64_t start = buf->start - r->scr_offset;
                    if ( !r->start_found )
                        UpdateState( r, start );

                    if ( !r->start_found &&
                        start >= r->pts_to_start )
                    {
                        // pts_to_start point found
                        r->start_found = 1;
                    }
                    // This log is handy when you need to debug timing problems
                    //hb_log("id %x scr_offset %ld start %ld --> %ld", 
                    //        buf->id, r->scr_offset, buf->start, 
                    //        buf->start - r->scr_offset);
                    buf->start -= r->scr_offset;
                }
                if ( buf->renderOffset != -1 )
                {
                    if ( r->scr_changes == r->demux.scr_changes )
                    {
                        // This packet is referenced to the same SCR as the last.
                        // Adjust timestamp to remove the System Clock Reference
                        // offset then update the average inter-packet time
                        // for this stream.
                        buf->renderOffset -= r->scr_offset;
                        update_ipt( r, buf );
                    }
                }
                if ( !r->start_found )
                {
                    hb_buffer_close( &buf );
                    continue;
                }

                buf->sequence = r->sequence++;
                /* if there are multiple output fifos, send a copy of the
                 * buffer down all but the first (we have to not ship the
                 * original buffer or we'll race with the thread that's
                 * consuming the buffer & inject garbage into the data stream). */
                for( n = 1; fifos[n] != NULL; n++)
                {
                    hb_buffer_t *buf_copy = hb_buffer_init( buf->size );
                    hb_buffer_copy_settings( buf_copy, buf );
                    memcpy( buf_copy->data, buf->data, buf->size );
                    push_buf( r, fifos[n], buf_copy );
                }
                push_buf( r, fifos[0], buf );
            }
            else
            {
                hb_buffer_close( &buf );
            }
        }
    }

    // send empty buffers downstream to video & audio decoders to signal we're done.
    if( !*r->die && !r->job->done )
    {
        push_buf( r, r->job->fifo_mpeg2, hb_buffer_init(0) );

        hb_audio_t *audio;
        for( n = 0; (audio = hb_list_item( r->job->title->list_audio, n)); ++n )
        {
            if ( audio->priv.fifo_in )
                push_buf( r, audio->priv.fifo_in, hb_buffer_init(0) );
        }

        hb_subtitle_t *subtitle;
        for( n = 0; (subtitle = hb_list_item( r->job->title->list_subtitle, n)); ++n )
        {
            if ( subtitle->fifo_in && subtitle->source == VOBSUB)
                push_buf( r, subtitle->fifo_in, hb_buffer_init(0) );
        }
    }

    hb_list_empty( &list );
    if (r->bd)
    {
        hb_bd_stop( r->bd );
        hb_bd_close( &r->bd );
    }
    else if (r->dvd)
    {
        hb_dvd_stop( r->dvd );
        hb_dvd_close( &r->dvd );
    }
    else if (r->stream)
    {
        hb_stream_close(&r->stream);
    }

    if ( r->stream_timing )
    {
        free( r->stream_timing );
    }

    hb_log( "reader: done. %d scr changes", r->demux.scr_changes );
    if ( r->demux.dts_drops )
    {
        hb_log( "reader: %d drops because DTS out of range", r->demux.dts_drops );
    }

    free( r );
    _r = NULL;
}
Example #18
static GValue*  subtitle_add_track(
    signal_user_data_t *ud,
    GValue *settings,
    const hb_title_t *title,
    int track,
    int mux,
    gboolean default_track,
    gboolean srt,
    gboolean *burned)
{
    int source = 0;

    if (track >= 0 && !srt)
    {
        hb_subtitle_t *subtitle = hb_list_item(title->list_subtitle, track);
        source = subtitle->source;
    }
    else if (srt)
    {
        source = SRTSUB;
    }

    if (*burned && !hb_subtitle_can_pass(source, mux))
    {
        // Can only burn one.  Skip others that must be burned.
        return NULL;
    }

    GValue *subsettings = ghb_dict_value_new();
    ghb_settings_set_int(subsettings, "SubtitleTrack", track);
    ghb_settings_set_int(subsettings, "SubtitleSource", source);

    // Set default SRT settings
    gchar *pref_lang, *dir, *filename;

    pref_lang = ghb_settings_get_string(settings, "PreferredLanguage");
    ghb_settings_set_string(subsettings, "SrtLanguage", pref_lang);
    g_free(pref_lang);

    ghb_settings_set_string(subsettings, "SrtCodeset", "UTF-8");

    dir = ghb_settings_get_string(ud->prefs, "SrtDir");
    filename = g_strdup_printf("%s/none", dir);
    ghb_settings_set_string(subsettings, "SrtFile", filename);
    g_free(dir);
    g_free(filename);

    ghb_settings_set_int(subsettings, "SrtOffset", 0);

    subtitle_set_track_description(settings, subsettings);

    if (!hb_subtitle_can_pass(source, mux))
    {
        ghb_settings_set_boolean(subsettings, "SubtitleBurned", TRUE);
        *burned = TRUE;
    }
    else
    {
        ghb_settings_set_boolean(subsettings, "SubtitleBurned", FALSE);
    }
    if (track == -1)
    {
        // Foreign audio search "track"
        ghb_settings_set_boolean(subsettings, "SubtitleForced", TRUE);
    }
    else
    {
        ghb_settings_set_boolean(subsettings, "SubtitleForced", FALSE);
    }
    if (default_track)
    {
        ghb_settings_set_boolean(subsettings, "SubtitleDefaultTrack", TRUE);
    }
    else
    {
        ghb_settings_set_boolean(subsettings, "SubtitleDefaultTrack", FALSE);
    }
    subtitle_add_to_settings(settings, subsettings);

    return subsettings;
}
Example #19
void
ghb_set_pref_subtitle_settings(signal_user_data_t *ud, const hb_title_t *title, GValue *settings)
{
    gint track;
    gboolean *used;
    const gchar *audio_lang, *pref_lang = NULL;
    gboolean foreign_audio_search, foreign_audio_subs;
    gboolean one_burned = FALSE;

    const GValue *lang_list;
    gint lang_count, sub_count, ii;
    int behavior;

    behavior = ghb_settings_combo_int(settings,
                                      "SubtitleTrackSelectionBehavior");
    // Clear the subtitle list
    ghb_clear_subtitle_list_settings(settings);

    if (title == NULL)
    {
        // no source title
        return;
    }
    sub_count = hb_list_count(title->list_subtitle);
    if (sub_count == 0)
    {
        // No source subtitles
        return;
    }

    const char *mux_id;
    const hb_container_t *mux;

    mux_id = ghb_settings_get_const_string(settings, "FileFormat");
    mux = ghb_lookup_container_by_name(mux_id);

    // Check to see if we need to add a subtitle track for foreign audio
    // language films. A subtitle track will be added if:
    //
    // The first (default) audio track language does NOT match the users
    // chosen Preferred Language AND the Preferred Language is NOT Any (und).
    //
    audio_lang = ghb_get_user_audio_lang(settings, title, 0);
    foreign_audio_search = ghb_settings_get_boolean(
                                settings, "SubtitleAddForeignAudioSearch");
    foreign_audio_subs = ghb_settings_get_boolean(
                                settings, "SubtitleAddForeignAudioSubtitle");
    lang_list = ghb_settings_get_value(settings, "SubtitleLanguageList");
    lang_count = ghb_array_len(lang_list);
    if (lang_count > 0)
    {
        GValue *glang = ghb_array_get_nth(lang_list, 0);
        pref_lang = g_value_get_string(glang);
    }

    if (pref_lang == NULL || !strncmp(pref_lang, "und", 4))
    {
        foreign_audio_search = foreign_audio_subs = FALSE;
        pref_lang = NULL;
    }

    used = g_malloc0(sub_count * sizeof(gboolean));

    if (foreign_audio_subs &&
        (audio_lang == NULL || strncmp(audio_lang, pref_lang, 4)))
    {
        // Add preferred language subtitle since first audio track
        // is foreign language.
        foreign_audio_search = FALSE;
        track = ghb_find_subtitle_track(title, pref_lang, 0);
        if (track > 0)
        {
            used[track] = TRUE;
            subtitle_add_track(ud, settings, title, track, mux->format,
                               TRUE, FALSE, &one_burned);
        }
    }

    if (foreign_audio_search &&
        (audio_lang != NULL && !strncmp(audio_lang, pref_lang, 4)))
    {
        // Add search for foreign audio segments
        subtitle_add_track(ud, settings, title, -1, mux->format,
                           TRUE, FALSE, &one_burned);
    }

    if (behavior != 0)
    {
        // Find "best" subtitle based on subtitle preferences
        for (ii = 0; ii < lang_count; ii++)
        {
            GValue *glang = ghb_array_get_nth(lang_list, ii);
            const gchar *lang = g_value_get_string(glang);

            int next_track = 0;
            track = ghb_find_subtitle_track(title, lang, next_track);
            while (track >= 0)
            {
                if (!used[track])
                {
                    used[track] = TRUE;
                    subtitle_add_track(ud, settings, title, track, mux->format,
                                       FALSE, FALSE, &one_burned);
                }
                next_track = track + 1;
                if (behavior == 2)
                {
                    track = ghb_find_subtitle_track(title, lang, next_track);
                }
                else
                {
                    break;
                }
            }
        }
    }

    if (ghb_settings_get_boolean(settings, "SubtitleAddCC"))
    {
        for (track = 0; track < sub_count; track++)
        {
            hb_subtitle_t *subtitle = hb_list_item(title->list_subtitle, track);
            if (subtitle->source == CC608SUB || subtitle->source == CC708SUB)
                break;
        }

        if (track < sub_count && !used[track])
        {
            used[track] = TRUE;
            subtitle_add_track(ud, settings, title, track, mux->format,
                               FALSE, FALSE, &one_burned);
        }
    }
    g_free(used);
}
Example #20
File: scan.c  Project: GTRsdk/HandBrake
/***********************************************************************
 * DecodePreviews
 ***********************************************************************
 * Decode 10 pictures for the given title.
 * It assumes that data->reader and data->vts have successfully been
 * DVDOpen()ed and ifoOpen()ed.
 **********************************************************************/
static int DecodePreviews( hb_scan_t * data, hb_title_t * title )
{
    int             i, npreviews = 0;
    hb_buffer_t   * buf, * buf_es;
    hb_list_t     * list_es;
    int progressive_count = 0;
    int pulldown_count = 0;
    int doubled_frame_count = 0;
    int interlaced_preview_count = 0;
    info_list_t * info_list = calloc( data->preview_count+1, sizeof(*info_list) );
    crop_record_t *crops = crop_record_init( data->preview_count );

    list_es  = hb_list_init();

    if( data->batch )
    {
        hb_log( "scan: decoding previews for title %d (%s)", title->index, title->path );
    }
    else
    {
        hb_log( "scan: decoding previews for title %d", title->index );
    }

    if (data->bd)
    {
        hb_bd_start( data->bd, title );
        hb_log( "scan: title angle(s) %d", title->angle_count );
    }
    else if (data->dvd)
    {
        hb_dvd_start( data->dvd, title, 1 );
        title->angle_count = hb_dvd_angle_count( data->dvd );
        hb_log( "scan: title angle(s) %d", title->angle_count );
    }
    else if (data->batch)
    {
        data->stream = hb_stream_open( title->path, title, 1 );
    }

    int vcodec = title->video_codec? title->video_codec : WORK_DECMPEG2;
#if defined(USE_FF_MPEG2)
    if (vcodec == WORK_DECMPEG2)
    {
        vcodec = WORK_DECAVCODECV;
        title->video_codec_param = CODEC_ID_MPEG2VIDEO;
    }
#endif
    hb_work_object_t *vid_decoder = hb_get_work( vcodec );
    vid_decoder->codec_param = title->video_codec_param;
    vid_decoder->title = title;
    vid_decoder->init( vid_decoder, NULL );

    for( i = 0; i < data->preview_count; i++ )
    {
        int j;

        if ( *data->die )
        {
            free( info_list );
            crop_record_free( crops );
            return 0;
        }
        if (data->bd)
        {
            if( !hb_bd_seek( data->bd, (float) ( i + 1 ) / ( data->preview_count + 1.0 ) ) )
            {
                continue;
            }
        }
        if (data->dvd)
        {
            if( !hb_dvd_seek( data->dvd, (float) ( i + 1 ) / ( data->preview_count + 1.0 ) ) )
            {
                continue;
            }
        }
        else if (data->stream)
        {
          /* we start reading streams at zero rather than 1/11 because
           * short streams may have only one sequence header in the entire
           * file and we need it to decode any previews. */
          if (!hb_stream_seek(data->stream, (float) i / ( data->preview_count + 1.0 ) ) )
          {
              continue;
          }
        }

        hb_deep_log( 2, "scan: preview %d", i + 1 );

        if ( vid_decoder->flush )
            vid_decoder->flush( vid_decoder );

        hb_buffer_t * vid_buf = NULL;

        for( j = 0; j < 10240 ; j++ )
        {
            if (data->bd)
            {
              if( (buf = hb_bd_read( data->bd )) == NULL )
              {
                  if ( vid_buf )
                  {
                    break;
                  }
                  hb_log( "Warning: Could not read data for preview %d, skipped", i + 1 );
                  goto skip_preview;
              }
            }
            else if (data->dvd)
            {
              if( (buf = hb_dvd_read( data->dvd )) == NULL )
              {
                  if ( vid_buf )
                  {
                    break;
                  }
                  hb_log( "Warning: Could not read data for preview %d, skipped", i + 1 );
                  goto skip_preview;
              }
            }
            else if (data->stream)
            {
              if ( (buf = hb_stream_read( data->stream )) == NULL )
              {
                  if ( vid_buf )
                  {
                    break;
                  }
                  hb_log( "Warning: Could not read data for preview %d, skipped", i + 1 );
                  goto skip_preview;
              }
            }
            else
            {
                // Silence compiler warning
                buf = NULL;
                hb_error( "Error: This can't happen!" );
                goto skip_preview;
            }

            (hb_demux[title->demuxer])(buf, list_es, 0 );

            while( ( buf_es = hb_list_item( list_es, 0 ) ) )
            {
                hb_list_rem( list_es, buf_es );
                if( buf_es->s.id == title->video_id && vid_buf == NULL )
                {
                    vid_decoder->work( vid_decoder, &buf_es, &vid_buf );
                }
                else if( ! AllAudioOK( title ) ) 
                {
                    LookForAudio( title, buf_es );
                    buf_es = NULL;
                }
                if ( buf_es )
                    hb_buffer_close( &buf_es );
            }

            if( vid_buf && AllAudioOK( title ) )
                break;
        }

        if( ! vid_buf )
        {
            hb_log( "scan: could not get a decoded picture" );
            continue;
        }

        /* Get size and rate infos */

        hb_work_info_t vid_info;
        if( !vid_decoder->info( vid_decoder, &vid_info ) )
        {
            /*
             * Could not fill vid_info, don't continue and try to use vid_info
             * in this case.
             */
            if (vid_buf)
            {
                hb_buffer_close( &vid_buf );
            }
            hb_log( "scan: could not get a video information" );
            continue;
        }

        remember_info( info_list, &vid_info );

        if( is_close_to( vid_info.rate_base, 900900, 100 ) &&
            ( vid_buf->s.flags & PIC_FLAG_REPEAT_FIRST_FIELD ) )
        {
            /* Potentially soft telecine material */
            pulldown_count++;
        }

        if( vid_buf->s.flags & PIC_FLAG_REPEAT_FRAME )
        {
            // AVCHD-Lite specifies that all streams are
            // 50 or 60 fps.  To produce 25 or 30 fps, camera
            // makers are repeating all frames.
            doubled_frame_count++;
        }

        if( is_close_to( vid_info.rate_base, 1126125, 100 ) )
        {
            // Frame FPS is 23.976 (meaning it's progressive), so start keeping
            // track of how many are reporting at that speed. When enough 
            // show up that way, we want to make that the overall title FPS.
            progressive_count++;
        }

        while( ( buf_es = hb_list_item( list_es, 0 ) ) )
        {
            hb_list_rem( list_es, buf_es );
            hb_buffer_close( &buf_es );
        }

        /* Check preview for interlacing artifacts */
        if( hb_detect_comb( vid_buf, 10, 30, 9, 10, 30, 9 ) )
        {
            hb_deep_log( 2, "Interlacing detected in preview frame %i", i+1);
            interlaced_preview_count++;
        }
        
        if( data->store_previews )
        {
            hb_save_preview( data->h, title->index, i, vid_buf );
        }

        /* Detect black borders */

        int top, bottom, left, right;
        int h4 = vid_info.height / 4, w4 = vid_info.width / 4;

        // When widescreen content is matted to 16:9 or 4:3 there's sometimes
        // a thin border on the outer edge of the matte. On TV content it can be
        // "line 21" VBI data that's normally hidden in the overscan. For HD
        // content it can just be a diagnostic added in post production so that
        // the frame borders are visible. We try to ignore these borders so
        // we can crop the matte. The border width depends on the resolution
        // (12 pixels on 1080i looks visually the same as 4 pixels on 480i)
        // so we allow the border to be up to 1% of the frame height.
        const int border = vid_info.height / 100;

        for ( top = border; top < h4; ++top )
        {
            if ( ! row_all_dark( vid_buf, top ) )
                break;
        }
        if ( top <= border )
        {
            // we never made it past the border region - see if the rows we
            // didn't check are dark or if we shouldn't crop at all.
            for ( top = 0; top < border; ++top )
            {
                if ( ! row_all_dark( vid_buf, top ) )
                    break;
            }
            if ( top >= border )
            {
                top = 0;
            }
        }
        for ( bottom = border; bottom < h4; ++bottom )
        {
            if ( ! row_all_dark( vid_buf, vid_info.height - 1 - bottom ) )
                break;
        }
        if ( bottom <= border )
        {
            for ( bottom = 0; bottom < border; ++bottom )
            {
                if ( ! row_all_dark( vid_buf, vid_info.height - 1 - bottom ) )
                    break;
            }
            if ( bottom >= border )
            {
                bottom = 0;
            }
        }
        for ( left = 0; left < w4; ++left )
        {
            if ( ! column_all_dark( vid_buf, top, bottom, left ) )
                break;
        }
        for ( right = 0; right < w4; ++right )
        {
            if ( ! column_all_dark( vid_buf, top, bottom, vid_info.width - 1 - right ) )
                break;
        }

        // only record the result if all the crops are less than a quarter of
        // the frame otherwise we can get fooled by frames with a lot of black
        // like titles, credits & fade-thru-black transitions.
        if ( top < h4 && bottom < h4 && left < w4 && right < w4 )
        {
            record_crop( crops, top, bottom, left, right );
        }
        ++npreviews;

skip_preview:
        /* Make sure we found audio rates and bitrates */
        for( j = 0; j < hb_list_count( title->list_audio ); j++ )
        {
            hb_audio_t * audio = hb_list_item( title->list_audio, j );
            if ( audio->priv.scan_cache )
            {
                hb_fifo_flush( audio->priv.scan_cache );
            }
        }
        if (vid_buf)
        {
            hb_buffer_close( &vid_buf );
        }
    }
    vid_decoder->close( vid_decoder );
    free( vid_decoder );

    if ( data->batch && data->stream )
    {
        hb_stream_close( &data->stream );
    }

    if ( npreviews )
    {
        // use the most common frame info for our final title dimensions
        hb_work_info_t vid_info;
        most_common_info( info_list, &vid_info );

        title->has_resolution_change = has_resolution_change( info_list );
        if ( title->video_codec_name == NULL )
        {
            title->video_codec_name = strdup( vid_info.name );
        }
        title->width = vid_info.width;
        title->height = vid_info.height;
        if ( vid_info.rate && vid_info.rate_base )
        {
            // if the frame rate is very close to one of our "common" framerates,
            // assume it actually is said frame rate; e.g. some 24000/1001 sources
            // may have a rate_base of 1126124 (instead of 1126125)
            for( i = 0; i < hb_video_rates_count; i++ )
            {
                if( is_close_to( vid_info.rate_base, hb_video_rates[i].rate, 100 ) )
                {
                    vid_info.rate_base = hb_video_rates[i].rate;
                    break;
                }
            }
            title->rate = vid_info.rate;
            title->rate_base = vid_info.rate_base;
            if( vid_info.rate_base == 900900 )
            {
                if( pulldown_count >= npreviews / 4 )
                {
                    title->rate_base = 1126125;
                    hb_deep_log( 2, "Pulldown detected, setting fps to 23.976" );
                }
                if( progressive_count >= npreviews / 2 )
                {
                    // We've already deduced that the frame rate is 23.976,
                    // so set it back again.
                    title->rate_base = 1126125;
                    hb_deep_log( 2, "Title's mostly NTSC Film, setting fps to 23.976" );
                }
            }
            if( doubled_frame_count >= 3 * npreviews / 4 )
            {
                // We've detected that a significant number of the frames
                // have been doubled in duration by repeat flags.
                title->rate_base = 2 * vid_info.rate_base;
                hb_deep_log( 2, "Repeat frames detected, setting fps to %.3f", (float)title->rate / title->rate_base );
            }
        }
        title->video_bitrate = vid_info.bitrate;

        if( vid_info.pixel_aspect_width && vid_info.pixel_aspect_height )
        {
            title->pixel_aspect_width = vid_info.pixel_aspect_width;
            title->pixel_aspect_height = vid_info.pixel_aspect_height;
        }
        title->color_prim = vid_info.color_prim;
        title->color_transfer = vid_info.color_transfer;
        title->color_matrix = vid_info.color_matrix;

        // compute the aspect ratio based on the storage dimensions and the
        // pixel aspect ratio (if supplied) or just storage dimensions if no PAR.
        title->aspect = (double)title->width / (double)title->height;
        title->aspect *= (double)title->pixel_aspect_width /
                         (double)title->pixel_aspect_height;

        // For unknown reasons some French PAL DVDs put the original
        // content's aspect ratio into the mpeg PAR even though it's
        // the wrong PAR for the DVD. Apparently they rely on the fact
        // that DVD players ignore the content PAR and just use the
        // aspect ratio from the DVD metadata. So, if the aspect computed
        // from the PAR is different from the container's aspect we use
        // the container's aspect & recompute the PAR from it.
        if( title->container_aspect && (int)(title->aspect * 9) != (int)(title->container_aspect * 9) )
        {
            hb_log("scan: content PAR gives wrong aspect %.2f; "
                   "using container aspect %.2f", title->aspect,
                   title->container_aspect );
            title->aspect = title->container_aspect;
            hb_reduce( &title->pixel_aspect_width, &title->pixel_aspect_height,
                       (int)(title->aspect * title->height + 0.5), title->width );
        }

        // don't try to crop unless we got at least 3 previews
        if ( crops->n > 2 )
        {
            sort_crops( crops );
            // The next line selects median cropping - at least
            // 50% of the frames will have their borders removed.
            // Other possible choices are loose cropping (i = 0) where 
            // no non-black pixels will be cropped from any frame and a
            // tight cropping (i = crops->n - (crops->n >> 2)) where at
            // least 75% of the frames will have their borders removed.
            i = crops->n >> 1;
            title->crop[0] = EVEN( crops->t[i] );
            title->crop[1] = EVEN( crops->b[i] );
            title->crop[2] = EVEN( crops->l[i] );
            title->crop[3] = EVEN( crops->r[i] );
        }

        hb_log( "scan: %d previews, %dx%d, %.3f fps, autocrop = %d/%d/%d/%d, "
                "aspect %s, PAR %d:%d",
                npreviews, title->width, title->height, (float) title->rate /
                (float) title->rate_base,
                title->crop[0], title->crop[1], title->crop[2], title->crop[3],
                aspect_to_string( title->aspect ), title->pixel_aspect_width,
                title->pixel_aspect_height );

        if( interlaced_preview_count >= ( npreviews / 2 ) )
        {
            hb_log("Title is likely interlaced or telecined (%i out of %i previews). You should do something about that.",
                   interlaced_preview_count, npreviews);
            title->detected_interlacing = 1;
        }
        else
        {
            title->detected_interlacing = 0;
        }
    }
Example #21
static int decsrtInit( hb_work_object_t * w, hb_job_t * job )
{
    int retval = 1;
    hb_work_private_t * pv;
    hb_buffer_t *buffer;
    int i;
    hb_chapter_t * chapter;

    pv = calloc( 1, sizeof( hb_work_private_t ) );
    if( pv )
    {
        w->private_data = pv;

        pv->job = job;

        buffer = hb_buffer_init( 0 );
        hb_fifo_push( w->fifo_in, buffer);

        pv->current_state = k_state_potential_new_entry;
        pv->number_of_entries = 0;
        pv->last_entry_number = 0;
        pv->current_time = 0;
        pv->subtitle = w->subtitle;

        /*
         * Figure out the start and stop times from the chapters being
         * encoded - drop subtitles not in this range.
         */
        pv->start_time = 0;
        for( i = 1; i < job->chapter_start; ++i )
        {
            chapter = hb_list_item( job->list_chapter, i - 1 );
            if( chapter )
            {
                pv->start_time += chapter->duration;
            } else {
                hb_error( "Could not locate chapter %d for SRT start time", i );
                retval = 0;
            }
        }
        pv->stop_time = pv->start_time;
        for( i = job->chapter_start; i <= job->chapter_end; ++i )
        {
            chapter = hb_list_item( job->list_chapter, i - 1 );
            if( chapter )
            {
                pv->stop_time += chapter->duration;
            } else {
                hb_error( "Could not locate chapter %d for SRT start time", i );
                retval = 0;
            }
        }

        hb_deep_log( 3, "SRT Start time %"PRId64", stop time %"PRId64, pv->start_time, pv->stop_time);

        pv->iconv_context = iconv_open( "utf-8", pv->subtitle->config.src_codeset );


        if( pv->iconv_context == (iconv_t) -1 )
        {
            hb_error("Could not open the iconv library with those file formats\n");

        } else {
            memset( &pv->current_entry, 0, sizeof( srt_entry_t ) );

            pv->file = hb_fopen(w->subtitle->config.src_filename, "r");

            if( !pv->file )
            {
                hb_error("Could not open the SRT subtitle file '%s'\n",
                         w->subtitle->config.src_filename);
            } else {
                retval = 0;
            }
        }
    }
    if (!retval)
    {
        // Generate generic SSA Script Info.
        int height = job->title->geometry.height - job->crop[0] - job->crop[1];
        int width = job->title->geometry.width - job->crop[2] - job->crop[3];
        hb_subtitle_add_ssa_header(w->subtitle, "Arial", .066 * height,
                                   width, height);
    }
    return retval;
}
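decsrtInit derives the subtitle window by summing chapter durations: everything before chapter_start becomes the start offset, and the encoded chapters then add up to the stop time. A minimal sketch of the same arithmetic, with a plain duration array standing in for hb_list_item()/hb_chapter_t (durations in 90 kHz ticks, as elsewhere in libhb):

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Compute the SRT start/stop window from per-chapter durations.
 * chapter_start/chapter_end are 1-based, matching job->chapter_start/end. */
static void srt_window( const int64_t *chapter_duration, int chapter_count,
                        int chapter_start, int chapter_end,
                        int64_t *start_time, int64_t *stop_time )
{
    int i;

    *start_time = 0;
    for( i = 1; i < chapter_start && i <= chapter_count; i++ )
        *start_time += chapter_duration[i - 1];

    *stop_time = *start_time;
    for( i = chapter_start; i <= chapter_end && i <= chapter_count; i++ )
        *stop_time += chapter_duration[i - 1];
}

int main( void )
{
    /* Chapters of 1, 2 and 1.5 minutes; encode chapters 2..3. */
    int64_t dur[] = { 90000LL * 60, 90000LL * 120, 90000LL * 90 };
    int64_t start, stop;
    srt_window( dur, 3, 2, 3, &start, &stop );
    printf( "start %" PRId64 ", stop %" PRId64 "\n", start, stop );
    return 0;
}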
Example #22
/***********************************************************************
 * syncVideoWork
 ***********************************************************************
 *
 **********************************************************************/
int syncVideoWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
              hb_buffer_t ** buf_out )
{
    hb_buffer_t * cur, * next, * sub = NULL;
    hb_work_private_t * pv = w->private_data;
    hb_job_t          * job = pv->job;
    hb_subtitle_t     * subtitle;
    hb_sync_video_t   * sync = &pv->type.video;
    int i;
    int64_t next_start;

    *buf_out = NULL;
    next = *buf_in;
    *buf_in = NULL;

    /* Wait till we can determine the initial pts of all streams */
    if( next->size != 0 && pv->common->pts_offset == INT64_MIN )
    {
        pv->common->first_pts[0] = next->s.start;
        hb_lock( pv->common->mutex );
        while( pv->common->pts_offset == INT64_MIN )
        {
            // Full fifos will make us wait forever, so get the
            // pts offset from the available streams if full
            if ( hb_fifo_is_full( job->fifo_raw ) )
            {
                getPtsOffset( w );
                hb_cond_broadcast( pv->common->next_frame );
            }
            else if ( checkPtsOffset( w ) )
                hb_cond_broadcast( pv->common->next_frame );
            else
                hb_cond_timedwait( pv->common->next_frame, pv->common->mutex, 200 );
        }
        hb_unlock( pv->common->mutex );
    }

    hb_lock( pv->common->mutex );
    next_start = next->s.start - pv->common->video_pts_slip;
    hb_unlock( pv->common->mutex );

    /* Wait for start of point-to-point encoding */
    if( !pv->common->start_found )
    {
        hb_sync_video_t   * sync = &pv->type.video;

        if( next->size == 0 )
        {
            *buf_out = next;
            pv->common->start_found = 1;
            pv->common->first_pts[0] = INT64_MAX - 1;
            hb_cond_broadcast( pv->common->next_frame );

            /*
             * Push through any subtitle EOFs in case they 
             * were not synced through.
             */
            for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
            {
                subtitle = hb_list_item( job->list_subtitle, i );
                if( subtitle->config.dest == PASSTHRUSUB )
                {
                    hb_fifo_push( subtitle->fifo_out, hb_buffer_init( 0 ) );
                }
            }
            return HB_WORK_DONE;
        }
        if ( pv->common->count_frames < job->frame_to_start ||
             next->s.start < job->pts_to_start )
        {
            // Flush any subtitles that have pts prior to the
            // current frame
            for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
            {
                subtitle = hb_list_item( job->list_subtitle, i );
                while( ( sub = hb_fifo_see( subtitle->fifo_raw ) ) )
                {
                    if ( sub->s.start > next->s.start )
                        break;
                    sub = hb_fifo_get( subtitle->fifo_raw );
                    hb_buffer_close( &sub );
                }
            }
            hb_lock( pv->common->mutex );
            // Tell the audio threads what must be dropped
            pv->common->audio_pts_thresh = next_start + pv->common->video_pts_slip;
            hb_cond_broadcast( pv->common->next_frame );
            hb_unlock( pv->common->mutex );

            UpdateSearchState( w, next_start );
            hb_buffer_close( &next );

            return HB_WORK_OK;
        }
        hb_lock( pv->common->mutex );
        pv->common->audio_pts_thresh = 0;
        pv->common->audio_pts_slip += next_start;
        pv->common->video_pts_slip += next_start;
        next_start = 0;
        pv->common->start_found = 1;
        pv->common->count_frames = 0;
        hb_cond_broadcast( pv->common->next_frame );
        hb_unlock( pv->common->mutex );
        sync->st_first = 0;
    }

    if( !sync->cur )
    {
        sync->cur = next;
        if (next->size == 0)
        {
            /* we got an end-of-stream as our first video packet? 
             * Feed it downstream & signal that we're done. 
             */
            *buf_out = next;

            pv->common->start_found = 1;
            pv->common->first_pts[0] = INT64_MAX - 1;
            hb_cond_broadcast( pv->common->next_frame );

            /*
             * Push through any subtitle EOFs in case they 
             * were not synced through.
             */
            for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
            {
                subtitle = hb_list_item( job->list_subtitle, i );
                if( subtitle->config.dest == PASSTHRUSUB )
                {
                    hb_fifo_push( subtitle->fifo_out, hb_buffer_init( 0 ) );
                }
            }
            return HB_WORK_DONE;
        }
        return HB_WORK_OK;
    }
    cur = sync->cur;
    /* At this point we have a frame to process. Let's check
        1) if we will be able to push into the fifo ahead
        2) if the next frame is there already, since we need it to
           compute the duration of the current frame*/
    if( next->size == 0 )
    {
        hb_buffer_close( &next );

        pv->common->first_pts[0] = INT64_MAX - 1;
        cur->s.start = sync->next_start;
        cur->s.stop = cur->s.start + 90000. / ((double)job->vrate / (double)job->vrate_base);
        sync->next_start += cur->s.stop - cur->s.start;

        /* Make sure last frame is reflected in frame count */
        pv->common->count_frames++;

        /* Push the frame to the renderer */
        *buf_out = cur;
        sync->cur = NULL;

        /* we got an end-of-stream. Feed it downstream & signal that
         * we're done. Note that this means we drop the final frame of
         * video (we don't know its duration). On DVDs the final frame
         * is often strange and dropping it seems to be a good idea. */
        (*buf_out)->next = hb_buffer_init( 0 );

        /*
         * Push through any subtitle EOFs in case they were not synced through.
         */
        for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
        {
            subtitle = hb_list_item( job->list_subtitle, i );
            if( subtitle->config.dest == PASSTHRUSUB )
            {
                hb_fifo_push( subtitle->fifo_out, hb_buffer_init( 0 ) );
            }
        }
        pv->common->start_found = 1;
        hb_cond_broadcast( pv->common->next_frame );
        return HB_WORK_DONE;
    }

    /* Check for end of point-to-point frame encoding */
    if( job->frame_to_stop && pv->common->count_frames > job->frame_to_stop )
    {
        // Drop an empty buffer into our output to ensure that things
        // get flushed all the way out.
        hb_buffer_close( &sync->cur );
        hb_buffer_close( &next );
        *buf_out = hb_buffer_init( 0 );
        hb_log( "sync: reached %d frames, exiting early",
                pv->common->count_frames );

        /*
         * Push through any subtitle EOFs in case they were not synced through.
         */
        for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
        {
            subtitle = hb_list_item( job->list_subtitle, i );
            if( subtitle->config.dest == PASSTHRUSUB )
            {
                hb_fifo_push( subtitle->fifo_out, hb_buffer_init( 0 ) );
            }
        }
        return HB_WORK_DONE;
    }

    /* Check for end of point-to-point pts encoding */
    if( job->pts_to_stop && sync->next_start >= job->pts_to_stop )
    {
        // Drop an empty buffer into our output to ensure that things
        // get flushed all the way out.
        hb_log( "sync: reached pts %"PRId64", exiting early", cur->s.start );
        hb_buffer_close( &sync->cur );
        hb_buffer_close( &next );
        *buf_out = hb_buffer_init( 0 );

        /*
         * Push through any subtitle EOFs in case they were not synced through.
         */
        for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
        {
            subtitle = hb_list_item( job->list_subtitle, i );
            if( subtitle->config.dest == PASSTHRUSUB )
            {
                hb_fifo_push( subtitle->fifo_out, hb_buffer_init( 0 ) );
            }
        }
        return HB_WORK_DONE;
    }

    if( sync->first_frame )
    {
        /* This is our first frame */
        if ( cur->s.start > 0 )
        {
            /*
             * The first pts from a dvd should always be zero but
             * can be non-zero with a transport or program stream since
             * we're not guaranteed to start on an IDR frame. If we get
             * a non-zero initial PTS extend its duration so it behaves
             * as if it started at zero so that our audio timing will
             * be in sync.
             */
            hb_log( "sync: first pts is %"PRId64, cur->s.start );
            cur->s.start = 0;
        }
        sync->first_frame = 0;
    }

    /*
     * since the first frame is always 0 and the upstream reader code
     * is taking care of adjusting for pts discontinuities, we just have
     * to deal with the next frame's start being in the past. This can
     * happen when the PTS is adjusted after data loss but video frame
     * reordering causes some frames with the old clock to appear after
     * the clock change. This creates frames that overlap in time which
     * looks to us like time going backward. The downstream muxing code
     * can deal with overlaps of up to a frame time but anything larger
     * we handle by dropping frames here.
     */
    if ( next_start - cur->s.start <= 0 )
    {
        if ( sync->first_drop == 0 )
        {
            sync->first_drop = next_start;
        }
        ++sync->drop_count;
        if ( next->s.new_chap )
        {
            // don't drop a chapter mark when we drop the buffer
            sync->chap_mark = next->s.new_chap;
        }
        hb_buffer_close( &next );
        return HB_WORK_OK;
    }
    if ( sync->first_drop )
    {
        hb_log( "sync: video time didn't advance - dropped %d frames "
                "(delta %d ms, current %"PRId64", next %"PRId64", dur %d)",
                sync->drop_count, (int)( cur->s.start - sync->first_drop ) / 90,
                cur->s.start, next_start, (int)( next_start - cur->s.start ) );
        sync->first_drop = 0;
        sync->drop_count = 0;
    }

    /*
     * Track the video sequence number locally so that we can sync the audio
     * to it using the sequence number as well as the PTS.
     */
    sync->video_sequence = cur->sequence;
    
    /* Process subtitles that apply to this video frame */
    // NOTE: There is no logic in either subtitle-sync algorithm that waits
    // for the subtitle-decoder if it is lagging behind the video-decoder.
    //       
    // Therefore there is the implicit assumption that the subtitle-decoder 
    // is always faster than the video-decoder. This assumption is definitely 
    // incorrect in some cases where the SSA subtitle decoder is used.

    for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
    {
        int64_t sub_start, sub_stop, duration;

        subtitle = hb_list_item( job->list_subtitle, i );
        
        // Sanitize subtitle start and stop times, then pass to 
        // muxer or renderer filter.
        while ( ( sub = hb_fifo_see( subtitle->fifo_raw ) ) != NULL )
        {
            hb_lock( pv->common->mutex );
            sub_start = sub->s.start - pv->common->video_pts_slip;
            hb_unlock( pv->common->mutex );

            if (sub->s.stop == -1)
            {
                if (subtitle->config.dest != RENDERSUB &&
                    hb_fifo_size( subtitle->fifo_raw ) < 2)
                {
                    // For passthru subs, we want to wait for the
                    // next subtitle so that we can fill in the stop time.
                    // This way the muxer can compute the duration of
                    // the subtitle.
                    //
                    // For render subs, we need to ensure that they
                    // get to the renderer before the associated video
                    // that they are to be applied to.  It is the 
                    // responsibility of the renderer to handle
                    // stop == -1.
                    break;
                }
            }

            sub = hb_fifo_get( subtitle->fifo_raw );
            if ( sub->s.stop == -1 )
            {
                hb_buffer_t *next;
                next = hb_fifo_see( subtitle->fifo_raw );
                if (next != NULL)
                    sub->s.stop = next->s.start;
            }
            // Need to re-write subtitle timestamps to account
            // for any slippage.
            sub_stop = -1;
            if ( sub->s.stop != -1 )
            {
                duration = sub->s.stop - sub->s.start;
                sub_stop = sub_start + duration;
            }

            sub->s.start = sub_start;
            sub->s.stop = sub_stop;

            hb_fifo_push( subtitle->fifo_out, sub );
        }
    }

    /*
     * Adjust the pts of the current frame so that it's contiguous
     * with the previous frame. The start time of the current frame
     * has to be the end time of the previous frame and the stop
     * time has to be the start of the next frame.  We don't
     * make any adjustments to the source timestamps other than removing
     * the clock offsets (which also removes pts discontinuities).
     * This means we automatically encode at the source's frame rate.
     * MP2 uses an implicit duration (frames end when the next frame
     * starts) but more advanced containers like MP4 use an explicit
     * duration. Since we're looking ahead one frame we set the
     * explicit stop time from the start time of the next frame.
     */
    *buf_out = cur;
    int64_t duration = next_start - cur->s.start;
    sync->cur = cur = next;
    cur->sub = NULL;
    cur->s.start -= pv->common->video_pts_slip;
    cur->s.stop -= pv->common->video_pts_slip;
    sync->pts_skip = 0;
    if ( duration <= 0 )
    {
        hb_log( "sync: invalid video duration %"PRId64", start %"PRId64", next %"PRId64"",
                duration, cur->s.start, next_start );
    }

    (*buf_out)->s.start = sync->next_start;
    sync->next_start += duration;
    (*buf_out)->s.stop = sync->next_start;

    if ( sync->chap_mark )
    {
        // we have a pending chapter mark from a recent drop - put it on this
        // buffer (this may make it one frame late but we can't do any better).
        (*buf_out)->s.new_chap = sync->chap_mark;
        sync->chap_mark = 0;
    }

    /* Update UI */
    UpdateState( w );

    return HB_WORK_OK;
}
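The last comment block in syncVideoWork explains the timestamp rewrite: the outgoing frame's start is the running output clock, and its stop comes from the gap measured against the next frame's start. A small stand-alone sketch of that bookkeeping, using a bare struct in place of hb_buffer_t (everything here is illustrative, not the real sync state):

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Bare stand-in for the timing fields of hb_buffer_t. */
typedef struct { int64_t start, stop; } frame_ts_t;

/* Rewrite 'cur' so its timestamps are contiguous with the previous output
 * frame: start = end of previous frame, stop = start + (next - cur) gap.
 * 'next_start' plays the role of sync->next_start above. */
static void emit_contiguous( frame_ts_t *cur, int64_t next_frame_start,
                             int64_t *next_start )
{
    int64_t duration = next_frame_start - cur->start;
    cur->start   = *next_start;
    *next_start += duration;
    cur->stop    = *next_start;
}

int main( void )
{
    /* Source frames roughly 3003 ticks apart (29.97 fps) with some jitter. */
    frame_ts_t frames[] = { { 0, 0 }, { 3003, 0 }, { 6010, 0 }, { 9009, 0 } };
    int64_t clock = 0;
    int i;
    for( i = 0; i + 1 < 4; i++ )
    {
        emit_contiguous( &frames[i], frames[i + 1].start, &clock );
        printf( "frame %d: start %" PRId64 " stop %" PRId64 "\n",
                i, frames[i].start, frames[i].stop );
    }
    return 0;
}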
Example #23
File: reader.c Project: GTRsdk/HandBrake
static int hb_reader_init( hb_work_object_t * w, hb_job_t * job )
{
    hb_work_private_t * r;

    r = calloc( sizeof( hb_work_private_t ), 1 );
    w->private_data = r;

    r->job   = job;
    r->title = job->title;
    r->die   = job->die;
    r->sequence = 0;

    r->st_slots = 4;
    r->stream_timing = calloc( sizeof(stream_timing_t), r->st_slots );
    r->stream_timing[0].id = r->title->video_id;
    r->stream_timing[0].average = 90000. * (double)job->vrate_base /
                                           (double)job->vrate;
    r->stream_timing[0].last = -r->stream_timing[0].average;
    r->stream_timing[0].valid = 1;
    r->stream_timing[0].startup = 10;
    r->stream_timing[1].id = -1;

    r->demux.last_scr = -1;

    if ( !job->pts_to_start )
        r->start_found = 1;
    else
    {
        // The frame at the actual start time may not be an i-frame
        // so can't be decoded without starting a little early.
        // sync.c will drop early frames.
        r->pts_to_start = MAX(0, job->pts_to_start - 180000);
    }

    if (job->pts_to_stop)
    {
        r->duration = job->pts_to_start + job->pts_to_stop;
    }
    else if (job->frame_to_stop)
    {
        int frames = job->frame_to_start + job->frame_to_stop;
        r->duration = (int64_t)frames * job->title->rate_base * 90000 / job->title->rate;
    }
    else
    {
        hb_chapter_t *chapter;
        int ii;

        r->duration = 0;
        for (ii = job->chapter_start; ii < job->chapter_end; ii++)
        {
            chapter = hb_list_item( job->title->list_chapter, ii - 1);
            r->duration += chapter->duration;
        }
    }

    // The stream needs to be open before starting the reader thread
    // to prevent a race with decoders that may share information
    // with the reader. Specifically avcodec needs this.
    if ( hb_reader_open( r ) )
    {
        free( r->stream_timing );
        free( r );
        return 1;
    }
    return 0;
}
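One detail worth calling out in hb_reader_init is the frame_to_stop path, which converts a frame count into a duration in 90 kHz ticks through the title's rate fraction. A short sketch of that conversion; the 27000000 / 1126125 pair for 23.976 fps follows HandBrake's usual rate/rate_base convention, but the numbers here are purely illustrative:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Duration of 'frames' video frames in 90 kHz ticks, for a frame rate
 * expressed as rate / rate_base (title->rate / title->rate_base). */
static int64_t frames_to_ticks( int frames, int rate, int rate_base )
{
    return (int64_t)frames * rate_base * 90000 / rate;
}

int main( void )
{
    /* 240 frames of NTSC film: 240 / 23.976 fps ~= 10.01 s ~= 900900 ticks. */
    printf( "%" PRId64 " ticks\n", frames_to_ticks( 240, 27000000, 1126125 ) );
    return 0;
}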
Example #24
File: scan.c Project: GTRsdk/HandBrake
static void ScanFunc( void * _data )
{
    hb_scan_t  * data = (hb_scan_t *) _data;
    hb_title_t * title;
    int          i;
    int          feature = 0;

    data->bd = NULL;
    data->dvd = NULL;
    data->stream = NULL;

    /* Try to open the path as a DVD. If it fails, try as a file */
    if( ( data->bd = hb_bd_init( data->path ) ) )
    {
        hb_log( "scan: BD has %d title(s)",
                hb_bd_title_count( data->bd ) );
        if( data->title_index )
        {
            /* Scan this title only */
            hb_list_add( data->list_title, hb_bd_title_scan( data->bd,
                         data->title_index, 0 ) );
        }
        else
        {
            /* Scan all titles */
            for( i = 0; i < hb_bd_title_count( data->bd ); i++ )
            {
                hb_list_add( data->list_title, hb_bd_title_scan( data->bd, 
                             i + 1, data->min_title_duration ) );
            }
            feature = hb_bd_main_feature( data->bd, data->list_title );
        }
    }
    else if( ( data->dvd = hb_dvd_init( data->path ) ) )
    {
        hb_log( "scan: DVD has %d title(s)",
                hb_dvd_title_count( data->dvd ) );
        if( data->title_index )
        {
            /* Scan this title only */
            hb_list_add( data->list_title, hb_dvd_title_scan( data->dvd,
                            data->title_index, 0 ) );
        }
        else
        {
            /* Scan all titles */
            for( i = 0; i < hb_dvd_title_count( data->dvd ); i++ )
            {
                hb_list_add( data->list_title, hb_dvd_title_scan( data->dvd, 
                            i + 1, data->min_title_duration ) );
            }
            feature = hb_dvd_main_feature( data->dvd, data->list_title );
        }
    }
    else if ( ( data->batch = hb_batch_init( data->path ) ) )
    {
        if( data->title_index )
        {
            /* Scan this title only */
            title = hb_batch_title_scan( data->batch, data->title_index );
            if ( title )
            {
                hb_list_add( data->list_title, title );
            }
        }
        else
        {
            /* Scan all titles */
            for( i = 0; i < hb_batch_title_count( data->batch ); i++ )
            {
                hb_title_t * title;

                title = hb_batch_title_scan( data->batch, i + 1 );
                if ( title != NULL )
                {
                    hb_list_add( data->list_title, title );
                }
            }
        }
    }
    else
    {
        hb_title_t * title = hb_title_init( data->path, 0 );
        if ( (data->stream = hb_stream_open( data->path, title, 1 ) ) != NULL )
        {
            title = hb_stream_title_scan( data->stream, title );
            if ( title )
                hb_list_add( data->list_title, title );
        }
        else
        {
            hb_title_close( &title );
            hb_log( "scan: unrecognized file type" );
            return;
        }
    }

    for( i = 0; i < hb_list_count( data->list_title ); )
    {
        int j;
        hb_state_t state;
        hb_audio_t * audio;

        if ( *data->die )
        {
            goto finish;
        }
        title = hb_list_item( data->list_title, i );

#define p state.param.scanning
        /* Update the UI */
        state.state   = HB_STATE_SCANNING;
        p.title_cur   = title->index;
        p.title_count = data->dvd ? hb_dvd_title_count( data->dvd ) : 
                        data->bd ? hb_bd_title_count( data->bd ) :
                        data->batch ? hb_batch_title_count( data->batch ) :
                                   hb_list_count(data->list_title);
        hb_set_state( data->h, &state );
#undef p

        /* Decode previews */
        /* this will also detect more AC3 / DTS information */
        if( !DecodePreviews( data, title ) )
        {
            /* TODO: free things */
            hb_list_rem( data->list_title, title );
            for( j = 0; j < hb_list_count( title->list_audio ); j++)
            {
                audio = hb_list_item( title->list_audio, j );
                if ( audio->priv.scan_cache )
                {
                    hb_fifo_flush( audio->priv.scan_cache );
                    hb_fifo_close( &audio->priv.scan_cache );
                }
            }
            hb_title_close( &title );
            continue;
        }

        /* Make sure we found audio rates and bitrates */
        for( j = 0; j < hb_list_count( title->list_audio ); )
        {
            audio = hb_list_item( title->list_audio, j );
            if ( audio->priv.scan_cache )
            {
                hb_fifo_flush( audio->priv.scan_cache );
                hb_fifo_close( &audio->priv.scan_cache );
            }
            if( !audio->config.in.bitrate )
            {
                hb_log( "scan: removing audio 0x%x because no bitrate found",
                        audio->id );
                hb_list_rem( title->list_audio, audio );
                free( audio );
                continue;
            }
            j++;
        }

        if ( data->dvd || data->bd )
        {
            // The subtitle width and height need to be set to the
            // title width and height for DVDs.  Title width and
            // height don't get set until we decode previews, so
            // we can't set subtitle width/height until we get here.
            for( j = 0; j < hb_list_count( title->list_subtitle ); j++ )
            {
                hb_subtitle_t *subtitle = hb_list_item( title->list_subtitle, j );
                if ( subtitle->source == VOBSUB || subtitle->source == PGSSUB )
                {
                    subtitle->width = title->width;
                    subtitle->height = title->height;
                }
            }
        }
        i++;
    }

    /* Init jobs templates */
    for( i = 0; i < hb_list_count( data->list_title ); i++ )
    {
        hb_job_t * job;

        title      = hb_list_item( data->list_title, i );
        job        = calloc( sizeof( hb_job_t ), 1 );
        title->job = job;

        job->title = title;
        job->feature = feature;

        /* Set defaults settings */
        job->chapter_start = 1;
        job->chapter_end   = hb_list_count( title->list_chapter );

        /* Autocrop by default. Gnark gnark */
        memcpy( job->crop, title->crop, 4 * sizeof( int ) );

        /* Preserve a source's pixel aspect, if it's available. */
        if( title->pixel_aspect_width && title->pixel_aspect_height )
        {
            job->anamorphic.par_width  = title->pixel_aspect_width;
            job->anamorphic.par_height = title->pixel_aspect_height;
        }

        if( title->aspect != 0 && title->aspect != 1. &&
            !job->anamorphic.par_width && !job->anamorphic.par_height)
        {
            hb_reduce( &job->anamorphic.par_width, &job->anamorphic.par_height,
                       (int)(title->aspect * title->height + 0.5), title->width );
        }

        job->width = title->width - job->crop[2] - job->crop[3];
        hb_fix_aspect( job, HB_KEEP_WIDTH );
        if( job->height > title->height - job->crop[0] - job->crop[1] )
        {
            job->height = title->height - job->crop[0] - job->crop[1];
            hb_fix_aspect( job, HB_KEEP_HEIGHT );
        }

        hb_log( "scan: title (%d) job->width:%d, job->height:%d",
                i, job->width, job->height );

        job->keep_ratio = 1;

        job->vcodec     = HB_VCODEC_FFMPEG_MPEG4;
        job->vquality   = -1.0;
        job->vbitrate   = 1000;
        job->pass       = 0;
        job->vrate      = title->rate;
        job->vrate_base = title->rate_base;

        job->list_audio = hb_list_init();
        job->list_subtitle = hb_list_init();
        job->list_filter = hb_list_init();

        job->mux = HB_MUX_MP4;
    }

finish:

    if( data->bd )
    {
        hb_bd_close( &data->bd );
    }
    if( data->dvd )
    {
        hb_dvd_close( &data->dvd );
    }
    if (data->stream)
    {
        hb_stream_close(&data->stream);
    }
    if( data->batch )
    {
        hb_batch_close( &data->batch );
    }
    free( data->path );
    free( data );
    _data = NULL;
}
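The job-template code above calls hb_reduce() to turn aspect * height : width into a reduced pixel aspect ratio. A hedged stand-in for that reduction as a plain Euclidean GCD step (the real hb_reduce lives in libhb's common code and may do extra range handling):

#include <stdio.h>

/* Reduce the fraction x/y to lowest terms; a minimal stand-in for hb_reduce(). */
static void reduce( int *out_num, int *out_den, int x, int y )
{
    int a = x, b = y, t;
    while( b )              /* Euclid's algorithm */
    {
        t = a % b;
        a = b;
        b = t;
    }
    if( a == 0 ) a = 1;     /* avoid dividing by zero when x or y is 0 */
    *out_num = x / a;
    *out_den = y / a;
}

int main( void )
{
    /* Same pattern as the scan code: PAR = aspect * height : width. */
    int par_w, par_h;
    double aspect = 16.0 / 9.0;
    reduce( &par_w, &par_h, (int)( aspect * 480 + 0.5 ), 720 );
    printf( "PAR %d:%d\n", par_w, par_h );   /* 853:720 with this rounding */
    return 0;
}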
Example #25
File: scan.c Project: ming-hai/HandBrake
/***********************************************************************
 * DecodePreviews
 ***********************************************************************
 * Decode 10 pictures for the given title.
 * It assumes that data->reader and data->vts have successfully been
 * DVDOpen()ed and ifoOpen()ed.
 **********************************************************************/
static int DecodePreviews( hb_scan_t * data, hb_title_t * title, int flush )
{
    int                i, npreviews = 0, abort = 0;
    hb_buffer_t      * buf, * buf_es;
    hb_buffer_list_t   list_es;
    int                progressive_count = 0;
    int                pulldown_count = 0;
    int                doubled_frame_count = 0;
    int                interlaced_preview_count = 0;
    int                vid_samples = 0;
    int                frame_wait = 0;
    int                cc_wait = 10;
    int                frames;
    hb_stream_t      * stream = NULL;
    info_list_t      * info_list;
    int                abort_audio = 0;

    info_list = calloc(data->preview_count+1, sizeof(*info_list));
    crop_record_t *crops = crop_record_init( data->preview_count );

    hb_buffer_list_clear(&list_es);

    if( data->batch )
    {
        hb_log( "scan: decoding previews for title %d (%s)", title->index, title->path );
    }
    else
    {
        hb_log( "scan: decoding previews for title %d", title->index );
    }

    if (data->bd)
    {
        hb_bd_start( data->bd, title );
        hb_log( "scan: title angle(s) %d", title->angle_count );
    }
    else if (data->dvd)
    {
        hb_dvd_start( data->dvd, title, 1 );
        title->angle_count = hb_dvd_angle_count( data->dvd );
        hb_log( "scan: title angle(s) %d", title->angle_count );
    }
    else if (data->batch)
    {
        stream = hb_stream_open(data->h, title->path, title, 0);
    }
    else if (data->stream)
    {
        stream = hb_stream_open(data->h, data->path, title, 0);
    }

    if (title->video_codec == WORK_NONE)
    {
        hb_error("No video decoder set!");
        free(info_list);
        crop_record_free(crops);
        hb_stream_close(&stream);
        return 0;
    }
    hb_work_object_t *vid_decoder = hb_get_work(data->h, title->video_codec);
    vid_decoder->codec_param = title->video_codec_param;
    vid_decoder->title = title;

    if (vid_decoder->init(vid_decoder, NULL))
    {
        hb_error("Decoder init failed!");
        free(info_list);
        crop_record_free(crops);
        free( vid_decoder );
        hb_stream_close(&stream);
        return 0;
    }

    for( i = 0; i < data->preview_count; i++ )
    {
        int j;

        UpdateState3(data, i + 1);

        if ( *data->die )
        {
            free( info_list );
            crop_record_free( crops );
            vid_decoder->close( vid_decoder );
            free( vid_decoder );
            hb_stream_close(&stream);
            return 0;
        }
        if (data->bd)
        {
            if( !hb_bd_seek( data->bd, (float) ( i + 1 ) / ( data->preview_count + 1.0 ) ) )
            {
                continue;
            }
        }
        if (data->dvd)
        {
            if( !hb_dvd_seek( data->dvd, (float) ( i + 1 ) / ( data->preview_count + 1.0 ) ) )
            {
                continue;
            }
        }
        else if (stream)
        {
            /* we start reading streams at zero rather than 1/11 because
             * short streams may have only one sequence header in the entire
             * file and we need it to decode any previews.
             *
             * Also, seeking to position 0 loses the palette of avi files
             * so skip initial seek */
            if (i != 0)
            {
                if (!hb_stream_seek(stream,
                                    (float)i / (data->preview_count + 1.0)))
                {
                    continue;
                }
            }
            else
            {
                hb_stream_set_need_keyframe(stream, 1);
            }
        }

        hb_deep_log( 2, "scan: preview %d", i + 1 );

        if (flush && vid_decoder->flush)
            vid_decoder->flush( vid_decoder );
        if (title->flags & HBTF_NO_IDR)
        {
            if (!flush)
            {
                // If we are doing the first previews decode attempt,
                // set this threshold high so that we get the best
                // quality frames possible.
                frame_wait = 100;
            }
            else
            {
                // If we failed to get enough valid frames in the first
                // previews decode attempt, lower the threshold to improve
                // our chances of getting something to work with.
                frame_wait = 10;
            }
        }
        else
        {
            // For certain mpeg-2 streams, libav is delivering a
            // dummy first frame that is all black.  So always skip
            // one frame
            frame_wait = 1;
        }
        frames = 0;

        hb_buffer_t * vid_buf = NULL, * last_vid_buf = NULL;

        int packets = 0;
        vid_decoder->frame_count = 0;
        while (vid_decoder->frame_count < PREVIEW_READ_THRESH ||
              (!AllAudioOK(title) && packets < 10000))
        {
            if ((buf = read_buf(data, stream)) == NULL)
            {
                // If we reach EOF and no audio, don't continue looking for
                // audio
                abort_audio = 1;
                if (vid_buf != NULL || last_vid_buf != NULL)
                {
                    break;
                }
                hb_log("Warning: Could not read data for preview %d, skipped",
                       i + 1 );

                // If we reach EOF and no video, don't continue looking for
                // video
                abort = 1;
                goto skip_preview;
            }

            packets++;
            if (buf->size <= 0)
            {
                // Ignore "null" frames
                hb_buffer_close(&buf);
                continue;
            }

            (hb_demux[title->demuxer])(buf, &list_es, 0 );

            while ((buf_es = hb_buffer_list_rem_head(&list_es)) != NULL)
            {
                if( buf_es->s.id == title->video_id && vid_buf == NULL )
                {
                    vid_decoder->work( vid_decoder, &buf_es, &vid_buf );
                    // There are 2 conditions we decode additional
                    // video frames for during scan.
                    // 1. We did not detect IDR frames, so the initial video
                    //    frames may be corrupt.  We decode extra frames to
                    //    increase the probability of a complete preview frame.
                    // 2. Some frames do not contain CC data, even though
                    //    CCs are present in the stream.  So we need to decode
                    //    additional frames to find the CCs.
                    if (vid_buf != NULL && (frame_wait || cc_wait))
                    {
                        hb_work_info_t vid_info;
                        if (vid_decoder->info(vid_decoder, &vid_info))
                        {
                            if (is_close_to(vid_info.rate.den, 900900, 100) &&
                                (vid_buf->s.flags & PIC_FLAG_REPEAT_FIRST_FIELD))
                            {
                                /* Potentially soft telecine material */
                                pulldown_count++;
                            }

                            if (vid_buf->s.flags & PIC_FLAG_REPEAT_FRAME)
                            {
                                // AVCHD-Lite specifies that all streams are
                                // 50 or 60 fps.  To produce 25 or 30 fps, camera
                                // makers are repeating all frames.
                                doubled_frame_count++;
                            }

                            if (is_close_to(vid_info.rate.den, 1126125, 100 ))
                            {
                                // Frame FPS is 23.976 (meaning it's
                                // progressive), so start keeping track of
                                // how many are reporting at that speed. When
                                // enough show up that way, we want to make
                                // that the overall title FPS.
                                progressive_count++;
                            }
                            vid_samples++;
                        }

                        if (frames > 0 && vid_buf->s.frametype == HB_FRAME_I)
                            frame_wait = 0;
                        if (frame_wait || cc_wait)
                        {
                            hb_buffer_close(&last_vid_buf);
                            last_vid_buf = vid_buf;
                            vid_buf = NULL;
                            if (frame_wait) frame_wait--;
                            if (cc_wait) cc_wait--;
                        }
                        frames++;
                    }
                }
                else if (!AllAudioOK(title) && !abort_audio)
                {
                    LookForAudio( data, title, buf_es );
                    buf_es = NULL;
                }
                if ( buf_es )
                    hb_buffer_close( &buf_es );
            }

            if (vid_buf && (abort_audio || AllAudioOK(title)))
                break;
        }
        hb_buffer_list_close(&list_es);

        if (vid_buf == NULL)
        {
            vid_buf = last_vid_buf;
            last_vid_buf = NULL;
        }
        hb_buffer_close(&last_vid_buf);

        if (vid_buf == NULL)
        {
            hb_log( "scan: could not get a decoded picture" );
            continue;
        }

        /* Get size and rate infos */

        hb_work_info_t vid_info;
        if( !vid_decoder->info( vid_decoder, &vid_info ) )
        {
            /*
             * Could not fill vid_info, don't continue and try to use vid_info
             * in this case.
             */
            hb_log( "scan: could not get a video information" );
            hb_buffer_close( &vid_buf );
            continue;
        }

        if (vid_info.geometry.width  != vid_buf->f.width ||
            vid_info.geometry.height != vid_buf->f.height)
        {
            hb_log( "scan: video geometry information does not match buffer" );
            hb_buffer_close( &vid_buf );
            continue;
        }
        remember_info( info_list, &vid_info );

        /* Check preview for interlacing artifacts */
        if( hb_detect_comb( vid_buf, 10, 30, 9, 10, 30, 9 ) )
        {
            hb_deep_log( 2, "Interlacing detected in preview frame %i", i+1);
            interlaced_preview_count++;
        }

        if( data->store_previews )
        {
            hb_save_preview( data->h, title->index, i, vid_buf );
        }

        /* Detect black borders */

        int top, bottom, left, right;
        int h4 = vid_info.geometry.height / 4, w4 = vid_info.geometry.width / 4;

        // When widescreen content is matted to 16:9 or 4:3 there's sometimes
        // a thin border on the outer edge of the matte. On TV content it can be
        // "line 21" VBI data that's normally hidden in the overscan. For HD
        // content it can just be a diagnostic added in post production so that
        // the frame borders are visible. We try to ignore these borders so
        // we can crop the matte. The border width depends on the resolution
        // (12 pixels on 1080i looks visually the same as 4 pixels on 480i)
        // so we allow the border to be up to 1% of the frame height.
        const int border = vid_info.geometry.height / 100;

        for ( top = border; top < h4; ++top )
        {
            if ( ! row_all_dark( vid_buf, top ) )
                break;
        }
        if ( top <= border )
        {
            // we never made it past the border region - see if the rows we
            // didn't check are dark or if we shouldn't crop at all.
            for ( top = 0; top < border; ++top )
            {
                if ( ! row_all_dark( vid_buf, top ) )
                    break;
            }
            if ( top >= border )
            {
                top = 0;
            }
        }
        for ( bottom = border; bottom < h4; ++bottom )
        {
            if ( ! row_all_dark( vid_buf, vid_info.geometry.height - 1 - bottom ) )
                break;
        }
        if ( bottom <= border )
        {
            for ( bottom = 0; bottom < border; ++bottom )
            {
                if ( ! row_all_dark( vid_buf, vid_info.geometry.height - 1 - bottom ) )
                    break;
            }
            if ( bottom >= border )
            {
                bottom = 0;
            }
        }
        for ( left = 0; left < w4; ++left )
        {
            if ( ! column_all_dark( vid_buf, top, bottom, left ) )
                break;
        }
        for ( right = 0; right < w4; ++right )
        {
            if ( ! column_all_dark( vid_buf, top, bottom, vid_info.geometry.width - 1 - right ) )
                break;
        }

        // only record the result if all the crops are less than a quarter of
        // the frame otherwise we can get fooled by frames with a lot of black
        // like titles, credits & fade-thru-black transitions.
        if ( top < h4 && bottom < h4 && left < w4 && right < w4 )
        {
            record_crop( crops, top, bottom, left, right );
        }
        ++npreviews;

skip_preview:
        /* Make sure we found audio rates and bitrates */
        for( j = 0; j < hb_list_count( title->list_audio ); j++ )
        {
            hb_audio_t * audio = hb_list_item( title->list_audio, j );
            if ( audio->priv.scan_cache )
            {
                hb_fifo_flush( audio->priv.scan_cache );
            }
        }
        if (vid_buf)
        {
            hb_buffer_close( &vid_buf );
        }
        if (abort)
        {
            break;
        }
    }
    UpdateState3(data, i);

    vid_decoder->close( vid_decoder );
    free( vid_decoder );

    if (stream != NULL)
    {
        hb_stream_close(&stream);
    }

    if ( npreviews )
    {
        // use the most common frame info for our final title dimensions
        hb_work_info_t vid_info;
        most_common_info( info_list, &vid_info );

        title->has_resolution_change = has_resolution_change( info_list );
        if ( title->video_codec_name == NULL )
        {
            title->video_codec_name = strdup( vid_info.name );
        }
        title->geometry.width = vid_info.geometry.width;
        title->geometry.height = vid_info.geometry.height;
        if (vid_info.rate.num && vid_info.rate.den)
        {
            // if the frame rate is very close to one of our "common"
            // framerates, assume it actually is said frame rate;
            // e.g. some 24000/1001 sources may have a rate.den of 1126124
            // instead of 1126125
            const hb_rate_t *video_framerate = NULL;
            while ((video_framerate = hb_video_framerate_get_next(video_framerate)) != NULL)
            {
                if (is_close_to(vid_info.rate.den, video_framerate->rate, 100))
                {
                    vid_info.rate.den = video_framerate->rate;
                    break;
                }
            }
            title->vrate = vid_info.rate;
            if( vid_info.rate.den == 900900 )
            {
                if (vid_samples >= 4 && pulldown_count >= vid_samples / 4)
                {
                    title->vrate.den = 1126125;
                    hb_deep_log( 2, "Pulldown detected, setting fps to 23.976" );
                }
                if (vid_samples >= 2 && progressive_count >= vid_samples / 2)
                {
                    // We've already deduced that the frame rate is 23.976,
                    // so set it back again.
                    title->vrate.den = 1126125;
                    hb_deep_log( 2, "Title's mostly NTSC Film, setting fps to 23.976" );
                }
            }
            if (vid_samples >= 2 && doubled_frame_count >= 3 * vid_samples / 4)
            {
                // We've detected that a significant number of the frames
                // have been doubled in duration by repeat flags.
                title->vrate.den = 2 * vid_info.rate.den;
                hb_deep_log(2, "Repeat frames detected, setting fps to %.3f",
                            (float)title->vrate.num / title->vrate.den );
            }
        }
        title->video_bitrate = vid_info.bitrate;

        if( vid_info.geometry.par.num && vid_info.geometry.par.den )
        {
            title->geometry.par = vid_info.geometry.par;
        }
        title->color_prim = vid_info.color_prim;
        title->color_transfer = vid_info.color_transfer;
        title->color_matrix = vid_info.color_matrix;

        title->video_decode_support = vid_info.video_decode_support;

        // compute the aspect ratio based on the storage dimensions and PAR.
        hb_reduce(&title->dar.num, &title->dar.den,
                  title->geometry.par.num * title->geometry.width,
                  title->geometry.height * title->geometry.par.den);

        // For unknown reasons some French PAL DVDs put the original
        // content's aspect ratio into the mpeg PAR even though it's
        // the wrong PAR for the DVD. Apparently they rely on the fact
        // that DVD players ignore the content PAR and just use the
        // aspect ratio from the DVD metadata. So, if the aspect computed
        // from the PAR is different from the container's aspect we use
        // the container's aspect & recompute the PAR from it.
        if (data->dvd &&
            (title->dar.num != title->container_dar.num ||
             title->dar.den != title->container_dar.den))
        {
            hb_log("scan: content PAR gives wrong aspect %d:%d; "
                   "using container aspect %d:%d",
                   title->dar.num, title->dar.den,
                   title->container_dar.num, title->container_dar.den);
            title->dar = title->container_dar;
            hb_reduce(&title->geometry.par.num, &title->geometry.par.den,
                      title->geometry.height * title->dar.num,
                      title->geometry.width * title->dar.den);
        }

        // don't try to crop unless we got at least 3 previews
        if ( crops->n > 2 )
        {
            sort_crops( crops );
            // The next line selects median cropping - at least
            // 50% of the frames will have their borders removed.
            // Other possible choices are loose cropping (i = 0) where
            // no non-black pixels will be cropped from any frame and a
            // tight cropping (i = crops->n - (crops->n >> 2)) where at
            // least 75% of the frames will have their borders removed.
            i = crops->n >> 1;
            title->crop[0] = EVEN( crops->t[i] );
            title->crop[1] = EVEN( crops->b[i] );
            title->crop[2] = EVEN( crops->l[i] );
            title->crop[3] = EVEN( crops->r[i] );
        }

        hb_log( "scan: %d previews, %dx%d, %.3f fps, autocrop = %d/%d/%d/%d, "
                "aspect %s, PAR %d:%d",
                npreviews, title->geometry.width, title->geometry.height,
                (float)title->vrate.num / title->vrate.den,
                title->crop[0], title->crop[1], title->crop[2], title->crop[3],
                aspect_to_string(&title->dar),
                title->geometry.par.num, title->geometry.par.den);

        if (title->video_decode_support != HB_DECODE_SUPPORT_SW)
        {
            hb_log("scan: supported video decoders:%s%s",
                   !(title->video_decode_support & HB_DECODE_SUPPORT_SW)    ? "" : " avcodec",
                   !(title->video_decode_support & HB_DECODE_SUPPORT_QSV)   ? "" : " qsv");
        }

        if( interlaced_preview_count >= ( npreviews / 2 ) )
        {
            hb_log("Title is likely interlaced or telecined (%i out of %i previews). You should do something about that.",
                   interlaced_preview_count, npreviews);
            title->detected_interlacing = 1;
        }
        else
        {
            title->detected_interlacing = 0;
        }
    }
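DecodePreviews leans on is_close_to() to snap measured frame durations onto the nearest common rate (900900 for 29.97 fps, 1126125 for 23.976 fps, in 27 MHz units). The sketch below assumes is_close_to() is a simple absolute-difference tolerance check and uses a hard-coded duration table in place of hb_video_framerate_get_next():

#include <stdio.h>
#include <stdlib.h>

/* Assumed behaviour of is_close_to(): value within +/- tolerance of target. */
static int is_close_to( int value, int target, int tolerance )
{
    return abs( value - target ) < tolerance;
}

int main( void )
{
    /* A few common frame durations in 27 MHz units (27000000 / duration = fps). */
    static const int common_durations[] = { 900900, 1126125, 1080000, 540000 };
    int measured = 1126124;   /* a 24000/1001 source reporting one tick off */
    int snapped  = measured;
    size_t i;

    for( i = 0; i < sizeof( common_durations ) / sizeof( common_durations[0] ); i++ )
    {
        if( is_close_to( measured, common_durations[i], 100 ) )
        {
            snapped = common_durations[i];
            break;
        }
    }
    printf( "measured %d -> snapped %d (%.3f fps)\n",
            measured, snapped, 27000000.0 / snapped );
    return 0;
}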
Example #26
File: scan.c Project: ming-hai/HandBrake
static void ScanFunc( void * _data )
{
    hb_scan_t  * data = (hb_scan_t *) _data;
    hb_title_t * title;
    int          i;
    int          feature = 0;

    data->bd = NULL;
    data->dvd = NULL;
    data->stream = NULL;

    /* Try to open the path as a DVD. If it fails, try as a file */
    if( ( data->bd = hb_bd_init( data->h, data->path ) ) )
    {
        hb_log( "scan: BD has %d title(s)",
                hb_bd_title_count( data->bd ) );
        if( data->title_index )
        {
            /* Scan this title only */
            hb_list_add( data->title_set->list_title,
                         hb_bd_title_scan( data->bd,
                         data->title_index, 0 ) );
        }
        else
        {
            /* Scan all titles */
            for( i = 0; i < hb_bd_title_count( data->bd ); i++ )
            {
                UpdateState1(data, i + 1);
                hb_list_add( data->title_set->list_title,
                             hb_bd_title_scan( data->bd,
                             i + 1, data->min_title_duration ) );
            }
            feature = hb_bd_main_feature( data->bd,
                                          data->title_set->list_title );
        }
    }
    else if( ( data->dvd = hb_dvd_init( data->h, data->path ) ) )
    {
        hb_log( "scan: DVD has %d title(s)",
                hb_dvd_title_count( data->dvd ) );
        if( data->title_index )
        {
            /* Scan this title only */
            hb_list_add( data->title_set->list_title,
                         hb_dvd_title_scan( data->dvd,
                            data->title_index, 0 ) );
        }
        else
        {
            /* Scan all titles */
            for( i = 0; i < hb_dvd_title_count( data->dvd ); i++ )
            {
                UpdateState1(data, i + 1);
                hb_list_add( data->title_set->list_title,
                             hb_dvd_title_scan( data->dvd,
                            i + 1, data->min_title_duration ) );
            }
            feature = hb_dvd_main_feature( data->dvd,
                                           data->title_set->list_title );
        }
    }
    else if ( ( data->batch = hb_batch_init( data->h, data->path ) ) )
    {
        if( data->title_index )
        {
            /* Scan this title only */
            title = hb_batch_title_scan(data->batch, data->title_index, 0);
            if ( title )
            {
                hb_list_add( data->title_set->list_title, title );
            }
        }
        else
        {
            /* Scan all titles */
            for( i = 0; i < hb_batch_title_count( data->batch ); i++ )
            {
                hb_title_t * title;

                UpdateState1(data, i + 1);
                title = hb_batch_title_scan(data->batch, i + 1,
                                            data->min_title_duration);
                if ( title != NULL )
                {
                    hb_list_add( data->title_set->list_title, title );
                }
            }
        }
    }
    else
    {
        // Title index 0 is not a valid title number and means scan all titles.
        // So set title index to 1 in this scenario.
        //
        // Otherwise, set title index in new title to the index that was
        // requested.  This preserves the original index created in batch
        // mode.
        if (data->title_index == 0)
            data->title_index = 1;
        hb_title_t * title = hb_title_init( data->path, data->title_index );
        data->stream = hb_stream_open(data->h, data->path, title, 1);
        if (data->stream != NULL)
        {
            title = hb_stream_title_scan( data->stream, title );
            if ( title )
                hb_list_add( data->title_set->list_title, title );
        }
        else
        {
            hb_title_close( &title );
            hb_log( "scan: unrecognized file type" );
            goto finish;
        }
    }

    for( i = 0; i < hb_list_count( data->title_set->list_title ); )
    {
        int j, npreviews;
        hb_audio_t * audio;

        if ( *data->die )
        {
            goto finish;
        }
        title = hb_list_item( data->title_set->list_title, i );

        UpdateState2(data, i + 1);

        /* Decode previews */
        /* this will also detect more AC3 / DTS information */
        npreviews = DecodePreviews( data, title, 1 );
        if (npreviews < 2)
        {
            // Try harder to get some valid frames
            // Allow libav to return "corrupt" frames
            hb_log("scan: Too few previews (%d), trying harder", npreviews);
            title->flags |= HBTF_NO_IDR;
            npreviews = DecodePreviews( data, title, 0 );
        }
        if (npreviews == 0)
        {
            /* TODO: free things */
            hb_list_rem( data->title_set->list_title, title );
            for( j = 0; j < hb_list_count( title->list_audio ); j++)
            {
                audio = hb_list_item( title->list_audio, j );
                if ( audio->priv.scan_cache )
                {
                    hb_fifo_flush( audio->priv.scan_cache );
                    hb_fifo_close( &audio->priv.scan_cache );
                }
            }
            hb_title_close( &title );
            continue;
        }
        title->preview_count = npreviews;

        /* Make sure we found audio rates and bitrates */
        for( j = 0; j < hb_list_count( title->list_audio ); )
        {
            audio = hb_list_item( title->list_audio, j );
            if ( audio->priv.scan_cache )
            {
                hb_fifo_flush( audio->priv.scan_cache );
                hb_fifo_close( &audio->priv.scan_cache );
            }
            if( !audio->config.in.bitrate )
            {
                hb_log( "scan: removing audio 0x%x because no bitrate found",
                        audio->id );
                hb_list_rem( title->list_audio, audio );
                free( audio );
                continue;
            }
            j++;
        }

        // VOBSUB and PGS width and height need to be set to the
        // title width and height for any stream type that does
        // not provide this information (DVDs, BDs, VOBs, and M2TSs).
        // Title width and height don't get set until we decode
        // previews, so we can't set subtitle width/height till
        // we get here.
        for (j = 0; j < hb_list_count(title->list_subtitle); j++)
        {
            hb_subtitle_t *subtitle = hb_list_item(title->list_subtitle, j);
            if ((subtitle->source == VOBSUB || subtitle->source == PGSSUB) &&
                (subtitle->width <= 0 || subtitle->height <= 0))
            {
                subtitle->width  = title->geometry.width;
                subtitle->height = title->geometry.height;
            }
        }
        i++;
    }

    data->title_set->feature = feature;

    /* Mark title scan complete and init jobs */
    for( i = 0; i < hb_list_count( data->title_set->list_title ); i++ )
    {
        title      = hb_list_item( data->title_set->list_title, i );
        title->flags |= HBTF_SCAN_COMPLETE;
    }
    if (hb_list_count(data->title_set->list_title) > 0)
    {
        strncpy(data->title_set->path, data->path, 1024);
        data->title_set->path[1023] = 0;
    }
    else
    {
        data->title_set->path[0] = 0;
    }

finish:

    if( data->bd )
    {
        hb_bd_close( &data->bd );
    }
    if( data->dvd )
    {
        hb_dvd_close( &data->dvd );
    }
    if (data->stream)
    {
        hb_stream_close(&data->stream);
    }
    if( data->batch )
    {
        hb_batch_close( &data->batch );
    }
    free( data->path );
    free( data );
    _data = NULL;
    hb_buffer_pool_free();
}
Example #27
/**********************************************************************
 * avformatInit
 **********************************************************************
 * Allocates hb_mux_data_t structures, creates the output file and writes headers
 *********************************************************************/
static int avformatInit( hb_mux_object_t * m )
{
    hb_job_t   * job   = m->job;
    hb_audio_t    * audio;
    hb_mux_data_t * track;
    int meta_mux;
    int max_tracks;
    int ii, ret;

    const char *muxer_name = NULL;

    uint8_t         default_track_flag = 1;
    uint8_t         need_fonts = 0;
    char *lang;


    m->delay = -1;
    max_tracks = 1 + hb_list_count( job->list_audio ) +
                     hb_list_count( job->list_subtitle );

    m->tracks = calloc(max_tracks, sizeof(hb_mux_data_t*));

    m->oc = avformat_alloc_context();
    if (m->oc == NULL)
    {
        hb_error( "Could not initialize avformat context." );
        goto error;
    }

    switch (job->mux)
    {
        case HB_MUX_AV_MP4:
            m->time_base.num = 1;
            m->time_base.den = 90000;
            if( job->ipod_atom )
                muxer_name = "ipod";
            else
                muxer_name = "mp4";
            meta_mux = META_MUX_MP4;
            break;

        case HB_MUX_AV_MKV:
            // libavformat is essentially hard coded such that it only
            // works with a timebase of 1/1000
            m->time_base.num = 1;
            m->time_base.den = 1000;
            muxer_name = "matroska";
            meta_mux = META_MUX_MKV;
            break;

        default:
        {
            hb_error("Invalid Mux %x", job->mux);
            goto error;
        }
    }
    m->oc->oformat = av_guess_format(muxer_name, NULL, NULL);
    if(m->oc->oformat == NULL)
    {
        hb_error("Could not guess output format %s", muxer_name);
        goto error;
    }
    av_strlcpy(m->oc->filename, job->file, sizeof(m->oc->filename));
    ret = avio_open2(&m->oc->pb, job->file, AVIO_FLAG_WRITE,
                     &m->oc->interrupt_callback, NULL);
    if( ret < 0 )
    {
        hb_error( "avio_open2 failed, errno %d", ret);
        goto error;
    }

    /* Video track */
    track = m->tracks[m->ntracks++] = calloc(1, sizeof( hb_mux_data_t ) );
    job->mux_data = track;

    track->type = MUX_TYPE_VIDEO;
    track->st = avformat_new_stream(m->oc, NULL);
    if (track->st == NULL)
    {
        hb_error("Could not initialize video stream");
        goto error;
    }
    track->st->time_base = m->time_base;
    avcodec_get_context_defaults3(track->st->codec, NULL);

    track->st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    track->st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

    uint8_t *priv_data = NULL;
    int priv_size = 0;
    switch (job->vcodec)
    {
        case HB_VCODEC_X264:
        case HB_VCODEC_QSV_H264:
            track->st->codec->codec_id = AV_CODEC_ID_H264;

            /* Taken from x264 muxers.c */
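            // (What follows hand-builds the avcC extradata, i.e. an ISO/IEC
            // 14496-15 AVCDecoderConfigurationRecord: configuration version,
            // profile/compat/level bytes copied from the SPS, the NAL
            // length-size byte, then the single SPS and PPS, each prefixed
            // with a 16-bit big-endian length.)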
            priv_size = 5 + 1 + 2 + job->config.h264.sps_length + 1 + 2 +
                        job->config.h264.pps_length;
            priv_data = av_malloc(priv_size);
            if (priv_data == NULL)
            {
                hb_error("malloc failure");
                goto error;
            }

            priv_data[0] = 1;
            priv_data[1] = job->config.h264.sps[1]; /* AVCProfileIndication */
            priv_data[2] = job->config.h264.sps[2]; /* profile_compat */
            priv_data[3] = job->config.h264.sps[3]; /* AVCLevelIndication */
            priv_data[4] = 0xff; // nalu size length is four bytes
            priv_data[5] = 0xe1; // one sps

            priv_data[6] = job->config.h264.sps_length >> 8;
            priv_data[7] = job->config.h264.sps_length;

            memcpy(priv_data+8, job->config.h264.sps,
                   job->config.h264.sps_length);

            priv_data[8+job->config.h264.sps_length] = 1; // one pps
            priv_data[9+job->config.h264.sps_length] =
                                        job->config.h264.pps_length >> 8;
            priv_data[10+job->config.h264.sps_length] =
                                        job->config.h264.pps_length;

            memcpy(priv_data+11+job->config.h264.sps_length,
                   job->config.h264.pps, job->config.h264.pps_length );
            break;

        case HB_VCODEC_FFMPEG_MPEG4:
            track->st->codec->codec_id = AV_CODEC_ID_MPEG4;

            if (job->config.mpeg4.length != 0)
            {
                priv_size = job->config.mpeg4.length;
                priv_data = av_malloc(priv_size);
                if (priv_data == NULL)
                {
                    hb_error("malloc failure");
                    goto error;
                }
                memcpy(priv_data, job->config.mpeg4.bytes, priv_size);
            }
            break;

        case HB_VCODEC_FFMPEG_MPEG2:
            track->st->codec->codec_id = AV_CODEC_ID_MPEG2VIDEO;

            if (job->config.mpeg4.length != 0)
            {
                priv_size = job->config.mpeg4.length;
                priv_data = av_malloc(priv_size);
                if (priv_data == NULL)
                {
                    hb_error("malloc failure");
                    goto error;
                }
                memcpy(priv_data, job->config.mpeg4.bytes, priv_size);
            }
            break;

        case HB_VCODEC_THEORA:
        {
            track->st->codec->codec_id = AV_CODEC_ID_THEORA;

            int size = 0;
            ogg_packet *ogg_headers[3];

            for (ii = 0; ii < 3; ii++)
            {
                ogg_headers[ii] = (ogg_packet *)job->config.theora.headers[ii];
                size += ogg_headers[ii]->bytes + 2;
            }

            priv_size = size;
            priv_data = av_malloc(priv_size);
            if (priv_data == NULL)
            {
                hb_error("malloc failure");
                goto error;
            }

            size = 0;
            for(ii = 0; ii < 3; ii++)
            {
                AV_WB16(priv_data + size, ogg_headers[ii]->bytes);
                size += 2;
                memcpy(priv_data+size, ogg_headers[ii]->packet,
                                       ogg_headers[ii]->bytes);
                size += ogg_headers[ii]->bytes;
            }
        } break;

        default:
            hb_error("muxavformat: Unknown video codec: %x", job->vcodec);
            goto error;
    }
    track->st->codec->extradata = priv_data;
    track->st->codec->extradata_size = priv_size;

    if (job->anamorphic.mode > 0)
    {
        track->st->sample_aspect_ratio.num        = job->anamorphic.par_width;
        track->st->sample_aspect_ratio.den        = job->anamorphic.par_height;
        track->st->codec->sample_aspect_ratio.num = job->anamorphic.par_width;
        track->st->codec->sample_aspect_ratio.den = job->anamorphic.par_height;
    }
    else
    {
        track->st->sample_aspect_ratio.num        = 1;
        track->st->sample_aspect_ratio.den        = 1;
        track->st->codec->sample_aspect_ratio.num = 1;
        track->st->codec->sample_aspect_ratio.den = 1;
    }
    track->st->codec->width = job->width;
    track->st->codec->height = job->height;
    track->st->disposition |= AV_DISPOSITION_DEFAULT;

    int vrate_base, vrate;
    if( job->pass == 2 )
    {
        hb_interjob_t * interjob = hb_interjob_get( job->h );
        vrate_base = interjob->vrate_base;
        vrate = interjob->vrate;
    }
    else
    {
        vrate_base = job->vrate_base;
        vrate = job->vrate;
    }

    // If the vrate is 27000000, there's a good chance this is
    // a standard rate that we have in our hb_video_rates table.
    // Because of rounding errors and approximations made while
    // measuring framerate, the actual value may not be exact.  So
    // we look for rates that are "close" and make an adjustment
    // to fps.den.
    if (vrate == 27000000)
    {
        const hb_rate_t *video_framerate = NULL;
        while ((video_framerate = hb_video_framerate_get_next(video_framerate)) != NULL)
        {
            if (abs(vrate_base - video_framerate->rate) < 10)
            {
                vrate_base = video_framerate->rate;
                break;
            }
        }
    }
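    // Illustrative example: a measured vrate_base of 900897 falls within 10
    // of the table value 900900 (29.97 fps) and snaps to it; hb_reduce()
    // below then turns 900900/27000000 into 1001/30000.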
    hb_reduce(&vrate_base, &vrate, vrate_base, vrate);
    if (job->mux == HB_MUX_AV_MP4)
    {
        // libavformat mp4 muxer requires that the codec time_base have the
        // same denominator as the stream time_base, it uses it for the
        // mdhd timescale.
        double scale = (double)track->st->time_base.den / vrate;
        track->st->codec->time_base.den = track->st->time_base.den;
        track->st->codec->time_base.num = vrate_base * scale;
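        // For example, at 29.97 fps (vrate = 30000, vrate_base = 1001) with a
        // 90000 Hz stream timebase, scale = 3 and the codec time_base becomes
        // 3003/90000, i.e. exactly one frame duration.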
    }
    else
    {
        track->st->codec->time_base.num = vrate_base;
        track->st->codec->time_base.den = vrate;
    }

    /* add the audio tracks */
    for(ii = 0; ii < hb_list_count( job->list_audio ); ii++ )
    {
        audio = hb_list_item( job->list_audio, ii );
        track = m->tracks[m->ntracks++] = calloc(1, sizeof( hb_mux_data_t ) );
        audio->priv.mux_data = track;

        track->type = MUX_TYPE_AUDIO;

        track->st = avformat_new_stream(m->oc, NULL);
        if (track->st == NULL)
        {
            hb_error("Could not initialize audio stream");
            goto error;
        }
        avcodec_get_context_defaults3(track->st->codec, NULL);

        track->st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        track->st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
        if (job->mux == HB_MUX_AV_MP4)
        {
            track->st->codec->time_base.num = audio->config.out.samples_per_frame;
            track->st->codec->time_base.den = audio->config.out.samplerate;
            track->st->time_base.num = 1;
            track->st->time_base.den = audio->config.out.samplerate;
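            // One codec time_base tick therefore spans a whole audio frame
            // (e.g. 1024 samples for AAC), while stream timestamps advance
            // once per output sample.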
        }
        else
        {
            track->st->codec->time_base = m->time_base;
        }

        priv_data = NULL;
        priv_size = 0;
        switch (audio->config.out.codec & HB_ACODEC_MASK)
        {
            case HB_ACODEC_DCA:
            case HB_ACODEC_DCA_HD:
                track->st->codec->codec_id = AV_CODEC_ID_DTS;
                break;
            case HB_ACODEC_AC3:
                track->st->codec->codec_id = AV_CODEC_ID_AC3;
                break;
            case HB_ACODEC_LAME:
            case HB_ACODEC_MP3:
                track->st->codec->codec_id = AV_CODEC_ID_MP3;
                break;
            case HB_ACODEC_VORBIS:
            {
                track->st->codec->codec_id = AV_CODEC_ID_VORBIS;

                int jj, size = 0;
                ogg_packet *ogg_headers[3];

                for (jj = 0; jj < 3; jj++)
                {
                    ogg_headers[jj] = (ogg_packet *)audio->priv.config.vorbis.headers[jj];
                    size += ogg_headers[jj]->bytes + 2;
                }

                priv_size = size;
                priv_data = av_malloc(priv_size);
                if (priv_data == NULL)
                {
                    hb_error("malloc failure");
                    goto error;
                }

                size = 0;
                for(jj = 0; jj < 3; jj++)
                {
                    AV_WB16(priv_data + size, ogg_headers[jj]->bytes);
                    size += 2;
                    memcpy(priv_data+size, ogg_headers[jj]->packet,
                                           ogg_headers[jj]->bytes);
                    size += ogg_headers[jj]->bytes;
                }
            } break;
            case HB_ACODEC_FFFLAC:
            case HB_ACODEC_FFFLAC24:
                track->st->codec->codec_id = AV_CODEC_ID_FLAC;

                if (audio->priv.config.extradata.bytes)
                {
                    priv_size = audio->priv.config.extradata.length;
                    priv_data = av_malloc(priv_size);
                    if (priv_data == NULL)
                    {
                        hb_error("malloc failure");
                        goto error;
                    }
                    memcpy(priv_data,
                           audio->priv.config.extradata.bytes,
                           audio->priv.config.extradata.length);
                }
                break;
            case HB_ACODEC_FAAC:
            case HB_ACODEC_FFAAC:
            case HB_ACODEC_CA_AAC:
            case HB_ACODEC_CA_HAAC:
            case HB_ACODEC_FDK_AAC:
            case HB_ACODEC_FDK_HAAC:
                track->st->codec->codec_id = AV_CODEC_ID_AAC;

                if (audio->priv.config.extradata.bytes)
                {
                    priv_size = audio->priv.config.extradata.length;
                    priv_data = av_malloc(priv_size);
                    if (priv_data == NULL)
                    {
                        hb_error("malloc failure");
                        goto error;
                    }
                    memcpy(priv_data,
                           audio->priv.config.extradata.bytes,
                           audio->priv.config.extradata.length);
                }
                break;
            default:
                hb_error("muxavformat: Unknown audio codec: %x",
                         audio->config.out.codec);
                goto error;
        }
        track->st->codec->extradata = priv_data;
        track->st->codec->extradata_size = priv_size;

        if( default_track_flag )
        {
            track->st->disposition |= AV_DISPOSITION_DEFAULT;
            default_track_flag = 0;
        }

        lang = lookup_lang_code(job->mux, audio->config.lang.iso639_2 );
        if (lang != NULL)
        {
            av_dict_set(&track->st->metadata, "language", lang, 0);
        }
        track->st->codec->sample_rate = audio->config.out.samplerate;
        if (audio->config.out.codec & HB_ACODEC_PASS_FLAG)
        {
            track->st->codec->channels = av_get_channel_layout_nb_channels(audio->config.in.channel_layout);
            track->st->codec->channel_layout = audio->config.in.channel_layout;
        }
        else
        {
            track->st->codec->channels = hb_mixdown_get_discrete_channel_count(audio->config.out.mixdown);
            track->st->codec->channel_layout = hb_ff_mixdown_xlat(audio->config.out.mixdown, NULL);
        }

        char *name;
        if (audio->config.out.name == NULL)
        {
            switch (track->st->codec->channels)
            {
                case 1:
                    name = "Mono";
                    break;

                case 2:
                    name = "Stereo";
                    break;

                default:
                    name = "Surround";
                    break;
            }
        }
        else
        {
            name = audio->config.out.name;
        }
        av_dict_set(&track->st->metadata, "title", name, 0);
    }

    char * subidx_fmt =
        "size: %dx%d\n"
        "org: %d, %d\n"
        "scale: 100%%, 100%%\n"
        "alpha: 100%%\n"
        "smooth: OFF\n"
        "fadein/out: 50, 50\n"
        "align: OFF at LEFT TOP\n"
        "time offset: 0\n"
        "forced subs: %s\n"
        "palette: %06x, %06x, %06x, %06x, %06x, %06x, "
        "%06x, %06x, %06x, %06x, %06x, %06x, %06x, %06x, %06x, %06x\n"
        "custom colors: OFF, tridx: 0000, "
        "colors: 000000, 000000, 000000, 000000\n";

    int subtitle_default = -1;
    for( ii = 0; ii < hb_list_count( job->list_subtitle ); ii++ )
    {
        hb_subtitle_t *subtitle = hb_list_item( job->list_subtitle, ii );

        if( subtitle->config.dest == PASSTHRUSUB )
        {
            if ( subtitle->config.default_track )
                subtitle_default = ii;
        }
    }
    // Quicktime requires that at least one subtitle is enabled,
    // else it doesn't show any of the subtitles.
    // So check to see if any of the subtitles are flagged to be
    // the default.  The default will be the enabled track; otherwise,
    // enable the first track.
    if (job->mux == HB_MUX_AV_MP4 && subtitle_default == -1)
    {
        subtitle_default = 0;
    }

    for( ii = 0; ii < hb_list_count( job->list_subtitle ); ii++ )
    {
        hb_subtitle_t * subtitle;
        uint32_t        rgb[16];
        char            subidx[2048];
        int             len;

        subtitle = hb_list_item( job->list_subtitle, ii );
        if (subtitle->config.dest != PASSTHRUSUB)
            continue;

        track = m->tracks[m->ntracks++] = calloc(1, sizeof( hb_mux_data_t ) );
        subtitle->mux_data = track;

        track->type = MUX_TYPE_SUBTITLE;
        track->st = avformat_new_stream(m->oc, NULL);
        if (track->st == NULL)
        {
            hb_error("Could not initialize subtitle stream");
            goto error;
        }
        avcodec_get_context_defaults3(track->st->codec, NULL);

        track->st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
        track->st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
        track->st->time_base = m->time_base;
        track->st->codec->time_base = m->time_base;
        track->st->codec->width = subtitle->width;
        track->st->codec->height = subtitle->height;

        priv_data = NULL;
        priv_size = 0;
        switch (subtitle->source)
        {
            case VOBSUB:
            {
                int jj;
                track->st->codec->codec_id = AV_CODEC_ID_DVD_SUBTITLE;

                for (jj = 0; jj < 16; jj++)
                    rgb[jj] = hb_yuv2rgb(subtitle->palette[jj]);
                len = snprintf(subidx, 2048, subidx_fmt,
                        subtitle->width, subtitle->height,
                        0, 0, "OFF",
                        rgb[0], rgb[1], rgb[2], rgb[3],
                        rgb[4], rgb[5], rgb[6], rgb[7],
                        rgb[8], rgb[9], rgb[10], rgb[11],
                        rgb[12], rgb[13], rgb[14], rgb[15]);

                priv_size = len + 1;
                priv_data = av_malloc(priv_size);
                if (priv_data == NULL)
                {
                    hb_error("malloc failure");
                    goto error;
                }
                memcpy(priv_data, subidx, priv_size);
            } break;

            case PGSSUB:
            {
                track->st->codec->codec_id = AV_CODEC_ID_HDMV_PGS_SUBTITLE;
            } break;

            case SSASUB:
            {
                if (job->mux == HB_MUX_AV_MP4)
                {
                    track->st->codec->codec_id = AV_CODEC_ID_MOV_TEXT;
                }
                else
                {
                    track->st->codec->codec_id = AV_CODEC_ID_SSA;
                    need_fonts = 1;

                    if (subtitle->extradata_size)
                    {
                        priv_size = subtitle->extradata_size;
                        priv_data = av_malloc(priv_size);
                        if (priv_data == NULL)
                        {
                            hb_error("malloc failure");
                            goto error;
                        }
                        memcpy(priv_data, subtitle->extradata, priv_size);
                    }
                }
            } break;

            case CC608SUB:
            case CC708SUB:
            case UTF8SUB:
            case TX3GSUB:
            case SRTSUB:
            {
                if (job->mux == HB_MUX_AV_MP4)
                    track->st->codec->codec_id = AV_CODEC_ID_MOV_TEXT;
                else
                    track->st->codec->codec_id = AV_CODEC_ID_TEXT;
            } break;

            default:
                continue;
        }
        if (track->st->codec->codec_id == AV_CODEC_ID_MOV_TEXT)
        {
            // Build codec extradata for tx3g.
            // If we were using a libav codec to generate this data
            // this would (or should) be done for us.
            uint8_t properties[] = {
                0x00, 0x00, 0x00, 0x00,     // Display Flags
                0x01,                       // Horiz. Justification
                0xff,                       // Vert. Justification
                0x00, 0x00, 0x00, 0xff,     // Bg color
                0x00, 0x00, 0x00, 0x00,     // Default text box
                0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00,     // Reserved
                0x00, 0x01,                 // Font ID
                0x00,                       // Font face
                0x18,                       // Font size
                0xff, 0xff, 0xff, 0xff,     // Fg color
                // Font table:
                0x00, 0x00, 0x00, 0x12,     // Font table size
                'f','t','a','b',            // Tag
                0x00, 0x01,                 // Count
                0x00, 0x01,                 // Font ID
                0x05,                       // Font name length
                'A','r','i','a','l'         // Font name
            };

            int width, height = 60;
            if (job->anamorphic.mode)
                width = job->width * ((float)job->anamorphic.par_width / job->anamorphic.par_height);
            else
                width = job->width;
            track->st->codec->width = width;
            track->st->codec->height = height;
            properties[14] = height >> 8;
            properties[15] = height & 0xff;
            properties[16] = width >> 8;
            properties[17] = width & 0xff;
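            // Bytes 14-17 patched above are the bottom/right corner of the
            // default text box, so the tx3g sample description spans the full
            // display width with a fixed 60-pixel-high band for text.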

            priv_size = sizeof(properties);
            priv_data = av_malloc(priv_size);
            if (priv_data == NULL)
            {
                hb_error("malloc failure");
                goto error;
            }
            memcpy(priv_data, properties, priv_size);
        }
        track->st->codec->extradata = priv_data;
        track->st->codec->extradata_size = priv_size;

        if ( ii == subtitle_default )
        {
            track->st->disposition |= AV_DISPOSITION_DEFAULT;
        }

        lang = lookup_lang_code(job->mux, subtitle->iso639_2 );
        if (lang != NULL)
        {
            av_dict_set(&track->st->metadata, "language", lang, 0);
        }
    }
Example #28
/**
 * Displays job parameters in the debug log.
 * @param job Handle to the work hb_job_t being logged.
 */
void hb_display_job_info( hb_job_t * job )
{
    hb_title_t * title = job->title;
    hb_audio_t   * audio;
    hb_subtitle_t * subtitle;
    int             i, j;
    
    hb_log("job configuration:");
    hb_log( " * source");
    
    hb_log( "   + %s", title->path );

    hb_log( "   + title %d, chapter(s) %d to %d", title->index,
            job->chapter_start, job->chapter_end );

    if( title->container_name != NULL )
        hb_log( "   + container: %s", title->container_name);

    if( title->data_rate )
    {
        hb_log( "   + data rate: %d kbps", title->data_rate / 1000 );
    }
    
    hb_log( " * destination");

    hb_log( "   + %s", job->file );

    switch( job->mux )
    {
        case HB_MUX_MP4:
            hb_log("   + container: MPEG-4 (.mp4 and .m4v)");
            
            if( job->ipod_atom )
                hb_log( "     + compatibility atom for iPod 5G");

            if( job->largeFileSize )
                hb_log( "     + 64-bit formatting");

            if( job->mp4_optimize )
                hb_log( "     + optimized for progressive web downloads");
            
            if( job->color_matrix )
                hb_log( "     + custom color matrix: %s", job->color_matrix == 1 ? "ITU Bt.601 (SD)" : "ITU Bt.709 (HD)");
            break;

        case HB_MUX_MKV:
            hb_log("   + container: Matroska (.mkv)");
            break;
    }

    if( job->chapter_markers )
    {
        hb_log( "     + chapter markers" );
    }
    
    hb_log(" * video track");
    
    hb_log("   + decoder: %s", title->video_codec_name );
    
    if( title->video_bitrate )
    {
        hb_log( "     + bitrate %d kbps", title->video_bitrate / 1000 );
    }
    
    if( job->cfr == 0 )
    {
        hb_log( "   + frame rate: same as source (around %.3f fps)",
            (float) title->rate / (float) title->rate_base );
    }
    else if( job->cfr == 1 )
    {
        hb_log( "   + frame rate: %.3f fps -> constant %.3f fps",
            (float) title->rate / (float) title->rate_base,
            (float) job->vrate / (float) job->vrate_base );
    }
    else if( job->cfr == 2 )
    {
        hb_log( "   + frame rate: %.3f fps -> peak rate limited to %.3f fps",
            (float) title->rate / (float) title->rate_base,
            (float) job->pfr_vrate / (float) job->pfr_vrate_base );
    }

    if( job->anamorphic.mode )
    {
        hb_log( "   + %s anamorphic", job->anamorphic.mode == 1 ? "strict" : job->anamorphic.mode == 2? "loose" : "custom" );
        if( job->anamorphic.mode == 3 && job->anamorphic.keep_display_aspect )
        {
            hb_log( "     + keeping source display aspect ratio"); 
        }
        hb_log( "     + storage dimensions: %d * %d -> %d * %d, crop %d/%d/%d/%d, mod %i",
                    title->width, title->height, job->width, job->height,
                    job->crop[0], job->crop[1], job->crop[2], job->crop[3], job->modulus );
        if( job->anamorphic.itu_par )
        {
            hb_log( "     + using ITU pixel aspect ratio values"); 
        }
        hb_log( "     + pixel aspect ratio: %i / %i", job->anamorphic.par_width, job->anamorphic.par_height );
        hb_log( "     + display dimensions: %.0f * %i",
            (float)( job->width * job->anamorphic.par_width / job->anamorphic.par_height ), job->height );
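        // For instance, a 720x480 title with a 32/27 pixel aspect ratio is
        // reported as roughly 853 * 480 display pixels (720 * 32 / 27).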
    }
    else
    {
        hb_log( "   + dimensions: %d * %d -> %d * %d, crop %d/%d/%d/%d, mod %i",
                title->width, title->height, job->width, job->height,
                job->crop[0], job->crop[1], job->crop[2], job->crop[3], job->modulus );
    }

    if ( job->grayscale )
        hb_log( "   + grayscale mode" );

    if( hb_list_count( job->filters ) )
    {
        hb_log("   + %s", hb_list_count( job->filters) > 1 ? "filters" : "filter" );
        for( i = 0; i < hb_list_count( job->filters ); i++ )
        {
            hb_filter_object_t * filter = hb_list_item( job->filters, i );
            if (filter->settings)
                hb_log("     + %s (%s)", filter->name, filter->settings);
            else
                hb_log("     + %s (default settings)", filter->name);
        }
    }
    
    if( !job->indepth_scan )
    {
        /* Video encoder */
        switch( job->vcodec )
        {
            case HB_VCODEC_FFMPEG_MPEG4:
                hb_log( "   + encoder: FFmpeg MPEG-4" );
                break;

            case HB_VCODEC_FFMPEG_MPEG2:
                hb_log( "   + encoder: FFmpeg MPEG-2" );
                break;

            case HB_VCODEC_X264:
                hb_log( "   + encoder: x264" );
                if( job->advanced_opts != NULL && *job->advanced_opts != '\0' )
                    hb_log( "     + options: %s", job->advanced_opts);
                break;

            case HB_VCODEC_THEORA:
                hb_log( "   + encoder: Theora" );
                break;
        }

        if( job->vquality >= 0 )
        {
            hb_log( "     + quality: %.2f %s", job->vquality, job->vcodec == HB_VCODEC_X264 ? "(RF)" : "(QP)" ); 
        }
        else
        {
            hb_log( "     + bitrate: %d kbps, pass: %d", job->vbitrate, job->pass );
        }
    }

    for( i=0; i < hb_list_count( title->list_subtitle ); i++ )
    {
        subtitle =  hb_list_item( title->list_subtitle, i );

        if( subtitle )
        {
            if( subtitle->source == SRTSUB )
            {
                /* For SRT, print offset and charset too */
                hb_log( " * subtitle track %i, %s (id %x) %s [%s] -> %s%s, offset: %"PRId64", charset: %s",
                        subtitle->track, subtitle->lang, subtitle->id, "Text", "SRT", "Pass-Through",
                        subtitle->config.default_track ? ", Default" : "",
                        subtitle->config.offset, subtitle->config.src_codeset );
            }
            else
            {
                hb_log( " * subtitle track %i, %s (id %x) %s [%s] -> %s%s%s", subtitle->track, subtitle->lang, subtitle->id,
                        subtitle->format == PICTURESUB ? "Picture" : "Text",
                        hb_subsource_name( subtitle->source ),
                        job->indepth_scan ? "Foreign Audio Search" :
                        subtitle->config.dest == RENDERSUB ? "Render/Burn in" : "Pass-Through",
                        subtitle->config.force ? ", Forced Only" : "",
                        subtitle->config.default_track ? ", Default" : "" );
            }
        }
    }

    if( !job->indepth_scan )
    {
        for( i = 0; i < hb_list_count( title->list_audio ); i++ )
        {
            audio = hb_list_item( title->list_audio, i );

            hb_log( " * audio track %d", audio->config.out.track );
            
            if( audio->config.out.name )
                hb_log( "   + name: %s", audio->config.out.name );

            hb_log( "   + decoder: %s (track %d, id %x)", audio->config.lang.description, audio->config.in.track + 1, audio->id );

            if( ( audio->config.in.codec == HB_ACODEC_AC3 ) || ( audio->config.in.codec == HB_ACODEC_DCA) )
            {
                hb_log( "     + bitrate: %d kbps, samplerate: %d Hz", audio->config.in.bitrate / 1000, audio->config.in.samplerate );
            }

            if( (audio->config.out.codec != HB_ACODEC_AC3_PASS) && (audio->config.out.codec != HB_ACODEC_DCA_PASS) )
            {
                for (j = 0; j < hb_audio_mixdowns_count; j++)
                {
                    if (hb_audio_mixdowns[j].amixdown == audio->config.out.mixdown) {
                        hb_log( "   + mixdown: %s", hb_audio_mixdowns[j].human_readable_name );
                        break;
                    }
                }
            }

            if ( audio->config.out.dynamic_range_compression && (audio->config.out.codec != HB_ACODEC_AC3_PASS) && (audio->config.out.codec != HB_ACODEC_DCA_PASS))
            {
                hb_log("   + dynamic range compression: %f", audio->config.out.dynamic_range_compression);
            }
            
            if( (audio->config.out.codec == HB_ACODEC_AC3_PASS) || (audio->config.out.codec == HB_ACODEC_DCA_PASS) )
            {
                hb_log( "   + %s passthrough", (audio->config.out.codec == HB_ACODEC_AC3_PASS) ?
                    "AC3" : "DCA" );
            }
            else
            {
                hb_log( "   + encoder: %s", 
                    ( audio->config.out.codec == HB_ACODEC_FAAC ) ?  "faac" : 
                    ( ( audio->config.out.codec == HB_ACODEC_LAME ) ?  "lame" : 
                    ( ( audio->config.out.codec == HB_ACODEC_CA_AAC ) ?  "ca_aac" : 
                    ( ( audio->config.out.codec == HB_ACODEC_AC3 ) ?  "ffac3" : 
                    "vorbis"  ) ) ) );
                hb_log( "     + bitrate: %d kbps, samplerate: %d Hz", audio->config.out.bitrate, audio->config.out.samplerate );            
            }
        }
    }
}
Example #29
static void InitAudio( hb_job_t * job, hb_sync_common_t * common, int i )
{
    hb_work_object_t  * w;
    hb_work_private_t * pv;
    hb_sync_audio_t   * sync;

    pv = calloc( 1, sizeof( hb_work_private_t ) );
    sync = &pv->type.audio;
    sync->index = i;
    pv->job    = job;
    pv->common = common;
    pv->common->ref++;
    pv->common->pts_count++;

    w = hb_get_work( WORK_SYNC_AUDIO );
    w->private_data = pv;
    w->audio = hb_list_item( job->list_audio, i );
    w->fifo_in = w->audio->priv.fifo_raw;

    if ( w->audio->config.out.codec & HB_ACODEC_PASS_FLAG )
    {
        w->fifo_out = w->audio->priv.fifo_out;
    }
    else
    {
        w->fifo_out = w->audio->priv.fifo_sync;
    }

    if( w->audio->config.out.codec == HB_ACODEC_AC3_PASS ||
        w->audio->config.out.codec == HB_ACODEC_AAC_PASS )
    {
        /* Have a silent AC-3/AAC frame ready in case we have to fill a
           gap */
        AVCodec        * codec;
        AVCodecContext * c;

        switch ( w->audio->config.out.codec )
        {
            case HB_ACODEC_AC3_PASS:
            {
                codec = avcodec_find_encoder( AV_CODEC_ID_AC3 );
            } break;
            case HB_ACODEC_AAC_PASS:
            {
                codec = avcodec_find_encoder( AV_CODEC_ID_AAC );
            } break;
            default:
            {
                // Never gets here
                codec = NULL; // Silence compiler warning
            } break;
        }

        c              = avcodec_alloc_context3(codec);
        c->bit_rate    = w->audio->config.in.bitrate;
        c->sample_rate = w->audio->config.in.samplerate;
        c->channels    =
            av_get_channel_layout_nb_channels(w->audio->config.in.channel_layout);
        hb_ff_set_sample_fmt(c, codec, AV_SAMPLE_FMT_FLT);

        if (w->audio->config.in.channel_layout == AV_CH_LAYOUT_STEREO_DOWNMIX)
        {
            c->channel_layout = AV_CH_LAYOUT_STEREO;
        }
        else
        {
            c->channel_layout = w->audio->config.in.channel_layout;
        }

        if( hb_avcodec_open( c, codec, NULL, 0 ) < 0 )
        {
            hb_log( "sync: avcodec_open failed" );
            return;
        }

        // Prepare input frame
        AVFrame frame = { .nb_samples = c->frame_size, .pts = 0, };
        int input_size = av_samples_get_buffer_size(NULL, c->channels,
                                                    frame.nb_samples,
                                                    c->sample_fmt, 1);
        uint8_t *zeros = calloc(1, input_size);
        avcodec_fill_audio_frame(&frame, c->channels, c->sample_fmt, zeros,
                                 input_size, 1);
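        // 'frame' now references one frame's worth of zeroed (silent) PCM;
        // the loop below encodes it so sync has a ready-made AC-3/AAC packet
        // to splice in whenever an audio gap must be filled.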

        // Allocate enough space for the encoded silence
        // The output should be < the input
        sync->silence_buf  = malloc( input_size );

        // There is some delay in getting output from some audio encoders.
        // So encode a few packets till we get output.
        int ii;
        for ( ii = 0; ii < 10; ii++ )
        {
            // Prepare output packet
            AVPacket pkt;
            int got_packet;
            av_init_packet(&pkt);
            pkt.data = sync->silence_buf;
            pkt.size = input_size;

            int ret = avcodec_encode_audio2( c, &pkt, &frame, &got_packet);
            if ( ret < 0 )
            {
                hb_log( "sync: avcodec_encode_audio failed" );
                break;
            }

            if ( got_packet )
            {
                sync->silence_size = pkt.size;
                break;
            }
        }
        free( zeros );
        hb_avcodec_close( c );
        av_free( c );
    }
    else
    {
        if( w->audio->config.out.codec & HB_ACODEC_PASS_FLAG )
Example #30
/**********************************************************************
 * avformatInit
 **********************************************************************
 * Allocates hb_mux_data_t structures, creates the output file and writes headers
 *********************************************************************/
static int avformatInit( hb_mux_object_t * m )
{
    hb_job_t   * job   = m->job;
    hb_audio_t    * audio;
    hb_mux_data_t * track;
    int meta_mux;
    int max_tracks;
    int ii, jj, ret;

    int clock_min, clock_max, clock;
    hb_video_framerate_get_limits(&clock_min, &clock_max, &clock);

    const char *muxer_name = NULL;

    uint8_t         default_track_flag = 1;
    uint8_t         need_fonts = 0;
    char *lang;


    max_tracks = 1 + hb_list_count( job->list_audio ) +
                     hb_list_count( job->list_subtitle );

    m->tracks = calloc(max_tracks, sizeof(hb_mux_data_t*));

    m->oc = avformat_alloc_context();
    if (m->oc == NULL)
    {
        hb_error( "Could not initialize avformat context." );
        goto error;
    }

    AVDictionary * av_opts = NULL;
    switch (job->mux)
    {
        case HB_MUX_AV_MP4:
            m->time_base.num = 1;
            m->time_base.den = 90000;
            if( job->ipod_atom )
                muxer_name = "ipod";
            else
                muxer_name = "mp4";
            meta_mux = META_MUX_MP4;

            av_dict_set(&av_opts, "brand", "mp42", 0);
            if (job->mp4_optimize)
                av_dict_set(&av_opts, "movflags", "faststart+disable_chpl", 0);
            else
                av_dict_set(&av_opts, "movflags", "+disable_chpl", 0);
            break;

        case HB_MUX_AV_MKV:
            // libavformat is essentially hard coded such that it only
            // works with a timebase of 1/1000
            m->time_base.num = 1;
            m->time_base.den = 1000;
            muxer_name = "matroska";
            meta_mux = META_MUX_MKV;
            break;

        default:
        {
            hb_error("Invalid Mux %x", job->mux);
            goto error;
        }
    }
    m->oc->oformat = av_guess_format(muxer_name, NULL, NULL);
    if(m->oc->oformat == NULL)
    {
        hb_error("Could not guess output format %s", muxer_name);
        goto error;
    }
    av_strlcpy(m->oc->filename, job->file, sizeof(m->oc->filename));
    ret = avio_open2(&m->oc->pb, job->file, AVIO_FLAG_WRITE,
                     &m->oc->interrupt_callback, NULL);
    if( ret < 0 )
    {
        hb_error( "avio_open2 failed, errno %d", ret);
        goto error;
    }

    /* Video track */
    track = m->tracks[m->ntracks++] = calloc(1, sizeof( hb_mux_data_t ) );
    job->mux_data = track;

    track->type = MUX_TYPE_VIDEO;
    track->prev_chapter_tc = AV_NOPTS_VALUE;
    track->st = avformat_new_stream(m->oc, NULL);
    if (track->st == NULL)
    {
        hb_error("Could not initialize video stream");
        goto error;
    }
    track->st->time_base = m->time_base;
    avcodec_get_context_defaults3(track->st->codec, NULL);

    track->st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    track->st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

    uint8_t *priv_data = NULL;
    int priv_size = 0;
    switch (job->vcodec)
    {
        case HB_VCODEC_X264_8BIT:
        case HB_VCODEC_X264_10BIT:
        case HB_VCODEC_QSV_H264:
            track->st->codec->codec_id = AV_CODEC_ID_H264;

            /* Taken from x264 muxers.c */
            priv_size = 5 + 1 + 2 + job->config.h264.sps_length + 1 + 2 +
                        job->config.h264.pps_length;
            priv_data = av_malloc(priv_size + FF_INPUT_BUFFER_PADDING_SIZE);
            if (priv_data == NULL)
            {
                hb_error("H.264 extradata: malloc failure");
                goto error;
            }

            priv_data[0] = 1;
            priv_data[1] = job->config.h264.sps[1]; /* AVCProfileIndication */
            priv_data[2] = job->config.h264.sps[2]; /* profile_compat */
            priv_data[3] = job->config.h264.sps[3]; /* AVCLevelIndication */
            priv_data[4] = 0xff; // nalu size length is four bytes
            priv_data[5] = 0xe1; // one sps

            priv_data[6] = job->config.h264.sps_length >> 8;
            priv_data[7] = job->config.h264.sps_length;

            memcpy(priv_data+8, job->config.h264.sps,
                   job->config.h264.sps_length);

            priv_data[8+job->config.h264.sps_length] = 1; // one pps
            priv_data[9+job->config.h264.sps_length] =
                                        job->config.h264.pps_length >> 8;
            priv_data[10+job->config.h264.sps_length] =
                                        job->config.h264.pps_length;

            memcpy(priv_data+11+job->config.h264.sps_length,
                   job->config.h264.pps, job->config.h264.pps_length );
            break;

        case HB_VCODEC_FFMPEG_MPEG4:
            track->st->codec->codec_id = AV_CODEC_ID_MPEG4;

            if (job->config.mpeg4.length != 0)
            {
                priv_size = job->config.mpeg4.length;
                priv_data = av_malloc(priv_size + FF_INPUT_BUFFER_PADDING_SIZE);
                if (priv_data == NULL)
                {
                    hb_error("MPEG4 extradata: malloc failure");
                    goto error;
                }
                memcpy(priv_data, job->config.mpeg4.bytes, priv_size);
            }
            break;

        case HB_VCODEC_FFMPEG_MPEG2:
            track->st->codec->codec_id = AV_CODEC_ID_MPEG2VIDEO;

            if (job->config.mpeg4.length != 0)
            {
                priv_size = job->config.mpeg4.length;
                priv_data = av_malloc(priv_size + FF_INPUT_BUFFER_PADDING_SIZE);
                if (priv_data == NULL)
                {
                    hb_error("MPEG2 extradata: malloc failure");
                    goto error;
                }
                memcpy(priv_data, job->config.mpeg4.bytes, priv_size);
            }
            break;

        case HB_VCODEC_FFMPEG_VP8:
            track->st->codec->codec_id = AV_CODEC_ID_VP8;
            priv_data                  = NULL;
            priv_size                  = 0;
            break;

        case HB_VCODEC_FFMPEG_VP9:
            track->st->codec->codec_id = AV_CODEC_ID_VP9;
            priv_data                  = NULL;
            priv_size                  = 0;
            break;

        case HB_VCODEC_THEORA:
        {
            track->st->codec->codec_id = AV_CODEC_ID_THEORA;

            int size = 0;
            ogg_packet *ogg_headers[3];

            for (ii = 0; ii < 3; ii++)
            {
                ogg_headers[ii] = (ogg_packet *)job->config.theora.headers[ii];
                size += ogg_headers[ii]->bytes + 2;
            }

            priv_size = size;
            priv_data = av_malloc(priv_size + FF_INPUT_BUFFER_PADDING_SIZE);
            if (priv_data == NULL)
            {
                hb_error("Theora extradata: malloc failure");
                goto error;
            }

            size = 0;
            for(ii = 0; ii < 3; ii++)
            {
                AV_WB16(priv_data + size, ogg_headers[ii]->bytes);
                size += 2;
                memcpy(priv_data+size, ogg_headers[ii]->packet,
                                       ogg_headers[ii]->bytes);
                size += ogg_headers[ii]->bytes;
            }
        } break;

        case HB_VCODEC_X265_8BIT:
        case HB_VCODEC_X265_10BIT:
        case HB_VCODEC_X265_12BIT:
        case HB_VCODEC_X265_16BIT:
        case HB_VCODEC_QSV_H265:
            track->st->codec->codec_id = AV_CODEC_ID_HEVC;

            if (job->config.h265.headers_length > 0)
            {
                priv_size = job->config.h265.headers_length;
                priv_data = av_malloc(priv_size + FF_INPUT_BUFFER_PADDING_SIZE);
                if (priv_data == NULL)
                {
                    hb_error("H.265 extradata: malloc failure");
                    goto error;
                }
                memcpy(priv_data, job->config.h265.headers, priv_size);
            }
            break;

        default:
            hb_error("muxavformat: Unknown video codec: %x", job->vcodec);
            goto error;
    }
    track->st->codec->extradata = priv_data;
    track->st->codec->extradata_size = priv_size;

    track->st->sample_aspect_ratio.num        = job->par.num;
    track->st->sample_aspect_ratio.den        = job->par.den;
    track->st->codec->sample_aspect_ratio.num = job->par.num;
    track->st->codec->sample_aspect_ratio.den = job->par.den;
    track->st->codec->width                   = job->width;
    track->st->codec->height                  = job->height;
    track->st->disposition |= AV_DISPOSITION_DEFAULT;

    hb_rational_t vrate = job->vrate;

    // If the vrate is the internal clock rate, there's a good chance
    // this is a standard rate that we have in our hb_video_rates table.
    // Because of rounding errors and approximations made while
    // measuring framerate, the actual value may not be exact.  So
    // we look for rates that are "close" and make an adjustment
    // to fps.den.
    if (vrate.num == clock)
    {
        const hb_rate_t *video_framerate = NULL;
        while ((video_framerate = hb_video_framerate_get_next(video_framerate)) != NULL)
        {
            if (abs(vrate.den - video_framerate->rate) < 10)
            {
                vrate.den = video_framerate->rate;
                break;
            }
        }
    }
    hb_reduce(&vrate.num, &vrate.den, vrate.num, vrate.den);
    if (job->mux == HB_MUX_AV_MP4)
    {
        // libavformat mp4 muxer requires that the codec time_base have the
        // same denominator as the stream time_base, it uses it for the
        // mdhd timescale.
        double scale = (double)track->st->time_base.den / vrate.num;
        track->st->codec->time_base.den = track->st->time_base.den;
        track->st->codec->time_base.num = vrate.den * scale;
    }
    else
    {
        track->st->codec->time_base.num = vrate.den;
        track->st->codec->time_base.den = vrate.num;
    }
    track->st->avg_frame_rate.num = vrate.num;
    track->st->avg_frame_rate.den = vrate.den;

    /* add the audio tracks */
    for(ii = 0; ii < hb_list_count( job->list_audio ); ii++ )
    {
        audio = hb_list_item( job->list_audio, ii );
        track = m->tracks[m->ntracks++] = calloc(1, sizeof( hb_mux_data_t ) );
        audio->priv.mux_data = track;

        track->type = MUX_TYPE_AUDIO;

        track->st = avformat_new_stream(m->oc, NULL);
        if (track->st == NULL)
        {
            hb_error("Could not initialize audio stream");
            goto error;
        }
        avcodec_get_context_defaults3(track->st->codec, NULL);

        track->st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        track->st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
        if (job->mux == HB_MUX_AV_MP4)
        {
            track->st->codec->time_base.num = audio->config.out.samples_per_frame;
            track->st->codec->time_base.den = audio->config.out.samplerate;
            track->st->time_base.num = 1;
            track->st->time_base.den = audio->config.out.samplerate;
        }
        else
        {
            track->st->codec->time_base = m->time_base;
            track->st->time_base = m->time_base;
        }

        priv_data = NULL;
        priv_size = 0;
        switch (audio->config.out.codec & HB_ACODEC_MASK)
        {
            case HB_ACODEC_DCA:
            case HB_ACODEC_DCA_HD:
                track->st->codec->codec_id = AV_CODEC_ID_DTS;
                break;
            case HB_ACODEC_AC3:
                track->st->codec->codec_id = AV_CODEC_ID_AC3;
                break;
            case HB_ACODEC_FFEAC3:
                track->st->codec->codec_id = AV_CODEC_ID_EAC3;
                break;
            case HB_ACODEC_FFTRUEHD:
                track->st->codec->codec_id = AV_CODEC_ID_TRUEHD;
                break;
            case HB_ACODEC_LAME:
            case HB_ACODEC_MP3:
                track->st->codec->codec_id = AV_CODEC_ID_MP3;
                break;
            case HB_ACODEC_VORBIS:
            {
                track->st->codec->codec_id = AV_CODEC_ID_VORBIS;

                int jj, size = 0;
                ogg_packet *ogg_headers[3];

                for (jj = 0; jj < 3; jj++)
                {
                    ogg_headers[jj] = (ogg_packet *)audio->priv.config.vorbis.headers[jj];
                    size += ogg_headers[jj]->bytes + 2;
                }

                priv_size = size;
                priv_data = av_malloc(priv_size + FF_INPUT_BUFFER_PADDING_SIZE);
                if (priv_data == NULL)
                {
                    hb_error("Vorbis extradata: malloc failure");
                    goto error;
                }

                size = 0;
                for(jj = 0; jj < 3; jj++)
                {
                    AV_WB16(priv_data + size, ogg_headers[jj]->bytes);
                    size += 2;
                    memcpy(priv_data+size, ogg_headers[jj]->packet,
                                           ogg_headers[jj]->bytes);
                    size += ogg_headers[jj]->bytes;
                }
            } break;
            case HB_ACODEC_FFFLAC:
            case HB_ACODEC_FFFLAC24:
                track->st->codec->codec_id = AV_CODEC_ID_FLAC;

                if (audio->priv.config.extradata.length)
                {
                    priv_size = audio->priv.config.extradata.length;
                    priv_data = av_malloc(priv_size + FF_INPUT_BUFFER_PADDING_SIZE);
                    if (priv_data == NULL)
                    {
                        hb_error("FLAC extradata: malloc failure");
                        goto error;
                    }
                    memcpy(priv_data,
                           audio->priv.config.extradata.bytes,
                           audio->priv.config.extradata.length);
                }
                break;
            case HB_ACODEC_FFAAC:
            case HB_ACODEC_CA_AAC:
            case HB_ACODEC_CA_HAAC:
            case HB_ACODEC_FDK_AAC:
            case HB_ACODEC_FDK_HAAC:
                track->st->codec->codec_id = AV_CODEC_ID_AAC;

                // libav mkv muxer expects there to be extradata for
                // AAC and will crash if it is NULL.
                //
                // Also, libav can over-read the buffer by up to 8 bytes
                // when it fills its get_bits cache.
                //
                // So allocate extra bytes
                priv_size = audio->priv.config.extradata.length;
                priv_data = av_malloc(priv_size + FF_INPUT_BUFFER_PADDING_SIZE);
                if (priv_data == NULL)
                {
                    hb_error("AAC extradata: malloc failure");
                    goto error;
                }
                memcpy(priv_data,
                       audio->priv.config.extradata.bytes,
                       audio->priv.config.extradata.length);

                // AAC from a pass-through source may be ADTS, so inserting
                // the "aac_adtstoasc" bitstream filter is preferred.
                // The filter does nothing for a non-ADTS bitstream.
                if (audio->config.out.codec == HB_ACODEC_AAC_PASS)
                {
                    track->bitstream_filter = av_bitstream_filter_init("aac_adtstoasc");
                }
                break;
            default:
                hb_error("muxavformat: Unknown audio codec: %x",
                         audio->config.out.codec);
                goto error;
        }
        track->st->codec->extradata = priv_data;
        track->st->codec->extradata_size = priv_size;

        if( default_track_flag )
        {
            track->st->disposition |= AV_DISPOSITION_DEFAULT;
            default_track_flag = 0;
        }

        lang = lookup_lang_code(job->mux, audio->config.lang.iso639_2 );
        if (lang != NULL)
        {
            av_dict_set(&track->st->metadata, "language", lang, 0);
        }
        track->st->codec->sample_rate = audio->config.out.samplerate;
        if (audio->config.out.codec & HB_ACODEC_PASS_FLAG)
        {
            track->st->codec->channels = av_get_channel_layout_nb_channels(audio->config.in.channel_layout);
            track->st->codec->channel_layout = audio->config.in.channel_layout;
        }
        else
        {
            track->st->codec->channels = hb_mixdown_get_discrete_channel_count(audio->config.out.mixdown);
            track->st->codec->channel_layout = hb_ff_mixdown_xlat(audio->config.out.mixdown, NULL);
        }

        char *name;
        if (audio->config.out.name == NULL)
        {
            switch (track->st->codec->channels)
            {
                case 1:
                    name = "Mono";
                    break;

                case 2:
                    name = "Stereo";
                    break;

                default:
                    name = "Surround";
                    break;
            }
        }
        else
        {
            name = audio->config.out.name;
        }
        // Set audio track title
        av_dict_set(&track->st->metadata, "title", name, 0);
        if (job->mux == HB_MUX_AV_MP4)
        {
            // Some software (MPC, MediaInfo) uses the hdlr description
            // as the track title
            av_dict_set(&track->st->metadata, "handler", name, 0);
        }
    }

    // Check for audio track associations
    for (ii = 0; ii < hb_list_count(job->list_audio); ii++)
    {
        audio = hb_list_item(job->list_audio, ii);
        switch (audio->config.out.codec & HB_ACODEC_MASK)
        {
            case HB_ACODEC_FFAAC:
            case HB_ACODEC_CA_AAC:
            case HB_ACODEC_CA_HAAC:
            case HB_ACODEC_FDK_AAC:
            case HB_ACODEC_FDK_HAAC:
                break;

            default:
            {
                // Mark associated fallback audio tracks for any non-aac track
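                // (The side data attached below, AV_PKT_DATA_FALLBACK_TRACK,
                //  lets the MP4 muxer emit a 'fall' track reference so players
                //  that cannot decode this codec fall back to the AAC track.)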
                for(jj = 0; jj < hb_list_count( job->list_audio ); jj++ )
                {
                    hb_audio_t    * fallback;
                    int             codec;

                    if (ii == jj) continue;

                    fallback = hb_list_item( job->list_audio, jj );
                    codec = fallback->config.out.codec & HB_ACODEC_MASK;
                    if (fallback->config.in.track == audio->config.in.track &&
                        (codec == HB_ACODEC_FFAAC ||
                         codec == HB_ACODEC_CA_AAC ||
                         codec == HB_ACODEC_CA_HAAC ||
                         codec == HB_ACODEC_FDK_AAC ||
                         codec == HB_ACODEC_FDK_HAAC))
                    {
                        hb_mux_data_t * fallback_track;
                        int           * sd;

                        track = audio->priv.mux_data;
                        fallback_track = fallback->priv.mux_data;
                        sd = (int*)av_stream_new_side_data(track->st,
                                                     AV_PKT_DATA_FALLBACK_TRACK,
                                                     sizeof(int));
                        if (sd != NULL)
                        {
                            *sd = fallback_track->st->index;
                        }
                    }
                }
            } break;
        }
    }

    char * subidx_fmt =
        "size: %dx%d\n"
        "org: %d, %d\n"
        "scale: 100%%, 100%%\n"
        "alpha: 100%%\n"
        "smooth: OFF\n"
        "fadein/out: 50, 50\n"
        "align: OFF at LEFT TOP\n"
        "time offset: 0\n"
        "forced subs: %s\n"
        "palette: %06x, %06x, %06x, %06x, %06x, %06x, "
        "%06x, %06x, %06x, %06x, %06x, %06x, %06x, %06x, %06x, %06x\n"
        "custom colors: OFF, tridx: 0000, "
        "colors: 000000, 000000, 000000, 000000\n";

    int subtitle_default = -1;
    for( ii = 0; ii < hb_list_count( job->list_subtitle ); ii++ )
    {
        hb_subtitle_t *subtitle = hb_list_item( job->list_subtitle, ii );

        if( subtitle->config.dest == PASSTHRUSUB )
        {
            if ( subtitle->config.default_track )
                subtitle_default = ii;
        }
    }
    // Quicktime requires that at least one subtitle is enabled,
    // else it doesn't show any of the subtitles.
    // So check to see if any of the subtitles are flagged to be
    // the default.  The default will be the enabled track; otherwise,
    // enable the first track.
    if (job->mux == HB_MUX_AV_MP4 && subtitle_default == -1)
    {
        subtitle_default = 0;
    }

    for( ii = 0; ii < hb_list_count( job->list_subtitle ); ii++ )
    {
        hb_subtitle_t * subtitle;
        uint32_t        rgb[16];
        char            subidx[2048];
        int             len;

        subtitle = hb_list_item( job->list_subtitle, ii );
        if (subtitle->config.dest != PASSTHRUSUB)
            continue;

        track = m->tracks[m->ntracks++] = calloc(1, sizeof( hb_mux_data_t ) );
        subtitle->mux_data = track;

        track->type = MUX_TYPE_SUBTITLE;
        track->st = avformat_new_stream(m->oc, NULL);
        if (track->st == NULL)
        {
            hb_error("Could not initialize subtitle stream");
            goto error;
        }
        avcodec_get_context_defaults3(track->st->codec, NULL);

        track->st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
        track->st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
        track->st->time_base = m->time_base;
        track->st->codec->time_base = m->time_base;
        track->st->codec->width = subtitle->width;
        track->st->codec->height = subtitle->height;

        priv_data = NULL;
        priv_size = 0;
        switch (subtitle->source)
        {
            case VOBSUB:
            {
                int jj;
                track->st->codec->codec_id = AV_CODEC_ID_DVD_SUBTITLE;

                for (jj = 0; jj < 16; jj++)
                    rgb[jj] = hb_yuv2rgb(subtitle->palette[jj]);
                len = snprintf(subidx, 2048, subidx_fmt,
                        subtitle->width, subtitle->height,
                        0, 0, "OFF",
                        rgb[0], rgb[1], rgb[2], rgb[3],
                        rgb[4], rgb[5], rgb[6], rgb[7],
                        rgb[8], rgb[9], rgb[10], rgb[11],
                        rgb[12], rgb[13], rgb[14], rgb[15]);

                priv_size = len + 1;
                priv_data = av_malloc(priv_size + FF_INPUT_BUFFER_PADDING_SIZE);
                if (priv_data == NULL)
                {
                    hb_error("VOBSUB extradata: malloc failure");
                    goto error;
                }
                memcpy(priv_data, subidx, priv_size);
            } break;

            case PGSSUB:
            {
                track->st->codec->codec_id = AV_CODEC_ID_HDMV_PGS_SUBTITLE;
            } break;

            case CC608SUB:
            case CC708SUB:
            case TX3GSUB:
            case SRTSUB:
            case UTF8SUB:
            case SSASUB:
            {
                if (job->mux == HB_MUX_AV_MP4)
                {
                    track->st->codec->codec_id = AV_CODEC_ID_MOV_TEXT;
                }
                else
                {
                    track->st->codec->codec_id = AV_CODEC_ID_SSA;
                    need_fonts = 1;

                    if (subtitle->extradata_size)
                    {
                        priv_size = subtitle->extradata_size;
                        priv_data = av_malloc(priv_size + FF_INPUT_BUFFER_PADDING_SIZE);
                        if (priv_data == NULL)
                        {
                            hb_error("SSA extradata: malloc failure");
                            goto error;
                        }
                        memcpy(priv_data, subtitle->extradata, priv_size);
                    }
                }
            } break;

            default:
                continue;
        }
        if (track->st->codec->codec_id == AV_CODEC_ID_MOV_TEXT)
        {
            // Build codec extradata for tx3g.
            // If we were using a libav codec to generate this data
            // this would (or should) be done for us.
            uint8_t properties[] = {
                0x00, 0x00, 0x00, 0x00,     // Display Flags
                0x01,                       // Horiz. Justification
                0xff,                       // Vert. Justification
                0x00, 0x00, 0x00, 0xff,     // Bg color
                0x00, 0x00, 0x00, 0x00,     // Default text box
                0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00,     // Reserved
                0x00, 0x01,                 // Font ID
                0x00,                       // Font face
                0x18,                       // Font size
                0xff, 0xff, 0xff, 0xff,     // Fg color
                // Font table:
                0x00, 0x00, 0x00, 0x12,     // Font table size
                'f','t','a','b',            // Tag
                0x00, 0x01,                 // Count
                0x00, 0x01,                 // Font ID
                0x05,                       // Font name length
                'A','r','i','a','l'         // Font name
            };

            int width, height = 60;
            width = job->width * job->par.num / job->par.den;
            track->st->codec->width = width;
            track->st->codec->height = height;
            properties[14] = height >> 8;
            properties[15] = height & 0xff;
            properties[16] = width >> 8;
            properties[17] = width & 0xff;

            priv_size = sizeof(properties);
            priv_data = av_malloc(priv_size + FF_INPUT_BUFFER_PADDING_SIZE);
            if (priv_data == NULL)
            {
                hb_error("TX3G extradata: malloc failure");
                goto error;
            }
            memcpy(priv_data, properties, priv_size);
        }
        track->st->codec->extradata = priv_data;
        track->st->codec->extradata_size = priv_size;

        if (ii == subtitle_default)
        {
            track->st->disposition |= AV_DISPOSITION_DEFAULT;
        }
        if (subtitle->config.default_track)
        {
            track->st->disposition |= AV_DISPOSITION_FORCED;
        }

        lang = lookup_lang_code(job->mux, subtitle->iso639_2 );
        if (lang != NULL)
        {
            av_dict_set(&track->st->metadata, "language", lang, 0);
        }
    }