Example #1
0
static int reader_work( hb_work_object_t * w, hb_buffer_t ** buf_in,
                        hb_buffer_t ** buf_out)
{
    hb_work_private_t  * r = w->private_data;
    hb_fifo_t         ** fifos;
    hb_buffer_t        * buf;
    hb_buffer_list_t     list;
    int                  ii, chapter = -1;

    hb_buffer_list_clear(&list);

    if (r->bd)
        chapter = hb_bd_chapter( r->bd );
    else if (r->dvd)
        chapter = hb_dvd_chapter( r->dvd );
    else if (r->stream)
        chapter = hb_stream_chapter( r->stream );

    if( chapter < 0 )
    {
        hb_log( "reader: end of the title reached" );
        reader_send_eof(r);
        return HB_WORK_DONE;
    }
    if( chapter > r->chapter_end )
    {
        hb_log("reader: end of chapter %d (media %d) reached at media chapter %d",
                r->job->chapter_end, r->chapter_end, chapter);
        reader_send_eof(r);
        return HB_WORK_DONE;
    }

    if (r->bd)
    {
        if( (buf = hb_bd_read( r->bd )) == NULL )
        {
            reader_send_eof(r);
            return HB_WORK_DONE;
        }
    }
    else if (r->dvd)
    {
        if( (buf = hb_dvd_read( r->dvd )) == NULL )
        {
            reader_send_eof(r);
            return HB_WORK_DONE;
        }
    }
    else if (r->stream)
    {
        if ( (buf = hb_stream_read( r->stream )) == NULL )
        {
            reader_send_eof(r);
            return HB_WORK_DONE;
        }
    }

    (hb_demux[r->title->demuxer])(buf, &list, &r->demux);

    while ((buf = hb_buffer_list_rem_head(&list)) != NULL)
    {
        fifos = GetFifoForId( r, buf->s.id );

        if (fifos && r->stream && r->start_found == 2 )
        {
            // We will inspect the timestamps of each frame in sync
            // to skip from this seek point to the timestamp we
            // want to start at.
            if (buf->s.start != AV_NOPTS_VALUE &&
                buf->s.start < r->job->pts_to_start)
            {
                r->job->pts_to_start -= buf->s.start;
            }
            else if ( buf->s.start >= r->job->pts_to_start )
            {
                r->job->pts_to_start = 0;
            }
            r->start_found = 1;
        }

        if ( fifos && ! r->saw_video && !r->job->indepth_scan )
        {
            // The first data packet with a PTS from an audio or video stream
            // that we're decoding defines 'time zero'. Discard packets until
            // we get one.
            if (buf->s.start != AV_NOPTS_VALUE &&
                buf->s.renderOffset != AV_NOPTS_VALUE &&
                 (buf->s.id == r->title->video_id ||
                  is_audio( r, buf->s.id)))
            {
                // force a new scr offset computation
                r->scr_changes = r->demux.scr_changes - 1;
                // create a stream state if we don't have one so the
                // offset will get computed correctly.
                id_to_st( r, buf, 1 );
                r->saw_video = 1;
                hb_log( "reader: first SCR %"PRId64" id 0x%x DTS %"PRId64,
                        r->demux.last_scr, buf->s.id, buf->s.renderOffset );
            }
            else
            {
                fifos = NULL;
            }
        }

        if ( r->job->indepth_scan || fifos )
        {
            if ( buf->s.renderOffset != AV_NOPTS_VALUE )
            {
                if ( r->scr_changes != r->demux.scr_changes )
                {
                    // This is the first audio or video packet after an SCR
                    // change. Compute a new scr offset that would make this
                    // packet follow the last of this stream with the
                    // correct average spacing.
                    stream_timing_t *st = id_to_st( r, buf, 0 );

                    // if this is the video stream and we don't have
                    // audio yet or this is an audio stream
                    // generate a new scr
                    if ( st->is_audio ||
                         ( st == r->stream_timing && !r->saw_audio ) )
                    {
                        new_scr_offset( r, buf );
                        r->sub_scr_set = 0;
                    }
                    else
                    {
                        // defer the scr change until we get some
                        // audio since audio has a timestamp per
                        // frame but video & subtitles don't. Clear
                        // the timestamps so the decoder will generate
                        // them from the frame durations.
                        if (is_subtitle(r, buf->s.id) &&
                            buf->s.start != AV_NOPTS_VALUE)
                        {
                            if (!r->sub_scr_set)
                            {
                                // We can't generate timestamps in the
                                // subtitle decoder as we can for
                                // audio & video.  So we need to make
                                // the closest guess that we can
                                // for the subtitles start time here.
                                int64_t last = r->stream_timing[0].last;
                                r->scr_offset = buf->s.start - last;
                                r->sub_scr_set = 1;
                            }
                        }
                        else
                        {
                            buf->s.start = AV_NOPTS_VALUE;
                            buf->s.renderOffset = AV_NOPTS_VALUE;
                        }
                    }
                }
            }
            if ( buf->s.start != AV_NOPTS_VALUE )
            {
                int64_t start = buf->s.start - r->scr_offset;

                if (!r->start_found || r->job->indepth_scan)
                {
                    UpdateState( r, start );
                }

                if (r->job->indepth_scan && r->job->pts_to_stop &&
                    start >= r->pts_to_start + r->job->pts_to_stop)
                {
                    // sync normally would terminate p-to-p
                    // but sync doesn't run during indepth scan
                    hb_log("reader: reached pts %"PRId64", exiting early", start);
                    reader_send_eof(r);
                    hb_buffer_list_close(&list);
                    return HB_WORK_DONE;
                }

                if (!r->start_found && start >= r->pts_to_start)
                {
                    // pts_to_start point found
                    // Note that this code path only gets executed for
                    // media where we have not performed an initial seek
                    // to get close to the start time. So the 'start' time
                    // is the time since the first frame.

                    if (r->stream)
                    {
                        // libav multi-threaded decoders can get into
                        // a bad state if the initial data is not
                        // decodable.  So try to improve the chances of
                        // a good start by waiting for an initial iframe
                        hb_stream_set_need_keyframe(r->stream, 1);
                        hb_buffer_close( &buf );
                        continue;
                    }
                    r->start_found = 1;
                    // sync.c also pays attention to job->pts_to_start
                    // It eats up the 10 second slack that we build in
                    // to the start time here in reader (so that video
                    // decode is clean at the start time).
                    // sync.c expects pts_to_start to be relative to the
                    // first timestamp it sees.
                    if (r->job->pts_to_start > start)
                    {
                        r->job->pts_to_start -= start;
                    }
                    else
                    {
                        r->job->pts_to_start = 0;
                    }
                }
                // This log is handy when you need to debug timing problems
                //hb_log("id %x scr_offset %"PRId64
                //       " start %"PRId64" --> %"PRId64"",
                //        buf->s.id, r->scr_offset, buf->s.start,
                //        buf->s.start - r->scr_offset);
                buf->s.start -= r->scr_offset;
                if ( buf->s.stop != AV_NOPTS_VALUE )
                {
                    buf->s.stop -= r->scr_offset;
                }
            }
            if ( buf->s.renderOffset != AV_NOPTS_VALUE )
            {
                // This packet is referenced to the same SCR as the last.
                // Adjust timestamp to remove the System Clock Reference
                // offset then update the average inter-packet time
                // for this stream.
                buf->s.renderOffset -= r->scr_offset;
                update_ipt( r, buf );
            }
#if 0
            // JAS: This was added to fix a rare "audio time went backward"
            // sync error I found in one sample.  But it has a bad side
            // effect on DVDs, causing frequent "adding silence" sync
            // errors. So I am disabling it.
            else
            {
                update_ipt( r, buf );
            }
#endif
        }
        buf = splice_discontinuity(r, buf);
        if( fifos && buf != NULL )
        {
            if ( !r->start_found )
            {
                hb_buffer_close( &buf );
                continue;
            }

            buf->sequence = r->sequence++;
            /* if there are multiple output fifos, send a copy of the
             * buffer down all but the first (we have to not ship the
             * original buffer or we'll race with the thread that's
             * consuming the buffer & inject garbage into the data stream). */
            for (ii = 1; fifos[ii] != NULL; ii++)
            {
                hb_buffer_t *buf_copy = hb_buffer_init(buf->size);
                buf_copy->s = buf->s;
                memcpy(buf_copy->data, buf->data, buf->size);
                push_buf(r, fifos[ii], buf_copy);
            }
            push_buf(r, fifos[0], buf);
            buf = NULL;
        }
        else
        {
            hb_buffer_close(&buf);
        }
    }

    hb_buffer_list_close(&list);
    return HB_WORK_OK;
}
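
The comment near the end of reader_work explains why every output fifo except the first receives a copy of the buffer: shipping the original to more than one consumer would race with the thread already reading it. A minimal standalone sketch of that fan-out pattern, using plain malloc'd buffers and a hypothetical push() in place of HandBrake's push_buf()/hb_fifo_t, might look like this:

#include <stdlib.h>
#include <string.h>

typedef struct { size_t size; unsigned char *data; } buf_t;

/* Hypothetical consumer hand-off; a real fifo push would go here. */
static void push(buf_t **slot, buf_t *b) { *slot = b; }

/* Send 'buf' to every slot in the NULL-terminated array 'queues'.
 * Slots 1..n-1 get independent copies; slot 0 gets the original, so
 * no two consumers ever share the same memory. */
static void fan_out(buf_t **queues[], buf_t *buf)
{
    int i;
    for (i = 1; queues[i] != NULL; i++)
    {
        buf_t *copy = malloc(sizeof(*copy));
        copy->size  = buf->size;
        copy->data  = malloc(buf->size);
        memcpy(copy->data, buf->data, buf->size);
        push(queues[i], copy);
    }
    push(queues[0], buf);   /* original goes to the first consumer */
}

int main(void)
{
    unsigned char payload[] = "hello";
    buf_t  src = { sizeof(payload), payload };
    buf_t *a = NULL, *b = NULL;             /* two single-slot "fifos" */
    buf_t **queues[] = { &a, &b, NULL };

    fan_out(queues, &src);                  /* a == &src, b is a copy  */
    free(b->data);
    free(b);
    return 0;
}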
Example #2
0
static hb_buffer_t* Encode(hb_work_object_t *w)
{
    hb_work_private_t *pv = w->private_data;
    hb_audio_t *audio = w->audio;
    uint64_t pts, pos;

    if (hb_list_bytes(pv->list) < pv->input_samples * sizeof(float))
    {
        return NULL;
    }

    hb_list_getbytes(pv->list, pv->input_buf, pv->input_samples * sizeof(float),
                     &pts, &pos);

    // Prepare input frame
    int out_linesize;
    int out_size = av_samples_get_buffer_size(&out_linesize,
                                              pv->context->channels,
                                              pv->samples_per_frame,
                                              pv->context->sample_fmt, 1);
    AVFrame frame = { .nb_samples = pv->samples_per_frame, };
    avcodec_fill_audio_frame(&frame,
                             pv->context->channels, pv->context->sample_fmt,
                             pv->output_buf, out_size, 1);
    if (pv->avresample != NULL)
    {
        int in_linesize;
        av_samples_get_buffer_size(&in_linesize, pv->context->channels,
                                   frame.nb_samples, AV_SAMPLE_FMT_FLT, 1);
        int out_samples = avresample_convert(pv->avresample,
                                             frame.extended_data, out_linesize,
                                             frame.nb_samples,
                                             &pv->input_buf,       in_linesize,
                                             frame.nb_samples);
        if (out_samples != pv->samples_per_frame)
        {
            // we're not doing sample rate conversion, so this shouldn't happen
            hb_log("encavcodecaWork: avresample_convert() failed");
            return NULL;
        }
    }

    // Libav requires that timebase of audio frames be in sample_rate units
    frame.pts = pts + (90000 * pos / (sizeof(float) *
                                      pv->out_discrete_channels *
                                      audio->config.out.samplerate));
    frame.pts = av_rescale(frame.pts, pv->context->sample_rate, 90000);

    // Prepare output packet
    AVPacket pkt;
    int got_packet;
    hb_buffer_t *out = hb_buffer_init(pv->max_output_bytes);
    av_init_packet(&pkt);
    pkt.data = out->data;
    pkt.size = out->alloc;

    // Encode
    int ret = avcodec_encode_audio2(pv->context, &pkt, &frame, &got_packet);
    if (ret < 0)
    {
        hb_log("encavcodeca: avcodec_encode_audio failed");
        hb_buffer_close(&out);
        return NULL;
    }

    if (got_packet && pkt.size)
    {
        out->size = pkt.size;
        // The output pts from libav is in context->time_base. Convert it back
        // to our timebase.
        out->s.start     = av_rescale_q(pkt.pts, pv->context->time_base,
                                        (AVRational){1, 90000});
        out->s.duration  = (double)90000 * pv->samples_per_frame /
                                           audio->config.out.samplerate;
        out->s.stop      = out->s.start + out->s.duration;
        out->s.type      = AUDIO_BUF;
        out->s.frametype = HB_FRAME_AUDIO;
    }
    else
    {
        hb_buffer_close(&out);
        return Encode(w);
    }

    return out;
}
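
Encode above first offsets the 90 kHz pts by the byte position returned from hb_list_getbytes, then rescales the result into sample_rate units because libav wants audio frame timestamps in that timebase. A self-contained sketch of the same arithmetic, with a hypothetical rescale() standing in for av_rescale() and 48 kHz stereo float input assumed, is:

#include <stdint.h>
#include <stdio.h>

/* round(a * b / c) in 64-bit integer math; stands in for av_rescale(). */
static int64_t rescale(int64_t a, int64_t b, int64_t c)
{
    return (a * b + c / 2) / c;
}

int main(void)
{
    const int64_t samplerate = 48000;                /* assumed output rate   */
    const int64_t channels   = 2;                    /* assumed channel count */
    const int64_t pts        = 900000;               /* 90 kHz clock: 10 s    */
    const int64_t pos        = 4 * channels * 24000; /* 24000 frames buffered */

    /* Offset the pts by the position inside the buffered samples:
     * pos bytes / (bytes per sample frame) = sample frames,
     * sample frames * 90000 / samplerate   = 90 kHz ticks. */
    int64_t pts90k = pts + 90000 * pos / (4 * channels * samplerate);

    /* Convert 90 kHz ticks to a timestamp in samplerate units. */
    int64_t pts_samples = rescale(pts90k, samplerate, 90000);

    printf("pts90k=%lld pts_samples=%lld\n",
           (long long)pts90k, (long long)pts_samples);
    /* 24000 frames = 0.5 s, so pts90k = 945000 and pts_samples = 504000. */
    return 0;
}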

static hb_buffer_t * Flush( hb_work_object_t * w )
{
    hb_buffer_list_t list;
    hb_buffer_t *buf;

    hb_buffer_list_clear(&list);
    buf = Encode( w );
    while (buf != NULL)
    {
        hb_buffer_list_append(&list, buf);
        buf = Encode( w );
    }

    hb_buffer_list_append(&list, hb_buffer_eof_init());
    return hb_buffer_list_clear(&list);
}
Example #3
0
static hb_buffer_t * Encode( hb_work_object_t * w )
{
    hb_work_private_t * pv = w->private_data;
    uint64_t pts, pos;
    hb_audio_t * audio = w->audio;
    hb_buffer_t * buf;
    int ii;

    if( hb_list_bytes( pv->list ) < pv->input_samples * sizeof( float ) )
    {
        return NULL;
    }

    hb_list_getbytes( pv->list, pv->buf, pv->input_samples * sizeof( float ),
                      &pts, &pos);

    hb_chan_map_t *map = NULL;
    if ( audio->config.in.codec == HB_ACODEC_AC3 )
    {
        map = &hb_ac3_chan_map;
    }
    else if ( audio->config.in.codec == HB_ACODEC_DCA )
    {
        map = &hb_qt_chan_map;
    }
    if ( map )
    {
        int layout;
        switch (audio->config.out.mixdown)
        {
            case HB_AMIXDOWN_MONO:
                layout = HB_INPUT_CH_LAYOUT_MONO;
                break;
            case HB_AMIXDOWN_STEREO:
            case HB_AMIXDOWN_DOLBY:
            case HB_AMIXDOWN_DOLBYPLII:
                layout = HB_INPUT_CH_LAYOUT_STEREO;
                break;
            case HB_AMIXDOWN_6CH:
            default:
                layout = HB_INPUT_CH_LAYOUT_3F2R | HB_INPUT_CH_LAYOUT_HAS_LFE;
                break;
        }
        hb_layout_remap( map, &hb_smpte_chan_map, layout, 
                        (float*)pv->buf, AC3_SAMPLES_PER_FRAME);
    }
    
    for (ii = 0; ii < pv->input_samples; ii++)
    {
        // ffmpeg float samples are -1.0 to 1.0
        pv->samples[ii] = ((float*)pv->buf)[ii] / 32768.0;
    }

    buf = hb_buffer_init( pv->output_bytes );
    buf->size = avcodec_encode_audio( pv->context, buf->data, buf->alloc,
                                      (short*)pv->samples );

    buf->start = pts + 90000 * pos / pv->out_discrete_channels / sizeof( float ) / audio->config.out.samplerate;
    buf->stop  = buf->start + 90000 * AC3_SAMPLES_PER_FRAME / audio->config.out.samplerate;

    buf->frametype = HB_FRAME_AUDIO;

    if ( !buf->size )
    {
        hb_buffer_close( &buf );
        return Encode( w );
    }
    else if (buf->size < 0)
    {
        hb_log( "encac3: avcodec_encode_audio failed" );
        hb_buffer_close( &buf );
        return NULL;
    }

    return buf;
}
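The conversion loop in Encode above rescales HandBrake's internal float samples, which span roughly ±32768, down to the ±1.0 range that libav's float sample format expects. A minimal standalone version of that normalization, under the same ±32768 assumption, is:

#include <stddef.h>

/* Normalize 16-bit-range float samples (about ±32768.0) to ±1.0 in place.
 * The source range is an assumption carried over from the loop above;
 * no clamping is done, matching that code. */
static void normalize_samples(float *samples, size_t count)
{
    size_t i;
    for (i = 0; i < count; i++)
        samples[i] /= 32768.0f;
}

int main(void)
{
    float s[3] = { -32768.0f, 0.0f, 16384.0f };
    normalize_samples(s, 3);    /* becomes -1.0, 0.0, 0.5 */
    return 0;
}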
static int decsrtInit( hb_work_object_t * w, hb_job_t * job )
{
    int retval = 1;
    hb_work_private_t * pv;
    hb_buffer_t *buffer;
    int i;
    hb_chapter_t * chapter;

    pv = calloc( 1, sizeof( hb_work_private_t ) );
    if( pv )
    {
        w->private_data = pv;

        pv->job = job;

        buffer = hb_buffer_init( 0 );
        hb_fifo_push( w->fifo_in, buffer);
        
        pv->current_state = k_state_potential_new_entry;
        pv->number_of_entries = 0;
        pv->last_entry_number = 0;
        pv->current_time = 0;
        pv->subtitle = w->subtitle;

        /*
         * Figure out the start and stop times from the chapters being
         * encoded - drop subtitle not in this range.
         */
        pv->start_time = 0;
        for( i = 1; i < job->chapter_start; ++i )
        {
            chapter = hb_list_item( job->list_chapter, i - 1 );
            if( chapter )
            {
                pv->start_time += chapter->duration;
            } else {
                hb_error( "Could not locate chapter %d for SRT start time", i );
                retval = 0;
            }
        }
        pv->stop_time = pv->start_time;
        for( i = job->chapter_start; i <= job->chapter_end; ++i )
        {
            chapter = hb_list_item( job->list_chapter, i - 1 );
            if( chapter )
            {
                pv->stop_time += chapter->duration;
            } else {
                hb_error( "Could not locate chapter %d for SRT start time", i );
                retval = 0;
            }
        }

        hb_deep_log( 3, "SRT Start time %"PRId64", stop time %"PRId64, pv->start_time, pv->stop_time);

        pv->iconv_context = iconv_open( "utf-8", pv->subtitle->config.src_codeset );


        if( pv->iconv_context == (iconv_t) -1 )
        {
            hb_error("Could not open the iconv library with those file formats\n");

        } else {
            memset( &pv->current_entry, 0, sizeof( srt_entry_t ) );
            
            pv->file = fopen( w->subtitle->config.src_filename, "r" );
            
            if( !pv->file )
            {
                hb_error("Could not open the SRT subtitle file '%s'\n", 
                         w->subtitle->config.src_filename);
            } else {
                retval = 0;
            }
        }
    } 

    return retval;
}
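decsrtInit opens an iconv descriptor that is later used to convert each subtitle line from the file's codeset to UTF-8. A minimal standalone use of that POSIX iconv API, with the ISO-8859-1 source codeset assumed for illustration, looks like:

#include <iconv.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    /* "café" with an ISO-8859-1 e-acute (0xE9); the source codeset is an
     * assumption here, in the decoder it comes from subtitle->config. */
    char in[]    = { 'c', 'a', 'f', (char)0xE9, 0 };
    char out[16] = { 0 };

    iconv_t cd = iconv_open("utf-8", "iso-8859-1");
    if (cd == (iconv_t)-1)
    {
        perror("iconv_open");
        return 1;
    }

    char   *inp    = in,         *outp    = out;
    size_t  inleft = strlen(in),  outleft = sizeof(out) - 1;
    if (iconv(cd, &inp, &inleft, &outp, &outleft) == (size_t)-1)
        perror("iconv");

    iconv_close(cd);
    printf("%s\n", out);    /* the e-acute is now the UTF-8 pair 0xC3 0xA9 */
    return 0;
}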
Example #5
0
/**********************************************************************
 * AVIInit
 **********************************************************************
 * Allocates things, create file, initialize and write headers
 *********************************************************************/
static int AVIInit( hb_mux_object_t * m )
{
    hb_job_t   * job   = m->job;
    hb_title_t * title = job->title;

    hb_audio_t    * audio;
    hb_mux_data_t * mux_data;

    int audio_count = hb_list_count( title->list_audio );
    int is_passthru = 0;
    int is_ac3      = 0;
    int hdrl_bytes;
    int i;

    /* Allocate index */
    m->index       = hb_buffer_init( 1024 * 1024 );
    m->index->size = 0;

    /* Open destination file */
    hb_log( "muxavi: opening %s", job->file );
    m->file = fopen( job->file, "wb" );

#define m m->main_header
    /* AVI main header */
    m.FourCC           = FOURCC( "avih" );
    m.BytesCount       = sizeof( hb_avi_main_header_t ) - 8;
    m.MicroSecPerFrame = (uint64_t) 1000000 * job->vrate_base / job->vrate;
    m.Streams          = 1 + audio_count;
    m.Width            = job->width;
    m.Height           = job->height;
#undef m

    /* Video track */
    mux_data = calloc( sizeof( hb_mux_data_t ), 1 );
    job->mux_data = mux_data;

#define h mux_data->header
    /* Video stream header */
    h.FourCC     = FOURCC( "strh" );
    h.BytesCount = sizeof( hb_avi_stream_header_t ) - 8;
    h.Type       = FOURCC( "vids" );

    if( job->vcodec == HB_VCODEC_FFMPEG )
        h.Handler = FOURCC( "divx" );
    else if( job->vcodec == HB_VCODEC_X264 )
        h.Handler = FOURCC( "h264" );

    h.Scale      = job->vrate_base;
    h.Rate       = job->vrate;
#undef h

#define f mux_data->format.v
    /* Video stream format */
    f.FourCC      = FOURCC( "strf" );
    f.BytesCount  = sizeof( hb_bitmap_info_t ) - 8;
    f.Size        = f.BytesCount;
    f.Width       = job->width;
    f.Height      = job->height;
    f.Planes      = 1;
    f.BitCount    = 24;
    if( job->vcodec == HB_VCODEC_FFMPEG )
        f.Compression = FOURCC( "DX50" );
    else if( job->vcodec == HB_VCODEC_X264 )
        f.Compression = FOURCC( "H264" );
#undef f

#define g mux_data->vprp_header
    /* Vprp video stream header */	
    AVRational sample_aspect_ratio = ( AVRational ){ job->anamorphic.par_width, job->anamorphic.par_height };
    AVRational dar = av_mul_q( sample_aspect_ratio, ( AVRational ){ job->width, job->height } );
    int num, den;
    av_reduce(&num, &den, dar.num, dar.den, 0xFFFF);

    g.FourCC                = FOURCC( "vprp" );
    g.BytesCount            = sizeof( hb_avi_vprp_info_t ) - 8;
    g.VideoFormatToken      = 0;
    g.VideoStandard         = 0;
    g.dwVerticalRefreshRate = job->vrate / job->vrate_base;
    g.dwHTotalInT           = job->width;
    g.dwVTotalInLines       = job->height;
    g.dwFrameAspectRatioDen = den;
    g.dwFrameAspectRatioNum = num;
    g.dwFrameWidthInPixels  = job->width;
    g.dwFrameHeightInLines  = job->height;
    g.nbFieldPerFrame       = 1;
    g.CompressedBMHeight    = job->height;
    g.CompressedBMWidth     = job->width;
    g.ValidBMHeight         = job->height;
    g.ValidBMWidth          = job->width;
    g.ValidBMXOffset        = 0;
    g.ValidBMYOffset        = 0;
    g.VideoXOffsetInT       = 0;
    g.VideoYValidStartLine  = 0;
#undef g

    /* Audio tracks */
    for( i = 0; i < hb_list_count( title->list_audio ); i++ )
    {
        audio = hb_list_item( title->list_audio, i );

        is_ac3 = (audio->config.out.codec == HB_ACODEC_AC3);
        is_passthru = (audio->config.out.codec == HB_ACODEC_AC3) ||
                      (audio->config.out.codec == HB_ACODEC_DCA);

        mux_data = calloc( sizeof( hb_mux_data_t ), 1 );
        audio->priv.mux_data = mux_data;

#define h mux_data->header
#define f mux_data->format.a.f
#define m mux_data->format.a.m
        /* Audio stream header */
        h.FourCC        = FOURCC( "strh" );
        h.BytesCount    = sizeof( hb_avi_stream_header_t ) - 8;
        h.Type          = FOURCC( "auds" );
        h.InitialFrames = 1;
        h.Scale         = 1;
        h.Rate          = is_passthru ? ( audio->config.in.bitrate / 8 ) :
                                   ( audio->config.out.bitrate * 1000 / 8 );
        h.Quality       = 0xFFFFFFFF;
        h.SampleSize    = 1;

        /* Audio stream format */
        f.FourCC         = FOURCC( "strf" );
        if( is_passthru )
        {
            f.BytesCount     = sizeof( hb_wave_formatex_t ) - 8;
            f.FormatTag      = is_ac3 ? 0x2000 : 0x2001;
            f.Channels       = HB_INPUT_CH_LAYOUT_GET_DISCRETE_COUNT(audio->config.in.channel_layout);
            f.SamplesPerSec  = audio->config.in.samplerate;
        }
        else
        {
            f.BytesCount     = sizeof( hb_wave_formatex_t ) +
                               sizeof( hb_wave_mp3_t ) - 8;
            f.FormatTag      = 0x55;
            f.Channels       = HB_AMIXDOWN_GET_DISCRETE_CHANNEL_COUNT(audio->config.out.mixdown);
            f.SamplesPerSec  = audio->config.out.samplerate;
        }
        f.AvgBytesPerSec = h.Rate;
        f.BlockAlign     = 1;
        if( is_passthru )
        {
            f.Size       = 0;
        }
        else
        {
            f.Size           = sizeof( hb_wave_mp3_t );
            m.Id             = 1;
            m.Flags          = 2;
            m.BlockSize      = 1152 * f.AvgBytesPerSec / audio->config.out.samplerate;
            m.FramesPerBlock = 1;
            m.CodecDelay     = 1393;
        }
#undef h
#undef f
#undef m
    }

    hdrl_bytes =
        /* Main header */
        4 + sizeof( hb_avi_main_header_t ) +
        /* strh for video + audios */
        ( 1 + audio_count ) * ( 12 + sizeof( hb_avi_stream_header_t ) ) +
        /* video strf */
        sizeof( hb_bitmap_info_t ) +
        /* video vprp */
        ( job->anamorphic.mode ? sizeof( hb_avi_vprp_info_t ) : 0 ) +
        /* audios strf */
        audio_count * ( sizeof( hb_wave_formatex_t ) +
                        ( is_passthru ? 0 : sizeof( hb_wave_mp3_t ) ) );

    /* Here we really start to write into the file */

    /* Main headers */
    WriteInt32( m->file, FOURCC( "RIFF" ) );
    WriteInt32( m->file, 2040 );
    WriteInt32( m->file, FOURCC( "AVI " ) );
    WriteInt32( m->file, FOURCC( "LIST" ) );
    WriteInt32( m->file, hdrl_bytes );
    WriteInt32( m->file, FOURCC( "hdrl" ) );
    WriteMainHeader( m->file, &m->main_header );

    /* Video track */
    mux_data          = job->mux_data;
    mux_data->fourcc = FOURCC( "00dc" );

    WriteInt32( m->file, FOURCC( "LIST" ) );
    WriteInt32( m->file, 4 + sizeof( hb_avi_stream_header_t ) +
                sizeof( hb_bitmap_info_t )  +
                ( job->anamorphic.mode ? sizeof( hb_avi_vprp_info_t ) : 0 ) );
    WriteInt32( m->file, FOURCC( "strl" ) );
    WriteStreamHeader( m->file, &mux_data->header );
    WriteBitmapInfo( m->file, &mux_data->format.v );
    if( job->anamorphic.mode )
    {
        WriteVprpInfo( m->file, &mux_data->vprp_header );
    }

    /* Audio tracks */
    for( i = 0; i < audio_count; i++ )
    {
        char fourcc[4] = "00wb";

        audio    = hb_list_item( title->list_audio, i );
        mux_data = audio->priv.mux_data;

        fourcc[1] = '1' + i; /* This is fine as we don't allow more
                                than 8 tracks */
        mux_data->fourcc = FOURCC( fourcc );

        WriteInt32( m->file, FOURCC( "LIST" ) );
        WriteInt32( m->file, 4 + sizeof( hb_avi_stream_header_t ) +
                             sizeof( hb_wave_formatex_t ) +
                             ( is_passthru ? 0 : sizeof( hb_wave_mp3_t ) ) );
        WriteInt32( m->file, FOURCC( "strl" ) );
        WriteStreamHeader( m->file, &mux_data->header );
        WriteWaveFormatEx( m->file, &mux_data->format.a.f );
        if( !is_passthru )
        {
            WriteWaveMp3( m->file, &mux_data->format.a.m );
        }
    }

    WriteInt32( m->file, FOURCC( "JUNK" ) );
    WriteInt32( m->file, 2020 - hdrl_bytes );
    for( i = 0; i < 2020 - hdrl_bytes; i++ )
    {
        WriteInt8( m->file, 0 );
    }
    WriteInt32( m->file, FOURCC( "LIST" ) );
    WriteInt32( m->file, 4 );
    WriteInt32( m->file, FOURCC( "movi" ) );

    return 0;
}
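AVIInit builds every RIFF chunk tag with the FOURCC() macro, whose definition is not part of this excerpt. A hedged sketch of one common way such a macro is written, packing the four characters little-endian so they land in file order on a little-endian machine, is:

#include <inttypes.h>
#include <stdio.h>

/* Illustration only; HandBrake's own FOURCC() macro may be defined
 * differently (it takes a string literal rather than four chars). */
#define FOURCC4(a, b, c, d)                                            \
    ( (uint32_t)(uint8_t)(a)         | ((uint32_t)(uint8_t)(b) << 8) | \
     ((uint32_t)(uint8_t)(c) << 16)  | ((uint32_t)(uint8_t)(d) << 24) )

int main(void)
{
    uint32_t avih = FOURCC4('a', 'v', 'i', 'h');
    printf("avih = 0x%08" PRIx32 "\n", avih);    /* prints 0x68697661 */
    return 0;
}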
Example #6
0
hb_buffer_t* hb_audio_resample(hb_audio_resample_t *resample,
                               uint8_t **samples, int nsamples)
{
    if (resample == NULL)
    {
        hb_error("hb_audio_resample: resample is NULL");
        return NULL;
    }
    if (resample->resample_needed && resample->avresample == NULL)
    {
        hb_error("hb_audio_resample: resample needed but libavresample context "
                 "is NULL");
        return NULL;
    }

    hb_buffer_t *out;
    int out_size, out_samples;

    if (resample->resample_needed)
    {
        int in_linesize, out_linesize;
        // set in/out linesize and out_size
        av_samples_get_buffer_size(&in_linesize,
                                   resample->resample.channels, nsamples,
                                   resample->resample.sample_fmt, 0);
        out_size = av_samples_get_buffer_size(&out_linesize,
                                              resample->out.channels, nsamples,
                                              resample->out.sample_fmt, 0);
        out = hb_buffer_init(out_size);

        out_samples = avresample_convert(resample->avresample,
                                         &out->data, out_linesize, nsamples,
                                         samples,     in_linesize, nsamples);

        if (out_samples <= 0)
        {
            if (out_samples < 0)
                hb_log("hb_audio_resample: avresample_convert() failed");
            // don't send empty buffers downstream (EOF)
            hb_buffer_close(&out);
            return NULL;
        }
        out->size = (out_samples *
                     resample->out.sample_size * resample->out.channels);
    }
    else
    {
        out_samples = nsamples;
        out_size = (out_samples *
                    resample->out.sample_size * resample->out.channels);
        out = hb_buffer_init(out_size);
        memcpy(out->data, samples[0], out_size);
    }

    /*
     * Dual Mono to Mono.
     *
     * Copy all left or right samples to the first half of the buffer and halve
     * the buffer size.
     */
    if (resample->dual_mono_downmix)
    {
        int ii, jj = !!resample->dual_mono_right_only;
        int sample_size = resample->out.sample_size;
        uint8_t *audio_samples = out->data;
        for (ii = 0; ii < out_samples; ii++)
        {
            memcpy(audio_samples + (ii * sample_size),
                   audio_samples + (jj * sample_size), sample_size);
            jj += 2;
        }
        out->size = out_samples * sample_size;
    }

    return out;
}
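The dual-mono handling at the end of hb_audio_resample keeps only the left or the right channel of an interleaved buffer and halves its size. A standalone sketch of the same copy pattern, assuming interleaved float samples, is:

#include <stddef.h>

/* Collapse interleaved dual-mono stereo to mono in place, mirroring the
 * loop at the end of hb_audio_resample but assuming float samples.
 * right_only selects the right channel; otherwise the left one is kept.
 * Returns the resulting sample count. */
static size_t dual_mono_to_mono(float *samples, size_t frames, int right_only)
{
    size_t i, src = right_only ? 1 : 0;
    for (i = 0; i < frames; i++, src += 2)
        samples[i] = samples[src];
    return frames;
}

int main(void)
{
    float buf[6] = { 1, 2, 3, 4, 5, 6 };    /* three interleaved L/R frames   */
    dual_mono_to_mono(buf, 3, 1);           /* keep the right channel: 2, 4, 6 */
    return 0;
}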
Example #7
0
/***********************************************************************
 * Decode
 ***********************************************************************
 *
 **********************************************************************/
static hb_buffer_t * Decode( hb_work_object_t * w )
{
    hb_work_private_t * pv = w->private_data;
    hb_buffer_t * buf;
    hb_audio_t  * audio = w->audio;
    int           i, j, k;
    int64_t       pts;
    uint64_t      upts, upos;
    int           num_blocks;

    /* Get a frame header if don't have one yet */
    if( !pv->sync )
    {
        while( hb_list_bytes( pv->list ) >= 14 )
        {
            /* We have 14 bytes, check if this is a correct header */
            hb_list_seebytes( pv->list, pv->frame, 14 );
            pv->size = dca_syncinfo( pv->state, pv->frame, &pv->flags_in, &pv->rate,
                                    &pv->bitrate, &pv->frame_length );
            if( pv->size )
            {
                /* It is. W00t. */
                if( pv->error )
                {
                    hb_log( "dca_syncinfo ok" );
                }
                pv->error = 0;
                pv->sync  = 1;
                break;
            }

            /* It is not */
            if( !pv->error )
            {
                hb_log( "dca_syncinfo failed" );
                pv->error = 1;
            }

            /* Try one byte later */
            hb_list_getbytes( pv->list, pv->frame, 1, NULL, NULL );
        }
    }

    if( !pv->sync || hb_list_bytes( pv->list ) < pv->size )
    {
        /* Need more data */
        return NULL;
    }

    /* Get the whole frame */
    hb_list_getbytes( pv->list, pv->frame, pv->size, &upts, &upos );
    pts = (int64_t)upts;

    if ( pts != pv->last_buf_pts )
    {
        pv->last_buf_pts = pts;
    }
    else
    {
        // spec says that the PTS is the start time of the first frame
        // that starts in the PES frame so we only use the PTS once then
        // get the following frames' PTS from the frame length.
        pts = -1;
    }

    // mkv files typically use a 1ms timebase which results in a lot of
    // truncation error in their timestamps. Also, TSMuxer or something
    // in the m2ts-to-mkv toolchain seems to take a very casual attitude
    // about time - timestamps seem to randomly offset by ~40ms for a few
    // seconds then recover. So, if the pts we got is within 50ms of the
    // pts computed from the data stream, use the data stream pts.
    if ( pts == -1 || ( pv->next_pts && fabs( pts - pv->next_pts ) < 50.*90. ) )
    {
        pts = pv->next_pts;
    }

    double frame_dur = (double)(pv->frame_length & ~0xFF) / (double)pv->rate * 90000.;

    /* DCA passthrough: don't decode the DCA frame */
    if( audio->config.out.codec == HB_ACODEC_DCA_PASS )
    {
        buf = hb_buffer_init( pv->size );
        memcpy( buf->data, pv->frame, pv->size );
        buf->s.start = pts;
        buf->s.duration = frame_dur;
        pv->next_pts = pts + frame_dur;
        buf->s.stop  = pv->next_pts;
        pv->sync = 0;
        return buf;
    }

    /* Feed libdca */
    dca_frame( pv->state, pv->frame, &pv->flags_out, &pv->level, 0 );

    /* find out how many blocks are in this frame */
    num_blocks = dca_blocks_num( pv->state );

    /* num_blocks blocks per frame, 256 samples per block,
       pv->out_discrete_channels channels */
    int nsamp = num_blocks * 256;
    frame_dur = (double)nsamp / (double)pv->rate * 90000.;
    buf = hb_buffer_init( nsamp * pv->out_discrete_channels * sizeof( float ) );

    buf->s.start = pts;
    buf->s.duration = frame_dur;
    pv->next_pts = pts + frame_dur;
    buf->s.stop  = pv->next_pts;

    for( i = 0; i < num_blocks; i++ )
    {
        dca_sample_t * samples_in;
        float    * samples_out;

        dca_block( pv->state );
        samples_in  = dca_samples( pv->state );
        samples_out = ((float *) buf->data) + 256 * pv->out_discrete_channels * i;

        /* Interleave */
        for( j = 0; j < 256; j++ )
        {
            for ( k = 0; k < pv->out_discrete_channels; k++ )
            {
                samples_out[(pv->out_discrete_channels*j)+k]   = samples_in[(256*k)+j];
            }
        }

    }

    pv->sync = 0;
    return buf;
}
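The inner loops of Decode above interleave libdca's planar output (256 consecutive samples per channel) into HandBrake's packed float layout. A minimal standalone version of that planar-to-interleaved copy, keeping the 256-sample block size from the code above, is:

#define BLOCK_SAMPLES 256   /* samples per channel per DCA block, as above */

/* Planar-to-interleaved copy matching the inner loops of Decode:
 * in  : planar, channel k occupies in[k*BLOCK_SAMPLES .. k*BLOCK_SAMPLES+255]
 * out : interleaved, frame j occupies out[j*channels .. j*channels+channels-1] */
static void interleave_block(float *out, const float *in, int channels)
{
    int j, k;
    for (j = 0; j < BLOCK_SAMPLES; j++)
        for (k = 0; k < channels; k++)
            out[channels * j + k] = in[BLOCK_SAMPLES * k + j];
}

int main(void)
{
    static float in[BLOCK_SAMPLES * 2], out[BLOCK_SAMPLES * 2];
    interleave_block(out, in, 2);
    return 0;
}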
Example #8
0
void hb_srt_to_ssa(hb_buffer_t *sub_in, int line)
{
    if (sub_in->size == 0)
        return;

    // null terminate input if not already terminated
    if (sub_in->data[sub_in->size-1] != 0)
    {
        hb_buffer_realloc(sub_in, ++sub_in->size);
        sub_in->data[sub_in->size - 1] = 0;
    }
    char * srt = (char*)sub_in->data;
    // SSA markup expands a little over SRT, so allocate a bit of extra
    // space.  More will be realloc'd if needed.
    hb_buffer_t * sub = hb_buffer_init(sub_in->size + 80);
    char * ssa, *ssa_markup;
    int skip, len, pos, ii;

    // Exchange data between input sub and new ssa_sub
    // After this, sub_in contains ssa data
    hb_buffer_swap_copy(sub_in, sub);
    ssa = (char*)sub_in->data;

    sprintf((char*)sub_in->data, "%d,,Default,,0,0,0,,", line);
    pos = strlen((char*)sub_in->data);

    ii = 0;
    while (srt[ii] != '\0')
    {
        if ((ssa_markup = srt_markup_to_ssa(srt + ii, &skip)) != NULL)
        {
            len = strlen(ssa_markup);
            hb_buffer_realloc(sub_in, pos + len + 1);
            // After realloc, sub_in->data may change
            ssa = (char*)sub_in->data;
            sprintf(ssa + pos, "%s", ssa_markup);
            free(ssa_markup);
            pos += len;
            ii += skip;
        }
        else
        {
            hb_buffer_realloc(sub_in, pos + 4);
            // After realloc, sub_in->data may change
            ssa = (char*)sub_in->data;
            if (srt[ii] == '\r')
            {
                if (srt[ii + 1] == '\n')
                {
                    ii++;
                }
                if (srt[ii + 1] != 0)
                {
                    ssa[pos++] = '\\';
                    ssa[pos++] = 'N';
                }
                ii++;
            }
            else if (srt[ii] == '\n')
            {
                if (srt[ii + 1] != 0)
                {
                    ssa[pos++] = '\\';
                    ssa[pos++] = 'N';
                }
                ii++;
            }
            else
            {
                ssa[pos++] = srt[ii++];
            }
        }
    }
    ssa[pos] = '\0';
    sub_in->size = pos + 1;
    hb_buffer_close(&sub);
}
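hb_srt_to_ssa prefixes each event with an SSA dialogue header and turns interior SRT line breaks into the SSA \N escape, dropping a trailing break. A stripped-down sketch of just that line-break handling, leaving out the dialogue header and the srt_markup_to_ssa() tag conversion, is:

#include <stddef.h>
#include <stdio.h>

/* Copy src to dst, replacing interior CR/LF or LF with the SSA "\N"
 * escape and dropping a trailing line break, as hb_srt_to_ssa does. */
static void srt_breaks_to_ssa(char *dst, const char *src)
{
    size_t pos = 0, ii = 0;
    while (src[ii] != '\0')
    {
        if (src[ii] == '\r' || src[ii] == '\n')
        {
            if (src[ii] == '\r' && src[ii + 1] == '\n')
                ii++;                       /* swallow the LF of a CR/LF pair */
            if (src[ii + 1] != '\0')        /* no \N for a trailing break     */
            {
                dst[pos++] = '\\';
                dst[pos++] = 'N';
            }
            ii++;
        }
        else
        {
            dst[pos++] = src[ii++];
        }
    }
    dst[pos] = '\0';
}

int main(void)
{
    char out[64];
    srt_breaks_to_ssa(out, "Hello\r\nworld\r\n");
    puts(out);                              /* prints: Hello\Nworld */
    return 0;
}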
Example #9
0
/***********************************************************************
 * ReaderFunc
 ***********************************************************************
 *
 **********************************************************************/
void ReadLoop( void * _w )
{
    hb_work_object_t * w = _w;
    hb_work_private_t  * r = w->private_data;
    hb_fifo_t   ** fifos;
    hb_buffer_t  * buf;
    hb_list_t    * list;
    int            n;
    int            chapter = -1;
    int            chapter_end = r->job->chapter_end;
    uint8_t        done = 0;

    if (r->bd)
    {
        if( !hb_bd_start( r->bd, r->title ) )
        {
            hb_bd_close( &r->bd );
            return;
        }
        if ( r->job->start_at_preview )
        {
            // XXX code from DecodePreviews - should go into its own routine
            hb_bd_seek( r->bd, (float)r->job->start_at_preview /
                         ( r->job->seek_points ? ( r->job->seek_points + 1.0 ) : 11.0 ) );
        }
        else if ( r->job->pts_to_start )
        {
            // Note, bd seeks always put us to an i-frame.  no need
            // to start decoding early using r->pts_to_start
            hb_bd_seek_pts( r->bd, r->job->pts_to_start );
            r->duration -= r->job->pts_to_start;
            r->job->pts_to_start = 0;
            r->start_found = 1;
        }
        else
        {
            hb_bd_seek_chapter( r->bd, r->job->chapter_start );
        }
        if (r->job->angle > 1)
        {
            hb_bd_set_angle( r->bd, r->job->angle - 1 );
        }
    }
    else if (r->dvd)
    {
        /*
         * XXX this code is a temporary hack that should go away if/when
         *     chapter merging goes away in libhb/dvd.c
         * map the start and end chapter numbers to on-media chapter
         * numbers since chapter merging could cause the handbrake numbers
         * to diverge from the media numbers and, if our chapter_end is after
         * a media chapter that got merged, we'll stop ripping too early.
         */
        int start = r->job->chapter_start;
        hb_chapter_t *chap = hb_list_item( r->job->list_chapter, chapter_end - 1 );

        chapter_end = chap->index;
        if (start > 1)
        {
           chap = hb_list_item( r->job->list_chapter, start - 1 );
           start = chap->index;
        }
        /* end chapter mapping XXX */

        if( !hb_dvd_start( r->dvd, r->title, start ) )
        {
            hb_dvd_close( &r->dvd );
            return;
        }
        if (r->job->angle)
        {
            hb_dvd_set_angle( r->dvd, r->job->angle );
        }

        if ( r->job->start_at_preview )
        {
            // XXX code from DecodePreviews - should go into its own routine
            hb_dvd_seek( r->dvd, (float)r->job->start_at_preview /
                         ( r->job->seek_points ? ( r->job->seek_points + 1.0 ) : 11.0 ) );
        }
    }
    else if ( r->stream && r->job->start_at_preview )
    {
        
        // XXX code from DecodePreviews - should go into its own routine
        hb_stream_seek( r->stream, (float)( r->job->start_at_preview - 1 ) /
                        ( r->job->seek_points ? ( r->job->seek_points + 1.0 ) : 11.0 ) );

    } 
    else if ( r->stream && r->job->pts_to_start )
    {
        int64_t pts_to_start = r->job->pts_to_start;
        
        // Find out what the first timestamp of the stream is
        // and then seek to the appropriate offset from it
        if ( ( buf = hb_stream_read( r->stream ) ) )
        {
            if ( buf->s.start > 0 )
            {
                pts_to_start += buf->s.start;
            }
        }
        
        if ( hb_stream_seek_ts( r->stream, pts_to_start ) >= 0 )
        {
            // Seek takes us to the nearest I-frame before the timestamp
            // that we want.  So we will retrieve the start time of the
            // first packet we get, subtract that from pts_to_start, and
            // inspect the rest of the frames in sync.
            r->start_found = 2;
            r->duration -= r->job->pts_to_start;
            r->job->pts_to_start = pts_to_start;
        }
    } 
    else if( r->stream )
    {
        /*
         * Standard stream, seek to the starting chapter, if set, and track the
         * end chapter so that we end at the right time.
         */
        int start = r->job->chapter_start;
        hb_chapter_t *chap = hb_list_item( r->job->list_chapter, chapter_end - 1 );
        
        chapter_end = chap->index;
        if (start > 1)
        {
            chap = hb_list_item( r->job->list_chapter, start - 1 );
            start = chap->index;
        }
        
        /*
         * Seek to the start chapter.
         */
        hb_stream_seek_chapter( r->stream, start );
    }

    list  = hb_list_init();

    while(!*r->die && !r->job->done && !done)
    {
        if (r->bd)
            chapter = hb_bd_chapter( r->bd );
        else if (r->dvd)
            chapter = hb_dvd_chapter( r->dvd );
        else if (r->stream)
            chapter = hb_stream_chapter( r->stream );

        if( chapter < 0 )
        {
            hb_log( "reader: end of the title reached" );
            break;
        }
        if( chapter > chapter_end )
        {
            hb_log( "reader: end of chapter %d (media %d) reached at media chapter %d",
                    r->job->chapter_end, chapter_end, chapter );
            break;
        }

        if (r->bd)
        {
          if( (buf = hb_bd_read( r->bd )) == NULL )
          {
              break;
          }
        }
        else if (r->dvd)
        {
          if( (buf = hb_dvd_read( r->dvd )) == NULL )
          {
              break;
          }
        }
        else if (r->stream)
        {
          if ( (buf = hb_stream_read( r->stream )) == NULL )
          {
            break;
          }
          if ( r->start_found == 2 )
          {
            // We will inspect the timestamps of each frame in sync
            // to skip from this seek point to the timestamp we
            // want to start at.
            if ( buf->s.start > 0 && buf->s.start < r->job->pts_to_start )
            {
                r->job->pts_to_start -= buf->s.start;
            }
            else if ( buf->s.start >= r->job->pts_to_start )
            {
                r->job->pts_to_start = 0;
                r->start_found = 1;
            }
          }
        }

        (hb_demux[r->title->demuxer])( buf, list, &r->demux );

        while( ( buf = hb_list_item( list, 0 ) ) )
        {
            hb_list_rem( list, buf );
            fifos = GetFifoForId( r, buf->s.id );

            if ( fifos && ! r->saw_video && !r->job->indepth_scan )
            {
                // The first data packet with a PTS from an audio or video stream
                // that we're decoding defines 'time zero'. Discard packets until
                // we get one.
                if ( buf->s.start != -1 && buf->s.renderOffset != -1 &&
                     ( buf->s.id == r->title->video_id || is_audio( r, buf->s.id ) ) )
                {
                    // force a new scr offset computation
                    r->scr_changes = r->demux.scr_changes - 1;
                    // create a stream state if we don't have one so the
                    // offset will get computed correctly.
                    id_to_st( r, buf, 1 );
                    r->saw_video = 1;
                    hb_log( "reader: first SCR %"PRId64" id 0x%x DTS %"PRId64,
                            r->demux.last_scr, buf->s.id, buf->s.renderOffset );
                }
                else
                {
                    fifos = NULL;
                }
            }

            if ( r->job->indepth_scan || fifos )
            {
                if ( buf->s.renderOffset != -1 )
                {
                    if ( r->scr_changes != r->demux.scr_changes )
                    {
                        // This is the first audio or video packet after an SCR
                        // change. Compute a new scr offset that would make this
                        // packet follow the last of this stream with the 
                        // correct average spacing.
                        stream_timing_t *st = id_to_st( r, buf, 0 );

                        // if this is the video stream and we don't have
                        // audio yet or this is an audio stream
                        // generate a new scr
                        if ( st->is_audio ||
                             ( st == r->stream_timing && !r->saw_audio ) )
                        {
                            new_scr_offset( r, buf );
                        }
                        else
                        {
                            // defer the scr change until we get some
                            // audio since audio has a timestamp per
                            // frame but video & subtitles don't. Clear
                            // the timestamps so the decoder will generate
                            // them from the frame durations.
                            buf->s.start = -1;
                            buf->s.renderOffset = -1;
                        }
                    }
                }
                if ( buf->s.start != -1 )
                {
                    int64_t start = buf->s.start - r->scr_offset;

                    if (!r->start_found || r->job->indepth_scan)
                    {
                        UpdateState( r, start );
                    }

                    if (r->job->indepth_scan && r->job->pts_to_stop &&
                        start >= r->pts_to_start + r->job->pts_to_stop)
                    {
                        // sync normally would terminate p-to-p
                        // but sync doesn't run during indepth scan
                        hb_log( "reader: reached pts %"PRId64", exiting early", start );
                        done = 1;
                        break;
                    }

                    if ( !r->start_found &&
                        start >= r->pts_to_start )
                    {
                        // pts_to_start point found
                        r->start_found = 1;
                    }
                    // This log is handy when you need to debug timing problems
                    //hb_log("id %x scr_offset %"PRId64
                    //       " start %"PRId64" --> %"PRId64"", 
                    //        buf->s.id, r->scr_offset, buf->s.start, 
                    //        buf->s.start - r->scr_offset);
                    buf->s.start -= r->scr_offset;
                }
                if ( buf->s.renderOffset != -1 )
                {
                    // This packet is referenced to the same SCR as the last.
                    // Adjust timestamp to remove the System Clock Reference
                    // offset then update the average inter-packet time
                    // for this stream.
                    buf->s.renderOffset -= r->scr_offset;
                    update_ipt( r, buf );
                }
#if 0
                // JAS: This was added to fix a rare "audio time went backward"
                // sync error I found in one sample.  But it has a bad side
                // effect on DVDs, causing frequent "adding silence" sync
                // errors. So I am disabling it.
                else
                {
                    update_ipt( r, buf );
                }
#endif
            }
            if( fifos )
            {
                if ( !r->start_found )
                {
                    hb_buffer_close( &buf );
                    continue;
                }

                buf->sequence = r->sequence++;
                /* if there are multiple output fifos, send a copy of the
                 * buffer down all but the first (we have to not ship the
                 * original buffer or we'll race with the thread that's
                 * consuming the buffer & inject garbage into the data stream). */
                for( n = 1; fifos[n] != NULL; n++)
                {
                    hb_buffer_t *buf_copy = hb_buffer_init( buf->size );
                    buf_copy->s = buf->s;
                    memcpy( buf_copy->data, buf->data, buf->size );
                    push_buf( r, fifos[n], buf_copy );
                }
                push_buf( r, fifos[0], buf );
            }
            else
            {
                hb_buffer_close( &buf );
            }
        }
    }

    // send empty buffers downstream to video & audio decoders to signal we're done.
    if( !*r->die && !r->job->done )
    {
        push_buf( r, r->job->fifo_mpeg2, hb_buffer_init(0) );

        hb_audio_t *audio;
        for( n = 0; (audio = hb_list_item( r->job->list_audio, n)); ++n )
        {
            if ( audio->priv.fifo_in )
                push_buf( r, audio->priv.fifo_in, hb_buffer_init(0) );
        }

        hb_subtitle_t *subtitle;
        for( n = 0; (subtitle = hb_list_item( r->job->list_subtitle, n)); ++n )
        {
            if ( subtitle->fifo_in && subtitle->source == VOBSUB)
                push_buf( r, subtitle->fifo_in, hb_buffer_init(0) );
        }
    }

    hb_list_empty( &list );

    hb_log( "reader: done. %d scr changes", r->demux.scr_changes );
    if ( r->demux.dts_drops )
    {
        hb_log( "reader: %d drops because DTS out of range", r->demux.dts_drops );
    }
}
Example #10
0
static hb_buffer_t *tx3g_decode_to_utf8( hb_buffer_t *in )
{
    uint8_t *pos = in->data;
    uint8_t *end = in->data + in->size;
    
    uint16_t numStyleRecords = 0;
    
    uint8_t *startStyle;
    uint8_t *endStyle;
    
    /*
     * Parse the packet as a TX3G TextSample.
     * 
     * Look for a single StyleBox ('styl') and read all contained StyleRecords.
     * Ignore all other box types.
     * 
     * NOTE: Buffer overflows on read are not checked.
     */
    uint16_t textLength = READ_U16();
    uint8_t *text = READ_ARRAY(textLength);
    startStyle = calloc( textLength, 1 );
    endStyle = calloc( textLength, 1 );
    while ( pos < end ) {
        /*
         * Read TextSampleModifierBox
         */
        uint32_t size = READ_U32();
        if ( size == 0 ) {
            size = end - pos;   // extends to end of packet
        }
        if ( size == 1 ) {
            hb_log( "dectx3gsub: TextSampleModifierBox has unsupported large size" );
            break;
        }
        uint32_t type = READ_U32();
        if ( type == FOURCC("uuid") ) {
            hb_log( "dectx3gsub: TextSampleModifierBox has unsupported extended type" );
            break;
        }
        
        if ( type == FOURCC("styl") ) {
            // Found a StyleBox. Parse the contained StyleRecords
            
            if ( numStyleRecords != 0 ) {
                hb_log( "dectx3gsub: found additional StyleBoxes on subtitle; skipping" );
                SKIP_ARRAY(size);
                continue;
            }
            
            numStyleRecords = READ_U16();
            
            int i;
            for (i=0; i<numStyleRecords; i++) {
                StyleRecord curRecord;
                curRecord.startChar         = READ_U16();
                curRecord.endChar           = READ_U16();
                curRecord.fontID            = READ_U16();
                curRecord.faceStyleFlags    = READ_U8();
                curRecord.fontSize          = READ_U8();
                curRecord.textColorRGBA     = READ_U32();
                
                startStyle[curRecord.startChar] |= curRecord.faceStyleFlags;
                endStyle[curRecord.endChar]     |= curRecord.faceStyleFlags;
            }
        } else {
            // Found some other kind of TextSampleModifierBox. Skip it.
            SKIP_ARRAY(size);
        }
    }
    
    /*
     * Copy text to output buffer, and add HTML markup for the style records
     */
    int maxOutputSize = textLength + (numStyleRecords * NUM_FACE_STYLE_FLAGS * (MAX_OPEN_TAG_SIZE + MAX_CLOSE_TAG_SIZE));
    hb_buffer_t *out = hb_buffer_init( maxOutputSize );
    if ( out == NULL )
        goto fail;
    uint8_t *dst = out->data;
    int charIndex = 0;
    for ( pos = text, end = text + textLength; pos < end; pos++ ) {
        if (IS_10xxxxxx(*pos)) {
            // Is a non-first byte of a multi-byte UTF-8 character
            WRITE_CHAR(*pos);
            continue;   // ...without incrementing 'charIndex'
        }
        
        uint8_t plusStyles = startStyle[charIndex];
        uint8_t minusStyles = endStyle[charIndex];
        
        if (minusStyles & UNDERLINE)
            WRITE_END_TAG('u');
        if (minusStyles & ITALIC)
            WRITE_END_TAG('i');
        if (minusStyles & BOLD)
            WRITE_END_TAG('b');
        
        if (plusStyles & BOLD)
            WRITE_START_TAG('b');
        if (plusStyles & ITALIC)
            WRITE_START_TAG('i');
        if (plusStyles & UNDERLINE)
            WRITE_START_TAG('u');
        
        WRITE_CHAR(*pos);
        charIndex++;
    }
    
    // Trim output buffer to the actual amount of data written
    out->size = dst - out->data;
    
    // Copy metadata from the input packet to the output packet
    out->s.start = in->s.start;
    out->s.stop = in->s.stop;
    
fail:
    free( startStyle );
    free( endStyle );
    
    return out;
}
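The copy loop in tx3g_decode_to_utf8 only advances charIndex on the first byte of each UTF-8 sequence, using the IS_10xxxxxx macro (not shown in this excerpt) to detect continuation bytes. A common way to write that test, offered as an assumption about what the macro does, is:

#include <stdint.h>
#include <assert.h>

/* A continuation byte of a multi-byte UTF-8 sequence has the bit
 * pattern 10xxxxxx, i.e. its top two bits are 1 and 0. */
static int is_utf8_continuation(uint8_t byte)
{
    return (byte & 0xC0) == 0x80;
}

int main(void)
{
    assert(!is_utf8_continuation('A'));     /* 0x41: single-byte character      */
    assert( is_utf8_continuation(0xA9));    /* 2nd byte of U+00E9 (0xC3 0xA9)   */
    assert(!is_utf8_continuation(0xC3));    /* lead byte, not a continuation    */
    return 0;
}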
Example #11
0
static int reader_work( hb_work_object_t * w, hb_buffer_t ** buf_in,
                        hb_buffer_t ** buf_out)
{
    hb_work_private_t  * r = w->private_data;
    hb_fifo_t         ** fifos;
    hb_buffer_t        * buf;
    hb_buffer_list_t     list;
    int                  ii, chapter = -1;

    hb_buffer_list_clear(&list);

    if (r->bd)
        chapter = hb_bd_chapter( r->bd );
    else if (r->dvd)
        chapter = hb_dvd_chapter( r->dvd );
    else if (r->stream)
        chapter = hb_stream_chapter( r->stream );

    if( chapter < 0 )
    {
        hb_log( "reader: end of the title reached" );
        reader_send_eof(r);
        return HB_WORK_DONE;
    }
    if( chapter > r->chapter_end )
    {
        hb_log("reader: end of chapter %d (media %d) reached at media chapter %d",
                r->job->chapter_end, r->chapter_end, chapter);
        reader_send_eof(r);
        return HB_WORK_DONE;
    }

    if (r->bd)
    {
        if( (buf = hb_bd_read( r->bd )) == NULL )
        {
            reader_send_eof(r);
            return HB_WORK_DONE;
        }
    }
    else if (r->dvd)
    {
        if( (buf = hb_dvd_read( r->dvd )) == NULL )
        {
            reader_send_eof(r);
            return HB_WORK_DONE;
        }
    }
    else if (r->stream)
    {
        if ( (buf = hb_stream_read( r->stream )) == NULL )
        {
            reader_send_eof(r);
            return HB_WORK_DONE;
        }
    }
    else
    {
        // This should never happen
        hb_error("Stream not initialized");
        reader_send_eof(r);
        return HB_WORK_DONE;
    }

    (hb_demux[r->title->demuxer])(buf, &list, &r->demux);

    while ((buf = hb_buffer_list_rem_head(&list)) != NULL)
    {
        fifos = GetFifoForId( r, buf->s.id );
        if (fifos && r->stream && !r->start_found)
        {
            // libav is allowing SSA subtitles to leak through that are
            // prior to the seek point.  So only make the adjustment to
            // pts_to_start after we see the next video buffer.
            if (buf->s.id != r->job->title->video_id)
            {
                hb_buffer_close(&buf);
                continue;
            }
            // We will inspect the timestamps of each frame in sync
            // to skip from this seek point to the timestamp we
            // want to start at.
            if (buf->s.start != AV_NOPTS_VALUE &&
                buf->s.start < r->job->pts_to_start)
            {
                r->job->pts_to_start -= buf->s.start;
            }
            else if ( buf->s.start >= r->job->pts_to_start )
            {
                r->job->pts_to_start = 0;
            }
            r->start_found = 1;
        }

        if (buf->s.start   != AV_NOPTS_VALUE &&
            r->scr_changes != r->demux.scr_changes)
        {
            // First valid timestamp after an SCR change.  Update
            // the per-stream scr sequence number
            r->scr_changes = r->demux.scr_changes;

            // libav tries to be too smart with timestamps and
            // enforces unnecessary conditions.  One such condition
            // is that subtitle timestamps must be monotonically
            // increasing.  To ensure this is the case, we calculate
            // an offset upon each SCR change that will guarantee this.
            // This is just a very rough SCR offset.  A fine grained
            // offset that maintains proper sync is calculated in sync.c
            if (r->last_pts != AV_NOPTS_VALUE)
            {
                r->scr_offset  = r->last_pts + 90000 - buf->s.start;
            }
            else
            {
                r->scr_offset  = -buf->s.start;
            }
        }
        // Set the scr sequence that this buffer's timestamps are
        // referenced to.
        buf->s.scr_sequence = r->scr_changes;
        if (buf->s.start != AV_NOPTS_VALUE)
        {
            buf->s.start += r->scr_offset;
        }
        if (buf->s.renderOffset != AV_NOPTS_VALUE)
        {
            buf->s.renderOffset += r->scr_offset;
        }
        if (buf->s.start > r->last_pts)
        {
            r->last_pts = buf->s.start;
            UpdateState(r);
        }

        buf = splice_discontinuity(r, buf);
        if (fifos && buf != NULL)
        {
            /* if there are multiple output fifos, send a copy of the
             * buffer down all but the first (we have to not ship the
             * original buffer or we'll race with the thread that's
             * consuming the buffer & inject garbage into the data stream). */
            for (ii = 1; fifos[ii] != NULL; ii++)
            {
                hb_buffer_t *buf_copy = hb_buffer_init(buf->size);
                buf_copy->s = buf->s;
                memcpy(buf_copy->data, buf->data, buf->size);
                push_buf(r, fifos[ii], buf_copy);
            }
            push_buf(r, fifos[0], buf);
            buf = NULL;
        }
        else
        {
            hb_buffer_close(&buf);
        }
    }

    hb_buffer_list_close(&list);
    return HB_WORK_OK;
}
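
The rough SCR re-basing above (a new scr_offset whenever scr_changes bumps, padded by one second) is easier to see in isolation. Below is a minimal, self-contained sketch under the assumption that timestamps are plain int64_t values on the 90 kHz clock; rebase_t, rebase_pts and NOPTS are made-up names, not HandBrake API.
/*
 * Minimal sketch of the rough SCR re-basing done above, using plain
 * int64_t values instead of hb_buffer_t.  NOPTS, the 90 kHz clock and
 * the one-second (90000 tick) pad mirror the code above; everything
 * else is illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define NOPTS INT64_MIN     /* stand-in for AV_NOPTS_VALUE */

typedef struct {
    int     scr_changes;    /* sequence number of the demuxer clock */
    int64_t scr_offset;     /* offset applied to incoming timestamps */
    int64_t last_pts;       /* highest re-based pts seen so far */
} rebase_t;

/* Re-base one timestamp; demux_scr_changes bumps whenever the source clock jumps. */
static int64_t rebase_pts(rebase_t *r, int demux_scr_changes, int64_t pts)
{
    if (pts != NOPTS && r->scr_changes != demux_scr_changes)
    {
        r->scr_changes = demux_scr_changes;
        /* pad by one second (90000 ticks of the 90 kHz clock) so the
         * re-based stream keeps increasing across the discontinuity */
        r->scr_offset = (r->last_pts != NOPTS) ? r->last_pts + 90000 - pts
                                               : -pts;
    }
    if (pts == NOPTS)
        return NOPTS;
    pts += r->scr_offset;
    if (pts > r->last_pts)
        r->last_pts = pts;
    return pts;
}

int main(void)
{
    rebase_t r = { .scr_changes = -1, .scr_offset = 0, .last_pts = NOPTS };
    /* the clock jumps backwards between the 2nd and 3rd sample */
    int64_t in[]  = { 0, 3000, 100, 3100 };
    int     scr[] = { 0, 0,    1,   1    };
    for (int i = 0; i < 4; i++)
        printf("%lld -> %lld\n", (long long)in[i],
               (long long)rebase_pts(&r, scr[i], in[i]));
    return 0;
}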
Example #12
0
static hb_buffer_t * Encode( hb_work_object_t * w )
{
    hb_work_private_t * pv = w->private_data;
    uint64_t pts, pos;
    hb_audio_t * audio = w->audio;
    hb_buffer_t * buf;

    if( hb_list_bytes( pv->list ) < pv->input_samples * sizeof( float ) )
    {
        return NULL;
    }

    hb_list_getbytes( pv->list, pv->buf, pv->input_samples * sizeof( float ),
                      &pts, &pos);

    // XXX: ffaac fails to remap from the internal libav* channel map (SMPTE) to the native AAC channel map
    //      do it here - this hack should be removed if Libav fixes the bug
    hb_chan_map_t * out_map = ( w->codec_param == CODEC_ID_AAC ) ? &hb_qt_chan_map : &hb_smpte_chan_map;

    if ( audio->config.in.channel_map != out_map )
    {
        hb_layout_remap( audio->config.in.channel_map, out_map, pv->layout,
                         (float*)pv->buf, pv->samples_per_frame );
    }

    // Do we need to convert our internal float format?
    if ( pv->context->sample_fmt != AV_SAMPLE_FMT_FLT )
    {
        int isamp, osamp;
        AVAudioConvert *ctx;

        isamp = av_get_bytes_per_sample( AV_SAMPLE_FMT_FLT );
        osamp = av_get_bytes_per_sample( pv->context->sample_fmt );
        ctx = av_audio_convert_alloc( pv->context->sample_fmt, 1,
                                      AV_SAMPLE_FMT_FLT, 1,
                                      NULL, 0 );

        // get output buffer size then malloc a buffer
        //nsamples = out_size / isamp;
        //buffer = av_malloc( nsamples * sizeof(hb_sample_t) );

        // we're doing straight sample format conversion which 
        // behaves as if there were only one channel.
        const void * const ibuf[6] = { pv->buf };
        void * const obuf[6] = { pv->buf };
        const int istride[6] = { isamp };
        const int ostride[6] = { osamp };

        av_audio_convert( ctx, obuf, ostride, ibuf, istride, pv->input_samples );
        av_audio_convert_free( ctx );
    }
    
    buf = hb_buffer_init( pv->output_bytes );
    buf->size = avcodec_encode_audio( pv->context, buf->data, buf->alloc,
                                      (short*)pv->buf );

    buf->start = pts + 90000 * pos / pv->out_discrete_channels / sizeof( float ) / audio->config.out.samplerate;
    buf->stop  = buf->start + 90000 * pv->samples_per_frame / audio->config.out.samplerate;

    buf->frametype = HB_FRAME_AUDIO;

    if ( !buf->size )
    {
        hb_buffer_close( &buf );
        return Encode( w );
    }
    else if (buf->size < 0)
    {
        hb_log( "encavcodeca: avcodec_encode_audio failed" );
        hb_buffer_close( &buf );
        return NULL;
    }

    return buf;
}
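
The buf->start arithmetic above converts a byte position in the interleaved float sample list into 90 kHz ticks. Here is a small sketch of that conversion with a made-up helper name (bytes_to_90khz) and example values; it assumes interleaved float samples, as in the encoder above.
/*
 * Sketch of the timestamp arithmetic used above: a byte offset into the
 * interleaved float sample list is converted into 90 kHz clock ticks.
 * Names and example values are illustrative, not part of the encoder.
 */
#include <stdint.h>
#include <stdio.h>

static int64_t bytes_to_90khz(int64_t base_pts, uint64_t byte_pos,
                              int channels, int samplerate)
{
    /* bytes -> sample frames -> seconds -> 90 kHz ticks */
    uint64_t frames = byte_pos / (channels * sizeof(float));
    return base_pts + (int64_t)(90000 * frames / samplerate);
}

int main(void)
{
    /* e.g. 48 kHz stereo, 1024 frames already consumed from the list */
    int64_t start = bytes_to_90khz(0, 1024 * 2 * sizeof(float), 2, 48000);
    printf("start = %lld ticks (%.2f ms)\n", (long long)start, start / 90.0);
    return 0;
}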
Example #13
0
/***********************************************************************
 * SyncAudio
 ***********************************************************************
 *
 **********************************************************************/
static int syncAudioWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
                       hb_buffer_t ** buf_out )
{
    hb_work_private_t * pv = w->private_data;
    hb_job_t        * job = pv->job;
    hb_sync_audio_t * sync = &pv->type.audio;
    hb_buffer_t     * buf;
    int64_t start;

    *buf_out = NULL;
    buf = *buf_in;
    *buf_in = NULL;
    /* if the next buffer is an eof send it downstream */
    if ( buf->size <= 0 )
    {
        hb_buffer_close( &buf );
        *buf_out = hb_buffer_init( 0 );
        pv->common->first_pts[sync->index+1] = INT64_MAX - 1;
        return HB_WORK_DONE;
    }

    /* Wait till we can determine the initial pts of all streams */
    if( pv->common->pts_offset == INT64_MIN )
    {
        pv->common->first_pts[sync->index+1] = buf->s.start;
        hb_lock( pv->common->mutex );
        while( pv->common->pts_offset == INT64_MIN )
        {
            // Full fifos will make us wait forever, so get the
            // pts offset from the available streams if full
            if (hb_fifo_is_full(w->fifo_in))
            {
                getPtsOffset( w );
                hb_cond_broadcast( pv->common->next_frame );
            }
            else if ( checkPtsOffset( w ) )
                hb_cond_broadcast( pv->common->next_frame );
            else
                hb_cond_timedwait( pv->common->next_frame, pv->common->mutex, 200 );
        }
        hb_unlock( pv->common->mutex );
    }

    /* Wait for start frame if doing point-to-point */
    hb_lock( pv->common->mutex );
    start = buf->s.start - pv->common->audio_pts_slip;
    while ( !pv->common->start_found )
    {
        if ( pv->common->audio_pts_thresh < 0 )
        {
            // I would initialize this in hb_sync_init, but 
            // job->pts_to_start can be modified by reader 
            // after hb_sync_init is called.
            pv->common->audio_pts_thresh = job->pts_to_start;
        }
        if ( buf->s.start < pv->common->audio_pts_thresh )
        {
            hb_buffer_close( &buf );
            hb_unlock( pv->common->mutex );
            return HB_WORK_OK;
        }
        while ( !pv->common->start_found && 
                buf->s.start >= pv->common->audio_pts_thresh )
        {
            hb_cond_timedwait( pv->common->next_frame, pv->common->mutex, 10 );
            // There is an unfortunate, unavoidable deadlock that can occur.
            // Since we need to wait for a specific frame in syncVideoWork,
            // syncAudioWork can be stalled indefinitely.  The video decoder
            // often drops multiple of the initial frames after starting
            // because they require references that have not been decoded yet.
            // This allows a lot of audio to be queued in the fifo and the
            // audio fifo fills before we get a single video frame.  So we
            // must drop some audio to unplug the pipeline and allow the first
            // video frame to be decoded.
            if ( hb_fifo_is_full(w->fifo_in) )
            {
                hb_buffer_t *tmp;
                tmp = buf = hb_fifo_get( w->fifo_in );
                while ( tmp )
                {
                    tmp = hb_fifo_get( w->fifo_in );
                    if ( tmp )
                    {
                        hb_buffer_close( &buf );
                        buf = tmp;
                    }
                }
            }
        }
        start = buf->s.start - pv->common->audio_pts_slip;
    }
    if ( start < 0 )
    {
        hb_buffer_close( &buf );
        hb_unlock( pv->common->mutex );
        return HB_WORK_OK;
    }
    hb_unlock( pv->common->mutex );

    if( job->frame_to_stop && pv->common->count_frames >= job->frame_to_stop )
    {
        hb_buffer_close( &buf );
        *buf_out = hb_buffer_init( 0 );
        return HB_WORK_DONE;
    }

    if( job->pts_to_stop && sync->next_start >= job->pts_to_stop )
    {
        hb_buffer_close( &buf );
        *buf_out = hb_buffer_init( 0 );
        return HB_WORK_DONE;
    }

    // Audio time went backwards: if our output clock is more than a
    // half frame ahead of the input clock, drop this frame to move
    // closer to sync, and keep dropping frames until the input clock
    // catches up with the output clock.
    if ( sync->next_start - start > 90*15 )
    {
        // Discard data that's in the past.
        if ( sync->first_drop == 0 )
        {
            sync->first_drop = start;
        }
        ++sync->drop_count;
        hb_buffer_close( &buf );
        return HB_WORK_OK;
    }
    if ( sync->first_drop )
    {
        // we were dropping old data but input buf time is now current
        hb_log( "sync: audio 0x%x time went backwards %d ms, dropped %d frames "
                "(start %"PRId64", next %"PRId64")", w->audio->id,
                (int)( sync->next_start - sync->first_drop ) / 90,
                sync->drop_count, sync->first_drop, (int64_t)sync->next_start );
        sync->first_drop = 0;
        sync->drop_count = 0;
    }
    if ( start - sync->next_start >= (90 * 70) )
    {
        if ( start - sync->next_start > (90000LL * 60) )
        {
            // there's a gap of more than a minute between the last
            // frame and this. assume we got a corrupted timestamp
            // and just drop the next buf.
            hb_log( "sync: %d minute time gap in audio 0x%x - dropping buf"
                    "  start %"PRId64", next %"PRId64,
                    (int)((start - sync->next_start) / (90000*60)),
                    w->audio->id, start, (int64_t)sync->next_start );
            hb_buffer_close( &buf );
            return HB_WORK_OK;
        }
        /*
         * there's a gap of at least 70ms between the last
         * frame we processed & the next. Fill it with silence.
         * Or in the case of DCA, skip some frames from the
         * other streams.
         */
        if ( sync->drop_video_to_sync )
        {
            hb_log( "sync: audio gap %d ms. Skipping frames. Audio 0x%x"
                    "  start %"PRId64", next %"PRId64,
                    (int)((start - sync->next_start) / 90),
                    w->audio->id, start, (int64_t)sync->next_start );
            hb_lock( pv->common->mutex );
            pv->common->audio_pts_slip += (start - sync->next_start);
            pv->common->video_pts_slip += (start - sync->next_start);
            hb_unlock( pv->common->mutex );
            *buf_out = OutputAudioFrame( w->audio, buf, sync );
            return HB_WORK_OK;
        }
        hb_log( "sync: adding %d ms of silence to audio 0x%x"
                "  start %"PRId64", next %"PRId64,
                (int)((start - sync->next_start) / 90),
                w->audio->id, start, (int64_t)sync->next_start );
        InsertSilence( w, start - sync->next_start );
    }

    /*
     * When we get here we've taken care of all the dups and gaps in the
     * audio stream and are ready to inject the next input frame into
     * the output stream.
     */
    *buf_out = OutputAudioFrame( w->audio, buf, sync );
    return HB_WORK_OK;
}
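
The drop/gap handling above hinges on three thresholds on the 90 kHz clock: a buffer more than 15 ms in the past is dropped, a gap of 70 ms or more is filled with silence (or absorbed by slipping the clocks when drop_video_to_sync is set), and a gap of over a minute is treated as a corrupt timestamp. A hedged sketch of just that classification follows; classify and audio_action_t are invented names.
/*
 * Sketch of the audio gap/overlap thresholds used above (all values on
 * the 90 kHz clock).  The enum names are made up for illustration; the
 * numeric thresholds (15 ms, 70 ms, 60 s) come from the code above.
 */
#include <stdint.h>
#include <stdio.h>

typedef enum { AUDIO_OK, AUDIO_DROP, AUDIO_FILL_OR_SLIP, AUDIO_BAD_TIMESTAMP } audio_action_t;

static audio_action_t classify(int64_t next_start, int64_t start)
{
    if (next_start - start > 90 * 15)        /* >15 ms in the past: drop   */
        return AUDIO_DROP;
    if (start - next_start > 90000LL * 60)   /* >1 min gap: corrupt stamp  */
        return AUDIO_BAD_TIMESTAMP;
    if (start - next_start >= 90 * 70)       /* >=70 ms gap: silence/slip  */
        return AUDIO_FILL_OR_SLIP;
    return AUDIO_OK;
}

int main(void)
{
    printf("%d %d %d %d\n",
           classify(90000, 90000),                  /* in sync      -> OK   */
           classify(90000, 90000 - 90 * 20),        /* 20 ms behind -> DROP */
           classify(90000, 90000 + 90 * 100),       /* 100 ms gap   -> FILL */
           classify(90000, 90000 + 90000LL * 120)); /* 2 min gap    -> BAD  */
    return 0;
}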
Example #14
0
/***********************************************************************
 * syncVideoWork
 ***********************************************************************
 *
 **********************************************************************/
int syncVideoWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
              hb_buffer_t ** buf_out )
{
    hb_buffer_t * cur, * next, * sub = NULL;
    hb_work_private_t * pv = w->private_data;
    hb_job_t          * job = pv->job;
    hb_subtitle_t     * subtitle;
    hb_sync_video_t   * sync = &pv->type.video;
    int i;
    int64_t next_start;

    *buf_out = NULL;
    next = *buf_in;
    *buf_in = NULL;

    /* Wait till we can determine the initial pts of all streams */
    if( next->size != 0 && pv->common->pts_offset == INT64_MIN )
    {
        pv->common->first_pts[0] = next->s.start;
        hb_lock( pv->common->mutex );
        while( pv->common->pts_offset == INT64_MIN )
        {
            // Full fifos will make us wait forever, so get the
            // pts offset from the available streams if full
            if ( hb_fifo_is_full( job->fifo_raw ) )
            {
                getPtsOffset( w );
                hb_cond_broadcast( pv->common->next_frame );
            }
            else if ( checkPtsOffset( w ) )
                hb_cond_broadcast( pv->common->next_frame );
            else
                hb_cond_timedwait( pv->common->next_frame, pv->common->mutex, 200 );
        }
        hb_unlock( pv->common->mutex );
    }

    hb_lock( pv->common->mutex );
    next_start = next->s.start - pv->common->video_pts_slip;
    hb_unlock( pv->common->mutex );

    /* Wait for start of point-to-point encoding */
    if( !pv->common->start_found )
    {
        hb_sync_video_t   * sync = &pv->type.video;

        if( next->size == 0 )
        {
            *buf_out = next;
            pv->common->start_found = 1;
            pv->common->first_pts[0] = INT64_MAX - 1;
            hb_cond_broadcast( pv->common->next_frame );

            /*
             * Push through any subtitle EOFs in case they 
             * were not synced through.
             */
            for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
            {
                subtitle = hb_list_item( job->list_subtitle, i );
                if( subtitle->config.dest == PASSTHRUSUB )
                {
                    hb_fifo_push( subtitle->fifo_out, hb_buffer_init( 0 ) );
                }
            }
            return HB_WORK_DONE;
        }
        if ( pv->common->count_frames < job->frame_to_start ||
             next->s.start < job->pts_to_start )
        {
            // Flush any subtitles that have pts prior to the
            // current frame
            for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
            {
                subtitle = hb_list_item( job->list_subtitle, i );
                while( ( sub = hb_fifo_see( subtitle->fifo_raw ) ) )
                {
                    if ( sub->s.start > next->s.start )
                        break;
                    sub = hb_fifo_get( subtitle->fifo_raw );
                    hb_buffer_close( &sub );
                }
            }
            hb_lock( pv->common->mutex );
            // Tell the audio threads what must be dropped
            pv->common->audio_pts_thresh = next_start + pv->common->video_pts_slip;
            hb_cond_broadcast( pv->common->next_frame );
            hb_unlock( pv->common->mutex );

            UpdateSearchState( w, next_start );
            hb_buffer_close( &next );

            return HB_WORK_OK;
        }
        hb_lock( pv->common->mutex );
        pv->common->audio_pts_thresh = 0;
        pv->common->audio_pts_slip += next_start;
        pv->common->video_pts_slip += next_start;
        next_start = 0;
        pv->common->start_found = 1;
        pv->common->count_frames = 0;
        hb_cond_broadcast( pv->common->next_frame );
        hb_unlock( pv->common->mutex );
        sync->st_first = 0;
    }

    if( !sync->cur )
    {
        sync->cur = next;
        if (next->size == 0)
        {
            /* we got an end-of-stream as our first video packet? 
             * Feed it downstream & signal that we're done. 
             */
            *buf_out = next;
            sync->cur = NULL;

            pv->common->start_found = 1;
            pv->common->first_pts[0] = INT64_MAX - 1;
            hb_cond_broadcast( pv->common->next_frame );

            /*
             * Push through any subtitle EOFs in case they 
             * were not synced through.
             */
            for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
            {
                subtitle = hb_list_item( job->list_subtitle, i );
                if( subtitle->config.dest == PASSTHRUSUB )
                {
                    hb_fifo_push( subtitle->fifo_out, hb_buffer_init( 0 ) );
                }
            }
            return HB_WORK_DONE;
        }
        return HB_WORK_OK;
    }
    cur = sync->cur;
    /* At this point we have a frame to process. Let's check
        1) if we will be able to push into the fifo ahead
        2) if the next frame is there already, since we need it to
           compute the duration of the current frame */
    if( next->size == 0 )
    {
        hb_buffer_close( &next );

        pv->common->first_pts[0] = INT64_MAX - 1;
        cur->s.start = sync->next_start;
        cur->s.stop = cur->s.start + 90000. / ((double)job->vrate / (double)job->vrate_base);
        sync->next_start += cur->s.stop - cur->s.start;

        /* Make sure last frame is reflected in frame count */
        pv->common->count_frames++;

        /* Push the frame to the renderer */
        *buf_out = cur;
        sync->cur = NULL;

        /* we got an end-of-stream. Feed it downstream & signal that
         * we're done. Note that this means we drop the final frame of
         * video (we don't know its duration). On DVDs the final frame
         * is often strange and dropping it seems to be a good idea. */
        (*buf_out)->next = hb_buffer_init( 0 );

        /*
         * Push through any subtitle EOFs in case they were not synced through.
         */
        for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
        {
            subtitle = hb_list_item( job->list_subtitle, i );
            if( subtitle->config.dest == PASSTHRUSUB )
            {
                hb_fifo_push( subtitle->fifo_out, hb_buffer_init( 0 ) );
            }
        }
        pv->common->start_found = 1;
        hb_cond_broadcast( pv->common->next_frame );
        return HB_WORK_DONE;
    }

    /* Check for end of point-to-point frame encoding */
    if( job->frame_to_stop && pv->common->count_frames > job->frame_to_stop )
    {
        // Drop an empty buffer into our output to ensure that things
        // get flushed all the way out.
        hb_buffer_close( &sync->cur );
        hb_buffer_close( &next );
        *buf_out = hb_buffer_init( 0 );
        hb_log( "sync: reached %d frames, exiting early",
                pv->common->count_frames );

        /*
         * Push through any subtitle EOFs in case they were not synced through.
         */
        for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
        {
            subtitle = hb_list_item( job->list_subtitle, i );
            if( subtitle->config.dest == PASSTHRUSUB )
            {
                hb_fifo_push( subtitle->fifo_out, hb_buffer_init( 0 ) );
            }
        }
        return HB_WORK_DONE;
    }

    /* Check for end of point-to-point pts encoding */
    if( job->pts_to_stop && sync->next_start >= job->pts_to_stop )
    {
        // Drop an empty buffer into our output to ensure that things
        // get flushed all the way out.
        hb_log( "sync: reached pts %"PRId64", exiting early", cur->s.start );
        hb_buffer_close( &sync->cur );
        hb_buffer_close( &next );
        *buf_out = hb_buffer_init( 0 );

        /*
         * Push through any subtitle EOFs in case they were not synced through.
         */
        for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
        {
            subtitle = hb_list_item( job->list_subtitle, i );
            if( subtitle->config.dest == PASSTHRUSUB )
            {
                hb_fifo_push( subtitle->fifo_out, hb_buffer_init( 0 ) );
            }
        }
        return HB_WORK_DONE;
    }

    if( sync->first_frame )
    {
        /* This is our first frame */
        if ( cur->s.start > 0 )
        {
            /*
             * The first pts from a dvd should always be zero but
             * can be non-zero with a transport or program stream since
             * we're not guaranteed to start on an IDR frame. If we get
             * a non-zero initial PTS extend its duration so it behaves
             * as if it started at zero so that our audio timing will
             * be in sync.
             */
            hb_log( "sync: first pts is %"PRId64, cur->s.start );
            cur->s.start = 0;
        }
        sync->first_frame = 0;
    }

    /*
     * since the first frame is always 0 and the upstream reader code
     * is taking care of adjusting for pts discontinuities, we just have
     * to deal with the next frame's start being in the past. This can
     * happen when the PTS is adjusted after data loss but video frame
     * reordering causes some frames with the old clock to appear after
     * the clock change. This creates frames that overlap in time which
     * looks to us like time going backward. The downstream muxing code
     * can deal with overlaps of up to a frame time but anything larger
     * we handle by dropping frames here.
     */
    if ( next_start - cur->s.start <= 0 )
    {
        if ( sync->first_drop == 0 )
        {
            sync->first_drop = next_start;
        }
        ++sync->drop_count;
        if ( next->s.new_chap )
        {
            // don't drop a chapter mark when we drop the buffer
            sync->chap_mark = next->s.new_chap;
        }
        hb_buffer_close( &next );
        return HB_WORK_OK;
    }
    if ( sync->first_drop )
    {
        hb_log( "sync: video time didn't advance - dropped %d frames "
                "(delta %d ms, current %"PRId64", next %"PRId64", dur %d)",
                sync->drop_count, (int)( cur->s.start - sync->first_drop ) / 90,
                cur->s.start, next_start, (int)( next_start - cur->s.start ) );
        sync->first_drop = 0;
        sync->drop_count = 0;
    }

    /*
     * Track the video sequence number locally so that we can sync the audio
     * to it using the sequence number as well as the PTS.
     */
    sync->video_sequence = cur->sequence;
    
    /* Process subtitles that apply to this video frame */
    // NOTE: There is no logic in either subtitle-sync algorithm that waits
    // for the subtitle-decoder if it is lagging behind the video-decoder.
    //       
    // Therefore there is the implicit assumption that the subtitle-decoder 
    // is always faster than the video-decoder. This assumption is definitely 
    // incorrect in some cases where the SSA subtitle decoder is used.

    for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
    {
        int64_t sub_start, sub_stop, duration;

        subtitle = hb_list_item( job->list_subtitle, i );
        
        // Sanitize subtitle start and stop times, then pass to 
        // muxer or renderer filter.
        while ( ( sub = hb_fifo_see( subtitle->fifo_raw ) ) != NULL )
        {
            hb_lock( pv->common->mutex );
            sub_start = sub->s.start - pv->common->video_pts_slip;
            hb_unlock( pv->common->mutex );

            if (sub->s.stop == -1)
            {
                if (subtitle->config.dest != RENDERSUB &&
                    hb_fifo_size( subtitle->fifo_raw ) < 2)
                {
                    // For passthru subs, we want to wait for the
                    // next subtitle so that we can fill in the stop time.
                    // This way the muxer can compute the duration of
                    // the subtitle.
                    //
                    // For render subs, we need to ensure that they
                    // get to the renderer before the associated video
                    // that they are to be applied to.  It is the 
                    // responsibility of the renderer to handle
                    // stop == -1.
                    break;
                }
            }

            sub = hb_fifo_get( subtitle->fifo_raw );
            if ( sub->s.stop == -1 )
            {
                hb_buffer_t *next;
                next = hb_fifo_see( subtitle->fifo_raw );
                if (next != NULL)
                    sub->s.stop = next->s.start;
            }
            // Need to re-write subtitle timestamps to account
            // for any slippage.
            sub_stop = -1;
            if ( sub->s.stop != -1 )
            {
                duration = sub->s.stop - sub->s.start;
                sub_stop = sub_start + duration;
            }

            sub->s.start = sub_start;
            sub->s.stop = sub_stop;

            hb_fifo_push( subtitle->fifo_out, sub );
        }
    }

    /*
     * Adjust the pts of the current frame so that it's contiguous
     * with the previous frame. The start time of the current frame
     * has to be the end time of the previous frame and the stop
     * time has to be the start of the next frame.  We don't
     * make any adjustments to the source timestamps other than removing
     * the clock offsets (which also removes pts discontinuities).
     * This means we automatically encode at the source's frame rate.
     * MP2 uses an implicit duration (frames end when the next frame
     * starts) but more advanced containers like MP4 use an explicit
     * duration. Since we're looking ahead one frame we set the
     * explicit stop time from the start time of the next frame.
     */
    *buf_out = cur;
    int64_t duration = next_start - cur->s.start;
    sync->cur = cur = next;
    cur->sub = NULL;
    cur->s.start -= pv->common->video_pts_slip;
    cur->s.stop -= pv->common->video_pts_slip;
    sync->pts_skip = 0;
    if ( duration <= 0 )
    {
        hb_log( "sync: invalid video duration %"PRId64", start %"PRId64", next %"PRId64"",
                duration, cur->s.start, next_start );
    }

    (*buf_out)->s.start = sync->next_start;
    sync->next_start += duration;
    (*buf_out)->s.stop = sync->next_start;

    if ( sync->chap_mark )
    {
        // we have a pending chapter mark from a recent drop - put it on this
        // buffer (this may make it one frame late but we can't do any better).
        (*buf_out)->s.new_chap = sync->chap_mark;
        sync->chap_mark = 0;
    }

    /* Update UI */
    UpdateState( w );

    return HB_WORK_OK;
}
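
Output video timing above is look-ahead based: a frame is only emitted once its successor arrives, its start is the running output clock, and its duration is the gap between the two input start times (which is why the final frame is dropped at EOF, since its duration is unknown). Here is a minimal sketch of that scheme with plain int64_t timestamps and an assumed ~29.97 fps source; it is not the sync code itself.
/*
 * Sketch of the look-ahead timing scheme used above.  Plain int64_t
 * timestamps stand in for hb_buffer_t; values are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int64_t in_start[]  = { 0, 3003, 6006, 9009 };   /* ~29.97 fps source */
    int     n           = 4;
    int64_t next_start  = 0;                          /* output clock      */

    /* frame i is emitted only once frame i+1 is available */
    for (int i = 0; i + 1 < n; i++)
    {
        int64_t duration  = in_start[i + 1] - in_start[i];
        int64_t out_start = next_start;
        next_start += duration;
        printf("frame %d: start %lld stop %lld\n", i,
               (long long)out_start, (long long)next_start);
    }
    /* the final frame has no successor, so its duration is unknown and
     * the code above drops it at end of stream */
    return 0;
}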
Example #15
0
/*
 * SSA line format:
 *   Dialogue: Marked,Start,End,Style,Name,MarginL,MarginR,MarginV,Effect,Text '\0'
 *             1      2     3   4     5    6       7       8       9      10
 */
static hb_buffer_t *ssa_decode_line_to_utf8( uint8_t *in_data, int in_size, int in_sequence )
{
    uint8_t *pos = in_data;
    uint8_t *end = in_data + in_size;
    
    // Parse values for in->s.start and in->s.stop
    int64_t in_start, in_stop;
    if ( parse_timing_from_ssa_packet( (char *) in_data, &in_start, &in_stop ) )
        goto fail;
    
    uint8_t *textFieldPos = find_field( pos, end, 10 );
    if ( textFieldPos == NULL )
        goto fail;
    
    // Count the number of style overrides in the Text field
    int numStyleOverrides = 0;
    pos = textFieldPos;
    while ( pos < end )
    {
        if (*pos++ == '{')
        {
            numStyleOverrides++;
        }
    }
    
    int maxOutputSize = (end - textFieldPos) + ((numStyleOverrides + 1) * MAX_OVERHEAD_PER_OVERRIDE);
    hb_buffer_t *out = hb_buffer_init( maxOutputSize );
    if ( out == NULL )
        return NULL;
    
    /*
     * The Text field contains plain text marked up with:
     * (1) '\n' -> space
     * (2) '\N' -> newline
     * (3) curly-brace control codes like '{\k44}' -> HTML tags / strip
     * 
     * Perform the above conversions and copy it to the output packet
     */
    StyleSet prevStyles = 0;
    uint8_t *dst = out->data;
    pos = textFieldPos;
    while ( pos < end )
    {
        if ( pos[0] == '\\' && pos[1] == 'n' )
        {
            *dst++ = ' ';
            pos += 2;
        }
        else if ( pos[0] == '\\' && pos[1] == 'N' )
        {
            *dst++ = '\n';
            pos += 2;
        }
        else if ( pos[0] == '{' )
        {
            // Parse SSA style overrides and append appropriate HTML style tags
            StyleSet nextStyles = ssa_parse_style_override( pos, prevStyles );
            ssa_append_html_tags_for_style_change( &dst, prevStyles, nextStyles );
            prevStyles = nextStyles;
            
            // Skip past SSA control code
            while ( pos < end && *pos != '}' ) pos++;
            if    ( pos < end && *pos == '}' ) pos++;
        }
        else
        {
            // Copy raw character
            *dst++ = *pos++;
        }
    }
    
    // Append closing HTML style tags
    ssa_append_html_tags_for_style_change( &dst, prevStyles, 0 );
    
    // Trim output buffer to the actual amount of data written
    out->size = dst - out->data;
    
    // Copy metadata from the input packet to the output packet
    out->s.frametype = HB_FRAME_SUBTITLE;
    out->s.start = in_start;
    out->s.stop = in_stop;
    out->sequence = in_sequence;
    
    return out;
    
fail:
    hb_log( "decssasub: malformed SSA subtitle packet: %.*s\n", in_size, in_data );
    return NULL;
}
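
The Text-field rewriting above can be exercised on its own: "\n" becomes a space, "\N" a real newline, and "{...}" override blocks are skipped. The sketch below deliberately omits the HTML style-tag emission; ssa_text_to_plain is a made-up name, not part of the decoder.
/*
 * Minimal sketch of the Text-field rewriting above: "\n" becomes a space,
 * "\N" becomes a real newline, and "{...}" override blocks are skipped.
 * The style-to-HTML mapping is omitted; this is not the decoder itself.
 */
#include <stdio.h>

static void ssa_text_to_plain(const char *src, char *dst)
{
    while (*src)
    {
        if (src[0] == '\\' && src[1] == 'n')      { *dst++ = ' ';  src += 2; }
        else if (src[0] == '\\' && src[1] == 'N') { *dst++ = '\n'; src += 2; }
        else if (src[0] == '{')
        {
            while (*src && *src != '}') src++;    /* skip the override block */
            if (*src == '}') src++;
        }
        else
        {
            *dst++ = *src++;                      /* copy raw character */
        }
    }
    *dst = '\0';
}

int main(void)
{
    char out[128];
    ssa_text_to_plain("{\\i1}Hello\\Nworld{\\i0} again", out);
    printf("%s\n", out);   /* prints "Hello", a newline, then "world again" */
    return 0;
}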
Example #16
0
hb_filter_private_t * hb_rotate_init( int pix_fmt,
                                           int width,
                                           int height,
                                           char * settings )
{
    if( pix_fmt != PIX_FMT_YUV420P )
    {
        return 0;
    }

    hb_filter_private_t * pv = calloc( 1, sizeof(struct hb_filter_private_s) );

    pv->pix_fmt = pix_fmt;

    pv->width[0]  = width;
    pv->height[0] = height;
    pv->width[1]  = pv->width[2]  = width >> 1;
    pv->height[1] = pv->height[2] = height >> 1;

    pv->buf_out = hb_video_buffer_init( width, height );
    pv->buf_settings = hb_buffer_init( 0 );

    pv->mode     = MODE_DEFAULT;

    pv->ref_stride[0] = pv->width[0];
    pv->ref_stride[1] = pv->width[1];
    pv->ref_stride[2] = pv->width[2];
    
    if( settings )
    {
        sscanf( settings, "%d",
                &pv->mode );
    }

    pv->cpu_count = hb_get_cpu_count();


    /*
     * Create threads and locks.
     */
    pv->rotate_threads = malloc( sizeof( hb_thread_t* ) * pv->cpu_count );
    pv->rotate_begin_lock = malloc( sizeof( hb_lock_t * ) * pv->cpu_count );
    pv->rotate_complete_lock = malloc( sizeof( hb_lock_t * ) * pv->cpu_count );
    pv->rotate_arguments = malloc( sizeof( rotate_arguments_t ) * pv->cpu_count );

    int i;
    for( i = 0; i < pv->cpu_count; i++ )
    {
        rotate_thread_arg_t *thread_args;
    
        thread_args = malloc( sizeof( rotate_thread_arg_t ) );
    
        if( thread_args ) {
            thread_args->pv = pv;
            thread_args->segment = i;
    
            pv->rotate_begin_lock[i] = hb_lock_init();
            pv->rotate_complete_lock[i] = hb_lock_init();
    
            /*
             * Important to start off with the threads locked waiting
             * on input.
             */
            hb_lock( pv->rotate_begin_lock[i] );
    
            pv->rotate_arguments[i].stop = 0;
            pv->rotate_arguments[i].dst = NULL;
            
            pv->rotate_threads[i] = hb_thread_init( "rotate_filter_segment",
                                                   rotate_filter_thread,
                                                   thread_args,
                                                   HB_NORMAL_PRIORITY );
        } else {
            hb_error( "rotate could not create threads" );
        }
    }

    return pv;
}
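
The hb_lock objects above are taken immediately after creation so every worker starts out blocked waiting for input; the filter's work loop (not shown here) presumably releases a begin lock per segment and waits on the matching complete lock. The same hand-off can be sketched with POSIX semaphores instead of the hb_* wrappers; the two-segment setup and all names below are illustrative only.
/*
 * Sketch of the per-segment worker hand-off pattern: a "begin" token is
 * posted per segment and a "done" token is awaited in return.  This uses
 * POSIX semaphores, not HandBrake's hb_lock/hb_thread API.
 */
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

#define SEGMENTS 2

static sem_t begin[SEGMENTS], done[SEGMENTS];

static void *worker(void *arg)
{
    int seg = *(int *)arg;
    /* start out blocked, waiting for input, like the pre-locked begin locks */
    sem_wait(&begin[seg]);
    printf("segment %d: processing its slice\n", seg);
    sem_post(&done[seg]);
    return NULL;
}

int main(void)
{
    pthread_t th[SEGMENTS];
    int ids[SEGMENTS];

    for (int i = 0; i < SEGMENTS; i++)
    {
        sem_init(&begin[i], 0, 0);        /* 0 tokens: workers block */
        sem_init(&done[i], 0, 0);
        ids[i] = i;
        pthread_create(&th[i], NULL, worker, &ids[i]);
    }
    for (int i = 0; i < SEGMENTS; i++)    /* hand one frame's work out */
        sem_post(&begin[i]);
    for (int i = 0; i < SEGMENTS; i++)    /* wait for all segments */
        sem_wait(&done[i]);
    for (int i = 0; i < SEGMENTS; i++)
        pthread_join(th[i], NULL);
    return 0;
}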
Example #17
0
    /* check if we need more data */
    if ((pv->ibytes = hb_list_bytes(pv->list)) < pv->isamples * pv->isamplesiz)
    {
        return NULL;
    }

    hb_buffer_t *obuf;
    AudioStreamPacketDescription odesc = { 0 };
    AudioBufferList obuflist =
    {
        .mNumberBuffers = 1,
        .mBuffers = { { .mNumberChannels = pv->nchannels } },
    };

    obuf = hb_buffer_init(pv->omaxpacket);
    obuflist.mBuffers[0].mDataByteSize = obuf->size;
    obuflist.mBuffers[0].mData = obuf->data;

    OSStatus err = AudioConverterFillComplexBuffer(pv->converter,
                                                   inInputDataProc, pv,
                                                   &npackets, &obuflist, &odesc);

    if (err != noErr && err != 1)
    {
        hb_log("encCoreAudio: unexpected error in AudioConverterFillComplexBuffer()");
    }
    // only drop the output buffer if it's actually empty
    if (!npackets || odesc.mDataByteSize <= 0)
    {
        hb_log("encCoreAudio: 0 packets returned");
Example #18
0
/*
 * Read the SRT file and put the entries into the subtitle fifo for all to read
 */
static hb_buffer_t *srt_read( hb_work_private_t *pv )
{
    char line_buffer[1024];
    int reprocess = 0, resync = 0;

    if( !pv->file )
    {
        return NULL;
    }
    
    while( reprocess || get_line( pv, line_buffer, sizeof( line_buffer ) ) ) 
    {
        reprocess = 0;
        switch (pv->current_state)
        {
        case k_state_timecode:
        {
            struct start_and_end timing;
            int result;

            result = read_time_from_string( line_buffer, &timing );
            if (!result)
            {
                resync = 1;
                pv->current_state = k_state_potential_new_entry;
                continue;
            }
            pv->current_entry.duration = timing.end - timing.start;
            pv->current_entry.offset = timing.start - pv->current_time;
            
            pv->current_time = timing.end;

            pv->current_entry.start = timing.start;
            pv->current_entry.stop = timing.end;

            pv->current_state = k_state_inEntry;
            continue;
        }

        case k_state_inEntry_or_new:
        {
            char *endpoint;
            /*
             * Is this really the start of the next entry?
             * Look for an entry number.
             */
            strtol(line_buffer, &endpoint, 10);
            if (endpoint == line_buffer ||
                (endpoint && *endpoint != '\n' && *endpoint != '\r'))
            {
                /*
                 * Doesn't resemble an entry number,
                 * so we must still be inside an entry.
                 */
                if (!resync)
                {
                    reprocess = 1;
                    pv->current_state = k_state_inEntry;
                }
                continue;
            }
            reprocess = 1;
            pv->current_state = k_state_potential_new_entry;
            break;
        }

        case k_state_inEntry:
        {
            char *q;
            int  size, len;

            // If the current line is empty, we assume this is the
            // separation between two entries. In case we are wrong,
            // the mistake is corrected in the next state.
            if (strcmp(line_buffer, "\n") == 0 || strcmp(line_buffer, "\r\n") == 0) {
                pv->current_state = k_state_potential_new_entry;
                continue;
            }
            
            q = pv->current_entry.text + pv->current_entry.pos;
            len = strlen( line_buffer );
            size = MIN(1024 - pv->current_entry.pos - 1, len );
            memcpy(q, line_buffer, size);
            pv->current_entry.pos += size;
            pv->current_entry.text[pv->current_entry.pos] = '\0';
            break;
        }

        case k_state_potential_new_entry:
        {
            char *endpoint;
            long entry_number;
            hb_buffer_t *buffer = NULL;
            /*
             * Is this really the start of the next entry?
             */
            entry_number = strtol(line_buffer, &endpoint, 10);
            if (!resync && (*line_buffer == '\n' || *line_buffer == '\r'))
            {
                /*
                 * Looks like we are in the wrong mode; add back the
                 * newline we misinterpreted (as a space)...
                 */
                strncat(pv->current_entry.text, " ", 1024);
                pv->current_state = k_state_inEntry_or_new;
                continue;
            }
            if (endpoint == line_buffer ||
                (endpoint && *endpoint != '\n' && *endpoint != '\r'))
            {
                /*
                 * Looks like we are in the wrong mode; reprocess the
                 * line we misinterpreted so it is added to the entry...
                 */
                if (!resync)
                {
                    reprocess = 1;
                    pv->current_state = k_state_inEntry;
                }
                continue;
            }
            /*
             * We found the next entry - or a really rare error condition
             */
            pv->last_entry_number = entry_number;
            resync = 0;
            if (*pv->current_entry.text != '\0')
            {
                long length;
                char *p, *q;
                int  line = 1;
                uint64_t start_time = ( pv->current_entry.start + 
                                        pv->subtitle->config.offset ) * 90;
                uint64_t stop_time = ( pv->current_entry.stop + 
                                       pv->subtitle->config.offset ) * 90;

                if( !( start_time > pv->start_time && stop_time < pv->stop_time ) )
                {
                    hb_deep_log( 3, "Discarding SRT at time start %"PRId64", stop %"PRId64, start_time, stop_time);
                    memset( &pv->current_entry, 0, sizeof( srt_entry_t ) );
                    ++(pv->number_of_entries);
                    pv->current_state = k_state_timecode;
                    continue;
                }

                length = strlen( pv->current_entry.text );

                for (q = p = pv->current_entry.text; *p != '\0'; p++)
                {
                    if (*p == '\n' || *p == '\r')
                    {
                        if (*(p + 1) == '\n' || *(p + 1) == '\r' ||
                            *(p + 1) == '\0')
                        {
                            // followed by line break or last character, skip it
                            length--;
                            continue;
                        }
                        else if (line == 1)
                        {
                            // replace '\r' with '\n'
                            *q   = '\n';
                            line = 2;
                        }
                        else
                        {
                            // all subtitles on two lines tops
                            // replace line breaks with spaces
                            *q = ' ';
                        }
                        q++;
                    }
                    else
                    {
                        *q = *p;
                        q++;
                    }
                }
                *q = '\0';

                buffer = hb_buffer_init( length + 1 );

                if( buffer )
                {
                    buffer->s.start = start_time - pv->start_time;
                    buffer->s.stop = stop_time - pv->start_time;

                    memcpy( buffer->data, pv->current_entry.text, length + 1 );
                }
            }
            memset( &pv->current_entry, 0, sizeof( srt_entry_t ) );
            ++(pv->number_of_entries);
            pv->current_state = k_state_timecode;
            if( buffer )
            {
                return buffer;
            }
            continue;
        } 
        }
    }

    hb_buffer_t *buffer = NULL;
    if (*pv->current_entry.text != '\0')
    {
        long length;
        char *p, *q;
        int  line = 1;
        uint64_t start_time = ( pv->current_entry.start + 
                                pv->subtitle->config.offset ) * 90;
        uint64_t stop_time = ( pv->current_entry.stop + 
                               pv->subtitle->config.offset ) * 90;

        if( !( start_time > pv->start_time && stop_time < pv->stop_time ) )
        {
            hb_deep_log( 3, "Discarding SRT at time start %"PRId64", stop %"PRId64, start_time, stop_time);
            memset( &pv->current_entry, 0, sizeof( srt_entry_t ) );
            return NULL;
        }

        length = strlen( pv->current_entry.text );

        for (q = p = pv->current_entry.text; *p != '\0'; p++)
        {
            if (*p == '\n' || *p == '\r')
            {
                if (*(p + 1) == '\n' || *(p + 1) == '\r' || *(p + 1) == '\0')
                {
                    // followed by line break or last character, skip it
                    length--;
                    continue;
                }
                else if (line == 1)
                {
                    // replace '\r' with '\n'
                    *q   = '\n';
                    line = 2;
                }
                else
                {
                    // all subtitles on two lines tops
                    // replace line breaks with spaces
                    *q = ' ';
                }
                q++;
            }
            else
            {
                *q = *p;
                q++;
            }
        }
        *q = '\0';

        buffer = hb_buffer_init( length + 1 );

        if( buffer )
        {
            buffer->s.start = start_time - pv->start_time;
            buffer->s.stop = stop_time - pv->start_time;

            memcpy( buffer->data, pv->current_entry.text, length + 1 );
        }
    }
    memset( &pv->current_entry, 0, sizeof( srt_entry_t ) );
    if( buffer )
    {
        return buffer;
    }
    
    return NULL;
}
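
The timestamp handling in srt_read above works in milliseconds: each entry time plus the subtitle's configured offset is scaled by 90 to the 90 kHz clock, discarded if it falls outside the job's start/stop window, and then re-based against the job start. A small sketch of that check follows; sub_in_range is an invented helper and the values are examples only.
/*
 * Sketch of the SRT timestamp handling above: ms -> 90 kHz ticks, range
 * check against the job window, then re-base to the job start.
 */
#include <stdint.h>
#include <stdio.h>

static int sub_in_range(int64_t start_ms, int64_t stop_ms, int64_t offset_ms,
                        int64_t job_start, int64_t job_stop,
                        int64_t *out_start, int64_t *out_stop)
{
    int64_t start = (start_ms + offset_ms) * 90;    /* ms -> 90 kHz ticks    */
    int64_t stop  = (stop_ms  + offset_ms) * 90;

    if (!(start > job_start && stop < job_stop))
        return 0;                                   /* out of range: discard */
    *out_start = start - job_start;                 /* re-base to job start  */
    *out_stop  = stop  - job_start;
    return 1;
}

int main(void)
{
    int64_t s, e;
    /* a 65s-67s subtitle against a job running from 60s to 120s */
    if (sub_in_range(65000, 67000, 0, 60LL * 90000, 120LL * 90000, &s, &e))
        printf("start %lld stop %lld\n", (long long)s, (long long)e);
    return 0;
}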
Example #19
0
/*
 * SSA line format:
 *   Dialogue: Marked,Start,End,Style,Name,MarginL,MarginR,MarginV,Effect,Text '\0'
 *             1      2     3   4     5    6       7       8       9      10
 *
 * MKV-SSA packet format:
 *   ReadOrder,Marked,          Style,Name,MarginL,MarginR,MarginV,Effect,Text '\0'
 *   1         2                3     4    5       6       7       8      9
 */
static hb_buffer_t *
ssa_decode_line_to_mkv_ssa( hb_work_object_t * w, hb_buffer_t * in,
                            uint8_t *in_data, int in_size )
{
    hb_work_private_t * pv = w->private_data;
    hb_buffer_t * out;

    // Parse values for in->s.start and in->s.stop
    int64_t in_start, in_stop;
    if ( parse_timing_from_ssa_packet( (char *) in_data, &in_start, &in_stop ) )
        goto fail;

    // Convert the SSA packet to MKV-SSA format, which is what libass expects
    char *mkvIn;
    int numPartsRead;
    char *styleToTextFields;
    char *layerField = malloc( in_size );

    // SSA subtitles have an empty layer field (bare ',').  The scanf
    // format specifier "%*128[^,]" will not match on a bare ','.  There
    // must be at least one non ',' character in the match.  So the format
    // specifier is placed directly next to the ':' so that the next
    // expected ' ' after the ':' will be the character it matches on
    // when there is no layer field.
    numPartsRead = sscanf( (char *)in_data, "Dialogue:%128[^,],", layerField );
    if ( numPartsRead != 1 )
        goto fail;

    styleToTextFields = (char *)find_field( in_data, in_data + in_size, 4 );
    if ( styleToTextFields == NULL ) {
        free( layerField );
        goto fail;
    }

    // The sscanf conversion above will result in an extra space
    // before the layerField.  Strip the space.
    char *stripLayerField = layerField;
    for(; *stripLayerField == ' '; stripLayerField++);

    out = hb_buffer_init( in_size + 1 );
    mkvIn = (char*)out->data;

    mkvIn[0] = '\0';
    sprintf(mkvIn, "%d", pv->readOrder++);    // ReadOrder: make this up
    strcat( mkvIn, "," );
    strcat( mkvIn, stripLayerField );
    strcat( mkvIn, "," );
    strcat( mkvIn, (char *)styleToTextFields );

    out->size           = strlen(mkvIn) + 1;
    out->s.frametype    = HB_FRAME_SUBTITLE;
    out->s.start        = in->s.start;
    out->s.duration     = in_stop - in_start;
    out->s.stop         = in->s.start + out->s.duration;
    out->s.scr_sequence = in->s.scr_sequence;

    if( out->size == 0 )
    {
        hb_buffer_close(&out);
    }

    free( layerField );

    return out;

fail:
    hb_log( "decssasub: malformed SSA subtitle packet: %.*s\n", in_size, in_data );
    return NULL;
}
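
The layer-field trick described in the comment above is easy to demonstrate: "%128[^,]" must match at least one non-',' character, so placing the conversion directly after "Dialogue:" lets it consume the space following the colon when the layer field is empty. A standalone sketch (show is a made-up helper and the dialogue lines are examples, not decoder input):
/*
 * Sketch of the layer-field scanf behaviour described above.
 */
#include <stdio.h>

static void show(const char *line)
{
    char layer[129];
    if (sscanf(line, "Dialogue:%128[^,],", layer) == 1)
    {
        const char *p = layer;
        while (*p == ' ') p++;                /* strip the leading space */
        printf("layer = \"%s\"\n", p);
    }
}

int main(void)
{
    show("Dialogue: 0,0:00:01.00,0:00:02.00,Default,,0,0,0,,Hello");  /* layer = "0" */
    show("Dialogue: ,0:00:01.00,0:00:02.00,Default,,0,0,0,,Hello");   /* layer = ""  */
    return 0;
}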
Example #20
0
/***********************************************************************
 * ReaderFunc
 ***********************************************************************
 *
 **********************************************************************/
static void ReaderFunc( void * _r )
{
    hb_reader_t  * r = _r;
    hb_fifo_t   ** fifos;
    hb_buffer_t  * buf;
    hb_list_t    * list;
    int            n;
    int            chapter = -1;
    int            chapter_end = r->job->chapter_end;

    if ( r->title->type == HB_BD_TYPE )
    {
        if ( !( r->bd = hb_bd_init( r->title->path ) ) )
            return;
    }
    else if ( r->title->type == HB_DVD_TYPE )
    {
        if ( !( r->dvd = hb_dvd_init( r->title->path ) ) )
            return;
    }
    else if ( r->title->type == HB_STREAM_TYPE ||
              r->title->type == HB_FF_STREAM_TYPE )
    {
        if ( !( r->stream = hb_stream_open( r->title->path, r->title ) ) )
            return;
    }
    else
    {
        // Unknown type, should never happen
        return;
    }

    hb_buffer_t *ps = hb_buffer_init( HB_DVD_READ_BUFFER_SIZE );
    if (r->bd)
    {
        if( !hb_bd_start( r->bd, r->title ) )
        {
            hb_bd_close( &r->bd );
            hb_buffer_close( &ps );
            return;
        }
        if ( r->job->start_at_preview )
        {
            // XXX code from DecodePreviews - should go into its own routine
            hb_bd_seek( r->bd, (float)r->job->start_at_preview /
                         ( r->job->seek_points ? ( r->job->seek_points + 1.0 ) : 11.0 ) );
        }
        else if ( r->job->pts_to_start )
        {
            hb_bd_seek_pts( r->bd, r->job->pts_to_start );
            r->job->pts_to_start = 0;
            r->start_found = 1;
        }
        else
        {
            hb_bd_seek_chapter( r->bd, r->job->chapter_start );
        }
        if (r->job->angle > 1)
        {
            hb_bd_set_angle( r->bd, r->job->angle - 1 );
        }
    }
    else if (r->dvd)
    {
        /*
         * XXX this code is a temporary hack that should go away if/when
         *     chapter merging goes away in libhb/dvd.c
         * map the start and end chapter numbers to on-media chapter
         * numbers since chapter merging could cause the handbrake numbers
         * to diverge from the media numbers and, if our chapter_end is after
         * a media chapter that got merged, we'll stop ripping too early.
         */
        int start = r->job->chapter_start;
        hb_chapter_t *chap = hb_list_item( r->title->list_chapter, chapter_end - 1 );

        chapter_end = chap->index;
        if (start > 1)
        {
           chap = hb_list_item( r->title->list_chapter, start - 1 );
           start = chap->index;
        }
        /* end chapter mapping XXX */

        if( !hb_dvd_start( r->dvd, r->title, start ) )
        {
            hb_dvd_close( &r->dvd );
            hb_buffer_close( &ps );
            return;
        }
        if (r->job->angle)
        {
            hb_dvd_set_angle( r->dvd, r->job->angle );
        }

        if ( r->job->start_at_preview )
        {
            // XXX code from DecodePreviews - should go into its own routine
            hb_dvd_seek( r->dvd, (float)r->job->start_at_preview /
                         ( r->job->seek_points ? ( r->job->seek_points + 1.0 ) : 11.0 ) );
        }
    }
    else if ( r->stream && r->job->start_at_preview )
    {
        
        // XXX code from DecodePreviews - should go into its own routine
        hb_stream_seek( r->stream, (float)( r->job->start_at_preview - 1 ) /
                        ( r->job->seek_points ? ( r->job->seek_points + 1.0 ) : 11.0 ) );

    } 
    else if ( r->stream && r->job->pts_to_start )
    {
        int64_t pts_to_start = r->job->pts_to_start;
        
        // Find out what the first timestamp of the stream is
        // and then seek to the appropriate offset from it
        if ( hb_stream_read( r->stream, ps ) )
        {
            if ( ps->start > 0 )
                pts_to_start += ps->start;
        }
        
        if ( hb_stream_seek_ts( r->stream, pts_to_start ) >= 0 )
        {
            // Seek takes us to the nearest I-frame before the timestamp
            // that we want.  So we will retrieve the start time of the
            // first packet we get, subtract that from pts_to_start, and
            // inspect the rest of the frames in sync.
            r->start_found = 2;
            r->job->pts_to_start = pts_to_start;
        }
    } 
    else if( r->stream )
    {
        /*
         * Standard stream, seek to the starting chapter, if set, and track the
         * end chapter so that we end at the right time.
         */
        int start = r->job->chapter_start;
        hb_chapter_t *chap = hb_list_item( r->title->list_chapter, chapter_end - 1 );
        
        chapter_end = chap->index;
        if (start > 1)
        {
            chap = hb_list_item( r->title->list_chapter, start - 1 );
            start = chap->index;
        }
        
        /*
         * Seek to the start chapter.
         */
        hb_stream_seek_chapter( r->stream, start );
    }

    list  = hb_list_init();

    while( !*r->die && !r->job->done )
    {
        if (r->bd)
            chapter = hb_bd_chapter( r->bd );
        else if (r->dvd)
            chapter = hb_dvd_chapter( r->dvd );
        else if (r->stream)
            chapter = hb_stream_chapter( r->stream );

        if( chapter < 0 )
        {
            hb_log( "reader: end of the title reached" );
            break;
        }
        if( chapter > chapter_end )
        {
            hb_log( "reader: end of chapter %d (media %d) reached at media chapter %d",
                    r->job->chapter_end, chapter_end, chapter );
            break;
        }

        if (r->bd)
        {
          if( !hb_bd_read( r->bd, ps ) )
          {
              break;
          }
        }
        else if (r->dvd)
        {
          if( !hb_dvd_read( r->dvd, ps ) )
          {
              break;
          }
        }
        else if (r->stream)
        {
          if ( !hb_stream_read( r->stream, ps ) )
          {
            break;
          }
          if ( r->start_found == 2 )
          {
            // We will inspect the timestamps of each frame in sync
            // to skip from this seek point to the timestamp we
            // want to start at.
            if ( ps->start > 0 && ps->start < r->job->pts_to_start )
            {
                r->job->pts_to_start -= ps->start;
            }
            else if ( ps->start >= r->job->pts_to_start )
            {
                r->job->pts_to_start = 0;
                r->start_found = 1;
            }
          }
        }

        if( r->job->indepth_scan )
        {
            /*
             * Need to update the progress during a subtitle scan
             */
            hb_state_t state;

#define p state.param.working

            state.state = HB_STATE_WORKING;
            p.progress = (double)chapter / (double)r->job->chapter_end;
            if( p.progress > 1.0 )
            {
                p.progress = 1.0;
            }
            p.rate_avg = 0.0;
            p.hours    = -1;
            p.minutes  = -1;
            p.seconds  = -1;
            hb_set_state( r->job->h, &state );
        }

        (hb_demux[r->title->demuxer])( ps, list, &r->demux );

        while( ( buf = hb_list_item( list, 0 ) ) )
        {
            hb_list_rem( list, buf );
            fifos = GetFifoForId( r->job, buf->id );

            if ( fifos && ! r->saw_video && !r->job->indepth_scan )
            {
                // The first data packet with a PTS from an audio or video stream
                // that we're decoding defines 'time zero'. Discard packets until
                // we get one.
                if ( buf->start != -1 && buf->renderOffset != -1 &&
                     ( buf->id == r->title->video_id || is_audio( r, buf->id ) ) )
                {
                    // force a new scr offset computation
                    r->scr_changes = r->demux.scr_changes - 1;
                    // create a stream state if we don't have one so the
                    // offset will get computed correctly.
                    id_to_st( r, buf, 1 );
                    r->saw_video = 1;
                    hb_log( "reader: first SCR %"PRId64" id %d DTS %"PRId64,
                            r->demux.last_scr, buf->id, buf->renderOffset );
                }
                else
                {
                    fifos = NULL;
                }
            }
            if( fifos )
            {
                if ( buf->renderOffset != -1 )
                {
                    if ( r->scr_changes != r->demux.scr_changes )
                    {
                        // This is the first audio or video packet after an SCR
                        // change. Compute a new scr offset that would make this
                        // packet follow the last of this stream with the 
                        // correct average spacing.
                        stream_timing_t *st = id_to_st( r, buf, 0 );

                        // if this is the video stream and we don't have
                        // audio yet or this is an audio stream
                        // generate a new scr
                        if ( st->is_audio ||
                             ( st == r->stream_timing && !r->saw_audio ) )
                        {
                            new_scr_offset( r, buf );
                        }
                        else
                        {
                            // defer the scr change until we get some
                            // audio since audio has a timestamp per
                            // frame but video & subtitles don't. Clear
                            // the timestamps so the decoder will generate
                            // them from the frame durations.
                            buf->start = -1;
                            buf->renderOffset = -1;
                        }
                    }
                }
                if ( buf->start != -1 )
                {
                    int64_t start = buf->start - r->scr_offset;
                    if ( !r->start_found )
                        UpdateState( r, start );

                    if ( !r->start_found &&
                        start >= r->job->pts_to_start )
                    {
                        // pts_to_start point found
                        r->start_found = 1;
                    }
                    // This log is handy when you need to debug timing problems
                    //hb_log("id %x scr_offset %ld start %ld --> %ld", 
                    //        buf->id, r->scr_offset, buf->start, 
                    //        buf->start - r->scr_offset);
                    buf->start -= r->scr_offset;
                }
                if ( buf->renderOffset != -1 )
                {
                    if ( r->scr_changes == r->demux.scr_changes )
                    {
                        // This packet is referenced to the same SCR as the last.
                        // Adjust timestamp to remove the System Clock Reference
                        // offset then update the average inter-packet time
                        // for this stream.
                        buf->renderOffset -= r->scr_offset;
                        update_ipt( r, buf );
                    }
                }
                if ( !r->start_found )
                {
                    hb_buffer_close( &buf );
                    continue;
                }

                buf->sequence = r->sequence++;
                /* if there are multiple output fifos, send a copy of the
                 * buffer down all but the first (we have to not ship the
                 * original buffer or we'll race with the thread that's
                 * consuming the buffer & inject garbage into the data stream). */
                for( n = 1; fifos[n] != NULL; n++)
                {
                    hb_buffer_t *buf_copy = hb_buffer_init( buf->size );
                    hb_buffer_copy_settings( buf_copy, buf );
                    memcpy( buf_copy->data, buf->data, buf->size );
                    push_buf( r, fifos[n], buf_copy );
                }
                push_buf( r, fifos[0], buf );
            }
            else
            {
                hb_buffer_close( &buf );
            }
        }
    }

    // send empty buffers downstream to video & audio decoders to signal we're done.
    if( !*r->die && !r->job->done )
    {
        push_buf( r, r->job->fifo_mpeg2, hb_buffer_init(0) );

        hb_audio_t *audio;
        for( n = 0; (audio = hb_list_item( r->job->title->list_audio, n)); ++n )
        {
            if ( audio->priv.fifo_in )
                push_buf( r, audio->priv.fifo_in, hb_buffer_init(0) );
        }

        hb_subtitle_t *subtitle;
        for( n = 0; (subtitle = hb_list_item( r->job->title->list_subtitle, n)); ++n )
        {
            if ( subtitle->fifo_in && subtitle->source == VOBSUB)
                push_buf( r, subtitle->fifo_in, hb_buffer_init(0) );
        }
    }

    hb_list_empty( &list );
    hb_buffer_close( &ps );
    if (r->bd)
    {
        hb_bd_stop( r->bd );
        hb_bd_close( &r->bd );
    }
    else if (r->dvd)
    {
        hb_dvd_stop( r->dvd );
        hb_dvd_close( &r->dvd );
    }
    else if (r->stream)
    {
        hb_stream_close(&r->stream);
    }

    if ( r->stream_timing )
    {
        free( r->stream_timing );
    }

    hb_log( "reader: done. %d scr changes", r->demux.scr_changes );
    if ( r->demux.dts_drops )
    {
        hb_log( "reader: %d drops because DTS out of range", r->demux.dts_drops );
    }

    free( r );
    _r = NULL;
}
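
The pts_to_start handling above is a two-stage search: hb_stream_seek_ts lands on an I-frame at or before the target, the reader re-expresses pts_to_start relative to the timestamps it then reads, and sync drops frames until the remaining offset has elapsed. The sketch below is a rough illustration of that idea with made-up numbers and a fixed frame duration, not the actual reader/sync interaction.
/*
 * Rough sketch of the two-stage start-point search described above.
 * Plain numbers stand in for buffers; values are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int64_t pts_to_start = 10LL * 90000;        /* want to start 10 s in   */
    int64_t seek_frame   =  8LL * 90000;        /* coarse seek landed here */

    /* the reader re-expresses the target relative to the seek point ... */
    pts_to_start -= seek_frame;

    /* ... and sync later drops frames until that much time has passed */
    int64_t dropped = 0, frame_duration = 3003; /* ~29.97 fps on 90 kHz   */
    int frames_dropped = 0;
    while (dropped + frame_duration <= pts_to_start)
    {
        dropped += frame_duration;
        frames_dropped++;
    }
    printf("skip %d frames (~%lld ticks) after the seek\n",
           frames_dropped, (long long)dropped);
    return 0;
}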