/*
 * decsrtWork - deliver one decoded SRT subtitle buffer per invocation.
 *
 * Waits until the reader has published its pts offset, latches the job's
 * start/stop window on first use, then reads one SRT entry, converts it
 * to SSA, and hands it downstream.  Emits an EOF buffer and HB_WORK_DONE
 * when srt_read() is exhausted.
 */
static int decsrtWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
                       hb_buffer_t ** buf_out )
{
    hb_work_private_t * pv = w->private_data;

    // The reader has not initialized its pts offset yet, so we don't know
    // where to start reading SRTs.  Emit nothing and try again next pass.
    if (pv->job->reader_pts_offset == AV_NOPTS_VALUE)
    {
        *buf_out = NULL;
        return HB_WORK_OK;
    }

    // First call after the reader initialized: latch start and (if a
    // duration was requested) stop times for the subtitle window.
    if (pv->start_time == AV_NOPTS_VALUE)
    {
        pv->start_time = pv->job->reader_pts_offset;
        if (pv->job->pts_to_stop > 0)
        {
            pv->stop_time = pv->job->pts_to_start + pv->job->pts_to_stop;
        }
    }

    hb_buffer_t * subtitle = srt_read( pv );
    if (subtitle == NULL)
    {
        // Input exhausted: signal end-of-stream downstream.
        *buf_out = hb_buffer_eof_init();
        return HB_WORK_DONE;
    }

    // Convert the raw SRT text to SSA markup before passing it on.
    hb_srt_to_ssa(subtitle, ++pv->line);
    *buf_out = subtitle;
    return HB_WORK_OK;
}
/*
 * reader_send_eof - push an EOF buffer into every downstream decoder fifo
 * to signal that the reader is done.
 */
static void reader_send_eof( hb_work_private_t * r )
{
    // Video decoder first.
    push_buf(r, r->job->fifo_mpeg2, hb_buffer_eof_init());

    // Then each audio decoder fifo that exists.
    hb_audio_t * audio;
    int idx = 0;
    while ((audio = hb_list_item(r->job->list_audio, idx++)))
    {
        if (audio->priv.fifo_in)
        {
            push_buf(r, audio->priv.fifo_in, hb_buffer_eof_init());
        }
    }

    // Then each subtitle decoder fifo.  SRT sources are skipped: they are
    // file-based and the SRT decoder produces its own EOF when the file
    // runs out.
    hb_subtitle_t * subtitle;
    idx = 0;
    while ((subtitle = hb_list_item(r->job->list_subtitle, idx++)))
    {
        if (subtitle->fifo_in && subtitle->source != SRTSUB)
        {
            push_buf(r, subtitle->fifo_in, hb_buffer_eof_init());
        }
    }

    hb_log("reader: done. %d scr changes", r->demux.scr_changes);
}
/*
 * Encode - pull one frame's worth of interleaved float samples from the
 * fifo list and encode it with libavcodec.
 *
 * Returns an encoded audio buffer (timestamps in HandBrake's 90kHz
 * timebase), or NULL when not enough input has accumulated or on encoder
 * error.  If the encoder accepts input but produces no packet (codec
 * latency), recurses to feed it the next frame's samples.
 *
 * NOTE(review): avcodec_encode_audio2()/avresample_*() are legacy
 * libav/FFmpeg APIs; this code predates the send/receive encode API.
 */
static hb_buffer_t* Encode(hb_work_object_t *w)
{
    hb_work_private_t *pv = w->private_data;
    hb_audio_t *audio = w->audio;
    uint64_t pts, pos;

    // Not enough buffered input for a full frame yet.
    if (hb_list_bytes(pv->list) < pv->input_samples * sizeof(float))
    {
        return NULL;
    }

    // Dequeue exactly one frame's worth of samples; pts/pos locate the
    // first sample within the input stream.
    hb_list_getbytes(pv->list, pv->input_buf,
                     pv->input_samples * sizeof(float), &pts, &pos);

    // Prepare input frame
    int out_linesize;
    int out_size = av_samples_get_buffer_size(&out_linesize,
                                              pv->context->channels,
                                              pv->samples_per_frame,
                                              pv->context->sample_fmt, 1);
    AVFrame frame = { .nb_samples = pv->samples_per_frame, };
    avcodec_fill_audio_frame(&frame, pv->context->channels,
                             pv->context->sample_fmt,
                             pv->output_buf, out_size, 1);
    if (pv->avresample != NULL)
    {
        // Convert sample format (FLT -> whatever the encoder wants);
        // sample count is unchanged, only the layout/format differs.
        int in_linesize;
        av_samples_get_buffer_size(&in_linesize, pv->context->channels,
                                   frame.nb_samples, AV_SAMPLE_FMT_FLT, 1);
        int out_samples = avresample_convert(pv->avresample,
                                             frame.extended_data,
                                             out_linesize, frame.nb_samples,
                                             &pv->input_buf,
                                             in_linesize, frame.nb_samples);
        if (out_samples != pv->samples_per_frame)
        {
            // we're not doing sample rate conversion, so this shouldn't happen
            hb_log("encavcodecaWork: avresample_convert() failed");
            return NULL;
        }
    }

    // Libav requires that timebase of audio frames be in sample_rate units.
    // pos is a byte offset, hence the division by bytes-per-sample-set.
    frame.pts = pts + (90000 * pos / (sizeof(float) *
                                      pv->out_discrete_channels *
                                      audio->config.out.samplerate));
    frame.pts = av_rescale(frame.pts, pv->context->sample_rate, 90000);

    // Prepare output packet: encode directly into the hb_buffer's storage.
    AVPacket pkt;
    int got_packet;
    hb_buffer_t *out = hb_buffer_init(pv->max_output_bytes);
    av_init_packet(&pkt);
    pkt.data = out->data;
    pkt.size = out->alloc;

    // Encode
    int ret = avcodec_encode_audio2(pv->context, &pkt, &frame, &got_packet);
    if (ret < 0)
    {
        hb_log("encavcodeca: avcodec_encode_audio failed");
        hb_buffer_close(&out);
        return NULL;
    }

    if (got_packet && pkt.size)
    {
        out->size = pkt.size;
        // The output pts from libav is in context->time_base. Convert it back
        // to our timebase.
        out->s.start = av_rescale_q(pkt.pts, pv->context->time_base,
                                    (AVRational){1, 90000});
        out->s.duration = (double)90000 * pv->samples_per_frame /
                          audio->config.out.samplerate;
        out->s.stop = out->s.start + out->s.duration;
        out->s.type = AUDIO_BUF;
        out->s.frametype = HB_FRAME_AUDIO;
    }
    else
    {
        // Encoder buffered the input without emitting a packet; free the
        // unused output buffer and try the next frame's worth of samples.
        hb_buffer_close(&out);
        return Encode(w);
    }
    return out;
}

/*
 * Flush - drain the encoder at end of stream.
 *
 * Repeatedly calls Encode() until the input fifo is exhausted, collects
 * the resulting buffers, appends an EOF buffer, and returns the chain.
 */
static hb_buffer_t * Flush( hb_work_object_t * w )
{
    hb_buffer_list_t list;
    hb_buffer_t *buf;

    hb_buffer_list_clear(&list);
    buf = Encode( w );
    while (buf != NULL)
    {
        hb_buffer_list_append(&list, buf);
        buf = Encode( w );
    }
    // Terminate the chain with an EOF marker for downstream consumers.
    hb_buffer_list_append(&list, hb_buffer_eof_init());
    return hb_buffer_list_clear(&list);
}