/**
 * Deliver a decoded video frame to the registered frame sink.
 *
 * Clears any pending skip request, advances the media pipe's master
 * clock to this frame's timestamp/epoch, hands the frame to the
 * vd_frame_deliver callback (with the decoder's opaque pointer) and
 * finally lets external-subtitle scanning catch up to the new position.
 *
 * @param vd     video decoder instance
 * @param type   frame buffer type understood by the deliver callback
 * @param frame  opaque frame data, interpreted according to 'type'
 * @param info   timing / geometry metadata for the frame
 */
void video_deliver_frame(video_decoder_t *vd, frame_buffer_type_t type,
                         void *frame, const frame_info_t *info)
{
  vd->vd_skip = 0;  /* a frame made it through; cancel any pending skip */
  mp_set_current_time(vd->vd_mp, info->fi_time, info->fi_epoch);
  vd->vd_frame_deliver(type, frame, info, vd->vd_opaque);
  video_decoder_scan_ext_sub(vd, info->fi_time);
}
/**
 * Propagate a decoded frame's timestamp to the master clock and to the
 * external-subtitle machinery.
 *
 * Does nothing when the timestamp is unset.  The subtitle position is
 * the video timestamp shifted by the user-configured subtitle delta
 * (mp_svdelta) and the supplied frame delta.
 *
 * @param vd     video decoder instance
 * @param ts     presentation timestamp, or PTS_UNSET
 * @param epoch  clock epoch the timestamp belongs to
 * @param delta  additional timestamp offset
 */
static void
video_decoder_set_current_time(video_decoder_t *vd, int64_t ts,
                               int epoch, int64_t delta)
{
  if(ts == PTS_UNSET)
    return;

  mp_set_current_time(vd->vd_mp, ts, epoch, delta);

  const int64_t subpts = ts - vd->vd_mp->mp_svdelta - delta;
  vd->vd_subpts = subpts;

  if(vd->vd_ext_subtitles != NULL)
    subtitles_pick(vd->vd_ext_subtitles, subpts, vd->vd_mp);
}
void video_deliver_frame_avctx(video_decoder_t *vd, media_pipe_t *mp, media_queue_t *mq, AVCodecContext *ctx, AVFrame *frame, const media_buf_t *mb, int decode_time) { frame_info_t fi; if(mb->mb_time != AV_NOPTS_VALUE) mp_set_current_time(mp, mb->mb_time); /* Compute aspect ratio */ switch(mb->mb_aspect_override) { case 0: if(frame->pan_scan != NULL && frame->pan_scan->width != 0) { fi.dar.num = frame->pan_scan->width; fi.dar.den = frame->pan_scan->height; } else { fi.dar.num = ctx->width; fi.dar.den = ctx->height; } if(ctx->sample_aspect_ratio.num) fi.dar = av_mul_q(fi.dar, ctx->sample_aspect_ratio); break; case 1: fi.dar = (AVRational){4,3}; break; case 2: fi.dar = (AVRational){16,9}; break; } int64_t pts = mb->mb_pts; /* Compute duration and PTS of frame */ if(pts == AV_NOPTS_VALUE && mb->mb_dts != AV_NOPTS_VALUE && (ctx->has_b_frames == 0 || frame->pict_type == FF_B_TYPE)) { pts = mb->mb_dts; } int duration = mb->mb_duration; if(!vd_valid_duration(duration)) { /* duration is zero or very invalid, use duration from last output */ duration = vd->vd_estimated_duration; } if(pts == AV_NOPTS_VALUE && vd->vd_nextpts != AV_NOPTS_VALUE) pts = vd->vd_nextpts; /* no pts set, use estimated pts */ if(pts != AV_NOPTS_VALUE && vd->vd_prevpts != AV_NOPTS_VALUE) { /* we know PTS of a prior frame */ int64_t t = (pts - vd->vd_prevpts) / vd->vd_prevpts_cnt; if(vd_valid_duration(t)) { /* inter frame duration seems valid, store it */ vd->vd_estimated_duration = t; if(duration == 0) duration = t; } else if(t < 0 || t > 10000000LL) { /* PTS discontinuity, use estimated PTS from last output instead */ pts = vd->vd_nextpts; } } duration += frame->repeat_pict * duration / 2; if(pts != AV_NOPTS_VALUE) { vd->vd_prevpts = pts; vd->vd_prevpts_cnt = 0; } vd->vd_prevpts_cnt++; if(duration == 0) { TRACE(TRACE_DEBUG, "Video", "Dropping frame with duration = 0"); return; } prop_set_int(mq->mq_prop_too_slow, decode_time > duration); if(pts != AV_NOPTS_VALUE) { vd->vd_nextpts = pts + duration; } 
else { vd->vd_nextpts = AV_NOPTS_VALUE; } vd->vd_interlaced |= frame->interlaced_frame && !mb->mb_disable_deinterlacer; fi.width = ctx->width; fi.height = ctx->height; fi.pix_fmt = ctx->pix_fmt; fi.pts = pts; fi.epoch = mb->mb_epoch; fi.duration = duration; fi.interlaced = !!vd->vd_interlaced; fi.tff = !!frame->top_field_first; fi.prescaled = 0; fi.color_space = ctx->colorspace; fi.color_range = ctx->color_range; video_deliver_frame(vd, FRAME_BUFFER_TYPE_LIBAV_FRAME, frame, &fi, mb->mb_send_pts); }
/**
 * Decode one audio media buffer and feed the result into the
 * resampler / output stage.
 *
 * Handles three input flavours: raw 16-bit PCM (mb_cw == NULL),
 * SPDIF/coded passthrough, and regular libav decoding.  Tracks codec
 * changes, lazily opens the decoder context, reconfigures the
 * avresample context whenever the decoded format changes, and updates
 * ad_pts/ad_epoch (compensated for resampler latency) so the output
 * stage can drive the clock.
 *
 * FIX: the "Unable to map %d channels" TRACE previously had a %d
 * conversion with no matching argument — undefined behavior for
 * printf-style varargs; ctx->channels is now passed.
 */
static void
audio_process_audio(audio_decoder_t *ad, media_buf_t *mb)
{
  const audio_class_t *ac = ad->ad_ac;
  AVFrame *frame = ad->ad_frame;
  media_pipe_t *mp = ad->ad_mp;
  media_queue_t *mq = &mp->mp_audio;
  int r;
  int got_frame;

  if(mb->mb_skip || mb->mb_stream != mq->mq_stream)
    return;

  while(mb->mb_size) {

    if(mb->mb_cw == NULL) {
      /* Raw native-endian 16-bit PCM: synthesize an AVFrame in place */
      frame->sample_rate = mb->mb_rate;
      frame->format = AV_SAMPLE_FMT_S16;
      switch(mb->mb_channels) {
      case 1:
        frame->channel_layout = AV_CH_LAYOUT_MONO;
        frame->nb_samples = mb->mb_size / 2;
        break;
      case 2:
        frame->channel_layout = AV_CH_LAYOUT_STEREO;
        frame->nb_samples = mb->mb_size / 4;
        break;
      default:
        abort();
      }
      frame->data[0] = mb->mb_data;
      frame->linesize[0] = 0;
      r = mb->mb_size;
      got_frame = 1;

    } else {

      media_codec_t *mc = mb->mb_cw;
      AVCodecContext *ctx = mc->ctx;

      if(mc->codec_id != ad->ad_in_codec_id) {
        AVCodec *codec = avcodec_find_decoder(mc->codec_id);
        TRACE(TRACE_DEBUG, "audio", "Codec changed to %s (0x%x)",
              codec ? codec->name : "???", mc->codec_id);
        ad->ad_in_codec_id = mc->codec_id;
        ad->ad_in_sample_rate = 0;

        audio_cleanup_spdif_muxer(ad);

        /* Ask the audio class how this codec should be handled
           (PCM decode, SPDIF passthrough or coded delivery) */
        ad->ad_mode = ac->ac_get_mode != NULL ?
          ac->ac_get_mode(ad, mc->codec_id,
                          ctx ? ctx->extradata : NULL,
                          ctx ? ctx->extradata_size : 0) : AUDIO_MODE_PCM;

        if(ad->ad_mode == AUDIO_MODE_SPDIF) {
          audio_setup_spdif_muxer(ad, codec, mq);
        } else if(ad->ad_mode == AUDIO_MODE_CODED) {
          hts_mutex_lock(&mp->mp_mutex);
          ac->ac_deliver_coded_locked(ad, mb->mb_data, mb->mb_size,
                                      mb->mb_pts, mb->mb_epoch);
          hts_mutex_unlock(&mp->mp_mutex);
          return;
        }
      }

      if(ad->ad_spdif_muxer != NULL) {
        /* SPDIF passthrough: mux the compressed packet straight out */
        mb->mb_pkt.stream_index = 0;
        ad->ad_pts = mb->mb_pts;
        ad->ad_epoch = mb->mb_epoch;

        mb->mb_pts = AV_NOPTS_VALUE;
        mb->mb_dts = AV_NOPTS_VALUE;

        av_write_frame(ad->ad_spdif_muxer, &mb->mb_pkt);
        avio_flush(ad->ad_spdif_muxer->pb);
        return;
      }

      if(ad->ad_mode == AUDIO_MODE_CODED) {
        ad->ad_pts = mb->mb_pts;
        ad->ad_epoch = mb->mb_epoch;
      }

      if(ctx == NULL) {
        /* Lazily open the decoder on first use */
        AVCodec *codec = avcodec_find_decoder(mc->codec_id);
        assert(codec != NULL); // Checked in libav.c

        ctx = mc->ctx = avcodec_alloc_context3(codec);

        if(ad->ad_stereo_downmix)
          ctx->request_channel_layout = AV_CH_LAYOUT_STEREO;

        if(avcodec_open2(mc->ctx, codec, NULL) < 0) {
          av_freep(&mc->ctx);
          return;
        }
      }

      r = avcodec_decode_audio4(ctx, frame, &got_frame, &mb->mb_pkt);
      if(r < 0)
        return;

      if(frame->sample_rate == 0) {
        /* Decoder did not report a rate: fall back to the codec
           context, then to the demuxer context */
        frame->sample_rate = ctx->sample_rate;

        if(frame->sample_rate == 0 && mb->mb_cw->fmt_ctx)
          frame->sample_rate = mb->mb_cw->fmt_ctx->sample_rate;

        if(frame->sample_rate == 0) {
          if(!ad->ad_sample_rate_fail) {
            ad->ad_sample_rate_fail = 1;  /* trace only once */
            TRACE(TRACE_ERROR, "Audio",
                  "Unable to determine sample rate");
          }
          return;
        }
      }

      if(frame->channel_layout == 0) {
        frame->channel_layout =
          av_get_default_channel_layout(ctx->channels);
        if(frame->channel_layout == 0) {
          if(!ad->ad_channel_layout_fail) {
            ad->ad_channel_layout_fail = 1;  /* trace only once */
            /* FIX: %d previously had no matching argument (UB) */
            TRACE(TRACE_ERROR, "Audio",
                  "Unable to map %d channels to channel layout",
                  ctx->channels);
          }
          return;
        }
      }

      if(mp->mp_stats)
        mp_set_mq_meta(mq, ctx->codec, ctx);
    }

    if(mb->mb_pts != PTS_UNSET) {
      /* Compensate PTS for samples still buffered in the resampler */
      int od = 0, id = 0;

      if(ad->ad_avr != NULL) {
        od = avresample_available(ad->ad_avr) *
          1000000LL / ad->ad_out_sample_rate;
        id = avresample_get_delay(ad->ad_avr) *
          1000000LL / frame->sample_rate;
      }
      ad->ad_pts = mb->mb_pts - od - id;
      ad->ad_epoch = mb->mb_epoch;

      if(mb->mb_drive_clock)
        mp_set_current_time(mp, mb->mb_pts - ad->ad_delay,
                            mb->mb_epoch, mb->mb_delta);
      mb->mb_pts = PTS_UNSET; // No longer valid
    }

    mb->mb_data += r;
    mb->mb_size -= r;

    if(got_frame) {

      if(frame->sample_rate    != ad->ad_in_sample_rate ||
         frame->format         != ad->ad_in_sample_format ||
         frame->channel_layout != ad->ad_in_channel_layout ||
         ad->ad_want_reconfig) {

        /* Decoded format changed: reconfigure output and resampler */
        ad->ad_want_reconfig = 0;
        ad->ad_in_sample_rate = frame->sample_rate;
        ad->ad_in_sample_format = frame->format;
        ad->ad_in_channel_layout = frame->channel_layout;

        ac->ac_reconfig(ad);

        if(ad->ad_avr == NULL)
          ad->ad_avr = avresample_alloc_context();
        else
          avresample_close(ad->ad_avr);

        av_opt_set_int(ad->ad_avr, "in_sample_fmt",
                       ad->ad_in_sample_format, 0);
        av_opt_set_int(ad->ad_avr, "in_sample_rate",
                       ad->ad_in_sample_rate, 0);
        av_opt_set_int(ad->ad_avr, "in_channel_layout",
                       ad->ad_in_channel_layout, 0);
        av_opt_set_int(ad->ad_avr, "out_sample_fmt",
                       ad->ad_out_sample_format, 0);
        av_opt_set_int(ad->ad_avr, "out_sample_rate",
                       ad->ad_out_sample_rate, 0);
        av_opt_set_int(ad->ad_avr, "out_channel_layout",
                       ad->ad_out_channel_layout, 0);

        char buf1[128];
        char buf2[128];

        av_get_channel_layout_string(buf1, sizeof(buf1), -1,
                                     ad->ad_in_channel_layout);
        av_get_channel_layout_string(buf2, sizeof(buf2), -1,
                                     ad->ad_out_channel_layout);

        TRACE(TRACE_DEBUG, "Audio",
              "Converting from [%s %dHz %s] to [%s %dHz %s]",
              buf1, ad->ad_in_sample_rate,
              av_get_sample_fmt_name(ad->ad_in_sample_format),
              buf2, ad->ad_out_sample_rate,
              av_get_sample_fmt_name(ad->ad_out_sample_format));

        if(avresample_open(ad->ad_avr)) {
          TRACE(TRACE_ERROR, "Audio", "Unable to open resampler");
          avresample_free(&ad->ad_avr);
        }

        prop_set(mp->mp_prop_ctrl, "canAdjustVolume", PROP_SET_INT, 1);

        if(ac->ac_set_volume != NULL)
          ac->ac_set_volume(ad, ad->ad_vol_scale);
      }

      if(ad->ad_avr != NULL) {
        avresample_convert(ad->ad_avr, NULL, 0, 0,
                           frame->data, frame->linesize[0],
                           frame->nb_samples);
      } else {
        /* No resampler: emulate realtime playback by sleeping for the
           frame's duration */
        int delay = 1000000LL * frame->nb_samples / frame->sample_rate;
        usleep(delay);
      }
    }
  }
}
/**
 * Decode one audio media buffer and feed decoded samples into the
 * avresample context (earlier variant of audio_process_audio).
 *
 * Handles raw 16-bit PCM (mb_cw == NULL) by synthesizing an AVFrame in
 * place, otherwise decodes with avcodec_decode_audio4 packet-offset by
 * packet-offset until the whole buffer is consumed.  On the first
 * packet of a buffer, ad_pts/ad_epoch are updated with the buffer PTS
 * compensated for samples still queued in the resampler.
 *
 * NOTE(review): unlike the sibling variant in this file, a zero sample
 * rate here aborts silently without a TRACE.
 */
static void
audio_process_audio(audio_decoder_t *ad, media_buf_t *mb)
{
  const audio_class_t *ac = ad->ad_ac;
  AVFrame *frame = ad->ad_frame;
  media_pipe_t *mp = ad->ad_mp;
  media_queue_t *mq = &mp->mp_audio;
  int r;
  int got_frame;
  AVPacket avpkt;
  int offset = 0;   /* bytes of mb_data consumed so far */

  if(mb->mb_skip || mb->mb_stream != mq->mq_stream)
    return;

  while(offset < mb->mb_size) {

    if(mb->mb_cw == NULL) {
      /* Raw 16-bit PCM: no decoder, describe the buffer directly */
      frame->sample_rate = mb->mb_rate;
      frame->format = AV_SAMPLE_FMT_S16;
      switch(mb->mb_channels) {
      case 1:
        frame->channel_layout = AV_CH_LAYOUT_MONO;
        frame->nb_samples = mb->mb_size / 2;   /* 2 bytes per sample */
        break;
      case 2:
        frame->channel_layout = AV_CH_LAYOUT_STEREO;
        frame->nb_samples = mb->mb_size / 4;   /* 4 bytes per frame */
        break;
      default:
        abort();
      }
      frame->data[0] = mb->mb_data;
      frame->linesize[0] = 0;
      r = mb->mb_size;   /* the whole buffer is consumed in one pass */
      got_frame = 1;

    } else {

      av_init_packet(&avpkt);
      avpkt.data = mb->mb_data + offset;
      avpkt.size = mb->mb_size - offset;

      r = avcodec_decode_audio4(mb->mb_cw->codec_ctx, frame,
                                &got_frame, &avpkt);
      if(r < 0)
        return;

      /* Fall back to the codec context when the frame lacks a rate */
      if(frame->sample_rate == 0)
        frame->sample_rate = mb->mb_cw->codec_ctx->sample_rate;

      if(frame->sample_rate == 0)
        return;

      if(mp->mp_stats)
        mp_set_mq_meta(mq, mb->mb_cw->codec, mb->mb_cw->codec_ctx);
    }

    if(offset == 0 && mb->mb_pts != AV_NOPTS_VALUE) {
      /* First packet of the buffer: compensate PTS for samples still
         buffered inside the resampler (output + input side delay) */
      int od = 0, id = 0;

      if(ad->ad_avr != NULL) {
        od = avresample_available(ad->ad_avr) *
          1000000LL / ad->ad_out_sample_rate;
        id = avresample_get_delay(ad->ad_avr) *
          1000000LL / frame->sample_rate;
      }
      ad->ad_pts = mb->mb_pts - od - id;
      ad->ad_epoch = mb->mb_epoch;
      //      printf("od=%-20d id=%-20d PTS=%-20ld oPTS=%-20ld\n",
      //	     od, id, mb->mb_pts, pts);

      if(mb->mb_drive_clock)
        mp_set_current_time(mp, mb->mb_pts - ad->ad_delay,
                            mb->mb_epoch, mb->mb_delta);
    }
    offset += r;

    if(got_frame) {

      if(frame->sample_rate    != ad->ad_in_sample_rate ||
         frame->format         != ad->ad_in_sample_format ||
         frame->channel_layout != ad->ad_in_channel_layout) {

        /* Decoded format changed: reconfigure output and resampler */
        ad->ad_in_sample_rate = frame->sample_rate;
        ad->ad_in_sample_format = frame->format;
        ad->ad_in_channel_layout = frame->channel_layout;

        ac->ac_reconfig(ad);

        if(ad->ad_avr == NULL)
          ad->ad_avr = avresample_alloc_context();
        else
          avresample_close(ad->ad_avr);

        av_opt_set_int(ad->ad_avr, "in_sample_fmt",
                       ad->ad_in_sample_format, 0);
        av_opt_set_int(ad->ad_avr, "in_sample_rate",
                       ad->ad_in_sample_rate, 0);
        av_opt_set_int(ad->ad_avr, "in_channel_layout",
                       ad->ad_in_channel_layout, 0);
        av_opt_set_int(ad->ad_avr, "out_sample_fmt",
                       ad->ad_out_sample_format, 0);
        av_opt_set_int(ad->ad_avr, "out_sample_rate",
                       ad->ad_out_sample_rate, 0);
        av_opt_set_int(ad->ad_avr, "out_channel_layout",
                       ad->ad_out_channel_layout, 0);

        char buf1[128];
        char buf2[128];

        av_get_channel_layout_string(buf1, sizeof(buf1), -1,
                                     ad->ad_in_channel_layout);
        av_get_channel_layout_string(buf2, sizeof(buf2), -1,
                                     ad->ad_out_channel_layout);

        TRACE(TRACE_DEBUG, "Audio",
              "Converting from [%s %dHz %s] to [%s %dHz %s]",
              buf1, ad->ad_in_sample_rate,
              av_get_sample_fmt_name(ad->ad_in_sample_format),
              buf2, ad->ad_out_sample_rate,
              av_get_sample_fmt_name(ad->ad_out_sample_format));

        if(avresample_open(ad->ad_avr)) {
          TRACE(TRACE_ERROR, "AudioQueue", "Unable to open resampler");
          avresample_free(&ad->ad_avr);
        }
      }
      if(ad->ad_avr != NULL)
        avresample_convert(ad->ad_avr, NULL, 0, 0,
                           frame->data, frame->linesize[0],
                           frame->nb_samples);
    }
  }
}
/**
 * Decode one audio media buffer (legacy avcodec_decode_audio3 path)
 * and feed the result into the mixer / output fifo.
 *
 * Handles raw PCM (cw == NULL) directly, offers AC3/DTS hardware
 * passthrough when the current audio mode supports it, and otherwise
 * decodes with libav and converts decoded samples to signed 16-bit
 * before mixing.  A non-primary pipe only emulates playback timing by
 * sleeping.
 *
 * FIX: restored the function's missing closing brace — the last brace
 * in the original text closed the while loop, leaving the function
 * body unterminated.
 */
static void
ad_decode_buf(audio_decoder_t *ad, media_pipe_t *mp, media_queue_t *mq,
              media_buf_t *mb)
{
  audio_mode_t *am = audio_mode_current;
  uint8_t *buf;
  int size, r, data_size, channels, rate, frames, delay, i;
  media_codec_t *cw = mb->mb_cw;
  AVCodecContext *ctx;
  int64_t pts;

  if(cw == NULL) {
    /* Raw native endian PCM */

    if(ad->ad_do_flush) {
      ad->ad_do_flush = 0;
      if(mp_is_primary(mp))
        ad->ad_send_flush = 1;
    } else if(mb->mb_time != AV_NOPTS_VALUE)
      mp_set_current_time(mp, mb->mb_time);

    if(mb->mb_send_pts && mb->mb_pts != AV_NOPTS_VALUE) {
      event_ts_t *ets = event_create(EVENT_CURRENT_PTS, sizeof(event_ts_t));
      ets->ts = mb->mb_pts;
      mp_enqueue_event(mp, &ets->h);
      event_release(&ets->h);
    }

    frames = mb->mb_size / sizeof(int16_t) / mb->mb_channels;

    if(mp_is_primary(mp)) {
      /* Must copy if auto pipeline does multichannel upmixing */
      memcpy(ad->ad_outbuf, mb->mb_data, mb->mb_size);

      audio_mix1(ad, am, mb->mb_channels, mb->mb_rate, 0,
                 ad->ad_outbuf, frames, mb->mb_pts, mb->mb_epoch, mp);
    } else {
      /* We are just suppoed to be silent, emulate some kind of
         delay, this is not accurate, so we also set the clock epoch
         to zero to avoid AV sync */
      mp->mp_audio_clock_epoch = 0;

      delay = (int64_t)frames * 1000000LL / mb->mb_rate;
      usleep(delay); /* XXX: Must be better */

      /* Flush any packets in the pause pending queue */
      audio_fifo_clear_queue(&ad->ad_hold_queue);
    }
    return;
  }

  ctx = cw->codec_ctx;

  if(mp_is_primary(mp)) {
    /* Try compressed passthrough before falling back to decoding */
    switch(ctx->codec_id) {
    case CODEC_ID_AC3:
      if(am->am_formats & AM_FORMAT_AC3) {
        audio_deliver_passthru(mb, ad, AM_FORMAT_AC3, mp);
        return;
      }
      break;

    case CODEC_ID_DTS:
      if(am->am_formats & AM_FORMAT_DTS) {
        audio_deliver_passthru(mb, ad, AM_FORMAT_DTS, mp);
        return;
      }
      break;

    default:
      break;
    }
  }

  buf = mb->mb_data;
  size = mb->mb_size;
  pts = mb->mb_pts;

  while(size > 0) {

    if(ad->ad_do_flush) {
      avcodec_flush_buffers(cw->codec_ctx);
      ad->ad_do_flush = 0;
      if(mp_is_primary(mp))
        ad->ad_send_flush = 1;
    } else if(mb->mb_time != AV_NOPTS_VALUE)
      mp_set_current_time(mp, mb->mb_time);

    if(mb->mb_send_pts && mb->mb_pts != AV_NOPTS_VALUE) {
      event_ts_t *ets = event_create(EVENT_CURRENT_PTS, sizeof(event_ts_t));
      ets->ts = pts;
      mp_enqueue_event(mp, &ets->h);
      event_release(&ets->h);
    }

    if(audio_mode_stereo_only(am) &&
       cw->codec_id != CODEC_ID_TRUEHD &&
       cw->codec_id != CODEC_ID_MLP)
      ctx->request_channels = 2; /* We can only output stereo.
                                    Ask codecs to do downmixing for us. */
    else
      ctx->request_channels = 0;

    data_size = AVCODEC_MAX_AUDIO_FRAME_SIZE;

    AVPacket avpkt;
    av_init_packet(&avpkt);
    avpkt.data = buf;
    avpkt.size = size;

    if(am->am_float)
      ctx->request_sample_fmt = AV_SAMPLE_FMT_FLT;

    r = avcodec_decode_audio3(ctx, ad->ad_outbuf, &data_size, &avpkt);

    if(r < 0)
      break;

    if(mp->mp_stats)
      mp_set_mq_meta(mq, cw->codec, cw->codec_ctx);

    channels = ctx->channels;
    rate = ctx->sample_rate;

    /* Convert to signed 16bit */

    if(data_size > 0) {
      frames = data_size / sample_fmt_to_size[ctx->sample_fmt];

      if(!mp_is_primary(mp)) {
        mp->mp_audio_clock_epoch = 0;

        delay = (int64_t)(frames / channels) * 1000000LL / rate;
        usleep(delay); /* XXX: Must be better */

        /* Flush any packets in the pause pending queue */
        audio_fifo_clear_queue(&ad->ad_hold_queue);
      } else {
        /* We are the primary audio decoder == we may play,
           forward to the mixer stages */

        /* But first, if we have any pending packets (due to a
           previous pause), release them */
        audio_fifo_reinsert(thefifo, &ad->ad_hold_queue);

        if(ctx->sample_fmt == SAMPLE_FMT_FLT && am->am_float &&
           (am->am_sample_rates & AM_SR_ANY ||
            audio_rateflag_from_rate(rate) & am->am_sample_rates) &&
           channels_to_format(channels) & am->am_formats) {
          /* Direct float delivery: no conversion needed */
          frames /= channels;
          audio_deliver(ad, am, ad->ad_outbuf, channels, frames,
                        rate, pts, mb->mb_epoch, mp, 1);
        } else {
          /* Convert samples in place to signed 16-bit */
          switch(ctx->sample_fmt) {
          default:
            return;
          case SAMPLE_FMT_U8:
            for(i = frames - 1; i >= 0; i--)
              ad->ad_outbuf[i] =
                (((uint8_t *)ad->ad_outbuf)[i] - 0x80) << 8;
            break;
          case SAMPLE_FMT_S16:
            break;
          case SAMPLE_FMT_S32:
            for(i = 0; i < frames; i++)
              ad->ad_outbuf[i] = ((int32_t *)ad->ad_outbuf)[i] >> 16;
            break;
          case SAMPLE_FMT_FLT:
            for(i = 0; i < frames; i++)
              ad->ad_outbuf[i] =
                rintf(((float *)ad->ad_outbuf)[i] * 32768);
            break;
          case SAMPLE_FMT_DBL:
            for(i = 0; i < frames; i++)
              ad->ad_outbuf[i] =
                rint(((double *)ad->ad_outbuf)[i] * 32768);
            break;
          }
          frames /= channels;
          audio_mix1(ad, am, channels, rate, ctx->channel_layout,
                     ad->ad_outbuf, frames, pts, mb->mb_epoch, mp);
        }
      }
    }
    pts = AV_NOPTS_VALUE;  /* only the first decoded chunk carries PTS */
    buf += r;
    size -= r;
  }
}
/**
 * Audio decoder thread main loop.
 *
 * Pops media buffers off the audio queue (under mp_mutex) and
 * dispatches on buffer type: control messages (exit/pause/play/flush),
 * audio payloads, and end-of-stream.  While no audio stream is
 * selected (mq_stream == -1) it consumes buffers silently, pacing
 * itself against wall-clock time so the rest of the pipeline keeps
 * moving.  Holds mp_mutex except while processing a dequeued buffer.
 *
 * @param aux  the audio_decoder_t for this pipe
 * @return NULL on thread exit
 */
static void *
ad_thread(void *aux)
{
  audio_decoder_t *ad = aux;
  media_pipe_t *mp = ad->ad_mp;
  media_queue_t *mq = &mp->mp_audio;
  media_buf_t *mb;
  int hold = 0;   /* non-zero while playback is paused */
  int run = 1;
  int64_t silence_start_pts = AV_NOPTS_VALUE;  /* PTS pacing origin */
  uint64_t silence_start_realtime = 0;         /* wall-clock origin */

  hts_mutex_lock(&mp->mp_mutex);

  while(run) {

    if((mb = TAILQ_FIRST(&mq->mq_q)) == NULL) {
      hts_cond_wait(&mq->mq_avail, &mp->mp_mutex);
      continue;
    }

    /* While paused, leave audio buffers queued (control messages and
       skipped buffers still pass through) */
    if(mb->mb_data_type == MB_AUDIO && hold && mb->mb_skip == 0) {
      hts_cond_wait(&mq->mq_avail, &mp->mp_mutex);
      continue;
    }

    TAILQ_REMOVE(&mq->mq_q, mb, mb_link);
    mq->mq_packets_current--;
    mp->mp_buffer_current -= mb->mb_size;
    mq_update_stats(mp, mq);
    hts_cond_signal(&mp->mp_backpressure);
    hts_mutex_unlock(&mp->mp_mutex);

    switch(mb->mb_data_type) {
    case MB_CTRL_EXIT:
      run = 0;
      break;

    case MB_CTRL_PAUSE:
      /* Copy back any pending audio in the output fifo */
      audio_fifo_purge(thefifo, ad, &ad->ad_hold_queue);
      hold = 1;
      break;

    case MB_CTRL_PLAY:
      hold = 0;
      break;

    case MB_FLUSH:
      ad->ad_do_flush = 1;
      /* Flush any pending audio in the output fifo */
      audio_fifo_purge(thefifo, ad, NULL);
      audio_decoder_flush(ad);
      break;

    case MB_AUDIO:
      if(mb->mb_skip != 0)
        break;

      if(mq->mq_stream == -1) {
        /* No audio stream selected: consume buffers silently but
           pace by PTS against real time so we don't spin */
        if(mb->mb_pts == AV_NOPTS_VALUE)
          break;

        if(silence_start_pts == AV_NOPTS_VALUE) {
          silence_start_pts = mb->mb_pts;
          silence_start_realtime = showtime_get_ts();
        } else {
          int64_t d = mb->mb_pts - silence_start_pts;
          if(d > 0) {
            int64_t sleeptime = silence_start_realtime +
              d - showtime_get_ts();
            if(sleeptime > 0)
              usleep(sleeptime);
          }
        }
        break;
      }

      if(mb->mb_stream != mq->mq_stream)
        break;

      ad_decode_buf(ad, mp, mq, mb);
      silence_start_pts = AV_NOPTS_VALUE;  /* real audio resets pacing */
      break;

    case MB_END:
      mp_set_current_time(mp, AV_NOPTS_VALUE);
      break;

    default:
      abort();
    }

    hts_mutex_lock(&mp->mp_mutex);
    media_buf_free_locked(mp, mb);
  }

  hts_mutex_unlock(&mp->mp_mutex);
  /* Drop anything still queued in the output fifo before exiting */
  audio_fifo_purge(thefifo, ad, NULL);
  return NULL;
}
/**
 * Decode one audio media buffer (variant with SPDIF passthrough
 * probing via ac_check_passthru).
 *
 * Handles raw 16-bit PCM (mb_cw == NULL), SPDIF passthrough muxing,
 * and regular libav decoding with lazy decoder-context creation.
 * Reconfigures the avresample context whenever the decoded format
 * changes, and on the first packet of a buffer updates
 * ad_pts/ad_epoch compensated for samples still inside the resampler.
 */
static void
audio_process_audio(audio_decoder_t *ad, media_buf_t *mb)
{
  const audio_class_t *ac = ad->ad_ac;
  AVFrame *frame = ad->ad_frame;
  media_pipe_t *mp = ad->ad_mp;
  media_queue_t *mq = &mp->mp_audio;
  int r;
  int got_frame;
  AVPacket avpkt;
  int offset = 0;   /* bytes of mb_data consumed so far */

  if(mb->mb_skip || mb->mb_stream != mq->mq_stream)
    return;

  while(offset < mb->mb_size) {

    if(mb->mb_cw == NULL) {
      /* Raw 16-bit PCM: no decoder, describe the buffer directly */
      frame->sample_rate = mb->mb_rate;
      frame->format = AV_SAMPLE_FMT_S16;
      switch(mb->mb_channels) {
      case 1:
        frame->channel_layout = AV_CH_LAYOUT_MONO;
        frame->nb_samples = mb->mb_size / 2;
        break;
      case 2:
        frame->channel_layout = AV_CH_LAYOUT_STEREO;
        frame->nb_samples = mb->mb_size / 4;
        break;
      default:
        abort();
      }
      frame->data[0] = mb->mb_data;
      frame->linesize[0] = 0;
      r = mb->mb_size;   /* whole buffer consumed in one pass */
      got_frame = 1;

    } else {

      media_codec_t *mc = mb->mb_cw;
      AVCodecContext *ctx = mc->ctx;

      if(mc->codec_id != ad->ad_in_codec_id) {
        AVCodec *codec = avcodec_find_decoder(mc->codec_id);
        TRACE(TRACE_DEBUG, "audio", "Codec changed to %s",
              codec ? codec->name : "???");
        ad->ad_in_codec_id = mc->codec_id;

        audio_cleanup_spdif_muxer(ad);

        /* Let the audio class decide whether this codec can be
           passed through compressed over SPDIF */
        if(ac->ac_check_passthru != NULL && codec != NULL &&
           ac->ac_check_passthru(ad, mc->codec_id)) {
          audio_setup_spdif_muxer(ad, codec, mq);
        }
      }

      av_init_packet(&avpkt);
      avpkt.data = mb->mb_data + offset;
      avpkt.size = mb->mb_size - offset;

      if(ad->ad_spdif_muxer != NULL) {
        /* SPDIF passthrough: mux the compressed packet straight out */
        av_write_frame(ad->ad_spdif_muxer, &avpkt);
        avio_flush(ad->ad_spdif_muxer->pb);
        ad->ad_pts = mb->mb_pts;
        ad->ad_epoch = mb->mb_epoch;
        return;
      }

      if(ctx == NULL) {
        /* Lazily open the decoder on first use */
        AVCodec *codec = avcodec_find_decoder(mc->codec_id);
        assert(codec != NULL); // Checked in libav.c

        ctx = mc->ctx = avcodec_alloc_context3(codec);

        if(ad->ad_stereo_downmix)
          ctx->request_channels = 2;

        if(avcodec_open2(mc->ctx, codec, NULL) < 0) {
          av_freep(&mc->ctx);
          return;
        }
      }

      r = avcodec_decode_audio4(ctx, frame, &got_frame, &avpkt);
      if(r < 0)
        return;

      if(frame->sample_rate == 0) {
        /* Fall back to codec context, then demuxer context */
        frame->sample_rate = ctx->sample_rate;

        if(frame->sample_rate == 0 && mb->mb_cw->fmt_ctx)
          frame->sample_rate = mb->mb_cw->fmt_ctx->sample_rate;

        if(frame->sample_rate == 0)
          return;
      }

      if(frame->channel_layout == 0) {
        /* Only mono and stereo can be defaulted here; anything else
           without an explicit layout is dropped */
        switch(ctx->channels) {
        case 1:
          frame->channel_layout = AV_CH_LAYOUT_MONO;
          break;
        case 2:
          frame->channel_layout = AV_CH_LAYOUT_STEREO;
          break;
        default:
          return;
        }
      }

      if(mp->mp_stats)
        mp_set_mq_meta(mq, ctx->codec, ctx);
    }

    if(offset == 0 && mb->mb_pts != AV_NOPTS_VALUE) {
      /* First packet of the buffer: compensate PTS for samples still
         buffered inside the resampler */
      int od = 0, id = 0;

      if(ad->ad_avr != NULL) {
        od = avresample_available(ad->ad_avr) *
          1000000LL / ad->ad_out_sample_rate;
        id = avresample_get_delay(ad->ad_avr) *
          1000000LL / frame->sample_rate;
      }
      ad->ad_pts = mb->mb_pts - od - id;
      ad->ad_epoch = mb->mb_epoch;
      //      printf("od=%-20d id=%-20d PTS=%-20ld oPTS=%-20ld\n",
      //	     od, id, mb->mb_pts, pts);

      if(mb->mb_drive_clock)
        mp_set_current_time(mp, mb->mb_pts - ad->ad_delay,
                            mb->mb_epoch, mb->mb_delta);
    }
    offset += r;

    if(got_frame) {

      if(frame->sample_rate != ad->ad_in_sample_rate ||
         frame->format != ad->ad_in_sample_format ||
         frame->channel_layout != ad->ad_in_channel_layout) {

        /* Decoded format changed: reconfigure output and resampler */
        ad->ad_in_sample_rate = frame->sample_rate;
        ad->ad_in_sample_format = frame->format;
        ad->ad_in_channel_layout = frame->channel_layout;

        ac->ac_reconfig(ad);

        if(ad->ad_avr == NULL)
          ad->ad_avr = avresample_alloc_context();
        else
          avresample_close(ad->ad_avr);

        av_opt_set_int(ad->ad_avr, "in_sample_fmt",
                       ad->ad_in_sample_format, 0);
        av_opt_set_int(ad->ad_avr, "in_sample_rate",
                       ad->ad_in_sample_rate, 0);
        av_opt_set_int(ad->ad_avr, "in_channel_layout",
                       ad->ad_in_channel_layout, 0);
        av_opt_set_int(ad->ad_avr, "out_sample_fmt",
                       ad->ad_out_sample_format, 0);
        av_opt_set_int(ad->ad_avr, "out_sample_rate",
                       ad->ad_out_sample_rate, 0);
        av_opt_set_int(ad->ad_avr, "out_channel_layout",
                       ad->ad_out_channel_layout, 0);

        char buf1[128];
        char buf2[128];

        av_get_channel_layout_string(buf1, sizeof(buf1), -1,
                                     ad->ad_in_channel_layout);
        av_get_channel_layout_string(buf2, sizeof(buf2), -1,
                                     ad->ad_out_channel_layout);

        TRACE(TRACE_DEBUG, "Audio",
              "Converting from [%s %dHz %s] to [%s %dHz %s]",
              buf1, ad->ad_in_sample_rate,
              av_get_sample_fmt_name(ad->ad_in_sample_format),
              buf2, ad->ad_out_sample_rate,
              av_get_sample_fmt_name(ad->ad_out_sample_format));

        if(avresample_open(ad->ad_avr)) {
          TRACE(TRACE_ERROR, "AudioQueue", "Unable to open resampler");
          avresample_free(&ad->ad_avr);
        }

        if(ac->ac_set_volume != NULL) {
          prop_set(mp->mp_prop_ctrl, "canAdjustVolume", PROP_SET_INT, 1);
          ac->ac_set_volume(ad, ad->ad_vol_scale);
        }
      }
      if(ad->ad_avr != NULL)
        avresample_convert(ad->ad_avr, NULL, 0, 0,
                           frame->data, frame->linesize[0],
                           frame->nb_samples);
    }
  }
}