/* Tear down an af_resample instance: close and release both the main
 * and the output-side libavresample contexts. */
static void uninit(struct af_instance *af)
{
    struct af_resample *priv = af->priv;

    if (priv->avrctx)
        avresample_close(priv->avrctx);
    avresample_free(&priv->avrctx);

    if (priv->avrctx_out)
        avresample_close(priv->avrctx_out);
    avresample_free(&priv->avrctx_out);
}
/*
 * Resamples a frame if needed and pushes the audio data along
 * with a timestamp onto the framelist.
 *
 * If the frame already matches the configured output format, layout and
 * rate, its data is copied verbatim; otherwise a one-shot libavresample
 * context converts it first.
 */
static void resampleframe(AVFrame *avframe)
{
	AVAudioResampleContext *avr;
	struct frame *frame;
	uint8_t *output;
	size_t outputlen;
	int linesize, nr, samples;

	if (avframe->format != out_sample_fmt ||
	    avframe->channel_layout != out_channel_layout ||
	    avframe->sample_rate != out_sample_rate) {
		avr = avresample_alloc_context();
		if (avr == NULL) {
			errx(1, "(%s:%d) avresample_alloc_context", __FILE__,
			    __LINE__);
		}
		av_opt_set_int(avr, "in_channel_layout",
		    avframe->channel_layout, 0);
		av_opt_set_int(avr, "out_channel_layout",
		    out_channel_layout, 0);
		av_opt_set_int(avr, "in_sample_rate", avframe->sample_rate, 0);
		av_opt_set_int(avr, "out_sample_rate", out_sample_rate, 0);
		av_opt_set_int(avr, "in_sample_fmt", avframe->format, 0);
		av_opt_set_int(avr, "out_sample_fmt", out_sample_fmt, 0);
		nr = avresample_open(avr);
		if (nr < 0) {
			avresample_free(&avr);
			return;
		}
		outputlen = av_samples_get_buffer_size(&linesize, out_channels,
		    avframe->nb_samples, out_sample_fmt, 0);
		output = xmalloc(outputlen);
		samples = avresample_convert(avr, &output, linesize,
		    avframe->nb_samples, avframe->data, avframe->linesize[0],
		    avframe->nb_samples);
		/*
		 * BUG FIX: avresample_convert() returns a negative AVERROR
		 * on failure; the old code used that value unchecked as a
		 * sample count, yielding a bogus (underflowed) outputlen.
		 */
		if (samples < 0) {
			free(output);
			avresample_close(avr);
			avresample_free(&avr);
			return;
		}
		outputlen = samples * out_channels *
		    av_get_bytes_per_sample(out_sample_fmt);
		avresample_close(avr);
		avresample_free(&avr);
	} else {
		outputlen = av_samples_get_buffer_size(NULL,
		    avfmt->streams[sti]->codec->channels, avframe->nb_samples,
		    avframe->format, 1);
		output = xmalloc(outputlen);
		memcpy(output, avframe->data[0], outputlen);
	}

	frame = xmalloc(sizeof(struct frame));
	frame->data = output;
	frame->size = outputlen;
	frame->pts = avframe->pkt_pts;
	flpush(frame);
}
/* Shut down the audio decoder: close the demuxer, drain and destroy the
 * pending-packet list (under its mutex), then release the sample FIFO and
 * the optional resampler. */
void dc_audio_decoder_close(AudioInputFile *audio_input_file)
{
	avformat_close_input(&audio_input_file->av_fmt_ctx);

	if (audio_input_file->av_pkt_list_mutex) {
		gf_mx_p(audio_input_file->av_pkt_list_mutex);
		/* Drop every queued packet before deleting the list. */
		while (gf_list_count(audio_input_file->av_pkt_list)) {
			AVPacket *queued = gf_list_last(audio_input_file->av_pkt_list);
			av_free_packet(queued);
			gf_list_rem_last(audio_input_file->av_pkt_list);
		}
		gf_list_del(audio_input_file->av_pkt_list);
		gf_mx_v(audio_input_file->av_pkt_list_mutex);
		gf_mx_del(audio_input_file->av_pkt_list_mutex);
	}

	av_fifo_free(audio_input_file->fifo);

#ifdef DC_AUDIO_RESAMPLER
	avresample_free(&audio_input_file->aresampler);
#endif
}
void AudioLoader::closeAudioFile() { if (!_demuxCtx) { return; } #if HAVE_AVRESAMPLE if (_convertCtxAv) { avresample_close(_convertCtxAv); avresample_free(&_convertCtxAv); } #elif HAVE_SWRESAMPLE if (_convertCtx) { swr_free(&_convertCtx); } #endif // Close the codec avcodec_close(_audioCtx); // Close the audio file avformat_close_input(&_demuxCtx); // free AVPacket av_free_packet(&_packet); _demuxCtx = 0; _audioCtx = 0; }
/* Release all decoder state attached to this audio stream. */
static void uninit(sh_audio_t *sh)
{
    sh->codecname = NULL;

    struct priv *ctx = sh->context;
    if (!ctx)
        return;

    AVCodecContext *avctx = ctx->avctx;
    if (avctx) {
        if (avcodec_close(avctx) < 0)
            mp_tmsg(MSGT_DECVIDEO, MSGL_ERR, "Could not close codec.\n");
        av_freep(&avctx->extradata);
        av_freep(&avctx);
    }

#ifdef CONFIG_LIBAVRESAMPLE
    avresample_free(&ctx->avr);
#endif

    /* avcodec_free_frame() only exists from lavc 54.28 onwards. */
#if LIBAVCODEC_VERSION_INT >= (54 << 16 | 28 << 8)
    avcodec_free_frame(&ctx->avframe);
#else
    av_free(ctx->avframe);
#endif

    talloc_free(ctx);
    sh->context = NULL;
}
/* Release every DeckLink interface and codec resource held by this card. */
static void close_card( decklink_opts_t *decklink_opts )
{
    decklink_ctx_t *ctx = &decklink_opts->decklink_ctx;

    if( ctx->p_config )
        ctx->p_config->Release();

    if( ctx->p_input )
    {
        /* Stop capture before dropping the interface. */
        ctx->p_input->StopStreams();
        ctx->p_input->Release();
    }

    if( ctx->p_card )
        ctx->p_card->Release();

    if( ctx->p_delegate )
        ctx->p_delegate->Release();

    if( ctx->codec )
    {
        avcodec_close( ctx->codec );
        av_free( ctx->codec );
    }

    /* The VBI decoder only exists for SD video formats. */
    if( IS_SD( decklink_opts->video_format ) )
        vbi_raw_decoder_destroy( &ctx->non_display_parser.vbi_decoder );

    if( ctx->avr )
        avresample_free( &ctx->avr );
}
/* Free every lazily-created FFmpeg object on the DSP context and reset
 * the context to its closed state.  Safe to call with NULL. */
static void ffmpeg_close_context(FREERDP_DSP_CONTEXT* context)
{
	if (!context)
		return;

	if (context->context)
		avcodec_free_context(&context->context);

	if (context->frame)
		av_frame_free(&context->frame);

	if (context->resampled)
		av_frame_free(&context->resampled);

	if (context->buffered)
		av_frame_free(&context->buffered);

	if (context->packet)
		av_packet_free(&context->packet);

	if (context->rcontext)
		avresample_free(&context->rcontext);

	context->id = AV_CODEC_ID_NONE;
	context->codec = NULL;
	context->isOpen = FALSE;
	context->context = NULL;
	context->frame = NULL;
	context->resampled = NULL;
	context->packet = NULL;
	context->rcontext = NULL;
}
/* Tear down each stream's SILK/CELT decoders and buffers, then the
 * shared per-context arrays.  Always returns 0. */
static av_cold int opus_decode_close(AVCodecContext *avctx)
{
    OpusContext *c = avctx->priv_data;
    int i;

    for (i = 0; i < c->nb_streams; i++) {
        OpusStreamContext *s = &c->streams[i];

        ff_silk_free(&s->silk);
        ff_celt_free(&s->celt);

        av_freep(&s->out_dummy);
        s->out_dummy_allocated_size = 0;

        av_audio_fifo_free(s->celt_delay);
        avresample_free(&s->avr);
    }

    av_freep(&c->streams);
    c->nb_streams = 0;
    av_freep(&c->channel_maps);

    return 0;
}
/* Release every encoder-side resource owned by an output stream. */
static void close_stream(AVFormatContext *oc, OutputStream *ost)
{
    avcodec_free_context(&ost->enc);
    av_frame_free(&ost->frame);
    av_frame_free(&ost->tmp_frame);
    sws_freeContext(ost->sws_ctx);
    avresample_free(&ost->avr);
}
/*
 * Configure the output link: (re)create the lavr resampler that converts
 * from the input link's format/rate/layout to the output link's.
 * Returns 0 on success or a negative AVERROR code.
 */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    ResampleContext *s = ctx->priv;
    char buf1[64], buf2[64];
    int ret;

    /* Drop any resampler left over from a previous configuration. */
    if (s->avr) {
        avresample_close(s->avr);
        avresample_free(&s->avr);
    }

    /* Pass-through case: nothing to convert.  Mono streams whose packed
     * and planar sample formats coincide also count as identical. */
    if (inlink->channel_layout == outlink->channel_layout &&
        inlink->sample_rate == outlink->sample_rate &&
        (inlink->format == outlink->format ||
         (av_get_channel_layout_nb_channels(inlink->channel_layout) == 1 &&
          av_get_channel_layout_nb_channels(outlink->channel_layout) == 1 &&
          av_get_planar_sample_fmt(inlink->format) ==
          av_get_planar_sample_fmt(outlink->format))))
        return 0;

    if (!(s->avr = avresample_alloc_context()))
        return AVERROR(ENOMEM);

    /* Apply user-supplied lavr options before wiring up the links. */
    if (s->options) {
        AVDictionaryEntry *e = NULL;
        while ((e = av_dict_get(s->options, "", e, AV_DICT_IGNORE_SUFFIX)))
            av_log(ctx, AV_LOG_VERBOSE, "lavr option: %s=%s\n", e->key, e->value);

        av_opt_set_dict(s->avr, &s->options);
    }

    av_opt_set_int(s->avr, "in_channel_layout",  inlink ->channel_layout, 0);
    av_opt_set_int(s->avr, "out_channel_layout", outlink->channel_layout, 0);
    av_opt_set_int(s->avr, "in_sample_fmt",      inlink ->format,         0);
    av_opt_set_int(s->avr, "out_sample_fmt",     outlink->format,         0);
    av_opt_set_int(s->avr, "in_sample_rate",     inlink ->sample_rate,    0);
    av_opt_set_int(s->avr, "out_sample_rate",    outlink->sample_rate,    0);

    if ((ret = avresample_open(s->avr)) < 0)
        return ret;

    /* Output timestamps are expressed in samples. */
    outlink->time_base = (AVRational){ 1, outlink->sample_rate };
    s->next_pts    = AV_NOPTS_VALUE;
    s->next_in_pts = AV_NOPTS_VALUE;

    av_get_channel_layout_string(buf1, sizeof(buf1), -1, inlink ->channel_layout);
    av_get_channel_layout_string(buf2, sizeof(buf2), -1, outlink->channel_layout);
    av_log(ctx, AV_LOG_VERBOSE,
           "fmt:%s srate:%d cl:%s -> fmt:%s srate:%d cl:%s\n",
           av_get_sample_fmt_name(inlink ->format), inlink ->sample_rate, buf1,
           av_get_sample_fmt_name(outlink->format), outlink->sample_rate, buf2);

    return 0;
}
/* Filter teardown: dispose of the resampler if one was opened. */
static av_cold void uninit(AVFilterContext *ctx)
{
    ResampleContext *s = ctx->priv;

    if (s->avr) {
        avresample_close(s->avr);
        avresample_free(&s->avr);
    }
}
/* Destroy a resample wrapper and its underlying lavr context (NULL-safe). */
void hb_audio_resample_free(hb_audio_resample_t *resample)
{
    if (resample == NULL)
        return;

    if (resample->avresample != NULL)
        avresample_free(&resample->avresample);

    free(resample);
}
// Destructor: release the resampler context using whichever resampling
// backend this build was configured with.
AudioResamplerFfmpeg::~AudioResamplerFfmpeg() {
    if (_context) {
#ifdef HAVE_SWRESAMPLE_H
        swr_free(&_context);
#elif HAVE_AVRESAMPLE_H
        avresample_close(_context);
        avresample_free(&_context);
#else
        // Legacy FFmpeg resampling API.
        audio_resample_close(_context);
#endif
    }
}
// Destructor: free the resample context, via libavresample when it is
// available and the legacy audio_resample API otherwise.
AudioDecoderThread::~AudioDecoderThread()
{
    if (m_pResampleContext) {
#ifdef LIBAVRESAMPLE_VERSION
        avresample_close(m_pResampleContext);
        avresample_free(&m_pResampleContext);
#else
        audio_resample_close(m_pResampleContext);
#endif
        m_pResampleContext = 0;
    }
}
// Destructor: close both codecs and release the frame and the resampler.
AudioConverter::~AudioConverter()
{
    TRACE();

    if (m_decoder)
        avcodec_close(m_decoder);
    if (m_encoder)
        avcodec_close(m_encoder);
    if (m_audioFrame)
        av_free(m_audioFrame);
    if (m_resampler)
        avresample_free(&m_resampler);
}
/*
 * Configure the output link: (re)build the libavresample context used to
 * convert between the input and output link parameters.
 * Returns 0 on success or a negative AVERROR code.
 */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    ResampleContext *s = ctx->priv;
    char buf1[64], buf2[64];
    int ret;

    /* Release a resampler left over from an earlier configuration. */
    if (s->avr) {
        avresample_close(s->avr);
        avresample_free(&s->avr);
    }

    /* Identical parameters on both sides: run in pass-through mode. */
    if (inlink->channel_layout == outlink->channel_layout &&
        inlink->sample_rate == outlink->sample_rate &&
        inlink->format == outlink->format)
        return 0;

    if (!(s->avr = avresample_alloc_context()))
        return AVERROR(ENOMEM);

    av_opt_set_int(s->avr, "in_channel_layout",  inlink ->channel_layout, 0);
    av_opt_set_int(s->avr, "out_channel_layout", outlink->channel_layout, 0);
    av_opt_set_int(s->avr, "in_sample_fmt",      inlink ->format,         0);
    av_opt_set_int(s->avr, "out_sample_fmt",     outlink->format,         0);
    av_opt_set_int(s->avr, "in_sample_rate",     inlink ->sample_rate,    0);
    av_opt_set_int(s->avr, "out_sample_rate",    outlink->sample_rate,    0);

    /* if both the input and output formats are s16 or u8, use s16 as
       the internal sample format */
    if (av_get_bytes_per_sample(inlink->format) <= 2 &&
        av_get_bytes_per_sample(outlink->format) <= 2)
        av_opt_set_int(s->avr, "internal_sample_fmt", AV_SAMPLE_FMT_S16P, 0);

    if ((ret = avresample_open(s->avr)) < 0)
        return ret;

    /* Timestamps on the output link count samples. */
    outlink->time_base = (AVRational){ 1, outlink->sample_rate };
    s->next_pts = AV_NOPTS_VALUE;

    av_get_channel_layout_string(buf1, sizeof(buf1), -1, inlink ->channel_layout);
    av_get_channel_layout_string(buf2, sizeof(buf2), -1, outlink->channel_layout);
    av_log(ctx, AV_LOG_VERBOSE,
           "fmt:%s srate: %d cl:%s -> fmt:%s srate: %d cl:%s\n",
           av_get_sample_fmt_name(inlink ->format), inlink ->sample_rate, buf1,
           av_get_sample_fmt_name(outlink->format), outlink->sample_rate, buf2);

    return 0;
}
/* Stop the decoder thread and free all of its resources. */
void audio_decoder_destroy(struct audio_decoder *ad)
{
  /* Ask the decoder thread to exit, then wait for it to do so. */
  mp_send_cmd(ad->ad_mp, &ad->ad_mp->mp_audio, MB_CTRL_EXIT);
  hts_thread_join(&ad->ad_tid);

  avcodec_free_frame(&ad->ad_frame);

  if(ad->ad_avr != NULL) {
    avresample_close(ad->ad_avr);
    avresample_free(&ad->ad_avr);
  }

  free(ad);
}
/* Stop the decoder thread, drain the audio queue and free everything,
 * including any SPDIF muxer state. */
void audio_decoder_destroy(struct audio_decoder *ad)
{
  mp_send_cmd(ad->ad_mp, &ad->ad_mp->mp_audio, MB_CTRL_EXIT);
  hts_thread_join(&ad->ad_tid);

  /* The thread is gone; it is now safe to flush queued buffers. */
  mq_flush(ad->ad_mp, &ad->ad_mp->mp_audio, 1);

  av_frame_free(&ad->ad_frame);

  if(ad->ad_avr != NULL) {
    avresample_close(ad->ad_avr);
    avresample_free(&ad->ad_avr);
  }

  audio_cleanup_spdif_muxer(ad);
  free(ad);
}
/* Dispose of a file handle: path string, resampler, codec and demuxer. */
void free_file(file_t* file) {
    if (file->path != NULL)
        free(file->path);

    if (file->resample_context != NULL)
        avresample_free(&(file->resample_context));

    if (file->codec_context != NULL)
        avcodec_close(file->codec_context);

    if (file->format_context != NULL)
        avformat_close_input(&(file->format_context));

    free(file);
}
/**
 * Initialize the audio resampler based on the input and output codec settings.
 * If the input and output sample formats differ, a conversion is required
 * libavresample takes care of this, but requires initialization.
 *
 * Returns 0 on success (including the no-op case where no conversion is
 * needed) or a negative AVERROR code on failure.
 */
static int init_resampler(AVCodecContext *input_codec_context,
                          AVCodecContext *output_codec_context,
                          AVAudioResampleContext **resample_context)
{
    /**
     * Only initialize the resampler if it is necessary, i.e.,
     * if and only if the sample formats differ.
     */
    if (input_codec_context->sample_fmt != output_codec_context->sample_fmt ||
        input_codec_context->channels != output_codec_context->channels) {
        int error;

        /** Create a resampler context for the conversion. */
        if (!(*resample_context = avresample_alloc_context())) {
            fprintf(stderr, "Could not allocate resample context\n");
            return AVERROR(ENOMEM);
        }

        /**
         * Set the conversion parameters.
         * Default channel layouts based on the number of channels
         * are assumed for simplicity (they are sometimes not detected
         * properly by the demuxer and/or decoder).
         */
        av_opt_set_int(*resample_context, "in_channel_layout",
                       av_get_default_channel_layout(input_codec_context->channels), 0);
        av_opt_set_int(*resample_context, "out_channel_layout",
                       av_get_default_channel_layout(output_codec_context->channels), 0);
        av_opt_set_int(*resample_context, "in_sample_rate",
                       input_codec_context->sample_rate, 0);
        av_opt_set_int(*resample_context, "out_sample_rate",
                       output_codec_context->sample_rate, 0);
        av_opt_set_int(*resample_context, "in_sample_fmt",
                       input_codec_context->sample_fmt, 0);
        av_opt_set_int(*resample_context, "out_sample_fmt",
                       output_codec_context->sample_fmt, 0);

        /** Open the resampler with the specified parameters. */
        if ((error = avresample_open(*resample_context)) < 0) {
            fprintf(stderr, "Could not open resample context\n");
            /* avresample_free() also resets *resample_context to NULL. */
            avresample_free(resample_context);
            return error;
        }
    }
    return 0;
}
// Shut down the audio subsystem: release whichever resampler backend was
// compiled in and selected, destroy the callback mutex, and close SDL audio.
void BE_ST_ShutdownAudio(void)
{
	if (g_sdlAudioSubsystemUp)
	{
		// Resampling state only exists for the emulated-callback paths.
		if ((g_sdlAudioSpec.callback == BEL_ST_Resampling_EmuCallBack) || (g_sdlAudioSpec.callback == BEL_ST_Resampling_DigiCallBack))
		{
#ifndef REFKEEN_RESAMPLER_NONE
			if (g_refKeenCfg.useResampler)
			{
// Exactly one resampler library is picked at build time.
#if (defined REFKEEN_RESAMPLER_LIBSWRESAMPLE)
				swr_free(&g_sdlSwrContext);
#elif (defined REFKEEN_RESAMPLER_LIBAVRESAMPLE)
				avresample_free(&g_sdlAvAudioResampleContext);
#elif (defined REFKEEN_RESAMPLER_LIBAVCODEC)
				av_resample_close(g_sdlAvResampleContext);
#elif (defined REFKEEN_RESAMPLER_LIBRESAMPLE)
				resample_close(g_sdlResampleHandle);
#elif (defined REFKEEN_RESAMPLER_LIBSOXR)
				soxr_delete(g_sdlSoxr);
#elif (defined REFKEEN_RESAMPLER_LIBSPEEXDSP)
				speex_resampler_destroy(g_sdlSpeexResamplerState);
#elif (defined REFKEEN_RESAMPLER_LIBSAMPLERATE)
				src_delete(g_sdlSrcResampler);
#endif
			}
			else
#endif // REFKEEN_RESAMPLER_NONE
			{
				// Fallback path uses a plain conversion table instead.
				free(g_sdlSampleRateConvTable);
			}
		}
#ifdef REFKEEN_CONFIG_THREADS
		SDL_DestroyMutex(g_sdlCallbackMutex);
		g_sdlCallbackMutex = NULL;
#endif
		SDL_CloseAudio();
		SDL_QuitSubSystem(SDL_INIT_AUDIO);
		g_sdlAudioSubsystemUp = false;
	}
	g_sdlCallbackSDFuncPtr = 0; // Just in case this may be called after the audio subsystem was never really started (manual calls to callback)
}
/**
 * Free all per-stream and shared Opus decoder state.
 * Always returns 0.
 */
static av_cold int opus_decode_close(AVCodecContext *avctx)
{
    OpusContext *c = avctx->priv_data;
    int i;

    for (i = 0; i < c->nb_streams; i++) {
        OpusStreamContext *s = &c->streams[i];

        ff_silk_free(&s->silk);
        ff_celt_free(&s->celt);

        av_freep(&s->out_dummy);
        s->out_dummy_allocated_size = 0;

        av_audio_fifo_free(s->celt_delay);
        /* Only one resampler backend is compiled in. */
#if CONFIG_SWRESAMPLE
        swr_free(&s->swr);
#elif CONFIG_AVRESAMPLE
        avresample_free(&s->avr);
#endif
    }

    av_freep(&c->streams);

    /* Per-stream synchronization FIFOs are allocated lazily, so they may
     * be absent even when nb_streams > 0. */
    if (c->sync_buffers) {
        for (i = 0; i < c->nb_streams; i++)
            av_audio_fifo_free(c->sync_buffers[i]);
    }
    av_freep(&c->sync_buffers);
    av_freep(&c->decoded_samples);
    av_freep(&c->out);
    av_freep(&c->out_size);

    c->nb_streams = 0;

    av_freep(&c->channel_maps);
    av_freep(&c->fdsp);

    return 0;
}
/* Close the audio encoder work object and release all private state. */
static void encavcodecaClose(hb_work_object_t * w)
{
    hb_work_private_t * pv = w->private_data;

    if (pv == NULL)
        return;

    if (pv->context != NULL)
    {
        Finalize(w);
        hb_deep_log(2, "encavcodeca: closing libavcodec");
        if (pv->context->codec != NULL)
            avcodec_flush_buffers(pv->context);
        hb_avcodec_close(pv->context);
        av_free(pv->context);
    }

    if (pv->output_buf != NULL)
        free(pv->output_buf);
    /* input_buf may alias output_buf; only free it when distinct. */
    if (pv->input_buf != NULL && pv->input_buf != pv->output_buf)
        free(pv->input_buf);
    pv->output_buf = pv->input_buf = NULL;

    if (pv->list != NULL)
        hb_list_empty(&pv->list);

    if (pv->avresample != NULL)
        avresample_free(&pv->avresample);

    free(pv);
    w->private_data = NULL;
}
/* Release every buffer and context owned by the audio encoder. */
void dc_audio_encoder_close(AudioOutputFile *audio_output_file)
{
	av_fifo_free(audio_output_file->fifo);

	av_free(audio_output_file->adata_buf);
	av_free(audio_output_file->aframe);

	avcodec_close(audio_output_file->codec_ctx);
	av_free(audio_output_file->codec_ctx);

#ifdef DC_AUDIO_RESAMPLER
	avresample_free(&audio_output_file->aresampler);
#endif
}
void AudioLoader::closeAudioFile() { if (!_demuxCtx) { return; } if (_convertCtxAv) { avresample_close(_convertCtxAv); avresample_free(&_convertCtxAv); } // Close the codec if (!_audioCtx) avcodec_close(_audioCtx); // Close the audio file if (!_demuxCtx) avformat_close_input(&_demuxCtx); // free AVPacket // TODO: use a variable for whether _packet is initialized or not av_free_packet(&_packet); _demuxCtx = 0; _audioCtx = 0; _streams.clear(); }
/*
 * Initialize the audio encoder work object: pick a codec by name or ID
 * based on the requested HandBrake codec, configure and open an
 * AVCodecContext, and set up an optional lavr context to convert the
 * float input into the encoder's sample format.
 * Returns 0 on success, 1 on failure.
 */
static int encavcodecaInit(hb_work_object_t *w, hb_job_t *job)
{
    AVCodec *codec;
    AVCodecContext *context;
    hb_audio_t *audio = w->audio;

    hb_work_private_t *pv = calloc(1, sizeof(hb_work_private_t));
    w->private_data = pv;
    pv->job = job;
    pv->list = hb_list_init();

    // channel count, layout and matrix encoding
    int matrix_encoding;
    uint64_t channel_layout = hb_ff_mixdown_xlat(audio->config.out.mixdown,
                                                 &matrix_encoding);
    pv->out_discrete_channels =
        hb_mixdown_get_discrete_channel_count(audio->config.out.mixdown);

    // default settings and options
    AVDictionary *av_opts = NULL;
    const char *codec_name = NULL;
    enum AVCodecID codec_id = AV_CODEC_ID_NONE;
    enum AVSampleFormat sample_fmt = AV_SAMPLE_FMT_FLTP;
    int bits_per_raw_sample = 0;
    int profile = FF_PROFILE_UNKNOWN;

    // override with encoder-specific values
    switch (audio->config.out.codec)
    {
        case HB_ACODEC_AC3:
            codec_id = AV_CODEC_ID_AC3;
            if (matrix_encoding != AV_MATRIX_ENCODING_NONE)
                av_dict_set(&av_opts, "dsur_mode", "on", 0);
            break;

        case HB_ACODEC_FDK_AAC:
        case HB_ACODEC_FDK_HAAC:
            codec_name = "libfdk_aac";
            sample_fmt = AV_SAMPLE_FMT_S16;
            bits_per_raw_sample = 16;
            switch (audio->config.out.codec)
            {
                case HB_ACODEC_FDK_HAAC:
                    profile = FF_PROFILE_AAC_HE;
                    break;
                default:
                    profile = FF_PROFILE_AAC_LOW;
                    break;
            }
            // Libav's libfdk-aac wrapper expects back channels for 5.1
            // audio, and will error out unless we translate the layout
            if (channel_layout == AV_CH_LAYOUT_5POINT1)
                channel_layout = AV_CH_LAYOUT_5POINT1_BACK;
            break;

        case HB_ACODEC_FFAAC:
            codec_name = "aac";
            av_dict_set(&av_opts, "stereo_mode", "ms_off", 0);
            break;

        case HB_ACODEC_FFFLAC:
        case HB_ACODEC_FFFLAC24:
            codec_id = AV_CODEC_ID_FLAC;
            switch (audio->config.out.codec)
            {
                case HB_ACODEC_FFFLAC24:
                    sample_fmt = AV_SAMPLE_FMT_S32;
                    bits_per_raw_sample = 24;
                    break;
                default:
                    sample_fmt = AV_SAMPLE_FMT_S16;
                    bits_per_raw_sample = 16;
                    break;
            }
            break;

        default:
            hb_error("encavcodecaInit: unsupported codec (0x%x)",
                     audio->config.out.codec);
            return 1;
    }

    // Prefer lookup by name when one was chosen above.
    if (codec_name != NULL)
    {
        codec = avcodec_find_encoder_by_name(codec_name);
        if (codec == NULL)
        {
            hb_error("encavcodecaInit: avcodec_find_encoder_by_name(%s) failed",
                     codec_name);
            return 1;
        }
    }
    else
    {
        codec = avcodec_find_encoder(codec_id);
        if (codec == NULL)
        {
            hb_error("encavcodecaInit: avcodec_find_encoder(%d) failed",
                     codec_id);
            return 1;
        }
    }

    // allocate the context and apply the settings
    context = avcodec_alloc_context3(codec);
    hb_ff_set_sample_fmt(context, codec, sample_fmt);
    context->bits_per_raw_sample = bits_per_raw_sample;
    context->profile = profile;
    context->channel_layout = channel_layout;
    context->channels = pv->out_discrete_channels;
    context->sample_rate = audio->config.out.samplerate;

    if (audio->config.out.bitrate > 0)
    {
        context->bit_rate = audio->config.out.bitrate * 1000;
    }
    else if (audio->config.out.quality >= 0)
    {
        context->global_quality = audio->config.out.quality * FF_QP2LAMBDA;
        context->flags |= CODEC_FLAG_QSCALE;
    }

    if (audio->config.out.compression_level >= 0)
    {
        context->compression_level = audio->config.out.compression_level;
    }

    // For some codecs, libav requires the following flag to be set
    // so that it fills extradata with global header information.
    // If this flag is not set, it inserts the data into each
    // packet instead.
    context->flags |= CODEC_FLAG_GLOBAL_HEADER;

    if (hb_avcodec_open(context, codec, &av_opts, 0))
    {
        hb_error("encavcodecaInit: hb_avcodec_open() failed");
        return 1;
    }

    // avcodec_open populates the opts dictionary with the
    // things it didn't recognize.
    AVDictionaryEntry *t = NULL;
    while ((t = av_dict_get(av_opts, "", t, AV_DICT_IGNORE_SUFFIX)))
    {
        hb_log("encavcodecaInit: Unknown avcodec option %s", t->key);
    }
    av_dict_free(&av_opts);

    pv->context = context;

    audio->config.out.samples_per_frame =
        pv->samples_per_frame = context->frame_size;
    pv->input_samples = context->frame_size * context->channels;
    pv->input_buf = malloc(pv->input_samples * sizeof(float));
    pv->max_output_bytes = (pv->input_samples *
                            av_get_bytes_per_sample(context->sample_fmt));

    // sample_fmt conversion
    if (context->sample_fmt != AV_SAMPLE_FMT_FLT)
    {
        pv->output_buf = malloc(pv->max_output_bytes);
        pv->avresample = avresample_alloc_context();
        if (pv->avresample == NULL)
        {
            hb_error("encavcodecaInit: avresample_alloc_context() failed");
            return 1;
        }
        av_opt_set_int(pv->avresample, "in_sample_fmt",
                       AV_SAMPLE_FMT_FLT, 0);
        av_opt_set_int(pv->avresample, "out_sample_fmt",
                       context->sample_fmt, 0);
        av_opt_set_int(pv->avresample, "in_channel_layout",
                       context->channel_layout, 0);
        av_opt_set_int(pv->avresample, "out_channel_layout",
                       context->channel_layout, 0);
        if (hb_audio_dither_is_supported(audio->config.out.codec))
        {
            // dithering needs the sample rate
            av_opt_set_int(pv->avresample, "in_sample_rate",
                           context->sample_rate, 0);
            av_opt_set_int(pv->avresample, "out_sample_rate",
                           context->sample_rate, 0);
            av_opt_set_int(pv->avresample, "dither_method",
                           audio->config.out.dither_method, 0);
        }
        if (avresample_open(pv->avresample))
        {
            hb_error("encavcodecaInit: avresample_open() failed");
            avresample_free(&pv->avresample);
            return 1;
        }
    }
    else
    {
        // Input is already float; encode straight from the input buffer.
        pv->avresample = NULL;
        pv->output_buf = pv->input_buf;
    }

    if (context->extradata != NULL)
    {
        memcpy(w->config->extradata.bytes, context->extradata,
               context->extradata_size);
        w->config->extradata.length = context->extradata_size;
    }

    return 0;
}
/*
 * Decode/convert one audio media buffer and feed it to the resampler.
 *
 * Handles three delivery modes: raw 16-bit PCM buffers (mb_cw == NULL),
 * SPDIF/coded passthrough, and normal avcodec decoding followed by a
 * lavr conversion.  Also tracks PTS, compensating for samples still
 * buffered inside the resampler.
 */
static void audio_process_audio(audio_decoder_t *ad, media_buf_t *mb)
{
  const audio_class_t *ac = ad->ad_ac;
  AVFrame *frame = ad->ad_frame;
  media_pipe_t *mp = ad->ad_mp;
  media_queue_t *mq = &mp->mp_audio;
  int r;
  int got_frame;

  if(mb->mb_skip || mb->mb_stream != mq->mq_stream)
    return;

  while(mb->mb_size) {

    if(mb->mb_cw == NULL) {
      /* Raw 16-bit PCM: synthesize an AVFrame around the buffer. */
      frame->sample_rate = mb->mb_rate;
      frame->format = AV_SAMPLE_FMT_S16;
      switch(mb->mb_channels) {
      case 1:
        frame->channel_layout = AV_CH_LAYOUT_MONO;
        frame->nb_samples = mb->mb_size / 2;
        break;
      case 2:
        frame->channel_layout = AV_CH_LAYOUT_STEREO;
        frame->nb_samples = mb->mb_size / 4;
        break;
      default:
        abort();
      }
      frame->data[0] = mb->mb_data;
      frame->linesize[0] = 0;
      r = mb->mb_size;
      got_frame = 1;

    } else {

      media_codec_t *mc = mb->mb_cw;
      AVCodecContext *ctx = mc->ctx;

      if(mc->codec_id != ad->ad_in_codec_id) {
        AVCodec *codec = avcodec_find_decoder(mc->codec_id);
        TRACE(TRACE_DEBUG, "audio", "Codec changed to %s (0x%x)",
              codec ? codec->name : "???", mc->codec_id);
        ad->ad_in_codec_id = mc->codec_id;
        ad->ad_in_sample_rate = 0;

        audio_cleanup_spdif_muxer(ad);

        /* Let the audio class decide between PCM, SPDIF and coded mode. */
        ad->ad_mode = ac->ac_get_mode != NULL ?
          ac->ac_get_mode(ad, mc->codec_id,
                          ctx ? ctx->extradata : NULL,
                          ctx ? ctx->extradata_size : 0) : AUDIO_MODE_PCM;

        if(ad->ad_mode == AUDIO_MODE_SPDIF) {
          audio_setup_spdif_muxer(ad, codec, mq);
        } else if(ad->ad_mode == AUDIO_MODE_CODED) {
          hts_mutex_lock(&mp->mp_mutex);
          ac->ac_deliver_coded_locked(ad, mb->mb_data, mb->mb_size,
                                      mb->mb_pts, mb->mb_epoch);
          hts_mutex_unlock(&mp->mp_mutex);
          return;
        }
      }

      if(ad->ad_spdif_muxer != NULL) {
        /* SPDIF passthrough: remux the packet, no decoding. */
        mb->mb_pkt.stream_index = 0;
        ad->ad_pts = mb->mb_pts;
        ad->ad_epoch = mb->mb_epoch;
        mb->mb_pts = AV_NOPTS_VALUE;
        mb->mb_dts = AV_NOPTS_VALUE;
        av_write_frame(ad->ad_spdif_muxer, &mb->mb_pkt);
        avio_flush(ad->ad_spdif_muxer->pb);
        return;
      }

      if(ad->ad_mode == AUDIO_MODE_CODED) {
        ad->ad_pts = mb->mb_pts;
        ad->ad_epoch = mb->mb_epoch;
      }

      if(ctx == NULL) {
        /* Lazily open the decoder on first use. */
        AVCodec *codec = avcodec_find_decoder(mc->codec_id);
        assert(codec != NULL); // Checked in libav.c
        ctx = mc->ctx = avcodec_alloc_context3(codec);

        if(ad->ad_stereo_downmix)
          ctx->request_channel_layout = AV_CH_LAYOUT_STEREO;

        if(avcodec_open2(mc->ctx, codec, NULL) < 0) {
          av_freep(&mc->ctx);
          return;
        }
      }

      r = avcodec_decode_audio4(ctx, frame, &got_frame, &mb->mb_pkt);
      if(r < 0)
        return;

      /* Some decoders leave sample_rate unset; fall back to the codec
         context, then to the demuxer's notion of the rate. */
      if(frame->sample_rate == 0) {
        frame->sample_rate = ctx->sample_rate;

        if(frame->sample_rate == 0 && mb->mb_cw->fmt_ctx)
          frame->sample_rate = mb->mb_cw->fmt_ctx->sample_rate;

        if(frame->sample_rate == 0) {
          /* Log only once per decoder instance. */
          if(!ad->ad_sample_rate_fail) {
            ad->ad_sample_rate_fail = 1;
            TRACE(TRACE_ERROR, "Audio",
                  "Unable to determine sample rate");
          }
          return;
        }
      }

      if(frame->channel_layout == 0) {
        frame->channel_layout = av_get_default_channel_layout(ctx->channels);
        if(frame->channel_layout == 0) {
          if(!ad->ad_channel_layout_fail) {
            ad->ad_channel_layout_fail = 1;
            TRACE(TRACE_ERROR, "Audio",
                  "Unable to map %d channels to channel layout");
          }
          return;
        }
      }

      if(mp->mp_stats)
        mp_set_mq_meta(mq, ctx->codec, ctx);
    }

    if(mb->mb_pts != PTS_UNSET) {
      /* Adjust the PTS by the latency of samples already queued in the
         resampler (output side) plus its internal delay (input side). */
      int od = 0, id = 0;

      if(ad->ad_avr != NULL) {
        od = avresample_available(ad->ad_avr) *
          1000000LL / ad->ad_out_sample_rate;
        id = avresample_get_delay(ad->ad_avr) *
          1000000LL / frame->sample_rate;
      }
      ad->ad_pts = mb->mb_pts - od - id;
      ad->ad_epoch = mb->mb_epoch;

      if(mb->mb_drive_clock)
        mp_set_current_time(mp, mb->mb_pts - ad->ad_delay,
                            mb->mb_epoch, mb->mb_delta);
      mb->mb_pts = PTS_UNSET; // No longer valid
    }

    mb->mb_data += r;
    mb->mb_size -= r;

    if(got_frame) {

      if(frame->sample_rate != ad->ad_in_sample_rate ||
         frame->format != ad->ad_in_sample_format ||
         frame->channel_layout != ad->ad_in_channel_layout ||
         ad->ad_want_reconfig) {

        /* Input parameters changed: reconfigure the output and rebuild
           the resampler with the new in/out parameters. */
        ad->ad_want_reconfig = 0;
        ad->ad_in_sample_rate = frame->sample_rate;
        ad->ad_in_sample_format = frame->format;
        ad->ad_in_channel_layout = frame->channel_layout;

        ac->ac_reconfig(ad);

        if(ad->ad_avr == NULL)
          ad->ad_avr = avresample_alloc_context();
        else
          avresample_close(ad->ad_avr);

        av_opt_set_int(ad->ad_avr, "in_sample_fmt",
                       ad->ad_in_sample_format, 0);
        av_opt_set_int(ad->ad_avr, "in_sample_rate",
                       ad->ad_in_sample_rate, 0);
        av_opt_set_int(ad->ad_avr, "in_channel_layout",
                       ad->ad_in_channel_layout, 0);
        av_opt_set_int(ad->ad_avr, "out_sample_fmt",
                       ad->ad_out_sample_format, 0);
        av_opt_set_int(ad->ad_avr, "out_sample_rate",
                       ad->ad_out_sample_rate, 0);
        av_opt_set_int(ad->ad_avr, "out_channel_layout",
                       ad->ad_out_channel_layout, 0);

        char buf1[128];
        char buf2[128];

        av_get_channel_layout_string(buf1, sizeof(buf1), -1,
                                     ad->ad_in_channel_layout);
        av_get_channel_layout_string(buf2, sizeof(buf2), -1,
                                     ad->ad_out_channel_layout);

        TRACE(TRACE_DEBUG, "Audio",
              "Converting from [%s %dHz %s] to [%s %dHz %s]",
              buf1, ad->ad_in_sample_rate,
              av_get_sample_fmt_name(ad->ad_in_sample_format),
              buf2, ad->ad_out_sample_rate,
              av_get_sample_fmt_name(ad->ad_out_sample_format));

        if(avresample_open(ad->ad_avr)) {
          TRACE(TRACE_ERROR, "Audio", "Unable to open resampler");
          avresample_free(&ad->ad_avr);
        }

        prop_set(mp->mp_prop_ctrl, "canAdjustVolume", PROP_SET_INT, 1);

        if(ac->ac_set_volume != NULL)
          ac->ac_set_volume(ad, ad->ad_vol_scale);
      }

      if(ad->ad_avr != NULL) {
        avresample_convert(ad->ad_avr, NULL, 0, 0,
                           frame->data, frame->linesize[0],
                           frame->nb_samples);
      } else {
        /* No resampler (open failed): pace playback in real time. */
        int delay = 1000000LL * frame->nb_samples / frame->sample_rate;
        usleep(delay);
      }
    }
  }
}
/*
 * Decode one audio media buffer and push the resulting samples into the
 * lavr resampler, rebuilding the resampler whenever the decoded frame's
 * rate/format/layout changes.  Raw PCM buffers (mb_cw == NULL) bypass
 * the decoder entirely.
 */
static void audio_process_audio(audio_decoder_t *ad, media_buf_t *mb)
{
  const audio_class_t *ac = ad->ad_ac;
  AVFrame *frame = ad->ad_frame;
  media_pipe_t *mp = ad->ad_mp;
  media_queue_t *mq = &mp->mp_audio;
  int r;
  int got_frame;
  AVPacket avpkt;
  int offset = 0;

  if(mb->mb_skip || mb->mb_stream != mq->mq_stream)
    return;

  while(offset < mb->mb_size) {

    if(mb->mb_cw == NULL) {
      /* Raw 16-bit PCM: wrap the buffer in an AVFrame. */
      frame->sample_rate = mb->mb_rate;
      frame->format = AV_SAMPLE_FMT_S16;
      switch(mb->mb_channels) {
      case 1:
        frame->channel_layout = AV_CH_LAYOUT_MONO;
        frame->nb_samples = mb->mb_size / 2;
        break;
      case 2:
        frame->channel_layout = AV_CH_LAYOUT_STEREO;
        frame->nb_samples = mb->mb_size / 4;
        break;
      default:
        abort();
      }
      frame->data[0] = mb->mb_data;
      frame->linesize[0] = 0;
      r = mb->mb_size;
      got_frame = 1;

    } else {

      av_init_packet(&avpkt);
      avpkt.data = mb->mb_data + offset;
      avpkt.size = mb->mb_size - offset;

      r = avcodec_decode_audio4(mb->mb_cw->codec_ctx, frame,
                                &got_frame, &avpkt);
      if(r < 0)
        return;

      /* Fall back to the codec context's rate if the frame lacks one. */
      if(frame->sample_rate == 0)
        frame->sample_rate = mb->mb_cw->codec_ctx->sample_rate;

      if(frame->sample_rate == 0)
        return;

      if(mp->mp_stats)
        mp_set_mq_meta(mq, mb->mb_cw->codec, mb->mb_cw->codec_ctx);
    }

    if(offset == 0 && mb->mb_pts != AV_NOPTS_VALUE) {
      /* Compensate the PTS for samples buffered inside the resampler. */
      int od = 0, id = 0;

      if(ad->ad_avr != NULL) {
        od = avresample_available(ad->ad_avr) *
          1000000LL / ad->ad_out_sample_rate;
        id = avresample_get_delay(ad->ad_avr) *
          1000000LL / frame->sample_rate;
      }
      ad->ad_pts = mb->mb_pts - od - id;
      ad->ad_epoch = mb->mb_epoch;
      // printf("od=%-20d id=%-20d PTS=%-20ld oPTS=%-20ld\n",
      // od, id, mb->mb_pts, pts);

      if(mb->mb_drive_clock)
        mp_set_current_time(mp, mb->mb_pts - ad->ad_delay,
                            mb->mb_epoch, mb->mb_delta);
    }

    offset += r;

    if(got_frame) {

      if(frame->sample_rate != ad->ad_in_sample_rate ||
         frame->format != ad->ad_in_sample_format ||
         frame->channel_layout != ad->ad_in_channel_layout) {

        /* Frame parameters changed: reconfigure and reopen resampler. */
        ad->ad_in_sample_rate = frame->sample_rate;
        ad->ad_in_sample_format = frame->format;
        ad->ad_in_channel_layout = frame->channel_layout;

        ac->ac_reconfig(ad);

        if(ad->ad_avr == NULL)
          ad->ad_avr = avresample_alloc_context();
        else
          avresample_close(ad->ad_avr);

        av_opt_set_int(ad->ad_avr, "in_sample_fmt",
                       ad->ad_in_sample_format, 0);
        av_opt_set_int(ad->ad_avr, "in_sample_rate",
                       ad->ad_in_sample_rate, 0);
        av_opt_set_int(ad->ad_avr, "in_channel_layout",
                       ad->ad_in_channel_layout, 0);
        av_opt_set_int(ad->ad_avr, "out_sample_fmt",
                       ad->ad_out_sample_format, 0);
        av_opt_set_int(ad->ad_avr, "out_sample_rate",
                       ad->ad_out_sample_rate, 0);
        av_opt_set_int(ad->ad_avr, "out_channel_layout",
                       ad->ad_out_channel_layout, 0);

        char buf1[128];
        char buf2[128];

        av_get_channel_layout_string(buf1, sizeof(buf1), -1,
                                     ad->ad_in_channel_layout);
        av_get_channel_layout_string(buf2, sizeof(buf2), -1,
                                     ad->ad_out_channel_layout);

        TRACE(TRACE_DEBUG, "Audio",
              "Converting from [%s %dHz %s] to [%s %dHz %s]",
              buf1, ad->ad_in_sample_rate,
              av_get_sample_fmt_name(ad->ad_in_sample_format),
              buf2, ad->ad_out_sample_rate,
              av_get_sample_fmt_name(ad->ad_out_sample_format));

        if(avresample_open(ad->ad_avr)) {
          TRACE(TRACE_ERROR, "AudioQueue", "Unable to open resampler");
          avresample_free(&ad->ad_avr);
        }
      }

      if(ad->ad_avr != NULL)
        avresample_convert(ad->ad_avr, NULL, 0, 0,
                           frame->data, frame->linesize[0],
                           frame->nb_samples);
    }
  }
}
/* Thin wrapper: dispose of a ResampleContext via libavresample. */
inline void resample_free( ResampleContext **ctx )
{
    avresample_free( ctx );
}
//! Stop playback and release every resource owned by the movie player.
void THMovie::unload()
{
    m_fAborting = true;

    // Release the queues so worker threads blocked on them can exit.
    if(m_pAudioQueue)
    {
        m_pAudioQueue->release();
    }
    if(m_pVideoQueue)
    {
        m_pVideoQueue->release();
    }
    m_pMoviePictureBuffer->abort();

    if(m_pStreamThread)
    {
        SDL_WaitThread(m_pStreamThread, nullptr);
        m_pStreamThread = nullptr;
    }
    if(m_pVideoThread)
    {
        SDL_WaitThread(m_pVideoThread, nullptr);
        m_pVideoThread = nullptr;
    }

    //wait until after other threads are closed to clear the packet queues
    //so we don't free something being used.
    if(m_pAudioQueue)
    {
        while(m_pAudioQueue->getCount() > 0)
        {
            AVPacket* p = m_pAudioQueue->pull(false);
            av_packet_unref(p);
        }
        delete m_pAudioQueue;
        m_pAudioQueue = nullptr;
    }
    if(m_pVideoQueue)
    {
        while(m_pVideoQueue->getCount() > 0)
        {
            AVPacket* p = m_pVideoQueue->pull(false);
            av_packet_unref(p);
        }
        delete m_pVideoQueue;
        m_pVideoQueue = nullptr;
    }
    m_pMoviePictureBuffer->deallocate();

    if(m_pVideoCodecContext)
    {
        avcodec_close(m_pVideoCodecContext);
        m_pVideoCodecContext = nullptr;
    }

    // Detach the movie audio from its SDL mixer channel.
    if(m_iChannel >= 0)
    {
        Mix_UnregisterAllEffects(m_iChannel);
        Mix_HaltChannel(m_iChannel);
        Mix_FreeChunk(m_pChunk);
        m_iChannel = -1;
    }

    // Audio decode state is shared with the mixer callback; hold the
    // decoding mutex while tearing it down.
    SDL_LockMutex(m_pDecodingAudioMutex);
    if(m_iAudioBufferMaxSize > 0)
    {
        av_free(m_pbAudioBuffer);
        m_iAudioBufferMaxSize = 0;
    }
    if(m_pAudioCodecContext)
    {
        avcodec_close(m_pAudioCodecContext);
        m_pAudioCodecContext = nullptr;
    }
    av_frame_free(&m_audio_frame);

#ifdef CORSIX_TH_USE_FFMPEG
    swr_free(&m_pAudioResampleContext);
#elif defined(CORSIX_TH_USE_LIBAV)
    // avresample_free doesn't skip nullptr on it's own.
    if (m_pAudioResampleContext != nullptr)
    {
        avresample_free(&m_pAudioResampleContext);
        m_pAudioResampleContext = nullptr;
    }
#endif

    if(m_pAudioPacket)
    {
        // Restore the saved data pointer/size before unref (presumably
        // the decode path advances them while consuming the packet —
        // verify against the audio decode loop).
        m_pAudioPacket->data = m_pbAudioPacketData;
        m_pAudioPacket->size = m_iAudioPacketSize;
        av_packet_unref(m_pAudioPacket);
        av_free(m_pAudioPacket);
        m_pAudioPacket = nullptr;
        m_pbAudioPacketData = nullptr;
        m_iAudioPacketSize = 0;
    }
    SDL_UnlockMutex(m_pDecodingAudioMutex);

    if(m_pFormatContext)
    {
        avformat_close_input(&m_pFormatContext);
    }
}