Example #1
0
void aubio_source_avcodec_reset_resampler(aubio_source_avcodec_t * s, uint_t multi) {
  if ( (multi != s->multi) || (s->avr == NULL) ) {
    int64_t input_layout = av_get_default_channel_layout(s->input_channels);
    uint_t output_channels = multi ? s->input_channels : 1;
    int64_t output_layout = av_get_default_channel_layout(output_channels);
    if (s->avr != NULL) {
      avresample_close( s->avr );
      av_free ( s->avr );
      s->avr = NULL;
    }
    AVAudioResampleContext *avr = avresample_alloc_context();

    av_opt_set_int(avr, "in_channel_layout",  input_layout,           0);
    av_opt_set_int(avr, "out_channel_layout", output_layout,          0);
    av_opt_set_int(avr, "in_sample_rate",     s->input_samplerate,    0);
    av_opt_set_int(avr, "out_sample_rate",    s->samplerate,          0);
    av_opt_set_int(avr, "in_sample_fmt",      s->avCodecCtx->sample_fmt, 0);
    av_opt_set_int(avr, "out_sample_fmt",     AV_SAMPLE_FMT_FLT,      0);
    int err;
    if ( ( err = avresample_open(avr) ) < 0) {
      char errorstr[256];
      av_strerror (err, errorstr, sizeof(errorstr));
      AUBIO_ERR("source_avcodec: Could not open AVAudioResampleContext for %s (%s)\n",
          s->path, errorstr);
      //goto beach;
      return;
    }
    s->avr = avr;
    s->multi = multi;
  }
}
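Example #1 still targets the deprecated libavresample API (avresample_alloc_context()/avresample_open()). For comparison with the later snippets, here is a minimal sketch, not aubio's code, of the same default-layout-driven setup written against libswresample; the helper name and parameters are illustrative only.

#include <libavutil/channel_layout.h>
#include <libavutil/samplefmt.h>
#include <libswresample/swresample.h>

/* Illustrative only: build a float-output resampler from plain channel counts,
 * deriving both layouts with av_get_default_channel_layout(). */
static SwrContext *make_float_resampler(int in_channels, int in_rate,
                                        enum AVSampleFormat in_fmt,
                                        int out_channels, int out_rate)
{
  SwrContext *swr = swr_alloc_set_opts(NULL,
      av_get_default_channel_layout(out_channels), AV_SAMPLE_FMT_FLT, out_rate,
      av_get_default_channel_layout(in_channels),  in_fmt,            in_rate,
      0, NULL);
  if (!swr || swr_init(swr) < 0) {
    swr_free(&swr); /* swr_free() accepts a pointer to NULL */
    return NULL;
  }
  return swr;
}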
Example #2
0
bool AudioTransform::init( const Frame& srcFrame, const Frame& dstFrame )
{
	const AudioFrame& src = static_cast<const AudioFrame&>( srcFrame );
	const AudioFrame& dst = static_cast<const AudioFrame&>( dstFrame );

	_audioConvertContext = AllocResampleContext();

	if( !_audioConvertContext )
	{
		throw std::runtime_error( "unable to create audio convert context" );
	}
	
	av_opt_set_int(  _audioConvertContext, "in_channel_layout",  av_get_default_channel_layout( src.desc().getChannels() ), 0 );
	av_opt_set_int(  _audioConvertContext, "out_channel_layout", av_get_default_channel_layout( dst.desc().getChannels() ), 0 );
	av_opt_set_int(  _audioConvertContext, "in_sample_rate",     src.desc().getSampleRate(), 0 );
	av_opt_set_int(  _audioConvertContext, "out_sample_rate",    dst.desc().getSampleRate(), 0 );
	SetSampleFormat( _audioConvertContext, "in_sample_fmt",      src.desc().getAVSampleFormat(), 0 );
	SetSampleFormat( _audioConvertContext, "out_sample_fmt",     dst.desc().getAVSampleFormat(), 0 );
	
	if( InitResampleContext( _audioConvertContext ) < 0 )
	{
		FreeResampleContext( &_audioConvertContext );
		throw std::runtime_error( "unable to open audio convert context" );
	}
	
	return true;
}
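AllocResampleContext(), SetSampleFormat(), InitResampleContext() and FreeResampleContext() above are not FFmpeg calls; in avtranscoder they are thin wrappers that let the same code build against either libswresample or libavresample. The definitions below are a guess at what the swresample variant might look like, not the project's actual code.

// Hypothetical wrapper definitions (swresample build assumed).
extern "C" {
#include <libswresample/swresample.h>
#include <libavutil/opt.h>
}

static SwrContext* AllocResampleContext() { return swr_alloc(); }
static int InitResampleContext(SwrContext* c) { return swr_init(c); }
static void FreeResampleContext(SwrContext** c) { swr_free(c); }
static int SetSampleFormat(SwrContext* c, const char* name, AVSampleFormat fmt, int flags)
{
	return av_opt_set_sample_fmt(c, name, fmt, flags);
}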
Example #3
0
SimpleAT3::SimpleAT3()
	: codec_(0),
		codecCtx_(0),
		swrCtx_(0) {
	frame_ = av_frame_alloc();

	codec_ = avcodec_find_decoder(AV_CODEC_ID_ATRAC3P);
	if (!codec_) {
		// Eh, we shouldn't even have managed to compile. But meh.
		ERROR_LOG(ME, "This version of FFMPEG does not support AV_CODEC_ID_ATRAC3P (Atrac3+). Update your submodule.");
		return;
	}

	codecCtx_ = avcodec_alloc_context3(codec_);
	if (!codecCtx_) {
		ERROR_LOG(ME, "Failed to allocate a codec context");
		return;
	}

	codecCtx_->channels = 2;
	codecCtx_->channel_layout = AV_CH_LAYOUT_STEREO;

	AVDictionary *opts = 0;
	av_dict_set(&opts, "channels", "2", 0);
	av_dict_set(&opts, "sample_rate", "44100", 0);
	if (avcodec_open2(codecCtx_, codec_, &opts) < 0) {
		ERROR_LOG(ME, "Failed to open codec");
		av_dict_free(&opts);
		return;
	}

	av_dict_free(&opts);

	// Initialize the sample rate converter. We only really use it to convert float output
	// into int16.
	int wanted_channels = 2;
	int64_t wanted_channel_layout = av_get_default_channel_layout(wanted_channels);
	int64_t dec_channel_layout = av_get_default_channel_layout(2);

	swrCtx_ = swr_alloc_set_opts(
			swrCtx_,
			wanted_channel_layout,
			AV_SAMPLE_FMT_S16,
			codecCtx_->sample_rate,
			dec_channel_layout,
			codecCtx_->sample_fmt,
			codecCtx_->sample_rate,
			0,
			NULL);

	if (!swrCtx_ || swr_init(swrCtx_) < 0) {
		ERROR_LOG(ME, "swr_init: Failed to initialize the resampling context");
		avcodec_close(codecCtx_);
		codec_ = 0;
		return;
	}
}
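The constructor above only prepares the converter; as the comment says, it exists to turn the decoder's float output into interleaved 16-bit samples. A minimal sketch of the matching conversion step, assuming a decoded frame and a caller-provided stereo S16 buffer (this helper is illustrative, not PPSSPP's code):

extern "C" {
#include <libavutil/frame.h>
#include <libswresample/swresample.h>
}
#include <stdint.h>

// Illustrative: convert one decoded frame to interleaved S16.
// outSamples must have room for frame->nb_samples stereo samples.
static int ConvertFrameToS16(SwrContext *swrCtx, const AVFrame *frame, int16_t *outSamples) {
	uint8_t *out[1] = { (uint8_t *)outSamples };
	return swr_convert(swrCtx, out, frame->nb_samples,
	                   (const uint8_t **)frame->extended_data, frame->nb_samples);
}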
Example #4
0
static void audio_convert(dtaudio_decoder_t *decoder, AVFrame * dst,
                          AVFrame * src)
{
    int nb_sample;
    int dst_buf_size;
    int out_channels;
    //for audio post processor
    //struct SwsContext *m_sws_ctx = NULL;
    struct SwrContext *m_swr_ctx = NULL;
    //ResampleContext *m_resample_ctx=NULL;
    enum AVSampleFormat src_fmt = avctxp->sample_fmt;
    enum AVSampleFormat dst_fmt = AV_SAMPLE_FMT_S16;

    *dst = *src;

    dst->data[0] = NULL;
    out_channels = decoder->para.dst_channels;
    nb_sample = src->nb_samples;
    dst_buf_size = nb_sample * av_get_bytes_per_sample(dst_fmt) * out_channels;
    dst->data[0] = (uint8_t *) av_malloc(dst_buf_size);

    avcodec_fill_audio_frame(dst, out_channels, dst_fmt, dst->data[0], dst_buf_size,
                             0);
    dt_debug(TAG, "SRCFMT:%d dst_fmt:%d \n", src_fmt, dst_fmt);
    /* resample to AV_SAMPLE_FMT_S16 */
    if (src_fmt != dst_fmt || out_channels != decoder->para.channels) {
        if (!m_swr_ctx) {
            uint64_t in_channel_layout = av_get_default_channel_layout(avctxp->channels);
            uint64_t out_channel_layout = av_get_default_channel_layout(out_channels);
            m_swr_ctx = swr_alloc_set_opts(NULL, out_channel_layout, dst_fmt,
                                           avctxp->sample_rate, in_channel_layout, src_fmt, avctxp->sample_rate, 0, NULL);
            swr_init(m_swr_ctx);
        }
        uint8_t **out = (uint8_t **) & dst->data;
        const uint8_t **in = (const uint8_t **) src->extended_data;
        if (m_swr_ctx) {
            int ret, out_count;
            out_count = nb_sample;
            ret = swr_convert(m_swr_ctx, out, out_count, in, nb_sample);
            if (ret < 0) {
                //set audio mute
                memset(dst->data[0], 0, dst_buf_size);
                printf("audio convert failed, set mute data\n");
            }
        }
    } else {                    // no need to convert, just copy
        memcpy(dst->data[0], src->data[0], src->linesize[0]);
    }
    //free context
    if (m_swr_ctx != NULL) {
        swr_free(&m_swr_ctx);
    }
    //if(m_resample_ctx!=NULL)
    //    audio_resample_close(m_resample_ctx);
}
Example #5
0
    /**
     * Initialize the audio resampler based on the input and output codec settings.
     * If the input and output sample formats differ, a conversion is required;
     * libswresample takes care of this, but requires initialization.
     */
    int AudioDecoder::init_resampler(AVCodecContext *input_codec_context,
            AVCodecContext *output_codec_context)
    {
        int error;

        /**
         * Create a resampler context for the conversion.
         * Set the conversion parameters.
         * Default channel layouts based on the number of channels
         * are assumed for simplicity (they are sometimes not detected
         * properly by the demuxer and/or decoder).
         */
        resample_context = swr_alloc_set_opts(NULL,
                av_get_default_channel_layout(output_codec_context->channels),
                output_codec_context->sample_fmt,
                output_codec_context->sample_rate,
                av_get_default_channel_layout(input_codec_context->channels),
                input_codec_context->sample_fmt,
                input_codec_context->sample_rate,
                0, NULL);

        if (!resample_context) {
            ELOG_WARN( "Could not allocate resample context\n");
            return AVERROR(ENOMEM);
        }


        /**
         * Sanity note: the number of converted samples equals the number of
         * input samples only when the input and output sample rates match.
         * If the rates differ, this case has to be handled differently
         * (see the sketch after this example).
         */

        ELOG_DEBUG( "audio input sample_rate = %d, out %d", input_codec_context->sample_rate, output_codec_context->sample_rate);

        /** Open the resampler with the specified parameters. */
        if ((error = swr_init(resample_context)) < 0) {
            ELOG_WARN( "Could not open resample context");
            swr_free(&resample_context);
            return error;
        }

        ELOG_DEBUG( "swr_init done");

        return 0;
    }
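The dangling comment above is about output sizing: when the input and output sample rates differ, one input frame does not convert into the same number of output samples. Below is a hedged sketch of the usual upper-bound calculation with libswresample (Example #28 later in this list does the same arithmetic with avresample_get_delay()); the helper is illustrative only.

extern "C" {
#include <libavutil/mathematics.h>
#include <libswresample/swresample.h>
}

/* Illustrative: upper bound on the number of converted samples produced for
 * one input frame, including samples still buffered inside the resampler. */
static int max_output_samples(SwrContext *swr, int in_nb_samples,
                              int in_sample_rate, int out_sample_rate)
{
    return (int)av_rescale_rnd(swr_get_delay(swr, in_sample_rate) + in_nb_samples,
                               out_sample_rate, in_sample_rate, AV_ROUND_UP);
}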
Example #6
0
    kxMovieError openAudioStream(size_t audioStream)
    {
        AVCodecContext *codecCtx = _formatCtx->streams[audioStream]->codec;
        SwrContext *swrContext = NULL;
        
        AVCodec *codec = avcodec_find_decoder(codecCtx->codec_id);
        if(!codec)
            return kxMovieErrorCodecNotFound;
        
        if (avcodec_open2(codecCtx, codec, NULL) < 0)
            return kxMovieErrorOpenCodec;
        

        
        if (!audioCodecIsSupported(codecCtx))
        {
            swrContext = swr_alloc_set_opts(NULL,
                                            av_get_default_channel_layout(codecCtx->channels),
                                            AV_SAMPLE_FMT_S16,
                                            codecCtx->sample_rate,
                                            av_get_default_channel_layout(codecCtx->channels),
                                            codecCtx->sample_fmt,
                                            codecCtx->sample_rate,
                                            0,
                                            NULL);
            
            if (!swrContext ||
                swr_init(swrContext)) {
                
                if (swrContext)
                    swr_free(&swrContext);
                avcodec_close(codecCtx);
                return kxMovieErroReSampler;
            }
        }
        
        _audioFrame = av_frame_alloc();
        
        if (!_audioFrame) {
            if (swrContext)
                swr_free(&swrContext);
            avcodec_close(codecCtx);
            return kxMovieErrorAllocateFrame;
        }
        
        _audioStream = audioStream;
        _audioCodecCtx = codecCtx;
        _swrContext = swrContext;
        return kxMovieErrorNone; 
    }
Example #7
0
void COMXAudioCodecOMX::BuildChannelMap()
{
  uint64_t layout;
  int bits = count_bits(m_pCodecContext->channel_layout);
  if (bits == m_pCodecContext->channels)
    layout = m_pCodecContext->channel_layout;
  else
  {
    CLog::Log(LOGINFO, "COMXAudioCodecOMX::BuildChannelMap - FFmpeg reported %d channels, but the layout contains %d, ignoring the layout", m_pCodecContext->channels, bits);
    layout = av_get_default_channel_layout(m_pCodecContext->channels);
  }

  m_channelLayout.Reset();

  if (layout & AV_CH_FRONT_LEFT           ) m_channelLayout += AE_CH_FL  ;
  if (layout & AV_CH_FRONT_RIGHT          ) m_channelLayout += AE_CH_FR  ;
  if (layout & AV_CH_FRONT_CENTER         ) m_channelLayout += AE_CH_FC  ;
  if (layout & AV_CH_LOW_FREQUENCY        ) m_channelLayout += AE_CH_LFE ;
  if (layout & AV_CH_BACK_LEFT            ) m_channelLayout += AE_CH_BL  ;
  if (layout & AV_CH_BACK_RIGHT           ) m_channelLayout += AE_CH_BR  ;
  if (layout & AV_CH_FRONT_LEFT_OF_CENTER ) m_channelLayout += AE_CH_FLOC;
  if (layout & AV_CH_FRONT_RIGHT_OF_CENTER) m_channelLayout += AE_CH_FROC;
  if (layout & AV_CH_BACK_CENTER          ) m_channelLayout += AE_CH_BC  ;
  if (layout & AV_CH_SIDE_LEFT            ) m_channelLayout += AE_CH_SL  ;
  if (layout & AV_CH_SIDE_RIGHT           ) m_channelLayout += AE_CH_SR  ;
  if (layout & AV_CH_TOP_CENTER           ) m_channelLayout += AE_CH_TC  ;
  if (layout & AV_CH_TOP_FRONT_LEFT       ) m_channelLayout += AE_CH_TFL ;
  if (layout & AV_CH_TOP_FRONT_CENTER     ) m_channelLayout += AE_CH_TFC ;
  if (layout & AV_CH_TOP_FRONT_RIGHT      ) m_channelLayout += AE_CH_TFR ;
  if (layout & AV_CH_TOP_BACK_LEFT        ) m_channelLayout += AE_CH_BL  ;
  if (layout & AV_CH_TOP_BACK_CENTER      ) m_channelLayout += AE_CH_BC  ;
  if (layout & AV_CH_TOP_BACK_RIGHT       ) m_channelLayout += AE_CH_BR  ;
}
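count_bits() is not shown in this excerpt; it just counts the set bits of the 64-bit layout mask so the result can be compared with the reported channel count. A minimal stand-in is sketched below (libavutil's av_get_channel_layout_nb_channels() would serve the same purpose).

#include <stdint.h>

// Illustrative popcount over a channel-layout mask.
static int count_bits(uint64_t value)
{
  int bits = 0;
  for (; value; value >>= 1)
    bits += (int)(value & 1);
  return bits;
}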
Example #8
0
void Parser::InitResampler()
{
    swr_ctx = swr_alloc_set_opts(NULL,
                                 av_get_default_channel_layout(cdc_ctx_out->channels),
                                 cdc_ctx_out->sample_fmt,
                                 cdc_ctx_out->sample_rate,
                                 channelLayoutIn,
                                 (AVSampleFormat)sampleFormatIn,
                                 sampleRateIn,
                                 0,0);

    if(!swr_ctx)
        throw ContextCreatorException() << errno_code(MIR_ERR_ALLOC_SWR_CONTEXT);

    // set options (these duplicate what swr_alloc_set_opts() already configured
    // above; SwrContext has no bit-rate option, so none is set here)
    av_opt_set_int(swr_ctx, "in_channel_layout",    channelLayoutIn, 0);
    av_opt_set_int(swr_ctx, "in_sample_rate",       sampleRateIn, 0);
    av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", (AVSampleFormat)sampleFormatIn, 0);

    av_opt_set_int(swr_ctx, "out_channel_layout",    cdc_ctx_out->channel_layout, 0);
    av_opt_set_int(swr_ctx, "out_sample_rate",       cdc_ctx_out->sample_rate, 0);
    av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", cdc_ctx_out->sample_fmt, 0);

    if (swr_init(swr_ctx) < 0)
        throw ContextCreatorException() << errno_code(MIR_ERR_INIT_SWR_CONTEXT);
}
Example #9
0
void Parser::setStream()
{
    int error = 0;

    AVCodec *codec = NULL;
    AVCodecID codecID = getCodecID();

    codec = avcodec_find_encoder(codecID);
    fmt_ctx_out->audio_codec = codec;
    stm_out = avformat_new_stream(fmt_ctx_out, codec);

    if (!stm_out)
        throw StreamException() << errno_code(MIR_ERR_OPEN_STREAM_1);

    cdc_ctx_out = stm_out->codec;
    cdc_ctx_out->codec = codec;
    cdc_ctx_out->codec_id = codecID;
    cdc_ctx_out->codec_type = AVMEDIA_TYPE_AUDIO;

    cdc_ctx_out->sample_fmt = getSampleFormat(codecID);

    if (!isVBR)
        cdc_ctx_out->bit_rate = bitRate;
    else
    {
        cdc_ctx_out->rc_max_rate = 0;
        cdc_ctx_out->rc_min_rate = 0;
        cdc_ctx_out->bit_rate_tolerance = bitRate;
        cdc_ctx_out->bit_rate = bitRate;
    }

    cdc_ctx_out->sample_rate = sampleRate;
    cdc_ctx_out->channels = nbChannel;
    cdc_ctx_out->channel_layout = av_get_default_channel_layout(nbChannel);

    error = avcodec_open2(cdc_ctx_out, codec, NULL);

    if (error < 0)
        throw StreamException() << errno_code(MIR_ERR_OPEN_STREAM_2);

    if (codecID == AV_CODEC_ID_PCM_S16LE || codecID == AV_CODEC_ID_MP3)
        cdc_ctx_out->frame_size = av_rescale_rnd(nbSamplesIn,
                                                 cdc_ctx_out->sample_rate, sampleRateIn, AV_ROUND_UP);
    else if (codecID == AV_CODEC_ID_AAC)
    {
        cdc_ctx_out->profile = FF_PROFILE_AAC_LOW;
        cdc_ctx_out->frame_size = 1024;

        // some formats want stream headers to be separate
        if(fmt_ctx_out->oformat->flags & AVFMT_GLOBALHEADER)
            cdc_ctx_out->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }

    cdc_out = codec;

    if (audioFormat == AUDIOFORMAT::arq)
    {
        fmt_ctx_out->oformat->flags |= AVFMT_ALLOW_FLUSH;
    }
}
Example #10
0
EC_U32 AudioWaveScale::Init(MediaCtxInfo* pMediaInfo, AudioPCMBuffer *pFirstFrame)
{
    if (EC_NULL == pMediaInfo)
        return Audio_Render_Err_InitFail;

    EC_S32 out_sample_rate = pMediaInfo->m_nSampleRate;
    EC_S64 out_channel_layout = AV_CH_LAYOUT_STEREO;
    EC_S32 out_channels = av_get_channel_layout_nb_channels(out_channel_layout);
    AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S16;

    AVCodecContext *pCodecCtx = (AVCodecContext*)(pMediaInfo->m_pAudioCodecInfo);
    EC_S64 in_channel_layout = av_get_default_channel_layout(pCodecCtx->channels);
    EC_S32 in_sample_rate = pCodecCtx->sample_rate;
    AVSampleFormat in_sample_fmt = pCodecCtx->sample_fmt;

    m_nOutChannels = out_channels;
    m_nOutSampleFormat = out_sample_fmt;

    m_pWaveScaleContext = swr_alloc();
    m_pWaveScaleContext = swr_alloc_set_opts(m_pWaveScaleContext, 
                                             out_channel_layout,
                                             out_sample_fmt,
                                             out_sample_rate,
                                             in_channel_layout, 
                                             in_sample_fmt, 
                                             in_sample_rate, 0, NULL);
    EC_S32 nRet = swr_init(m_pWaveScaleContext);
    if (nRet < 0) return Audio_Render_Err_InitFail;

    m_pScaleOutbuffer = (uint8_t *)av_malloc(MAX_AUDIO_FRAME_SIZE * 2);
    if (m_pScaleOutbuffer == EC_NULL) return EC_Err_Memory_Low;

    return Audio_Render_Err_None;
}
Example #11
0
void AVFile::open(const char *url)
{
    if (formatCtx)
        throw AVException("Programming error: i already did it");

    if (avformat_open_input(&formatCtx, url, 0, 0) < 0)
        throw AVException("Unable to open media");

    if (avformat_find_stream_info(formatCtx, 0) < 0)
        throw AVException("Unable to find streams in media");

    AVCodec *codec;
    audioStream = av_find_best_stream(formatCtx, AVMEDIA_TYPE_AUDIO, -1, -1, &codec, 0);
    if (audioStream < 0)
        throw AVException("No audio stream found");    

    codecCtx = formatCtx->streams[audioStream]->codec;
    if (avcodec_open2(codecCtx, codec, 0) < 0)
        throw AVException("Could not open codec");

    if (!codecCtx->channel_layout)
        codecCtx->channel_layout = av_get_default_channel_layout(codecCtx->channels);

    _updateSWR();
}
Example #12
0
/*
* ZSL
* Despite the name "decode", this does not actually decode anything: it takes an
* af (audio Frame) out of is->sampq, runs the data in af->frame (an AVFrame)
* through swr_convert(), stores the result in is->audio_buf, and returns the
* number of bytes stored (i.e. the size after resampling).
*/
int audio_decode_frame(VideoState *is) {
	int resampled_data_size,out_size;
	Frame *af;
	af = frame_queue_peek_readable(&is->sampq);
	frame_queue_next(&is->sampq);
	if (!is->swr_ctx) {
		is->swr_ctx = swr_alloc_set_opts(NULL,
			AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, is->audio_ctx->sample_rate,
			av_get_default_channel_layout(is->audio_ctx->channels), is->audio_ctx->sample_fmt, is->audio_ctx->sample_rate,
			0, NULL);
		swr_init(is->swr_ctx);
	}
	const uint8_t **in = (const uint8_t **)af->frame->extended_data;
	uint8_t **out = &is->audio_buf;
	out_size = av_samples_get_buffer_size(NULL, 2, af->frame->nb_samples, AV_SAMPLE_FMT_S16, 1);
	if (out_size < 0) { /* e.g. when af->frame->nb_samples == 0; this has to be handled here, otherwise av_fast_malloc() below goes wrong */
		av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
		return -1;
	}
	int len2;
	av_fast_malloc(&is->audio_buf, &is->audio_buf_size, out_size);
	len2 = swr_convert(is->swr_ctx, out, af->frame->nb_samples, in, af->frame->nb_samples);
	resampled_data_size = len2 * 2 * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
	return resampled_data_size;
}
Example #13
0
void CEncoder::audio_check_channel_layout(AVCodec *codec)
{
	if (codec->channel_layouts == NULL)
	{
		audio_channel_layout = av_get_default_channel_layout(audio_channels);
		return ;
	}
	
	for(int i = 0; codec->channel_layouts[i]; i++)
	{
		if (av_get_channel_layout_nb_channels(codec->channel_layouts[i]) == audio_channels)
		{
			audio_channel_layout = codec->channel_layouts[i];
			return ;
		}
	}

	for(int i = 0; codec->channel_layouts[i]; i++)
	{
		if (av_get_channel_layout_nb_channels(codec->channel_layouts[i]) > audio_channels)
		{
			audio_channels = av_get_channel_layout_nb_channels(codec->channel_layouts[i]);
			audio_channel_layout = codec->channel_layouts[i];
			return ;
		}
	}

	audio_channel_layout = codec->channel_layouts[0];
	audio_channels = av_get_channel_layout_nb_channels(audio_channel_layout);
}
Example #14
0
static int64_t get_channel_layout_single(const char *name, int name_len)
{
    int i;
    char *end;
    int64_t layout;

    for (i = 0; i < FF_ARRAY_ELEMS(channel_layout_map) - 1; i++) {
        if (strlen(channel_layout_map[i].name) == name_len &&
            !memcmp(channel_layout_map[i].name, name, name_len))
            return channel_layout_map[i].layout;
    }
    for (i = 0; i < FF_ARRAY_ELEMS(channel_names); i++)
        if (channel_names[i] &&
            strlen(channel_names[i]) == name_len &&
            !memcmp(channel_names[i], name, name_len))
            return (int64_t)1 << i;
    i = strtol(name, &end, 10);
    if (end - name == name_len ||
        (end + 1 - name == name_len && *end  == 'c'))
        return av_get_default_channel_layout(i);
    layout = strtoll(name, &end, 0);
    if (end - name == name_len)
        return FFMAX(layout, 0);
    return 0;
}
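This helper matches FFmpeg's internal single-token parser behind channel-layout names: it accepts a known layout name ("stereo", "5.1", ...), a single channel name, a bare channel count with an optional 'c' suffix (mapped through av_get_default_channel_layout()), or a numeric mask. A small usage sketch through the public wrapper av_get_channel_layout():

#include <inttypes.h>
#include <stdio.h>
#include <libavutil/channel_layout.h>

int main(void)
{
    const char *names[] = { "stereo", "5.1", "2c", "0x3f" };
    for (int i = 0; i < 4; i++)
        printf("%-6s -> 0x%"PRIx64"\n", names[i], av_get_channel_layout(names[i]));
    return 0;
}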
Example #15
0
static int ffat_update_ctx(AVCodecContext *avctx)
{
    ATDecodeContext *at = avctx->priv_data;
    AudioStreamBasicDescription format;
    UInt32 size = sizeof(format);
    if (!AudioConverterGetProperty(at->converter,
                                   kAudioConverterCurrentInputStreamDescription,
                                   &size, &format)) {
        if (format.mSampleRate)
            avctx->sample_rate = format.mSampleRate;
        avctx->channels = format.mChannelsPerFrame;
        avctx->channel_layout = av_get_default_channel_layout(avctx->channels);
        avctx->frame_size = format.mFramesPerPacket;
    }

    if (!AudioConverterGetProperty(at->converter,
                                   kAudioConverterCurrentOutputStreamDescription,
                                   &size, &format)) {
        format.mSampleRate = avctx->sample_rate;
        format.mChannelsPerFrame = avctx->channels;
        AudioConverterSetProperty(at->converter,
                                  kAudioConverterCurrentOutputStreamDescription,
                                  size, &format);
    }

    if (!AudioConverterGetPropertyInfo(at->converter, kAudioConverterOutputChannelLayout,
                                       &size, NULL) && size) {
        AudioChannelLayout *layout = av_malloc(size);
        uint64_t layout_mask = 0;
        int i;
        if (!layout)
            return AVERROR(ENOMEM);
        AudioConverterGetProperty(at->converter, kAudioConverterOutputChannelLayout,
                                  &size, layout);
        if (!(layout = ffat_convert_layout(layout, &size)))
            return AVERROR(ENOMEM);
        for (i = 0; i < layout->mNumberChannelDescriptions; i++) {
            int id = ffat_get_channel_id(layout->mChannelDescriptions[i].mChannelLabel);
            if (id < 0)
                goto done;
            if (layout_mask & (1 << id))
                goto done;
            layout_mask |= 1 << id;
            layout->mChannelDescriptions[i].mChannelFlags = i; // Abusing flags as index
        }
        avctx->channel_layout = layout_mask;
        qsort(layout->mChannelDescriptions, layout->mNumberChannelDescriptions,
              sizeof(AudioChannelDescription), &ffat_compare_channel_descriptions);
        for (i = 0; i < layout->mNumberChannelDescriptions; i++)
            at->channel_map[i] = layout->mChannelDescriptions[i].mChannelFlags;
done:
        av_free(layout);
    }

    if (!avctx->frame_size)
        avctx->frame_size = 2048;

    return 0;
}
Example #16
0
/**
 * Initialize the audio resampler based on the input and output codec settings.
 * If the input and output sample formats differ, a conversion is required;
 * libavresample takes care of this, but requires initialization.
 */
static int init_resampler(AVCodecContext *input_codec_context,
                          AVCodecContext *output_codec_context,
                          AVAudioResampleContext **resample_context)
{
    /**
     * Only initialize the resampler if it is necessary, i.e.,
     * if and only if the sample formats differ.
     */
    if (input_codec_context->sample_fmt != output_codec_context->sample_fmt ||
        input_codec_context->channels != output_codec_context->channels) {
        int error;

        /** Create a resampler context for the conversion. */
        if (!(*resample_context = avresample_alloc_context())) {
            fprintf(stderr, "Could not allocate resample context\n");
            return AVERROR(ENOMEM);
        }

        /**
         * Set the conversion parameters.
         * Default channel layouts based on the number of channels
         * are assumed for simplicity (they are sometimes not detected
         * properly by the demuxer and/or decoder).
         */
        av_opt_set_int(*resample_context, "in_channel_layout",
                       av_get_default_channel_layout(input_codec_context->channels), 0);
        av_opt_set_int(*resample_context, "out_channel_layout",
                       av_get_default_channel_layout(output_codec_context->channels), 0);
        av_opt_set_int(*resample_context, "in_sample_rate",
                       input_codec_context->sample_rate, 0);
        av_opt_set_int(*resample_context, "out_sample_rate",
                       output_codec_context->sample_rate, 0);
        av_opt_set_int(*resample_context, "in_sample_fmt",
                       input_codec_context->sample_fmt, 0);
        av_opt_set_int(*resample_context, "out_sample_fmt",
                       output_codec_context->sample_fmt, 0);

        /** Open the resampler with the specified parameters. */
        if ((error = avresample_open(*resample_context)) < 0) {
            fprintf(stderr, "Could not open resample context\n");
            avresample_free(resample_context);
            return error;
        }
    }
    return 0;
}
Example #17
0
static bool default_acodec_setup(struct codec_ent* dst, unsigned channels,
	unsigned samplerate, unsigned abr)
{
	AVCodecContext* ctx = dst->storage.audio.context;
	AVCodec* codec = dst->storage.audio.codec;

	assert(channels == 2);
	assert(samplerate > 0 && samplerate <= 48000);
	assert(codec);

	ctx->channels       = channels;
	ctx->channel_layout = av_get_default_channel_layout(channels);

	ctx->sample_rate    = samplerate;
	ctx->time_base      = av_d2q(1.0 / (double) samplerate, 1000000);
	ctx->sample_fmt     = codec->sample_fmts[0];

/* kept for documentation, now we assume the swr_convert just
 * handles all sample formats for us,
 * if we prefer or require a certain one in the future, the following code
 * shows how:
	for (int i = 0; codec->sample_fmts[i] != -1 && ctx->sample_fmt ==
	AV_SAMPLE_FMT_NONE; i++){
		if (codec->sample_fmts[i] == AV_SAMPLE_FMT_S16 ||
		codec->sample_fmts[i] == AV_SAMPLE_FMT_FLTP)
			ctx->sample_fmt = codec->sample_fmts[i];
	}

	if (ctx->sample_fmt == AV_SAMPLE_FMT_NONE){
		LOG("(encoder) Couldn't find supported matching sample format for
		codec, giving up.\n");
		return false;
	}
*/

/* rough quality estimate */
	if (abr <= 10)
		ctx->bit_rate = 1024 * ( 320 - 240 * ((float)(11.0 - abr) / 11.0) );
	else
		ctx->bit_rate = abr;

/* AAC may need this */
	ctx->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;

	LOG("(encode) audio encoder setup @ %d hz, requested quality: %d, "
		"got %d kbit/s using %s\n", samplerate, abr,
		ctx->bit_rate / 1000, codec->name);

	if (avcodec_open2(dst->storage.audio.context,
		dst->storage.audio.codec, NULL) != 0){
		avcodec_close(dst->storage.audio.context);
		dst->storage.audio.context = NULL;
		dst->storage.audio.codec   = NULL;
		return false;
	}

	return true;
}
Example #18
0
static int encode_init(AVCodecContext *avctx)
{
    DCAContext *c = avctx->priv_data;
    int i;
    uint64_t layout = avctx->channel_layout;

    c->prim_channels = avctx->channels;
    c->lfe_channel   = (avctx->channels == 3 || avctx->channels == 6);

    if (!layout) {
        av_log(avctx, AV_LOG_WARNING, "No channel layout specified. The "
                                      "encoder will guess the layout, but it "
                                      "might be incorrect.\n");
        layout = av_get_default_channel_layout(avctx->channels);
    }
    switch (layout) {
    case AV_CH_LAYOUT_STEREO:       c->a_mode = 2; c->num_channel = 2; break;
    case AV_CH_LAYOUT_5POINT0:      c->a_mode = 9; c->num_channel = 9; break;
    case AV_CH_LAYOUT_5POINT1:      c->a_mode = 9; c->num_channel = 9; break;
    case AV_CH_LAYOUT_5POINT0_BACK: c->a_mode = 9; c->num_channel = 9; break;
    case AV_CH_LAYOUT_5POINT1_BACK: c->a_mode = 9; c->num_channel = 9; break;
    default:
        av_log(avctx, AV_LOG_ERROR,
               "Only stereo, 5.0, 5.1 channel layouts supported at the moment!\n");
        return AVERROR_PATCHWELCOME;
    }

    if (c->lfe_channel) {
        init_lfe_fir();
        c->prim_channels--;
        c->channel_order_tab = dca_channel_reorder_lfe[c->a_mode];
        c->lfe_state         = LFE_PRESENT;
        c->lfe_offset        = dca_lfe_index[c->a_mode];
    } else {
        c->channel_order_tab = dca_channel_reorder_nolfe[c->a_mode];
        c->lfe_state         = LFE_MISSING;
    }

    for (i = 0; i < 16; i++) {
        if (avpriv_dca_sample_rates[i] && (avpriv_dca_sample_rates[i] == avctx->sample_rate))
            break;
    }
    if (i == 16) {
        av_log(avctx, AV_LOG_ERROR, "Sample rate %iHz not supported, only ", avctx->sample_rate);
        for (i = 0; i < 16; i++)
            av_log(avctx, AV_LOG_ERROR, "%d, ", avpriv_dca_sample_rates[i]);
        av_log(avctx, AV_LOG_ERROR, "supported.\n");
        return -1;
    }
    c->sample_rate_code = i;

    avctx->frame_size = 32 * PCM_SAMPLES;

    if (!cos_table[127])
        qmf_init();
    return 0;
}
Example #19
0
void AudioResampleImpl::initResampler()
{
    m_resampler.reset(swr_alloc(), [](SwrContext * context){
        swr_free(&context);
    });

    assert(m_from.isValid());
    assert(m_to.isValid());

    av_opt_set_int(m_resampler.get(), "in_channel_layout", av_get_default_channel_layout(m_from.channelCount()), 0); /// @todo also carry over the exact channel layout from AudioFormat
    av_opt_set_int(m_resampler.get(), "out_channel_layout", av_get_default_channel_layout(m_to.channelCount()), 0);
    av_opt_set_int(m_resampler.get(), "in_sample_rate", m_from.sampleRate(), 0);
    av_opt_set_int(m_resampler.get(), "out_sample_rate", m_to.sampleRate(), 0);
    av_opt_set_sample_fmt(m_resampler.get(), "in_sample_fmt", m_from.sampleFormat(), 0);
    av_opt_set_sample_fmt(m_resampler.get(), "out_sample_fmt", m_to.sampleFormat(), 0); /// non-planar output
    int res = swr_init(m_resampler.get());
    if (res != 0) throw FFException(res);
}
Example #20
0
void CBDDemuxer::ProcessClipInfo(CLPI_CL *clpi)
{
  if (!clpi) { return; }
  for (int k = 0; k < clpi->program.num_prog; k++) {
    for (int i = 0; i < clpi->program.progs[k].num_streams; i++) {
      CLPI_PROG_STREAM *stream = &clpi->program.progs[k].streams[i];
      AVStream *avstream = m_lavfDemuxer->GetAVStreamByPID(stream->pid);
      if (!avstream) {
        DbgLog((LOG_TRACE, 10, "CBDDemuxer::ProcessStreams(): Stream with PID 0x%04x not found, trying to add it..", stream->pid));
        m_lavfDemuxer->AddMPEGTSStream(stream->pid, stream->coding_type);
        avstream = m_lavfDemuxer->GetAVStreamByPID(stream->pid);
      }
      if (avstream) {
        if (stream->lang[0] != 0)
          av_dict_set(&avstream->metadata, "language", (const char *)stream->lang, 0);
        if (avstream->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
          if (avstream->codec->width == 0 || avstream->codec->height == 0) {
            switch(stream->format) {
            case BLURAY_VIDEO_FORMAT_480I:
            case BLURAY_VIDEO_FORMAT_480P:
              avstream->codec->width = 720;
              avstream->codec->height = 480;
              break;
            case BLURAY_VIDEO_FORMAT_576I:
            case BLURAY_VIDEO_FORMAT_576P:
              avstream->codec->width = 720;
              avstream->codec->height = 576;
              break;
            case BLURAY_VIDEO_FORMAT_720P:
              avstream->codec->width = 1280;
              avstream->codec->height = 720;
              break;
            case BLURAY_VIDEO_FORMAT_1080I:
            case BLURAY_VIDEO_FORMAT_1080P:
            default:
              avstream->codec->width = 1920;
              avstream->codec->height = 1080;
              break;
            }
          }
        } else if (avstream->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
          if (avstream->codec->channels == 0) {
            avstream->codec->channels = (stream->format == BLURAY_AUDIO_FORMAT_MONO) ? 1 : (stream->format == BLURAY_AUDIO_FORMAT_STEREO) ? 2 : 6;
            avstream->codec->channel_layout = av_get_default_channel_layout(avstream->codec->channels);
            avstream->codec->sample_rate = (stream->rate == BLURAY_AUDIO_RATE_96||stream->rate == BLURAY_AUDIO_RATE_96_COMBO) ? 96000 : (stream->rate == BLURAY_AUDIO_RATE_192||stream->rate == BLURAY_AUDIO_RATE_192_COMBO) ? 192000 : 48000;
            if (avstream->codec->codec_id == AV_CODEC_ID_DTS) {
              if (stream->coding_type == BLURAY_STREAM_TYPE_AUDIO_DTSHD)
                avstream->codec->profile = FF_PROFILE_DTS_HD_HRA;
              else if (stream->coding_type == BLURAY_STREAM_TYPE_AUDIO_DTSHD_MASTER)
                avstream->codec->profile = FF_PROFILE_DTS_HD_MA;
            }
          }
        }
      }
    }
  }
}
Example #21
0
void XAudioStream::setSpeed(float speed)
{
	if(m_speed == speed || speed <= 0.0f) return;
	m_speed = speed;
	swr_free(&m_pSwrContext);
	m_pSwrContext = swr_alloc();
	if(m_pSwrContext == NULL) return;
	if(m_pAudioCodecCtx->channel_layout == 0)
	{
		swr_alloc_set_opts(m_pSwrContext,av_get_default_channel_layout(XEG.getAudioChannelSum()),getSampleFormat(),XEG.getAudioSampleRate() * m_speed,
			av_get_default_channel_layout(m_pAudioCodecCtx->channels),m_pAudioCodecCtx->sample_fmt,m_pAudioCodecCtx->sample_rate,0,NULL);
	}else
	{
		swr_alloc_set_opts(m_pSwrContext,av_get_default_channel_layout(XEG.getAudioChannelSum()),getSampleFormat(),XEG.getAudioSampleRate() * m_speed,
			m_pAudioCodecCtx->channel_layout,m_pAudioCodecCtx->sample_fmt,m_pAudioCodecCtx->sample_rate,0,NULL);
	}
	if(swr_init(m_pSwrContext) < 0)
	{
		LogStr("swr_init() fail");
		return;
	}
}
Example #22
0
uint64_t COMXAudioCodecOMX::GetChannelMap()
{
  uint64_t layout;
  int bits = count_bits(m_pCodecContext->channel_layout);
  if (bits == m_pCodecContext->channels)
    layout = m_pCodecContext->channel_layout;
  else
  {
    CLog::Log(LOGINFO, "COMXAudioCodecOMX::GetChannelMap - FFmpeg reported %d channels, but the layout contains %d, ignoring the layout", m_pCodecContext->channels, bits);
    layout = av_get_default_channel_layout(m_pCodecContext->channels);
  }
  return layout;
}
Example #23
0
int AudioResampler::InitAudioResampling(IN const std::shared_ptr<MediaFrame>& _pFrame)
{
    // the fdkaac encoder expects signed 16-bit PCM input; resample if the frame is in any other format
    if (_pFrame->AvFrame()->format != sampleFormat) {
#ifdef LOG_TRADITIONAL
        loginfo("input sample_format=%d, need sample_format=%d, initiate resampling",
#else
        loginfo("input sample_format={} need sample_format={}, initiate resampling",
#endif
            _pFrame->AvFrame()->format, sampleFormat);
        pSwr_ = swr_alloc();
        av_opt_set_int(pSwr_, "in_channel_layout", av_get_default_channel_layout(_pFrame->AvFrame()->channels), 0);
        av_opt_set_int(pSwr_, "out_channel_layout", av_get_default_channel_layout(nChannel), 0);
        av_opt_set_int(pSwr_, "in_sample_rate", _pFrame->AvFrame()->sample_rate, 0);
        av_opt_set_int(pSwr_, "out_sample_rate", nSampleRate, 0);
        av_opt_set_sample_fmt(pSwr_, "in_sample_fmt", static_cast<AVSampleFormat>(_pFrame->AvFrame()->format), 0);
        av_opt_set_sample_fmt(pSwr_, "out_sample_fmt", sampleFormat, 0);
        if (swr_init(pSwr_) != 0) {
            logerror("could not initiate resampling");
            return -1;
        }
    }

    return 0;
}
Example #24
0
void AVFile::_updateSWR()
{
    if (swrCtx) {
        swr_free(&swrCtx);
    }

    if (!_channels || !_sample_rate || !codecCtx)
        return;

    if (codecCtx->channel_layout != (uint64_t)av_get_default_channel_layout(_channels) ||
        codecCtx->sample_fmt != AV_SAMPLE_FMT_FLT ||
        codecCtx->sample_rate != (int)_sample_rate)
    {
        swrCtx = swr_alloc_set_opts(0, av_get_default_channel_layout(_channels), AV_SAMPLE_FMT_FLT, _sample_rate,
                                    codecCtx->channel_layout, codecCtx->sample_fmt, codecCtx->sample_rate,
                                    0, 0);

        if (!swrCtx)
            throw AVException("Unable to allocate swresample context");

        swr_init(swrCtx);
    }
}
Example #25
0
void FillAP(FFMS_AudioProperties &AP, AVCodecContext *CTX, FFMS_Track &Frames) {
	AP.SampleFormat = static_cast<FFMS_SampleFormat>(av_get_packed_sample_fmt(CTX->sample_fmt));
	AP.BitsPerSample = av_get_bytes_per_sample(CTX->sample_fmt) * 8;
	AP.Channels = CTX->channels;
	AP.ChannelLayout = CTX->channel_layout;
	AP.SampleRate = CTX->sample_rate;
	if (!Frames.empty()) {
		AP.NumSamples = (Frames.back()).SampleStart + (Frames.back()).SampleCount;
		AP.FirstTime = ((Frames.front().PTS * Frames.TB.Num) / (double)Frames.TB.Den) / 1000;
		AP.LastTime = ((Frames.back().PTS * Frames.TB.Num) / (double)Frames.TB.Den) / 1000;
	}

	if (AP.ChannelLayout == 0)
		AP.ChannelLayout = av_get_default_channel_layout(AP.Channels);
}
Example #26
0
WAVEFORMATEXTENSIBLE *CLAVFAudioHelper::CreateWFMTEX_RAW_PCM(const AVStream *avstream, ULONG *size, const GUID subtype, ULONG *samplesize)
{
    WAVEFORMATEXTENSIBLE *wfex = (WAVEFORMATEXTENSIBLE *)CoTaskMemAlloc(sizeof(WAVEFORMATEXTENSIBLE));
    if (!wfex)
        return nullptr;
    memset(wfex, 0, sizeof(*wfex));

    WAVEFORMATEX *wfe = &wfex->Format;
    wfe->wFormatTag = (WORD)subtype.Data1;
    wfe->nChannels = avstream->codec->channels;
    wfe->nSamplesPerSec = avstream->codec->sample_rate;
    if (avstream->codec->sample_fmt == AV_SAMPLE_FMT_S32 && avstream->codec->bits_per_raw_sample > 0) {
        wfe->wBitsPerSample = avstream->codec->bits_per_raw_sample > 24 ? 32 : (avstream->codec->bits_per_raw_sample > 16 ? 24 : 16);
    } else {
        wfe->wBitsPerSample = av_get_bytes_per_sample(avstream->codec->sample_fmt) << 3;
    }
    wfe->nBlockAlign = wfe->nChannels * wfe->wBitsPerSample / 8;
    wfe->nAvgBytesPerSec = wfe->nSamplesPerSec * wfe->nBlockAlign;

    DWORD dwChannelMask = 0;
    if((wfe->wBitsPerSample > 16 || wfe->nSamplesPerSec > 48000) && wfe->nChannels <= 2) {
        dwChannelMask = wfe->nChannels == 2 ? (SPEAKER_FRONT_LEFT|SPEAKER_FRONT_RIGHT) : SPEAKER_FRONT_CENTER;
    } else if (wfe->nChannels > 2) {
        dwChannelMask = (DWORD)avstream->codec->channel_layout;
        if (!dwChannelMask) {
            dwChannelMask = (DWORD)av_get_default_channel_layout(wfe->nChannels);
        }
    }

    if(dwChannelMask) {
        wfex->Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
        wfex->Format.cbSize = sizeof(*wfex) - sizeof(wfex->Format);
        wfex->dwChannelMask = dwChannelMask;
        if (avstream->codec->sample_fmt == AV_SAMPLE_FMT_S32 && avstream->codec->bits_per_raw_sample > 0) {
            wfex->Samples.wValidBitsPerSample = avstream->codec->bits_per_raw_sample;
        } else {
            wfex->Samples.wValidBitsPerSample = wfex->Format.wBitsPerSample;
        }
        wfex->SubFormat = subtype;
        *size = sizeof(WAVEFORMATEXTENSIBLE);
    } else {
        *size = sizeof(WAVEFORMATEX);
    }

    *samplesize = wfe->wBitsPerSample * wfe->nChannels / 8;

    return wfex;
}
Example #27
0
uint64_t COMXAudioCodecOMX::GetChannelMap()
{
  uint64_t layout;
  int bits = count_bits(m_pCodecContext->channel_layout);
  if (bits == m_pCodecContext->channels)
    layout = m_pCodecContext->channel_layout;
  else
  {
    LOG_TRACE_2 << "COMXAudioCodecOMX::GetChannelMap - FFmpeg reported " << m_pCodecContext->channels <<
        " channels, but the layout contains " << bits << ", ignoring it";
    layout = av_get_default_channel_layout(m_pCodecContext->channels);
  }


  return layout;
}
Example #28
0
AudioBufferPtr AudioDecoderThread::resampleAudio(char* pDecodedData, int framesDecoded,
        int currentSampleFormat)
{
    if (!m_pResampleContext) {
#ifdef LIBAVRESAMPLE_VERSION
        m_pResampleContext = avresample_alloc_context();
        av_opt_set_int(m_pResampleContext, "in_channel_layout",
                av_get_default_channel_layout(m_pStream->codec->channels), 0);
        av_opt_set_int(m_pResampleContext, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
        av_opt_set_int(m_pResampleContext, "in_sample_rate", m_InputSampleRate, 0);
        av_opt_set_int(m_pResampleContext, "out_sample_rate", m_AP.m_SampleRate, 0);
        av_opt_set_int(m_pResampleContext, "in_sample_fmt",
                (AVSampleFormat)currentSampleFormat, 0);
        av_opt_set_int(m_pResampleContext, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
        int err = avresample_open(m_pResampleContext);
        AVG_ASSERT(err >= 0);
#else
        m_pResampleContext = av_audio_resample_init(m_AP.m_Channels, 
                m_pStream->codec->channels, m_AP.m_SampleRate, m_InputSampleRate,
                AV_SAMPLE_FMT_S16, (AVSampleFormat)currentSampleFormat, 16, 10, 0, 0.8);
#endif
        AVG_ASSERT(m_pResampleContext);
    }
#ifdef LIBAVRESAMPLE_VERSION
    uint8_t *pResampledData;
    int leftoverSamples = avresample_available(m_pResampleContext);
    int framesAvailable = leftoverSamples +
            av_rescale_rnd(avresample_get_delay(m_pResampleContext) +
                    framesDecoded, m_AP.m_SampleRate, m_InputSampleRate, AV_ROUND_UP);
    av_samples_alloc(&pResampledData, 0, 2, framesAvailable,
            AV_SAMPLE_FMT_S16, 0);
    int framesResampled = avresample_convert(m_pResampleContext, &pResampledData, 0, 
            framesAvailable, (uint8_t**)&pDecodedData, 0, framesDecoded);
    AudioBufferPtr pBuffer(new AudioBuffer(framesResampled, m_AP));
    memcpy(pBuffer->getData(), pResampledData, 
            framesResampled*m_AP.m_Channels*sizeof(short));
    av_freep(&pResampledData);
#else
    short pResampledData[AVCODEC_MAX_AUDIO_FRAME_SIZE/2];
    int framesResampled = audio_resample(m_pResampleContext, pResampledData,
            (short*)pDecodedData, framesDecoded);
    AudioBufferPtr pBuffer(new AudioBuffer(framesResampled, m_AP));
    memcpy(pBuffer->getData(), pResampledData, 
            framesResampled*m_AP.m_Channels*sizeof(short));
#endif
    return pBuffer;
}
Example #29
0
void WaveFormatInit(PWAVEFORMATEXTENSIBLE waveformatEx, PLAYERDECODE* decode)
{
	decode->fmt = AV_SAMPLE_FMT_S16;
	int bits = av_get_bytes_per_sample(decode->fmt);

	PWAVEFORMATEX waveformat = &waveformatEx->Format;	
	waveformat->wFormatTag		= WAVE_FORMAT_PCM;
	waveformat->wBitsPerSample	= bits * 8;	
	waveformat->nChannels		= decode->context->channels;	
	waveformat->nBlockAlign		= waveformat->wBitsPerSample * waveformat->nChannels / 8;
	waveformat->nSamplesPerSec	= decode->context->sample_rate;	
	waveformat->nAvgBytesPerSec	= waveformat->nSamplesPerSec * waveformat->nBlockAlign;		
	
	decode->channels = waveformat->nChannels;
	decode->layout = av_get_default_channel_layout(decode->channels);
	decode->bits = bits;
}
Example #30
0
std::shared_ptr<AVFrame> make_av_audio_frame(const core::const_frame& frame, const core::video_format_desc& format_desc)
{
    auto av_frame = alloc_frame();

    const auto& buffer = frame.audio_data();

    // TODO (fix) Use sample_format_desc.
    av_frame->channels       = format_desc.audio_channels;
    av_frame->channel_layout = av_get_default_channel_layout(av_frame->channels);
    av_frame->sample_rate    = format_desc.audio_sample_rate;
    av_frame->format         = AV_SAMPLE_FMT_S32;
    av_frame->nb_samples     = static_cast<int>(buffer.size() / av_frame->channels);
    FF(av_frame_get_buffer(av_frame.get(), 32));
    std::memcpy(av_frame->data[0], buffer.data(), buffer.size() * sizeof(buffer.data()[0]));

    return av_frame;
}