예제 #1
0
/*
 * Despite the name this does NOT decode: it takes the next Frame (af) from
 * is->sampq, runs af->frame's samples through swr_convert() into
 * is->audio_buf, and returns the resampled size in bytes (-1 on error).
 */
int audio_decode_frame(VideoState *is) {
	int resampled_data_size, out_size;
	Frame *af;

	af = frame_queue_peek_readable(&is->sampq);
	if (!af)
		return -1; /* queue aborted/empty */
	frame_queue_next(&is->sampq);

	/* Lazily create the resampler: source layout/format -> stereo S16,
	 * keeping the source sample rate. */
	if (!is->swr_ctx) {
		is->swr_ctx = swr_alloc_set_opts(NULL,
			AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, is->audio_ctx->sample_rate,
			av_get_default_channel_layout(is->audio_ctx->channels),
			is->audio_ctx->sample_fmt, is->audio_ctx->sample_rate,
			0, NULL);
		if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
			av_log(NULL, AV_LOG_ERROR, "swr_init() failed\n");
			return -1;
		}
	}

	const uint8_t **in = (const uint8_t **)af->frame->extended_data;
	uint8_t **out = &is->audio_buf;

	/* Size the output for the actual frame. The previous hard-coded
	 * 2 * 1152 * 2 only fit MP3 frames and made the <0 check dead code. */
	out_size = av_samples_get_buffer_size(NULL, 2, af->frame->nb_samples,
	                                      AV_SAMPLE_FMT_S16, 1);
	if (out_size < 0) { /* e.g. af->frame->nb_samples == 0; must be caught before av_fast_malloc() */
		av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
		return -1;
	}

	av_fast_malloc(&is->audio_buf, &is->audio_buf_size, out_size);
	if (!is->audio_buf)
		return -1;

	int len2 = swr_convert(is->swr_ctx, out, af->frame->nb_samples,
	                       in, af->frame->nb_samples);
	if (len2 < 0)
		return -1;
	/* bytes = samples * channels(2) * bytes-per-S16-sample */
	resampled_data_size = len2 * 2 * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
	return resampled_data_size;
}
예제 #2
0
void MediaThread::Pcm::save(const AVFrame *frame, double stamp)
{
    stamp_ = stamp;

#if 0
    memset(buf_, 0, 4096);
    data_len_ = 4096;
#else
    if (!swr_) {
        // 总是输出 2, s16, 32000 ...
        swr_ = swr_alloc_set_opts(0, AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, 32000,
                                  frame->channel_layout, (AVSampleFormat)frame->format, frame->sample_rate,
                                  0, 0);

        ch_ = 2;
        samplerate_ = 32000;
        bitsize_ = 16;

        swr_init(swr_);
    }

    size_t out_size = frame->nb_samples * 2 * 2;    // samples * bytes per sample * channels
    if (buf_len_ < out_size) {
        buf_ = (unsigned char*)realloc(buf_, out_size);
        buf_len_ = out_size;
    }

    int samples = swr_convert(swr_, &buf_, frame->nb_samples, (const uint8_t**)frame->extended_data, frame->nb_samples);
    data_len_ = samples * 2 * 2;    // samples * bytes per sample * channels

#endif
}
예제 #3
0
// Allocates and initializes the swresample context converting the input
// stream (channelLayoutIn / sampleFormatIn / sampleRateIn) to the output
// codec's format. Throws ContextCreatorException on allocation or init
// failure.
void Parser::InitResampler()
{
    // swr_alloc_set_opts() sets every conversion parameter in one call, so
    // the av_opt_set_*() block that used to follow was redundant — and partly
    // wrong: "in_bit_rate"/"out_bit_rate" are not SwrContext options (those
    // calls failed silently), and it overrode the output layout with
    // cdc_ctx_out->channel_layout, which could disagree with the default
    // layout derived from cdc_ctx_out->channels below.
    swr_ctx = swr_alloc_set_opts(NULL,
                                 av_get_default_channel_layout(cdc_ctx_out->channels),
                                 cdc_ctx_out->sample_fmt,
                                 cdc_ctx_out->sample_rate,
                                 channelLayoutIn,
                                 (AVSampleFormat)sampleFormatIn,
                                 sampleRateIn,
                                 0, 0);

    if (!swr_ctx)
        throw ContextCreatorException() << errno_code(MIR_ERR_ALLOC_SWR_CONTEXT);

    if (swr_init(swr_ctx) < 0)
        throw ContextCreatorException() << errno_code(MIR_ERR_INIT_SWR_CONTEXT);
}
예제 #4
0
// 函数实现
// Creates a RENDER instance bound to the given window surface and returns it
// as an opaque handle. Sets up:
//  - waveOut audio output at a fixed 44.1 kHz / 16-bit / stereo format,
//    with a wave queue sized to one video frame's worth of bytes;
//  - a swr context converting the source audio (ch_layout/sndfmt/srate)
//    to that fixed output format;
//  - a sws context scaling video from (w, h, pixfmt) to the full screen
//    size in RGB32;
//  - GDI DCs and a bitmap queue for blitting, and the video render thread.
// NOTE(review): the malloc() result is used unchecked — confirm allocation
// failure is acceptable to ignore here.
void* renderopen(void *surface, AVRational frate, int pixfmt, int w, int h,
                 int64_t ch_layout, AVSampleFormat sndfmt, int srate)
{
    WAVEFORMATEX wfx = {0};

    RENDER *render = (RENDER*)malloc(sizeof(RENDER));
    memset(render, 0, sizeof(RENDER));
    render->hRenderWnd = (HWND)surface; // save hwnd

    // init for audio
    wfx.cbSize          = sizeof(wfx);
    wfx.wFormatTag      = WAVE_FORMAT_PCM;
    wfx.wBitsPerSample  = 16;    // 16bit
    wfx.nSamplesPerSec  = 44100; // 44.1k
    wfx.nChannels       = 2;     // stereo
    wfx.nBlockAlign     = wfx.nChannels * wfx.wBitsPerSample / 8;
    wfx.nAvgBytesPerSec = wfx.nBlockAlign * wfx.nSamplesPerSec;
    // open paused; queue item size = bytes per video frame at 44.1kHz 16-bit
    // stereo, masked to a 4-byte (one stereo S16 sample) boundary
    waveOutOpen(&(render->hWaveOut), WAVE_MAPPER, &wfx, (DWORD_PTR)waveOutProc, (DWORD)render, CALLBACK_FUNCTION);
    waveOutPause(render->hWaveOut);
    wavqueue_create(&(render->WavQueue), render->hWaveOut, ((int64_t)44100 * 4 * frate.den / frate.num) & ~0x3);

    /* allocate & init swr context */
    render->pSWRContext = swr_alloc_set_opts(NULL, AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, 44100,
                          ch_layout, sndfmt, srate, 0, NULL);
    swr_init(render->pSWRContext);

    // init for video: render target is the full screen, not the source size
    render->nVideoWidth  = w;
    render->nVideoHeight = h;
    render->nRenderWidth = GetSystemMetrics(SM_CXSCREEN);
    render->nRenderHeight= GetSystemMetrics(SM_CYSCREEN);
    render->nRenderNewW  = render->nRenderWidth;
    render->nRenderNewH  = render->nRenderHeight;
    render->PixelFormat  = (PixelFormat)pixfmt;

    // create sws context (source size/format -> screen size, RGB32)
    render->pSWSContext = sws_getContext(
                              render->nVideoWidth,
                              render->nVideoHeight,
                              render->PixelFormat,
                              render->nRenderWidth,
                              render->nRenderHeight,
                              PIX_FMT_RGB32,
                              SWS_BILINEAR,
                              0, 0, 0);

    // frame interval in ms, derived from the video frame rate
    render->iFrameTick = 1000 * frate.den / frate.num;
    render->iSleepTick = render->iFrameTick;

    // create dc & bitmaps
    render->hRenderDC = GetDC(render->hRenderWnd);
    render->hBufferDC = CreateCompatibleDC(render->hRenderDC);

    // create bmp queue (32-bit bitmaps at full screen size)
    bmpqueue_create(&(render->BmpQueue), render->hBufferDC, GetSystemMetrics(SM_CXSCREEN), GetSystemMetrics(SM_CYSCREEN), 32);

    render->nRenderStatus = 0;
    pthread_create(&(render->hVideoThread), NULL, VideoRenderThreadProc, render);
    return render;
}
예제 #5
0
// Initializes the resampler converting the source codec's audio format to
// stereo / S16 at the media's sample rate, and allocates the scale-out
// buffer. Returns Audio_Render_Err_None on success, an error code otherwise.
EC_U32 AudioWaveScale::Init(MediaCtxInfo* pMediaInfo, AudioPCMBuffer *pFirstFrame)
{
    if (EC_NULL == pMediaInfo)
        return Audio_Render_Err_InitFail;

    /* Fixed output: stereo, interleaved signed 16-bit, source sample rate. */
    EC_S32 out_sample_rate = pMediaInfo->m_nSampleRate;
    EC_S64 out_channel_layout = AV_CH_LAYOUT_STEREO;
    EC_S32 out_channels = av_get_channel_layout_nb_channels(out_channel_layout);
    AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S16;

    AVCodecContext *pCodecCtx = (AVCodecContext*)(pMediaInfo->m_pAudioCodecInfo);
    EC_S64 in_channel_layout = av_get_default_channel_layout(pCodecCtx->channels);
    EC_S32 in_sample_rate = pCodecCtx->sample_rate;
    AVSampleFormat in_sample_fmt = pCodecCtx->sample_fmt;

    m_nOutChannels = out_channels;
    m_nOutSampleFormat = out_sample_fmt;

    /* swr_alloc_set_opts(NULL, ...) allocates the context itself; the
     * separate swr_alloc() call that preceded it was redundant. Also check
     * the result before swr_init() instead of dereferencing blindly. */
    m_pWaveScaleContext = swr_alloc_set_opts(NULL,
                                             out_channel_layout,
                                             out_sample_fmt,
                                             out_sample_rate,
                                             in_channel_layout,
                                             in_sample_fmt,
                                             in_sample_rate, 0, NULL);
    if (EC_NULL == m_pWaveScaleContext)
        return Audio_Render_Err_InitFail;

    EC_S32 nRet = swr_init(m_pWaveScaleContext);
    if (nRet < 0) return Audio_Render_Err_InitFail;

    m_pScaleOutbuffer = (uint8_t *)av_malloc(MAX_AUDIO_FRAME_SIZE * 2);
    if (m_pScaleOutbuffer == EC_NULL) return EC_Err_Memory_Low;

    return Audio_Render_Err_None;
}
/* Configure the output link: resolve unspecified output format/layout from
 * the link itself, (re)build the swr context for the conversion, and log the
 * chosen formats/layouts. Returns 0 on success or a negative AVERROR. */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx      = outlink->src;
    AVFilterLink    *inlink   = ctx->inputs[0];
    AConvertContext *aconvert = ctx->priv;
    char in_layout_str[64], out_layout_str[64];
    int ret;

    /* if not specified in args, use the format and layout of the output */
    if (aconvert->out_sample_fmt == AV_SAMPLE_FMT_NONE)
        aconvert->out_sample_fmt = outlink->format;
    if (!aconvert->out_chlayout)
        aconvert->out_chlayout = outlink->channel_layout;

    aconvert->swr = swr_alloc_set_opts(aconvert->swr,
                                       aconvert->out_chlayout, aconvert->out_sample_fmt, inlink->sample_rate,
                                       inlink->channel_layout, inlink->format,           inlink->sample_rate,
                                       0, ctx);
    if (!aconvert->swr)
        return AVERROR(ENOMEM);
    if ((ret = swr_init(aconvert->swr)) < 0)
        return ret;

    av_get_channel_layout_string(in_layout_str,  sizeof(in_layout_str),
                                 -1, inlink->channel_layout);
    av_get_channel_layout_string(out_layout_str, sizeof(out_layout_str),
                                 -1, outlink->channel_layout);
    av_log(ctx, AV_LOG_VERBOSE,
           "fmt:%s cl:%s -> fmt:%s cl:%s\n",
           av_get_sample_fmt_name(inlink->format),  in_layout_str,
           av_get_sample_fmt_name(outlink->format), out_layout_str);

    return 0;
}
// Builds a reader over per-channel temporary audio files for one track.
// Captures source and destination audio parameters, opens one file handle
// per source channel, and — only if any audio parameter differs between
// source media and output — creates a resampler plus per-channel read and
// resample buffers.
CAudioReader::CAudioReader(CAudioSource *source, AudioTrack *info)
{
	m_Source = source;
	m_Info = info;
	m_MediaInfo = info->m_Media;
	m_DestSampleRate = source->m_SampleRate;
	m_DestChannels = source->m_Channels;
	m_DestPacketBytes = source->m_PacketBytes;
	
	// Planar media: one file per channel, packet = one sample.
	// Interleaved media: a single "channel" whose packet spans all channels.
	if (m_MediaInfo->m_IsPlanar)
	{
		m_SourceChannels = m_MediaInfo->m_nChannel;
		m_SourcePacketBytes = av_get_bytes_per_sample((enum AVSampleFormat)m_MediaInfo->m_SampleFormat);
	}
	else
	{
		m_SourceChannels = 1;
		m_SourcePacketBytes = m_MediaInfo->m_nChannel * av_get_bytes_per_sample((enum AVSampleFormat)m_MediaInfo->m_SampleFormat);
	}
	m_SourceSampleRate = m_MediaInfo->m_SampleRate;
	
	// Open each per-channel temp file for shared read access.
	// NOTE(review): CreateFile results are not checked — confirm a failed
	// open (INVALID_HANDLE_VALUE) is handled by the read path.
	for(int i = 0; i < m_SourceChannels; i++)
	{
		m_hFiles[i] = CreateFile(m_MediaInfo->m_AudioTmpFile[i],
			GENERIC_READ,
			FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,                    
			NULL,                 
			OPEN_EXISTING,        
			FILE_ATTRIBUTE_NORMAL,
			NULL);
	}

	// Resampling is only needed when any audio parameter differs.
	if (
		(m_MediaInfo->m_SampleRate != m_Source->m_SampleRate) ||
		(m_MediaInfo->m_nChannel != m_Source->m_Channels) ||
		(m_MediaInfo->m_SampleFormat != m_Source->m_SampleFormat) ||
		(m_MediaInfo->m_channel_layout != m_Source->m_Layout)
		)
	{
		swr_context =
		swr_alloc_set_opts(NULL,
						   m_Source->m_Layout, (AVSampleFormat)source->m_SampleFormat, m_Source->m_SampleRate,
						   m_MediaInfo->m_channel_layout, (AVSampleFormat)m_MediaInfo->m_SampleFormat, m_MediaInfo->m_SampleRate,
						   0, NULL);
		swr_init(swr_context);

		// One second of source data per channel...
		for(int i = 0; i < m_SourceChannels; i++)
		{
			m_ReadBuffer[i] = (uint8_t *)MemoryAlloc(m_SourceSampleRate * m_SourcePacketBytes);
		}

		// ...and two seconds of destination data per channel (resampling headroom).
		for(int i = 0; i < m_DestChannels; i++)
		{
			m_ResampleBuffer[i] = (uint8_t *)MemoryAlloc(m_DestSampleRate * m_DestPacketBytes * 2);
		}
	}

	ResetStartStop();
}
예제 #8
0
/* Converts one decoded audio frame (src) into dst as interleaved S16 at the
 * decoder's target channel count, allocating dst->data[0] for the converted
 * samples. When no format/channel change is needed, the data is copied
 * verbatim. The swr context is created and freed per call.
 * NOTE(review): `avctxp` and `frame` are not declared in this function —
 * they appear to be file-scope globals; confirm `frame` here is meant to be
 * the same frame as `src` (nb_samples is read from `frame`, data from
 * `src`). */
static void audio_convert(dtaudio_decoder_t *decoder, AVFrame * dst,
                          AVFrame * src)
{
    int nb_sample;
    int dst_buf_size;
    int out_channels;
    //for audio post processor
    //struct SwsContext *m_sws_ctx = NULL;
    struct SwrContext *m_swr_ctx = NULL;
    //ResampleContext *m_resample_ctx=NULL;
    enum AVSampleFormat src_fmt = avctxp->sample_fmt;
    enum AVSampleFormat dst_fmt = AV_SAMPLE_FMT_S16;

    /* NOTE(review): this linesize assignment is immediately overwritten by
     * the struct copy on the next line — it has no effect. */
    dst->linesize[0] = src->linesize[0];
    *dst = *src;

    /* dst gets its own buffer; detach it from src's data pointer. */
    dst->data[0] = NULL;
    out_channels = decoder->para.dst_channels;
    nb_sample = frame->nb_samples;
    dst_buf_size = nb_sample * av_get_bytes_per_sample(dst_fmt) * out_channels;
    dst->data[0] = (uint8_t *) av_malloc(dst_buf_size);

    avcodec_fill_audio_frame(dst, out_channels, dst_fmt, dst->data[0], dst_buf_size,
                             0);
    dt_debug(TAG, "SRCFMT:%d dst_fmt:%d \n", src_fmt, dst_fmt);
    /* resample toAV_SAMPLE_FMT_S16 */
    if (src_fmt != dst_fmt || out_channels != decoder->para.channels) {
        /* m_swr_ctx is always NULL here (local, reset each call), so the
         * context is rebuilt for every frame. */
        if (!m_swr_ctx) {
            uint64_t in_channel_layout = av_get_default_channel_layout(avctxp->channels);
            uint64_t out_channel_layout = av_get_default_channel_layout(out_channels);
            m_swr_ctx = swr_alloc_set_opts(NULL, out_channel_layout, dst_fmt,
                                           avctxp->sample_rate, in_channel_layout, src_fmt, avctxp->sample_rate, 0, NULL);
            swr_init(m_swr_ctx);
        }
        uint8_t **out = (uint8_t **) & dst->data;
        const uint8_t **in = (const uint8_t **) src->extended_data;
        if (m_swr_ctx) {
            int ret, out_count;
            out_count = nb_sample;
            ret = swr_convert(m_swr_ctx, out, out_count, in, nb_sample);
            if (ret < 0) {
                //set audio mute
                memset(dst->data[0], 0, dst_buf_size);
                printf("audio convert failed, set mute data\n");
            }
        }
    } else {                    // no need to convert ,just copy
        memcpy(dst->data[0], src->data[0], src->linesize[0]);
    }
    //free context
    if (m_swr_ctx != NULL) {
        swr_free(&m_swr_ctx);
    }
    //if(m_resample_ctx!=NULL)
    //    audio_resample_close(m_resample_ctx);
}
예제 #9
0
// Sets up an ATRAC3+ decoder: finds the codec, opens it with forced
// stereo/44.1kHz options, and creates a swr context converting the decoder's
// native output to interleaved S16. On any failure the object is left with
// codec_/codecCtx_/swrCtx_ partially null (callers must check validity).
SimpleAT3::SimpleAT3()
	: codec_(0),
		codecCtx_(0),
		swrCtx_(0) {
	frame_ = av_frame_alloc();

	codec_ = avcodec_find_decoder(AV_CODEC_ID_ATRAC3P);
	if (!codec_) {
		// Eh, we shouldn't even have managed to compile. But meh.
		ERROR_LOG(ME, "This version of FFMPEG does not support AV_CODEC_ID_ATRAC3P (Atrac3+). Update your submodule.");
		return;
	}

	codecCtx_ = avcodec_alloc_context3(codec_);
	if (!codecCtx_) {
		ERROR_LOG(ME, "Failed to allocate a codec context");
		return;
	}

	codecCtx_->channels = 2;
	codecCtx_->channel_layout = AV_CH_LAYOUT_STEREO;

	AVDictionary *opts = 0;
	av_dict_set(&opts, "channels", "2", 0);
	av_dict_set(&opts, "sample_rate", "44100", 0);
	if (avcodec_open2(codecCtx_, codec_, &opts) < 0) {
		ERROR_LOG(ME, "Failed to open codec");
		av_dict_free(&opts);  // fix: the dictionary leaked on this path
		return;
	}

	av_dict_free(&opts);

	// Initializing the sample rate convert. We only really use it to convert float output
	// into int.
	int wanted_channels = 2;
	int64_t wanted_channel_layout = av_get_default_channel_layout(wanted_channels);
	int64_t dec_channel_layout = av_get_default_channel_layout(2);

	swrCtx_ = swr_alloc_set_opts(
			swrCtx_,
			wanted_channel_layout,
			AV_SAMPLE_FMT_S16,
			codecCtx_->sample_rate,
			dec_channel_layout,
			codecCtx_->sample_fmt,
			codecCtx_->sample_rate,
			0,
			NULL);

	if (!swrCtx_ || swr_init(swrCtx_) < 0) {
		ERROR_LOG(ME, "swr_init: Failed to initialize the resampling context");
		avcodec_close(codecCtx_);
		codec_ = 0;
		return;
	}
}
    /**
     * Initialize the audio resampler based on the input and output codec settings.
     * If the input and output sample formats differ, a conversion is required
     * libswresample takes care of this, but requires initialization.
     *
     * @param input_codec_context  decoder context describing the source audio
     * @param output_codec_context encoder context describing the target audio
     * @return 0 on success, a negative AVERROR code on failure
     */
    int AudioDecoder::init_resampler(AVCodecContext *input_codec_context,
            AVCodecContext *output_codec_context)
    {
        int error;

        /**
         * Create a resampler context for the conversion.
         * Set the conversion parameters.
         * Default channel layouts based on the number of channels
         * are assumed for simplicity (they are sometimes not detected
         * properly by the demuxer and/or decoder).
         */
        resample_context = swr_alloc_set_opts(NULL,
                av_get_default_channel_layout(output_codec_context->channels),
                output_codec_context->sample_fmt,
                output_codec_context->sample_rate,
                av_get_default_channel_layout(input_codec_context->channels),
                input_codec_context->sample_fmt,
                input_codec_context->sample_rate,
                0, NULL);

        if (!resample_context) {
            ELOG_WARN( "Could not allocate resample context\n");
            return AVERROR(ENOMEM);
        }

        ELOG_DEBUG( "audio input sample_rate = %d, out %d", input_codec_context->sample_rate, output_codec_context->sample_rate);

        /**
         * Open the resampler with the specified parameters.
         * Fix: swr_init() was previously called twice in a row; the second,
         * duplicated block has been removed.
         */
        if ((error = swr_init(resample_context)) < 0) {
            ELOG_WARN( "Could not open resample context");
            swr_free(&resample_context);
            return error;
        }

        ELOG_DEBUG( "swr_init done");

        return 0;
    }
예제 #11
0
// Changes playback speed by rebuilding the resampler with a scaled output
// sample rate. No-op when the speed is unchanged or non-positive.
void XAudioStream::setSpeed(float speed)
{
	if(m_speed == speed || speed <= 0.0f) return;
	m_speed = speed;
	swr_free(&m_pSwrContext);
	m_pSwrContext = swr_alloc();
	if(m_pSwrContext == NULL) return;
	// The two former branches differed only in the input channel layout:
	// fall back to the default layout for the channel count when the codec
	// did not report one.
	int64_t inLayout = (m_pAudioCodecCtx->channel_layout == 0)
		? av_get_default_channel_layout(m_pAudioCodecCtx->channels)
		: (int64_t)m_pAudioCodecCtx->channel_layout;
	swr_alloc_set_opts(m_pSwrContext,
		av_get_default_channel_layout(XEG.getAudioChannelSum()), getSampleFormat(),
		XEG.getAudioSampleRate() * m_speed,
		inLayout, m_pAudioCodecCtx->sample_fmt, m_pAudioCodecCtx->sample_rate,
		0, NULL);
	if(swr_init(m_pSwrContext) < 0)
	{
		LogStr("swr_init() fail");
		// Fix: don't leave a half-initialized context installed — later
		// conversions would use it silently.
		swr_free(&m_pSwrContext);
		return;
	}
}
예제 #12
0
    // Opens the audio stream at the given index: finds and opens its decoder,
    // creates a resampler (to interleaved S16 at the source rate) when the
    // codec's native output is not directly playable, and allocates the
    // reusable decode frame. Returns kxMovieErrorNone on success.
    kxMovieError openAudioStream(size_t audioStream)
    {
        AVCodecContext *codecCtx = _formatCtx->streams[audioStream]->codec;
        SwrContext *swrContext = NULL;

        AVCodec *codec = avcodec_find_decoder(codecCtx->codec_id);
        if (!codec)
            return kxMovieErrorCodecNotFound;
        if (avcodec_open2(codecCtx, codec, NULL) < 0)
            return kxMovieErrorOpenCodec;

        if (!audioCodecIsSupported(codecCtx)) {
            // Same channel layout on both sides: only format changes to S16.
            swrContext = swr_alloc_set_opts(NULL,
                                            av_get_default_channel_layout(codecCtx->channels),
                                            AV_SAMPLE_FMT_S16,
                                            codecCtx->sample_rate,
                                            av_get_default_channel_layout(codecCtx->channels),
                                            codecCtx->sample_fmt,
                                            codecCtx->sample_rate,
                                            0,
                                            NULL);
            if (!swrContext || swr_init(swrContext)) {
                if (swrContext)
                    swr_free(&swrContext);
                avcodec_close(codecCtx);
                return kxMovieErroReSampler;
            }
        }

        _audioFrame = av_frame_alloc();
        if (!_audioFrame) {
            // Roll back everything acquired so far.
            if (swrContext)
                swr_free(&swrContext);
            avcodec_close(codecCtx);
            return kxMovieErrorAllocateFrame;
        }

        _audioStream = audioStream;
        _audioCodecCtx = codecCtx;
        _swrContext = swrContext;
        return kxMovieErrorNone; 
    }
/*
 * Resamples one decoded frame into out_buf using the requested output
 * channels / sample format / sample rate. Returns the number of bytes
 * written to out_buf, or -1 on failure.
 *
 * Fixes over the original:
 *  - the OUTPUT channel layout now matches out_channels (the input layout
 *    was used for both sides before);
 *  - the destination buffer is sized with out_sample_fmt and the rescaled
 *    output sample count (it was sized with the INPUT format and frame_size);
 *  - swr_convert() receives the capacity in SAMPLES, not the byte linesize;
 *  - dst_data and swr_ctx are released on every error path (they leaked).
 */
int AudioResampling(AVCodecContext * audio_dec_ctx,
        AVFrame * pAudioDecodeFrame,
        enum AVSampleFormat out_sample_fmt,
        int out_channels,
        int out_sample_rate,
        uint8_t* out_buf)
{
    struct SwrContext *swr_ctx = swr_alloc_set_opts(NULL,
            av_get_default_channel_layout(out_channels), out_sample_fmt, out_sample_rate,
            audio_dec_ctx->channel_layout, audio_dec_ctx->sample_fmt, audio_dec_ctx->sample_rate,
            0, 0);
    if (!swr_ctx) {
        printf("swr_ctx null error \n");
        return -1;
    }
    if (swr_init(swr_ctx) < 0) {
        printf("Failed to initialize the resampling context\n");
        swr_free(&swr_ctx);
        return -1;
    }

    /* Worst-case output sample count for this frame. */
    int max_out_samples = (int)av_rescale_rnd(pAudioDecodeFrame->nb_samples,
            out_sample_rate, audio_dec_ctx->sample_rate, AV_ROUND_UP);
    int dst_linesize = 0;
    int resampled_data_size = av_samples_get_buffer_size(&dst_linesize, out_channels,
            max_out_samples, out_sample_fmt, 1);
    if (resampled_data_size < 0) {
        printf("av_samples_get_buffer_size error \n");
        swr_free(&swr_ctx);
        return -1;
    }

    uint8_t *dst_data = (uint8_t*)av_malloc(resampled_data_size);
    if (!dst_data) {
        swr_free(&swr_ctx);
        return -1;
    }

    int ret = swr_convert(swr_ctx, &dst_data, max_out_samples,
            (const uint8_t **)pAudioDecodeFrame->data, pAudioDecodeFrame->nb_samples);
    if (ret < 0) {
        printf("swr_convert error \n");
        av_free(dst_data);
        swr_free(&swr_ctx);
        return -1;
    }

    /* Actual byte size of the converted data (ret = samples produced). */
    resampled_data_size = av_samples_get_buffer_size(&dst_linesize, out_channels,
            ret, out_sample_fmt, 1);
    if (resampled_data_size < 0) {
        printf("av_samples_get_buffer_size error \n");
        av_free(dst_data);
        swr_free(&swr_ctx);
        return -1;
    }

    memcpy(out_buf, dst_data, resampled_data_size);

    av_free(dst_data);
    swr_free(&swr_ctx);
    return resampled_data_size;
}
예제 #14
0
// Resamples one decoded audio frame into the audio device's current buffer,
// requesting/posting device buffers as they fill. The loop drains the swr
// context: after the first pass the frame's data pointer and sample count are
// cleared, so subsequent swr_convert() calls flush buffered samples until
// none remain (sampnum == 0). Rebuilds the swr context whenever the playback
// speed changes (output rate is scaled inversely with speed).
void render_audio(void *hrender, AVFrame *audio)
{
    RENDER *render  = (RENDER*)hrender;
    int     sampnum = 0;
    DWORD   apts    = (DWORD)audio->pts;

    if (!render->adev) return;
    do {
        // Acquire a fresh device buffer when the previous one is full.
        if (render->nAdevBufAvail == 0) {
            adev_request(render->adev, &render->pAdevHdrCur);
            apts += render->nFramePeriod * render->nRenderSpeedCur / 100;
            render->nAdevBufAvail = (int  )render->pAdevHdrCur->size;
            render->pAdevBufCur   = (BYTE*)render->pAdevHdrCur->data;
        }

        // Speed changed: adjust video frame rate and rebuild the resampler.
        if (render->nRenderSpeedCur != render->nRenderSpeedNew) {
            render->nRenderSpeedCur = render->nRenderSpeedNew;

            // set vdev frame rate
            int framerate = (render->FrameRate.num * render->nRenderSpeedCur) / (render->FrameRate.den * 100);
            vdev_setfrate(render->vdev, framerate > 1 ? framerate : 1);

            //++ allocate & init swr context
            if (render->pSWRContext) {
                swr_free(&render->pSWRContext);
            }
            // Lower output rate -> faster playback (speed is in percent).
            int samprate = 44100 * 100 / render->nRenderSpeedCur;
            render->pSWRContext = swr_alloc_set_opts(NULL, AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, samprate,
                render->nChanLayout, render->SampleFormat, render->nSampleRate, 0, NULL);
            swr_init(render->pSWRContext);
            //-- allocate & init swr context
        }

        //++ do resample audio data ++//
        // capacity is in samples: avail bytes / 4 (stereo S16 = 4 bytes/sample)
        sampnum = swr_convert(render->pSWRContext, (uint8_t**)&render->pAdevBufCur,
            render->nAdevBufAvail / 4, (const uint8_t**)audio->extended_data,
            audio->nb_samples);
        // Clear the input so the next iteration only flushes swr's backlog.
        audio->extended_data  = NULL;
        audio->nb_samples     = 0;
        render->nAdevBufAvail -= sampnum * 4;
        render->pAdevBufCur   += sampnum * 4;
        //-- do resample audio data --//

        // Buffer filled exactly: hand it to the device with its timestamp.
        if (render->nAdevBufAvail == 0) {
            adev_post(render->adev, apts);
        }
    } while (sampnum > 0);
}
예제 #15
0
/// Constructs a libswresample wrapper: allocates a context with the given
/// output/input layout, sample format and rate, then initialises it.
/// Throws std::bad_alloc if allocation fails.
/// NOTE(review): the swr_init() result is unchecked, matching the original
/// contract — callers only observe allocation failures.
Swr::Swr(std::int64_t out_ch_layout, AVSampleFormat out_sample_fmt,
         int out_sample_rate, std::int64_t in_ch_layout,
         AVSampleFormat in_sample_fmt, int in_sample_rate, int log_offset,
         void* log_ctx)
{
	this->context = swr_alloc_set_opts(nullptr, out_ch_layout,
	                                   out_sample_fmt, out_sample_rate,
	                                   in_ch_layout, in_sample_fmt,
	                                   in_sample_rate, log_offset, log_ctx);
	if (this->context == nullptr) {
		throw std::bad_alloc();
	}

	// The assert(this->context != nullptr) that used to follow was dead
	// code: the throw above already guarantees a non-null context here.
	swr_init(this->context);
}
예제 #16
0
void init_swr(){
	uint64_t out_channel_layout=AV_CH_LAYOUT_STEREO;
	//nb_samples: AAC-1024 MP3-1152
	out_sample_rate=pCodecCtx->sample_rate;
	out_channels=av_get_channel_layout_nb_channels(out_channel_layout);

	out_buffer=(uint8_t *)av_malloc(MAX_AUDIO_FRAME_SIZE*out_channels);
	  //FIX:Some Codec's Context Information is missing
	int in_channel_layout=av_get_default_channel_layout(pCodecCtx->channels);
	//Swr
	au_convert_ctx = swr_alloc();
	swr_alloc_set_opts(au_convert_ctx,out_channel_layout, out_sample_fmt,         out_sample_rate,
					 in_channel_layout,  pCodecCtx->sample_fmt , pCodecCtx->sample_rate,0, NULL);
	if(swr_init(au_convert_ctx)<0){
		au_convert_ctx=NULL;
	}
	createBufferQueueAudioPlayer(2,out_sample_rate);  
}
예제 #17
0
// Configures waveOut playback for the codec's stream and creates a swr
// context converting the decoded audio to stereo S16 at the source rate.
void AudioDec::set_audio_spec(AVCodecContext* pCodecCtx)
{
	/* Fill the WAVEFORMATEX used to open the audio output device. */
	/* sample rate */
	wfx.nSamplesPerSec = pCodecCtx->sample_rate;

	/* number of bits per sample of mono data */
	switch (pCodecCtx->sample_fmt)
	{
	case AV_SAMPLE_FMT_U8: wfx.wBitsPerSample = 8; break;
	case AV_SAMPLE_FMT_S16: wfx.wBitsPerSample = 16; break;
	case AV_SAMPLE_FMT_S32: wfx.wBitsPerSample = 32; break;
	case AV_SAMPLE_FMT_FLT: wfx.wBitsPerSample = sizeof(float) * 8; break; /* fix: FLT is 32-bit float, not double */
	case AV_SAMPLE_FMT_FLTP: wfx.wBitsPerSample = 16; break;
	default: wfx.wBitsPerSample = 0; break;
	}
	/* The switch above is informational only: output is always resampled
	 * to 16-bit S16 below. */
	wfx.wBitsPerSample = 16;
	wfx.nChannels = FFMIN(2, pCodecCtx->channels);

	/* the count in bytes of the size of extra information (after cbSize) */
	wfx.cbSize = 0;
	wfx.wFormatTag = WAVE_FORMAT_PCM;
	wfx.nBlockAlign = (wfx.wBitsPerSample * wfx.nChannels) / 8;
	wfx.nAvgBytesPerSec = wfx.nBlockAlign * wfx.nSamplesPerSec;

	MMRESULT result = waveOutOpen(&hWaveOut, WAVE_MAPPER, &wfx, (DWORD)audio_callback_func_static, (DWORD)&waveFreeBlockCount, CALLBACK_FUNCTION);
	
	if (result != MMSYSERR_NOERROR)
	{
		/* Could not open the audio output device. */
		TRACE("err waveOutOpen \n");
		return;
	}

	/* Fix: the 5th argument of swr_alloc_set_opts() is the INPUT channel
	 * LAYOUT (a bit mask such as AV_CH_LAYOUT_STEREO), not a channel count.
	 * Passing wfx.nChannels (1 or 2) here was an invalid layout. */
	pSwrContext = swr_alloc_set_opts(
		NULL, AV_CH_LAYOUT_STEREO,
		AV_SAMPLE_FMT_S16,
		pCodecCtx->sample_rate,
		av_get_default_channel_layout(pCodecCtx->channels),
		pCodecCtx->sample_fmt,
		pCodecCtx->sample_rate,
		0, NULL);
	swr_init(pSwrContext);
}
예제 #18
0
// Resamples one decoded audio frame into the audio device's current buffer,
// locking/unlocking device buffers as they fill. The loop drains the swr
// context: after the first pass the frame's data pointer and sample count
// are cleared, so later swr_convert() calls only flush buffered samples
// until none remain (sampnum == 0). Rebuilds the swr context whenever the
// playback speed changes (output rate scales inversely with speed).
void render_audio(void *hrender, AVFrame *audio)
{
    RENDER *render  = (RENDER*)hrender;
    int     sampnum = 0;
    int64_t apts    = audio->pts;

    if (!render || !render->adev) return;
    do {
        // Acquire a fresh device buffer when the previous one is full.
        if (render->adev_buf_avail == 0) {
            adev_lock(render->adev, &render->adev_hdr_cur);
            apts += 10 * render->render_speed_cur * render->frame_rate.den / render->frame_rate.num;
            render->adev_buf_avail = (int     )render->adev_hdr_cur->size;
            render->adev_buf_cur   = (uint8_t*)render->adev_hdr_cur->data;
        }

        // Speed changed: rebuild the resampler with the scaled output rate.
        if (render->render_speed_cur != render->render_speed_new) {
            render->render_speed_cur = render->render_speed_new;
            //++ allocate & init swr context
            if (render->swr_context) {
                swr_free(&render->swr_context);
            }
            // Lower output rate -> faster playback (speed is in percent).
            int samprate = (int)(ADEV_SAMPLE_RATE * 100.0 / render->render_speed_cur);
            render->swr_context = swr_alloc_set_opts(NULL, AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, samprate,
                render->chan_layout, render->sample_fmt, render->sample_rate, 0, NULL);
            swr_init(render->swr_context);
            //-- allocate & init swr context
        }

        //++ do resample audio data ++//
        // capacity is in samples: avail bytes / 4 (stereo S16 = 4 bytes/sample)
        sampnum = swr_convert(render->swr_context,
            (uint8_t**)&render->adev_buf_cur, render->adev_buf_avail / 4,
            (const uint8_t**)audio->extended_data, audio->nb_samples);
        // Clear the input so the next iteration only flushes swr's backlog.
        audio->extended_data    = NULL;
        audio->nb_samples       = 0;
        render->adev_buf_avail -= sampnum * 4;
        render->adev_buf_cur   += sampnum * 4;
        //-- do resample audio data --//

        // Buffer filled exactly: release it to the device with its timestamp.
        if (render->adev_buf_avail == 0) {
            adev_unlock(render->adev, apts);
        }
    } while (sampnum > 0);
}
예제 #19
0
파일: avfile.cpp 프로젝트: berkus/arfarius
// Rebuilds the resampler for the current output parameters (_channels,
// packed float, _sample_rate). Frees any previous context first; leaves
// swrCtx NULL when no conversion is needed or prerequisites are missing.
// Throws AVException on allocation or initialization failure.
void AVFile::_updateSWR()
{
    if (swrCtx) {
        swr_free(&swrCtx);
    }

    if (!_channels || !_sample_rate || !codecCtx)
        return;

    // A converter is only needed when the decoded stream differs from the
    // desired output in layout, sample format, or rate.
    if (codecCtx->channel_layout != (uint64_t)av_get_default_channel_layout(_channels) ||
        codecCtx->sample_fmt != AV_SAMPLE_FMT_FLT ||
        codecCtx->sample_rate != (int)_sample_rate)
    {
        swrCtx = swr_alloc_set_opts(0, av_get_default_channel_layout(_channels), AV_SAMPLE_FMT_FLT, _sample_rate,
                                    codecCtx->channel_layout, codecCtx->sample_fmt, codecCtx->sample_rate,
                                    0, 0);

        if (!swrCtx)
            throw AVException("Unable to allocate swresample context");

        // Fix: a failed swr_init() used to be ignored, leaving a broken
        // context that later conversions would silently use.
        if (swr_init(swrCtx) < 0) {
            swr_free(&swrCtx);
            throw AVException("Unable to initialize swresample context");
        }
    }
}
예제 #20
0
// Initializes the resampler converting the decoder's output (ctx) to the
// fixed transcode format: stereo / S16 / 44.1 kHz, 1024 samples per frame.
// On failure m_au_convert_ctx is left NULL.
void CTransCoder::SwrInit( AVCodecContext* ctx )
{
	//Out Audio Param
	m_out_channel_layout = AV_CH_LAYOUT_STEREO;
	//nb_samples:
	m_out_nb_samples = 1024;
	m_out_sample_fmt = AV_SAMPLE_FMT_S16;
	m_out_sample_rate = 44100;
	m_out_channels = av_get_channel_layout_nb_channels(m_out_channel_layout);

	// Note: the old av_samples_get_buffer_size() call here computed a value
	// that was never used; the conversion buffer is sized by the fixed
	// MAX_AUDIO_FRAME_SIZE constant instead.
	m_audioBuffer = (uint8_t *)av_malloc( MAX_AUDIO_FRAME_SIZE );

	int64_t in_channel_layout = av_get_default_channel_layout(ctx->channels);

	// swr_alloc_set_opts(NULL, ...) allocates the context itself; the
	// previous separate swr_alloc() call was redundant.
	m_au_convert_ctx = swr_alloc_set_opts( NULL, m_out_channel_layout, m_out_sample_fmt, m_out_sample_rate,
		in_channel_layout, ctx->sample_fmt, ctx->sample_rate, 0, NULL);
	if (!m_au_convert_ctx || swr_init(m_au_convert_ctx) < 0) {
		// Fix: swr_init() failure used to be ignored; release the context
		// so callers see a NULL resampler rather than a broken one.
		swr_free(&m_au_convert_ctx);
	}
}
예제 #21
0
// Converts one decoded frame to interleaved S16 at the output codec's
// layout/rate into the member buffer `data`, updating `size` (bytes) and
// resetting the read `index`.
void AudioOutput::readAVFrame(AVFrame* frame)
{
    if(!swrCtx){
        swrCtx = swr_alloc_set_opts(NULL,
                                    aw->audioCodecCtx->channel_layout, AV_SAMPLE_FMT_S16, aw->audioCodecCtx->sample_rate,
                                    frame->channel_layout, (AVSampleFormat)frame->format, frame->sample_rate,
                                    0, NULL);
        swr_init(swrCtx);
    }
    const uint8_t **in = (const uint8_t **)frame->extended_data;
    uint8_t **out = &data;

    // Worst-case output sample count for this frame (rate-conversion
    // headroom plus a safety margin, as ffplay does).
    int out_count = (int64_t)(frame->nb_samples) *4* aw->audioCodecCtx->sample_rate / frame->sample_rate + 256;

    // Fix: size the buffer for what swr_convert() may WRITE (out_count
    // samples), not for the input sample count. The previous bufLen
    // (channels * frame->nb_samples * 2) could be overrun whenever the
    // output rate exceeded the input rate.
    int bufLen = aw->audioCodecCtx->channels * out_count * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
    if(capacity < bufLen){
        data = (uint8_t*)realloc(data, bufLen);
        capacity = bufLen;
    }

    int len2 = swr_convert(swrCtx, out, out_count, in, frame->nb_samples);
    if (len2 < 0) {
        std::cout<<"swr_convert() failed\n";
        return;
    }
    index = 0;
    size = aw->audioCodecCtx->channels * len2 * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);

}
예제 #22
0
// Open the audio source through a custom AVIO read callback (fill_iobuffer),
// locate the first audio stream, open its decoder, start SDL audio output
// (stereo S16 @ 44.1 kHz) and build an swresample context converting from
// the stream's native format to that output format.
// Returns 0 on success, -1 on any failure (bOpen is left false on failure).
// NOTE(review): the early-error returns leave pFormatCtx / avio / iobuffer
// allocated — presumably the class destructor or close() reclaims them;
// confirm, otherwise repeated failed open() calls leak.
int CAudioDecoder::open(void)
{
	int ret = 0;
	bOpen = true;
	pFormatCtx = avformat_alloc_context();
	// 200 KiB staging buffer owned by the AVIO context after this call.
	const int BUF_LEN = (1024 * 200);
	iobuffer = (unsigned char *)av_malloc(BUF_LEN);
	// write_flag=0: read-only context; `this` is passed as the callback opaque.
	avio = avio_alloc_context(iobuffer, BUF_LEN, 0, this, fill_iobuffer, NULL, NULL);
	pFormatCtx->pb = avio;
	pFormatCtx->flags = AVFMT_FLAG_CUSTOM_IO;
	// Filename is NULL because all input arrives via the custom IO callback.
	if (avformat_open_input(&pFormatCtx, NULL, NULL, NULL) != 0){
		bOpen = false;
		return -1; // Couldn't open file
	}

	// Retrieve stream information
	if (avformat_find_stream_info(pFormatCtx, NULL)<0){
		bOpen = false;
		return -1; // Couldn't find stream information
	}

	// Find the first audio stream.
	audioStream = -1;
	unsigned int i;
	for (i = 0; i<pFormatCtx->nb_streams; i++)
	if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
		audioStream = i;
		break;
	}
	if (audioStream == -1){
		bOpen = false;
		return -1; // Didn't find a video stream
	}
	/*AV_SAMPLE_FMT_S16;*/
	// Get a pointer to the codec context for the audio stream
	// (deprecated AVStream::codec API — this code targets older FFmpeg).
	pCodecCtxOrig = pFormatCtx->streams[audioStream]->codec;
	if (!pCodecCtxOrig){
		fprintf(stderr, "Unsupported codec!\n");
		bOpen = false;
		return -1; // Codec not found
	}
	// Find the decoder for the audio stream
	pCodec = avcodec_find_decoder(pCodecCtxOrig->codec_id);
	if (pCodec == NULL) {
		fprintf(stderr, "Unsupported codec!\n");
		bOpen = false;
		return -1; // Codec not found
	}

	// Copy context so we never open the demuxer's own codec context directly.
	pCodecCtx = avcodec_alloc_context3(pCodec);
	if (avcodec_copy_context(pCodecCtx, pCodecCtxOrig) != 0) {
		fprintf(stderr, "Couldn't copy codec context");
		bOpen = false;
		return -1; // Error copying codec context
	}

	// Open codec
	if (avcodec_open2(pCodecCtx, pCodec, NULL)<0){
		bOpen = false;
		return -1; // Could not open codec
	}

	// Allocate audio frame
	pFrame = av_frame_alloc();
	pFrameYUV = av_frame_alloc();
	av_init_packet(&packet);
	out_buffer = (uint8_t *)av_malloc(MAX_AUDIO_FRAME_SIZE * 2);

	// Fixed output parameters: stereo, S16, 44.1 kHz, 1024 samples/callback.
	out_channel_layout = AV_CH_LAYOUT_STEREO;
	out_nb_samples = 1024;
	out_sample_fmt = AV_SAMPLE_FMT_S16;
	out_sample_rate = 44100;
	out_channels = av_get_channel_layout_nb_channels(out_channel_layout);
	//Out Buffer Size
	out_buffer_size = av_samples_get_buffer_size(NULL, out_channels, out_nb_samples, out_sample_fmt, 1);

	//SDL_AudioSpec
	wanted_spec.freq = out_sample_rate;
	wanted_spec.format = AUDIO_S16SYS;
	wanted_spec.channels = out_channels;
	wanted_spec.silence = 0;
	wanted_spec.samples = 1024/*out_nb_samples*/;
	wanted_spec.callback = fill_audio;
	wanted_spec.userdata = pCodecCtx;

	if (SDL_OpenAudio(&wanted_spec, NULL)<0){
		printf("can't open audio.\n");
		return -1;
	}

	// Derive an input layout from the channel count in case the stream
	// does not carry an explicit channel_layout.
	int64_t in_channel_layout = av_get_default_channel_layout(pCodecCtx->channels);

	au_convert_ctx = swr_alloc();
	au_convert_ctx = swr_alloc_set_opts(au_convert_ctx, out_channel_layout, out_sample_fmt, out_sample_rate,
		in_channel_layout, pCodecCtx->sample_fmt, pCodecCtx->sample_rate, 0, NULL);
	swr_init(au_convert_ctx);


	bOpen = true;
	//bStop = false;
	//start();
	return ret;
}
예제 #23
0
/*
 * Open the output container `outfile` and set up the (optional) audio and
 * the video encoder described by `encoder`, including the resampler from
 * the emulator's S16 output to the codec's sample format.
 * Returns true on success; on failure the context may be partially set up.
 */
bool FFmpegEncoderOpen(struct FFmpegEncoder* encoder, const char* outfile) {
	AVCodec* acodec = avcodec_find_encoder_by_name(encoder->audioCodec);
	AVCodec* vcodec = avcodec_find_encoder_by_name(encoder->videoCodec);
	if ((encoder->audioCodec && !acodec) || !vcodec || !FFmpegEncoderVerifyContainer(encoder)) {
		return false;
	}

	encoder->currentAudioSample = 0;
	encoder->currentAudioFrame = 0;
	encoder->currentVideoFrame = 0;
	encoder->nextAudioPts = 0;

	AVOutputFormat* oformat = av_guess_format(encoder->containerFormat, 0, 0);
#ifndef USE_LIBAV
	avformat_alloc_output_context2(&encoder->context, oformat, 0, outfile);
#else
	encoder->context = avformat_alloc_context();
	strncpy(encoder->context->filename, outfile, sizeof(encoder->context->filename) - 1);
	encoder->context->filename[sizeof(encoder->context->filename) - 1] = '\0';
	encoder->context->oformat = oformat;
#endif

	if (acodec) {
#ifdef FFMPEG_USE_CODECPAR
		encoder->audioStream = avformat_new_stream(encoder->context, NULL);
		encoder->audio = avcodec_alloc_context3(acodec);
#else
		encoder->audioStream = avformat_new_stream(encoder->context, acodec);
		encoder->audio = encoder->audioStream->codec;
#endif
		encoder->audio->bit_rate = encoder->audioBitrate;
		encoder->audio->channels = 2;
		encoder->audio->channel_layout = AV_CH_LAYOUT_STEREO;
		encoder->audio->sample_rate = encoder->sampleRate;
		encoder->audio->sample_fmt = encoder->sampleFormat;
		AVDictionary* opts = 0;
		// "strict -2" allows experimental encoders (e.g. the native AAC one).
		av_dict_set(&opts, "strict", "-2", 0);
		if (encoder->context->oformat->flags & AVFMT_GLOBALHEADER) {
#ifdef AV_CODEC_FLAG_GLOBAL_HEADER
			encoder->audio->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
#else
			encoder->audio->flags |= CODEC_FLAG_GLOBAL_HEADER;
#endif
		}
		avcodec_open2(encoder->audio, acodec, &opts);
		av_dict_free(&opts);
#if LIBAVCODEC_VERSION_MAJOR >= 55
		encoder->audioFrame = av_frame_alloc();
#else
		encoder->audioFrame = avcodec_alloc_frame();
#endif
		if (!encoder->audio->frame_size) {
			// PCM-style codecs report 0; use 1 so buffer math below stays valid.
			encoder->audio->frame_size = 1;
		}
		encoder->audioFrame->nb_samples = encoder->audio->frame_size;
		encoder->audioFrame->format = encoder->audio->sample_fmt;
		encoder->audioFrame->pts = 0;
#ifdef USE_LIBAVRESAMPLE
		encoder->resampleContext = avresample_alloc_context();
		av_opt_set_int(encoder->resampleContext, "in_channel_layout", AV_CH_LAYOUT_STEREO, 0);
		av_opt_set_int(encoder->resampleContext, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
		av_opt_set_int(encoder->resampleContext, "in_sample_rate", PREFERRED_SAMPLE_RATE, 0);
		av_opt_set_int(encoder->resampleContext, "out_sample_rate", encoder->sampleRate, 0);
		av_opt_set_int(encoder->resampleContext, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
		av_opt_set_int(encoder->resampleContext, "out_sample_fmt", encoder->sampleFormat, 0);
		avresample_open(encoder->resampleContext);
#else
		encoder->resampleContext = swr_alloc_set_opts(NULL, AV_CH_LAYOUT_STEREO, encoder->sampleFormat, encoder->sampleRate,
		                                              AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, PREFERRED_SAMPLE_RATE, 0, NULL);
		swr_init(encoder->resampleContext);
#endif
		// Pre-resample staging buffer: stereo S16 = 4 bytes per sample pair.
		encoder->audioBufferSize = (encoder->audioFrame->nb_samples * PREFERRED_SAMPLE_RATE / encoder->sampleRate) * 4;
		encoder->audioBuffer = av_malloc(encoder->audioBufferSize);
		encoder->postaudioBufferSize = av_samples_get_buffer_size(0, encoder->audio->channels, encoder->audio->frame_size, encoder->audio->sample_fmt, 0);
		encoder->postaudioBuffer = av_malloc(encoder->postaudioBufferSize);
		avcodec_fill_audio_frame(encoder->audioFrame, encoder->audio->channels, encoder->audio->sample_fmt, (const uint8_t*) encoder->postaudioBuffer, encoder->postaudioBufferSize, 0);

		// Fixed: strcasecmp() returns 0 on a match, so the old
		// "strcasecmp(...) || ..." test was always true and the bitstream
		// filter was applied to every container, not just the MP4 family.
		if (encoder->audio->codec->id == AV_CODEC_ID_AAC &&
		    (!strcasecmp(encoder->containerFormat, "mp4") ||
		        !strcasecmp(encoder->containerFormat, "m4v") ||
		        !strcasecmp(encoder->containerFormat, "mov"))) {
			// MP4 container doesn't support the raw ADTS AAC format that the encoder spits out
#ifdef FFMPEG_USE_NEW_BSF
			av_bsf_alloc(av_bsf_get_by_name("aac_adtstoasc"), &encoder->absf);
			avcodec_parameters_from_context(encoder->absf->par_in, encoder->audio);
			av_bsf_init(encoder->absf);
#else
			encoder->absf = av_bitstream_filter_init("aac_adtstoasc");
#endif
		}
#ifdef FFMPEG_USE_CODECPAR
		avcodec_parameters_from_context(encoder->audioStream->codecpar, encoder->audio);
#endif
	}

#ifdef FFMPEG_USE_CODECPAR
	encoder->videoStream = avformat_new_stream(encoder->context, NULL);
	encoder->video = avcodec_alloc_context3(vcodec);
#else
	encoder->videoStream = avformat_new_stream(encoder->context, vcodec);
	encoder->video = encoder->videoStream->codec;
#endif
	encoder->video->bit_rate = encoder->videoBitrate;
	encoder->video->width = encoder->width;
	encoder->video->height = encoder->height;
	encoder->video->time_base = (AVRational) { VIDEO_TOTAL_LENGTH, GBA_ARM7TDMI_FREQUENCY };
	encoder->video->pix_fmt = encoder->pixFormat;
	encoder->video->gop_size = 60;
	encoder->video->max_b_frames = 3;
	if (encoder->context->oformat->flags & AVFMT_GLOBALHEADER) {
#ifdef AV_CODEC_FLAG_GLOBAL_HEADER
		encoder->video->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
#else
		encoder->video->flags |= CODEC_FLAG_GLOBAL_HEADER;
#endif
	}

	// Fixed: same inverted strcasecmp() logic as the AAC check above —
	// only force YUV420 for the containers that actually need it.
	if (encoder->video->codec->id == AV_CODEC_ID_H264 &&
	    (!strcasecmp(encoder->containerFormat, "mp4") ||
	        !strcasecmp(encoder->containerFormat, "m4v") ||
	        !strcasecmp(encoder->containerFormat, "mov"))) {
		// QuickTime and a few other things require YUV420
		encoder->video->pix_fmt = AV_PIX_FMT_YUV420P;
	}

	if (strcmp(vcodec->name, "libx264") == 0) {
		// Try to adaptively figure out when you can use a slower encoder
		if (encoder->width * encoder->height > 1000000) {
			av_opt_set(encoder->video->priv_data, "preset", "superfast", 0);
		} else if (encoder->width * encoder->height > 500000) {
			av_opt_set(encoder->video->priv_data, "preset", "veryfast", 0);
		} else {
			av_opt_set(encoder->video->priv_data, "preset", "faster", 0);
		}
		if (encoder->videoBitrate == 0) {
			// Bitrate 0 means lossless: crf=0 needs 4:4:4 chroma.
			av_opt_set(encoder->video->priv_data, "crf", "0", 0);
			encoder->video->pix_fmt = AV_PIX_FMT_YUV444P;
		}
	}

	avcodec_open2(encoder->video, vcodec, 0);
#if LIBAVCODEC_VERSION_MAJOR >= 55
	encoder->videoFrame = av_frame_alloc();
#else
	encoder->videoFrame = avcodec_alloc_frame();
#endif
	encoder->videoFrame->format = encoder->video->pix_fmt;
	encoder->videoFrame->width = encoder->video->width;
	encoder->videoFrame->height = encoder->video->height;
	encoder->videoFrame->pts = 0;
	_ffmpegSetVideoDimensions(&encoder->d, encoder->iwidth, encoder->iheight);
	av_image_alloc(encoder->videoFrame->data, encoder->videoFrame->linesize, encoder->video->width, encoder->video->height, encoder->video->pix_fmt, 32);
#ifdef FFMPEG_USE_CODECPAR
	avcodec_parameters_from_context(encoder->videoStream->codecpar, encoder->video);
#endif

	if (avio_open(&encoder->context->pb, outfile, AVIO_FLAG_WRITE) < 0) {
		return false;
	}
	return avformat_write_header(encoder->context, 0) >= 0;
}
/*
 * Decode packets from the audio queue until one produces audible data,
 * resample it to packed S16 into is->audio_buf, update the audio clock,
 * and return the number of bytes produced (or -1 on quit/queue error).
 * *pts_ptr receives the presentation time of the returned samples.
 *
 * Fixes vs. the previous version:
 *  - the per-call SwrContext is now released on EVERY exit path (it used
 *    to leak on the -1 returns, and freeing it mid-loop could leave later
 *    iterations calling swr_convert() on a NULL context);
 *  - the audio clock advances by the bytes actually produced
 *    (t_audio_size); it previously added data_size, which was always 0.
 */
int audio_decode_frame(VideoState *is, double *pts_ptr) {
  int len1, n;
  AVPacket *pkt = &is->audio_pkt;
  double pts;

    uint8_t *out[] = {is->audio_buf};
    int64_t wanted_channel_layout = is->audio_st->codec->channel_layout;
    /* Same rate and layout in and out — only the sample format changes
     * (native codec format -> packed S16). */
    SwrContext* t_audio_conv = swr_alloc_set_opts(NULL,
                                                  wanted_channel_layout, AV_SAMPLE_FMT_S16, is->audio_st->codec->sample_rate,
                                                  wanted_channel_layout, is->audio_st->codec->sample_fmt, is->audio_st->codec->sample_rate,
                                                  0, NULL);
    swr_init(t_audio_conv);

    for(;;) {
        while(is->audio_pkt_size > 0)
        {
            int got_frame = 0;
            int t_audio_size = 0;
            len1 = avcodec_decode_audio4(is->audio_st->codec, &is->audio_frame, &got_frame, pkt);
            if(len1 < 0) {
                /* if error, skip frame */
                is->audio_pkt_size = 0;
                break;
            }
            if (got_frame)
            {
                int size1 = av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
                /* Cap the output at what audio_buf can hold. */
                int len = swr_convert(t_audio_conv,
                                      out, (MAX_AUDIO_FRAME_SIZE * 3) / 2/is->audio_st->codec->channels/size1,
                                      (const uint8_t **)is->audio_frame.extended_data, is->audio_frame.nb_samples);

                t_audio_size = len * is->audio_st->codec->channels * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
            }
            is->audio_pkt_data += len1;
            is->audio_pkt_size -= len1;
            if(t_audio_size <= 0) {
                /* No data yet, get more frames */
                continue;
            }
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2*is->audio_st->codec->channels;  /* bytes per S16 sample across all channels */
            is->audio_clock += (double)t_audio_size /
            (double)(n * is->audio_st->codec->sample_rate);

            /* We have data, return it and come back for more later */
            swr_free(&t_audio_conv);
            return t_audio_size;
        }
        if(pkt->data)
            av_free_packet(pkt);

        if(is->quit) {
            swr_free(&t_audio_conv);
            return -1;
        }
        /* next packet */
        if(packet_queue_get(&is->audioq, pkt, 1) < 0) {
            swr_free(&t_audio_conv);
            return -1;
        }
        is->audio_pkt_data = pkt->data;
        is->audio_pkt_size = pkt->size;

        /* if update, update the audio clock w/pts */
        if(pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
예제 #25
0
/*
 *TODO: broken sample rate(AAC), see mplayer
 */
// Build (or rebuild) the swresample context from in_format/out_format.
// Missing fields are filled with sensible defaults first (channels from
// layout, layout from channel count, out rate from in rate), then the
// context is created, an optional mono-source channel mapping / remix
// matrix is installed, and swr_init() is run.
// Returns false if the input format is unset or context creation/init fails.
bool AudioResamplerFF::prepare()
{
    DPTR_D(AudioResamplerFF);
    if (!d.in_format.isValid()) {
        qWarning("src audio parameters 'channel layout(or channels), sample rate and sample format must be set before initialize resampler");
        return false;
    }
    //TODO: also in do this statistics
    // Fill in whichever of channels / channel layout is missing from the other.
    if (!d.in_format.channels()) {
        if (!d.in_format.channelLayoutFFmpeg()) { //FIXME: already return
            d.in_format.setChannels(2);
            d.in_format.setChannelLayoutFFmpeg(av_get_default_channel_layout(d.in_format.channels())); //from mplayer2
            qWarning("both channels and channel layout are not available, assume channels=%d, channel layout=%lld", d.in_format.channels(), d.in_format.channelLayoutFFmpeg());
        } else {
            d.in_format.setChannels(av_get_channel_layout_nb_channels(d.in_format.channelLayoutFFmpeg()));
        }
    }
    if (!d.in_format.channels())
        d.in_format.setChannels(2); //TODO: why av_get_channel_layout_nb_channels() may return 0?
    if (!d.in_format.channelLayoutFFmpeg()) {
        qWarning("channel layout not available, use default layout");
        d.in_format.setChannelLayoutFFmpeg(av_get_default_channel_layout(d.in_format.channels()));
    }
    // Output side: default to the input's channel setup when unspecified.
    if (!d.out_format.channels()) {
        if (d.out_format.channelLayoutFFmpeg()) {
            d.out_format.setChannels(av_get_channel_layout_nb_channels(d.out_format.channelLayoutFFmpeg()));
        } else {
            d.out_format.setChannels(d.in_format.channels());
            d.out_format.setChannelLayoutFFmpeg(d.in_format.channelLayoutFFmpeg());
        }
    }
    if (d.out_format.channelLayout() == AudioFormat::ChannelLayout_Unsupported) {
        d.out_format.setChannels(d.in_format.channels());
        d.out_format.setChannelLayoutFFmpeg(d.in_format.channelLayoutFFmpeg());
    }
    //now we have out channels
    if (!d.out_format.channelLayoutFFmpeg())
        d.out_format.setChannelLayoutFFmpeg(av_get_default_channel_layout(d.out_format.channels()));
    if (!d.out_format.sampleRate())
        d.out_format.setSampleRate(inAudioFormat().sampleRate());
    if (d.speed <= 0)
        d.speed = 1.0;
    //DO NOT set sample rate here, we should keep the original and multiply 1/speed when needed
    //if (d.speed != 1.0)
    //    d.out_format.setSampleRate(int(qreal(d.out_format.sampleFormat())/d.speed));
    qDebug("swr speed=%.2f", d.speed);

    //d.in_planes = av_sample_fmt_is_planar((enum AVSampleFormat)d.in_sample_format) ? d.in_channels : 1;
    //d.out_planes = av_sample_fmt_is_planar((enum AVSampleFormat)d.out_sample_format) ? d.out_channels : 1;
    if (d.context)
        swr_free(&d.context); //TODO: if no free(of cause free is required), why channel mapping and layout not work if change from left to stero?
    //If use swr_alloc() need to set the parameters (av_opt_set_xxx() manually or with swr_alloc_set_opts()) before calling swr_init()
    // Playback speed is implemented by scaling the OUTPUT sample rate.
    d.context = swr_alloc_set_opts(d.context
                                   , d.out_format.channelLayoutFFmpeg()
                                   , (enum AVSampleFormat)outAudioFormat().sampleFormatFFmpeg()
                                   , qreal(outAudioFormat().sampleRate())/d.speed
                                   , d.in_format.channelLayoutFFmpeg()
                                   , (enum AVSampleFormat)inAudioFormat().sampleFormatFFmpeg()
                                   , inAudioFormat().sampleRate()
                                   , 0 /*log_offset*/, 0 /*log_ctx*/);
    /*
    av_opt_set_int(d.context, "in_channel_layout",    d.in_channel_layout, 0);
    av_opt_set_int(d.context, "in_sample_rate",       d.in_format.sampleRate(), 0);
    av_opt_set_sample_fmt(d.context, "in_sample_fmt", (enum AVSampleFormat)in_format.sampleFormatFFmpeg(), 0);
    av_opt_set_int(d.context, "out_channel_layout",    d.out_channel_layout, 0);
    av_opt_set_int(d.context, "out_sample_rate",       d.out_format.sampleRate(), 0);
    av_opt_set_sample_fmt(d.context, "out_sample_fmt", (enum AVSampleFormat)out_format.sampleFormatFFmpeg(), 0);
    */
    qDebug("out: {cl: %lld, fmt: %s, freq: %d}"
           , d.out_format.channelLayoutFFmpeg()
           , qPrintable(d.out_format.sampleFormatName())
           , d.out_format.sampleRate());
    qDebug("in {cl: %lld, fmt: %s, freq: %d}"
           , d.in_format.channelLayoutFFmpeg()
           , qPrintable(d.in_format.sampleFormatName())
           , d.in_format.sampleRate());

    if (!d.context) {
        qWarning("Allocat swr context failed!");
        return false;
    }
    //avresample 0.0.2(FFmpeg 0.11)~1.0.1(FFmpeg 1.1) has no channel mapping. but has remix matrix, so does swresample
//TODO: why crash if use channel mapping for L or R?
#if QTAV_HAVE(SWR_AVR_MAP) //LIBAVRESAMPLE_VERSION_INT < AV_VERSION_INT(1, 1, 0)
    bool remix = false;
    int in_c = d.in_format.channels();
    int out_c = d.out_format.channels();
    /*
     * matrix[i + stride * o] is the weight of input channel i in output channel o.
     */
    double *matrix = 0;
    // "Left"/"Right" layouts: feed only input channel 0 (resp. 1) to every output.
    if (d.out_format.channelLayout() == AudioFormat::ChannelLayout_Left) {
        remix = true;
        matrix = (double*)calloc(in_c*out_c, sizeof(double));
        for (int o = 0; o < out_c; ++o) {
            matrix[0 + in_c * o] = 1;
        }
    }
    if (d.out_format.channelLayout() == AudioFormat::ChannelLayout_Right) {
        remix = true;
        matrix = (double*)calloc(in_c*out_c, sizeof(double));
        for (int o = 0; o < out_c; ++o) {
            matrix[1 + in_c * o] = 1;
        }
    }
    // Upmix: cycle input channels across the extra output channels.
    if (!remix && in_c < out_c) {
        remix = true;
        //double matrix[in_c*out_c]; //C99, VLA
        matrix = (double*)calloc(in_c*out_c, sizeof(double));
        for (int i = 0, o = 0; o < out_c; ++o) {
            matrix[i + in_c * o] = 1;
            // NOTE(review): "(i + i)" keeps i at 0 forever, so every output
            // channel is fed from input channel 0 — likely meant "(i + 1)";
            // confirm the intended upmix before changing.
            i = (i + i)%in_c;
        }
    }
    if (remix && matrix) {
        avresample_set_matrix(d.context, matrix, in_c);
        free(matrix);
    }
#else
    bool use_channel_map = false;
    if (d.out_format.channelLayout() == AudioFormat::ChannelLayout_Left) {
        use_channel_map = true;
        memset(d.channel_map, 0, sizeof(d.channel_map));
        for (int i = 0; i < d.out_format.channels(); ++i) {
            d.channel_map[i] = 0;
        }
    }
    if (d.out_format.channelLayout() == AudioFormat::ChannelLayout_Right) {
        use_channel_map = true;
        memset(d.channel_map, 0, sizeof(d.channel_map));
        for (int i = 0; i < d.out_format.channels(); ++i) {
            d.channel_map[i] = 1;
        }
    }
    if (!use_channel_map && d.in_format.channels() < d.out_format.channels()) {
        use_channel_map = true;
        memset(d.channel_map, 0, sizeof(d.channel_map));
        for (int i = 0; i < d.out_format.channels(); ++i) {
            d.channel_map[i] = i % d.in_format.channels();
        }
    }
    if (use_channel_map) {
        av_opt_set_int(d.context, "icl", d.out_format.channelLayoutFFmpeg(), 0);
        //TODO: why crash if layout is mono and set uch(i.e. always the next line)
        av_opt_set_int(d.context, "uch", d.out_format.channels(), 0);
        swr_set_channel_mapping(d.context, d.channel_map);
    }
#endif //QTAV_HAVE(SWR_AVR_MAP)
    int ret = swr_init(d.context);
    if (ret < 0) {
        qWarning("swr_init failed: %s", av_err2str(ret));
        swr_free(&d.context);
        return false;
    }
    return true;
}
예제 #26
0
/*
 * Pull one packet from queue qa, decode all frames in it, convert them to
 * stereo packed S16 with swresample and append the result to the shared
 * astream ring buffer (under astream.lock).
 * Returns 1 when a packet was consumed, 0 when the queue is empty, and -1
 * when the output buffer is already holding more than 2 seconds of data
 * (192000*2 bytes) and the caller should back off.
 *
 * The SwrContext and the source-format description are kept in function
 * statics so the converter is rebuilt only when the decoded frame's
 * format/layout/rate actually changes.
 */
int decode_audio(AVCodecContext  *ctx, queue_t *qa)
{
    static struct SwrContext *swr_ctx;
    static int64_t src_layout;
    static int src_freq;
    static int src_channels;
    static enum AVSampleFormat src_fmt = -1;
    static AVFrame *aFrame;

    AVPacket   pkt;
    AVPacket    pkt_tmp;
    int64_t dec_channel_layout;
    int len, len2;
    int got_frame;
    int data_size;


    if( astream.count > 192000*2)
        return -1;

    if( get_packet(qa, &pkt) == 0 )
        return 0;

 //          __asm__("int3");

    /* Lazily allocate the reusable decode frame (legacy pre-av_frame_alloc API). */
    if (!aFrame)
    {
        if (!(aFrame = avcodec_alloc_frame()))
            return -1;
    } else
        avcodec_get_frame_defaults(aFrame);

    pkt_tmp = pkt;

    /* A packet may contain several frames; walk it with a shadow copy so the
     * original pkt can still be freed in one piece at the end. */
    while(pkt_tmp.size > 0)
    {
        data_size = 192000;

//        len = avcodec_decode_audio3(ctx,(int16_t*)decoder_buffer,
//                                   &data_size, &pkt_tmp);
        got_frame = 0;
        len = avcodec_decode_audio4(ctx, aFrame, &got_frame, &pkt_tmp);

        if(len >= 0 && got_frame)
        {
            char *samples;
            int ch, plane_size;
            int planar    = av_sample_fmt_is_planar(ctx->sample_fmt);
            int data_size = av_samples_get_buffer_size(&plane_size, ctx->channels,
                                                   aFrame->nb_samples,
                                                   ctx->sample_fmt, 1);

//            if(audio_base == -1.0)
//            {
//                if (pkt.pts != AV_NOPTS_VALUE)
//                    audio_base = get_audio_base() * pkt.pts;
//                printf("audio base %f\n", audio_base);
//            };

            pkt_tmp.data += len;
            pkt_tmp.size -= len;

            /* Trust the frame's own layout only when it matches its channel
             * count; otherwise derive a default layout from the count. */
            dec_channel_layout =
                (aFrame->channel_layout && aFrame->channels == av_get_channel_layout_nb_channels(aFrame->channel_layout)) ?
                aFrame->channel_layout : av_get_default_channel_layout(aFrame->channels);

            /* (Re)build the converter whenever the source format changes. */
            if (aFrame->format          != src_fmt     ||
                dec_channel_layout      != src_layout  ||
                aFrame->sample_rate     != src_freq    ||
                !swr_ctx)
            {
                swr_free(&swr_ctx);
                swr_ctx = swr_alloc_set_opts(NULL, AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16,
                                             aFrame->sample_rate, dec_channel_layout,aFrame->format,
                                             aFrame->sample_rate, 0, NULL);
                if (!swr_ctx || swr_init(swr_ctx) < 0)
                {
                    printf("Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
                        aFrame->sample_rate,   av_get_sample_fmt_name(aFrame->format), (int)aFrame->channels,
                        aFrame->sample_rate, av_get_sample_fmt_name(AV_SAMPLE_FMT_S16), 2);
                    break;
                }

                src_layout   = dec_channel_layout;
                src_channels = aFrame->channels;
                src_freq     = aFrame->sample_rate;
                src_fmt      = aFrame->format;
            };

            if (swr_ctx)
            {
                const uint8_t **in = (const uint8_t **)aFrame->extended_data;
                uint8_t *out[] = {decoder_buffer};
                int out_count = 192000 * 3 / 2 / av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
                len2 = swr_convert(swr_ctx, out, out_count, in, aFrame->nb_samples);
                if (len2 < 0) {
                    printf("swr_convert() failed\n");
                    break;
                }
                if (len2 == out_count) {
                    printf("warning: audio buffer is probably too small\n");
                    swr_init(swr_ctx);
                }
                /* bytes produced = samples * 2 channels * bytes per S16 sample */
                data_size = len2 * 2 * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);

                mutex_lock(&astream.lock);

                samples = astream.buffer+astream.count;

                memcpy(samples, decoder_buffer, data_size);
/*
            memcpy(samples, aFrame->extended_data[0], plane_size);

            if (planar && ctx->channels > 1)
            {
                uint8_t *out = ((uint8_t *)samples) + plane_size;
                for (ch = 1; ch < ctx->channels; ch++)
                {
                    memcpy(out, aFrame->extended_data[ch], plane_size);
                    out += plane_size;
                }
            }
*/
                astream.count += data_size;
                mutex_unlock(&astream.lock);
            };
       }
       else pkt_tmp.size = 0;
    }
    av_free_packet(&pkt);
    return 1;
};
/*
 * (Re)create the module-global swresample context that converts the
 * captured CoreAudio stream described by `w` into the RTSP output format,
 * and (re)allocate the matching resample buffer / audio source.
 * Returns 0 on success, -1 on failure.
 */
static int
ca_create_swrctx(WAVEFORMATEX *w) {
	struct RTSPConf *rtspconf = rtspconf_global();
	int bufreq, samples;
	// Tear down any previous context/buffer before rebuilding.
	if(swrctx != NULL)
		swr_free(&swrctx);
	if(audio_buf != NULL) {
		free(audio_buf);
		// Null the pointer immediately: if an error path below returns -1
		// before the new malloc(), a later call would otherwise free()
		// this stale pointer a second time.
		audio_buf = NULL;
	}
	//
	ga_error("CoreAudio: create swr context - format[%x] freq[%d] channels[%d]\n",
		w->wFormatTag, w->nSamplesPerSec, w->nChannels);
	//
	swrctx = swr_alloc_set_opts(NULL,
		rtspconf->audio_device_channel_layout,
		rtspconf->audio_device_format,
		rtspconf->audio_samplerate,
		CA2SWR_chlayout(w->nChannels),
		CA2SWR_format(w),
		w->nSamplesPerSec, 0, NULL);
	if(swrctx == NULL) {
		ga_error("CoreAudio: cannot create resample context.\n");
		return -1;
	} else {
		ga_error("CoreAudio: resample context (%x,%d,%d) -> (%x,%d,%d)\n",
			(int) CA2SWR_chlayout(w->nChannels),
			(int) CA2SWR_format(w),
			(int) w->nSamplesPerSec,
			(int) rtspconf->audio_device_channel_layout,
			(int) rtspconf->audio_device_format,
			(int) rtspconf->audio_samplerate);
	}
	if(swr_init(swrctx) < 0) {
		swr_free(&swrctx);
		swrctx = NULL;
		ga_error("CoreAudio: resample context init failed.\n");
		return -1;
	}
	// Cache the output parameters and size the resample buffer for the
	// worst case (samples*2 gives extra headroom over the rescaled count).
	ga_samplerate = rtspconf->audio_samplerate;
	ga_channels = av_get_channel_layout_nb_channels(rtspconf->audio_device_channel_layout);
	ca_samplerate = w->nSamplesPerSec;
	ca_bytes_per_sample = w->wBitsPerSample/8;
	samples = av_rescale_rnd(CA_MAX_SAMPLES,
			rtspconf->audio_samplerate, w->nSamplesPerSec, AV_ROUND_UP);
	bufreq = av_samples_get_buffer_size(NULL,
			rtspconf->audio_channels, samples*2,
			rtspconf->audio_device_format,
			1/*no-alignment*/);
	if((audio_buf = (unsigned char *) malloc(bufreq)) == NULL) {
		ga_error("CoreAudio: cannot allocate resample memory.\n");
		return -1;
	}
	if(audio_source_setup(bufreq, rtspconf->audio_samplerate,
				16/* depends on format */,
				rtspconf->audio_channels) < 0) {
		ga_error("CoreAudio: audio source setup failed.\n");
		return -1;
	}
	ga_error("CoreAudio: max %d samples with %d byte(s) resample buffer allocated.\n",
		samples, bufreq);
	//
	return 0;
}
예제 #28
0
// Set up the AC3 transcode encoder (DTS path is disabled below): pick the
// codec, negotiate a sample format the codec supports against the caller's
// `format`, open the codec, and — when the codec wants a format we cannot
// feed directly — create a float->codec-format swresample context.
// `format` is updated in place with the negotiated data format and the
// frame geometry the caller must deliver.  Returns true on success.
bool CAEEncoderFFmpeg::Initialize(AEAudioFormat &format, bool allow_planar_input)
{
  Reset();

  bool ac3 = CSettings::Get().GetBool("audiooutput.ac3passthrough");

  AVCodec *codec = NULL;
#if 0
  /* the DCA encoder is currently useless for transcode, it creates a 196 kHz DTS-HD like mongrel which is useless for SPDIF */
  bool dts = CSettings::Get().GetBool("audiooutput.dtspassthrough");
  if (dts && (!ac3 || g_advancedSettings.m_audioTranscodeTo.Equals("dts")))
  {
    m_CodecName = "DTS";
    m_CodecID   = AV_CODEC_ID_DTS;
    m_PackFunc  = &CAEPackIEC61937::PackDTS_1024;
    m_BitRate   = DTS_ENCODE_BITRATE;
    codec = avcodec_find_encoder(m_CodecID);
  }
#endif

  /* fallback to ac3 if we support it, we might not have DTS support */
  if (!codec && ac3)
  {
    m_CodecName = "AC3";
    m_CodecID   = AV_CODEC_ID_AC3;
    m_PackFunc  = &CAEPackIEC61937::PackAC3;
    m_BitRate   = AC3_ENCODE_BITRATE;
    codec = avcodec_find_encoder(m_CodecID);
  }

  /* check we got the codec */
  if (!codec)
    return false;

  m_CodecCtx                 = avcodec_alloc_context3(codec);
  m_CodecCtx->bit_rate       = m_BitRate;
  m_CodecCtx->sample_rate    = format.m_sampleRate;
  m_CodecCtx->channel_layout = AV_CH_LAYOUT_5POINT1_BACK;

  /* select a suitable data format */
  // Scan the codec's supported sample formats, then choose the best match
  // in preference order: FLT > FLTP (if planar allowed) > DBL > S32 > S16 > U8.
  // Anything else forces a conversion from float (m_NeedConversion).
  if (codec->sample_fmts)
  {
    bool hasFloat  = false;
    bool hasDouble = false;
    bool hasS32    = false;
    bool hasS16    = false;
    bool hasU8     = false;
    bool hasFloatP = false;
    bool hasUnknownFormat = false;

    for(int i = 0; codec->sample_fmts[i] != AV_SAMPLE_FMT_NONE; ++i)
    {
      switch (codec->sample_fmts[i])
      {
        case AV_SAMPLE_FMT_FLT: hasFloat  = true; break;
        case AV_SAMPLE_FMT_DBL: hasDouble = true; break;
        case AV_SAMPLE_FMT_S32: hasS32    = true; break;
        case AV_SAMPLE_FMT_S16: hasS16    = true; break;
        case AV_SAMPLE_FMT_U8 : hasU8     = true; break;
        case AV_SAMPLE_FMT_FLTP:
          if (allow_planar_input)
            hasFloatP  = true;
          else
            hasUnknownFormat = true;
          break;
        case AV_SAMPLE_FMT_NONE: return false;
        default: hasUnknownFormat = true; break;
      }
    }

    if (hasFloat)
    {
      m_CodecCtx->sample_fmt = AV_SAMPLE_FMT_FLT;
      format.m_dataFormat    = AE_FMT_FLOAT;
    }
    else if (hasFloatP)
    {
      m_CodecCtx->sample_fmt = AV_SAMPLE_FMT_FLTP;
      format.m_dataFormat    = AE_FMT_FLOATP;
    }
    else if (hasDouble)
    {
      m_CodecCtx->sample_fmt = AV_SAMPLE_FMT_DBL;
      format.m_dataFormat    = AE_FMT_DOUBLE;
    }
    else if (hasS32)
    {
      m_CodecCtx->sample_fmt = AV_SAMPLE_FMT_S32;
      format.m_dataFormat    = AE_FMT_S32NE;
    }
    else if (hasS16)
    {
      m_CodecCtx->sample_fmt = AV_SAMPLE_FMT_S16;
      format.m_dataFormat    = AE_FMT_S16NE;
    }
    else if (hasU8)
    {
      m_CodecCtx->sample_fmt = AV_SAMPLE_FMT_U8;
      format.m_dataFormat    = AE_FMT_U8;
    }
    else if (hasUnknownFormat)
    {
      // Codec wants a format we cannot produce directly: take its first
      // choice and resample from float into it later.
      m_CodecCtx->sample_fmt = codec->sample_fmts[0];
      format.m_dataFormat    = AE_FMT_FLOAT;
      m_NeedConversion       = true;
      CLog::Log(LOGNOTICE, "CAEEncoderFFmpeg::Initialize - Unknown audio format, it will be resampled.");
    }
    else
    {
      CLog::Log(LOGERROR, "CAEEncoderFFmpeg::Initialize - Unable to find a suitable data format for the codec (%s)", m_CodecName.c_str());
      return false;
    }
  }

  m_CodecCtx->channels = BuildChannelLayout(m_CodecCtx->channel_layout, m_Layout);

  /* open the codec */
  if (avcodec_open2(m_CodecCtx, codec, NULL))
  {
    av_freep(&m_CodecCtx);
    return false;
  }

  // Report the frame geometry the caller must deliver per encode call.
  format.m_frames        = m_CodecCtx->frame_size;
  format.m_frameSamples  = m_CodecCtx->frame_size * m_CodecCtx->channels;
  format.m_frameSize     = m_CodecCtx->channels * (CAEUtil::DataFormatToBits(format.m_dataFormat) >> 3);
  format.m_channelLayout = m_Layout;

  m_CurrentFormat = format;
  m_NeededFrames  = format.m_frames;
  m_OutputSize    = m_PackFunc(NULL, 0, m_Buffer);
  m_OutputRatio   = (double)m_NeededFrames / m_OutputSize;
  m_SampleRateMul = 1.0 / (double)m_CodecCtx->sample_rate;

  if (m_NeedConversion)
  {
    // Convert packed float input to whatever format the codec demanded,
    // same layout and rate on both sides.
    // NOTE(review): on failure, m_SwrCtx and the opened codec context are
    // not released here — presumably Reset()/the destructor handles that;
    // confirm to rule out a leak on this path.
    m_SwrCtx = swr_alloc_set_opts(NULL,
                      m_CodecCtx->channel_layout, m_CodecCtx->sample_fmt, m_CodecCtx->sample_rate,
                      m_CodecCtx->channel_layout, AV_SAMPLE_FMT_FLT, m_CodecCtx->sample_rate,
                      0, NULL);
    if (!m_SwrCtx || swr_init(m_SwrCtx) < 0)
    {
      CLog::Log(LOGERROR, "CAEEncoderFFmpeg::Initialize - Failed to initialise resampler.");
      return false;
    }
  }
  CLog::Log(LOGNOTICE, "CAEEncoderFFmpeg::Initialize - %s encoder ready", m_CodecName.c_str());
  return true;
}
예제 #29
0
/*
 * BE_ST_InitAudio: Bring up the SDL audio subsystem and prepare sample
 * rate conversion for sound emulation/playback.
 *
 * - Picks the input sample rate: the game's digitized-sound rate when the
 *   selected game version has one, otherwise the OPL emulation rate.
 * - Opens a mono SDL audio device with a power-of-two buffer size roughly
 *   proportional to the configured output rate.
 * - If the audio subsystem is disabled or fails to come up, falls back to
 *   simulating a 1000Hz byte rate (matching SDL_GetTicks() units) so the
 *   callback-driven timing still works.
 * - When the output rate differs from the input rate, initializes one of
 *   several compile-time-selected resampler backends, or a simple
 *   sample-duplication table when no resampler library is built in.
 *
 * Fix: the libavresample branch previously reported "swr_init failed!"
 * on avresample_open() failure (copy-paste from the swresample branch);
 * the message now names the call that actually failed.
 */
void BE_ST_InitAudio(void)
{
	g_sdlAudioSubsystemUp = false;
	g_sdlEmulatedOPLChipReady = false;
	// A non-zero rate means the selected game version uses digitized sounds.
	int inSampleRate = BE_Cross_GetSelectedGameVerSampleRate();
	bool doDigitized = (inSampleRate != 0);
	if (!doDigitized)
		inSampleRate = OPL_SAMPLE_RATE;

	if (g_refKeenCfg.sndSubSystem)
	{
		if (SDL_InitSubSystem(SDL_INIT_AUDIO) < 0)
		{
			BE_Cross_LogMessage(BE_LOG_MSG_WARNING, "SDL audio system initialization failed,\n%s\n", SDL_GetError());
		}
		else
		{
			g_sdlAudioSpec.freq = g_refKeenCfg.sndSampleRate;
#ifdef MIXER_SAMPLE_FORMAT_FLOAT
			g_sdlAudioSpec.format = AUDIO_F32SYS;
#elif (defined MIXER_SAMPLE_FORMAT_SINT16)
			g_sdlAudioSpec.format = AUDIO_S16SYS;
#endif
			g_sdlAudioSpec.channels = 1;
			// Should be some power-of-two roughly proportional to the sample rate; Using 1024 for 48000Hz.
			for (g_sdlAudioSpec.samples = 1; g_sdlAudioSpec.samples < g_refKeenCfg.sndSampleRate/64; g_sdlAudioSpec.samples *= 2)
			{
			}

			// Pick the callback: "Simple" variants when no rate conversion
			// is needed, "Resampling" variants otherwise.
			if (doDigitized)
				g_sdlAudioSpec.callback = (g_refKeenCfg.sndSampleRate == inSampleRate) ? BEL_ST_Simple_DigiCallBack : BEL_ST_Resampling_DigiCallBack;
			else
				g_sdlAudioSpec.callback = ((g_refKeenCfg.sndSampleRate == inSampleRate) || !g_refKeenCfg.oplEmulation) ? BEL_ST_Simple_EmuCallBack : BEL_ST_Resampling_EmuCallBack;

			g_sdlAudioSpec.userdata = NULL;
			if (SDL_OpenAudio(&g_sdlAudioSpec, NULL))
			{
				BE_Cross_LogMessage(BE_LOG_MSG_WARNING, "Cannot open SDL audio device,\n%s\n", SDL_GetError());
				SDL_QuitSubSystem(SDL_INIT_AUDIO);
			}
			else
			{
#ifdef REFKEEN_CONFIG_THREADS
				g_sdlCallbackMutex = SDL_CreateMutex();
				if (!g_sdlCallbackMutex)
				{
					BE_Cross_LogMessage(BE_LOG_MSG_ERROR, "Cannot create recursive mutex for SDL audio callback,\n%s\nClosing SDL audio subsystem\n", SDL_GetError());
					SDL_CloseAudio();
					SDL_QuitSubSystem(SDL_INIT_AUDIO);
				}
				else
#endif
				{
					BE_Cross_LogMessage(BE_LOG_MSG_NORMAL, "Audio subsystem initialized, requested spec: freq %d, format %u, channels %d, samples %u\n", (int)g_sdlAudioSpec.freq, (unsigned int)g_sdlAudioSpec.format, (int)g_sdlAudioSpec.channels, (unsigned int)g_sdlAudioSpec.samples);
					g_sdlAudioSubsystemUp = true;
				}
			}
		}
	}
	// If the audio subsystem is off, let us simulate a byte rate
	// of 1000Hz (same as SDL_GetTicks() time units)
	if (!g_sdlAudioSubsystemUp)
	{
		g_sdlAudioSpec.freq = doDigitized ? inSampleRate : (NUM_OF_BYTES_FOR_SOUND_CALLBACK_WITH_DISABLED_SUBSYSTEM / sizeof(BE_ST_SndSample_T));
		g_sdlAudioSpec.callback = doDigitized ? BEL_ST_Resampling_DigiCallBack : BEL_ST_Resampling_EmuCallBack;
		return;
	}

	if (g_refKeenCfg.oplEmulation)
	{
		YM3812Init(1, 3579545, OPL_SAMPLE_RATE);
		g_sdlEmulatedOPLChipReady = true;
	}

	// A resampler is only needed when there is something to play (digitized
	// sounds or OPL emulation) and the device rate differs from the input rate.
	if ((doDigitized || g_sdlEmulatedOPLChipReady) && (g_sdlAudioSpec.freq != inSampleRate))
	{
		// Should allocate this first, for g_sdlSrcData.data_in
		g_sdlMiscOutNumOfSamples = 2*g_sdlAudioSpec.samples;
		g_sdlMiscOutSamples = (BE_ST_SndSample_T *)malloc(sizeof(BE_ST_SndSample_T) * g_sdlMiscOutNumOfSamples);
		if (g_sdlMiscOutSamples == NULL)
			BE_ST_ExitWithErrorMsg("BE_ST_InitAudio: Out of memory! (Failed to allocate g_sdlMiscOutSamples.)");

#ifndef REFKEEN_RESAMPLER_NONE
		if (g_refKeenCfg.useResampler)
		{
#if (!defined REFKEEN_RESAMPLER_LIBRESAMPLE) && (!defined REFKEEN_RESAMPLER_LIBAVCODEC)
			char errMsg[160];
#endif

#if (defined REFKEEN_RESAMPLER_LIBSWRESAMPLE)
			g_sdlSwrContext = swr_alloc_set_opts(
				NULL,                // allocating a new context
				AV_CH_LAYOUT_MONO,   // out channels layout
				AV_SAMPLE_FMT_S16,   // out format
				g_sdlAudioSpec.freq, // out rate
				AV_CH_LAYOUT_MONO,   // in channels layout
				AV_SAMPLE_FMT_S16,   // in format
				inSampleRate,        // in rate
				0,
				NULL
			);
			if (g_sdlSwrContext == NULL)
				BE_ST_ExitWithErrorMsg("BE_ST_InitAudio: swr_alloc_set_opts failed!");
			int error = swr_init(g_sdlSwrContext);
			if (error != 0)
			{
				// av_err2str requires libavutil/libavutil-ffmpeg, so don't convert code to string
				snprintf(errMsg, sizeof(errMsg), "BE_ST_InitAudio: swr_init failed! Error code: %d", error);
				BE_ST_ExitWithErrorMsg(errMsg);
			}
#elif (defined REFKEEN_RESAMPLER_LIBAVRESAMPLE)
			g_sdlAvAudioResampleContext = avresample_alloc_context();
			if (g_sdlAvAudioResampleContext == NULL)
				BE_ST_ExitWithErrorMsg("BE_ST_InitAudio: avresample_alloc_context failed!");
			av_opt_set_int(g_sdlAvAudioResampleContext, "in_channel_layout",  AV_CH_LAYOUT_MONO,   0);
			av_opt_set_int(g_sdlAvAudioResampleContext, "out_channel_layout", AV_CH_LAYOUT_MONO,   0);
			av_opt_set_int(g_sdlAvAudioResampleContext, "in_sample_rate",     inSampleRate,        0);
			av_opt_set_int(g_sdlAvAudioResampleContext, "out_sample_rate",    g_sdlAudioSpec.freq, 0);
			av_opt_set_int(g_sdlAvAudioResampleContext, "in_sample_fmt",      AV_SAMPLE_FMT_S16,   0);
			av_opt_set_int(g_sdlAvAudioResampleContext, "out_sample_fmt",     AV_SAMPLE_FMT_S16,   0);
			int error = avresample_open(g_sdlAvAudioResampleContext);
			if (error != 0)
			{
				// av_err2str requires libavutil/libavutil-ffmpeg, so don't convert code to string
				snprintf(errMsg, sizeof(errMsg), "BE_ST_InitAudio: avresample_open failed! Error code: %d", error);
				BE_ST_ExitWithErrorMsg(errMsg);
			}
#elif (defined REFKEEN_RESAMPLER_LIBAVCODEC)
			avcodec_register_all();
			g_sdlAvResampleContext = av_resample_init(
				g_sdlAudioSpec.freq,	// out rate
				inSampleRate,	// in rate
				16,	// filter length
				10,	// phase count
				0,	// linear FIR filter
				1.0	// cutoff frequency
			);
			if (g_sdlAvResampleContext == NULL)
				BE_ST_ExitWithErrorMsg("BE_ST_InitAudio: av_resample_init failed!");
#elif (defined REFKEEN_RESAMPLER_LIBRESAMPLE)
			g_sdlResampleFactor = (double)g_sdlAudioSpec.freq/inSampleRate;
			g_sdlResampleHandle = resample_open(0, g_sdlResampleFactor, g_sdlResampleFactor);
			if (g_sdlResampleHandle == NULL)
				BE_ST_ExitWithErrorMsg("BE_ST_InitAudio: resample_open failed!");
#elif (defined REFKEEN_RESAMPLER_LIBSOXR)
			soxr_io_spec_t io_spec = soxr_io_spec(SOXR_INT16, SOXR_INT16);
			soxr_quality_spec_t q_spec = soxr_quality_spec(SOXR_LQ, 0); // Default quality spec adds an audible latency for resampling to 8000Hz
			soxr_error_t error;
			g_sdlSoxr = soxr_create(
				inSampleRate, // in rate
				g_sdlAudioSpec.freq, // out rate
				1, // channels
				&error,
				&io_spec,
				&q_spec,
				NULL // runtime spec
			);
			if (g_sdlSoxr == NULL)
			{
				snprintf(errMsg, sizeof(errMsg), "BE_ST_InitAudio: soxr_create failed!\nError: %s", soxr_strerror(error));
				BE_ST_ExitWithErrorMsg(errMsg);
			}
#elif (defined REFKEEN_RESAMPLER_LIBSPEEXDSP)
			int error;
			g_sdlSpeexResamplerState = speex_resampler_init(
				1, // channels
				inSampleRate, // in rate
				g_sdlAudioSpec.freq, // out rate
				0, // quality in the range 0-10 (10 is higher)
				&error
			);
			if (g_sdlSpeexResamplerState == NULL)
			{
				snprintf(errMsg, sizeof(errMsg), "BE_ST_InitAudio: speex_resampler_init failed! Error code: %d\nError: %s", error, speex_resampler_strerror(error));
				BE_ST_ExitWithErrorMsg(errMsg);
			}
#elif (defined REFKEEN_RESAMPLER_LIBSAMPLERATE)
			int error;
			g_sdlSrcResampler = src_new(SRC_SINC_FASTEST, 1, &error);
			if (g_sdlSrcResampler == NULL)
			{
				snprintf(errMsg, sizeof(errMsg), "BE_ST_InitAudio: src_new failed!\nError code: %d", error);
				BE_ST_ExitWithErrorMsg(errMsg);
			}
			g_sdlSrcData.data_in = doDigitized ? g_sdlMiscOutSamples : g_sdlALOutSamples;
			g_sdlSrcData.src_ratio = (double)g_sdlAudioSpec.freq / inSampleRate;
#endif
		}
		else
#endif // REFKEEN_RESAMPLER_NONE
		{
			// The sum of all entries should be g_sdlAudioSpec.freq,
			// "uniformly" distributed over g_sdlALSampleRateConvTable
			g_sdlSampleRateConvTable = (int *)malloc(sizeof(int) * inSampleRate);
			if (g_sdlSampleRateConvTable == NULL)
				BE_ST_ExitWithErrorMsg("BE_ST_InitAudio: Failed to allocate memory for sample rate conversion!");
			g_sdlSampleRateConvTableSize = inSampleRate;
			for (int i = 0; i < inSampleRate; ++i)
			{
				// Using uint64_t cause an overflow is possible
				g_sdlSampleRateConvTable[i] = ((uint64_t)(i+1)*(uint64_t)g_sdlAudioSpec.freq/inSampleRate)-(uint64_t)i*(uint64_t)g_sdlAudioSpec.freq/inSampleRate;
			}
			g_sdlSampleRateConvCurrIndex = 0;
			g_sdlSampleRateConvCounter = 0;
		}
	}
}
예제 #30
0
//! Start playback of the currently loaded movie.
//!
//! Resets the video picture buffer and the audio buffering state, creates
//! fresh packet queues for the demuxer, hooks movie audio into SDL_mixer
//! (only when an audio stream was found, i.e. m_iAudioStream >= 0), and
//! spawns the stream-reader and video-decoder threads.
//!
//! \param destination_rect Region of the render target the video is drawn to.
//! \param iChannel SDL_mixer channel to play audio on (passed straight to
//!        Mix_PlayChannel; -1 lets the mixer pick the first free channel).
void THMovie::play(const SDL_Rect &destination_rect, int iChannel)
{
    m_destination_rect = SDL_Rect{ destination_rect.x, destination_rect.y, destination_rect.w, destination_rect.h };

    // A renderer must have been supplied before playback can start.
    if(!m_pRenderer)
    {
        m_sLastError = std::string("Cannot play before setting the renderer");
        return;
    }

    m_pVideoQueue = new THAVPacketQueue();
    m_pMoviePictureBuffer->reset();
    m_pMoviePictureBuffer->allocate(m_pRenderer, m_pVideoCodecContext->width, m_pVideoCodecContext->height);

    // Reset per-playback audio packet/buffer bookkeeping.
    m_pAudioPacket = nullptr;
    m_iAudioPacketSize = 0;
    m_pbAudioPacketData = nullptr;

    m_iAudioBufferSize = 0;
    m_iAudioBufferIndex = 0;
    m_iAudioBufferMaxSize = 0;

    m_pAudioQueue = new THAVPacketQueue();
    // Baseline for A/V sync: pts 0 corresponds to "now" in SDL ticks.
    m_iCurSyncPts = 0;
    m_iCurSyncPtsSystemTime = SDL_GetTicks();

    if(m_iAudioStream >= 0)
    {
        // Query the mixer's output rate/channel count so decoded audio can
        // be resampled to match it (S16, mono or stereo).
        Mix_QuerySpec(&m_iMixerFrequency, nullptr, &m_iMixerChannels);
#ifdef CORSIX_TH_USE_FFMPEG
        // Reuses m_pAudioResampleContext if one already exists (allowed by
        // swr_alloc_set_opts when passed a non-NULL context).
        m_pAudioResampleContext = swr_alloc_set_opts(
            m_pAudioResampleContext,
            m_iMixerChannels==1?AV_CH_LAYOUT_MONO:AV_CH_LAYOUT_STEREO,
            AV_SAMPLE_FMT_S16,
            m_iMixerFrequency,
            m_pAudioCodecContext->channel_layout,
            m_pAudioCodecContext->sample_fmt,
            m_pAudioCodecContext->sample_rate,
            0,
            nullptr);
        // NOTE(review): swr_init's return value is not checked here — a
        // failed init would surface later in the audio callback; confirm.
        swr_init(m_pAudioResampleContext);
#elif defined(CORSIX_TH_USE_LIBAV)
        m_pAudioResampleContext = avresample_alloc_context();
        av_opt_set_int(m_pAudioResampleContext, "in_channel_layout", m_pAudioCodecContext->channel_layout, 0);
        av_opt_set_int(m_pAudioResampleContext, "out_channel_layout", m_iMixerChannels == 1 ? AV_CH_LAYOUT_MONO : AV_CH_LAYOUT_STEREO, 0);
        av_opt_set_int(m_pAudioResampleContext, "in_sample_rate", m_pAudioCodecContext->sample_rate, 0);
        av_opt_set_int(m_pAudioResampleContext, "out_sample_rate", m_iMixerFrequency, 0);
        av_opt_set_int(m_pAudioResampleContext, "in_sample_fmt", m_pAudioCodecContext->sample_fmt, 0);
        av_opt_set_int(m_pAudioResampleContext, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
        avresample_open(m_pAudioResampleContext);
#endif
        // Play a silent raw chunk in a loop; the real audio is injected via
        // the registered effect callback below.
        m_pChunk = Mix_QuickLoad_RAW(m_pbChunkBuffer, ms_audioBufferSize);

        m_iChannel = Mix_PlayChannel(iChannel, m_pChunk, -1);
        if(m_iChannel < 0)
        {
            m_iChannel = -1;
            m_sLastError = std::string(Mix_GetError());
        }
        else
        {
            Mix_RegisterEffect(m_iChannel, th_movie_audio_callback, nullptr, this);
        }
    }

    m_pStreamThread = SDL_CreateThread(th_movie_stream_reader_thread, "Stream", this);
    m_pVideoThread = SDL_CreateThread(th_movie_video_thread, "Video", this);
}