static GstFlowReturn
gst_ffmpegaudioresample_transform (GstBaseTransform * trans, GstBuffer * inbuf,
    GstBuffer * outbuf)
{
  GstFFMpegAudioResample *resample = GST_FFMPEGAUDIORESAMPLE (trans);
  gint nbsamples;
  gint ret;

  gst_buffer_copy_metadata (outbuf, inbuf, GST_BUFFER_COPY_TIMESTAMPS);
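  /* 16-bit interleaved samples: bytes / (2 * channels) = samples per channel */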
  nbsamples = GST_BUFFER_SIZE (inbuf) / (2 * resample->in_channels);

  GST_LOG_OBJECT (resample, "input buffer duration:%" GST_TIME_FORMAT,
      GST_TIME_ARGS (GST_BUFFER_DURATION (inbuf)));

  GST_DEBUG_OBJECT (resample,
      "audio_resample(ctx, output:%p [size:%d], input:%p [size:%d], nbsamples:%d",
      GST_BUFFER_DATA (outbuf), GST_BUFFER_SIZE (outbuf),
      GST_BUFFER_DATA (inbuf), GST_BUFFER_SIZE (inbuf), nbsamples);

  ret = audio_resample (resample->res, (short *) GST_BUFFER_DATA (outbuf),
      (short *) GST_BUFFER_DATA (inbuf), nbsamples);

  GST_DEBUG_OBJECT (resample, "audio_resample returned %d", ret);

  GST_BUFFER_DURATION (outbuf) = gst_util_uint64_scale (ret, GST_SECOND,
      resample->out_rate);
  GST_BUFFER_SIZE (outbuf) = ret * 2 * resample->out_channels;

  GST_LOG_OBJECT (resample, "Output buffer duration:%" GST_TIME_FORMAT,
      GST_TIME_ARGS (GST_BUFFER_DURATION (outbuf)));

  return GST_FLOW_OK;
}
// Resample audio
VALUE audio_resampler_resample(VALUE self, VALUE frame) {
	AudioResamplerInternal * internal;
	Data_Get_Struct(self, AudioResamplerInternal, internal);

	AudioFrameInternal * internal_frame;
	Data_Get_Struct(frame, AudioFrameInternal, internal_frame);

	// Allocate enough memory for output samples
	// (we have to use the max channels, as there seems to be a bug in FFMPEG of overshooting the buffer when downsampling)
	int max_channels = (internal->src_channels > internal->dst_channels) ? internal->src_channels : internal->dst_channels;
	int bytes_per_sample = av_get_bytes_per_sample(internal->dst_format) * max_channels;

	uint8_t * dst_data = (uint8_t *)av_malloc((internal_frame->samples * internal->dst_rate / internal->src_rate + 32) * bytes_per_sample);
	if (!dst_data) rb_raise(rb_eNoMemError, "Failed to allocate new sample buffer");

	// Resample
	int dst_samples = audio_resample(internal->context,
									 (short *)dst_data,
									 (short *)internal_frame->data,
									 internal_frame->samples);

	// Wrap into Ruby object
	return audio_frame_new2(dst_data,
							internal->dst_channels,
							internal->dst_format,
							dst_samples,
							internal->dst_rate,
							internal_frame->timestamp,
							internal_frame->duration);
}
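The destination buffer above is sized with truncating integer arithmetic (`samples * dst_rate / src_rate + 32`) plus a max-channels workaround. Below is a minimal sketch of the same upper bound expressed with libavutil's av_rescale_rnd, which rounds up rather than truncating; the 32-sample slack mirrors the allowance used above and is an assumption, not part of the original binding.

#include <stdint.h>
#include <libavutil/mathematics.h>

/* Upper bound on output samples per channel when converting in_samples
 * from in_rate to out_rate; rounds up and keeps a small slack so a short
 * trailing burst from the resampler still fits. */
static int64_t max_resampled_samples(int64_t in_samples, int in_rate, int out_rate)
{
	const int64_t extra_slack = 32; /* assumption: same padding as the Ruby binding above */
	return av_rescale_rnd(in_samples, out_rate, in_rate, AV_ROUND_UP) + extra_slack;
}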
Example 3
	std::vector<int8_t, tbb::cache_aligned_allocator<int8_t>> resample(std::vector<int8_t, tbb::cache_aligned_allocator<int8_t>>&& data)
	{
		if(resampler_ && !data.empty())
		{
			buffer2_.resize(AVCODEC_MAX_AUDIO_FRAME_SIZE*2);
			auto ret = audio_resample(resampler_.get(),
									  reinterpret_cast<short*>(buffer2_.data()), 
									  reinterpret_cast<short*>(data.data()), 
									  data.size() / (av_get_bytes_per_sample(input_sample_format_) * input_channels_)); 
			buffer2_.resize(ret * av_get_bytes_per_sample(output_sample_format_) * output_channels_);
			std::swap(data, buffer2_);
		}

		return std::move(data);
	}
Example 4
AudioBufferPtr AudioDecoderThread::resampleAudio(char* pDecodedData, int framesDecoded,
        int currentSampleFormat)
{
    if (!m_pResampleContext) {
#ifdef LIBAVRESAMPLE_VERSION
        m_pResampleContext = avresample_alloc_context();
        av_opt_set_int(m_pResampleContext, "in_channel_layout",
                av_get_default_channel_layout(m_pStream->codec->channels), 0);
        av_opt_set_int(m_pResampleContext, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
        av_opt_set_int(m_pResampleContext, "in_sample_rate", m_InputSampleRate, 0);
        av_opt_set_int(m_pResampleContext, "out_sample_rate", m_AP.m_SampleRate, 0);
        av_opt_set_int(m_pResampleContext, "in_sample_fmt",
                (AVSampleFormat)currentSampleFormat, 0);
        av_opt_set_int(m_pResampleContext, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
        int err = avresample_open(m_pResampleContext);
        AVG_ASSERT(err >= 0);
#else
        m_pResampleContext = av_audio_resample_init(m_AP.m_Channels, 
                m_pStream->codec->channels, m_AP.m_SampleRate, m_InputSampleRate,
                AV_SAMPLE_FMT_S16, (AVSampleFormat)currentSampleFormat, 16, 10, 0, 0.8);
#endif
        AVG_ASSERT(m_pResampleContext);
    }
#ifdef LIBAVRESAMPLE_VERSION
    uint8_t *pResampledData;
    int leftoverSamples = avresample_available(m_pResampleContext);
    int framesAvailable = leftoverSamples +
            av_rescale_rnd(avresample_get_delay(m_pResampleContext) +
                    framesDecoded, m_AP.m_SampleRate, m_InputSampleRate, AV_ROUND_UP);
    av_samples_alloc(&pResampledData, 0, 2, framesAvailable,
            AV_SAMPLE_FMT_S16, 0);
    int framesResampled = avresample_convert(m_pResampleContext, &pResampledData, 0, 
            framesAvailable, (uint8_t**)&pDecodedData, 0, framesDecoded);
    AudioBufferPtr pBuffer(new AudioBuffer(framesResampled, m_AP));
    memcpy(pBuffer->getData(), pResampledData, 
            framesResampled*m_AP.m_Channels*sizeof(short));
    av_freep(&pResampledData);
#else
    short pResampledData[AVCODEC_MAX_AUDIO_FRAME_SIZE/2];
    int framesResampled = audio_resample(m_pResampleContext, pResampledData,
            (short*)pDecodedData, framesDecoded);
    AudioBufferPtr pBuffer(new AudioBuffer(framesResampled, m_AP));
    memcpy(pBuffer->getData(), pResampledData, 
            framesResampled*m_AP.m_Channels*sizeof(short));
#endif
    return pBuffer;
}
int
AudioResamplerFfmpeg::resample(boost::uint8_t** input, int plane_size,
    int samples, boost::uint8_t** output) {
#ifdef HAVE_SWRESAMPLE_H
    return swr_convert(_context,
        output, MAX_AUDIO_FRAME_SIZE,
        const_cast<const uint8_t**>(input), samples);
#elif HAVE_AVRESAMPLE_H
    return avresample_convert(_context,
        output, 0, MAX_AUDIO_FRAME_SIZE,
        input, plane_size, samples);
#else
    UNUSED( plane_size );
    return audio_resample(_context, reinterpret_cast<short*>(*output),
        reinterpret_cast<short*>(*input), samples); 
#endif
}
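The HAVE_SWRESAMPLE_H branch above assumes _context has already been opened elsewhere. A minimal sketch of how such a SwrContext is typically created and released with libswresample follows; the stereo/S16/44.1 kHz parameters and the make_swr name are illustrative assumptions, not taken from this source.

#include <libswresample/swresample.h>
#include <libavutil/channel_layout.h>
#include <libavutil/samplefmt.h>

/* Allocate and open a resampler converting planar float 48 kHz stereo to
 * interleaved signed 16-bit 44.1 kHz stereo; returns NULL on failure. */
static SwrContext *make_swr(void)
{
    SwrContext *swr = swr_alloc_set_opts(NULL,
            AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16,  44100,  /* output */
            AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_FLTP, 48000,  /* input  */
            0, NULL);
    if (!swr || swr_init(swr) < 0) {
        swr_free(&swr); /* safe even if swr is NULL */
        return NULL;
    }
    return swr; /* pass to swr_convert() as in the branch above; free with swr_free() */
}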
Example 6
short *FFwrapper::AudioResampler::resample(void *in, size_t in_len, size_t *out_len) {
    *out_len = 2 * in_len; /* FFmpeg needs a way to find this out */
    if (*out_len > buf_size) {
        fprintf(stderr, "warning: resample buffer being reallocated\n");
        buf = (short *) av_realloc(buf, *out_len * sizeof(short));
        if (!buf) {
            buf_size = 0;
            throw AllocationError("audio resample buffer");
        }
        buf_size = *out_len;
    }

    *out_len = audio_resample(ctx, buf, (short *) in, in_len);
    if (*out_len == 0) {
        return NULL;
    } else {
        return buf;
    }
}
void LAVCAudioProvider::GetAudio(void *buf, __int64 start, __int64 count)
{
	int16_t *_buf = (int16_t *)buf;
	__int64 _count = num_samples - start;
	if (count < _count)
		_count = count;
	if (_count < 0)
		_count = 0;

	memset(_buf + _count, 0, (count - _count) << 1);

	AVPacket packet;
	while (_count > 0 && av_read_frame(lavcfile->fctx, &packet) >= 0) {
		while (packet.stream_index == audStream) {
			int bytesout = 0, samples;
			if (avcodec_decode_audio(codecContext, buffer, &bytesout, packet.data, packet.size) < 0)
				throw _T("Failed to decode audio");
			if (bytesout == 0)
				break;

			samples = bytesout >> 1;
			if (rsct) {
				if ((__int64)(samples * resample_ratio / codecContext->channels) > _count)
					samples = (__int64)(_count / resample_ratio * codecContext->channels);
				samples = audio_resample(rsct, _buf, buffer, samples / codecContext->channels);

				assert(samples <= _count);
			} else {
				/* currently dead code, rsct != NULL because we're resampling for mono */
				if (samples > _count)
					samples = _count;
				memcpy(_buf, buffer, samples << 1);
			}

			_buf += samples;
			_count -= samples;
			break;
		}

		av_free_packet(&packet);
	}
}
Example 8
int LibAvDecoder::decodePacket(AVCodecContext* cCtx, ReSampleContext* rsCtx, AVPacket* originalPacket, KeyFinder::AudioData* audio){
  // copy packet so we can shift data pointer about without endangering garbage collection
  AVPacket tempPacket;
  tempPacket.size = originalPacket->size;
  tempPacket.data = originalPacket->data;
  // loop in case audio packet contains multiple frames
  while(tempPacket.size > 0){
    int dataSize = frameBufferSize;
    int16_t* dataBuffer = (int16_t*)frameBuffer;
    int bytesConsumed = avcodec_decode_audio3(cCtx, dataBuffer, &dataSize, &tempPacket);
    if(bytesConsumed < 0){ // error
      tempPacket.size = 0;
      return 1;
    }
    tempPacket.data += bytesConsumed;
    tempPacket.size -= bytesConsumed;
    if(dataSize <= 0)
      continue; // nothing decoded
    int newSamplesDecoded = dataSize / av_get_bytes_per_sample(cCtx->sample_fmt);
    // Resample if necessary
    if(cCtx->sample_fmt != AV_SAMPLE_FMT_S16){
      int resampleResult = audio_resample(rsCtx, (short*)frameBufferConverted, (short*)frameBuffer, newSamplesDecoded);
      if(resampleResult < 0){
        throw KeyFinder::Exception(GuiStrings::getInstance()->libavCouldNotResample().toLocal8Bit().constData());
      }
      dataBuffer = (int16_t*)frameBufferConverted;
    }
    int oldSampleCount = audio->getSampleCount();
    try{
      audio->addToSampleCount(newSamplesDecoded);
    }catch(KeyFinder::Exception& e){
      throw e;
    }
    for(int i = 0; i < newSamplesDecoded; i++){
      audio->setSample(oldSampleCount+i, (float)dataBuffer[i]);
    }
  }
  return 0;
}
Example 9
void moreaudio(float *lb, float *rb, int samples)
{
	AVPacket packet;
	int bytes, bytesDecoded;
	int input_samples;
	while (samples)
	{
		if (!buffered_samples) {
			do {
				if(av_read_frame(pAFormatCtx, &packet)<0) {
					fprintf(stderr, "Audio EOF!\n");
					memset(lb, 0, samples*sizeof(float));
					memset(rb, 0, samples*sizeof(float));
					return;
				}
			} while(packet.stream_index!=audioStream);

			bytes = AUDIO_BUF * sizeof(short);

			bytesDecoded = avcodec_decode_audio3(pACodecCtx, iabuf, &bytes, &packet);
			if(bytesDecoded < 0)
			{
				fprintf(stderr, "Error while decoding audio frame\n");
				return;
			}

			input_samples = bytes / (sizeof(short)*pACodecCtx->channels);

			buffered_samples = audio_resample(resampler, (void*)oabuf, iabuf, input_samples);
			poabuf = oabuf;
		}

		*lb++ = *poabuf++ * volume;
		*rb++ = *poabuf++ * volume;
		buffered_samples--;
		samples--;
	}
}
Example 10
// ---------------------------------------------------------------------------------
static PyObject* Resampler_Resample( PyResamplerObject* obj, PyObject *args)
{
	char* sData = NULL;
	int iLen = 0, iSamples= 0, iNewSamples;
	PyObject *cRes;
	if (!PyArg_ParseTuple(args, "s#:"RESAMPLE_NAME, &sData, &iLen ))
		return NULL;

	// Calc samples count
	iSamples= iLen/ ( sizeof( short )* obj->resample_ctx->input_channels );

	// Create output buffer
	iNewSamples= iSamples;
	if( obj->resample_ctx->ratio< 1.0 || obj->resample_ctx->ratio > 1.0 )
		iNewSamples= (int)( iSamples* obj->resample_ctx->ratio );

	cRes= PyString_FromStringAndSize( NULL, iNewSamples* sizeof( short )* obj->resample_ctx->output_channels );
	if( !cRes )
		return NULL;

	audio_resample( obj->resample_ctx, (short*)PyString_AsString( cRes ), (short*)sData, iSamples );
	return cRes;
}
Example 11
int AudioConvertFunc(const char *outfilename,int sample_rate,int channels,int sec,const char *inputfilename,HWND mParentHwnd,UINT mMsg)
{
	AVCodec *aCodec =NULL;
	AVPacket *packet = NULL;
	AVFormatContext *pFormatCtx =NULL;
    AVCodecContext *aCodecCtx= NULL;
	ReSampleContext* ResampleCtx=NULL;
	AVFrame *decoded_frame = NULL;
	int datasize;
	//int tempcount = 0;
	//long total_out_size=0;
	int64_t total_in_convert_size = 0;
	int audioConvertProgress = 0;
	int tempAudioConvertProgress;
	unsigned int i;
	int len, ret, buffer_size, count, audio_stream_index = -1, totle_samplenum = 0;

	FILE *outfile = NULL;// *infile;
	head_pama pt;

	int16_t *audio_buffer = NULL;
	int16_t *resamplebuff = NULL;
	int ResampleChange=0;
	int ChannelsChange=0;
	int tempret;

	packet = (AVPacket*)malloc(sizeof(AVPacket));
	if (packet==NULL)
	{
		PostMessage(mParentHwnd,mMsg,FAILED_TAG,(LPARAM)(-1));
		return -1;
	}
	packet->data=NULL;

	buffer_size = AVCODEC_MAX_AUDIO_FRAME_SIZE * 100;
	audio_buffer = (int16_t *)av_malloc(buffer_size);
	if (audio_buffer==NULL)
	{
		if (packet->data!=NULL)
		{
			av_free_packet(packet);
			packet->data=NULL;
		}
		if (packet!=NULL)
		{
			free(packet);
			packet=NULL;
		}
		PostMessage(mParentHwnd,mMsg,FAILED_TAG,(LPARAM)(-2));
		return -2;
	}
	
	av_register_all();

	av_init_packet(packet);
#if 0
	/********** Attempt to break down the av_open_input_file function *************/
	int ret = 0;
	
	AVFormatParameters ap = { 0 };
	AVDictionary *tmp = NULL;
	AVInputFormat *fmt = NULL;
	AVDictionary **options = NULL;
	if (!pFormatCtx && !(pFormatCtx = avformat_alloc_context()))
		return AVERROR(ENOMEM);
	if (fmt)
		pFormatCtx->iformat = fmt;

	if (options)
		av_dict_copy(&tmp, *options, 0);

	if ((ret = av_opt_set_dict(pFormatCtx, &tmp)) < 0)
		goto fail;
	
	AVDictionary *tmp = NULL;

	if (!pFormatCtx && !(pFormatCtx = avformat_alloc_context()))
		return AVERROR(ENOMEM);

	int ret;
	AVProbeData pd = {inputfilename, NULL, 0};

	if (pFormatCtx->pb) {
		pFormatCtx->flags |= AVFMT_FLAG_CUSTOM_IO;
		if (!pFormatCtx->iformat)
			return av_probe_input_buffer(pFormatCtx->pb, &pFormatCtx->iformat, inputfilename, pFormatCtx, 0, 0);
		else if (pFormatCtx->iformat->flags & AVFMT_NOFILE)
			av_log(pFormatCtx, AV_LOG_WARNING, "Custom AVIOContext makes no sense and "
			"will be ignored with AVFMT_NOFILE format.\n");
		return 0;
	}

	if ( (pFormatCtx->iformat && pFormatCtx->iformat->flags & AVFMT_NOFILE) ||
		(!pFormatCtx->iformat && (pFormatCtx->iformat = av_probe_input_format(&pd, 0))))
		return 0;

	URLContext *h;
	int err;

	err = ffurl_open(&h, inputfilename, AVIO_RDONLY);
	if (err < 0)
		return err;
	err = ffio_fdopen(pFormatCtx, h);
	if (err < 0) {
		ffurl_close(h);
		return err;
	}

	if (pFormatCtx->iformat)
		return 0;
	av_probe_input_buffer(pFormatCtx->pb, &pFormatCtx->iformat, inputfilename, pFormatCtx, 0, 0);


	if (pFormatCtx->iformat->flags & AVFMT_NEEDNUMBER) {
		if (!av_filename_number_test(inputfilename)) {
			ret = AVERROR(EINVAL);
			goto fail;
		}
	}

	pFormatCtx->duration = pFormatCtx->start_time = AV_NOPTS_VALUE;
	av_strlcpy(pFormatCtx->filename, inputfilename ? inputfilename : "", sizeof(pFormatCtx->filename));

	/* allocate private data */
	if (pFormatCtx->iformat->priv_data_size > 0) {
		if (!(pFormatCtx->priv_data = av_mallocz(pFormatCtx->iformat->priv_data_size))) {
			ret = AVERROR(ENOMEM);
			goto fail;
		}
		if (pFormatCtx->iformat->priv_class) {
			*(const AVClass**)pFormatCtx->priv_data = pFormatCtx->iformat->priv_class;
			av_opt_set_defaults(pFormatCtx->priv_data);
			if ((ret = av_opt_set_dict(pFormatCtx->priv_data, &tmp)) < 0)
				goto fail;
		}
	}

	/* e.g. AVFMT_NOFILE formats will not have a AVIOContext */
	if (pFormatCtx->pb)
		ff_id3v2_read(pFormatCtx, ID3v2_DEFAULT_MAGIC);

	if (!(pFormatCtx->flags&AVFMT_FLAG_PRIV_OPT) && pFormatCtx->iformat->read_header)
		if ((ret = pFormatCtx->iformat->read_header(pFormatCtx, &ap)) < 0)
			goto fail;

	if (!(pFormatCtx->flags&AVFMT_FLAG_PRIV_OPT) && pFormatCtx->pb && !pFormatCtx->data_offset)
		pFormatCtx->data_offset = avio_tell(pFormatCtx->pb);

	pFormatCtx->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;

	if (options) {
		av_dict_free(options);
		*options = tmp;
	}
	return 0;

fail:
	av_dict_free(&tmp);
	if (pFormatCtx->pb && !(pFormatCtx->flags & AVFMT_FLAG_CUSTOM_IO))
		avio_close(pFormatCtx->pb);
	avformat_free_context(pFormatCtx);
	pFormatCtx = NULL;
	return ret;

	return err;
	/********** Attempt to break down the av_open_input_file function *************/
	//pFormatCtx = avformat_alloc_context();
#endif
	ret = av_open_input_file(&pFormatCtx, inputfilename, NULL,0, NULL);

	if(ret < 0)
	{
		if (audio_buffer!=NULL)
		{
			av_free(audio_buffer);
			audio_buffer=NULL;
		}
		
		if (packet->data!=NULL)
		{
			av_free_packet(packet);
			packet->data=NULL;
		}
		if (packet!=NULL)
		{
			free(packet);
			packet=NULL;
		}
		if (pFormatCtx!=NULL)
		{
			av_close_input_file(pFormatCtx);
			pFormatCtx=NULL;
		}
		PostMessage(mParentHwnd,mMsg,FAILED_TAG,(LPARAM)1);
		return 1;  
	}

	ret = av_find_stream_info(pFormatCtx);

	if( ret < 0)
	{
		if (audio_buffer!=NULL)
		{
			av_free(audio_buffer);
			audio_buffer=NULL;
		}

		if (packet->data!=NULL)
		{
			av_free_packet(packet);
			packet->data=NULL;
		}
		if (packet!=NULL)
		{
			free(packet);
			packet=NULL;
		}
		if (pFormatCtx!=NULL)
		{
			av_close_input_file(pFormatCtx);
			pFormatCtx=NULL;
		}
		PostMessage(mParentHwnd,mMsg,FAILED_TAG,(LPARAM)2);
		return 2;
	}

	audio_stream_index=-1;
	for(i=0; i< (signed)pFormatCtx->nb_streams; i++)
	{

		if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO && audio_stream_index < 0)
		{
			audio_stream_index = i;
			break;
		}
	}

	if(audio_stream_index == -1)
	{
		if (audio_buffer!=NULL)
		{
			av_free(audio_buffer);
			audio_buffer=NULL;
		}

		if (packet->data!=NULL)
		{
			av_free_packet(packet);
			packet->data=NULL;
		}
		if (packet!=NULL)
		{
			free(packet);
			packet=NULL;
		}
		if (pFormatCtx!=NULL)
		{
			av_close_input_file(pFormatCtx);
			pFormatCtx=NULL;
		}
		PostMessage(mParentHwnd,mMsg,FAILED_TAG,(LPARAM)3);
		return 3;
	}

	aCodecCtx = pFormatCtx->streams[audio_stream_index]->codec;
	if (aCodecCtx==NULL)
	{
		if (audio_buffer!=NULL)
		{
			av_free(audio_buffer);
			audio_buffer=NULL;
		}

		if (packet->data!=NULL)
		{
			av_free_packet(packet);
			packet->data=NULL;
		}
		if (packet!=NULL)
		{
			free(packet);
			packet=NULL;
		}
		if (pFormatCtx!=NULL)
		{
			av_close_input_file(pFormatCtx);
			pFormatCtx=NULL;
		}
		PostMessage(mParentHwnd,mMsg,FAILED_TAG,(LPARAM)4);
		return 4;
	}
	aCodec = avcodec_find_decoder(aCodecCtx->codec_id);
	if(!aCodec) 
	{
		if (audio_buffer!=NULL)
		{
			av_free(audio_buffer);
			audio_buffer=NULL;
		}

		if (packet->data!=NULL)
		{
			av_free_packet(packet);
			packet->data=NULL;
		}
		if (packet!=NULL)
		{
			free(packet);
			packet=NULL;
		}
		if (pFormatCtx!=NULL)
		{
			av_close_input_file(pFormatCtx);
			pFormatCtx=NULL;
		}
		/*if (aCodecCtx!=NULL)
		{
			avcodec_close(aCodecCtx);
			aCodecCtx=NULL;
		}*/
		PostMessage(mParentHwnd,mMsg,FAILED_TAG,(LPARAM)5);
		return 5;
	}
	//resample init
	if (channels==0)
	{
		channels=aCodecCtx->channels;
	}
	if (sample_rate==0)
	{
		sample_rate=aCodecCtx->sample_rate;
	}
	//if (aCodecCtx->channels!=channels)
	//{
	//	ChannelsChange=1;
	//	ResampleChange=1;
	//}
	if (aCodecCtx->sample_rate!=sample_rate||aCodecCtx->channels!=channels)
	{
		ResampleChange=1;
	}
	if (ResampleChange==1)
	{
		ResampleCtx = av_audio_resample_init(channels,aCodecCtx->channels,sample_rate,aCodecCtx->sample_rate,SAMPLE_FMT_S16,SAMPLE_FMT_S16,16,10,0,1.0);
		if (ResampleCtx==NULL)
		{
			if (audio_buffer!=NULL)
			{
				av_free(audio_buffer);
				audio_buffer=NULL;
			}

			if (packet->data!=NULL)
			{
				av_free_packet(packet);
				packet->data=NULL;
			}
			if (packet!=NULL)
			{
				free(packet);
				packet=NULL;
			}
			if (pFormatCtx!=NULL)
			{
				av_close_input_file(pFormatCtx);
				pFormatCtx=NULL;
			}
			/*if (aCodecCtx!=NULL)
			{
				avcodec_close(aCodecCtx);
				aCodecCtx=NULL;
			}*/
			ResampleChange=0;
			PostMessage(mParentHwnd,mMsg,FAILED_TAG,(LPARAM)6);
			return 6;
		}
		resamplebuff=(int16_t *)malloc(buffer_size);
		if (resamplebuff==NULL)
		{
			if (audio_buffer!=NULL)
			{
				av_free(audio_buffer);
				audio_buffer=NULL;
			}

			if (packet->data!=NULL)
			{
				av_free_packet(packet);
				packet->data=NULL;
			}
			if (packet!=NULL)
			{
				free(packet);
				packet=NULL;
			}
			if (pFormatCtx!=NULL)
			{
				av_close_input_file(pFormatCtx);
				pFormatCtx=NULL;
			}
			/*if (aCodecCtx!=NULL)
			{
				avcodec_close(aCodecCtx);
				aCodecCtx=NULL;
			}*/
			
			if (ResampleChange==1&&ResampleCtx!=NULL)
			{
				audio_resample_close(ResampleCtx);
				ResampleCtx=NULL;
			}
			PostMessage(mParentHwnd,mMsg,FAILED_TAG,(LPARAM)7);
			return 7;
		}
	}
	//
	datasize=sec*sample_rate;
	if(avcodec_open(aCodecCtx, aCodec)<0)
	{
		if (audio_buffer!=NULL)
		{
			av_free(audio_buffer);
			audio_buffer=NULL;
		}

		if (packet->data!=NULL)
		{
			av_free_packet(packet);
			packet->data=NULL;
		}
		if (packet!=NULL)
		{
			free(packet);
			packet=NULL;
		}
		if (pFormatCtx!=NULL)
		{
			av_close_input_file(pFormatCtx);
			pFormatCtx=NULL;
		}
		if (aCodecCtx!=NULL)
		{
			avcodec_close(aCodecCtx);
			aCodecCtx=NULL;
		}

		if (ResampleChange==1&&ResampleCtx!=NULL&&resamplebuff!=NULL)
		{
			audio_resample_close(ResampleCtx);
			ResampleCtx=NULL;
			free(resamplebuff);
			resamplebuff=NULL;
		}
		ResampleChange=0;
		PostMessage(mParentHwnd,mMsg,FAILED_TAG,(LPARAM)8);
		return 8;
	}

	pt.bits = 16;
	pt.channels = channels;
	pt.rate = sample_rate;

	outfile = fopen(outfilename, "wb");
	if (!outfile) 
	{
		if (audio_buffer!=NULL)
		{
			av_free(audio_buffer);
			audio_buffer=NULL;
		}

		if (packet->data!=NULL)
		{
			av_free_packet(packet);
			packet->data=NULL;
		}
		if (packet!=NULL)
		{
			free(packet);
			packet=NULL;
		}
		if (pFormatCtx!=NULL)
		{
			av_close_input_file(pFormatCtx);
			pFormatCtx=NULL;
		}
		if (aCodecCtx!=NULL)
		{
			avcodec_close(aCodecCtx);
			aCodecCtx=NULL;
		}

		if (ResampleChange==1&&ResampleCtx!=NULL&&resamplebuff!=NULL)
		{
			audio_resample_close(ResampleCtx);
			ResampleCtx=NULL;
			free(resamplebuff);
			resamplebuff=NULL;
		}
		ResampleChange=0;
		PostMessage(mParentHwnd,mMsg,FAILED_TAG,(LPARAM)9);
		return 9;
	}

	fseek(outfile,44,SEEK_SET);
    while(av_read_frame(pFormatCtx, packet) >= 0) 
	{
		CheckMessageQueue();
	    if(packet->stream_index == audio_stream_index)
	    {
			//while(packet->size > 0)
			//{
				buffer_size = AVCODEC_MAX_AUDIO_FRAME_SIZE * 100;
				len = avcodec_decode_audio3(aCodecCtx, audio_buffer, &buffer_size, packet);

				if (len < 0) 
				{
					break;
				}

				if(buffer_size > 0)
				{
					//resample
					if (ResampleChange==1)
					{
						int samples=buffer_size/ ((aCodecCtx->channels) * 2);
						int resamplenum= 0;
						resamplenum = audio_resample(ResampleCtx, 
							resamplebuff, 
							audio_buffer, 
							samples);
						count = fwrite(resamplebuff, 2*channels, resamplenum, outfile);
					}
					
					else
					{
						count = fwrite(audio_buffer, 2*aCodecCtx->channels, buffer_size/((aCodecCtx->channels)*2), outfile);
					}
					totle_samplenum += count;
				}
				//tempcount++;
				//total_out_size += count*2*aCodecCtx->channels;
				total_in_convert_size += packet->size;
				tempAudioConvertProgress = 100*total_in_convert_size/(pFormatCtx->file_size);
				if(tempAudioConvertProgress != audioConvertProgress)
				{
					if(tempAudioConvertProgress == 100)
						tempAudioConvertProgress = 99;
					audioConvertProgress = tempAudioConvertProgress;
					tempret = PostMessage(mParentHwnd,mMsg,DECING_TAG,audioConvertProgress);
				}
				if (packet->data!=NULL)
				{
					av_free_packet(packet);
					packet->data=NULL;
				}
				//packet->size -= len;
				//packet->data += len;
			//}
			if (datasize!=0&&totle_samplenum>=datasize)
			{
				break;
			}
	    }
	}
	audioConvertProgress = 100;
	PostMessage(mParentHwnd,mMsg,DECING_TAG,audioConvertProgress);
	fseek(outfile,0,SEEK_SET);
	wav_write_header(outfile, pt, totle_samplenum);
	
	if (outfile!=NULL)
	{
		fclose(outfile);
		outfile=NULL;
	}
    
	if (audio_buffer!=NULL)
	{
		av_free(audio_buffer);
		audio_buffer=NULL;
	}
	
	if (aCodecCtx!=NULL)
	{
		avcodec_close(aCodecCtx);
		aCodecCtx=NULL;
	}
	
	if (packet!=NULL)
	{
		free(packet);//
		packet=NULL;
	}
	
	if (pFormatCtx!=NULL)
	{
		av_close_input_file(pFormatCtx);
		pFormatCtx=NULL;
	}
	
	if (ResampleChange==1)
	{
		if (resamplebuff!=NULL)
		{
			free(resamplebuff);
			resamplebuff=NULL;
		}
		if (ResampleCtx!=NULL)
		{
			audio_resample_close(ResampleCtx);
			ResampleCtx=NULL;
		}
	}
	if (totle_samplenum<=sample_rate*5)
	{
		PostMessage(mParentHwnd,mMsg,FAILED_TAG,(LPARAM)10);
		return 10;
	}
	PostMessage(mParentHwnd,mMsg,FINISH_TAG,NULL);
	return 0;
}
Example 12
int AudioConvertFunc_Buffer(const short *pcmbuffer,int sample_rate,int channels,int sec,const char *inputfilename,int *pcmbuff_size)
{
	AVCodec *aCodec =NULL;
	AVPacket *packet = NULL;
	AVFormatContext *pFormatCtx =NULL;
    AVCodecContext *aCodecCtx= NULL;
	ReSampleContext* ResampleCtx=NULL;
	AVFrame *decoded_frame = NULL;
	int datasize;

	unsigned int i;
	int len, ret, buffer_size, audio_stream_index = -1, totle_samplenum = 0;

	////1.FILE *outfile = NULL;// *infile;
	

	head_pama pt;

	int16_t *audio_buffer = NULL;
	int16_t *resamplebuff = NULL;
	int ResampleChange=0;
	int ChannelsChange=0;
	int totalsize=0;

	packet = (AVPacket*)malloc(sizeof(AVPacket));
	if (packet==NULL)
	{
		return -1;
	}
	packet->data=NULL;

	buffer_size = AVCODEC_MAX_AUDIO_FRAME_SIZE * 100;
	audio_buffer = (int16_t *)av_malloc(buffer_size);
	if (audio_buffer==NULL)
	{
		if (packet->data!=NULL)
		{
			av_free_packet(packet);
			packet->data=NULL;
		}
		if (packet!=NULL)
		{
			free(packet);
			packet=NULL;
		}
		
		return -2;
	}
	
	av_register_all();

	av_init_packet(packet);

	//pFormatCtx = avformat_alloc_context();
	ret = av_open_input_file(&pFormatCtx, inputfilename, NULL,0, NULL);

	if(ret < 0)
	{
		if (audio_buffer!=NULL)
		{
			av_free(audio_buffer);
			audio_buffer=NULL;
		}
		
		if (packet->data!=NULL)
		{
			av_free_packet(packet);
			packet->data=NULL;
		}
		if (packet!=NULL)
		{
			free(packet);
			packet=NULL;
		}
		if (pFormatCtx!=NULL)
		{
			av_close_input_file(pFormatCtx);
			pFormatCtx=NULL;
		}
		
		return 1;  
	}

	ret = av_find_stream_info(pFormatCtx);

	if( ret < 0)
	{
		if (audio_buffer!=NULL)
		{
			av_free(audio_buffer);
			audio_buffer=NULL;
		}

		if (packet->data!=NULL)
		{
			av_free_packet(packet);
			packet->data=NULL;
		}
		if (packet!=NULL)
		{
			free(packet);
			packet=NULL;
		}
		if (pFormatCtx!=NULL)
		{
			av_close_input_file(pFormatCtx);
			pFormatCtx=NULL;
		}

		return 2;
	}

	audio_stream_index=-1;
	for(i=0; i< (signed)pFormatCtx->nb_streams; i++)
	{

		if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO && audio_stream_index < 0)
		{
			audio_stream_index = i;
			break;
		}
	}

	if(audio_stream_index == -1)
	{
		if (audio_buffer!=NULL)
		{
			av_free(audio_buffer);
			audio_buffer=NULL;
		}

		if (packet->data!=NULL)
		{
			av_free_packet(packet);
			packet->data=NULL;
		}
		if (packet!=NULL)
		{
			free(packet);
			packet=NULL;
		}
		if (pFormatCtx!=NULL)
		{
			av_close_input_file(pFormatCtx);
			pFormatCtx=NULL;
		}

		return 3;
	}

	aCodecCtx = pFormatCtx->streams[audio_stream_index]->codec;
	if (aCodecCtx==NULL)
	{
		if (audio_buffer!=NULL)
		{
			av_free(audio_buffer);
			audio_buffer=NULL;
		}

		if (packet->data!=NULL)
		{
			av_free_packet(packet);
			packet->data=NULL;
		}
		if (packet!=NULL)
		{
			free(packet);
			packet=NULL;
		}
		if (pFormatCtx!=NULL)
		{
			av_close_input_file(pFormatCtx);
			pFormatCtx=NULL;
		}
		return 4;
	}
	aCodec = avcodec_find_decoder(aCodecCtx->codec_id);
	if(!aCodec) 
	{
		if (audio_buffer!=NULL)
		{
			av_free(audio_buffer);
			audio_buffer=NULL;
		}

		if (packet->data!=NULL)
		{
			av_free_packet(packet);
			packet->data=NULL;
		}
		if (packet!=NULL)
		{
			free(packet);
			packet=NULL;
		}
		if (pFormatCtx!=NULL)
		{
			av_close_input_file(pFormatCtx);
			pFormatCtx=NULL;
		}
		/*if (aCodecCtx!=NULL)
		{
			avcodec_close(aCodecCtx);
			aCodecCtx=NULL;
		}*/
		return 5;
	}
	//resample init
	if (channels==0)
	{
		channels=aCodecCtx->channels;
	}
	if (sample_rate==0)
	{
		sample_rate=aCodecCtx->sample_rate;
	}
	//if (aCodecCtx->channels!=channels)
	//{
	//	ChannelsChange=1;
	//	ResampleChange=1;
	//}
	if (aCodecCtx->sample_rate!=sample_rate||aCodecCtx->channels!=channels)
	{
		ResampleChange=1;
	}
	if (ResampleChange==1)
	{
		ResampleCtx = av_audio_resample_init(channels,aCodecCtx->channels,sample_rate,aCodecCtx->sample_rate,SAMPLE_FMT_S16,SAMPLE_FMT_S16,16,10,0,1.0);
		if (ResampleCtx==NULL)
		{
			if (audio_buffer!=NULL)
			{
				av_free(audio_buffer);
				audio_buffer=NULL;
			}

			if (packet->data!=NULL)
			{
				av_free_packet(packet);
				packet->data=NULL;
			}
			if (packet!=NULL)
			{
				free(packet);
				packet=NULL;
			}
			if (pFormatCtx!=NULL)
			{
				av_close_input_file(pFormatCtx);
				pFormatCtx=NULL;
			}
			/*if (aCodecCtx!=NULL)
			{
				avcodec_close(aCodecCtx);
				aCodecCtx=NULL;
			}*/
			ResampleChange=0;
			return 6;
		}
		resamplebuff=(int16_t *)malloc(buffer_size);
		if (resamplebuff==NULL)
		{
			if (audio_buffer!=NULL)
			{
				av_free(audio_buffer);
				audio_buffer=NULL;
			}

			if (packet->data!=NULL)
			{
				av_free_packet(packet);
				packet->data=NULL;
			}
			if (packet!=NULL)
			{
				free(packet);
				packet=NULL;
			}
			if (pFormatCtx!=NULL)
			{
				av_close_input_file(pFormatCtx);
				pFormatCtx=NULL;
			}
			/*if (aCodecCtx!=NULL)
			{
				avcodec_close(aCodecCtx);
				aCodecCtx=NULL;
			}*/
			
			if (ResampleChange==1&&ResampleCtx!=NULL)
			{
				audio_resample_close(ResampleCtx);
				ResampleCtx=NULL;
			}
			return 7;
		}
	}
	//
	datasize=sec*sample_rate;
	if(avcodec_open(aCodecCtx, aCodec)<0)
	{
		if (audio_buffer!=NULL)
		{
			av_free(audio_buffer);
			audio_buffer=NULL;
		}

		if (packet->data!=NULL)
		{
			av_free_packet(packet);
			packet->data=NULL;
		}
		if (packet!=NULL)
		{
			free(packet);
			packet=NULL;
		}
		if (pFormatCtx!=NULL)
		{
			av_close_input_file(pFormatCtx);
			pFormatCtx=NULL;
		}
		if (aCodecCtx!=NULL)
		{
			avcodec_close(aCodecCtx);
			aCodecCtx=NULL;
		}

		if (ResampleChange==1&&ResampleCtx!=NULL&&resamplebuff!=NULL)
		{
			audio_resample_close(ResampleCtx);
			ResampleCtx=NULL;
			free(resamplebuff);
			resamplebuff=NULL;
		}
		ResampleChange=0;
		return 8;
	}

	pt.bits = 16;
	pt.channels = channels;
	pt.rate = sample_rate;

	////2.outfile = fopen(outfilename, "wb");
	

	////3.outfile pcmbuffer
	if (!pcmbuffer) 
	{
		if (audio_buffer!=NULL)
		{
			av_free(audio_buffer);
			audio_buffer=NULL;
		}

		if (packet->data!=NULL)
		{
			av_free_packet(packet);
			packet->data=NULL;
		}
		if (packet!=NULL)
		{
			free(packet);
			packet=NULL;
		}
		if (pFormatCtx!=NULL)
		{
			av_close_input_file(pFormatCtx);
			pFormatCtx=NULL;
		}
		if (aCodecCtx!=NULL)
		{
			avcodec_close(aCodecCtx);
			aCodecCtx=NULL;
		}

		if (ResampleChange==1&&ResampleCtx!=NULL&&resamplebuff!=NULL)
		{
			audio_resample_close(ResampleCtx);
			ResampleCtx=NULL;
			free(resamplebuff);
			resamplebuff=NULL;
		}
		ResampleChange=0;
		return 9;
	}

	////4.fseek(outfile,44,SEEK_SET);


    while(av_read_frame(pFormatCtx, packet) >= 0) 
	{
		CheckMessageQueue();
	    if(packet->stream_index == audio_stream_index)
	    {
			//while(packet->size > 0)
			//{
				buffer_size = AVCODEC_MAX_AUDIO_FRAME_SIZE * 100;
				len = avcodec_decode_audio3(aCodecCtx, audio_buffer, &buffer_size, packet);

				if (len < 0) 
				{
					break;
				}

				if(buffer_size > 0)
				{
					//resample
					
					if (ResampleChange==1)
					{
						int samples=buffer_size/ ((aCodecCtx->channels) * 2);
						int resamplenum= 0;
						
						resamplenum = audio_resample(ResampleCtx, 
							resamplebuff, 
							audio_buffer, 
							samples);
						////5.count = fwrite(resamplebuff, 2*channels, resamplenum, outfile);
						memcpy((char*)(pcmbuffer+totalsize/2),resamplebuff,2*channels*resamplenum);
						totalsize+=2*channels*resamplenum;
						totle_samplenum+=resamplenum;
					}
					
					else
					{
						////6.count = fwrite(audio_buffer, 2*aCodecCtx->channels, buffer_size/((aCodecCtx->channels)*2), outfile);
						
						memcpy((char*)(pcmbuffer+totalsize/2),audio_buffer,2*channels*buffer_size/((aCodecCtx->channels)*2));
						totalsize+=2*channels*buffer_size/((aCodecCtx->channels)*2);
						totle_samplenum+=buffer_size/ ((aCodecCtx->channels) * 2);
						
					}
					////totle_samplenum += count;
					if(totalsize>=2*channels*(sample_rate*sec))
					{   
						*pcmbuff_size=totalsize/2/channels;
						break;
					}


				}
				if (packet->data!=NULL)
				{
					av_free_packet(packet);
					packet->data=NULL;
				}
				//packet->size -= len;
				//packet->data += len;
			//}
			if (datasize!=0&&totle_samplenum>=datasize)
			{
				break;
			}
	    }
	}
    
	if (audio_buffer!=NULL)
	{
		av_free(audio_buffer);
		audio_buffer=NULL;
	}
	
	if (aCodecCtx!=NULL)
	{
		avcodec_close(aCodecCtx);
		aCodecCtx=NULL;
	}
	
	if (packet!=NULL)
	{
		free(packet);//
		packet=NULL;
	}
	
	if (pFormatCtx!=NULL)
	{
		av_close_input_file(pFormatCtx);
		pFormatCtx=NULL;
	}
	
	if (ResampleChange==1)
	{
		if (resamplebuff!=NULL)
		{
			free(resamplebuff);
			resamplebuff=NULL;
		}
		if (ResampleCtx!=NULL)
		{
			audio_resample_close(ResampleCtx);
			ResampleCtx=NULL;
		}
	}
	if (totle_samplenum<=sample_rate*5)
	{
		return 10;
	}
	return 0;
}
Example 13
static int resample_get_audio( mlt_frame frame, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	// Get the filter service
	mlt_filter filter = mlt_frame_pop_audio( frame );

	// Get the filter properties
	mlt_properties filter_properties = MLT_FILTER_PROPERTIES( filter );

	mlt_service_lock( MLT_FILTER_SERVICE( filter ) );

	// Get the resample information
	int output_rate = mlt_properties_get_int( filter_properties, "frequency" );
	int16_t *sample_buffer = mlt_properties_get_data( filter_properties, "buffer", NULL );

	// Obtain the resample context if it exists
	ReSampleContext *resample = mlt_properties_get_data( filter_properties, "audio_resample", NULL );

	// If no resample frequency is specified, default to requested value
	if ( output_rate == 0 )
		output_rate = *frequency;

	// Get the producer's audio
	int error = mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );
	if ( error ) return error;

	// Return now if no work to do
	if ( output_rate != *frequency )
	{
		// Will store number of samples created
		int used = 0;

		mlt_log_debug( MLT_FILTER_SERVICE(filter), "channels %d samples %d frequency %d -> %d\n",
			*channels, *samples, *frequency, output_rate );

		// Do not convert to s16 unless we need to change the rate
		if ( *format != mlt_audio_s16 )
		{
			*format = mlt_audio_s16;
			mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );
		}

		// Create a resampler if necessary
		if ( resample == NULL || *frequency != mlt_properties_get_int( filter_properties, "last_frequency" ) )
		{
			// Create the resampler
			resample = av_audio_resample_init( *channels, *channels, output_rate, *frequency,
				AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16, 16, 10, 0, 0.8 );

			// And store it on properties
			mlt_properties_set_data( filter_properties, "audio_resample", resample, 0, ( mlt_destructor )audio_resample_close, NULL );

			// And remember what it was created for
			mlt_properties_set_int( filter_properties, "last_frequency", *frequency );
		}

		mlt_service_unlock( MLT_FILTER_SERVICE( filter ) );

		// Resample the audio
		used = audio_resample( resample, sample_buffer, *buffer, *samples );
		int size = used * *channels * sizeof( int16_t );

		// Resize if necessary
		if ( used > *samples )
		{
			*buffer = mlt_pool_realloc( *buffer, size );
			mlt_frame_set_audio( frame, *buffer, *format, size, mlt_pool_release );
		}

		// Copy samples
		memcpy( *buffer, sample_buffer, size );

		// Update output variables
		*samples = used;
		*frequency = output_rate;
	}
	else
	{
		mlt_service_unlock( MLT_FILTER_SERVICE( filter ) );
	}

	return error;
}
Example 14
int main(int argc, char* argv[])
{
	AVFormatContext* in_fctx;	
	AVCodecContext* in_cctx;
	AVCodec* in_codec;
	const char* in_filename;
	const char* out_filename;
	char* decoded_buf;
	char* output_buf;
	char* resample_buf;
	char* before_encoding_buf;
	int ret = 0;

	if (argc != 3)
	{
		printf("./audio_convertor input ouput\n");
		return -1;
	}
	//in_filename = "../input/input.aac";
	//out_filename = "output/aac2mp3.mp3";
	in_filename = argv[1];
	out_filename = argv[2];
	decoded_buf = (char*)av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
	output_buf = (char*)av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
	resample_buf = (char*)av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE); 
	before_encoding_buf = (char*)av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE); 

	avcodec_register_all();
	av_register_all();

	in_fctx = avformat_alloc_context();

	ret = av_open_input_file(&in_fctx, in_filename, NULL, 0, NULL);
	if ( ret != 0 )
	{
		printf("open input audio file[%s] fail\n", in_filename);
		return -1;
	}

	ret = av_find_stream_info(in_fctx);
	if ( ret < 0 )
	{
		printf("find stream in audio file[%s] fail\n", in_filename);
		return -1;
	}

	dump_format(in_fctx, 0, in_filename, 0);

	// Here we assume that if a file contains multiple audio streams,
	// only the first audio stream is transcoded; video streams are ignored.
	int i;
	int ast_index = -1;
	for (i = 0; i<(int)in_fctx->nb_streams; ++i)
	{
		if (in_fctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
		{
			ast_index = i;
			break;
		}
	}
	if (ast_index == -1)
	{
		printf("there is not any audio stream in file[%s]\n", in_filename);
		return 0;
	}
	else
	{
		printf("find audio stream in file[%s]\n", in_filename);
	}
	
	in_cctx = in_fctx->streams[ast_index]->codec;
	//in_cctx->codec_id = CODEC_ID_GSM;
	in_codec = avcodec_find_decoder(in_cctx->codec_id);
	if (!in_codec)
	{
		printf("find decoder for codec_id[%d] fail, file[%s]\n", in_cctx->codec_id, in_filename);
		return -1;
	}
	ret = avcodec_open(in_cctx, in_codec);
	if (ret >= 0)
	{
		printf("open codec[name:%s] for stream[idx:%d] of file[%s]\n", in_codec->name, ast_index, in_filename);
	}

	// Initialize the output side
	AVOutputFormat* out_fmt;
	AVFormatContext* out_fctx;
	AVCodecContext* out_cctx = NULL;

	out_fmt = av_guess_format(NULL, out_filename, NULL);
	if (!out_fmt)
	{
		printf("Could not deduce output format from file extension: using MPEG-3.\n");
		out_fmt = av_guess_format("mp3", NULL, NULL);
	}
	if (!out_fmt)
	{
		fprintf(stderr, "Could not find suitable output format\n");
		exit(1);
	}

	out_fctx = avformat_alloc_context();
	if (!out_fctx)
	{
		fprintf(stderr, "avformat_alloc_context fail\n");
		exit(1);
	}
	out_fctx->oformat = out_fmt;

	out_cctx = output_decode_init(in_cctx, out_fctx, out_fmt->audio_codec);
	if (!out_cctx)
	{
		fprintf(stderr, "output_codec_init fail\n");
		exit(1);
	}
	/* set the output parameters (must be done even if no parameters). */
	if (av_set_parameters(out_fctx, NULL) < 0) 
	{
		fprintf(stderr, "Invalid output format parameters\n");
		exit(1);
	}
	dump_format(out_fctx, 0, out_filename, 1);

	output_decode_open(out_cctx);

	/* open the output file */
	if (!(out_fmt->flags & AVFMT_NOFILE)) 
	{
		if (url_fopen(&out_fctx->pb, out_filename, URL_WRONLY) < 0)
		{
			fprintf(stderr, "Could not open '%s'\n", out_filename);
			exit(1);
		}
	}
	/* write the stream header, if any */
	if(av_write_header(out_fctx) < 0)
	{
		fprintf(stderr, "Could not write header for output file\n");
		return -1;
	}

	int decoded_size;
	AVPacket in_packet;
	AVPacket out_packet;
	ReSampleContext *rs_ctx = NULL;
	/*
	Reference: http://hi.baidu.com/wg_wang/item/34396781d20b4b1ec316270b
	Two points to note:
	(1) Read data frame by frame from the input file, decode it, re-encode it to the output
	file's requirements, and write it to the output file frame by frame. Because sample_rate
	and channels may differ, the audio data has to be resampled along the way.
	(2) Different codecs need different amounts of data per audio frame, so decoded input may
	have to be buffered until the output encoder has enough, or one input frame may have to
	feed several output frames. This is why the resampler and the FIFO provided by libavutil
	(declared in libavutil/fifo.h) are set up to hold data temporarily.
	For example: AAC has frame_size=1024 and MP3 has frame_size=1152; without this FIFO the
	generated MP3 file would be broken.
	*/
	// Set up resampling
	rs_ctx = av_audio_resample_init(
				out_cctx->channels, in_cctx->channels,
				out_cctx->sample_rate, in_cctx->sample_rate,
				out_cctx->sample_fmt, in_cctx->sample_fmt,
				16, 10, 0, 0.8);

	AVFifoBuffer *iofifo;
	iofifo = av_fifo_alloc(AVCODEC_MAX_AUDIO_FRAME_SIZE*2);
	av_init_packet(&in_packet);
	av_init_packet(&out_packet);

	while (av_read_frame(in_fctx, &in_packet) >= 0)
	{
		while (in_packet.size > 0)
		{
			int used_size;
			decoded_size = AVCODEC_MAX_AUDIO_FRAME_SIZE;
			used_size = avcodec_decode_audio3(in_cctx, (int16_t *)decoded_buf, &decoded_size, &in_packet);
			if (used_size < 0)
			{
				printf("avcodec_decode_audio3 fail\n");
				exit(1);
			}
			int bs, frame_bytes;

			bs = audio_resample(rs_ctx, (short *)resample_buf, (short *)decoded_buf, decoded_size/(in_cctx->channels*g_in_bytes));
			ret = av_fifo_generic_write(iofifo, (uint8_t *)resample_buf, bs*out_cctx->channels*g_out_bytes, NULL);
			//ret = av_fifo_generic_write(iofifo, (uint8_t *)decoded_buf, in_cctx->frame_size*in_cctx->channels*g_out_bytes, NULL);

			frame_bytes = out_cctx->frame_size * g_out_bytes * out_cctx->channels;
			while(av_fifo_size(iofifo) >= frame_bytes)
			{
				ret = av_fifo_generic_read(iofifo, before_encoding_buf, frame_bytes, NULL);
				out_packet.size = avcodec_encode_audio(out_cctx, (uint8_t*)output_buf, frame_bytes, (short *)before_encoding_buf);
				out_packet.data = (uint8_t *)output_buf;
				av_write_frame(out_fctx, &out_packet);
			}

			in_packet.size -= used_size;
			in_packet.data += used_size;
		}
	}
	/* write the trailer, if any */
	av_write_trailer(out_fctx);
	if (!(out_fmt->flags & AVFMT_NOFILE)) {
		/* close the output file */
		url_fclose(out_fctx->pb);
	}
}
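To make the FIFO comment in the example above concrete: one decoded AAC frame yields 1024 samples per channel while an MP3 frame needs 1152, so after the first decoded frame the FIFO holds only 1024 samples and nothing is encoded yet; after the second it holds 2048, one MP3 frame consumes 1152, and 896 samples stay buffered for the next pass. Feeding the encoder directly, without this buffering, would hand it undersized frames.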
Example 15
size_t decode_audio(PlayerCtx *ctx, AVPacket *packet, int new_packet, int32_t seekid)
{
	int bytes = ctx->a_stride * AVCODEC_MAX_AUDIO_FRAME_SIZE;
	int decoded;

	decoded = avcodec_decode_audio3(ctx->a_codec_ctx, ctx->a_ibuf, &bytes, packet);
	if (decoded < 0) {
		fprintf(stderr, "Error while decoding audio frame\n");
		return packet->size;
	}
	int in_samples = bytes / ctx->a_stride;
	if (!in_samples)
		return decoded;


	int out_samples;
	out_samples = audio_resample(ctx->a_resampler, ctx->a_rbuf, ctx->a_ibuf, in_samples);

	pthread_mutex_lock(&ctx->a_buf_mutex);

	int free_samples;
	while (1) {
		free_samples = ctx->a_buf_get - ctx->a_buf_put;
		if (free_samples <= 0)
			free_samples += ctx->a_buf_len;

		if (free_samples <= out_samples) {
			printf("Wait for space in audio buffer\n");
			pthread_cond_wait(&ctx->a_buf_not_full, &ctx->a_buf_mutex);
		} else {
			break;
		}
	}
	pthread_mutex_unlock(&ctx->a_buf_mutex);

	if (new_packet && packet->pts != AV_NOPTS_VALUE)
		ctx->a_cur_pts = av_q2d(ctx->a_stream->time_base) * packet->pts;

	int put = ctx->a_buf_put;
	short *rbuf = ctx->a_rbuf;
	int i;
	for (i = 0; i < out_samples; i++) {
		ctx->a_buf[put].l = *rbuf++;
		ctx->a_buf[put].r = *rbuf++;
		rbuf += ctx->a_ch - 2;
		ctx->a_buf[put].seekid = seekid;
		ctx->a_buf[put].pts = ctx->a_cur_pts;
		put++;
		ctx->a_cur_pts += 1.0/SAMPLE_RATE;
		if (put == ctx->a_buf_len)
			put = 0;
	}

	printf("Put %d audio samples at pts %f\n", out_samples, ctx->a_cur_pts);

	pthread_mutex_lock(&ctx->a_buf_mutex);
	ctx->a_buf_put = put;
	pthread_cond_signal(&ctx->a_buf_not_empty);
	pthread_mutex_unlock(&ctx->a_buf_mutex);

	return decoded;
}