Code example #1
File: obs-ffmpeg-aac.c Project: Posnet/obs-studio
static bool initialize_codec(struct aac_encoder *enc)
{
	int ret;

	enc->aframe  = av_frame_alloc();
	if (!enc->aframe) {
		aac_warn("initialize_codec", "Failed to allocate audio frame");
		return false;
	}

	ret = avcodec_open2(enc->context, enc->aac, NULL);
	if (ret < 0) {
		aac_warn("initialize_codec", "Failed to open AAC codec: %s",
				av_err2str(ret));
		return false;
	}

	enc->frame_size = enc->context->frame_size;
	if (!enc->frame_size)
		enc->frame_size = 1024;

	enc->frame_size_bytes = enc->frame_size * (int)enc->audio_size;

	ret = av_samples_alloc(enc->samples, NULL, enc->context->channels,
			enc->frame_size, enc->context->sample_fmt, 0);
	if (ret < 0) {
		aac_warn("initialize_codec", "Failed to create audio buffer: "
		                             "%s", av_err2str(ret));
		return false;
	}

	return true;
}
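A matching teardown is not shown; below is a minimal sketch, assuming the enc->aframe and enc->samples fields used above (a hypothetical helper; the real obs-studio cleanup may differ):

static void free_codec_buffers(struct aac_encoder *enc)
{
	/* A buffer from av_samples_alloc() is released by freeing its
	 * first plane pointer; av_frame_free() is NULL-safe. */
	if (enc->samples[0])
		av_freep(&enc->samples[0]);
	av_frame_free(&enc->aframe);
}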
Code example #2
File: ffmpeg.c Project: wedesoft/aiscm
static AVFrame *allocate_output_audio_frame(SCM scm_self, AVCodecContext *audio_codec, enum AVSampleFormat sample_fmt)
{
  AVFrame *retval = allocate_frame(scm_self);
  retval->format = sample_fmt;
  retval->channel_layout = audio_codec->channel_layout;
  retval->sample_rate = audio_codec->sample_rate;

  if (audio_codec->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)
    retval->nb_samples = 2 * AV_INPUT_BUFFER_MIN_SIZE;
  else
    retval->nb_samples = audio_codec->frame_size;

#ifdef HAVE_AV_FRAME_GET_BUFFER
  int err = av_frame_get_buffer(retval, 0);
  if (err < 0) {
    ffmpeg_destroy(scm_self);
    scm_misc_error("allocate-output-audio-frame", "Error allocating audio frame memory", SCM_EOL);
  }
#else
  int channels = av_get_channel_layout_nb_channels(retval->channel_layout);
  int err = av_samples_alloc(retval->data, &retval->linesize[0], channels, retval->nb_samples, retval->format, 0);
  // TODO: need av_freep?
  if (err < 0) {
    ffmpeg_destroy(scm_self);
    scm_misc_error("allocate-output-audio-frame", "Could not allocate audio buffer", SCM_EOL);
  }
#endif
  return retval;
}
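On the TODO above: when the data comes from av_samples_alloc() (the #else branch), the frame does not own the buffer, so av_frame_free() alone would leak it and av_freep() is indeed needed. A hedged sketch of the matching release (the helper name is illustrative, not part of aiscm):

static void free_output_audio_frame(AVFrame **frame)
{
  if (!frame || !*frame)
    return;
#ifndef HAVE_AV_FRAME_GET_BUFFER
  av_freep(&(*frame)->data[0]); /* buffer came from av_samples_alloc() */
#endif
  av_frame_free(frame); /* with av_frame_get_buffer(), this also frees the data */
}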
Code example #3
File: encoder.c Project: opus111/crossUserMedia
/**
 * Initialize a temporary storage for the specified number of audio samples.
 * The conversion requires temporary storage due to the different format.
 * The number of audio samples to be allocated is specified in frame_size.
 */
static int init_converted_samples(uint8_t ***converted_input_samples,
                                  AVCodecContext *output_codec_context,
                                  int frame_size)
{
    int error;

    /**
     * Allocate as many pointers as there are audio channels.
     * Each pointer will later point to the audio samples of the corresponding
     * channels (although it may be NULL for interleaved formats).
     */
    if (!(*converted_input_samples = calloc(output_codec_context->channels, sizeof(**converted_input_samples)))) {
        fprintf(stderr, "Could not allocate converted input sample pointers\n");
        return AVERROR(ENOMEM);
    }

    /**
     * Allocate memory for the samples of all channels in one consecutive
     * block for convenience.
     */
    if ((error = av_samples_alloc(*converted_input_samples, NULL,
                                  output_codec_context->channels,
                                  frame_size,
                                  output_codec_context->sample_fmt, 0)) < 0) {
        fprintf(stderr, "Could not allocate converted input samples (error '%s')\n", get_error_text(error));
        av_freep(&(*converted_input_samples)[0]);
        free(*converted_input_samples);
        return error;
    }
    return 0;
}
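A call-site sketch (illustrative, not from the crossUserMedia sources): on success the caller eventually frees both the contiguous sample block and the pointer array, mirroring the error path above.

uint8_t **converted_input_samples = NULL;
int err = init_converted_samples(&converted_input_samples,
                                 output_codec_context, frame_size);
if (err >= 0) {
    /* ... fill via swr_convert()/avresample_convert() ... */
    av_freep(&converted_input_samples[0]); /* the contiguous sample block */
    free(converted_input_samples);         /* the per-channel pointer array */
}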
Code example #4
//====================================================================================================================
// Append data to the list creating packet as necessary and filling TempData
//====================================================================================================================
void cSoundBlockList::AppendData(int64_t Position,int16_t *Data,int64_t DataLen) {
    u_int8_t *CurData=(u_int8_t *)Data;
    // Cut data to Packet
    while ((DataLen+CurrentTempSize>=SoundPacketSize)) {
        #if defined(LIBAV) && (LIBAVVERSIONINT<=8)
            u_int8_t *Packet=(u_int8_t *)av_malloc(SoundPacketSize+8);
        #else
            u_int8_t *Packet=NULL;
            int     out_linesize=0;
            av_samples_alloc(&Packet,&out_linesize,Channels,SoundPacketSize+8,SampleFormat,1);
        #endif
        if (Packet) {
            if (CurrentTempSize>0) {                                // Use previously data store in TempData
                int DataToUse=SoundPacketSize-CurrentTempSize;
                memcpy(Packet,TempData,CurrentTempSize);
                memcpy(Packet+CurrentTempSize,CurData,DataToUse);
                DataLen        -=DataToUse;
                CurData        +=DataToUse;
                CurrentTempSize=0;
            } else {                                                // Construct a full packet
                memcpy(Packet,CurData,SoundPacketSize);
                DataLen-=SoundPacketSize;
                CurData+=SoundPacketSize;
            }
            AppendPacket(Position,(int16_t *)Packet);
            Position+=(double(SoundPacketSize/(2*Channels))/double(SamplingRate))*AV_TIME_BASE;   // frames = bytes/(2*Channels) for S16
        } else {
            break;                                              // Allocation failed: stop instead of looping forever
        }
    }
    if (DataLen>0) {                                            // Store a partial packet in temp buffer
        // Store data left to TempData
        memcpy(TempData+CurrentTempSize,CurData,DataLen);
        CurrentTempSize+=DataLen;
    }
}
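The position increment depends on the 16-bit interleaved layout: a packet of SoundPacketSize bytes holds SoundPacketSize / (2 * Channels) sample frames. The same arithmetic with FFmpeg's rescale helper (a sketch, assuming S16 data as the int16_t interface implies):

// Duration of one packet in AV_TIME_BASE units.
int64_t frames   = SoundPacketSize / (2 * Channels);             // 2 bytes per sample
int64_t duration = av_rescale(frames, AV_TIME_BASE, SamplingRate);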
Code example #5
AVFilterBufferRef *ff_default_get_audio_buffer(AVFilterLink *link, int perms,
                                               int nb_samples)
{
    AVFilterBufferRef *samplesref = NULL;
    uint8_t **data;
    int planar      = av_sample_fmt_is_planar(link->format);
    int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout);
    int planes      = planar ? nb_channels : 1;
    int linesize;

    if (!(data = av_mallocz(sizeof(*data) * planes)))
        goto fail;

    if (av_samples_alloc(data, &linesize, nb_channels, nb_samples, link->format, 0) < 0)
        goto fail;

    samplesref = avfilter_get_audio_buffer_ref_from_arrays(data, linesize, perms,
                                                           nb_samples, link->format,
                                                           link->channel_layout);
    if (!samplesref)
        goto fail;

    av_freep(&data);

fail:
    if (data)
        av_freep(&data[0]);
    av_freep(&data);
    return samplesref;
}
Code example #6
File: defaults.c Project: jfuentesbrevity/ffmbc
AVFilterBufferRef *avfilter_default_get_audio_buffer(AVFilterLink *link, int perms,
                                                     enum AVSampleFormat sample_fmt, int nb_samples,
                                                     int64_t channel_layout, int planar)
{
    AVFilterBufferRef *samplesref = NULL;
    int linesize[8];
    uint8_t *data[8];
    int nb_channels = av_get_channel_layout_nb_channels(channel_layout);

    /* Calculate total buffer size, round to multiple of 16 to be SIMD friendly */
    if (av_samples_alloc(data, linesize,
                         nb_channels, nb_samples, sample_fmt,
                         planar, 16) < 0)
        return NULL;

    samplesref =
        avfilter_get_audio_buffer_ref_from_arrays(data, linesize, perms,
                                                  nb_samples, sample_fmt,
                                                  channel_layout, planar);
    if (!samplesref) {
        av_free(data[0]);
        return NULL;
    }

    return samplesref;
}
Code example #7
AudioPlayerLoader::ReadResult ChildFFMpegLoader::readFromReadyFrame(QByteArray &result, int64 &samplesAdded) {
	int res = 0;

	if (_dstSamplesData) { // convert needed
		int64_t dstSamples = av_rescale_rnd(swr_get_delay(_swrContext, _srcRate) + _frame->nb_samples, _dstRate, _srcRate, AV_ROUND_UP);
		if (dstSamples > _maxResampleSamples) {
			_maxResampleSamples = dstSamples;
			av_free(_dstSamplesData[0]);

			if ((res = av_samples_alloc(_dstSamplesData, 0, AudioToChannels, _maxResampleSamples, AudioToFormat, 1)) < 0) {
				_dstSamplesData[0] = 0;
				char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
				LOG(("Audio Error: Unable to av_samples_alloc for file '%1', data size '%2', error %3, %4").arg(file.name()).arg(data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
				return ReadResult::Error;
			}
		}
		if ((res = swr_convert(_swrContext, _dstSamplesData, dstSamples, (const uint8_t**)_frame->extended_data, _frame->nb_samples)) < 0) {
			char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
			LOG(("Audio Error: Unable to swr_convert for file '%1', data size '%2', error %3, %4").arg(file.name()).arg(data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
			return ReadResult::Error;
		}
		int32 resultLen = av_samples_get_buffer_size(0, AudioToChannels, res, AudioToFormat, 1);
		result.append((const char*)_dstSamplesData[0], resultLen);
		samplesAdded += resultLen / _sampleSize;
	} else {
		result.append((const char*)_frame->extended_data[0], _frame->nb_samples * _sampleSize);
		samplesAdded += _frame->nb_samples;
	}
	return ReadResult::Ok;
}
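The av_rescale_rnd(swr_get_delay(...) + nb_samples, ...) line above is the standard libswresample sizing idiom, which recurs in several examples below: the destination buffer must hold the resampler's buffered delay plus the new input, rescaled to the target rate and rounded up. In isolation, with generic names rather than this loader's members:

// Upper bound on output samples for one swr_convert() call.
int64_t dst_nb_samples = av_rescale_rnd(
        swr_get_delay(swr_ctx, src_rate) + src_nb_samples, // buffered + new input
        dst_rate, src_rate, AV_ROUND_UP);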
Code example #8
/**
 * Resample a buffer using FFAudioIO->swr_context.
 * The returned out buffer needs to be freed by the caller.
 *
 * @param aio           FFAudioIO context
 * @param out_buf       out buffer
 * @param out_samples   out samples
 * @param in_buf        in buffer
 * @param in_samples    in samples
 * @return number of samples copied/converted or a negative value, should things go wrong
 */
static int resample(FFAudioIO *aio,  uint8_t **out_buf, int out_samples, const uint8_t **in_buf, const int in_samples) {
    int res = 0;
    int64_t out_channel_count;
    enum AVSampleFormat out_sample_format;

    if (out_samples == 0) goto bail; // nothing to do.

    av_opt_get_int(aio->swr_context, "out_channel_count", 0, &out_channel_count);
    av_opt_get_sample_fmt(aio->swr_context, "out_sample_fmt", 0, &out_sample_format);

    #ifdef DEBUG
        fprintf(stderr, "resample: out_samples=%d in_samples=%d, channels=%d sample_format=%d\n",
            out_samples, in_samples, (int)out_channel_count, out_sample_format);
    #endif

    // allocate temp buffer for resampled data
    res = av_samples_alloc(out_buf, NULL, out_channel_count, out_samples, out_sample_format, 1);
    if (res < 0) {
        res = AVERROR(ENOMEM);
        throwIOExceptionIfError(aio->env, res, "Could not allocate resample buffer.");
        goto bail;
    }

    // run the SWR conversion (even if it is not strictly necessary)
    res = swr_convert(aio->swr_context, out_buf, out_samples, in_buf, in_samples);
    if (res < 0) {
        throwIOExceptionIfError(aio->env, res, "Failed to convert audio data.");
        goto bail;
    }

    bail:

    return res;
}
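As the header comment says, the caller owns the returned buffer. A hedged call-site sketch (placeholder names, and assuming an interleaved output format so a single plane pointer suffices):

uint8_t *out_buf = NULL;
int converted = resample(aio, &out_buf, out_samples, in_buf, in_samples);
if (converted > 0) {
    /* ... consume 'converted' samples from out_buf ... */
}
if (out_buf)
    av_freep(&out_buf); /* buffer was created by av_samples_alloc() */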
Code example #9
File: ffmpeg_decoder.cpp Project: A1-Triard/openmw
bool FFmpeg_Decoder::getAVAudioData()
{
    int got_frame;

    if((*mStream)->codec->codec_type != AVMEDIA_TYPE_AUDIO)
        return false;

    do {
        if(mPacket.size == 0 && !getNextPacket())
            return false;

        /* Decode some data, and check for errors */
        int len = 0;
        if((len=avcodec_decode_audio4((*mStream)->codec, mFrame, &got_frame, &mPacket)) < 0)
            return false;

        /* Move the unread data to the front and clear the end bits */
        int remaining = mPacket.size - len;
        if(remaining <= 0)
            av_free_packet(&mPacket);
        else
        {
            memmove(mPacket.data, &mPacket.data[len], remaining);
            av_shrink_packet(&mPacket, remaining);
        }

        if (!got_frame || mFrame->nb_samples == 0)
            continue;

        if(mSwr)
        {
            if(!mDataBuf || mDataBufLen < mFrame->nb_samples)
            {
                av_freep(&mDataBuf);
                if(av_samples_alloc(&mDataBuf, NULL, av_get_channel_layout_nb_channels(mOutputChannelLayout),
                                    mFrame->nb_samples, mOutputSampleFormat, 0) < 0)
                    return false;
                else
                    mDataBufLen = mFrame->nb_samples;
            }

            if(swr_convert(mSwr, (uint8_t**)&mDataBuf, mFrame->nb_samples,
                (const uint8_t**)mFrame->extended_data, mFrame->nb_samples) < 0)
            {
                return false;
            }
            mFrameData = &mDataBuf;
        }
        else
            mFrameData = &mFrame->data[0];

    } while(got_frame == 0 || mFrame->nb_samples == 0);
    mNextPts += (double)mFrame->nb_samples / (double)(*mStream)->codec->sample_rate;

    return true;
}
Code example #10
File: MpegLoader.cpp Project: 13W/icq-desktop
    int MpegLoader::readMore(QByteArray &result, qint64 &samplesAdded) 
    {
        int res;
        if ((res = av_read_frame(FmtContext_, &Avpkt_)) < 0)
            return -1;

        if (Avpkt_.stream_index == StreamId_) 
        {
            av_frame_unref(Frame_);
            int got_frame = 0;
            if ((res = avcodec_decode_audio4(CodecContext_, Frame_, &got_frame, &Avpkt_)) < 0) 
            {
                av_packet_unref(&Avpkt_);
                if (res == AVERROR_INVALIDDATA) 
                    return 0;
                return -1;
            }

            if (got_frame) 
            {
                if (OutSamplesData_) 
                {
                    int64_t dstSamples = av_rescale_rnd(swr_get_delay(SwrContext_, SrcRate_) + Frame_->nb_samples, DstRate_, SrcRate_, AV_ROUND_UP);
                    if (dstSamples > MaxResampleSamples_) 
                    {
                        MaxResampleSamples_ = dstSamples;
                        av_free(OutSamplesData_[0]);

                        if ((res = av_samples_alloc(OutSamplesData_, 0, OutChannels, MaxResampleSamples_, OutFormat, 1)) < 0) 
                        {
                            OutSamplesData_[0] = 0;
                            av_packet_unref(&Avpkt_);
                            return -1;
                        }
                    }

                    if ((res = swr_convert(SwrContext_, OutSamplesData_, dstSamples, (const uint8_t**)Frame_->extended_data, Frame_->nb_samples)) < 0) 
                    {
                        av_packet_unref(&Avpkt_);
                        return -1;
                    }

                    qint32 resultLen = av_samples_get_buffer_size(0, OutChannels, res, OutFormat, 1);
                    result.append((const char*)OutSamplesData_[0], resultLen);
                    samplesAdded += resultLen / SampleSize_;
                } 
                else 
                {
                    result.append((const char*)Frame_->extended_data[0], Frame_->nb_samples * SampleSize_);
                    samplesAdded += Frame_->nb_samples;
                }
            }
        }
        av_packet_unref(&Avpkt_);
        return 1;
    }
Code example #11
File: resampling_audio.c Project: izacus/FFmpeg
int alloc_samples_array_and_data(uint8_t ***data, int *linesize, int nb_channels,
                                    int nb_samples, enum AVSampleFormat sample_fmt, int align)
{
    int nb_planes = av_sample_fmt_is_planar(sample_fmt) ? nb_channels : 1;

    *data = av_malloc(sizeof(*data) * nb_planes);
    if (!*data)
        return AVERROR(ENOMEM);
    return av_samples_alloc(*data, linesize, nb_channels,
                            nb_samples, sample_fmt, align);
}
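Usage mirrors FFmpeg's own av_samples_alloc_array_and_samples(); a sketch with the matching cleanup (values chosen for illustration):

uint8_t **data = NULL;
int linesize;
int ret = alloc_samples_array_and_data(&data, &linesize, 2, 1024,
                                       AV_SAMPLE_FMT_FLTP, 0);
if (ret >= 0) {
    /* ... use data[0] and data[1] (one plane per channel for FLTP) ... */
    av_freep(&data[0]); /* one call frees the whole contiguous sample block */
}
av_freep(&data);        /* the plane-pointer array from av_malloc() */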
Code example #12
static int av_samples_alloc_array_and_samples(uint8_t ***audio_data, int *linesize, int nb_channels,
                                       int nb_samples, enum AVSampleFormat sample_fmt, int align)
{
    int ret, nb_planes = av_sample_fmt_is_planar(sample_fmt) ? nb_channels : 1;

    *audio_data = av_malloc(nb_planes * sizeof(**audio_data));
    if (!*audio_data)
        return AVERROR(ENOMEM);
    memset(*audio_data, 0, nb_planes * sizeof(**audio_data));
        return AVERROR(ENOMEM);
    ret = av_samples_alloc(*audio_data, linesize, nb_channels,
                           nb_samples, sample_fmt, align);
    if (ret < 0)
        av_freep(audio_data);
    return ret;
}
Code example #13
File: AudioDecoderThread.cpp Project: BeepC/libavg
AudioBufferPtr AudioDecoderThread::resampleAudio(char* pDecodedData, int framesDecoded,
        int currentSampleFormat)
{
    if (!m_pResampleContext) {
#ifdef LIBAVRESAMPLE_VERSION
        m_pResampleContext = avresample_alloc_context();
        av_opt_set_int(m_pResampleContext, "in_channel_layout",
                av_get_default_channel_layout(m_pStream->codec->channels), 0);
        av_opt_set_int(m_pResampleContext, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
        av_opt_set_int(m_pResampleContext, "in_sample_rate", m_InputSampleRate, 0);
        av_opt_set_int(m_pResampleContext, "out_sample_rate", m_AP.m_SampleRate, 0);
        av_opt_set_int(m_pResampleContext, "in_sample_fmt",
                (AVSampleFormat)currentSampleFormat, 0);
        av_opt_set_int(m_pResampleContext, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
        int err = avresample_open(m_pResampleContext);
        AVG_ASSERT(err >= 0);
#else
        m_pResampleContext = av_audio_resample_init(m_AP.m_Channels, 
                m_pStream->codec->channels, m_AP.m_SampleRate, m_InputSampleRate,
                AV_SAMPLE_FMT_S16, (AVSampleFormat)currentSampleFormat, 16, 10, 0, 0.8);
#endif
        AVG_ASSERT(m_pResampleContext);
    }
#ifdef LIBAVRESAMPLE_VERSION
    uint8_t *pResampledData;
    int leftoverSamples = avresample_available(m_pResampleContext);
    int framesAvailable = leftoverSamples +
            av_rescale_rnd(avresample_get_delay(m_pResampleContext) +
                    framesDecoded, m_AP.m_SampleRate, m_InputSampleRate, AV_ROUND_UP);
    av_samples_alloc(&pResampledData, 0, 2, framesAvailable,
            AV_SAMPLE_FMT_S16, 0);
    int framesResampled = avresample_convert(m_pResampleContext, &pResampledData, 0, 
            framesAvailable, (uint8_t**)&pDecodedData, 0, framesDecoded);
    AudioBufferPtr pBuffer(new AudioBuffer(framesResampled, m_AP));
    memcpy(pBuffer->getData(), pResampledData, 
            framesResampled*m_AP.m_Channels*sizeof(short));
    av_freep(&pResampledData);
#else
    short pResampledData[AVCODEC_MAX_AUDIO_FRAME_SIZE/2];
    int framesResampled = audio_resample(m_pResampleContext, pResampledData,
            (short*)pDecodedData, framesDecoded);
    AudioBufferPtr pBuffer(new AudioBuffer(framesResampled, m_AP));
    memcpy(pBuffer->getData(), pResampledData, 
            framesResampled*m_AP.m_Channels*sizeof(short));
#endif
    return pBuffer;
}
Code example #14
File: defaults.c Project: ambrero/FFmpeg
AVFilterBufferRef *avfilter_default_get_audio_buffer(AVFilterLink *link, int perms,
                                                     enum AVSampleFormat sample_fmt, int nb_samples,
                                                     int64_t channel_layout, int planar)
{
    AVFilterBuffer *samples = av_mallocz(sizeof(AVFilterBuffer));
    AVFilterBufferRef *ref = NULL;
    int nb_channels = av_get_channel_layout_nb_channels(channel_layout);

    if (!samples || !(ref = av_mallocz(sizeof(AVFilterBufferRef))))
        goto fail;

    ref->buf                   = samples;
    ref->format                = sample_fmt;

    ref->audio = av_mallocz(sizeof(AVFilterBufferRefAudioProps));
    if (!ref->audio)
        goto fail;

    ref->audio->channel_layout = channel_layout;
    ref->audio->nb_samples     = nb_samples;
    ref->audio->planar         = planar;

    /* make sure the buffer gets read permission or it's useless for output */
    ref->perms = perms | AV_PERM_READ;

    samples->refcount   = 1;
    samples->free       = ff_avfilter_default_free_buffer;

    /* Calculate total buffer size, round to multiple of 16 to be SIMD friendly */
    if (av_samples_alloc(samples->data, samples->linesize,
                         nb_channels, nb_samples, sample_fmt,
                         planar, 16) < 0)
        goto fail;

    memcpy(ref->data,     samples->data,     sizeof(ref->data));
    memcpy(ref->linesize, samples->linesize, sizeof(ref->linesize));

    return ref;

fail:
    if (ref)
        av_free(ref->audio);
    av_free(ref);
    av_free(samples);
    return NULL;
}
Code example #15
void AudioDecoder::handleDecodedFrame(AVFrame* frame) {
	/* compute destination number of samples */
	m_outNumSamples = static_cast<int>(av_rescale_rnd(getDelay(m_resampleCtx, frame->sample_rate)
														  + frame->nb_samples, OUT_SAMPLE_RATE, frame->sample_rate,
													  AV_ROUND_UP));

	if (m_outNumSamples > m_maxOutNumSamples) {
		av_freep(&m_outData[0]);
		auto ret = av_samples_alloc(m_outData, &m_outLinesize, OUT_NUM_CHANNELS, m_outNumSamples,
									OUT_SAMPLE_FORMAT, 1);
		if (ret < 0) {
			mprintf(("FFMPEG: Failed to allocate samples!!!"));
			return;
		}

		m_maxOutNumSamples = m_outNumSamples;
	}

	/* convert to destination format */
	auto ret = resample_convert(m_resampleCtx, m_outData, 0, m_outNumSamples,
								(uint8_t**) frame->data, 0, frame->nb_samples);
	if (ret < 0) {
		mprintf(("FFMPEG: Error while converting audio!\n"));
		return;
	}

	auto outBufsize = av_samples_get_buffer_size(&m_outLinesize, OUT_NUM_CHANNELS, ret, OUT_SAMPLE_FORMAT, 1);
	if (outBufsize < 0) {
		mprintf(("FFMPEG: Could not get sample buffer size!\n"));
		return;
	}

	auto begin = reinterpret_cast<short*>(m_outData[0]);
	auto end = reinterpret_cast<short*>(m_outData[0] + outBufsize);

	auto size = std::distance(begin, end);
	auto newSize = m_audioBuffer.size() + size;

	if (newSize <= m_audioBuffer.capacity()) {
		// We haven't filled the buffer yet
		m_audioBuffer.insert(m_audioBuffer.end(), begin, end);
	} else {
		flushAudioBuffer();
		m_audioBuffer.assign(begin, end);
	}
}
Code example #16
File: visualiserWin.cpp Project: EQ4/mattuliser
static int decodeFrame(AVCodecContext* codecCtx, uint8_t **buffer,
                       int bufferSize, packetQueue* queue,
                       SwrContext *swr)
{
	AVPacket packet;
	AVFrame *decodedAudioFrame = av_frame_alloc();
	int frameDecoded, framesRead, ret;

	//Get a packet.
	if (!queue->get(&packet))
		goto err_out;

	framesRead = avcodec_decode_audio4(codecCtx, decodedAudioFrame,
	                                   &frameDecoded, &packet);

	av_free_packet(&packet);

	if(framesRead < 0)
		//Skip this packet if we have an error.
		goto err_out;

	// Note that the buffer that we're copying into is 3/2 times the
	// size of an audio frame, so we shouldn't need to check for a
	// buffer overflow here.
	if(frameDecoded) {
		if (av_samples_alloc(buffer, &ret,
				     av_get_channel_layout_nb_channels(AV_CH_LAYOUT_STEREO),
				     decodedAudioFrame->nb_samples,
				     AV_SAMPLE_FMT_S16, 0) < 0)
			throw std::runtime_error("Could not allocate decode samples buffer");

		swr_convert(swr, buffer, decodedAudioFrame->nb_samples,
			    (const uint8_t **)decodedAudioFrame->data,
			    decodedAudioFrame->nb_samples);

		goto out;
	}

err_out:
	*buffer = NULL;
	ret = 0;
out:
	av_frame_free(&decodedAudioFrame); // also frees the frame struct, which av_frame_unref() alone would leak
	return ret;
}
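The caller receives either NULL or a packed S16 stereo buffer from av_samples_alloc(), with the byte count as the return value, and is responsible for freeing it. A hedged call-site sketch (names invented for illustration):

uint8_t *pcm = NULL;
int bytes = decodeFrame(codecCtx, &pcm, bufferSize, queue, swr);
if (bytes > 0 && pcm) {
	/* ... hand 'bytes' bytes of PCM to the visualiser ... */
	av_freep(&pcm);
}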
Code example #17
void AudioConverter::feedEncoder()
{
	int gotFrame = 0, err;
	uint8_t *output;
	int out_linesize;
	int avail;

	if (!m_encoderInitialized)
		initEncoder();
	
	assert(m_avpktOutUsed == m_avpktOut.size);

	do
	{
		avail = avresample_available(m_resampler);
		av_samples_alloc(&output, &out_linesize, m_destinationFormat.mChannelsPerFrame, avail, m_encoder->sample_fmt, 0);

		if (avresample_read(m_resampler, &output, avail) != avail)
			throw std::runtime_error("avresample_read() failed");
	
		av_init_packet(&m_avpktOut);
		m_avpktOut.data = 0;
		m_avpktOut.size = 0;
		m_avpktOutUsed = 0;

		LOG << "Got " << avail << " samples\n";
		err = avcodec_fill_audio_frame(m_audioFrame, m_encoder->channels,
				m_encoder->sample_fmt, output, avail * m_destinationFormat.mChannelsPerFrame * (m_destinationFormat.mBitsPerChannel / 8),
				m_destinationFormat.mChannelsPerFrame * (m_destinationFormat.mBitsPerChannel / 8));
		
		if (err < 0)
			throw std::runtime_error("avcodec_fill_audio_frame() failed");

		// Encode PCM data
		err = avcodec_encode_audio2(m_encoder, &m_avpktOut, m_audioFrame, &gotFrame);
		av_freep(&output);
		
		if (err < 0)
			throw std::runtime_error("avcodec_encode_audio2() failed");
	}
	while(!gotFrame);
}
Code example #18
File: ffraw.cpp Project: JohnCrash/ffplayer
	AVRaw *make_audio_raw(int format, int channel, int samples)
	{
		AVRaw * praw = (AVRaw*)malloc(sizeof(AVRaw));
		while (praw)
		{
			memset(praw, 0, sizeof(AVRaw));
			praw->type = RAW_AUDIO;
			praw->format = format;
			praw->channels = channel;
			praw->samples = samples;

			int ret = av_samples_alloc(praw->data, praw->linesize, channel, samples, (AVSampleFormat)format, 0);
			if (ret > 0)
			{
				praw->size = ret;
			}
			else
			{
				char errmsg[ERROR_BUFFER_SIZE];
				av_strerror(ret, errmsg, ERROR_BUFFER_SIZE);
				av_log(NULL, AV_LOG_FATAL, "make_audio_raw av_samples_alloc : %s \n", errmsg);
				break;
			}

			return praw;
		}

		/*
		* Error handling: release the partially constructed raw.
		*/
		if (praw)
		{
			ffFreeRaw(praw);
		}
		else
		{
			av_log(NULL, AV_LOG_FATAL, "make_audio_raw out of memory.\n");
		}
		praw = NULL;
		return praw;
	}
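ffFreeRaw() is not shown; for an audio AVRaw it presumably releases the sample block before the struct. A sketch of such a free under that assumption (not the actual ffplayer code):

	static void free_audio_raw(AVRaw *praw)
	{
		if (!praw)
			return;
		if (praw->data[0])
			av_freep(&praw->data[0]); /* block from av_samples_alloc() */
		free(praw);                   /* the struct came from malloc() */
	}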
Code example #19
File: audio.c Project: SmartJog/ffmpeg
AVFilterBufferRef *ff_default_get_audio_buffer(AVFilterLink *link, int perms,
                                               int nb_samples)
{
    AVFilterBufferRef *samplesref = NULL;
    uint8_t **data;
    int planar      = av_sample_fmt_is_planar(link->format);
    int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout);
    int planes      = planar ? nb_channels : 1;
    int linesize;
    int full_perms = AV_PERM_READ | AV_PERM_WRITE | AV_PERM_PRESERVE |
                     AV_PERM_REUSE | AV_PERM_REUSE2 | AV_PERM_ALIGN;

    av_assert1(!(perms & ~(full_perms | AV_PERM_NEG_LINESIZES)));

    if (!(data = av_mallocz(sizeof(*data) * planes)))
        goto fail;

    if (av_samples_alloc(data, &linesize, nb_channels, nb_samples, link->format, 0) < 0)
        goto fail;

    samplesref = avfilter_get_audio_buffer_ref_from_arrays(data, linesize, full_perms,
                                                           nb_samples, link->format,
                                                           link->channel_layout);
    if (!samplesref)
        goto fail;

    samplesref->audio->sample_rate = link->sample_rate;

    av_freep(&data);

fail:
    if (data)
        av_freep(&data[0]);
    av_freep(&data);
    return samplesref;
}
Code example #20
long audio_tutorial_resample(VideoState *is, struct AVFrame *inframe) {

#ifdef __RESAMPLER__

#ifdef __LIBAVRESAMPLE__

// There is pre 1.0 libavresample and then there is above..
#if LIBAVRESAMPLE_VERSION_MAJOR == 0
    void **resample_input_bytes = (void **)inframe->extended_data;
#else
    uint8_t **resample_input_bytes = (uint8_t **)inframe->extended_data;
#endif

#else
    uint8_t **resample_input_bytes = (uint8_t **)inframe->extended_data;
#endif


    int resample_nblen = 0;
    long resample_long_bytes = 0;

    if( is->pResampledOut == NULL || inframe->nb_samples > is->resample_size) {
        
#ifdef __LIBAVRESAMPLE__
        is->resample_size = av_rescale_rnd(avresample_get_delay(is->pSwrCtx) +
                                           inframe->nb_samples,
                                           44100,
                                           44100,
                                           AV_ROUND_UP);
#else
        is->resample_size = av_rescale_rnd(swr_get_delay(is->pSwrCtx,
                                           44100) +
                                           inframe->nb_samples,
                                           44100,
                                           44100,
                                           AV_ROUND_UP);
#endif

        if(is->pResampledOut != NULL) {
            av_free(is->pResampledOut);
            is->pResampledOut = NULL;
        }

        av_samples_alloc(&is->pResampledOut, &is->resample_lines, 2, is->resample_size,
                         AV_SAMPLE_FMT_S16, 0);

    }


#ifdef __LIBAVRESAMPLE__

// OLD API (0.0.3) ... still NEW API (1.0.0 and above).. very frustrating..
// USED IN FFMPEG 1.0 (LibAV SOMETHING!). New in FFMPEG 1.1 and libav 9
#if LIBAVRESAMPLE_VERSION_INT <= 3
    // AVResample OLD
    resample_nblen = avresample_convert(is->pSwrCtx, (void **)&is->pResampledOut, 0,
                                        is->resample_size,
                                        (void **)resample_input_bytes, 0, inframe->nb_samples);
#else
    //AVResample NEW
    resample_nblen = avresample_convert(is->pSwrCtx, (uint8_t **)&is->pResampledOut,
                                        0, is->resample_size,
                                        (uint8_t **)resample_input_bytes, 0, inframe->nb_samples);
#endif

#else
    // SWResample
    resample_nblen = swr_convert(is->pSwrCtx, (uint8_t **)&is->pResampledOut,
                                 is->resample_size,
                                 (const uint8_t **)resample_input_bytes, inframe->nb_samples);
#endif

    if (resample_nblen < 0) {
        fprintf(stderr, "reSample to another sample format failed!\n");
        return -1;
    }

    resample_long_bytes = av_samples_get_buffer_size(NULL, 2, resample_nblen,
                         AV_SAMPLE_FMT_S16, 1);

    return resample_long_bytes;

#else
    return -1;
#endif
}
Code example #21
File: muxer.c Project: cgutman/AndCast
int submitAudioFrame(char *data, int length, long frameTimestamp) {
    AVPacket pkt;
    int ret;
    AVFrame *frame;
    int got_packet;
    int sampleCount;

    // If no stream header has been written yet, don't do anything
    if (hasWrittenStreamHeader == 0) {
        return 0;
    }

    frame = avcodec_alloc_frame();
    if (frame == NULL) {
        fprintf(stderr, "Failed to allocate frame\n");
        return -1;
    }

    av_init_packet(&pkt);

    // Copy our data in
    memcpy(srcSamplesData[0], data, length);
    srcSamplesCount = length / 4;

    // Resample to floating point
    sampleCount = av_rescale_rnd(swr_get_delay(swrContext, audioCodecCtx->sample_rate) + srcSamplesCount,
        audioCodecCtx->sample_rate, audioCodecCtx->sample_rate, AV_ROUND_UP);
    if (sampleCount > maxDstSamplesCount) {
        // Need to resize the buffer
        av_free(dstSamplesData[0]);

        ret = av_samples_alloc(dstSamplesData, &dstSamplesLinesize, audioCodecCtx->channels,
            sampleCount, audioCodecCtx->sample_fmt, 0);
        if (ret < 0) {
            fprintf(stderr, "av_samples_alloc() failed: %d\n", ret);
            avcodec_free_frame(&frame);  // don't leak the frame on error
            return ret;
        }

        maxDstSamplesCount = sampleCount;
        dstSamplesSize = av_samples_get_buffer_size(NULL, audioCodecCtx->channels, sampleCount,
            audioCodecCtx->sample_fmt, 0);
    }

    ret = swr_convert(swrContext, dstSamplesData, sampleCount,
        (const unsigned char **)srcSamplesData, srcSamplesCount);
    if (ret < 0) {
        fprintf(stderr, "swr_convert() failed: %d\n", ret);
        avcodec_free_frame(&frame);  // don't leak the frame on error
        return ret;
    }

    frame->nb_samples = sampleCount;
    ret = avcodec_fill_audio_frame(frame, audioCodecCtx->channels,
        audioCodecCtx->sample_fmt, dstSamplesData[0], dstSamplesSize, 0);
    if (ret < 0) {
        fprintf(stderr, "avcodec_fill_audio_frame() failed: %d\n", ret);
        avcodec_free_frame(&frame);
        return ret;
    }

    // pkt is freed on failure or !got_packet
    pkt.data = NULL;
    pkt.size = 0;
    ret = avcodec_encode_audio2(audioCodecCtx, &pkt, frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "avcodec_encode_audio2() failed: %d\n", ret);
        avcodec_free_frame(&frame);
        return ret;
    }

    if (!got_packet) {
        // Nothing to write, but release the frame before returning
        avcodec_free_frame(&frame);
        return 0;
    }

    pkt.stream_index = audioStream->index;

    pkt.pts = frameTimestamp;

    pthread_mutex_lock(&streamLock);
    ret = av_interleaved_write_frame(formatContext, &pkt);
    pthread_mutex_unlock(&streamLock);

    avcodec_free_frame(&frame);
    av_free_packet(&pkt);

    if (ret != 0) {
        fprintf(stderr, "av_interleaved_write_frame() failed: %d\n", ret);
        return ret;
    }

    return 0;
}
Code example #22
File: resampling_audio.c Project: izacus/FFmpeg
int main(int argc, char **argv)
{
    int64_t src_ch_layout = AV_CH_LAYOUT_STEREO, dst_ch_layout = AV_CH_LAYOUT_SURROUND;
    int src_rate = 48000, dst_rate = 44100;
    uint8_t **src_data = NULL, **dst_data = NULL;
    int src_nb_channels = 0, dst_nb_channels = 0;
    int src_linesize, dst_linesize;
    int src_nb_samples = 1024, dst_nb_samples, max_dst_nb_samples;
    enum AVSampleFormat src_sample_fmt = AV_SAMPLE_FMT_DBL, dst_sample_fmt = AV_SAMPLE_FMT_S16;
    const char *dst_filename = NULL;
    FILE *dst_file;
    int dst_bufsize;
    const char *fmt;
    struct SwrContext *swr_ctx;
    double t;
    int ret;

    if (argc != 2) {
        fprintf(stderr, "Usage: %s output_file\n"
                "API example program to show how to resample an audio stream with libswresample.\n"
                "This program generates a series of audio frames, resamples them to a specified "
                "output format and rate and saves them to an output file named output_file.\n",
            argv[0]);
        exit(1);
    }
    dst_filename = argv[1];

    dst_file = fopen(dst_filename, "wb");
    if (!dst_file) {
        fprintf(stderr, "Could not open destination file %s\n", dst_filename);
        exit(1);
    }

    /* create resampler context */
    swr_ctx = swr_alloc();
    if (!swr_ctx) {
        fprintf(stderr, "Could not allocate resampler context\n");
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* set options */
    av_opt_set_int(swr_ctx, "in_channel_layout",    src_ch_layout, 0);
    av_opt_set_int(swr_ctx, "in_sample_rate",       src_rate, 0);
    av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", src_sample_fmt, 0);

    av_opt_set_int(swr_ctx, "out_channel_layout",    dst_ch_layout, 0);
    av_opt_set_int(swr_ctx, "out_sample_rate",       dst_rate, 0);
    av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", dst_sample_fmt, 0);

    /* initialize the resampling context */
    if ((ret = swr_init(swr_ctx)) < 0) {
        fprintf(stderr, "Failed to initialize the resampling context\n");
        goto end;
    }

    /* allocate source and destination samples buffers */

    src_nb_channels = av_get_channel_layout_nb_channels(src_ch_layout);
    ret = alloc_samples_array_and_data(&src_data, &src_linesize, src_nb_channels,
                                       src_nb_samples, src_sample_fmt, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate source samples\n");
        goto end;
    }

    /* compute the number of converted samples: buffering is avoided
     * ensuring that the output buffer will contain at least all the
     * converted input samples */
    max_dst_nb_samples = dst_nb_samples =
        av_rescale_rnd(src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);

    /* buffer is going to be directly written to a rawaudio file, no alignment */
    dst_nb_channels = av_get_channel_layout_nb_channels(dst_ch_layout);
    ret = alloc_samples_array_and_data(&dst_data, &dst_linesize, dst_nb_channels,
                                       dst_nb_samples, dst_sample_fmt, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate destination samples\n");
        goto end;
    }

    t = 0;
    do {
        /* generate synthetic audio */
        fill_samples((double *)src_data[0], src_nb_samples, src_nb_channels, src_rate, &t);

        /* compute destination number of samples */
        dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, src_rate) +
                                        src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
        if (dst_nb_samples > max_dst_nb_samples) {
            av_free(dst_data[0]);
            ret = av_samples_alloc(dst_data, &dst_linesize, dst_nb_channels,
                                   dst_nb_samples, dst_sample_fmt, 1);
            if (ret < 0)
                break;
            max_dst_nb_samples = dst_nb_samples;
        }

        /* convert to destination format */
        ret = swr_convert(swr_ctx, dst_data, dst_nb_samples, (const uint8_t **)src_data, src_nb_samples);
        if (ret < 0) {
            fprintf(stderr, "Error while converting\n");
            goto end;
        }
        dst_bufsize = av_samples_get_buffer_size(&dst_linesize, dst_nb_channels,
                                                 ret, dst_sample_fmt, 1);
        printf("t:%f in:%d out:%d\n", t, src_nb_samples, ret);
        fwrite(dst_data[0], 1, dst_bufsize, dst_file);
    } while (t < 10);

    if ((ret = get_format_from_sample_fmt(&fmt, dst_sample_fmt)) < 0)
        goto end;
    fprintf(stderr, "Resampling succeeded. Play the output file with the command:\n"
            "ffplay -f %s -channel_layout %"PRId64" -channels %d -ar %d %s\n",
            fmt, dst_ch_layout, dst_nb_channels, dst_rate, dst_filename);

end:
    if (dst_file)
        fclose(dst_file);

    if (src_data)
        av_freep(&src_data[0]);
    av_freep(&src_data);

    if (dst_data)
        av_freep(&dst_data[0]);
    av_freep(&dst_data);

    swr_free(&swr_ctx);
    return ret < 0;
}
Code example #23
int udpsocket::ts_demux(void)
{
    AVCodec *pVideoCodec[VIDEO_NUM];
    AVCodec *pAudioCodec[AUDIO_NUM];
    AVCodecContext *pVideoCodecCtx[VIDEO_NUM];
    AVCodecContext *pAudioCodecCtx[AUDIO_NUM];
    AVIOContext * pb;
    AVInputFormat *piFmt;
    AVFormatContext *pFmt;
    uint8_t *buffer;
    int videoindex[VIDEO_NUM];
    int audioindex[AUDIO_NUM];
    AVStream *pVst[VIDEO_NUM];
    AVStream *pAst[AUDIO_NUM];
    AVFrame *pVideoframe[VIDEO_NUM];
    AVFrame *pAudioframe[AUDIO_NUM];
    AVFrame *pOutAudioframe[AUDIO_NUM];
    AVFrame *pOutAudioframelast[AUDIO_NUM];
    AVPacket pkt;
    int got_picture;
    int video_num[VIDEO_NUM];
    int audio_num[AUDIO_NUM];
    int frame_size;

    //transcodepool
    transcodepool*  pVideoTransPool[VIDEO_NUM];
    transcodepool*  pAudioTransPool[AUDIO_NUM];

    for( int i=0; i<VIDEO_NUM; i++ ){
        pVideoCodec[i] = NULL;
        pVideoCodecCtx[i] =NULL;
        videoindex[i] = -1;
        pVst[i] = NULL;
        video_num[i] = 0;
        pVideoframe[i] = NULL;
        pVideoframe[i] = av_frame_alloc();
        pVideoTransPool[i] = NULL;
    }
    for( int i=0; i<AUDIO_NUM; i++ ){
        pAudioCodec[i] = NULL;
        pAudioCodecCtx[i] = NULL;
        audioindex[i] = -1;
        pAst[i] = NULL;
        audio_num[i] = 0;
        pOutAudioframe[i] = NULL;
        pOutAudioframe[i] = av_frame_alloc();
        pOutAudioframelast[i] = NULL;
        pOutAudioframelast[i] = av_frame_alloc();
        pAudioframe[i] = NULL;
        pAudioframe[i] = av_frame_alloc();
        pAudioTransPool[i] = NULL;
    }
    pb = NULL;
    piFmt = NULL;
    pFmt = NULL;
    buffer = (uint8_t*)av_mallocz(sizeof(uint8_t)*BUFFER_SIZE);
    got_picture = 0;
    frame_size = AVCODEC_MAX_AUDIO_FRAME_SIZE*3/2;

    //encoder
    AVFormatContext *ofmt_ctx = NULL;
    AVPacket enc_pkt;
    AVStream *out_stream;
    AVCodecContext *enc_ctx;
    AVCodec *encoder;

    AVFormatContext *outAudioFormatCtx[AUDIO_NUM];
    AVPacket audio_pkt;
    AVStream *audio_stream[AUDIO_NUM];
    AVCodecContext *AudioEncodeCtx[AUDIO_NUM];
    AVCodec *AudioEncoder[AUDIO_NUM];

    fp_v = fopen("OUT.h264","wb+"); // output file
    fp_a = fopen("audio_out.aac","wb+");

    //FFMPEG
    av_register_all();
    pb = avio_alloc_context(buffer, 4096, 0, NULL, read_data, NULL, NULL);
//    printf("thread %d pid %lu tid %lu\n",index,(unsigned long)getpid(),(unsigned long)pthread_self());
    if (!pb) {
        fprintf(stderr, "avio alloc failed!\n");
        return -1;
    }

    int x = av_probe_input_buffer(pb, &piFmt, "", NULL, 0, 0);
    if (x < 0) {
        printf("probe error: %d",x);
       // fprintf(stderr, "probe failed!\n");
    } else {
        fprintf(stdout, "probe success!\n");
        fprintf(stdout, "format: %s[%s]\n", piFmt->name, piFmt->long_name);
    }
    pFmt = avformat_alloc_context();
    pFmt->pb = pb;

    if (avformat_open_input(&pFmt, "", piFmt, NULL) < 0) {
        fprintf(stderr, "avformat open failed.\n");
        return -1;
    } else {
        fprintf(stdout, "open stream success!\n");
    }
    //pFmt->probesize = 4096 * 2000;
    //pFmt->max_analyze_duration = 5 * AV_TIME_BASE;
    //pFmt->probesize = 2048;
   // pFmt->max_analyze_duration = 1000;
    pFmt->probesize = 2048 * 1000 ;
    pFmt->max_analyze_duration = 2048 * 1000;
    if (avformat_find_stream_info(pFmt,0) < 0) {
        fprintf(stderr, "could not fine stream.\n");
        return -1;
    }
    printf("dump format\n");
    av_dump_format(pFmt, 0, "", 0);

    int videox = 0,audiox = 0;
    for (int i = 0; i < pFmt->nb_streams; i++) {
        if(videox == 7 && audiox == 7)
            break;
        if ( pFmt->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO && videox < 7 ) {
            videoindex[ videox++ ] = i;
        }
        if ( pFmt->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO && audiox < 7 ) {
            audioindex[ audiox++ ] = i;
        }
    }

    for(int i=0; i<VIDEO_NUM; i++)
        printf("videoindex %d = %d, audioindex %d = %d\n",i , videoindex[i], i ,audioindex[i]);

    if (videoindex[6] < 0 || audioindex[6] < 0) {
        fprintf(stderr, "videoindex=%d, audioindex=%d\n", videoindex[6], audioindex[6]);
        return -1;
    }

    for( int i=0; i<VIDEO_NUM; i++ ){
        pVst[i] = pFmt->streams[videoindex[i]];
        pVideoCodecCtx[i] = pVst[i]->codec;
        pVideoCodec[i] = avcodec_find_decoder(pVideoCodecCtx[i]->codec_id);
        if (!pVideoCodec[i]) {
            fprintf(stderr, "could not find video decoder!\n");
            return -1;
        }
        if (avcodec_open2(pVideoCodecCtx[i], pVideoCodec[i], NULL) < 0) {
            fprintf(stderr, "could not open video codec!\n");
            return -1;
        }
    }

    for( int i=0; i<AUDIO_NUM; i++ ){
        pAst[i] = pFmt->streams[audioindex[i]];
        pAudioCodecCtx[i] = pAst[i]->codec;
        pAudioCodec[i] = avcodec_find_decoder(pAudioCodecCtx[i]->codec_id);
        if (!pAudioCodec[i]) {
            fprintf(stderr, "could not find audio decoder!\n");
            return -1;
        }
        if (avcodec_open2(pAudioCodecCtx[i], pAudioCodec[i], NULL) < 0) {
            fprintf(stderr, "could not open audio codec!\n");
            return -1;
        }
    }

    //video encoder init
    avformat_alloc_output_context2(&ofmt_ctx, NULL, "h264", NULL);
    unsigned char* outbuffer = NULL;
    outbuffer = (unsigned char*)av_malloc(1024*1000);
    AVIOContext *avio_out = NULL;
    avio_out = avio_alloc_context(outbuffer, 1024*1000, 0, NULL, NULL, write_buffer,NULL);
    if(avio_out == NULL){
        printf("avio_out error\n");
        return -1;
    }
    ofmt_ctx->pb = avio_out;
    ofmt_ctx->flags = AVFMT_FLAG_CUSTOM_IO;
    out_stream = avformat_new_stream(ofmt_ctx, NULL);
    if(!out_stream){
        av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
        return -1;
    }
    enc_ctx = out_stream->codec;
    encoder = avcodec_find_encoder(AV_CODEC_ID_H264);
    enc_ctx->height = pVideoCodecCtx[0]->height;
    enc_ctx->width = pVideoCodecCtx[0]->width;
    enc_ctx->sample_aspect_ratio = pVideoCodecCtx[0]->sample_aspect_ratio;
    enc_ctx->pix_fmt = encoder->pix_fmts[0];
    out_stream->time_base = pVst[0]->time_base;
//    out_stream->time_base.num = 1;
//    out_stream->time_base.den = 25;
    enc_ctx->me_range = 16;
    enc_ctx->max_qdiff = 4;
    enc_ctx->qmin = 25;
    enc_ctx->qmax = 40;
    enc_ctx->qcompress = 0.6;
    enc_ctx->refs = 3;
    enc_ctx->bit_rate = 1000000;
    int re = avcodec_open2(enc_ctx, encoder, NULL);
    if (re < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream \n");
        return re;
    }

    if(ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
        enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
    re = avformat_write_header(ofmt_ctx, NULL);
    if(re < 0){
        av_log(NULL, AV_LOG_ERROR, "Error occured when opening output file\n");
        return re;
    }

    //audio encoder
    for( int i=0; i<AUDIO_NUM; i++){
        outAudioFormatCtx[i] = NULL;
//        audio_pkt = NULL;
        audio_stream[i] = NULL;
        AudioEncodeCtx[i] = NULL;
        AudioEncoder[i] = NULL;
    }
    const char* out_audio_file = "transcodeaudio.aac";          //Output URL

    //Method 1.
    outAudioFormatCtx[0] = avformat_alloc_context();
    outAudioFormatCtx[0]->oformat = av_guess_format(NULL, out_audio_file, NULL);
    AVIOContext *avio_audio_out = NULL;
    avio_audio_out = avio_alloc_context(outbuffer, 1024*1000, 0, NULL, NULL, write_buffer,NULL);
    if(avio_audio_out == NULL){
        printf("avio_out error\n");
        return -1;
    }
    outAudioFormatCtx[0]->pb = avio_audio_out;
    //Method 2.
    //avformat_alloc_output_context2(&pFormatCtx, NULL, NULL, out_file);
    //fmt = pFormatCtx->oformat;

    //Open output URL
    if (avio_open(&outAudioFormatCtx[0]->pb,out_audio_file, AVIO_FLAG_READ_WRITE) < 0){
        printf("Failed to open output file!\n");
        return -1;
    }

    //Show some information
    av_dump_format(outAudioFormatCtx[0], 0, out_audio_file, 1);

    AudioEncoder[0] = avcodec_find_encoder(AV_CODEC_ID_AAC);
    if (!AudioEncoder[0]){
        printf("Can not find encoder!\n");
        return -1;
    }
    audio_stream[0] = avformat_new_stream(outAudioFormatCtx[0], AudioEncoder[0]);
    if (audio_stream[0]==NULL){
        return -1;
    }
    AudioEncodeCtx[0] = audio_stream[0]->codec;
    AudioEncodeCtx[0]->codec_id =  outAudioFormatCtx[0]->oformat->audio_codec;
    AudioEncodeCtx[0]->codec_type = AVMEDIA_TYPE_AUDIO;
    AudioEncodeCtx[0]->sample_fmt = AV_SAMPLE_FMT_S16;
    AudioEncodeCtx[0]->sample_rate= 48000;//44100
    AudioEncodeCtx[0]->channel_layout=AV_CH_LAYOUT_STEREO;
    AudioEncodeCtx[0]->channels = av_get_channel_layout_nb_channels(AudioEncodeCtx[0]->channel_layout);
    AudioEncodeCtx[0]->bit_rate = 64000;//64000
    /** Allow the use of the experimental AAC encoder */
    AudioEncodeCtx[0]->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;

    /** Set the sample rate for the container. */
    audio_stream[0]->time_base.den = pAudioCodecCtx[0]->sample_rate;
    audio_stream[0]->time_base.num = 1;

    if (avcodec_open2(AudioEncodeCtx[0], AudioEncoder[0],NULL) < 0){
        printf("Failed to open encoder!\n");
        return -1;
    }

    av_samples_get_buffer_size(NULL, AudioEncodeCtx[0]->channels,AudioEncodeCtx[0]->frame_size,AudioEncodeCtx[0]->sample_fmt, 1);

    //uint8_t samples[AVCODEC_MAX_AUDIO_FRAME_SIZE*3/2];
    av_init_packet(&pkt);
    av_init_packet(&audio_pkt);
    av_init_packet(&enc_pkt);
    AVAudioFifo *af = NULL;
    SwrContext *resample_context = NULL;
    long long pts = 0;
    /** Initialize the resampler to be able to convert audio sample formats. */
//    if (init_resampler(input_codec_context, output_codec_context,
//                       &resample_context))
    for(int i=0; i<1; i++){
        printf("work \n");
        printf(" samplerate input = %d , samplerate output = %d\n",pAudioCodecCtx[i]->sample_rate, AudioEncodeCtx[i]->sample_rate);
        resample_context = swr_alloc_set_opts(NULL, av_get_default_channel_layout(AudioEncodeCtx[i]->channels),
                                                          AudioEncodeCtx[i]->sample_fmt,
                                                          AudioEncodeCtx[i]->sample_rate,
                                                          av_get_default_channel_layout(pAudioCodecCtx[i]->channels),
                                                          pAudioCodecCtx[i]->sample_fmt,
                                                          pAudioCodecCtx[i]->sample_rate,
                                                          0, NULL);
        swr_init(resample_context);
    }
    af = av_audio_fifo_alloc(AudioEncodeCtx[0]->sample_fmt, AudioEncodeCtx[0]->channels, 1);
    if(af == NULL)
    {
        printf("error af \n");
        return -1;
    }

    while(1) {
        if (av_read_frame(pFmt, &pkt) >= 0) {
            for( int i=0; i<1; i++ ){
                if (pkt.stream_index == videoindex[i]) {
//                    av_frame_free(&pframe);
                    avcodec_decode_video2(pVideoCodecCtx[i], pVideoframe[i], &got_picture, &pkt);
                    if (got_picture) {
                        if(videoindex[i] == 0){
//                            m_tsRecvPool->write_buffer(pkt.data, pkt.size);
                            pVideoframe[i]->pts = av_frame_get_best_effort_timestamp(pVideoframe[i]);
                            pVideoframe[i]->pict_type = AV_PICTURE_TYPE_NONE;
//                            printf("videoframesize0 = %d, size1 = %d, size2 = %d, size3 = %d, size4 = %d,format = %d\n",pVideoframe[i]->linesize[0],
//                                    pVideoframe[i]->linesize[1],pVideoframe[i]->linesize[2],pVideoframe[i]->linesize[3],pVideoframe[i]->linesize[4],pVideoframe[i]->format);
//                            pVideoTransPool[i]->PutFrame( pVideoframe[i] ,i);
                            int enc_got_frame = 0;
                            /*  ffmpeg encoder */
                            enc_pkt.data = NULL;
                            enc_pkt.size = 0;
                            av_init_packet(&enc_pkt);
                            re = avcodec_encode_video2(ofmt_ctx->streams[videoindex[i]]->codec, &enc_pkt,
                                    pVideoframe[i], &enc_got_frame);
//                            printf("enc_got_frame =%d, re = %d \n",enc_got_frame, re);
                            printf("video Encode 1 Packet\tsize:%d\tpts:%lld\n",enc_pkt.size,enc_pkt.pts);
                            /* prepare packet for muxing */
//                            fwrite(enc_pkt.data,enc_pkt.size, 1, fp_v);
                        }
//                        printf(" video %d decode %d num\n", i, video_num[i]++);
                        break;
                    }

                 }else if (pkt.stream_index == audioindex[i]) {
                    if (avcodec_decode_audio4(pAudioCodecCtx[i], pAudioframe[i], &frame_size, &pkt) >= 0) {
                        if (i == 0){

//                                fwrite(pAudioframe[i]->data[0],pAudioframe[i]->linesize[0], 1, fp_a);
//                                printf("index = %d audio %d decode %d num\n", index, i, audio_num[i]++);
                            uint8_t *converted_input_samples = NULL;
                            // av_samples_alloc() allocates the buffer itself; a separate calloc() would just leak
                            av_samples_alloc(&converted_input_samples, NULL, AudioEncodeCtx[i]->channels, pAudioframe[i]->nb_samples, AudioEncodeCtx[i]->sample_fmt, 0);
                            int error = 0;
                            if((error = swr_convert(resample_context, &converted_input_samples, pAudioframe[i]->nb_samples,
                                                   (const uint8_t**)pAudioframe[i]->extended_data, pAudioframe[i]->nb_samples))<0){
                                printf("error  : %d\n",error);
                            }
//                            av_audio_fifo_realloc(af, av_audio_fifo_size(af) + pAudioframe[i]->nb_samples);
                            av_audio_fifo_write(af, (void **)&converted_input_samples, pAudioframe[i]->nb_samples);
                            av_freep(&converted_input_samples); // the FIFO copies the data, so the conversion buffer can go
//                            fwrite(pkt.data,pkt.size, 1, fp_a);
//                            pAudioframe[i]->data[0] = frame_buf;
//                            init_converted_samples(&converted_input_samples, output_codec_context, pAudioframe[i]->nb_samples);

                            /** Initialize temporary storage for one output frame. */
//                            printf("pkt.size = %d , pkt.pts = %d ,pkt.dts = %d\n",pkt.size, pkt.pts, pkt.dts);
//                            printf("framesize = %d, audioframesize = %d\n", pAudioframe[i]->nb_samples, frame_size);

//                            pOutAudioframe[i]->pict_type = AV_PICTURE_TYPE_NONE;
                            int got_frame=0;
                            //Encode
//                            av_init_packet(&audio_pkt);
//                            audio_pkt.data = NULL;
//                            audio_pkt.size = 0;
//                            avcodec_encode_audio2(AudioEncodeCtx[0], &audio_pkt, pOutAudioframe[i], &got_frame);
//                            printf("Encode 1 Packet\tsize:%d\tpts:%lld\n", audio_pkt.size, audio_pkt.pts);
                            while(av_audio_fifo_size(af) >= AudioEncodeCtx[i]->frame_size){
                                int frame_size = FFMIN(av_audio_fifo_size(af),AudioEncodeCtx[0]->frame_size);
                                pOutAudioframe[i]->nb_samples =  frame_size;
                                pOutAudioframe[i]->channel_layout = AudioEncodeCtx[0]->channel_layout;
                                pOutAudioframe[i]->sample_rate = AudioEncodeCtx[0]->sample_rate;
                                pOutAudioframe[i]->format = AudioEncodeCtx[0]->sample_fmt;

                                av_frame_get_buffer(pOutAudioframe[i], 0);
                                av_audio_fifo_read(af, (void **)&pOutAudioframe[i]->data, frame_size);

                                pOutAudioframe[i]->pts=pts;
                                pts += pOutAudioframe[i]->nb_samples;

                                audio_pkt.data = NULL;
                                audio_pkt.size = 0;
                                av_init_packet(&audio_pkt);
                                avcodec_encode_audio2(AudioEncodeCtx[0], &audio_pkt, pOutAudioframe[i], &got_frame);
                                printf("audio Encode 1 Packet\tsize:%d\tpts:%lld\n", audio_pkt.size, audio_pkt.pts);
                                fwrite(audio_pkt.data,audio_pkt.size, 1, fp_a);
                            }
                        }
//                        if(i == 0){
//                            fwrite(pkt.data,pkt.size, 1, fp_a);
//                        }
//                        printf("index = %d audio %d decode %d num\n", index, i, audio_num[i]++);
                        break;
                    }
                }
            }
            av_free_packet(&pkt);
            av_free_packet(&enc_pkt);
        }
    }

    av_free(buffer);
    for(int i=0; i<VIDEO_NUM; i++)
        av_free(pVideoframe[i]);

    for(int i=0; i<AUDIO_NUM; i++)
        av_free(pAudioframe[i]);

    return 0;

}
Code example #24
File: af_aconvert.c Project: Pockets/FFmpeg-MVC
static int init_buffers(AVFilterLink *inlink, int nb_samples)
{
    AConvertContext *aconvert = inlink->dst->priv;
    AVFilterLink * const outlink = inlink->dst->outputs[0];
    int i, packed_stride = 0;
    const unsigned
        packing_conv = inlink->planar != outlink->planar &&
                       aconvert->out_nb_channels != 1,
        format_conv  = inlink->format != outlink->format;
    int nb_channels  = aconvert->out_nb_channels;

    uninit(inlink->dst);
    aconvert->max_nb_samples = nb_samples;

    if (aconvert->convert_chlayout) {
        /* allocate buffer for storing intermediary mixing samplesref */
        uint8_t *data[8];
        int linesize[8];
        int nb_channels = av_get_channel_layout_nb_channels(outlink->channel_layout);

        if (av_samples_alloc(data, linesize, nb_channels, nb_samples,
                             inlink->format, inlink->planar, 16) < 0)
            goto fail_no_mem;
        aconvert->mix_samplesref =
            avfilter_get_audio_buffer_ref_from_arrays(data, linesize, AV_PERM_WRITE,
                                                      nb_samples, inlink->format,
                                                      outlink->channel_layout,
                                                      inlink->planar);
        if (!aconvert->mix_samplesref)
            goto fail_no_mem;
    }

    // if there's a format/packing conversion we need an audio_convert context
    if (format_conv || packing_conv) {
        aconvert->out_samplesref =
            avfilter_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
        if (!aconvert->out_samplesref)
            goto fail_no_mem;

        aconvert->in_strides [0] = av_get_bytes_per_sample(inlink ->format);
        aconvert->out_strides[0] = av_get_bytes_per_sample(outlink->format);

        aconvert->out_conv = aconvert->out_samplesref->data;
        if (aconvert->mix_samplesref)
            aconvert->in_conv = aconvert->mix_samplesref->data;

        if (packing_conv) {
            // packed -> planar
            if (outlink->planar == AVFILTER_PLANAR) {
                if (aconvert->mix_samplesref)
                    aconvert->packed_data[0] = aconvert->mix_samplesref->data[0];
                aconvert->in_conv         = aconvert->packed_data;
                packed_stride             = aconvert->in_strides[0];
                aconvert->in_strides[0]  *= nb_channels;
            // planar -> packed
            } else {
                aconvert->packed_data[0]  = aconvert->out_samplesref->data[0];
                aconvert->out_conv        = aconvert->packed_data;
                packed_stride             = aconvert->out_strides[0];
                aconvert->out_strides[0] *= nb_channels;
            }
        } else if (outlink->planar == AVFILTER_PACKED) {
            /* If there's no packing conversion, and the stream is packed
             * then we treat the entire stream as one big channel
             */
            nb_channels = 1;
        }

        for (i = 1; i < nb_channels; i++) {
            aconvert->packed_data[i] = aconvert->packed_data[i-1] + packed_stride;
            aconvert->in_strides[i]  = aconvert->in_strides[0];
            aconvert->out_strides[i] = aconvert->out_strides[0];
        }

        aconvert->audioconvert_ctx =
                av_audio_convert_alloc(outlink->format, nb_channels,
                                       inlink->format,  nb_channels, NULL, 0);
        if (!aconvert->audioconvert_ctx)
            goto fail_no_mem;
    }

    return 0;

fail_no_mem:
    av_log(inlink->dst, AV_LOG_ERROR, "Could not allocate memory.\n");
    return AVERROR(ENOMEM);
}
Code example #25
0
File: spatialexjni.cpp Project: brygid/sapDroid
int procAudioResampling(AVCodecContext * audio_dec_ctx,
	AVFrame * pAudioDecodeFrame,
	int out_sample_fmt,
	int out_channels,
	int out_sample_rate,
	uint8_t * audio_chunk)
{
	SwrContext * swr_ctx = NULL;
	int data_size = 0;
	int ret = 0;
	int64_t src_ch_layout = audio_dec_ctx->channel_layout;
	int64_t dst_ch_layout = AV_CH_LAYOUT_STEREO;
	int dst_nb_channels = 0;
	int dst_linesize = 0;
	int src_nb_samples = 0;
	int dst_nb_samples = 0;
	int max_dst_nb_samples = 0;
	uint8_t **dst_data = NULL;
	int resampled_data_size = 0;

	swr_ctx = swr_alloc();
	if (!swr_ctx)
	{
		LOGD("swr_alloc error \n");
		return -1;
	}

	src_ch_layout = (audio_dec_ctx->channels ==
		av_get_channel_layout_nb_channels(audio_dec_ctx->channel_layout)) ?
		audio_dec_ctx->channel_layout :
		av_get_default_channel_layout(audio_dec_ctx->channels);

	if (out_channels == 1)
	{
		dst_ch_layout = AV_CH_LAYOUT_MONO;
		//LOGD("dst_ch_layout: AV_CH_LAYOUT_MONO\n");
	}
	else if (out_channels == 2)
	{
		dst_ch_layout = AV_CH_LAYOUT_STEREO;
		//LOGD("dst_ch_layout: AV_CH_LAYOUT_STEREO\n");
	}
	else
	{
		dst_ch_layout = AV_CH_LAYOUT_SURROUND;
		//LOGD("dst_ch_layout: AV_CH_LAYOUT_SURROUND\n");
	}

	if (src_ch_layout <= 0)
	{
		LOGD("src_ch_layout error \n");
		return -1;
	}

	src_nb_samples = pAudioDecodeFrame->nb_samples;
	if (src_nb_samples <= 0)
	{
		LOGD("src_nb_samples error \n");
		return -1;
	}

	av_opt_set_int(swr_ctx, "in_channel_layout", src_ch_layout, 0);
	av_opt_set_int(swr_ctx, "in_sample_rate", audio_dec_ctx->sample_rate, 0);
	av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", audio_dec_ctx->sample_fmt, 0);

	av_opt_set_int(swr_ctx, "out_channel_layout", dst_ch_layout, 0);
	av_opt_set_int(swr_ctx, "out_sample_rate", out_sample_rate, 0);
	av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", (AVSampleFormat)out_sample_fmt, 0);

	if ((ret = swr_init(swr_ctx)) < 0) {
		LOGD("Failed to initialize the resampling context\n");
		return -1;
	}

	max_dst_nb_samples = dst_nb_samples = av_rescale_rnd(src_nb_samples,
		out_sample_rate, audio_dec_ctx->sample_rate, AV_ROUND_UP);
	if (max_dst_nb_samples <= 0)
	{
		LOGD("av_rescale_rnd error \n");
		return -1;
	}

	dst_nb_channels = av_get_channel_layout_nb_channels(dst_ch_layout);
	ret = av_samples_alloc_array_and_samples(&dst_data, &dst_linesize, dst_nb_channels,
		dst_nb_samples, (AVSampleFormat)out_sample_fmt, 0);
	if (ret < 0)
	{
		LOGD("av_samples_alloc_array_and_samples error \n");
		return -1;
	}


	dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, audio_dec_ctx->sample_rate) +
		src_nb_samples, out_sample_rate, audio_dec_ctx->sample_rate, AV_ROUND_UP);
	if (dst_nb_samples <= 0)
	{
		LOGD("av_rescale_rnd error \n");
		return -1;
	}
	if (dst_nb_samples > max_dst_nb_samples)
	{
		av_free(dst_data[0]);
		ret = av_samples_alloc(dst_data, &dst_linesize, dst_nb_channels,
			dst_nb_samples, (AVSampleFormat)out_sample_fmt, 1);
		if (ret < 0)
		{
			LOGD("av_samples_alloc error \n");
			return -1;
		}
		max_dst_nb_samples = dst_nb_samples;
	}

	if (swr_ctx)
	{
		ret = swr_convert(swr_ctx, dst_data, dst_nb_samples,
			(const uint8_t **)pAudioDecodeFrame->data, pAudioDecodeFrame->nb_samples);
		if (ret < 0)
		{
			LOGD("swr_convert error \n");
			return -1;
		}

		resampled_data_size = av_samples_get_buffer_size(&dst_linesize, dst_nb_channels,
			ret, (AVSampleFormat)out_sample_fmt, 1);
		if (resampled_data_size < 0)
		{
			LOGD("av_samples_get_buffer_size error \n");
			return -1;
		}
	}
	else
	{
		LOGD("swr_ctx null error \n");
		return -1;
	}

	//LOGD("resampled_data_size:%d",resampled_data_size);
	/* the caller must guarantee audio_chunk can hold resampled_data_size bytes */
	memcpy(audio_chunk, dst_data[0], resampled_data_size);

	if (dst_data)
	{
		av_freep(&dst_data[0]);
	}
	av_freep(&dst_data);
	dst_data = NULL;

	if (swr_ctx)
	{
		swr_free(&swr_ctx);
	}
	return resampled_data_size;
}
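A hedged usage sketch for procAudioResampling() above: the decoder context dec_ctx, the packet pkt, and the buffer sizing below are assumptions for illustration. Sizing the destination with av_samples_get_buffer_size() for a worst-case sample count matters because the function memcpy()s into audio_chunk without a bounds check.

/* Hypothetical caller (assumed names dec_ctx, pkt): decode one frame, then
 * resample it to packed S16 stereo at 44.1 kHz. */
AVFrame *frame = av_frame_alloc();
int got_frame = 0;
if (avcodec_decode_audio4(dec_ctx, frame, &got_frame, &pkt) >= 0 && got_frame) {
    /* worst-case output samples: rate-rescaled count plus resampler-delay slack */
    int max_samples = (int)av_rescale_rnd(frame->nb_samples, 44100,
                                          dec_ctx->sample_rate, AV_ROUND_UP) + 256;
    int buf_size = av_samples_get_buffer_size(NULL, 2, max_samples,
                                              AV_SAMPLE_FMT_S16, 1);
    uint8_t *chunk = av_malloc(buf_size);
    int n = procAudioResampling(dec_ctx, frame, AV_SAMPLE_FMT_S16,
                                2, 44100, chunk);
    if (n > 0) {
        /* n bytes of packed S16 stereo audio are now in chunk */
    }
    av_free(chunk);
}
av_frame_free(&frame);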
Code example #26
0
File: main_demux.cpp Project: CheckLiu/pi
static int decode_packet(int *got_frame, int cached)
{
    int ret = 0;

    if (pkt.stream_index == video_stream_idx) {
        /* decode video frame */
        ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error decoding video frame\n");
            return ret;
        }

        if (*got_frame) {
            printf("video_frame%s n:%d coded_n:%d pts:%s\n",
                   cached ? "(cached)" : "",
                   video_frame_count++, frame->coded_picture_number,
                   av_ts2timestr(frame->pts, &video_dec_ctx->time_base));

            /* copy decoded frame to destination buffer:
             * this is required since rawvideo expects non aligned data */
            av_image_copy(video_dst_data, video_dst_linesize,
                          (const uint8_t **)(frame->data), frame->linesize,
                          video_dec_ctx->pix_fmt, video_dec_ctx->width, video_dec_ctx->height);

            /* write to rawvideo file */
            fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
        }
    } else if (pkt.stream_index == audio_stream_idx) {
        /* decode audio frame */
        ret = avcodec_decode_audio4(audio_dec_ctx, frame, got_frame, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error decoding audio frame\n");
            return ret;
        }

        if (*got_frame) {
            printf("audio_frame%s n:%d nb_samples:%d pts:%s\n",
                   cached ? "(cached)" : "",
                   audio_frame_count++, frame->nb_samples,
                   av_ts2timestr(frame->pts, &audio_dec_ctx->time_base));

            ret = av_samples_alloc(audio_dst_data, &audio_dst_linesize, frame->channels,
                                   frame->nb_samples, frame->format, 1);
            if (ret < 0) {
                fprintf(stderr, "Could not allocate audio buffer\n");
                return AVERROR(ENOMEM);
            }

            /* TODO: extend return code of the av_samples_* functions so that this call is not needed */
            audio_dst_bufsize =
                av_samples_get_buffer_size(NULL, frame->channels,
                                           frame->nb_samples, frame->format, 1);

            /* copy audio data to destination buffer:
             * this is required since rawaudio expects non aligned data */
            av_samples_copy(audio_dst_data, frame->data, 0, 0,
                            frame->nb_samples, frame->channels, frame->format);

            /* write to rawaudio file */
            fwrite(audio_dst_data[0], 1, audio_dst_bufsize, audio_dst_file);
            av_freep(&audio_dst_data[0]);
        }
    }

    return ret;
}
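A sketch of the driver loop that would feed decode_packet() above, following the shape of FFmpeg's classic demuxing example. pkt is assumed to be the same file-scope AVPacket the function already reads, and fmt_ctx is an assumed name for the opened AVFormatContext.

/* Drain the container, then flush any frames cached in the decoders by
 * feeding an empty packet with cached = 1. */
int got_frame = 0;
while (av_read_frame(fmt_ctx, &pkt) >= 0) {
    AVPacket orig_pkt = pkt;
    do {
        int ret = decode_packet(&got_frame, 0);
        if (ret < 0)
            break;
        pkt.data += ret;
        pkt.size -= ret;
    } while (pkt.size > 0);
    av_free_packet(&orig_pkt);
}

/* flush cached frames */
pkt.data = NULL;
pkt.size = 0;
do {
    decode_packet(&got_frame, 1);
} while (got_frame);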
Code example #27
0
int audioResampling(AVCodecContext *audio_dec_ctx, AVFrame *pAudioDecodeFrame,
                    int out_sample_fmt, int out_channels, int out_sample_rate, char *out_buf) {
    __android_log_print(ANDROID_LOG_DEBUG, TAG, "stream12");
    SwrContext *swr_ctx = NULL;
    int data_size = 0;
    int ret = 0;
    int64_t src_ch_layout = AV_CH_LAYOUT_STEREO; // initial value; adjusted below to match the input file
    int64_t dst_ch_layout = AV_CH_LAYOUT_STEREO; // a fixed output layout is fine here
    int dst_nb_channels = 0;
    int dst_linesize = 0;
    int src_nb_samples = 0;
    int dst_nb_samples = 0;
    int max_dst_nb_samples = 0;
    uint8_t **dst_data = NULL;
    int resampled_data_size = 0;
    // resample; swr_ctx was initialized to NULL above, so there is nothing to free here
    swr_ctx = swr_alloc();
    __android_log_print(ANDROID_LOG_DEBUG, TAG, "stream12-1");
    if (!swr_ctx) {
        printf("swr_alloc error \n");
        __android_log_print(ANDROID_LOG_DEBUG, TAG, "stream13");
        return -1;
    }

    src_ch_layout = (audio_dec_ctx->channel_layout &&
                     audio_dec_ctx->channels ==
                     av_get_channel_layout_nb_channels(audio_dec_ctx->channel_layout)) ?
                    audio_dec_ctx->channel_layout :
                    av_get_default_channel_layout(audio_dec_ctx->channels);
    if (out_channels == 1) {
        dst_ch_layout = AV_CH_LAYOUT_MONO;
    }
    else if (out_channels == 2) {
        dst_ch_layout = AV_CH_LAYOUT_STEREO;
    }
    else {
        // extendable: add more channel layouts here as needed
    }
    __android_log_print(ANDROID_LOG_DEBUG, TAG, "stream12-2");
    if (src_ch_layout <= 0) {
        printf("src_ch_layout error \n");
        __android_log_print(ANDROID_LOG_DEBUG, TAG, "stream14");
        return -1;
    }
    __android_log_print(ANDROID_LOG_DEBUG, TAG, "stream12-3");
    src_nb_samples = pAudioDecodeFrame->nb_samples;
    if (src_nb_samples <= 0) {
        printf("src_nb_samples error \n");
        __android_log_print(ANDROID_LOG_DEBUG, TAG, "stream15");
        return -1;
    }

    /* set options */
    av_opt_set_int(swr_ctx, "in_channel_layout", src_ch_layout, 0);
    av_opt_set_int(swr_ctx, "in_sample_rate", audio_dec_ctx->sample_rate, 0);
    av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", audio_dec_ctx->sample_fmt, 0);

    av_opt_set_int(swr_ctx, "out_channel_layout", dst_ch_layout, 0);
    av_opt_set_int(swr_ctx, "out_sample_rate", out_sample_rate, 0);
    av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", (AVSampleFormat) out_sample_fmt, 0);
    if (swr_init(swr_ctx) < 0) {
        printf("swr_init error \n");
        return -1;
    }
    max_dst_nb_samples = dst_nb_samples =
            av_rescale_rnd(src_nb_samples, out_sample_rate, audio_dec_ctx->sample_rate,
                           AV_ROUND_UP);
    if (max_dst_nb_samples <= 0) {
        printf("av_rescale_rnd error \n");
        return -1;
    }

    dst_nb_channels = av_get_channel_layout_nb_channels(dst_ch_layout);
    ret = av_samples_alloc_array_and_samples(&dst_data, &dst_linesize, dst_nb_channels,
                                             dst_nb_samples, (AVSampleFormat) out_sample_fmt, 0);
    if (ret < 0) {
        printf("av_samples_alloc_array_and_samples error \n");
        return -1;
    }


    dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, audio_dec_ctx->sample_rate) +
                                    src_nb_samples, out_sample_rate, audio_dec_ctx->sample_rate,
                                    AV_ROUND_UP);
    if (dst_nb_samples <= 0) {
        return -1;
    }
    if (dst_nb_samples > max_dst_nb_samples) {
        av_free(dst_data[0]);
        ret = av_samples_alloc(dst_data, &dst_linesize, dst_nb_channels,
                               dst_nb_samples, (AVSampleFormat) out_sample_fmt, 1);
        if (ret < 0) {
            printf("av_samples_alloc error \n");
            return -1;
        }
        max_dst_nb_samples = dst_nb_samples;
    }
    data_size = av_samples_get_buffer_size(NULL, audio_dec_ctx->channels,
                                           pAudioDecodeFrame->nb_samples,
                                           audio_dec_ctx->sample_fmt, 1);
    if (data_size <= 0) {
        return -1;
    }
    resampled_data_size = data_size;
    if (swr_ctx) {
        ret = swr_convert(swr_ctx, dst_data, dst_nb_samples,
                          (const uint8_t **) pAudioDecodeFrame->data,
                          pAudioDecodeFrame->nb_samples);
        if (ret <= 0) {
            return -1;
        }

        resampled_data_size = av_samples_get_buffer_size(&dst_linesize, dst_nb_channels,
                                                         ret, (AVSampleFormat) out_sample_fmt, 1);
        if (resampled_data_size <= 0) {
            return -1;
        }
    }
    else {
        return -1;
    }
    // copy the result back to the caller; out_buf must be large enough for resampled_data_size bytes
    memcpy(out_buf, dst_data[0], resampled_data_size);
    if (dst_data) {
        av_freep(&dst_data[0]);
    }
    av_freep(&dst_data);
    dst_data = NULL;

    if (swr_ctx) {
        swr_free(&swr_ctx);
    }
    return resampled_data_size;
}
Code example #28
0
Chroma::Result Chroma::operator() (const QString& filename)
{
    std::shared_ptr<AVFormatContext> formatCtx;
    {
        AVFormatContext *formatCtxRaw = nullptr;
        if (avformat_open_input (&formatCtxRaw, filename.toLatin1 ().constData (), nullptr, nullptr))
            throw std::runtime_error ("error opening file");

        formatCtx.reset (formatCtxRaw,
        [] (AVFormatContext *ctx) {
            avformat_close_input (&ctx);
        });
    }

    {
        QMutexLocker locker (&CodecMutex_);
        if (avformat_find_stream_info (formatCtx.get (), nullptr) < 0)
            throw std::runtime_error ("could not find stream");
    }

    AVCodec *codec = nullptr;
    const auto streamIndex = av_find_best_stream (formatCtx.get (), AVMEDIA_TYPE_AUDIO, -1, -1, &codec, 0);
    if (streamIndex < 0)
        throw std::runtime_error ("could not find audio stream");

    auto stream = formatCtx->streams [streamIndex];

    bool codecOpened = false;

    std::shared_ptr<AVCodecContext> codecCtx (stream->codec,
    [&codecOpened] (AVCodecContext *ctx) {
        if (codecOpened) avcodec_close (ctx);
    });
    {
        QMutexLocker locker (&CodecMutex_);
        if (avcodec_open2 (codecCtx.get (), codec, nullptr) < 0)
            throw std::runtime_error ("couldn't open the codec");
    }
    codecOpened = true;

    if (codecCtx->channels <= 0)
        throw std::runtime_error ("no channels found");

    std::shared_ptr<SwrContext> swr;
    if (codecCtx->sample_fmt != AV_SAMPLE_FMT_S16)
    {
        swr.reset (swr_alloc (), [] (SwrContext *ctx) {
            if (ctx) swr_free (&ctx);
        });
        av_opt_set_int (swr.get (), "in_channel_layout", codecCtx->channel_layout, 0);
        av_opt_set_int (swr.get (), "out_channel_layout", codecCtx->channel_layout,  0);
        av_opt_set_int (swr.get (), "in_sample_rate", codecCtx->sample_rate, 0);
        av_opt_set_int (swr.get (), "out_sample_rate", codecCtx->sample_rate, 0);
        av_opt_set_sample_fmt (swr.get (), "in_sample_fmt", codecCtx->sample_fmt, 0);
        av_opt_set_sample_fmt (swr.get (), "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
        swr_init (swr.get ());
    }

    AVPacket packet;
    av_init_packet (&packet);

    const int maxLength = 120;
    auto remaining = maxLength * codecCtx->channels * codecCtx->sample_rate;
    chromaprint_start (Ctx_, codecCtx->sample_rate, codecCtx->channels);

    std::shared_ptr<AVFrame> frame (av_frame_alloc (),
    [] (AVFrame *frame) {
        av_frame_free (&frame);
    });
    auto maxDstNbSamples = 0;

    uint8_t *dstData [1] = { nullptr };
    std::shared_ptr<void> dstDataGuard (nullptr,
    [&dstData] (void*) {
        if (dstData [0]) av_freep (&dstData [0]);
    });
    while (true)
    {
        if (av_read_frame (formatCtx.get (), &packet) < 0)
            break;

        std::shared_ptr<void> guard (nullptr,
        [&packet] (void*) {
            if (packet.data) av_free_packet (&packet);
        });

        if (packet.stream_index != streamIndex)
            continue;

        av_frame_unref (frame.get ());
        int gotFrame = false;
        auto consumed = avcodec_decode_audio4 (codecCtx.get (), frame.get (), &gotFrame, &packet);

        if (consumed < 0 || !gotFrame)
            continue;

        uint8_t **data = nullptr;
        if (swr)
        {
            if (frame->nb_samples > maxDstNbSamples)
            {
                if (dstData [0])
                    av_freep (&dstData [0]);
                int linesize = 0;
                if (av_samples_alloc (dstData, &linesize, codecCtx->channels, frame->nb_samples, AV_SAMPLE_FMT_S16, 1) < 0)
                    throw std::runtime_error ("cannot allocate memory for resampling");
                maxDstNbSamples = frame->nb_samples;
            }

            if (swr_convert (swr.get (), dstData, frame->nb_samples, const_cast<const uint8_t**> (frame->data), frame->nb_samples) < 0)
                throw std::runtime_error ("cannot resample audio");

            data = dstData;
        }
        else
            data = frame->data;

        auto length = std::min (remaining, frame->nb_samples * codecCtx->channels);
        if (!chromaprint_feed (Ctx_, data [0], length))
            throw std::runtime_error ("cannot feed data");

        bool finished = false;
        if (maxLength)
        {
            remaining -= length;
            if (remaining <= 0)
                finished = true;
        }
        if (finished)
            break;
    }

    if (!chromaprint_finish (Ctx_))
        throw std::runtime_error ("fingerprint calculation failed");

    char *fingerprint = nullptr;
    if (!chromaprint_get_fingerprint (Ctx_, &fingerprint))
        throw std::runtime_error ("unable to get fingerprint");

    QByteArray result (fingerprint);
    chromaprint_dealloc (fingerprint);

    const double divideFactor = 1. / av_q2d (stream->time_base);
    const double duration = stream->duration / divideFactor;

    return { result, static_cast<int> (duration) };
}
Code example #29
0
/**
 * Decode a packet to a frame, run the result through SwrContext if desired, encode it via an
 * appropriate encoder, and write the result to the Java-side native buffer.
 *
 * @param aio       FFAudio context
 * @param cached    true or false
 * @return number of bytes placed into the Java buffer, or a negative value if something went wrong
 */
static int decode_packet(FFAudioIO *aio, int cached) {
    int res = 0;
    uint8_t **resample_buf = NULL;
    jobject byte_buffer = NULL;
    uint8_t *javaBuffer = NULL;
    uint32_t out_buf_size = 0;
    int out_buf_samples = 0;
    int64_t out_channel_count;
    int64_t out_sample_rate;
    int flush = aio->got_frame;
    enum AVSampleFormat out;
    int bytesConsumed = 0;

    init_ids(aio->env, aio->java_instance);

    av_opt_get_int(aio->swr_context, "out_channel_count", 0, &out_channel_count);
    av_opt_get_int(aio->swr_context, "out_sample_rate", 0, &out_sample_rate);
    av_opt_get_sample_fmt(aio->swr_context, "out_sample_fmt", 0, &out);

    resample_buf = av_mallocz(sizeof(uint8_t *) * 1); // one plane!

    // make sure we really have an audio packet
    if (aio->decode_packet.stream_index == aio->stream_index) {
        // decode frame
        // got_frame indicates whether we got a frame
        bytesConsumed = avcodec_decode_audio4(aio->decode_context, aio->decode_frame, &aio->got_frame, &aio->decode_packet);
        if (bytesConsumed < 0) {
            throwUnsupportedAudioFileExceptionIfError(aio->env, bytesConsumed, "Failed to decode audio frame.");
            return bytesConsumed;
        }

        if (aio->got_frame) {

            aio->decoded_samples += aio->decode_frame->nb_samples;
            out_buf_samples = aio->decode_frame->nb_samples;
#ifdef DEBUG
            fprintf(stderr, "samples%s n:%" PRIu64 " nb_samples:%d pts:%s\n",
                   cached ? "(cached)" : "",
                   aio->decoded_samples, aio->decode_frame->nb_samples,
                   av_ts2timestr(aio->decode_frame->pts, &aio->decode_context->time_base));
#endif

            // adjust out sample number for a different sample rate
            // this is an estimate!!
            out_buf_samples = av_rescale_rnd(
                    swr_get_delay(aio->swr_context, aio->stream->codecpar->sample_rate) + aio->decode_frame->nb_samples,
                    out_sample_rate,
                    aio->stream->codecpar->sample_rate,
                    AV_ROUND_UP
            );

            // allocate new aio->audio_data buffers
            res = av_samples_alloc(aio->audio_data, NULL, av_frame_get_channels(aio->decode_frame),
                                   aio->decode_frame->nb_samples, aio->decode_frame->format, 1);
            if (res < 0) {
                throwIOExceptionIfError(aio->env, res, "Could not allocate audio buffer.");
                return AVERROR(ENOMEM);
            }
            // copy audio data to aio->audio_data
            av_samples_copy(aio->audio_data, aio->decode_frame->data, 0, 0,
                            aio->decode_frame->nb_samples, av_frame_get_channels(aio->decode_frame), aio->decode_frame->format);

            res = resample(aio, resample_buf, out_buf_samples, (const uint8_t **)aio->audio_data, aio->decode_frame->nb_samples);
            if (res < 0) goto bail;
            else out_buf_samples = res;

        } else if (flush && swr_get_delay(aio->swr_context, aio->stream->codecpar->sample_rate)) {

            res = resample(aio, resample_buf, swr_get_delay(aio->swr_context, aio->stream->codecpar->sample_rate), NULL, 0);
            if (res < 0) goto bail;
            else out_buf_samples = res;
        } else {
#ifdef DEBUG
            fprintf(stderr, "Got no frame.\n");
#endif
        }

        if (out_buf_samples > 0) {

            res =  av_samples_get_buffer_size(NULL, (int)out_channel_count, out_buf_samples, out, 1);
            if (res < 0) goto bail;
            else out_buf_size = res;

            // ensure native buffer capacity
            if (aio->java_buffer_capacity < out_buf_size) {
                aio->java_buffer_capacity = (*aio->env)->CallIntMethod(aio->env, aio->java_instance, setNativeBufferCapacity_MID, (jint)out_buf_size);
            }
            // get java-managed byte buffer reference
            byte_buffer = (*aio->env)->GetObjectField(aio->env, aio->java_instance, nativeBuffer_FID);
            if (!byte_buffer) {
                res = -1;
                throwIOExceptionIfError(aio->env, 1, "Failed to get native buffer.");
                goto bail;
            }

            // we have some samples, let's copy them to the java buffer, using the desired encoding
            javaBuffer = (uint8_t *)(*aio->env)->GetDirectBufferAddress(aio->env, byte_buffer);
            if (!javaBuffer) {
                throwIOExceptionIfError(aio->env, 1, "Failed to get address for native buffer.");
                goto bail;
            }
            if (aio->encode_context) {
                aio->encode_frame->nb_samples = out_buf_samples;
                res = encode_buffer(aio, resample_buf[0], out_buf_size, javaBuffer);
                if (res < 0) {
                    out_buf_size = 0;
                    goto bail;
                }
                out_buf_size = res;
            } else {
                memcpy(javaBuffer, resample_buf[0], out_buf_size);
            }
            // we already wrote to the buffer, now we still need to
            // set new bytebuffer limit and position to 0.
            (*aio->env)->CallObjectMethod(aio->env, byte_buffer, rewind_MID);
            (*aio->env)->CallObjectMethod(aio->env, byte_buffer, limit_MID, out_buf_size);
        }
    }

    aio->resampled_samples += out_buf_size;

bail:

    if (resample_buf) {
        if (resample_buf[0]) av_freep(&resample_buf[0]);
        av_free(resample_buf);
    }
    if (aio->audio_data[0]) av_freep(&aio->audio_data[0]);

    return res;
}
Code example #30
0
File: muxing.c Project: Gnate/FFmpeg-Android
static void write_audio_frame(AVFormatContext *oc, AVStream *st, int flush)
{
    AVCodecContext *c;
    AVPacket pkt = { 0 }; // data and size must be 0;
    int got_packet, ret, dst_nb_samples;

    av_init_packet(&pkt);
    c = st->codec;

    if (!flush) {
        get_audio_frame((int16_t *)src_samples_data[0], src_nb_samples, c->channels);

        /* convert samples from native format to destination codec format, using the resampler */
        if (swr_ctx) {
            /* compute destination number of samples */
            dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, c->sample_rate) + src_nb_samples,
                                            c->sample_rate, c->sample_rate, AV_ROUND_UP);
            if (dst_nb_samples > max_dst_nb_samples) {
                av_free(dst_samples_data[0]);
                ret = av_samples_alloc(dst_samples_data, &dst_samples_linesize, c->channels,
                                       dst_nb_samples, c->sample_fmt, 0);
                if (ret < 0)
                    exit(1);
                max_dst_nb_samples = dst_nb_samples;
                dst_samples_size = av_samples_get_buffer_size(NULL, c->channels, dst_nb_samples,
                                                              c->sample_fmt, 0);
            }

            /* convert to destination format */
            ret = swr_convert(swr_ctx,
                              dst_samples_data, dst_nb_samples,
                              (const uint8_t **)src_samples_data, src_nb_samples);
            if (ret < 0) {
                fprintf(stderr, "Error while converting\n");
                exit(1);
            }
        } else {
            dst_nb_samples = src_nb_samples;
        }

        audio_frame->nb_samples = dst_nb_samples;
        audio_frame->pts = av_rescale_q(samples_count, (AVRational){1, c->sample_rate}, c->time_base);
        avcodec_fill_audio_frame(audio_frame, c->channels, c->sample_fmt,
                                 dst_samples_data[0], dst_samples_size, 0);
        samples_count += dst_nb_samples;
    }

    ret = avcodec_encode_audio2(c, &pkt, flush ? NULL : audio_frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
        exit(1);
    }

    if (!got_packet) {
        if (flush)
            audio_is_eof = 1;
        return;
    }

    ret = write_frame(oc, &c->time_base, st, &pkt);
    if (ret < 0) {
        fprintf(stderr, "Error while writing audio frame: %s\n",
                av_err2str(ret));
        exit(1);
    }
}
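A minimal sketch of the write loop that would drive write_audio_frame() above, assuming the muxing-example globals (oc, audio_st, audio_is_eof) and a caller-maintained flush flag; the full example additionally interleaves audio with video by comparing stream timestamps.

/* Keep encoding until the source is exhausted, then pass flush = 1 until the
 * encoder drains and write_audio_frame() sets audio_is_eof. */
int flush = 0; /* assumption: set to 1 once no more input samples remain */
while (!audio_is_eof)
    write_audio_frame(oc, audio_st, flush);
av_write_trailer(oc);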