Example no. 1
	// Encodes buffered PCM audio taken from |frame| and muxes the resulting
	// packets into the output container (legacy avcodec_encode_audio API).
	void encode_audio_frame(core::read_frame& frame)
	{			
		auto c = audio_st_->codec;

		// Append this frame's samples (converted to the codec's sample format)
		// behind whatever is left over from previous calls.
		boost::range::push_back(audio_buf_, convert_audio(frame, c));
		
		std::size_t frame_size = c->frame_size;
		// Bytes needed for one full codec frame of interleaved samples.
		auto input_audio_size = frame_size * av_get_bytes_per_sample(c->sample_fmt) * c->channels;
		
		// Drain the pending buffer one codec frame at a time.
		while(audio_buf_.size() >= input_audio_size)
		{
			// Heap-allocated packet with a custom deleter so av_free_packet()
			// always runs, even on early return.
			safe_ptr<AVPacket> pkt(new AVPacket, [](AVPacket* p)
			{
				av_free_packet(p);
				delete p;
			});
			av_init_packet(pkt.get());

			if(frame_size > 1)
			{								
				// Real codec frame: encode one frame's worth of samples into
				// audio_outbuf_, then drop the consumed bytes from the front.
				pkt->size = avcodec_encode_audio(c, audio_outbuf_.data(), audio_outbuf_.size(), reinterpret_cast<short*>(audio_buf_.data()));
				audio_buf_.erase(audio_buf_.begin(), audio_buf_.begin() + input_audio_size);
			}
			else
			{
				// frame_size <= 1 (e.g. PCM-style codecs): samples pass through
				// unencoded, so hand the whole buffer to the packet as-is.
				audio_outbuf_ = std::move(audio_buf_);		
				audio_buf_.clear(); // moved-from; clear() restores a known-empty state (ends the loop)
				pkt->size = audio_outbuf_.size();
				pkt->data = audio_outbuf_.data();
			}
		
			// Encoder produced no output (it may buffer internally) - stop for now;
			// remaining samples stay in audio_buf_ for the next call.
			if(pkt->size == 0)
				return;

			// Rescale the encoder's pts from the codec time base to the stream time base.
			if (c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE)
				pkt->pts = av_rescale_q(c->coded_frame->pts, c->time_base, audio_st_->time_base);

			pkt->flags		 |= AV_PKT_FLAG_KEY;
			pkt->stream_index = audio_st_->index;
			pkt->data		  = reinterpret_cast<uint8_t*>(audio_outbuf_.data());
		
			// NOTE(review): the return value of av_interleaved_write_frame is
			// ignored, so write/mux errors go unnoticed here.
			av_interleaved_write_frame(oc_.get(), pkt.get());
		}
	}
Example no. 2
void Movie::EncodeAudio(bool last)
{
    AVStream *astream = av->fmt_ctx->streams[av->audio_stream_idx];
    AVCodecContext *acodec = astream->codec;
    
    
    av_fifo_generic_write(av->audio_fifo, &audiobuf[0], audiobuf.size(), NULL);
    
    // bps: bytes per sample
    int channels = acodec->channels;
    int read_bps = 2;
    int write_bps = av_get_bytes_per_sample(acodec->sample_fmt);
    
    int max_read = acodec->frame_size * read_bps * channels;
    int min_read = last ? read_bps * channels : max_read;
    while (av_fifo_size(av->audio_fifo) >= min_read)
    {
        int read_bytes = MIN(av_fifo_size(av->audio_fifo), max_read);
        av_fifo_generic_read(av->audio_fifo, av->audio_data, read_bytes, NULL);
        
        // convert
        int read_samples = read_bytes / (read_bps * channels);
        int write_samples = read_samples;
        if (read_samples < acodec->frame_size)
        {
            // shrink or pad audio frame
            if (acodec->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME)
                acodec->frame_size = write_samples;
            else
                write_samples = acodec->frame_size;
        }

        convert_audio(read_samples, acodec->channels, -1,
                      AV_SAMPLE_FMT_S16, av->audio_data,
                      write_samples, acodec->channels, write_samples * write_bps,
                      acodec->sample_fmt, av->audio_data_conv);
                      
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,28,1)
        avcodec_get_frame_defaults(av->audio_frame);
#else
        av_frame_unref(av->audio_frame);
#endif
        av->audio_frame->nb_samples = write_samples;
        av->audio_frame->pts = av_rescale_q(av->audio_counter,
                                            (AVRational){1, acodec->sample_rate},
                                            acodec->time_base);
        av->audio_counter += write_samples;
        int asize = avcodec_fill_audio_frame(av->audio_frame, acodec->channels,
                                             acodec->sample_fmt,
                                             av->audio_data_conv,
                                             write_samples * write_bps * channels, 1);
        if (asize >= 0)
        {
            AVPacket pkt;
            memset(&pkt, 0, sizeof(AVPacket));
            av_init_packet(&pkt);
            
            int got_pkt = 0;
            if (0 == avcodec_encode_audio2(acodec, &pkt, av->audio_frame, &got_pkt)
                && got_pkt)
            {
                if (pkt.pts != AV_NOPTS_VALUE && pkt.pts < pkt.dts)
                    pkt.pts = pkt.dts;
                if (pkt.pts != AV_NOPTS_VALUE)
                    pkt.pts = av_rescale_q(pkt.pts, acodec->time_base, astream->time_base);
                if (pkt.dts != AV_NOPTS_VALUE)
                    pkt.dts = av_rescale_q(pkt.dts, acodec->time_base, astream->time_base);
                pkt.duration = av_rescale_q(pkt.duration, acodec->time_base, astream->time_base);
                pkt.stream_index = astream->index;
                av_interleaved_write_frame(av->fmt_ctx, &pkt);
                av_free_packet(&pkt);
            }
        }
    }
    if (last)
    {
        bool done = false;
        while (!done)
        {
            AVPacket pkt;
            memset(&pkt, 0, sizeof(AVPacket));
            av_init_packet(&pkt);
            
            int got_pkt = 0;
            if (0 == avcodec_encode_audio2(acodec, &pkt, NULL, &got_pkt)
                && got_pkt)
            {
                if (pkt.pts != AV_NOPTS_VALUE && pkt.pts < pkt.dts)
                    pkt.pts = pkt.dts;
                if (pkt.pts != AV_NOPTS_VALUE)
                    pkt.pts = av_rescale_q(pkt.pts, acodec->time_base, astream->time_base);
                if (pkt.dts != AV_NOPTS_VALUE)
                    pkt.dts = av_rescale_q(pkt.dts, acodec->time_base, astream->time_base);
                pkt.duration = av_rescale_q(pkt.duration, acodec->time_base, astream->time_base);
                pkt.stream_index = astream->index;
                av_interleaved_write_frame(av->fmt_ctx, &pkt);
                av_free_packet(&pkt);
            }
            else
            {
                done = true;
            }
        }
        
    }
    
}
Example no. 3
// Read demuxed packets until one from our audio stream arrives, decode it
// to S16 and append the converted samples to the FIFO.
// Returns false on EOF/read error or on a decode error.
bool FFmpegDecoder::GetAudio()
{
    AVPacket pkt;
    av_init_packet(&pkt);

    // Skip packets belonging to other streams.
    while (true)
    {
        int decode = av_read_frame(av->ctx, &pkt);
        if (decode < 0)
            return false;            // EOF or read error
        if (pkt.stream_index == av->stream_idx)
            break;
        av_free_packet(&pkt);
    }

    av->started = true;
    // A packet may contain several frames; walk it with a shallow copy so the
    // original packet can still be freed in one go at the end.
    AVPacket pkt_temp;
    av_init_packet(&pkt_temp);
    pkt_temp.data = pkt.data;
    pkt_temp.size = pkt.size;

    AVCodecContext *dec_ctx = av->stream->codec;

    while (pkt_temp.size > 0)
    {
        AVFrame frame;
        avcodec_get_frame_defaults(&frame);
        int got_frame = 0;
        int bytes_read = avcodec_decode_audio4(dec_ctx, &frame, &got_frame, &pkt_temp);
        if (bytes_read < 0)
        {
            av_free_packet(&pkt);
            return false;
        }
        if (bytes_read == 0 && !got_frame)
            break;                   // no progress - avoid spinning forever

        if (got_frame)
        {
            int channels = dec_ctx->channels;
            enum AVSampleFormat in_fmt = dec_ctx->sample_fmt;
            enum AVSampleFormat out_fmt = AV_SAMPLE_FMT_S16;

            // For planar formats with >1 channel, pass the plane stride so
            // convert_audio can interleave the planes.
            int stride = -1;
            if (channels > 1 && av_sample_fmt_is_planar(in_fmt))
                stride = frame.extended_data[1] - frame.extended_data[0];

            int written = convert_audio(frame.nb_samples, channels,
                                        stride,
                                        in_fmt, frame.extended_data[0],
                                        frame.nb_samples, channels,
                                        -1,
                                        out_fmt, av->temp_data);

            av_fifo_generic_write(av->fifo, av->temp_data, written, NULL);
        }

        // BUGFIX: advance past the consumed bytes even when no frame was
        // produced; avcodec_decode_audio4 may legitimately consume input
        // while setting got_frame to 0, and the old code (which only advanced
        // inside the got_frame branch) looped forever in that case.
        pkt_temp.data += bytes_read;
        pkt_temp.size -= bytes_read;
    }

    av_free_packet(&pkt);
    return true;
}