Ejemplo n.º 1
0
unsigned int AudioEncoder::GetFrameSize() {
	// Number of samples the encoder expects per input frame. Codecs that
	// accept a variable frame size get the application default instead.
#if SSR_USE_AVCODEC_ENCODE_AUDIO2
	if(GetCodecContext()->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
		return DEFAULT_FRAME_SAMPLES;
	return GetCodecContext()->frame_size;
#else
	// with the old API, frame_size <= 1 signals a variable frame size
	if(GetCodecContext()->frame_size <= 1)
		return DEFAULT_FRAME_SAMPLES;
	return GetCodecContext()->frame_size;
#endif
}
Ejemplo n.º 2
0
OMX_ERRORTYPE LibavAudioDec::AudioFilterCodecInit()
{
    // Looks up the libav decoder for codecID, allocates and opens a codec
    // context, and allocates the reusable decode frame.
    // Returns OMX_ErrorNone on success, OMX_ErrorUndefined /
    // OMX_ErrorInsufficientResources on failure.
    // BUGFIX: the original leaked codecContext on the avcodec_open2 and
    // av_frame_alloc failure paths; it is now released there.
    AVCodec* codec;
	codec =  avcodec_find_decoder(codecID);
    if(!codec){
        LOG_ERROR("find decoder fail, codecID %x" , codecID);
        return OMX_ErrorUndefined;
    }

    codecContext = avcodec_alloc_context3(codec);
    if(!codecContext){
        LOG_ERROR("alloc context fail");
        return OMX_ErrorUndefined;
    }

    // presumably fills codecContext from the port/stream parameters before
    // the codec is opened — TODO(review) confirm against the class definition
    GetCodecContext();
    if(avcodec_open2(codecContext, codec, NULL) < 0){
        LOG_ERROR(" %x codec open fail", codecID);
        avcodec_free_context(&codecContext); // don't leak the context
        return OMX_ErrorUndefined;
    }

    frame = av_frame_alloc();
    if(frame == NULL){
        avcodec_free_context(&codecContext); // don't leak the context
        return OMX_ErrorInsufficientResources;
    }
    return OMX_ErrorNone;
}
Ejemplo n.º 3
0
AudioEncoder::AudioEncoder(Muxer* muxer, AVStream* stream, AVCodecContext *codec_context, AVCodec* codec, AVDictionary** options)
	: BaseEncoder(muxer, stream, codec_context, codec, options) {

#if !SSR_USE_AVCODEC_ENCODE_AUDIO2
	// The legacy avcodec_encode_audio API writes into a caller-provided
	// buffer, so reserve one up front.
	// Oddly, when the number of input samples is not fixed (frame_size <= 1),
	// the old API derives the sample count from the size of the *output*
	// buffer, so the buffer must be sized for exactly DEFAULT_FRAME_SAMPLES.
	size_t buffer_size;
	if(GetCodecContext()->frame_size <= 1) {
		buffer_size = DEFAULT_FRAME_SAMPLES * GetCodecContext()->channels * av_get_bits_per_sample(GetCodecContext()->codec_id) / 8;
	} else {
		buffer_size = std::max(FF_MIN_BUFFER_SIZE, 256 * 1024);
	}
	m_temp_buffer.resize(buffer_size);
#endif

	StartThread();

}
Ejemplo n.º 4
0
VideoEncoder::VideoEncoder(Muxer* muxer, AVStream* stream, AVCodecContext* codec_context, AVCodec* codec, AVDictionary** options)
	: BaseEncoder(muxer, stream, codec_context, codec, options) {

#if !SSR_USE_AVCODEC_ENCODE_VIDEO2
	// The legacy avcodec_encode_video API ignores the size of the output
	// buffer, and if it's too small it just crashes. The original 256k was
	// large enough for about 99.9% of the packets but still occasionally
	// crashed, so the buffer is sized to hold a 256k header plus *two*
	// completely uncompressed frames (one YUV frame takes w * h * 1.5 bytes).
	// Newer libav/ffmpeg versions deprecate avcodec_encode_video in favour of
	// a function that allocates automatically (like avcodec_encode_audio2),
	// but that isn't available in Ubuntu 12.04/12.10 yet.
	unsigned int two_raw_frames = GetCodecContext()->width * GetCodecContext()->height * 3;
	m_temp_buffer.resize(std::max<unsigned int>(FF_MIN_BUFFER_SIZE, 256 * 1024 + two_raw_frames));
#endif

	StartThread();
}
Ejemplo n.º 5
0
AVSampleFormat AudioEncoder::GetSampleFormat() {
	// Sample format as configured on the codec context.
	const AVSampleFormat fmt = GetCodecContext()->sample_fmt;
	return fmt;
}
Ejemplo n.º 6
0
// Encodes one audio frame, or flushes the encoder when 'frame' is NULL, and
// forwards any produced packet to the muxer. Throws LibavException on encoder
// errors. Return value semantics depend on the API path:
//  - send/receive path: true while the encoder accepts more input, false at
//    end of stream (AVERROR_EOF).
//  - older paths: true when a packet was produced, false otherwise.
bool AudioEncoder::EncodeFrame(AVFrameWrapper* frame) {

	// sanity-check that the frame matches the encoder configuration
	if(frame != NULL) {
#if SSR_USE_AVFRAME_NB_SAMPLES
		assert((unsigned int) frame->GetFrame()->nb_samples == GetFrameSize());
#endif
#if SSR_USE_AVFRAME_CHANNELS
		assert(frame->GetFrame()->channels == GetCodecContext()->channels);
#endif
#if SSR_USE_AVFRAME_SAMPLE_RATE
		assert(frame->GetFrame()->sample_rate == GetCodecContext()->sample_rate);
#endif
#if SSR_USE_AVFRAME_FORMAT
		assert(frame->GetFrame()->format == GetCodecContext()->sample_fmt);
#endif
	}

#if SSR_USE_AVCODEC_SEND_RECEIVE

	// send a frame
	// ownership of the AVFrame is taken from the wrapper and released with
	// av_frame_free on every path (success and exception)
	AVFrame *avframe = (frame == NULL)? NULL : frame->Release();
	try {
		if(avcodec_send_frame(GetCodecContext(), avframe) < 0) {
			Logger::LogError("[AudioEncoder::EncodeFrame] " + Logger::tr("Error: Sending of audio frame failed!"));
			throw LibavException();
		}
	} catch(...) {
		av_frame_free(&avframe);
		throw;
	}
	av_frame_free(&avframe);

	// try to receive a packet
	// drain all packets the encoder has ready; one sent frame may yield
	// zero, one or several packets
	for( ; ; ) {
		std::unique_ptr<AVPacketWrapper> packet(new AVPacketWrapper());
		int res = avcodec_receive_packet(GetCodecContext(), packet->GetPacket());
		if(res == 0) { // we have a packet, send the packet to the muxer
			GetMuxer()->AddPacket(GetStream()->index, std::move(packet));
			IncrementPacketCounter();
		} else if(res == AVERROR(EAGAIN)) { // we have no packet
			return true;
		} else if(res == AVERROR_EOF) { // this is the end of the stream
			return false;
		} else {
			Logger::LogError("[AudioEncoder::EncodeFrame] " + Logger::tr("Error: Receiving of audio packet failed!"));
			throw LibavException();
		}
	}

#elif SSR_USE_AVCODEC_ENCODE_AUDIO2

	// allocate a packet
	std::unique_ptr<AVPacketWrapper> packet(new AVPacketWrapper());

	// encode the frame (frame == NULL flushes delayed packets)
	int got_packet;
	if(avcodec_encode_audio2(GetCodecContext(), packet->GetPacket(), (frame == NULL)? NULL : frame->GetFrame(), &got_packet) < 0) {
		Logger::LogError("[AudioEncoder::EncodeFrame] " + Logger::tr("Error: Encoding of audio frame failed!"));
		throw LibavException();
	}

	// do we have a packet?
	if(got_packet) {

		// send the packet to the muxer
		GetMuxer()->AddPacket(GetStream()->index, std::move(packet));
		IncrementPacketCounter();
		return true;

	} else {
		return false;
	}

#else

	// encode the frame
	// the legacy API writes into m_temp_buffer (sized in the constructor)
	short *data = (frame == NULL)? NULL : (short*) frame->GetFrame()->data[0];
	int bytes_encoded = avcodec_encode_audio(GetCodecContext(), m_temp_buffer.data(), m_temp_buffer.size(), data);
	if(bytes_encoded < 0) {
		Logger::LogError("[AudioEncoder::EncodeFrame] " + Logger::tr("Error: Encoding of audio frame failed!"));
		throw LibavException();
	}

	// do we have a packet?
	if(bytes_encoded > 0) {

		// allocate a packet
		std::unique_ptr<AVPacketWrapper> packet(new AVPacketWrapper(bytes_encoded));

		// copy the data
		memcpy(packet->GetPacket()->data, m_temp_buffer.data(), bytes_encoded);

		// set the timestamp
		// note: pts will be rescaled and stream_index will be set by Muxer
		if(GetCodecContext()->coded_frame != NULL && GetCodecContext()->coded_frame->pts != (int64_t) AV_NOPTS_VALUE)
			packet->GetPacket()->pts = GetCodecContext()->coded_frame->pts;

		// send the packet to the muxer
		GetMuxer()->AddPacket(GetStream()->index, std::move(packet));
		IncrementPacketCounter();
		return true;

	} else {
		return false;
	}

#endif

}
Ejemplo n.º 7
0
// Encodes one video frame (or flushes when 'frame' is NULL) and forwards the
// resulting packet, if any, to the muxer. Returns true when a packet was
// produced. Throws LibavException on encoder errors.
// BUGFIX: coded_frame was dereferenced without a NULL check when setting the
// keyframe flag, while the pts code right next to it already guarded against
// NULL — the guard is now applied consistently.
bool VideoEncoder::EncodeFrame(AVFrame* frame) {

#if SSR_USE_AVFRAME_FORMAT
	if(frame != NULL) {
		Q_ASSERT(frame->format == GetCodecContext()->pix_fmt);
	}
#endif

#if SSR_USE_AVCODEC_ENCODE_VIDEO2

	// allocate a packet
	std::unique_ptr<AVPacketWrapper> packet(new AVPacketWrapper());

	// encode the frame
	int got_packet;
	if(avcodec_encode_video2(GetCodecContext(), packet->GetPacket(), frame, &got_packet) < 0) {
		Logger::LogError("[VideoEncoder::EncodeFrame] " + QObject::tr("Error: Encoding of video frame failed!"));
		throw LibavException();
	}

	// do we have a packet?
	if(got_packet) {

		// set the keyframe flag (coded_frame can be NULL for some codecs)
		if(GetCodecContext()->coded_frame != NULL && GetCodecContext()->coded_frame->key_frame)
			packet->GetPacket()->flags |= AV_PKT_FLAG_KEY;

		// send the packet to the muxer
		GetMuxer()->AddPacket(GetStreamIndex(), std::move(packet));
		return true;

	} else {
		return false;
	}

#else

	// encode the frame into the preallocated temporary buffer
	int bytes_encoded = avcodec_encode_video(GetCodecContext(), m_temp_buffer.data(), m_temp_buffer.size(), frame);
	if(bytes_encoded < 0) {
		Logger::LogError("[VideoEncoder::EncodeFrame] " + QObject::tr("Error: Encoding of video frame failed!"));
		throw LibavException();
	}

	// do we have a packet?
	if(bytes_encoded > 0) {

		// allocate a packet
		std::unique_ptr<AVPacketWrapper> packet(new AVPacketWrapper(bytes_encoded));

		// copy the data
		memcpy(packet->GetPacket()->data, m_temp_buffer.data(), bytes_encoded);

		// set the timestamp
		// note: pts will be rescaled and stream_index will be set by Muxer
		if(GetCodecContext()->coded_frame != NULL && GetCodecContext()->coded_frame->pts != (int64_t) AV_NOPTS_VALUE)
			packet->GetPacket()->pts = GetCodecContext()->coded_frame->pts;

		// set the keyframe flag (coded_frame can be NULL for some codecs)
		if(GetCodecContext()->coded_frame != NULL && GetCodecContext()->coded_frame->key_frame)
			packet->GetPacket()->flags |= AV_PKT_FLAG_KEY;

		// send the packet to the muxer
		GetMuxer()->AddPacket(GetStreamIndex(), std::move(packet));
		return true;

	} else {
		return false;
	}

#endif

}
Ejemplo n.º 8
0
void VideoEncoder::FillCodecContext(AVCodec* codec) {

	// Configure the codec context with the encoder settings collected in the
	// member variables before the codec is opened.
	AVCodecContext *ctx = GetCodecContext();

	ctx->width = m_width;
	ctx->height = m_height;
	ctx->time_base.num = 1;
	ctx->time_base.den = m_frame_rate;
	ctx->bit_rate = m_bit_rate;
	ctx->pix_fmt = PIX_FMT_YUV420P;
	ctx->sample_aspect_ratio.num = 1;
	ctx->sample_aspect_ratio.den = 1;
	ctx->flags |= CODEC_FLAG_LOOP_FILTER;
	ctx->thread_count = m_opt_threads;

	// rate-control options are optional; (unsigned int) -1 means "not set"
	if(m_opt_minrate != (unsigned int) -1)
		ctx->rc_min_rate = m_opt_minrate;
	if(m_opt_maxrate != (unsigned int) -1)
		ctx->rc_max_rate = m_opt_maxrate;
	if(m_opt_bufsize != (unsigned int) -1)
		ctx->rc_buffer_size = m_opt_bufsize;

#if !SSR_USE_AVCODEC_PRIVATE_CRF
	if(m_opt_crf != (unsigned int) -1)
		ctx->crf = m_opt_crf;
#endif
#if !SSR_USE_AVCODEC_PRIVATE_PRESET
	if(m_opt_preset != "")
		X264Preset(ctx, m_opt_preset.toAscii().constData());
#endif

}
Ejemplo n.º 9
0
// Encodes one video frame, or flushes the encoder when 'frame' is NULL, and
// forwards any produced packet to the muxer. Throws LibavException on encoder
// errors. Return semantics depend on the API path: with send/receive, true
// means the encoder accepts more input and false means end of stream; with
// the older APIs, true means a packet was produced.
// BUGFIX (legacy path): coded_frame was dereferenced without a NULL check
// when setting the keyframe flag, even though the pts code just above guards
// against NULL — the guard is now applied consistently.
bool VideoEncoder::EncodeFrame(AVFrameWrapper* frame) {

	// sanity-check that the frame matches the encoder configuration
	if(frame != NULL) {
#if SSR_USE_AVFRAME_WIDTH_HEIGHT
		assert(frame->GetFrame()->width == GetCodecContext()->width);
		assert(frame->GetFrame()->height == GetCodecContext()->height);
#endif
#if SSR_USE_AVFRAME_FORMAT
		assert(frame->GetFrame()->format == GetCodecContext()->pix_fmt);
#endif
#if SSR_USE_AVFRAME_SAR
		assert(frame->GetFrame()->sample_aspect_ratio.num == GetCodecContext()->sample_aspect_ratio.num);
		assert(frame->GetFrame()->sample_aspect_ratio.den == GetCodecContext()->sample_aspect_ratio.den);
#endif
	}

#if SSR_USE_AVCODEC_SEND_RECEIVE

	// send a frame; ownership is taken from the wrapper and released with
	// av_frame_free on every path (success and exception)
	AVFrame *avframe = (frame == NULL)? NULL : frame->Release();
	try {
		if(avcodec_send_frame(GetCodecContext(), avframe) < 0) {
			Logger::LogError("[VideoEncoder::EncodeFrame] " + Logger::tr("Error: Sending of video frame failed!"));
			throw LibavException();
		}
	} catch(...) {
		av_frame_free(&avframe);
		throw;
	}
	av_frame_free(&avframe);

	// try to receive a packet; one sent frame may yield zero or more packets
	for( ; ; ) {
		std::unique_ptr<AVPacketWrapper> packet(new AVPacketWrapper());
		int res = avcodec_receive_packet(GetCodecContext(), packet->GetPacket());
		if(res == 0) { // we have a packet, send the packet to the muxer
			GetMuxer()->AddPacket(GetStream()->index, std::move(packet));
			IncrementPacketCounter();
		} else if(res == AVERROR(EAGAIN)) { // we have no packet
			return true;
		} else if(res == AVERROR_EOF) { // this is the end of the stream
			return false;
		} else {
			Logger::LogError("[VideoEncoder::EncodeFrame] " + Logger::tr("Error: Receiving of video packet failed!"));
			throw LibavException();
		}
	}

#elif SSR_USE_AVCODEC_ENCODE_VIDEO2

	// allocate a packet
	std::unique_ptr<AVPacketWrapper> packet(new AVPacketWrapper());

	// encode the frame (frame == NULL flushes delayed packets)
	int got_packet;
	if(avcodec_encode_video2(GetCodecContext(), packet->GetPacket(), (frame == NULL)? NULL : frame->GetFrame(), &got_packet) < 0) {
		Logger::LogError("[VideoEncoder::EncodeFrame] " + Logger::tr("Error: Encoding of video frame failed!"));
		throw LibavException();
	}

	// do we have a packet?
	if(got_packet) {

		// send the packet to the muxer
		GetMuxer()->AddPacket(GetStream()->index, std::move(packet));
		IncrementPacketCounter();
		return true;

	} else {
		return false;
	}

#else

	// encode the frame into the preallocated temporary buffer
	int bytes_encoded = avcodec_encode_video(GetCodecContext(), m_temp_buffer.data(), m_temp_buffer.size(), (frame == NULL)? NULL : frame->GetFrame());
	if(bytes_encoded < 0) {
		Logger::LogError("[VideoEncoder::EncodeFrame] " + Logger::tr("Error: Encoding of video frame failed!"));
		throw LibavException();
	}

	// do we have a packet?
	if(bytes_encoded > 0) {

		// allocate a packet
		std::unique_ptr<AVPacketWrapper> packet(new AVPacketWrapper(bytes_encoded));

		// copy the data
		memcpy(packet->GetPacket()->data, m_temp_buffer.data(), bytes_encoded);

		// set the timestamp
		// note: pts will be rescaled and stream_index will be set by Muxer
		if(GetCodecContext()->coded_frame != NULL && GetCodecContext()->coded_frame->pts != (int64_t) AV_NOPTS_VALUE)
			packet->GetPacket()->pts = GetCodecContext()->coded_frame->pts;

		// set the keyframe flag (coded_frame can be NULL for some codecs)
		if(GetCodecContext()->coded_frame != NULL && GetCodecContext()->coded_frame->key_frame)
			packet->GetPacket()->flags |= AV_PKT_FLAG_KEY;

		// send the packet to the muxer
		GetMuxer()->AddPacket(GetStream()->index, std::move(packet));
		IncrementPacketCounter();
		return true;

	} else {
		return false;
	}

#endif

}
Ejemplo n.º 10
0
unsigned int VideoEncoder::GetFrameRate() {
	assert(GetCodecContext()->time_base.num == 1);
	return GetCodecContext()->time_base.den;
}
Ejemplo n.º 11
0
unsigned int VideoEncoder::GetHeight() {
	// Frame height in pixels, as configured on the codec context.
	const unsigned int height = GetCodecContext()->height;
	return height;
}
Ejemplo n.º 12
0
unsigned int VideoEncoder::GetWidth() {
	// Frame width in pixels, as configured on the codec context.
	const unsigned int width = GetCodecContext()->width;
	return width;
}
Ejemplo n.º 13
0
AVPixelFormat VideoEncoder::GetPixelFormat() {
	// Pixel format as configured on the codec context.
	const AVPixelFormat fmt = GetCodecContext()->pix_fmt;
	return fmt;
}
Ejemplo n.º 14
0
unsigned int AudioEncoder::GetChannels() {
	// Channel count as configured on the codec context.
	const unsigned int channels = GetCodecContext()->channels;
	return channels;
}
Ejemplo n.º 15
0
void Stream::SetFrameRate(const AVRational& frame_rate)
{
    // Clamp the requested frame rate to one the encoder supports, then store
    // it as the codec time base. Note the deliberate inversion: time_base is
    // the duration of one frame, i.e. the reciprocal of the frame rate, so
    // num and den are swapped on purpose.
    AVRational fps = GetCodecContext().GetEncoder().GetSupportedFrameRate(frame_rate);
    get()->codec->time_base.num = fps.den;
    get()->codec->time_base.den = fps.num;
}
Ejemplo n.º 16
0
unsigned int AudioEncoder::GetSampleRate() {
	// Sample rate in Hz, as configured on the codec context.
	const unsigned int rate = GetCodecContext()->sample_rate;
	return rate;
}
Ejemplo n.º 17
0
// Audio decoder thread loop: consumes messages (stream found, packets, render
// notifications, EOF, stop), decodes queued packets with avcodec_decode_audio4
// and posts decoded frames to the audio renderer.
// BUGFIX: a negative return from avcodec_decode_audio4 (decode error) was
// previously added to packet.data/packet.size unchecked, corrupting the
// pointer and potentially looping forever; errors now abort the packet.
void CCAudioDecoder::Run()
{
    AudioDecoderStatus status = AUDIO_DECODER_STATUS_ENUM_UNKNOW;

    AVFormatContext* pAVFormatCtx = NULL;
    AVCodecContext* pAudioCodecCtx = NULL;
    AVRational audioTimeBase;

    AVFrame* pDecodedFrame = NULL;
    int gotFrame = 0;
    int decodedLen = 0;

    int audioFrameQueueSize = 0;

    bool bDataManagerEof = false;

    while(m_bRunning)
    {
        SmartPtr<Event> event;
        if(PopFrontMessage(event))
        {
            switch(event.GetPtr()->type)
            {
                case MESSAGE_TYPE_ENUM_FINDED_AUDIO_STREAM:
                {
                    CCChannels channels = -1;
                    CCRates rates = -1 ;
                    CCType type = CCType::unknow;
                    int asIndex = -1;

                    // params: [0] = format context, [1] = audio stream index
                    std::vector<Any> audioStreamInfo
                                        = any_cast<std::vector<Any> >(event.GetPtr()->anyParams);
                    pAVFormatCtx = any_cast<AVFormatContext*>(audioStreamInfo[0]);
                    asIndex = any_cast<int>(audioStreamInfo[1]);

                    GetCodecContext(pAVFormatCtx, asIndex, &pAudioCodecCtx, &audioTimeBase);
                    int ret = GetAudioInformation(pAudioCodecCtx, &channels, &rates, &type);

                    if(ret == 0)
                    {
                        // forward the stream parameters to the renderer
                        std::vector<Any> audioInformartion;
                        audioInformartion.push_back(Any(channels));
                        audioInformartion.push_back(Any(rates));
                        audioInformartion.push_back(Any(type));

                        PostMessage(MESSAGE_OBJECT_ENUM_AUDIO_DECODER,
                                    MESSAGE_OBJECT_ENUM_AUDIO_RENDER,
                                    MESSAGE_TYPE_ENUM_GET_AUDIO_INFORMATION,
                                    Any(audioInformartion));
                        //create the reusable decode frame
                        pDecodedFrame = avcodec_alloc_frame();

                        PostMessage(MESSAGE_OBJECT_ENUM_AUDIO_DECODER,
                                    MESSAGE_OBJECT_ENUM_DATA_MANAGER,
                                    MESSAGE_TYPE_ENUM_AUDIO_DECODER_READY,
                                    Any());

                        //turn the audio decoder status to working
                        status = AUDIO_DECODER_STATUS_ENUM_WORKING;
                    }
                }
                break;
                case MESSAGE_TYPE_ENUM_GET_AUDIO_PACKET:
                {
                    SmartPtr<CCPacket> shdPacket =
                                            any_cast<SmartPtr<CCPacket> >(event.GetPtr()->anyParams);
                    m_audioPacketQueue.push(shdPacket);
                }
                break;
                case MESSAGE_TYPE_ENUM_AUDIO_RENDER_A_FRAME:
                {
                    // the renderer consumed one frame
                    audioFrameQueueSize --;
                }
                break;
                case MESSAGE_TYPE_ENUM_DATA_MANAGER_EOF:
                {
                    bDataManagerEof = true;
                }
                break;
                case MESSAGE_TYPE_ENUM_CLIENT_STOP:
                {
                    status = AUDIO_DECODER_STATUS_ENUM_DEADED;
                }
                break;
            } // end switch case
        }// end if get a message

        //working in somethings
        switch(status)
        {
            case AUDIO_DECODER_STATUS_ENUM_WORKING:
            {
                if(audioFrameQueueSize < MAX_AUDIO_FRAME_QUEUE_SIZE)
                {
                    if(!m_audioPacketQueue.empty())
                    {
                        SmartPtr<CCPacket> shdPacket = m_audioPacketQueue.front();
                        m_audioPacketQueue.pop();

                        AVPacket packet = shdPacket.GetPtr()->GetPacket();

                        // a packet may contain several frames; decode until it
                        // is fully consumed
                        while(packet.size > 0)
                        {
                            avcodec_get_frame_defaults(pDecodedFrame);

                            decodedLen = avcodec_decode_audio4(pAudioCodecCtx,
                                                               pDecodedFrame,
                                                               &gotFrame,
                                                               &packet);

                            // BUGFIX: on decode error drop the rest of this
                            // packet instead of advancing by a negative length
                            if(decodedLen < 0)
                            {
                                break;
                            }

                            packet.data += decodedLen;
                            packet.size -= decodedLen;

                            if(gotFrame)
                            {
                                //increment the queue size count
                                audioFrameQueueSize ++;

                                int decodedDataSize = av_samples_get_buffer_size(NULL,
                                                                        pAudioCodecCtx->channels,
                                                                        pDecodedFrame->nb_samples,
                                                                        pAudioCodecCtx->sample_fmt,
                                                                        1);

                                // NOTE(review): only data[0] is copied — for
                                // planar sample formats this holds just the
                                // first plane; confirm the renderer expects
                                // interleaved data.
                                SmartPtr<AudioFrame> audioFrame(new AudioFrame(pDecodedFrame->data[0], decodedDataSize));
                                PostMessage(MESSAGE_OBJECT_ENUM_AUDIO_DECODER,
                                            MESSAGE_OBJECT_ENUM_AUDIO_RENDER,
                                            MESSAGE_TYPE_ENUM_GET_AUDIO_FRAME,
                                            Any(audioFrame));
                            }
                        }// end while decoder packet


                        //tell the datamanager we have decoded a packet
                        PostMessage(MESSAGE_OBJECT_ENUM_AUDIO_DECODER,
                                    MESSAGE_OBJECT_ENUM_DATA_MANAGER,
                                    MESSAGE_TYPE_ENUM_AUDIO_DEOCDER_A_PACKET,
                                    Any());
                    }// end the packet queue is not empty
                }// end not enough audio frame
                else if(bDataManagerEof)//there is no data for data manager
                {
                    PostMessage(MESSAGE_OBJECT_ENUM_AUDIO_DECODER,
                                MESSAGE_OBJECT_ENUM_AUDIO_RENDER,
                                MESSAGE_TYPE_ENUM_DATA_MANAGER_EOF,
                                Any());
                    m_bRunning = false;

                    continue;
                }else
                {
                    Sleep(10);
                }
            }
            break;
            case AUDIO_DECODER_STATUS_ENUM_SLEEPING:
            {
                Sleep(50);
            }
            break;
            case AUDIO_DECODER_STATUS_ENUM_UNKNOW:
            {

            }
            break;
            case AUDIO_DECODER_STATUS_ENUM_DEADED:
            {
                m_bRunning = false;
                continue;
            }
            break;
        }// end switch case
    }

    std::cout << "The audio decoder is deaded" << std::endl;
}