bool VideoEncoder::encode(const AVFrame* decodedData, AVPacket& encodedData) { // Be sure that data of AVPacket is NULL so that the encoder will allocate it encodedData.data = NULL; AVCodecContext& avCodecContext = _codec.getAVCodecContext(); #if LIBAVCODEC_VERSION_MAJOR > 53 int gotPacket = 0; const int ret = avcodec_encode_video2(&avCodecContext, &encodedData, decodedData, &gotPacket); if(ret != 0) { throw std::runtime_error("Encode video frame error: avcodec encode video frame - " + getDescriptionFromErrorCode(ret)); } return gotPacket == 1; #else const int ret = avcodec_encode_video(&avCodecContext, encodedData.data, encodedData.size, decodedData); if(ret < 0) { throw std::runtime_error("Encode video frame error: avcodec encode video frame - " + getDescriptionFromErrorCode(ret)); } return true; #endif }
/// Run the input frames through the filter graph and collect the result in @p output.
/// Audio inputs may be buffered so that all graph sources are fed frames with the
/// same number of samples.
/// @throws std::runtime_error if pushing to or pulling from the graph fails.
void FilterGraph::process(const std::vector<IFrame*>& inputs, IFrame& output)
{
    // Lazily build and configure the graph on first use.
    if(!_isInit)
        init(inputs, output);

    // Buffers can be bypassed when there are none (video case), or when all inputs
    // carry the same number of samples and nothing is left over from previous calls.
    const bool bypassBuffers = _inputAudioFrameBuffers.empty() || (areInputFrameSizesEqual(inputs) && areFrameBuffersEmpty());
    size_t minInputFrameSamplesNb = 0;
    if(!bypassBuffers)
    {
        // Accumulate each non-empty input into its per-input audio buffer.
        for(size_t index = 0; index < inputs.size(); ++index)
        {
            if(!inputs.at(index)->getDataSize())
            {
                // Empty input: keep draining what is already buffered for this slot.
                LOG_DEBUG("Empty frame from filter graph input " << index << ". Remaining audio frames in buffer: "
                                                                 << _inputAudioFrameBuffers.at(index).getBufferSize());
                continue;
            }
            _inputAudioFrameBuffers.at(index).addFrame(inputs.at(index));
        }

        // Feed every source with the same (minimal) number of samples this round.
        minInputFrameSamplesNb = getMinInputFrameSamplesNb(inputs);
    }

    // Push one frame into each source buffer of the graph.
    for(size_t index = 0; index < inputs.size(); ++index)
    {
        // Either forward the caller's frame directly, or pull a frame of
        // minInputFrameSamplesNb samples out of the corresponding buffer.
        IFrame* inputFrame = (bypassBuffers)? inputs.at(index) : _inputAudioFrameBuffers.at(index).getFrameSampleNb(minInputFrameSamplesNb);
        const int ret = av_buffersrc_add_frame_flags(_filters.at(index)->getAVFilterContext(), &inputFrame->getAVFrame(), AV_BUFFERSRC_FLAG_PUSH);
        if(ret < 0)
        {
            throw std::runtime_error("Error when adding a frame to the source buffer used to start to process filters: " +
                                     getDescriptionFromErrorCode(ret));
        }
    }

    // Drain the sink (last filter) until it has no more frames to deliver.
    // NOTE(review): each successful call rewrites the same output frame, so only
    // the last pulled frame survives — presumably the graph yields at most one
    // frame per process() call; confirm for filters that can produce several.
    for(;;)
    {
        const int ret = av_buffersink_get_frame(_filters.at(_filters.size() - 1)->getAVFilterContext(), &output.getAVFrame());
        // EOF / EAGAIN simply mean "no more data right now", not an error.
        if(ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
            break;
        if(ret < 0)
        {
            throw std::runtime_error("Error reading buffer from buffersink: " + getDescriptionFromErrorCode(ret));
        }
    }
}
bool AudioDecoder::decodeNextFrame() { if(!_isSetup) setupDecoder(); int got_frame = 0; while( ! got_frame ) { CodedData data; bool nextPacketRead = _inputStream->readNextPacket( data ); if( ! nextPacketRead ) // error or end of file data.clear(); int ret = avcodec_decode_audio4( &_inputStream->getAudioCodec().getAVCodecContext(), _frame, &got_frame, &data.getAVPacket() ); if( ! nextPacketRead && ret == 0 && got_frame == 0 ) // no frame could be decompressed return false; if( ret < 0 ) { throw std::runtime_error( "an error occured during audio decoding" + getDescriptionFromErrorCode( ret ) ); } } return true; }
/// Validate the return code of an FFmpeg option lookup.
/// @throws std::runtime_error when @p ffmpegReturnCode reports a failure.
void Option::checkFFmpegGetOption(const int ffmpegReturnCode) const
{
    if(ffmpegReturnCode == 0)
        return;
    throw std::runtime_error("unknown key " + getName() + ": " + getDescriptionFromErrorCode(ffmpegReturnCode));
}
/// Attach an external image buffer to the underlying AVFrame.
/// @param ptrValue pixel data matching the frame's format, width and height.
/// @throws std::runtime_error if avpicture_fill rejects the buffer.
void VideoFrame::assign(const unsigned char* ptrValue)
{
    const int fillStatus = avpicture_fill(reinterpret_cast<AVPicture*>(_frame), ptrValue, getPixelFormat(), getWidth(), getHeight());
    if(fillStatus >= 0)
        return;

    std::stringstream message;
    message << "Unable to assign an image buffer of " << getSize() << " bytes: " << getDescriptionFromErrorCode(fillStatus);
    throw std::runtime_error(message.str());
}
/// Instantiate @p filter inside the graph and hand it its AVFilterContext.
/// @throws std::runtime_error if the filter cannot be created in the graph.
void FilterGraph::pushFilter(Filter& filter)
{
    AVFilterContext* context = NULL;
    const int err = avfilter_graph_create_filter(&context, &filter.getAVFilter(), filter.getInstanceName().c_str(),
                                                 filter.getOptions().c_str(), NULL, _graph);
    if(err < 0)
    {
        std::string msg("Cannot add filter ");
        msg += filter.getName();
        msg += " (instance=";
        msg += filter.getInstanceName();
        msg += ") to the graph: ";
        msg += getDescriptionFromErrorCode(err);
        throw std::runtime_error(msg);
    }
    // Store the context only after creation succeeded: the original code stored it
    // before the error check, leaving the filter holding a NULL/invalid context
    // on failure.
    filter.setAVFilterContext(context);
}
/// Build the filter graph: create source/sink buffers, instantiate the filters,
/// link them together and configure the graph.
/// @throws std::runtime_error if linking or configuring the graph fails.
void FilterGraph::init(const std::vector<IFrame*>& inputs, IFrame& output)
{
    // Add one source buffer per input and one sink buffer for the output,
    // then create every filter inside the graph.
    addInBuffer(inputs);
    addOutBuffer(output);
    for(size_t i = 0; i < _filters.size(); ++i)
    {
        pushFilter(*_filters.at(i));
    }

    // Connect the filters in sequence. The first inputs.size() entries of
    // _filters are the source buffers (appended by addInBuffer), so each of
    // them is linked to a distinct input pad of the first real filter.
    for(size_t index = 0; index < _filters.size() - 1; ++index)
    {
        size_t indexOfOutputFilterToConnect = index + 1;
        size_t indexOfInputPadOfDestinationFilter = 0;

        // Multi-input case: every source buffer feeds pad `index` of the filter
        // located right after the sources (at position inputs.size()).
        // NOTE(review): assumes _filters holds [sources..., filters..., sink] in
        // that order — confirm against addInBuffer/addOutBuffer.
        if(index < inputs.size())
        {
            indexOfOutputFilterToConnect = inputs.size();
            indexOfInputPadOfDestinationFilter = index;
        }

        LOG_INFO("Connect filter " << _filters.at(index)->getName() << " to filter "
                                   << _filters.at(indexOfOutputFilterToConnect)->getName())
        const int err = avfilter_link(_filters.at(index)->getAVFilterContext(), 0,
                                      _filters.at(indexOfOutputFilterToConnect)->getAVFilterContext(),
                                      indexOfInputPadOfDestinationFilter);
        if(err < 0)
        {
            throw std::runtime_error("Error when connecting filters.");
        }
    }

    // Validate links and formats for the whole graph.
    LOG_INFO("Configuring filter graph.")
    const int err = avfilter_graph_config(_graph, NULL);
    if(err < 0)
    {
        throw std::runtime_error("Error configuring the filter graph: " + getDescriptionFromErrorCode(err));
    }

    _isInit = true;
}
/// Validate the return code of an FFmpeg option assignment.
/// @param optionValue the value that was being set, echoed in the error message.
/// @throws std::runtime_error when @p ffmpegReturnCode reports a failure.
void Option::checkFFmpegSetOption(const int ffmpegReturnCode, const std::string& optionValue)
{
    if(ffmpegReturnCode == 0)
        return;
    throw std::runtime_error("setting " + getName() + " parameter to " + optionValue + ": " +
                             getDescriptionFromErrorCode(ffmpegReturnCode));
}