void AudioEncoder::PrepareStream(AVStream* stream, AVCodec* codec, AVDictionary** options, const std::vector<std::pair<QString, QString> >& codec_options,
								 unsigned int bit_rate, unsigned int channels, unsigned int sample_rate) {

	// sanity checks
	if(channels == 0) {
		Logger::LogError("[AudioEncoder::PrepareStream] " + Logger::tr("Error: Channel count is zero."));
		throw LibavException();
	}
	if(sample_rate == 0) {
		Logger::LogError("[AudioEncoder::PrepareStream] " + Logger::tr("Error: Sample rate is zero."));
		throw LibavException();
	}

	// set the basic stream parameters
	stream->codec->bit_rate = bit_rate;
	stream->codec->channels = channels;
	stream->codec->sample_rate = sample_rate;
	stream->codec->time_base.num = 1;
	stream->codec->time_base.den = sample_rate;
#if SSR_USE_AVSTREAM_TIME_BASE
	stream->time_base = stream->codec->time_base;
#endif
	stream->codec->thread_count = 1; // single-threaded by default, can be overridden with the 'threads' option

	// parse options
	QString sample_format_name;
	for(unsigned int i = 0; i < codec_options.size(); ++i) {
		const QString &key = codec_options[i].first, &value = codec_options[i].second;
		if(key == "threads") {
			stream->codec->thread_count = ParseCodecOptionInt(key, value, 1, 100);
		} else if(key == "qscale") {
			stream->codec->flags |= CODEC_FLAG_QSCALE;
			stream->codec->global_quality = lrint(ParseCodecOptionDouble(key, value, -1.0e6, 1.0e6, FF_QP2LAMBDA));
		} else if(key == "sampleformat") {
			sample_format_name = value;
		} else {
			// unknown keys are passed to the encoder as private options
			av_dict_set(options, key.toUtf8().constData(), value.toUtf8().constData(), 0);
		}
	}

	// choose the sample format: pick the first supported format, optionally restricted by the 'sampleformat' option
	stream->codec->sample_fmt = AV_SAMPLE_FMT_NONE;
	for(unsigned int i = 0; i < SUPPORTED_SAMPLE_FORMATS.size(); ++i) {
		if(!sample_format_name.isEmpty() && sample_format_name != SUPPORTED_SAMPLE_FORMATS[i].m_name)
			continue;
		if(!AVCodecSupportsSampleFormat(codec, SUPPORTED_SAMPLE_FORMATS[i].m_format))
			continue;
		Logger::LogInfo("[AudioEncoder::PrepareStream] " + Logger::tr("Using sample format %1.").arg(SUPPORTED_SAMPLE_FORMATS[i].m_name));
		stream->codec->sample_fmt = SUPPORTED_SAMPLE_FORMATS[i].m_format;
		break;
	}
	if(stream->codec->sample_fmt == AV_SAMPLE_FMT_NONE) {
		Logger::LogError("[AudioEncoder::PrepareStream] " + Logger::tr("Error: Encoder requires an unsupported sample format!"));
		throw LibavException();
	}

}
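// Illustration only, not part of the original file: a sketch of the key/value list that
// AudioEncoder::PrepareStream() consumes. Keys handled explicitly above ("threads", "qscale",
// "sampleformat") are interpreted by the function itself; any other key is forwarded verbatim to
// av_dict_set() and must therefore be a valid option of the selected libav/ffmpeg encoder.
// The helper name and the option values below are assumptions chosen for the example.
static std::vector<std::pair<QString, QString> > ExampleAudioCodecOptions() {
	std::vector<std::pair<QString, QString> > codec_options;
	codec_options.push_back(std::make_pair(QString("threads"), QString("1")));            // handled above: sets thread_count
	codec_options.push_back(std::make_pair(QString("qscale"), QString("5")));             // handled above: enables CODEC_FLAG_QSCALE
	codec_options.push_back(std::make_pair(QString("compression_level"), QString("8")));  // unknown key: forwarded to av_dict_set()
	return codec_options;
}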
void VideoEncoder::PrepareStream(AVStream* stream, AVCodec* codec, AVDictionary** options, const std::vector<std::pair<QString, QString> >& codec_options,
								 unsigned int bit_rate, unsigned int width, unsigned int height, unsigned int frame_rate) {

	// sanity checks
	if(width == 0 || height == 0) {
		Logger::LogError("[VideoEncoder::PrepareStream] " + Logger::tr("Error: Width or height is zero!"));
		throw LibavException();
	}
	if(width > 10000 || height > 10000) {
		Logger::LogError("[VideoEncoder::PrepareStream] " + Logger::tr("Error: Width or height is too large, the maximum width and height is %1!").arg(10000));
		throw LibavException();
	}
	if(width % 2 != 0 || height % 2 != 0) {
		Logger::LogError("[VideoEncoder::PrepareStream] " + Logger::tr("Error: Width or height is not an even number!"));
		throw LibavException();
	}
	if(frame_rate == 0) {
		Logger::LogError("[VideoEncoder::PrepareStream] " + Logger::tr("Error: Frame rate is zero!"));
		throw LibavException();
	}

	// set the basic stream parameters
	stream->codec->bit_rate = bit_rate;
	stream->codec->width = width;
	stream->codec->height = height;
	stream->codec->time_base.num = 1;
	stream->codec->time_base.den = frame_rate;
#if SSR_USE_AVSTREAM_TIME_BASE
	stream->time_base = stream->codec->time_base;
#endif

	// choose the first pixel format supported by the encoder
	stream->codec->pix_fmt = AV_PIX_FMT_NONE;
	for(unsigned int i = 0; i < SUPPORTED_PIXEL_FORMATS.size(); ++i) {
		if(AVCodecSupportsPixelFormat(codec, SUPPORTED_PIXEL_FORMATS[i].m_format)) {
			stream->codec->pix_fmt = SUPPORTED_PIXEL_FORMATS[i].m_format;
			if(SUPPORTED_PIXEL_FORMATS[i].m_is_yuv) {
				// set the color metadata for YUV formats (BT.709, limited range, centered chroma)
				stream->codec->color_primaries = AVCOL_PRI_BT709;
				stream->codec->color_trc = AVCOL_TRC_BT709;
				stream->codec->colorspace = AVCOL_SPC_BT709;
				stream->codec->color_range = AVCOL_RANGE_MPEG;
				stream->codec->chroma_sample_location = AVCHROMA_LOC_CENTER;
			} else {
				stream->codec->colorspace = AVCOL_SPC_RGB;
			}
			break;
		}
	}
	if(stream->codec->pix_fmt == AV_PIX_FMT_NONE) {
		Logger::LogError("[VideoEncoder::PrepareStream] " + Logger::tr("Error: Encoder requires an unsupported pixel format!"));
		throw LibavException();
	}

	stream->codec->sample_aspect_ratio.num = 1;
	stream->codec->sample_aspect_ratio.den = 1;
	stream->sample_aspect_ratio = stream->codec->sample_aspect_ratio;
	stream->codec->thread_count = std::max(1, (int) std::thread::hardware_concurrency()); // one thread per logical core, can be overridden with the 'threads' option

	// parse options
	for(unsigned int i = 0; i < codec_options.size(); ++i) {
		const QString &key = codec_options[i].first, &value = codec_options[i].second;
		if(key == "threads") {
			stream->codec->thread_count = ParseCodecOptionInt(key, value, 1, 100);
		} else if(key == "qscale") {
			stream->codec->flags |= CODEC_FLAG_QSCALE;
			stream->codec->global_quality = lrint(ParseCodecOptionDouble(key, value, -1.0e6, 1.0e6, FF_QP2LAMBDA));
		} else if(key == "minrate") {
			stream->codec->rc_min_rate = ParseCodecOptionInt(key, value, 1, 1000000, 1024); // kbit/s
		} else if(key == "maxrate") {
			stream->codec->rc_max_rate = ParseCodecOptionInt(key, value, 1, 1000000, 1024); // kbit/s
		} else if(key == "bufsize") {
			stream->codec->rc_buffer_size = ParseCodecOptionInt(key, value, 1, 1000000, 1024); // kbit
		} else if(key == "keyint") {
			stream->codec->gop_size = ParseCodecOptionInt(key, value, 1, 1000000);
#if !SSR_USE_AVCODEC_PRIVATE_CRF
		} else if(key == "crf") {
			stream->codec->crf = ParseCodecOptionInt(key, value, 0, 51);
#endif
#if !SSR_USE_AVCODEC_PRIVATE_PRESET
		} else if(key == "preset") {
			X264Preset(stream->codec, value.toUtf8().constData());
#endif
		} else {
			// unknown keys are passed to the encoder as private options
			av_dict_set(options, key.toUtf8().constData(), value.toUtf8().constData(), 0);
		}
	}

}
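// Illustration only, not part of the original file: a sketch of the key/value list that
// VideoEncoder::PrepareStream() consumes. "minrate", "maxrate" and "bufsize" are specified in
// kbit (the trailing 1024 argument to ParseCodecOptionInt scales them to bits), "keyint" sets the
// GOP size, and "crf"/"preset" either use the old public AVCodecContext fields or, on newer
// libav/ffmpeg versions where SSR_USE_AVCODEC_PRIVATE_CRF / SSR_USE_AVCODEC_PRIVATE_PRESET are
// set, fall through to av_dict_set() as private x264 options. The helper name and the values
// below are assumptions chosen for the example.
static std::vector<std::pair<QString, QString> > ExampleX264CodecOptions() {
	std::vector<std::pair<QString, QString> > codec_options;
	codec_options.push_back(std::make_pair(QString("crf"), QString("23")));            // constant rate factor (0-51, lower is higher quality)
	codec_options.push_back(std::make_pair(QString("preset"), QString("superfast")));  // x264 speed/compression trade-off
	codec_options.push_back(std::make_pair(QString("keyint"), QString("60")));         // keyframe interval in frames
	codec_options.push_back(std::make_pair(QString("threads"), QString("4")));         // overrides the automatic thread count
	return codec_options;
}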