Example #1
AVStream* Muxer::CreateStream(AVCodec* codec) {
	Q_ASSERT(!m_started);
	Q_ASSERT(m_format_context->nb_streams < MUXER_MAX_STREAMS);

	// create a new stream
#if SSR_USE_AVFORMAT_NEW_STREAM
	AVStream *stream = avformat_new_stream(m_format_context, codec);
#else
	AVStream *stream = av_new_stream(m_format_context, m_format_context->nb_streams);
#endif
	if(stream == NULL) {
		Logger::LogError("[Muxer::AddStream] " + QObject::tr("Error: Can't create new stream!"));
		throw LibavException();
	}

#if !SSR_USE_AVFORMAT_NEW_STREAM
	if(avcodec_get_context_defaults3(stream->codec, codec) < 0) {
		Logger::LogError("[Muxer::AddStream] " + QObject::tr("Error: Can't get codec context defaults!"));
		throw LibavException();
	}
	stream->codec->codec_id = codec->id;
	stream->codec->codec_type = codec->type;
#endif

	// not sure why this is needed, but it's in the example code and it doesn't work without this
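	// (AVFMT_GLOBALHEADER means the container stores codec extradata, e.g. H.264 SPS/PPS,
	// in the file header instead of repeating it in every keyframe packet)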
	if(m_format_context->oformat->flags & AVFMT_GLOBALHEADER)
		stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

	return stream;
}
Example #2
void Muxer::Init() {

	// get the format we want (this is just a pointer, we don't have to free this)
	AVOutputFormat *format = av_guess_format(m_container_name.toAscii().constData(), NULL, NULL);
	if(format == NULL) {
		Logger::LogError("[Muxer::Init] " + Logger::tr("Error: Can't find chosen output format!"));
		throw LibavException();
	}

	Logger::LogInfo("[Muxer::Init] " + Logger::tr("Using format %1 (%2).").arg(format->name).arg(format->long_name));

	// allocate format context
	m_format_context = avformat_alloc_context();
	if(m_format_context == NULL) {
		Logger::LogError("[Muxer::Init] " + Logger::tr("Error: Can't allocate format context!"));
		throw LibavException();
	}
	m_format_context->oformat = format;

	// open file
	if(avio_open(&m_format_context->pb, m_output_file.toLocal8Bit().constData(), AVIO_FLAG_WRITE) < 0) {
		Logger::LogError("[Muxer::Init] " + Logger::tr("Error: Can't open output file!"));
		throw LibavException();
	}

}
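
Muxer::Init() performs the first steps of the classic libav/ffmpeg muxing sequence. For orientation, here is a minimal standalone sketch (not SSR code, the function name is made up) of the full call order; on older libav/ffmpeg, av_register_all() must be called once beforehand, and error handling is reduced to early returns.

extern "C" {
#include <libavformat/avformat.h>
}

// Minimal sketch of the surrounding muxing call order; real code must add at
// least one stream and encoder before avformat_write_header() will succeed.
bool WriteContainerSkeleton(const char* filename, const char* container) {
	AVOutputFormat *format = av_guess_format(container, NULL, NULL);
	if(format == NULL)
		return false;
	AVFormatContext *ctx = avformat_alloc_context();
	if(ctx == NULL)
		return false;
	ctx->oformat = format;
	if(avio_open(&ctx->pb, filename, AVIO_FLAG_WRITE) < 0) {
		avformat_free_context(ctx);
		return false;
	}
	// ... add streams here (see the AddStream examples below), then ...
	bool ok = (avformat_write_header(ctx, NULL) == 0);
	if(ok) {
		// ... write packets with av_interleaved_write_frame() ...
		av_write_trailer(ctx);
	}
	avio_close(ctx->pb);
	avformat_free_context(ctx);
	return ok;
}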
Example #3
void Muxer::Init() {

	// get the format we want (this is just a pointer, we don't have to free this)
	AVOutputFormat *format = av_guess_format(m_container_name.toUtf8().constData(), NULL, NULL);
	if(format == NULL) {
		Logger::LogError("[Muxer::Init] " + Logger::tr("Error: Can't find chosen output format!"));
		throw LibavException();
	}

	Logger::LogInfo("[Muxer::Init] " + Logger::tr("Using format %1 (%2).").arg(format->name).arg(format->long_name));

	// allocate format context
	// ffmpeg probably wants us to use avformat_alloc_output_context2 instead, but libav doesn't have it and I can't figure out why it's needed anyway
	m_format_context = avformat_alloc_context();
	if(m_format_context == NULL) {
		Logger::LogError("[Muxer::Init] " + Logger::tr("Error: Can't allocate format context!"));
		throw LibavException();
	}
	m_format_context->oformat = format;

	// open file
	if(avio_open(&m_format_context->pb, m_output_file.toLocal8Bit().constData(), AVIO_FLAG_WRITE) < 0) {
		Logger::LogError("[Muxer::Init] " + Logger::tr("Error: Can't open output file!"));
		throw LibavException();
	}

}
Example #4
AVStream* Muxer::AddStream(AVCodec* codec, AVCodecContext** codec_context) {
	assert(!m_started);
	assert(m_format_context->nb_streams < MUXER_MAX_STREAMS);

	Logger::LogInfo("[Muxer::AddStream] " + Logger::tr("Using codec %1 (%2).").arg(codec->name).arg(codec->long_name));

	// create a new stream
#if SSR_USE_AVSTREAM_CODECPAR
	AVStream *stream = avformat_new_stream(m_format_context, NULL);
#elif SSR_USE_AVFORMAT_NEW_STREAM
	AVStream *stream = avformat_new_stream(m_format_context, codec);
#else
	AVStream *stream = av_new_stream(m_format_context, m_format_context->nb_streams);
#endif
	if(stream == NULL) {
		Logger::LogError("[Muxer::AddStream] " + Logger::tr("Error: Can't create new stream!"));
		throw LibavException();
	}
	assert(stream->index == (int) m_format_context->nb_streams - 1);
#if SSR_USE_AVSTREAM_CODECPAR
	*codec_context = avcodec_alloc_context3(codec);
	if(*codec_context == NULL) {
		Logger::LogError("[Muxer::AddStream] " + Logger::tr("Error: Can't create new codec context!"));
		throw LibavException();
	}
#else
	assert(stream->codec != NULL);
	*codec_context = stream->codec;
#endif
	//stream->id = m_format_context->nb_streams - 1;

#if !SSR_USE_AVFORMAT_NEW_STREAM
	// initialize the codec context (only needed for old API)
	if(avcodec_get_context_defaults3(*codec_context, codec) < 0) {
		Logger::LogError("[Muxer::AddStream] " + Logger::tr("Error: Can't get codec context defaults!"));
		throw LibavException();
	}
	(*codec_context)->codec_id = codec->id;
	(*codec_context)->codec_type = codec->type;
#endif

	// not sure why this is needed, but it's in the example code and it doesn't work without this
	if(m_format_context->oformat->flags & AVFMT_GLOBALHEADER)
		(*codec_context)->flags |= CODEC_FLAG_GLOBAL_HEADER;

	// if the codec is experimental, allow it
	if(codec->capabilities & CODEC_CAP_EXPERIMENTAL) {
		Logger::LogWarning("[Muxer::AddStream] " + Logger::tr("Warning: This codec is considered experimental by libav/ffmpeg."));
		(*codec_context)->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
	}

#if SSR_USE_SIDE_DATA_ONLY_PACKETS && !SSR_USE_SIDE_DATA_ONLY_PACKETS_DEPRECATED
	// this option was added with the intent to deprecate it again in the next version,
	// because the ffmpeg/libav devs like deprecating things :)
	(*codec_context)->side_data_only_packets = 1;
#endif

	return stream;
}
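
This version supports three API generations at once. Reduced to only the newest path, the required order is: create the stream without a codec, allocate a separate codec context, configure and open it, and finally copy the parameters into stream->codecpar (which Muxer::AddVideoEncoder below does after PrepareStream). A condensed sketch, assuming the codecpar API is available; the function name is made up.

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}

// Sketch of the codecpar-era setup order; the stream no longer owns the codec
// context, so the parameters are copied over once the context has been opened.
AVStream* AddStreamSketch(AVFormatContext* fmt, AVCodec* codec, AVCodecContext** out_ctx) {
	AVStream *stream = avformat_new_stream(fmt, NULL);
	if(stream == NULL)
		return NULL;
	AVCodecContext *ctx = avcodec_alloc_context3(codec);
	if(ctx == NULL)
		return NULL;
	// ... set width/height/time_base/pix_fmt (or sample_rate/channels) here ...
	if(avcodec_open2(ctx, codec, NULL) < 0 ||
	   avcodec_parameters_from_context(stream->codecpar, ctx) < 0) {
		avcodec_free_context(&ctx);
		return NULL;
	}
	*out_ctx = ctx;
	return stream;
}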
Example #5
void AudioEncoder::PrepareStream(AVStream* stream, AVCodec* codec, AVDictionary** options, const std::vector<std::pair<QString, QString> >& codec_options,
								 unsigned int bit_rate, unsigned int channels, unsigned int sample_rate) {

	if(channels == 0) {
		Logger::LogError("[AudioEncoder::PrepareStream] " + Logger::tr("Error: Channel count is zero."));
		throw LibavException();
	}
	if(sample_rate == 0) {
		Logger::LogError("[AudioEncoder::PrepareStream] " + Logger::tr("Error: Sample rate is zero."));
		throw LibavException();
	}

	stream->codec->bit_rate = bit_rate;
	stream->codec->channels = channels;
	stream->codec->sample_rate = sample_rate;
	stream->codec->time_base.num = 1;
	stream->codec->time_base.den = sample_rate;
#if SSR_USE_AVSTREAM_TIME_BASE
	stream->time_base = stream->codec->time_base;
#endif
	stream->codec->thread_count = 1;

	// parse options
	QString sample_format_name;
	for(unsigned int i = 0; i < codec_options.size(); ++i) {
		const QString &key = codec_options[i].first, &value = codec_options[i].second;
		if(key == "threads") {
			stream->codec->thread_count = ParseCodecOptionInt(key, value, 1, 100);
		} else if(key == "qscale") {
			stream->codec->flags |= CODEC_FLAG_QSCALE;
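			// global_quality is measured in lambda units, hence the FF_QP2LAMBDA conversion factor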
			stream->codec->global_quality = lrint(ParseCodecOptionDouble(key, value, -1.0e6, 1.0e6, FF_QP2LAMBDA));
		} else if(key == "sampleformat") {
			sample_format_name = value;
		} else {
			av_dict_set(options, key.toUtf8().constData(), value.toUtf8().constData(), 0);
		}
	}

	// choose the sample format
	stream->codec->sample_fmt = AV_SAMPLE_FMT_NONE;
	for(unsigned int i = 0; i < SUPPORTED_SAMPLE_FORMATS.size(); ++i) {
		if(!sample_format_name.isEmpty() && sample_format_name != SUPPORTED_SAMPLE_FORMATS[i].m_name)
			continue;
		if(!AVCodecSupportsSampleFormat(codec, SUPPORTED_SAMPLE_FORMATS[i].m_format))
			continue;
		Logger::LogInfo("[AudioEncoder::PrepareStream] " + Logger::tr("Using sample format %1.").arg(SUPPORTED_SAMPLE_FORMATS[i].m_name));
		stream->codec->sample_fmt = SUPPORTED_SAMPLE_FORMATS[i].m_format;
		break;
	}
	if(stream->codec->sample_fmt == AV_SAMPLE_FMT_NONE) {
		Logger::LogError("[AudioEncoder::PrepareStream] " + Logger::tr("Error: Encoder requires an unsupported sample format!"));
		throw LibavException();
	}

}
Example #6
AVCodec* Muxer::FindCodec(const QString& codec_name) {
	AVCodec *codec = avcodec_find_encoder_by_name(codec_name.toUtf8().constData());
	if(codec == NULL) {
		Logger::LogError("[Muxer::FindCodec] " + Logger::tr("Error: Can't find codec!"));
		throw LibavException();
	}
	return codec;
}
Example #7
AVStream* Muxer::AddStream(AVCodec* codec) {
	assert(!m_started);
	assert(m_format_context->nb_streams < MUXER_MAX_STREAMS);

	Logger::LogInfo("[Muxer::AddStream] " + Logger::tr("Using codec %1 (%2).").arg(codec->name).arg(codec->long_name));

	// create a new stream
#if SSR_USE_AVFORMAT_NEW_STREAM
	AVStream *stream = avformat_new_stream(m_format_context, codec);
#else
	AVStream *stream = av_new_stream(m_format_context, m_format_context->nb_streams);
#endif
	if(stream == NULL) {
		Logger::LogError("[Muxer::AddStream] " + Logger::tr("Error: Can't create new stream!"));
		throw LibavException();
	}
	assert(stream->index == (int) m_format_context->nb_streams - 1);
	assert(stream->codec != NULL);
	//stream->id = m_format_context->nb_streams - 1;

#if !SSR_USE_AVFORMAT_NEW_STREAM
	// initialize the codec context (only needed for old API)
	if(avcodec_get_context_defaults3(stream->codec, codec) < 0) {
		Logger::LogError("[Muxer::AddStream] " + Logger::tr("Error: Can't get codec context defaults!"));
		throw LibavException();
	}
	stream->codec->codec_id = codec->id;
	stream->codec->codec_type = codec->type;
#else
	//assert(stream->codec->codec_id == codec->id);
	//assert(stream->codec->codec_type == codec->type);
#endif

	// not sure why this is needed, but it's in the example code and it doesn't work without this
	if(m_format_context->oformat->flags & AVFMT_GLOBALHEADER)
		stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

	// if the codec is experimental, allow it
	if(codec->capabilities & CODEC_CAP_EXPERIMENTAL) {
		Logger::LogWarning("[Muxer::AddStream] " + Logger::tr("Warning: This codec is considered experimental by libav/ffmpeg."));
		stream->codec->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
	}

	return stream;
}
Example #8
int ParseCodecOptionInt(const QString& key, const QString& value, int min, int max, int multiply) {
	bool parsed;
	int value_int = value.toInt(&parsed);
	if(!parsed) {
		Logger::LogError("[ParseCodecOptionInt] " + Logger::tr("Error: Option '%1' could not be parsed!").arg(key));
		throw LibavException();
	}
	return clamp(value_int, min, max) * multiply;
}
Example #9
double ParseCodecOptionDouble(const QString& key, const QString& value, double min, double max, double multiply) {
	bool parsed;
	double value_double = value.toDouble(&parsed);
	if(!parsed) {
		Logger::LogError("[ParseCodecOptionDouble] " + Logger::tr("Error: Option '%1' could not be parsed!").arg(key));
		throw LibavException();
	}
	return clamp(value_double, min, max) * multiply;
}
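
Both helpers clamp the parsed value to [min, max] before multiplying. The three-argument calls used elsewhere in these examples (e.g. ParseCodecOptionInt(key, value, 1, 100)) suggest the declarations default multiply to 1. Hypothetical inputs for illustration:

// Hypothetical values, assuming a declaration default of multiply = 1:
int threads = ParseCodecOptionInt("threads", "4", 1, 100);                    // 4
int bufsize = ParseCodecOptionInt("bufsize", "2000", 1, 1000000, 1024);       // 2048000 (kbit to bit)
double quality = ParseCodecOptionDouble("qscale", "5", -1.0e6, 1.0e6, FF_QP2LAMBDA); // 5 * FF_QP2LAMBDA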
Example #10
void BaseEncoder::Init(AVCodec* codec, AVDictionary** options) {

	// open codec
	if(avcodec_open2(m_stream->codec, codec, options) < 0) {
		Logger::LogError("[BaseEncoder::Init] " + Logger::tr("Error: Can't open codec!"));
		throw LibavException();
	}
	m_codec_opened = true;

	// show a warning for every option that wasn't recognized
	AVDictionaryEntry *t = NULL;
	while((t = av_dict_get(*options, "", t, AV_DICT_IGNORE_SUFFIX)) != NULL) {
		Logger::LogWarning("[BaseEncoder::Init] " + Logger::tr("Warning: Codec option '%1' was not recognised!").arg(t->key));
	}

}
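
The leftover-option warning works because avcodec_open2() consumes every dictionary entry it recognises; whatever remains was not understood by the encoder. The iteration idiom itself, shown standalone:

extern "C" {
#include <libavutil/dict.h>
}
#include <cstdio>

// Standalone demonstration of iterating an AVDictionary: an empty key plus
// AV_DICT_IGNORE_SUFFIX matches every entry, and passing the previous entry
// continues the iteration.
int main() {
	AVDictionary *options = NULL;
	av_dict_set(&options, "threads", "4", 0);
	av_dict_set(&options, "no_such_option", "1", 0);
	AVDictionaryEntry *t = NULL;
	while((t = av_dict_get(options, "", t, AV_DICT_IGNORE_SUFFIX)) != NULL)
		printf("%s = %s\n", t->key, t->value);
	av_dict_free(&options);
	return 0;
}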
Example #11
void Muxer::Start() {
	assert(!m_started);

	// make sure all encoders were created successfully
	for(unsigned int i = 0; i < m_format_context->nb_streams; ++i) {
		assert(m_encoders[i] != NULL);
	}

	// write header
	if(avformat_write_header(m_format_context, NULL) != 0) {
		Logger::LogError("[Muxer::Start] " + Logger::tr("Error: Can't write header!", "Don't translate 'header'"));
		throw LibavException();
	}

	m_started = true;
	m_thread = std::thread(&Muxer::MuxerThread, this);

}
Example #12
VideoEncoder* Muxer::AddVideoEncoder(const QString& codec_name, const std::vector<std::pair<QString, QString> >& codec_options,
									 unsigned int bit_rate, unsigned int width, unsigned int height, unsigned int frame_rate) {
	AVCodec *codec = FindCodec(codec_name);
	AVCodecContext *codec_context = NULL;
	AVStream *stream = AddStream(codec, &codec_context);
	VideoEncoder *encoder;
	AVDictionary *options = NULL;
	try {
		VideoEncoder::PrepareStream(stream, codec_context, codec, &options, codec_options, bit_rate, width, height, frame_rate);
		m_encoders[stream->index] = encoder = new VideoEncoder(this, stream, codec_context, codec, &options);
#if SSR_USE_AVSTREAM_CODECPAR
		if(avcodec_parameters_from_context(stream->codecpar, codec_context) < 0) {
			Logger::LogError("[Muxer::AddVideoEncoder] " + Logger::tr("Error: Can't copy parameters to stream!"));
			throw LibavException();
		}
#endif
		av_dict_free(&options);
	} catch(...) {
		av_dict_free(&options);
		throw;
	}
	return encoder;
}
Example #13
bool AudioEncoder::EncodeFrame(AVFrameWrapper* frame) {

	if(frame != NULL) {
#if SSR_USE_AVFRAME_NB_SAMPLES
		assert((unsigned int) frame->GetFrame()->nb_samples == GetFrameSize());
#endif
#if SSR_USE_AVFRAME_CHANNELS
		assert(frame->GetFrame()->channels == GetCodecContext()->channels);
#endif
#if SSR_USE_AVFRAME_SAMPLE_RATE
		assert(frame->GetFrame()->sample_rate == GetCodecContext()->sample_rate);
#endif
#if SSR_USE_AVFRAME_FORMAT
		assert(frame->GetFrame()->format == GetCodecContext()->sample_fmt);
#endif
	}

#if SSR_USE_AVCODEC_SEND_RECEIVE

	// send a frame
	AVFrame *avframe = (frame == NULL)? NULL : frame->Release();
	try {
		if(avcodec_send_frame(GetCodecContext(), avframe) < 0) {
			Logger::LogError("[AudioEncoder::EncodeFrame] " + Logger::tr("Error: Sending of audio frame failed!"));
			throw LibavException();
		}
	} catch(...) {
		av_frame_free(&avframe);
		throw;
	}
	av_frame_free(&avframe);

	// try to receive a packet
	for( ; ; ) {
		std::unique_ptr<AVPacketWrapper> packet(new AVPacketWrapper());
		int res = avcodec_receive_packet(GetCodecContext(), packet->GetPacket());
		if(res == 0) { // we have a packet, send the packet to the muxer
			GetMuxer()->AddPacket(GetStream()->index, std::move(packet));
			IncrementPacketCounter();
		} else if(res == AVERROR(EAGAIN)) { // we have no packet
			return true;
		} else if(res == AVERROR_EOF) { // this is the end of the stream
			return false;
		} else {
			Logger::LogError("[AudioEncoder::EncodeFrame] " + Logger::tr("Error: Receiving of audio packet failed!"));
			throw LibavException();
		}
	}

#elif SSR_USE_AVCODEC_ENCODE_AUDIO2

	// allocate a packet
	std::unique_ptr<AVPacketWrapper> packet(new AVPacketWrapper());

	// encode the frame
	int got_packet;
	if(avcodec_encode_audio2(GetCodecContext(), packet->GetPacket(), (frame == NULL)? NULL : frame->GetFrame(), &got_packet) < 0) {
		Logger::LogError("[AudioEncoder::EncodeFrame] " + Logger::tr("Error: Encoding of audio frame failed!"));
		throw LibavException();
	}

	// do we have a packet?
	if(got_packet) {

		// send the packet to the muxer
		GetMuxer()->AddPacket(GetStream()->index, std::move(packet));
		IncrementPacketCounter();
		return true;

	} else {
		return false;
	}

#else

	// encode the frame
	short *data = (frame == NULL)? NULL : (short*) frame->GetFrame()->data[0];
	int bytes_encoded = avcodec_encode_audio(GetCodecContext(), m_temp_buffer.data(), m_temp_buffer.size(), data);
	if(bytes_encoded < 0) {
		Logger::LogError("[AudioEncoder::EncodeFrame] " + Logger::tr("Error: Encoding of audio frame failed!"));
		throw LibavException();
	}

	// do we have a packet?
	if(bytes_encoded > 0) {

		// allocate a packet
		std::unique_ptr<AVPacketWrapper> packet(new AVPacketWrapper(bytes_encoded));

		// copy the data
		memcpy(packet->GetPacket()->data, m_temp_buffer.data(), bytes_encoded);

		// set the timestamp
		// note: pts will be rescaled and stream_index will be set by Muxer
		if(GetCodecContext()->coded_frame != NULL && GetCodecContext()->coded_frame->pts != (int64_t) AV_NOPTS_VALUE)
			packet->GetPacket()->pts = GetCodecContext()->coded_frame->pts;

		// send the packet to the muxer
		GetMuxer()->AddPacket(GetStream()->index, std::move(packet));
		IncrementPacketCounter();
		return true;

	} else {
		return false;
	}

#endif

}
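
Stripped of the SSR wrappers, the first branch above is the standard send/receive drain loop: send one frame (or NULL to start flushing), then collect packets until the encoder reports EAGAIN (needs more input) or EOF (flush finished). A generic sketch under those assumptions; the function name is made up.

extern "C" {
#include <libavcodec/avcodec.h>
}

// Returns true while the encoder accepts more input, false at end of stream.
// A real caller would distinguish genuine errors from the EOF case.
bool EncodeAndDrain(AVCodecContext* ctx, AVFrame* frame, AVPacket* pkt) {
	if(avcodec_send_frame(ctx, frame) < 0) // frame == NULL begins the flush
		return false;
	for( ; ; ) {
		int res = avcodec_receive_packet(ctx, pkt);
		if(res == AVERROR(EAGAIN))
			return true;  // no packet yet, feed the next frame
		if(res == AVERROR_EOF)
			return false; // flush complete
		if(res < 0)
			return false; // real error
		// ... hand the packet to the muxer here ...
		av_packet_unref(pkt);
	}
}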
Example #14
VideoEncoder::VideoEncoder(Muxer* muxer, const QString& codec_name, const std::vector<std::pair<QString, QString> >& codec_options,
						   unsigned int bit_rate, unsigned int width, unsigned int height, unsigned int frame_rate)
	: BaseEncoder(muxer) {
	try {

		m_bit_rate = bit_rate;
		m_width = width;
		m_height = height;
		m_frame_rate = frame_rate;

		m_opt_threads = std::max(1u, std::thread::hardware_concurrency());
		m_opt_minrate = (unsigned int) -1;
		m_opt_maxrate = (unsigned int) -1;
		m_opt_bufsize = (unsigned int) -1;

#if !SSR_USE_AVCODEC_PRIVATE_CRF
		m_opt_crf = (unsigned int) -1;
#endif
#if !SSR_USE_AVCODEC_PRIVATE_PRESET
		m_opt_preset = "";
#endif

		if(m_width == 0 || m_height == 0) {
			Logger::LogError("[VideoEncoder::Init] " + QObject::tr("Error: Width or height is zero!"));
			throw LibavException();
		}
		if(m_width > 10000 || m_height > 10000) {
			Logger::LogError("[VideoEncoder::Init] " + QObject::tr("Error: Width or height is too large, the maximum width and height is %1!").arg(10000));
			throw LibavException();
		}
		if(m_width % 2 != 0 || m_height % 2 != 0) {
			Logger::LogError("[VideoEncoder::Init] " + QObject::tr("Error: Width or height is not an even number!"));
			throw LibavException();
		}
		if(m_frame_rate == 0) {
			Logger::LogError("[VideoEncoder::Init] " + QObject::tr("Error: Frame rate it zero!"));
			throw LibavException();
		}

		// start the encoder
		AVDictionary *options = NULL;
		try {
			for(unsigned int i = 0; i < codec_options.size(); ++i) {
				if(codec_options[i].first == "threads")
					m_opt_threads = codec_options[i].second.toUInt();
				else if(codec_options[i].first == "minrate")
					m_opt_minrate = codec_options[i].second.toUInt() * 1024; // kbps
				else if(codec_options[i].first == "maxrate")
					m_opt_maxrate = codec_options[i].second.toUInt() * 1024; // kbps
				else if(codec_options[i].first == "bufsize")
					m_opt_bufsize = codec_options[i].second.toUInt() * 1024; // kbit
#if !SSR_USE_AVCODEC_PRIVATE_CRF
				else if(codec_options[i].first == "crf")
					m_opt_crf = codec_options[i].second.toUInt();
#endif
#if !SSR_USE_AVCODEC_PRIVATE_PRESET
				else if(codec_options[i].first == "preset")
					m_opt_preset = codec_options[i].second;
#endif
				else
					av_dict_set(&options, codec_options[i].first.toAscii().constData(), codec_options[i].second.toAscii().constData(), 0);
			}
			CreateCodec(codec_name, &options);
			av_dict_free(&options);
		} catch(...) {
			av_dict_free(&options);
			throw;
		}

#if !SSR_USE_AVCODEC_ENCODE_VIDEO2
		// allocate a temporary buffer
		// Apparently libav/ffmpeg completely ignores the size of the buffer, and if it's too small it just crashes.
		// Originally it was 256k, which is large enough for about 99.9% of the packets, but it still occasionally crashes.
		// So now I'm using a buffer that's always at least large enough to hold a 256k header and *two* completely uncompressed frames.
		// (one YUV frame takes w * h * 1.5 bytes)
		// Newer versions of libav/ffmpeg have deprecated avcodec_encode_video and added a new function which does the allocation
		// automatically, just like avcodec_encode_audio2, but that function isn't available in Ubuntu 12.04/12.10 yet.
		m_temp_buffer.resize(std::max<unsigned int>(FF_MIN_BUFFER_SIZE, 256 * 1024 + m_width * m_height * 3));
#endif

		GetMuxer()->RegisterEncoder(GetStreamIndex(), this);

	} catch(...) {
		Destruct();
		throw;
	}
}
Example #15
bool VideoEncoder::EncodeFrame(AVFrame* frame) {

#if SSR_USE_AVFRAME_FORMAT
	if(frame != NULL) {
		Q_ASSERT(frame->format == GetCodecContext()->pix_fmt);
	}
#endif

#if SSR_USE_AVCODEC_ENCODE_VIDEO2

	// allocate a packet
	std::unique_ptr<AVPacketWrapper> packet(new AVPacketWrapper());

	// encode the frame
	int got_packet;
	if(avcodec_encode_video2(GetCodecContext(), packet->GetPacket(), frame, &got_packet) < 0) {
		Logger::LogError("[VideoEncoder::EncodeFrame] " + QObject::tr("Error: Encoding of video frame failed!"));
		throw LibavException();
	}

	// do we have a packet?
	if(got_packet) {

		if(GetCodecContext()->coded_frame->key_frame)
			packet->GetPacket()->flags |= AV_PKT_FLAG_KEY;

		// send the packet to the muxer
		GetMuxer()->AddPacket(GetStreamIndex(), std::move(packet));
		return true;

	} else {
		return false;
	}

#else

	// encode the frame
	int bytes_encoded = avcodec_encode_video(GetCodecContext(), m_temp_buffer.data(), m_temp_buffer.size(), frame);
	if(bytes_encoded < 0) {
		Logger::LogError("[VideoEncoder::EncodeFrame] " + QObject::tr("Error: Encoding of video frame failed!"));
		throw LibavException();
	}

	// do we have a packet?
	if(bytes_encoded > 0) {

		// allocate a packet
		std::unique_ptr<AVPacketWrapper> packet(new AVPacketWrapper(bytes_encoded));

		// copy the data
		memcpy(packet->GetPacket()->data, m_temp_buffer.data(), bytes_encoded);

		// set the timestamp
		// note: pts will be rescaled and stream_index will be set by Muxer
		if(GetCodecContext()->coded_frame != NULL && GetCodecContext()->coded_frame->pts != (int64_t) AV_NOPTS_VALUE)
			packet->GetPacket()->pts = GetCodecContext()->coded_frame->pts;

		// set the keyframe flag
		if(GetCodecContext()->coded_frame->key_frame)
			packet->GetPacket()->flags |= AV_PKT_FLAG_KEY;

		// send the packet to the muxer
		GetMuxer()->AddPacket(GetStreamIndex(), std::move(packet));
		return true;

	} else {
		return false;
	}

#endif

}
Example #16
void Muxer::MuxerThread() {
	try {

		Logger::LogInfo("[Muxer::MuxerThread] " + Logger::tr("Muxer thread started."));

		double total_time = 0.0;

		// start muxing
		for( ; ; ) {

			// get a packet from a stream that isn't done yet
			std::unique_ptr<AVPacketWrapper> packet;
			unsigned int current_stream = 0, streams_done = 0;
			for(unsigned int i = 0; i < m_format_context->nb_streams; ++i) {
				StreamLock lock(&m_stream_data[i]);
				if(lock->m_packet_queue.empty()) {
					if(lock->m_is_done)
						++streams_done;
				} else {
					current_stream = i;
					packet = std::move(lock->m_packet_queue.front());
					lock->m_packet_queue.pop_front();
					break;
				}
			}

			// if all streams are done, we can stop
			if(streams_done == m_format_context->nb_streams) {
				break;
			}

			// if there is no packet, wait and try again later
			if(packet == NULL) {
				usleep(20000);
				continue;
			}

			// try to figure out the time (the exact value is not critical, it's only used for bitrate statistics)
			AVStream *stream = m_format_context->streams[current_stream];
			double packet_time = 0.0;
			if(packet->GetPacket()->dts != AV_NOPTS_VALUE)
				packet_time = (double) packet->GetPacket()->dts * ToDouble(stream->codec->time_base);
			else if(packet->GetPacket()->pts != AV_NOPTS_VALUE)
				packet_time = (double) packet->GetPacket()->pts * ToDouble(stream->codec->time_base);
			if(packet_time > total_time)
				total_time = packet_time;

			// prepare packet
			packet->GetPacket()->stream_index = current_stream;
#if SSR_USE_AV_PACKET_RESCALE_TS
			av_packet_rescale_ts(packet->GetPacket(), stream->codec->time_base, stream->time_base);
#else
			if(packet->GetPacket()->pts != (int64_t) AV_NOPTS_VALUE) {
				packet->GetPacket()->pts = av_rescale_q(packet->GetPacket()->pts, stream->codec->time_base, stream->time_base);
			}
			if(packet->GetPacket()->dts != (int64_t) AV_NOPTS_VALUE) {
				packet->GetPacket()->dts = av_rescale_q(packet->GetPacket()->dts, stream->codec->time_base, stream->time_base);
			}
#endif

			// write the packet (again, why does libav/ffmpeg call this a frame?)
			if(av_interleaved_write_frame(m_format_context, packet->GetPacket()) != 0) {
				Logger::LogError("[Muxer::MuxerThread] " + Logger::tr("Error: Can't write frame to muxer!"));
				throw LibavException();
			}

			// the data is now owned by libav/ffmpeg, so don't free it
			packet->SetFreeOnDestruct(false);

			// update the byte counter
			{
				SharedLock lock(&m_shared_data);
				lock->m_total_bytes = m_format_context->pb->pos + (m_format_context->pb->buf_ptr - m_format_context->pb->buffer);
				if(lock->m_stats_previous_time == NOPTS_DOUBLE) {
					lock->m_stats_previous_time = total_time;
					lock->m_stats_previous_bytes = lock->m_total_bytes;
				}
				double timedelta = total_time - lock->m_stats_previous_time;
				if(timedelta > 0.999999) {
					lock->m_stats_actual_bit_rate = (double) ((lock->m_total_bytes - lock->m_stats_previous_bytes) * 8) / timedelta;
					lock->m_stats_previous_time = total_time;
					lock->m_stats_previous_bytes = lock->m_total_bytes;
				}
			}

		}

		// tell the others that we're done
		m_is_done = true;

		Logger::LogInfo("[Muxer::MuxerThread] " + Logger::tr("Muxer thread stopped."));

	} catch(const std::exception& e) {
		m_error_occurred = true;
		Logger::LogError("[Muxer::MuxerThread] " + Logger::tr("Exception '%1' in muxer thread.").arg(e.what()));
	} catch(...) {
		m_error_occurred = true;
		Logger::LogError("[Muxer::MuxerThread] " + Logger::tr("Unknown exception in muxer thread."));
	}
}
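
The rescaling step converts timestamps from the codec time base to the stream time base chosen by the container. A worked example with illustrative values:

extern "C" {
#include <libavutil/mathematics.h>
}

// With a codec time base of 1/48000 (one tick per audio sample) and a stream
// time base of 1/1000 (milliseconds, as used by e.g. Matroska), a pts of
// 96000 samples equals 2.0 seconds, i.e. 2000 stream ticks.
int64_t RescaleExample() {
	AVRational codec_tb = {1, 48000};
	AVRational stream_tb = {1, 1000};
	return av_rescale_q(96000, codec_tb, stream_tb); // 2000
}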
Example #17
void Muxer::MuxerThread() {
	try {

		Logger::LogInfo("[Muxer::MuxerThread] " + Logger::tr("Muxer thread started."));

		// start muxing
		for( ; ; ) {

			// find the oldest stream that isn't done yet
			unsigned int oldest_stream = INVALID_STREAM;
			double oldest_pts = std::numeric_limits<double>::max();
			for(unsigned int i = 0; i < m_format_context->nb_streams; ++i) {
				StreamLock lock(&m_stream_data[i]);
				if(!lock->m_is_done || !lock->m_packet_queue.empty()) {
					double pts = ToDouble(m_format_context->streams[i]->pts) * ToDouble(m_format_context->streams[i]->time_base);
					if(pts < oldest_pts) {
						oldest_stream = i;
						oldest_pts = pts;
					}
				}
			}

			// if there are no packets left, we're done
			if(oldest_stream == INVALID_STREAM) {
				break;
			}

			// get the packet
			std::unique_ptr<AVPacketWrapper> packet;
			{
				StreamLock lock(&m_stream_data[oldest_stream]);
				if(!lock->m_packet_queue.empty()) {
					packet = std::move(lock->m_packet_queue.front());
					lock->m_packet_queue.pop_front();
				}
			}

			// if there is no packet, wait and try again later
			if(packet == NULL) {
				usleep(20000);
				continue;
			}

			// prepare packet
			AVStream *st = m_format_context->streams[oldest_stream];
			packet->GetPacket()->stream_index = oldest_stream;
			if(packet->GetPacket()->pts != (int64_t) AV_NOPTS_VALUE) {
				packet->GetPacket()->pts = av_rescale_q(packet->GetPacket()->pts, st->codec->time_base, st->time_base);
			}
			if(packet->GetPacket()->dts != (int64_t) AV_NOPTS_VALUE) {
				packet->GetPacket()->dts = av_rescale_q(packet->GetPacket()->dts, st->codec->time_base, st->time_base);
			}

			// write the packet (again, why does libav/ffmpeg call this a frame?)
			// The packet should already be interleaved now, but containers can have custom interleaving specifications,
			// so it's a good idea to call av_interleaved_write_frame anyway.
			if(av_interleaved_write_frame(m_format_context, packet->GetPacket()) != 0) {
				Logger::LogError("[Muxer::MuxerThread] " + Logger::tr("Error: Can't write frame to muxer!"));
				throw LibavException();
			}

			// the data is now owned by libav/ffmpeg, so don't free it
			packet->SetFreeOnDestruct(false);

			// update the byte counter
			{
				SharedLock lock(&m_shared_data);
				lock->m_total_bytes = m_format_context->pb->pos + (m_format_context->pb->buf_ptr - m_format_context->pb->buffer);
				if(lock->m_stats_previous_pts == NOPTS_DOUBLE) {
					lock->m_stats_previous_pts = oldest_pts;
					lock->m_stats_previous_bytes = lock->m_total_bytes;
				}
				double timedelta = oldest_pts - lock->m_stats_previous_pts;
				if(timedelta > 0.999999) {
					lock->m_stats_actual_bit_rate = (double) ((lock->m_total_bytes - lock->m_stats_previous_bytes) * 8) / timedelta;
					lock->m_stats_previous_pts = oldest_pts;
					lock->m_stats_previous_bytes = lock->m_total_bytes;
				}
			}

		}

		// tell the others that we're done
		m_is_done = true;

		Logger::LogInfo("[Muxer::MuxerThread] " + Logger::tr("Muxer thread stopped."));

	} catch(const std::exception& e) {
		m_error_occurred = true;
		Logger::LogError("[Muxer::MuxerThread] " + Logger::tr("Exception '%1' in muxer thread.").arg(e.what()));
	} catch(...) {
		m_error_occurred = true;
		Logger::LogError("[Muxer::MuxerThread] " + Logger::tr("Unknown exception in muxer thread."));
	}
}
Example #18
void BenchmarkConvert(unsigned int w, unsigned int h, PixelFormat in_format, PixelFormat out_format, const QString& in_format_name, const QString& out_format_name, NewImageFunc in_image, NewImageFunc out_image, ConvertFunc fallback
#if SSR_USE_X86_ASM
, ConvertFunc ssse3
#endif
) {

	std::mt19937 rng(12345);
#if SSR_USE_X86_ASM
	bool use_ssse3 = (CPUFeatures::HasMMX() && CPUFeatures::HasSSE() && CPUFeatures::HasSSE2() && CPUFeatures::HasSSE3() && CPUFeatures::HasSSSE3());
#endif

	// the queue needs to use enough memory to make sure that the CPU cache is flushed
	unsigned int pixels = w * h;
	unsigned int queue_size = 1 + 20000000 / pixels;
	unsigned int run_size = queue_size * 20;

	// create queue
	std::vector<std::unique_ptr<ImageGeneric> > queue_in(queue_size);
	std::vector<std::unique_ptr<ImageGeneric> > queue_out(queue_size);
	for(unsigned int i = 0; i < queue_size; ++i) {
		queue_in[i] = in_image(w, h, rng);
		queue_out[i] = out_image(w, h, rng);
	}

	// run test
	unsigned int time_swscale = 0, time_fallback = 0, time_ssse3 = 0;
	{
		SwsContext *sws = sws_getCachedContext(NULL,
											   w, h, in_format,
											   w, h, out_format,
											   SWS_BILINEAR, NULL, NULL, NULL);
		if(sws == NULL) {
			Logger::LogError("[BenchmarkScale] " + Logger::tr("Error: Can't get swscale context!", "Don't translate 'swscale'"));
			throw LibavException();
		}
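		// the last three arguments are brightness, contrast and saturation in 16.16 fixed
		// point, so 0, 1 << 16, 1 << 16 leaves all three at their neutral values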
		sws_setColorspaceDetails(sws,
								 sws_getCoefficients(SWS_CS_ITU709), 0,
								 sws_getCoefficients(SWS_CS_DEFAULT), 0,
								 0, 1 << 16, 1 << 16);
		int64_t t1 = hrt_time_micro();
		for(unsigned int i = 0; i < run_size / 2; ++i) {
			unsigned int ii = i % queue_size;
			sws_scale(sws, queue_in[ii]->m_data.data(), queue_in[ii]->m_stride.data(), 0, h, queue_out[ii]->m_data.data(), queue_out[ii]->m_stride.data());
		}
		int64_t t2 = hrt_time_micro();
		time_swscale = (t2 - t1) / (run_size / 2);
	}
	{
		int64_t t1 = hrt_time_micro();
		for(unsigned int i = 0; i < run_size; ++i) {
			unsigned int ii = i % queue_size;
			fallback(w, h, queue_in[ii]->m_data[0], queue_in[ii]->m_stride[0], queue_out[ii]->m_data.data(), queue_out[ii]->m_stride.data());
		}
		int64_t t2 = hrt_time_micro();
		time_fallback = (t2 - t1) / run_size;
	}
#if SSR_USE_X86_ASM
	if(use_ssse3) {
		int64_t t1 = hrt_time_micro();
		for(unsigned int i = 0; i < run_size; ++i) {
			unsigned int ii = i % queue_size;
			ssse3(w, h, queue_in[ii]->m_data[0], queue_in[ii]->m_stride[0], queue_out[ii]->m_data.data(), queue_out[ii]->m_stride.data());
		}
		int64_t t2 = hrt_time_micro();
		time_ssse3 = (t2 - t1) / run_size;
	}
#endif

	// print result
	QString size = QString("%1x%2").arg(w).arg(h);
	Logger::LogInfo("[BenchmarkConvert] " + Logger::tr("%1 %2 to %3 %4  |  SWScale %5 us  |  Fallback %6 us (%7%)  |  SSSE3 %8 us (%9%)")
					.arg(in_format_name, 6).arg(size, 9).arg(out_format_name, 6).arg(size, 9)
					.arg(time_swscale, 6)
					.arg(time_fallback, 6).arg(100 * time_fallback / time_swscale, 3)
					.arg(time_ssse3, 6).arg(100 * time_ssse3 / time_fallback, 3));

}
Example #19
bool AudioEncoder::EncodeFrame(AVFrame* frame) {

	if(frame != NULL) {
#if SSR_USE_AVFRAME_NB_SAMPLES
		assert((unsigned int) frame->nb_samples == GetFrameSize());
#endif
#if SSR_USE_AVFRAME_CHANNELS
		assert(frame->channels == GetStream()->codec->channels);
#endif
#if SSR_USE_AVFRAME_SAMPLE_RATE
		assert(frame->sample_rate == GetStream()->codec->sample_rate);
#endif
#if SSR_USE_AVFRAME_FORMAT
		assert(frame->format == GetStream()->codec->sample_fmt);
#endif
	}

#if SSR_USE_AVCODEC_ENCODE_AUDIO2

	// allocate a packet
	std::unique_ptr<AVPacketWrapper> packet(new AVPacketWrapper());

	// encode the frame
	int got_packet;
	if(avcodec_encode_audio2(GetStream()->codec, packet->GetPacket(), frame, &got_packet) < 0) {
		Logger::LogError("[AudioEncoder::EncodeFrame] " + Logger::tr("Error: Encoding of audio frame failed!"));
		throw LibavException();
	}

	// do we have a packet?
	if(got_packet) {

		// send the packet to the muxer
		GetMuxer()->AddPacket(GetStream()->index, std::move(packet));
		return true;

	} else {
		return false;
	}

#else

	// encode the frame
	short *data = (frame == NULL)? NULL : (short*) frame->data[0];
	int bytes_encoded = avcodec_encode_audio(GetStream()->codec, m_temp_buffer.data(), m_temp_buffer.size(), data);
	if(bytes_encoded < 0) {
		Logger::LogError("[AudioEncoder::EncodeFrame] " + Logger::tr("Error: Encoding of audio frame failed!"));
		throw LibavException();
	}

	// do we have a packet?
	if(bytes_encoded > 0) {

		// allocate a packet
		std::unique_ptr<AVPacketWrapper> packet(new AVPacketWrapper(bytes_encoded));

		// copy the data
		memcpy(packet->GetPacket()->data, m_temp_buffer.data(), bytes_encoded);

		// set the timestamp
		// note: pts will be rescaled and stream_index will be set by Muxer
		if(GetStream()->codec->coded_frame != NULL && GetStream()->codec->coded_frame->pts != (int64_t) AV_NOPTS_VALUE)
			packet->GetPacket()->pts = GetStream()->codec->coded_frame->pts;

		// send the packet to the muxer
		GetMuxer()->AddPacket(GetStream()->index, std::move(packet));
		return true;

	} else {
		return false;
	}

#endif

}
Example #20
void VideoEncoder::PrepareStream(AVStream* stream, AVCodec* codec, AVDictionary** options, const std::vector<std::pair<QString, QString> >& codec_options,
								 unsigned int bit_rate, unsigned int width, unsigned int height, unsigned int frame_rate) {

	if(width == 0 || height == 0) {
		Logger::LogError("[VideoEncoder::PrepareStream] " + Logger::tr("Error: Width or height is zero!"));
		throw LibavException();
	}
	if(width > 10000 || height > 10000) {
		Logger::LogError("[VideoEncoder::PrepareStream] " + Logger::tr("Error: Width or height is too large, the maximum width and height is %1!").arg(10000));
		throw LibavException();
	}
	if(width % 2 != 0 || height % 2 != 0) {
		Logger::LogError("[VideoEncoder::PrepareStream] " + Logger::tr("Error: Width or height is not an even number!"));
		throw LibavException();
	}
	if(frame_rate == 0) {
		Logger::LogError("[VideoEncoder::PrepareStream] " + Logger::tr("Error: Frame rate is zero!"));
		throw LibavException();
	}

	stream->codec->bit_rate = bit_rate;
	stream->codec->width = width;
	stream->codec->height = height;
	stream->codec->time_base.num = 1;
	stream->codec->time_base.den = frame_rate;
#if SSR_USE_AVSTREAM_TIME_BASE
	stream->time_base = stream->codec->time_base;
#endif
	stream->codec->pix_fmt = AV_PIX_FMT_NONE;
	for(unsigned int i = 0; i < SUPPORTED_PIXEL_FORMATS.size(); ++i) {
		if(AVCodecSupportsPixelFormat(codec, SUPPORTED_PIXEL_FORMATS[i].m_format)) {
			stream->codec->pix_fmt = SUPPORTED_PIXEL_FORMATS[i].m_format;
			if(SUPPORTED_PIXEL_FORMATS[i].m_is_yuv) {
				stream->codec->color_primaries = AVCOL_PRI_BT709;
				stream->codec->color_trc = AVCOL_TRC_BT709;
				stream->codec->colorspace = AVCOL_SPC_BT709;
				stream->codec->color_range = AVCOL_RANGE_MPEG;
				stream->codec->chroma_sample_location = AVCHROMA_LOC_CENTER;
			} else {
				stream->codec->colorspace = AVCOL_SPC_RGB;
			}
			break;
		}
	}
	if(stream->codec->pix_fmt == AV_PIX_FMT_NONE) {
		Logger::LogError("[VideoEncoder::PrepareStream] " + Logger::tr("Error: Encoder requires an unsupported pixel format!"));
		throw LibavException();
	}
	stream->codec->sample_aspect_ratio.num = 1;
	stream->codec->sample_aspect_ratio.den = 1;
	stream->sample_aspect_ratio = stream->codec->sample_aspect_ratio;
	stream->codec->thread_count = std::max(1, (int) std::thread::hardware_concurrency());

	for(unsigned int i = 0; i < codec_options.size(); ++i) {
		const QString &key = codec_options[i].first, &value = codec_options[i].second;
		if(key == "threads") {
			stream->codec->thread_count = ParseCodecOptionInt(key, value, 1, 100);
		} else if(key == "qscale") {
			stream->codec->flags |= CODEC_FLAG_QSCALE;
			stream->codec->global_quality = lrint(ParseCodecOptionDouble(key, value, -1.0e6, 1.0e6, FF_QP2LAMBDA));
		} else if(key == "minrate") {
			stream->codec->rc_min_rate = ParseCodecOptionInt(key, value, 1, 1000000, 1024); // kbps
		} else if(key == "maxrate") {
			stream->codec->rc_max_rate = ParseCodecOptionInt(key, value, 1, 1000000, 1024); // kbps
		} else if(key == "bufsize") {
			stream->codec->rc_buffer_size = ParseCodecOptionInt(key, value, 1, 1000000, 1024); // kbit
		} else if(key == "keyint") {
			stream->codec->gop_size = ParseCodecOptionInt(key, value, 1, 1000000);
#if !SSR_USE_AVCODEC_PRIVATE_CRF
		} else if(key == "crf") {
			stream->codec->crf = ParseCodecOptionInt(key, value, 0, 51);
#endif
#if !SSR_USE_AVCODEC_PRIVATE_PRESET
		} else if(key == "preset") {
			X264Preset(stream->codec, value.toUtf8().constData());
#endif
		} else {
			av_dict_set(options, key.toUtf8().constData(), value.toUtf8().constData(), 0);
		}
	}

}
Example #21
PageOutput::PageOutput(MainWindow* main_window)
	: QWidget(main_window->centralWidget()) {

	m_main_window = main_window;

	m_old_container = (enum_container) 0;
	m_old_container_av = 0;

	// main codecs
	// (initializer lists should use explicit types for Clang)
	m_containers = {
		ContainerData({"Matroska (MKV)", "matroska", QStringList({"mkv"}), tr("%1 files", "This appears in the file dialog, e.g. 'MP4 files'").arg("Matroska") + " (*.mkv)",
			{VIDEO_CODEC_H264, VIDEO_CODEC_VP8, VIDEO_CODEC_THEORA},
			{AUDIO_CODEC_VORBIS, AUDIO_CODEC_MP3, AUDIO_CODEC_AAC, AUDIO_CODEC_UNCOMPRESSED}}),
		ContainerData({"MP4", "mp4", QStringList({"mp4"}), tr("%1 files", "This appears in the file dialog, e.g. 'MP4 files'").arg("MP4") + " (*.mp4)",
			{VIDEO_CODEC_H264},
			{AUDIO_CODEC_VORBIS, AUDIO_CODEC_MP3, AUDIO_CODEC_AAC}}),
		ContainerData({"WebM", "webm", QStringList({"webm"}), tr("%1 files", "This appears in the file dialog, e.g. 'MP4 files'").arg("WebM") + " (*.webm)",
			{VIDEO_CODEC_VP8},
			{AUDIO_CODEC_VORBIS}}),
		ContainerData({"OGG", "ogg", QStringList({"ogg"}), tr("%1 files", "This appears in the file dialog, e.g. 'MP4 files'").arg("OGG") + " (*.ogg)",
			{VIDEO_CODEC_THEORA},
			{AUDIO_CODEC_VORBIS}}),
		ContainerData({tr("Other..."), "other", QStringList(), "", std::set<enum_video_codec>({}), std::set<enum_audio_codec>({})}),
	};
	m_video_codecs = {
		{"H.264"       , "libx264"  },
		{"VP8"         , "libvpx"   },
		{"Theora"      , "libtheora"},
		{tr("Other..."), "other"    },
	};
	m_audio_codecs = {
		{"Vorbis"          , "libvorbis"   },
		{"MP3"             , "libmp3lame"  },
		{"AAC"             , "libvo_aacenc"},
		{tr("Uncompressed"), "pcm_s16le"   },
		{tr("Other...")    , "other"       },
	};

	// alternative aac codec
	if(!AVCodecIsInstalled(m_audio_codecs[AUDIO_CODEC_AAC].avname)) {
		m_audio_codecs[AUDIO_CODEC_AAC].avname = "aac";
	}

	// load AV container list
	m_containers_av.clear();
	for(AVOutputFormat *format = av_oformat_next(NULL); format != NULL; format = av_oformat_next(format)) {
		if(format->video_codec == AV_CODEC_ID_NONE)
			continue;
		ContainerData c;
		c.name = format->long_name;
		c.avname = format->name;
		c.suffixes = QString(format->extensions).split(',', QString::SkipEmptyParts);
		if(c.suffixes.isEmpty()) {
			c.filter = "";
		} else {
			c.filter = tr("%1 files", "This appears in the file dialog, e.g. 'MP4 files'").arg(c.avname) + " (*." + c.suffixes[0];
			for(int i = 1; i < c.suffixes.size(); ++i) {
				c.suffixes[i] = c.suffixes[i].trimmed(); // needed because libav/ffmpeg isn't very consistent when they say 'comma-separated'
				c.filter += " *." + c.suffixes[i];
			}
			c.filter += ")";
		}
		m_containers_av.push_back(c);
	}
	std::sort(m_containers_av.begin(), m_containers_av.end());

	// load AV codec list
	m_video_codecs_av.clear();
	m_audio_codecs_av.clear();
	for(AVCodec *codec = av_codec_next(NULL); codec != NULL; codec = av_codec_next(codec)) {
		if(!av_codec_is_encoder(codec))
			continue;
		if(codec->type == AVMEDIA_TYPE_VIDEO && VideoEncoder::AVCodecIsSupported(codec->name)) {
			VideoCodecData c;
			c.name = codec->long_name;
			c.avname = codec->name;
			m_video_codecs_av.push_back(c);
		}
		if(codec->type == AVMEDIA_TYPE_AUDIO && AudioEncoder::AVCodecIsSupported(codec->name)) {
			AudioCodecData c;
			c.name = codec->long_name;
			c.avname = codec->name;
			m_audio_codecs_av.push_back(c);
		}
	}
	std::sort(m_video_codecs_av.begin(), m_video_codecs_av.end());
	std::sort(m_audio_codecs_av.begin(), m_audio_codecs_av.end());

	if(m_containers_av.empty()) {
		Logger::LogError("[PageOutput::PageOutput] " + tr("Error: Could not find any suitable container in libavformat!"));
		throw LibavException();
	}
	if(m_video_codecs_av.empty()) {
		Logger::LogError("[PageOutput::PageOutput] " + tr("Error: Could not find any suitable video codec in libavcodec!"));
		throw LibavException();
	}
	if(m_audio_codecs_av.empty()) {
		Logger::LogError("[PageOutput::PageOutput] " + tr("Error: Could not find any suitable audio codec in libavcodec!"));
		throw LibavException();
	}

	m_profile_box = new ProfileBox(this, "output-profiles", &LoadProfileSettingsCallback, &SaveProfileSettingsCallback, this);

	QGroupBox *groupbox_file = new QGroupBox(tr("File"), this);
	{
		QLabel *label_file = new QLabel(tr("Save as:"), groupbox_file);
		m_lineedit_file = new QLineEdit(groupbox_file);
		m_lineedit_file->setToolTip(tr("The recording will be saved to this location."));
		QPushButton *button_browse = new QPushButton(tr("Browse..."), groupbox_file);
		m_checkbox_separate_files = new QCheckBox(tr("Separate file per segment"), groupbox_file);
		m_checkbox_separate_files->setToolTip(tr("If checked, a separate video file will be created every time you pause and resume the recording.\n"
												 "If the original file name is 'test.mkv', the segments will be saved as 'test-YYYY-MM-DD_HH.MM.SS.mkv'."));
		QLabel *label_container = new QLabel(tr("Container:"), groupbox_file);
		m_combobox_container = new QComboBox(groupbox_file);
		for(unsigned int i = 0; i < CONTAINER_COUNT; ++i) {
			QString name = "\u200e" + m_containers[i].name + "\u200e";
			if(i != CONTAINER_OTHER && !AVFormatIsInstalled(m_containers[i].avname))
				name += " \u200e" + tr("(not installed)") + "\u200e";
			m_combobox_container->addItem(name);
		}
		m_combobox_container->setToolTip(tr("The container (file format) that will be used to save the recording.\n"
											"Note that not all codecs are supported by all containers, and that not all media players can read all file formats.\n"
											"- Matroska (MKV) supports all the codecs, but is less well-known.\n"
											"- MP4 is the most well-known format and will play on almost any modern media player, but supports only H.264 video\n"
											"   (and many media players only support AAC audio).\n"
											"- WebM is intended for embedding video into websites (with the HTML5 <video> tag). The format was created by Google.\n"
											"   WebM is supported by default in Firefox, Chrome and Opera, and plugins are available for Internet Explorer and Safari.\n"
											"   It supports only VP8 and Vorbis.\n"
											"- OGG supports only Theora and Vorbis."));
		m_label_container_av = new QLabel(tr("Container name:"), groupbox_file);
		m_combobox_container_av = new QComboBox(groupbox_file);
		for(unsigned int i = 0; i < m_containers_av.size(); ++i) {
			ContainerData &c = m_containers_av[i];
			m_combobox_container_av->addItem(c.avname);
		}
		m_combobox_container_av->setToolTip(tr("For advanced users. You can use any libav/ffmpeg format, but many of them are not useful or may not work."));

		connect(m_combobox_container, SIGNAL(activated(int)), this, SLOT(OnUpdateSuffixAndContainerFields()));
		connect(m_combobox_container_av, SIGNAL(activated(int)), this, SLOT(OnUpdateSuffixAndContainerFields()));
		connect(button_browse, SIGNAL(clicked()), this, SLOT(OnBrowse()));

		QGridLayout *layout = new QGridLayout(groupbox_file);
		layout->addWidget(label_file, 0, 0);
		layout->addWidget(m_lineedit_file, 0, 1);
		layout->addWidget(button_browse, 0, 2);
		layout->addWidget(m_checkbox_separate_files, 1, 0, 1, 3);
		layout->addWidget(label_container, 2, 0);
		layout->addWidget(m_combobox_container, 2, 1, 1, 2);
		layout->addWidget(m_label_container_av, 3, 0);
		layout->addWidget(m_combobox_container_av, 3, 1, 1, 2);
	}
	QGroupBox *groupbox_video = new QGroupBox(tr("Video"), this);
	{
		QLabel *label_video_codec = new QLabel(tr("Codec:"), groupbox_video);
		m_combobox_video_codec = new QComboBox(groupbox_video);
		for(unsigned int i = 0; i < VIDEO_CODEC_COUNT; ++i) {
			m_combobox_video_codec->addItem(m_video_codecs[i].name);
		}
		m_combobox_video_codec->setToolTip(tr("The codec that will be used to compress the video stream.\n"
											  "- H.264 (libx264) is by far the best codec - high quality and very fast.\n"
											  "- VP8 (libvpx) is quite good but also quite slow.\n"
											  "- Theora (libtheora) isn't really recommended because the quality isn't very good."));
		m_label_video_codec_av = new QLabel(tr("Codec name:"), groupbox_video);
		m_combobox_video_codec_av = new QComboBox(groupbox_video);
		for(unsigned int i = 0; i < m_video_codecs_av.size(); ++i) {
			VideoCodecData &c = m_video_codecs_av[i];
			m_combobox_video_codec_av->addItem(c.avname);
		}
		m_combobox_video_codec_av->setToolTip(tr("For advanced users. You can use any libav/ffmpeg video codec, but many of them are not useful or may not work."));
		m_label_video_kbit_rate = new QLabel(tr("Bit rate (in kbps):"), groupbox_video);
		m_lineedit_video_kbit_rate = new QLineEdit(groupbox_video);
		m_lineedit_video_kbit_rate->setToolTip(tr("The video bit rate (in kilobit per second). A higher value means a higher quality."
												  "\nIf you have no idea where to start, try 5000 and change it if needed."));
		m_label_h264_crf = new QLabel(tr("Constant rate factor:", "libx264 setting: don't translate this unless you can come up with something sensible"), groupbox_video);
		m_slider_h264_crf = new QSlider(Qt::Horizontal, groupbox_video);
		m_slider_h264_crf->setRange(0, 51);
		m_slider_h264_crf->setSingleStep(1);
		m_slider_h264_crf->setPageStep(5);
		m_slider_h264_crf->setToolTip(tr("This setting changes the video quality. A lower value means a higher quality.\n"
										 "The allowed range is 0-51 (0 means lossless, the default is 23)."));
		m_label_h264_crf_value = new QLabel(groupbox_video);
		m_label_h264_crf_value->setNum(m_slider_h264_crf->value());
		m_label_h264_crf_value->setAlignment(Qt::AlignRight | Qt::AlignVCenter);
		m_label_h264_crf_value->setMinimumWidth(QFontMetrics(m_label_h264_crf_value->font()).width("99") + 2);
		m_label_h264_preset = new QLabel(tr("Preset:", "libx264 setting: don't translate this unless you can come up with something sensible"), groupbox_video);
		m_combobox_h264_preset = new QComboBox(groupbox_video);
		for(unsigned int i = 0; i < H264_PRESET_COUNT; ++i) {
			m_combobox_h264_preset->addItem(EnumToString((enum_h264_preset) i));
		}
		m_combobox_h264_preset->setToolTip(tr("The encoding speed. A higher speed uses less CPU (making higher recording frame rates possible),\n"
											  "but results in larger files. The quality shouldn't be affected too much."));
		m_label_vp8_cpu_used = new QLabel(tr("CPU used:", "libvpx setting: don't translate this unless you can come up with something sensible"), groupbox_video);
		m_combobox_vp8_cpu_used = new QComboBox(groupbox_video);
		m_combobox_vp8_cpu_used->addItem("5 (" + tr("fastest") + ")");
		m_combobox_vp8_cpu_used->addItem("4");
		m_combobox_vp8_cpu_used->addItem("3");
		m_combobox_vp8_cpu_used->addItem("2");
		m_combobox_vp8_cpu_used->addItem("1");
		m_combobox_vp8_cpu_used->addItem("0 (" + tr("slowest") + ")");
		m_combobox_vp8_cpu_used->setToolTip(tr("The encoding speed. A higher value uses *less* CPU time. (I didn't choose the name, this is the name\n"
											   "used by the VP8 encoder). Higher values result in lower quality video, unless you increase the bit rate too."));
		m_label_video_options = new QLabel(tr("Custom options:"), groupbox_video);
		m_lineedit_video_options = new QLineEdit(groupbox_video);
		m_lineedit_video_options->setToolTip(tr("Custom codec options separated by commas (e.g. option1=value1,option2=value2,option3=value3)"));
		m_checkbox_video_allow_frame_skipping = new QCheckBox(tr("Allow frame skipping"), groupbox_video);
		m_checkbox_video_allow_frame_skipping->setToolTip(tr("If checked, the video encoder will be allowed to skip frames if the input frame rate is\n"
															 "lower than the output frame rate. If not checked, input frames will be duplicated to fill the holes.\n"
															 "This increases the file size and CPU usage, but reduces the latency for live streams in some cases.\n"
															 "It shouldn't affect the appearance of the video."));

		connect(m_combobox_video_codec, SIGNAL(activated(int)), this, SLOT(OnUpdateVideoCodecFields()));
		connect(m_slider_h264_crf, SIGNAL(valueChanged(int)), m_label_h264_crf_value, SLOT(setNum(int)));

		QGridLayout *layout = new QGridLayout(groupbox_video);
		layout->addWidget(label_video_codec, 0, 0);
		layout->addWidget(m_combobox_video_codec, 0, 1, 1, 2);
		layout->addWidget(m_label_video_codec_av, 1, 0);
		layout->addWidget(m_combobox_video_codec_av, 1, 1, 1, 2);
		layout->addWidget(m_label_video_kbit_rate, 2, 0);
		layout->addWidget(m_lineedit_video_kbit_rate, 2, 1, 1, 2);
		layout->addWidget(m_label_h264_crf, 3, 0);
		layout->addWidget(m_slider_h264_crf, 3, 1);
		layout->addWidget(m_label_h264_crf_value, 3, 2);
		layout->addWidget(m_label_h264_preset, 4, 0);
		layout->addWidget(m_combobox_h264_preset, 4, 1, 1, 2);
		layout->addWidget(m_label_vp8_cpu_used, 5, 0);
		layout->addWidget(m_combobox_vp8_cpu_used, 5, 1, 1, 2);
		layout->addWidget(m_label_video_options, 6, 0);
		layout->addWidget(m_lineedit_video_options, 6, 1, 1, 2);
		layout->addWidget(m_checkbox_video_allow_frame_skipping, 7, 0, 1, 3);
	}
	m_groupbox_audio = new QGroupBox(tr("Audio"));
	{
		QLabel *label_audio_codec = new QLabel(tr("Codec:"), m_groupbox_audio);
		m_combobox_audio_codec = new QComboBox(m_groupbox_audio);
		for(unsigned int i = 0; i < AUDIO_CODEC_COUNT; ++i) {
			m_combobox_audio_codec->addItem(m_audio_codecs[i].name);
		}
		m_combobox_audio_codec->setToolTip(tr("The codec that will be used to compress the audio stream. You shouldn't worry too much about\n"
											  "this, because the size of the audio data is usually negligible compared to the size of the video data.\n"
											  "And if you're only recording your own voice (i.e. no music), the quality won't matter that much anyway.\n"
											  "- Vorbis (libvorbis) is great, this is the recommended codec.\n"
											  "- MP3 (libmp3lame) is reasonably good.\n"
											  "- AAC is a good codec, but the implementations used here (libvo_aacenc or the experimental ffmpeg aac encoder)\n"
											  "   are pretty bad. Only use it if you have no other choice.\n"
											  "- Uncompressed will simply store the sound data without compressing it. The file will be quite large, but it's very fast."));
		m_label_audio_codec_av = new QLabel(tr("Codec name:"), m_groupbox_audio);
		m_combobox_audio_codec_av = new QComboBox(m_groupbox_audio);
		for(unsigned int i = 0; i < m_audio_codecs_av.size(); ++i) {
			AudioCodecData &c = m_audio_codecs_av[i];
			m_combobox_audio_codec_av->addItem(c.avname);
		}
		m_combobox_audio_codec_av->setToolTip(tr("For advanced users. You can use any libav/ffmpeg audio codec, but many of them are not useful or may not work."));
		m_label_audio_kbit_rate = new QLabel(tr("Bit rate (in kbps):"), m_groupbox_audio);
		m_lineedit_audio_kbit_rate = new QLineEdit(m_groupbox_audio);
		m_lineedit_audio_kbit_rate->setToolTip(tr("The audio bit rate (in kilobit per second). A higher value means a higher quality. The typical value is 128."));
		m_label_audio_options = new QLabel(tr("Custom options:"), m_groupbox_audio);
		m_lineedit_audio_options = new QLineEdit(m_groupbox_audio);
		m_lineedit_audio_options->setToolTip(tr("Custom codec options separated by commas (e.g. option1=value1,option2=value2,option3=value3)"));

		connect(m_combobox_audio_codec, SIGNAL(activated(int)), this, SLOT(OnUpdateAudioCodecFields()));

		QGridLayout *layout = new QGridLayout(m_groupbox_audio);
		layout->addWidget(label_audio_codec, 0, 0);
		layout->addWidget(m_combobox_audio_codec, 0, 1);
		layout->addWidget(m_label_audio_codec_av, 1, 0);
		layout->addWidget(m_combobox_audio_codec_av, 1, 1);
		layout->addWidget(m_label_audio_kbit_rate, 2, 0);
		layout->addWidget(m_lineedit_audio_kbit_rate, 2, 1);
		layout->addWidget(m_label_audio_options, 3, 0);
		layout->addWidget(m_lineedit_audio_options, 3, 1);
	}
	QPushButton *button_back = new QPushButton(g_icon_go_previous, tr("Back"), this);
	QPushButton *button_continue = new QPushButton(g_icon_go_next, tr("Continue"), this);

	connect(button_back, SIGNAL(clicked()), m_main_window, SLOT(GoPageInput()));
	connect(button_continue, SIGNAL(clicked()), this, SLOT(OnContinue()));

	QVBoxLayout *layout = new QVBoxLayout(this);
	layout->addWidget(m_profile_box);
	layout->addWidget(groupbox_file);
	layout->addWidget(groupbox_video);
	layout->addWidget(m_groupbox_audio);
	layout->addStretch();
	{
		QHBoxLayout *layout2 = new QHBoxLayout();
		layout->addLayout(layout2);
		layout2->addWidget(button_back);
		layout2->addWidget(button_continue);
	}

	OnUpdateContainerFields();
	OnUpdateVideoCodecFields();
	OnUpdateAudioCodecFields();

}
Example #22
void BenchmarkScale(unsigned int in_w, unsigned int in_h, unsigned int out_w, unsigned int out_h) {

	std::mt19937 rng(12345);
#if SSR_USE_X86_ASM
	bool use_ssse3 = (CPUFeatures::HasMMX() && CPUFeatures::HasSSE() && CPUFeatures::HasSSE2() && CPUFeatures::HasSSE3() && CPUFeatures::HasSSSE3());
#endif

	// the queue needs to use enough memory to make sure that the CPU cache is flushed
	unsigned int pixels = std::max(in_w * in_h, out_w * out_h);
	unsigned int queue_size = 1 + 20000000 / pixels;
	unsigned int run_size = queue_size * 20;

	// create queue
	std::vector<std::unique_ptr<ImageGeneric> > queue_in(queue_size);
	std::vector<std::unique_ptr<ImageGeneric> > queue_out(queue_size);
	for(unsigned int i = 0; i < queue_size; ++i) {
		queue_in[i] = NewImageBGRA(in_w, in_h, rng);
		queue_out[i] = NewImageBGRA(out_w, out_h, rng);
	}

	// run test
	unsigned int time_swscale = 0, time_fallback = 0, time_ssse3 = 0;
	{
		SwsContext *sws = sws_getCachedContext(NULL,
											   in_w, in_h, AV_PIX_FMT_BGRA,
											   out_w, out_h, AV_PIX_FMT_BGRA,
											   SWS_BILINEAR, NULL, NULL, NULL);
		if(sws == NULL) {
			Logger::LogError("[BenchmarkScale] " + Logger::tr("Error: Can't get swscale context!", "Don't translate 'swscale'"));
			throw LibavException();
		}
		sws_setColorspaceDetails(sws,
								 sws_getCoefficients(SWS_CS_ITU709), 0,
								 sws_getCoefficients(SWS_CS_DEFAULT), 0,
								 0, 1 << 16, 1 << 16);
		int64_t t1 = hrt_time_micro();
		for(unsigned int i = 0; i < run_size / 2; ++i) {
			unsigned int ii = i % queue_size;
			sws_scale(sws, queue_in[ii]->m_data.data(), queue_in[ii]->m_stride.data(), 0, in_h, queue_out[ii]->m_data.data(), queue_out[ii]->m_stride.data());
		}
		int64_t t2 = hrt_time_micro();
		time_swscale = (t2 - t1) / (run_size / 2);
	}
	{
		int64_t t1 = hrt_time_micro();
		for(unsigned int i = 0; i < run_size; ++i) {
			unsigned int ii = i % queue_size;
			Scale_BGRA_Fallback(in_w, in_h, queue_in[ii]->m_data[0], queue_in[ii]->m_stride[0],
								out_w, out_h, queue_out[ii]->m_data[0], queue_out[ii]->m_stride[0]);
		}
		int64_t t2 = hrt_time_micro();
		time_fallback = (t2 - t1) / run_size;
	}
#if SSR_USE_X86_ASM
	if(use_ssse3) {
		int64_t t1 = hrt_time_micro();
		for(unsigned int i = 0; i < run_size; ++i) {
			unsigned int ii = i % queue_size;
			Scale_BGRA_SSSE3(in_w, in_h, queue_in[ii]->m_data[0], queue_in[ii]->m_stride[0],
							 out_w, out_h, queue_out[ii]->m_data[0], queue_out[ii]->m_stride[0]);
		}
		int64_t t2 = hrt_time_micro();
		time_ssse3 = (t2 - t1) / run_size;
	}
#endif

	// print result
	QString in_size = QString("%1x%2").arg(in_w).arg(in_h);
	QString out_size = QString("%1x%2").arg(out_w).arg(out_h);
	Logger::LogInfo("[BenchmarkScale] " + Logger::tr("BGRA %1 to BGRA %2  |  SWScale %3 us  |  Fallback %4 us (%5%)  |  SSSE3 %6 us (%7%)")
					.arg(in_size, 9).arg(out_size, 9)
					.arg(time_swscale, 6)
					.arg(time_fallback, 6).arg(100 * time_fallback / time_swscale, 3)
					.arg(time_ssse3, 6).arg(100 * time_ssse3 / time_fallback, 3));

}
Example #23
bool VideoEncoder::EncodeFrame(AVFrameWrapper* frame) {

	if(frame != NULL) {
#if SSR_USE_AVFRAME_WIDTH_HEIGHT
		assert(frame->GetFrame()->width == GetCodecContext()->width);
		assert(frame->GetFrame()->height == GetCodecContext()->height);
#endif
#if SSR_USE_AVFRAME_FORMAT
		assert(frame->GetFrame()->format == GetCodecContext()->pix_fmt);
#endif
#if SSR_USE_AVFRAME_SAR
		assert(frame->GetFrame()->sample_aspect_ratio.num == GetCodecContext()->sample_aspect_ratio.num);
		assert(frame->GetFrame()->sample_aspect_ratio.den == GetCodecContext()->sample_aspect_ratio.den);
#endif
	}

#if SSR_USE_AVCODEC_SEND_RECEIVE

	// send a frame
	AVFrame *avframe = (frame == NULL)? NULL : frame->Release();
	try {
		if(avcodec_send_frame(GetCodecContext(), avframe) < 0) {
			Logger::LogError("[VideoEncoder::EncodeFrame] " + Logger::tr("Error: Sending of video frame failed!"));
			throw LibavException();
		}
	} catch(...) {
		av_frame_free(&avframe);
		throw;
	}
	av_frame_free(&avframe);

	// try to receive a packet
	for( ; ; ) {
		std::unique_ptr<AVPacketWrapper> packet(new AVPacketWrapper());
		int res = avcodec_receive_packet(GetCodecContext(), packet->GetPacket());
		if(res == 0) { // we have a packet, send the packet to the muxer
			GetMuxer()->AddPacket(GetStream()->index, std::move(packet));
			IncrementPacketCounter();
		} else if(res == AVERROR(EAGAIN)) { // we have no packet
			return true;
		} else if(res == AVERROR_EOF) { // this is the end of the stream
			return false;
		} else {
			Logger::LogError("[VideoEncoder::EncodeFrame] " + Logger::tr("Error: Receiving of video packet failed!"));
			throw LibavException();
		}
	}

#elif SSR_USE_AVCODEC_ENCODE_VIDEO2

	// allocate a packet
	std::unique_ptr<AVPacketWrapper> packet(new AVPacketWrapper());

	// encode the frame
	int got_packet;
	if(avcodec_encode_video2(GetCodecContext(), packet->GetPacket(), (frame == NULL)? NULL : frame->GetFrame(), &got_packet) < 0) {
		Logger::LogError("[VideoEncoder::EncodeFrame] " + Logger::tr("Error: Encoding of video frame failed!"));
		throw LibavException();
	}

	// do we have a packet?
	if(got_packet) {

		// send the packet to the muxer
		GetMuxer()->AddPacket(GetStream()->index, std::move(packet));
		IncrementPacketCounter();
		return true;

	} else {
		return false;
	}

#else

	// encode the frame
	int bytes_encoded = avcodec_encode_video(GetCodecContext(), m_temp_buffer.data(), m_temp_buffer.size(), (frame == NULL)? NULL : frame->GetFrame());
	if(bytes_encoded < 0) {
		Logger::LogError("[VideoEncoder::EncodeFrame] " + Logger::tr("Error: Encoding of video frame failed!"));
		throw LibavException();
	}

	// do we have a packet?
	if(bytes_encoded > 0) {

		// allocate a packet
		std::unique_ptr<AVPacketWrapper> packet(new AVPacketWrapper(bytes_encoded));

		// copy the data
		memcpy(packet->GetPacket()->data, m_temp_buffer.data(), bytes_encoded);

		// set the timestamp
		// note: pts will be rescaled and stream_index will be set by Muxer
		if(GetCodecContext()->coded_frame != NULL && GetCodecContext()->coded_frame->pts != (int64_t) AV_NOPTS_VALUE)
			packet->GetPacket()->pts = GetCodecContext()->coded_frame->pts;

		// set the keyframe flag
		if(GetCodecContext()->coded_frame->key_frame)
			packet->GetPacket()->flags |= AV_PKT_FLAG_KEY;

		// send the packet to the muxer
		GetMuxer()->AddPacket(GetStream()->index, std::move(packet));
		IncrementPacketCounter();
		return true;

	} else {
		return false;
	}

#endif

}