Code Example #1
// Resample audio and map channels (if needed)
void FrameMapper::ResampleMappedAudio(tr1::shared_ptr<Frame> frame, long int original_frame_number)
{
	// Init audio buffers / variables
	int total_frame_samples = 0;
	int channels_in_frame = frame->GetAudioChannelsCount();
	int sample_rate_in_frame = frame->SampleRate();
	int samples_in_frame = frame->GetAudioSamplesCount();
	ChannelLayout channel_layout_in_frame = frame->ChannelsLayout();

	AppendDebugMethod("FrameMapper::ResampleMappedAudio", "frame->number", frame->number, "original_frame_number", original_frame_number, "channels_in_frame", channels_in_frame, "samples_in_frame", samples_in_frame, "sample_rate_in_frame", sample_rate_in_frame, "", -1);

	// Get audio sample array, interleaved together (c1 c2 c1 c2 c1 c2)
	float* frame_samples_float = frame->GetInterleavedAudioSamples(sample_rate_in_frame, NULL, &samples_in_frame);

	// Calculate total samples
	total_frame_samples = samples_in_frame * channels_in_frame;

	// Create a new array (to hold all S16 audio samples for the current queued frames)
	int16_t* frame_samples = new int16_t[total_frame_samples];

	// Translate audio sample values back to 16 bit integers, clamping so a
	// full-scale (+1.0) sample cannot overflow int16_t
	for (int s = 0; s < total_frame_samples; s++)
	{
		int value = int(frame_samples_float[s] * (1 << 15));
		frame_samples[s] = (int16_t) (value > 32767 ? 32767 : (value < -32768 ? -32768 : value));
	}


	// Deallocate float array
	delete[] frame_samples_float;
	frame_samples_float = NULL;

	AppendDebugMethod("FrameMapper::ResampleMappedAudio (got sample data from frame)", "frame->number", frame->number, "total_frame_samples", total_frame_samples, "target channels", info.channels, "channels_in_frame", channels_in_frame, "target sample_rate", info.sample_rate, "samples_in_frame", samples_in_frame);


	// Create input frame (and allocate arrays)
	AVFrame *audio_frame = AV_ALLOCATE_FRAME();
	AV_RESET_FRAME(audio_frame);
	audio_frame->nb_samples = total_frame_samples / channels_in_frame;

	int error_code = avcodec_fill_audio_frame(audio_frame, channels_in_frame, AV_SAMPLE_FMT_S16, (uint8_t *) frame_samples,
			audio_frame->nb_samples * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) * channels_in_frame, 1);

	if (error_code < 0)
	{
		AppendDebugMethod("FrameMapper::ResampleMappedAudio ERROR [" + (string)av_err2str(error_code) + "]", "error_code", error_code, "", -1, "", -1, "", -1, "", -1, "", -1);
		throw ErrorEncodingVideo("Error while resampling audio in frame mapper", frame->number);
	}

	// Update total samples & input frame size (due to bigger or smaller data types)
	total_frame_samples = Frame::GetSamplesPerFrame(frame->number, target, info.sample_rate, info.channels);

	AppendDebugMethod("FrameMapper::ResampleMappedAudio (adjust # of samples)", "total_frame_samples", total_frame_samples, "info.sample_rate", info.sample_rate, "sample_rate_in_frame", sample_rate_in_frame, "info.channels", info.channels, "channels_in_frame", channels_in_frame, "original_frame_number", original_frame_number);

	// Create output frame (and allocate arrays)
	AVFrame *audio_converted = AV_ALLOCATE_FRAME();
	AV_RESET_FRAME(audio_converted);
	audio_converted->nb_samples = total_frame_samples;
	av_samples_alloc(audio_converted->data, audio_converted->linesize, info.channels, total_frame_samples, AV_SAMPLE_FMT_S16, 0);

	AppendDebugMethod("FrameMapper::ResampleMappedAudio (preparing for resample)", "in_sample_fmt", AV_SAMPLE_FMT_S16, "out_sample_fmt", AV_SAMPLE_FMT_S16, "in_sample_rate", sample_rate_in_frame, "out_sample_rate", info.sample_rate, "in_channels", channels_in_frame, "out_channels", info.channels);

	int nb_samples = 0;
	// Force the audio resampling to happen in order (1st thread to last thread), so the waveform
	// is smooth and continuous.
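	// (Note: '#pragma omp ordered' only takes effect inside an enclosing OpenMP
	// 'for' loop declared with the 'ordered' clause, which this method is
	// presumably called from.)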
	#pragma omp ordered
	{
		// setup resample context
		if (!avr) {
			avr = avresample_alloc_context();
			av_opt_set_int(avr,  "in_channel_layout", channel_layout_in_frame, 0);
			av_opt_set_int(avr, "out_channel_layout", info.channel_layout, 0);
			av_opt_set_int(avr,  "in_sample_fmt",     AV_SAMPLE_FMT_S16,     0);
			av_opt_set_int(avr, "out_sample_fmt",     AV_SAMPLE_FMT_S16,     0);
			av_opt_set_int(avr,  "in_sample_rate",    sample_rate_in_frame,    0);
			av_opt_set_int(avr, "out_sample_rate",    info.sample_rate,    0);
			av_opt_set_int(avr,  "in_channels",       channels_in_frame,    0);
			av_opt_set_int(avr, "out_channels",       info.channels,    0);
			avresample_open(avr);
		}

		// Convert audio samples
		nb_samples = avresample_convert(avr, 	// audio resample context
				audio_converted->data, 			// output data pointers
				audio_converted->linesize[0], 	// output plane size, in bytes. (0 if unknown)
				audio_converted->nb_samples,	// maximum number of samples that the output buffer can hold
				audio_frame->data,				// input data pointers
				audio_frame->linesize[0],		// input plane size, in bytes (0 if unknown)
				audio_frame->nb_samples);		// number of input samples to convert
	}

	// Create a new array (to hold all resampled S16 audio samples)
	int16_t* resampled_samples = new int16_t[(nb_samples * info.channels)];

	// Copy audio samples over original samples
	memcpy(resampled_samples, audio_converted->data[0], (nb_samples * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) * info.channels));

	// Free frames. The input buffer came from new[], so release it with delete[]
	// (free/av_free would mismatch the allocator and can crash on some platforms)
	delete[] frame_samples;
	frame_samples = NULL;
	AV_FREE_FRAME(&audio_frame);
	av_free(audio_converted->data[0]);
	AV_FREE_FRAME(&audio_converted);

	// Resize the frame to hold the right # of channels and samples
	int channel_buffer_size = nb_samples;
	frame->ResizeAudio(info.channels, channel_buffer_size, info.sample_rate, info.channel_layout);

	AppendDebugMethod("FrameMapper::ResampleMappedAudio (Audio successfully resampled)", "nb_samples", nb_samples, "total_frame_samples", total_frame_samples, "info.sample_rate", info.sample_rate, "channels_in_frame", channels_in_frame, "info.channels", info.channels, "info.channel_layout", info.channel_layout);

	// Array of floats (to hold samples for each channel)
	float *channel_buffer = new float[channel_buffer_size];

	// Divide audio into channels. Loop through each channel
	for (int channel_filter = 0; channel_filter < info.channels; channel_filter++)
	{
		// Init array
		for (int z = 0; z < channel_buffer_size; z++)
			channel_buffer[z] = 0.0f;

		// Loop through all samples and add them to our Frame based on channel.
		// Toggle through each channel number, since channel data is stored like (left right left right)
		int channel = 0;
		int position = 0;
		for (int sample = 0; sample < (nb_samples * info.channels); sample++)
		{
			// Only add samples for current channel
			if (channel_filter == channel)
			{
				// Add sample (convert from (-32768 to 32768)  to (-1.0 to 1.0))
				channel_buffer[position] = resampled_samples[sample] * (1.0f / (1 << 15));

				// Increment audio position
				position++;
			}

			// Move to the next channel, wrapping back to the first
			channel = (channel + 1) % info.channels;
		}

		// Add samples to frame for this channel
		frame->AddAudio(true, channel_filter, 0, channel_buffer, position, 1.0f);

		AppendDebugMethod("FrameMapper::ResampleMappedAudio (Add audio to channel)", "number of samples", position, "channel_filter", channel_filter, "", -1, "", -1, "", -1, "", -1);
	}

	// Update frame's audio meta data
	frame->SampleRate(info.sample_rate);
	frame->ChannelsLayout(info.channel_layout);

	// clear channel buffer
	delete[] channel_buffer;
	channel_buffer = NULL;

	// Delete arrays
	delete[] resampled_samples;
	resampled_samples = NULL;
}
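The conversions that bookend the resample above -- floats quantized to S16 before avresample_convert, then S16 split back into per-channel floats -- can be isolated from FFmpeg entirely. Below is a minimal, self-contained sketch of those two steps; FloatToS16 and Deinterleave are hypothetical helper names, not part of libopenshot:

#include <algorithm>
#include <cstdint>
#include <vector>

// Quantize float samples (-1.0..1.0) to signed 16-bit. The clamp matters:
// a full-scale sample of exactly +1.0 scaled by 32768 does not fit in int16_t.
std::vector<int16_t> FloatToS16(const std::vector<float>& in)
{
	std::vector<int16_t> out(in.size());
	for (size_t s = 0; s < in.size(); s++)
	{
		int value = int(in[s] * 32768.0f);
		out[s] = (int16_t) std::max(-32768, std::min(32767, value));
	}
	return out;
}

// Split interleaved samples (c1 c2 c1 c2 ...) into one float buffer per
// channel, scaling back to -1.0..1.0 -- the same walk the channel_filter
// loop above performs with raw arrays.
std::vector<std::vector<float>> Deinterleave(const std::vector<int16_t>& in, int channels)
{
	std::vector<std::vector<float>> out(channels);
	for (size_t s = 0; s < in.size(); s++)
		out[s % channels].push_back(in[s] * (1.0f / 32768.0f));
	return out;
}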
Code Example #2
File: Timeline.cpp Project: nwgat/libopenshot
// Process a new layer of video or audio
void Timeline::add_layer(tr1::shared_ptr<Frame> new_frame, Clip* source_clip, long int clip_frame_number, long int timeline_frame_number, bool is_top_clip)
{
	// Get the clip's frame & image
	tr1::shared_ptr<Frame> source_frame = GetOrCreateFrame(source_clip, clip_frame_number);

	// No frame found... so bail
	if (!source_frame)
		return;

	// Debug output
	AppendDebugMethod("Timeline::add_layer", "new_frame->number", new_frame->number, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number, "", -1, "", -1, "", -1);

	/* REPLACE IMAGE WITH WAVEFORM IMAGE (IF NEEDED) */
	if (source_clip->Waveform())
	{
		// Debug output
		AppendDebugMethod("Timeline::add_layer (Generate Waveform Image)", "source_frame->number", source_frame->number, "source_clip->Waveform()", source_clip->Waveform(), "clip_frame_number", clip_frame_number, "", -1, "", -1, "", -1);

		// Get the color of the waveform
		int red = source_clip->wave_color.red.GetInt(clip_frame_number);
		int green = source_clip->wave_color.green.GetInt(clip_frame_number);
		int blue = source_clip->wave_color.blue.GetInt(clip_frame_number);
		int alpha = source_clip->wave_color.alpha.GetInt(clip_frame_number);

		// Generate Waveform Dynamically (the size of the timeline)
		tr1::shared_ptr<QImage> source_image = source_frame->GetWaveform(info.width, info.height, red, green, blue, alpha);
		source_frame->AddImage(source_image);
	}

	/* Apply effects to the source frame (if any). If multiple clips are overlapping, only process the
	 * effects on the top clip. */
	if (is_top_clip)
		source_frame = apply_effects(source_frame, timeline_frame_number, source_clip->Layer());

	// Declare an image to hold the source frame's image
	tr1::shared_ptr<QImage> source_image;

	/* COPY AUDIO - with correct volume */
	if (source_clip->Reader()->info.has_audio) {

		// Debug output
		AppendDebugMethod("Timeline::add_layer (Copy Audio)", "source_clip->Reader()->info.has_audio", source_clip->Reader()->info.has_audio, "source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(), "info.channels", info.channels, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number, "", -1);

		if (source_frame->GetAudioChannelsCount() == info.channels)
			for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++)
			{
				float initial_volume = 1.0f;
				float previous_volume = source_clip->volume.GetValue(clip_frame_number - 1); // previous frame's percentage of volume (0 to 1)
				float volume = source_clip->volume.GetValue(clip_frame_number); // percentage of volume (0 to 1)

				// If no ramp is needed, set initial volume = clip's volume;
				// otherwise apply a gain ramp from the previous volume to the current one
				if (isEqual(previous_volume, volume))
					initial_volume = volume;
				else
					source_frame->ApplyGainRamp(channel, 0, source_frame->GetAudioSamplesCount(), previous_volume, volume);

				// TODO: Improve FrameMapper (or Timeline) to always get the correct number of samples per frame.
				// Currently, the ResampleContext sometimes leaves behind a few samples for the next call, and the
				// number of samples returned is variable... and does not match the number expected.
				// This is a crude solution at best. =)
				if (new_frame->GetAudioSamplesCount() != source_frame->GetAudioSamplesCount())
					// Force timeline frame to match the source frame
					new_frame->ResizeAudio(info.channels, source_frame->GetAudioSamplesCount(), info.sample_rate, info.channel_layout);

				// Copy audio samples (and set initial volume). Mix samples with existing audio samples. The gains
				// are added together, so be sure to set the gains correctly, or the sum will exceed 1.0 and the audio will distort.
				new_frame->AddAudio(false, channel, 0, source_frame->GetAudioSamples(channel), source_frame->GetAudioSamplesCount(), initial_volume);

			}
		else
			// Debug output
			AppendDebugMethod("Timeline::add_layer (No Audio Copied - Wrong # of Channels)", "source_clip->Reader()->info.has_audio", source_clip->Reader()->info.has_audio, "source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(), "info.channels", info.channels, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number, "", -1);

	}

	// Skip out if only an audio frame
	if (!source_clip->Waveform() && !source_clip->Reader()->info.has_video)
		// Skip the rest of the image processing for performance reasons
		return;

	// Debug output
	AppendDebugMethod("Timeline::add_layer (Get Source Image)", "source_frame->number", source_frame->number, "source_clip->Waveform()", source_clip->Waveform(), "clip_frame_number", clip_frame_number, "", -1, "", -1, "", -1);

	// Get actual frame image data
	source_image = source_frame->GetImage();

	// Get some basic image properties
	int source_width = source_image->width();
	int source_height = source_image->height();

	/* ALPHA & OPACITY */
	if (source_clip->alpha.GetValue(clip_frame_number) != 1.0)
	{
		float alpha = source_clip->alpha.GetValue(clip_frame_number);

		// Get source image's pixels
		unsigned char *pixels = (unsigned char *) source_image->bits();

		// Loop through pixels, scaling the alpha byte (the 4th byte of each pixel)
		for (int pixel = 0, byte_index = 0; pixel < source_image->width() * source_image->height(); pixel++, byte_index += 4)
			// Apply alpha to pixel
			pixels[byte_index + 3] *= alpha;

		// Debug output
		AppendDebugMethod("Timeline::add_layer (Set Alpha & Opacity)", "alpha", alpha, "source_frame->number", source_frame->number, "clip_frame_number", clip_frame_number, "", -1, "", -1, "", -1);
	}

	/* RESIZE SOURCE IMAGE - based on scale type */
	switch (source_clip->scale)
	{
	case (SCALE_FIT):
		// keep aspect ratio
		source_image = tr1::shared_ptr<QImage>(new QImage(source_image->scaled(info.width, info.height, Qt::KeepAspectRatio, Qt::SmoothTransformation)));
		source_width = source_image->width();
		source_height = source_image->height();

		// Debug output
		AppendDebugMethod("Timeline::add_layer (Scale: SCALE_FIT)", "source_frame->number", source_frame->number, "source_width", source_width, "source_height", source_height, "", -1, "", -1, "", -1);
		break;

	case (SCALE_STRETCH):
		// ignore aspect ratio
		source_image = tr1::shared_ptr<QImage>(new QImage(source_image->scaled(info.width, info.height, Qt::IgnoreAspectRatio, Qt::SmoothTransformation)));
		source_width = source_image->width();
		source_height = source_image->height();

		// Debug output
		AppendDebugMethod("Timeline::add_layer (Scale: SCALE_STRETCH)", "source_frame->number", source_frame->number, "source_width", source_width, "source_height", source_height, "", -1, "", -1, "", -1);
		break;

	case (SCALE_CROP):
	{
		QSize width_size(info.width, round(info.width / (float(source_width) / float(source_height))));
		QSize height_size(round(info.height / (float(source_height) / float(source_width))), info.height);

		// respect aspect ratio
		if (width_size.width() >= info.width && width_size.height() >= info.height)
			source_image = tr1::shared_ptr<QImage>(new QImage(source_image->scaled(width_size.width(), width_size.height(), Qt::KeepAspectRatio, Qt::SmoothTransformation)));
		else
			// height is larger, so resize to it
			source_image = tr1::shared_ptr<QImage>(new QImage(source_image->scaled(height_size.width(), height_size.height(), Qt::KeepAspectRatio, Qt::SmoothTransformation)));
		source_width = source_image->width();
		source_height = source_image->height();

		// Debug output
		AppendDebugMethod("Timeline::add_layer (Scale: SCALE_CROP)", "source_frame->number", source_frame->number, "source_width", source_width, "source_height", source_height, "", -1, "", -1, "", -1);
		break;
	}
	}

	/* GRAVITY LOCATION - Initialize X & Y to the correct values (before applying location curves) */
	float x = 0.0; // left
	float y = 0.0; // top

	// Adjust size for scale x and scale y
	float sx = source_clip->scale_x.GetValue(clip_frame_number); // percentage X scale
	float sy = source_clip->scale_y.GetValue(clip_frame_number); // percentage Y scale
	float scaled_source_width = source_width * sx;
	float scaled_source_height = source_height * sy;

	switch (source_clip->gravity)
	{
	case (GRAVITY_TOP):
		x = (info.width - scaled_source_width) / 2.0; // center
		break;
	case (GRAVITY_TOP_RIGHT):
		x = info.width - scaled_source_width; // right
		break;
	case (GRAVITY_LEFT):
		y = (info.height - scaled_source_height) / 2.0; // center
		break;
	case (GRAVITY_CENTER):
		x = (info.width - scaled_source_width) / 2.0; // center
		y = (info.height - scaled_source_height) / 2.0; // center
		break;
	case (GRAVITY_RIGHT):
		x = info.width - scaled_source_width; // right
		y = (info.height - scaled_source_height) / 2.0; // center
		break;
	case (GRAVITY_BOTTOM_LEFT):
		y = (info.height - scaled_source_height); // bottom
		break;
	case (GRAVITY_BOTTOM):
		x = (info.width - scaled_source_width) / 2.0; // center
		y = (info.height - scaled_source_height); // bottom
		break;
	case (GRAVITY_BOTTOM_RIGHT):
		x = info.width - scaled_source_width; // right
		y = (info.height - scaled_source_height); // bottom
		break;
	}

	// Debug output
	AppendDebugMethod("Timeline::add_layer (Gravity)", "source_frame->number", source_frame->number, "source_clip->gravity", source_clip->gravity, "info.width", info.width, "source_width", source_width, "info.height", info.height, "source_height", source_height);

	/* LOCATION, ROTATION, AND SCALE */
	float r = source_clip->rotation.GetValue(clip_frame_number); // rotate in degrees
	x += (info.width * source_clip->location_x.GetValue(clip_frame_number)); // move in percentage of final width
	y += (info.height * source_clip->location_y.GetValue(clip_frame_number)); // move in percentage of final height
	bool is_x_animated = source_clip->location_x.Points.size() > 1;
	bool is_y_animated = source_clip->location_y.Points.size() > 1;

	int offset_x = -1;
	int offset_y = -1;
	bool transformed = false;
	QTransform transform;
	if ((!isEqual(x, 0) || !isEqual(y, 0)) && (isEqual(r, 0) && isEqual(sx, 1) && isEqual(sy, 1) && !is_x_animated && !is_y_animated))
	{
		// SIMPLE OFFSET
		AppendDebugMethod("Timeline::add_layer (Transform: SIMPLE)", "source_frame->number", source_frame->number, "x", x, "y", y, "r", r, "sx", sx, "sy", sy);

		// If only X and Y are different, and no animation is being used (just set the offset for speed)
		transformed = true;

		// Set QTransform
		transform.translate(x, y);

	} else if (!isEqual(r, 0) || !isEqual(x, 0) || !isEqual(y, 0) || !isEqual(sx, 1) || !isEqual(sy, 1))
	{
		// COMPLEX DISTORTION
		AppendDebugMethod("Timeline::add_layer (Transform: COMPLEX)", "source_frame->number", source_frame->number, "x", x, "y", y, "r", r, "sx", sx, "sy", sy);

		// Use the QTransform object, which can be very CPU intensive
		transformed = true;

		// Set QTransform
		if (!isEqual(r, 0)) {
			// ROTATE CLIP
			float origin_x = x + (source_width / 2.0);
			float origin_y = y + (source_height / 2.0);
			transform.translate(origin_x, origin_y);
			transform.rotate(r);
			transform.translate(-origin_x,-origin_y);
		}

		// Set QTransform
		if (!isEqual(x, 0) || !isEqual(y, 0)) {
			// TRANSLATE/MOVE CLIP
			transform.translate(x, y);
		}

		if (!isEqual(sx, 1) || !isEqual(sy, 1)) {
			// SCALE CLIP
			transform.scale(sx, sy);
		}

		// Debug output
		AppendDebugMethod("Timeline::add_layer (Transform: COMPLEX: Completed ScaleRotateTranslateDistortion)", "source_frame->number", source_frame->number, "x", x, "y", y, "r", r, "sx", sx, "sy", sy);
	}

	// Debug output
	AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Prepare)", "source_frame->number", source_frame->number, "offset_x", offset_x, "offset_y", offset_y, "new_frame->GetImage()->width()", new_frame->GetImage()->width(), "transformed", transformed, "", -1);

	/* COMPOSITE SOURCE IMAGE (LAYER) ONTO FINAL IMAGE */
	tr1::shared_ptr<QImage> new_image = new_frame->GetImage();

	// Load timeline's new frame image into a QPainter
	QPainter painter(new_image.get());
	painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);

	// Apply transform (translate, rotate, scale)... if any
	if (transformed)
		painter.setTransform(transform);

	// Composite a new layer onto the image
	painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
	painter.drawImage(0, 0, *source_image);
	painter.end();

	// Debug output
	AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Completed)", "source_frame->number", source_frame->number, "offset_x", offset_x, "offset_y", offset_y, "new_frame->GetImage()->width()", new_frame->GetImage()->width(), "transformed", transformed, "", -1);
}
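The gravity switch above reduces to picking, per axis, one of three offsets: 0 (left/top), (canvas - source) / 2 (centered), or canvas - source (right/bottom). Here is a standalone sketch of that placement rule; the Gravity enum and Offset struct are simplified stand-ins for the real libopenshot types:

#include <cstdio>

enum Gravity { GRAVITY_TOP_LEFT, GRAVITY_TOP, GRAVITY_TOP_RIGHT, GRAVITY_LEFT, GRAVITY_CENTER,
               GRAVITY_RIGHT, GRAVITY_BOTTOM_LEFT, GRAVITY_BOTTOM, GRAVITY_BOTTOM_RIGHT };

struct Offset { float x; float y; };

// Compute the top-left corner of a (scaled) source placed on the timeline canvas
Offset GravityOffset(Gravity g, float canvas_w, float canvas_h, float src_w, float src_h)
{
	Offset o = { 0.0f, 0.0f }; // default: top-left

	// Horizontal: center or right-align (otherwise stays at 0 / left)
	if (g == GRAVITY_TOP || g == GRAVITY_CENTER || g == GRAVITY_BOTTOM)
		o.x = (canvas_w - src_w) / 2.0f;
	else if (g == GRAVITY_TOP_RIGHT || g == GRAVITY_RIGHT || g == GRAVITY_BOTTOM_RIGHT)
		o.x = canvas_w - src_w;

	// Vertical: center or bottom-align (otherwise stays at 0 / top)
	if (g == GRAVITY_LEFT || g == GRAVITY_CENTER || g == GRAVITY_RIGHT)
		o.y = (canvas_h - src_h) / 2.0f;
	else if (g == GRAVITY_BOTTOM_LEFT || g == GRAVITY_BOTTOM || g == GRAVITY_BOTTOM_RIGHT)
		o.y = canvas_h - src_h;

	return o;
}

int main()
{
	// A 640x360 clip centered on a 1920x1080 canvas lands at (640, 360)
	Offset o = GravityOffset(GRAVITY_CENTER, 1920, 1080, 640, 360);
	printf("x=%.0f y=%.0f\n", o.x, o.y);
	return 0;
}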