Exemplo n.º 1
0
// Receives one captured video frame, rate-limits it against the preview frame
// rate, scales it to the preview widget size (BGRA), and publishes the result
// for the GUI thread via the shared data + NeedsUpdate() signal.
// 'timestamp' is in microseconds (1000000 / m_frame_rate is the frame period).
// NOTE: the original code carried a stale Q_UNUSED(timestamp) even though the
// timestamp is used below for frame pacing; it has been removed.
void VideoPreviewer::ReadVideoFrame(unsigned int width, unsigned int height, const uint8_t* data, int stride, AVPixelFormat format, int64_t timestamp) {

	QSize image_size;
	{
		SharedLock lock(&m_shared_data);

		// check the timestamp: start the frame clock on the first frame,
		// otherwise drop frames that arrive faster than the preview frame rate
		if(lock->m_next_frame_time == SINK_TIMESTAMP_ASAP) {
			lock->m_next_frame_time = timestamp + 1000000 / lock->m_frame_rate;
		} else {
			if(timestamp < lock->m_next_frame_time - 1000000 / lock->m_frame_rate)
				return; // too early, skip this frame
			lock->m_next_frame_time = std::max(lock->m_next_frame_time + 1000000 / lock->m_frame_rate, timestamp);
		}

		// don't do anything if the preview window is invisible
		if(!lock->m_is_visible)
			return;

		// check the size (the scaler can't handle sizes below 2)
		if(width < 2 || height < 2 || lock->m_widget_size.width() < 2 || lock->m_widget_size.height() < 2)
			return;

		// calculate the scaled size
		lock->m_source_size = QSize(width, height);
		image_size = CalculateScaledSize(lock->m_source_size, lock->m_widget_size);

	}
	// note: the scaling below intentionally runs without holding the lock

	// allocate the image (BGRA = 4 bytes per pixel, rows aligned to 16 bytes)
	int image_stride = grow_align16(image_size.width() * 4);
	std::shared_ptr<TempBuffer<uint8_t> > image_buffer = std::make_shared<TempBuffer<uint8_t> >();
	image_buffer->Alloc(image_stride * image_size.height());
	uint8_t *image_data = image_buffer->GetData();

	// scale the image
	m_fast_scaler.Scale(width, height, format, &data, &stride,
						image_size.width(), image_size.height(), AV_PIX_FMT_BGRA, &image_data, &image_stride);

	// set the alpha channel to 0xff (just to be sure)
	// Some applications (e.g. firefox) generate alpha values that are not 0xff.
	// I'm not sure whether Qt cares about this, apparently Qt 4.8 with the 'native' back-end doesn't,
	// but I'm not sure about the other back-ends.
	/*for(int y = 0; y < image_size.height(); ++y) {
		uint8_t *row = image_data + image_stride * y;
		for(int x = 0; x < image_size.width(); ++x) {
			row[x * 4 + 3] = 0xff; // third byte is alpha because we're little-endian
		}
	}*/

	// store the image; moving leaves image_buffer empty, so the old
	// 'image_buffer.reset()' that followed the move was redundant and is gone
	SharedLock lock(&m_shared_data);
	lock->m_image_buffer = std::move(image_buffer);
	lock->m_image_stride = image_stride;
	lock->m_image_size = image_size;

	emit NeedsUpdate();

}
Exemplo n.º 2
0
	// Builds a multi-planar test image: one stride/row-count pair per plane.
	// All planes share a single buffer (16-byte aligned strides), and every
	// byte is filled with pseudo-random data from 'rng'.
	ImageGeneric(const std::vector<unsigned int>& stride, const std::vector<unsigned int>& rows, std::mt19937& rng) {

		// one stride entry is required per plane
		assert(stride.size() == rows.size());
		unsigned int num_planes = stride.size();
		m_data.resize(num_planes);
		m_stride.resize(num_planes);

		// align each stride to 16 bytes and accumulate the total buffer size
		size_t total_bytes = 0;
		for(unsigned int plane = 0; plane < num_planes; ++plane) {
			m_stride[plane] = grow_align16(stride[plane]);
			total_bytes += m_stride[plane] * rows[plane];
		}

		// allocate one buffer for all planes and fill it with random bytes
		m_buffer.Alloc(total_bytes);
		for(unsigned int i = 0; i < total_bytes; ++i) {
			m_buffer[i] = rng();
		}

		// point each plane at its slice of the shared buffer
		uint8_t *ptr = m_buffer.GetData();
		for(unsigned int plane = 0; plane < num_planes; ++plane) {
			m_data[plane] = ptr;
			ptr += m_stride[plane] * rows[plane];
		}

	}
Exemplo n.º 3
0
// Allocates a YUV 4:2:0 frame with 16-byte aligned rows and plane starts.
// Plane layout in one contiguous buffer: Y (1 byte/pixel), then U and V
// (1 byte per 2x2 pixel block each). If 'reuse_data' is given, its buffer is
// reused instead of allocating — it must be large enough (caller's contract).
static std::unique_ptr<AVFrameWrapper> CreateVideoFrameYUV(unsigned int width, unsigned int height, std::shared_ptr<AVFrameData>* reuse_data = NULL) {
	int luma_stride = grow_align16(width);
	int chroma_stride = grow_align16(width / 2);
	int luma_size = grow_align16(luma_stride * height);
	int chroma_size = grow_align16(chroma_stride * height / 2);
	std::shared_ptr<AVFrameData> frame_data;
	if(reuse_data == NULL) {
		frame_data = std::make_shared<AVFrameData>(luma_size + 2 * chroma_size);
	} else {
		frame_data = *reuse_data;
	}
	std::unique_ptr<AVFrameWrapper> frame(new AVFrameWrapper(frame_data));
	uint8_t *base = frame->GetRawData();
	auto *avframe = frame->GetFrame();
	avframe->data[0] = base;                           // Y plane
	avframe->data[1] = base + luma_size;               // U plane
	avframe->data[2] = base + luma_size + chroma_size; // V plane
	avframe->linesize[0] = luma_stride;
	avframe->linesize[1] = chroma_stride;
	avframe->linesize[2] = chroma_stride;
#if SSR_USE_AVFRAME_FORMAT
	avframe->format = PIX_FMT_YUV420P;
#endif
	return frame;
}
Exemplo n.º 4
0
// note: sample_size = sizeof(sampletype) * channels
// Allocates an audio frame whose planes live back-to-back in one buffer,
// each plane start aligned to 16 bytes. linesize carries the actual payload
// bytes per plane, which may be smaller than the aligned plane_size.
static std::unique_ptr<AVFrameWrapper> CreateAudioFrame(unsigned int planes, unsigned int samples, unsigned int sample_size, AVSampleFormat sample_format) {
	unsigned int bytes_per_plane = samples * sample_size / planes;
	size_t plane_size = grow_align16(bytes_per_plane);
	std::shared_ptr<AVFrameData> frame_data = std::make_shared<AVFrameData>(plane_size * planes);
	std::unique_ptr<AVFrameWrapper> frame(new AVFrameWrapper(frame_data));
	auto *avframe = frame->GetFrame();
	uint8_t *raw = frame->GetRawData();
	for(unsigned int plane = 0; plane < planes; ++plane) {
		avframe->data[plane] = raw + plane_size * plane;
		avframe->linesize[plane] = bytes_per_plane;
	}
#if SSR_USE_AVFRAME_NB_SAMPLES
	avframe->nb_samples = samples;
#endif
#if SSR_USE_AVFRAME_FORMAT
	avframe->format = sample_format;
#endif
	return frame;
}
// Scales a BGRA image (4 bytes per pixel) via mipmap reduction and a bilinear
// pass. NOTE(review): this chunk is truncated — the tail of the function (the
// part that presumably invokes 'bilinear_function') is not visible here, so
// comments below describe only the visible portion.
void Scale_BGRA_Generic(unsigned int in_w, unsigned int in_h, const uint8_t* in_data, int in_stride,
						unsigned int out_w, unsigned int out_h, uint8_t* out_data, int out_stride,
						MipMapFunction mipmap_function, BilinearFunction bilinear_function) {

	// no scaling? just copy the pixels
	if(in_w == out_w && in_h == out_h) {
		if(in_stride == out_stride) {
			// identical layout: one bulk copy covers all rows
			memcpy(out_data, in_data, in_stride * in_h);
		} else {
			// different strides: copy row by row (in_w * 4 payload bytes each)
			for(unsigned int out_j = 0; out_j < out_h; ++out_j) {
				memcpy(out_data, in_data, in_w * 4);
				in_data += in_stride;
				out_data += out_stride;
			}
		}
		return;
	}

	// calculate mipmap factors: largest power-of-two reduction (per axis)
	// that still leaves the image at least as large as the output
	unsigned int mx = 0, my = 0;
	while((out_w << (mx + 1)) <= in_w) ++mx;
	while((out_h << (my + 1)) <= in_h) ++my;
	// cap the combined reduction at 8 shift levels, trading the excess
	// between the two axes (both capped at 4 if both exceed it)
	if(mx + my > 8) {
		if(mx <= 4)
			my = 8 - mx;
		else if(my <= 4)
			mx = 8 - my;
		else
			mx = my = 4;
	}

	// pure mipmap scaling? (output size is exactly input >> mipmap factors)
	if((out_w << mx) == in_w && (out_h << my) == in_h) {
		mipmap_function(in_w, in_h, in_data, in_stride, out_data, out_stride, mx, my);
		return;
	}

	// create mipmap: reduce first, then redirect in_data/in_stride at the
	// reduced image so the following (not visible here) pass reads from it
	TempBuffer<uint8_t> mipmap;
	if(mx != 0 || my != 0) {
		// reduced dimensions, rounded up so no input pixel is dropped
		unsigned int mipmap_w = ((in_w - 1) >> mx) + 1, mipmap_h = ((in_h - 1) >> my) + 1;
		int mipmap_stride = grow_align16(mipmap_w * 4);
		mipmap.Alloc(mipmap_stride * mipmap_h);
		mipmap_function(in_w, in_h, in_data, in_stride, mipmap.GetData(), mipmap_stride, mx, my);
		in_data = mipmap.GetData();
		in_stride = mipmap_stride;
	}
Exemplo n.º 6
0
// Allocates an audio frame for the given format: the per-sample size is
// derived from the sample format and channel count, and all planes are packed
// back-to-back in one buffer with 16-byte aligned plane starts.
static std::unique_ptr<AVFrameWrapper> CreateAudioFrame(unsigned int channels, unsigned int sample_rate, unsigned int samples, unsigned int planes, AVSampleFormat sample_format) {

	// get required sample size
	// note: sample_size = sizeof(sampletype) * channels
	unsigned int sample_size = 0; // initialized only to silence compiler warnings
	switch(sample_format) {
		case AV_SAMPLE_FMT_S16:
#if SSR_USE_AVUTIL_PLANAR_SAMPLE_FMT
		case AV_SAMPLE_FMT_S16P:
#endif
			sample_size = channels * sizeof(int16_t);
			break;
		case AV_SAMPLE_FMT_FLT:
#if SSR_USE_AVUTIL_PLANAR_SAMPLE_FMT
		case AV_SAMPLE_FMT_FLTP:
#endif
			sample_size = channels * sizeof(float);
			break;
		default:
			assert(false); // unsupported sample format
			break;
	}

	// create the frame; linesize carries payload bytes, which may be less
	// than the aligned plane_size
	unsigned int bytes_per_plane = samples * sample_size / planes;
	size_t plane_size = grow_align16(bytes_per_plane);
	std::shared_ptr<AVFrameData> frame_data = std::make_shared<AVFrameData>(plane_size * planes);
	std::unique_ptr<AVFrameWrapper> frame(new AVFrameWrapper(frame_data));
	auto *avframe = frame->GetFrame();
	uint8_t *raw = frame->GetRawData();
	for(unsigned int plane = 0; plane < planes; ++plane) {
		avframe->data[plane] = raw + plane_size * plane;
		avframe->linesize[plane] = bytes_per_plane;
	}
#if SSR_USE_AVFRAME_NB_SAMPLES
	avframe->nb_samples = samples;
#endif
#if SSR_USE_AVFRAME_CHANNELS
	avframe->channels = channels;
#endif
#if SSR_USE_AVFRAME_SAMPLE_RATE
	avframe->sample_rate = sample_rate;
#endif
#if SSR_USE_AVFRAME_FORMAT
	avframe->format = sample_format;
#endif

	return frame;

}
Exemplo n.º 7
0
static std::unique_ptr<AVFrameWrapper> CreateVideoFrame(unsigned int width, unsigned int height, AVPixelFormat pixel_format, const std::shared_ptr<AVFrameData>& reuse_data) {

	// get required planes
	unsigned int planes = 0;
	size_t linesize[3] = {0}, planesize[3] = {0};
	switch(pixel_format) {
		case AV_PIX_FMT_YUV444P: {
			// Y/U/V = 1 byte per pixel
			planes = 3;
			linesize[0]  = grow_align16(width); planesize[0] = linesize[0] * height;
			linesize[1]  = grow_align16(width); planesize[1] = linesize[1] * height;
			linesize[2]  = grow_align16(width); planesize[2] = linesize[2] * height;
			break;
		}
		case AV_PIX_FMT_YUV422P: {
			// Y = 1 byte per pixel, U/V = 1 byte per 2x1 pixels
			assert(width % 2 == 0);
			planes = 3;
			linesize[0]  = grow_align16(width    ); planesize[0] = linesize[0] * height;
			linesize[1]  = grow_align16(width / 2); planesize[1] = linesize[1] * height;
			linesize[2]  = grow_align16(width / 2); planesize[2] = linesize[2] * height;
			break;
		}
		case AV_PIX_FMT_YUV420P: {
			// Y = 1 byte per pixel, U/V = 1 byte per 2x2 pixels
			assert(width % 2 == 0);
			assert(height % 2 == 0);
			planes = 3;
			linesize[0]  = grow_align16(width    ); planesize[0] = linesize[0] * height    ;
			linesize[1]  = grow_align16(width / 2); planesize[1] = linesize[1] * height / 2;
			linesize[2]  = grow_align16(width / 2); planesize[2] = linesize[2] * height / 2;
			break;
		}
		case AV_PIX_FMT_NV12: {
			assert(width % 2 == 0);
			assert(height % 2 == 0);
			// planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved
			// Y = 1 byte per pixel, U/V = 1 byte per 2x2 pixels
			planes = 2;
			linesize[0]  = grow_align16(width); planesize[0] = linesize[0] * height    ;
			linesize[1]  = grow_align16(width); planesize[1] = linesize[1] * height / 2;
			break;
		}
		case AV_PIX_FMT_BGRA: {
			// BGRA = 4 bytes per pixel
			planes = 1;
			linesize[0] = grow_align16(width * 4); planesize[0] = linesize[0] * height;
			break;
		}
		case AV_PIX_FMT_BGR24: {
			// BGR = 3 bytes per pixel
			planes = 1;
			linesize[0] = grow_align16(width * 3); planesize[0] = linesize[0] * height;
			break;
		}
		default: assert(false); break;
	}

	// create the frame
	size_t totalsize = 0;
	for(unsigned int p = 0; p < planes; ++p) {
		totalsize += planesize[p];
	}
	std::shared_ptr<AVFrameData> frame_data = (reuse_data == NULL)? std::make_shared<AVFrameData>(totalsize) : reuse_data;
	std::unique_ptr<AVFrameWrapper> frame(new AVFrameWrapper(frame_data));
	uint8_t *data = frame->GetRawData();
	for(unsigned int p = 0; p < planes; ++p) {
		frame->GetFrame()->data[p] = data;
		frame->GetFrame()->linesize[p] = linesize[p];
		data += planesize[p];
	}
#if SSR_USE_AVFRAME_WIDTH_HEIGHT
	frame->GetFrame()->width = width;
	frame->GetFrame()->height = height;
#endif
#if SSR_USE_AVFRAME_FORMAT
	frame->GetFrame()->format = pixel_format;
#endif
#if SSR_USE_AVFRAME_SAR
	frame->GetFrame()->sample_aspect_ratio.num = 1;
	frame->GetFrame()->sample_aspect_ratio.den = 1;
#endif

	return frame;

}