// Exemplo n.º 1
// Reads and decodes packets until one complete video frame has been
// produced or the decoder is killed.  Audio and unknown streams are
// discarded.  On demux failure (usually end of file) it notifies
// listeners and restarts the stream so playback can loop.
void QVideoDecoder::read()
{
	emit ready(false);

	AVPacket packet;

	int frame_finished = 0;
	while(!frame_finished && !m_killed)
	{
		if(av_read_frame(m_av_format_context, &packet) >= 0)
		{
			// Is this a packet from the video stream?
			if(packet.stream_index == m_video_stream)
			{
				//global_video_pkt_pts = packet.pts;

				avcodec_decode_video(m_video_codec_context, m_av_frame, &frame_finished, packet.data, packet.size);

				// BUGFIX: release the packet whether or not a complete frame
				// came out of the decoder.  The original freed it only when
				// frame_finished was set, leaking every packet that produced
				// a partial frame.
				av_free_packet(&packet);
			}
			else if(packet.stream_index == m_audio_stream)
			{
				//decode audio packet, store in queue
				av_free_packet(&packet);
			}
			else
			{
				// Packet from a stream we do not handle; discard it.
				av_free_packet(&packet);
			}
		}
		else
		{
			// Demux failed (end of stream): tell listeners and seek back to
			// the beginning so the decode loop can continue.
			emit reachedEnd();
			this->restart();
		}
	}
}
// Exemplo n.º 2
// Constructs a QVideo, registers FFmpeg's codecs/formats, and starts the
// decoder thread.  Defaults set in the initializer list: looped playback,
// real-time advance mode, ~25 MB max decode buffer, Paused status, 1x speed.
QVideo::QVideo(QObject *parent) : QObject(parent), 
	m_frame_rate(0.0f), 
	m_looped(true),
	m_max_buffer_size(25000000),
	m_ready_to_play(false),
	m_advance_mode(QVideo::RealTime),
	m_play_timer(0),
	m_total_runtime(0),
	m_frameTimer(0),
	m_video_loaded(false),
	m_status(Paused),
	m_videoSpeedMultiplier(1)
{
	// Serialize construction across instances -- presumably because
	// av_register_all() mutates global FFmpeg state; confirm.
	QMutexLocker locker(&qvideo_mutex);
	av_register_all();

	//m_video_encoder = new QVideoEncoder(this, this);
	
	// The decoder runs in its own thread; frames and end-of-stream events
	// arrive back here via the signal/slot connections below.
	m_video_decoder = new QVideoDecoder(this, this);
	m_video_decoder->start();
	//connect(this, SIGNAL(startDecode()), m_video_decoder, SLOT(decode()));
	connect(m_video_decoder, SIGNAL(reachedEnd()), this, SLOT(determineVideoEndAction()));
        connect(m_video_decoder, SIGNAL(ready(bool)), this, SLOT(setReady(bool)));
        

        // Single-shot timer paces frame display; re-armed for each frame.
        connect(&m_nextImageTimer, SIGNAL(timeout()), this, SLOT(displayFrame()));
        m_nextImageTimer.setSingleShot(true);


	setAdvanceMode(QVideo::RealTime);

// 	m_screen = new QLabel();

        // just a random default value
	m_expectedDelay = 999;
	m_last_frame_shown = 999;
        m_frame_counter = 0;
}
// Exemplo n.º 3
// Reads packets until one complete video frame is decoded, converts it to
// RGB32 and emits it together with its presentation timestamp (seconds)
// via newFrame().
void QVideoDecoder::readFrame()
{
	emit ready(false);

	AVPacket packet;
	double pts;

	int frame_finished = 0;
	while(!frame_finished && !m_killed)
	{
		if(av_read_frame(m_av_format_context, &packet) >= 0)
		{
			// Is this a packet from the video stream?
			if(packet.stream_index == m_video_stream)
			{
				//global_video_pkt_pts = packet.pts;

				avcodec_decode_video(m_video_codec_context, m_av_frame, &frame_finished, packet.data, packet.size);

				// Best-effort PTS: prefer the value stashed in the frame's
				// opaque slot, fall back to the packet DTS, finally to 0.
				if(packet.dts == AV_NOPTS_VALUE && m_av_frame->opaque && *(uint64_t*)m_av_frame->opaque != AV_NOPTS_VALUE)
				{
					pts = *(uint64_t *)m_av_frame->opaque;
				}
				else if(packet.dts != AV_NOPTS_VALUE)
				{
					pts = packet.dts;
				}
				else
				{
					pts = 0;
				}

				// Convert from stream time_base units to seconds.
				pts *= av_q2d(m_timebase);

				// Did we get a video frame?
				if(frame_finished)
				{
					// Lazily create the colorspace converter on first use.
					if(m_sws_context == NULL)
					{
						m_sws_context = sws_getContext(m_video_codec_context->width, m_video_codec_context->height,
							m_video_codec_context->pix_fmt, m_video_codec_context->width, m_video_codec_context->height,
							PIX_FMT_RGB32, SWS_PRINT_INFO, NULL, NULL, NULL);
						//printf("readFrame(): created m_sws_context\n");
					}
					printf("readFrame(): got frame\n");

					sws_scale(m_sws_context, m_av_frame->data, m_av_frame->linesize, 0,
						m_video_codec_context->height, m_av_rgb_frame->data, m_av_rgb_frame->linesize);

					size_t num_bytes = m_av_rgb_frame->linesize[0] * m_video_codec_context->height;

					// NOTE(review): ownership of this heap QImage passes to
					// whoever receives newFrame() -- verify the receiver
					// deletes it, otherwise every frame leaks.
					QImage * frame = new QImage(m_video_codec_context->width, m_video_codec_context->height, QImage::Format_RGB32);

					memcpy(frame->bits(), m_av_rgb_frame->data[0], num_bytes);

					QFFMpegVideoFrame video_frame;
					video_frame.frame = frame;
					video_frame.pts = pts;
					video_frame.previous_pts = m_previous_pts;

					emit newFrame(video_frame);

					m_previous_pts = pts;
				}

				// BUGFIX: free the packet unconditionally.  The original
				// freed it only inside the frame_finished branch, leaking
				// every video packet that yielded no complete frame.
				av_free_packet(&packet);
			}
			else if(packet.stream_index == m_audio_stream)
			{
				//decode audio packet, store in queue
				av_free_packet(&packet);
			}
			else
			{
				// Packet from a stream we do not handle; discard it.
				av_free_packet(&packet);
			}
		}
		else
		{
			// NOTE(review): if av_read_frame keeps failing (EOF) this loop
			// spins emitting reachedEnd() until a connected slot seeks or
			// m_killed is set -- confirm the receiver restarts the stream.
			emit reachedEnd();
		}
	}
}
// Exemplo n.º 4
// Decodes the next complete video frame, converts it to RGB, updates the
// internal video clock and publishes the frame via newFrame().  Returns
// immediately when the owning QVideo is not running.
void QVideoDecoder::decode()
{
	if(m_video->m_status == QVideo::NotRunning)
		return;

	emit ready(false);

	AVPacket pkt1, *packet = &pkt1;
	double pts;

	int frame_finished = 0;
	while(!frame_finished && !m_killed)
	{
		if(av_read_frame(m_av_format_context, packet) >= 0)
		{
			// Is this a packet from the video stream?
			if(packet->stream_index == m_video_stream)
			{
				// Published so the codec's buffer callback can tag the frame
				// with the packet PTS (dranger tutorial technique).
				global_video_pkt_pts = packet->pts;

				avcodec_decode_video(m_video_codec_context, m_av_frame, &frame_finished, packet->data, packet->size);

				// Best-effort PTS: frame opaque slot, then packet DTS, then 0.
				if(packet->dts == AV_NOPTS_VALUE &&
					      m_av_frame->opaque &&
				  *(uint64_t*)m_av_frame->opaque != AV_NOPTS_VALUE)
				{
					pts = *(uint64_t *)m_av_frame->opaque;
				}
				else if(packet->dts != AV_NOPTS_VALUE)
				{
					pts = packet->dts;
				}
				else
				{
					pts = 0;
				}

				// Convert from stream time_base units to seconds.
				pts *= av_q2d(m_timebase);

				// Did we get a video frame?
				if(frame_finished)
				{
					// Lazily create the colorspace converter on first use;
					// creation is done under the lock.
					if(m_sws_context == NULL)
					{
						mutex.lock();
						m_sws_context = sws_getContext(
							m_video_codec_context->width, m_video_codec_context->height,
							m_video_codec_context->pix_fmt,
							m_video_codec_context->width, m_video_codec_context->height,
							//PIX_FMT_RGB32,SWS_BICUBIC,
							RAW_PIX_FMT, SWS_FAST_BILINEAR,
							NULL, NULL, NULL); //SWS_PRINT_INFO
						mutex.unlock();
						//printf("decode(): created m_sws_context\n");
					}
					//printf("decode(): got frame\n");

					sws_scale(m_sws_context,
						  m_av_frame->data,
						  m_av_frame->linesize, 0,
						  m_video_codec_context->height,
						  m_av_rgb_frame->data,
						  m_av_rgb_frame->linesize);

					// NOTE(review): this QImage wraps m_av_rgb_frame's buffer
					// without copying and without an explicit bytesPerLine --
					// assumes RAW_PIX_FMT matches Format_RGB16 and scanlines
					// are 32-bit aligned; confirm.
					m_frame = QImage(m_av_rgb_frame->data[0],
								m_video_codec_context->width,
								m_video_codec_context->height,
								QImage::Format_RGB16);

					// This block from the synchronize_video(VideoState *is, AVFrame *src_frame, double pts) : double
					// function given at: http://www.dranger.com/ffmpeg/tutorial05.html
					{
						// update the frame pts
						double frame_delay;

						if(pts != 0)
						{
							/* if we have pts, set video clock to it */
							m_video_clock = pts;
						} else {
							/* if we aren't given a pts, set it to the clock */
							pts = m_video_clock;
						}
						/* update the video clock */
						frame_delay = av_q2d(m_timebase);
						/* if we are repeating a frame, adjust clock accordingly */
						frame_delay += m_av_frame->repeat_pict * (frame_delay * 0.5);
						m_video_clock += frame_delay;
					}


					QFFMpegVideoFrame video_frame;
					video_frame.frame = &m_frame;
					video_frame.pts = pts;
					video_frame.previous_pts = m_previous_pts;

					m_current_frame = video_frame;

					emit newFrame(video_frame);

					m_previous_pts = pts;
				}

				// BUGFIX: free the packet unconditionally.  The original
				// freed it only when frame_finished was set, leaking every
				// video packet that produced no complete frame.
				av_free_packet(packet);
			}
			else if(packet->stream_index == m_audio_stream)
			{
				//decode audio packet, store in queue
				av_free_packet(packet);
			}
			else
			{
				// Packet from a stream we do not handle; discard it.
				av_free_packet(packet);
			}
		}
		else
		{
			// NOTE(review): av_read_frame failure (EOF) just emits
			// reachedEnd() and loops again -- confirm a connected slot
			// seeks/restarts, otherwise this busy-loops at end of file.
			emit reachedEnd();
		}
	}
}
// Exemplo n.º 5
// Discovers all targets by walking object corners through the connection
// graph.  Each lastCorners stack entry holds [0]=target index, [1]=corner,
// [2]=count of completed corners; lastPoints mirrors the point reached at
// each level so a dead-end trace can be rolled back.
// NOTE(review): recurses at the end until furtherTarget() returns -1 --
// recursion depth appears bounded by the number of targets; confirm.
void TargetManagement::findTargets ()
{
	int ylow;
	if (discovered -> size () == 0)
	{
		// First call: pick the initial target and create one
		// "undiscovered" (0) flag per object.
		ylow = initialTarget ();

		for (unsigned int j = 0; j < objects -> size (); j++)
		{
			discovered -> push_back (0);
		}
	}
	else
	{
		ylow = furtherTarget ();
	}
	int corner = 0;
	// -1 means no further target exists; otherwise trace this one.
	if (ylow != -1)
	{
		targets -> push_back (ylow);
		// Mark as fully processed (2) -- TODO confirm flag meaning.
		discovered -> at (ylow) = 2;

		// c1-c2-c3-c4 in reverse order ("Atvirksciai")
		int count = 0;
		int currentTarget = targets -> back ();
		// Push a fresh stack frame: [target, corner=0, count=0].
		lastCorners.push(vector<int>());
		lastCorners.top().push_back(currentTarget);
		lastCorners.top().push_back (0);
		lastCorners.top().push_back (0);
		Point *currentPoint = corners -> at (info->at(currentTarget)) -> at (corner);
		lastPoints.push(currentPoint);
		// Circle the current object until all four corners are completed.
		while (count < 4)
		{
			//cout << discovered -> at (ylow) << endl;
			vector<Point*> *recCorners = corners -> at (info->at(currentTarget));
			list<int> *g = &(graph -> graph -> at (currentTarget));
			vector<Point*> *points = &(graph -> connections -> at (currentTarget));
			// Corners are visited in descending cyclic order: 0->3->2->1->0.
			if (corner == 0) // Goes to 3
			{
				Point *nextCorner = recCorners -> at (3);
				currentPoint = findNewCorner (recCorners, g, points, currentPoint, nextCorner);

				// Jumped onto a different object: restart the circuit there.
				if (foundNewTarget(currentTarget))
				{
					currentTarget = lastCorners.top()[0];
					corner = setCorner (currentPoint);
					count = 0;
					lastCorners.top().push_back (corner);
					lastCorners.top().push_back(count);
				}
				// Reached the intended corner: advance to the next leg.
				else if (reachedEnd (currentPoint, count))
				{
					corner = 3;
					count++;
					lastCorners.top()[1] = corner;
					lastCorners.top()[2] = count;
					lastPoints.top() = currentPoint;
				}
				else
				{
					// Trace lost: handled by the recovery branch below.
					corner = -1;
				}
			}
			else if (corner == 1) // Goes to 0
			{
				// Same handling as corner 0, heading for corner 0.
				Point *nextCorner = recCorners -> at (0);
				currentPoint = findNewCorner (recCorners, g, points, currentPoint, nextCorner);


				if (foundNewTarget(currentTarget))
				{
					currentTarget = lastCorners.top()[0];
					corner = setCorner (currentPoint);
					count = 0;
					lastCorners.top().push_back (corner);
					lastCorners.top().push_back(count);
				}
				else if (reachedEnd (currentPoint, count))
				{
					corner = 0;
					count++;
					lastCorners.top()[1] = corner;
					lastCorners.top()[2] = count;
					lastPoints.top() = currentPoint;
				}
				else
				{
					corner = -1;
				}
			}
			else if (corner == 2) // Goes to 1
			{
				// Same handling as corner 0, heading for corner 1.
				Point *nextCorner = recCorners -> at (1);
				currentPoint = findNewCorner (recCorners, g, points, currentPoint, nextCorner);


				if (foundNewTarget(currentTarget))
				{
					currentTarget = lastCorners.top()[0];
					corner = setCorner (currentPoint);
					count = 0;
					lastCorners.top().push_back (corner);
					lastCorners.top().push_back(count);
				}
				else if (reachedEnd (currentPoint, count))
				{
					corner = 1;
					count++;
					lastCorners.top()[1] = corner;
					lastCorners.top()[2] = count;
					lastPoints.top() = currentPoint;
				}
				else
				{
					corner = -1;
				}
			}
			else if (corner == 3)				   // Goes to 2
			{
				// Same handling as corner 0, heading for corner 2.
				Point *nextCorner = recCorners -> at (2);
				currentPoint = findNewCorner (recCorners, g, points, currentPoint, nextCorner);

				if (foundNewTarget(currentTarget))
				{
					currentTarget = lastCorners.top()[0];
					corner = setCorner (currentPoint);
					count = 0;
					lastCorners.top().push_back (corner);
					lastCorners.top().push_back(count);
				}
				else if (reachedEnd (currentPoint, count))
				{
					corner = 2;
					count++;
					lastCorners.top()[1] = corner;
					lastCorners.top()[2] = count;
					lastPoints.top() = currentPoint;
				}
				else
				{
					corner = -1;
				}
			}
			else
			{
				// corner == -1 (trace lost): aim back at the previously
				// recorded point and try to re-acquire the circuit.
				lastPoints.pop ();
				Point *nextCorner = lastPoints.top ();
				lastPoints.push (currentPoint);
				currentPoint = findNewCorner (recCorners, g, points, currentPoint, nextCorner);

				if (foundNewTarget(currentTarget))
				{
					currentTarget = lastCorners.top()[0];
					corner = setCorner (currentPoint);
					count = 0;
					lastCorners.top().push_back (corner);
					lastCorners.top().push_back(count);
				}
				else
				{
					count++;
					lastCorners.top()[2] = count;
					lastPoints.top() = currentPoint;
				}
			}

			// A full circuit that still traces a connection means this
			// branch was a dead end: restore the previous stack frame.
			if (count == 4 && traceConnection ())
			{
				// Roll back searching.
				if (rollBack ())
				{
					currentTarget = lastCorners.top()[0];
					corner = lastCorners.top()[1];
					count = lastCorners.top()[2];
					currentPoint = lastPoints.top();
					//lastPoints.pop ();
				}
			}
		}
		clearStack ();
		// Recurse to search for the next undiscovered target.
		findTargets ();
	}
}