void OpenCV::OpenCVCore::createPermanentConnection()
{
	// these connections are created once and persist for the whole application lifetime

	// sending result data from face detection
	QObject::connect( mThrFaceRec,
					  SIGNAL( sendEyesCoords( float,float,float ) ),
					  AppCore::Core::getInstance( mApp )->getCoreWindow()->getCameraManipulator(),
					  SLOT( setRotationHeadFaceDet( float,float,float ) ) );
	QOSG::CoreWindow* coreWindow = AppCore::Core::getInstance( mApp )->getCoreWindow();
	QOSG::ProjectiveARCore* projectiveARCore = QOSG::ProjectiveARCore::getInstance( mApp, coreWindow );
	//projectiveARCore->init();
	QObject::connect( mThrFaceRec,
					  SIGNAL( sendEyesRealCoords( float,float,float ) ),
					  projectiveARCore->getViewer(),
					  SLOT( setViewerPosByFaceDetection( float,float,float ) ) );

	//  sending result data from aruco - M.Garaj(TP) first ArUco try
	/*QObject::connect( mThrAruco,
					  SIGNAL( sendArucoPosVec( osg::Vec3d ) ),
					  AppCore::Core::getInstance( mApp )->getCoreWindow()->getCameraManipulator(),
					  SLOT( updateArucoGraphPosition( osg::Vec3d ) ) );
	QObject::connect( mThrAruco,
					  SIGNAL( sendArucoRorQuat( osg::Quat ) ),
					  AppCore::Core::getInstance( mApp )->getCoreGraph(),
					  SLOT( updateGraphRotByAruco( osg::Quat ) ) );
	QObject::connect( mThrAruco,
					  SIGNAL( sendArucoPosAndQuat( osg::Quat,osg::Vec3d ) ),
					  AppCore::Core::getInstance( mApp )->getCoreGraph(),
					  SLOT( updateGraphPosAndRotByAruco( osg::Quat, osg::Vec3d ) ) );*/

	//jurik
	//sending matrices via Qt
	QObject::connect( mThrAruco,
					  SIGNAL( sendProjectionMatrix( QMatrix4x4 ) ),
					  AppCore::Core::getInstance( mApp )->getCoreGraph(),
					  SLOT( recievedPMatrix( QMatrix4x4 ) ) );
	QObject::connect( mThrAruco,
					  SIGNAL( sendModelViewMatrix( QMatrix4x4 ) ),
					  AppCore::Core::getInstance( mApp )->getCoreGraph(),
					  SLOT( recievedMVMatrix( QMatrix4x4 ) ) );

	// updating background image
	QObject::connect( mThrFaceRec,
					  SIGNAL( pushBackgrImage( cv::Mat ) ),
					  AppCore::Core::getInstance( mApp )->getCoreGraph()->getCameraStream(),
					  SLOT( updateBackgroundImage( cv::Mat ) ) );
	QObject::connect( mThrAruco,
					  SIGNAL( pushBackgrImage( cv::Mat ) ),
					  AppCore::Core::getInstance( mApp )->getCoreGraph()->getCameraStream(),
					  SLOT( updateBackgroundImage( cv::Mat ) ) );

	QObject::connect( mThrAruco,
					  SIGNAL( moveMouseArucoSignal( double,double,bool,Qt::MouseButton ) ),
					  AppCore::Core::getInstance()->getCoreWindow(),
					  SLOT( moveMouseAruco( double,double,bool,Qt::MouseButton ) ) );

}
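// The connections above cross thread boundaries, so Qt delivers them as queued
// connections, and every custom argument type (cv::Mat, osg::Vec3d, osg::Quat)
// must be registered with the meta-object system before the first emit.
// A minimal sketch of such a registration, assuming it runs during application
// start-up; the helper name registerMetaTypesForThreads is hypothetical:
#include <QMetaType>
#include <QMatrix4x4>
#include <opencv2/core/core.hpp>
#include <osg/Vec3d>
#include <osg/Quat>

void registerMetaTypesForThreads()
{
	// the string names must match the signatures used in SIGNAL()/SLOT()
	qRegisterMetaType<cv::Mat>( "cv::Mat" );
	qRegisterMetaType<osg::Vec3d>( "osg::Vec3d" );
	qRegisterMetaType<osg::Quat>( "osg::Quat" );
	qRegisterMetaType<QMatrix4x4>( "QMatrix4x4" );	// already a Qt meta type; re-registering is harmless
}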
// in a loop: capture a frame from the camera, detect faces,
// and send the eye coordinates to update the view
void QOpenCV::FaceRecognitionThread::run()
{
	mCancel = false;
	cv::Mat image;

	if ( mCapVideo == NULL ) {
		qDebug() << "[FaceRecognitionThread::run()]  Camera is not set";
		return;
	}

	// allocate only after the camera check above, so the early return cannot leak it
	OpenCV::FaceRecognizer* mFaceRecognizer = new OpenCV::FaceRecognizer();

	while ( !mCancel ) {
		// get image from camera
		image = mCapVideo->queryFrame();

		cv::cvtColor( image, image, CV_BGR2RGB );

		// we detect faces on grayscale image
		mFaceRecognizer->detectFaces( mCapVideo->getGrayframe() );
		mFaceRecognizer->annotateFaces( image );

		cv::flip( image, image, 1 );

		// show image; clone() detaches the pixel buffer, because cv::Mat copies
		// are shallow and the receiving thread must not share data with this loop
		if ( mSendImgEnabled && !image.empty() ) {
			emit pushImage( image.clone() );
		}
		if ( mSendBackgrImgEnabled && !image.empty() ) {
			emit pushBackgrImage( image.clone() );
		}

		// when a face (and, optionally, movement above a threshold) was detected,
		// send the eye coordinates to update the view
		if ( mFaceRecognizer->detected ) { //&& mFaceRecognizer->isMovement
			emit sendEyesCoords( static_cast<float>( -mFaceRecognizer->getEyesCoords().x ),
								 static_cast<float>( -mFaceRecognizer->getEyesCoords().y ),
								 -mFaceRecognizer->getHeadDistance( mCapVideo->getWidth() ) );
			// Kostan:
			//  focalLength - focal length measured for one specific webcam
			//  0.1481 m - average face width (ages 15 to 40)
			float focalLength = 749.3f;
			float distance = mFaceRecognizer->getHeadDistanceFocal( 0.1481f );
			float x = static_cast<float>( -mFaceRecognizer->getEyesCoords().x ) * ( mCapVideo->getWidth() / 200 ) * ( distance / focalLength );
			float y = static_cast<float>( -mFaceRecognizer->getEyesCoords().y ) * ( mCapVideo->getHeight() / 200 ) * ( distance / focalLength );
			emit sendEyesRealCoords( x, y, distance );
		}
		msleep( 80 );
	}
	mCapVideo->release();
	mCapVideo = NULL;
	delete mFaceRecognizer;
}
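// The distance estimate above follows the pinhole-camera model: an object of
// real width W that appears p pixels wide under a focal length of f pixels is
// at distance d = f * W / p. A minimal sketch of how getHeadDistanceFocal
// could work under that assumption; 'detectedFaceWidthPx' is a hypothetical
// stand-in for the pixel width measured by the detector:
float headDistanceFocalSketch( float realFaceWidthMeters, float detectedFaceWidthPx )
{
	const float focalLengthPx = 749.3f;	// calibrated for one specific webcam, as noted above
	// d = f * W / p : the farther the head, the smaller it appears
	return focalLengthPx * realFaceWidthMeters / detectedFaceWidthPx;
}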
void ArucoThread::imagesSending( ArucoCore& aCore, const cv::Mat frame ) const
{
	// note: 'frame' is a shallow cv::Mat copy, so the in-place flip and
	// colour conversion below also modify the caller's pixel buffer
	if ( mSendBackgrImgEnabled && !frame.empty() ) {
		if ( ! mMarkerIsBehind ) {
			cv::flip( frame, frame, 1 );
		}
		cv::cvtColor( frame, frame, CV_BGR2RGB );

		emit pushBackgrImage( frame.clone() );
	}

	cv::Mat image;
	if ( mMultiMarkerEnabled ) {
		image = aCore.getDetectedRectangleImage();
	}
	else {
		image = aCore.getDetImage();
	}

	if ( mSendImgEnabled ) {
		if ( ! mMarkerIsBehind ) {
			cv::flip( image, image, 1 );
		}
		cv::cvtColor( image, image, CV_BGR2RGB );

		if ( mSendBackgrImgEnabled ) {
			// without this emit, the background image would lack the augmented-reality overlay
			emit pushBackgrImage( image.clone() );
		}

		emit pushImagemMat( image.clone() );
	}
}
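// Both threads emit image.clone() rather than the cv::Mat itself: Mat's copy
// constructor copies only the header and shares the pixel buffer, so without
// the clone the receiving thread would read memory that the capture loop is
// about to overwrite. A minimal self-contained sketch of the difference:
#include <opencv2/core/core.hpp>

int main()
{
	cv::Mat original( 2, 2, CV_8UC1, cv::Scalar( 0 ) );

	cv::Mat shallow = original;			// header copy: shares the pixel buffer
	cv::Mat deep = original.clone();	// deep copy: owns its own buffer

	original.at<unsigned char>( 0, 0 ) = 255;

	// shallow now reads 255, deep still reads 0
	return shallow.at<unsigned char>( 0, 0 ) != deep.at<unsigned char>( 0, 0 ) ? 0 : 1;
}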