Example #1
// in a loop, capture frame from camera and detect faces
// send eyes coordinates to change view
void QOpenCV::FaceRecognitionThread::run()
{
	mCancel = false;
	cv::Mat image;
	OpenCV::FaceRecognizer*	mFaceRecognizer = new OpenCV::FaceRecognizer();

	if ( mCapVideo == NULL ) {
		qDebug() << "[FaceRecognitionThread::run()]  Camera is not set";
		return;
	}

	while ( !mCancel ) {
		// get image from camera
		image = mCapVideo->queryFrame();

		cv::cvtColor( image, image, CV_BGR2RGB );

		// we detect faces on grayscale image
		mFaceRecognizer->detectFaces( mCapVideo->getGrayframe() );
		mFaceRecognizer->annotateFaces( image );

		cv::flip( image, image, 1 );

		// show image; clone so the receiving thread owns its own copy of the data
		if ( mSendImgEnabled && !image.empty() ) {
			emit pushImage( image.clone() );
		}
		if ( mSendBackgrImgEnabled && !image.empty() ) {
			emit pushBackgrImage( image.clone() );
		}

		// when a face is detected (the movement threshold check is currently disabled),
		// send eye coordinates to change the view
		if ( mFaceRecognizer->detected ) { // && mFaceRecognizer->isMovement
			emit sendEyesCoords( static_cast<float>( -mFaceRecognizer->getEyesCoords().x ),
								 static_cast<float>( -mFaceRecognizer->getEyesCoords().y ),
								 -mFaceRecognizer->getHeadDistance( mCapVideo->getWidth() ) );
			// Kostan:
			//  focalLength - focal length of the webcam used, in pixels
			//  0.1481 m - average face width (ages 15 to 40)
			float focalLength = 749.3f;
			float distance = mFaceRecognizer->getHeadDistanceFocal( 0.1481f );
			float x = static_cast<float>( -mFaceRecognizer->getEyesCoords().x ) * ( mCapVideo->getWidth() / 200 ) * ( distance / focalLength );
			float y = static_cast<float>( -mFaceRecognizer->getEyesCoords().y ) * ( mCapVideo->getHeight() / 200 ) * ( distance / focalLength );
			emit sendEyesRealCoords( x, y, distance );
		}
		msleep( 80 );
	}
	mCapVideo->release();
	mCapVideo = NULL;
	delete mFaceRecognizer;
}
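
The real-world coordinates above follow the pinhole camera model: given an average real face width and the camera's focal length in pixels, distance is inversely proportional to the detected face width in pixels. A minimal sketch of that relationship using the constants from the example (the faceWidthPx parameter is hypothetical; the actual FaceRecognizer API may differ):

// Pinhole model: realWidth / distance = faceWidthPx / focalLengthPx
// => distance = realWidth * focalLengthPx / faceWidthPx
float estimateHeadDistance( float faceWidthPx )
{
	const float focalLengthPx  = 749.3f;  // webcam focal length in pixels (from the example above)
	const float realFaceWidthM = 0.1481f; // average face width in metres (ages 15 to 40)
	if ( faceWidthPx <= 0.0f ) {
		return -1.0f; // no valid detection
	}
	return realFaceWidthM * focalLengthPx / faceWidthPx;
}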
Example #2
// in a loop, capture frame from camera and detect faces
// send eyes coordinates to change view
void QOpenCV::FaceRecognitionThread::run()
{
	mCancel = false;
	cv::Mat image;
	OpenCV::FaceRecognizer*	mFaceRecognizer = new OpenCV::FaceRecognizer();

	if ( mCapVideo == NULL ) {
		qDebug() << "[FaceRecognitionThread::run()]  Camera is not set";
		return;
	}

	while ( !mCancel ) {
		// get image from camera
		image = mCapVideo->queryFrame();

		cv::cvtColor( image, image, CV_BGR2RGB );

		// we detect faces on grayscale image
		mFaceRecognizer->detectFaces( mCapVideo->getGrayframe() );
		mFaceRecognizer->annotateFaces( image );

		cv::flip( image, image, 1 );

		// show image; clone so the receiving thread owns its own copy of the data
		if ( mSendImgEnabled && !image.empty() ) {
			emit pushImage( image.clone() );
		}
		if ( mSendBackgrImgEnabled && !image.empty() ) {
			emit pushBackgrImage( image.clone() );
		}

		// when a face is detected (the movement threshold check is currently disabled),
		// send eye coordinates to change the view
		if ( mFaceRecognizer->detected ) { // && mFaceRecognizer->isMovement
			emit sendEyesCoords( static_cast<float>( -mFaceRecognizer->getEyesCoords().x ),
								 static_cast<float>( -mFaceRecognizer->getEyesCoords().y ),
								 -mFaceRecognizer->getHeadDistance( mCapVideo->getWidth() ) );
		}
		msleep( 80 );
	}
	mCapVideo->release();
	mCapVideo = NULL;
	delete mFaceRecognizer;
}
Example #3
void OpenCV::OpenCVCore::createConnectionFaceRec()
{
	// send actual image
	QObject::connect( mOpencvWindow,
					  SIGNAL( sendImgFaceRec( bool ) ),
					  mThrFaceRec,
					  SLOT( setSendImgEnabled( bool ) ) );
	QObject::connect( mThrFaceRec,
					  SIGNAL( pushImage( cv::Mat ) ),
					  mOpencvWindow,
					  SLOT( setLabel( cv::Mat ) ) );
	// send actual image to background
	QObject::connect( mOpencvWindow,
					  SIGNAL( sendBackgrImgFaceRec( bool ) ),
					  mThrFaceRec,
					  SLOT( setSendBackgrImgEnabled( bool ) ) );

	// start, stop
	QObject::connect( mOpencvWindow,
					  SIGNAL( startFaceRec() ),
					  mThrFaceRec,
					  SLOT( start() ) );
	QObject::connect( mOpencvWindow,
					  SIGNAL( stopFaceRec( bool ) ),
					  mThrFaceRec,
					  SLOT( setCancel( bool ) ) );
	QObject::connect( mThrFaceRec,
					  SIGNAL( finished() ),
					  mOpencvWindow,
					  SLOT( onFaceRecThrFinished() ) );
	QObject::connect( mOpencvWindow,
					  SIGNAL( setCapVideoFaceRec( OpenCV::CapVideo* ) ),
					  mThrFaceRec,
					  SLOT( setCapVideo( OpenCV::CapVideo* ) ) );
}
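
These connections cross thread boundaries, so Qt queues them and must copy the cv::Mat argument through its meta-object system. A sketch of the registration that queued connections with a custom value type typically require (the helper name and file placement are illustrative, not taken from this project):

#include <QMetaType>
#include <opencv2/core/core.hpp>

Q_DECLARE_METATYPE( cv::Mat )   // once, at namespace scope, typically in a shared header

// call once at startup, before the first queued emit
void registerOpenCvMetaTypes()
{
	qRegisterMetaType<cv::Mat>( "cv::Mat" );
}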
Example #4
void Kinect::KinectThread::run()
{
	// flag for the click-gesture timer
	bool wasTimerReset = true;
	mCancel = false;

	// real-world coordinate converter (currently disabled)
	/*	openni::CoordinateConverter coordinateConverter;
		// convert millimeters to pixels
		float pDepth_x;
		float pDepth_y;
		float pDepth_z;
		float pDepth_x2;
		float pDepth_y2;
		float pDepth_z2;
	*/
	Kinect::KinectZoom* zoom = new Kinect::KinectZoom();
	cv::Mat frame;
	cv::Mat depth;

	// loop until cancelled
	while ( !mCancel ) {
		// check whether image sending is enabled
		if ( mSetImageEnable ) {
			// read frame data
			color.readFrame( &colorFrame );
			// convert for sending
			frame = mKinect->colorImageCvMat( colorFrame );
			cv::cvtColor( frame, frame, CV_BGR2RGB );
			m_depth.readFrame( &depthFrame );

			// if set to true, capture the first frame of the Kinect stream and save the color
			// frame, depth frame and depth matrix to a configured location
			if ( captureImage ) {
				depth = mKinect->depthImageCvMat( depthFrame );

				std::string file = Util::ApplicationConfig::get()->getValue( "Kinect.OutputFolder" ).toStdString();


				//save color frame
				cv::imwrite( file + "\\" + Util::ApplicationConfig::get()->getValue( "Kinect.ColourImageName" ).toStdString()  + ".jpeg", frame );

				//save depth matrix
				std::ofstream fout( file + "\\" + Util::ApplicationConfig::get()->getValue( "Kinect.DepthInfoName" ).toStdString() + ".txt" );
				if ( !fout ) {
					qDebug() << "File not opened";
				}

				for ( int i = 0; i < depth.rows; i++ ) {
					for ( int j = 0; j < depth.cols; j++ ) {
						fout << depth.at<uint16_t>( i, j ) << "\t";
					}
					fout << "\n";
				}

				cv::normalize( depth, depth, 0, 255, CV_MINMAX, CV_8UC1 );
				// save depth frame
				cv::imwrite( file + "\\" + Util::ApplicationConfig::get()->getValue( "Kinect.DepthImageName" ).toStdString() + ".jpg", depth );

				fout.close();
				captureImage = false;
			}

#ifdef NITE2_FOUND
			// set parameters for cursor movement and speed
			kht->setCursorMovement( isCursorEnable );
			kht->setSpeedMovement( mSpeed );
			// read the hand frame, find gestures in it and build a "profile" for each
			kht->getAllGestures();
			kht->getAllHands();
#endif

#ifdef NITE2_FOUND
			/*			if ( kht->isTwoHands == true ) { //TODO must be two hands for green square mark hand in frame
							// convert hand coordinate
							coordinateConverter.convertWorldToDepth( m_depth, kht->getArrayHands[0][0], kht->getArrayHands[0][1], kht->handZ[0], &pDepth_x, &pDepth_y, &pDepth_z );
							coordinateConverter.convertWorldToDepth( m_depth, kht->getArrayHands[1][0], kht->getArrayHands[1][1], kht->handZ[1], &pDepth_x2, &pDepth_y2, &pDepth_z2 );

							pDepth_y = kht->handTrackerFrame.getDepthFrame().getHeight() - pDepth_y;
							pDepth_y2 = kht->handTrackerFrame.getDepthFrame().getHeight() - pDepth_y2;

							printf( "depth X, Y, Z: %f %f %f\n",pDepth_x,pDepth_y,pDepth_z );

							// green square for hand
							cv::Rect hand_rect;

							if ( pDepth_x < pDepth_x2 ) {
								hand_rect.x = pDepth_x;
							}
							else {
								hand_rect.x = pDepth_x2;
							}
							if ( pDepth_y < pDepth_y2 ) {
								hand_rect.y = pDepth_y;
							}
							else {
								hand_rect.y = pDepth_y2;
							}

							hand_rect.width = abs( pDepth_x - pDepth_x2 );
							hand_rect.height = abs( pDepth_y - pDepth_y2 ); //kht->handY[1] - kht->handY[0];

							rectangle( frame, hand_rect, CV_RGB( 0, 255,0 ), 3 );
						}
			            else {*/
			// one or two hands are being tracked
			if ( ( kht->numHandsTracking != 0 ) && ( kht->numHandsTracking < 3 ) ) {
				int numFingers[2];

				// calculate num fingers for every tracked hand
				for ( int i = 0; i < kht->numHandsTracking; i++ ) {
					bool mainHand = false;
					if ( i==0 ) {
						mainHand = true;
					}
					// calculate depth frame
					zoom->calcHandDepthFrame( frame,&m_depth,kht->getArrayHands[i][0], kht->getArrayHands[i][1], kht->handZ[i], mainHand );
					// calculate num of fingers
					numFingers[i] = zoom->DetectContour();
				}

				// cursor disabled => move graph
				if ( !isCursorEnable ) {
					//sliding - calculate main hand movement
					kht->getRotatingMove();

					// two hand gestures
					if ( kht->numHandsTracking == 2 ) {
						// if off hand is open -> move graph by main hand
						if ( numFingers[1] != 0 ) {
							// open main hand -> move by X/Y axis
							if ( numFingers[0] != 0 ) {
								kht->moveGraphByHand( );
							}
							// closed main hand -> move by Z axis
							else {
								kht->moveGraphByHandToDepth( zoom->previousZ - zoom->currentZ );
							}
						}
					}
					// one hand gestures
					else {
						// if hand not closed - rotate
						if ( numFingers[0] != 0 ) {
							line( frame, cv::Point2i( 30, 30 ), cv::Point2i( 30, 30 ), cv::Scalar( 0, 0, 0 ), 5 ,8 );
							if ( ( int )kht->slidingHand_x != 0 ) {
								putText( frame, kht->slidingHand_type, cvPoint( ( int )kht->slidingHand_x,( int )kht->slidingHand_y ), cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar( 0,0,250 ), 1, CV_AA );
								emit sendSliderCoords( ( kht->slidingHand_x/kht->handTrackerFrame.getDepthFrame().getWidth()-0.5 )*( -200 ),
													   ( kht->slidingHand_y/kht->handTrackerFrame.getDepthFrame().getHeight()-0.5 )*( 200 ),
													   ( kht->slidingHand_z/kht->handTrackerFrame.getDepthFrame().getHeight()-0.5 )*200 );
							}
						}
						// if hand is closed and zoom is enabled - compute zoom
						else if ( isZoomEnable ) {
							zoom->zoom();
						}
					}
				}
				// cursor enabled => move cursor
				else {
					// start highlighting neighbour nodes
					//nav->setSelectionMode( 2 );
					nav->navigate();

					// detect click gesture
					if ( wasTimerReset ) {
						// if main hand closed and timer inactive, start timer
						if ( numFingers[0] == 0 ) {
							emit signalClickTimerStart();
							// to prevent restart in cycle
							wasTimerReset = false;
						}
					}
					// timer already started, wait for the main hand to open again
					else {
						// if main hand is open
						if ( numFingers[0] > 3 ) {
							// if the timer is still active, stop it and perform the click gesture
							if ( clickTimer->isActive() ) {
								wasTimerReset = true;
								emit signalClickTimerStop();
							}
							// gesture has ended, reset the flag to enable the next gesture
							else {
								wasTimerReset = true;
							}
						}
					}
				}
			}
			//}
#endif
			// resize the frame, send it and sleep before the next one
			cv::resize( frame, frame, cv::Size( 320, 240 ), 0, 0, cv::INTER_LINEAR );
			emit pushImage( frame );
			msleep( 20 );
		}
	}
	delete zoom;
}
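
The capture branch above writes the raw 16-bit depth values to a text file, but a JPEG can only hold 8-bit data, hence the cv::normalize call before the second imwrite. A standalone version of that step (depth is assumed to be a 16-bit cv::Mat from the sensor; the output path is illustrative):

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

// depth holds 16-bit values in millimetres; scale to 0..255 before writing an 8-bit JPEG
cv::Mat depth8;
cv::normalize( depth, depth8, 0, 255, CV_MINMAX, CV_8UC1 );
cv::imwrite( "depth.jpg", depth8 );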
Example #5
QImage* Morphology::doClosing(const StructElement& se)
{
    QImage* image = closingHelper(*bufferImage[bufferCurrentIndex], se);
    pushImage(image);
    return image;
}
Example #6
QImage* Morphology::doErosion(const StructElement& se)
{
    QImage* image = erosionHelper(*bufferImage[bufferCurrentIndex], se);
    pushImage(image);
    return image;
}
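
Both helpers presumably implement classic morphology on a QImage: erosion shrinks bright regions, and closing (a dilation followed by an erosion) fills small holes. For comparison, the OpenCV equivalents of the two operations (src is an assumed 8-bit cv::Mat input):

#include <opencv2/imgproc/imgproc.hpp>

cv::Mat kernel = cv::getStructuringElement( cv::MORPH_RECT, cv::Size( 3, 3 ) );
cv::Mat eroded, closed;
cv::erode( src, eroded, kernel );                         // doErosion analogue
cv::morphologyEx( src, closed, cv::MORPH_CLOSE, kernel ); // doClosing: dilate, then erode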
Example #7
void OpenCV::OpenCVCore::createConnectionKinect()
{

	//for video sending
	QObject::connect( mOpencvWindow,
					  SIGNAL( sendImageKinect( bool ) ),
					  mThrKinect,
					  SLOT( setImageSend( bool ) ) );

	//send image to label
	QObject::connect( mThrKinect,
					  SIGNAL( pushImage( cv::Mat ) ),
					  mOpencvWindow,
					  SLOT( setLabel( cv::Mat ) ) );

	//start
	QObject::connect( mOpencvWindow,
					  SIGNAL( startKinect() ),
					  mThrKinect,
					  SLOT( start() ) );

	//stop
	QObject::connect( mOpencvWindow,
					  SIGNAL( stopKinect( bool ) ),
					  mThrKinect,
					  SLOT( setCancel( bool ) ) );

	// moving camera with gesture
	QObject::connect( mThrKinect,
					  SIGNAL( sendSliderCoords( float,float,float ) ),
					  AppCore::Core::getInstance( NULL )->getCoreWindow()->getCameraManipulator(),
					  SLOT( setRotationHeadKinect( float,float,float ) ) );

	//enable/disable cursor movement
	QObject::connect( mOpencvWindow,
					  SIGNAL( setMovementCursor( bool ) ),
					  mThrKinect,
					  SLOT( setCursorMovement( bool ) ) );

	//enable/disable zoom
	QObject::connect( mOpencvWindow,
					  SIGNAL( setZoom( bool ) ),
					  mThrKinect,
					  SLOT( setZoomUpdate( bool ) ) );

	//edit for speed movement
	QObject::connect( mOpencvWindow,
					  SIGNAL( sendSpeedKinect( double ) ),
					  mThrKinect,
					  SLOT( setSpeedKinect( double ) ) );

	// initialize Kinect
	QObject::connect( mOpencvWindow,
					  SIGNAL( inicializeKinect() ),
					  mThrKinect,
					  SLOT( inicializeKinect() ) );

	// close the OpenNI stream
	QObject::connect( mOpencvWindow,
					  SIGNAL( closeActionOpenni() ),
					  mThrKinect,
					  SLOT( closeActionOpenni() ) );

	// enable/disable sending the picture to marker detection
	QObject::connect( mOpencvWindow,
					  SIGNAL( setKinectMarkerDetection( bool ) ),
					  mThrKinect,
					  SLOT( setImageSendToMarkerDetection( bool ) ) );

	// send image to marker detection
	QObject::connect( mThrKinect,
					  SIGNAL( pushImageToMarkerDetection( cv::Mat ) ),
					  mThrAruco,
					  SLOT( detectMarkerFromImage( cv::Mat ) ) );

	// send the augmented image created from the Kinect stream
	QObject::connect( mThrAruco,
					  SIGNAL( pushImageFromKinect( cv::Mat ) ),
					  mOpencvWindow,
					  SLOT( setLabel( cv::Mat ) ) );

	// enable single-frame capture to disk
	QObject::connect( mOpencvWindow,
					  SIGNAL( setKinectCaptureImage( bool ) ),
					  mThrKinect,
					  SLOT( setCaptureImage( bool ) ) );
}
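
All of these connections use the run-time-checked SIGNAL/SLOT string macros, so a typo only surfaces as a warning at run time. Qt 5's pointer-to-member syntax verifies the same connection at compile time; a sketch of the first connection in that style (the window class name QOpenCV::OpenCVWindow is an assumption; KinectThread matches Example #4):

QObject::connect( mOpencvWindow, &QOpenCV::OpenCVWindow::sendImageKinect,
                  mThrKinect,    &Kinect::KinectThread::setImageSend );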