/**
 * Draws a small arrowhead at the tip of the vertical ray.
 *
 * @param painter       painter already set up by the caller (assumed non-null)
 * @param targetY       scene y-coordinate of the arrow tip
 * @param isShootingUp  true if the ray points up, false if it points down
 *
 * The arrowhead is sized in device (viewport) pixels so it stays the same
 * size at any zoom level; points are therefore mapped viewport <-> scene.
 * For an infinite downward ray the tip is nudged back inside the visible
 * viewport (and above the horizontal scroll bar, if one is showing) so the
 * arrowhead is not clipped by the view edge.
 */
void VerticalRayGraphicsItem::drawArrowhead( QPainter* painter, double targetY, bool isShootingUp )
{
    // Need an attached view to convert between scene and viewport coords.
    if ( this->scene( ) == nullptr || this->scene( )->views( ).isEmpty( ) )
    {
        return;
    }
    QGraphicsView* view = this->scene( )->views( ).first( );

    QPointF arrowTip( this->m_source.x( ), targetY );
    QPoint pt = view->mapFromScene( arrowTip );

    // Keep the tip of an infinite downward ray inside the viewport.
    if ( ! isShootingUp && this->m_isInfinite )
    {
        const int margin = 5; // pixels between the tip and the view edge
        if ( view->horizontalScrollBar( ) != nullptr
             && view->horizontalScrollBar( )->isVisible( ) )
        {
            // Lift the tip above the scroll bar as well.
            pt.setY( pt.y( ) - view->horizontalScrollBar( )->height( ) - margin );
        }
        else
        {
            pt.setY( pt.y( ) - margin );
        }
        arrowTip = view->mapToScene( pt ); // hoisted out of both branches
    }

    // Barbs open away from the direction of travel: below the tip for an
    // upward ray (dy = +1), above it for a downward ray (dy = -1).
    const int halfWidth = 3; // arrowhead half-width in device pixels
    const int dy = isShootingUp ? 1 : -1;
    QPoint leftPt( pt.x( ) - halfWidth, pt.y( ) + halfWidth*dy );
    QPoint rightPt( pt.x( ) + halfWidth, pt.y( ) + halfWidth*dy );

    QPointF left = view->mapToScene( leftPt );
    QPointF right = view->mapToScene( rightPt );
    QLineF leftEdge( left, arrowTip );
    QLineF rightEdge( arrowTip, right );
    painter->drawLine( leftEdge );
    painter->drawLine( rightEdge );
}
/**
 * @function detectEyes
 * - Uses OpenCV to detect face
 * - Interpolate eyes position on image
 * - Computes eyes position in space
 * - Add some display for the detection
 *
 * @param image  BGR webcam frame
 * @return the input frame, annotated when display is enabled
 *
 * Reads file-level globals: face_cascade, minFaceSize, camWidth, camHeight,
 * f (focal length), eyesGap, bDisplayCam, bDisplayDetection.
 * Updates globals glCamX / glCamY / glCamZ with a 50/50 exponential smoothing.
 */
cv::Mat detectEyes(cv::Mat image)
{
    // INIT
    std::vector<cv::Rect> faces;
    cv::Mat image_gray;
    cv::cvtColor( image, image_gray, CV_BGR2GRAY );
    cv::equalizeHist( image_gray, image_gray );

    // DETECT FACE
    //-- Find bigger face (opencv documentation)
    face_cascade.detectMultiScale( image_gray, faces, 1.1, 2,
                                   0|CV_HAAR_SCALE_IMAGE|CV_HAAR_FIND_BIGGEST_OBJECT,
                                   cv::Size(minFaceSize, minFaceSize) );

    for( size_t i = 0; i < faces.size(); i++ )
    {
        // DETECT EYES
        //-- points in pixel: eyes interpolated at ~37% down the face,
        //   30% / 70% across its width
        cv::Point leftEyePt( faces[i].x + faces[i].width*0.30,
                             faces[i].y + faces[i].height*0.37 );
        cv::Point rightEyePt( faces[i].x + faces[i].width*0.70,
                              faces[i].y + faces[i].height*0.37 );
        cv::Point eyeCenterPt( faces[i].x + faces[i].width*0.5, leftEyePt.y );

        //-- normalize with webcam internal parameters
        GLdouble normRightEye = (rightEyePt.x - camWidth/2)/f;
        GLdouble normLeftEye = (leftEyePt.x - camWidth/2)/f;
        GLdouble normCenterX = (eyeCenterPt.x - camWidth/2)/f;
        GLdouble normCenterY = (eyeCenterPt.y - camHeight/2)/f;

        //-- get space coordinates. Guard against a degenerate detection where
        //   both eye points project to the same column: the original division
        //   by zero would push inf/NaN into glCamZ, and the smoothing below
        //   would then never recover. Skip the position update for this frame.
        GLdouble eyeSpan = normRightEye - normLeftEye;
        if ( eyeSpan != 0.0 )
        {
            float tempZ = eyesGap/eyeSpan;
            // NOTE: tempX/tempY deliberately use the *previous* glCamZ,
            // matching the original update order.
            float tempX = normCenterX*glCamZ;
            float tempY = -normCenterY*glCamZ;

            //-- update cam coordinates (smoothing)
            glCamZ = (glCamZ*0.5) + (tempZ*0.5);
            glCamX = (glCamX*0.5) + (tempX*0.5);
            glCamY = (glCamY*0.5) + (tempY*0.5);
        }

        // DISPLAY
        if(bDisplayCam && bDisplayDetection)
        {
            //-- face rectangle
            cv::rectangle(image, faces[i], 1234);
            //-- face lines (horizontal at eye height, vertical at face center)
            cv::Point leftPt( faces[i].x, faces[i].y + faces[i].height*0.37 );
            cv::Point rightPt( faces[i].x + faces[i].width,
                               faces[i].y + faces[i].height*0.37 );
            cv::Point topPt( faces[i].x + faces[i].width*0.5, faces[i].y);
            cv::Point bottomPt( faces[i].x + faces[i].width*0.5,
                                faces[i].y + faces[i].height);
            cv::line(image, leftPt, rightPt, cv::Scalar( 0, 0, 0 ), 1, 1, 0);
            cv::line(image, topPt, bottomPt, cv::Scalar( 0, 0, 0 ), 1, 1, 0);
            //-- eyes circles (radius ~6% of face width)
            cv::circle(image, rightEyePt, 0.06*faces[i].width,
                       cv::Scalar( 255, 255, 255 ), 1, 8, 0);
            cv::circle(image, leftEyePt, 0.06*faces[i].width,
                       cv::Scalar( 255, 255, 255 ), 1, 8, 0);
            //-- eyes line & center
            cv::line(image, leftEyePt, rightEyePt, cv::Scalar( 0, 0, 255 ), 1, 1, 0);
            cv::circle(image, eyeCenterPt, 2, cv::Scalar( 0, 0, 255 ), 3, 1, 0);
        }
    }
    return image;
}