int main() { int nbimg = 0; // std::cout << "initialisation de Chehra..." << std::endl; // Chehra chehra; // std::cout << "done" << std::endl; VideoCapture vcap(0); if(!vcap.isOpened()){ std::cout << "FAIL!" << std::endl; return -1; } char key = 0; do{ std::ostringstream oss; oss << SAVEDPATH << "mire" << nbimg + 1 << ".png"; vector<int> compression_params; compression_params.push_back(CV_IMWRITE_PNG_COMPRESSION); compression_params.push_back(9); Mat img; vcap >> img; imshow( "webCamflux", img ); key = (char)waitKey(30); if(key == 32){ bool visageFound = detectMire(img, nbimg); if(visageFound == false) { std::cout << "Pas de detection !" << std::endl; } else { imwrite(oss.str(), img, compression_params); std::cout << "Image " << oss.str() << " saved" << std::endl; nbimg++; visageFound = false; } } } while (key != 27); return 0; }
void main() { bool patternfound = false; bool resetAuto = false; int nbImages = 0; double moyFinale = 0; bool detectionVisage = false; int nbrLoopSinceLastDetection = 0; int criticalValueOfLoopWithoutDetection = 15; std::cout << "initialisation de Chehra..." << std::endl; Chehra chehra; std::cout << "done" << std::endl; cv::Mat cameraMatrix, distCoeffs; cv::Mat rvecs, tvecs; std::vector<cv::Point2f> imagePoints; std::vector<cv::Point2f> pointsVisage2D; std::vector<cv::Point2f> moyPointsVisage2D; std::vector<cv::Point3f> pointsVisage3D; std::vector<cv::Point3f> visage; std::vector<double> distances; double moyDistances; std::vector<std::vector<cv::Point2f>> images; std::vector<cv::Mat> frames; double s = 10.0f; osg::Matrixd matrixS; // scale matrixS.set( s, 0, 0, 0, 0, s, 0, 0, 0, 0, s, 0, 0, 0, 0, 1); pointsVisage3D.push_back(cv::Point3f(90,0,-80)); pointsVisage3D.push_back(cv::Point3f(-90,0,-80)); pointsVisage3D.push_back(cv::Point3f(0,0,0)); pointsVisage3D.push_back(cv::Point3f(600,0,600)); pointsVisage3D.push_back(cv::Point3f(0,0,600)); pointsVisage3D.push_back(cv::Point3f(-600,0,600)); /* pointsVisage3D.push_back(cv::Point3f(13.1, -98.1,108.3)); // exterieur narine gauche pointsVisage3D.push_back(cv::Point3f(-13.1, -98.1,108.3)); // exterieur narine droite pointsVisage3D.push_back(cv::Point3f(0, -87.2, 124.2)); // bout du nez pointsVisage3D.push_back(cv::Point3f(44.4, -57.9, 83.7)); // exterieur oeil gauche pointsVisage3D.push_back(cv::Point3f(0, 55.4, 101.4)); // haut du nez, centre des yeux pointsVisage3D.push_back(cv::Point3f(-44.4, -57.9, 83.7)); // exterieur oeil droit */ cv::FileStorage fs("../rsc/intrinsicMatrix.yml", cv::FileStorage::READ); fs["cameraMatrix"] >> cameraMatrix; fs["distCoeffs"] >> distCoeffs; double f = (cameraMatrix.at<double>(0, 0) + cameraMatrix.at<double>(1, 1)) / 2; // NEAR = distance focale ; si pixels carrés, fx = fy -> np //mais est généralement différent de fy donc on prend (pour l'instant) par défaut la valeur médiane double g = 
2000 * f; // je sais pas pourquoi. au pif. fs.release(); cv::VideoCapture vcap(0); if(!vcap.isOpened()) { std::cout << "FAIL!" << std::endl; return; } cv::Mat *frame = new cv::Mat(cv::Mat::zeros(vcap.get(CV_CAP_PROP_FRAME_HEIGHT), vcap.get(CV_CAP_PROP_FRAME_WIDTH), CV_8UC3)); do { vcap >> *frame; }while(frame->empty()); osg::ref_ptr<osg::Image> backgroundImage = new osg::Image; backgroundImage->setImage(frame->cols, frame->rows, 3, GL_RGB, GL_BGR, GL_UNSIGNED_BYTE, (uchar*)(frame->data), osg::Image::AllocationMode::NO_DELETE, 1); // read the scene from the list of file specified commandline args. osg::ref_ptr<osg::Group> group = new osg::Group; osg::ref_ptr<osg::Geode> cam = createHUD(backgroundImage, vcap.get(CV_CAP_PROP_FRAME_WIDTH), vcap.get(CV_CAP_PROP_FRAME_HEIGHT), cameraMatrix.at<double>(0, 2), cameraMatrix.at<double>(1, 2), f); std::cout << "initialisation de l'objet 3D..." << std::endl; osg::ref_ptr<osg::Node> objet3D = osgDB::readNodeFile("../rsc/objets3D/brain.obj"); //osg::ref_ptr<osg::Node> objet3D = osgDB::readNodeFile("../rsc/objets3D/dumptruck.osgt"); //////////////////////////////////////////////////////////////////////////////////////////////////////////////// /* osg::Sphere* unitSphere = new osg::Sphere(osg::Vec3(0, -1000, 1000), 100.0); osg::ShapeDrawable* unitSphereDrawable = new osg::ShapeDrawable(unitSphere); osg::Geode* objet3D = new osg::Geode(); objet3D->addDrawable(unitSphereDrawable); */ //osg::StateSet* sphereStateset = unitSphereDrawable->getOrCreateStateSet(); //sphereStateset->setMode(GL_DEPTH_TEST,osg::StateAttribute::OFF); //sphereStateset->setMode(GL_LIGHTING, osg::StateAttribute::OFF); //////////////////////////////////////////////////////////////////////////////////////////////////////////////// std::cout << "done" << std::endl; osg::StateSet* obectStateset = objet3D->getOrCreateStateSet(); obectStateset->setMode(GL_LIGHTING,osg::StateAttribute::OFF); obectStateset->setMode(GL_DEPTH_TEST,osg::StateAttribute::OFF); 
osg::ref_ptr<osg::MatrixTransform> mat = new osg::MatrixTransform(); // construct the viewer. osgViewer::CompositeViewer compositeViewer; osgViewer::View* viewer = new osgViewer::View; osgViewer::View* viewer2 = new osgViewer::View; // add the HUD subgraph. group->addChild(cam); mat->addChild(objet3D); group->addChild(mat); osg::Matrixd projectionMatrix; projectionMatrix.makeFrustum( -cameraMatrix.at<double>(0, 2), vcap.get(CV_CAP_PROP_FRAME_WIDTH) - cameraMatrix.at<double>(0, 2), -cameraMatrix.at<double>(1, 2), vcap.get(CV_CAP_PROP_FRAME_HEIGHT) - cameraMatrix.at<double>(1, 2), f, g); osg::Vec3d eye(0.0f, 0.0f, 0.0f), target(0.0f, g, 0.0f), normal(0.0f, 0.0f, 1.0f); // set the scene to render viewer->setSceneData(group.get()); viewer->setUpViewInWindow(0, 0, 1920 / 2, 1080 / 2); viewer->getCamera()->setProjectionMatrix(projectionMatrix); viewer->getCamera()->setViewMatrixAsLookAt(eye, target, normal); viewer2->setSceneData(group.get()); viewer2->setUpViewInWindow(1920 / 2, 0, 1920 / 2, 1080 / 2); viewer2->getCamera()->setProjectionMatrix(projectionMatrix); osg::Vec3d eye2(4 * f, 3 * f / 2, 0.0f), target2(0.0f, f, 0.0f), normal2(0.0f, 0.0f, 1.0f); viewer2->getCamera()->setViewMatrixAsLookAt(eye2, target2, normal2); compositeViewer.addView(viewer2); compositeViewer.addView(viewer); compositeViewer.realize(); // set up windows and associated threads. 
do { patternfound = false; resetAuto = false; detectionVisage = false; moyPointsVisage2D.clear(); pointsVisage2D.clear(); visage.clear(); moyDistances = 0; distances.clear(); std::cout << "recherche de pattern" << std::endl << "nbr images sauvegardees : " << images.size() << std::endl; vcap >> *frame; frames.push_back(*frame); detectionVisage = detecterVisage(frame, &chehra, &pointsVisage2D, &visage); if(detectionVisage) { images.push_back(pointsVisage2D); nbrLoopSinceLastDetection = 0; group->addChild(mat); } else nbrLoopSinceLastDetection++; if((images.size() > NBRSAVEDIMAGES || nbrLoopSinceLastDetection > criticalValueOfLoopWithoutDetection) && !images.empty()) images.erase(images.begin()); if(images.empty()) group->removeChild(mat); else { //cv::cornerSubPix(*frame, pointsVisage2D, cv::Size(5, 5), cv::Size(-1, -1), cv::TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1)); for(int i = 0; i < NBRFACEPOINTSDETECTED; i++) { cv::Point2f coordonee(0.0f, 0.0f); for(int j = 0; j < images.size(); j++) { coordonee.x += images[j][i].x; coordonee.y += images[j][i].y; } coordonee.x /= images.size(); coordonee.y /= images.size(); moyPointsVisage2D.push_back(coordonee); } cv::solvePnP(pointsVisage3D, moyPointsVisage2D, cameraMatrix, distCoeffs, rvecs, tvecs); cv::Mat rotVec(3, 3, CV_64F); cv::Rodrigues(rvecs, rotVec); imagePoints = dessinerPoints(frame, pointsVisage3D, rotVec, tvecs, cameraMatrix, distCoeffs); double t3 = tvecs.at<double>(2, 0); double t1 = tvecs.at<double>(0, 0); double t2 = tvecs.at<double>(1, 0) + t3 / 27.5; // and now, magic ! 
double r11 = rotVec.at<double>(0, 0); double r12 = rotVec.at<double>(0, 1); double r13 = rotVec.at<double>(0, 2); double r21 = rotVec.at<double>(1, 0); double r22 = rotVec.at<double>(1, 1); double r23 = rotVec.at<double>(1, 2); double r31 = rotVec.at<double>(2, 0); double r32 = rotVec.at<double>(2, 1); double r33 = rotVec.at<double>(2, 2); osg::Matrixd matrixR; // rotation (transposee de rotVec) matrixR.set( r11, r21, r31, 0, r12, r22, r32, 0, r13, r23, r33, 0, 0, 0, 0, 1); osg::Matrixd matrixT; // translation matrixT.makeTranslate(t1, t2, t3); osg::Matrixd matrix90; // rotation de repere entre opencv et osg matrix90.makeRotate(osg::Quat(osg::DegreesToRadians(-90.0f), osg::Vec3d(1.0, 0.0, 0.0))); mat->setMatrix(matrixS * matrixR * matrixT * matrix90); // Calcul d'erreur de reprojection double moy = 0; for(int i = 0; i < pointsVisage2D.size(); i++) { double d = sqrt(pow(pointsVisage2D[i].y - imagePoints[i].y, 2) + pow(pointsVisage2D[i].x - imagePoints[i].x, 2)); distances.push_back(d); moy += d; } moyDistances = moy / pointsVisage2D.size(); if(moyDistances > 2) // si l'ecart de reproj est trop grand, reset resetAuto = true; } backgroundImage->dirty(); compositeViewer.frame(); }while(!compositeViewer.done()); }
void main() { bool patternfound = false; bool reset = false; bool resetAuto = false; int nbImages = 0; double moyFinale = 0; cv::TermCriteria termcrit(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03); cv::Size winSize(31, 31); cv::Mat cameraMatrix, distCoeffs; cv::Mat imCalib; cv::Mat imCalibColor; cv::Mat imCalibNext; cv::Mat rvecs, tvecs; std::vector<cv::Point2f> imagePoints; std::vector<cv::Point3f> objectPoints; std::vector<cv::Point3f> cubeObjectPoints; std::vector<std::vector<cv::Point2f>> chessCornersInit(2); std::vector<cv::Point3f> chessCorners3D; std::vector<double> distances; double moyDistances; // Creation des points a projeter for(int x = 0; x < COLCHESSBOARD; x++) for(int y = 0; y < ROWCHESSBOARD; y++) objectPoints.push_back(cv::Point3f(x * 26.0f, y * 26.0f, 0.0f)); // Creation des points a projeter cubeObjectPoints.push_back(cv::Point3f(52, 26, 0)); cubeObjectPoints.push_back(cv::Point3f(156, 26, 0)); cubeObjectPoints.push_back(cv::Point3f(156, 128, 0)); cubeObjectPoints.push_back(cv::Point3f(52, 128, 0)); cubeObjectPoints.push_back(cv::Point3f(52, 26, 104)); cubeObjectPoints.push_back(cv::Point3f(156, 26, 104)); cubeObjectPoints.push_back(cv::Point3f(156, 128, 104)); cubeObjectPoints.push_back(cv::Point3f(52, 128, 104)); // Creation des coins de la mire for(int x = 0; x < COLCHESSBOARD; x++) for(int y = 0; y < ROWCHESSBOARD; y++) chessCorners3D.push_back(cv::Point3f(x * 26.0f, y * 26.0f, 0.0f)); cv::FileStorage fs("../rsc/intrinsicMatrix.yml", cv::FileStorage::READ); fs["cameraMatrix"] >> cameraMatrix; fs["distCoeffs"] >> distCoeffs; fs.release(); cv::VideoCapture vcap(0); if(!vcap.isOpened()){ std::cout << "FAIL!" 
<< std::endl; return; } cv::Mat *frame = new cv::Mat(cv::Mat::zeros(vcap.get(CV_CAP_PROP_FRAME_HEIGHT), vcap.get(CV_CAP_PROP_FRAME_WIDTH), CV_8UC3)); do { vcap >> *frame; }while(frame->empty()); osg::ref_ptr<osg::Image> backgroundImage = new osg::Image; backgroundImage->setImage(frame->cols, frame->rows, 3, GL_RGB, GL_BGR, GL_UNSIGNED_BYTE, (uchar*)(frame->data), osg::Image::AllocationMode::NO_DELETE, 1); // read the scene from the list of file specified commandline args. osg::ref_ptr<osg::Group> group = new osg::Group; osg::ref_ptr<osg::Node> objet3D = osgDB::readNodeFile("dumptruck.osgt"); osg::ref_ptr<osg::Camera> cam = createHUD(backgroundImage); osg::ref_ptr<osg::PositionAttitudeTransform> pat = new osg::PositionAttitudeTransform; osg::ref_ptr<osg::PositionAttitudeTransform> pat2 = new osg::PositionAttitudeTransform; // construct the viewer. osgViewer::Viewer viewer; // add the HUD subgraph. group->addChild(cam); pat->addChild(pat2); pat2->addChild(objet3D); group->addChild(pat); // set the scene to render viewer.setSceneData(group.get()); viewer.realize(); // set up windows and associated threads. 
char key = 0; bool detectionMire = false; cv::Mat Rc, C = cv::Mat(3, 1, CV_64F), rotVecInv; pat->setScale(osg::Vec3d(0.08, 0.08, 0.08)); pat->setAttitude(osg::Quat(osg::DegreesToRadians(180.0), osg::Vec3d(1.0, 0.0, 0.0))); pat2->setPosition(osg::Vec3d(15.0, 4.0, 5.0)); /* // projection double fx = cameraMatrix.at<double>(0, 0); double fy = cameraMatrix.at<double>(1, 1); double cx = cameraMatrix.at<double>(1, 2); double cy = cameraMatrix.at<double>(1, 0); double W = (double)frame->cols; double H = (double)frame->rows; double near = .1; double far = 100.0; osg::Matrixd projectionMatrix; projectionMatrix.set( 2 * fx / W, 0, 0, 0, 0, 2 * fy / H, 0, 0, 2 * (cx / W) - 1, 2 * (cy - H) - 1, (far + near) / (far - near), 1, 0, 0, 2 * far * near / (near - far), 0); projectionMatrix.set( 2 * fx / W, 0, 0, 0, 0, 2 * fy / H, 0, 0, 2 * (cx / W) - 1, 2 * (cy / H) - 1, (far + near) / (far - near), 1, 0, 0, 2 * far * near / (near - far), 0); viewer.getCamera()->setProjectionMatrix(projectionMatrix);*/ do { patternfound = false; resetAuto = false; detectionMire = false; imagePoints.clear(); chessCornersInit[0].clear(); chessCornersInit[1].clear(); moyDistances = 0; distances.clear(); imCalibNext.release(); group->removeChild(pat); std::cout << "recherche de mire" << std::endl; do { vcap >> *frame; backgroundImage->dirty(); detectionMire = detecterMire(frame, &chessCornersInit[1], &imCalibNext); viewer.frame(); }while(!detectionMire && !viewer.done()); if(viewer.done()) break; std::cout << "mire detectee" << std::endl << std::endl; group->addChild(pat); do { vcap >> *frame; cv::Mat rotVec = trackingMire(frame, &imCalibNext, &chessCornersInit, &chessCorners3D, &cameraMatrix, &distCoeffs, &tvecs); imagePoints = dessinerPoints(frame, objectPoints, rotVec, tvecs, cameraMatrix, distCoeffs); cv::transpose(rotVec, Rc); cv::invert(rotVec, rotVecInv); for(int i = 0; i < 3; i++) C.at<double>(i, 0) = -1 * ( rotVecInv.at<double>(i, 0) * tvecs.at<double>(0, 0) + rotVecInv.at<double>(i, 1) * 
tvecs.at<double>(1, 0) + rotVecInv.at<double>(i, 2) * tvecs.at<double>(2, 0)); osg::Matrixd viewMatrixR, viewMatrixT, viewMatrix90; viewMatrixT.makeTranslate( -C.at<double>(0, 0) / 100, C.at<double>(1, 0) / 100, C.at<double>(2, 0) / 100); double r11 = rotVec.at<double>(0, 0); double r21 = rotVec.at<double>(1, 0); double r31 = rotVec.at<double>(2, 0); double r32 = rotVec.at<double>(2, 1); double r33 = rotVec.at<double>(2, 2); viewMatrixR.makeRotate( atan2(r32, r33), osg::Vec3d(1.0, 0.0, 0.0), -atan2(-r31, sqrt((r32 * r32) + (r33 * r33))), osg::Vec3d(0.0, 1.0, 0.0), -atan2(r21, r11), osg::Vec3d(0.0, 0.0, 1.0)); viewMatrix90.makeRotate(osg::DegreesToRadians(-90.0), osg::Vec3d(1.0, 0.0, 0.0)); viewer.getCamera()->setViewMatrix(viewMatrixT * viewMatrixR); // Calcul d'erreur de reprojection double moy = 0; for(int j = 0; j < COLCHESSBOARD * ROWCHESSBOARD; j++) { double d = sqrt(pow(chessCornersInit[0][j].y - imagePoints[j].y, 2) + pow(chessCornersInit[0][j].x - imagePoints[j].x, 2)); distances.push_back(d); moy += d; } moyDistances = moy / (COLCHESSBOARD * ROWCHESSBOARD); if(moyDistances > 2) // si l'ecart de reproj est trop grand, reset resetAuto = true; key = cv::waitKey(33); backgroundImage->dirty(); viewer.frame(); }while(!viewer.done() && !resetAuto && key != 32); }while(!viewer.done()); }
void main() { bool patternfound = false; bool reset = false; bool resetAuto = false; cv::TermCriteria termcrit(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03); cv::Size winSize(31, 31); cv::Mat cameraMatrix, distCoeffs; cv::Mat imCalib; cv::Mat imCalibColor; cv::Mat imCalibNext; cv::Mat rvecs, tvecs; std::vector<cv::Point2f> imagePoints; std::vector<cv::Point3f> objectPoints; std::vector<cv::Point3f> cubeObjectPoints; std::vector<std::vector<cv::Point2f>> chessCornersInit(2); std::vector<cv::Point3f> chessCorners3D; std::vector<double> distances; double moyDistances; // Creation des points a projeter for(int x = 0; x < COLCHESSBOARD; x++) for(int y = 0; y < ROWCHESSBOARD; y++) objectPoints.push_back(cv::Point3f(x * 26.0f, y * 26.0f, 0.0f)); // Creation des coins de la mire for(int x = 0; x < COLCHESSBOARD; x++) for(int y = 0; y < ROWCHESSBOARD; y++) chessCorners3D.push_back(cv::Point3f(x * 26.0f, y * 26.0f, 0.0f)); cv::FileStorage fs("../rsc/intrinsicMatrix.yml", cv::FileStorage::READ); fs["cameraMatrix"] >> cameraMatrix; fs["distCoeffs"] >> distCoeffs; fs.release(); cv::VideoCapture vcap(0); if(!vcap.isOpened()){ std::cout << "FAIL!" << std::endl; return; } cv::Mat *frame = new cv::Mat(cv::Mat::zeros(vcap.get(CV_CAP_PROP_FRAME_HEIGHT), vcap.get(CV_CAP_PROP_FRAME_WIDTH), CV_8UC3)); do { vcap >> *frame; }while(frame->empty()); osg::ref_ptr<osg::Image> backgroundImage = new osg::Image; backgroundImage->setImage(frame->cols, frame->rows, 3, GL_RGB, GL_BGR, GL_UNSIGNED_BYTE, (uchar*)(frame->data), osg::Image::AllocationMode::NO_DELETE, 1); // read the scene from the list of file specified commandline args. 
osg::ref_ptr<osg::Group> group = new osg::Group; osg::ref_ptr<osg::Node> objet3D; objet3D = osgDB::readNodeFile("dumptruck.osgt"); osg::ref_ptr<osg::Camera> cam = createHUD(backgroundImage); osgViewer::Viewer viewer; group->addChild(cam); group->addChild(objet3D); // set the scene to render viewer.setSceneData(group.get()); // projection viewer.getCamera()->setProjectionMatrixAsPerspective( 40., 1., 1., 100. ); // Create a matrix to specify a distance from the viewpoint. osg::Matrix trans; trans.makeTranslate( 7, 0., -50. ); // Rotation angle (in radians) double angle( 0. ); char key = 0; bool detectionMire = false; do { patternfound = false; resetAuto = false; detectionMire = false; imagePoints.clear(); chessCornersInit[0].clear(); chessCornersInit[1].clear(); moyDistances = 0; distances.clear(); imCalibNext.release(); group->removeChild(objet3D); std::cout << "recherche de mire" << std::endl; do { vcap >> *frame; backgroundImage->dirty(); detectionMire = detecterMire(frame, &chessCornersInit[1], &imCalibNext); viewer.frame(); }while(!detectionMire && !viewer.done()); if(viewer.done()) break; std::cout << "mire detectee" << std::endl << std::endl; group->addChild(objet3D); do { vcap >> *frame; cv::Mat rotVec = trackingMire(frame, &imCalibNext, &chessCornersInit, &chessCorners3D, &cameraMatrix, &distCoeffs, &tvecs); imagePoints = dessinerPoints(frame, objectPoints, rotVec, tvecs, cameraMatrix, distCoeffs); // Create the rotation matrix. osg::Matrix rot; rot.makeRotate( angle, osg::Vec3( 1., 0., 0. ) ); angle += 0.01; // Set the view matrix (the concatenation of the rotation and // translation matrices). 
viewer.getCamera()->setViewMatrix( rot * trans ); double moy = 0; for(int j = 0; j < COLCHESSBOARD * ROWCHESSBOARD; j++) { double d = sqrt(pow(chessCornersInit[0][j].y - imagePoints[j].y, 2) + pow(chessCornersInit[0][j].x - imagePoints[j].x, 2)); distances.push_back(d); moy += d; } moyDistances = moy / (COLCHESSBOARD * ROWCHESSBOARD); if(moyDistances > 2) // si l'ecart de reproj est trop grand, reset resetAuto = true; key = cv::waitKey(33); // Draw the next frame. backgroundImage->dirty(); viewer.frame(); }while(!viewer.done() && !resetAuto && key != 32); } while(!viewer.done()); }
int main() { time_t timer = 0; time_t start = clock(); time_t startImage = 0; std::cout << "Debut projection\t" << std::endl; bool patternfound = false; int i = 0; cv::TermCriteria termcrit(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03); cv::Size winSize(31,31); cv::Mat cameraMatrix, distCoeffs; cv::Mat imCalib; cv::Mat imCalibColor; cv::Mat imCalibNext; cv::Mat rvecs, tvecs; std::vector<cv::Point2f> imagePoints; std::vector<cv::Point3f> objectPoints; std::vector<cv::Point2f> chessCornersInit[2]; std::vector<cv::Point3f> chessCorners3D; // Creation des points a projeter objectPoints.push_back(cv::Point3f(50,25,0)); objectPoints.push_back(cv::Point3f(150,25,0)); objectPoints.push_back(cv::Point3f(150,125,0)); objectPoints.push_back(cv::Point3f(50,125,0)); objectPoints.push_back(cv::Point3f(50,25,100)); objectPoints.push_back(cv::Point3f(150,25,100)); objectPoints.push_back(cv::Point3f(150,125,100)); objectPoints.push_back(cv::Point3f(50,125,100)); // Creation des coins de la mire for(int x=0 ; x<COLCHESSBOARD ; x++) for(int y=0 ; y<ROWCHESSBOARD ; y++) chessCorners3D.push_back(cv::Point3f(x*26.0f,y*26.0f,0.0f)); cv::FileStorage fs("../rsc/intrinsicMatrix.yml", cv::FileStorage::READ); fs["cameraMatrix"] >> cameraMatrix; fs["distCoeffs"] >> distCoeffs; fs.release(); cv::VideoCapture vcap("../rsc/capture.avi"); if(!vcap.isOpened()){ std::cout << "FAIL!" 
<< std::endl; return -1; } do{ vcap >> imCalibColor; cv::imshow("Projection", imCalibColor); cv::cvtColor(imCalibColor, imCalib, CV_BGR2GRAY); cv::waitKey(); timer = clock(); startImage = clock(); patternfound = cv::findChessboardCorners(imCalib, cv::Size(ROWCHESSBOARD, COLCHESSBOARD), chessCornersInit[0], cv::CALIB_CB_FAST_CHECK); std::cout << "findChessboardCorners\t" << float(clock()-timer)/CLOCKS_PER_SEC << " sec" << std::endl; timer = clock(); } while(!patternfound); for(;;) { vcap >> imCalibColor; if(!imCalibNext.empty()) { cv::swap(imCalib, imCalibNext); // copie de l'ancienne image pour le flot optique for(size_t c = 0; c < chessCornersInit[0].size(); c++) chessCornersInit[0][c] = chessCornersInit[1][c]; chessCornersInit[1].clear(); } else cv::cornerSubPix(imCalib, chessCornersInit[0], cv::Size(5, 5), cv::Size(-1, -1), cv::TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1)); cv::cvtColor(imCalibColor, imCalibNext, CV_BGR2GRAY); std::vector<uchar> status; std::vector<float> err; cv::calcOpticalFlowPyrLK(imCalib, imCalibNext, chessCornersInit[0], chessCornersInit[1], status, err, winSize, 3, termcrit, 0, 0.0001); cv::solvePnP(chessCorners3D, chessCornersInit[0], cameraMatrix, distCoeffs, rvecs, tvecs); cv::Mat rotVec(3, 3, CV_64F); cv::Rodrigues(rvecs, rotVec); //Projection cv::projectPoints(objectPoints, rotVec, tvecs, cameraMatrix, distCoeffs, imagePoints); // Dessin des points projetes cv::line(imCalibColor, imagePoints[0], imagePoints[4], cv::Scalar(255,255,0), 2, 8); cv::line(imCalibColor, imagePoints[1], imagePoints[5], cv::Scalar(255,255,0), 2, 8); cv::line(imCalibColor, imagePoints[2], imagePoints[6], cv::Scalar(255,255,0), 2, 8); cv::line(imCalibColor, imagePoints[3], imagePoints[7], cv::Scalar(255,255,0), 2, 8); cv::line(imCalibColor, imagePoints[0], imagePoints[1], cv::Scalar(255,0,255), 2, 8); cv::line(imCalibColor, imagePoints[1], imagePoints[2], cv::Scalar(255,0,255), 2, 8); cv::line(imCalibColor, imagePoints[2], imagePoints[3], 
cv::Scalar(255,0,255), 2, 8); cv::line(imCalibColor, imagePoints[3], imagePoints[0], cv::Scalar(255,0,255), 2, 8); cv::line(imCalibColor, imagePoints[4], imagePoints[5], cv::Scalar(0,255,255), 2, 8); cv::line(imCalibColor, imagePoints[5], imagePoints[6], cv::Scalar(0,255,255), 2, 8); cv::line(imCalibColor, imagePoints[6], imagePoints[7], cv::Scalar(0,255,255), 2, 8); cv::line(imCalibColor, imagePoints[7], imagePoints[4], cv::Scalar(0,255,255), 2, 8); cv::imshow("Projection", imCalibColor); cv::waitKey(67); } return 0; }
// Builds a QFont from a JS object (e.g. { family: "...", pointSize: 12, ... })
// by probing each supported font property on the QV4 object.
// *ok (if non-null) is set to true as soon as at least one property has the
// expected JS type; it stays false when the handle is not an object or no
// property matched.
static QFont fontFromObject(QQmlV4Handle object, QV4::ExecutionEngine *v4, bool *ok)
{
    if (ok)
        *ok = false;
    QFont retn;
    QV4::Scope scope(v4);
    QV4::ScopedObject obj(scope, object);
    if (!obj) {
        if (ok)
            *ok = false;  // not a JS object: return the default-constructed font
        return retn;
    }

    // Fetch every candidate property up front.  Each ScopedValue keeps its
    // result rooted for the duration of the scope.
    QV4::ScopedString s(scope);
    QV4::ScopedValue vbold(scope, obj->get((s = v4->newString(QStringLiteral("bold")))));
    QV4::ScopedValue vcap(scope, obj->get((s = v4->newString(QStringLiteral("capitalization")))));
    QV4::ScopedValue vfam(scope, obj->get((s = v4->newString(QStringLiteral("family")))));
    QV4::ScopedValue vstyle(scope, obj->get((s = v4->newString(QStringLiteral("styleName")))));
    QV4::ScopedValue vital(scope, obj->get((s = v4->newString(QStringLiteral("italic")))));
    QV4::ScopedValue vlspac(scope, obj->get((s = v4->newString(QStringLiteral("letterSpacing")))));
    QV4::ScopedValue vpixsz(scope, obj->get((s = v4->newString(QStringLiteral("pixelSize")))));
    QV4::ScopedValue vpntsz(scope, obj->get((s = v4->newString(QStringLiteral("pointSize")))));
    QV4::ScopedValue vstrk(scope, obj->get((s = v4->newString(QStringLiteral("strikeout")))));
    QV4::ScopedValue vundl(scope, obj->get((s = v4->newString(QStringLiteral("underline")))));
    QV4::ScopedValue vweight(scope, obj->get((s = v4->newString(QStringLiteral("weight")))));
    QV4::ScopedValue vwspac(scope, obj->get((s = v4->newString(QStringLiteral("wordSpacing")))));
    QV4::ScopedValue vhint(scope, obj->get((s = v4->newString(QStringLiteral("hintingPreference")))));
    QV4::ScopedValue vkerning(scope, obj->get((s = v4->newString(QStringLiteral("kerning")))));
    QV4::ScopedValue vshaping(scope, obj->get((s = v4->newString(QStringLiteral("preferShaping")))));

    // pull out the values, set ok to true if at least one valid field is given.
    if (vbold->isBoolean()) { retn.setBold(vbold->booleanValue()); if (ok) *ok = true; }
    // Enum-valued properties arrive as JS int32 and are cast to the Qt enum.
    if (vcap->isInt32()) { retn.setCapitalization(static_cast<QFont::Capitalization>(vcap->integerValue())); if (ok) *ok = true; }
    if (vfam->isString()) { retn.setFamily(vfam->toQString()); if (ok) *ok = true; }
    if (vstyle->isString()) { retn.setStyleName(vstyle->toQString()); if (ok) *ok = true; }
    if (vital->isBoolean()) { retn.setItalic(vital->booleanValue()); if (ok) *ok = true; }
    if (vlspac->isNumber()) { retn.setLetterSpacing(QFont::AbsoluteSpacing, vlspac->asDouble()); if (ok) *ok = true; }
    if (vpixsz->isInt32()) { retn.setPixelSize(vpixsz->integerValue()); if (ok) *ok = true; }
    if (vpntsz->isNumber()) { retn.setPointSize(vpntsz->asDouble()); if (ok) *ok = true; }
    if (vstrk->isBoolean()) { retn.setStrikeOut(vstrk->booleanValue()); if (ok) *ok = true; }
    if (vundl->isBoolean()) { retn.setUnderline(vundl->booleanValue()); if (ok) *ok = true; }
    if (vweight->isInt32()) { retn.setWeight(static_cast<QFont::Weight>(vweight->integerValue())); if (ok) *ok = true; }
    if (vwspac->isNumber()) { retn.setWordSpacing(vwspac->asDouble()); if (ok) *ok = true; }
    if (vhint->isInt32()) { retn.setHintingPreference(static_cast<QFont::HintingPreference>(vhint->integerValue())); if (ok) *ok = true; }
    if (vkerning->isBoolean()) { retn.setKerning(vkerning->booleanValue()); if (ok) *ok = true; }
    // preferShaping is expressed through the style strategy: shaping is on
    // unless the PreferNoShaping bit is set, so `true` clears the bit and
    // `false` sets it.
    // NOTE(review): unlike every other field, this branch does not set *ok —
    // confirm whether that is intentional.
    if (vshaping->isBoolean()) {
        bool enable = vshaping->booleanValue();
        if (enable)
            retn.setStyleStrategy(static_cast<QFont::StyleStrategy>(retn.styleStrategy() & ~QFont::PreferNoShaping));
        else
            retn.setStyleStrategy(static_cast<QFont::StyleStrategy>(retn.styleStrategy() | QFont::PreferNoShaping));
    }

    return retn;
}
void main() { bool patternfound = false; bool reset = false; bool resetAuto = false; int nbImages = 0; double moyFinale = 0; char key = 0; bool detectionMire = false; bool detectionVisage = false; int cpt = 0, moyCpt = 0, i = 0; std::cout << "initialisation de Chehra..." << std::endl; Chehra chehra; std::cout << "done" << std::endl; cv::TermCriteria termcrit(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03); cv::Size winSize(31, 31); cv::Mat cameraMatrix, distCoeffs; cv::Mat imCalib; cv::Mat imCalibColor; cv::Mat imCalibNext; cv::Mat rvecs, tvecs; cv::Mat Rc, C = cv::Mat(3, 1, CV_64F), rotVecInv; std::vector<cv::Point2f> imagePoints; std::vector<cv::Point3f> objectPoints; std::vector<cv::Point3f> cubeObjectPoints; std::vector<cv::Point3f> dessinPointsVisage; std::vector<std::vector<cv::Point2f>> chessCornersInit(2); std::vector<std::vector<cv::Point2f>> pointsVisageInit(2); std::vector<cv::Point3f> chessCorners3D; std::vector<cv::Point3f> pointsVisage3D; std::vector<cv::Point3f> visage; std::vector<double> distances; double moyDistances; // Creation des coins de la mire for(int x = 0; x < COLCHESSBOARD; x++) for(int y = 0; y < ROWCHESSBOARD; y++) chessCorners3D.push_back(cv::Point3f(x * SIZEMIRE, y * SIZEMIRE, 0.0f)); // Creation des points a projeter for(int x = 0; x < COLCHESSBOARD; x++) for(int y = 0; y < ROWCHESSBOARD; y++) objectPoints.push_back(cv::Point3f(x * SIZEMIRE, y * SIZEMIRE, 0.0f)); cv::FileStorage fs("../rsc/intrinsicMatrix.yml", cv::FileStorage::READ); fs["cameraMatrix"] >> cameraMatrix; fs["distCoeffs"] >> distCoeffs; double f = (cameraMatrix.at<double>(0, 0) + cameraMatrix.at<double>(1, 1)) / 2; // NEAR = distance focale ; si pixels carrés, fx = fy -> np //mais est généralement différent de fy donc on prend (pour l'instant) par défaut la valeur médiane double g = 2000 * f; // je sais pas pourquoi. au pif. fs.release(); cv::VideoCapture vcap(0); if(!vcap.isOpened()){ std::cout << "FAIL!" 
<< std::endl; return; } cv::Mat *frame = new cv::Mat(cv::Mat::zeros(vcap.get(CV_CAP_PROP_FRAME_HEIGHT), vcap.get(CV_CAP_PROP_FRAME_WIDTH), CV_8UC3)); do { vcap >> *frame; }while(frame->empty()); osg::ref_ptr<osg::Image> backgroundImage = new osg::Image; backgroundImage->setImage(frame->cols, frame->rows, 3, GL_RGB, GL_BGR, GL_UNSIGNED_BYTE, (uchar*)(frame->data), osg::Image::AllocationMode::NO_DELETE, 1); // read the scene from the list of file specified commandline args. osg::ref_ptr<osg::Group> group = new osg::Group; osg::ref_ptr<osg::Geode> cam = createHUD(backgroundImage, vcap.get(CV_CAP_PROP_FRAME_WIDTH), vcap.get(CV_CAP_PROP_FRAME_HEIGHT), cameraMatrix.at<double>(0, 2), cameraMatrix.at<double>(1, 2), f); std::cout << "initialisation de l'objet 3D..." << std::endl; osg::ref_ptr<osg::Node> objet3D = osgDB::readNodeFile("../rsc/objets3D/Creature.obj"); std::cout << "done" << std::endl; osg::StateSet* obectStateset = objet3D->getOrCreateStateSet(); obectStateset->setMode(GL_DEPTH_TEST,osg::StateAttribute::OFF); osg::ref_ptr<osg::MatrixTransform> mat = new osg::MatrixTransform(); osg::ref_ptr<osg::PositionAttitudeTransform> pat = new osg::PositionAttitudeTransform(); // construct the viewer. osgViewer::CompositeViewer compositeViewer; osgViewer::View* viewer = new osgViewer::View; osgViewer::View* viewer2 = new osgViewer::View; // add the HUD subgraph. 
group->addChild(cam); mat->addChild(objet3D); pat->addChild(mat); group->addChild(pat); pat->setScale(osg::Vec3d(3, 3, 3)); osg::Matrixd projectionMatrix; projectionMatrix.makeFrustum( -cameraMatrix.at<double>(0, 2), vcap.get(CV_CAP_PROP_FRAME_WIDTH) - cameraMatrix.at<double>(0, 2), -cameraMatrix.at<double>(1, 2), vcap.get(CV_CAP_PROP_FRAME_HEIGHT) - cameraMatrix.at<double>(1, 2), f, g); osg::Vec3d eye(0.0f, 0.0f, 0.0f), target(0.0f, g, 0.0f), normal(0.0f, 0.0f, 1.0f); // set the scene to render viewer->setSceneData(group.get()); viewer->setUpViewInWindow(0, 0, 1920 / 2, 1080 / 2); viewer->getCamera()->setProjectionMatrix(projectionMatrix); viewer->getCamera()->setViewMatrixAsLookAt(eye, target, normal); viewer2->setSceneData(group.get()); viewer2->setUpViewInWindow(1920 / 2, 0, 1920 / 2, 1080 / 2); viewer2->getCamera()->setProjectionMatrix(projectionMatrix); osg::Vec3d eye2(4 * f, 3 * f / 2, 0.0f), target2(0.0f, f, 0.0f), normal2(0.0f, 0.0f, 1.0f); viewer2->getCamera()->setViewMatrixAsLookAt(eye2, target2, normal2); compositeViewer.addView(viewer); compositeViewer.addView(viewer2); compositeViewer.realize(); // set up windows and associated threads. 
do { group->removeChild(pat); patternfound = false; resetAuto = false; detectionMire = false; detectionVisage = false; imagePoints.clear(); chessCornersInit[0].clear(); chessCornersInit[1].clear(); pointsVisageInit[0].clear(); pointsVisageInit[1].clear(); pointsVisage3D.clear(); dessinPointsVisage.clear(); visage.clear(); moyDistances = 0; distances.clear(); imCalibNext.release(); std::cout << "recherche de pattern" << std::endl; time_t start = clock(); double timer = 0; do { start = clock(); vcap >> *frame; backgroundImage->dirty(); //detectionMire = detecterMire(frame, &chessCornersInit[1], &imCalibNext); detectionVisage = detecterVisage(frame, &chehra, &pointsVisageInit[1], &visage, &pointsVisage3D, &imCalibNext); cpt++; double duree = (clock() - start)/(double) CLOCKS_PER_SEC; timer += duree; if(timer >= 1){ std::cout << cpt << " fps" << std::endl; moyCpt += cpt; timer = 0; duree = 0; i++; cpt = 0; start = clock(); } compositeViewer.frame(); }while(!detectionMire && !detectionVisage && !compositeViewer.done()); if(compositeViewer.done()) break; std::cout << "pattern detectee" << std::endl << std::endl; group->addChild(pat); do { start = clock(); vcap >> *frame; cv::Mat rotVec = trackingMire(frame, &imCalibNext, &pointsVisageInit, &pointsVisage3D, &cameraMatrix, &distCoeffs, &tvecs); //cv::Mat rotVec = trackingMire(frame, &imCalibNext, &chessCornersInit, &chessCorners3D, &cameraMatrix, &distCoeffs, &tvecs); //imagePoints = dessinerPoints(frame, objectPoints, rotVec, tvecs, cameraMatrix, distCoeffs); imagePoints = dessinerPoints(frame, pointsVisage3D, rotVec, tvecs, cameraMatrix, distCoeffs); double r11 = rotVec.at<double>(0, 0); double r21 = rotVec.at<double>(1, 0); double r31 = rotVec.at<double>(2, 0); double r32 = rotVec.at<double>(2, 1); double r33 = rotVec.at<double>(2, 2); osg::Matrixd matrixR; matrixR.makeRotate( atan2(r32, r33), osg::Vec3d(1.0, 0.0, 0.0), -atan2(-r31, sqrt((r32 * r32) + (r33 * r33))), osg::Vec3d(0.0, 0.0, 1.0), atan2(r21, r11), 
osg::Vec3d(0.0, 1.0, 0.0)); mat->setMatrix(matrixR); pat->setPosition(osg::Vec3d(tvecs.at<double>(0, 0), tvecs.at<double>(2, 0), -tvecs.at<double>(1, 0))); //std::cout << "x = " << tvecs.at<double>(0, 0) << " - y = " << tvecs.at<double>(1, 0) << " - z = " << tvecs.at<double>(2, 0) << std::endl; // Calcul d'erreur de reprojection double moy = 0; for(int j = 0; j < pointsVisageInit[1].size() ; j++) { double d = sqrt(pow(pointsVisageInit[0][j].y - imagePoints[j].y, 2) + pow(pointsVisageInit[0][j].x - imagePoints[j].x, 2)); distances.push_back(d); moy += d; } moyDistances = moy / pointsVisageInit[1].size(); if(moyDistances > 1) // si l'ecart de reproj est trop grand, reset resetAuto = true; double duree = (clock() - start)/(double) CLOCKS_PER_SEC; std::cout << (int)(1/duree) << " fps" << std::endl; moyCpt += (int)(1/duree); duree = 0; i++; backgroundImage->dirty(); compositeViewer.frame(); }while(!compositeViewer.done() && !resetAuto); }while(!compositeViewer.done()); std::cout << std::endl << "Moyenne des fps : " << moyCpt/i << std::endl; std::system("PAUSE"); }
int main() { cv::Mat imCalibColor; cv::Mat imCalibGray; cv::vector<cv::vector<cv::Point> > contours; cv::vector<cv::Vec4i> hierarchy; cv::vector<cv::Point2f> pointQR; cv::Mat imCalibNext; cv::Mat imQR; cv::vector<cv::Mat> tabQR; /*cv::vector<cv::Point2f> corners1; cv::vector<cv::Point2f> corners2; cv::vector<cv::Point2f> corners3; cv::vector<cv::Point2f> corners4; cv::vector<cv::Point2f> corners5;*/ double qualityLevel = 0.01; double minDistance = 10; int blockSize = 3; bool useHarrisDetector = false; double k = 0.04; int maxCorners = 600; int A = 0, B= 0, C= 0; char key; int mark; bool patternFound = false; cv::VideoCapture vcap("../rsc/capture2.avi"); for (int i = 1; i < 5; i++) { std::ostringstream oss; oss << "../rsc/QrCodes/QR" << i << ".jpg"; imQR = cv::imread(oss.str()); cv::cvtColor(imQR, imQR, CV_BGR2GRAY); std::cout<< "Bouh!!!!!!" << std::endl; tabQR.push_back(imQR); } do { while(imCalibColor.empty()) { vcap >> imCalibColor; } vcap >> imCalibColor; cv::Mat edges(imCalibColor.size(),CV_MAKETYPE(imCalibColor.depth(), 1)); cv::cvtColor(imCalibColor, imCalibGray, CV_BGR2GRAY); Canny(imCalibGray, edges, 100 , 200, 3); cv::findContours( edges, contours, hierarchy, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE); cv::imshow("pointInteret", imCalibColor); mark = 0; cv::vector<cv::Moments> mu(contours.size()); cv::vector<cv::Point2f> mc(contours.size()); for( int i = 0; i < contours.size(); i++ ) { mu[i] = moments( contours[i], false ); mc[i] = cv::Point2f( mu[i].m10/mu[i].m00 , mu[i].m01/mu[i].m00 ); } for( int i = 0; i < contours.size(); i++ ) { int k=i; int c=0; while(hierarchy[k][2] != -1) { k = hierarchy[k][2] ; c = c+1; } if(hierarchy[k][2] != -1) c = c+1; if (c >= 5) { if (mark == 0) A = i; else if (mark == 1) B = i; // i.e., A is already found, assign current contour to B else if (mark == 2) C = i; // i.e., A and B are already found, assign current contour to C mark = mark + 1 ; } } if (A !=0 && B !=0 && C!=0) { cv::Mat imagecropped = imCalibColor; cv::Rect 
ROI(280/*pointQR[0].x*/, 260/*pointQR[0].y*/, 253, 218); cv::Mat croppedRef(imagecropped, ROI); cv::cvtColor(croppedRef, imagecropped, CV_BGR2GRAY); cv::threshold(imagecropped, imagecropped, 180, 255, CV_THRESH_BINARY); pointQR.push_back(mc[A]); cv::circle(imCalibColor, cv::Point(pointQR[0].x, pointQR[0].y), 3, cv::Scalar(0, 0, 255), 1, 8, 0); pointQR.push_back(mc[B]); cv::circle(imCalibColor, cv::Point(pointQR[1].x, pointQR[1].y), 3, cv::Scalar(0, 0, 255), 1, 8, 0); pointQR.push_back(mc[C]); cv::circle(imCalibColor, cv::Point(pointQR[2].x, pointQR[2].y), 3, cv::Scalar(0, 0, 255), 1, 8, 0); cv::Point2f D(0.0f,0.0f); cv::Point2f E(0.0f,0.0f); cv::Point2f F(0.0f,0.0f); D.x = (mc[A].x + mc[B].x)/2; E.x = (mc[B].x + mc[C].x)/2; F.x = (mc[C].x + mc[A].x)/2; D.y = (mc[A].y + mc[B].y)/2; E.y = (mc[B].y + mc[C].y)/2; F.y = (mc[C].y + mc[A].y)/2; pointQR.push_back(D); cv::circle(imCalibColor, cv::Point(pointQR[3].x, pointQR[3].y), 3, cv::Scalar(0, 0, 255), 1, 8, 0); pointQR.push_back(E); cv::circle(imCalibColor, cv::Point(pointQR[4].x, pointQR[4].y), 3, cv::Scalar(0, 0, 255), 1, 8, 0); pointQR.push_back(F); cv::circle(imCalibColor, cv::Point(pointQR[5].x, pointQR[5].y), 3, cv::Scalar(0, 0, 255), 1, 8, 0); patternFound = true; std::cout << "patternfound" << std::endl; cv::SiftFeatureDetector detector; cv::vector<cv::KeyPoint> keypoints1, keypoints2; detector.detect(tabQR[3], keypoints1); detector.detect(imagecropped, keypoints2); cv::Ptr<cv::DescriptorExtractor> descriptor = cv::DescriptorExtractor::create("SIFT"); cv::Mat descriptors1, descriptors2; descriptor->compute(tabQR[3], keypoints1, descriptors1 ); descriptor->compute(imagecropped, keypoints2, descriptors2 ); cv::FlannBasedMatcher matcher; std::vector< cv::DMatch > matches; matcher.match( descriptors1, descriptors2, matches ); double max_dist = 0; double min_dist = 100; for( int i = 0; i < descriptors1.rows; i++ ) { double dist = matches[i].distance; if( dist < min_dist ) min_dist = dist; if( dist > max_dist ) 
max_dist = dist; } std::vector< cv::DMatch > good_matches; for( int i = 0; i < descriptors1.rows; i++ ) if( matches[i].distance <= 2*min_dist ) good_matches.push_back( matches[i]); cv::Mat imgout; drawMatches(tabQR[3], keypoints1, imagecropped, keypoints2, good_matches, imgout); std::vector<cv::Point2f> pt_img1; std::vector<cv::Point2f> pt_img2; for( int i = 0; i < (int)good_matches.size(); i++ ) { pt_img1.push_back(keypoints1[ good_matches[i].queryIdx ].pt ); pt_img2.push_back(keypoints2[ good_matches[i].trainIdx ].pt ); } cv::Mat H = findHomography( pt_img1, pt_img2, CV_RANSAC ); cv::Mat result; warpPerspective(tabQR[3],result,H,cv::Size(tabQR[3].cols+imagecropped.cols,tabQR[3].rows)); cv::Mat half(result,cv::Rect(0,0,imagecropped.cols,imagecropped.rows)); imagecropped.copyTo(half); imshow( "Result", result ); break; } key = (char)cv::waitKey(67); }while(patternFound != true && key != 27); if(patternFound) imCalibNext = imCalibColor; return patternFound; }
int main() { time_t timer = 0; time_t start = clock(); time_t startImage = 0; std::cout << "Debut projection\t" << std::endl; bool patternfound = false; bool reset = false; int i = 0; cv::TermCriteria termcrit(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03); cv::Size winSize(31, 31); cv::Mat cameraMatrix, distCoeffs; cv::Mat imCalib; cv::Mat imCalibColor; cv::Mat imCalibNext; cv::Mat rvecs, tvecs; std::vector<cv::Point2f> imagePoints; std::vector<cv::Point3f> objectPoints; std::vector<cv::Point3f> cubeObjectPoints; std::vector<std::vector<cv::Point2f>> chessCornersInit(2); std::vector<cv::Point3f> chessCorners3D; // Creation des points a projeter for(int x = 0; x < COLCHESSBOARD; x++) for(int y = 0; y < ROWCHESSBOARD; y++) objectPoints.push_back(cv::Point3f(x * 26.0f, y * 26.0f, 0.0f)); // Creation des points a projeter cubeObjectPoints.push_back(cv::Point3f(52, 26, 0)); cubeObjectPoints.push_back(cv::Point3f(156, 26, 0)); cubeObjectPoints.push_back(cv::Point3f(156, 128, 0)); cubeObjectPoints.push_back(cv::Point3f(52, 128, 0)); cubeObjectPoints.push_back(cv::Point3f(52, 26, 104)); cubeObjectPoints.push_back(cv::Point3f(156, 26, 104)); cubeObjectPoints.push_back(cv::Point3f(156, 128, 104)); cubeObjectPoints.push_back(cv::Point3f(52, 128, 104)); // Creation des coins de la mire for(int x = 0; x < COLCHESSBOARD; x++) for(int y = 0; y < ROWCHESSBOARD; y++) chessCorners3D.push_back(cv::Point3f(x * 26.0f, y * 26.0f, 0.0f)); cv::FileStorage fs("../rsc/intrinsicMatrix.yml", cv::FileStorage::READ); fs["cameraMatrix"] >> cameraMatrix; fs["distCoeffs"] >> distCoeffs; fs.release(); cv::VideoCapture vcap(0); if(!vcap.isOpened()) { std::cout << "FAIL!" 
<< std::endl; return -1; } char key = 0; do { std::cout << "recherche de mire" << std::endl; bool detectionMire = detecterMire(vcap, &chessCornersInit[1], &imCalibNext); std::cout << "mire detectee" << std::endl << std::endl; if(!detectionMire) break; do { vcap >> imCalibColor; cv::Mat rotVec = trackingMire(&imCalibColor, &imCalibNext, &chessCornersInit, &chessCorners3D, &cameraMatrix, &distCoeffs, &tvecs); dessinerCube(&imCalibColor, cubeObjectPoints, rotVec, tvecs, cameraMatrix, distCoeffs); dessinerPoints(&imCalibColor, objectPoints, rotVec, tvecs, cameraMatrix, distCoeffs); cv::imshow("Projection", imCalibColor); key = (char)cv::waitKey(30); }while(key != 27 && key != 32); if(key == 32) { patternfound = false; imagePoints.clear(); chessCornersInit[0].clear(); chessCornersInit[1].clear(); imCalibNext.release(); } }while(key != 27); return 0; }
int main() { time_t timer = 0; time_t start = clock(); time_t startImage = 0; std::cout << "Debut projection\t" << std::endl; bool patternfound = false; bool reset = false; bool endVideo = false; bool resetAuto = false; int i = 0; int nbImages = 0; double moyFinale = 0; cv::TermCriteria termcrit(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03); cv::Size winSize(31, 31); cv::Mat cameraMatrix, distCoeffs; cv::Mat imCalib; cv::Mat imCalibColor; cv::Mat imCalibNext; cv::Mat rvecs, tvecs; std::vector<cv::Point2f> imagePoints; std::vector<cv::Point3f> objectPoints; std::vector<cv::Point3f> QRpointObject3D; std::vector<std::vector<cv::Point2f>> chessCornersInit(2); std::vector<std::vector<cv::Point2f>> QRpointinit(2); std::vector<cv::Point3f> QRpoint3D; std::vector<cv::Point3f> tabuseless; std::vector<cv::Point3f> chessCorners3D; std::vector<double> distances; std::vector<double> moyDistances; cv::FileStorage fs("../rsc/intrinsicMatrix.yml", cv::FileStorage::READ); fs["cameraMatrix"] >> cameraMatrix; fs["distCoeffs"] >> distCoeffs; fs.release(); std::ofstream file; file.open ("../rsc/error.txt"); cv::VideoCapture vcap(0); if(!vcap.isOpened()) { std::cout << "FAIL!" 
<< std::endl; return -1; } char key = 0; do { /*std::cout << "recherche de mire" << std::endl; bool detectionMire = detecterMire(vcap, &chessCornersInit[1], &imCalibNext); std::cout << "mire detectee" << std::endl << std::endl;*/ bool detectionQR = detecterQR(vcap , &QRpointinit[1], &QRpoint3D, &tabuseless, &QRpointObject3D, &imCalibNext); if(!detectionQR) break; do { vcap >> imCalibColor; if(imCalibColor.empty()){ endVideo = true; break; } cv::Mat rotVec = trackingMire(&imCalibColor, &imCalibNext, &QRpointinit, &QRpoint3D, &cameraMatrix, &distCoeffs, &tvecs); dessinerPyra(&imCalibColor, QRpointObject3D, rotVec, tvecs, cameraMatrix, distCoeffs); imagePoints = dessinerPoints(&imCalibColor, tabuseless, rotVec, tvecs, cameraMatrix, distCoeffs); //Calcul d'erreur de reprojection double moy = 0; for(int j = 0; j < QRpointinit[1].size(); j++) { double d = sqrt(pow(QRpointinit[0][j].y - tabuseless[j].y, 2) + pow(QRpointinit[0][j].x - tabuseless[j].x, 2)); distances.push_back(d); moy += d; /*std::cout << "distance point numero " << j << " : " << std::endl << " subpix : x = " << chessCornersInit[0][j].x << " y = " << chessCornersInit[0][j].y << std::endl << " projec : x = " << imagePoints[j].x << " y = " << imagePoints[j].y << std::endl << " distance : " << d << std::endl << std::endl;*/ } moyDistances.push_back(moy / QRpointinit[1].size()); ////std::cout << std::endl << std::endl << "moyenne ecart points image " << i << " : " << moyDistances[i] << std::endl << std::endl; //file << "moyenne ecart points image " << i << " : " << moyDistances[i] << " px" << std::endl; if(moyDistances[i] > 10){ // si l'ecart de reproj est trop grand, reset resetAuto = true; std::cout << "RESET" << std::endl; break; } //moyFinale += moyDistances[i]; i++; nbImages++; cv::imshow("Projection", imCalibColor); key = (char)cv::waitKey(67); }while(key != 27 && key != 32 && resetAuto != true); if(key == 32 || resetAuto == true) { patternfound = false; resetAuto = false; i = 0; imagePoints.clear(); 
chessCornersInit[0].clear(); chessCornersInit[1].clear(); QRpointinit[0].clear(); QRpointinit[1].clear(); QRpoint3D.clear(); QRpointObject3D.clear(); tabuseless.clear(); moyDistances.clear(); distances.clear(); imCalibNext.release(); } }while(key != 27 && endVideo != true); return 0; }