// Runs the passenger-counting loop on the given video file.
//
// Per frame: OCR the on-screen GPS text, decide whether the door is open
// (HOG descriptor of the door region compared against a background model
// built from the first frames), seed tracking points on a virtual
// "tripwire" while the door is open, advance all active trajectories with
// pyramidal Lucas-Kanade optical flow, and classify finished trajectories
// as boarding ("on"), alighting ("off") or missed, updating the counters.
//
// Trajectory encoding (inferred from the code below): element 0 of each
// trajectory is a header point, x = start frame number, y = state flag
// (1 = tracking lost, 2 = long enough to classify) — confirm against
// addPoints/getStartTimes.
//
// @param file      path of the video file to process
// @param frontRear 'F' selects the front-door parameters, anything else rear
// @return 0 on normal completion (end of video or ESC), 1 if the file
//         could not be opened
int Counter::startCount(std::string file, char frontRear/* = 'F'*/) {
    cv::VideoCapture cap(file.c_str());
    if (!cap.isOpened()) {
        std::cout << "Could not open file" << std::endl;
        return 1;
    }
    fps = 1000/cap.get(CV_CAP_PROP_FPS); // milliseconds per frame from the stream FPS
    //int frate = 1000/fps;
    int frate = 20; // fixed display delay (ms) passed to waitKey below
    int dumy = 13700; // @debug 13700 15840 18246 18890 21900
    // Location recognition: digit OCR for the GPS text burnt into the frame.
    DigitRecognizer dr(1,10,5,7, "./origImages");
    dr.learnFromImages();
    dr.setClassifier();
    // set parameters (door region etc.) for the selected door
    if ('F'==frontRear) {
        setFrontDoor();
    } else {
        setRearDoor();
    }
    std::vector<cv::Point2f> tripWire; // points on the tripwire
    std::list<std::vector<cv::Point2f> > trajectories; // a list of trajectories being tracked
    std::vector<std::list<int> > on_models; // each model is a list of start times
    std::vector<std::list<int> > off_models;
    float mean_x=0.0f, mean_y=0.0f, var_x=0.0f, var_y=0.0f, length=0.0f; // trajectory stats
    cv::Mat capframe, frame, image, gray, prevGray, location;
    cv::Mat doorHistBG, door, doorHist;
    cv::Size winSize(31,31); // window size for optical flow computation
    cv::TermCriteria termcrit(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03);
    int onPassengers = 0;
    int offPassengers = 0;
    int missPassengers = 0;
    // NOTE: the three histogram variables below are only referenced by the
    // commented-out cv::calcHist path; the active code uses Utility::HogComp.
    int histSize = 2; // size of background histogram
    float range[] = { 0, 256 }; // range of pixel values for histogram calculation
    const float* histRange = { range };
    // std::string prevGPS, currGPS, speed;
    generateTrackingPoints(tripWire);
    while (true) {
        int fno = cap.get(CV_CAP_PROP_POS_FRAMES);
        if (fno>=dumy) { std::cout << ""; } // @debug breakpoint anchor for specific frames
        cap >> capframe;
        if (capframe.empty()) break; // end of video
        frame = capframe(cv::Rect(0,0,580,450)); // crop to the door view
        //cv::warpPerspective(frame, frame, M, frame.size() );
        frame.copyTo(image);
        cv::cvtColor(image, gray, CV_BGR2GRAY);
        //gammaCorrection(gray); // note: it becomes worse with Gamma (anti-) correction
        if (prevGray.empty()) {
            gray.copyTo(prevGray);
        }
        // check gps location (OCR the on-screen GPS overlay)
        location = capframe(cv::Rect(810, 90, 90, 30));
        currGPS = dr.analyseLocationImage(location, false);
        /*int gpsDistance = 0;
        if (!prevGPS.empty()) {
            std::inner_product(prevGPS.begin(), prevGPS.end(), currGPS.begin(), gpsDistance, std::plus<int>(), std::not_equal_to<char>());
        }
        // add points to trajectories
        /// NEED TO KNOW THAT GPS DOESN'T CHANGE FOR SEVERAL FRAMES
        if(trajectories.size()<tripWire.size()-10 && gpsDistance<3) { //160 0.8
            addPoints(trajectories, tripWire, fno);
        }*/
        // check if door is closed: compare a HOG descriptor of the door region
        // against a background model accumulated over the first five frames
        door = gray(rectDoor);
        if (fno<5) {
            cv::Mat tmpDoorHistBG;
            //cv::calcHist(&door, 1, 0, cv::Mat(), tmpDoorHistBG, 1, &histSize, &histRange, true, false);
            tmpDoorHistBG = Utility::HogComp(door);
            cv::normalize(tmpDoorHistBG, tmpDoorHistBG, 1, 0, cv::NORM_L2, -1, cv::Mat());
            if (doorHistBG.empty()) {
                doorHistBG = tmpDoorHistBG;
            } else {
                // exponential moving average of the background descriptor
                cv::addWeighted(doorHistBG, 0.7, tmpDoorHistBG, 0.3, 0, doorHistBG, -1);
            }
        }
        //cv::calcHist(&door, 1, 0, cv::Mat(), doorHist, 1, &histSize, &histRange, true, false);
        doorHist = Utility::HogComp(door);
        cv::normalize(doorHist, doorHist, 1, 0, cv::NORM_L2, -1, cv::Mat());
        //float similarityDoor = doorHistBG.dot(doorHist);
        float similarityDoor = cv::compareHist(doorHistBG, doorHist, CV_COMP_CORREL);
        bool bDoorOpen = similarityDoor<0.9; // low correlation => door region differs from background => open
        // add points to trajectories
        if(trajectories.size()<tripWire.size()-10 && bDoorOpen) { //160 0.8
            addPoints(trajectories, tripWire, fno);
        }
        std::vector<uchar> status;
        std::vector<float> err;
        std::vector<cv::Point2f> nextPoints;
        std::vector<cv::Point2f> prevPoints = lastPoints(trajectories);
        if (prevPoints.empty()==false) {
            cv::calcOpticalFlowPyrLK(prevGray, gray, prevPoints, nextPoints, status, err, winSize, 3, termcrit, 0, 0.001);
        }
        // Advance or prune each trajectory. `i` indexes the flow results,
        // which are parallel to the trajectory list (one last-point each).
        int i=0;
        std::list<std::vector<cv::Point2f> >::iterator iTrack = trajectories.begin();
        for (; iTrack!=trajectories.end(); i++) {
            int szTrack = iTrack->size();
            isValidTrack(*iTrack, mean_x, mean_y, var_x, var_y, length);
            if ((szTrack>3) && (var_x<1.0f) && (var_y<1.0f)) { // stationary points
                iTrack = trajectories.erase(iTrack);
            } else if ((!status[i] || err[i]>13.0) && (szTrack>10)) { // lost of tracking
                iTrack->at(0).y = 1.0; // header flag: 1 == tracking lost
                iTrack++;
            } else if (szTrack>80) { // too long, remove 120
                iTrack = trajectories.erase(iTrack);
            } else if (szTrack>30) { // long trajectory, try to check 80
                iTrack->at(0).y = 2.0; // header flag: 2 == long enough to classify
                iTrack->push_back(nextPoints[i]);
                iTrack++;
            } else {
                iTrack->push_back(nextPoints[i]);
                iTrack++;
            }
        }
        // update models according to the direction of trajectories
        std::vector<int> startTimes;
        getStartTimes(trajectories, startTimes, fno);
        std::vector<int>::iterator iTime = startTimes.begin();
        for (; iTime!=startTimes.end(); iTime++) {
            int overall_direction = getMajorityDirection(trajectories, *iTime);
            for (i=0, iTrack=trajectories.begin(); iTrack!=trajectories.end(); i++) {
                drawtrajectory(*iTrack, image);
                // header point: x = start frame, y = state flag (>0 => classifiable)
                if (((int)(iTrack->at(0).x) == *iTime) && (iTrack->at(0).y>0.0f)) { // only use trajectories long enough
                    bool validTrack = isValidTrack(*iTrack, mean_x, mean_y, var_x, var_y, length);
                    int onoff = onOroff(*iTrack);
                    if (validTrack && (onoff==overall_direction)) {
                        switch(onoff) {
                        case 0: { // alighting passenger
                            offPassengers = updateModel(off_models, *iTrack, onoff);
                            /*std::vector<cv::Point2f>::iterator iit = iTrack->begin();
                            while (iit!=iTrack->end()) { std::cout << iit->x << " " << iit->y << " "; ++iit; }
                            std::cout << std::endl;*/
                            iTrack = trajectories.erase(iTrack);
                            continue;}
                        case 1: { // boarding passenger
                            onPassengers = updateModel(on_models, *iTrack, onoff);
                            iTrack = trajectories.erase(iTrack);
                            continue;}
                        case 2: { // ambiguous direction
                            missPassengers++;
                            iTrack = trajectories.erase(iTrack);
                            continue;}
                        default:
                            std::cout << "Error: Wrong branch!" << std::endl;
                        }
                    }
                    if ((int)(iTrack->at(0).y) == 1) { // lost tracking
                        // NOTE(review): this erase is followed by the iTrack++
                        // below, which skips one element and can advance past
                        // end() when the last trajectory is erased — looks
                        // like a latent bug; confirm before relying on it.
                        iTrack = trajectories.erase(iTrack);
                    }
                }
                iTrack++;
            }
        }
        //cv::rectangle(image, rectDoor, cv::Scalar(0,255,0));
        showResultImage(image, onPassengers, offPassengers, currGPS, speed);
        if ((char)cv::waitKey(frate/speedratio)==27) break; // ESC quits
        cv::swap(prevGray, gray);
        std::swap(currGPS, prevGPS);
    }
    return 0;
}
int main() { time_t timer = 0; time_t start = clock(); time_t startImage = 0; std::cout << "Debut projection\t" << std::endl; bool patternfound = false; int i = 0; cv::TermCriteria termcrit(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03); cv::Size winSize(31,31); cv::Mat cameraMatrix, distCoeffs; cv::Mat imCalib; cv::Mat imCalibColor; cv::Mat imCalibNext; cv::Mat rvecs, tvecs; std::vector<cv::Point2f> imagePoints; std::vector<cv::Point3f> objectPoints; std::vector<cv::Point2f> chessCornersInit[2]; std::vector<cv::Point3f> chessCorners3D; // Creation des points a projeter objectPoints.push_back(cv::Point3f(50,25,0)); objectPoints.push_back(cv::Point3f(150,25,0)); objectPoints.push_back(cv::Point3f(150,125,0)); objectPoints.push_back(cv::Point3f(50,125,0)); objectPoints.push_back(cv::Point3f(50,25,100)); objectPoints.push_back(cv::Point3f(150,25,100)); objectPoints.push_back(cv::Point3f(150,125,100)); objectPoints.push_back(cv::Point3f(50,125,100)); // Creation des coins de la mire for(int x=0 ; x<COLCHESSBOARD ; x++) for(int y=0 ; y<ROWCHESSBOARD ; y++) chessCorners3D.push_back(cv::Point3f(x*26.0f,y*26.0f,0.0f)); cv::FileStorage fs("../rsc/intrinsicMatrix.yml", cv::FileStorage::READ); fs["cameraMatrix"] >> cameraMatrix; fs["distCoeffs"] >> distCoeffs; fs.release(); cv::VideoCapture vcap("../rsc/capture.avi"); if(!vcap.isOpened()){ std::cout << "FAIL!" 
<< std::endl; return -1; } do{ vcap >> imCalibColor; cv::imshow("Projection", imCalibColor); cv::cvtColor(imCalibColor, imCalib, CV_BGR2GRAY); cv::waitKey(); timer = clock(); startImage = clock(); patternfound = cv::findChessboardCorners(imCalib, cv::Size(ROWCHESSBOARD, COLCHESSBOARD), chessCornersInit[0], cv::CALIB_CB_FAST_CHECK); std::cout << "findChessboardCorners\t" << float(clock()-timer)/CLOCKS_PER_SEC << " sec" << std::endl; timer = clock(); } while(!patternfound); for(;;) { vcap >> imCalibColor; if(!imCalibNext.empty()) { cv::swap(imCalib, imCalibNext); // copie de l'ancienne image pour le flot optique for(size_t c = 0; c < chessCornersInit[0].size(); c++) chessCornersInit[0][c] = chessCornersInit[1][c]; chessCornersInit[1].clear(); } else cv::cornerSubPix(imCalib, chessCornersInit[0], cv::Size(5, 5), cv::Size(-1, -1), cv::TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1)); cv::cvtColor(imCalibColor, imCalibNext, CV_BGR2GRAY); std::vector<uchar> status; std::vector<float> err; cv::calcOpticalFlowPyrLK(imCalib, imCalibNext, chessCornersInit[0], chessCornersInit[1], status, err, winSize, 3, termcrit, 0, 0.0001); cv::solvePnP(chessCorners3D, chessCornersInit[0], cameraMatrix, distCoeffs, rvecs, tvecs); cv::Mat rotVec(3, 3, CV_64F); cv::Rodrigues(rvecs, rotVec); //Projection cv::projectPoints(objectPoints, rotVec, tvecs, cameraMatrix, distCoeffs, imagePoints); // Dessin des points projetes cv::line(imCalibColor, imagePoints[0], imagePoints[4], cv::Scalar(255,255,0), 2, 8); cv::line(imCalibColor, imagePoints[1], imagePoints[5], cv::Scalar(255,255,0), 2, 8); cv::line(imCalibColor, imagePoints[2], imagePoints[6], cv::Scalar(255,255,0), 2, 8); cv::line(imCalibColor, imagePoints[3], imagePoints[7], cv::Scalar(255,255,0), 2, 8); cv::line(imCalibColor, imagePoints[0], imagePoints[1], cv::Scalar(255,0,255), 2, 8); cv::line(imCalibColor, imagePoints[1], imagePoints[2], cv::Scalar(255,0,255), 2, 8); cv::line(imCalibColor, imagePoints[2], imagePoints[3], 
cv::Scalar(255,0,255), 2, 8); cv::line(imCalibColor, imagePoints[3], imagePoints[0], cv::Scalar(255,0,255), 2, 8); cv::line(imCalibColor, imagePoints[4], imagePoints[5], cv::Scalar(0,255,255), 2, 8); cv::line(imCalibColor, imagePoints[5], imagePoints[6], cv::Scalar(0,255,255), 2, 8); cv::line(imCalibColor, imagePoints[6], imagePoints[7], cv::Scalar(0,255,255), 2, 8); cv::line(imCalibColor, imagePoints[7], imagePoints[4], cv::Scalar(0,255,255), 2, 8); cv::imshow("Projection", imCalibColor); cv::waitKey(67); } return 0; }
void main() { bool patternfound = false; bool reset = false; bool resetAuto = false; cv::TermCriteria termcrit(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03); cv::Size winSize(31, 31); cv::Mat cameraMatrix, distCoeffs; cv::Mat imCalib; cv::Mat imCalibColor; cv::Mat imCalibNext; cv::Mat rvecs, tvecs; std::vector<cv::Point2f> imagePoints; std::vector<cv::Point3f> objectPoints; std::vector<cv::Point3f> cubeObjectPoints; std::vector<std::vector<cv::Point2f>> chessCornersInit(2); std::vector<cv::Point3f> chessCorners3D; std::vector<double> distances; double moyDistances; // Creation des points a projeter for(int x = 0; x < COLCHESSBOARD; x++) for(int y = 0; y < ROWCHESSBOARD; y++) objectPoints.push_back(cv::Point3f(x * 26.0f, y * 26.0f, 0.0f)); // Creation des coins de la mire for(int x = 0; x < COLCHESSBOARD; x++) for(int y = 0; y < ROWCHESSBOARD; y++) chessCorners3D.push_back(cv::Point3f(x * 26.0f, y * 26.0f, 0.0f)); cv::FileStorage fs("../rsc/intrinsicMatrix.yml", cv::FileStorage::READ); fs["cameraMatrix"] >> cameraMatrix; fs["distCoeffs"] >> distCoeffs; fs.release(); cv::VideoCapture vcap(0); if(!vcap.isOpened()){ std::cout << "FAIL!" << std::endl; return; } cv::Mat *frame = new cv::Mat(cv::Mat::zeros(vcap.get(CV_CAP_PROP_FRAME_HEIGHT), vcap.get(CV_CAP_PROP_FRAME_WIDTH), CV_8UC3)); do { vcap >> *frame; }while(frame->empty()); osg::ref_ptr<osg::Image> backgroundImage = new osg::Image; backgroundImage->setImage(frame->cols, frame->rows, 3, GL_RGB, GL_BGR, GL_UNSIGNED_BYTE, (uchar*)(frame->data), osg::Image::AllocationMode::NO_DELETE, 1); // read the scene from the list of file specified commandline args. 
osg::ref_ptr<osg::Group> group = new osg::Group; osg::ref_ptr<osg::Node> objet3D; objet3D = osgDB::readNodeFile("dumptruck.osgt"); osg::ref_ptr<osg::Camera> cam = createHUD(backgroundImage); osgViewer::Viewer viewer; group->addChild(cam); group->addChild(objet3D); // set the scene to render viewer.setSceneData(group.get()); // projection viewer.getCamera()->setProjectionMatrixAsPerspective( 40., 1., 1., 100. ); // Create a matrix to specify a distance from the viewpoint. osg::Matrix trans; trans.makeTranslate( 7, 0., -50. ); // Rotation angle (in radians) double angle( 0. ); char key = 0; bool detectionMire = false; do { patternfound = false; resetAuto = false; detectionMire = false; imagePoints.clear(); chessCornersInit[0].clear(); chessCornersInit[1].clear(); moyDistances = 0; distances.clear(); imCalibNext.release(); group->removeChild(objet3D); std::cout << "recherche de mire" << std::endl; do { vcap >> *frame; backgroundImage->dirty(); detectionMire = detecterMire(frame, &chessCornersInit[1], &imCalibNext); viewer.frame(); }while(!detectionMire && !viewer.done()); if(viewer.done()) break; std::cout << "mire detectee" << std::endl << std::endl; group->addChild(objet3D); do { vcap >> *frame; cv::Mat rotVec = trackingMire(frame, &imCalibNext, &chessCornersInit, &chessCorners3D, &cameraMatrix, &distCoeffs, &tvecs); imagePoints = dessinerPoints(frame, objectPoints, rotVec, tvecs, cameraMatrix, distCoeffs); // Create the rotation matrix. osg::Matrix rot; rot.makeRotate( angle, osg::Vec3( 1., 0., 0. ) ); angle += 0.01; // Set the view matrix (the concatenation of the rotation and // translation matrices). 
viewer.getCamera()->setViewMatrix( rot * trans ); double moy = 0; for(int j = 0; j < COLCHESSBOARD * ROWCHESSBOARD; j++) { double d = sqrt(pow(chessCornersInit[0][j].y - imagePoints[j].y, 2) + pow(chessCornersInit[0][j].x - imagePoints[j].x, 2)); distances.push_back(d); moy += d; } moyDistances = moy / (COLCHESSBOARD * ROWCHESSBOARD); if(moyDistances > 2) // si l'ecart de reproj est trop grand, reset resetAuto = true; key = cv::waitKey(33); // Draw the next frame. backgroundImage->dirty(); viewer.frame(); }while(!viewer.done() && !resetAuto && key != 32); } while(!viewer.done()); }
void main() { bool patternfound = false; bool reset = false; bool resetAuto = false; int nbImages = 0; double moyFinale = 0; char key = 0; bool detectionMire = false; bool detectionVisage = false; int cpt = 0, moyCpt = 0, i = 0; std::cout << "initialisation de Chehra..." << std::endl; Chehra chehra; std::cout << "done" << std::endl; cv::TermCriteria termcrit(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03); cv::Size winSize(31, 31); cv::Mat cameraMatrix, distCoeffs; cv::Mat imCalib; cv::Mat imCalibColor; cv::Mat imCalibNext; cv::Mat rvecs, tvecs; cv::Mat Rc, C = cv::Mat(3, 1, CV_64F), rotVecInv; std::vector<cv::Point2f> imagePoints; std::vector<cv::Point3f> objectPoints; std::vector<cv::Point3f> cubeObjectPoints; std::vector<cv::Point3f> dessinPointsVisage; std::vector<std::vector<cv::Point2f>> chessCornersInit(2); std::vector<std::vector<cv::Point2f>> pointsVisageInit(2); std::vector<cv::Point3f> chessCorners3D; std::vector<cv::Point3f> pointsVisage3D; std::vector<cv::Point3f> visage; std::vector<double> distances; double moyDistances; // Creation des coins de la mire for(int x = 0; x < COLCHESSBOARD; x++) for(int y = 0; y < ROWCHESSBOARD; y++) chessCorners3D.push_back(cv::Point3f(x * SIZEMIRE, y * SIZEMIRE, 0.0f)); // Creation des points a projeter for(int x = 0; x < COLCHESSBOARD; x++) for(int y = 0; y < ROWCHESSBOARD; y++) objectPoints.push_back(cv::Point3f(x * SIZEMIRE, y * SIZEMIRE, 0.0f)); cv::FileStorage fs("../rsc/intrinsicMatrix.yml", cv::FileStorage::READ); fs["cameraMatrix"] >> cameraMatrix; fs["distCoeffs"] >> distCoeffs; double f = (cameraMatrix.at<double>(0, 0) + cameraMatrix.at<double>(1, 1)) / 2; // NEAR = distance focale ; si pixels carrés, fx = fy -> np //mais est généralement différent de fy donc on prend (pour l'instant) par défaut la valeur médiane double g = 2000 * f; // je sais pas pourquoi. au pif. fs.release(); cv::VideoCapture vcap(0); if(!vcap.isOpened()){ std::cout << "FAIL!" 
<< std::endl; return; } cv::Mat *frame = new cv::Mat(cv::Mat::zeros(vcap.get(CV_CAP_PROP_FRAME_HEIGHT), vcap.get(CV_CAP_PROP_FRAME_WIDTH), CV_8UC3)); do { vcap >> *frame; }while(frame->empty()); osg::ref_ptr<osg::Image> backgroundImage = new osg::Image; backgroundImage->setImage(frame->cols, frame->rows, 3, GL_RGB, GL_BGR, GL_UNSIGNED_BYTE, (uchar*)(frame->data), osg::Image::AllocationMode::NO_DELETE, 1); // read the scene from the list of file specified commandline args. osg::ref_ptr<osg::Group> group = new osg::Group; osg::ref_ptr<osg::Geode> cam = createHUD(backgroundImage, vcap.get(CV_CAP_PROP_FRAME_WIDTH), vcap.get(CV_CAP_PROP_FRAME_HEIGHT), cameraMatrix.at<double>(0, 2), cameraMatrix.at<double>(1, 2), f); std::cout << "initialisation de l'objet 3D..." << std::endl; osg::ref_ptr<osg::Node> objet3D = osgDB::readNodeFile("../rsc/objets3D/Creature.obj"); std::cout << "done" << std::endl; osg::StateSet* obectStateset = objet3D->getOrCreateStateSet(); obectStateset->setMode(GL_DEPTH_TEST,osg::StateAttribute::OFF); osg::ref_ptr<osg::MatrixTransform> mat = new osg::MatrixTransform(); osg::ref_ptr<osg::PositionAttitudeTransform> pat = new osg::PositionAttitudeTransform(); // construct the viewer. osgViewer::CompositeViewer compositeViewer; osgViewer::View* viewer = new osgViewer::View; osgViewer::View* viewer2 = new osgViewer::View; // add the HUD subgraph. 
group->addChild(cam); mat->addChild(objet3D); pat->addChild(mat); group->addChild(pat); pat->setScale(osg::Vec3d(3, 3, 3)); osg::Matrixd projectionMatrix; projectionMatrix.makeFrustum( -cameraMatrix.at<double>(0, 2), vcap.get(CV_CAP_PROP_FRAME_WIDTH) - cameraMatrix.at<double>(0, 2), -cameraMatrix.at<double>(1, 2), vcap.get(CV_CAP_PROP_FRAME_HEIGHT) - cameraMatrix.at<double>(1, 2), f, g); osg::Vec3d eye(0.0f, 0.0f, 0.0f), target(0.0f, g, 0.0f), normal(0.0f, 0.0f, 1.0f); // set the scene to render viewer->setSceneData(group.get()); viewer->setUpViewInWindow(0, 0, 1920 / 2, 1080 / 2); viewer->getCamera()->setProjectionMatrix(projectionMatrix); viewer->getCamera()->setViewMatrixAsLookAt(eye, target, normal); viewer2->setSceneData(group.get()); viewer2->setUpViewInWindow(1920 / 2, 0, 1920 / 2, 1080 / 2); viewer2->getCamera()->setProjectionMatrix(projectionMatrix); osg::Vec3d eye2(4 * f, 3 * f / 2, 0.0f), target2(0.0f, f, 0.0f), normal2(0.0f, 0.0f, 1.0f); viewer2->getCamera()->setViewMatrixAsLookAt(eye2, target2, normal2); compositeViewer.addView(viewer); compositeViewer.addView(viewer2); compositeViewer.realize(); // set up windows and associated threads. 
do { group->removeChild(pat); patternfound = false; resetAuto = false; detectionMire = false; detectionVisage = false; imagePoints.clear(); chessCornersInit[0].clear(); chessCornersInit[1].clear(); pointsVisageInit[0].clear(); pointsVisageInit[1].clear(); pointsVisage3D.clear(); dessinPointsVisage.clear(); visage.clear(); moyDistances = 0; distances.clear(); imCalibNext.release(); std::cout << "recherche de pattern" << std::endl; time_t start = clock(); double timer = 0; do { start = clock(); vcap >> *frame; backgroundImage->dirty(); //detectionMire = detecterMire(frame, &chessCornersInit[1], &imCalibNext); detectionVisage = detecterVisage(frame, &chehra, &pointsVisageInit[1], &visage, &pointsVisage3D, &imCalibNext); cpt++; double duree = (clock() - start)/(double) CLOCKS_PER_SEC; timer += duree; if(timer >= 1){ std::cout << cpt << " fps" << std::endl; moyCpt += cpt; timer = 0; duree = 0; i++; cpt = 0; start = clock(); } compositeViewer.frame(); }while(!detectionMire && !detectionVisage && !compositeViewer.done()); if(compositeViewer.done()) break; std::cout << "pattern detectee" << std::endl << std::endl; group->addChild(pat); do { start = clock(); vcap >> *frame; cv::Mat rotVec = trackingMire(frame, &imCalibNext, &pointsVisageInit, &pointsVisage3D, &cameraMatrix, &distCoeffs, &tvecs); //cv::Mat rotVec = trackingMire(frame, &imCalibNext, &chessCornersInit, &chessCorners3D, &cameraMatrix, &distCoeffs, &tvecs); //imagePoints = dessinerPoints(frame, objectPoints, rotVec, tvecs, cameraMatrix, distCoeffs); imagePoints = dessinerPoints(frame, pointsVisage3D, rotVec, tvecs, cameraMatrix, distCoeffs); double r11 = rotVec.at<double>(0, 0); double r21 = rotVec.at<double>(1, 0); double r31 = rotVec.at<double>(2, 0); double r32 = rotVec.at<double>(2, 1); double r33 = rotVec.at<double>(2, 2); osg::Matrixd matrixR; matrixR.makeRotate( atan2(r32, r33), osg::Vec3d(1.0, 0.0, 0.0), -atan2(-r31, sqrt((r32 * r32) + (r33 * r33))), osg::Vec3d(0.0, 0.0, 1.0), atan2(r21, r11), 
osg::Vec3d(0.0, 1.0, 0.0)); mat->setMatrix(matrixR); pat->setPosition(osg::Vec3d(tvecs.at<double>(0, 0), tvecs.at<double>(2, 0), -tvecs.at<double>(1, 0))); //std::cout << "x = " << tvecs.at<double>(0, 0) << " - y = " << tvecs.at<double>(1, 0) << " - z = " << tvecs.at<double>(2, 0) << std::endl; // Calcul d'erreur de reprojection double moy = 0; for(int j = 0; j < pointsVisageInit[1].size() ; j++) { double d = sqrt(pow(pointsVisageInit[0][j].y - imagePoints[j].y, 2) + pow(pointsVisageInit[0][j].x - imagePoints[j].x, 2)); distances.push_back(d); moy += d; } moyDistances = moy / pointsVisageInit[1].size(); if(moyDistances > 1) // si l'ecart de reproj est trop grand, reset resetAuto = true; double duree = (clock() - start)/(double) CLOCKS_PER_SEC; std::cout << (int)(1/duree) << " fps" << std::endl; moyCpt += (int)(1/duree); duree = 0; i++; backgroundImage->dirty(); compositeViewer.frame(); }while(!compositeViewer.done() && !resetAuto); }while(!compositeViewer.done()); std::cout << std::endl << "Moyenne des fps : " << moyCpt/i << std::endl; std::system("PAUSE"); }
// ---------------------------------------------------------------------------------- void QualityMatcher::doTheMagic(cv::Mat imageSrc, cv::Mat imageDst, cv::Mat priorH, MatchingResultCallback cb) { // keypoints std::vector<cv::KeyPoint> featuresSrc; std::vector<cv::KeyPoint> featuresDst; // TODO - use the provided prior // prefilter slightly cv::Mat imgSrc = imageSrc, imgDst = imageDst; //cv::GaussianBlur(imageSrc, imgSrc, cv::Size(3,3), 5.0); //cv::GaussianBlur(imageDst, imgDst, cv::Size(3,3), 5.0); //cv::medianBlur(imageSrc, imgSrc, 3); //cv::medianBlur(imageDst, imgDst, 3); cv::Mat descriptorsSrc, descriptorsDst; // detect //cv::Ptr<cv::FeatureDetector> detector = cv::FeatureDetector::create("SURF"); //detector->detect(imgSrc, featuresSrc); //detector->detect(imgDst, featuresDst); // features cv::FAST(imgSrc, featuresSrc, 50, cv::FastFeatureDetector::TYPE_9_16); cv::FAST(imgDst, featuresDst, 50, cv::FastFeatureDetector::TYPE_9_16); printf("input %d vs %d\n", (int)featuresSrc.size(), (int)featuresDst.size()); cv::Ptr<cv::DescriptorExtractor> descriptor = cv::DescriptorExtractor::create("ORB" ); descriptor->compute(imgSrc, featuresSrc, descriptorsSrc); descriptor->compute(imgDst, featuresDst, descriptorsDst); // descriptors //cv::BriefDescriptorExtractor descriptor; //descriptor.compute(imgSrc, featuresSrc, descriptorsSrc); //descriptor.compute(imgDst, featuresDst, descriptorsDst); if (featuresDst.size() < 10 || featuresSrc.size() < 10 || descriptorsSrc.rows != featuresSrc.size() || descriptorsDst.rows != featuresDst.size()) { cb(false, priorH); return; } // matching (simple nearest neighbours) cv::BFMatcher matcher(cv::NORM_HAMMING); std::vector<cv::DMatch> matches; matcher.match( descriptorsSrc, descriptorsDst, matches ); std::vector<cv::DMatch> goodMatches; std::vector<cv::Point2f> ptsSrc, ptsDst; for( int i = 0; i < matches.size(); i++ ) { if( matches[i].distance <= 20)//std::max(4. 
* min_dist, 0.02) ) { goodMatches.push_back(matches[i]); } } for( int i = 0; i < goodMatches.size(); i++ ) { ptsSrc.push_back( featuresSrc[ goodMatches[i].queryIdx ].pt ); ptsDst.push_back( featuresDst[ goodMatches[i].trainIdx ].pt ); } if (goodMatches.size() < 10) { printf("MATCH FAILED\n"); cb(false, priorH); return; } /*cv::namedWindow( "Display window", cv::WINDOW_AUTOSIZE ); cv::Mat img; cv::drawMatches(imgSrc, featuresSrc, imgDst, featuresDst, goodMatches, img); cv::imshow("imgae1", img); //cv::imshow("imgae1", imgSrc); //cv::imshow("imgae2", imgDst); cv::waitKey(0); */ // ---------------------------- // KLT tracker to further improve the result // ---------------------------- cv::TermCriteria termcrit(cv::TermCriteria::COUNT | cv::TermCriteria::EPS, 30, 0.03); cv::cornerSubPix(imgSrc, ptsSrc, cv::Size(3,3), cv::Size(-1,-1), termcrit); cv::cornerSubPix(imgDst, ptsDst, cv::Size(3,3), cv::Size(-1,-1), termcrit); if(1) { std::vector<uchar> status; std::vector<float> err; cv::Size winSize(7,7); std::vector<cv::Point2f> ptsDstKlt = ptsDst; std::vector<cv::Point2f> ptsSrcOld = ptsSrc; std::vector<cv::Mat> pyrSrc, pyrDst; cv::buildOpticalFlowPyramid(imgSrc, pyrSrc, winSize, 4); cv::buildOpticalFlowPyramid(imgDst, pyrDst, winSize, 4); /*cv::namedWindow( "Display window", cv::WINDOW_AUTOSIZE ); cv::Mat img; cv::drawMatches(imgSrc, featuresSrc, imgDst, featuresDst, goodMatches, img); cv::imshow("imgae1", img); cv::waitKey(0);*/ cv::calcOpticalFlowPyrLK(pyrSrc, pyrDst, ptsSrc, ptsDstKlt, status, err, winSize, 4, termcrit, cv::OPTFLOW_USE_INITIAL_FLOW); // remove bad points ptsSrc.clear(); ptsDst.clear(); for (size_t i=0; i < status.size(); i++) { if (!status[i]) continue; ptsSrc.push_back(ptsSrcOld[i]); ptsDst.push_back(ptsDstKlt[i]); } } printf("klt tracked %d\n", (int)ptsDst.size()); if (ptsDst.size() < 10) { printf("MATCH FAILED\n"); cb(false, priorH); return; } cv::Mat H = cv::findHomography(ptsSrc, ptsDst, CV_RANSAC, 10.); H.convertTo(H, CV_32FC1); if 
(!niceHomography(H)) { printf("MATCH FAILED\n"); cb(false, priorH); return; } // DEBUG printf("H:\n"); for (int i=0; i < 3; i++) printf("%f %f %f\n", H.at<float>(i,0), H.at<float>(i,1), H.at<float>(i,2)); printf("prior H:\n"); for (int i=0; i < 3; i++) printf("%f %f %f\n", priorH.at<float>(i,0), priorH.at<float>(i,1), priorH.at<float>(i,2)); float nrm = cv::norm(priorH); if (nrm > 2) { nrm = cv::norm(priorH, H); printf("(H-prior).norm() = %f\n", nrm); if (nrm > 10.0) { printf("MATCH FAILED - bad H\n"); cb(false, priorH); return; } } cb(true, H); printf("matched %d features\n", (int)featuresSrc.size()); }
// Builds per-pixel maximum-intensity projections of an (n x m) image stack
// with nt frames stored contiguously in `data`, plus an Otsu-thresholded
// mask of the blurred projection, then iterates over the frames.
//
// NOTE(review): nearly all of the optical-flow / LK-tracking code in the
// second loop is commented out; the active work is the max-projection, the
// Otsu threshold, and a per-frame progress printout. The locals up to
// `points[2]` exist solely for the commented-out code paths.
//
// @param data      raw frame buffer, frame i starts at data + i*n*m*type_size
// @param n         rows per frame
// @param m         columns per frame
// @param nt        number of frames in the stack
// @param img_type  OpenCV type code of the raw frames (e.g. CV_16UC1)
// @param type_size bytes per pixel of the raw frames
// @return 0 always
int show_flow (unsigned char *data, int n, int m, int nt, int img_type, int type_size)
{
    Point2f point;
    bool addRemovePt = false;
    VideoCapture cap;
    TermCriteria termcrit (CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03);
    Size subPixWinSize (10, 10), winSize (31, 31);
    const int MAX_COUNT = 300;
    bool needToInit = false;
    bool nightMode = false;
    Mat gray, prevGray, image;
    vector<Point2f> points[2];
    Mat img_max_gauss, img_gauss_max, img_max;
    Mat imgc_max_gauss, imgc_gauss_max, imgc_max, img_gauss_max_thr;
    img_max.create (n, m, CV_8UC1);
    img_max.setTo (0);
    img_gauss_max.create (n, m, CV_8UC1);
    img_gauss_max.setTo (0);
    // Pass 1: per-pixel maximum over all nt frames, raw and Gaussian-blurred.
    for (int i = 0; i < nt; i++)
    {
        // Wrap frame i of the raw buffer without copying.
        Mat img (n, m, img_type, data + i * n * m * type_size);
        img = min (img, 400); // clip bright outliers before normalisation
        double minVal, maxVal;
        minMaxLoc (img, &minVal, &maxVal); //find minimum and maximum intensities
        // stretch to full 8-bit range
        img.convertTo (img, CV_8U, 255.0 / (maxVal - minVal), -minVal * 255.0 / (maxVal - minVal));
        img_max = max (img, img_max);
        GaussianBlur (img, img, cv::Size (3, 3), 1.0, 1.0);
        img_gauss_max = max (img, img_gauss_max);
    }
    // Binary mask of the blurred max-projection (automatic Otsu threshold).
    threshold (img_gauss_max, img_gauss_max_thr, 0, 255, THRESH_BINARY | CV_THRESH_OTSU);
    Mat img_gauss_max_thr_res;
    //resize (img_gauss_max_thr, img_gauss_max_thr_res, cv::Size (img_gauss_max_thr.cols * 3, img_gauss_max_thr.rows * 3), 0, 0, 0);
    Mat prev_gray, prev_img;
    //int x = (int)string (matfilename).find ('.');
    //string file_name = string (matfilename).substr (0, x);
    //string path = matfiledir + "\\" + file_name + "\\flow";
    //imwrite (path + "\\img_big\\img_gauss_max_thr_res" + +".bmp", img_gauss_max_thr_res);
    // Pass 2: per-frame processing — currently only prints progress; the
    // Farneback / LK-tracking experiments below are disabled.
    for (int i = 0; i < nt; i++)
    {
        printf ("i : %d\n", i);
        /*Mat frame (n, m, imgtype, data + i * n * m * type_size);
        frame = min (frame, 100);
        double minVal, maxVal;
        minMaxLoc (frame, &minVal, &maxVal);
        frame.convertTo (frame, CV_8U, 255.0 / (maxVal - minVal), -minVal * 255.0 / (maxVal - minVal));
        GaussianBlur (frame, frame, cv::Size (3, 3), 1.0, 1.0);
        //Mat res_img;
        //resize (frame, res_img, cv::Size (frame.cols * 3, frame.rows * 3));
        //res_img.copyTo (frame);
        Mat img, image;
        applyColorMap (frame, img, COLORMAP_JET);
        img.copyTo (image, img_gauss_max_thr);*/
        //frame.copyTo (gray, img_gauss_max_thr);
        /*Mat img;
        bitwise_and (image, img_gauss_max_thr, img);
        image = img;*/
        //cvtColor (image, gray, COLOR_BGR2GRAY);
        Mat flow, cflow;
        /*if (prev_gray.data != nullptr)
        {
            calcOpticalFlowFarneback (prev_gray, gray, flow, 0.5, 5, 10, 1, 5, 1.2, 0);
            //cvtColor (prev_img, cflow, COLOR_GRAY2BGR);
            prev_img.copyTo(cflow);
            draw_optical_flow (flow, cflow, 4, Scalar (0, 100, 0), Scalar (0, 0, 255));
            //imshow ("Flow", cflow);
            //imshow ("gray", gray);
            //imshow ("prev_gray", prev_gray);
            char c = (char)waitKey (200);
        }
        gray.copyTo (prev_gray);
        image.copyTo (prev_img);*/
        //char c = (char)waitKey (200);
        /* if (nightMode)
            image = Scalar::all (0);
        if (needToInit)
        {
            // automatic initialization
            goodFeaturesToTrack (gray, points[0], MAX_COUNT, 0.01, 10, Mat (), 3, 0, 0.04);
            cornerSubPix (gray, points[0], subPixWinSize, Size (-1, -1), termcrit);
            addRemovePt = false;
        }
        else if (!points[0].empty ())
        {
            vector<uchar> status;
            vector<float> err;
            if (prevGray.empty ())
                gray.copyTo (prevGray);
            calcOpticalFlowPyrLK (prevGray, gray, points[0], points[1], status, err); //, winSize,3, termcrit, 0, 0.001);
            size_t i, k;
            for (i = k = 0; i < points[1].size (); i++)
            {
                if (!status[i])
                    continue;
                points[1][k++] = points[1][i];
                if (points->size() > i)
                {
                    line (image, points[1][i], points[0][i], Scalar(255, 255, 255), 2);
                }
                circle (image, points[1][i], 2, Scalar (0, 0, 0), -1, 8);
            }
            points[1].resize (k);
        }
        if (addRemovePt && points[1].size () < (size_t)MAX_COUNT)
        {
            vector<Point2f> tmp;
            tmp.push_back (point);
            cornerSubPix (gray, tmp, winSize, cvSize (-1, -1), termcrit);
            points[1].push_back (tmp[0]);
            addRemovePt = false;
        }
        needToInit = false;
        imshow ("LK Demo", image);
        if (i == 0)
            char c = (char)waitKey ();*/
        //imwrite (path + "\\img_big\\" + std::to_string (i) + ".bmp", image);
        //imwrite (path + "\\img_\\" + std::to_string (i) + ".bmp", img);
        /*char c = (char)waitKey (0);
        if (c == 27)
            break;
        switch (c)
        {
        case 'r': needToInit = true; break;
        case 'c': points[0].clear (); points[1].clear (); break;
        case 'n': nightMode = !nightMode; break;
        case 's': imwrite (path + "\\" + std::to_string (i) + ".bmp", prev_img);
        }*/
    }
    destroyAllWindows ();
    return 0;
}
// Lays out and renders an info/selection window: grows the text area through
// preset sizes until the slider disappears (or screen limits are hit),
// reserves space for components and buttons, draws the dialog background,
// and positions text, components and buttons inside it.
//
// @param ret    window being laid out; its bitmap, pos and children are mutated
// @param text   not referenced in this function — presumably the text was
//               already placed into ret->text by the caller; confirm before
//               removing (kept for interface compatibility)
// @param player owner colour used to pick the dialog-box graphics
void CMessage::drawIWindow(CInfoWindow * ret, std::string text, PlayerColor player)
{
    bool blitOr = false;
    if(dynamic_cast<CSelWindow*>(ret)) //it's selection window, so we'll blit "or" between components
        blitOr = true;

    // Candidate window sizes, tried smallest-first while the text still
    // needs a slider and the size fits on screen (with a 150 px margin).
    const int sizes[][2] = {{400, 125}, {500, 150}, {600, 200}, {480, 400}};
    for(int i = 0; i < ARRAY_COUNT(sizes) && sizes[i][0] < screen->w - 150 && sizes[i][1] < screen->h - 150 && ret->text->slider; i++)
    {
        ret->text->resize(Point(sizes[i][0], sizes[i][1]));
    }

    if(ret->text->slider)
    {
        // Still sliding at the largest size: let the user scroll.
        ret->text->slider->addUsedEvents(CIntObject::WHEEL | CIntObject::KEYBOARD);
    }
    else
    {
        // Shrink-wrap the window around the text (plus a small margin).
        ret->text->resize(ret->text->label->textSize + Point(10, 10));
    }

    std::pair<int,int> winSize(ret->text->pos.w, ret->text->pos.h); //start with text size

    ComponentsToBlit comps(ret->components,500, blitOr);
    if (ret->components.size())
        winSize.second += 10 + comps.h; //space to first component

    int bw = 0;
    if (ret->buttons.size())
    {
        // Compute total width of buttons
        bw = 20*(ret->buttons.size()-1); // space between all buttons
        for(auto & elem : ret->buttons) //and add buttons width
            bw+=elem->pos.w;
        winSize.second += 20 + //before button
            ok->ourImages[0].bitmap->h; //button
    }

    // Clip window size
    vstd::amax(winSize.second, 50);
    vstd::amax(winSize.first, 80);
    vstd::amax(winSize.first, comps.w);
    vstd::amax(winSize.first, bw);
    vstd::amin(winSize.first, screen->w - 150);

    ret->bitmap = drawDialogBox (winSize.first + 2*SIDE_MARGIN, winSize.second + 2*SIDE_MARGIN, player);
    ret->pos.h=ret->bitmap->h;
    ret->pos.w=ret->bitmap->w;
    ret->center();

    // Vertical layout cursor (relative to the window bitmap).
    int curh = SIDE_MARGIN;
    int xOffset = (ret->pos.w - ret->text->pos.w)/2; // centre the text horizontally

    if(!ret->buttons.size() && !ret->components.size()) //improvement for very small text only popups -> center text vertically
    {
        if(ret->bitmap->h > ret->text->pos.h + 2*SIDE_MARGIN)
            curh = (ret->bitmap->h - ret->text->pos.h)/2;
    }

    ret->text->moveBy(Point(xOffset, curh));
    curh += ret->text->pos.h;

    if (ret->components.size())
    {
        curh += BEFORE_COMPONENTS;
        comps.blitCompsOnSur (blitOr, BETWEEN_COMPS, curh, ret->bitmap);
    }

    if(ret->buttons.size())
    {
        // Position the buttons at the bottom of the window
        bw = (ret->bitmap->w/2) - (bw/2); // reuse bw as the running x position
        curh = ret->bitmap->h - SIDE_MARGIN - ret->buttons[0]->pos.h;
        for(auto & elem : ret->buttons)
        {
            elem->moveBy(Point(bw, curh));
            bw += elem->pos.w + 20;
        }
    }

    // Shift component positions from window-local to screen coordinates.
    for(size_t i=0; i<ret->components.size(); i++)
        ret->components[i]->moveBy(Point(ret->pos.x, ret->pos.y));
}
int main() { time_t timer = 0; time_t start = clock(); time_t startImage = 0; std::cout << "Debut projection\t" << std::endl; bool patternfound = false; bool reset = false; int i = 0; cv::TermCriteria termcrit(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03); cv::Size winSize(31, 31); cv::Mat cameraMatrix, distCoeffs; cv::Mat imCalib; cv::Mat imCalibColor; cv::Mat imCalibNext; cv::Mat rvecs, tvecs; std::vector<cv::Point2f> imagePoints; std::vector<cv::Point3f> objectPoints; std::vector<cv::Point3f> cubeObjectPoints; std::vector<std::vector<cv::Point2f>> chessCornersInit(2); std::vector<cv::Point3f> chessCorners3D; // Creation des points a projeter for(int x = 0; x < COLCHESSBOARD; x++) for(int y = 0; y < ROWCHESSBOARD; y++) objectPoints.push_back(cv::Point3f(x * 26.0f, y * 26.0f, 0.0f)); // Creation des points a projeter cubeObjectPoints.push_back(cv::Point3f(52, 26, 0)); cubeObjectPoints.push_back(cv::Point3f(156, 26, 0)); cubeObjectPoints.push_back(cv::Point3f(156, 128, 0)); cubeObjectPoints.push_back(cv::Point3f(52, 128, 0)); cubeObjectPoints.push_back(cv::Point3f(52, 26, 104)); cubeObjectPoints.push_back(cv::Point3f(156, 26, 104)); cubeObjectPoints.push_back(cv::Point3f(156, 128, 104)); cubeObjectPoints.push_back(cv::Point3f(52, 128, 104)); // Creation des coins de la mire for(int x = 0; x < COLCHESSBOARD; x++) for(int y = 0; y < ROWCHESSBOARD; y++) chessCorners3D.push_back(cv::Point3f(x * 26.0f, y * 26.0f, 0.0f)); cv::FileStorage fs("../rsc/intrinsicMatrix.yml", cv::FileStorage::READ); fs["cameraMatrix"] >> cameraMatrix; fs["distCoeffs"] >> distCoeffs; fs.release(); cv::VideoCapture vcap(0); if(!vcap.isOpened()) { std::cout << "FAIL!" 
<< std::endl; return -1; } char key = 0; do { std::cout << "recherche de mire" << std::endl; bool detectionMire = detecterMire(vcap, &chessCornersInit[1], &imCalibNext); std::cout << "mire detectee" << std::endl << std::endl; if(!detectionMire) break; do { vcap >> imCalibColor; cv::Mat rotVec = trackingMire(&imCalibColor, &imCalibNext, &chessCornersInit, &chessCorners3D, &cameraMatrix, &distCoeffs, &tvecs); dessinerCube(&imCalibColor, cubeObjectPoints, rotVec, tvecs, cameraMatrix, distCoeffs); dessinerPoints(&imCalibColor, objectPoints, rotVec, tvecs, cameraMatrix, distCoeffs); cv::imshow("Projection", imCalibColor); key = (char)cv::waitKey(30); }while(key != 27 && key != 32); if(key == 32) { patternfound = false; imagePoints.clear(); chessCornersInit[0].clear(); chessCornersInit[1].clear(); imCalibNext.release(); } }while(key != 27); return 0; }
int main() { time_t timer = 0; time_t start = clock(); time_t startImage = 0; std::cout << "Debut projection\t" << std::endl; bool patternfound = false; bool reset = false; bool endVideo = false; bool resetAuto = false; int i = 0; int nbImages = 0; double moyFinale = 0; cv::TermCriteria termcrit(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03); cv::Size winSize(31, 31); cv::Mat cameraMatrix, distCoeffs; cv::Mat imCalib; cv::Mat imCalibColor; cv::Mat imCalibNext; cv::Mat rvecs, tvecs; std::vector<cv::Point2f> imagePoints; std::vector<cv::Point3f> objectPoints; std::vector<cv::Point3f> QRpointObject3D; std::vector<std::vector<cv::Point2f>> chessCornersInit(2); std::vector<std::vector<cv::Point2f>> QRpointinit(2); std::vector<cv::Point3f> QRpoint3D; std::vector<cv::Point3f> tabuseless; std::vector<cv::Point3f> chessCorners3D; std::vector<double> distances; std::vector<double> moyDistances; cv::FileStorage fs("../rsc/intrinsicMatrix.yml", cv::FileStorage::READ); fs["cameraMatrix"] >> cameraMatrix; fs["distCoeffs"] >> distCoeffs; fs.release(); std::ofstream file; file.open ("../rsc/error.txt"); cv::VideoCapture vcap(0); if(!vcap.isOpened()) { std::cout << "FAIL!" 
<< std::endl; return -1; } char key = 0; do { /*std::cout << "recherche de mire" << std::endl; bool detectionMire = detecterMire(vcap, &chessCornersInit[1], &imCalibNext); std::cout << "mire detectee" << std::endl << std::endl;*/ bool detectionQR = detecterQR(vcap , &QRpointinit[1], &QRpoint3D, &tabuseless, &QRpointObject3D, &imCalibNext); if(!detectionQR) break; do { vcap >> imCalibColor; if(imCalibColor.empty()){ endVideo = true; break; } cv::Mat rotVec = trackingMire(&imCalibColor, &imCalibNext, &QRpointinit, &QRpoint3D, &cameraMatrix, &distCoeffs, &tvecs); dessinerPyra(&imCalibColor, QRpointObject3D, rotVec, tvecs, cameraMatrix, distCoeffs); imagePoints = dessinerPoints(&imCalibColor, tabuseless, rotVec, tvecs, cameraMatrix, distCoeffs); //Calcul d'erreur de reprojection double moy = 0; for(int j = 0; j < QRpointinit[1].size(); j++) { double d = sqrt(pow(QRpointinit[0][j].y - tabuseless[j].y, 2) + pow(QRpointinit[0][j].x - tabuseless[j].x, 2)); distances.push_back(d); moy += d; /*std::cout << "distance point numero " << j << " : " << std::endl << " subpix : x = " << chessCornersInit[0][j].x << " y = " << chessCornersInit[0][j].y << std::endl << " projec : x = " << imagePoints[j].x << " y = " << imagePoints[j].y << std::endl << " distance : " << d << std::endl << std::endl;*/ } moyDistances.push_back(moy / QRpointinit[1].size()); ////std::cout << std::endl << std::endl << "moyenne ecart points image " << i << " : " << moyDistances[i] << std::endl << std::endl; //file << "moyenne ecart points image " << i << " : " << moyDistances[i] << " px" << std::endl; if(moyDistances[i] > 10){ // si l'ecart de reproj est trop grand, reset resetAuto = true; std::cout << "RESET" << std::endl; break; } //moyFinale += moyDistances[i]; i++; nbImages++; cv::imshow("Projection", imCalibColor); key = (char)cv::waitKey(67); }while(key != 27 && key != 32 && resetAuto != true); if(key == 32 || resetAuto == true) { patternfound = false; resetAuto = false; i = 0; imagePoints.clear(); 
chessCornersInit[0].clear(); chessCornersInit[1].clear(); QRpointinit[0].clear(); QRpointinit[1].clear(); QRpoint3D.clear(); QRpointObject3D.clear(); tabuseless.clear(); moyDistances.clear(); distances.clear(); imCalibNext.release(); } }while(key != 27 && endVideo != true); return 0; }
// Uploads per-frame GPU parameters for the standard cursor layer: a
// screen-space vertex transform that places the cursor quad, and a UV
// transform that selects one icon out of the cursor sprite strip.
// @param so  shader object to receive the parameters; ignored if invalid
void DGStdCursorPictureLayer::updateCustomGpuParameter(DShaderObject* so)
{
	if (!so->isValid())
	{
		return;
	}
	// we assume the so is the only so named "UI_StdCursor_t0_p0"
	DMatrix4 vertTransform;
	DMatrix3 uvTransform = DMatrix3::IDENTITY;
	DGCursor::CursorAction action;
	DVector2 posOffset(0.0f, 0.0f);
	// Half the cursor size; used to shift the quad so the hot point lands on
	// the cursor's top-left for the actions flagged below.
	DReal cw = mHostCursor->getSize().getWidth() * 0.5f;
	DReal ch = mHostCursor->getSize().getHeight() * 0.5f;
	bool makeOffset = false;
	// Map the host cursor's action onto the local icon id; actions whose
	// icon is anchored at a corner (idle, busy, pressed, drag, link) also get
	// the half-size position offset.
	switch (mHostCursor->getCursorAction())
	{
	case Duel::DGCursor::CA_Idle:
		action = DGCursor::CA_Idle;
		makeOffset = true;
		break;
	case Duel::DGCursor::CA_Busy:
		action = DGCursor::CA_Busy;
		makeOffset = true;
		break;
	case Duel::DGCursor::CA_Pressed:
		action = DGCursor::CA_Pressed;
		makeOffset = true;
		break;
	case Duel::DGCursor::CA_Drag:
		action = DGCursor::CA_Drag;
		makeOffset = true;
		break;
	case Duel::DGCursor::CA_ScaleHorizontal:
		action = DGCursor::CA_ScaleHorizontal;
		break;
	case Duel::DGCursor::CA_ScaleVertical:
		action = DGCursor::CA_ScaleVertical;
		break;
	case Duel::DGCursor::CA_Scale_LeftCorner:
		action = DGCursor::CA_Scale_LeftCorner;
		break;
	case Duel::DGCursor::CA_Scale_RightCorner:
		action = DGCursor::CA_Scale_RightCorner;
		break;
	case Duel::DGCursor::CA_Link:
		action = DGCursor::CA_Link;
		makeOffset = true;
		break;
	case Duel::DGCursor::CA_Scroll:
		action = DGCursor::CA_Scroll;
		break;
	case Duel::DGCursor::CA_Edit:
		action = DGCursor::CA_Edit;
		break;
	case Duel::DGCursor::CA_Invalid:
		action = DGCursor::CA_Invalid;
		break;
	default:
		// Unknown action: fall back to the idle icon with its offset.
		action = DGCursor::CA_Idle;
		makeOffset = true;
		break;
	}
	if (makeOffset)
	{
		posOffset.x += cw;
		posOffset.y -= ch;
	}
	// The standard UI's hot point varies per action, so the screen position
	// must be recomputed here each time. (translated from Chinese)
	DVector2 pos = mHostCursor->getPointInScreen();
	pos += posOffset;
	DGSize winSize((DReal)mHostCursor->getHostWindow()->getWidth(), (DReal)mHostCursor->getHostWindow()->getHeight());
	vertTransform = DGGUIMathTool::getScreenSpaceTransform(pos, mHostCursor->getSize(), winSize);
	so->getVertexProgramParameters()->setValue("vertTransform", vertTransform);
	// UV selection: the standard cursor sheet holds exactly 12 icons in a
	// horizontal strip, so scale U by 1/12 and shift by the icon index.
	// (translated from Chinese)
	DReal uvXScale = 1.0f / 12.0f;
	uvTransform[0][0] = uvXScale;
	uint32 iconId = (uint32)action;
	DReal uvXOffset = uvXScale * iconId;
	// Column-major order and storage: [2][0] is the x translation.
	// (translated from Chinese)
	uvTransform[2][0] = uvXOffset;
	so->getVertexProgramParameters()->setValue("uvTransform", uvTransform);
	so->getPixelProgramParameters()->setValue("texUnit", mTexture->getAs<DTexture>()->getGpuTexutureConstant());
}