// Translate by (x, y, z) given in view coordinates: rotVec() maps the offset
// into world space, which is then applied to the active object when object
// manipulation is on (t_act), or to the view center otherwise.
void CGView::move(GLdouble x, GLdouble y, GLdouble z)
{
    Vector3d worldOffset = rotVec(Vector3d(x, y, z));
    if (this->t_act)
        this->objs[active].move(worldOffset);
    else
        this->center += worldOffset;
}
// Builds the quaternion that rotates the +X axis onto the unit normal `n`.
// `n` is assumed normalized (its x component is compared against +/-0.9999).
PxQuat computeQuatFromNormal(const PxVec3& n)
{
    // Degenerate case: n already (anti-)parallel to +X.
    if (n.x > 0.9999f)
        return PxQuat::createIdentity();            // parallel: no rotation
    if (n.x < -0.9999f)
        return PxQuat(0.0f, 1.0f, 0.0f, 0.0f);      // anti-parallel: 180 deg about Y

    // Rotation axis = X cross n = (0, -n.z, n.y); its length equals sin(angle).
    PxVec3 axis(0.0f, -n.z, n.y);
    PxReal sinAngle = axis.magnitude();
    axis *= 1.0f / sinAngle;                        // normalize the axis

    // Clamp against rounding noise before feeding asin.
    if (sinAngle > 1.0f)
        sinAngle = 1.0f;
    PxReal angle = PxAsin(sinAngle);
    if (n.x < 0)
        angle = PxPi - angle;                       // normal points into the -X half-space

    return PxQuat(angle, axis);
}
// CartPoseCostCalculator(const CartPoseCostCalculator& other) : pose_(other.pose_), manip_(other.manip_), rs_(other.rs_) {} VectorXd operator()(const VectorXd& dof_vals) const { manip_->SetDOFValues(toDblVec(dof_vals)); OR::Transform newpose = link_->GetTransform(); OR::Transform pose_err = pose_inv_ * newpose; VectorXd err = concat(rotVec(pose_err.rot), toVector3d(pose_err.trans)); return err; }
void TurretShape::updateMove(const Move* move) { PROFILE_SCOPE( TurretShape_UpdateMove ); if (!move) return; Point3F vec, pos; // Update orientation mTurretDelta.rotVec = mRot; VectorF rotVec(0, 0, 0); if (getAllowManualRotation()) { if (mPitchAllowed) { rotVec.x = move->pitch * 2.0f; // Assume that our -2PI to 2PI range was clamped to -PI to PI in script; if (mPitchRate > 0) { rotVec.x *= mPitchRate * TickSec; } } if (mHeadingAllowed) { rotVec.z = move->yaw * 2.0f; // Assume that our -2PI to 2PI range was clamped to -PI to PI in script if (mHeadingRate > 0) { rotVec.z *= mHeadingRate * TickSec; } } } mRot.x += rotVec.x; mRot.z += rotVec.z; _applyLimits(mRot); if (isServerObject()) { // As this ends up animating shape nodes, we have no sense of a transform and // render transform. Therefore we treat this as the true transform and leave the // client shape node changes to interpolateTick() as the render transform. Otherwise // on the client we'll have this node change from processTick() and then backstepping // and catching up to the true node change in interpolateTick(), which causes the // turret to stutter. _setRotation( mRot ); } else { // If on the client, calc delta for backstepping mTurretDelta.rot = mRot; mTurretDelta.rotVec = mTurretDelta.rotVec - mTurretDelta.rot; } setMaskBits(TurretUpdateMask); }
// Tracks the calibration-pattern corners from the previous frame into the
// current one using pyramidal Lucas-Kanade optical flow, then estimates the
// camera pose from the refined previous-frame corners.
// On return *imCalibNext holds the current grayscale frame, *tvecs the
// translation, and the returned Mat is the 3x3 rotation matrix.
cv::Mat trackingMire(cv::Mat *imCalibColor, cv::Mat *imCalibNext, std::vector<std::vector<cv::Point2f>> *chessCornersInit, std::vector<cv::Point3f> *chessCorners3D, cv::Mat *cameraMatrix, cv::Mat *distCoeffs, cv::Mat *tvecs)
{
    std::vector<std::vector<cv::Point2f>>& corners = *chessCornersInit;

    // Keep the previous grayscale image around for the optical-flow step.
    cv::Mat previousGray, rvec;
    cv::swap(previousGray, *imCalibNext);

    // Corners tracked on the last call become this call's starting points.
    corners[0] = corners[1];
    corners[1].clear();

    // Refine the starting corners to sub-pixel accuracy.
    cv::cornerSubPix(previousGray, corners[0], cv::Size(5, 5), cv::Size(-1, -1),
                     cv::TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));

    cv::cvtColor(*imCalibColor, *imCalibNext, CV_BGR2GRAY);

    std::vector<uchar> status;
    std::vector<float> err;
    cv::calcOpticalFlowPyrLK(previousGray, *imCalibNext, corners[0], corners[1],
                             status, err, cv::Size(31, 31), 3,
                             cv::TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1),
                             0, 0.0001);

    // Pose from the refined previous-frame corners.
    cv::solvePnP(*chessCorners3D, corners[0], *cameraMatrix, *distCoeffs, rvec, *tvecs);

    // Expand the Rodrigues rotation vector into a full 3x3 matrix.
    cv::Mat rotationMatrix(3, 3, CV_64F);
    cv::Rodrigues(rvec, rotationMatrix);

    return rotationMatrix;
}
void main() { bool patternfound = false; bool resetAuto = false; int nbImages = 0; double moyFinale = 0; bool detectionVisage = false; int nbrLoopSinceLastDetection = 0; int criticalValueOfLoopWithoutDetection = 15; std::cout << "initialisation de Chehra..." << std::endl; Chehra chehra; std::cout << "done" << std::endl; cv::Mat cameraMatrix, distCoeffs; cv::Mat rvecs, tvecs; std::vector<cv::Point2f> imagePoints; std::vector<cv::Point2f> pointsVisage2D; std::vector<cv::Point2f> moyPointsVisage2D; std::vector<cv::Point3f> pointsVisage3D; std::vector<cv::Point3f> visage; std::vector<double> distances; double moyDistances; std::vector<std::vector<cv::Point2f>> images; std::vector<cv::Mat> frames; double s = 10.0f; osg::Matrixd matrixS; // scale matrixS.set( s, 0, 0, 0, 0, s, 0, 0, 0, 0, s, 0, 0, 0, 0, 1); pointsVisage3D.push_back(cv::Point3f(90,0,-80)); pointsVisage3D.push_back(cv::Point3f(-90,0,-80)); pointsVisage3D.push_back(cv::Point3f(0,0,0)); pointsVisage3D.push_back(cv::Point3f(600,0,600)); pointsVisage3D.push_back(cv::Point3f(0,0,600)); pointsVisage3D.push_back(cv::Point3f(-600,0,600)); /* pointsVisage3D.push_back(cv::Point3f(13.1, -98.1,108.3)); // exterieur narine gauche pointsVisage3D.push_back(cv::Point3f(-13.1, -98.1,108.3)); // exterieur narine droite pointsVisage3D.push_back(cv::Point3f(0, -87.2, 124.2)); // bout du nez pointsVisage3D.push_back(cv::Point3f(44.4, -57.9, 83.7)); // exterieur oeil gauche pointsVisage3D.push_back(cv::Point3f(0, 55.4, 101.4)); // haut du nez, centre des yeux pointsVisage3D.push_back(cv::Point3f(-44.4, -57.9, 83.7)); // exterieur oeil droit */ cv::FileStorage fs("../rsc/intrinsicMatrix.yml", cv::FileStorage::READ); fs["cameraMatrix"] >> cameraMatrix; fs["distCoeffs"] >> distCoeffs; double f = (cameraMatrix.at<double>(0, 0) + cameraMatrix.at<double>(1, 1)) / 2; // NEAR = distance focale ; si pixels carrés, fx = fy -> np //mais est généralement différent de fy donc on prend (pour l'instant) par défaut la valeur médiane double g = 
2000 * f; // je sais pas pourquoi. au pif. fs.release(); cv::VideoCapture vcap(0); if(!vcap.isOpened()) { std::cout << "FAIL!" << std::endl; return; } cv::Mat *frame = new cv::Mat(cv::Mat::zeros(vcap.get(CV_CAP_PROP_FRAME_HEIGHT), vcap.get(CV_CAP_PROP_FRAME_WIDTH), CV_8UC3)); do { vcap >> *frame; }while(frame->empty()); osg::ref_ptr<osg::Image> backgroundImage = new osg::Image; backgroundImage->setImage(frame->cols, frame->rows, 3, GL_RGB, GL_BGR, GL_UNSIGNED_BYTE, (uchar*)(frame->data), osg::Image::AllocationMode::NO_DELETE, 1); // read the scene from the list of file specified commandline args. osg::ref_ptr<osg::Group> group = new osg::Group; osg::ref_ptr<osg::Geode> cam = createHUD(backgroundImage, vcap.get(CV_CAP_PROP_FRAME_WIDTH), vcap.get(CV_CAP_PROP_FRAME_HEIGHT), cameraMatrix.at<double>(0, 2), cameraMatrix.at<double>(1, 2), f); std::cout << "initialisation de l'objet 3D..." << std::endl; osg::ref_ptr<osg::Node> objet3D = osgDB::readNodeFile("../rsc/objets3D/brain.obj"); //osg::ref_ptr<osg::Node> objet3D = osgDB::readNodeFile("../rsc/objets3D/dumptruck.osgt"); //////////////////////////////////////////////////////////////////////////////////////////////////////////////// /* osg::Sphere* unitSphere = new osg::Sphere(osg::Vec3(0, -1000, 1000), 100.0); osg::ShapeDrawable* unitSphereDrawable = new osg::ShapeDrawable(unitSphere); osg::Geode* objet3D = new osg::Geode(); objet3D->addDrawable(unitSphereDrawable); */ //osg::StateSet* sphereStateset = unitSphereDrawable->getOrCreateStateSet(); //sphereStateset->setMode(GL_DEPTH_TEST,osg::StateAttribute::OFF); //sphereStateset->setMode(GL_LIGHTING, osg::StateAttribute::OFF); //////////////////////////////////////////////////////////////////////////////////////////////////////////////// std::cout << "done" << std::endl; osg::StateSet* obectStateset = objet3D->getOrCreateStateSet(); obectStateset->setMode(GL_LIGHTING,osg::StateAttribute::OFF); obectStateset->setMode(GL_DEPTH_TEST,osg::StateAttribute::OFF); 
osg::ref_ptr<osg::MatrixTransform> mat = new osg::MatrixTransform(); // construct the viewer. osgViewer::CompositeViewer compositeViewer; osgViewer::View* viewer = new osgViewer::View; osgViewer::View* viewer2 = new osgViewer::View; // add the HUD subgraph. group->addChild(cam); mat->addChild(objet3D); group->addChild(mat); osg::Matrixd projectionMatrix; projectionMatrix.makeFrustum( -cameraMatrix.at<double>(0, 2), vcap.get(CV_CAP_PROP_FRAME_WIDTH) - cameraMatrix.at<double>(0, 2), -cameraMatrix.at<double>(1, 2), vcap.get(CV_CAP_PROP_FRAME_HEIGHT) - cameraMatrix.at<double>(1, 2), f, g); osg::Vec3d eye(0.0f, 0.0f, 0.0f), target(0.0f, g, 0.0f), normal(0.0f, 0.0f, 1.0f); // set the scene to render viewer->setSceneData(group.get()); viewer->setUpViewInWindow(0, 0, 1920 / 2, 1080 / 2); viewer->getCamera()->setProjectionMatrix(projectionMatrix); viewer->getCamera()->setViewMatrixAsLookAt(eye, target, normal); viewer2->setSceneData(group.get()); viewer2->setUpViewInWindow(1920 / 2, 0, 1920 / 2, 1080 / 2); viewer2->getCamera()->setProjectionMatrix(projectionMatrix); osg::Vec3d eye2(4 * f, 3 * f / 2, 0.0f), target2(0.0f, f, 0.0f), normal2(0.0f, 0.0f, 1.0f); viewer2->getCamera()->setViewMatrixAsLookAt(eye2, target2, normal2); compositeViewer.addView(viewer2); compositeViewer.addView(viewer); compositeViewer.realize(); // set up windows and associated threads. 
do { patternfound = false; resetAuto = false; detectionVisage = false; moyPointsVisage2D.clear(); pointsVisage2D.clear(); visage.clear(); moyDistances = 0; distances.clear(); std::cout << "recherche de pattern" << std::endl << "nbr images sauvegardees : " << images.size() << std::endl; vcap >> *frame; frames.push_back(*frame); detectionVisage = detecterVisage(frame, &chehra, &pointsVisage2D, &visage); if(detectionVisage) { images.push_back(pointsVisage2D); nbrLoopSinceLastDetection = 0; group->addChild(mat); } else nbrLoopSinceLastDetection++; if((images.size() > NBRSAVEDIMAGES || nbrLoopSinceLastDetection > criticalValueOfLoopWithoutDetection) && !images.empty()) images.erase(images.begin()); if(images.empty()) group->removeChild(mat); else { //cv::cornerSubPix(*frame, pointsVisage2D, cv::Size(5, 5), cv::Size(-1, -1), cv::TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1)); for(int i = 0; i < NBRFACEPOINTSDETECTED; i++) { cv::Point2f coordonee(0.0f, 0.0f); for(int j = 0; j < images.size(); j++) { coordonee.x += images[j][i].x; coordonee.y += images[j][i].y; } coordonee.x /= images.size(); coordonee.y /= images.size(); moyPointsVisage2D.push_back(coordonee); } cv::solvePnP(pointsVisage3D, moyPointsVisage2D, cameraMatrix, distCoeffs, rvecs, tvecs); cv::Mat rotVec(3, 3, CV_64F); cv::Rodrigues(rvecs, rotVec); imagePoints = dessinerPoints(frame, pointsVisage3D, rotVec, tvecs, cameraMatrix, distCoeffs); double t3 = tvecs.at<double>(2, 0); double t1 = tvecs.at<double>(0, 0); double t2 = tvecs.at<double>(1, 0) + t3 / 27.5; // and now, magic ! 
double r11 = rotVec.at<double>(0, 0); double r12 = rotVec.at<double>(0, 1); double r13 = rotVec.at<double>(0, 2); double r21 = rotVec.at<double>(1, 0); double r22 = rotVec.at<double>(1, 1); double r23 = rotVec.at<double>(1, 2); double r31 = rotVec.at<double>(2, 0); double r32 = rotVec.at<double>(2, 1); double r33 = rotVec.at<double>(2, 2); osg::Matrixd matrixR; // rotation (transposee de rotVec) matrixR.set( r11, r21, r31, 0, r12, r22, r32, 0, r13, r23, r33, 0, 0, 0, 0, 1); osg::Matrixd matrixT; // translation matrixT.makeTranslate(t1, t2, t3); osg::Matrixd matrix90; // rotation de repere entre opencv et osg matrix90.makeRotate(osg::Quat(osg::DegreesToRadians(-90.0f), osg::Vec3d(1.0, 0.0, 0.0))); mat->setMatrix(matrixS * matrixR * matrixT * matrix90); // Calcul d'erreur de reprojection double moy = 0; for(int i = 0; i < pointsVisage2D.size(); i++) { double d = sqrt(pow(pointsVisage2D[i].y - imagePoints[i].y, 2) + pow(pointsVisage2D[i].x - imagePoints[i].x, 2)); distances.push_back(d); moy += d; } moyDistances = moy / pointsVisage2D.size(); if(moyDistances > 2) // si l'ecart de reproj est trop grand, reset resetAuto = true; } backgroundImage->dirty(); compositeViewer.frame(); }while(!compositeViewer.done()); }
int main() { time_t timer = 0; time_t start = clock(); time_t startImage = 0; std::cout << "Debut projection\t" << std::endl; bool patternfound = false; int i = 0; cv::TermCriteria termcrit(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03); cv::Size winSize(31,31); cv::Mat cameraMatrix, distCoeffs; cv::Mat imCalib; cv::Mat imCalibColor; cv::Mat imCalibNext; cv::Mat rvecs, tvecs; std::vector<cv::Point2f> imagePoints; std::vector<cv::Point3f> objectPoints; std::vector<cv::Point2f> chessCornersInit[2]; std::vector<cv::Point3f> chessCorners3D; // Creation des points a projeter objectPoints.push_back(cv::Point3f(50,25,0)); objectPoints.push_back(cv::Point3f(150,25,0)); objectPoints.push_back(cv::Point3f(150,125,0)); objectPoints.push_back(cv::Point3f(50,125,0)); objectPoints.push_back(cv::Point3f(50,25,100)); objectPoints.push_back(cv::Point3f(150,25,100)); objectPoints.push_back(cv::Point3f(150,125,100)); objectPoints.push_back(cv::Point3f(50,125,100)); // Creation des coins de la mire for(int x=0 ; x<COLCHESSBOARD ; x++) for(int y=0 ; y<ROWCHESSBOARD ; y++) chessCorners3D.push_back(cv::Point3f(x*26.0f,y*26.0f,0.0f)); cv::FileStorage fs("../rsc/intrinsicMatrix.yml", cv::FileStorage::READ); fs["cameraMatrix"] >> cameraMatrix; fs["distCoeffs"] >> distCoeffs; fs.release(); cv::VideoCapture vcap("../rsc/capture.avi"); if(!vcap.isOpened()){ std::cout << "FAIL!" 
<< std::endl; return -1; } do{ vcap >> imCalibColor; cv::imshow("Projection", imCalibColor); cv::cvtColor(imCalibColor, imCalib, CV_BGR2GRAY); cv::waitKey(); timer = clock(); startImage = clock(); patternfound = cv::findChessboardCorners(imCalib, cv::Size(ROWCHESSBOARD, COLCHESSBOARD), chessCornersInit[0], cv::CALIB_CB_FAST_CHECK); std::cout << "findChessboardCorners\t" << float(clock()-timer)/CLOCKS_PER_SEC << " sec" << std::endl; timer = clock(); } while(!patternfound); for(;;) { vcap >> imCalibColor; if(!imCalibNext.empty()) { cv::swap(imCalib, imCalibNext); // copie de l'ancienne image pour le flot optique for(size_t c = 0; c < chessCornersInit[0].size(); c++) chessCornersInit[0][c] = chessCornersInit[1][c]; chessCornersInit[1].clear(); } else cv::cornerSubPix(imCalib, chessCornersInit[0], cv::Size(5, 5), cv::Size(-1, -1), cv::TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1)); cv::cvtColor(imCalibColor, imCalibNext, CV_BGR2GRAY); std::vector<uchar> status; std::vector<float> err; cv::calcOpticalFlowPyrLK(imCalib, imCalibNext, chessCornersInit[0], chessCornersInit[1], status, err, winSize, 3, termcrit, 0, 0.0001); cv::solvePnP(chessCorners3D, chessCornersInit[0], cameraMatrix, distCoeffs, rvecs, tvecs); cv::Mat rotVec(3, 3, CV_64F); cv::Rodrigues(rvecs, rotVec); //Projection cv::projectPoints(objectPoints, rotVec, tvecs, cameraMatrix, distCoeffs, imagePoints); // Dessin des points projetes cv::line(imCalibColor, imagePoints[0], imagePoints[4], cv::Scalar(255,255,0), 2, 8); cv::line(imCalibColor, imagePoints[1], imagePoints[5], cv::Scalar(255,255,0), 2, 8); cv::line(imCalibColor, imagePoints[2], imagePoints[6], cv::Scalar(255,255,0), 2, 8); cv::line(imCalibColor, imagePoints[3], imagePoints[7], cv::Scalar(255,255,0), 2, 8); cv::line(imCalibColor, imagePoints[0], imagePoints[1], cv::Scalar(255,0,255), 2, 8); cv::line(imCalibColor, imagePoints[1], imagePoints[2], cv::Scalar(255,0,255), 2, 8); cv::line(imCalibColor, imagePoints[2], imagePoints[3], 
cv::Scalar(255,0,255), 2, 8); cv::line(imCalibColor, imagePoints[3], imagePoints[0], cv::Scalar(255,0,255), 2, 8); cv::line(imCalibColor, imagePoints[4], imagePoints[5], cv::Scalar(0,255,255), 2, 8); cv::line(imCalibColor, imagePoints[5], imagePoints[6], cv::Scalar(0,255,255), 2, 8); cv::line(imCalibColor, imagePoints[6], imagePoints[7], cv::Scalar(0,255,255), 2, 8); cv::line(imCalibColor, imagePoints[7], imagePoints[4], cv::Scalar(0,255,255), 2, 8); cv::imshow("Projection", imCalibColor); cv::waitKey(67); } return 0; }
// Returns this transform's rotation as a 3x1 Rodrigues (axis-angle) vector,
// computed from the 3x3 rotation matrix.
cv::Mat Transform::GetCvRotationVector() const
{
    cv::Mat rodriguesVector(3, 1, cv::DataType<mitk::ScalarType>::type);
    cv::Rodrigues(this->GetCvRotationMatrix(), rodriguesVector);
    return rodriguesVector;
}