void Doom3GroupNode::_applyTransformation() { _d3Group.revertTransform(); evaluateTransform(); _d3Group.freezeTransform(); if (!_d3Group.isModel()) { // Update the origin when we're in "child primitive" mode _renderableName.setOrigin(_d3Group.getOrigin()); } }
void Doom3GroupNode::_onTransformationChanged() { // If this is a container, pass the call to the children and leave the entity unharmed if (!_d3Group.isModel()) { ChildTransformReverter reverter; traverse(reverter); evaluateTransform(); // Update the origin when we're in "child primitive" mode _renderableName.setOrigin(_d3Group.getOrigin()); } else { // It's a model _d3Group.revertTransform(); evaluateTransform(); _d3Group.updateTransform(); } _d3Group.m_curveNURBS.curveChanged(); _d3Group.m_curveCatmullRom.curveChanged(); }
// Commits the pending transform on the brush: reset to the untransformed
// state, re-evaluate the current transform, then freeze the result in.
void BrushInstance::applyTransform()
{
    m_brush.revertTransform();
    evaluateTransform();
    m_brush.freezeTransform();
}
// Returns the node's local-to-world matrix, refreshing the transform
// state first so the cached value is current.
const Matrix4& Node::localToWorld() const
{
    evaluateTransform();
    return _local2world;
}
// Applies the pending transform to the contained light:
// revert, re-evaluate, freeze.
void LightInstance::applyTransform()
{
    m_contained.revertTransform();
    evaluateTransform();
    m_contained.freezeTransform();
}
// Finalises the current preview transform on the light.
void LightNode::_applyTransformation()
{
    _light.revertTransform();
    evaluateTransform();
    _light.freezeTransform();
}
// Re-evaluates the in-progress transform and keeps the light's
// origin in sync (the transform is not frozen here).
void LightNode::_onTransformationChanged()
{
    _light.revertTransform();
    evaluateTransform();
    _light.updateOrigin();
}
// Commits the pending transform on the contained object:
// revert to the base state, evaluate, then freeze the result.
void applyTransform()
{
    m_contained.revertTransform();
    evaluateTransform();
    m_contained.freezeTransform();
}
//pointcloud can make memery leaks,so we use depth image instead bool FlannMatcher::getFinalTransform(cv::Mat& image1,cv::Mat& image2, cv::Mat& depth1,cv::Mat& depth2, std::vector<cv::DMatch>& bestMatches, Eigen::Matrix4f& bestTransform) { vector<cv::KeyPoint> keypoints1,keypoints2; vector<cv::DMatch> matches; getMatches(depth1,depth2,image1,image2,matches,keypoints1,keypoints2); vector<Eigen::Vector3f> eigenPoints1,eigenPoints2; for(int i=0;i<matches.size();++i) { cv::Point2f p2d1; cv::Point2f p2d2; p2d1=keypoints1[matches[i].queryIdx].pt; p2d2=keypoints2[matches[i].trainIdx].pt; //calculate the first x,y,z unsigned short d1=depth1.at<unsigned short>(round(p2d1.y),round(p2d1.x)); double z1=double(d1)/camera_factor; double x1=(p2d1.x-cx)*z1/fx; double y1=(p2d1.y-cy)*z1/fy; //calculate the second x,y,x unsigned short d2=depth2.at<unsigned short>(round(p2d2.y),round(p2d2.x)); double z2=double(d2)/camera_factor; double x2=(p2d2.x-cx)*z2/fx; double y2=(p2d2.y-cy)*z2/fy; //push them into eigenPoints eigenPoints1.push_back(Eigen::Vector3f(x1,y1,z1)); eigenPoints2.push_back(Eigen::Vector3f(x2,y2,z2)); } /***********************/ bool validTrans=false; pcl::TransformationFromCorrespondences tfc; int k=3; double bestError=1E10; float bestRatio=0.0; int numValidMatches=matches.size(); vector<int> bestInliersIndex; bestMatches.clear(); if(numValidMatches<k) return false; for(int iteration=0;iteration<maxIterations;++iteration) { tfc.reset(); for(int i=0;i<k;++i) { int id_match=rand()%numValidMatches; /* Eigen::Vector3f from(pc1->at(keypoints1[matches[id_match].queryIdx].pt.x,matches[id_match].queryIdx].pt.y).x, pc1->at(keypoints1[matches[id_match].queryIdx].pt.x,matches[id_match].queryIdx].pt.y).y, pc1->at(keypoints1[matches[id_match].queryIdx].pt.x,matches[id_match].queryIdx].pt.y).z); Eigen::Vector3f to(pc2->at(keypoints2[matches[id_match].trainIdx].pt.x,matches[id_match].trainIdx].pt.y).x, 
pc2->at(keypoints2[matches[id_match].trainIdx].pt.x,matches[id_match].trainIdx].pt.y).y, pc2->at(keypoints2[matches[id_match].trainIdx].pt.x,matches[id_match].trainIdx].pt.y).z); tfc.add(from,to); */ tfc.add(eigenPoints1[id_match],eigenPoints2[id_match]); } Eigen::Matrix4f transformation = tfc.getTransformation().matrix(); vector<int> indexInliers; double maxInlierDistance = 0.05; double meanError; float ratio; evaluateTransform(transformation, eigenPoints1,eigenPoints2, maxInlierDistance*maxInlierDistance, indexInliers, meanError, ratio); if(meanError<0 || meanError >= maxInlierDistance) continue; if (meanError < bestError) { if (ratio > bestRatio) bestRatio = ratio; if (indexInliers.size()<10 || ratio<0.3) continue; // not enough inliers found } tfc.reset(); for(int idInlier = 0; idInlier < indexInliers.size(); idInlier++) { int idMatch = indexInliers[idInlier]; tfc.add(eigenPoints1[idInlier],eigenPoints2[idInlier]); } transformation = tfc.getTransformation().matrix(); evaluateTransform(transformation, eigenPoints1,eigenPoints2, maxInlierDistance*maxInlierDistance, indexInliers, meanError, ratio); if (meanError < bestError) { if (ratio > bestRatio) bestRatio = ratio; if (indexInliers.size()<10 || ratio<0.3) continue; // not enough inliers found bestTransform=transformation; bestError=meanError; //cout<<"indexInliers size is: "<<indexInliers.size()<<endl; bestInliersIndex=indexInliers; } } if(bestInliersIndex.size()>0) { std::cout<<"**********************************"<<std::endl; std::cout<<"we get----> "<<bestInliersIndex.size()<<"/"<<eigenPoints1.size()<<" inliers!!"<<std::endl; std::cout<<"inliers percentage: "<<bestInliersIndex.size()*100/eigenPoints1.size()<<"% !"<<std::endl; std::cout<<"**********************************"<<std::endl; cout<<"transformation: "<<endl<<bestTransform<<endl; for(int i=0;i<bestInliersIndex.size();++i) { //std::cout<<"inliers i is: "<<bestInliersInliers[i]<<endl; bestMatches.push_back(matches[bestInliersIndex[i]]); } 
validTrans=true; /* //draw cv::Mat img_matches; cv::drawMatches(image1,keypoints1,image2,keypoints2, matches,img_matches,CV_RGB(255,0,0)); cv::drawMatches(image1,keypoints1,image2,keypoints2, bestMatches,img_matches,CV_RGB(0,255,0)); cv::imshow("ransac matches",img_matches); */ drawInliers(image1,image2,keypoints1,keypoints2,matches,bestMatches); cv::waitKey(10); } else { cout<<"bestRatio is: "<<bestRatio<<" ,but no valid Transform founded!!"<<endl; validTrans=false; } return validTrans; }