void Segmentation::createPlane(std::vector<pcl::PointCloud<pcl::PointXYZRGBA>::Ptr> &clusters) {
  _planes.clear();
  for (size_t v = 0; v < clusters.size(); v++) {
    Plane plane;
    //pcl::transformPointCloud(*clusters[v], *clusters[v], transformXY);
    plane.setPlaneCloud(clusters[v]);

    // Build a concave hull outline for each cluster; a smaller alpha yields
    // a tighter, more detailed hull.
    pcl::PointCloud<pcl::PointXYZRGBA>::Ptr concaveHull(new pcl::PointCloud<pcl::PointXYZRGBA>);
    pcl::ConcaveHull<pcl::PointXYZRGBA> chull;
    chull.setInputCloud(clusters[v]);
    chull.setAlpha(0.1);
    chull.reconstruct(*concaveHull);
    //vectorHull.push_back(concaveHull);
    plane.setHull(concaveHull);
    _planes.push_back(plane);
  }
  //pcl::PointCloud<pcl::PointXYZRGBA>::Ptr gr(new pcl::PointCloud<pcl::PointXYZRGBA>);
  //gr = _ground.getPlaneCloud();
  //pcl::transformPointCloud(*_mainCloud, *_mainCloud, transformXY);
  //pcl::transformPointCloud(*gr, *gr, transformXY);
  //_ground.setPlaneCloud(gr);

  // The transformation only needs to be estimated on the first call.
  if (!first) {
    computeTransformation();
    first = true;
  }
  computeHeight();
}
FourPoints computeTransformed(const FourPoints &pointsFrom, const FourPoints &pointsTo, const FourPoints &from) {
  TAffine aff;
  TPointD perspectiveDen;
  computeTransformation(pointsFrom, pointsTo, aff, perspectiveDen);

  double den;
  FourPoints fp;

  den = perspectiveDen.x * from.m_p00.x + perspectiveDen.y * from.m_p00.y + 1;
  assert(den != 0);
  fp.m_p00 = (1.0 / den) * (aff * from.m_p00);

  den = perspectiveDen.x * from.m_p01.x + perspectiveDen.y * from.m_p01.y + 1;
  assert(den != 0);
  fp.m_p01 = (1.0 / den) * (aff * from.m_p01);

  den = perspectiveDen.x * from.m_p10.x + perspectiveDen.y * from.m_p10.y + 1;
  assert(den != 0);
  fp.m_p10 = (1.0 / den) * (aff * from.m_p10);

  den = perspectiveDen.x * from.m_p11.x + perspectiveDen.y * from.m_p11.y + 1;
  assert(den != 0);
  fp.m_p11 = (1.0 / den) * (aff * from.m_p11);

  return fp;
}
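// The four corner computations above share one projective step: a corner p
// maps to (aff * p) / (perspectiveDen.x * p.x + perspectiveDen.y * p.y + 1).
// A minimal sketch of a helper factoring that step out (not part of the
// original source; it assumes only what the snippet already uses, namely
// TAffine::operator*(TPointD) and the x/y members of TPointD):
static TPointD applyPerspective(const TAffine &aff, const TPointD &perspectiveDen, const TPointD &p) {
  double den = perspectiveDen.x * p.x + perspectiveDen.y * p.y + 1;
  assert(den != 0);
  return (1.0 / den) * (aff * p);
}
// Usage: fp.m_p00 = applyPerspective(aff, perspectiveDen, from.m_p00);
// and likewise for the other three corners.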
// Finds the transformation matrix with the highest score, stores it in
// "best_transformation", and returns the index of the first point of the
// couple chosen in the first map (the point around which the rotation is
// performed).
unsigned int find_best_transformation(std::vector<Eigen::Vector3d> *map1,
                                      std::vector<Eigen::Vector3d> *map2,
                                      Eigen::Matrix3d *best_transformation,
                                      double distance_threshold) {
  // variables for storing the best results
  unsigned int best_points[4]; // <i1, i2, j1, j2>
  unsigned int most_friends = 0;
  unsigned int evaluated_models = 0;

  // i1 and i2 are indices for map1; j1 and j2 are indices for map2.
  for (unsigned int i1 = 0; i1 < map1->size() - 1; i1++) {
    for (unsigned int i2 = i1 + 1; i2 < map1->size(); i2++) {
      // for every couple of points in the first map
      for (unsigned int j1 = 0; j1 < map2->size(); j1++) {
        for (unsigned int j2 = 0; j2 < map2->size(); j2++) {
          // for every ordered couple of points in the second map
          if (j2 == j1) continue; // must be a couple!

          Eigen::Matrix3d transf = computeTransformation((*map1)[i1], (*map1)[i2],
                                                         (*map2)[j1], (*map2)[j2]);

          // do checks on the scale
          if (!approveModel(&transf)) continue;

          // apply the scale factor
          transf(0, 0) /= transf(2, 2);
          transf(0, 1) /= transf(2, 2);
          transf(1, 0) /= transf(2, 2);
          transf(1, 1) /= transf(2, 2);
          transf(2, 2) = 1;

          unsigned int friends = compute_score(map1, map2, transf, (*map1)[i1], distance_threshold);
          evaluated_models++;
          if (friends > most_friends) {
            most_friends = friends;
            best_points[0] = i1;
            best_points[1] = i2;
            best_points[2] = j1;
            best_points[3] = j2;
            *best_transformation = transf;
          }
        }
      }
    }
  }
  std::cout << "evaluated models: " << evaluated_models << "\n";
  return best_points[0];
}
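// compute_score is not shown in this example. A plausible sketch, consistent
// with the call site above, counts how many points of map1 land within
// distance_threshold of some point of map2 after applying the candidate
// transformation. Everything below is an assumption about the original
// (including ignoring the pivot argument, whose exact role belongs to the
// hidden implementation); points are treated as homogeneous 2D coordinates.
#include <Eigen/Dense>
#include <vector>

unsigned int compute_score(std::vector<Eigen::Vector3d> *map1,
                           std::vector<Eigen::Vector3d> *map2,
                           const Eigen::Matrix3d &transf,
                           const Eigen::Vector3d & /*pivot*/,
                           double distance_threshold) {
  unsigned int friends = 0;
  for (const Eigen::Vector3d &p : *map1) {
    Eigen::Vector3d q = transf * p;                        // map into map2's frame
    for (const Eigen::Vector3d &m : *map2) {
      if ((q - m).head<2>().norm() < distance_threshold) { // 2D distance only
        ++friends;
        break;                                             // each point counts once
      }
    }
  }
  return friends;
}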
template <typename PointSource, typename PointTarget> inline void
pcl::Registration<PointSource, PointTarget>::align (PointCloudSource &output, const Eigen::Matrix4f& guess)
{
  if (!initCompute ())
    return;

  if (!target_)
  {
    PCL_WARN ("[pcl::%s::compute] No input target dataset was given!\n", getClassName ().c_str ());
    return;
  }

  // Resize the output dataset
  if (output.points.size () != indices_->size ())
    output.points.resize (indices_->size ());
  // Copy the header
  output.header = input_->header;
  // Check if the output will be computed for all points or only a subset
  if (indices_->size () != input_->points.size ())
  {
    output.width = (int) indices_->size ();
    output.height = 1;
  }
  else
  {
    output.width = input_->width;
    output.height = input_->height;
  }
  output.is_dense = input_->is_dense;

  // Copy the point data to output
  for (size_t i = 0; i < indices_->size (); ++i)
    output.points[i] = input_->points[(*indices_)[i]];

  // Set the internal point representation of choice
  if (point_representation_)
    tree_->setPointRepresentation (point_representation_);

  // Perform the actual transformation computation
  converged_ = false;
  final_transformation_ = transformation_ = previous_transformation_ = Eigen::Matrix4f::Identity ();

  // Right before we estimate the transformation, we set all the point.data[3] values to 1 to aid the rigid
  // transformation
  for (size_t i = 0; i < indices_->size (); ++i)
    output.points[i].data[3] = 1.0;

  computeTransformation (output, guess);

  deinitCompute ();
}
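// For reference, the usual way this align() overload is reached from user
// code: a minimal usage sketch with stock PCL types. (setInputSource /
// setInputTarget is the modern API; early PCL releases spelled the source
// setter setInputCloud.)
#include <pcl/point_types.h>
#include <pcl/registration/icp.h>
#include <iostream>

void registerClouds(pcl::PointCloud<pcl::PointXYZ>::Ptr source,
                    pcl::PointCloud<pcl::PointXYZ>::Ptr target)
{
  pcl::IterativeClosestPoint<pcl::PointXYZ, pcl::PointXYZ> icp;
  icp.setInputSource(source);
  icp.setInputTarget(target);
  pcl::PointCloud<pcl::PointXYZ> aligned;
  icp.align(aligned);  // runs the registration loop shown above
  if (icp.hasConverged())
    std::cout << icp.getFinalTransformation() << std::endl;
}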
void ICP::run(bool withCuda, InputArray initObjSet)
{
  assert(!m_objSet.empty() && !m_modSet.empty());

  double d_pre = 100000, d_now = 100000;
  int iterCnt = 0;

  Mat objSet;
  Transformation tr;
  if (initObjSet.empty())
  {
    objSet = m_objSet.clone();
  }
  else
  {
    objSet = initObjSet.getMat();
  }
/*  plotTwoPoint3DSet(objSet, m_modSet);*/

  do
  {
    d_pre = d_now;

    Mat closestSet;
    Mat lambda(objSet.rows, 1, CV_64FC1);
    RUNANDTIME(global_timer,
      closestSet = getClosestPointsSet(objSet, lambda, KDTREE).clone(),
      OUTPUT && SUBOUTPUT, "compute closest points.");
    Mat tmpObjSet = convertMat(m_objSet);
    Mat tmpModSet = convertMat(closestSet);
    RUNANDTIME(global_timer,
      tr = computeTransformation(tmpObjSet, tmpModSet, lambda),
      OUTPUT && SUBOUTPUT, "compute transformation");
    Mat transformMat = tr.toMatrix();
    RUNANDTIME(global_timer,
      transformPointCloud(m_objSet, objSet, transformMat, withCuda),
      OUTPUT && SUBOUTPUT, "transform points.");
    RUNANDTIME(global_timer,
      d_now = computeError(objSet, closestSet, lambda, withCuda),
      OUTPUT && SUBOUTPUT, "compute error.");

    iterCnt++;
  } while (fabs(d_pre - d_now) > m_epsilon && iterCnt <= m_iterMax);

  m_tr = tr;

/*  waitKey();*/
/*  plotTwoPoint3DSet(objSet, m_modSet);*/
}
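// computeError is not shown in this example. A plausible definition,
// consistent with the lambda weights gathered above, is a weighted mean of
// squared point-to-point distances. This sketch is an assumption about the
// original (including the CV_64FC3 storage of the point sets and the unused
// CUDA flag):
static double computeError(const cv::Mat &objSet, const cv::Mat &closestSet,
                           const cv::Mat &lambda, bool /*withCuda*/)
{
  double sum = 0.0, weightSum = 0.0;
  for (int r = 0; r < objSet.rows; ++r)
  {
    double w = lambda.at<double>(r, 0);
    cv::Vec3d d = objSet.at<cv::Vec3d>(r, 0) - closestSet.at<cv::Vec3d>(r, 0);
    sum += w * d.dot(d);
    weightSum += w;
  }
  return weightSum > 0.0 ? sum / weightSum : 0.0;
}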
int main(int argc, char** argv)
{
  ros::init(argc, argv, "kinect_scan");
  ros::NodeHandle n;

  Eigen::Affine3d T_;
  computeTransformation(virtual_laser_frame, kinect_frame, T_);
  T_kinect_virtual_laser = Eigen::Affine3f(T_);

  image_transport::ImageTransport depth_image_transport(n);
  image_transport::CameraSubscriber depth_sub =
      depth_image_transport.subscribeCamera("/tower_cam3d/depth/image", 1, depth_callback);
  //ros::Subscriber sub = n.subscribe("/tower_cam3d/depth_registered/points", 1, depth_callback);
  scan_pub = n.advertise<sensor_msgs::LaserScan>("kinect_base_scan", 1);

  ros::spin();
  return 0;
}
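// computeTransformation is not shown here. On ROS, the lookup between two
// frames is typically done through tf; the sketch below is one plausible
// implementation (an assumption about the original, though the tf and
// tf_conversions calls themselves are the standard API):
#include <tf/transform_listener.h>
#include <tf_conversions/tf_eigen.h>

void computeTransformation(const std::string &target_frame,
                           const std::string &source_frame,
                           Eigen::Affine3d &T)
{
  tf::TransformListener listener;
  tf::StampedTransform transform;
  listener.waitForTransform(target_frame, source_frame, ros::Time(0), ros::Duration(5.0));
  listener.lookupTransform(target_frame, source_frame, ros::Time(0), transform);
  tf::transformTFToEigen(transform, T);
}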
// recursively build a table of geometry instances with ID and transform
// to be accessed when geometries are read in the second pass
bool OgreCollada::MeshWriter::createSceneDFS(const COLLADAFW::Node* cn, // node to instantiate
                                             Ogre::Matrix4 xn)         // accumulated transform
{
  // apply this node's transformation matrix to the one inherited from its parent
  const COLLADAFW::TransformationPointerArray& tarr = cn->getTransformations();
  // COLLADA spec says multiple transformations are "postmultiplied in the order in which
  // they are specified", which I think means like this:
  for (size_t i = 0; i < tarr.getCount(); ++i) {
    xn = xn * computeTransformation(tarr[i]);
  }

  // record any geometry instances present in this node, along with their attached materials
  // and cumulative transform
  const COLLADAFW::InstanceGeometryPointerArray& ginodes = cn->getInstanceGeometries();
  for (int i = 0, count = ginodes.getCount(); i < count; ++i) {
    COLLADAFW::InstanceGeometry* gi = ginodes[i];
    if (m_geometryUsage.find(gi->getInstanciatedObjectId()) == m_geometryUsage.end()) {
      // create empty usage list for this geometry
      m_geometryUsage.insert(std::make_pair(gi->getInstanciatedObjectId(), GeoInstUsageList()));
    }
    m_geometryUsage[gi->getInstanciatedObjectId()].push_back(std::make_pair(&(gi->getMaterialBindings()), xn));
  }

  // recursively follow child nodes and library instances
  const COLLADAFW::NodePointerArray& cnodes = cn->getChildNodes();
  for (int i = 0, count = cnodes.getCount(); i < count; ++i) {
    if (!createSceneDFS(cnodes[i], xn))
      return false;
  }
  const COLLADAFW::InstanceNodePointerArray& inodes = cn->getInstanceNodes();
  for (int i = 0, count = inodes.getCount(); i < count; ++i) {
    LibNodesIterator lit = m_libNodes.find(inodes[i]->getInstanciatedObjectId());
    if (lit == m_libNodes.end()) {
      LOG_DEBUG("COLLADA WARNING: could not find library node with unique ID " +
                boost::lexical_cast<Ogre::String>(inodes[i]->getInstanciatedObjectId()));
      continue;
    }
    if (!createSceneDFS(lit->second, xn))
      return false;
  }
  return true;
}
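// Worked example of the postmultiplication rule above (illustrative only):
// if a node lists a translation T followed by a rotation R, the loop
// accumulates xn = parent * T * R, so a local point p ends up at
//   p_world = parent * (T * (R * p))
// i.e. the node's own transforms apply innermost-first in local space,
// followed by everything inherited from ancestor nodes.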
void FeatureDetector::onTest()
{
  Sample n1;
  Sample n2;
  n1.generate(1);
  n2.generate(2);
  n1.generateJacobian();
  n2.generateJacobian();

  Transformation t = computeTransformation(n1, n2);

  qDebug("n1 z (%f,%f,%f)", n1.getZ().X(), n1.getZ().Y(), n1.getZ().Z());
  qDebug("n2 z (%f,%f,%f)", n2.getZ().X(), n2.getZ().Y(), n2.getZ().Z());
  GGL::Point3f n1AfterRotation = t.getRotationMatrix() * n1.getZ();
  // n1AfterRotation.Normalize();
  qDebug("n1 z after rotation: %f,%f,%f\n", n1AfterRotation.X(), n1AfterRotation.Y(), n1AfterRotation.Z());

  qDebug("n1 x (%f,%f,%f)", n1.getX().X(), n1.getX().Y(), n1.getX().Z());
  qDebug("n2 x (%f,%f,%f)", n2.getX().X(), n2.getX().Y(), n2.getX().Z());
  GGL::Point3f n1xAfterRotation = t.getRotationMatrix() * n1.getX();
  qDebug("n1 x after rotation: %f,%f,%f\n", n1xAfterRotation.X(), n1xAfterRotation.Y(), n1xAfterRotation.Z());

  qDebug("n1 y (%f,%f,%f)", n1.getY().X(), n1.getY().Y(), n1.getY().Z());
  qDebug("n2 y (%f,%f,%f)", n2.getY().X(), n2.getY().Y(), n2.getY().Z());
  GGL::Point3f n1yAfterRotation = t.getRotationMatrix() * n1.getY();
  qDebug("n1 y after rotation: %f,%f,%f\n", n1yAfterRotation.X(), n1yAfterRotation.Y(), n1yAfterRotation.Z());

  qDebug("rotation axis: %f,%f,%f\n", t.getRotationAxis().X(), t.getRotationAxis().Y(), t.getRotationAxis().Z());
}
QPointF PHIBaseItem::adjustedPos() const
{
    QRectF br=boundingRect();
    if ( hasGraphicEffect() ) {
        switch ( _effect->graphicsType() ) {
        case PHIEffect::GTShadow: {
            QColor c;
            qreal offx, offy, radius;
            _effect->shadow( c, offx, offy, radius );
            QPixmapDropShadowFilter f;
            f.setOffset( offx, offy );
            f.setBlurRadius( radius );
            QRectF r=f.boundingRectFor( br );
            if ( r.x()<0 ) br.setX( br.x()+r.x() );
            if ( r.y()<0 ) br.setY( br.y()+r.y() );
            break;
        }
        case PHIEffect::GTColorize:
            break;
        case PHIEffect::GTBlur: {
            qreal radius;
            _effect->blur( radius );
            QPixmapBlurFilter f;
            f.setRadius( radius );
            f.setBlurHints( QGraphicsBlurEffect::AnimationHint );
            QRectF r=f.boundingRectFor( br );
            br.setX( br.x()+r.x()+1 );
            br.setY( br.y()+r.y()+1 );
            break;
        }
        case PHIEffect::GTReflection:
            break;
        default:;
        }
    }
    QTransform t=computeTransformation( false );
    return t.map( br.topLeft() );
}
std::map<std::string, cv::Matx44d> estimate(const std::map<int, Quad> &tags) {
    std::map<std::string, cv::Matx44d> objects;

    std::map<
        const std::string,              //name of the object
        std::pair<
            std::vector<cv::Point3f>,   //points in object
            std::vector<cv::Point2f>>>  //points in frame
        objectToPointMapping;

    auto configurationIt = mId2Configuration.begin();
    auto configurationEnd = mId2Configuration.end();

    for (const auto &tag : tags) {
        int tagId = tag.first;
        const cv::Mat_<cv::Point2f> corners(tag.second);

        while (configurationIt != configurationEnd && configurationIt->first < tagId)
            ++configurationIt;

        if (configurationIt != configurationEnd) {
            if (configurationIt->first == tagId) {
                const auto &configuration = configurationIt->second;
                if (configuration.second.mKeep) {
                    computeTransformation(cv::format("tag_%d", tagId),
                                          configuration.second.mLocalcorners,
                                          corners,
                                          objects);
                }
                auto &pointMapping = objectToPointMapping[configuration.first];
                pointMapping.first.insert(
                    pointMapping.first.end(),
                    configuration.second.mCorners.begin(),
                    configuration.second.mCorners.end());
                pointMapping.second.insert(
                    pointMapping.second.end(),
                    corners.begin(),
                    corners.end());
            } else if (!mOmitOtherTags) {
                computeTransformation(cv::format("tag_%d", tagId),
                                      mDefaultTagCorners,
                                      corners,
                                      objects);
            }
        } else if (!mOmitOtherTags) {
            computeTransformation(cv::format("tag_%d", tagId),
                                  mDefaultTagCorners,
                                  corners,
                                  objects);
        }
    }

    for (auto &objectToPoints : objectToPointMapping) {
        computeTransformation(objectToPoints.first,
                              objectToPoints.second.first,
                              cv::Mat_<cv::Point2f>(objectToPoints.second.second),
                              objects);
    }

    return mFilter(objects);
}
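// computeTransformation is not shown in this example. Given 3D object-space
// corners and their 2D projections, the standard OpenCV route is solvePnP
// plus Rodrigues. The sketch below is an assumption about the original; in
// particular, the cameraMatrix and distCoeffs parameters stand in for
// calibration data the real class would hold as members:
void computeTransformation(const std::string &name,
                           const std::vector<cv::Point3f> &objectPoints,
                           const cv::Mat_<cv::Point2f> &imagePoints,
                           std::map<std::string, cv::Matx44d> &objects,
                           const cv::Mat &cameraMatrix,
                           const cv::Mat &distCoeffs)
{
    // estimate the pose of the object in the camera frame
    cv::Mat rvec, tvec;
    cv::solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);

    // convert the axis-angle rotation vector into a rotation matrix
    cv::Matx33d rotation;
    cv::Rodrigues(rvec, rotation);

    // assemble the homogeneous 4x4 transform
    objects[name] = cv::Matx44d(
        rotation(0,0), rotation(0,1), rotation(0,2), tvec.at<double>(0),
        rotation(1,0), rotation(1,1), rotation(1,2), tvec.at<double>(1),
        rotation(2,0), rotation(2,1), rotation(2,2), tvec.at<double>(2),
        0,             0,             0,             1);
}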