void RotatingRaySensor::prepareFinalPointcloud() {
    // Copies the current full pointcloud to pointcloud_full.
    poseMutex.lock();
    Eigen::Affine3d current_pose2 = current_pose;
    Eigen::Affine3d rot;
    rot.setIdentity();
    rot.rotate(config.transf_sensor_rot_to_sensor);
    poseMutex.unlock();

    std::list<utils::Vector>::iterator it = fromCloud->begin();
    pointcloud_full.clear();
    pointcloud_full.reserve(fromCloud->size());
    base::Vector3d vec_local;

    for(int i = 0; it != fromCloud->end(); it++, i++) {
        // Transforms the pointcloud back from world to the current node (see receiveData()).
        // In addition, 'transf_sensor_rot_to_sensor' is applied, which describes
        // the orientation of the sensor in the unturned sensor frame.
        vec_local = rot * current_pose2.inverse() * (*it);
        pointcloud_full.push_back(vec_local);
    }

    //LOG_DEBUG("[RotatingRaySensor::prepareFinalPointcloud]: Pointcloud size: %d", pointcloud_full.size());

    fromCloud->clear();
}
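// --- Hedged usage sketch (not from the original source) ---
// A minimal, self-contained illustration of the world-to-sensor transform pattern
// used above: each point measured in the world frame is mapped back into the
// (unturned) sensor frame via rot * pose.inverse() * point. The names
// sensor_pose_in_world and sensor_rot_offset are hypothetical.
#include <Eigen/Geometry>
#include <vector>

std::vector<Eigen::Vector3d> worldToSensorFrame(const std::vector<Eigen::Vector3d>& world_points,
                                                const Eigen::Affine3d& sensor_pose_in_world,
                                                const Eigen::Quaterniond& sensor_rot_offset) {
    // Rotation that describes the sensor orientation in the unturned sensor frame.
    Eigen::Affine3d rot = Eigen::Affine3d::Identity();
    rot.rotate(sensor_rot_offset);

    std::vector<Eigen::Vector3d> sensor_points;
    sensor_points.reserve(world_points.size());
    for (const Eigen::Vector3d& p : world_points) {
        sensor_points.push_back(rot * sensor_pose_in_world.inverse() * p);
    }
    return sensor_points;
}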
TEST(TFEigenConversions, eigen_tf_transform)
{
    tf::Transform t;
    // Initialize to identity before composing; translate()/rotate() multiply onto
    // the existing transform, which would otherwise be uninitialized.
    Eigen::Affine3d k = Eigen::Affine3d::Identity();
    Eigen::Quaterniond kq;
    kq.coeffs()(0) = gen_rand(-1.0, 1.0);
    kq.coeffs()(1) = gen_rand(-1.0, 1.0);
    kq.coeffs()(2) = gen_rand(-1.0, 1.0);
    kq.coeffs()(3) = gen_rand(-1.0, 1.0);
    kq.normalize();
    k.translate(Eigen::Vector3d(gen_rand(-10, 10), gen_rand(-10, 10), gen_rand(-10, 10)));
    k.rotate(kq);

    transformEigenToTF(k, t);
    for(int i = 0; i < 3; i++)
    {
        ASSERT_NEAR(t.getOrigin()[i], k.matrix()(i, 3), 1e-6);
        for(int j = 0; j < 3; j++)
        {
            ASSERT_NEAR(t.getBasis()[i][j], k.matrix()(i, j), 1e-6);
        }
    }
}
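// --- Hedged companion sketch (not from the original source) ---
// A sketch of the reverse-direction check, tf -> Eigen, assuming the same
// gen_rand() helper as above and the tf_conversions function
// transformTFToEigen(const tf::Transform&, Eigen::Affine3d&).
TEST(TFEigenConversions, tf_eigen_transform)
{
    tf::Transform t;
    tf::Quaternion q(gen_rand(-1.0, 1.0), gen_rand(-1.0, 1.0), gen_rand(-1.0, 1.0), gen_rand(-1.0, 1.0));
    q.normalize();
    t.setOrigin(tf::Vector3(gen_rand(-10, 10), gen_rand(-10, 10), gen_rand(-10, 10)));
    t.setRotation(q);

    Eigen::Affine3d k;
    transformTFToEigen(t, k);
    for(int i = 0; i < 3; i++)
    {
        ASSERT_NEAR(t.getOrigin()[i], k.matrix()(i, 3), 1e-6);
        for(int j = 0; j < 3; j++)
        {
            ASSERT_NEAR(t.getBasis()[i][j], k.matrix()(i, j), 1e-6);
        }
    }
}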
icp_result sac_icp(PointCloudT::Ptr object, PointCloudT::Ptr scene, Eigen::Affine3d seed_pose) {
    /* Apply ICP at many different offsets and orientations to seek a more accurate pose estimate */

    // This is good, allow a configurable distance offset
    Eigen::Vector3d offsets[] = {
        Eigen::Vector3d(0., 0., 0.),
        Eigen::Vector3d(-0.06, 0, 0),
        Eigen::Vector3d(0, -0.06, 0),
        Eigen::Vector3d(0, 0, -0.06),
        Eigen::Vector3d(0.06, 0, 0),
        Eigen::Vector3d(0, 0.06, 0),
        Eigen::Vector3d(0, 0, 0.06)
    };

    // -> Do this better (try 45 deg increments, try auto-generating)
    orientation orientations[] = {
        {Eigen::Vector3d::UnitZ(), 0.0},
        {Eigen::Vector3d::UnitZ(), M_PI / 5},
        {Eigen::Vector3d::UnitZ(), 2 * M_PI / 5},
        {Eigen::Vector3d::UnitZ(), 3 * M_PI / 5},
        {Eigen::Vector3d::UnitZ(), 4 * M_PI / 5},
        {Eigen::Vector3d::UnitZ(), M_PI},
        {Eigen::Vector3d::UnitX(), M_PI / 5},
        {Eigen::Vector3d::UnitX(), 2 * M_PI / 5},
        {Eigen::Vector3d::UnitX(), 3 * M_PI / 5},
        {Eigen::Vector3d::UnitX(), 4 * M_PI / 5},
        {Eigen::Vector3d::UnitX(), M_PI},
        {Eigen::Vector3d::UnitY(), M_PI / 5},
        {Eigen::Vector3d::UnitY(), 2 * M_PI / 5},
        {Eigen::Vector3d::UnitY(), 3 * M_PI / 5},
        {Eigen::Vector3d::UnitY(), 4 * M_PI / 5},
        {Eigen::Vector3d::UnitY(), M_PI},
    };

    // void affine_cloud(Eigen::Vector3f axis, float theta, Eigen::Vector3f translation,
    //                   pcl::PointCloud<pcl::PointXYZ>& input_cloud,
    //                   pcl::PointCloud<pcl::PointXYZ>& destination_cloud) {

    pcl::PointCloud<pcl::PointXYZ>::Ptr xyz_scene(new pcl::PointCloud<pcl::PointXYZ>);
    pcl::copyPointCloud(*scene, *xyz_scene);  // Try this with pointNormals (Can we do this?)

    pcl::visualization::PCLVisualizer viewer("Point Cloud Visualization ENHANCED!");
    viewer.setSize(1280, 1024);  // Visualiser window size
    viewer.registerKeyboardCallback(&keyboardEventOccurred, (void*) NULL);

    pcl::visualization::PointCloudColorHandlerCustom<pcl::PointXYZ> scene_color_h(xyz_scene, 255, 0, 0);
    viewer.addPointCloud(xyz_scene, scene_color_h, "scene");

    pcl::PointCloud<pcl::PointXYZ>::Ptr affined_cloud(new pcl::PointCloud<pcl::PointXYZ>);
    viewer.addPointCloud(affined_cloud, "object");
    // Should be using .reset() instead of new, right? I dunno

    // Eigen::Affine3d initial_transform;
    // Eigen::Affine3d transform;
    Eigen::Matrix3d seed_rotation = seed_pose.linear();
    Eigen::Vector3d seed_translation = seed_pose.translation();

    for (int k = 0; k < 7; k++) {
        for (int j = 0; j < 16; j++) {
            pcl::console::print_highlight("At %i %i\n", k, j);
            affined_cloud.reset(new pcl::PointCloud<pcl::PointXYZ>);
            pcl::copyPointCloud(*object, *affined_cloud);

            // Perturb the seed pose by the current orientation and offset.
            Eigen::Matrix3d rotation = seed_rotation * (Eigen::AngleAxisd(orientations[j].theta, orientations[j].axis));
            Eigen::Vector3d translation = seed_translation + offsets[k];
            Eigen::Affine3d transform = Eigen::Affine3d::Identity();
            transform.rotate(rotation);
            transform.translation() = translation;
            pcl::transformPointCloud(*affined_cloud, *affined_cloud, transform);
            // affine_cloud(orientations[j].axis, orientations[j].theta, offsets[k], *affined_cloud, *affined_cloud);

            /*icp_result result = */apply_icp(affined_cloud, xyz_scene, 20, 0.01);
            // /*icp_result result = */apply_icp(affined_cloud, xyz_scene, 40, 0.01);
            // icp_result result = apply_icp(affined_cloud, xyz_scene, 40, 0.005);
            // affine_cloud(result.affine, *affined_cloud, *affined_cloud);

            viewer.updatePointCloud(affined_cloud, "object");
            viewer.updatePointCloud(xyz_scene, scene_color_h, "scene");

            next_iteration = false;
            // visualize(affined_cloud, xyz_scene, "Post-icp");
            while (next_iteration == false) {
                viewer.spinOnce();
            }
        }
    }

    // TODO: track the best-scoring alignment and return it; for now return a
    // default-constructed result so the non-void function has a defined return value.
    return icp_result();
}
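// --- Hedged sketch of apply_icp (not from the original source) ---
// The original apply_icp() is not shown above; a minimal version based on
// pcl::IterativeClosestPoint could look like the following. It assumes icp_result
// has an Eigen::Affine3d 'affine' member (used in a comment above) plus
// hypothetical 'converged' and 'score' fields.
#include <pcl/registration/icp.h>

icp_result apply_icp_sketch(pcl::PointCloud<pcl::PointXYZ>::Ptr source,
                            pcl::PointCloud<pcl::PointXYZ>::Ptr target,
                            int max_iterations, double max_correspondence_distance) {
    pcl::IterativeClosestPoint<pcl::PointXYZ, pcl::PointXYZ> icp;
    icp.setMaximumIterations(max_iterations);
    icp.setMaxCorrespondenceDistance(max_correspondence_distance);
    icp.setInputSource(source);
    icp.setInputTarget(target);

    // Run ICP; the aligned copy of the source cloud is written into 'aligned'.
    pcl::PointCloud<pcl::PointXYZ> aligned;
    icp.align(aligned);

    icp_result result;
    result.affine.matrix() = icp.getFinalTransformation().cast<double>();
    result.converged = icp.hasConverged();
    result.score = icp.getFitnessScore();
    return result;
}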
// Usage: ./Volumetricd.exe ../../data/monkey.obj 256 4 2 90
int main(int argc, char **argv)
{
    if (argc < 6)
    {
        std::cerr << "Missing parameters. Abort." << std::endl
                  << "Usage: ./Volumetricd.exe ../../data/monkey.obj 256 8 2 90" << std::endl;
        return EXIT_FAILURE;
    }
    Timer timer;
    const std::string filepath = argv[1];
    const int vol_size = atoi(argv[2]);
    const int vx_size = atoi(argv[3]);
    const int cloud_count = atoi(argv[4]);
    const int rot_interval = atoi(argv[5]);

    std::pair<std::vector<double>, std::vector<double>> depth_buffer;

    //
    // Projection and Modelview Matrices
    //
    Eigen::Matrix4d K = perspective_matrix(fov_y, aspect_ratio, near_plane, far_plane);
    std::pair<Eigen::Matrix4d, Eigen::Matrix4d> T(Eigen::Matrix4d::Identity(), Eigen::Matrix4d::Identity());

    //
    // Creating volume
    //
    Eigen::Vector3d voxel_size(vx_size, vx_size, vx_size);
    Eigen::Vector3d volume_size(vol_size, vol_size, vol_size);
    Eigen::Vector3d voxel_count(volume_size.x() / voxel_size.x(), volume_size.y() / voxel_size.y(), volume_size.z() / voxel_size.z());
    //
    Eigen::Affine3d grid_affine = Eigen::Affine3d::Identity();
    grid_affine.translate(Eigen::Vector3d(0, 0, -256));
    grid_affine.scale(Eigen::Vector3d(1, 1, -1));  // z is negative inside of screen
    Grid grid(volume_size, voxel_size, grid_affine.matrix());

    //
    // Importing .obj
    //
    timer.start();
    std::vector<Eigen::Vector3d> points3DOrig, pointsTmp;
    import_obj(filepath, points3DOrig);
    timer.print_interval("Importing monkey : ");
    std::cout << "Monkey point count : " << points3DOrig.size() << std::endl;

    //
    // Translating and rotating monkey point cloud
    std::pair<std::vector<Eigen::Vector3d>, std::vector<Eigen::Vector3d>> cloud;
    //
    Eigen::Affine3d rotate = Eigen::Affine3d::Identity();
    Eigen::Affine3d translate = Eigen::Affine3d::Identity();
    translate.translate(Eigen::Vector3d(0, 0, -256));

    //
    // Compute first cloud
    //
    for (Eigen::Vector3d p3d : points3DOrig)
    {
        Eigen::Vector4d rot = translate.matrix() * rotate.matrix() * p3d.homogeneous();
        rot /= rot.w();
        cloud.first.push_back(rot.head<3>());
    }

    //
    // Update grid with first cloud
    //
    timer.start();
    create_depth_buffer<double>(depth_buffer.first, cloud.first, K, Eigen::Matrix4d::Identity(), far_plane);
    timer.print_interval("CPU compute depth : ");

    timer.start();
    update_volume(grid, depth_buffer.first, K, T.first);
    timer.print_interval("CPU Update volume : ");

    //
    // Compute next clouds
    Eigen::Matrix4d cloud_mat = Eigen::Matrix4d::Identity();
    Timer iter_timer;
    for (int i = 1; i < cloud_count; ++i)
    {
        std::cout << std::endl << i << " : " << i * rot_interval << std::endl;
        iter_timer.start();

        // Rotation matrix
        rotate = Eigen::Affine3d::Identity();
        rotate.rotate(Eigen::AngleAxisd(DegToRad(i * rot_interval), Eigen::Vector3d::UnitY()));

        cloud.second.clear();
        for (Eigen::Vector3d p3d : points3DOrig)
        {
            Eigen::Vector4d rot = translate.matrix() * rotate.matrix() * p3d.homogeneous();
            rot /= rot.w();
            cloud.second.push_back(rot.head<3>());
        }
        //export_obj("../../data/cloud_cpu_2.obj", cloud.second);

        timer.start();
        create_depth_buffer<double>(depth_buffer.second, cloud.second, K, Eigen::Matrix4d::Identity(), far_plane);
        timer.print_interval("Compute depth buffer: ");
        //export_depth_buffer("../../data/cpu_depth_buffer_2.obj", depth_buffer.second);

        timer.start();
        Eigen::Matrix4d icp_mat;
        ComputeRigidTransform(cloud.first, cloud.second, icp_mat);
        timer.print_interval("Compute rigid transf: ");
        //std::cout << std::fixed << std::endl << "icp_mat " << std::endl << icp_mat << std::endl;

        // accumulate matrix
        cloud_mat = cloud_mat * icp_mat;
        //std::cout << std::fixed << std::endl << "cloud_mat " << std::endl << cloud_mat << std::endl;

        timer.start();
        //update_volume(grid, depth_buffer.second, K, cloud_mat.inverse());
        update_volume(grid, depth_buffer.second, K, cloud_mat.inverse());
        timer.print_interval("Update volume : ");

        // copy second point cloud to first
        cloud.first = cloud.second;
        //depth_buffer.first = depth_buffer.second;

        iter_timer.print_interval("Iteration time : ");
    }

    //std::cout << "------- // --------" << std::endl;
    //for (int i = 0; i < grid.data.size(); ++i)
    //{
    //    const Eigen::Vector3d& point = grid.data[i].point;
    //    std::cout << point.transpose() << "\t\t" << grid.data[i].tsdf << " " << grid.data[i].weight << std::endl;
    //}
    //std::cout << "------- // --------" << std::endl;

    //timer.start();
    //export_volume("../../data/grid_volume_cpu.obj", grid.data);
    //timer.print_interval("Exporting volume : ");
    //return 0;

    QApplication app(argc, argv);

    //
    // setup opengl viewer
    //
    GLModelViewer glwidget;
    glwidget.resize(640, 480);
    glwidget.setPerspective(60.0f, 0.1f, 10240.0f);
    glwidget.move(320, 0);
    glwidget.setWindowTitle("Point Cloud");
    glwidget.setWeelSpeed(0.1f);
    glwidget.setDistance(-0.5f);
    glwidget.show();

    Eigen::Matrix4d to_origin = Eigen::Matrix4d::Identity();
    to_origin.col(3) << -(volume_size.x() / 2.0), -(volume_size.y() / 2.0), -(volume_size.z() / 2.0), 1.0;  // set translate

    std::vector<Eigen::Vector4f> vertices, colors;

    int i = 0;
    for (int z = 0; z <= volume_size.z(); z += voxel_size.z())
    {
        for (int y = 0; y <= volume_size.y(); y += voxel_size.y())
        {
            for (int x = 0; x <= volume_size.x(); x += voxel_size.x(), i++)
            {
                const float tsdf = grid.data.at(i).tsdf;

                //Eigen::Vector4d p = grid_affine.matrix() * to_origin * Eigen::Vector4d(x, y, z, 1);
                Eigen::Vector4d p = to_origin * Eigen::Vector4d(x, y, z, 1);
                p /= p.w();

                if (tsdf > 0.1)
                {
                    vertices.push_back(p.cast<float>());
                    colors.push_back(Eigen::Vector4f(0, 1, 0, 1));
                }
                else if (tsdf < -0.1)
                {
                    vertices.push_back(p.cast<float>());
                    colors.push_back(Eigen::Vector4f(1, 0, 0, 1));
                }
            }
        }
    }

    //
    // setup model
    //
    std::shared_ptr<GLModel> model(new GLModel);
    model->initGL();
    model->setVertices(&vertices[0][0], vertices.size(), 4);
    model->setColors(&colors[0][0], colors.size(), 4);
    glwidget.addModel(model);

    //
    // setup kinect shader program
    //
    std::shared_ptr<GLShaderProgram> kinectShaderProgram(new GLShaderProgram);
    if (kinectShaderProgram->build("color.vert", "color.frag"))
        model->setShaderProgram(kinectShaderProgram);

    return app.exec();
}
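// --- Hedged sketch of ComputeRigidTransform (not from the original source) ---
// The original ComputeRigidTransform() is not shown above; a common way to compute
// the rigid transform between two corresponded point sets is Eigen's SVD-based
// umeyama() without scaling. The function name and signature below mirror the call
// site in main() but are otherwise an assumption.
#include <Eigen/Dense>
#include <Eigen/Geometry>
#include <vector>

bool ComputeRigidTransformSketch(const std::vector<Eigen::Vector3d>& src,
                                 const std::vector<Eigen::Vector3d>& dst,
                                 Eigen::Matrix4d& transform)
{
    if (src.size() != dst.size() || src.empty())
        return false;

    // Pack the corresponded points into 3xN matrices.
    Eigen::Matrix3Xd src_mat(3, src.size());
    Eigen::Matrix3Xd dst_mat(3, dst.size());
    for (size_t i = 0; i < src.size(); ++i)
    {
        src_mat.col(i) = src[i];
        dst_mat.col(i) = dst[i];
    }

    // Closed-form least-squares rigid alignment (rotation + translation, no scaling).
    transform = Eigen::umeyama(src_mat, dst_mat, false);
    return true;
}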
void smurf::Robot::loadFromSmurf(const std::string& path)
{
    configmaps::ConfigMap map;

    // parse joints from model
    boost::filesystem::path filepath(path);
    model = smurf_parser::parseFile(&map, filepath.parent_path().generic_string(), filepath.filename().generic_string(), true);

    // first we need to create all frames
    for (configmaps::ConfigVector::iterator it = map["frames"].begin(); it != map["frames"].end(); ++it)
    {
        configmaps::ConfigMap &fr(it->children);
        Frame *frame = new Frame(fr["name"]);
        availableFrames.push_back(frame);
        //std::cout << "Adding additional frame " << frame->getName() << std::endl;
    }

    for (std::pair<std::string, boost::shared_ptr<urdf::Link>> link : model->links_)
    {
        Frame *frame = new Frame(link.first);
        for (boost::shared_ptr<urdf::Visual> visual : link.second->visual_array)
        {
            frame->addVisual(*visual);
        }
        availableFrames.push_back(frame);
        //std::cout << "Adding link frame " << frame->getName() << std::endl;
    }

    for (std::pair<std::string, boost::shared_ptr<urdf::Joint>> jointIt : model->joints_)
    {
        boost::shared_ptr<urdf::Joint> joint = jointIt.second;
        //std::cout << "Adding joint " << joint->name << std::endl;

        Frame *source = getFrameByName(joint->parent_link_name);
        Frame *target = getFrameByName(joint->child_link_name);

        //TODO this might not be set in some cases, perhaps force a check
        configmaps::ConfigMap annotations;
        for (configmaps::ConfigItem &cv : map["joints"])
        {
            if (static_cast<std::string>(cv.children["name"]) == joint->name)
            {
                annotations = cv.children;
            }
        }

        switch (joint->type)
        {
            case urdf::Joint::FIXED:
            {
                const urdf::Pose &tr(joint->parent_to_joint_origin_transform);
                StaticTransformation *transform = new StaticTransformation(source, target,
                    Eigen::Quaterniond(tr.rotation.w, tr.rotation.x, tr.rotation.y, tr.rotation.z),
                    Eigen::Vector3d(tr.position.x, tr.position.y, tr.position.z));
                staticTransforms.push_back(transform);
            }
            break;
            case urdf::Joint::FLOATING:
            {
                DynamicTransformation *transform = new DynamicTransformation(source, target,
                    checkGet(annotations, "provider"), checkGet(annotations, "port"));
                dynamicTransforms.push_back(transform);

                Eigen::Vector3d axis(joint->axis.x, joint->axis.y, joint->axis.z);
                Eigen::Affine3d sourceToAxis(Eigen::Affine3d::Identity());
                sourceToAxis.translation() = axis;

                base::JointLimitRange limits;

                const urdf::Pose parentToOrigin(joint->parent_to_joint_origin_transform);
                Eigen::Quaterniond rot(parentToOrigin.rotation.w, parentToOrigin.rotation.x, parentToOrigin.rotation.y, parentToOrigin.rotation.z);
                Eigen::Vector3d trans(parentToOrigin.position.x, parentToOrigin.position.y, parentToOrigin.position.z);
                Eigen::Affine3d parentToOriginAff;
                parentToOriginAff.setIdentity();
                parentToOriginAff.rotate(rot);
                parentToOriginAff.translation() = trans;

                Joint *smurfJoint = new Joint(source, target, checkGet(annotations, "provider"),
                    checkGet(annotations, "port"), checkGet(annotations, "driver"),
                    limits, sourceToAxis, parentToOriginAff, joint);
                joints.push_back(smurfJoint);
            }
            break;
            case urdf::Joint::REVOLUTE:
            case urdf::Joint::CONTINUOUS:
            case urdf::Joint::PRISMATIC:
            {
                base::JointState minState;
                minState.position = joint->limits->lower;
                minState.effort = 0;
                minState.speed = 0;

                base::JointState maxState;
                maxState.position = joint->limits->upper;
                maxState.effort = joint->limits->effort;
                maxState.speed = joint->limits->velocity;

                base::JointLimitRange limits;
                limits.min = minState;
                limits.max = maxState;

                Eigen::Vector3d axis(joint->axis.x, joint->axis.y, joint->axis.z);
                Eigen::Affine3d sourceToAxis(Eigen::Affine3d::Identity());
                sourceToAxis.translation() = axis;

                DynamicTransformation *transform = NULL;
                Joint *smurfJoint;
                // push the corresponding smurf::Joint
                const urdf::Pose parentToOrigin(joint->parent_to_joint_origin_transform);
                Eigen::Quaterniond rot(parentToOrigin.rotation.w, parentToOrigin.rotation.x, parentToOrigin.rotation.y, parentToOrigin.rotation.z);
                Eigen::Vector3d trans(parentToOrigin.position.x, parentToOrigin.position.y, parentToOrigin.position.z);
                Eigen::Affine3d parentToOriginAff;
                parentToOriginAff.setIdentity();
                parentToOriginAff.rotate(rot);
                parentToOriginAff.translation() = trans;

                if (joint->type == urdf::Joint::REVOLUTE || joint->type == urdf::Joint::CONTINUOUS)
                {
                    transform = new RotationalJoint(source, target, checkGet(annotations, "provider"),
                        checkGet(annotations, "port"), checkGet(annotations, "driver"),
                        limits, sourceToAxis, axis, parentToOriginAff, joint);
                    smurfJoint = (Joint *)transform;
                }
                else
                {
                    transform = new TranslationalJoint(source, target, checkGet(annotations, "provider"),
                        checkGet(annotations, "port"), checkGet(annotations, "driver"),
                        limits, sourceToAxis, axis, parentToOriginAff, joint);
                    smurfJoint = (Joint *)transform;
                }
                dynamicTransforms.push_back(transform);
                joints.push_back(smurfJoint);
            }
            break;
            default:
                throw std::runtime_error("Smurf: Error, got unhandled joint type");
        }
    }

    // parse sensors from map
    for (configmaps::ConfigVector::iterator it = map["sensors"].begin(); it != map["sensors"].end(); ++it)
    {
        configmaps::ConfigMap sensorMap = it->children;
        smurf::Sensor *sensor = new Sensor(sensorMap["name"], sensorMap["type"], sensorMap["taskInstanceName"], getFrameByName(sensorMap["link"]));
        sensors.push_back(sensor);
    }
}
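// --- Hedged helper sketch (not from the original source) ---
// The urdf::Pose -> Eigen::Affine3d conversion above appears twice (for the
// FLOATING and REVOLUTE/CONTINUOUS/PRISMATIC cases); it could be factored into a
// small helper like this. The function name urdfPoseToAffine is hypothetical.
#include <urdf_model/pose.h>
#include <Eigen/Geometry>

static Eigen::Affine3d urdfPoseToAffine(const urdf::Pose& pose)
{
    // urdf stores the quaternion as (x, y, z, w); Eigen's constructor takes (w, x, y, z).
    Eigen::Quaterniond rot(pose.rotation.w, pose.rotation.x, pose.rotation.y, pose.rotation.z);
    Eigen::Vector3d trans(pose.position.x, pose.position.y, pose.position.z);

    Eigen::Affine3d affine = Eigen::Affine3d::Identity();
    affine.rotate(rot);
    affine.translation() = trans;
    return affine;
}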