Example 1
template<typename PointInT> void
pcl::TextureMapping<PointInT>::textureMeshwithMultipleCameras (pcl::TextureMesh &mesh, const pcl::texture_mapping::CameraVector &cameras)
{

  // the mesh must arrive as a single sub-mesh
  if (mesh.tex_polygons.size () != 1)
  {
    PCL_ERROR ("The mesh must contain only 1 sub-mesh!\n");
    return;
  }

  pcl::PointCloud<pcl::PointXYZ>::Ptr mesh_cloud (new pcl::PointCloud<pcl::PointXYZ>);

  pcl::fromPCLPointCloud2 (mesh.cloud, *mesh_cloud);

  std::vector<pcl::Vertices> faces;

  for (int current_cam = 0; current_cam < static_cast<int> (cameras.size ()); ++current_cam)
  {
    PCL_INFO ("Processing camera %d of %d.\n", current_cam+1, cameras.size ());
    
    // transform mesh into camera's frame
    pcl::PointCloud<pcl::PointXYZ>::Ptr camera_cloud (new pcl::PointCloud<pcl::PointXYZ>);
    pcl::transformPointCloud (*mesh_cloud, *camera_cloud, cameras[current_cam].pose.inverse ());

    // CREATE UV MAP FOR CURRENT FACES
    pcl::PointCloud<pcl::PointXY>::Ptr projections (new pcl::PointCloud<pcl::PointXY>);
    std::vector<pcl::Vertices>::iterator current_face;
    std::vector<bool> visibility;
    visibility.resize (mesh.tex_polygons[current_cam].size ());
    std::vector<UvIndex> indexes_uv_to_points;

    // placeholder values pushed for faces that fall outside the camera's FOV
    pcl::PointXY nan_point;
    nan_point.x = std::numeric_limits<float>::quiet_NaN ();
    nan_point.y = std::numeric_limits<float>::quiet_NaN ();
    UvIndex u_null;
    u_null.idx_cloud = -1;
    u_null.idx_face = -1;

    int cpt_invisible = 0;
    for (int idx_face = 0; idx_face <  static_cast<int> (mesh.tex_polygons[current_cam].size ()); ++idx_face)
    {
      // project each vertex; if one falls out of view, the whole face is skipped
      pcl::PointXY uv_coord1;
      pcl::PointXY uv_coord2;
      pcl::PointXY uv_coord3;

      if (isFaceProjected (cameras[current_cam],
                           camera_cloud->points[mesh.tex_polygons[current_cam][idx_face].vertices[0]],
                           camera_cloud->points[mesh.tex_polygons[current_cam][idx_face].vertices[1]],
                           camera_cloud->points[mesh.tex_polygons[current_cam][idx_face].vertices[2]],
                           uv_coord1,
                           uv_coord2,
                           uv_coord3))
      {
        // face is in the camera's FOV

        // add UV coordinates
        projections->points.push_back (uv_coord1);
        projections->points.push_back (uv_coord2);
        projections->points.push_back (uv_coord3);

        // remember corresponding face
        UvIndex u1, u2, u3;
        u1.idx_cloud = mesh.tex_polygons[current_cam][idx_face].vertices[0];
        u2.idx_cloud = mesh.tex_polygons[current_cam][idx_face].vertices[1];
        u3.idx_cloud = mesh.tex_polygons[current_cam][idx_face].vertices[2];
        u1.idx_face = idx_face; u2.idx_face = idx_face; u3.idx_face = idx_face;
        indexes_uv_to_points.push_back (u1);
        indexes_uv_to_points.push_back (u2);
        indexes_uv_to_points.push_back (u3);

        // keep track of visibility
        visibility[idx_face] = true;
      }
      else
      {
        projections->points.push_back (nan_point);
        projections->points.push_back (nan_point);
        projections->points.push_back (nan_point);
        indexes_uv_to_points.push_back (u_null);
        indexes_uv_to_points.push_back (u_null);
        indexes_uv_to_points.push_back (u_null);
        // keep track of visibility
        visibility[idx_face] = false;
        cpt_invisible++;
      }
    }

    // projections now contains all UV points of the current faces
    // indexes_uv_to_points links each UV point to its point in the camera cloud
    // visibility tells whether a face was in the camera's FOV (false = skip)

    // TODO handle the case where no face could be projected
    if (visibility.size () - cpt_invisible != 0)
    {
        //create kdtree
        pcl::KdTreeFLANN<pcl::PointXY> kdtree;
        kdtree.setInputCloud (projections);

        std::vector<int> idxNeighbors;
        std::vector<float> neighborsSquaredDistance;
        // at first (idx_pcam < current_cam), check if faces attached to previous cameras occlude the current faces;
        // then (idx_pcam == current_cam), check for self-occlusions. At this stage, we skip faces already marked as occluded
        cpt_invisible = 0;
        for (int idx_pcam = 0 ; idx_pcam <= current_cam ; ++idx_pcam)
        {
          // project all faces
          for (int idx_face = 0; idx_face <  static_cast<int> (mesh.tex_polygons[idx_pcam].size ()); ++idx_face)
          {

            if (idx_pcam == current_cam && !visibility[idx_face])
            {
              // we are now checking for self occlusions within the current faces
              // the current face was already declared as occluded.
              // therefore, it cannot occlude another face anymore => we skip it
              continue;
            }

            // project each vertex; if one falls out of view, the whole face is skipped
            pcl::PointXY uv_coord1;
            pcl::PointXY uv_coord2;
            pcl::PointXY uv_coord3;

            if (isFaceProjected (cameras[current_cam],
                                 camera_cloud->points[mesh.tex_polygons[idx_pcam][idx_face].vertices[0]],
                                 camera_cloud->points[mesh.tex_polygons[idx_pcam][idx_face].vertices[1]],
                                 camera_cloud->points[mesh.tex_polygons[idx_pcam][idx_face].vertices[2]],
                                 uv_coord1,
                                 uv_coord2,
                                 uv_coord3))
            {
              // face is in the camera's FOV
              // get its circumscribed circle
              double radius;
              pcl::PointXY center;
              // getTriangleCircumcenterAndSize (uv_coord1, uv_coord2, uv_coord3, center, radius);
              getTriangleCircumcscribedCircleCentroid (uv_coord1, uv_coord2, uv_coord3, center, radius); // faster than getTriangleCircumcenterAndSize

              // get points inside circ.circle
              if (kdtree.radiusSearch (center, radius, idxNeighbors, neighborsSquaredDistance) > 0 )
              {
                // for each neighbor
                for (size_t i = 0; i < idxNeighbors.size (); ++i)
                {
                  if (std::max (camera_cloud->points[mesh.tex_polygons[idx_pcam][idx_face].vertices[0]].z,
                                std::max (camera_cloud->points[mesh.tex_polygons[idx_pcam][idx_face].vertices[1]].z, 
                                          camera_cloud->points[mesh.tex_polygons[idx_pcam][idx_face].vertices[2]].z))
                     < camera_cloud->points[indexes_uv_to_points[idxNeighbors[i]].idx_cloud].z)
                  {
                    // the neighbor is farther than all the face's points; check if it falls inside the triangle
                    if (checkPointInsideTriangle(uv_coord1, uv_coord2, uv_coord3, projections->points[idxNeighbors[i]]))
                    {
                      // this neighbor lies behind the face and inside its projected triangle => the neighbor's face is occluded
                      visibility[indexes_uv_to_points[idxNeighbors[i]].idx_face] = false;
                      cpt_invisible++;
                      // TODO we could remove this face's projections from the kd-tree cloud, but I found it slower, and the points must stay ordered to query UV coordinates later
                    }
                  }
                }
              }
            }
          }
        }
    }

    // visibility is now true for each face visible to the current camera;
    // if a face is not visible, we push it to the next sub-mesh, to be tried by the next camera.

    if (static_cast<int> (mesh.tex_coordinates.size ()) <= current_cam)
    {
      std::vector<Eigen::Vector2f> dummy_container;
      mesh.tex_coordinates.push_back (dummy_container);
    }
    mesh.tex_coordinates[current_cam].resize (3 * visibility.size ());

    std::vector<pcl::Vertices> occluded_faces;
    occluded_faces.resize (visibility.size ());
    std::vector<pcl::Vertices> visible_faces;
    visible_faces.resize (visibility.size ());

    int cpt_occluded_faces = 0;
    int cpt_visible_faces = 0;

    for (size_t idx_face = 0 ; idx_face < visibility.size () ; ++idx_face)
    {
      if (visibility[idx_face])
      {
        // face is visible to the current camera: copy its UV coordinates
        mesh.tex_coordinates[current_cam][cpt_visible_faces * 3](0) = projections->points[idx_face*3].x;
        mesh.tex_coordinates[current_cam][cpt_visible_faces * 3](1) = projections->points[idx_face*3].y;

        mesh.tex_coordinates[current_cam][cpt_visible_faces * 3 + 1](0) = projections->points[idx_face*3 + 1].x;
        mesh.tex_coordinates[current_cam][cpt_visible_faces * 3 + 1](1) = projections->points[idx_face*3 + 1].y;

        mesh.tex_coordinates[current_cam][cpt_visible_faces * 3 + 2](0) = projections->points[idx_face*3 + 2].x;
        mesh.tex_coordinates[current_cam][cpt_visible_faces * 3 + 2](1) = projections->points[idx_face*3 + 2].y;

        visible_faces[cpt_visible_faces] = mesh.tex_polygons[current_cam][idx_face];

        cpt_visible_faces++;
      }
      else
      {
        // face is occluded: copy it into the temporary vector
        occluded_faces[cpt_occluded_faces] = mesh.tex_polygons[current_cam][idx_face];
        cpt_occluded_faces++;
      }
    }
    mesh.tex_coordinates[current_cam].resize (cpt_visible_faces*3);

    occluded_faces.resize (cpt_occluded_faces);
    mesh.tex_polygons.push_back (occluded_faces);

    visible_faces.resize (cpt_visible_faces);
    mesh.tex_polygons[current_cam].clear ();
    mesh.tex_polygons[current_cam] = visible_faces;

  }

  // we have been through all the cameras;
  // any faces left were not visible to any camera,
  // and we still need to produce UV coordinates for them

  if (mesh.tex_coordinates.size () <= cameras.size ())
  {
    std::vector<Eigen::Vector2f> dummy_container;
    mesh.tex_coordinates.push_back (dummy_container);
  }

  for (size_t idx_face = 0; idx_face < mesh.tex_polygons[cameras.size ()].size (); ++idx_face)
  {
    Eigen::Vector2f UV1, UV2, UV3;
    UV1(0) = -1.0; UV1(1) = -1.0;
    UV2(0) = -1.0; UV2(1) = -1.0;
    UV3(0) = -1.0; UV3(1) = -1.0;
    mesh.tex_coordinates[cameras.size()].push_back(UV1);
    mesh.tex_coordinates[cameras.size()].push_back(UV2);
    mesh.tex_coordinates[cameras.size()].push_back(UV3);
  }

}
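A minimal usage sketch for this method (the intrinsics, pose, and file name below are illustrative placeholders, not values from the source). The mesh must arrive with exactly one sub-mesh in tex_polygons; each camera carries the pose and intrinsics used for projection:

#include <pcl/surface/texture_mapping.h>

pcl::TextureMesh mesh;                    // filled beforehand: mesh.cloud plus exactly
                                          // one entry in mesh.tex_polygons
pcl::texture_mapping::CameraVector cams;

pcl::texture_mapping::Camera cam;
cam.pose = Eigen::Affine3f::Identity (); // camera pose in the world frame (placeholder)
cam.focal_length = 525.0;                // focal length in pixels (placeholder)
cam.width = 640;
cam.height = 480;
cam.texture_file = "view_0.png";         // placeholder image path
cams.push_back (cam);

pcl::TextureMapping<pcl::PointXYZ> tm;
tm.textureMeshwithMultipleCameras (mesh, cams);
// on return, mesh.tex_polygons holds cams.size () + 1 sub-meshes; the last one
// collects the faces seen by no camera, with UVs set to (-1, -1)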
Example 2
template<typename PointInT> void
pcl::TextureMapping<PointInT>::mapMultipleTexturesToMeshUV (pcl::TextureMesh &tex_mesh, pcl::texture_mapping::CameraVector &cams)
{

  if (tex_mesh.tex_polygons.size () != cams.size () + 1)
  {
    PCL_ERROR ("The mesh should be divided into nbCamera+1 sub-meshes.\n");
    PCL_ERROR ("You provided %d cameras and a mesh containing %d sub-meshes.\n", cams.size (), tex_mesh.tex_polygons.size ());
    return;
  }

  PCL_INFO ("You provided %d  cameras and a mesh containing %d sub-meshes.\n", cams.size (), tex_mesh.tex_polygons.size ());

  pcl::PointCloud<pcl::PointXYZ>::Ptr originalCloud (new pcl::PointCloud<pcl::PointXYZ>);
  pcl::PointCloud<pcl::PointXYZ>::Ptr camera_transformed_cloud (new pcl::PointCloud<pcl::PointXYZ>);

  // convert the mesh's cloud to PCL format for convenience
  pcl::fromPCLPointCloud2 (tex_mesh.cloud, *originalCloud);


  for (size_t m = 0; m < cams.size (); ++m)
  {
    // get current camera parameters
    Camera current_cam = cams[m];

    // get camera transform
    Eigen::Affine3f cam_trans = current_cam.pose;

    // transform cloud into current camera frame
    pcl::transformPointCloud (*originalCloud, *camera_transformed_cloud, cam_trans.inverse ());

    // vector of texture coordinates for each face
    std::vector<Eigen::Vector2f> texture_map_tmp;

    // process each face visible to this camera
    pcl::PointXYZ pt;
    size_t idx;
    for (size_t i = 0; i < tex_mesh.tex_polygons[m].size (); ++i)
    {
      Eigen::Vector2f tmp_VT;
      // for each point of this face
      for (size_t j = 0; j < tex_mesh.tex_polygons[m][i].vertices.size (); ++j)
      {
        // get point
        idx = tex_mesh.tex_polygons[m][i].vertices[j];
        pt = camera_transformed_cloud->points[idx];

        // compute UV coordinates for this point
        getPointUVCoordinates (pt, current_cam, tmp_VT);
        texture_map_tmp.push_back (tmp_VT);

      } // end points
    } // end faces

    // texture materials
    std::stringstream tex_name;
    tex_name << "material_" << m;
    tex_name >> tex_material_.tex_name;
    tex_material_.tex_file = current_cam.texture_file;
    tex_mesh.tex_materials.push_back (tex_material_);

    // texture coordinates
    tex_mesh.tex_coordinates.push_back (texture_map_tmp);
  } // end cameras

  // push an extra empty UV map (for the unseen faces) so that the OBJ writer does not crash
  std::vector<Eigen::Vector2f> texture_map_tmp;
  for (size_t i = 0; i < tex_mesh.tex_polygons[cams.size ()].size (); ++i)
    for (size_t j = 0; j < tex_mesh.tex_polygons[cams.size ()][i].vertices.size (); ++j)
    {
      Eigen::Vector2f tmp_VT;
      tmp_VT[0] = -1;
      tmp_VT[1] = -1;
      texture_map_tmp.push_back (tmp_VT);
    }

  tex_mesh.tex_coordinates.push_back (texture_map_tmp);

  // push on an extra dummy material for the same reason
  std::stringstream tex_name;
  tex_name << "material_" << cams.size ();
  tex_name >> tex_material_.tex_name;
  tex_material_.tex_file = "occluded.jpg";
  tex_mesh.tex_materials.push_back (tex_material_);

}
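This method expects the mesh to be pre-split into cams.size () + 1 sub-meshes, which is exactly the layout produced by sortFacesByCamera in the next example. A hedged call sketch, assuming sorted_mesh and cams already exist:

// sorted_mesh comes from sortFacesByCamera, so tex_polygons already holds
// cams.size () + 1 sub-meshes (one per camera, plus the occluded faces)
pcl::TextureMapping<pcl::PointXYZ> tm;
tm.mapMultipleTexturesToMeshUV (sorted_mesh, cams);
// each sub-mesh now has a material ("material_0", "material_1", ...) and
// per-vertex UV coordinates; the last sub-mesh gets the dummy "occluded.jpg"
// material with all UVs at (-1, -1)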
Example 3
template<typename PointInT> int
pcl::TextureMapping<PointInT>::sortFacesByCamera (pcl::TextureMesh &tex_mesh, pcl::TextureMesh &sorted_mesh,
                                                  const pcl::texture_mapping::CameraVector &cameras, const double octree_voxel_size,
                                                  PointCloud &visible_pts)
{
  if (tex_mesh.tex_polygons.size () != 1)
  {
    PCL_ERROR ("The mesh must contain only 1 sub-mesh!\n");
    return (-1);
  }

  if (cameras.size () == 0)
  {
    PCL_ERROR ("Must provide at least one camera info!\n");
    return (-1);
  }

  // copy mesh
  sorted_mesh = tex_mesh;
  // clear the polygons from sorted_mesh
  sorted_mesh.tex_polygons.clear ();

  pcl::PointCloud<pcl::PointXYZ>::Ptr original_cloud (new pcl::PointCloud<pcl::PointXYZ>);
  pcl::PointCloud<pcl::PointXYZ>::Ptr transformed_cloud (new pcl::PointCloud<pcl::PointXYZ>);
  pcl::PointCloud<pcl::PointXYZ>::Ptr filtered_cloud (new pcl::PointCloud<pcl::PointXYZ>);

  // load points into a PCL format
  pcl::fromPCLPointCloud2 (tex_mesh.cloud, *original_cloud);

  // for each camera
  for (size_t cam = 0; cam < cameras.size (); ++cam)
  {
    // get camera pose as transform
    Eigen::Affine3f cam_trans = cameras[cam].pose;

    // transform the original cloud into camera coordinates
    pcl::transformPointCloud (*original_cloud, *transformed_cloud, cam_trans.inverse ());

    // find occlusions on transformed cloud
    std::vector<int> visible, occluded;
    removeOccludedPoints (transformed_cloud, filtered_cloud, octree_voxel_size, visible, occluded);
    visible_pts = *filtered_cloud;

    // find visible faces => add them to polygon N for camera N
    // add polygon group for current camera in clean
    std::vector<pcl::Vertices> visibleFaces_currentCam;
    // iterate over the faces of the current mesh
    for (size_t faces = 0; faces < tex_mesh.tex_polygons[0].size (); ++faces)
    {
      // check if all the face's points are visible
      bool faceIsVisible = true;
      std::vector<int>::iterator it;

      // iterate over the face's vertices
      for (size_t current_pt_indice = 0; faceIsVisible && current_pt_indice < tex_mesh.tex_polygons[0][faces].vertices.size (); ++current_pt_indice)
      {
        // TODO this takes far too long! Better to create a helper function that raycasts here.
        it = std::find (occluded.begin (), occluded.end (), tex_mesh.tex_polygons[0][faces].vertices[current_pt_indice]);

        if (it == occluded.end ())
        {
          // point is not occluded
          // does it land on the camera's image plane?
          pcl::PointXYZ pt = transformed_cloud->points[tex_mesh.tex_polygons[0][faces].vertices[current_pt_indice]];
          Eigen::Vector2f dummy_UV;
          if (!getPointUVCoordinates (pt, cameras[cam], dummy_UV))
          {
            // point is not visible by the camera
            faceIsVisible = false;
          }
        }
        else
        {
          faceIsVisible = false;
        }
      }

      if (faceIsVisible)
      {
        // push current visible face into the sorted mesh
        visibleFaces_currentCam.push_back (tex_mesh.tex_polygons[0][faces]);
        // remove it from the unsorted mesh
        tex_mesh.tex_polygons[0].erase (tex_mesh.tex_polygons[0].begin () + faces);
        faces--; // compensate for the erase so the next face is not skipped
      }

    }
    sorted_mesh.tex_polygons.push_back (visibleFaces_currentCam);
  }

  // we should only have occluded and non-visible faces left in tex_mesh.tex_polygons[0]
  // we need to add them as an extra polygon in the sorted mesh
  sorted_mesh.tex_polygons.push_back (tex_mesh.tex_polygons[0]);
  return (0);
}
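This example chains naturally with the previous one: sortFacesByCamera produces the cams.size () + 1 layout that mapMultipleTexturesToMeshUV expects. A sketch of that pipeline, where the voxel size is an illustrative value rather than one taken from the source:

pcl::TextureMapping<pcl::PointXYZ> tm;
pcl::TextureMesh sorted_mesh;
pcl::PointCloud<pcl::PointXYZ> visible_points;
const double octree_voxel_size = 0.05;  // occlusion-test resolution, in cloud units (placeholder)

// mesh must contain a single sub-mesh; cams filled as in the previous examples
if (tm.sortFacesByCamera (mesh, sorted_mesh, cams, octree_voxel_size, visible_points) == 0)
  tm.mapMultipleTexturesToMeshUV (sorted_mesh, cams);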
/** \brief Display a 3D representation showing a cloud and a list of cameras with their 6DoF poses */
void showCameras (pcl::texture_mapping::CameraVector cams, pcl::PointCloud<pcl::PointXYZ>::Ptr &cloud)
{

  // visualization object
  pcl::visualization::PCLVisualizer visu ("cameras");

  // add a visual for each camera at the correct pose
  for (size_t i = 0; i < cams.size (); ++i)
  {
    // read current camera
    pcl::TextureMapping<pcl::PointXYZ>::Camera cam = cams[i];
    double focal = cam.focal_length;
    double height = cam.height;
    double width = cam.width;
    
    // create a 5-point visual for each camera
    pcl::PointXYZ p1, p2, p3, p4, p5;
    p1.x=0; p1.y=0; p1.z=0;
    double dist = 0.75;
    double minX, minY, maxX, maxY;
    maxX = dist * (width / (2.0 * focal));   // tan (atan (x)) == x, so the frustum
    minX = -maxX;                            // half-extent at depth dist is x * dist
    maxY = dist * (height / (2.0 * focal));
    minY = -maxY;
    p2.x=minX; p2.y=minY; p2.z=dist;
    p3.x=maxX; p3.y=minY; p3.z=dist;
    p4.x=maxX; p4.y=maxY; p4.z=dist;
    p5.x=minX; p5.y=maxY; p5.z=dist;
    p1=pcl::transformPoint (p1, cam.pose);
    p2=pcl::transformPoint (p2, cam.pose);
    p3=pcl::transformPoint (p3, cam.pose);
    p4=pcl::transformPoint (p4, cam.pose);
    p5=pcl::transformPoint (p5, cam.pose);
    std::stringstream ss;
    ss << "Cam #" << i+1;
    visu.addText3D(ss.str (), p1, 0.1, 1.0, 1.0, 1.0, ss.str ());

    ss.str ("");
    ss << "camera_" << i << "line1";
    visu.addLine (p1, p2,ss.str ());
    ss.str ("");
    ss << "camera_" << i << "line2";
    visu.addLine (p1, p3,ss.str ());
    ss.str ("");
    ss << "camera_" << i << "line3";
    visu.addLine (p1, p4,ss.str ());
    ss.str ("");
    ss << "camera_" << i << "line4";
    visu.addLine (p1, p5,ss.str ());
    ss.str ("");
    ss << "camera_" << i << "line5";
    visu.addLine (p2, p5,ss.str ());
    ss.str ("");
    ss << "camera_" << i << "line6";
    visu.addLine (p5, p4,ss.str ());
    ss.str ("");
    ss << "camera_" << i << "line7";
    visu.addLine (p4, p3,ss.str ());
    ss.str ("");
    ss << "camera_" << i << "line8";
    visu.addLine (p3, p2,ss.str ());
  }
  
  // add a coordinate system
  visu.addCoordinateSystem (1.0, "global");
  
  // add the mesh's cloud (colored on Z axis)
  pcl::visualization::PointCloudColorHandlerGenericField<pcl::PointXYZ> color_handler (cloud, "z");
  visu.addPointCloud (cloud, color_handler, "cloud");
  
  // reset camera
  visu.resetCamera ();
  
  // wait for user input
  visu.spin ();
}
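An illustrative call, assuming a cloud loaded from a placeholder PCD file and a camera vector filled as in the examples above; spin () blocks until the viewer window is closed:

#include <pcl/io/pcd_io.h>

pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
pcl::io::loadPCDFile ("cloud.pcd", *cloud);  // placeholder file name
showCameras (cams, cloud);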