Esempio n. 1
0
// Accepts a triangle (XYZ and UV values for each point) and returns a poly base and UV vectors
// NOTE : the UV coords should be scaled by the texture size
static inline void FTexCoordsToVectors(const FVector& V0, const FVector& UV0,
                                       const FVector& V1, const FVector& InUV1,
                                       const FVector& V2, const FVector& InUV2,
                                       FVector* InBaseResult, FVector* InUResult, FVector* InVResult )
{
    // Create polygon normal.
    FVector PN = FVector((V0-V1) ^ (V2-V0));
    PN = PN.GetSafeNormal();

    FVector UV1( InUV1 );
    FVector UV2( InUV2 );

    // Fudge UV's to make sure no infinities creep into UV vector math, whenever we detect identical U or V's.
    if( ( UV0.X == UV1.X ) || ( UV2.X == UV1.X ) || ( UV2.X == UV0.X ) ||
            ( UV0.Y == UV1.Y ) || ( UV2.Y == UV1.Y ) || ( UV2.Y == UV0.Y ) )
    {
        UV1 += FVector(0.004173f,0.004123f,0.0f);
        UV2 += FVector(0.003173f,0.003123f,0.0f);
    }

    //
    // Solve the equations to find our texture U/V vectors 'TU' and 'TV' by stacking them
    // into a 3x3 matrix , one for  u(t) = TU dot (x(t)-x(o) + u(o) and one for v(t)=  TV dot (.... ,
    // then the third assumes we're perpendicular to the normal.
    //
    FMatrix TexEqu = FMatrix::Identity;
    TexEqu.SetAxis( 0, FVector(	V1.X - V0.X, V1.Y - V0.Y, V1.Z - V0.Z ) );
    TexEqu.SetAxis( 1, FVector( V2.X - V0.X, V2.Y - V0.Y, V2.Z - V0.Z ) );
    TexEqu.SetAxis( 2, FVector( PN.X,        PN.Y,        PN.Z        ) );
    TexEqu = TexEqu.InverseFast();

    const FVector UResult( UV1.X-UV0.X, UV2.X-UV0.X, 0.0f );
    const FVector TUResult = TexEqu.TransformVector( UResult );

    const FVector VResult( UV1.Y-UV0.Y, UV2.Y-UV0.Y, 0.0f );
    const FVector TVResult = TexEqu.TransformVector( VResult );

    //
    // Adjust the BASE to account for U0 and V0 automatically, and force it into the same plane.
    //
    FMatrix BaseEqu = FMatrix::Identity;
    BaseEqu.SetAxis( 0, TUResult );
    BaseEqu.SetAxis( 1, TVResult );
    BaseEqu.SetAxis( 2, FVector( PN.X, PN.Y, PN.Z ) );
    BaseEqu = BaseEqu.InverseFast();

    const FVector BResult = FVector( UV0.X - ( TUResult|V0 ), UV0.Y - ( TVResult|V0 ),  0.0f );

    *InBaseResult = - 1.0f *  BaseEqu.TransformVector( BResult );
    *InUResult = TUResult;
    *InVResult = TVResult;

}
Esempio n. 2
0
/** Texture-maps a mesh from multiple cameras. For each camera in order: the
 *  still-unassigned faces are projected into the camera, occlusion-tested
 *  against faces already assigned to earlier cameras and against each other,
 *  visible faces get UV coordinates stored in mesh.tex_coordinates[cam], and
 *  occluded faces are pushed into a new sub-mesh handled by the next camera.
 *  Faces visible from no camera end up in mesh.tex_polygons[cameras.size()]
 *  with placeholder UVs of (-1,-1).
 *  @param mesh    in/out mesh; MUST contain exactly one sub-mesh on entry,
 *                 otherwise the function returns without doing anything.
 *  @param cameras cameras (pose + intrinsics) used for projection.
 */
template<typename PointInT> void
pcl::TextureMapping<PointInT>::textureMeshwithMultipleCameras (pcl::TextureMesh &mesh, const pcl::texture_mapping::CameraVector &cameras)
{

  // Precondition: all faces start in a single sub-mesh.
  if (mesh.tex_polygons.size () != 1)
    return;

  pcl::PointCloud<pcl::PointXYZ>::Ptr mesh_cloud (new pcl::PointCloud<pcl::PointXYZ>);

  pcl::fromPCLPointCloud2 (mesh.cloud, *mesh_cloud);

  std::vector<pcl::Vertices> faces;

  for (int current_cam = 0; current_cam < static_cast<int> (cameras.size ()); ++current_cam)
  {
    PCL_INFO ("Processing camera %d of %d.\n", current_cam+1, cameras.size ());

    // transform mesh into camera's frame
    pcl::PointCloud<pcl::PointXYZ>::Ptr camera_cloud (new pcl::PointCloud<pcl::PointXYZ>);
    pcl::transformPointCloud (*mesh_cloud, *camera_cloud, cameras[current_cam].pose.inverse ());

    // CREATE UV MAP FOR CURRENT FACES
    pcl::PointCloud<pcl::PointXY>::Ptr projections (new pcl::PointCloud<pcl::PointXY>);
    std::vector<pcl::Vertices>::iterator current_face;
    std::vector<bool> visibility;
    visibility.resize (mesh.tex_polygons[current_cam].size ());
    std::vector<UvIndex> indexes_uv_to_points;
    // for each current face

    // TODO change this: sentinel values marking "not projected" entries.
    // NaN UVs and idx -1 keep projections/indexes aligned 3-per-face even
    // for faces outside the FOV (the idx_face*3 indexing below relies on it).
    pcl::PointXY nan_point;
    nan_point.x = std::numeric_limits<float>::quiet_NaN ();
    nan_point.y = std::numeric_limits<float>::quiet_NaN ();
    UvIndex u_null;
    u_null.idx_cloud = -1;
    u_null.idx_face = -1;

    int cpt_invisible=0;
    for (int idx_face = 0; idx_face <  static_cast<int> (mesh.tex_polygons[current_cam].size ()); ++idx_face)
    {
      // project each vertex; if one is out of view, the whole face is skipped
      pcl::PointXY uv_coord1;
      pcl::PointXY uv_coord2;
      pcl::PointXY uv_coord3;

      if (isFaceProjected (cameras[current_cam],
                           camera_cloud->points[mesh.tex_polygons[current_cam][idx_face].vertices[0]],
                           camera_cloud->points[mesh.tex_polygons[current_cam][idx_face].vertices[1]],
                           camera_cloud->points[mesh.tex_polygons[current_cam][idx_face].vertices[2]],
                           uv_coord1,
                           uv_coord2,
                           uv_coord3))
       {
        // face is in the camera's FOV

        // add UV coordinates
        projections->points.push_back (uv_coord1);
        projections->points.push_back (uv_coord2);
        projections->points.push_back (uv_coord3);

        // remember corresponding face
        UvIndex u1, u2, u3;
        u1.idx_cloud = mesh.tex_polygons[current_cam][idx_face].vertices[0];
        u2.idx_cloud = mesh.tex_polygons[current_cam][idx_face].vertices[1];
        u3.idx_cloud = mesh.tex_polygons[current_cam][idx_face].vertices[2];
        u1.idx_face = idx_face; u2.idx_face = idx_face; u3.idx_face = idx_face;
        indexes_uv_to_points.push_back (u1);
        indexes_uv_to_points.push_back (u2);
        indexes_uv_to_points.push_back (u3);

        //keep track of visibility
        visibility[idx_face] = true;
      }
      else
      {
        // face not projected: push sentinels so indices stay 3-per-face
        projections->points.push_back (nan_point);
        projections->points.push_back (nan_point);
        projections->points.push_back (nan_point);
        indexes_uv_to_points.push_back (u_null);
        indexes_uv_to_points.push_back (u_null);
        indexes_uv_to_points.push_back (u_null);
        //keep track of visibility
        visibility[idx_face] = false;
        cpt_invisible++;
      }
    }

    // projections contains all UV points of the current faces (3 per face)
    // indexes_uv_to_points links a uv point to its point in the camera cloud
    // visibility tells whether a face was in the camera FOV (false = skip)

    // TODO handle case where no face could be projected
    if (visibility.size () - cpt_invisible !=0)
    {
        // create kdtree over the projected UV points for occlusion queries
        pcl::KdTreeFLANN<pcl::PointXY> kdtree;
        kdtree.setInputCloud (projections);

        std::vector<int> idxNeighbors;
        std::vector<float> neighborsSquaredDistance;
        // at first (idx_pcam < current_cam), check if some of the faces attached to previous cameras occlude the current faces
        // then (idx_pcam == current_cam), check for self occlusions. At this stage, we skip faces that were already marked as occluded
        cpt_invisible = 0;
        for (int idx_pcam = 0 ; idx_pcam <= current_cam ; ++idx_pcam)
        {
          // project all faces
          for (int idx_face = 0; idx_face <  static_cast<int> (mesh.tex_polygons[idx_pcam].size ()); ++idx_face)
          {

            if (idx_pcam == current_cam && !visibility[idx_face])
            {
              // we are now checking for self occlusions within the current faces
              // the current face was already declared as occluded.
              // therefore, it cannot occlude another face anymore => we skip it
              continue;
            }

            // project each vertex; if one is out of view, skip the face
            pcl::PointXY uv_coord1;
            pcl::PointXY uv_coord2;
            pcl::PointXY uv_coord3;

            if (isFaceProjected (cameras[current_cam],
                                 camera_cloud->points[mesh.tex_polygons[idx_pcam][idx_face].vertices[0]],
                                 camera_cloud->points[mesh.tex_polygons[idx_pcam][idx_face].vertices[1]],
                                 camera_cloud->points[mesh.tex_polygons[idx_pcam][idx_face].vertices[2]],
                                 uv_coord1,
                                 uv_coord2,
                                 uv_coord3))
             {
              // face is in the camera's FOV
              // get its circumscribed circle
              double radius;
              pcl::PointXY center;
              // getTriangleCircumcenterAndSize (uv_coord1, uv_coord2, uv_coord3, center, radius);
              getTriangleCircumcscribedCircleCentroid(uv_coord1, uv_coord2, uv_coord3, center, radius); // this function yields faster results than getTriangleCircumcenterAndSize

              // get points inside circ.circle
              if (kdtree.radiusSearch (center, radius, idxNeighbors, neighborsSquaredDistance) > 0 )
              {
                // for each neighbor
                for (size_t i = 0; i < idxNeighbors.size (); ++i)
                {
                  // compare depths in the camera frame: is the neighbor point
                  // behind every vertex of this (potentially occluding) face?
                  if (std::max (camera_cloud->points[mesh.tex_polygons[idx_pcam][idx_face].vertices[0]].z,
                                std::max (camera_cloud->points[mesh.tex_polygons[idx_pcam][idx_face].vertices[1]].z,
                                          camera_cloud->points[mesh.tex_polygons[idx_pcam][idx_face].vertices[2]].z))
                     < camera_cloud->points[indexes_uv_to_points[idxNeighbors[i]].idx_cloud].z)
                  {
                    // neighbor is farther than all the face's points. Check if it falls into the triangle
                    if (checkPointInsideTriangle(uv_coord1, uv_coord2, uv_coord3, projections->points[idxNeighbors[i]]))
                    {
                      // current neighbor is inside the triangle and farther away => the corresponding face is occluded
                      visibility[indexes_uv_to_points[idxNeighbors[i]].idx_face] = false;
                      cpt_invisible++;
                      // TODO we could remove the projections of this face from the kd-tree cloud, but I found it slower, and I need the points to stay ordered to query UV coordinates later
                    }
                  }
                }
              }
             }
          }
        }
    }

    // now, visibility is true for each face that belongs to the current camera
    // if a face is not visible, we push it into the next one.

    // make sure a tex_coordinates slot exists for this camera
    if (static_cast<int> (mesh.tex_coordinates.size ()) <= current_cam)
    {
      std::vector<Eigen::Vector2f> dummy_container;
      mesh.tex_coordinates.push_back (dummy_container);
    }
    mesh.tex_coordinates[current_cam].resize (3 * visibility.size ());

    std::vector<pcl::Vertices> occluded_faces;
    occluded_faces.resize (visibility.size ());
    std::vector<pcl::Vertices> visible_faces;
    visible_faces.resize (visibility.size ());

    int cpt_occluded_faces = 0;
    int cpt_visible_faces = 0;

    for (size_t idx_face = 0 ; idx_face < visibility.size () ; ++idx_face)
    {
      if (visibility[idx_face])
      {
        // face is visible by the current camera: copy UV coordinates.
        // read at idx_face*3 (projections holds 3 entries per face, including
        // NaN placeholders), write compacted at cpt_visible_faces*3
        mesh.tex_coordinates[current_cam][cpt_visible_faces * 3](0) = projections->points[idx_face*3].x;
        mesh.tex_coordinates[current_cam][cpt_visible_faces * 3](1) = projections->points[idx_face*3].y;

        mesh.tex_coordinates[current_cam][cpt_visible_faces * 3 + 1](0) = projections->points[idx_face*3 + 1].x;
        mesh.tex_coordinates[current_cam][cpt_visible_faces * 3 + 1](1) = projections->points[idx_face*3 + 1].y;

        mesh.tex_coordinates[current_cam][cpt_visible_faces * 3 + 2](0) = projections->points[idx_face*3 + 2].x;
        mesh.tex_coordinates[current_cam][cpt_visible_faces * 3 + 2](1) = projections->points[idx_face*3 + 2].y;

        visible_faces[cpt_visible_faces] = mesh.tex_polygons[current_cam][idx_face];

        cpt_visible_faces++;
      }
      else
      {
        // face is occluded: copy face into temp vector for the next camera
        occluded_faces[cpt_occluded_faces] = mesh.tex_polygons[current_cam][idx_face];
        cpt_occluded_faces++;
      }
    }
    mesh.tex_coordinates[current_cam].resize (cpt_visible_faces*3);

    // occluded faces become a new sub-mesh, processed by the next camera
    occluded_faces.resize (cpt_occluded_faces);
    mesh.tex_polygons.push_back (occluded_faces);

    visible_faces.resize (cpt_visible_faces);
    mesh.tex_polygons[current_cam].clear ();
    mesh.tex_polygons[current_cam] = visible_faces;

    int nb_faces = 0;
    for (int i = 0; i < static_cast<int> (mesh.tex_polygons.size ()); i++)
      nb_faces += static_cast<int> (mesh.tex_polygons[i].size ());
  }

  // we have been through all the cameras.
  // if any faces are left, they were not visible by any camera
  // we still need to produce uv coordinates for them

  if (mesh.tex_coordinates.size() <= cameras.size ())
  {
   std::vector<Eigen::Vector2f> dummy_container;
   mesh.tex_coordinates.push_back(dummy_container);
   }


  // leftover faces get placeholder UVs of (-1,-1)
  for(size_t idx_face = 0 ; idx_face < mesh.tex_polygons[cameras.size()].size() ; ++idx_face)
  {
    Eigen::Vector2f UV1, UV2, UV3;
    UV1(0) = -1.0; UV1(1) = -1.0;
    UV2(0) = -1.0; UV2(1) = -1.0;
    UV3(0) = -1.0; UV3(1) = -1.0;
    mesh.tex_coordinates[cameras.size()].push_back(UV1);
    mesh.tex_coordinates[cameras.size()].push_back(UV2);
    mesh.tex_coordinates[cameras.size()].push_back(UV3);
  }

}
Esempio n. 3
0
// Renders the scene's single object with per-face tangent-space attributes.
// Binds the ambient/diffuse/specular textures to units 0-2, then for every
// face computes the tangent and bitangent from the positions/UVs and emits
// the triangle through the fixed-function pipeline.
void obj_display() {

	// There is only one object in the scene.
	mesh *object = globalscene->mList[0].obejct;
	// Bind texture unit 0 and point the "colorTexture" sampler at it.
	glActiveTexture( GL_TEXTURE0 );
	glBindTexture(GL_TEXTURE_2D, globalscene->mList[0].ambTextureId);
	GLint location0 = glGetUniformLocation(MyShader, "colorTexture");
	if(location0 == -1)
		printf("Cant find texture name: colorTexture\n");
	else
		glUniform1i(location0, 0);
	// Bind texture unit 1 and point the "diffuseTex" sampler at it.
	glActiveTexture( GL_TEXTURE1 );
	glBindTexture(GL_TEXTURE_2D, globalscene->mList[0].difTextureId);
	GLint location1 = glGetUniformLocation(MyShader, "diffuseTex");
	if(location1 == -1)
		printf("Cant find texture name: diffuseTex\n");
	else
		glUniform1i(location1, 1);
	// Bind texture unit 2 and point the "specularTex" sampler at it.
	glActiveTexture( GL_TEXTURE2 );
	glBindTexture(GL_TEXTURE_2D, globalscene->mList[0].spcTextureId);
	GLint location2 = glGetUniformLocation(MyShader, "specularTex");
	if(location2 == -1)
		printf("Cant find texture name: specularTex\n");
	else
		glUniform1i(location2, 2);

	// Look up the vertex attribute locations; their values are set per face
	// below, after the tangent vectors have been computed.
	tangent_loc = glGetAttribLocation(MyShader, "tangent");
	bitangent_loc = glGetAttribLocation(MyShader, "bitangent");

	int lastMaterial = -1;
	for(size_t i=0;i < object->fTotal;++i)
	{
		// set material property if this face uses a different material
		if(lastMaterial != object->faceList[i].m)
		{
			lastMaterial = (int)object->faceList[i].m;
			glMaterialfv(GL_FRONT, GL_AMBIENT  , object->mList[lastMaterial].Ka);
			glMaterialfv(GL_FRONT, GL_DIFFUSE  , object->mList[lastMaterial].Kd);
			glMaterialfv(GL_FRONT, GL_SPECULAR , object->mList[lastMaterial].Ks);
			glMaterialfv(GL_FRONT, GL_SHININESS, &object->mList[lastMaterial].Ns);

			//you can obtain the texture name by object->mList[lastMaterial].map_Kd
			//load them once in the main function before mainloop
			//bind them in display function here
		}

		// Fetch the three vertex positions p0 p1 p2.
		float *vertex0 = object->vList[object->faceList[i][0].v].ptr;
		float *vertex1 = object->vList[object->faceList[i][1].v].ptr;
		float *vertex2 = object->vList[object->faceList[i][2].v].ptr;
		glm::vec3 p0(vertex0[0], vertex0[1], vertex0[2]);
		glm::vec3 p1(vertex1[0], vertex1[1], vertex1[2]);
		glm::vec3 p2(vertex2[0], vertex2[1], vertex2[2]);
		// Fetch the corresponding texture coordinates for the three vertices.
		float *texture0 = object->tList[object->faceList[i][0].t].ptr;
		float *texture1 = object->tList[object->faceList[i][1].t].ptr;
		float *texture2 = object->tList[object->faceList[i][2].t].ptr;
		glm::vec2 UV0(texture0[0], texture0[1]);
		glm::vec2 UV1(texture1[0], texture1[1]);
		glm::vec2 UV2(texture2[0], texture2[1]);
		// Edge vectors in object space and in UV space.
		glm::vec3 Edge1 = p1 - p0;
		glm::vec3 Edge2 = p2 - p0;
		glm::vec2 Edge1uv = UV1 - UV0;
		glm::vec2 Edge2uv = UV2 - UV0;
		// Compute tangent and bitangent.
		// BUGFIX: zero-initialize so a degenerate UV triangle (cp == 0) no
		// longer sends uninitialized garbage to glVertexAttrib3f below.
		glm::vec3 tangent(0.0f), bitangent(0.0f);
		float cp = Edge1uv.x * Edge2uv.y - Edge2uv.x * Edge1uv.y;
		if(cp != 0.0f) {
			float mul = 1.0f /cp;
			tangent = (Edge1 * Edge2uv.y + Edge2 * -Edge1uv.y) * mul;
			bitangent = (Edge1 * -Edge2uv.x + Edge2 * Edge1uv.x) * mul;
		}
		// specify the value of a generic vertex attribute for this face
		glVertexAttrib3f(tangent_loc, tangent.x, tangent.y, tangent.z);
		glVertexAttrib3f(bitangent_loc, bitangent.x, bitangent.y, bitangent.z);

		glBegin(GL_TRIANGLES);
		for (size_t j=0;j<3;++j)
		{
			// texture coordinates for all three bound texture units
			glMultiTexCoord2fv(GL_TEXTURE0, object->tList[object->faceList[i][j].t].ptr);
			glMultiTexCoord2fv(GL_TEXTURE1, object->tList[object->faceList[i][j].t].ptr);
			glMultiTexCoord2fv(GL_TEXTURE2, object->tList[object->faceList[i][j].t].ptr);
			glNormal3fv(object->nList[object->faceList[i][j].n].ptr);
			glVertex3fv(object->vList[object->faceList[i][j].v].ptr);
		}
		glEnd();
	}
}