Code example #1
void Reconstructor::decodePaterns()
{

	std::cout<<"Decoding paterns...";

	int w=camera->width;
	int h=camera->height;

	cv::Point projPixel;
	
	for(int i=0; i<w; i++)
	{
		for(int j=0; j<h; j++)
		{

			//if the pixel is not in shadow, decode it
			if(shadowMask.at<uchar>(j,i))
			{

				//get the projector pixel corresponding to camera pixel (i,j)
				bool error = getProjPixel(i,j,projPixel);

				//decoding failed: mark the pixel as shadowed so it is skipped later
				if(error)
				{
					shadowMask.at<uchar>(j,i)=0;
					continue;
				}

				//accumulate the camera pixels that map to this projector pixel
				//(ac() converts the projector (x,y) coordinates into a linear index)
				camPixels[ac(projPixel.x,projPixel.y)].push_back(cv::Point(i,j));

			}
		}
	}
	std::cout<<"done!\n";
}
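In both examples the camera-to-projector mapping is done by getProjPixel(), which is not shown here: it reads one bit per pattern/inverted-pattern pair and converts the resulting Gray-code bit sequence into a decimal projector column and row. Below is a minimal sketch of that Gray-code-to-decimal step, assuming the bits are collected most significant bit first; the helper name grayToDec is illustrative and not taken from the snippets above.

#include <vector>

// Convert a Gray-code bit sequence (MSB first) to its decimal value.
// binary[0] = gray[0]; binary[i] = binary[i-1] XOR gray[i].
static int grayToDec( const std::vector<bool>& gray )
{
	int dec = 0;
	bool bit = false;                  // running binary bit
	for( size_t i = 0; i < gray.size(); ++i )
	{
		bit = bit ^ gray[i];           // next binary bit
		dec = ( dec << 1 ) | ( bit ? 1 : 0 );
	}
	return dec;
}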
Code example #2
bool GrayCodePattern_Impl::decode( const std::vector< std::vector<Mat> >& patternImages, OutputArray disparityMap,
                                   InputArrayOfArrays blackImages, InputArrayOfArrays whitheImages, int flags ) const
{
  const std::vector<std::vector<Mat> >& acquired_pattern = patternImages;

  if( flags == DECODE_3D_UNDERWORLD )
  {
    // Compute the shadow masks from the all-black and all-white reference images
    std::vector<Mat> shadowMasks;
    computeShadowMasks( blackImages, whitheImages, shadowMasks );

    int cam_width = acquired_pattern[0][0].cols;
    int cam_height = acquired_pattern[0][0].rows;

    Point projPixel;

    // Storage for the pixels of the two cameras that correspond to the same projector pixel
    std::vector<std::vector<std::vector<Point> > > camsPixels;
    camsPixels.resize( acquired_pattern.size() );

    // TODO: parallelize for (k and j)
    for( size_t k = 0; k < acquired_pattern.size(); k++ )
    {
      camsPixels[k].resize( params.height * params.width );
      for( int i = 0; i < cam_width; i++ )
      {
        for( int j = 0; j < cam_height; j++ )
        {
          //if the pixel is not shadowed, reconstruct
          if( shadowMasks[k].at<uchar>( j, i ) )
          {
            //for a camera pixel (i,j), compute the corresponding projector pixel by decoding the Gray code into a decimal number
            bool error = getProjPixel( acquired_pattern[k], i, j, projPixel );

            if( error )
            {
              continue;
            }

            // store camera pixel (i,j) under the linear index of its projector pixel
            camsPixels[k][projPixel.x * params.height + projPixel.y].push_back( Point( i, j ) );
          }
        }
      }
    }

    std::vector<Point> cam1Pixs, cam2Pixs;

    // The output disparity map has the camera resolution and stores doubles
    Mat& disparityMap_ = *( Mat* ) disparityMap.getObj();
    disparityMap_ = Mat( cam_height, cam_width, CV_64F, double( 0 ) );

    double number_of_pixels_cam1 = 0;
    double number_of_pixels_cam2 = 0;

    // For every projector pixel, gather the camera pixels of both cameras that decoded to it
    for( int i = 0; i < params.width; i++ )
    {
      for( int j = 0; j < params.height; j++ )
      {
        cam1Pixs = camsPixels[0][i * params.height + j];
        cam2Pixs = camsPixels[1][i * params.height + j];

        if( cam1Pixs.size() == 0 || cam2Pixs.size() == 0 )
          continue;

        // Average the x coordinates of the matching pixels in each camera;
        // the disparity is the difference between the two mean x positions
        Point p1;
        Point p2;

        double sump1x = 0;
        double sump2x = 0;

        number_of_pixels_cam1 += cam1Pixs.size();
        number_of_pixels_cam2 += cam2Pixs.size();
        for( int c1 = 0; c1 < (int) cam1Pixs.size(); c1++ )
        {
          p1 = cam1Pixs[c1];
          sump1x += p1.x;
        }
        for( int c2 = 0; c2 < (int) cam2Pixs.size(); c2++ )
        {
          p2 = cam2Pixs[c2];
          sump2x += p2.x;
        }

        sump2x /= cam2Pixs.size();
        sump1x /= cam1Pixs.size();
        // Write that difference as the disparity of every cam1 pixel of this projector pixel
        for( int c1 = 0; c1 < (int) cam1Pixs.size(); c1++ )
        {
          p1 = cam1Pixs[c1];
          disparityMap_.at<double>( p1.y, p1.x ) = ( double ) (sump2x - sump1x);
        }

        sump2x = 0;
        sump1x = 0;
      }
    }

    return true;
  }  // end if flags

  return false;
}
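Code example #2 follows the GrayCodePattern implementation in OpenCV's structured_light module, where decode() is normally reached through the public API rather than called directly. The following is a minimal sketch of such a call; the projector resolution, the two-camera setup, and the early-return guard are assumptions, and the actual capture of the pattern, black, and white images is not shown.

#include <opencv2/core.hpp>
#include <opencv2/structured_light.hpp>
#include <vector>

using namespace cv;

int main()
{
  // Assumed projector resolution; set to the real device values.
  structured_light::GrayCodePattern::Params params;
  params.width  = 1024;
  params.height = 768;
  Ptr<structured_light::GrayCodePattern> graycode =
      structured_light::GrayCodePattern::create( params );

  // Gray code pattern images to project (one Mat per pattern).
  std::vector<Mat> pattern;
  graycode->generate( pattern );

  // captured[k][p]: pattern p as seen by camera k; decode() expects two cameras.
  // blackImages[k] / whiteImages[k]: all-black / all-white reference shots used
  // to compute the shadow masks. Filling these from real cameras is not shown.
  std::vector<std::vector<Mat> > captured( 2 );
  std::vector<Mat> blackImages( 2 ), whiteImages( 2 );

  if( captured[0].size() != pattern.size() || captured[1].size() != pattern.size() )
    return 0;   // nothing was captured in this sketch

  Mat disparityMap;
  bool ok = graycode->decode( captured, disparityMap, blackImages, whiteImages,
                              structured_light::DECODE_3D_UNDERWORLD );
  return ok ? 0 : 1;
}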