Example #1
/*!
  Change the look-up table (LUT) of an image. Considering pixel gray
  level values \f$ l \f$ in the range \f$[A, B]\f$, this method allows
  rescaling these values to \f$[A^*, B^*]\f$ by linear interpolation:

  \f$
  \left\{ \begin{array}{ll}
  l \in ]-\infty, A] \mbox{, } &  l = A^* \\
  l \in  [B, \infty[ \mbox{, } &  l = B^* \\
  l \in ]A, B[ \mbox{, }       &  l = A^* + (l-A) * \frac{B^*-A^*}{B-A}
  \end{array}
  \right.
  \f$

  \param I : Image to process.
  \param A : Low gray level value of the range to consider.
  \param A_star : New gray level value \f$ A^*\f$ to assign to pixels
  whose value was \f$ A \f$.
  \param B : High gray level value of the range to consider.
  \param B_star : New gray level value \f$ B^*\f$ to assign to pixels
  whose value was \f$ B \f$.
  \return The modified image.

  \exception vpImageException::incorrectInitializationError If \f$B \leq A\f$.

  As shown in the example below, this method can be used to binarize
  an image. For an unsigned char image (in the range 0-255),
  thresholding this image at level 127 can be done by:

  \code
#include <visp3/core/vpImageTools.h>
#include <visp3/core/vpImage.h>
#include <visp3/io/vpImageIo.h>

int main()
{
  vpImage<unsigned char> I;
#ifdef _WIN32
  std::string filename("C:/temp/ViSP-images/Klimt/Klimt.ppm");
#else
  std::string filename("/local/soft/ViSP/ViSP-images/Klimt/Klimt.ppm");
#endif

  // Read an image from the disk
  vpImageIo::read(I, filename);

  // Binarize image I:
  // - gray level values less than or equal to 127 are set to 0,
  // - gray level values greater than or equal to 128 are set to 255
  vpImageTools::changeLUT(I, 127, 0, 128, 255);

  vpImageIo::write(I, "Klimt.pgm"); // Write the image in a PGM P5 image file format
}
  \endcode

*/
void vpImageTools::changeLUT(vpImage<unsigned char>& I,
                             unsigned char A,
                             unsigned char A_star,
                             unsigned char B,
                             unsigned char B_star)
{
  // Test if input values are valid
  if (B <= A) {
    vpERROR_TRACE("Bad gray levels") ;
    throw (vpImageException(vpImageException::incorrectInitializationError ,
                            "Bad gray levels"));
  }
  unsigned char v;

  double factor = (double)(B_star - A_star)/(double)(B - A);

  for (unsigned int i=0 ; i < I.getHeight(); i++)
    for (unsigned int j=0 ; j < I.getWidth(); j++) {
      v = I[i][j];

      if (v <= A)
        I[i][j] = A_star;
      else if (v >= B)
        I[i][j] = B_star;
      else
        I[i][j] = (unsigned char)(A_star + factor*(v-A));
    }
}
Example #2
/*!
  Display the line. The 3D line is projected into the image.

  \param I : The image.
  \param cMo : Pose used to project the 3D model into the image.
  \param cam : The camera parameters.
  \param col : The desired color.
  \param thickness : The thickness of the line.
  \param displayFullModel : If true, the line is displayed even if it is not visible.
*/
void
vpMbtDistanceLine::display(const vpImage<vpRGBa> &I, const vpHomogeneousMatrix &cMo,
                           const vpCameraParameters &cam, const vpColor col,
                           const unsigned int thickness, const bool displayFullModel)
{
    p1->changeFrame(cMo);
    p2->changeFrame(cMo);

    if(isvisible || displayFullModel) {
        vpImagePoint ip1, ip2;
        vpCameraParameters c = cam;
        if(poly.getClipping() > 3) // Contains at least one FOV constraint
            c.computeFov(I.getWidth(), I.getHeight());

        poly.computeRoiClipped(c);

        if( poly.roiPointsClip.size() == 2 &&
                ((poly.roiPointsClip[1].second & poly.roiPointsClip[0].second & vpMbtPolygon::NEAR_CLIPPING) == 0) &&
                ((poly.roiPointsClip[1].second & poly.roiPointsClip[0].second & vpMbtPolygon::FAR_CLIPPING) == 0) &&
                ((poly.roiPointsClip[1].second & poly.roiPointsClip[0].second & vpMbtPolygon::DOWN_CLIPPING) == 0) &&
                ((poly.roiPointsClip[1].second & poly.roiPointsClip[0].second & vpMbtPolygon::UP_CLIPPING) == 0) &&
                ((poly.roiPointsClip[1].second & poly.roiPointsClip[0].second & vpMbtPolygon::LEFT_CLIPPING) == 0) &&
                ((poly.roiPointsClip[1].second & poly.roiPointsClip[0].second & vpMbtPolygon::RIGHT_CLIPPING) == 0)) {
            vpMeterPixelConversion::convertPoint(cam,poly.roiPointsClip[0].first.get_x(),poly.roiPointsClip[0].first.get_y(),ip1);
            vpMeterPixelConversion::convertPoint(cam,poly.roiPointsClip[1].first.get_x(),poly.roiPointsClip[1].first.get_y(),ip2);

            vpDisplay::displayLine(I,ip1,ip2,col, thickness);
        }
    }
}
Example #3
/*!

  Computes the SURF points in a part of the current image I and
  tries to match them with the points in the reference list. The part
  of the image is a rectangle defined by its top left corner, its
  height and its width. The parameters of this rectangle must be given
  in pixels. Only the matched points are stored.

  \param I : The grayscale image where the points are computed.

  \param iP : The top left corner of the rectangle.

  \param height : height of the rectangle (in pixel).

  \param width : width of the rectangle (in pixel).

  \return The number of points that have been matched.
*/
unsigned int vpKeyPointSurf::matchPoint(const vpImage<unsigned char> &I,
			       const vpImagePoint &iP,
			       const unsigned int height, const unsigned int width)
{
  if((iP.get_i()+height) >= I.getHeight()
     || (iP.get_j()+width) >= I.getWidth())
  {
    vpTRACE("Bad size for the subimage");
    throw(vpException(vpImageException::notInTheImage ,
		      "Bad size for the subimage"));
  }

  vpImage<unsigned char> subImage;

  vpImageTools::createSubImage(I,
			       (unsigned int)iP.get_i(),
			       (unsigned int)iP.get_j(),
			       height, width, subImage);

  unsigned int nbMatchedPoint = this->matchPoint(subImage);

  for(unsigned int k = 0; k < nbMatchedPoint; k++)
  {
    (currentImagePointsList[k]).set_i((currentImagePointsList[k]).get_i()
				      + iP.get_i());
    (currentImagePointsList[k]).set_j((currentImagePointsList[k]).get_j()
				      + iP.get_j());
  }

  return(nbMatchedPoint);
}
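// Usage sketch for the method above. It assumes a reference image has
// already been learned with vpKeyPointSurf::buildReference(); the file
// names are placeholders.
#include <iostream>
#include <visp3/vision/vpKeyPointSurf.h>
#include <visp3/io/vpImageIo.h>

int main()
{
  vpImage<unsigned char> Iref, Icur;
  vpImageIo::read(Iref, "reference.pgm"); // placeholder file names
  vpImageIo::read(Icur, "current.pgm");

  vpKeyPointSurf surf;
  surf.buildReference(Iref); // learn the reference points

  // Match only inside a rectangle of 100 rows x 150 columns whose top
  // left corner is at pixel (20, 30)
  vpImagePoint topLeft(20, 30);
  unsigned int nbMatched = surf.matchPoint(Icur, topLeft, 100, 150);
  std::cout << nbMatched << " points matched" << std::endl;
  return 0;
}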
Example #4
/*!  
  Initialize the display (size, position and title) of a color
  image in RGBa format.

  \param I : Image to be displayed (note that the image has to be initialized).
  \param x, y : The window is set at position x,y (column index, row index).
  \param title : Window title.

*/
void
vpDisplayGTK::init(vpImage<vpRGBa> &I,
                   int x,
                   int y,
                   const std::string &title)
{
  if ((I.getHeight() == 0) || (I.getWidth()==0))
  {
    vpERROR_TRACE("Image not initialized " ) ;
    throw(vpDisplayException(vpDisplayException::notInitializedError,
                             "Image not initialized")) ;
  }

  if (x != -1)
    windowXPosition = x ;
  if (y != -1)
    windowYPosition = y ;

  if (! title.empty())
    title_ = title;
  init (I.getWidth(), I.getHeight(), windowXPosition, windowYPosition, title_) ;

  I.display = this ;
  displayHasBeenInitialized = true ;
}
Example #5
void 
vpMbKltTracker::init(const vpImage<unsigned char>& I)
{
  if(!modelInitialised){
    throw vpException(vpException::fatalError, "model not initialized");
  }
  
  bool reInitialisation = false;
  if(!useOgre)
    faces.setVisible(I, cam, cMo, angleAppears, angleDisappears, reInitialisation);
  else{
#ifdef VISP_HAVE_OGRE
    if(!faces.isOgreInitialised()){
      faces.setBackgroundSizeOgre(I.getHeight(), I.getWidth());
      faces.setOgreShowConfigDialog(ogreShowConfigDialog);
      faces.initOgre(cam);
      // Turn off the Ogre config dialog display for the next call to this
      // function, since the settings are saved in the ogre.cfg file and
      // reused during the next call
      ogreShowConfigDialog = false;
    }

    faces.setVisibleOgre(I, cam, cMo, angleAppears, angleDisappears, reInitialisation);

#else
    faces.setVisible(I, cam, cMo, angleAppears, angleDisappears, reInitialisation);
#endif
  }
  reinit(I);
}
Example #6
/*!
  Apply a filter to an image.

  \param I : Image to filter
  \param If : Filtered image.
  \param M : Filter coefficients.

*/
void
vpImageFilter::filter(const vpImage<unsigned char> &I,
		      vpImage<double>& If,
		      const vpMatrix& M)
{

  unsigned int size = M.getRows() ;
  unsigned int half_size = size/2 ;

  If.resize(I.getHeight(),I.getWidth()) ;

  If = 0 ;

  for (unsigned int i = half_size; i < I.getHeight() - half_size; i++)
  {
    for (unsigned int j = half_size; j < I.getWidth() - half_size; j++)
    {
      double conv = 0;

      // Weighted sum of the (size x size) neighborhood of pixel (i, j)
      for (unsigned int a = 0; a < size; a++)
        for (unsigned int b = 0; b < size; b++)
        {
          double val = I[i - half_size + a][j - half_size + b];
          conv += M[a][b] * val;
        }
      If[i][j] = conv;
    }
  }

}
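// For instance, a 3x3 mean filter can be applied with the method above as
// follows (a minimal sketch; the input file name is a placeholder).
#include <visp3/core/vpImageFilter.h>
#include <visp3/core/vpMatrix.h>
#include <visp3/io/vpImageIo.h>

int main()
{
  vpImage<unsigned char> I;
  vpImageIo::read(I, "image.pgm"); // placeholder file name

  // 3x3 averaging kernel: every coefficient is 1/9
  vpMatrix M(3, 3);
  for (unsigned int a = 0; a < 3; a++)
    for (unsigned int b = 0; b < 3; b++)
      M[a][b] = 1.0 / 9.0;

  vpImage<double> If;
  vpImageFilter::filter(I, If, M);
  return 0;
}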
Example #7
int Conversion::convert(const vpImage<float> &dmap, Eigen::MatrixXf &point3D,
                        double fx, double fy, double cx, double cy)
{
  int height = dmap.getHeight();
  int width  = dmap.getWidth();
  point3D.resize(height*width, 3);

  int index = 0;
  for (int i = 0; i < height; i++) {
    for (int j = 0; j < width; j++) {
      float z = dmap[i][j];
      // Keep the point only if the depth is valid (-1 marks an invalid
      // measurement) and strictly positive
      if (fabs(z + 1.f) > std::numeric_limits<float>::epsilon() && z > 0) {
        point3D(index, 2) = z;
        // Pinhole back-projection: the column index j maps to x through
        // (cx, fx), the row index i maps to y through (cy, fy)
        point3D(index, 0) = (float)((j - cx) * z / fx);
        point3D(index, 1) = (float)((i - cy) * z / fy);
        index++;
      }
    }
  }
  // Shrink the matrix to remove the rows left unused by pruned (invalid
  // or non-positive) depth values
  point3D.conservativeResize(index, 3);
  return 1;
}
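// For reference, the pinhole back-projection used above maps a pixel with
// column u, row v and depth z to x = (u - cx) * z / fx and
// y = (v - cy) * z / fy, where fx, fy are the focal lengths in pixels and
// (cx, cy) is the principal point.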
Example #8
/*!
  Tests whether the line is close to the border of the image (at a given threshold).

  \param I : the input image (to know its size)
  \param threshold : the threshold in pixels
  \return true if the line is near the border of the image
*/
bool 
vpMbtDistanceLine::closeToImageBorder(const vpImage<unsigned char>& I, const unsigned int threshold)
{
  if(threshold > I.getWidth() || threshold > I.getHeight()){
    return true;
  }
  if (isvisible){

    for(unsigned int k = 0 ; k < meline.size() ; k++){
      for(std::list<vpMeSite>::const_iterator it=meline[k]->getMeList().begin(); it!=meline[k]->getMeList().end(); ++it){
        int i = it->i ;
        int j = it->j ;

        if(i < 0 || j < 0){ //out of image.
          return true;
        }

        if( ((unsigned int)i > (I.getHeight()- threshold) ) || (unsigned int)i < threshold ||
            ((unsigned int)j > (I.getWidth ()- threshold) ) || (unsigned int)j < threshold ) {
          return true;
        }
      }
    }
  }
  return false;
}
Example #9
/*!
  Display a moving line from its equation parameters and its extremities, together with its site list.

  \param I : The image used as background.

  \param PExt1 : First extremity

  \param PExt2 : Second extremity

  \param site_list : vpMeSite list

  \param A : Parameter a of the line equation a*i + b*j + c = 0

  \param B : Parameter b of the line equation a*i + b*j + c = 0

  \param C : Parameter c of the line equation a*i + b*j + c = 0

  \param color : Color used to display the line.

  \param thickness : Thickness of the line.
*/
void vpMeLine::display(const vpImage<vpRGBa>& I,const vpMeSite &PExt1, const vpMeSite &PExt2,
                       const std::list<vpMeSite> &site_list,
                       const double &A, const double &B, const double &C,
                       const vpColor &color,  unsigned int thickness)
{
  vpImagePoint ip;

  for(std::list<vpMeSite>::const_iterator it=site_list.begin(); it!=site_list.end(); ++it){
    vpMeSite pix = *it;
    ip.set_i( pix.ifloat );
    ip.set_j( pix.jfloat );

    if (pix.getState() == vpMeSite::M_ESTIMATOR)
      vpDisplay::displayCross(I, ip, 5, vpColor::green,thickness);
    else
      vpDisplay::displayCross(I, ip, 5, color,thickness);

    //vpDisplay::flush(I);
  }

  vpImagePoint ip1, ip2;

  if (fabs(A) < fabs(B)) {
    double i1, j1, i2, j2;
    i1 = 0;
    j1 = (-A*i1 -C) / B;
    i2 = I.getHeight() - 1.0;
    j2 = (-A*i2 -C) / B;

    ip1.set_i( i1 );
    ip1.set_j( j1 );
    ip2.set_i( i2 );
    ip2.set_j( j2 );
    vpDisplay::displayLine(I, ip1, ip2, color);
    //vpDisplay::flush(I);

  }
  else {
    double i1, j1, i2, j2;
    j1 = 0;
    i1 = -(B * j1 + C) / A;
    j2 = I.getWidth() - 1.0;
    i2 = -(B * j2 + C) / A;

    ip1.set_i( i1 );
    ip1.set_j( j1 );
    ip2.set_i( i2 );
    ip2.set_j( j2 );
    vpDisplay::displayLine(I, ip1, ip2, color);
    //vpDisplay::flush(I);
  }

  ip1.set_i( PExt1.ifloat );
  ip1.set_j( PExt1.jfloat );
  vpDisplay::displayCross(I, ip1, 10, vpColor::green,thickness);

  ip1.set_i( PExt2.ifloat );
  ip1.set_j( PExt2.jfloat );
  vpDisplay::displayCross(I, ip1, 10, vpColor::green,thickness);
}
Example #10
/*!
  Grabs a grayscale image from the selected camera. If the camera color
  coding differs from vp1394CMUGrabber::MONO8, the acquired image is
  converted into a gray-level image to match the requested format.

  \param I : Acquired gray level image.
  */
void 
vp1394CMUGrabber::acquire(vpImage<unsigned char> &I)
{
  // get image data
  unsigned long length;
  unsigned char *rawdata = NULL ;
  int dropped;
  unsigned int size;

  if(init == false){
    close();
    throw (vpFrameGrabberException(vpFrameGrabberException::initializationError,
                                   "Initialization not done") );
  }

  camera->AcquireImageEx(TRUE,&dropped);
  rawdata = camera->GetRawData(&length);

  size = I.getWidth() * I.getHeight();
  switch(_color) {
    case vp1394CMUGrabber::MONO8:
      memcpy(I.bitmap, (unsigned char *) rawdata, size);
      break;
    case vp1394CMUGrabber::MONO16:
      vpImageConvert::MONO16ToGrey(rawdata, I.bitmap, size);
      break;

    case vp1394CMUGrabber::YUV411:
      vpImageConvert::YUV411ToGrey(rawdata, I.bitmap, size);
      break;

    case vp1394CMUGrabber::YUV422:
      vpImageConvert::YUV422ToGrey(rawdata, I.bitmap, size);
      break;

    case vp1394CMUGrabber::YUV444:
      vpImageConvert::YUV444ToGrey(rawdata, I.bitmap, size);
      break;

    case vp1394CMUGrabber::RGB8:
      vpImageConvert::RGBToGrey(rawdata, I.bitmap, size);
      break;

    default:
      close();
      vpERROR_TRACE("Format conversion not implemented. Acquisition failed.");
      throw (vpFrameGrabberException(vpFrameGrabberException::otherError,
                                     "Format conversion not implemented. "
                                     "Acquisition failed.") );
      break;
  };

}
Example #11
// Specific function for ME
double
vpMeSite::convolution(const vpImage<unsigned char> &I, const vpMe *me)
{
    int half;
    unsigned int index_mask ;
    int height_ = static_cast<int>(I.getHeight());
    int width_  = static_cast<int>(I.getWidth());

    double conv = 0.0 ;
    unsigned int msize = me->getMaskSize();
    half = (static_cast<int>(msize) - 1) >> 1 ;

    if(horsImage( i , j , half + me->getStrip() , height_, width_))
    {
        conv = 0.0 ;
        i = 0 ;
        j = 0 ;
    }
    else
    {
        // Calculate tangent angle from normal
        double theta  = alpha+M_PI/2;
        // Move tangent angle to within 0->M_PI for a positive
        // mask index
        while (theta<0) theta += M_PI;
        while (theta>M_PI) theta -= M_PI;

        // Convert radians to degrees
        int thetadeg = vpMath::round(theta * 180 / M_PI) ;

        if(abs(thetadeg) == 180 )
        {
            thetadeg= 0 ;
        }

        index_mask = (unsigned int)(thetadeg/(double)me->getAngleStep());

        unsigned int i_ = static_cast<unsigned int>(i);
        unsigned int j_ = static_cast<unsigned int>(j);
        unsigned int half_ = static_cast<unsigned int>(half);

        unsigned int ihalf = i_-half_ ;
        unsigned int jhalf = j_-half_ ;
        unsigned int ihalfa ;

        for(unsigned int a = 0 ; a < msize ; a++ )
        {
            ihalfa = ihalf+a ;
            for(unsigned int b = 0 ; b < msize ; b++ )
            {
                conv += mask_sign* me->getMask()[index_mask][a][b] *
                        //	  I(i-half+a,j-half+b) ;
                        I(ihalfa,jhalf+b) ;
            }
        }

    }

    return(conv) ;
}
Example #12
/*!
  Get the view of the virtual camera. Be careful, the image I is modified. The projected image is not added as an overlay!
  
  \param I : The image used to store the result.
  \param cam : The parameters of the virtual camera.
*/
void
vpImageSimulator::getImage(vpImage<unsigned char> &I, const vpCameraParameters cam)
{
  int nb_point_dessine = 0;
  if (cleanPrevImage)
  {
    unsigned char col = (unsigned char)(0.2126 * bgColor.R + 0.7152 * bgColor.G + 0.0722 * bgColor.B);
    for (int i = (int)rect.getTop(); i < (int)rect.getBottom(); i++)
    {
      for (int j = (int)rect.getLeft(); j < (int)rect.getRight(); j++)
      {
        I[i][j] = col;
      }
    }
  }
  if (visible)
  {
    getRoi(I.getWidth(), I.getHeight(), cam, pt, rect);

    double top = rect.getTop();
    double bottom = rect.getBottom();
    double left = rect.getLeft();
    double right = rect.getRight();

    unsigned char *bitmap = I.bitmap;
    unsigned int width = I.getWidth();
    vpImagePoint ip;

    for (int i = (int)top; i < (int)bottom; i++)
    {
      for (int j = (int)left; j < (int)right; j++)
      {
        double x = 0, y = 0;
        ip.set_ij(i, j);
        vpPixelMeterConversion::convertPoint(cam, ip, x, y);
        ip.set_ij(y, x);
        if (colorI == GRAY_SCALED)
        {
          unsigned char Ipixelplan = 0;
          if (getPixel(ip, Ipixelplan))
          {
            *(bitmap + i*width + j) = Ipixelplan;
            nb_point_dessine++;
          }
        }
        else if (colorI == COLORED)
        {
          vpRGBa Ipixelplan;
          if (getPixel(ip, Ipixelplan))
          {
            unsigned char pixelgrey = (unsigned char)(0.2126 * Ipixelplan.R + 0.7152 * Ipixelplan.G + 0.0722 * Ipixelplan.B);
            *(bitmap + i*width + j) = pixelgrey;
            nb_point_dessine++;
          }
        }
      }
    }
  }
}
Example #13
/*!

\brief Constructor : initialize a display to visualize a grayscale image
(8 bits).

\param I  Image to be displayed (note that the image has to be initialized).
\param winx, winy  The window is set at position (winx, winy) (column index, row index).
\param title  Window's title.
\param scaleType : If this parameter is set to:
  - vpDisplay::SCALE_AUTO, the display size is adapted to ensure the image
    is fully displayed in the screen;
  - vpDisplay::SCALE_DEFAULT or vpDisplay::SCALE_1, the display size is the same as the image size;
  - vpDisplay::SCALE_2, the display size is downscaled by 2 along the lines and the columns;
  - vpDisplay::SCALE_3, the display size is downscaled by 3 along the lines and the columns;
  - vpDisplay::SCALE_4, the display size is downscaled by 4 along the lines and the columns;
  - vpDisplay::SCALE_5, the display size is downscaled by 5 along the lines and the columns.

*/
vpDisplayD3D::vpDisplayD3D(vpImage<unsigned char> &I,
                           int winx, int winy,
                           const std::string &title, vpScaleType scaleType)
  : vpDisplayWin32(new vpD3DRenderer())
{
  setScale(scaleType, I.getWidth(), I.getHeight());
  init(I,winx,winy,title);
}
Example #14
/*!
  Sets all the parameters needed to read the video or the image sequence.

  Grabs the first frame and stores it in the image \f$ I \f$.
  
  \param I : The image where the frame is stored.
*/
void vpVideoReader::open(vpImage< vpRGBa > &I)
{
  if (!initFileName)
  {
    vpERROR_TRACE("The generic filename has to be set");
    throw (vpImageException(vpImageException::noFileNameError,"filename empty"));
  }
  
  if (formatType == FORMAT_PGM ||
      formatType == FORMAT_PPM ||
      formatType == FORMAT_JPEG ||
      formatType == FORMAT_PNG)
  {
    imSequence = new vpDiskGrabber;
    imSequence->setGenericName(fileName);
    imSequence->setImageNumber((int)firstFrame);
  }
  #ifdef VISP_HAVE_FFMPEG
  else if (formatType == FORMAT_AVI ||
           formatType == FORMAT_MPEG ||
           formatType == FORMAT_MOV ||
           formatType == FORMAT_OGV)
  {
    ffmpeg = new vpFFMPEG;
    if(!ffmpeg->openStream(fileName, vpFFMPEG::COLORED))
      throw (vpException(vpException::ioError ,"Could not open the video"));
    ffmpeg->initStream();
  }
  
  #else
  else if (formatType == FORMAT_AVI ||
           formatType == FORMAT_MPEG ||
           formatType == FORMAT_MOV ||
           formatType == FORMAT_OGV)
  {
    vpERROR_TRACE("To read video files the FFmpeg library has to be installed");
    throw (vpException(vpException::fatalError ,"the FFmpeg library is required"));
  }
  #endif
  else if (formatType == FORMAT_UNKNOWN)
  {
    vpERROR_TRACE("The format of the file does not correpsond to a readable format.");
    throw (vpException(vpException::fatalError ,"The format of the file does not correpsond to a readable format."));
  }
  
  frameCount = firstFrame;
  if(!getFrame(I,firstFrame))
  {
    vpERROR_TRACE("Could not read the first frame");
    throw (vpException(vpException::ioError ,"Could not read the first frame"));
  }
  height = I.getHeight();
  width = I.getWidth();
  
  isOpen = true;
  
  findLastFrameIndex();
}
Example #15
/*!
  Display a selection of the gray level image \e I (8 bits).

  \warning Display has to be initialized.

  \warning Suppresses the overlay drawing in the region of interest.

  \param I : Image to display.
  
  \param iP : Top left corner of the region of interest
  
  \param width : Width of the region of interest
  
  \param height : Height of the region of interest

  \sa init(), closeDisplay()
*/
void vpDisplayOpenCV::displayImageROI ( const vpImage<unsigned char> &I, const vpImagePoint &iP,
                                        const unsigned int width, const unsigned int height )
{
  if (displayHasBeenInitialized)
  { 
    vpImage<unsigned char> Itemp;
    vpImageTools::createSubImage(I,(unsigned int)iP.get_i(),(unsigned int)iP.get_j(),height,width,Itemp);
    vpImage<vpRGBa> Ic;
    vpImageConvert::convert(Itemp,Ic);
    
    CvSize size = cvSize((int)this->width, (int)this->height);
    int depth = 8;
    int channels = 3;
    if (background != NULL){
      if(background->nChannels != channels || background->depth != depth
         || background->height != (int) I.getHeight() || background->width != (int) I.getWidth()){
        if(background->nChannels != 0) cvReleaseImage(&background);
        background = cvCreateImage( size, depth, channels );
      }
    }
    else background = cvCreateImage( size, depth, channels );
    
    IplImage* Ip = NULL;
    vpImageConvert::convert(Ic, Ip);
    
    unsigned char * input = (unsigned char*)Ip->imageData;
    unsigned char * output = (unsigned char*)background->imageData;
    
    unsigned int iwidth = Ic.getWidth();

    // Move the output pointer to the top left corner of the ROI
    output = output + (int)(iP.get_i()*3*this->width + iP.get_j()*3);

    unsigned int i = 0;
    while (i < height)
    {
      unsigned int j = 0;
      while (j < width)
      {
        *(output+3*j)   = *(input+j*3);
        *(output+3*j+1) = *(input+j*3+1);
        *(output+3*j+2) = *(input+j*3+2);
        j++;
      }
      input = input + 3*iwidth;
      output = output + 3*this->width;
      i++;
    }

    cvReleaseImage(&Ip);
  }
  else
  {
    vpERROR_TRACE("openCV not initialized " ) ;
    throw(vpDisplayException(vpDisplayException::notInitializedError,
                             "OpenCV not initialized")) ;
  }
}
Example #16
void histogramme(const vpImage<unsigned char> &I, unsigned int *histo, int &max)
{
    max = 0; // make sure the returned maximum does not depend on the caller's value
    for (int i = 0; i < 256; i++)
        histo[i] = 0;

    for (unsigned int i = 0; i < I.getHeight(); i++)
        for (unsigned int j = 0; j < I.getWidth(); j++)
            if (++histo[I[i][j]] > (unsigned int)max)
                max = (int)histo[I[i][j]];
}
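// A minimal usage sketch for the function above (the file name is a
// placeholder).
#include <iostream>
#include <visp3/core/vpImage.h>
#include <visp3/io/vpImageIo.h>

int main()
{
  vpImage<unsigned char> I;
  vpImageIo::read(I, "image.pgm"); // placeholder file name

  unsigned int histo[256];
  int max = 0;
  histogramme(I, histo, max);
  std::cout << "highest bin count: " << max << std::endl;
  return 0;
}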
Example #17
bool
vpImageSimulator::getPixel(vpImage<unsigned char> &Isrc, 
			   const vpImagePoint &iP, unsigned char &Ipixelplan)
{
  // test whether the pixel lies inside the projected zone
  bool inside = false;
  for(unsigned int i = 0 ; i < listTriangle.size() ; i++)
      if(listTriangle[i].inTriangle(iP)){
          inside = true;
          break;
      }
  if (!inside)
    return false;

  // algebraic method
  double z;

  // compute the depth of the intersection
  z = distance/(normal_Cam_optim[0]*iP.get_u()+normal_Cam_optim[1]*iP.get_v()+normal_Cam_optim[2]);
  // compute the 3D coordinates of the intersection
  Xinter_optim[0] = iP.get_u()*z;
  Xinter_optim[1] = iP.get_v()*z;
  Xinter_optim[2] = z;

  // retrieve the coordinates of the intersection in the object plane
  // object plane frame:
  //   center = X0_2_optim[i] (first point defining the plane)
  //   basis  = u:(X[1]-X[0]) and v:(X[3]-X[0])
  // the plane is assumed to be a rectangle, so the coordinates are simply obtained by a dot product
  double u = 0, v = 0;
  for(unsigned int i = 0; i < 3; i++)
  {
    double diff = (Xinter_optim[i]-X0_2_optim[i]);
    u += diff*vbase_u_optim[i];
    v += diff*vbase_v_optim[i];
  }
  u = u/(euclideanNorm_u*euclideanNorm_u);
  v = v/(euclideanNorm_v*euclideanNorm_v);

  if( u > 0 && v > 0 && u < 1. && v < 1.)
  {
    double i2,j2;
    i2=v*(Isrc.getHeight()-1);
    j2=u*(Isrc.getWidth()-1);
    if (interp == BILINEAR_INTERPOLATION)
      Ipixelplan = Isrc.getValue(i2,j2);
    else if (interp == SIMPLE)
      Ipixelplan = Isrc[(unsigned int)i2][(unsigned int)j2];
    return true;
  }
  else
    return false;
}
Example #18
/*!
Sets all the parameters needed to read the video or the image sequence.

Grabs the first frame and stores it in the image \f$ I \f$.

\param I : The image where the frame is stored.
*/
void vpVideoReader::open(vpImage<unsigned char> &I)
{
	if (!initFileName)
	{
		vpERROR_TRACE("The generic filename has to be set");
		throw (vpImageException(vpImageException::noFileNameError,"filename empty"));
	}

	if (isImageExtensionSupported())
	{
		imSequence = new vpDiskGrabber;
		imSequence->setGenericName(fileName);
		if (firstFrameIndexIsSet)
			imSequence->setImageNumber(firstFrame);
	}
	else if (isVideoExtensionSupported())
	{
#ifdef VISP_HAVE_FFMPEG
		ffmpeg = new vpFFMPEG;
		if (!ffmpeg->openStream(fileName, vpFFMPEG::GRAY_SCALED))
      throw (vpException(vpException::ioError ,"Could not open the video with ffmpeg"));
		ffmpeg->initStream();
#elif VISP_HAVE_OPENCV_VERSION >= 0x020100
		capture.open(fileName);

		if(!capture.isOpened())
		{
      throw (vpException(vpException::ioError ,"Could not open the video with opencv"));
		}
#else
		throw (vpException(vpException::fatalError, "To read video files ViSP should be built with ffmpeg or opencv >= 2.1.0 3rd party libraries."));
#endif
	}
	else if (formatType == FORMAT_UNKNOWN)
	{
		throw (vpException(vpException::fatalError, "The format of the file does not correspond to a readable format supported by ViSP."));
	}

	findFirstFrameIndex();
	frameCount = firstFrame;
	if(!getFrame(I,firstFrame))
	{
		throw (vpException(vpException::ioError, "Could not read the video first frame"));
	}

	height = I.getHeight();
	width = I.getWidth();

	isOpen = true;
	findLastFrameIndex();
	frameCount = firstFrame; // open() should not increase the frame counter
}
Example #19
/*!
  Grabs a color image from the selected camera. Since the cameras
  are not able to grab in RGBa color coding format, the acquired image is
  converted to RGBa to match the requested format. This conversion
  can be time consuming.

  \param I : Acquired color image in RGBa format.
 */
void 
vp1394CMUGrabber::acquire(vpImage<vpRGBa> &I)
{
  // get image data
  unsigned long length;
  unsigned char *rawdata = NULL;
  int dropped;
  unsigned int size;

  if(init == false){
    close();
    throw (vpFrameGrabberException(vpFrameGrabberException::initializationError,
                                   "Initialization not done") );
  }

  camera->AcquireImageEx(TRUE,&dropped);
  rawdata = camera->GetRawData(&length);
  size = I.getWidth() * I.getHeight();

  switch (_color) {
    case vp1394CMUGrabber::MONO8:
      vpImageConvert::GreyToRGBa(rawdata, (unsigned char *)I.bitmap, size);
      break;

    case vp1394CMUGrabber::MONO16:
      vpImageConvert::MONO16ToRGBa(rawdata, (unsigned char *)I.bitmap, size);
      break;

    case vp1394CMUGrabber::YUV411:
      vpImageConvert::YUV411ToRGBa(rawdata, (unsigned char *)I.bitmap, size);
      break;

    case vp1394CMUGrabber::YUV422:
      vpImageConvert::YUV422ToRGBa(rawdata, (unsigned char *)I.bitmap, size);
      break;

    case vp1394CMUGrabber::YUV444:
      vpImageConvert::YUV444ToRGBa(rawdata, (unsigned char *)I.bitmap, size);
      break;

    case vp1394CMUGrabber::RGB8:
      size = length / 3;
      vpImageConvert::RGBToRGBa(rawdata, (unsigned char *)I.bitmap, size);
      break;

    default:
      close();
      vpERROR_TRACE("Format conversion not implemented. Acquisition failed.");
      throw (vpFrameGrabberException(vpFrameGrabberException::otherError,
                                     "Format conversion not implemented. "
                                     "Acquisition failed.") );
      break;
  };
}
Example #20
  void convertCorbaImageToVispImage(const ImageData& imageCorbaSrc,
				    vpImage<unsigned char>& imageVispDest)
  {
    if (imageCorbaSrc.width != imageVispDest.getWidth()
        || imageCorbaSrc.height != imageVispDest.getHeight())
      throw "Invalid image size in convertCorbaImageToVispImage";

    unsigned char *pt = imageVispDest.bitmap;
    for(unsigned int l=0;l<imageCorbaSrc.octetData.length();++l)
      *pt++ = imageCorbaSrc.octetData[l];
  }
Example #21
/*!
  Train the classifier on the entire image (it is therefore assumed that the
  image is planar).

  Depending on the parameters, the training can take up to several minutes.

  \param _I : The image used to train the classifier.
  
  \return The number of reference points.
*/
unsigned int
vpPlanarObjectDetector::buildReference(const vpImage<unsigned char> &_I)
{
  modelROI.x = 0;
  modelROI.y = 0;
  modelROI.width = (int)_I.getWidth();
  modelROI.height = (int)_I.getHeight();  

  initialiseRefCorners(modelROI);  
  
  return fern.buildReference(_I);
}
Example #22
void vispImageToRos(sensor_msgs::Image &dst,
                    const vpImage<unsigned char> &src)
{
  dst.width = src.getWidth();
  dst.height = src.getHeight();
  dst.encoding = sensor_msgs::image_encodings::MONO8;
  dst.step = src.getWidth();
  dst.data.resize(dst.height * dst.step);
  for(unsigned i = 0; i < src.getWidth (); ++i)
    for(unsigned j = 0; j < src.getHeight (); ++j)
      dst.data[j * dst.step + i] = src[j][i];
}
Example #23
/*!
  Display the 3D model at a given position using the given camera parameters

  \param I : The color image.
  \param cMo_ : Pose used to project the 3D model into the image.
  \param camera : The camera parameters.
  \param col : The desired color.
  \param thickness : The thickness of the lines.
  \param displayFullModel : If true, the whole model is displayed, even the faces that are not visible.
*/
void
vpMbKltTracker::display(const vpImage<vpRGBa>& I, const vpHomogeneousMatrix &cMo_, const vpCameraParameters & camera,
                        const vpColor& col , const unsigned int thickness, const bool displayFullModel)
{
  vpCameraParameters c = camera;
  
  if(clippingFlag > 3) // Contains at least one FOV constraint
    c.computeFov(I.getWidth(), I.getHeight());

  vpMbtDistanceKltPoints *kltpoly;
  vpMbtDistanceKltCylinder *kltPolyCylinder;

  faces.computeClippedPolygons(cMo_, c);

  if(useScanLine && !displayFullModel)
    faces.computeScanLineRender(cam,I.getWidth(), I.getHeight());

  for(std::list<vpMbtDistanceKltPoints*>::const_iterator it=kltPolygons.begin(); it!=kltPolygons.end(); ++it){
    kltpoly = *it;

    kltpoly->display(I,cMo_,camera,col,thickness,displayFullModel);

    if(displayFeatures && kltpoly->hasEnoughPoints() && kltpoly->polygon->isVisible() && kltpoly->isTracked()) {
      kltpoly->displayPrimitive(I);
//         faces[i]->displayNormal(I);
    }
  }

  for(std::list<vpMbtDistanceKltCylinder*>::const_iterator it=kltCylinders.begin(); it!=kltCylinders.end(); ++it){
    kltPolyCylinder = *it;

    kltPolyCylinder->display(I,cMo_,camera,col,thickness,displayFullModel);

    if(displayFeatures && kltPolyCylinder->isTracked() && kltPolyCylinder->hasEnoughPoints())
      kltPolyCylinder->displayPrimitive(I);
  }

  for(std::list<vpMbtDistanceCircle*>::const_iterator it=circles_disp.begin(); it!=circles_disp.end(); ++it){
    (*it)->display(I, cMo_, camera, col, thickness);
  }

#ifdef VISP_HAVE_OGRE
  if(useOgre)
    faces.displayOgre(cMo_);
#endif
}
Example #24
// Color pictures SetBackGroundImage
void
vpAR::setImage(vpImage<vpRGBa> &I)
{

  if ((internal_width != I.getWidth()) ||
      (internal_height != I.getHeight()))
  {
    vpERROR_TRACE("The image size is different from the view size ");
    throw(vpException(vpException::dimensionError, "The image size is different from the view size")) ;
  }


  background = true ;

  unsigned int k = 0 ;
  for (unsigned int i = 0 ; i < I.getHeight() ; i++)
  {
    k = 0;
    // The OpenGL image frame has its origin at the bottom left corner,
    // so the rows are copied in reverse order to avoid an inverted image
    for (unsigned int j = 0 ; j < I.getWidth() ; j++)
    {
      image_background[i*I.getWidth()*3+k+0] = I[I.getHeight()-i-1][j].R ;
      image_background[i*I.getWidth()*3+k+1] = I[I.getHeight()-i-1][j].G ;
      image_background[i*I.getWidth()*3+k+2] = I[I.getHeight()-i-1][j].B ;
      k += 3;
    }
  }

}
Example #25
int Conversion::convert(const vpImage<float> &dmap, Eigen::MatrixXf &depthMat)
{
  int height = dmap.getHeight();
  int width  = dmap.getWidth();
  // i <-> height and j <-> width
  depthMat.resize(height, width);
  for (int i = 0; i < height; i++) {
    for (int j = 0; j < width; j++) {
      depthMat(i, j) = dmap[i][j];
    }
  }

  return 1;
}
Example #26
void RotationInvertedInterpolated(const vpImage<uchar> &I, vpImage<uchar> &D, float alpha) {
    // If the destination image is larger than the source, shift the center with offseti and offsetj
    int offseti = (D.getHeight() - I.getHeight()) / 2;
    int offsetj = (D.getWidth() - I.getWidth()) / 2;
    int a = I.getHeight() / 2;
    int b = I.getWidth() / 2;
    for (int fi = 0; fi < (int)D.getHeight(); fi++)
        for (int fj = 0; fj < (int)D.getWidth(); fj++) {
            int i = fi - offseti;
            int j = fj - offsetj;
            // Inverse mapping: source location that the rotation sends to (fi, fj)
            float ti = (i-a) * cos(alpha) + (j-b) * sin(alpha) + a;
            float tj = -(i-a) * sin(alpha) + (j-b) * cos(alpha) + b;
            if ((ti > 0) && (tj > 0) && (ti < I.getHeight()) && (tj < I.getWidth()))
                D[fi][fj] = interpole(I, ti, tj);
        }
}
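// The interpole() helper is not shown in this example. A minimal bilinear
// interpolation sketch consistent with the call above could look like this
// (an assumption, not the original helper).
#include <algorithm>

uchar interpole(const vpImage<uchar> &I, float ti, float tj)
{
  // Integer corners surrounding the non-integer location (ti, tj),
  // clamped to stay inside the image
  int i0 = (int)ti, j0 = (int)tj;
  int i1 = std::min(i0 + 1, (int)I.getHeight() - 1);
  int j1 = std::min(j0 + 1, (int)I.getWidth() - 1);
  float di = ti - i0, dj = tj - j0;

  // Weighted average of the four neighbors
  float v = (1 - di) * (1 - dj) * I[i0][j0]
          + (1 - di) * dj       * I[i0][j1]
          + di       * (1 - dj) * I[i1][j0]
          + di       * dj       * I[i1][j1];
  return (uchar)(v + 0.5f); // round to the nearest gray level
}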
Example #27
void RotationBasic(const vpImage<uchar> &I, vpImage<uchar> &D, float alpha) {
    // If the destination image is larger than the source, shift the center with offseti and offsetj
    int offseti = (D.getHeight() - I.getHeight()) / 2;
    int offsetj = (D.getWidth() - I.getWidth()) / 2;
    int a = I.getHeight() / 2;
    int b = I.getWidth() / 2;
    for (int i = 0; i < (int)I.getHeight(); i++)
        for (int j = 0; j < (int)I.getWidth(); j++) {
            // Forward mapping: destination location of the source pixel (i, j)
            int ti = (i-a) * cos(alpha) - (j-b) * sin(alpha) + a;
            int tj = (i-a) * sin(alpha) + (j-b) * cos(alpha) + b;
            ti += offseti;
            tj += offsetj;
            if ((ti > 0) && (tj > 0) && (ti < (int)D.getHeight()) && (tj < (int)D.getWidth()))
                D[ti][tj] = I[i][j];
        }
}
Example #28
/*!
  \ingroup group_imgproc_histogram

  Adjust the contrast of a grayscale image by performing a histogram equalization.
  The intensity distribution is redistributed over the full [0 - 255] range such that the cumulative
  histogram distribution becomes linear.

  \param I : The grayscale image to equalize.
*/
void vp::equalizeHistogram(vpImage<unsigned char> &I) {
  if(I.getWidth()*I.getHeight() == 0) {
    return;
  }

  //Calculate the histogram
  vpHistogram hist;
  hist.calculate(I);

  //Calculate the cumulative distribution function
  unsigned int cdf[256];
  unsigned int cdfMin = /*std::numeric_limits<unsigned int>::max()*/ UINT_MAX, cdfMax = 0;
  unsigned int minValue = /*std::numeric_limits<unsigned int>::max()*/ UINT_MAX, maxValue = 0;
  cdf[0] = hist[0];
  
  if(cdf[0] < cdfMin && cdf[0] > 0) {
    cdfMin = cdf[0];
    minValue = 0;
  }
  
  for(unsigned int i = 1; i < 256; i++) {
    cdf[i] = cdf[i-1] + hist[i];

    if(cdf[i] < cdfMin && cdf[i] > 0) {
      cdfMin = cdf[i];
      minValue = i;
    }

    if(cdf[i] > cdfMax) {
      cdfMax = cdf[i];
      maxValue = i;
    }
  }
  
  unsigned int nbPixels = I.getWidth()*I.getHeight();
  if(nbPixels == cdfMin) {
    //Only one brightness value in the image
    return;
  }

  //Construct the look-up table
  unsigned char lut[256];
  for(unsigned int x = minValue; x <= maxValue; x++) {
    lut[x] = vpMath::round( (cdf[x]-cdfMin) / (double) (nbPixels-cdfMin) * 255.0 );
  }

  I.performLut(lut);
}
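// The look-up table above implements the classical equalization mapping
//   lut(x) = round( (cdf(x) - cdfMin) / (N - cdfMin) * 255 )
// where N is the number of pixels. A minimal usage sketch (the header path
// follows the ViSP 3 imgproc module; the file name is a placeholder):
#include <visp3/core/vpImage.h>
#include <visp3/io/vpImageIo.h>
#include <visp3/imgproc/vpImgproc.h>

int main()
{
  vpImage<unsigned char> I;
  vpImageIo::read(I, "image.pgm"); // placeholder file name
  vp::equalizeHistogram(I);        // equalize in place
  vpImageIo::write(I, "equalized.pgm");
  return 0;
}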
Example #29
void
vpDiskGrabber::open(vpImage<unsigned char> &I)
{
  long first_number = getImageNumber();

  vpDEBUG_TRACE(2, "first %ld", first_number);

  acquire(I);

  setImageNumber(first_number);

  width = I.getWidth();
  height = I.getHeight();

  init = true;
}
Example #30
/*!
  Acquire a color image from the active camera.

  \param I : Image data structure (RGBa image).

  \param timestamp : The acquisition timestamp.
*/
void vpFlyCaptureGrabber::acquire(vpImage<vpRGBa> &I, FlyCapture2::TimeStamp &timestamp)
{
  this->open();

  FlyCapture2::Error error;
  // Retrieve an image
  error = m_camera.RetrieveBuffer( &m_rawImage );
  if (error != FlyCapture2::PGRERROR_OK) {
    error.PrintErrorTrace();
    throw (vpException(vpException::fatalError,
                       "Cannot retrieve image for camera with guid 0x%lx",
                       m_guid) );
  }
  timestamp = m_rawImage.GetTimeStamp();

  // Create a converted image
  FlyCapture2::Image convertedImage;

  // Convert the raw image
  error = m_rawImage.Convert( FlyCapture2::PIXEL_FORMAT_RGBU, &convertedImage );
  if (error != FlyCapture2::PGRERROR_OK) {
    error.PrintErrorTrace();
    throw (vpException(vpException::fatalError,
                       "Cannot convert image for camera with guid 0x%lx",
                       m_guid) );
  }
  height = convertedImage.GetRows();
  width = convertedImage.GetCols();
  unsigned char *data = convertedImage.GetData();
  I.resize(height, width);
  unsigned int bps = convertedImage.GetBitsPerPixel();
  memcpy(I.bitmap, data, width*height*bps/8);
}
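// A minimal acquisition sketch built on the method above (assuming the
// default camera is used and ViSP was built with FlyCapture support;
// error handling omitted).
#include <iostream>
#include <visp3/sensor/vpFlyCaptureGrabber.h>

int main()
{
#ifdef VISP_HAVE_FLYCAPTURE
  vpFlyCaptureGrabber g;
  vpImage<vpRGBa> I;
  FlyCapture2::TimeStamp timestamp;

  // acquire() opens the camera on first use (see this->open() above)
  g.acquire(I, timestamp);
  std::cout << "Got a " << I.getWidth() << "x" << I.getHeight()
            << " frame at t = " << timestamp.seconds << " s" << std::endl;
#endif
  return 0;
}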