Esempio n. 1
0
	void BazARTracker::update()
	{
		// Nothing to do without a source image.
		if (m_imageptr == NULL) return;

		// Copy the raw capture buffer into BazAR's IplImage
		// (4 bytes per pixel, per the camera parameter dimensions).
		const int byteCount = m_cparam->cparam.xsize * m_cparam->cparam.ysize * 4;
		for (int idx = 0; idx < byteCount; ++idx)
		{
			image->imageData[idx] = m_imageptr[idx];
		}

		// Reduce to a single gray channel for the detector.
		switch (image->nChannels)
		{
		case 4:
			cvCvtColor(image, gray, CV_RGBA2GRAY);
			break;
		case 3:
			cvCvtColor(image, gray, CV_BGR2GRAY);
			break;
		default:
			// Already single-channel: alias the input, no conversion.
			gray = image;
			break;
		}

		// BazAR expects flipped input images; flip in place.
		cvFlip(gray, gray);

		// Show what BazAR will see when debugging.
		if (getDebugMode()) cvShowImage("Gray", gray);

		// Run the planar-surface detector on the gray frame.
		if (detector.detect(gray))
		{
			// Target found: rebuild the pose for this frame.
			augment.Clear();

			// Single-camera setup: register the one homography we have.
			add_detected_homography(detector, augment);

			// Refine the pose via bundle adjustment.
			augment.Accomodate(4, 1e-4);

			// Produces the GL-projection matrix; in debug mode also
			// renders additional output into 'display'.
			show_result(augment, image, &display);

			if (getDebugMode()) cvShowImage("Result_BAZAR", display);

			static_cast<BazARMarker*>(m_markerlist[0].get())->update(matCameraRT4_4);
		}
		else
		{
			if (getDebugMode()) cvShowImage("Result_BAZAR", display);

			// No detection this frame: signal the marker with a null pose.
			static_cast<BazARMarker*>(m_markerlist[0].get())->update(NULL);
		}
	}
Esempio n. 2
0
bool photometric_calibration(CalibModel &model, CvCapture *capture, 
			     int nbImages, bool cache)
{
  // Interactive photometric calibration loop: detects the trained planar
  // target in live frames, accumulates surface-normal/lighting observations
  // until 'nbImages' homographies have been seen, then computes and saves
  // per-channel gain/bias light parameters.
  //
  // @param model    calibration model holding detector, pose and light map
  // @param capture  live video source (owned by the caller)
  // @param nbImages number of detected homographies to accumulate
  // @param cache    if true, load a previously saved light map first
  // @return false (historical behavior: the original returned 0)

  if (cache) model.map.load();

  const char *win = "BazAR";

  IplImage *gray = 0;

  cvNamedWindow(win, CV_WINDOW_AUTOSIZE);
  cvNamedWindow("LightMap", CV_WINDOW_AUTOSIZE);

  // Grab one frame up front to size the display buffer. Bail out if the
  // capture yields nothing — the old code passed a NULL frame straight
  // into cvCloneImage and crashed.
  IplImage *frame = cvQueryFrame(capture);
  if (!frame) return false;
  IplImage *display = cvCloneImage(frame);

  int nbHomography = 0;
  LightCollector lc(model.map.reflc);
  IplImage *lightmap = cvCreateImage(cvGetSize(model.map.map.getIm()),
				     IPL_DEPTH_8U, lc.avgChannels);
  while (1)
    {
      // acquire image
      frame = cvQueryFrame( capture );
      if( !frame )
	break;

      // convert it to gray levels, if required; note that 'gray' aliases
      // 'frame' when the source is already single-channel
      if (frame->nChannels > 1) {
	if( !gray )
	  gray = cvCreateImage( cvGetSize(frame), IPL_DEPTH_8U, 1 );
	cvCvtColor(frame, gray, CV_RGB2GRAY);
      } else {
	gray = frame;
      }

      // run the detector
      if (model.detector.detect(gray)) {
	// 2d homography found
	nbHomography++;

	// Computes 3D pose and surface normal
	model.augm.Clear();
	add_detected_homography(model.detector, model.augm);
	model.augm.Accomodate(4, 1e-4);
	CvMat *mat = model.augm.GetObjectToWorld();
	float normal[3];
	// third column of the object-to-world matrix = surface normal
	for (int j = 0; j < 3; j++) normal[j] = cvGet2D(mat, j, 2).val[0];
	cvReleaseMat(&mat);

	// average pixels over triangles
	lc.averageImage(frame, model.detector.H);

	// add observations until the map has enough data
	if (!model.map.isReady())
	  model.map.addNormal(normal, lc, 0);

	if (!model.map.isReady() && nbHomography >= nbImages) {
	  if (model.map.computeLightParams()) {
	    model.map.save();
	    const float *gain = model.map.getGain(0);
	    const float *bias = model.map.getBias(0);
	    cout << "Gain: " << gain[0] << ", " << gain[1] << ", " << gain[2] << endl;
	    cout << "Bias: " << bias[0] << ", " << bias[1] << ", " << bias[2] << endl;
	  }
	}
      }

      if (model.map.isReady()) {
	// visualize the computed light map and augment the scene
	double min, max;
	IplImage *map = model.map.map.getIm();
	cvSetImageCOI(map, 2);
	cvMinMaxLoc(map, &min, &max);
	cvSetImageCOI(map, 0);
	assert(map->nChannels == lightmap->nChannels);
	cvConvertScale(map, lightmap, 128, 0);
	cvShowImage("LightMap", lightmap);
	augment_scene(model, frame, display);
      } else {
	// not calibrated yet: show the raw frame with the sampling grid
	cvCopy(frame, display);
	if (model.detector.object_is_detected)
	  lc.drawGrid(display, model.detector.H);
      }

      cvShowImage(win, display);

      int k = cvWaitKey(10);
      if (k == 'q' || k == 27)
	break;
    }

  cvReleaseImage(&lightmap);
  cvReleaseImage(&display);
  // Release 'gray' only if we allocated it ourselves: it aliases 'frame'
  // for single-channel input, and 'frame' is NULL when the loop exited
  // because the capture ran out — the old check dereferenced it and crashed.
  if (gray && gray != frame)
    cvReleaseImage(&gray);
  return false;
}
Esempio n. 3
0
bool geometric_calibration(CalibModel &model, CvCapture *capture, bool cache)
{
  // Interactive geometric camera calibration: collects homographies of the
  // trained planar target from live frames and, once 70 have been gathered,
  // runs CamCalibration::Calibrate and writes the optimized structure to
  // "camera_c.txt" / "camera_r_t.txt".
  //
  // @param model   calibration model holding the detector and augmentation
  // @param capture live video source (owned by the caller)
  // @param cache   if true, try to reuse a previously saved calibration
  // @return true when an optimal structure could be (re)loaded

  if (cache && model.augm.LoadOptimalStructureFromFile("camera_c.txt", "camera_r_t.txt")) {
    return true;
  }

  const char *win = "BazAR";

  IplImage *gray = 0;

  cvNamedWindow(win, CV_WINDOW_AUTOSIZE);

  CamCalibration calib;

  // The first frame sizes the camera model. Bail out on a dead capture —
  // the old code dereferenced frame->width without checking for NULL.
  IplImage *frame = cvQueryFrame(capture);
  if (!frame) return false;
  calib.AddCamera(frame->width, frame->height);
  IplImage *display = cvCloneImage(frame);

  bool success = false;

  int nbHomography = 0;
  while (1)
    {
      // acquire image
      frame = cvQueryFrame( capture );
      if( !frame )
	break;

      // convert it to gray levels, if required; note that 'gray' aliases
      // 'frame' when the source is already single-channel
      if (frame->nChannels > 1) {
	if( !gray )
	  gray = cvCreateImage( cvGetSize(frame), IPL_DEPTH_8U, 1 );
	cvCvtColor(frame, gray, CV_RGB2GRAY);
      } else {
	gray = frame;
      }

      // run the detector
      if (model.detector.detect(gray)) {
	add_detected_homography(model.detector, calib);
	nbHomography++;
	cout << nbHomography << " homographies.\n";
	// enough views collected: attempt the full calibration
	if (nbHomography >= 70) {
	  if (calib.Calibrate(
			      50, // max hom
			      2,   // random
			      3,
			      3,   // padding ratio 1/2
			      0,
			      0,
			      0.0078125,	//alpha
			      0.9,		//beta
			      0.001953125,//gamma
			      12,	  // iter
			      0.05, //eps
			      3   //postfilter eps
			      ))
	    {
	      calib.PrintOptimizedResultsToFile1();
	      success = true;
	      break;
	    }
	}
      }
      show_result(model.detector, frame, display);
      cvShowImage(win, display);

      int k = cvWaitKey(10);
      if (k == 'q' || k == 27)
	break;
    }

  cvReleaseImage(&display);
  // Release 'gray' only if we allocated it ourselves: it aliases 'frame'
  // for single-channel input, and 'frame' is NULL when the loop exited
  // because the capture ran out — the old check dereferenced it and crashed.
  if (gray && gray != frame)
    cvReleaseImage(&gray);
  return success && model.augm.LoadOptimalStructureFromFile("camera_c.txt", "camera_r_t.txt");
}