String AlvarObjectTracker::trackInPicture(Mat picture, String time){
	cout << "track0" << endl;
	// Wrap the Mat in an IplImage header for ALVAR (shares the data, no copy)
	IplImage image = picture;
	IplImage *image_tmp = &image;
	String result;
	cout << "track2" << endl;
	Mat imageUndistortedMat;
	vector<Point2f> pointBuf;
	undistort(picture, imageUndistortedMat, cameraMatrix, distCoeffs);
	Mat homography;
	if (!this->homographyFound){
		// Presumably a class helper (the signature does not match cv::findHomography):
		// it derives the image-to-ground-plane homography from a 9x6 chessboard
		// with 2.5-unit squares.
		homography = findHomography(imageUndistortedMat, Size(9,6), float(2.5), cameraMatrix, distCoeffs, pointBuf);
		homographyInternal = homography.clone();
		this->homographyFound = true; // cache the result so later frames reuse it
	} else {
		homography = homographyInternal.clone();
	}
	static alvar::MarkerDetector<alvar::MarkerData> marker_detector;
	cout << "track3" << endl;
	marker_detector.SetMarkerSize(marker_size);
	marker_detector.Detect(image_tmp, &cam, true, true);
	cout << "track4" << endl;
	cout << "Markers: " << marker_detector.markers->size() << endl;
	if (!homography.empty() && marker_detector.markers->size() > 0){
		cout << "Found sth" << endl;
		// Corners 0, 1 and 2 of the first detected marker, in image pixels
		const alvar::MarkerData &marker = (*(marker_detector.markers))[0];
		vector<Point2f> orgPoint;
		for (int cornerIndx = 0; cornerIndx < 3; ++cornerIndx) {
			orgPoint.push_back(Point2f(marker.marker_corners_img[cornerIndx].x,
			                           marker.marker_corners_img[cornerIndx].y));
		}
		vector<Point2f> udorgPoint(orgPoint);
		cout << "try undistort" << endl;
		undistortPoints(orgPoint, udorgPoint, cameraMatrix, distCoeffs);
		vector<Point2f> rorgPoint(udorgPoint);
		cout << "try perspectiveTransform" << endl;
		perspectiveTransform( udorgPoint, rorgPoint, homography);
		cout << "try output" << endl;
		for (int i = 0; i < 3; ++i) {
			result += boost::lexical_cast<std::string>(i) + " " +
						boost::lexical_cast<std::string>(rorgPoint[i].x) + " " +
						boost::lexical_cast<std::string>(rorgPoint[i].y) + " " +
						time + "\n";
		}
		result += "\n";
	}
	cout << "track5" << endl;
	return result;
}
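
A minimal sketch of how trackInPicture might be driven from a capture loop. The tracker construction and the frame-number time stamp are assumptions made for illustration; the capture calls are the alvar::Capture API used in Example #5 below.

// Hypothetical driver loop; AlvarObjectTracker setup and the
// time-stamp format are assumptions.
#include <iostream>
#include <boost/lexical_cast.hpp>

void runTracker(alvar::Capture *capture, AlvarObjectTracker &tracker)
{
	for (int frame = 0; ; ++frame) {
		IplImage *img = capture->captureImage();
		if (!img) break;
		Mat picture(img);                         // wraps the frame, no copy
		String stamp = boost::lexical_cast<std::string>(frame);
		std::cout << tracker.trackInPicture(picture, stamp);
	}
}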
Example #2
	// The initialisation function
	__declspec(dllexport) void alvar_init(int width, int height)
	{
		w = width;
		h = height;

		// Calibration. See the manual and the ALVAR samples for how to calibrate your camera.
		// Calibration makes marker detection and marker pose estimation more accurate.
		if (! camera.SetCalib("Calibrations/default_calib.xml", w, h)) {
			camera.SetRes(w, h);
		}
		
		// Set the projection matrix as ALVAR recommends (based on the camera calibration).
		// p is only filled here; presumably the rendering side consumes it elsewhere.
		double p[16];
		camera.GetOpenglProjectionMatrix(p, w, h);

		//Initialize the multimarker system
		for(int i = 0; i < MARKER_COUNT; ++i)
			markerIdVector.push_back(i);

		// Initialize the MultiMarker using a fixed marker field (can be printed from MultiMarker.ppt)
		markerDetector.SetMarkerSize(CORNER_MARKER_SIZE);
		markerDetector.SetMarkerSizeForId(0, CENTRE_MARKER_SIZE);
		
		multiMarker = new alvar::MultiMarker(markerIdVector);
		
		alvar::Pose pose;
		pose.Reset();

		// Add the 5 markers
		multiMarker->PointCloudAdd(0, CENTRE_MARKER_SIZE, pose);
		
		pose.SetTranslation(-10, 6, 0);
		multiMarker->PointCloudAdd(1, CORNER_MARKER_SIZE, pose);
		
		pose.SetTranslation(10, 6, 0);
		multiMarker->PointCloudAdd(2, CORNER_MARKER_SIZE, pose);
		
		pose.SetTranslation(-10, -6, 0);
		multiMarker->PointCloudAdd(3, CORNER_MARKER_SIZE, pose);
		
		pose.SetTranslation(+10, -6, 0);
		multiMarker->PointCloudAdd(4, CORNER_MARKER_SIZE, pose);

		trackerStat.Reset();
	}
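
A hedged sketch of how a host application might load and call the exported alvar_init at runtime on Windows. The DLL file name is an assumption, and looking the symbol up by the plain name "alvar_init" presumes the export is declared extern "C" (or listed in a .def file) so the name is not C++-mangled.

// Hypothetical host-side loader; the DLL name is an assumption.
#include <windows.h>

typedef void (*alvar_init_fn)(int width, int height);

int load_and_init(void)
{
	HMODULE dll = LoadLibraryA("alvar_plugin.dll");   // assumed file name
	if (!dll) return -1;
	alvar_init_fn init = (alvar_init_fn)GetProcAddress(dll, "alvar_init");
	if (!init) { FreeLibrary(dll); return -1; }
	init(640, 480);   // match the resolution you capture at
	return 0;
}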
vector<Point2f> AlvarObjectTracker::trackInPicturePixels(Mat picture){
	vector<Point2f> res;
	cout << "track0" << endl;
		IplImage image = picture;
		IplImage *image_tmp = &image;
		String result;
		cout << "track2" << endl;
		Mat imageUndistortedMat;
		vector<Point2f> pointBuf;
		//undistort(picture, imageUndistortedMat, cameraMatrix, distCoeffs);
		static alvar::MarkerDetector<alvar::MarkerData> marker_detector;
		cout << "track3" << endl;
		marker_detector.SetMarkerSize(marker_size);
		marker_detector.Detect(image_tmp, &cam, true, true);
		cout << "track4" << endl;
		if(marker_detector.markers->size() > 0){
			cout << "Found sth" << endl;
			int markerIndx = 0;
			int cornerIndx = 0;
			float markerPosX0 = (*(marker_detector.markers))[markerIndx].marker_corners_img[cornerIndx].x;
			float markerPosY0 = (*(marker_detector.markers))[markerIndx].marker_corners_img[cornerIndx].y;
			cornerIndx = 1;
			float markerPosXX = (*(marker_detector.markers))[markerIndx].marker_corners_img[cornerIndx].x;
			float markerPosYX = (*(marker_detector.markers))[markerIndx].marker_corners_img[cornerIndx].y;
			cornerIndx = 2;
			float markerPosXY = (*(marker_detector.markers))[markerIndx].marker_corners_img[cornerIndx].x;
			float markerPosYY = (*(marker_detector.markers))[markerIndx].marker_corners_img[cornerIndx].y;
			vector<Point2f> orgPoint;
			orgPoint.push_back(Point2f(markerPosX0, markerPosY0));
			orgPoint.push_back(Point2f(markerPosXX, markerPosYX));
			orgPoint.push_back(Point2f(markerPosXY, markerPosYY));
			vector<Point2f> udorgPoint(orgPoint.size());
			cout << "try undistort" << endl;
			/*undistortPoints(orgPoint, udorgPoint, cameraMatrix, distCoeffs);
			res.push_back(Point2f(udorgPoint[0].x,udorgPoint[0].y));
			res.push_back(Point2f(udorgPoint[1].x,udorgPoint[1].y));
			res.push_back(Point2f(udorgPoint[2].x,udorgPoint[2].y));*/
			res.push_back(Point2f(orgPoint[0].x,orgPoint[0].y));
			res.push_back(Point2f(orgPoint[1].x,orgPoint[1].y));
			res.push_back(Point2f(orgPoint[2].x,orgPoint[2].y));
		}
	return res;
}
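
If the disabled undistortion path is ever re-enabled, note that undistortPoints with only (cameraMatrix, distCoeffs) yields normalized coordinates. To get undistorted *pixel* coordinates instead, OpenCV accepts the camera matrix again as the P argument. A minimal sketch, assuming the same cameraMatrix/distCoeffs members:

// Sketch: undistort marker corners while staying in pixel coordinates,
// by re-projecting with P = cameraMatrix (standard cv::undistortPoints use).
vector<Point2f> undistortToPixels(const vector<Point2f> &src,
                                  const Mat &cameraMatrix,
                                  const Mat &distCoeffs)
{
	vector<Point2f> dst;
	undistortPoints(src, dst, cameraMatrix, distCoeffs,
	                noArray(), cameraMatrix); // R = identity, P = K
	return dst;
}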
/**
 * @function videocallback
 * @brief Called once for every captured frame
 */
void videocallback( IplImage *_img ) {

  // Flip the image if needed
  bool flip_image = (_img->origin? true:false);
  if (flip_image) {
    cvFlip(_img);
    _img->origin = !_img->origin;
  }
  
  // Setup the marker detector
  static alvar::MarkerDetector<alvar::MarkerData> marker_detector;
  marker_detector.SetMarkerSize(gMarker_size); 

  // Perform detection
  marker_detector.Detect(_img, &gCam, true, true);
  
  // Draw recognized markers
  GlutViewer::DrawableClear();

  bool detected = false;
  for( size_t j=0; j< marker_detector.markers->size(); j++ ) {
    int id = (*(marker_detector.markers))[j].GetId();   

    std::cout << "Detected marker with id: "<< id << std::endl;

    if( gObjID == id ) {

      std::cout << "Found the target marker with id: " << gObjID << std::endl;
      alvar::Pose p = (*(marker_detector.markers))[j].pose;
      double transf[16];
      p.GetMatrixGL( transf, false);
      
      // Print the 4x4 pose matrix returned by GetMatrixGL
      for( int col = 0; col < 4; ++col ) {
        for( int row = 0; row < 4; ++row ) {
          std::cout << transf[col+row*4] << " ";
        }
        std::cout << std::endl;
      }
      
      std::cout << std::endl;
      
      double r = 1.0 - double(id+1)/32.0;
      double g = 1.0 - double(id*3%32+1)/32.0;
      double b = 1.0 - double(id*7%32+1)/32.0;
      d[j].SetColor(r, g, b);   // d: global array of GlutViewer drawables
      
      GlutViewer::DrawableAdd(&(d[j]));
      detected = true;
      break;
    }
      
  } // end of all markers checked

  if( detected == false ) {
    std::cout << "No marker detected with id " << gObjID << std::endl;
  }

  // Put the image back if it was flipped
  if (flip_image) {
    cvFlip(_img);
    _img->origin = !_img->origin;
  }

  usleep(1000000); // throttle the loop to roughly one frame per second
}
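
videocallback is written to be driven once per captured frame. A minimal sketch of such a driver, reusing the alvar::Capture API from Example #5 (the include path for CaptureFactory.h is an assumption; error handling elided):

// Hypothetical frame pump for videocallback, built on alvar::Capture.
#include <alvar/CaptureFactory.h>

void runVideoLoop()
{
	alvar::CaptureFactory *factory = alvar::CaptureFactory::instance();
	alvar::CaptureFactory::CaptureDeviceVector devices =
		factory->enumerateDevices("highgui");
	if (devices.empty()) return;
	alvar::Capture *capture = factory->createCapture(devices.front());
	if (capture && capture->start()) {
		while (IplImage *frame = capture->captureImage()) {
			videocallback(frame);   // one detection pass per frame
		}
		capture->stop();
	}
	delete capture;
}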
Example #5
// main
int main(int argc, char** argv)
{
	osg::ArgumentParser arguments(&argc, argv);

	// Let's use the convenience system from ALVAR for capturing.
	// We initialize Highgui through the CaptureFactory (see manual for other options like AVI)
	alvar::CaptureFactory *factory = alvar::CaptureFactory::instance();
	alvar::CaptureFactory::CaptureDeviceVector devices = factory->enumerateDevices("highgui");

	// Check to ensure that a device was found
	if (devices.size() > 0) {
		capture = factory->createCapture(devices.front());
	}

	// Capture is a central feature, so if it fails we bail out.
	if (capture && capture->start()) {
	   
		// Let's capture one frame to get video resolution
		IplImage *tempImg = capture->captureImage();
		videoXRes = tempImg->width;
		videoYRes = tempImg->height;

		// Calibration. See the manual and the ALVAR samples for how to calibrate your camera.
		// Calibration makes marker detection and marker pose estimation more accurate.
		if (! camera.SetCalib("calib.xml", videoXRes, videoYRes)) {
			camera.SetRes(videoXRes, videoYRes);
		}

		//Create the osg::Image for the video
		videoImage = new osg::Image;

		//Create the osg::Image for the texture (marker hiding)
		texImage = new osg::Image;
		
		//IplImage for the texture generation.
		markerHiderImage=cvCreateImage(cvSize(64, 64), 8, 4);

		// construct the viewer
		viewer = new osgViewer::Viewer(arguments);
		// Use the video resolution for the window size (approximately).
		viewer->setUpViewInWindow (200, 200, videoXRes, videoYRes);
		// Viewport is the same
		viewer->getCamera()->setViewport(0,0,videoXRes,videoYRes);
		viewer->setLightingMode(osg::View::HEADLIGHT);

		// Attach our own event handler to the system so we can catch the resizing events
		viewer->addEventHandler(new CSimpleWndSizeHandler(videoXRes,videoYRes ));
		
		// Set projection matrix as ALVAR recommends (based on the camera calibration)
		double p[16];
		camera.GetOpenglProjectionMatrix(p,videoXRes,videoYRes);
		viewer->getCamera()->setProjectionMatrix(osg::Matrix(p));

		// Create main root for everything
		arRoot = new osg::Group;
		arRoot->setName("ALVAR stuff (c) VTT");
		
		// Init the video background class and add it to the graph
		videoBG.Init(videoXRes, videoYRes, (tempImg->origin ? true : false));
		arRoot->addChild(videoBG.GetOSGGroup());

		// Create the model switch and add it to the graph
		modelSwitch = new osg::Switch;
		arRoot->addChild(modelSwitch.get());

		// Create model transformations for the markers and add them under the switch
		mtForMarkerFive = new osg::MatrixTransform;
		mtForMarkerTen = new osg::MatrixTransform;
		modelSwitch->addChild(mtForMarkerFive.get());
		modelSwitch->addChild(mtForMarkerTen.get());

		// add the texture under the marker transformation node
		mtForMarkerFive->addChild(texOnMarker.GetDrawable());

		// All models off
		modelSwitch->setAllChildrenOff();

		// load the data (models).
		modelForMarkerFive = osgDB::readNodeFile("grid.osg");
		modelForMarkerTen = osgDB::readNodeFile("axes.osg");
	    
		// If loading succeeded, add the models under the MatrixTransform nodes.
		if(modelForMarkerFive)
			mtForMarkerFive->addChild(modelForMarkerFive.get());

		if(modelForMarkerTen)
			mtForMarkerTen->addChild(modelForMarkerTen.get());
	    
		// Tell ALVAR the markers' size (the same for all).
		// You can also specify a different size for each individual marker.
		markerDetector.SetMarkerSize(MARKER_SIZE);

		// Set scene data 
		viewer->setSceneData(arRoot.get());

		// And start the main loop
		while(!viewer->done()){
			//Call the rendering function over and over again.
			renderer();
		}
	} 
	// Time to close the system
	if(capture){
		capture->stop();
		delete capture;
	}

	if(markerHiderImage)
		cvReleaseImage(&markerHiderImage);
	
	return 0; // bye bye. Happy coding!
}