Example #1
	// This function returns the number of detected markers.
	// It uses just OpenCV capture, but should be removed soon.
	__declspec(dllexport) int alvar_number_of_detected_markers(int* imageData)
	{
		alvar::Pose pose;

		// Reuse the global IplImage and pixel buffer; they are expected to already
		// hold the current frame (see the conversion done in alvar_process below).
		image->imageData = tmp;

		// Update the tracker statistics for the frame
		trackerStat.Track(image);

		// Detect the markers
		if (markerDetector.Detect(image, &camera, false, false)) {
			// If the detection succeeds, the marker field is in sight; update the data
			multiMarker->Update(markerDetector.markers, &camera, pose);

			trackerStat.Reset();
		}
	
		// Clean up
		delete[] tmp;
		delete image;

		return markerDetector.markers->size();
	}
String AlvarObjectTracker::trackInPicture(Mat picture, String time){
	cout << "track0" << endl;
	IplImage image = picture;
	IplImage *image_tmp = &image;
	String result;
	cout << "track2" << endl;
	Mat imageUndistortedMat;
	vector<Point2f> pointBuf;
	undistort(picture, imageUndistortedMat, cameraMatrix, distCoeffs);
	Mat homography;
	if (!this->homographyFound){
		// findHomography here is presumably a class/project helper (its signature differs from cv::findHomography)
		homography = findHomography(imageUndistortedMat, Size(9,6), float(2.5), cameraMatrix, distCoeffs, pointBuf);
		homographyInternal = homography.clone();
	} else {
		homography = homographyInternal.clone();
	}
	static alvar::MarkerDetector<alvar::MarkerData> marker_detector;
	cout << "track3" << endl;
	marker_detector.SetMarkerSize(marker_size);
	marker_detector.Detect(image_tmp, &cam, true, true);
	cout << "track4" << endl;
	cout << "Markers: " << marker_detector.markers->size() << endl;
	if(homography.data != NULL && marker_detector.markers->size() > 0){
		cout << "Found sth" << endl;
		// Take the first marker and its first three image corners (0 = origin, 1 = x corner, 2 = y corner)
		int markerIndx = 0;
		vector<Point2f> orgPoint;
		for (int cornerIndx = 0; cornerIndx < 3; ++cornerIndx) {
			orgPoint.push_back(Point2f(
				(float)(*(marker_detector.markers))[markerIndx].marker_corners_img[cornerIndx].x,
				(float)(*(marker_detector.markers))[markerIndx].marker_corners_img[cornerIndx].y));
		}
		vector<Point2f> udorgPoint(orgPoint);
		cout << "try undistort" << endl;
		undistortPoints(orgPoint, udorgPoint, cameraMatrix, distCoeffs);
		vector<Point2f> rorgPoint(udorgPoint);
		cout << "try perspectiveTransform" << endl;
		perspectiveTransform( udorgPoint, rorgPoint, homography);
		cout << "try output" << endl;
		for (int i = 0; i < 3; ++i) {
			result += boost::lexical_cast<std::string>(i) + " " +
						boost::lexical_cast<std::string>(rorgPoint[i].x) + " " +
						boost::lexical_cast<std::string>(rorgPoint[i].y) + " " +
						time + "\n";
		}
		result += "\n";
	}
	cout << "track5" << endl;
	return result;
}
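For reference, a minimal sketch of how trackInPicture might be driven from an ordinary OpenCV capture loop. It assumes an AlvarObjectTracker instance whose calibration members (cameraMatrix, distCoeffs, cam) and marker_size have already been set up as in the snippet above; the helper name runTrackingLoop and the frame-number timestamp are illustrative only.

// Hedged usage sketch: feed webcam frames into AlvarObjectTracker::trackInPicture.
// Assumes the class header is available and the tracker is already calibrated.
#include <opencv2/opencv.hpp>
#include <iostream>
#include <sstream>

int runTrackingLoop(AlvarObjectTracker &tracker)
{
	cv::VideoCapture cap(0);                   // default camera
	if (!cap.isOpened()) return -1;

	cv::Mat frame;
	for (int frameNo = 0; cap.read(frame); ++frameNo) {
		std::ostringstream stamp;
		stamp << frameNo;                      // any timestamp string works here
		std::string report = tracker.trackInPicture(frame, stamp.str());
		if (!report.empty())
			std::cout << report;               // "index x y time" lines, one per corner
		cv::imshow("frame", frame);
		if (cv::waitKey(1) == 27) break;       // ESC to quit
	}
	return 0;
}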
Example #3
/*
The main rendering function.
*/
void renderer()
{
	// Capture the image
	IplImage *image = capture->captureImage();

	// Check if we need to change the image origin and, if so, flip the image.
	bool flip_image = (image->origin?true:false);
	if (flip_image) {
		cvFlip(image);
		image->origin = !image->origin;
	}

	// Detect all the markers from the frame
	markerDetector.Detect(image, &camera, false, false);
	
	// Loop through the list of detected markers
	for (size_t i=0; i<markerDetector.markers->size(); i++) {

		// Get the ID of the marker
		int id = (*(markerDetector.markers))[i].GetId();

		// Get the marker's pose (transformation matrix)
		double temp_mat[16];
		alvar::Pose p = (*(markerDetector.markers))[i].pose;
		p.GetMatrixGL(temp_mat);

	
		if( id == 5){ //Marker 5 is visible
			//Switch the 5 on
			modelSwitch->setChildValue(mtForMarkerFive, 1);
			// Update the matrix transformation
			mtForMarkerFive->setMatrix(osg::Matrix(temp_mat));
		}
		else if(id == 10){ //Marker 10 is visible
			//Switch the 10 on
			modelSwitch->setChildValue(mtForMarkerTen, 1);
			// Update the matrix transformation
			mtForMarkerTen->setMatrix(osg::Matrix(temp_mat));
		}
	}

	// In case we flipped the image, it's time to flip it back 
	if (flip_image) {
		cvFlip(image);
		image->origin = !image->origin;
	}

	// "copy" the raw image data from IplImage to the Osg::Image
	videoImage->setImage(image->width, image->height, 1, 4, GL_BGR, GL_UNSIGNED_BYTE, (unsigned char*)image->imageData, osg::Image::NO_DELETE);
	if(videoImage.valid()){
		// Set the latest frame to the view as an background texture
		videoBG.SetBGImage(videoImage.get());
	}
	// Draw the frame
	viewer->frame();
	
	// Switch all the models off until the next frame
	modelSwitch->setAllChildrenOff();
}
Example #4
	// The initialisation function
	__declspec(dllexport) void alvar_init(int width, int height)
	{
		w = width;
		h = height;

		// Calibration. See the manual and ALVAR's internal samples for how to calibrate your camera.
		// Calibration makes marker detection and marker pose calculation more accurate.
		if (! camera.SetCalib("Calibrations/default_calib.xml", w, h)) {
			camera.SetRes(w, h);
		}
		
		// Set projection matrix as ALVAR recommends (based on the camera calibration)
		double p[16];
		camera.GetOpenglProjectionMatrix(p, w, h);

		//Initialize the multimarker system
		for(int i = 0; i < MARKER_COUNT; ++i)
			markerIdVector.push_back(i);

		// We initialize the MultiMarker using a fixed marker field (can be printed from MultiMarker.ppt)
		markerDetector.SetMarkerSize(CORNER_MARKER_SIZE);
		markerDetector.SetMarkerSizeForId(0, CENTRE_MARKER_SIZE);
		
		multiMarker = new alvar::MultiMarker(markerIdVector);
		
		alvar::Pose pose;
		pose.Reset();

		// Add the 5 markers
		multiMarker->PointCloudAdd(0, CENTRE_MARKER_SIZE, pose);
		
		pose.SetTranslation(-10, 6, 0);
		multiMarker->PointCloudAdd(1, CORNER_MARKER_SIZE, pose);
		
		pose.SetTranslation(10, 6, 0);
		multiMarker->PointCloudAdd(2, CORNER_MARKER_SIZE, pose);
		
		pose.SetTranslation(-10, -6, 0);
		multiMarker->PointCloudAdd(3, CORNER_MARKER_SIZE, pose);
		
		pose.SetTranslation(+10, -6, 0);
		multiMarker->PointCloudAdd(4, CORNER_MARKER_SIZE, pose);

		trackerStat.Reset();
	}
vector<Point2f> AlvarObjectTracker::trackInPicturePixels(Mat picture){
	vector<Point2f> res;
	cout << "track0" << endl;
	IplImage image = picture;
	IplImage *image_tmp = &image;
	String result;
	cout << "track2" << endl;
	Mat imageUndistortedMat;
	vector<Point2f> pointBuf;
	//undistort(picture, imageUndistortedMat, cameraMatrix, distCoeffs);
	static alvar::MarkerDetector<alvar::MarkerData> marker_detector;
	cout << "track3" << endl;
	marker_detector.SetMarkerSize(marker_size);
	marker_detector.Detect(image_tmp, &cam, true, true);
	cout << "track4" << endl;
	if(marker_detector.markers->size() > 0){
		cout << "Found sth" << endl;
		// Take the first marker and its first three image corners (0 = origin, 1 = x corner, 2 = y corner)
		int markerIndx = 0;
		vector<Point2f> orgPoint;
		for (int cornerIndx = 0; cornerIndx < 3; ++cornerIndx) {
			orgPoint.push_back(Point2f(
				(float)(*(marker_detector.markers))[markerIndx].marker_corners_img[cornerIndx].x,
				(float)(*(marker_detector.markers))[markerIndx].marker_corners_img[cornerIndx].y));
		}
		vector<Point2f> udorgPoint(orgPoint.size());
		cout << "try undistort" << endl;
		/*undistortPoints(orgPoint, udorgPoint, cameraMatrix, distCoeffs);
		res.push_back(Point2f(udorgPoint[0].x,udorgPoint[0].y));
		res.push_back(Point2f(udorgPoint[1].x,udorgPoint[1].y));
		res.push_back(Point2f(udorgPoint[2].x,udorgPoint[2].y));*/
		res.push_back(Point2f(orgPoint[0].x,orgPoint[0].y));
		res.push_back(Point2f(orgPoint[1].x,orgPoint[1].y));
		res.push_back(Point2f(orgPoint[2].x,orgPoint[2].y));
	}
	return res;
}
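A minimal sketch of how the pixel coordinates returned by trackInPicturePixels could be visualized; the helper name showTrackedCorners is illustrative, and the tracker is assumed to be configured as in the snippets above.

// Hedged sketch: draw the three corner points returned by trackInPicturePixels on the frame.
#include <opencv2/opencv.hpp>

void showTrackedCorners(AlvarObjectTracker &tracker, cv::Mat &frame)
{
	std::vector<cv::Point2f> corners = tracker.trackInPicturePixels(frame);
	for (size_t i = 0; i < corners.size(); ++i) {
		// corner 0 = marker origin, 1 = x-direction corner, 2 = y-direction corner
		cv::circle(frame, corners[i], 5, cv::Scalar(0, 0, 255), 2);
	}
	cv::imshow("tracked corners", frame);
	cv::waitKey(1);
}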
/**
 * @function videocallback
 * @brief Repeat at every captured frame
 */
void videocallback( IplImage *_img ) {

  // Flip the image if needed
  bool flip_image = (_img->origin? true:false);
  if (flip_image) {
    cvFlip(_img);
    _img->origin = !_img->origin;
  }
  
  // Setup the marker detector
  static alvar::MarkerDetector<alvar::MarkerData> marker_detector;
  marker_detector.SetMarkerSize(gMarker_size); 

  // Perform detection
  marker_detector.Detect(_img, &gCam, true, true);
  
  // Draw recognized markers
  GlutViewer::DrawableClear();

  bool detected = false;
  for( size_t j=0; j< marker_detector.markers->size(); j++ ) {
    int id = (*(marker_detector.markers))[j].GetId();   

    std::cout << "Detected marker with id: "<< id << std::endl;

    if( gObjID == id ) {
      
      std::cout << "Detected marker with id:"<<gObjID<< std::endl;
      alvar::Pose p = (*(marker_detector.markers))[j].pose;
      double transf[16];
      p.GetMatrixGL( transf, false);
      
      // transf is in OpenGL column-major order; this prints the matrix row by row
      for( int col = 0; col < 4; ++col ) {
        for( int row = 0; row < 4; ++row ) {
          std::cout << transf[col+row*4] << " ";
        }
        std::cout << std::endl;
      }
      
      std::cout << std::endl;
      
      double r = 1.0 - double(id+1)/32.0;
      double g = 1.0 - double(id*3%32+1)/32.0;
      double b = 1.0 - double(id*7%32+1)/32.0;
      d[j].SetColor(r, g, b);
      
      GlutViewer::DrawableAdd(&(d[j]));
      detected = true;
      break;
    }
      
  } // end of all markers checked

  if( detected == false ) {
    std::cout << "No detected marker with id " << gObjID << std::endl;
  }

  // Put the image back if it was flipped
  if (flip_image) {
    cvFlip(_img);
    _img->origin = !_img->origin;
  }

  // Sleep for one second between frames
  usleep(1.0*1e6);
}
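A hedged sketch of how a callback like this is typically driven: grab frames through ALVAR's CaptureFactory (as in the main() example further below) and hand each frame to videocallback. The helper name runCallbackLoop is illustrative, the header names assume ALVAR's usual layout, and the globals gCam, gMarker_size, gObjID, d and the GlutViewer window are assumed to be initialized elsewhere as in the ALVAR samples.

// Hedged sketch: drive videocallback() from an ALVAR capture loop.
#include <CaptureFactory.h>   // assumed ALVAR header layout
#include <Capture.h>

int runCallbackLoop()
{
	alvar::CaptureFactory *factory = alvar::CaptureFactory::instance();
	alvar::CaptureFactory::CaptureDeviceVector devices = factory->enumerateDevices("highgui");
	if (devices.empty()) return -1;

	alvar::Capture *capture = factory->createCapture(devices.front());
	if (!capture || !capture->start()) return -1;

	for (;;) {
		IplImage *frame = capture->captureImage();
		if (!frame) break;
		videocallback(frame);   // detect, print and draw the requested marker
	}

	capture->stop();
	delete capture;
	return 0;
}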
Example #7
// main
int main(int argc, char** argv)
{
	osg::ArgumentParser arguments(&argc, argv);

	// Let's use the convenience system from ALVAR for capturing.
	// We initialize Highgui through the CaptureFactory (see manual for other options like AVI)
	alvar::CaptureFactory *factory = alvar::CaptureFactory::instance();
    alvar::CaptureFactory::CaptureDeviceVector devices = factory->enumerateDevices("highgui");

    // Check to ensure that a device was found
    if (devices.size() > 0) {
        capture = factory->createCapture(devices.front());
    }

	// Capture is a central feature, so if it fails, we get out of here.
	if (capture && capture->start()) {
	   
		// Let's capture one frame to get video resolution
		IplImage *tempImg = capture->captureImage();
		videoXRes = tempImg->width;
		videoYRes = tempImg->height;

		// Calibration. See the manual and ALVAR's internal samples for how to calibrate your camera.
		// Calibration makes marker detection and marker pose calculation more accurate.
		if (! camera.SetCalib("calib.xml", videoXRes, videoYRes)) {
			camera.SetRes(videoXRes, videoYRes);
		}

		//Create the osg::Image for the video
		videoImage = new osg::Image;

		//Create the osg::Image for the texture (marker hiding)
		texImage = new osg::Image;
		
		//IplImage for the texture generation.
		markerHiderImage=cvCreateImage(cvSize(64, 64), 8, 4);

		// construct the viewer
		viewer = new osgViewer::Viewer(arguments);
		// Let's use (approximately) the same window size as the video.
		viewer->setUpViewInWindow (200, 200, videoXRes, videoYRes);
		// Viewport is the same
		viewer->getCamera()->setViewport(0,0,videoXRes,videoYRes);
		viewer->setLightingMode(osg::View::HEADLIGHT);

		// Attach our own event handler to the system so we can catch the resizing events
		viewer->addEventHandler(new CSimpleWndSizeHandler(videoXRes,videoYRes ));
		
		// Set projection matrix as ALVAR recommends (based on the camera calibration)
		double p[16];
		camera.GetOpenglProjectionMatrix(p,videoXRes,videoYRes);
		viewer->getCamera()->setProjectionMatrix(osg::Matrix(p));

		// Create main root for everything
		arRoot = new osg::Group;
		arRoot->setName("ALVAR stuff (c) VTT");
		
		// Init the video background class and add it to the graph
        videoBG.Init(videoXRes,videoYRes,(tempImg->origin?true:false));
		arRoot->addChild(videoBG.GetOSGGroup());

		// Create model switch and add it the to graph
		modelSwitch = new osg::Switch;
		arRoot->addChild(modelSwitch.get());

		// Create model transformation for the markers and add them under the switch
		mtForMarkerFive = new osg::MatrixTransform;
		mtForMarkerTen = new osg::MatrixTransform;
		modelSwitch->addChild(mtForMarkerFive.get());
		modelSwitch->addChild(mtForMarkerTen.get());

		// add the texture under the marker transformation node
		mtForMarkerFive->addChild(texOnMarker.GetDrawable());

		// All models off
		modelSwitch->setAllChildrenOff();

		// load the data (models).
		modelForMarkerFive = osgDB::readNodeFile("grid.osg");
		modelForMarkerTen = osgDB::readNodeFile("axes.osg");
	    
		// If loading succeeded, add the models under the MatrixTransform nodes.
		if(modelForMarkerFive)
			mtForMarkerFive->addChild(modelForMarkerFive.get());

		if(modelForMarkerTen)
			mtForMarkerTen->addChild(modelForMarkerTen.get());
	    
		// Tell ALVAR the markers' size (the same for all).
		// You can also specify a different size for each individual marker.
		markerDetector.SetMarkerSize(MARKER_SIZE);

		// Set scene data 
		viewer->setSceneData(arRoot.get());

		// And start the main loop
		while(!viewer->done()){
			//Call the rendering function over and over again.
			renderer();
		}
	} 
	// Time to close the system
	if(capture){
		capture->stop();
		delete capture;
	}

	if(markerHiderImage)
		cvReleaseImage(&markerHiderImage);
	
	return 0; // bye bye. Happy coding!
}
Example #8
/*
	Get pose from marker field.
*/
double GetMultiMarkerPose(IplImage *image, alvar::Camera *cam, alvar::Pose &pose)
{
    static bool init = true;
	bool add_measurement = curr_meas<20;
	bool optimize = !add_measurement && !optimize_done;

    if (init) 
	{
        std::cout << "Using manual multimarker approach with MultiMarkerInitializer and" << std::endl;
        std::cout << "MultiMarkerBundle. Point the camera towards the markers 0-" << nof_markers-1 << std::endl;
		std::cout << " (marker " << pose_marker << " is required, others are optional). " << std::endl;
        std::cout << "20 frames will be acquired." << std::endl;
        init = false;
		std::vector<int> id_vector;
		id_vector.push_back(pose_marker);
        for(int i = 0; i < nof_markers; ++i)
			if( i!=pose_marker )
				id_vector.push_back(i);
        // We make the initialization for MultiMarkerBundle using MultiMarkerInitializer
        // Each marker needs to be visible in at least two images, and at most 32 images are used.
		multi_marker_init = new alvar::MultiMarkerInitializer(id_vector, 2, 32);
        pose.Reset();
        multi_marker_init->PointCloudAdd(id_vector[0], marker_size, pose);
        multi_marker_bundle = new alvar::MultiMarkerBundle(id_vector);
    }

    double error = -1;
    if (!optimize_done) 
	{
        if (marker_detector.Detect(image, cam, true, visualize, 0.0)) 
		{
            if (!visualize)
                error = multi_marker_init->Update(marker_detector.markers, cam, pose, image);
            else
                error = multi_marker_init->Update(marker_detector.markers, cam, pose);
        }
    } 
	else 
	{
        if (marker_detector.Detect(image, cam, true, visualize, 0.0))
		{
            if (!visualize)
                error = multi_marker_bundle->Update(marker_detector.markers, cam, pose, image);
            else 
                error = multi_marker_bundle->Update(marker_detector.markers, cam, pose);
            if ((multi_marker_bundle->SetTrackMarkers(marker_detector, cam, pose, image) > 0) &&
                (marker_detector.DetectAdditional(image, cam, !visualize) > 0))
            {
                if (!visualize)
                    error = multi_marker_bundle->Update(marker_detector.markers, cam, pose, image);
                else
                    error = multi_marker_bundle->Update(marker_detector.markers, cam, pose);
            }
        }
    }

    if (add_measurement && every_20th>=20) 
	{
        if (marker_detector.markers->size() >= 2) 
		{
            std::cout << "Adding measurement... (" << curr_meas+1 << "/20) " << std::endl;
            multi_marker_init->MeasurementsAdd(marker_detector.markers);
            add_measurement = false;
			curr_meas++;
        }
		every_20th = 0;
    }
	every_20th++;

    if (optimize) 
	{
        std::cout << "Initializing optimization..." << std::endl;
        if (!multi_marker_init->Initialize(cam)) {
            std::cout << "Initialization failed, this config needs more measurements." << std::endl;

        } 
		else 
		{
            // Reset the bundle adjuster.
            multi_marker_bundle->Reset();
            multi_marker_bundle->MeasurementsReset();
            // Copy all measurements into the bundle adjuster.
            for (int i = 0; i < multi_marker_init->getMeasurementCount(); ++i) 
			{
				alvar::Pose pose;
                multi_marker_init->getMeasurementPose(i, cam, pose);
				const std::vector<alvar::MultiMarkerInitializer::MarkerMeasurement> markers 
                    = multi_marker_init->getMeasurementMarkers(i);
                multi_marker_bundle->MeasurementsAdd(&markers, pose);
            }
            // Initialize the bundle adjuster with initial marker poses.
            multi_marker_bundle->PointCloudCopy(multi_marker_init);
            std::cout << "Optimizing..." << std::endl;
            std::cout << "(this may take more than a minute, please wait...)" << std::endl;
            if (multi_marker_bundle->Optimize(cam, 0.01, 20)) 
			{
                std::cout << "Optimizing done" << std::endl;
                optimize_done = true;
				visualize = false;
            } 
			else 
			{
                std::cout << "Optimizing FAILED!" << std::endl;
            }
        }
        optimize = false;
    }
    return error;
}
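A hedged sketch of how GetMultiMarkerPose is typically called once per captured frame, modeled on ALVAR's multi-marker samples: keep feeding frames while measurements are collected and the bundle is optimized, and read the pose back as an OpenGL matrix whenever the returned error is valid. The helper name processFrame is illustrative; the globals used by the function (marker_detector, optimize_done, and so on) are assumed to be defined as above.

// Hedged sketch: call GetMultiMarkerPose for every frame and use the pose when available.
void processFrame(IplImage *frame, alvar::Camera *cam)
{
	alvar::Pose pose;
	double error = GetMultiMarkerPose(frame, cam, pose);

	if (error >= 0) {
		double gl_mat[16];
		pose.GetMatrixGL(gl_mat);   // column-major, ready for glLoadMatrixd
		// ... use gl_mat to place the camera or the models ...
	}
}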
Example #9
	// This function detects markers in the image passed from Unity
	__declspec(dllexport) void alvar_process(int* imageData, double* transMatrix)
	{
		alvar::Pose pose;

		// Initialisation of the image
		image = new IplImage();
		image->nSize = sizeof(IplImage);
		image->ID = 0;
		image->nChannels = 3;
		image->alphaChannel = 0;
		image->depth = IPL_DEPTH_8U;

		memcpy(&image->colorModel, "RGB", sizeof(char) * 4);
		memcpy(&image->channelSeq, "RGB", sizeof(char) * 4);
		image->dataOrder = 0;

		image->origin = 0;
		image->align = 4;
		image->width = w;
		image->height = h;

		image->roi = NULL;
		image->maskROI = NULL;
		image->imageId = NULL;
		image->tileInfo = NULL;
		image->widthStep = w * 3;
		image->imageSize = h * image->widthStep;

		image->imageDataOrigin = NULL;

		int n = w * h * 3;
		tmp = new char[n];

		// We put the image from Unity into the IplImage.
		// The Unity image is RGB while OpenCV expects BGR, and the Unity image
		// starts in the lower-left corner while OpenCV starts in the upper-left.
		// Note that this loop only flips the rows; the channel order is left as-is.
		for (int i = 0; i < (w*3); ++i)
			for (int j = 0; j < h; ++j)
				tmp[i + j * (w*3)] = (char)imageData[i + (h - j - 1) * (w*3)];

		// We put the data in the image
		image->imageData = tmp;

		// Update the tracker statistics for the frame
		trackerStat.Track(image);

		// Detect the markers
		if (markerDetector.Detect(image, &camera, false, false)) {
			// If the detection succeeds, the marker field is in sight; update the data
			multiMarker->Update(markerDetector.markers, &camera, pose);

			// get the field's matrix
			pose.GetMatrixGL(transMatrix, false);
			trackerStat.Reset();
		}
	
		// Clean up
		delete[] tmp;
		delete image;
	}
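For completeness, a hedged sketch of how the exported plugin functions above might be exercised from a native test harness: call alvar_init once with the frame size, then pass an RGB frame (packed one byte per int, bottom row first, as the conversion loop in alvar_process expects) together with a 16-element double array for the resulting pose matrix. The function names and data layout come from the snippets above; the frame contents here are dummy data and the harness itself is illustrative.

// Hedged test-harness sketch for the exported plugin functions above.
#include <vector>

int main()
{
	const int w = 640, h = 480;
	alvar_init(w, h);                                // load calibration, build the marker field

	std::vector<int> imageData(w * h * 3, 0);        // would normally hold a real RGB frame
	double transMatrix[16] = {0};

	alvar_process(&imageData[0], transMatrix);       // detect the field, fill the pose matrix
	int n = alvar_number_of_detected_markers(&imageData[0]);

	return n >= 0 ? 0 : 1;
}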