void calcLine(Point& centroid,Vector& axis) // Returns the least-squares line via the given centroid and axis
		{
		/* Calculate the processed points' centroid: */
		centroid=pca.calcCentroid();
		
		/* Calculate the point set's covariance matrix: */
		pca.calcCovariance();
		
		/* Calculate the covariance matrix's eigenvalues: */
		double evs[3];
		pca.calcEigenvalues(evs);
		
		/* Get the "longest" eigenvector: */
		axis=pca.calcEigenvector(evs[0]);
		};
	/* Methods: */
	void operator()(const LidarPoint& lp) // Process the given LiDAR point
		{
		/* Add the point to the bounding box: */
		bb.addPoint(lp);
		
		/* Add the point to the PCA calculator: */
		pca.accumulatePoint(lp);
		};
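
A minimal driver sketch for the functor fragment above (the enclosing class name "LineFitter" and the plain point vector are assumptions for illustration; in LidarViewer-style code the functor is handed to an octree traversal instead of a loop):

LineFitter fitter; // Hypothetical name for the class containing the members above
std::vector<LidarPoint> points; // Filled elsewhere
for(std::vector<LidarPoint>::const_iterator pIt=points.begin();pIt!=points.end();++pIt)
	fitter(*pIt); // Accumulates the point into the bounding box and the PCA calculator
Point centroid;
Vector axis;
fitter.calcLine(centroid,axis); // Retrieves the centroid and direction of the fitted line
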
void DepthCorrectionTool::buttonCallback(int buttonSlotIndex,Vrui::InputDevice::ButtonCallbackData* cbData)
	{
	if(cbData->newButtonState)
		{
		if(buttonSlotIndex==0)
			{
			/* Add a new averaged depth frame and calculate the best-fitting plane: */
			DepthFrame df;
			df.frame=Kinect::FrameBuffer(application->depthFrameSize[0],application->depthFrameSize[1],application->depthFrameSize[0]*application->depthFrameSize[1]*sizeof(float));
			float foregroundCutoff=float(application->averageNumFrames)*0.5f;
			float* afdPtr=application->averageFrameDepth;
			float* affPtr=application->averageFrameForeground;
			float* dfPtr=static_cast<float*>(df.frame.getBuffer());
			typedef Geometry::PCACalculator<3>::Point PPoint;
			typedef Geometry::PCACalculator<3>::Vector PVector;
			Geometry::PCACalculator<3> pca;
			for(unsigned int y=0;y<application->depthFrameSize[1];++y)
				for(unsigned int x=0;x<application->depthFrameSize[0];++x,++afdPtr,++affPtr,++dfPtr)
					{
					if(*affPtr>=foregroundCutoff)
						{
						/* Calculate the average depth value: */
						*dfPtr=(*afdPtr)/(*affPtr);
						
						/* Add the depth pixel to the PCA calculator: */
						pca.accumulatePoint(PPoint(double(x)+0.5,double(y)+0.5,double(*dfPtr)));
						}
					else
						*dfPtr=2047.0f; // Mark the pixel invalid using the Kinect's out-of-range raw depth value
					}
			
			/* Calculate the best-fitting plane: */
			PPoint centroid=pca.calcCentroid();
			pca.calcCovariance();
			double evs[3];
			pca.calcEigenvalues(evs);
			PVector normal=pca.calcEigenvector(evs[2]); // The eigenvector of the smallest eigenvalue is the plane's normal
			df.plane=Plane(normal,centroid);
			depthFrames.push_back(df);
			}
		else
			{
			/* Calculate the per-pixel affine depth correction coefficients: */
			Kinect::FrameSource::PixelDepthCorrection* coefficients=new Kinect::FrameSource::PixelDepthCorrection[application->depthFrameSize[1]*application->depthFrameSize[0]];
			Kinect::FrameSource::PixelDepthCorrection* cPtr=coefficients;
			unsigned int pixelOffset=0;
			for(unsigned int y=0;y<application->depthFrameSize[1];++y)
				{
				for(unsigned int x=0;x<application->depthFrameSize[0];++x,++cPtr,++pixelOffset)
					{
					/* Build the least-squares system for the affine correction expected = scale*actual + offset: */
					Math::Matrix ata(2,2,0.0);
					Math::Matrix atb(2,1,0.0);
					unsigned int numFrames=0;
					for(std::vector<DepthFrame>::iterator dfIt=depthFrames.begin();dfIt!=depthFrames.end();++dfIt)
						{
						double actual=double(static_cast<float*>(dfIt->frame.getBuffer())[pixelOffset]);
						if(actual!=2047.0)
							{
							ata(0,0)+=actual*actual;
							ata(0,1)+=actual;
							ata(1,0)+=actual;
							ata(1,1)+=1.0;
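
							/* Evaluate the frame's fitted plane at the pixel's center to get the expected depth: */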
							double expected=(dfIt->plane.getOffset()-(double(x)+0.5)*dfIt->plane.getNormal()[0]-(double(y)+0.5)*dfIt->plane.getNormal()[1])/dfIt->plane.getNormal()[2];
							atb(0)+=actual*expected;
							atb(1)+=expected;
							++numFrames;
							}
						}
					
					if(numFrames>=2)
						{
						/* Solve for the regression coefficients: */
						Math::Matrix x=atb/ata; // Matrix "division" solves the linear system ata*x = atb
						cPtr->scale=float(x(0));
						cPtr->offset=float(x(1));
						}
					else
						{
						/* Use identity correction if the pixel is underdetermined: */
						cPtr->scale=1.0f;
						cPtr->offset=0.0f;
						}
					}
				}
			
			/* Save the depth correction image: */
			std::string depthCorrectionFileName=KINECT_CONFIG_DIR;
			depthCorrectionFileName.push_back('/');
			depthCorrectionFileName.append(KINECT_CAMERA_DEPTHCORRECTIONFILENAMEPREFIX);
			depthCorrectionFileName.push_back('-');
			depthCorrectionFileName.append(application->camera->getSerialNumber());
			depthCorrectionFileName.append(".dat");
			std::cout<<"Writing depth correction file "<<depthCorrectionFileName<<std::endl;
			IO::FilePtr depthCorrectionFile(Vrui::openFile(depthCorrectionFileName.c_str(),IO::File::WriteOnly));
			depthCorrectionFile->setEndianness(Misc::LittleEndian);
			cPtr=coefficients;
			for(unsigned int y=0;y<application->depthFrameSize[1];++y)
				for(unsigned int x=0;x<application->depthFrameSize[0];++x,++cPtr)
					{
					depthCorrectionFile->write<float>(cPtr->scale);
					depthCorrectionFile->write<float>(cPtr->offset);
					}
			
			/* Clean up: */
			delete[] coefficients;
			}
		}
	}
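
The regression above fits expected = scale*actual + offset per pixel in the least-squares sense. Below is a self-contained sketch of the same 2x2 normal-equation solve using Cramer's rule instead of Math::Matrix; the helper name and the std::vector interface are assumptions for illustration:

#include <cstddef>
#include <utility>
#include <vector>

/* Hypothetical helper: fit corrected = scale*measured + offset to sample pairs: */
std::pair<double,double> fitPixelCorrection(const std::vector<double>& measured,const std::vector<double>& expected)
	{
	double a00=0.0,a01=0.0,a11=0.0,b0=0.0,b1=0.0;
	for(std::size_t i=0;i<measured.size();++i)
		{
		a00+=measured[i]*measured[i]; // Same accumulation as ata(0,0) above
		a01+=measured[i]; // ata(0,1) and ata(1,0)
		a11+=1.0; // ata(1,1)
		b0+=measured[i]*expected[i]; // atb(0)
		b1+=expected[i]; // atb(1)
		}
	double det=a00*a11-a01*a01; // Singular with fewer than two distinct samples; callers must guard, as the numFrames>=2 check does above
	return std::make_pair((b0*a11-b1*a01)/det,(a00*b1-a01*b0)/det); // (scale,offset)
	}
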
ModelTracker::Transform ModelTracker::epnp(const ModelTracker::ImgPoint imagePoints[])
	{
	/*********************************************************************
	Step 1: Calculate four control points enveloping the model points by
	running Principal Component Analysis on the set of model points.
	*********************************************************************/
	
	Point cps[4]; // The four control points
	Geometry::Matrix<Scalar,3,3> cpm; // The matrix to calculate barycentric control point weights for model points
	
	Geometry::PCACalculator<3> pca;
	for(unsigned int mpi=0;mpi<numModelPoints;++mpi)
		pca.accumulatePoint(modelPoints[mpi]);
	
	/* First control point is model point set's centroid: */
	cps[0]=Point(pca.calcCentroid());
	
	/* Next three control points are aligned with the model point set's principal axes: */
	pca.calcCovariance();
	double pcaEvals[3];
	pca.calcEigenvalues(pcaEvals);
	Vector pcaEvecs[3];
	for(int i=0;i<3;++i)
		pcaEvecs[i]=Vector(pca.calcEigenvector(pcaEvals[i]));
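	/* Make the eigenvector frame right-handed so it defines a proper rotation: */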
	if((pcaEvecs[0]^pcaEvecs[1])*pcaEvecs[2]<0.0)
		{
		// std::cout<<"World control points are left-handed!"<<std::endl;
		pcaEvecs[2]*=Scalar(-1);
		}
	for(int i=0;i<3;++i)
		for(int j=0;j<3;++j)
			cpm(i,j)=pcaEvecs[i][j];
	Transform worldToModel(Vector(cpm*(Point::origin-cps[0])),Transform::Rotation::fromMatrix(cpm)); // Maps world points into the control points' principal-axes frame
	for(int i=0;i<3;++i)
		{
		Scalar scale=Scalar(Math::sqrt(pcaEvals[i])); // Scale principal components to the model
		cps[1+i]=cps[0]+pcaEvecs[i]*scale; // Should add a check for zero eigenvalue here
		
		/* Calculate the inverse control point matrix directly, as it's orthogonal: */
		for(int j=0;j<3;++j)
			cpm(i,j)/=scale;
		}
	
	#if EPNP_DEBUG
	std::cout<<"Principal components: "<<Math::sqrt(pcaEvals[0])<<", "<<Math::sqrt(pcaEvals[1])<<", "<<Math::sqrt(pcaEvals[2])<<std::endl;
	std::cout<<"Control points: "<<cps[0]<<", "<<cps[1]<<", "<<cps[2]<<", "<<cps[3]<<std::endl;
	#endif
	
	/*********************************************************************
	Step 2: Calculate the linear system M^T*M.
	*********************************************************************/
	
	Math::Matrix mtm(12,12,0.0);
	const Projection::Matrix& pm=projection.getMatrix();
	Scalar fu=pm(0,0);
	Scalar sk=pm(0,1);
	Scalar uc=pm(0,2);
	Scalar fv=pm(1,1);
	Scalar vc=pm(1,2);
	for(unsigned int mpi=0;mpi<numModelPoints;++mpi)
		{
		/* Calculate the model point's control point weights: */
		Vector mpc=modelPoints[mpi]-cps[0];
		Scalar alphai[4];
		for(int i=0;i<3;++i)
			alphai[1+i]=cpm(i,0)*mpc[0]+cpm(i,1)*mpc[1]+cpm(i,2)*mpc[2];
		alphai[0]=Scalar(1)-alphai[1]-alphai[2]-alphai[3];
		
		/* Calculate the coefficients of the model point / image point association's two linear equations: */
		double eqs[2][12];
		for(int i=0;i<4;++i)
			{
			/* Equation for image point's u coordinate: */
			eqs[0][i*3+0]=alphai[i]*fu;
			eqs[0][i*3+1]=alphai[i]*sk;
			eqs[0][i*3+2]=alphai[i]*(uc-imagePoints[mpi][0]);
			
			/* Equation for image point's v coordinate: */
			eqs[1][i*3+0]=0.0;
			eqs[1][i*3+1]=alphai[i]*fv;
			eqs[1][i*3+2]=alphai[i]*(vc-imagePoints[mpi][1]);
			}
		
		/* Enter the model point / image point association's two linear equations into the least-squares matrix: */
		for(unsigned int i=0;i<12;++i)
			for(unsigned int j=0;j<12;++j)
				mtm(i,j)+=eqs[0][i]*eqs[0][j]+eqs[1][i]*eqs[1][j];
		}
	
	/*********************************************************************
	Step 3: Find four potential solutions to the pose estimation problem
	by assuming that either 1, 2, 3, or 4 eigenvalues of the least-squares
	linear system are zero or very small, and calculate a scale-preserving
	transformation for all cases. Then pick the one that minimizes
	reprojection error.
	*********************************************************************/
	
	/* Get the full set of eigenvalues and eigenvectors of the least-squares matrix: */
	std::pair<Math::Matrix,Math::Matrix> qe=mtm.jacobiIteration();
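	/* qe.first holds the eigenvectors as matrix columns; qe.second holds the eigenvalues as a column vector */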
	
	/* Find the indices of the four smallest eigenvalues: */
	unsigned int evIndices[12];
	for(unsigned int i=0;i<12;++i)
		evIndices[i]=i;
	for(unsigned int i=0;i<4;++i)
		{
		/* Find the next-smallest eigenvalue: */
		unsigned int minI=i;
		double minE=Math::abs(qe.second(evIndices[i],0));
		for(unsigned int j=i+1;j<12;++j)
			{
			double e=Math::abs(qe.second(evIndices[j],0));
			if(minE>e)
				{
				minI=j;
				minE=e;
				}
			}
		
		/* Move the found eigenvalue to the front: */
		unsigned int ti=evIndices[i];
		evIndices[i]=evIndices[minI];
		evIndices[minI]=ti;
		}
	
	#if EPNP_DEBUG
	std::cout<<"MTM Eigenvalues:";
	for(unsigned int i=0;i<12;++i)
		std::cout<<' '<<qe.second(evIndices[i],0);
	std::cout<<std::endl;
	#endif
	
	/* Calculate the pairwise distances between the four control points in world space: */
	Scalar cpDists[6];
	cpDists[0]=Geometry::sqrDist(cps[0],cps[1]);
	cpDists[1]=Geometry::sqrDist(cps[0],cps[2]);
	cpDists[2]=Geometry::sqrDist(cps[0],cps[3]);
	cpDists[3]=Geometry::sqrDist(cps[1],cps[2]);
	cpDists[4]=Geometry::sqrDist(cps[1],cps[3]);
	cpDists[5]=Geometry::sqrDist(cps[2],cps[3]);
	
	/*********************************************************************
	Step 3a: Calculate the solution vector for the assumed case of one
	very small eigenvalue:
	*********************************************************************/
	
	/* Extract the positions of the four control points in camera space from the eigenvector of the smallest eigenvalue: */
	Point cpcs[4];
	for(unsigned int cpi=0;cpi<4;++cpi)
		for(unsigned int i=0;i<3;++i)
			cpcs[cpi][i]=Scalar(qe.first(cpi*3+i,evIndices[0]));
	
	/* Calculate the pairwise distances between the four control points in camera space: */
	Scalar cpcDists[6];
	cpcDists[0]=Geometry::sqrDist(cpcs[0],cpcs[1]);
	cpcDists[1]=Geometry::sqrDist(cpcs[0],cpcs[2]);
	cpcDists[2]=Geometry::sqrDist(cpcs[0],cpcs[3]);
	cpcDists[3]=Geometry::sqrDist(cpcs[1],cpcs[2]);
	cpcDists[4]=Geometry::sqrDist(cpcs[1],cpcs[3]);
	cpcDists[5]=Geometry::sqrDist(cpcs[2],cpcs[3]);
	
	/* Calculate the scaling factor: */
	Scalar betaNumerator(0);
	Scalar betaDenominator(0);
	for(int i=0;i<6;++i)
		{
		betaNumerator+=Math::sqrt(cpcDists[i]*cpDists[i]);
		betaDenominator+=cpcDists[i];
		}
	Scalar beta=-betaNumerator/betaDenominator;
	#if EPNP_DEBUG
	std::cout<<"Scaling factor: "<<beta<<std::endl;
	#endif
	
	/* Recalculate the camera-space control points: */
	for(int i=0;i<4;++i)
		for(int j=0;j<3;++j)
			cpcs[i][j]*=beta;
	
	/* Check if the control point order was flipped: */
	if(((cpcs[1]-cpcs[0])^(cpcs[2]-cpcs[0]))*(cpcs[3]-cpcs[0])<0.0)
		{
		// std::cout<<"Control points got done flipped"<<std::endl;
		// cpcs[3]=cpcs[0]-(cpcs[3]-cpcs[0]);
		}
	
	#if EPNP_DEBUG
	/* Print the camera-space control points: */
	for(int i=0;i<4;++i)
		std::cout<<"CCP "<<i<<": "<<cpcs[i][0]<<", "<<cpcs[i][1]<<", "<<cpcs[i][2]<<std::endl;
	std::cout<<Math::sqrt((cpcs[1]-cpcs[0])*(cpcs[1]-cpcs[0]));
	std::cout<<' '<<(cpcs[1]-cpcs[0])*(cpcs[2]-cpcs[0]);
	std::cout<<' '<<(cpcs[1]-cpcs[0])*(cpcs[3]-cpcs[0]);
	std::cout<<' '<<Math::sqrt((cpcs[2]-cpcs[0])*(cpcs[2]-cpcs[0]));
	std::cout<<' '<<(cpcs[2]-cpcs[0])*(cpcs[3]-cpcs[0]);
	std::cout<<' '<<Math::sqrt((cpcs[3]-cpcs[0])*(cpcs[3]-cpcs[0]))<<std::endl;
	
	std::cout<<Math::sqrt((cps[1]-cps[0])*(cps[1]-cps[0]));
	std::cout<<' '<<(cps[1]-cps[0])*(cps[2]-cps[0]);
	std::cout<<' '<<(cps[1]-cps[0])*(cps[3]-cps[0]);
	std::cout<<' '<<Math::sqrt((cps[2]-cps[0])*(cps[2]-cps[0]));
	std::cout<<' '<<(cps[2]-cps[0])*(cps[3]-cps[0]);
	std::cout<<' '<<Math::sqrt((cps[3]-cps[0])*(cps[3]-cps[0]))<<std::endl;
	#endif
	
	/* Calculate the transformation from camera control point space to camera space: */
	Vector cbase[3];
	for(int i=0;i<3;++i)
		cbase[i]=cpcs[1+i]-cpcs[0];
	
	/* Orthonormalize the basis vectors via Gram-Schmidt: */
	cbase[0].normalize();
	cbase[1]-=(cbase[0]*cbase[1])*cbase[0];
	cbase[1].normalize();
	cbase[2]-=(cbase[0]*cbase[2])*cbase[0];
	cbase[2]-=(cbase[1]*cbase[2])*cbase[1];
	cbase[2].normalize();
	Geometry::Matrix<Scalar,3,3> camera;
	for(int i=0;i<3;++i)
		for(int j=0;j<3;++j)
			camera(i,j)=cbase[j][i];
	Transform modelToCamera(cpcs[0]-Point::origin,Transform::Rotation::fromMatrix(camera));
	
	return modelToCamera*worldToModel;
	}
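
The step 3 comment mentions picking the candidate that minimizes reprojection error, while this excerpt returns the one-small-eigenvalue solution directly. A minimal sketch of how a candidate pose could be scored, mirroring the intrinsics handling in epnp() above (the helper is a hypothetical addition, not part of ModelTracker):

/* Hypothetical scoring helper: sum of squared pixel distances between projected model points and measured image points: */
Scalar reprojectionError2(const Transform& modelToCamera,const ImgPoint imagePoints[]) const
	{
	/* Read the intrinsics from the projection matrix, as in epnp() above: */
	const Projection::Matrix& pm=projection.getMatrix();
	Scalar fu=pm(0,0),sk=pm(0,1),uc=pm(0,2),fv=pm(1,1),vc=pm(1,2);
	
	Scalar err2(0);
	for(unsigned int mpi=0;mpi<numModelPoints;++mpi)
		{
		/* Transform the model point to camera space and project it with the pinhole intrinsics: */
		Point cp=modelToCamera.transform(modelPoints[mpi]);
		Scalar u=(fu*cp[0]+sk*cp[1]+uc*cp[2])/cp[2];
		Scalar v=(fv*cp[1]+vc*cp[2])/cp[2];
		err2+=Math::sqr(u-imagePoints[mpi][0])+Math::sqr(v-imagePoints[mpi][1]);
		}
	return err2;
	}
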
	size_t getNumPoints(void) const // Returns the number of processed points
		{
		return pca.getNumPoints();
		}
ScreenCalibrator::ScreenCalibrator(int& argc,char**& argv,char**& appDefaults)
	:Vrui::Application(argc,argv,appDefaults),
	 trackingPointsMover(0)
	{
	/* Create and register the point query tool class: */
	PointQueryToolFactory* pointQueryToolFactory=new PointQueryToolFactory("PointQueryTool","Point Query",0,*Vrui::getToolManager());
	pointQueryToolFactory->setNumButtons(1);
	pointQueryToolFactory->setButtonFunction(0,"Query Point");
	Vrui::getToolManager()->addClass(pointQueryToolFactory,Vrui::ToolManager::defaultToolFactoryDestructor);
	
	/* Parse the command line: */
	const char* optitrackFileName=0;
	bool optitrackFlipZ=false;
	const char* totalstationFileName=0;
	int screenPixelSize[2]={-1,-1};
	int screenSquareSize=200;
	double unitScale=1.0;
	for(int i=1;i<argc;++i)
		{
		if(argv[i][0]=='-')
			{
			if(strcasecmp(argv[i]+1,"screenSize")==0)
				{
				for(int j=0;j<2;++j)
					{
					++i;
					screenPixelSize[j]=atoi(argv[i]);
					}
				}
			else if(strcasecmp(argv[i]+1,"squareSize")==0)
				{
				++i;
				screenSquareSize=atoi(argv[i]);
				}
			else if(strcasecmp(argv[i]+1,"metersToInches")==0)
				unitScale=1000.0/25.4;
			else if(strcasecmp(argv[i]+1,"unitScale")==0)
				{
				++i;
				unitScale=atof(argv[i]);
				}
			else if(strcasecmp(argv[i]+1,"flipZ")==0)
				optitrackFlipZ=true;
			else
				{
				/* Ignore unrecognized options: */
				}
			}
		else if(totalstationFileName==0)
			totalstationFileName=argv[i];
		else if(optitrackFileName==0)
			optitrackFileName=argv[i];
		else
			{
			/* Ignore extraneous non-option arguments: */
			}
		}
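	
	/* A hypothetical invocation implied by the parser above (file names are placeholders):
	   ScreenCalibrator survey.txt optitrack.csv -screenSize 1920 1080 -squareSize 200 -metersToInches -flipZ */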
	
	/* Read the Optitrack sample file: */
	if(optitrackFileName!=0)
		{
		readOptitrackSampleFile(optitrackFileName,optitrackFlipZ);
		std::cout<<"Read "<<trackingPoints.size()<<" ball points from Optitrack sample file"<<std::endl;
		}
	
	/* Read relevant point classes from the Totalstation survey file: */
	if(totalstationFileName!=0)
		{
		screenPoints=readTotalstationSurveyFile(totalstationFileName,"SCREEN");
		floorPoints=readTotalstationSurveyFile(totalstationFileName,"FLOOR");
		ballPoints=readTotalstationSurveyFile(totalstationFileName,"BALLS");
		std::cout<<"Read "<<ballPoints.size()<<" ball points from TotalStation survey file"<<std::endl;
		}
	
	/*********************************************************************
	Establish a normalized coordinate system with the floor at the z=0
	plane, the screen in a plane approximately orthogonal to the y axis,
	and the screen center above the origin.
	*********************************************************************/
	
	/* Fit a plane to the floor points: */
	Geometry::PCACalculator<3> floorPca;
	for(PointList::const_iterator fpIt=floorPoints.begin();fpIt!=floorPoints.end();++fpIt)
		floorPca.accumulatePoint(*fpIt);
	Point floorCentroid=floorPca.calcCentroid();
	floorPca.calcCovariance();
	double floorEv[3];
	floorPca.calcEigenvalues(floorEv);
	Geometry::PCACalculator<3>::Vector floorNormal=floorPca.calcEigenvector(floorEv[2]);
	
	/* Fit a plane to the screen points: */
	Geometry::PCACalculator<3> screenPca;
	for(PointList::const_iterator spIt=screenPoints.begin();spIt!=screenPoints.end();++spIt)
		screenPca.accumulatePoint(*spIt);
	Point screenCentroid=screenPca.calcCentroid();
	screenPca.calcCovariance();
	double screenEv[3];
	screenPca.calcEigenvalues(screenEv);
	Geometry::PCACalculator<3>::Vector screenNormal=screenPca.calcEigenvector(screenEv[2]);
	
	/* Flip the floor normal such that it points towards the screen points: */
	if((screenCentroid-floorCentroid)*floorNormal<Scalar(0))
		floorNormal=-floorNormal;
	
	/* Flip the screen normal such that it points away from the ball points: */
	Point::AffineCombiner ballC;
	for(PointList::const_iterator bpIt=ballPoints.begin();bpIt!=ballPoints.end();++bpIt)
		ballC.addPoint(*bpIt);
	if((ballC.getPoint()-screenCentroid)*screenNormal>Scalar(0))
		screenNormal=-screenNormal;
	
	/* Project the screen centroid onto the floor plane to get the coordinate system origin: */
	Point origin=screenCentroid-floorNormal*(((screenCentroid-floorCentroid)*floorNormal)/Geometry::sqr(floorNormal));
	
	/* Orthonormalize the screen normal against the floor normal: */
	Vector y=screenNormal-floorNormal*((screenNormal*floorNormal)/Geometry::sqr(floorNormal));
	Vector x=Geometry::cross(y,floorNormal);
	
	#if 0
	/* Calculate a rotation to align the floor normal with +z and the (horizontal) screen normal with +y: */
	ONTransform::Rotation rot=ONTransform::Rotation::fromBaseVectors(x,y);
	#endif
	
	/*********************************************************************
	Calculate a transformation to move the Totalstation survey points into
	the normalized coordinate system:
	*********************************************************************/
	
	ONTransform transform(origin-Point::origin,ONTransform::Rotation::fromBaseVectors(x,y));
	transform.doInvert();
	
	/* Transform all survey points: */
	for(PointList::iterator spIt=screenPoints.begin();spIt!=screenPoints.end();++spIt)
		*spIt=transform.transform(*spIt);
	for(PointList::iterator fpIt=floorPoints.begin();fpIt!=floorPoints.end();++fpIt)
		*fpIt=transform.transform(*fpIt);
	for(PointList::iterator bpIt=ballPoints.begin();bpIt!=ballPoints.end();++bpIt)
		*bpIt=transform.transform(*bpIt);
	
	if(screenPixelSize[0]>0&&screenPixelSize[1]>0&&screenSquareSize>0)
		{
		/*********************************************************************
		Calculate the optimal projective transformation and screen
		transformation (orthonormal transformation plus non-uniform scaling in
		x and y) from theoretical screen points to surveyed screen points:
		*********************************************************************/
		
		/* Create a list of theoretical screen points: */
		PointList screen;
		int screenPixelOffset[2];
		for(int i=0;i<2;++i)
			screenPixelOffset[i]=((screenPixelSize[i]-1)%screenSquareSize)/2;
		for(int y=screenPixelOffset[1];y<screenPixelSize[1];y+=screenSquareSize)
			for(int x=screenPixelOffset[0];x<screenPixelSize[0];x+=screenSquareSize)
				screen.push_back(Point((Scalar(x)+Scalar(0.5))/Scalar(screenPixelSize[0]),Scalar(1)-(Scalar(y)+Scalar(0.5))/Scalar(screenPixelSize[1]),0));
		if(screen.size()!=screenPoints.size())
			Misc::throwStdErr("Wrong number of screen points, got %d instead of %d",int(screenPoints.size()),int(screen.size()));
		
		/* Find the best-fitting projective transformation for the measured screen points: */
		PTransformFitter ptf(screen.size(),&screen[0],&screenPoints[0]);
		PTransformFitter::Scalar screenResult2=LevenbergMarquardtMinimizer<PTransformFitter>::minimize(ptf);
		std::cout<<"Projective transformation fitting final distance: "<<screenResult2<<std::endl;
		pScreenTransform=ptf.getTransform();
		
		/* Print the screen transformation matrix: */
		std::cout<<"Projective transformation matrix:"<<std::endl;
		std::cout<<std::setprecision(6)<<pScreenTransform<<std::endl;
		
		/* Find the best-fitting screen transformation for the measured screen points: */
		ScreenTransformFitter stf(screen.size(),&screen[0],&screenPoints[0]);
		ScreenTransformFitter::Scalar screenResult1=LevenbergMarquardtMinimizer<ScreenTransformFitter>::minimize(stf);
		std::cout<<"Screen transformation fitting final distance: "<<screenResult1<<std::endl;
		screenTransform=stf.getTransform();
		screenSize[0]=stf.getSize(0);
		screenSize[1]=stf.getSize(1);
		std::cout<<"Optimal screen size: "<<screenSize[0]<<", "<<screenSize[1]<<std::endl;
		std::cout<<"Optimal screen origin: "<<screenTransform.getOrigin()<<std::endl;
		std::cout<<"Optimal horizontal screen axis: "<<screenTransform.getDirection(0)<<std::endl;
		std::cout<<"Optimal vertical screen axis: "<<screenTransform.getDirection(1)<<std::endl;
		
		/*********************************************************************
		Calculate a homography matrix from the optimal screen transformation
		to the optimal projective transformation to correct screen
		misalignments:
		*********************************************************************/
		
		Point sCorners[4];
		Point pCorners[4];
		for(int i=0;i<4;++i)
			{
			sCorners[i][0]=i&0x1?screenSize[0]*unitScale:0.0;
			sCorners[i][1]=i&0x2?screenSize[1]*unitScale:0.0;
			sCorners[i][2]=0.0;
			pCorners[i][0]=i&0x1?1.0:0.0;
			pCorners[i][1]=i&0x2?1.0:0.0;
			pCorners[i][2]=0.0;
			pCorners[i]=screenTransform.inverseTransform(pScreenTransform.transform(pCorners[i]));
			pCorners[i][0]*=unitScale;
			pCorners[i][1]*=unitScale;
			}
		Geometry::ProjectiveTransformation<double,2> sHom=calcHomography(sCorners);
		Geometry::ProjectiveTransformation<double,2> pHom=calcHomography(pCorners);
		Geometry::ProjectiveTransformation<double,2> hom=pHom;
		hom.leftMultiply(Geometry::invert(sHom));
		for(int i=0;i<3;++i)
			for(int j=0;j<3;++j)
				hom.getMatrix()(i,j)/=hom.getMatrix()(2,2);
		
		#if 0
		std::cout<<"Homography matrix for projective transform: "<<pHom<<std::endl;
		std::cout<<"Homography matrix for screen transform: "<<sHom<<std::endl;
		std::cout<<"Screen correction homography matrix: "<<hom<<std::endl;
		#endif
		
		#if 0
		
		/* Do some experiments: */
		Geometry::ProjectiveTransformation<double,3> hom3=Geometry::ProjectiveTransformation<double,3>::identity;
		for(int i=0;i<3;++i)
			for(int j=0;j<3;++j)
				hom3.getMatrix()(i<2?i:3,j<2?j:3)=hom.getMatrix()(i,j);
		hom3.getMatrix()(2,0)=hom3.getMatrix()(3,0);
		hom3.getMatrix()(2,1)=hom3.getMatrix()(3,1);
		
		std::cout<<hom3<<std::endl;
		std::cout<<Geometry::invert(hom3)<<std::endl;
		std::cout<<hom3.transform(Geometry::HVector<double,3>(-1.0,-1.0,-1.0,1.0)).toPoint()<<std::endl;
		std::cout<<hom3.transform(Geometry::HVector<double,3>( 1.0,-1.0,-1.0,1.0)).toPoint()<<std::endl;
		std::cout<<hom3.transform(Geometry::HVector<double,3>(-1.0, 1.0,-1.0,1.0)).toPoint()<<std::endl;
		std::cout<<hom3.transform(Geometry::HVector<double,3>( 1.0, 1.0,-1.0,1.0)).toPoint()<<std::endl;
		std::cout<<hom3.transform(Geometry::HVector<double,3>(-1.0,-1.0, 1.0,1.0)).toPoint()<<std::endl;
		std::cout<<hom3.transform(Geometry::HVector<double,3>( 1.0,-1.0, 1.0,1.0)).toPoint()<<std::endl;
		std::cout<<hom3.transform(Geometry::HVector<double,3>(-1.0, 1.0, 1.0,1.0)).toPoint()<<std::endl;
		std::cout<<hom3.transform(Geometry::HVector<double,3>( 1.0, 1.0, 1.0,1.0)).toPoint()<<std::endl;
		
		#endif
		
		/* Print a configuration file section for the screen: */
		std::cout<<std::endl<<"Configuration settings for screen:"<<std::endl;
		std::cout<<"origin "<<screenTransform.getTranslation()*unitScale<<std::endl;
		std::cout<<"horizontalAxis "<<screenTransform.getDirection(0)<<std::endl;
		std::cout<<"width "<<screenSize[0]*unitScale<<std::endl;
		std::cout<<"verticalAxis "<<screenTransform.getDirection(1)<<std::endl;
		std::cout<<"height "<<screenSize[1]*unitScale<<std::endl;
		std::cout<<"offAxis true"<<std::endl;
		std::cout<<"homography ( ";
		for(int j=0;j<3;++j)
			{
			if(j>0)
				std::cout<<", \\"<<std::endl<<"             ";
			std::cout<<"( ";
			for(int i=0;i<3;++i)
				{
				if(i>0)
					std::cout<<", ";
				std::cout<<pHom.getMatrix()(i,j);
				}
			std::cout<<" )";
			}
		std::cout<<" )"<<std::endl;
		std::cout<<std::endl;
		}
	
	if(optitrackFileName!=0&&totalstationFileName!=0)
		{
		/*********************************************************************
		Calculate the optimal orthonormal transformation from tracking system
		coordinates to the normalized coordinate system by aligning ball
		positions observed by the tracking system with ball positions measured
		using the total station:
		*********************************************************************/
		
		/* Find an orthonormal transformation to align the tracking points with the ball points: */
		size_t numPoints=trackingPoints.size();
		if(numPoints>ballPoints.size())
			numPoints=ballPoints.size();
		
		/* Calculate the centroid of the tracking points: */
		Point::AffineCombiner tpCc;
		for(size_t i=0;i<numPoints;++i)
			tpCc.addPoint(trackingPoints[i]);
		Vector tpTranslation=tpCc.getPoint()-Point::origin;
		for(size_t i=0;i<numPoints;++i)
			trackingPoints[i]-=tpTranslation;
		ONTransformFitter ontf(numPoints,&trackingPoints[0],&ballPoints[0]);
		//ontf.setTransform(ONTransformFitter::Transform::rotate(ONTransformFitter::Transform::Rotation::rotateX(Math::rad(Scalar(90)))));
		ONTransformFitter::Scalar result=LevenbergMarquardtMinimizer<ONTransformFitter>::minimize(ontf);
		ONTransform tsCal=ontf.getTransform();
		tsCal*=ONTransform::translate(-tpTranslation); // Fold the centroid pre-translation back into the calibration transformation
		
		std::cout<<"Final distance: "<<result<<std::endl;
		std::cout<<"Tracking system calibration transformation: "<<tsCal<<std::endl;
		
		std::cout<<"Configuration settings for tracking calibrator: "<<std::endl;
		std::cout<<"transformation translate "<<tsCal.getTranslation()*unitScale<<" \\"<<std::endl;
		std::cout<<"               * scale "<<unitScale<<" \\"<<std::endl;
		std::cout<<"               * rotate "<<tsCal.getRotation().getAxis()<<", "<<Math::deg(tsCal.getRotation().getAngle())<<std::endl;
		
		/* Transform the tracking points with the result transformation: */
		for(PointList::iterator tpIt=trackingPoints.begin();tpIt!=trackingPoints.end();++tpIt)
			*tpIt=tsCal.transform(*tpIt+tpTranslation);
		}
	
	/* Initialize the navigation transformation: */
	Geometry::Box<Scalar,3> bbox=Geometry::Box<Scalar,3>::empty;
	for(PointList::const_iterator tpIt=trackingPoints.begin();tpIt!=trackingPoints.end();++tpIt)
		bbox.addPoint(*tpIt);
	for(PointList::const_iterator spIt=screenPoints.begin();spIt!=screenPoints.end();++spIt)
		bbox.addPoint(*spIt);
	for(PointList::const_iterator fpIt=floorPoints.begin();fpIt!=floorPoints.end();++fpIt)
		bbox.addPoint(*fpIt);
	for(PointList::const_iterator bpIt=ballPoints.begin();bpIt!=ballPoints.end();++bpIt)
		bbox.addPoint(*bpIt);
	
	Vrui::setNavigationTransformation(Geometry::mid(bbox.min,bbox.max),Geometry::dist(bbox.min,bbox.max));
	
	/* Create a virtual input device to move the tracking points interactively: */
	trackingPointsMover=Vrui::addVirtualInputDevice("TrackingPointsMover",0,0);
	// Vrui::getInputGraphManager()->setNavigational(trackingPointsMover,true);
	Vrui::NavTrackerState scaledDeviceT=Vrui::getInverseNavigationTransformation();
	scaledDeviceT*=trackingPointsMover->getTransformation();
	trackingPointsTransform=Vrui::TrackerState(scaledDeviceT.getTranslation(),scaledDeviceT.getRotation());
	trackingPointsTransform.doInvert();
	}
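
All of the examples above fit lines or planes with the same PCA recipe. A condensed sketch of that recipe as a standalone helper (the function name and iterator interface are assumptions; the eigenvalue ordering follows the usage above, where evs[0] is the largest and evs[2] the smallest):

/* Hypothetical helper: fit a plane to a point range and return its normal: */
template <class PointIterator>
Geometry::PCACalculator<3>::Vector fitPlaneNormal(PointIterator begin,PointIterator end)
	{
	Geometry::PCACalculator<3> pca;
	for(PointIterator pIt=begin;pIt!=end;++pIt)
		pca.accumulatePoint(*pIt); // Accumulate the sums needed for centroid and covariance
	pca.calcCentroid();
	pca.calcCovariance();
	double evs[3];
	pca.calcEigenvalues(evs);
	return pca.calcEigenvector(evs[2]); // The eigenvector of the smallest eigenvalue is the plane's normal
	}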