void GetHomographyInliers( MatchArray& aryInlier, const MatchArray& aryMatch,
                           const CFeatureArray& set1, const CFeatureArray& set2,
                           const CHomography& homo, float tol )
{
    float SQR_TOL = tol*tol;

    aryInlier.resize( aryMatch.size() );

    int k = 0;
    for( int i = 0; i < aryMatch.size(); ++i )
    {
        const CFeature* ft1 = set1[ aryMatch[i].first ];
        const CFeature* ft2 = set2[ aryMatch[i].second ];

        CvPoint2D64f pt = homo * cvPoint2D64f( ft1->x, ft1->y );

        double dx = pt.x - ft2->x;
        double dy = pt.y - ft2->y;

        if( dx*dx + dy*dy < SQR_TOL )   // a homography inlier
            aryInlier[k++] = aryMatch[i];
    }

    aryInlier.resize( k );
}
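/*
 * Illustrative sketch (not part of the original project): how the
 * "homo * cvPoint2D64f(...)" mapping above is assumed to behave, given the
 * nine-element parameterization (homo.d[0..8], with d[8] fixed to 1) that
 * RansacHomography fills in later in this file. The helper name
 * ApplyHomography is hypothetical.
 */
static CvPoint2D64f ApplyHomography( const double* h, CvPoint2D64f p )
{
    // projective mapping: divide by the homogeneous coordinate
    double w = h[6]*p.x + h[7]*p.y + h[8];
    return cvPoint2D64f( (h[0]*p.x + h[1]*p.y + h[2]) / w,
                         (h[3]*p.x + h[4]*p.y + h[5]) / w );
}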
/**
 * OptimizeSingle:
 * Input:
 *   cam1      - the first camera (already optimized)
 *   cam2      - the second camera with its intrinsic matrix initialized
 *   dR        - an initial relative 3x3 camera rotation matrix
 *   set1      - SIFT features in the first image
 *   set2      - SIFT features in the second image
 *   aryInlier - the homography inliers
 *
 * Output:
 *   cam2      - updated with its optimized focal length and pose
 */
void OptimizeSingle( const CCamera& cam1, CCamera& cam2, double* dR,
                     const CFeatureArray& set1, const CFeatureArray& set2,
                     const MatchArray& aryInlier )
{
    // Step 1. Initialize the camera pose of cam2

    // cam2's relative rotation to cam1
    CvMat matR = cvMat( 3, 3, CV_64F, dR );

    // cam1's absolute rotation
    double dRod1[3];
    CvMat matRod1 = cvMat( 3, 1, CV_64F, dRod1 );
    cam1.GetPose( dRod1 );

    double dRot1[9];
    CvMat matRot1 = cvMat( 3, 3, CV_64F, dRot1 );
    cvRodrigues2( &matRod1, &matRot1 );

    // compose dR and Rot1 to get cam2's initial absolute rotation
    // (write into a separate buffer rather than multiplying in place)
    double dRot2[9];
    CvMat matRot2 = cvMat( 3, 3, CV_64F, dRot2 );
    cvMatMul( &matR, &matRot1, &matRot2 );

    double dRod2[3];
    CvMat matRod2 = cvMat( 3, 1, CV_64F, dRod2 );
    cvRodrigues2( &matRot2, &matRod2 );

    cam2.SetPose( dRod2 );

    // Step 2. Now we can perform bundle adjustment for cam2
    CBundleAdjust ba( 1, BA_ITER );

    ba.SetCamera( &cam2, 0 );

    // set points
    for( int i = 0; i < aryInlier.size(); ++i )
    {
        const CFeature* ft1 = set1[ aryInlier[i].first ];
        const CFeature* ft2 = set2[ aryInlier[i].second ];

        double dir[3];
        cam1.GetRayDirectionWS( dir, cvPoint2D64f( ft1->x, ft1->y ) );

        // the 3d position
        CvPoint3D64f pt3 = cvPoint3D64f( dir[0]*radius, dir[1]*radius, dir[2]*radius );

        ba.SetPointProjection( pt3, 0, cvPoint2D64f( ft2->x, ft2->y ) );
    }

    ba.DoMotion();

    ba.GetAdjustedCamera( &cam2, 0 );
}
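/*
 * Sketch (an assumption, not taken from the original project): one common way
 * to obtain the initial relative rotation dR that OptimizeSingle and
 * OptimizePair expect. For a rotation-only camera pair, H ~ K2 * R * K1^(-1),
 * so R ~ K2^(-1) * H * K1; the SVD step snaps the scaled result to the nearest
 * rotation. The function name and the assumption that the 3x3 intrinsic
 * matrices are available as row-major double arrays are hypothetical.
 */
static void RelativeRotationFromHomography( double* dR, const double* dH,
                                            const double* dK1, const double* dK2 )
{
    double dK2inv[9], dT[9], dM[9], dW[9], dU[9], dVt[9];

    CvMat H     = cvMat( 3, 3, CV_64F, const_cast<double*>( dH ) );
    CvMat K1    = cvMat( 3, 3, CV_64F, const_cast<double*>( dK1 ) );
    CvMat K2    = cvMat( 3, 3, CV_64F, const_cast<double*>( dK2 ) );
    CvMat K2inv = cvMat( 3, 3, CV_64F, dK2inv );
    CvMat T     = cvMat( 3, 3, CV_64F, dT );
    CvMat M     = cvMat( 3, 3, CV_64F, dM );
    CvMat W     = cvMat( 3, 3, CV_64F, dW );
    CvMat U     = cvMat( 3, 3, CV_64F, dU );
    CvMat Vt    = cvMat( 3, 3, CV_64F, dVt );
    CvMat R     = cvMat( 3, 3, CV_64F, dR );

    cvInvert( &K2, &K2inv );          // K2^(-1)
    cvMatMul( &K2inv, &H, &T );       // K2^(-1) * H
    cvMatMul( &T, &K1, &M );          // K2^(-1) * H * K1, a rotation up to scale

    // project onto the nearest rotation: M = U*W*Vt  =>  R = U*Vt
    // (a det(R) sign check is omitted for brevity)
    cvSVD( &M, &W, &U, &Vt, CV_SVD_V_T );
    cvMatMul( &U, &Vt, &R );
}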
/**
 * OptimizePair:
 * Input:
 *   cam1      - the first camera with its intrinsic matrix and pose initialized ([R|T]=[I|0])
 *   cam2      - the second camera with its intrinsic matrix initialized
 *   dR        - an initial relative 3x3 camera rotation matrix
 *   set1      - SIFT features in the first image
 *   set2      - SIFT features in the second image
 *   aryInlier - the homography inliers
 *
 * Output:
 *   cam1      - updated with its optimized focal length
 *   cam2      - updated with its optimized focal length and pose
 */
void OptimizePair( CCamera& cam1, CCamera& cam2, double* dR,
                   const CFeatureArray& set1, const CFeatureArray& set2,
                   const MatchArray& aryInlier )
{
    CBundleAdjust ba( 2, BA_ITER );

    // Step 1. To perform bundle adjustment, we initialize cam1 and cam2
    //         as [K][I|0] and [K][R|0] respectively and optimize R using
    //         bundle adjustment.
    double dRod[3];
    CvMat matRod = cvMat( 3, 1, CV_64F, dRod );
    CvMat matR = cvMat( 3, 3, CV_64F, dR );
    cvRodrigues2( &matR, &matRod );

    cam2.SetPose( dRod );

    // Set cameras
    ba.SetCamera( &cam1, 0 );
    ba.SetCamera( &cam2, 1 );

    // Step 2. We still need to create a set of 3D points. From each homography inlier,
    //         a 3D point can be initialized by locating it on the ray that goes through
    //         its projection.
    for( int i = 0; i < aryInlier.size(); ++i )
    {
        const CFeature* ft1 = set1[ aryInlier[i].first ];
        const CFeature* ft2 = set2[ aryInlier[i].second ];

        double dir[3];
        cam1.GetRayDirectionWS( dir, cvPoint2D64f( ft1->x, ft1->y ) );

        // the initialized 3d position
        CvPoint3D64f pt3 = cvPoint3D64f( dir[0]*radius, dir[1]*radius, dir[2]*radius );

        // set the 3d point and its projections in both images
        ba.SetPointProjection( pt3, 0, cvPoint2D64f( ft1->x, ft1->y ) );
        ba.SetPointProjection( pt3, 1, cvPoint2D64f( ft2->x, ft2->y ) );
    }

    // perform bundle adjustment
    ba.DoMotionAndStructure();

    // retrieve the optimized cameras
    ba.GetAdjustedCamera( &cam1, 0 );
    ba.GetAdjustedCamera( &cam2, 1 );
}
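/*
 * Note (assumption): `radius` and BA_ITER are not defined in this excerpt and
 * are assumed to be globals provided elsewhere in the project. For a
 * rotation-only panorama the scene scale is arbitrary, so any fixed ray length
 * works; the values below are purely hypothetical placeholders.
 *
 *     const double radius  = 100.0;   // hypothetical
 *     const int    BA_ITER = 100;     // hypothetical
 */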
/**
 * RansacHomography:
 * Input:
 *   aryMatch  - an array of potential matches between two images
 *   inlierTol - the tolerance to regard a match as an inlier
 *   numIter   - number of iterations for RANSAC
 *
 * Output:
 *   homo      - the best estimated homography (with the max number of inliers)
 */
void RansacHomography( CHomography& homo, const MatchArray& aryMatch,
                       const CFeatureArray& set1, const CFeatureArray& set2,
                       float inlierTol, int numIter )
{
    const float SQR_TOL = inlierTol*inlierTol;
    const int NUM_SAMP = 6;

    int maxInlier = 0;

    double dA[ NUM_SAMP*2*8 ];
    double dB[ NUM_SAMP*2 ];
    double dH[8];

    // seed the random sampler
    struct timeval tv;
    gettimeofday( &tv, NULL );
    srand48( tv.tv_usec );

    vector<int> dup_index;

    // ToDo2: Find homography using RANSAC
    for( int i = 0; i < numIter; i++ )
    {
        // draw NUM_SAMP distinct matches at random
        dup_index.clear();
        for( int j = 0; j < NUM_SAMP; j++ )
        {
            long int index = lrand48() % aryMatch.size();

            bool duplicate = false;
            for( unsigned int k = 0; k < dup_index.size(); k++ )
            {
                if( dup_index[k] == index )
                    duplicate = true;
            }
            if( duplicate )
            {
                --j;
                continue;
            }
            dup_index.push_back( index );

            int ind1 = aryMatch[index].first;
            int ind2 = aryMatch[index].second;

            // two rows of the DLT system A*h = b for this correspondence
            dA[16*j + 0] = -set1[ind1]->x;
            dA[16*j + 1] = -set1[ind1]->y;
            dA[16*j + 2] = -1;
            dA[16*j + 3] = 0;
            dA[16*j + 4] = 0;
            dA[16*j + 5] = 0;
            dA[16*j + 6] = set2[ind2]->x * set1[ind1]->x;
            dA[16*j + 7] = set2[ind2]->x * set1[ind1]->y;

            dA[16*j + 8 + 0] = 0;
            dA[16*j + 8 + 1] = 0;
            dA[16*j + 8 + 2] = 0;
            dA[16*j + 8 + 3] = -set1[ind1]->x;
            dA[16*j + 8 + 4] = -set1[ind1]->y;
            dA[16*j + 8 + 5] = -1;
            dA[16*j + 8 + 6] = set2[ind2]->y * set1[ind1]->x;
            dA[16*j + 8 + 7] = set2[ind2]->y * set1[ind1]->y;

            dB[2*j]     = -set2[ind2]->x;
            dB[2*j + 1] = -set2[ind2]->y;
        }

        // solve the least-squares system for the 8 homography parameters (h33 = 1)
        CvMat mA = cvMat( NUM_SAMP*2, 8, CV_64FC1, dA );
        CvMat mB = cvMat( NUM_SAMP*2, 1, CV_64FC1, dB );
        CvMat mH = cvMat( 8, 1, CV_64FC1, dH );

        if( !cvSolve( &mA, &mB, &mH, CV_SVD ) )
            continue;   // degenerate sample

        // count the inliers of this candidate homography
        int inlier_count = 0;
        for( unsigned int j = 0; j < aryMatch.size(); j++ )
        {
            int ind1 = aryMatch[j].first;
            int ind2 = aryMatch[j].second;

            double x1 = set1[ind1]->x;
            double y1 = set1[ind1]->y;
            double x2 = set2[ind2]->x;
            double y2 = set2[ind2]->y;

            double h1p = dH[6]*x1 + dH[7]*y1 + 1;
            double x1p = ( dH[0]*x1 + dH[1]*y1 + dH[2] ) / h1p;
            double y1p = ( dH[3]*x1 + dH[4]*y1 + dH[5] ) / h1p;

            double dist = (x1p - x2)*(x1p - x2) + (y1p - y2)*(y1p - y2);

            if( dist < SQR_TOL )   // compare squared distance with squared tolerance
                inlier_count++;
        }

        // keep the homography with the most inliers
        if( inlier_count > maxInlier )
        {
            maxInlier = inlier_count;
            for( int k = 0; k < 8; k++ )
                homo.d[k] = dH[k];
            homo.d[8] = 1;
        }
    }

    printf( "homography inliers: %d(%d)\n", maxInlier, (int)aryMatch.size() );
}
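/*
 * Usage sketch (an assumption about how these routines fit together; the
 * original driver is not shown in this excerpt). BuildPanoramaCameras,
 * GetIntrinsics and RelativeRotationFromHomography (sketched earlier) are
 * hypothetical; RansacHomography, GetHomographyInliers, OptimizePair and
 * OptimizeSingle are the functions defined in this file.
 */
void BuildPanoramaCameras( vector<CCamera>& cams,
                           const vector<CFeatureArray>& sets,
                           const vector<MatchArray>& matches,   // matches[i]: image i -> i+1
                           float tol, int numIter )
{
    for( int i = 0; i + 1 < (int)cams.size(); ++i )
    {
        // 1. robustly estimate the homography between image i and image i+1
        CHomography homo;
        RansacHomography( homo, matches[i], sets[i], sets[i+1], tol, numIter );

        // 2. keep only the matches consistent with that homography
        MatchArray aryInlier;
        GetHomographyInliers( aryInlier, matches[i], sets[i], sets[i+1], homo, tol );

        // 3. initial relative rotation from the homography (hypothetical helpers)
        double dK1[9], dK2[9], dR[9];
        GetIntrinsics( cams[i], dK1 );        // hypothetical accessor
        GetIntrinsics( cams[i+1], dK2 );      // hypothetical accessor
        RelativeRotationFromHomography( dR, homo.d, dK1, dK2 );

        // 4. refine: the first pair jointly, every later camera against its predecessor
        if( i == 0 )
            OptimizePair( cams[0], cams[1], dR, sets[0], sets[1], aryInlier );
        else
            OptimizeSingle( cams[i], cams[i+1], dR, sets[i], sets[i+1], aryInlier );
    }
}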