// Invoke the undistortion API under test: the legacy C entry point when
// useCPlus is cleared, otherwise the C++ overload with the cv:: arguments.
void CV_UndistortPointsBadArgTest::run_func()
{
    if (!useCPlus)
    {
        cvUndistortPoints(_src_points, _dst_points, _camera_mat, _distortion_coeffs, matR, matP);
        return;
    }
    cv::undistortPoints(src_points, dst_points, camera_mat, distortion_coeffs, R, P);
}
//----------------------------------------------------------------- void Triangulation::undistort_point(const CameraCalibData& iCalibData, double& ioX, double& ioY) { // Store distorted point observation, and allocate memory for the ideal point observation CvMat* distorted = cvCreateMat(1, 1, CV_64FC2); CvMat* undistorted = cvCreateMat(1, 1, CV_64FC2); ((CvPoint2D64f*)distorted->data.db)->x = ioX; ((CvPoint2D64f*)distorted->data.db)->y = ioY; // Perform undistortion cvUndistortPoints(distorted, undistorted, &iCalibData.intrinsics, &iCalibData.distCoeffs, 0, &iCalibData.intrinsics); // Store the ideal location for output ioX = ((CvPoint2D64f*)undistorted->data.db)->x; ioY = ((CvPoint2D64f*)undistorted->data.db)->y; // Release temporary memory cvReleaseMat(&distorted); cvReleaseMat(&undistorted); }
// Back-project an image observation (u,v) with depth d to world coordinates
// (*x,*y,*z), modelling refraction at a water surface the camera looks through.
//
// u, v      : pixel coordinates of the observation.
// d         : depth relative to the water surface; d <= 0 means above the
//             surface (no refraction), d > 0 means below it.
// x, y, z   : output world coordinates (must be non-null).
// undistort : when true, (u,v) is first undistorted with the camera model.
//
// NOTE(review): relies on member matrices m_CameraMatrix, m_DistCoeffs,
// m_CameraMatrixNorm, m_R, m_S, m_CameraWorld and scalar
// m_dCameraHeightAboveWater being initialized elsewhere — not visible here.
void CTWithWater::imageAndDepthToWorld(double u, double v, double d,
                                       double* x, double* y, double* z,
                                       bool undistort)
{
    double xx, yy, t;
    // Homogeneous ray direction in normalized camera coordinates.
    CvMat* r = cvCreateMat(3, 1, CV_32FC1);
    if(undistort)
    {
        // 1x1 two-channel point containers for cvUndistortPoints.
        CvMat* I = cvCreateMat(1, 1, CV_32FC2);
        CvMat* Io = cvCreateMat(1, 1, CV_32FC2);
        cvSet2D(I, 0, 0, cvScalar(u,v));
        // P = m_CameraMatrixNorm keeps the result in pixel coordinates of
        // the normalized camera (re-normalized below).
        cvUndistortPoints(I, Io, m_CameraMatrix, m_DistCoeffs, NULL, m_CameraMatrixNorm);
        CvScalar s = cvGet2D(Io, 0, 0);
        xx = s.val[0];//cvGetReal1D(Io, 0);
        yy = s.val[1];//cvGetReal1D(Io, 1);
        cvReleaseMat(&I);
        cvReleaseMat(&Io);
    }
    else
    {
        xx = u;
        yy = v;
    }
    // Convert pixel coordinates to normalized camera coordinates:
    // subtract principal point (cx, cy), divide by focal lengths (fx, fy).
    xx = (xx - cvGetReal2D(m_CameraMatrixNorm, 0, 2))/cvGetReal2D(m_CameraMatrixNorm, 0, 0);
    yy = (yy - cvGetReal2D(m_CameraMatrixNorm, 1, 2))/cvGetReal2D(m_CameraMatrixNorm, 1, 1);
    cvSetReal1D(r, 0, xx);
    cvSetReal1D(r, 1, yy);
    cvSetReal1D(r, 2, 1.0);
    /* Rt_(3,:)*r = sum of third column of R times elements of r */
    t = xx*cvGetReal2D(m_R, 0, 2) + yy*cvGetReal2D(m_R, 1, 2) + cvGetReal2D(m_R, 2, 2);
    // Guard against a ray parallel to the surface (denominator of the scale).
    if(t == 0)
    {
        t = 1.0;
    }
    if(d <= 0)
    {
        /* d<= 0 => above water surface */
        // Scale the ray so it reaches height -d above the surface.
        t = (-m_dCameraHeightAboveWater-d)/t;
        /* r = t*R'*r + C */
        cvGEMM(m_R, r, t, m_CameraWorld, 1.0, r, CV_GEMM_A_T);
    }
    else
    {
        /* d > 0 => below water surface */
        // First intersect the ray with the water surface...
        t = -m_dCameraHeightAboveWater/t;
        /* S = t*R'*r */
        cvGEMM(m_R, r, t, NULL, 0, m_S, CV_GEMM_A_T);
        double Sx = cvGetReal1D(m_S, 0);
        double Sy = cvGetReal1D(m_S, 1);
        // ...then correct the radial distance for refraction at depth d.
        double phi = atan2(Sy, Sx);
        double rS = sqrt(Sx*Sx + Sy*Sy);
        double rP = calculateRpFromRs(rS, d, m_dCameraHeightAboveWater);
        cvSetReal1D(r, 0, rP*cos(phi));
        cvSetReal1D(r, 1, rP*sin(phi));
        cvSetReal1D(r, 2, -m_dCameraHeightAboveWater-d);
        // Translate from camera-centred to world coordinates.
        cvAdd(r, m_CameraWorld, r);
    }
    *x = cvGetReal1D(r, 0);
    *y = cvGetReal1D(r, 1);
    *z = cvGetReal1D(r, 2);
    cvReleaseMat(&r);
}
// Image-processing handler for the right camera (was: 图片处理函数).
// Receives one 640x480 8-bit grayscale frame per message (pixel data in
// wParam), performs three-frame differencing to build a motion mask, extracts
// connected components, undistorts their centres, then hands the slot
// T_OF_TRACK off to the pairing thread via To_Handle_Right / PostMessage.
// NOTE(review): state is kept in function-local statics, so this handler is
// single-thread only; To_Handle_Right is polled, not synchronized.
void CImageHandleThread_R::ImageHandleThread_R_Message(WPARAM wParam, LPARAM lParam)
{
    ////Record the image processing time
    //int Time_Temp = (int)(GETTIME() + 0.5);
    static int Nframe = -1;
    static int T_OF_TRACK = 0;   // ring index of the slot being filled
    void *data = (void *)wParam; // raw 640*480 grayscale pixels for this frame
    static int NFrame = 1;       // 1 / 2 = warm-up frames; >2 = steady state
    static IplImage* Image1 = cvCreateImage(cvSize(640, 480), 8, 1);
    static IplImage* Image2 = cvCreateImage(cvSize(640, 480), 8, 1);
    static IplImage* Image3 = cvCreateImage(cvSize(640, 480), 8, 1);
    static IplImage* Imask1 = cvCreateImage(cvSize(640, 480), 8, 1);
    static IplImage* Imask2 = cvCreateImage(cvSize(640, 480), 8, 1);
    static IplImage* Result = cvCreateImage(cvSize(640, 480), 8, 1);
    static IplImage* Temp;//Used to swap buffer addresses, saving a copy.
    if (NFrame == 1)
    {
        // First frame: just store it.
        memcpy(Image1->imageData, data, 640 * 480);
        NFrame++;
    }
    else
    {
        if (NFrame == 2)
        {
            // Second frame: store it and build the first difference mask.
            memcpy(Image2->imageData, data, 640 * 480);
            cvAbsDiff(Image1, Image2, Imask1);
            cvThreshold(Imask1, Imask1, BinaryTherod, 255, CV_THRESH_BINARY);
            NFrame++;
        }
        else
        {
            // Steady state: difference against the previous frame and AND the
            // two masks — motion present in both consecutive differences.
            memcpy(Image3->imageData, data, 640 * 480);
            cvAbsDiff(Image2, Image3, Imask2);
            cvThreshold(Imask2, Imask2, BinaryTherod, 255, CV_THRESH_BINARY);
            cvAnd(Imask1, Imask2, Src_Right_Gray[T_OF_TRACK]);//Modifies the values in memory
            // Busy-wait until the pairing thread has consumed this slot.
            while (To_Handle_Right[T_OF_TRACK] == true)
            {
                cout << "R_配对线程处理不过来" << endl;
                Sleep(1);
                //system("pause");
            }
            //Store the time
            //The time must also be written after processing completes!!!
            //Time_R[T_OF_TRACK] = int(lParam);
            Time_R[T_OF_TRACK] = GETTIME();
            //cvDilate(Src_Right_Gray[T_OF_TRACK], Src_Right_Gray[T_OF_TRACK]);
            //cvDilate(Src_Right_Gray[T_OF_TRACK], Src_Right_Gray[T_OF_TRACK]);
            //cvErode(Src_Right_Gray[T_OF_TRACK], Src_Right_Gray[T_OF_TRACK]);
            //cvErode(Src_Right_Gray[T_OF_TRACK], Src_Right_Gray[T_OF_TRACK]);
            ////Erode with a 2x2 kernel, dilate with a 3x3 kernel — workable
            //static int Mask[4] = { 1, 1, 1, 1 };
            //static IplConvKernel* THelementstatic = cvCreateStructuringElementEx(2, 2, 0, 0, CV_SHAPE_RECT, Mask);
            //cvErode(Src_Right_Gray[T_OF_TRACK], Src_Right_Gray[T_OF_TRACK], THelementstatic);
            //cvDilate(Src_Right_Gray[T_OF_TRACK], Src_Right_Gray[T_OF_TRACK]);
            PointDensity_R(Src_Right_Gray[T_OF_TRACK], PointDensityTherod);
            //Be sure to use this distinct function to find connected components
            FindTheCounter_define_R(Src_Right_Gray[T_OF_TRACK], TheCounter_Right[T_OF_TRACK], &NumOfCounter_Right[T_OF_TRACK], KofCounter, MinCounter, MaxCounter, MaxCounterNum, GIRTHAREA);//At 9m the ball is only 6-7 pixels
            if (NumOfCounter_Right[T_OF_TRACK])
            {
                //Undistort the centres of the found connected components
                //Fill the array
                for (int i = 0; i < NumOfCounter_Right[T_OF_TRACK]; i++)
                {
                    CenterPointOfCounter_R[T_OF_TRACK][i][0] = TheCounter_Right[T_OF_TRACK][i].x;
                    CenterPointOfCounter_R[T_OF_TRACK][i][1] = TheCounter_Right[T_OF_TRACK][i].y;
                }
                //NOTE(review): the loop below is an exact duplicate of the one
                //above (copy-paste); harmless but redundant.
                //Undistort the centres of the found connected components
                //Fill the array
                for (int i = 0; i < NumOfCounter_Right[T_OF_TRACK]; i++)
                {
                    CenterPointOfCounter_R[T_OF_TRACK][i][0] = TheCounter_Right[T_OF_TRACK][i].x;
                    CenterPointOfCounter_R[T_OF_TRACK][i][1] = TheCounter_Right[T_OF_TRACK][i].y;
                }
                //NOTE(review): a static CvMat is initialized only on the first
                //call, so cols and data.db are re-patched below on every call
                //to track the current slot and count.
                static CvMat ORGpoint = cvMat(1, NumOfCounter_Right[T_OF_TRACK], CV_64FC2, &CenterPointOfCounter_R[T_OF_TRACK][0][0]);//double corresponds to CV_64FC2
                ORGpoint.cols = NumOfCounter_Right[T_OF_TRACK];
                ORGpoint.data.db = CenterPointOfCounter_R[T_OF_TRACK][0];
                //Undistort the points (in place)
                cvUndistortPoints(&ORGpoint, &ORGpoint, &CameraMatri2, &dist_coeffs2, Rr, Pr);
            }
            //Set the flag: slot is ready for the pairing thread
            To_Handle_Right[T_OF_TRACK] = true;
            //Notify the parent thread that image processing is finished
            ::PostMessage(m_hWnd, WM_DEAL_WIHT_IMAGE_TEMP, T_OF_TRACK, 1);//1 identifies R
            //Swap buffer addresses
            //to avoid copying
            Temp = Image1;
            Image1 = Image2;
            Image2 = Image3;
            Image3 = Temp;
            //Imask2 can serve as the next iteration's Imask1, saving one frame of work
            Temp = Imask1;
            Imask1 = Imask2;
            Imask2 = Temp;
            // Advance the ring index.
            T_OF_TRACK++;
            if (T_OF_TRACK == NUM_T)T_OF_TRACK = 0;
        }
    }
    //double a = (GETTIME() - Time_Temp);
    //if (a > 13)
    //{
    //	cout << "右相机图片处理进程时间危险" << a << endl;
    //}
}
// Finish stereo calibration from the chessboard corner samples collected so
// far: run cvStereoCalibrate, then build rectification maps (mx1/my1,
// mx2/my2) with Hartley's uncalibrated method for later cvRemap use.
// Returns RESULT_OK and sets calibrationDone on completion.
// NOTE(review): uses members sampleCount, cornersN/X/Y, objectPoints, points,
// npoints, imageSize and the map matrices — all declared elsewhere.
int StereoVision::calibrationEnd()
{
    calibrationStarted = false;
    // ARRAY AND VECTOR STORAGE:
    // Stack-backed CvMat headers for intrinsics (M), distortion (D),
    // rotation/translation (R, T) and essential/fundamental matrices (E, F).
    double M1[3][3], M2[3][3], D1[5], D2[5];
    double R[3][3], T[3], E[3][3], F[3][3];
    CvMat _M1,_M2,_D1,_D2,_R,_T,_E,_F;
    _M1 = cvMat(3, 3, CV_64F, M1 );
    _M2 = cvMat(3, 3, CV_64F, M2 );
    _D1 = cvMat(1, 5, CV_64F, D1 );
    _D2 = cvMat(1, 5, CV_64F, D2 );
    _R = cvMat(3, 3, CV_64F, R );
    _T = cvMat(3, 1, CV_64F, T );
    _E = cvMat(3, 3, CV_64F, E );
    _F = cvMat(3, 3, CV_64F, F );
    // HARVEST CHESSBOARD 3D OBJECT POINT LIST:
    // Ideal board coordinates: corner (i,j) of every sample sits at (i,j,0).
    objectPoints.resize(sampleCount*cornersN);
    for(int k=0; k<sampleCount; k++)
        for(int i = 0; i < cornersY; i++ )
            for(int j = 0; j < cornersX; j++ )
                objectPoints[k*cornersY*cornersX + i*cornersX + j] = cvPoint3D32f(i, j, 0);
    // One entry per sample, each holding the per-view corner count.
    npoints.resize(sampleCount,cornersN);
    int N = sampleCount * cornersN;
    CvMat _objectPoints = cvMat(1, N, CV_32FC3, &objectPoints[0] );
    CvMat _imagePoints1 = cvMat(1, N, CV_32FC2, &points[0][0] );
    CvMat _imagePoints2 = cvMat(1, N, CV_32FC2, &points[1][0] );
    CvMat _npoints = cvMat(1, npoints.size(), CV_32S, &npoints[0] );
    // Initial guesses: identity intrinsics, zero distortion.
    cvSetIdentity(&_M1);
    cvSetIdentity(&_M2);
    cvZero(&_D1);
    cvZero(&_D2);
    //CALIBRATE THE STEREO CAMERAS
    cvStereoCalibrate( &_objectPoints, &_imagePoints1, &_imagePoints2, &_npoints,
        &_M1, &_D1, &_M2, &_D2, imageSize, &_R, &_T, &_E, &_F,
        cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, 1e-5),
        CV_CALIB_FIX_ASPECT_RATIO + CV_CALIB_ZERO_TANGENT_DIST + CV_CALIB_SAME_FOCAL_LENGTH );
    //Always work in undistorted space
    // (in-place undistortion of the measured corners; P = M keeps pixels)
    cvUndistortPoints( &_imagePoints1, &_imagePoints1,&_M1, &_D1, 0, &_M1 );
    cvUndistortPoints( &_imagePoints2, &_imagePoints2,&_M2, &_D2, 0, &_M2 );
    //COMPUTE AND DISPLAY RECTIFICATION
    double R1[3][3], R2[3][3];
    CvMat _R1 = cvMat(3, 3, CV_64F, R1);
    CvMat _R2 = cvMat(3, 3, CV_64F, R2);
    //HARTLEY'S RECTIFICATION METHOD
    double H1[3][3], H2[3][3], iM[3][3];
    CvMat _H1 = cvMat(3, 3, CV_64F, H1);
    CvMat _H2 = cvMat(3, 3, CV_64F, H2);
    CvMat _iM = cvMat(3, 3, CV_64F, iM);
    cvStereoRectifyUncalibrated( &_imagePoints1,&_imagePoints2, &_F,
        imageSize, &_H1, &_H2, 3 );
    // Convert the pixel-space homographies H into rectification rotations:
    // R = M^-1 * H * M (done per camera, reusing _iM as scratch).
    cvInvert(&_M1, &_iM);
    cvMatMul(&_H1, &_M1, &_R1);
    cvMatMul(&_iM, &_R1, &_R1);
    cvInvert(&_M2, &_iM);
    cvMatMul(&_H2, &_M2, &_R2);
    cvMatMul(&_iM, &_R2, &_R2);
    //Precompute map for cvRemap()
    // Release any maps from a previous calibration before reallocating.
    cvReleaseMat(&mx1);
    cvReleaseMat(&my1);
    cvReleaseMat(&mx2);
    cvReleaseMat(&my2);
    mx1 = cvCreateMat( imageSize.height,imageSize.width, CV_32F );
    my1 = cvCreateMat( imageSize.height,imageSize.width, CV_32F );
    mx2 = cvCreateMat( imageSize.height,imageSize.width, CV_32F );
    my2 = cvCreateMat( imageSize.height,imageSize.width, CV_32F );
    cvInitUndistortRectifyMap(&_M1,&_D1,&_R1,&_M1,mx1,my1);
    cvInitUndistortRectifyMap(&_M2,&_D2,&_R2,&_M2,mx2,my2);
    calibrationDone = true;
    return RESULT_OK;
}