void CamCalib::CalibrateCamera(CvSize &image_size) {
    if (_intrinsic_matrix) cvReleaseMat(&_intrinsic_matrix);
    if (_distortion_coeffs) cvReleaseMat(&_distortion_coeffs);
    if (_mapx) cvReleaseImage(&_mapx);
    if (_mapy) cvReleaseImage(&_mapy);

    _intrinsic_matrix = cvCreateMat(3, 3, CV_32FC1);
    _distortion_coeffs = cvCreateMat(4, 1, CV_32FC1);

    // Initialize the intrinsic matrix with a focal-length ratio of 1.0
    CV_MAT_ELEM( *_intrinsic_matrix, float, 0, 0 ) = 1.0f;
    CV_MAT_ELEM( *_intrinsic_matrix, float, 1, 1 ) = 1.0f;

    // The actual camera calibration
    cvCalibrateCamera2 (_object_points, _image_points, _point_counts, image_size,
                        _intrinsic_matrix, _distortion_coeffs, NULL, NULL, 0);

    // Save the intrinsic matrix and distortion coefficients to files
    cvSave("Intrinsics.xml", _intrinsic_matrix);
    cvSave("Distortion.xml", _distortion_coeffs);

    // Create the undistortion maps
    _mapx = cvCreateImage( image_size, IPL_DEPTH_32F, 1 );
    _mapy = cvCreateImage( image_size, IPL_DEPTH_32F, 1 );

    // Build the undistortion maps
    cvInitUndistortMap (_intrinsic_matrix, _distortion_coeffs, _mapx, _mapy);
}
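CalibrateCamera() writes the intrinsics and distortion coefficients to XML. For reference, a minimal sketch of the matching read path, assuming the two files produced above exist; the input image name is hypothetical:

// Hedged sketch: reload the saved calibration and undistort one image.
CvMat *intrinsic  = (CvMat *)cvLoad("Intrinsics.xml");
CvMat *distortion = (CvMat *)cvLoad("Distortion.xml");
IplImage *frame = cvLoadImage("frame.png", 1);                 // hypothetical input
IplImage *mapx = cvCreateImage(cvGetSize(frame), IPL_DEPTH_32F, 1);
IplImage *mapy = cvCreateImage(cvGetSize(frame), IPL_DEPTH_32F, 1);
cvInitUndistortMap(intrinsic, distortion, mapx, mapy);         // build the maps once
IplImage *undistorted = cvCloneImage(frame);
cvRemap(frame, undistorted, mapx, mapy);                       // apply them per image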
bool Classifier::kmeans(DataSet *data) { cout << "------------------------------------------" << endl; cout << "\t\tK-Means" << endl; if (kmeans_load) { cout << "Loading..." << endl; centers = (CvMat *)cvLoad("centers.dat"); data->samples = (CvMat *)cvLoad("samples.dat"); data->responses = (CvMat *)cvLoad("responses.dat"); data->centers = centers; cout << "Loaded Successfully" << endl; return true; } CvMat *desc = data->kmeans_input(); data->clusters = cvCreateMat(data->num_samples, 1, CV_32SC1); centers = cvCreateMat(num_clusters, SURF_SIZE, CV_32FC1); data->centers = centers; cout << "Running with k = " << num_clusters << endl; flush(cout); cvKMeans2( desc, // samples num_clusters, // clusters data->clusters, // labels cvTermCriteria( CV_TERMCRIT_EPS|CV_TERMCRIT_ITER, // End criteria 10, // Max iter 0.1), //accuracy 1, // attempts &rng, //rng 0, // flags centers, // centers NULL // compactness ); if (kmeans_save) { cvSave("centers.dat", centers); cvSave("samples.dat", data->cluster_samples() ); cvSave("responses.dat", data->cluster_responses() ); cout << "Saved!" << endl; } cvReleaseMat(&desc); data->to_kmeans = NULL; return true; }
int main(int argc, char * argv[])
{
  char keys[1<<12];
  sprintf(keys,
          "{ s | solver | | location of solver file }"
          "{ tr | trainsize | 5000 | number of training samples }"
          "{ ts | testsize | 1000 | number of testing samples }"
          "{ h | help | false | display this help message }");
  CvCommandLineParser parser(argc, argv, keys);
  const int display_help = parser.get<bool>("help");
  if (display_help) { parser.printParams(); return 0; }
  // Keep the string alive for the lifetime of the pointer; calling c_str()
  // on the temporary returned by get<string>() would leave it dangling.
  const string solver_location = parser.get<string>("solver");
  const char * solver_filename = solver_location.c_str();
  Network * cnn = new Network();
  cnn->loadSolver(solver_filename);
  const char * training_filename = "data/svhn/train/%d.png";
  const char * response_filename = "data/svhn/train/digitStruct.xml";
  const char * testing_filename  = "data/svhn/test/%d.png";
  const char * expected_filename = "data/svhn/test/digitStruct.xml";
  const char * training_filename_xml = cnn->solver()->training_filename();
  const char * response_filename_xml = cnn->solver()->response_filename();
  const char * testing_filename_xml  = cnn->solver()->testing_filename();
  const char * expected_filename_xml = cnn->solver()->expected_filename();
  const int n_train_samples = parser.get<int>("trainsize");
  const int n_test_samples  = parser.get<int>("testsize");
  const int ndigits = 3;
  fprintf(stderr, "Loading SVHN Images ...\n");
  CvMat * response = icvReadSVHNLabels((char*)response_filename, n_train_samples);
  CvMat * training = icvReadSVHNImages((char*)training_filename, response);
  assert(CV_MAT_TYPE(training->type) == CV_32F);
  CvMat * expected = icvReadSVHNLabels((char*)expected_filename, n_test_samples);
  CvMat * testing  = icvReadSVHNImages((char*)testing_filename, expected);
  CvMat * response_mat = icvConvertLabels(response);
  CvMat * expected_mat = icvConvertLabels(expected);
  fprintf(stderr, "%d training samples generated!\n", training->rows);
  fprintf(stderr, "%d testing samples generated!\n", testing->rows);
  cvSave(training_filename_xml, training);
  cvSave(response_filename_xml, response_mat);
  cvSave( testing_filename_xml, testing);
  cvSave(expected_filename_xml, expected_mat);
  cvReleaseMat(&training);
  cvReleaseMat(&response);
  cvReleaseMat(&response_mat);
  cvReleaseMat(&testing);
  cvReleaseMat(&expected);
  cvReleaseMat(&expected_mat);
  return 0;
}
int mixImages(char *filename, char *filename2, char *output) {
    image1 = cvLoadImage(filename, 1);
    assert( image1 != 0 );
    image2 = cvLoadImage(filename2, 1);
    assert( image2 != 0 );

    // template size
    int width = image2->width;
    int height = image2->height;

    // set the region of interest
    cvSetImageROI(image1, cvRect(0, 0, width, height));

    // the blend target must match the ROI size
    result = cvCreateImage(cvSize(width, height), image1->depth, image1->nChannels);

    // weighted sum
    cvAddWeighted(image1, 0.5, image2, 0.5, 0.0, result);

    // reset the region of interest
    cvResetImageROI(image1);

    // write the blended image (cvSave stores XML/YAML data, not image files)
    cvSaveImage(output, result);

    // release resources
    cvReleaseImage( &image1 );
    cvReleaseImage( &image2 );
    cvReleaseImage( &result );
    return 0;
}
int main( int argc, char** argv ) { const char* size_opt = "--size="; char comment[1024]; CvHaarClassifierCascade* cascade = 0; CvSize size; help(); if( argc != 4 || strncmp( argv[1], size_opt, strlen(size_opt) ) != 0 ) { help(); return -1; } sscanf( argv[1], "--size=%ux%u", &size.width, &size.height ); cascade = cvLoadHaarClassifierCascade( argv[2], size ); if( !cascade ) { fprintf( stderr, "Input cascade could not be found/opened\n" ); return -1; } sprintf( comment, "Automatically converted from %s, window size = %dx%d", argv[2], size.width, size.height ); cvSave( argv[3], cascade, 0, comment, cvAttrList(0,0) ); return 0; }
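This is the cascade-conversion utility from the OpenCV C samples: it takes the detection window size, the old cascade, and the output file on the command line. An invocation might look like this (binary name and paths are illustrative):

./convert_cascade --size=24x24 haarcascade_frontalface_alt.xml converted_cascade.xml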
CvMat* BackgroundModel::_computeHomography() { int n = 4; // 4 vertices CvMat *src = cvCreateMat(n, 2, CV_32FC1); CvMat *dst = cvCreateMat(n, 2, CV_32FC1); CvMat *homography = cvCreateMat(3, 3, CV_32FC1); CV_MAT_ELEM(*src, float, 0, 0) = _vertex[0].x; // topleft CV_MAT_ELEM(*src, float, 0, 1) = _vertex[0].y; CV_MAT_ELEM(*src, float, 1, 0) = _vertex[1].x; // topright CV_MAT_ELEM(*src, float, 1, 1) = _vertex[1].y; CV_MAT_ELEM(*src, float, 2, 0) = _vertex[2].x; // bottomright CV_MAT_ELEM(*src, float, 2, 1) = _vertex[2].y; CV_MAT_ELEM(*src, float, 3, 0) = _vertex[3].x; // bottomleft CV_MAT_ELEM(*src, float, 3, 1) = _vertex[3].y; CV_MAT_ELEM(*dst, float, 0, 0) = 0.0f; // topleft CV_MAT_ELEM(*dst, float, 0, 1) = 0.0f; CV_MAT_ELEM(*dst, float, 1, 0) = 1.0f; // topright CV_MAT_ELEM(*dst, float, 1, 1) = 0.0f; CV_MAT_ELEM(*dst, float, 2, 0) = 1.0f; // bottomright CV_MAT_ELEM(*dst, float, 2, 1) = 1.0f; CV_MAT_ELEM(*dst, float, 3, 0) = 0.0f; // bottomleft CV_MAT_ELEM(*dst, float, 3, 1) = 1.0f; cvFindHomography(src, dst, homography); cvSave("data/bgmodel/homography.yaml", homography); cvReleaseMat(&src); cvReleaseMat(&dst); return homography; }
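_computeHomography() maps the four vertices onto the unit square and saves the result as YAML. A hedged sketch of the read side, scaling the unit-square output up to a concrete resolution; the 256x256 target and the source image are assumptions:

CvMat *H = (CvMat *)cvLoad("data/bgmodel/homography.yaml");
// Scale rows 0 and 1 so the unit square maps onto a 256x256 image.
for (int c = 0; c < 3; c++) {
    CV_MAT_ELEM(*H, float, 0, c) *= 256.0f;
    CV_MAT_ELEM(*H, float, 1, c) *= 256.0f;
}
IplImage *frame = cvLoadImage("frame.png", 1);  // hypothetical source image
IplImage *rectified = cvCreateImage(cvSize(256, 256), frame->depth, frame->nChannels);
cvWarpPerspective(frame, rectified, H, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll(0));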
bool VisionPipeLine::storeLocalizationCalibrationData() { if (_calibrationData.bindedPoints.size() < 4) { return false; } cv::Mat_<float> mat(_calibrationData.bindedPoints.size(),2); for (int pos = 0; pos < _calibrationData.bindedPoints.size() ;pos ++) { mat(pos, 0) = _calibrationData.bindedPoints[pos].x; mat(pos, 1) = _calibrationData.bindedPoints[pos].y; } std::string filepath = getPlatformConfigPrefix(); filepath+= FILEPATH_LOC_CALIB_FOLDER; if (g_config_bundle.model_id == 1) { filepath+= FILEPATH_CONFIG_NEWMODEL_PREFIX; } filepath+=FILEPATH_LOC_CALIB_DATA; try { CvMat tmpMat = (CvMat)mat; cvSave(filepath.c_str(), &tmpMat); } catch (...) { printf("Cannot save the localization data to external files.\n"); } return true; }
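The points are built as a cv::Mat but saved through the C persistence API. A hedged sketch of the matching load path, assuming OpenCV 2.x interop; the filename stands in for the path assembled above:

CvMat *stored = (CvMat *)cvLoad("loc_calib.xml");   // stand-in for the real filepath
cv::Mat points(stored, true);                       // deep copy back into a cv::Mat
cvReleaseMat(&stored);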
void CvMatrix::save( const char* filename, const char* matname, const int* params ) { if( !matrix ) return; if( icvIsXmlOrYaml( filename ) ) cvSave( filename, matrix, matname ); else cvSaveImage( filename, matrix, params ); }
void CvImage::save( const char* filename, const char* imgname, const int* params ) { if( !image ) return; if( icvIsXmlOrYaml( filename ) ) cvSave( filename, image, imgname ); else cvSaveImage( filename, image, params ); }
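Both wrappers dispatch on the file extension: XML/YAML filenames go through the persistence API (cvSave), anything else through the image codecs (cvSaveImage). For reference, a minimal persistence round trip; the names are illustrative:

CvMat *m = cvCreateMat(2, 2, CV_32FC1);
cvSetIdentity(m, cvRealScalar(1.0));
cvSave("m.yml", m, "my_matrix");                       // the object name is optional
CvMat *m2 = (CvMat *)cvLoad("m.yml", 0, "my_matrix");  // a NULL name loads the first object
cvReleaseMat(&m);
cvReleaseMat(&m2);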
int calcSSIM :: print_map() {
    if (ssim_map == NULL) {
        cout << "Error: no SSIM index map has been created.\n";
        return 0;
    }
    cvSave("imgSSIM.xml", ssim_map, NULL, "Testing index map");
    return 1;
}
int calcQualityIndex :: print_map() {
    if (image_quality_map == NULL) {
        cout << "Error: no quality index map has been created.\n";
        return 0;
    }
    cvSave("imgQI.xml", image_quality_map, NULL, "Testing index map");
    return 1;
}
int main( int argc, char* argv[] ) { const char* filenameVi = argc == 2 ? argv[1] : "..\\..\\..\\MVI_0033.MOV"; captVi = cvCreateFileCapture( filenameVi ); CvMat* intrinsic = cvCreateMat( 3, 3, CV_32FC1 ); CvMat* distortion = cvCreateMat( 5, 1, CV_32FC1 ); const int board_w = /*atoi(argv[2])*/ 8; const int board_h = /*atoi(argv[3])*/ 6; int n_boards = /*atoi(argv[4])*/ (int)cvGetCaptureProperty( captVi, CV_CAP_PROP_FRAME_COUNT ); int nb = (int)(n_boards/1000); calibrCam( captVi, board_w, board_h, nb, intrinsic, distortion ); cvSave( "Intrinsics_3500_18_fK3.xml", intrinsic ); cvSave( "Distortion_3500_18_fK3.xml", distortion ); return 0; }
bool BackgroundModel::save() { ofstream f("data/bgmodel/vertices.txt"); if(!f.is_open()) { std::cerr << "[error] BackgroundModel::save - can't open vertices file" << std::endl; return false; } for(int i=0; i<4; i++) f << _vertex[i].x << " " << _vertex[i].y << std::endl; f << std::endl; for(int i=0; i<4; i++) f << _3dvertex[i].x << " " << _3dvertex[i].y << " " << _3dvertex[i].z << std::endl; cvSave("data/bgmodel/image.yaml", _image); cvSave("data/bgmodel/mask.yaml", _mask); cvSave("data/bgmodel/mean.yaml", _mean); cvSave("data/bgmodel/sd.yaml", _sd); cvSave("data/bgmodel/unmasked.yaml", _unmaskedImage); return true; }
void CvMatrix::save( const char* filename, const char* matname, const int* params ) { if( !matrix ) return; if( icvIsXmlOrYaml( filename ) ) cvSave( filename, matrix, matname ); #ifdef HAVE_OPENCV_HIGHGUI else cvSaveImage( filename, matrix, params ); #else (void)params; #endif }
void CvImage::save( const char* filename, const char* imgname, const int* params ) { if( !image ) return; if( icvIsXmlOrYaml( filename ) ) cvSave( filename, image, imgname ); #ifdef HAVE_OPENCV_HIGHGUI else cvSaveImage( filename, image, params ); #else (void)params; #endif }
// Prints the index maps of all levels into separate XML files
int calcMSSSIM :: print_map() {
    if (ms_ssim_map == NULL) {
        cout << "Error: no MS-SSIM index map has been created.\n";
        return 0;
    }
    char file_name[50];
    // Write one MS-SSIM map per level
    for (int i = 0; i < level; i++) {
        sprintf(file_name, "img_MS-SSIM_map_level_%d.xml", i);
        cvSave(file_name, ms_ssim_map[i], NULL, "Testing MS-SSIM index map");
    }
    return 1;
}
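A hedged sketch of reading one of those levels back, assuming the maps were stored as IplImage:

char file_name[50];
sprintf(file_name, "img_MS-SSIM_map_level_%d.xml", 0);
IplImage *level0 = (IplImage *)cvLoad(file_name);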
void CvMatrix::save( const char* filename, const char* matname, const int* params )
{
    if( !matrix )
        return;
    if( icvIsXmlOrYaml( filename ) )
        cvSave( filename, matrix, matname );
    else
    {
        if( save_image )
            save_image( filename, matrix, params );
        else
            CV_Error( CV_StsNotImplemented,
            "Saving a matrix in such a format requires HighGUI.\n"
            "Link it to your program and call any function from it\n" );
    }
}
void ShapeClassifier::Save() { if (!isTrained) return; Classifier::Save(); USES_CONVERSION; WCHAR filename[MAX_PATH]; // save the contour data wcscpy(filename,directoryName); wcscat(filename, FILE_CONTOUR_NAME); const char* contour_attrs[] = { "recursive", "1", 0 }; cvSave(W2A(filename), templateContours, 0, 0, cvAttrList(contour_attrs,0)); }
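The "recursive" attribute makes cvSave walk the whole contour tree instead of a single sequence. Loading a sequence saved this way needs a caller-owned memory storage; a minimal sketch, where "contours.xml" stands in for the path built from directoryName and FILE_CONTOUR_NAME:

CvMemStorage* storage = cvCreateMemStorage(0);
CvSeq* loadedContours = (CvSeq*)cvLoad("contours.xml", storage);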
void saveFramePoses(const string& dirname, vector<FramePose>& framePoses) {
  // TODO: for now, turn poses into a CvMat of numOfKeyFrames x 7 (index, rod[3], shift[3])
  // Use a vector rather than a variable-length array, which is not standard C++.
  vector<double> _poses(framePoses.size()*7);
  CvMat _framePoses = cvMat(framePoses.size(), 7, CV_64FC1, &_poses[0]);
  int i = 0;
  for (vector<FramePose>::const_iterator iter = framePoses.begin(); iter != framePoses.end(); iter++, i++) {
    _poses[i*7 + 0] = iter->mIndex;
    _poses[i*7 + 1] = iter->mRod.x;
    _poses[i*7 + 2] = iter->mRod.y;
    _poses[i*7 + 3] = iter->mRod.z;
    _poses[i*7 + 4] = iter->mShift.x;
    _poses[i*7 + 5] = iter->mShift.y;
    _poses[i*7 + 6] = iter->mShift.z;
  }
  if (i > 0) {
    string framePosesFilename("framePoses.xml");
    cvSave((dirname + framePosesFilename).c_str(), &_framePoses, "index-rod3-shift3",
           "indices, rodrigues and shifts w.r.t. starting frame");
  }
}
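A hedged sketch of the inverse: reload the 7-column pose matrix and unpack one row. The per-row layout, as written above, is index, Rodrigues x/y/z, shift x/y/z; prepend dirname as saveFramePoses does:

CvMat *poses = (CvMat *)cvLoad("framePoses.xml");
int frameIndex = (int)cvmGet(poses, 0, 0);
CvPoint3D64f rod   = cvPoint3D64f(cvmGet(poses, 0, 1), cvmGet(poses, 0, 2), cvmGet(poses, 0, 3));
CvPoint3D64f shift = cvPoint3D64f(cvmGet(poses, 0, 4), cvmGet(poses, 0, 5), cvmGet(poses, 0, 6));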
void CvMatrix::save( const char* filename, const char* matname )
{
    CV_FUNCNAME( "CvMatrix::save" );

    __BEGIN__;

    if( !matrix )
        return;
    if( icvIsXmlOrYaml( filename ) )
        cvSave( filename, matrix, matname );
    else
    {
        if( save_image )
            save_image( filename, matrix );
        else
            CV_ERROR( CV_StsNotImplemented,
            "Saving a matrix in such a format requires HighGUI.\n"
            "Link it to your program and call any function from it\n" );
    }

    __END__;
}
bool ImageIO::saveTexture(const string &filename, Texture *texture)
{
    ensureImageSize(texture->getWidth(), texture->getHeight(), texture->getChannelCount());
    bool saved = false;
    if(GL_UNSIGNED_BYTE == texture->getDataType())
    {
        texture->transferFromTexture(m_imageHandle.get());
        bool reorderChannels = texture->getFormat() == GL_RGBA || texture->getFormat() == GL_RGB;
        cvFlip(m_imageHandle.get(), 0);
        saved = saveImage(filename, m_imageHandle.get(), reorderChannels);
    }
    else if(GL_FLOAT == texture->getDataType())
    {
        shared_ptr<IplImage> floatImage = shared_ptr<IplImage>(
            cvCreateImage(cvSize(texture->getWidth(), texture->getHeight()), IPL_DEPTH_32F, 3),
            [](IplImage* ptr){ cvReleaseImage(&ptr); });
        texture->transferFromTexture(floatImage.get());
        // Float data goes through the XML/YAML persistence API rather than the
        // image codecs; report success so the caller is not told the save failed.
        cvSave(filename.c_str(), floatImage.get());
        saved = true;
    }
    return saved;
}
/* saveInputAndOutput()
 *
 * inputs: CvMat* training inputs, CvMat* training outputs,
 *         and the filenames to write each matrix to.
 *
 * Saves the training data to two files, one for inputs, one for outputs.
 */
void saveInputAndOutput(CvMat* input, CvMat* output, char* filenameInput, char* filenameOutput)
{
    cvSave( filenameInput, input);
    cvSave(filenameOutput, output);
}
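A hypothetical call, with 100 training rows of 4 inputs and 1 output each:

CvMat *inputs  = cvCreateMat(100, 4, CV_32FC1);
CvMat *outputs = cvCreateMat(100, 1, CV_32FC1);
/* ... fill both matrices ... */
saveInputAndOutput(inputs, outputs, (char*)"trainInputs.xml", (char*)"trainOutputs.xml");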
void bird_eye() {
    int board_n = board_w * board_h;
    CvSize board_sz = cvSize(board_w, board_h);

    CvMat *intrinsic = (CvMat*) cvLoad("Intrinsics.xml");
    CvMat *distortion = (CvMat*) cvLoad("Distortion.xml");

    IplImage* image = cvLoadImage("./Resource/bird-eye.jpg", 1);
    IplImage* gray_image = cvCreateImage(cvGetSize(image), 8, 1);
    cvCvtColor(image, gray_image, CV_BGR2GRAY);

    IplImage* mapx = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
    IplImage* mapy = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
    cvInitUndistortMap( intrinsic, distortion, mapx, mapy );
    IplImage* t = cvCloneImage(image);
    cvRemap(t, image, mapx, mapy);

    cvNamedWindow("Chessboard");
    cvShowImage("Chessboard", image);
    int c = cvWaitKey(-1);

    CvPoint2D32f* corners = new CvPoint2D32f[board_n];
    int corner_count = 0;
    int found = cvFindChessboardCorners(
        image, board_sz, corners, &corner_count,
        CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS );
    if(!found){
        printf("couldn't acquire chessboard!\n");
        return;
    }
    cvFindCornerSubPix( gray_image, corners, corner_count, cvSize(11, 11), cvSize(-1, -1),
                        cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 30, 0.1) );

    CvPoint2D32f objPts[4], imgPts[4];
    objPts[0].x = 0;           objPts[0].y = 0;
    objPts[1].x = board_w - 1; objPts[1].y = 0;
    objPts[2].x = 0;           objPts[2].y = board_h - 1;
    objPts[3].x = board_w - 1; objPts[3].y = board_h - 1;
    imgPts[0] = corners[0];
    imgPts[1] = corners[board_w - 1];
    imgPts[2] = corners[(board_h - 1) * board_w];
    imgPts[3] = corners[(board_h - 1) * board_w + board_w - 1];

    cvCircle(image, cvPointFrom32f(imgPts[0]), 9, CV_RGB(0, 0, 255), 3);
    cvCircle(image, cvPointFrom32f(imgPts[1]), 9, CV_RGB(0, 255, 0), 3);
    cvCircle(image, cvPointFrom32f(imgPts[2]), 9, CV_RGB(255, 0, 0), 3);
    cvCircle(image, cvPointFrom32f(imgPts[3]), 9, CV_RGB(255, 255, 0), 3);

    cvDrawChessboardCorners( image, board_sz, corners, corner_count, found );
    cvShowImage("Chessboard", image);

    CvMat *H = cvCreateMat(3, 3, CV_32F);
    cvGetPerspectiveTransform(objPts, imgPts, H);

    float z = 25;
    int key = 0;
    IplImage * birds_image = cvCloneImage(image);
    cvNamedWindow("Birds_Eye");
    while(key != 27) {
        CV_MAT_ELEM(*H, float, 2, 2) = z;
        cvWarpPerspective( image, birds_image, H,
                           CV_INTER_LINEAR | CV_WARP_INVERSE_MAP | CV_WARP_FILL_OUTLIERS );
        cvShowImage("Birds_Eye", birds_image);
        key = cvWaitKey();
        if(key == 'u') z += 0.5;
        if(key == 'd') z -= 0.5;
    }
    cvSave("H.xml", H);
}
void MyLable::set_scaled_button() {
    int i, j, comp_count = 0;
    CvMat* color_tab = 0;
    CvSeq* contours = 0;

    cvSave("markes.xml", marker_mask);
    cvClearMemStorage(storage);
    cvFindContours( marker_mask, storage, &contours, sizeof(CvContour),
                    CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );
    cvZero( markes );
    for( ; contours != 0; contours = contours->h_next, comp_count++ )
    {
        //cvDrawContours(markers, contours, cvScalarAll(comp_count+1),
        //cvScalarAll(comp_count+1), -1, -1, 8, cvPoint(0,0) );
        cvDrawContours(markes, contours, cvScalarAll(comp_count+1),
                       cvScalarAll(comp_count+1), 1, -1, 8, cvPoint(0,0) );
    }

    color_tab = cvCreateMat( 1, comp_count, CV_8UC3 );  // create a table of random colors
    for( i = 0; i < comp_count; i++ )                   // one color per integer marker
    {
        uchar* ptr = color_tab->data.ptr + i*3;
        ptr[0] = (uchar)(cvRandInt(&rng)%180 + 50);
        ptr[1] = (uchar)(cvRandInt(&rng)%180 + 50);
        ptr[2] = (uchar)(cvRandInt(&rng)%180 + 50);
    }

    double t = (double)cvGetTickCount();
    cvSave("img0.xml", markes);
    cvWatershed( img0, markes );
    cvSave("img1.xml", markes);
    // cvShowImage("imgo",img0);
    t = (double)cvGetTickCount() - t;
    printf( "exec time = %gms\n", t/(cvGetTickFrequency()*1000.) );

    /*********/
    for( i = 0; i < markes->height; i++ )
        for( j = 0; j < markes->width; j++ )
        {
            int idx = CV_IMAGE_ELEM( markes, int, i, j );  // the marker image has type IPL_DEPTH_32S
            // the three BGR channel values are interleaved, hence j*3
            uchar* dst = &CV_IMAGE_ELEM( wshed, uchar, i, j*3 );
            uchar* t_body = &CV_IMAGE_ELEM( tongue_body, uchar, i, j*3 );
            uchar* src_pic = &CV_IMAGE_ELEM( img0, uchar, i, j*3 );
            if( idx == -1 )  // -1 in the output marks the boundaries between segments
            {
                //dst[0] = dst[1] = dst[2] = (uchar)128;
                dst[2] = (uchar)255;
            }
            else if( idx <= 0 || idx > comp_count )  // unexpected case
            {
                //dst[0] = dst[1] = dst[2] = (uchar)0;
                // should not get here
            }
            else if( idx == 1 )  // special case
            {
                //green first
                dst[0] = (uchar)255;
                t_body[0] = src_pic[0];
                t_body[1] = src_pic[1];
                t_body[2] = src_pic[2];
            }
            else if( idx == 2 )  // special case
            {
                //blue second
                dst[1] = (uchar)255;
            }
            else if( idx == 3 )  // special case
            {
                //red third
                dst[2] = (uchar)255;
            }
            else  // normal case
            {
                uchar* ptr = color_tab->data.ptr + (idx-1)*3;
                dst[0] = ptr[0]; dst[1] = ptr[1]; dst[2] = ptr[2];
            }
        }

    cvShowImage("img_gray", img_gray);
    cvShowImage("wshed", wshed);
    // blend the watershed result with the grayscale image:
    // wshed = 0.5*wshed + 0.5*img_gray
    cvAddWeighted( wshed, 0.5, img_gray, 0.5, 0, wshed );
    //cvShowImage("swhed",wshed);
    //cvShowImage("img_gray",img_gray);
    //cvShowImage( "watershed transform", wshed );
    //cvShowImage("img_final",img_final);
    cvShowImage( "watershed transform", wshed );

    img = Function::CjwIplToQImg(tongue_body);
    image = *img;
    cvReleaseMat( &color_tab );
    update();
    clear_list();
}
int main() { if(run_tests_only) { MyLine3D::runTest(); return 0; } //CvMat *camera_inner_calibration_matrix; bool show_surf_example=false; bool show_calibration_from_camera_and_undistortion=false; if(show_calibration_from_camera_and_undistortion) { CvMat *object_points_all=0; CvMat *image_points_all=0; CvMat *points_count_all=0; CvMat *camera_matr=0; CvMat *distor_coefs=0; CvMat *rotation_vecs=0; CvMat *transpose_vecs=0; vector<CvPoint2D32f> qu_calibr_points; IplImage* frameCam1; cvNamedWindow("WindowCam1",CV_WINDOW_KEEPRATIO); CvCapture *captureCam1=cvCreateCameraCapture(0); IplImage *quarterFrame; CvPoint2D32f *cornersFounded= new CvPoint2D32f[100]; int cornersCount=0; int result_Found=0; // getting snapshots for inner camera calibration from video camera bool capture_flag=false; while(true) { frameCam1=cvQueryFrame(captureCam1); quarterFrame=cvCreateImage(cvSize((frameCam1->width),(frameCam1->height)),IPL_DEPTH_8U,3); cvCopy(frameCam1,quarterFrame); if(capture_flag) { result_Found=cvFindChessboardCorners(quarterFrame,cvSize(chess_b_szW,chess_b_szH),cornersFounded,&cornersCount);//,CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS |CV_CALIB_CB_FAST_CHECK); cvDrawChessboardCorners(quarterFrame,cvSize(chess_b_szW,chess_b_szH),cornersFounded,cornersCount,result_Found); if(result_Found>0) AddPointsToInnerCalibrate(qu_calibr_points,cornersFounded,cornersCount); capture_flag=false; cvShowImage("WindowCam1",quarterFrame); if(result_Found>0) cvWaitKey(0); } char c=cvWaitKey(33); if(c==27) break; if(c==32 || c=='y' || c=='Y') capture_flag=true; cvShowImage("WindowCam1",quarterFrame); cvReleaseImage(&quarterFrame); } cvReleaseImage(&quarterFrame); cvReleaseCapture(&captureCam1); cvDestroyWindow("WindowCam1"); PrintAllPointsForInnerCalibrate(qu_calibr_points,chess_b_szW*chess_b_szH); InitCvMatPointsParametersForInnerCallibration_part1(qu_calibr_points,chess_b_szW*chess_b_szH,object_points_all,image_points_all,points_count_all,chess_b_szW,chess_b_szH); InitOtherCameraParametersForInnerCallibration_part2(qu_calibr_points.size()/(chess_b_szW*chess_b_szH),camera_matr,distor_coefs,rotation_vecs,transpose_vecs); double calibration_error_result=cvCalibrateCamera2(object_points_all, image_points_all, points_count_all, cvSize(imgW,imgH), camera_matr, distor_coefs, rotation_vecs, transpose_vecs, CV_CALIB_FIX_PRINCIPAL_POINT|CV_CALIB_FIX_ASPECT_RATIO|CV_CALIB_ZERO_TANGENT_DIST ); WriteMatrixCoef(camera_matr); WriteMatrixCoef(distor_coefs); //camera_inner_calibration_matrix=cvCreateMat(3,3,CV_32FC1); //cvCopy(camera_matr,camera_inner_calibration_matrix); cvSave("camera_calibration_inner.txt",camera_matr,"camera_inner_calibration_matrix"); cvSave("camera_calibration_dist.txt",distor_coefs,"distor_coefs","coeficients of distortions"); cout<<"Total Error:"<<calibration_error_result<<endl; cout<<"Average Calibration Error :"<<(calibration_error_result)/qu_calibr_points.size()<<endl; //undistortion example IplImage *frame_cur; IplImage *undistor_image; cvNamedWindow("cameraUndistor",CV_WINDOW_KEEPRATIO); CvCapture *captureCam2=cvCreateCameraCapture(0); bool undist_flag=false; while(true) { frame_cur= cvQueryFrame(captureCam2); undistor_image=cvCreateImage(cvSize((frame_cur->width),(frame_cur->height)),IPL_DEPTH_8U,3); if(undist_flag) { cvUndistort2(frame_cur,undistor_image,camera_matr,distor_coefs); } else { cvCopy(frame_cur,undistor_image); } cvShowImage("cameraUndistor",undistor_image); char c=cvWaitKey(33); if(c==27) break; if(c=='u'||c=='U') undist_flag=!undist_flag; cvReleaseImage(&undistor_image); } 
cvReleaseImage(&undistor_image); cvReleaseCapture(&captureCam2); cvDestroyWindow("cameraUndistor"); }//ending undistortion_example if(show_surf_example) { //using SURF initModule_nonfree();// added at 16.04.2013 CvCapture* capture_cam_3=cvCreateCameraCapture(0); cvNamedWindow("SURF from Cam",CV_WINDOW_KEEPRATIO); cvCreateTrackbar("Hessian Level","SURF from Cam",0,1000,onTrackbarSlide1); IplImage* buf_frame_3=0; IplImage* gray_copy=0; IplImage* buf_frame_3_copy=0; CvSeq *kp1,*descr1; CvMemStorage *storage=cvCreateMemStorage(0); CvSURFPoint *surf_pt; bool surf_flag=false; while(true) { buf_frame_3=cvQueryFrame(capture_cam_3); if(surf_flag) { surf_flag=false; gray_copy=cvCreateImage(cvSize((buf_frame_3->width),(buf_frame_3->height)),IPL_DEPTH_8U,1); buf_frame_3_copy=cvCreateImage(cvSize((buf_frame_3->width),(buf_frame_3->height)),IPL_DEPTH_8U,3); cvCvtColor(buf_frame_3,gray_copy,CV_RGB2GRAY); //cvSetImageROI(gray_copy,cvRect(280,200,40,40)); cvExtractSURF(gray_copy,NULL,&kp1,&descr1,storage,cvSURFParams(0.0,0)); cvReleaseImage(&gray_copy); re_draw=true; while(true) { if(re_draw) { cvCopy(buf_frame_3,buf_frame_3_copy); double pi=acos(-1.0); for(int i=0;i<kp1->total;i++) { surf_pt=(CvSURFPoint*)cvGetSeqElem(kp1,i); if(surf_pt->hessian<min_hessian) continue; int pt_x,pt_y; pt_x=(int)(surf_pt->pt.x); pt_y=(int)(surf_pt->pt.y); int sz=surf_pt->size; int rad_angle=(surf_pt->dir*pi)/180; cvCircle(buf_frame_3_copy,cvPoint(pt_x,pt_y),1/*sz*/,CV_RGB(0,255,0)); cvLine(buf_frame_3_copy,cvPoint(pt_x,pt_y),cvPoint(pt_x+sz*cosl(rad_angle),pt_y-sz*sinl(rad_angle)),CV_RGB(0,0,255)); } cvShowImage("SURF from Cam",buf_frame_3_copy); } char c=cvWaitKey(33); if(c==27) { break; } } cvReleaseImage(&buf_frame_3_copy); } cvShowImage("SURF from Cam",buf_frame_3); char ch=cvWaitKey(33); if(ch==27) break; if(ch==32) surf_flag=true; } if(gray_copy!=0) cvReleaseImage(&gray_copy); cvReleaseCapture(&capture_cam_3); cvDestroyWindow("SURF from Cam"); }//ending SURF_example CvFont my_font=cvFont(1,1); cvInitFont(&my_font,CV_FONT_HERSHEY_SIMPLEX,1.0,1.0); cvNamedWindow("twoSnapshots",CV_WINDOW_KEEPRATIO); cvCreateTrackbar("Select LLine","twoSnapshots",0,1000,onTrackbarSlideSelectLine); CvCapture *capture_4 = 0; IplImage* left_img=0; IplImage* right_img=0; IplImage* cur_frame_buf=0; IplImage* gray_img_left=0; IplImage* gray_img_right=0; IplImage* merged_images=0; IplImage* merged_images_copy=0; CvMat *fundamentalMatrix = 0; vector<KeyPoint> key_points_left; Mat descriptors_left; vector<KeyPoint> key_points_right; Mat descriptors_right; //CvMemStorage *mem_stor=cvCreateMemStorage(0);*/ float min_hessian_value=1001.0f; double startValueOfFocus = 350; char* left_image_file_path = "camera_picture_left.png"; char* right_image_file_path = "camera_picture_right.png"; Array left_points, right_points; left_points.init(1,1); right_points.init(1,1); Array forReconstructionLeftPoints, forReconstructionRightPoints; forReconstructionLeftPoints.init(1,1); forReconstructionRightPoints.init(1,1); while(true) { char ch=cvWaitKey(33); if(ch==27) break; // open left and right images if(ch == 'o' || ch == 'O') { openTwoImages(left_image_file_path, right_image_file_path, left_img, right_img ); MergeTwoImages(left_img,right_img,merged_images); } // save both left and right images from camera if(ch == 's' || ch == 'S') { if( left_img != 0 ) cvSaveImage(left_image_file_path, left_img); if( right_img != 0) cvSaveImage(right_image_file_path, right_img); } if(ch=='l'||ch=='L') { if(capture_4 == 0) { capture_4=cvCreateCameraCapture(0); } 
cur_frame_buf=cvQueryFrame(capture_4); if(left_img==0) left_img=cvCreateImage(cvSize(cur_frame_buf->width,cur_frame_buf->height),IPL_DEPTH_8U,3); cvCopy(cur_frame_buf,left_img); if(right_img == 0) { right_img=cvCreateImage(cvSize(cur_frame_buf->width,cur_frame_buf->height),IPL_DEPTH_8U,3); cvCopy(cur_frame_buf,right_img); } MergeTwoImages(left_img,right_img,merged_images); } if(ch=='r'||ch=='R') { if(capture_4 == 0) { capture_4=cvCreateCameraCapture(0); } cur_frame_buf=cvQueryFrame(capture_4); if(right_img==0) right_img=cvCreateImage(cvSize(cur_frame_buf->width,cur_frame_buf->height),IPL_DEPTH_8U,3); cvCopy(cur_frame_buf,right_img); if(left_img == 0) { left_img=cvCreateImage(cvSize(cur_frame_buf->width,cur_frame_buf->height),IPL_DEPTH_8U,3); cvCopy(cur_frame_buf,left_img); } MergeTwoImages(left_img,right_img,merged_images); } if(ch=='b'||ch=='B') { if(capture_4 == 0) { capture_4=cvCreateCameraCapture(0); } cur_frame_buf=cvQueryFrame(capture_4); cvCopy(cur_frame_buf,left_img); cvCopy(cur_frame_buf,right_img); } if(ch=='q'||ch=='Q' && left_img!=0) { //proceed left extractFeaturesFromImage(left_img, min_hessian_value, gray_img_left, key_points_left, descriptors_left); } if(ch=='w'||ch=='W' && right_img!=0) { //proceed right extractFeaturesFromImage(right_img, min_hessian_value, gray_img_right, key_points_right, descriptors_right); } if(ch=='m'||ch=='M' && left_img!=0 && right_img!=0) { //merge two images in to bigger one MergeTwoImages(left_img,right_img,merged_images); } if(ch=='c'||ch=='C' && merged_images!=0) { //comparison of two images if(fundamentalMatrix != 0) { cvReleaseMat(& fundamentalMatrix); fundamentalMatrix = 0; } left_to_right_corresponding_points.clear(); right_to_left_corresponding_points.clear(); GetCorrespondingPointsForSURF(key_points_left,descriptors_left,key_points_right,descriptors_right,left_to_right_corresponding_points,right_to_left_corresponding_points); } if(ch == 'E' || ch == 'e') { //drawing lines for corresponding points KeyPoint *leftPoint,*rightPoint,*leftPoint2,*rightPoint2; int width_part=merged_images->width>>1; /*for(int iL=0;iL<left_to_right_corresponding_points.size();iL++) { leftPoint=(CvSURFPoint*)cvGetSeqElem(key_points_left,left_to_right_corresponding_points[iL].first); rightPoint=(CvSURFPoint*)cvGetSeqElem(key_points_right,left_to_right_corresponding_points[iL].second); cvLine(merged_images,cvPoint(leftPoint->pt.x,leftPoint->pt.y),cvPoint(rightPoint->pt.x+width_part,rightPoint->pt.y),CV_RGB(255,0,0)); }*/ int sizeOfAccepptedLeftToRightCorrespondings = left_to_right_corresponding_points.size(); bool* acceptedLeftToRightCorrespondings = 0; getAcceptedCorrespondingsForFindingModelParameters(left_to_right_corresponding_points, key_points_left, key_points_right, fundamentalMatrix, acceptedLeftToRightCorrespondings, sizeOfAccepptedLeftToRightCorrespondings); while(true) { merged_images_copy=cvCreateImage(cvSize(merged_images->width,merged_images->height),merged_images->depth,3); cvCopy(merged_images,merged_images_copy); int iL=selectedLeftLine; int iR=iL; if(iL>=left_to_right_corresponding_points.size()) iL=left_to_right_corresponding_points.size()-1; if(iR>=right_to_left_corresponding_points.size()) iR=right_to_left_corresponding_points.size()-1; char str[100]={0}; if(iL >= 0 ) { bool isLeftToRightLineIsAccepted = acceptedLeftToRightCorrespondings[iL]; // difference value sprintf(str,"%f",left_to_right_corresponding_points[iL].comparer_value); cvPutText(merged_images_copy,str,cvPoint(0,merged_images_copy->height-40),&my_font,CV_RGB(0,255,0)); // count of 
Matches sprintf(str,"%d",left_to_right_corresponding_points[iL].counterOfMatches); cvPutText(merged_images_copy,str,cvPoint(200,merged_images_copy->height-40),&my_font,CV_RGB(255,255,0)); // median of compared values sprintf(str,"%lf",left_to_right_corresponding_points[iL].medianOfComparedMatches); cvPutText(merged_images_copy,str,cvPoint(250,merged_images_copy->height-40),&my_font,CV_RGB(255,0,0)); // Variance of compared values sprintf(str,"V=%lf",left_to_right_corresponding_points[iL].Variance()); cvPutText(merged_images_copy,str,cvPoint(0,merged_images_copy->height-80),&my_font,CV_RGB(0,255,0)); // Standard deviation of compared values sprintf(str,"SD=%lf",sqrt( left_to_right_corresponding_points[iL].Variance() )); cvPutText(merged_images_copy,str,cvPoint(250,merged_images_copy->height-80),&my_font,CV_RGB(0,255,0)); double SD = sqrt( left_to_right_corresponding_points[iL].Variance() ) ; double median = left_to_right_corresponding_points[iL].medianOfComparedMatches; double compValue = left_to_right_corresponding_points[iL].comparer_value; double mark_1_5 = median - 1.5 * SD - compValue; // Mark 1.5 sprintf(str,"m1.5=%lf", mark_1_5); cvPutText(merged_images_copy,str,cvPoint(0,merged_images_copy->height-120),&my_font,CV_RGB(0,255,0)); sprintf(str,"angle=%lf", left_to_right_corresponding_points[iL].degreesBetweenDeltaVector); cvPutText(merged_images_copy,str,cvPoint(0,merged_images_copy->height-150),&my_font,CV_RGB(0,255,0)); leftPoint= &(key_points_left[ left_to_right_corresponding_points[iL].comp_pair.first ]); rightPoint=&(key_points_right[ left_to_right_corresponding_points[iL].comp_pair.second ]); cvLine(merged_images_copy,cvPoint(leftPoint->pt.x,leftPoint->pt.y),cvPoint(rightPoint->pt.x+width_part,rightPoint->pt.y),CV_RGB(0,255,0)); drawEpipolarLinesOnLeftAndRightImages(merged_images_copy, cvPoint(leftPoint->pt.x,leftPoint->pt.y), cvPoint(rightPoint->pt.x,rightPoint->pt.y), fundamentalMatrix); CvScalar color = CV_RGB(255, 0, 0); if(isLeftToRightLineIsAccepted) { color = CV_RGB(0,255,0); } cvCircle(merged_images_copy, cvPoint(leftPoint->pt.x,leftPoint->pt.y), 5, color); cvCircle(merged_images_copy, cvPoint(rightPoint->pt.x+width_part,rightPoint->pt.y), 5, color); } //cvLine(merged_images_copy,cvPoint(leftPoint->pt.x,leftPoint->pt.y),cvPoint(rightPoint->pt.x,rightPoint->pt.y),CV_RGB(255,0,255)); if(iR >= 0 ) { sprintf(str,"%f",right_to_left_corresponding_points[iR].comparer_value); cvPutText(merged_images_copy,str,cvPoint(width_part,merged_images_copy->height-40),&my_font,CV_RGB(255,0,0)); rightPoint2= &(key_points_right [right_to_left_corresponding_points[iR].comp_pair.first]); leftPoint2= &(key_points_left [right_to_left_corresponding_points[iR].comp_pair.second]); cvLine(merged_images_copy,cvPoint(leftPoint2->pt.x,leftPoint2->pt.y),cvPoint(rightPoint2->pt.x+width_part,rightPoint2->pt.y),CV_RGB(255,0,0)); } //cvLine(merged_images_copy,cvPoint(leftPoint2->pt.x+width_part,leftPoint2->pt.y),cvPoint(rightPoint2->pt.x+width_part,rightPoint2->pt.y),CV_RGB(255,0,255)); cvShowImage("twoSnapshots",merged_images_copy); cvReleaseImage(&merged_images_copy); char ch2=cvWaitKey(33); if(ch2==27) break; if(ch2=='z' && selectedLeftLine>0) { selectedLeftLine--; } if(ch2=='x' && selectedLeftLine<1000) { selectedLeftLine++; } if( ch2 == 'a' || ch2 == 'A') { acceptedLeftToRightCorrespondings[selectedLeftLine] = true; } if( ch2 == 'd' || ch2 == 'D') { acceptedLeftToRightCorrespondings[selectedLeftLine] = false; } }//end of while(true) SaveAcceptedCorresspondings( left_to_right_corresponding_points, 
right_to_left_corresponding_points, key_points_left, key_points_right, acceptedLeftToRightCorrespondings, sizeOfAccepptedLeftToRightCorrespondings ); ConvertAcceptedCorresspondingsToMyArray(left_to_right_corresponding_points, right_to_left_corresponding_points, key_points_left, key_points_right, acceptedLeftToRightCorrespondings, sizeOfAccepptedLeftToRightCorrespondings, left_points, right_points ); delete[] acceptedLeftToRightCorrespondings; } if( ch == 'T' || ch == 't') { clock_t startTime = clock(); openTwoImages(left_image_file_path, right_image_file_path, left_img, right_img ); // proceed left extractFeaturesFromImage(left_img, min_hessian_value, gray_img_left, key_points_left, descriptors_left); //proceed right extractFeaturesFromImage(right_img, min_hessian_value, gray_img_right, key_points_right, descriptors_right); //comparison of two images if(fundamentalMatrix != 0) { cvReleaseMat(& fundamentalMatrix); fundamentalMatrix = 0; } left_to_right_corresponding_points.clear(); right_to_left_corresponding_points.clear(); GetCorrespondingPointsForSURF(key_points_left,descriptors_left,key_points_right,descriptors_right,left_to_right_corresponding_points,right_to_left_corresponding_points); // searching fundamental matrix and corresponding points findFundamentalMatrixAndCorrespondingPointsForReconstruction( left_to_right_corresponding_points, right_to_left_corresponding_points, fundamentalMatrix, key_points_left, key_points_right, descriptors_left, descriptors_right, left_img, right_img, gray_img_left, gray_img_right, forReconstructionLeftPoints, forReconstructionRightPoints, min_hessian_value, 450); // selecting points for finding model parameters int sizeOfAccepptedLeftToRightCorrespondings = left_to_right_corresponding_points.size(); bool* acceptedLeftToRightCorrespondings = 0; getAcceptedCorrespondingsForFindingModelParameters(left_to_right_corresponding_points, key_points_left, key_points_right, fundamentalMatrix, acceptedLeftToRightCorrespondings, sizeOfAccepptedLeftToRightCorrespondings); ConvertAcceptedCorresspondingsToMyArray(left_to_right_corresponding_points, right_to_left_corresponding_points, key_points_left, key_points_right, acceptedLeftToRightCorrespondings, sizeOfAccepptedLeftToRightCorrespondings, left_points, right_points ); delete[] acceptedLeftToRightCorrespondings; // start process of determination parameters of model and reconstruction of scene cv::Mat mat_left_img(left_img, true); cv::Mat mat_right_img(right_img, true); mainLevenbergMarkvardt_LMFIT(startValueOfFocus, "currentPLYExportFile", left_points, right_points, mat_left_img, mat_right_img, forReconstructionLeftPoints, forReconstructionRightPoints); mat_left_img.release(); mat_right_img.release(); cout << "Code execution time: "<< double( clock() - startTime ) / (double)CLOCKS_PER_SEC<< " seconds." << endl; } if( ch == 'I' || ch == 'i') { //-- Step 3: Matching descriptor vectors using FLANN matcher FlannBasedMatcher matcher; std::vector< DMatch > matches; matcher.match( descriptors_left, descriptors_right, matches ); //double max_dist = 0; double min_dist = 100; ////-- Quick calculation of max and min distances between keypoints //for( int i = 0; i < descriptors_left.rows; i++ ) //{ double dist = matches[i].distance; // if( dist < min_dist ) min_dist = dist; // if( dist > max_dist ) max_dist = dist; //} //printf("-- Max dist : %f \n", max_dist ); //printf("-- Min dist : %f \n", min_dist ); //-- Draw only "good" matches (i.e. 
whose distance is less than 2*min_dist, //-- or a small arbitary value ( 0.02 ) in the event that min_dist is very //-- small) //-- PS.- radiusMatch can also be used here. //std::vector< DMatch > good_matches; left_to_right_corresponding_points.clear(); right_to_left_corresponding_points.clear(); for( int i = 0; i < descriptors_left.rows; i++ ) { //if( matches[i].distance <= max(2*min_dist, 0.02) ) { //good_matches.push_back( matches[i]); left_to_right_corresponding_points.push_back( ComparedIndexes(matches[i].distance, pair<int, int> (i, matches[i].trainIdx)) ); } } cout<< "Count of good matches :" << left_to_right_corresponding_points.size() << endl; stable_sort(left_to_right_corresponding_points.begin(),left_to_right_corresponding_points.end(),my_comparator_for_stable_sort); } //if( ch == 'K' || ch == 'k') //{ // CvSURFPoint *leftPoint; // //proceed left // gray_img_left=cvCreateImage(cvSize((left_img->width),(left_img->height)),IPL_DEPTH_8U,1); // cvCvtColor(left_img,gray_img_left,CV_RGB2GRAY); // cvExtractSURF(gray_img_left,NULL,&key_points_left,&descriptors_left,mem_stor,cvSURFParams(min_hessian_value,0)); // cv::Mat mat_gray_leftImage(gray_img_left, true); // cvReleaseImage(&gray_img_left); // // proceed right // gray_img_right=cvCreateImage(cvSize((right_img->width),(right_img->height)),IPL_DEPTH_8U,1); // cvCvtColor(right_img,gray_img_right,CV_RGB2GRAY); // cv::Mat mat_gray_rightImage(gray_img_right, true); // cvReleaseImage(&gray_img_right); // vector<Point2f> LK_left_points; // vector<Point2f> LK_right_points; // LK_right_points.resize(key_points_left->total); // for( int i = 0; i < key_points_left->total; i++) // { // leftPoint=(CvSURFPoint*)cvGetSeqElem(key_points_left, i); // LK_left_points.push_back(Point2f( leftPoint->pt.x, leftPoint->pt.y)); // } // // vector<uchar> status; // vector<float> err; // cv::calcOpticalFlowPyrLK( // mat_gray_leftImage, // mat_gray_rightImage, // LK_left_points, // LK_right_points, // status, // err); // int width_part=merged_images->width>>1; // // float minErr = err[0]; // for(int k = 0; k < err.size(); k++) // { // if(status[k] && err[k] < minErr) // { // minErr = err[k]; // } // } // cout<< "Lucass Kanade min error: " << minErr<< endl; // int i = 0; // merged_images_copy=cvCreateImage(cvSize(merged_images->width,merged_images->height),merged_images->depth,3); // cvCopy(merged_images,merged_images_copy); // for(; i < LK_left_points.size(); ++i) // { // if(err[i] < 5 * minErr && status[i]) // { // cvLine(merged_images_copy,cvPoint(LK_left_points[i].x,LK_left_points[i].y),cvPoint(LK_right_points[i].x+width_part,LK_right_points[i].y), // CV_RGB(100 + (( i *3) % 155), 100+ ((i*7)%155), 100+ ((i*13)%155))); // } // } // cvShowImage("twoSnapshots",merged_images_copy); // // while(true) // { // char ch2=cvWaitKey(33); // if(ch2==27) // break; // // } // // cvReleaseImage(&merged_images_copy); // status.clear(); // err.clear(); // LK_left_points.clear(); // LK_right_points.clear(); // mat_gray_leftImage.release(); // mat_gray_rightImage.release(); //} if( ch == 'F' || ch == 'f') { findFundamentalMatrixAndCorrespondingPointsForReconstruction( left_to_right_corresponding_points, right_to_left_corresponding_points, fundamentalMatrix, key_points_left, key_points_right, descriptors_left, descriptors_right, left_img, right_img, gray_img_left, gray_img_right, forReconstructionLeftPoints, forReconstructionRightPoints, min_hessian_value); } if( ch == 'P' || ch == 'p') { cv::Mat mat_left_img(left_img, true); cv::Mat mat_right_img(right_img, true); 
mainLevenbergMarkvardt_LMFIT(startValueOfFocus, "currentPLYExportFile", left_points, right_points, mat_left_img, mat_right_img, forReconstructionLeftPoints, forReconstructionRightPoints); mat_left_img.release(); mat_right_img.release(); } if(merged_images!=0) { cvShowImage("twoSnapshots",merged_images); } }
int main(int argc, char *argv[]) {
    if (argc != 6) {
        printf("\nERROR: too few parameters\n");
        help();
        return -1;
    }
    help();
    //INPUT PARAMETERS:
    int board_w = atoi(argv[1]);
    int board_h = atoi(argv[2]);
    int board_n = board_w * board_h;
    CvSize board_sz = cvSize(board_w, board_h);
    CvMat *intrinsic = (CvMat *) cvLoad(argv[3]);
    CvMat *distortion = (CvMat *) cvLoad(argv[4]);
    IplImage *image = 0, *gray_image = 0;
    if ((image = cvLoadImage(argv[5])) == 0) {
        printf("Error: Couldn't load %s\n", argv[5]);
        return -1;
    }
    gray_image = cvCreateImage(cvGetSize(image), 8, 1);
    cvCvtColor(image, gray_image, CV_BGR2GRAY);

    //UNDISTORT OUR IMAGE
    IplImage *mapx = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
    IplImage *mapy = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
    cvInitUndistortMap(intrinsic, distortion, mapx, mapy);
    IplImage *t = cvCloneImage(image);
    cvRemap(t, image, mapx, mapy);

    //GET THE CHECKERBOARD ON THE PLANE
    cvNamedWindow("Checkers");
    CvPoint2D32f *corners = new CvPoint2D32f[board_n];
    int corner_count = 0;
    int found = cvFindChessboardCorners(image, board_sz, corners, &corner_count,
                                        CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
    if (!found) {
        printf("Couldn't acquire checkerboard on %s, only found %d of %d corners\n",
               argv[5], corner_count, board_n);
        return -1;
    }
    //Get Subpixel accuracy on those corners
    cvFindCornerSubPix(gray_image, corners, corner_count,
                       cvSize(11, 11), cvSize(-1, -1),
                       cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));

    //GET THE IMAGE AND OBJECT POINTS:
    //Object points are at (r,c): (0,0), (board_w-1,0), (0,board_h-1), (board_w-1,board_h-1)
    //That means corners are at: corners[r*board_w + c]
    CvPoint2D32f objPts[4], imgPts[4];
    objPts[0].x = 0;           objPts[0].y = 0;
    objPts[1].x = board_w - 1; objPts[1].y = 0;
    objPts[2].x = 0;           objPts[2].y = board_h - 1;
    objPts[3].x = board_w - 1; objPts[3].y = board_h - 1;
    imgPts[0] = corners[0];
    imgPts[1] = corners[board_w - 1];
    imgPts[2] = corners[(board_h - 1) * board_w];
    imgPts[3] = corners[(board_h - 1) * board_w + board_w - 1];

    //DRAW THE POINTS in order: B,G,R,YELLOW
    cvCircle(image, cvPointFrom32f(imgPts[0]), 9, CV_RGB(0, 0, 255), 3);
    cvCircle(image, cvPointFrom32f(imgPts[1]), 9, CV_RGB(0, 255, 0), 3);
    cvCircle(image, cvPointFrom32f(imgPts[2]), 9, CV_RGB(255, 0, 0), 3);
    cvCircle(image, cvPointFrom32f(imgPts[3]), 9, CV_RGB(255, 255, 0), 3);

    //DRAW THE FOUND CHECKERBOARD
    cvDrawChessboardCorners(image, board_sz, corners, corner_count, found);
    cvShowImage("Checkers", image);

    //FIND THE HOMOGRAPHY
    CvMat *H = cvCreateMat(3, 3, CV_32F);
    CvMat *H_invt = cvCreateMat(3, 3, CV_32F);
    cvGetPerspectiveTransform(objPts, imgPts, H);

    //LET THE USER ADJUST THE Z HEIGHT OF THE VIEW
    float Z = 25;
    int key = 0;
    IplImage *birds_image = cvCloneImage(image);
    cvNamedWindow("Birds_Eye");
    while (key != 27) {  //escape key stops
        CV_MAT_ELEM(*H, float, 2, 2) = Z;
        // cvInvert(H,H_invt); //If you want to invert the homography directly
        // cvWarpPerspective(image,birds_image,H_invt,CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS );
        //USE HOMOGRAPHY TO REMAP THE VIEW
        cvWarpPerspective(image, birds_image, H,
                          CV_INTER_LINEAR + CV_WARP_INVERSE_MAP + CV_WARP_FILL_OUTLIERS);
        cvShowImage("Birds_Eye", birds_image);
        key = cvWaitKey();
        if (key == 'u') Z += 0.5;
        if (key == 'd') Z -= 0.5;
    }

    //SHOW ROTATION AND TRANSLATION VECTORS
    CvMat *image_points = cvCreateMat(4, 1, CV_32FC2);
    CvMat *object_points = cvCreateMat(4, 1, CV_32FC3);
    for (int i = 0; i < 4; ++i) {
        CV_MAT_ELEM(*image_points, CvPoint2D32f, i, 0) = imgPts[i];
        CV_MAT_ELEM(*object_points, CvPoint3D32f, i, 0) = cvPoint3D32f(objPts[i].x, objPts[i].y, 0);
    }
    CvMat *RotRodrigues = cvCreateMat(3, 1, CV_32F);
    CvMat *Rot = cvCreateMat(3, 3, CV_32F);
    CvMat *Trans = cvCreateMat(3, 1, CV_32F);
    cvFindExtrinsicCameraParams2(object_points, image_points, intrinsic, distortion,
                                 RotRodrigues, Trans);
    cvRodrigues2(RotRodrigues, Rot);

    //SAVE AND EXIT
    cvSave("Rot.xml", Rot);
    cvSave("Trans.xml", Trans);
    cvSave("H.xml", H);
    cvInvert(H, H_invt);
    cvSave("H_invt.xml", H_invt);  //Bottom row of H invert is horizon line
    return 0;
}
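The final comment notes that the bottom row of the inverted homography encodes the horizon line. A hedged sketch of reading it back as line coefficients a*x + b*y + c = 0:

CvMat *Hinv = (CvMat *)cvLoad("H_invt.xml");
float a = CV_MAT_ELEM(*Hinv, float, 2, 0);
float b = CV_MAT_ELEM(*Hinv, float, 2, 1);
float c = CV_MAT_ELEM(*Hinv, float, 2, 2);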
int main(int argc, char* argv[])
{
    int height, width, step, channels;  // parameters of the image we are working on
    int i, j, k, t1min = 0, t1max = 0, t2min = 0, t2max = 0, t3min = 0, t3max = 0;  // other variables used

    CvMat* threshold_matrix = cvCreateMat(2, 3, CV_32FC1);

    // Load the previous threshold values if they exist
    CvFileStorage* temp = cvOpenFileStorage("threshold_matrix.xml", NULL, CV_STORAGE_READ);
    if (temp) {
        threshold_matrix = (CvMat*)cvLoad("threshold_matrix.xml");
        t1min = (int)CV_MAT_ELEM(*threshold_matrix, float, 0, 0);
        t2min = (int)CV_MAT_ELEM(*threshold_matrix, float, 0, 1);
        t3min = (int)CV_MAT_ELEM(*threshold_matrix, float, 0, 2);
        t1max = (int)CV_MAT_ELEM(*threshold_matrix, float, 1, 0);
        t2max = (int)CV_MAT_ELEM(*threshold_matrix, float, 1, 1);
        t3max = (int)CV_MAT_ELEM(*threshold_matrix, float, 1, 2);
    }

    // Open capture device. 0 is /dev/video0, 1 is /dev/video1, etc.
    CvCapture* capture = cvCaptureFromCAM(1);
    if (!capture) {
        fprintf(stderr, "ERROR: capture is NULL \n");
        getchar();
        return -1;
    }

    // grab an image from the capture
    IplImage* frame = cvQueryFrame(capture);

    // Create windows in which the captured images will be presented
    cvNamedWindow("Camera", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("HSV", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("F1", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("F2", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("F3", CV_WINDOW_AUTOSIZE);
    //cvNamedWindow( "EdgeDetection", CV_WINDOW_AUTOSIZE );

    /// Create Trackbars
    char TrackbarName1[50] = "t1min";
    char TrackbarName2[50] = "t1max";
    char TrackbarName3[50] = "t2min";
    char TrackbarName4[50] = "t2max";
    char TrackbarName5[50] = "t3min";
    char TrackbarName6[50] = "t3max";
    cvCreateTrackbar(TrackbarName1, "F1", &t1min, 260, NULL);
    cvCreateTrackbar(TrackbarName2, "F1", &t1max, 260, NULL);
    cvCreateTrackbar(TrackbarName3, "F2", &t2min, 260, NULL);
    cvCreateTrackbar(TrackbarName4, "F2", &t2max, 260, NULL);
    cvCreateTrackbar(TrackbarName5, "F3", &t3min, 260, NULL);
    cvCreateTrackbar(TrackbarName6, "F3", &t3max, 260, NULL);

    // Load the thresholds from the slider bars into these two parameters
    CvScalar hsv_min = cvScalar(t1min, t2min, t3min, 0);
    CvScalar hsv_max = cvScalar(t1max, t2max, t3max, 0);

    // get the image data
    height = frame->height;
    width = frame->width;
    step = frame->widthStep;

    // capture size
    CvSize size = cvSize(width, height);

    // Initialize the different images that are going to be used in the program
    IplImage* hsv_frame = cvCreateImage(size, IPL_DEPTH_8U, 3);     // image converted to the HSV plane
    IplImage* thresholded = cvCreateImage(size, IPL_DEPTH_8U, 1);   // final thresholded image
    IplImage* thresholded1 = cvCreateImage(size, IPL_DEPTH_8U, 1);  // per-component threshold images
    IplImage* thresholded2 = cvCreateImage(size, IPL_DEPTH_8U, 1);
    IplImage* thresholded3 = cvCreateImage(size, IPL_DEPTH_8U, 1);
    IplImage* filtered = cvCreateImage(size, IPL_DEPTH_8U, 1);      // smoothed image

    while (1) {
        // Load the thresholds from the slider bars into these two parameters
        hsv_min = cvScalar(t1min, t2min, t3min, 0);
        hsv_max = cvScalar(t1max, t2max, t3max, 0);

        // Get one frame
        frame = cvQueryFrame(capture);
        if (!frame) {
            fprintf(stderr, "ERROR: frame is null...\n");
            getchar();
            break;
        }

        // Convert the color space to HSV, as it is much easier to filter colors there.
        cvCvtColor(frame, hsv_frame, CV_BGR2HSV);
        // Filter out colors which are out of range.
        cvInRangeS(hsv_frame, hsv_min, hsv_max, thresholded);

        // The lines below are for visualization only; remove them after calibration.
        //--------------FROM HERE-----------------------------------
        // Split the image into its three one-dimensional components
        cvSplit(hsv_frame, thresholded1, thresholded2, thresholded3, NULL);
        // Filter out colors which are out of range.
        cvInRangeS(thresholded1, cvScalar(t1min, 0, 0, 0), cvScalar(t1max, 0, 0, 0), thresholded1);
        cvInRangeS(thresholded2, cvScalar(t2min, 0, 0, 0), cvScalar(t2max, 0, 0, 0), thresholded2);
        cvInRangeS(thresholded3, cvScalar(t3min, 0, 0, 0), cvScalar(t3max, 0, 0, 0), thresholded3);
        //-------------REMOVE OR COMMENT OUT AFTER CALIBRATION, UP TO HERE ------------------

        // Memory for the Hough circles
        CvMemStorage* storage = cvCreateMemStorage(0);
        // the Hough detector works better with some smoothing of the image
        cvSmooth(thresholded, thresholded, CV_GAUSSIAN, 9, 9);
        // Hough transform to detect circles
        CvSeq* circles = cvHoughCircles(thresholded, storage, CV_HOUGH_GRADIENT, 2,
                                        thresholded->height/4, 100, 50, 10, 400);

        for (int i = 0; i < circles->total; i++) {
            // get the parameters of the detected circles
            float* p = (float*)cvGetSeqElem(circles, i);
            printf("Ball! x=%f y=%f r=%f\n\r", p[0], p[1], p[2]);
            // draw a circle with the centre and the radius obtained from the Hough transform
            cvCircle(frame, cvPoint(cvRound(p[0]), cvRound(p[1])),  // plot centre
                     2, CV_RGB(255, 255, 255), -1, 8, 0);
            cvCircle(frame, cvPoint(cvRound(p[0]), cvRound(p[1])),  // plot circle
                     cvRound(p[2]), CV_RGB(0, 255, 0), 2, 8, 0);
        }

        /* For testing you can show all the images, but once calibration is done,
           show only "Camera" to keep the screen clean. */
        cvShowImage("Camera", frame);                       // Original stream with detected ball overlay
        cvShowImage("HSV", hsv_frame);                      // Original stream in the HSV color space
        cvShowImage("After Color Filtering", thresholded);  // The stream after color filtering
        cvShowImage("F1", thresholded1);                    // individual filters
        cvShowImage("F2", thresholded2);
        cvShowImage("F3", thresholded3);
        //cvShowImage( "filtered", thresholded );

        cvReleaseMemStorage(&storage);

        // If the ESC key is pressed (Key=0x10001B under OpenCV 0.9.7, Linux version),
        // remove the higher bits using the AND operator
        if ((cvWaitKey(10) & 255) == 27)
            break;
    }

    // Save the threshold values before exiting
    *((float*)CV_MAT_ELEM_PTR(*threshold_matrix, 0, 0)) = t1min;
    *((float*)CV_MAT_ELEM_PTR(*threshold_matrix, 0, 1)) = t2min;
    *((float*)CV_MAT_ELEM_PTR(*threshold_matrix, 0, 2)) = t3min;
    *((float*)CV_MAT_ELEM_PTR(*threshold_matrix, 1, 0)) = t1max;
    *((float*)CV_MAT_ELEM_PTR(*threshold_matrix, 1, 1)) = t2max;
    *((float*)CV_MAT_ELEM_PTR(*threshold_matrix, 1, 2)) = t3max;
    cvSave("threshold_matrix.xml", threshold_matrix);

    // Release the capture device and window housekeeping
    cvReleaseCapture(&capture);
    cvDestroyWindow("Camera");
    return 0;
}
/*! \fn CvBinGabAdaFeatureSelect::saveweights(const char* filename) */ void CvBinGabAdaFeatureSelect::saveweights(const char* filename) { cvSave( filename, weights, NULL, NULL, cvAttrList()); }
void PerformanceMatrix::saveDatabase(const char * fileName){ cvSave(fileName, pm); }
int main(int argc, char* argv[]) { int i, j; CvMemStorage* storage = cvCreateMemStorage(0); IplImage* img = cvCreateImage( cvSize(w,w), 8, 1 ); IplImage* img32f = cvCreateImage( cvSize(w,w), IPL_DEPTH_32F, 1 ); IplImage* img32s = cvCreateImage( cvSize(w,w), IPL_DEPTH_32S, 1 ); IplImage* img3 = cvCreateImage( cvSize(w,w), 8, 3 ); (void)argc; (void)argv; help(); cvZero( img ); for( i=0; i < 6; i++ ) { int dx = (i%2)*250 - 30; int dy = (i/2)*150; CvScalar white = cvRealScalar(255); CvScalar black = cvRealScalar(0); if( i == 0 ) { for( j = 0; j <= 10; j++ ) { double angle = (j+5)*CV_PI/21; cvLine(img, cvPoint(cvRound(dx+100+j*10-80*cos(angle)), cvRound(dy+100-90*sin(angle))), cvPoint(cvRound(dx+100+j*10-30*cos(angle)), cvRound(dy+100-30*sin(angle))), white, 3, 8, 0); } } cvEllipse( img, cvPoint(dx+150, dy+100), cvSize(100,70), 0, 0, 360, white, -1, 8, 0 ); cvEllipse( img, cvPoint(dx+115, dy+70), cvSize(30,20), 0, 0, 360, black, -1, 8, 0 ); cvEllipse( img, cvPoint(dx+185, dy+70), cvSize(30,20), 0, 0, 360, black, -1, 8, 0 ); cvEllipse( img, cvPoint(dx+115, dy+70), cvSize(15,15), 0, 0, 360, white, -1, 8, 0 ); cvEllipse( img, cvPoint(dx+185, dy+70), cvSize(15,15), 0, 0, 360, white, -1, 8, 0 ); cvEllipse( img, cvPoint(dx+115, dy+70), cvSize(5,5), 0, 0, 360, black, -1, 8, 0 ); cvEllipse( img, cvPoint(dx+185, dy+70), cvSize(5,5), 0, 0, 360, black, -1, 8, 0 ); cvEllipse( img, cvPoint(dx+150, dy+100), cvSize(10,5), 0, 0, 360, black, -1, 8, 0 ); cvEllipse( img, cvPoint(dx+150, dy+150), cvSize(40,10), 0, 0, 360, black, -1, 8, 0 ); cvEllipse( img, cvPoint(dx+27, dy+100), cvSize(20,35), 0, 0, 360, white, -1, 8, 0 ); cvEllipse( img, cvPoint(dx+273, dy+100), cvSize(20,35), 0, 0, 360, white, -1, 8, 0 ); } cvNamedWindow( "image", 1 ); cvShowImage( "image", img ); cvConvert( img, img32f ); findCComp( img32f ); cvConvert( img32f, img32s ); cvFindContours( img32s, storage, &contours, sizeof(CvContour), CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) ); //cvFindContours( img, storage, &contours, sizeof(CvContour), // CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) ); { const char* attrs[] = {"recursive", "1", 0}; cvSave("contours.xml", contours, 0, 0, cvAttrList(attrs, 0)); contours = (CvSeq*)cvLoad("contours.xml", storage, 0, 0); } // comment this out if you do not want approximation contours = cvApproxPoly( contours, sizeof(CvContour), storage, CV_POLY_APPROX_DP, 3, 1 ); cvNamedWindow( "contours", 1 ); cvCreateTrackbar( "levels+3", "contours", &levels, 7, on_trackbar ); { CvRNG rng = cvRNG(-1); CvSeq* tcontours = contours; cvCvtColor( img, img3, CV_GRAY2BGR ); while( tcontours->h_next ) tcontours = tcontours->h_next; for( ; tcontours != 0; tcontours = tcontours->h_prev ) { CvScalar color; color.val[0] = cvRandInt(&rng) % 256; color.val[1] = cvRandInt(&rng) % 256; color.val[2] = cvRandInt(&rng) % 256; color.val[3] = cvRandInt(&rng) % 256; cvDrawContours(img3, tcontours, color, color, 0, -1, 8, cvPoint(0,0)); if( tcontours->v_next ) { color.val[0] = cvRandInt(&rng) % 256; color.val[1] = cvRandInt(&rng) % 256; color.val[2] = cvRandInt(&rng) % 256; color.val[3] = cvRandInt(&rng) % 256; cvDrawContours(img3, tcontours->v_next, color, color, 1, -1, 8, cvPoint(0,0)); } } } cvShowImage( "colored", img3 ); on_trackbar(0); cvWaitKey(0); cvReleaseMemStorage( &storage ); cvReleaseImage( &img ); cvReleaseImage( &img32f ); cvReleaseImage( &img32s ); cvReleaseImage( &img3 ); return 0; }