int main( int argc, char **argv ){ int key; // キー入力用の変数 CvCapture *capture = 0; // カメラキャプチャ用の構造体 IplImage *frameImage; // キャプチャ画像用IplImage IplImage *frameImage2; // キャプチャ画像用IplImage2 // 画像を生成する IplImage *backgroundImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 ); //背景画像用IplImage IplImage *grayImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 ); //グレースケール画像用IplImage IplImage *differenceImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 ); //差分画像用IplImage IplImage *hsvImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 3 ); //HSV画像用IplImage IplImage *hueImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 ); //色相(H)情報用IplImage IplImage *saturationImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 ); //彩度(S)情報用IplImage IplImage *valueImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 ); //明度(V)情報用IplImage IplImage *thresholdImage1 = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 ); //明度がTHRES_BOTTOMより大きい領域用IplImage // IplImage *thresholdImage2 = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 ); //明度がTHRES_TOP以下の領域用IplImage // IplImage *thresholdImage3 = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 ); //thresholdImage1とthresholdImage2のAND演算結果用IplImage IplImage *lightImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 ); //光っている部分の領域の抽出結果用IplImage char windowNameCapture[] = "Capture"; //キャプチャした画像を表示するウィンドウの名前 char windowNameLight[] = "Light"; //光っている部分の領域を表示するウィンドウの名前 char windowNameCapture2[] = "Capture2"; //キャプチャした画像を表示するウィンドウの名前 char windowNameThreshold[] = "Threshold"; //thresholdImage1を表示するウィンドウの名前 CvMoments moment; double m_00; double m_10; double m_01; int gravityX; int gravityY; unsigned char h,s,v; int x, y; int m,d,ss; uchar h0,s0,v0,h1,s1,v1,h2,s2,v2,h3,s3,v3,vv; int rr,gg,bb; // カメラを初期化する if ( ( capture = cvCreateCameraCapture( 0 ) ) == NULL ) { // カメラが見つからなかった場合 printf( "カメラが見つかりません\n" ); return -1; } cvSetCaptureProperty (capture, 
CV_CAP_PROP_FRAME_WIDTH, WIDTH); cvSetCaptureProperty (capture, CV_CAP_PROP_FRAME_HEIGHT, HEIGHT); // ウィンドウを生成する cvNamedWindow( windowNameCapture, CV_WINDOW_AUTOSIZE ); cvNamedWindow( windowNameLight, CV_WINDOW_AUTOSIZE ); cvNamedWindow( windowNameCapture2, CV_WINDOW_AUTOSIZE ); cvNamedWindow( windowNameThreshold, CV_WINDOW_AUTOSIZE ); // 初期背景を設定するためにカメラから画像を取得 frameImage = cvQueryFrame( capture ); // frameImageをグレースケール化し、背景画像とする cvCvtColor( frameImage, backgroundImage, CV_BGR2GRAY ); frameImage2 = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 3 ); cvSet (frameImage2, CV_RGB(0,0,0)); //黒色で塗りつぶす rr=0; gg=150; bb=0; v=0; m=0; d=0; ss=0; // メインループ while( 1 ) { frameImage = cvQueryFrame( capture ); cvCvtColor( frameImage, hsvImage, CV_BGR2HSV ); /* 画素値を直接操作する*/ x = 160; y = 120; h0 = hsvImage ->imageData[hsvImage ->widthStep * y + x * 3]; // B s0 = hsvImage ->imageData[hsvImage ->widthStep * y + x * 3 + 1]; // G v0 = hsvImage ->imageData[hsvImage ->widthStep * y + x * 3 + 2]; // R frameImage->imageData[frameImage->widthStep * y+ x * 3] = 200; frameImage->imageData[frameImage->widthStep * y + x * 3 + 1] = 200; frameImage->imageData[frameImage->widthStep * y + x * 3 + 2] = 200; x = 161; y = 120; h1 = hsvImage ->imageData[hsvImage ->widthStep * y + x * 3]; // B s1 = hsvImage ->imageData[hsvImage ->widthStep * y + x * 3 + 1]; // G v1 = hsvImage ->imageData[hsvImage ->widthStep * y + x * 3 + 2]; // R frameImage->imageData[frameImage->widthStep * y+ x * 3] = 200; frameImage->imageData[frameImage->widthStep * y + x * 3 + 1] = 200; frameImage->imageData[frameImage->widthStep * y + x * 3 + 2] = 200; x = 160; y = 121; h2 = hsvImage ->imageData[hsvImage ->widthStep * y + x * 3]; // B s2= hsvImage ->imageData[hsvImage ->widthStep * y + x * 3 + 1]; // G v2 = hsvImage ->imageData[hsvImage ->widthStep * y + x * 3 + 2]; // R frameImage->imageData[frameImage->widthStep * y+ x * 3] = 200; frameImage->imageData[frameImage->widthStep * y + x * 3 + 1] = 200; 
frameImage->imageData[frameImage->widthStep * y + x * 3 + 2] = 200; x = 161; y = 121; h3 = hsvImage ->imageData[hsvImage ->widthStep * y + x * 3]; // B s3 = hsvImage ->imageData[hsvImage ->widthStep * y + x * 3 + 1]; // G v3 = hsvImage ->imageData[hsvImage ->widthStep * y + x * 3 + 2]; // R frameImage->imageData[frameImage->widthStep * y+ x * 3] = 200; frameImage->imageData[frameImage->widthStep * y + x * 3 + 1] = 200; frameImage->imageData[frameImage->widthStep * y + x * 3 + 2] = 200; vv=(v0+v1+v2+v3)/4; if (vv<200) { if(m==0) ss=1; if(ss) m=m+1; printf("0 m= %d,d=%02X \n",m,d); } else { // printf("%d \n",m); if(ss){ d=d+(1<<(m-1)); m=m+1; } printf("1 m= %d,d=%02X \n",m,d); } if(m>8){ printf("コード d= %c \n",d); if(d==97){ rr=0; gg=0; bb=150; } if(d==98){ rr=150; gg=0; bb=0; } m=0; d=0; ss=0; } // captureの入力画像フレームをframeImageに格納する // frameImage = cvQueryFrame( capture ); // frameImageをグレースケール化したものを、grayImageに格納する cvCvtColor( frameImage, grayImage, CV_BGR2GRAY ); // grayImageと背景画像との差分をとる cvAbsDiff( grayImage, backgroundImage, differenceImage ); // frameImageをBGRからHSVに変換する // cvCvtColor( frameImage, hsvImage, CV_BGR2HSV ); // HSV画像をH、S、V画像に分ける cvSplit( hsvImage, hueImage, saturationImage, valueImage, NULL ); // 明度が明るい部分を抽出、その部分のみ出力する cvThreshold( valueImage, thresholdImage1, THRESH_BOTTOM, THRESHOLD_MAX_VALUE, CV_THRESH_BINARY ); // cvThreshold( hueImage, thresholdImage2, THRESH_TOP, THRESHOLD_MAX_VALUE, CV_THRESH_BINARY_INV ); // cvAnd( thresholdImage1, thresholdImage2, thresholdImage3, NULL ); // 背景差分画像と明るい領域とのANDをとる cvAnd( differenceImage, thresholdImage1, lightImage, NULL ); // 光っている領域の重心を算出する cvMoments( lightImage, &moment, 0 ); m_00 = cvGetSpatialMoment( &moment, 0, 0 ); m_10 = cvGetSpatialMoment( &moment, 1, 0 ); m_01 = cvGetSpatialMoment( &moment, 0, 1 ); gravityX = m_10 / m_00; gravityY = m_01 / m_00; if (0<gravityX){ h = hsvImage ->imageData[hsvImage ->widthStep * (gravityY) + gravityX * 3 ]; // B s = hsvImage ->imageData[hsvImage ->widthStep * gravityY + 
gravityX * 3 + 1]; // G v = hsvImage ->imageData[hsvImage ->widthStep * gravityY + gravityX * 3 + 2]; // R // printf ("x= %d ,y= %d v= %d,s= %d,h= %d \n" ,gravityX,gravityY,v,s,h); // 画像上に円を描画する if (v>200){ cvCircle( frameImage2, cvPoint( gravityX, gravityY ), CIRCLE_RADIUS, CV_RGB( rr, gg, bb ), LINE_THICKNESS, LINE_TYPE, 0 ); } } // 画像を表示する cvShowImage( windowNameCapture, frameImage ); cvShowImage( windowNameLight, lightImage ); cvShowImage( windowNameCapture2, frameImage2); cvShowImage( windowNameThreshold, thresholdImage1); // キー入力判定 key = cvWaitKey( 10 ); if( key == 'q' ) // 'q'キーが押されたらループを抜ける break; else if( key == 'b' ) { // 'b'キーが押されたら、その時点での画像を背景画像とする frameImage = cvQueryFrame( capture ); cvCvtColor( frameImage, backgroundImage, CV_BGR2GRAY ); } else if(key == 'c') { // 'c'キーが押されたら画像を保存 cvSaveImage( "image/frame.bmp", frameImage ); cvSaveImage( "image/light.bmp", lightImage ); } } // キャプチャを解放する cvReleaseCapture( &capture ); // メモリを解放する cvReleaseImage( &backgroundImage ); cvReleaseImage( &grayImage ); cvReleaseImage( &differenceImage ); cvReleaseImage( &hsvImage ); cvReleaseImage( &hueImage ); cvReleaseImage( &saturationImage ); cvReleaseImage( &valueImage ); cvReleaseImage( &thresholdImage1 ); // cvReleaseImage( &thresholdImage2 ); // cvReleaseImage( &thresholdImage3 ); cvReleaseImage( &lightImage ); // ウィンドウを破棄する cvDestroyWindow( windowNameCapture ); cvDestroyWindow( windowNameLight ); cvDestroyWindow( windowNameThreshold ); cvDestroyWindow( windowNameCapture2 ); return 0; }
int main(int argc, char ** argv) { IplImage* image = cvLoadImage("1.png"); IplImage* gray = cvCreateImage(cvGetSize(image), 8, 1); IplImage* bin = cvCreateImage(cvGetSize(image), 8, 1); IplImage* contourBeatle = cvCreateImage(cvGetSize(image), 8, 1); IplImage* contourScan = cvCreateImage(cvGetSize(image), 8, 1); IplImage* contourLane = cvCreateImage(cvGetSize(image), 8, 1); IplImage* rgbBeatle = cvCloneImage(image); IplImage* rgbScan = cvCloneImage(image); IplImage* rgbLane = cvCloneImage(image); cvCvtColor(image, gray, CV_BGR2GRAY); cvThreshold(gray, bin, 0, 255, CV_THRESH_OTSU); Beatle(bin, contourBeatle); Scan(bin, contourScan); Lane(bin, contourLane); for (int r = 0; r < image->height; ++r) { for (int c = 0; c < image->width; ++c) { if (*cvPtr2D(contourBeatle, r, c) == 0) { *cvPtr2D(rgbBeatle, r, c) = 0; // B *(cvPtr2D(rgbBeatle, r, c) + 1) = 0; // G *(cvPtr2D(rgbBeatle, r, c) + 2) = 0; // R } if (*cvPtr2D(contourScan, r, c) == 0) { *cvPtr2D(rgbScan, r, c) = 0; // B *(cvPtr2D(rgbScan, r, c) + 1) = 0; // G *(cvPtr2D(rgbScan, r, c) + 2) = 0; // R } if (*cvPtr2D(contourLane, r, c) == 0) { *cvPtr2D(rgbLane, r, c) = 0; // B *(cvPtr2D(rgbLane, r, c) + 1) = 0; // G *(cvPtr2D(rgbLane, r, c) + 2) = 0; // R } } } cvShowImage("image", image); cvShowImage("gray", gray); cvShowImage("bin", bin); cvShowImage("contourBeatle", contourBeatle); cvShowImage("contourScan", contourScan); cvShowImage("contourLane", contourLane); cvShowImage("rgbBeatle", rgbBeatle); cvShowImage("rgbScan", rgbScan); cvShowImage("rgbLane", rgbLane); cvSaveImage("im_contourBeatle.bmp", contourBeatle); cvSaveImage("im_contourScan.bmp", contourScan); cvSaveImage("im_contourLane.bmp", contourLane); cvSaveImage("im_rgbBeatle.bmp", rgbBeatle); cvSaveImage("im_rgbScan.bmp", rgbScan); cvSaveImage("im_rgbLane.bmp", rgbLane); while (true) { int c = cvWaitKey(); if ((char)c == 27) break; } }
int main( int argc, char** argv ) { forceUSLocaleToKeepOurSanity(); CvSize board_size = {0,0}; float square_size = 1.f, aspect_ratio = 1.f; const char* out_filename = "out_camera_data.yml"; const char* input_filename = 0; int i, image_count = 10; int write_extrinsics = 0, write_points = 0; int flags = 0; CvCapture* capture = 0; FILE* f = 0; char imagename[1024]; CvMemStorage* storage; CvSeq* image_points_seq = 0; int elem_size, flip_vertical = 0; int delay = 1000; clock_t prev_timestamp = 0; CvPoint2D32f* image_points_buf = 0; CvFont font = cvFont( 1, 1 ); double _camera[9], _dist_coeffs[4]; CvMat camera = cvMat( 3, 3, CV_64F, _camera ); CvMat dist_coeffs = cvMat( 1, 4, CV_64F, _dist_coeffs ); CvMat *extr_params = 0, *reproj_errs = 0; double avg_reproj_err = 0; int mode = DETECTION; int undistort_image = 0; CvSize img_size = {0,0}; const char* live_capture_help = "When the live video from camera is used as input, the following hot-keys may be used:\n" " <ESC>, 'q' - quit the program\n" " 'g' - start capturing images\n" " 'u' - switch undistortion on/off\n"; if( argc < 2 ) { printf( "This is a camera calibration sample.\n" "Usage: calibration\n" " -w <board_width> # the number of inner corners per one of board dimension\n" " -h <board_height> # the number of inner corners per another board dimension\n" " [-n <number_of_frames>] # the number of frames to use for calibration\n" " # (if not specified, it will be set to the number\n" " # of board views actually available)\n" " [-d <delay>] # a minimum delay in ms between subsequent attempts to capture a next view\n" " # (used only for video capturing)\n" " [-s <square_size>] # square size in some user-defined units (1 by default)\n" " [-o <out_camera_params>] # the output filename for intrinsic [and extrinsic] parameters\n" " [-op] # write detected feature points\n" " [-oe] # write extrinsic parameters\n" " [-zt] # assume zero tangential distortion\n" " [-a <aspect_ratio>] # fix aspect ratio (fx/fy)\n" " [-p] # fix the 
principal point at the center\n" " [-v] # flip the captured images around the horizontal axis\n" " [input_data] # input data, one of the following:\n" " # - text file with a list of the images of the board\n" " # - name of video file with a video of the board\n" " # if input_data not specified, a live view from the camera is used\n" "\n" ); printf( "%s", live_capture_help ); return 0; } for( i = 1; i < argc; i++ ) { const char* s = argv[i]; if( strcmp( s, "-w" ) == 0 ) { if( sscanf( argv[++i], "%u", &board_size.width ) != 1 || board_size.width <= 0 ) return fprintf( stderr, "Invalid board width\n" ), -1; } else if( strcmp( s, "-h" ) == 0 ) { if( sscanf( argv[++i], "%u", &board_size.height ) != 1 || board_size.height <= 0 ) return fprintf( stderr, "Invalid board height\n" ), -1; } else if( strcmp( s, "-s" ) == 0 ) { if( sscanf( argv[++i], "%f", &square_size ) != 1 || square_size <= 0 ) return fprintf( stderr, "Invalid board square width\n" ), -1; } else if( strcmp( s, "-n" ) == 0 ) { if( sscanf( argv[++i], "%u", &image_count ) != 1 || image_count <= 3 ) return printf("Invalid number of images\n" ), -1; } else if( strcmp( s, "-a" ) == 0 ) { if( sscanf( argv[++i], "%f", &aspect_ratio ) != 1 || aspect_ratio <= 0 ) return printf("Invalid aspect ratio\n" ), -1; } else if( strcmp( s, "-d" ) == 0 ) { if( sscanf( argv[++i], "%u", &delay ) != 1 || delay <= 0 ) return printf("Invalid delay\n" ), -1; } else if( strcmp( s, "-op" ) == 0 ) { write_points = 1; } else if( strcmp( s, "-oe" ) == 0 ) { write_extrinsics = 1; } else if( strcmp( s, "-zt" ) == 0 ) { flags |= CV_CALIB_ZERO_TANGENT_DIST; } else if( strcmp( s, "-p" ) == 0 ) { flags |= CV_CALIB_FIX_PRINCIPAL_POINT; } else if( strcmp( s, "-v" ) == 0 ) { flip_vertical = 1; } else if( strcmp( s, "-o" ) == 0 ) { out_filename = argv[++i]; } else if( s[0] != '-' ) input_filename = s; else return fprintf( stderr, "Unknown option %s", s ), -1; } if( input_filename ) { fprintf( stderr, "Trying to open %s \n" , input_filename ); 
capture = cvCreateFileCapture( input_filename ); if( !capture ) { fprintf(stderr,"Warning , cvCreateFileCapture failed to open %s \n",input_filename); f = fopen( input_filename, "rt" ); if( !f ) return fprintf( stderr, "The input file could not be opened\n" ), -1; image_count = -1; } mode = CAPTURING; } else capture = cvCreateCameraCapture(0); if( !capture && !f ) return fprintf( stderr, "Could not initialize video capture\n" ), -2; if( capture ) printf( "%s", live_capture_help ); elem_size = board_size.width*board_size.height*sizeof(image_points_buf[0]); storage = cvCreateMemStorage( MAX( elem_size*4, 1 << 16 )); image_points_buf = (CvPoint2D32f*)cvAlloc( elem_size ); image_points_seq = cvCreateSeq( 0, sizeof(CvSeq), elem_size, storage ); cvNamedWindow( "Image View", 1 ); for(;;) { IplImage *view = 0, *view_gray = 0; int count = 0, found, blink = 0; CvPoint text_origin; CvSize text_size = {0,0}; int base_line = 0; char s[100]; int key; if( f && fgets( imagename, sizeof(imagename)-2, f )) { int l = strlen(imagename); if( l > 0 && imagename[l-1] == '\n' ) imagename[--l] = '\0'; if( l > 0 ) { if( imagename[0] == '#' ) continue; view = cvLoadImage( imagename, 1 ); } } else if( capture ) { IplImage* view0 = cvQueryFrame( capture ); if( view0 ) { view = cvCreateImage( cvGetSize(view0), IPL_DEPTH_8U, view0->nChannels ); if( view0->origin == IPL_ORIGIN_BL ) cvFlip( view0, view, 0 ); else cvCopy( view0, view ); } } if( !view ) { if( image_points_seq->total > 0 ) { image_count = image_points_seq->total; goto calibrate; } break; } if( flip_vertical ) cvFlip( view, view, 0 ); img_size = cvGetSize(view); found = cvFindChessboardCorners( view, board_size, image_points_buf, &count, CV_CALIB_CB_ADAPTIVE_THRESH ); #if 1 // improve the found corners' coordinate accuracy view_gray = cvCreateImage( cvGetSize(view), 8, 1 ); cvCvtColor( view, view_gray, CV_BGR2GRAY ); cvFindCornerSubPix( view_gray, image_points_buf, count, cvSize(11,11), cvSize(-1,-1), cvTermCriteria( 
CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 )); cvReleaseImage( &view_gray ); #endif if( mode == CAPTURING && found && (f || clock() - prev_timestamp > delay*1e-3*CLOCKS_PER_SEC) ) { cvSeqPush( image_points_seq, image_points_buf ); prev_timestamp = clock(); blink = !f; #if 1 if( capture ) { sprintf( imagename, "view%05d.png", image_points_seq->total - 1 ); cvSaveImage( imagename, view ); } #endif } cvDrawChessboardCorners( view, board_size, image_points_buf, count, found ); cvGetTextSize( "100/100", &font, &text_size, &base_line ); text_origin.x = view->width - text_size.width - 10; text_origin.y = view->height - base_line - 10; if( mode == CAPTURING ) { if( image_count > 0 ) sprintf( s, "%d/%d", image_points_seq ? image_points_seq->total : 0, image_count ); else sprintf( s, "%d/?", image_points_seq ? image_points_seq->total : 0 ); } else if( mode == CALIBRATED ) sprintf( s, "Calibrated" ); else sprintf( s, "Press 'g' to start" ); cvPutText( view, s, text_origin, &font, mode != CALIBRATED ? CV_RGB(255,0,0) : CV_RGB(0,255,0)); if( blink ) cvNot( view, view ); if( mode == CALIBRATED && undistort_image ) { IplImage* t = cvCloneImage( view ); cvUndistort2( t, view, &camera, &dist_coeffs ); cvReleaseImage( &t ); } cvShowImage( "Image View", view ); key = cvWaitKey(capture ? 
50 : 500); if( key == 27 ) break; if( key == 'u' && mode == CALIBRATED ) undistort_image = !undistort_image; if( capture && key == 'g' ) { mode = CAPTURING; cvClearMemStorage( storage ); image_points_seq = cvCreateSeq( 0, sizeof(CvSeq), elem_size, storage ); } if( mode == CAPTURING && (unsigned)image_points_seq->total >= (unsigned)image_count ) { calibrate: cvReleaseMat( &extr_params ); cvReleaseMat( &reproj_errs ); int code = run_calibration( image_points_seq, img_size, board_size, square_size, aspect_ratio, flags, &camera, &dist_coeffs, &extr_params, &reproj_errs, &avg_reproj_err ); // save camera parameters in any case, to catch Inf's/NaN's save_camera_params( out_filename, image_count, img_size, board_size, square_size, aspect_ratio, flags, &camera, &dist_coeffs, write_extrinsics ? extr_params : 0, write_points ? image_points_seq : 0, reproj_errs, avg_reproj_err ); if( code ) mode = CALIBRATED; else mode = DETECTION; } if( !view ) break; cvReleaseImage( &view ); } if( capture ) cvReleaseCapture( &capture ); if( storage ) cvReleaseMemStorage( &storage ); return 0; }
/*
 * Classify every pixel of `frame` into one of NUM_COLOR_TYPES colour classes
 * (via getPixelColorType on the HSV representation), accumulate the
 * per-class pixel counts in the global `colorCount` and the mean RGB of each
 * class, then print the classes sorted by frequency.
 *
 * Returns 0 on success, -1 if `frame` is NULL.
 */
int calc_hsv_colors(IplImage *frame)
{
    if(!frame) return -1;

    IplImage *image = 0, *hsv = 0, *dst = 0, *color_indexes = 0;
    image = cvCloneImage(frame);
    hsv = cvCreateImage( cvGetSize(image), IPL_DEPTH_8U, 3 );
    cvCvtColor( image, hsv, CV_BGR2HSV );

    dst = cvCreateImage( cvGetSize(image), IPL_DEPTH_8U, 3 );           // classified HSV output
    color_indexes = cvCreateImage( cvGetSize(image), IPL_DEPTH_8U, 1 ); // per-pixel class index

    CvScalar rgb_colors[NUM_COLOR_TYPES];  // RGB sums (later means) per class
    int i = 0, x = 0, y = 0;

    // Reset the global per-class counters and the accumulators.
    memset(colorCount, 0, sizeof(colorCount));
    for(i = 0; i < NUM_COLOR_TYPES; i++) {
        rgb_colors[i] = cvScalarAll(0);
    }

    for (y = 0; y < hsv->height; y++) {
        for (x = 0; x < hsv->width; x++) {
            // Read the HSV pixel.
            uchar H = CV_PIXEL(uchar, hsv, x, y)[0]; // Hue
            uchar S = CV_PIXEL(uchar, hsv, x, y)[1]; // Saturation
            uchar V = CV_PIXEL(uchar, hsv, x, y)[2]; // Value (brightness)

            // Classify the pixel.
            int ctype = getPixelColorType(H, S, V);

            // Write the class's representative HSV into dst.
            CV_PIXEL(uchar, dst, x, y)[0] = cCTHue[ctype];
            CV_PIXEL(uchar, dst, x, y)[1] = cCTSat[ctype];
            CV_PIXEL(uchar, dst, x, y)[2] = cCTVal[ctype];

            // Accumulate the original BGR values for the class mean.
            rgb_colors[ctype].val[0] += CV_PIXEL(uchar, image, x, y)[0]; // B
            rgb_colors[ctype].val[1] += CV_PIXEL(uchar, image, x, y)[1]; // G
            rgb_colors[ctype].val[2] += CV_PIXEL(uchar, image, x, y)[2]; // R

            // Remember which class this pixel belongs to, and count it.
            CV_PIXEL(uchar, color_indexes, x, y)[0] = ctype;
            colorCount[ctype]++;
        }
    }

    // Average the RGB sums.
    // BUG FIX: the original divided unconditionally; a class with zero
    // pixels produced a division by zero (NaN/inf means).
    for(i = 0; i < NUM_COLOR_TYPES; i++) {
        if (colorCount[i] > 0) {
            rgb_colors[i].val[0] /= colorCount[i];
            rgb_colors[i].val[1] /= colorCount[i];
            rgb_colors[i].val[2] /= colorCount[i];
        }
    }

    // Copy (class, count) pairs into a vector and sort by frequency.
    std::vector< std::pair< int, uint > > colors;
    colors.reserve(NUM_COLOR_TYPES);
    for(i = 0; i < NUM_COLOR_TYPES; i++) {
        colors.push_back( std::make_pair( i, (uint)colorCount[i] ) );
    }
    std::sort( colors.begin(), colors.end(), colors_sort );

    // Debug output: class codes, names, and counts.
    // (format fix: counts are unsigned, printed with %u instead of %d)
    for(size_t k = 0; k < colors.size(); k++) {
        printf("[i] color %d (%s) - %u\n",
               colors[k].first, sCTypes[colors[k].first], colors[k].second );
    }
    printf("[i] color code: \n");
    for(i = 0; i < NUM_COLOR_TYPES; i++)
        printf("%02d ", colors[i].first);
    printf("\n");
    printf("[i] color names: \n");
    for(i = 0; i < NUM_COLOR_TYPES; i++)
        printf("%s ", sCTypes[colors[i].first]);
    printf("\n");

    cvReleaseImage(&image);
    cvReleaseImage(&hsv);
    cvReleaseImage(&dst);
    cvReleaseImage(&color_indexes);
    return 0;
}
// Timer-event handler.
//
// `timer`     : grab a frame, run face detection, draw to the dialog, and
//               (when in recognition mode) save cropped face images to disk.
// `faceTimer` : run recognition on the saved images and refresh the match
//               percentage and capture-property labels.
void CFaceProcess::OnTimer(UINT nIDEvent){
    CString temp="";
    const char *pszStr="";
    int match=0;
    switch(nIDEvent){
    // --- fast timer: capture / detect / display -------------------------
    case timer:
        if(m_Video){
            // Grab and retrieve the next frame; bail out if either fails.
            if( !cvGrabFrame( m_Video)) return ;
            m_GrabFrame = cvRetrieveFrame(m_Video );
            if( !m_GrabFrame) return ;
            // Lazily allocate the persistent copy buffer on first use.
            if( !m_SaveFrame)
                m_SaveFrame = cvCreateImage( cvSize(m_GrabFrame->width,m_GrabFrame->height),
                                             IPL_DEPTH_8U, m_GrabFrame->nChannels );
            // Normalise orientation: copy top-left-origin frames, flip others.
            if( m_GrabFrame->origin == IPL_ORIGIN_TL )
                cvCopy( m_GrabFrame, m_SaveFrame, 0 );
            else
                cvFlip( m_GrabFrame, m_SaveFrame, 0 );
            // Detect faces in the captured frame and display the result.
            faceDetector.detect_and_draw(m_SaveFrame);
            m_CvvImage.CopyOf(m_SaveFrame,1);
            m_CvvImage.DrawToHDC(hDC,&rect);
        }
        // Derive b_Process: 1 = camera open, capture mode (can save pics);
        // 2 = camera open, recognition mode; 0 = camera closed.
        if(m_Video && b_flagProcess==true) b_Process=1;
        else if(m_Video && b_flagProcess==false) b_Process=2;
        else b_Process=0;
        // Enable/disable dialog controls according to the current mode.
        if(b_Process==1) // can close the camera and save pictures
        {
            GetDlgItem(ID_CLOSE_CAPTURE)->EnableWindow(true);
            GetDlgItem(IDC_SAVE_PIC)->EnableWindow(true);
        }
        else if(b_Process==2) // recognition mode: no manual picture saving
        {
            GetDlgItem(ID_CLOSE_CAPTURE)->EnableWindow(true);
            GetDlgItem(IDC_RECOGNISE)->EnableWindow(true);
            GetDlgItem(IDC_SAVE_PIC)->EnableWindow(false);
            // Save test images for recognition: five files are written in
            // rotation (count cycles 1..4,0), overwriting older ones; the
            // slow timer (faceTimer) trains on them and updates the match %.
            if((count % 5)==0) count=1;
            pszStr = testFileName[count].GetBuffer(testFileName[count].GetLength());
            // NOTE(review): m_snap is allocated on every tick and no release
            // is visible in this handler — looks like a per-tick leak unless
            // it is freed elsewhere; confirm and consider reusing the buffer.
            m_snap=cvCreateImage(cvGetSize(m_SaveFrame),m_SaveFrame->depth,m_SaveFrame->nChannels);
            cvCopy(m_SaveFrame,m_snap,NULL);
            // Detect the face in the snapshot, simplify it to grayscale, and
            // write it to the rotating test-file slot.
            try{
                if(faceDetector.detect_and_draw(m_snap)){
                    faceImage=faceDetector.getFaceImage();
                    if(faceImage){
                        if(faceSimplifier.Simplify(faceImage)){
                            faceGray=faceSimplifier.getFaceImage();
                            cvSaveImage(pszStr,faceGray);
                            count++;
                        }
                    }
                }
            }catch(...) // swallow detection failures to avoid aborting the timer
            {
                return ;
            }
        }else // b_Process==0: only "open camera" / "exit" remain available
        {
            GetDlgItem(IDC_STATIC_OTHER)->EnableWindow(FALSE);
            GetDlgItem(IDC_SAVE_PIC)->EnableWindow(FALSE);
            GetDlgItem(IDC_RECOGNISE)->EnableWindow(FALSE);
            GetDlgItem(ID_CLOSE_CAPTURE)->EnableWindow(FALSE);
        }
        // Frequently-changing labels are refreshed here; slower-changing ones
        // are handled by the faceTimer branch below.
        m_fameCount++;
        itoa(m_fameCount,chEdit,10);
        SetDlgItemText(IDC_STATIC_FRAME_COUNT,chEdit);
        break;
    // --- slow timer: recognition + property labels ----------------------
    case faceTimer:
        // Run recognition over the saved test images and accumulate stats.
        correct=correct+faceRecognitor.recognize();
        totalTest+=5;
        matchPercent=float(correct)/totalTest;
        match=int(matchPercent*100);
        // Update the match-percentage label.
        itoa(match,chEdit,10);
        temp=" "+CString(chEdit)+" %";
        SetDlgItemText(IDC_STATIC_CORRECT,temp);
        GetDlgItem(IDC_STATIC_CORRECT)->EnableWindow(TRUE);
        // Allow the user to proceed once the match rate is good enough.
        if(matchPercent>0.75 && totalTest>20)
            GetDlgItem(IDC_OK)->EnableWindow(TRUE);
        // Refresh capture-property labels (position, size, FPS).
        m_vieoProtery=cvGetCaptureProperty(m_Video,CV_CAP_PROP_POS_MSEC);
        itoa(m_vieoProtery,chEdit,10);
        SetDlgItemText(IDC_STATIC_OTHER,chEdit);
        m_vieoProtery=cvGetCaptureProperty(m_Video,CV_CAP_PROP_FRAME_WIDTH);
        itoa(m_vieoProtery,chEdit,10);
        SetDlgItemText(IDC_STATIC_Width,chEdit);
        m_vieoProtery=cvGetCaptureProperty(m_Video,CV_CAP_PROP_FRAME_HEIGHT);
        itoa(m_vieoProtery,chEdit,10);
        SetDlgItemText(IDC_STATIC_HEIGHT,chEdit);
        m_vieoProtery=cvGetCaptureProperty(m_Video,CV_CAP_PROP_FPS);
        itoa(m_vieoProtery,chEdit,10);
        SetDlgItemText(IDC_STATIC_FPS,chEdit);
        break;
    default:
        break;
    }
    // Forward to the base-class handler.
    CDialog::OnTimer(nIDEvent);
}
// Write `image` to `filepath` using cvSaveImage (format inferred from the
// file extension).  ROBUSTNESS FIX: guard against NULL arguments, which
// would otherwise crash/assert inside OpenCV.
void Capture::saveFrame(const char* filepath, IplImage* image)
{
    if (!filepath || !image)
        return;
    cvSaveImage(filepath, image);
}
int main(int argc, const char **argv) { //Variables int degrees,PosRelX,PosRelY; float radians,Dlaser,ODM_ang, ang; int width = 500, height = 500; //Create the size of the map here (in pixel) int centroX = (width / 2); int centroY = (height / 2); playerc_client_t *client; playerc_laser_t *laser; playerc_position2d_t *position2d; CvPoint pt,pt1,pt2; CvScalar cinzaE,preto,cinzaC; char window_name[] = "Map"; IplImage* image = cvCreateImage( cvSize(width,height), 8, 3 ); cvNamedWindow(window_name, 1 ); preto = CV_RGB(0, 0, 0); //for indicating obstacles cinzaE = CV_RGB(92, 92, 92); //To indicate the stranger cinzaC = CV_RGB(150, 150, 150); //To indicate free spaces client = playerc_client_create(NULL, "localhost", 6665); if (playerc_client_connect(client) != 0) return -1; laser = playerc_laser_create(client, 0); if (playerc_laser_subscribe(laser, PLAYERC_OPEN_MODE)) return -1; position2d = playerc_position2d_create(client, 0); if (playerc_position2d_subscribe(position2d, PLAYERC_OPEN_MODE) != 0) { fprintf(stderr, "error: %s\n", playerc_error_str()); return -1; } if (playerc_client_datamode (client, PLAYERC_DATAMODE_PULL) != 0) { fprintf(stderr, "error: %s\n", playerc_error_str()); return -1; } if (playerc_client_set_replace_rule (client, -1, -1, PLAYER_MSGTYPE_DATA, -1, 1) != 0) { fprintf(stderr, "error: %s\n", playerc_error_str()); return -1; } playerc_position2d_enable(position2d, 1); // initialise motors playerc_position2d_set_odom(position2d, 0, 0, 0); // Set odometer to zero cvSet(image, cinzaE,0); //set the image colour to dark pt.x = centroX; // Zero coordinate for x pt.y = centroY; // Zero coordinate for y while(1) { playerc_client_read(client); cvSaveImage("mapa.jpg",image); playerc_client_read(client); for (degrees = 2; degrees <= 360; degrees+=2) { Dlaser = laser->scan[degrees][0]; if (Dlaser < 8) { radians = graus2rad (degrees/2); //Convert the angle of the laser to radians ODM_ang = position2d->pa; //Obtain the angle relative to the robot ang = 
((1.5*PI)+radians+ODM_ang); //Converte the angle relative to the world PosRelX = arredonda(position2d->px); //Position x relative to robot PosRelY = arredonda(position2d->py); //Position y relative to robot pt1.y = (centroY-PosRelY); //Co-ordinated global y of the robot pt1.x = (centroX+PosRelX); //Co-ordinated global x of the robot //t converts polar coordinates for rectangular (global) pt.y = (int)(pt1.y-(sin(ang)*Dlaser*10)); pt.x = (int)(pt1.x+(cos(ang)*Dlaser*10)); //The free area draws cvline cvLine(image, pt1,pt,cinzaC, 1,4,0); //marks the object in the map cvLine(image, pt,pt,preto, 1,4,0); //Shows the result of the map to the screen cvShowImage(window_name, image ); cvWaitKey(10); } } } //Disconnect player playerc_laser_unsubscribe(laser); playerc_laser_destroy(laser); playerc_client_disconnect(client); playerc_client_destroy(client); //Destroy the OpenCV window cvReleaseImage cvReleaseImage(&image); cvDestroyWindow(window_name); return 0; }
/* Function: DetectAndPreProcess Purpose: Given a file and a name, try to find the face in the image (largest face) Save this file as the name to disk Notes: Throws std::string if somthing goes wrong Returns: true if face found and data saved, false if no face found */ bool DetectAndPreProcess(const char *image, const char* name) { IplImage* faceImage = NULL; bool bRes = false; // try to open the given file containing the face // the one indicates that we assume the image is color faceImage = cvLoadImage(image,1); if ( image ) { try { FaceDetector* fd = new FaceDetector(faceImage, true); // find the largest face in the image fd->Detect(true); IplImage *tempFace; // did we find the face? if ( !fd->GetFaceVec().empty() ) { // get the face from the face detector IplImage* face = fd->GetFaceVec()[0]; // now perform the rest of the preprocessing on the face tempFace = cvCreateImage(cvSize(face->width, face->height), face->depth, 1); ConvertToGreyScale(face, tempFace); Resize(face, tempFace); // do histogram equalization on the found face cvEqualizeHist(tempFace, tempFace); // try to save it to disk if ( !cvSaveImage( name, tempFace ) ) { std::string err; err = "Error: DetectAndPreProcess could not save "; err += image; err += " as "; err += name; throw err; } bRes = true; } delete fd; } catch (...) { throw; } } else { // could not open image std::string err; err = "Error: DetectAndPreProcess could not open "; err += image; throw err; } return bRes; }
/*
 * main - webcam head-tracking mouse driver.
 *
 * Grabs frames from the default camera, finds a face (Haar cascade, then
 * CAMSHIFT tracking once found), estimates nose-tip and pupil positions
 * inside heuristic sub-rectangles of the face, and warps the X11 mouse
 * pointer based on the nose/eye geometry. Loops until 'q' is pressed in
 * the OpenCV window.
 *
 * NOTE(review): relies on project-defined macros/helpers (CAMSHIFT,
 * Haar_Detect, camshift*, calc_stable_ic, CALC_POINT, NOSE_*/EYE_*
 * ratios, Mouse, TrackObject) declared elsewhere in this project.
 */
int main(int argc, const char *argv[])
{
    /* RUN AS DAEMON
    pid_t pid;
    if((pid = fork()))
        return(pid < 0);
    */
    int ret_val = EXIT_FAILURE;
    int is_tracking = 0;   /* nonzero once a face was found (CAMSHIFT initialized) */
    int has_face;          /* set per-frame when a face is available */

    /* XLIB VAR Init */
    Display* display = XOpenDisplay(NULL);
    assert(display);
    //int Screen_Count = XScreenCount(display);
    int Screen_Count = XScreenCount(display); /* For laptop */
    Window* window = (Window *)malloc(sizeof(Window)*Screen_Count);
    Window ret;
    Mouse mouse;
    unsigned int mask;
    int i;

    /* Capture Init */
    CvCapture* capture = cvCaptureFromCAM(-1);
    CvMemStorage* mem_storage = cvCreateMemStorage(0);
    /* Haar cascades for face, nose tip, and each eye (paths are project macros) */
    CvHaarClassifierCascade* haarclassifier_face = (CvHaarClassifierCascade*)cvLoad(CASCADE_XML_FILENAME_FACE, 0, 0, 0);
    CvHaarClassifierCascade* haarclassifier_nose = (CvHaarClassifierCascade*)cvLoad(CASCADE_XML_FILENAME_NOSE, 0, 0, 0);
    CvHaarClassifierCascade* haarclassifier_eyel = (CvHaarClassifierCascade*)cvLoad(CASCADE_XML_FILENAME_EYEL, 0, 0, 0);
    CvHaarClassifierCascade* haarclassifier_eyer = (CvHaarClassifierCascade*)cvLoad(CASCADE_XML_FILENAME_EYER, 0, 0, 0);
    IplImage* image;
    //cvSetCaptureProperty(capture,CV_CAP_PROP_FRAME_WIDTH, 1280);
    //cvSetCaptureProperty(capture,CV_CAP_PROP_FRAME_HEIGHT, 1024);
    int res_w = cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH);
    int res_h = cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT);
    //double fps = cvGetCaptureProperty(capture, CV_CAP_PROP_FPS);
    int counter = 0;
    printf("Capturing : %dx%d \n", res_w, res_h);
    cvNamedWindow("Window", CV_WINDOW_NORMAL);
    CvRect tracking_window;
    CvPoint nosetip, lefteye, righteye;
    CvRect face, l_eye, r_eye, nose;
    TrackObject face_obj;
    //isophote_init();

    while(1) {
        /* Find the screen the pointer is currently on; window[i] is its root. */
        for(i = 0; i < Screen_Count; i++) {
            window[i] = XRootWindow(display, i);
            if(XQueryPointer(display, window[i], &ret, &ret,
                             &mouse.root.x, &mouse.root.y,
                             &mouse.win.x, &mouse.win.y, &mask))
                break;
        }
        has_face = 0;
        image = cvQueryFrame(capture);
        if(is_tracking && CAMSHIFT) {
            //CAMSHIFT
            /* Skip frame if CAMSHIFT did not converge within the iteration cap. */
            if(CAMSHIFT_MAX_ITER > camshift(image, &face_obj))
                continue;
            has_face = 1;
            cvEllipseBox(image, face_obj.track_box, CV_RGB(255, 0, 0), 3, CV_AA, 0);
            tracking_window = face_obj.track_window;
            /* Shrink the tracked box toward the face center. */
            tracking_window.y += tracking_window.height*0.2;
            tracking_window.height *= 0.4;
            tracking_window.width *= 0.6;
        } else if(!Haar_Detect(image, haarclassifier_face, mem_storage, &face)) {
            /* Haar_Detect returns 0 on success (face written into `face`). */
            /*
            tracking_window.x += tracking_window.width*0.1;
            tracking_window.width *= 0.8;
            tracking_window.height *= 0.8;
            */
            cvSetImageROI(image, face);
#ifdef DEBUG
            cvSaveImage("face.png", image, 0);
#endif
#if CAMSHIFT
            camshift_init(image, &face_obj);
            printf("Face Found, Start Tracking...\n");
#endif
            cvResetImageROI(image);
            is_tracking = 1;
            has_face = 1;
        }

        //Once face is detected
        if(has_face) {
            //Draw Face Area
            cvRectangle(image, cvPoint(face.x, face.y),
                        cvPoint(face.x+face.width, face.y+face.height),
                        CV_RGB(255, 255, 255), 3, 8, 0);

            /* Estimate eye and nose search rectangles from fixed face ratios
               (NOSE_*/EYE_* are project constants); coordinates are in the
               full image (NO ROI). */
            nose = face;
            //nose
            nose.y += (1-NOSE_UPPER)*face.height;
            nose.height *= (NOSE_UPPER-NOSE_LOWER);
            nose.x += NOSE_LR*face.width;
            nose.width *= (1-2*NOSE_LR);
            l_eye = face;
            l_eye.y += (1-EYE_UPPER)*face.height;
            l_eye.height *= EYE_UPPER-EYE_LOWER;
            l_eye.x += EYE_LR*face.width;
            l_eye.width *= EYE_SIZE;
            r_eye = l_eye;
            r_eye.x += (1-2*EYE_LR)*face.width - r_eye.width;

            //detect nose
            /* NOSE AREA
            cvRectangle(image, cvPoint(tracking_window.x, tracking_window.y),
                        cvPoint(tracking_window.x+tracking_window.width,
                                tracking_window.y+tracking_window.height),
                        CV_RGB(0, 255, 0), 3, 8, 0);
            */
            cvSetImageROI(image, nose);
            if(!Haar_Detect(image, haarclassifier_nose, mem_storage, &tracking_window)) {
                nosetip = CALC_POINT(tracking_window);
                cvRectangle(image, cvPoint(nosetip.x-3, nosetip.y-3),
                            cvPoint(nosetip.x+3, nosetip.y+3),
                            CV_RGB(255, 0, 0), 3, 8, 0);
                /* Translate nose tip from ROI-local to image coordinates. */
                nosetip.x += cvGetImageROI(image).x;
                nosetip.y += cvGetImageROI(image).y;
            }
#ifdef POS_DISPLAY
            printf("Nose: %d, %d ", nosetip.x, nosetip.y);
#endif
            /* NOSE 2
            cvRectangle(image, cvPoint(tracking_window.x, tracking_window.y),
                        cvPoint(tracking_window.x+tracking_window.width,
                                tracking_window.y+tracking_window.height),
                        CV_RGB(0, 255, 0), 3, 8, 0);
            */
            //no nose detected, use kalman
            //find pupil using isophote curvature

            //LEFT EYE
            cvSetImageROI(image, l_eye);
#ifdef USE_HAAR_REFINE
            /* Refine the eye rectangle with a Haar eye cascade before the
               pupil-center estimate. */
            if(!Haar_Detect(image, haarclassifier_eyel, mem_storage, &tracking_window)) {
                l_eye.x += tracking_window.x;
                l_eye.y += tracking_window.y;
                l_eye.width = tracking_window.width;
                l_eye.height = tracking_window.height;
                //printf("eye:%d, %d @ %d, %d\n", l_eye.x, l_eye.y, l_eye.x, l_eye.y);
                cvSetImageROI(image, l_eye);
            }
#endif
            cvRectangle(image, cvPoint(0, 0), cvPoint(l_eye.width, l_eye.height),
                        CV_RGB(0, 0, 255), 3, 8, 0);
#ifdef DEBUG
            cvSaveImage("lefteye.png", image, 0);
#endif
#ifdef CENTERMAP
            calc_stable_ic(image, &tracking_window);
            //cvRectangle(image, cvPoint(tracking_window.x, tracking_window.y),
            //            cvPoint(tracking_window.x+tracking_window.width, tracking_window.y+tracking_window.height),
            //            CV_RGB(255, 0, 0), 3, 8, 0);
            cvCircle(image, CALC_POINT(tracking_window),3, CV_RGB(255, 0, 0), 1, 8, 0);
            //l_eye.x += CALC_POINT(tracking_window).x - PUPIL_SIZE/2;
            //l_eye.y += CALC_POINT(tracking_window).y - PUPIL_SIZE/2;
            lefteye.x = tracking_window.x+PUPIL_SIZE/2+l_eye.x;
            lefteye.y = tracking_window.y+PUPIL_SIZE/2+l_eye.y;
#else
            cvCircle(image, lefteye = calc_heyecenter(image),3, CV_RGB(255, 0, 0), 1, 8, 0);
            lefteye.x += l_eye.x;
            lefteye.y += l_eye.y;
#endif
#ifdef POS_DISPLAY
            printf("LEYE: %d, %d ", tracking_window.x+PUPIL_SIZE/2+l_eye.x,
                   tracking_window.y+PUPIL_SIZE/2+l_eye.y);
#endif

            //RIGHT EYE
            cvSetImageROI(image, r_eye);
#ifdef USE_HAAR_REFINE
            if(!Haar_Detect(image, haarclassifier_eyer, mem_storage, &tracking_window)) {
                r_eye.x += tracking_window.x;
                r_eye.y += tracking_window.y;
                r_eye.width = tracking_window.width;
                r_eye.height = tracking_window.height;
                //printf("right eye:%d, %d @ %d, %d\n", r_eye.x, r_eye.y, r_eye.x, r_eye.y);
                cvSetImageROI(image, r_eye);
            }
#endif
            cvRectangle(image, cvPoint(0, 0), cvPoint(r_eye.width, r_eye.height),
                        CV_RGB(0, 0, 255), 3, 8, 0);
            /*
            counter++;
            char filename[32];
            sprintf(filename, "%d.png", counter);
            cvSaveImage(filename, image, 0);
            */
#ifdef DEBUG
            cvSaveImage("right.png", image, 0);
#endif
#ifdef CENTERMAP
            calc_stable_ic(image, &tracking_window);
            cvCircle(image, CALC_POINT(tracking_window),3, CV_RGB(255, 0, 0), 1, 8, 0);
            righteye.x = tracking_window.x+PUPIL_SIZE/2+r_eye.x;
            /* NOTE(review): the +300 offset looks like a leftover calibration
               tweak — confirm against the mapping constants below. */
            righteye.y = tracking_window.y+PUPIL_SIZE/2+r_eye.y+300;
#else
            cvCircle(image, righteye = calc_heyecenter(image),3, CV_RGB(255, 0, 0), 1, 8, 0);
            righteye.x += r_eye.x;
            righteye.y += r_eye.y;
#endif
#ifdef POS_DISPLAY
            printf("REYE: %d, %d \r", tracking_window.x+PUPIL_SIZE/2+r_eye.x,
                   tracking_window.y+PUPIL_SIZE/2+r_eye.y);
#endif
            cvResetImageROI(image);
        }

        cvShowImage("Window", image);
        //printf("%d %d %d %d : %d \r", mouse.root.x, mouse.root.y, mouse.win.x, mouse.win.y, i);
        fflush(stdout);
        /*
        mouse.win.x = X_A0*(lefteye.x-nosetip.x+42)*LREYE_WEIGHT+X_A0*(righteye.x-nosetip.x-52)*(1-LREYE_WEIGHT) +1920*(1-LREYE_WEIGHT);
        mouse.win.y = Y_A0*(lefteye.y-nosetip.y+74)*LREYE_WEIGHT+Y_A0*(righteye.y-nosetip.y+65)*(1-LREYE_WEIGHT) +1080*(1-LREYE_WEIGHT);
        //if(abs(mouse.win.x-mouse.root.x) < 10 && abs((mouse.win.y-mouse.root.y) < 10)) {
            mouse.root.x += mouse.win.x;
            mouse.root.y += mouse.win.y;
            mouse.root.x /= 2;
            mouse.root.y /= 2;
            XWarpPointer(display, window[i], window[i], 0, 0, 0, 0, mouse.root.x, mouse.root.y);
        }
        */
        /* Map nose-tip position (plus eye-to-nose offset) to screen
           coordinates; NOSE_AX/NOSE_AY/X_A0 are calibration gains. */
        mouse.root.x = 1920+NOSE_AX*nosetip.x;
        mouse.root.y = -540+NOSE_AY*nosetip.y;
        mouse.root.x += X_A0*((lefteye.x+righteye.x)/2-nosetip.x);
        //mouse.root.y += Y_A0*((lefteye.y+righteye.y)/2-nosetip.y-73)+800;
        XWarpPointer(display, 0, window[i], 0, 0, 0, 0, mouse.root.x, mouse.root.y);
        printf("%d \r",X_A0*((lefteye.x+righteye.x)/2-nosetip.x));
        //printf("\n%d %d %d %d : %d \r", mouse.root.x, mouse.root.y, mouse.win.x, mouse.win.y, i);
        //Save video
        //cvCreateVideoWriter
        if(cvWaitKey(30) == 'q')
            goto RELEASE_OpenCV_RESOURCE;
        //goto RELEASE_XLib_RESOURCE;
    }
    ret_val = EXIT_SUCCESS;

RELEASE_OpenCV_RESOURCE:
#if CAMSHIFT
    camshift_free(&face_obj);
#endif
    cvDestroyWindow("Window");
    /* Let OS Handle It !
    cvReleaseImage(&image);
    cvReleaseHaarClassifierCascade(&haarclassifier_eyer);
    cvReleaseHaarClassifierCascade(&haarclassifier_eyel);
    cvReleaseHaarClassifierCascade(&haarclassifier_nose);
    cvReleaseHaarClassifierCascade(&haarclassifier_face);
    cvReleaseMemStorage(&mem_storage);
    cvReleaseCapture(&capture);
    */
RELEASE_XLib_RESOURCE:
    free(window);
    XCloseDisplay(display);
    exit(ret_val);
}
/*
 * reconstructSurface - full structured-light reconstruction pass.
 *
 * Generates projector Gray-code patterns, decodes the camera images found in
 * `dirName`, reconstructs a 3D point cloud and depth map, and writes the
 * depth map (PNG + range XML), texture PNG, PLY point cloud, and the
 * calibration matrices into <sl_params->outdir>/<basename(dirName)>/.
 *
 * Params: dirName   - directory holding the captured Gray-code images
 *         sl_params - scan/projector/camera configuration (project type)
 *         sl_calib  - calibration data written alongside the results
 */
void reconstructSurface( const char* dirName, slParams* sl_params, slCalib* sl_calib)
{
    IplImage** proj_gray_codes = NULL;
    int gray_ncols, gray_nrows;
    int gray_colshift, gray_rowshift;
    /* Allocates proj_gray_codes and fills in the pattern geometry. */
    generateGrayCodes(sl_params->proj_w, sl_params->proj_h, proj_gray_codes,
        gray_ncols, gray_nrows, gray_colshift, gray_rowshift,
        sl_params->scan_cols, sl_params->scan_rows);

    /* NOTE(review): 22 appears to be the expected capture count — confirm it
       always matches 2*(gray_ncols+gray_nrows+1) used in the release loop below. */
    IplImage **cam_gray_codes = new IplImage*[22];
    int numImages = getLatestImages(dirName, cam_gray_codes, 22);

    IplImage* gray_decoded_cols = cvCreateImage(cvSize(sl_params->cam_w, sl_params->cam_h), IPL_DEPTH_16U, 1);
    IplImage* gray_decoded_rows = cvCreateImage(cvSize(sl_params->cam_w, sl_params->cam_h), IPL_DEPTH_16U, 1);
    IplImage* gray_mask         = cvCreateImage(cvSize(sl_params->cam_w, sl_params->cam_h), IPL_DEPTH_8U,  1);
    decodeGrayCodes(sl_params->proj_w, sl_params->proj_h,
        cam_gray_codes, gray_decoded_cols, gray_decoded_rows, gray_mask,
        gray_ncols, gray_nrows, gray_colshift, gray_rowshift, sl_params->thresh);

    /* Build the output directory: <outdir>/<last path component of dirName>. */
    char str[1024], outputDir[1024];
    mkdir(sl_params->outdir, 0755);
    std::string baseNameBuilder(dirName);
    size_t last_slash_position = baseNameBuilder.find_last_of("/");
    baseNameBuilder = baseNameBuilder.substr(last_slash_position+1);
    const char* baseName = baseNameBuilder.c_str();
    //sprintf(outputDir, "3D/%s", baseName);
    sprintf(outputDir, "%s/%s", sl_params->outdir, baseName);
    //mkdir("3D", 0755);
    mkdir(outputDir, 0755);

    // Display and save the correspondences.
    if(sl_params->display)
        displayDecodingResults(gray_decoded_cols, gray_decoded_rows, gray_mask, sl_params);

    // Reconstruct the point cloud and depth map.
    //printf("Reconstructing the point cloud and the depth map...\n");
    CvMat *points           = cvCreateMat(3, sl_params->cam_h*sl_params->cam_w, CV_32FC1);
    CvMat *colors           = cvCreateMat(3, sl_params->cam_h*sl_params->cam_w, CV_32FC1);
    CvMat *depth_map        = cvCreateMat(sl_params->cam_h, sl_params->cam_w, CV_32FC1);
    CvMat *mask             = cvCreateMat(1, sl_params->cam_h*sl_params->cam_w, CV_32FC1);
    CvMat *resampled_points = cvCreateMat(3, sl_params->cam_h*sl_params->cam_w, CV_32FC1);
    reconstructStructuredLight(sl_params, sl_calib, cam_gray_codes[0],
        gray_decoded_cols, gray_decoded_rows, gray_mask,
        points, colors, depth_map, mask);
    // cvSave("points.xml",points);

    /* NOTE(review): points_trans is never released — leaks one
       (cam_h*cam_w x 3) float matrix per call. */
    CvMat *points_trans = cvCreateMat(sl_params->cam_h*sl_params->cam_w, 3, CV_32FC1);
    cvTranspose(points, points_trans);
    downsamplePoints(sl_params, sl_calib, points_trans, mask, resampled_points, depth_map);

    double min_val, max_val;
    cvMinMaxLoc(depth_map, &min_val, &max_val);

    // Display and save the depth map.
    if(sl_params->display)
        displayDepthMap(depth_map, gray_mask, sl_params);

    //printf("Saving the depth map...\n");
    /* Convert the metric depth map to an 8-bit image: masked-out pixels are 0,
       valid pixels map [dist_range[0], dist_range[1]] -> [255, 0] (near = bright). */
    IplImage* depth_map_image = cvCreateImage(cvSize(sl_params->cam_w, sl_params->cam_h), IPL_DEPTH_8U, 1);
    for(int r=0; r<sl_params->cam_h; r++){
        for(int c=0; c<sl_params->cam_w; c++){
            char* depth_map_image_data = (char*)(depth_map_image->imageData + r*depth_map_image->widthStep);
            if(mask->data.fl[sl_params->cam_w*r+c])
                depth_map_image_data[c] = 255-int(255*(depth_map->data.fl[sl_params->cam_w*r+c]-sl_params->dist_range[0])/
                                                 (sl_params->dist_range[1]-sl_params->dist_range[0]));
            else
                depth_map_image_data[c] = 0;
        }
    }
    CvMat* dist_range = cvCreateMat(1, 2, CV_32FC1);
    cvmSet(dist_range, 0, 0, sl_params->dist_range[0]);
    cvmSet(dist_range, 0, 1, sl_params->dist_range[1]);
    sprintf(str, "%s/depth_map.png", outputDir);
    printf("%s\n",str);
    cvSaveImage(str, depth_map_image);
    sprintf(str, "%s/depth_map_range.xml", outputDir);
    cvSave(str, dist_range);
    cvReleaseImage(&depth_map_image);
    cvReleaseMat(&dist_range);

    // Save the texture map.
    //printf("Saving the texture map...\n");
    sprintf(str, "%s/%s.png", outputDir, baseName);
    cvSaveImage(str, cam_gray_codes[0]);

    // Save the point cloud.
    //printf("Saving the point cloud...\n");
    sprintf(str, "%s/%s.ply", outputDir, baseName);
    //if(savePointsPLY(str, resampled_points, NULL, NULL, mask, sl_params->proj_w, sl_params->proj_h)){
    //if(savePointsPLY(str, resampled_points, NULL, NULL, mask, sl_params->cam_w, sl_params->cam_h)){
    if(savePointsPLY(str, points, NULL, NULL, mask, sl_params->cam_w, sl_params->cam_h)){
        /* NOTE(review): this early return skips all the cvRelease* calls
           below, leaking every buffer allocated in this function. */
        fprintf(stderr, "Saving the reconstructed point cloud failed!\n");
        return;
    }

    /* Save the full calibration alongside the reconstruction. */
    sprintf(str,"%s/proj_intrinsic.xml", outputDir);
    cvSave(str, sl_calib->proj_intrinsic);
    sprintf(str,"%s/proj_distortion.xml", outputDir);
    cvSave(str, sl_calib->proj_distortion);
    sprintf(str,"%s/cam_intrinsic.xml", outputDir);
    cvSave(str, sl_calib->cam_intrinsic);
    sprintf(str,"%s/cam_distortion.xml", outputDir);
    cvSave(str, sl_calib->cam_distortion);
    sprintf(str, "%s/cam_extrinsic.xml", outputDir);
    cvSave(str, sl_calib->cam_extrinsic);
    sprintf(str, "%s/proj_extrinsic.xml", outputDir);
    cvSave(str, sl_calib->proj_extrinsic);

    // Free allocated resources.
    cvReleaseImage(&gray_decoded_cols);
    cvReleaseImage(&gray_decoded_rows);
    cvReleaseImage(&gray_mask);
    cvReleaseMat(&points);
    cvReleaseMat(&colors);
    cvReleaseMat(&depth_map);
    cvReleaseMat(&mask);
    cvReleaseMat(&resampled_points);
    for(int i=0; i<(gray_ncols+gray_nrows+1); i++)
        cvReleaseImage(&proj_gray_codes[i]);
    delete[] proj_gray_codes;
    for(int i=0; i<2*(gray_ncols+gray_nrows+1); i++)
        cvReleaseImage(&cam_gray_codes[i]);
    delete[] cam_gray_codes;
    return;
}
int susan_corner_detect(char*filename) { int cornerCount=max_corners; CvPoint2D32f corners[max_corners]; double qualityLevel; double minDistance; IplImage *srcImage = 0, *grayImage = 0, *corners1 = 0, *corners2 = 0; int i; CvScalar color = CV_RGB(255,0,0); //char* filename = argc == 2 ? argv[1] : (char*)"lena.jpg"; //cvNamedWindow( "input", 1 ); // create HighGUI window with name "image" //Load the image to be processed srcImage = cvLoadImage(filename,1); grayImage = cvCreateImage(cvGetSize(srcImage), IPL_DEPTH_8U, 1); //copy the source image to copy image after converting the format cvCvtColor(srcImage, grayImage, CV_BGR2GRAY); //create empty images of same size as the copied images corners1= cvCreateImage(cvGetSize(srcImage), IPL_DEPTH_32F, 1); corners2= cvCreateImage(cvGetSize(srcImage),IPL_DEPTH_32F, 1); cvGoodFeaturesToTrack (grayImage, corners1, corners2, corners, &cornerCount, 0.05, 5, 0); printf("num corners found: %d/n", cornerCount); // draw circles at each corner location in the gray image and print out a list the corners if(cornerCount>0) { for (i=0; i<cornerCount;i++) { cvCircle(srcImage, cvPoint((int)(corners[i].x), (int)(corners[i].y)), 6, color, 1, CV_AA, 0); } } //cvShowImage( "input", srcImage ); //图像缩放 cvNamedWindow("scale_dst",1); CvSize dst_size; double scale=0.5; IplImage*scale_dst=0; dst_size.width = (int)(srcImage->width * scale); dst_size.height = (int)(srcImage->height * scale);//确定新图的矩形框 scale_dst=cvCreateImage(dst_size,srcImage->depth,srcImage->nChannels);//创建图像头 cvResize(srcImage,scale_dst,CV_INTER_LINEAR);//使用双线性差值减小图像。 cvShowImage("scale_dst",scale_dst); cvSaveImage("samples//scale_dst.JPG",scale_dst); cvWaitKey(0); cvDestroyAllWindows(); cvReleaseImage(&srcImage); cvReleaseImage(&grayImage); cvReleaseImage(&corners1); cvReleaseImage(&corners2); cvReleaseImage(&scale_dst); cvWaitKey(0); return 0; }
/* ======================================================================
** Second (bottom) camera helper. Takes and analyses a frame showing the
** floor (asphalt) while flying away from the marker, and uses it together
** with a previously captured marker frame.
** ======================================================================
** Sets (globals declared elsewhere):
**   getF - grey level of the floor (histogram peak of the floor frame).
**   getB - "black" level of the marker (histogram peak below getF).
**   getW - "white" level of the marker (histogram peak above getF),
**          then both thresholds are pulled toward getF.
**   W_B  - resulting white-black span.
** Params:
**   floor        - current floor frame (not modified)
**   fname_floor  - path the floor frame is written to when WRITE_FLOOR is set
**   fname_marker - path of the previously saved marker frame
** ====================================================================== */
void getFloor (IplImage* floor, char* fname_floor, char* fname_marker)
{
    //--------------------------------Algorithm section--------------------------------
    IplImage* frameflWB = cvCreateImage(cvSize(floor->width,floor->height),8,1);
    cvConvertImage(floor,frameflWB,0);          // greyscale copy of the floor frame

    // Grey-level histogram of the floor frame (B bins, declared elsewhere).
    int countflB[B]={0};
    int max=0;
    for (int y=0; y<frameflWB->height;y++)
    {
        uchar *ptr = (uchar*) ( frameflWB->imageData + y * frameflWB->widthStep) ;
        for (int x=0; x<frameflWB->width;x++)
        {
            countflB[ptr[x]]++;
        }
    }
    // Floor grey level = histogram maximum.
    for (int i=0; i<B;i++)
    {
        if (countflB[i]>max)
        {
            max=countflB[i];
            getF = i;
        }
    }
    max=0;

    IplImage* marker = cvLoadImage (fname_marker);
    // ROBUSTNESS FIX: bail out instead of dereferencing NULL when the marker
    // frame cannot be loaded.
    if (!marker)
    {
        cvReleaseImage(&frameflWB);
        return;
    }
    IplImage* framemWB = cvCreateImage(cvSize(marker->width,marker->height),8,1);
    cvConvertImage(marker,framemWB,0);

    // Grey-level histogram of the marker frame.
    int countmB[B]={0};
    for (int y=0; y<framemWB->height;y++)
    {
        uchar *ptr = (uchar*) ( framemWB->imageData + y * framemWB->widthStep) ;
        for (int x=0; x<framemWB->width;x++)
        {
            countmB[ptr[x]]++;
        }
    }
    // Black = histogram maximum below the floor grey level.
    for (int b=0; b<getF;b++)
    {
        if (countmB[b]>max)
        {
            max=countmB[b];
            getB = b;
        }
    }
    max=0;
    // White = histogram maximum above the floor grey level.
    for (int w=getF+1; w<B;w++)
    {
        if (countmB[w]>max)
        {
            max=countmB[w];   // BUG FIX: was "max=max=countmB[w];"
            getW = w;
        }
    }

    // Pull both thresholds toward the floor level (white halfway,
    // black a quarter of the way).
    getW=(getW-getF)/2+getF;
    getB=(getF-getB)/4+getB;
    W_B=getW-getB;

    cvReleaseImage ( &frameflWB);
    cvReleaseImage ( &framemWB);
    cvReleaseImage ( &marker);   // LEAK FIX: the loaded marker was never released
    //----------------------------------------------------------------------------------
#if defined (WRITE_FLOOR)
    cvSaveImage(fname_floor,floor);
#endif
    return;
}
/*
 * faceDbCreator - align a set of face images to a canonical 4-point layout,
 * then generate randomly perturbed (scaled / rotated / translated) cropped
 * variations of each aligned face and save them as JPEGs.
 *
 * Params:
 *   filePath       - directory prefix of the face image files
 *   coordsFilename - text file: one line per image, "name x0 y0 ... x3 y3"
 *   startFile      - index of the first line/image to use (1-based offset)
 *   endFile        - index of the last line/image to use
 *   noIterations   - number of alignment (Procrustes-style) iterations
 *   border         - extra margin, in pixels, around the canonical face box
 *
 * Side effects: overwrites the input images with their aligned versions,
 * then removes them and writes NO_VARIATIONS "<name>_NNN.jpg" files each.
 * Calls exit(EXIT_FAILURE) if the coords file or all images are missing.
 */
void faceDbCreator(const char filePath[50],const char coordsFilename[100],
    const int startFile,const int endFile,
    const int noIterations,const int border){

    /**Number of Feature Points used in aligning images.**/
    const int noFeaturePoints = 4;
    const int initialSize = 38;          /* canonical face box edge, pre-border */
    int i,j,k,iteration;
    /**No of files from DB added for alignment**/
    int noFiles = 0;
    double xr = 0;
    double yr = 0;
    int x,y;
    char filePathCopy[100];

    /**Coords of the standard face with respect to initialSize.
       Note: the CvMat headers below share storage with stack arrays via
       data.db — cvReleaseMat at the end only frees the headers. **/
    CvMat *stdCoords = cvCreateMat(noFeaturePoints*2,1, CV_64FC1);
    double stdCoordsData[] = {5+border,6+border,32+border, 6+border,18+border,15+border, 18+border,25+border};
    stdCoords->data.db = stdCoordsData;

    /**Average Coords of the faces aligned so far**/
    double avgData[noFeaturePoints*2];
    CvMat *avgMat = cvCreateMat(noFeaturePoints*2,1, CV_64FC1);
    avgMat->data.db = avgData;

    /**Coords to which other coordinates are aligned to**/
    double testData[noFeaturePoints*2];
    CvMat *testMat = cvCreateMat(noFeaturePoints*2,1, CV_64FC1);
    testMat->data.db = testData;
    cvCopy(stdCoords,testMat);

    double tempCoords[noFeaturePoints*2];
    /**Coords of all the images in the database (VLA sized by file range)**/
    CvMat* coords[endFile-startFile+1];
    double coordsData[endFile-startFile+1][noFeaturePoints*8];
    /**Face DB image file names**/
    char fileNames[endFile-startFile+1][100];
    char tempFileName[100];
    char tempStr[50];
    IplImage *img = NULL;
    IplImage *dst = NULL;

    FILE* coordsFile = fopen(coordsFilename,"r+");
    FILE* t = NULL;
    if (coordsFile){
        /* Read every line; only lines with i>=0 (at/after startFile) are kept. */
        for (i=-startFile+1;i<=endFile-startFile;++i){
            if(!feof(coordsFile)){
                /* NOTE(review): &tempStr is char(*)[50] where %s expects char*;
                   works in practice but is technically mismatched — confirm. */
                fscanf(coordsFile,"%s %lf %lf %lf %lf %lf %lf %lf %lf",&tempStr,
                       &tempCoords[0],&tempCoords[1],&tempCoords[2],
                       &tempCoords[3],&tempCoords[4],&tempCoords[5],
                       &tempCoords[6],&tempCoords[7]);
                /**Skip the coords upto startImage**/
                if (i>=0){
                    strcpy(tempFileName,filePath);
                    strcat(tempFileName,tempStr);
                    /**Check whether the file exists (assignment intended)**/
                    if (t=fopen(tempFileName,"r")){
                        fclose(t);
                        strcpy(fileNames[noFiles],tempFileName);
                        coords[noFiles] = cvCreateMat(noFeaturePoints*2,4, CV_64FC1);
                        faceDbCreatorFillData(coordsData[noFiles],tempCoords,noFeaturePoints);
                        coords[noFiles]->data.db = coordsData[noFiles];
                        ++noFiles;
                    }
                }
            }
            else{
                noFiles = i-1;   /* NOTE(review): overwrites the count kept above — confirm intent */
                break;
            }
        }
        fclose(coordsFile);
        if (!noFiles){
            printf("Face DB Creator Error: No File To Process\n");
            exit(EXIT_FAILURE);
        }
    }
    else {
        printf("Face DB Creator Error: Could Not Open Coords File\n");
        exit(EXIT_FAILURE);
    }

    /**PseudoInverse result: similarity transform [a, b, tx, ty]**/
    CvMat *temp2 = cvCreateMat(4,1,CV_64FC1);
    double tempData2[4];
    temp2->data.db = tempData2;

    /* Iterative alignment: fit each face to testMat, warp its image,
       then re-target testMat at the (re-normalized) average shape. */
    for (iteration=0;iteration<noIterations;++iteration){
        cvSetZero(avgMat);
        for (i=0;i<noFiles;++i){
            pseudoInverse(coords[i],testMat,temp2);
            for (j=0;j<noFeaturePoints;++j){
                /* Apply similarity transform (rotation+scale a,b; shift tx,ty). */
                xr = coordsData[i][j*8]*temp2->data.db[0]
                    -coordsData[i][j*8+4]* temp2->data.db[1]+temp2->data.db[2];
                yr = coordsData[i][j*8]*temp2->data.db[1]
                    +coordsData[i][j*8+4]* temp2->data.db[0]+temp2->data.db[3];
                coordsData[i][j*8] = xr;
                coordsData[i][j*8+5] = xr;
                coordsData[i][j*8+1] = -yr;
                coordsData[i][j*8+4] = yr;
                avgData[j*2] += xr;
                avgData[j*2+1] += yr;
            }
            img = cvLoadImage(fileNames[i], CV_LOAD_IMAGE_GRAYSCALE);
            dst = cvCreateImage(cvSize(initialSize+2*border,initialSize+2*border),
                                img->depth,img->nChannels);
            cvSetZero(dst);
            double a = temp2->data.db[0];
            double b = temp2->data.db[1];
            double det = a*a+b*b;
            double tx = temp2->data.db[2];
            double ty = temp2->data.db[3];
            /**Transform the image (inverse-map each destination pixel,
               nearest-neighbour sampling)**/
            for (j=0;j<dst->height;++j){
                for (k=0;k<dst->width;++k){
                    xr = ((k-tx)*a+(j-ty)*b)/det;
                    yr = ((k-tx)*-b+(j-ty)*a)/det;
                    if ((int)xr>=0 && (int)xr <img->width &&
                        (int)yr>=0 && (int)yr<img->height){
                        *((unsigned char*)(dst->imageData)+j*dst->widthStep+k)=
                            *((unsigned char*)(img->imageData)+
                              (int)yr*img->widthStep+(int)xr);
                    }
                }
            }
            cvSaveImage(fileNames[i],dst);   /* overwrite input with aligned face */
            cvReleaseImage(&img);
            cvReleaseImage(&dst);
        }

        /**Average of the transformations performed so far.
           NOTE(review): divides by the full file range, not noFiles —
           confirm when some files were missing. **/
        for (j=0;j<noFeaturePoints*2;++j){
            avgData[j] /= endFile-startFile+1;
        }

        /**Perform transformation on the average data**/
        CvMat* tempMat = cvCreateMat(noFeaturePoints*2,4, CV_64FC1);
        double tempMatData[noFeaturePoints*8];
        tempMat->data.db = tempMatData;
        faceDbCreatorFillData(tempMatData,avgData,noFeaturePoints);
        pseudoInverse(tempMat,stdCoords,temp2);
        for (j=0;j<noFeaturePoints;++j){
            testData[j*2] = avgData[j*2]*temp2->data.db[0]-
                avgData[j*2+1]*temp2->data.db[1]+ temp2->data.db[2];
            testData[j*2+1] = avgData[j*2]*temp2->data.db[1]+
                avgData[j*2+1]*temp2->data.db[0]+ temp2->data.db[3];
        }
        cvReleaseMat(&tempMat);
    }

    /* ---- Variation generation: crop each aligned face and emit randomly
       perturbed copies. ---- */
    IplImage *img8U,*img64F;
    CvRect *cropArea;
    IplImage *finalImage32F = cvCreateImage(cvSize(CROPPED_WIDTH,
        CROPPED_HEIGHT),IPL_DEPTH_32F,1);
    IplImage *finalImage8U = cvCreateImage(cvSize(CROPPED_WIDTH,
        CROPPED_HEIGHT),IPL_DEPTH_8U,1);
    IplImage *transformImage64F;
    IplImage *transformImage32F;
    IplImage *croppedImage32F = cvCreateImage(cvSize(initialSize,
        initialSize),IPL_DEPTH_32F,1);
    IplImage *croppedImage64F = cvCreateImage(cvSize(initialSize,
        initialSize),IPL_DEPTH_64F,1);
    IplImage* mask = cvCreateImage(cvGetSize(croppedImage64F),IPL_DEPTH_8U,1);
    maskGenerator(mask);

    /**Random transformation magnitudes**/
    double scale = 0;
    double rotate = 0;
    double translateX = 0;
    double translateY = 0;

    /* Filename suffix template "_NNN.jpg"; digits filled per variation. */
    tempStr[0] = '_'; tempStr[4] = '.'; tempStr[5] = 'j';
    tempStr[6] = 'p'; tempStr[7] = 'g'; tempStr[8] = '\0';

    /**Random Number Generator**/
    CvRNG rg;
    for (i=0;i<noFiles;++i){
        img8U = cvLoadImage(fileNames[i], CV_LOAD_IMAGE_GRAYSCALE);
        img64F = cvCreateImage(cvGetSize(img8U), IPL_DEPTH_64F,1);
        cvConvertScale(img8U,img64F);
        cvReleaseImage(&img8U);
        remove(fileNames[i]);   /* the aligned original is replaced by variations */

        /* Offset of the crop window from the canonical top-left point. */
        xr = coordsData[i][0]-stdCoordsData[0]+border;
        yr = coordsData[i][4]-stdCoordsData[1]+border;
        cvSetImageROI(img64F,cvRect(cvRound(xr),cvRound(yr),initialSize, initialSize));
        cvCopy(img64F,croppedImage64F);

        /**Creating variations for each image**/
        for (j=0;j<NO_VARIATIONS;++j){
            lightingCorrection(croppedImage64F,mask);
            rg = cvRNG(time(0)*1000*(i+20)*(j+30));   /* per-variation seed */
            cvConvertScale(croppedImage64F,croppedImage32F);
            cvResize(croppedImage32F,finalImage32F);
            cvConvertScale(finalImage32F,finalImage8U);
            /* Encode variation index j as three ASCII digits in the suffix. */
            tempStr[1] = (j/100)%10+48;
            tempStr[2] = (j/10)%10+48;tempStr[3]=j%10+48;
            strncpy(tempFileName,fileNames[i],strlen(fileNames[i])-4);
            tempFileName[strlen(fileNames[i])-4] ='\0';
            strcat(tempFileName,tempStr);
            cvSaveImage(tempFileName,finalImage8U);

            /* Prepare croppedImage64F for the NEXT variation with a random
               perturbation of the crop. */
            switch (cvRandInt(&rg)%3){
            /**Scaling**/
            case 0:
                if (cvRandInt(&rg)%2)
                    scale = cvRandReal(&rg)*MAX_SCALE* initialSize/CROPPED_WIDTH;
                else
                    scale = cvRandReal(&rg)*MIN_SCALE* initialSize/CROPPED_HEIGHT;
                transformImage64F = cvCreateImage(
                    cvSize(cvRound(initialSize-2*scale),
                           cvRound(initialSize-2*scale)), IPL_DEPTH_64F,1);
                transformImage32F = cvCreateImage(
                    cvSize(cvRound(initialSize-2*scale),
                           cvRound(initialSize-2*scale)), IPL_DEPTH_32F,1);
                cvSetImageROI(img64F,cvRect(cvRound(xr+scale),cvRound(yr+scale),
                    cvRound(initialSize-2*scale),cvRound(initialSize-2*scale)));
                cvCopy(img64F,transformImage64F);
                cvConvertScale(transformImage64F,transformImage32F);
                cvResize(transformImage32F,croppedImage32F);
                cvConvertScale(croppedImage32F,croppedImage64F);
                cvReleaseImage(&transformImage64F);
                cvReleaseImage(&transformImage32F);
                break;
            /**Rotation**/
            case 1:
                if (cvRandInt(&rg)%2)
                    rotate = cvRandReal(&rg)*MAX_ROTATE;
                else
                    rotate = cvRandReal(&rg)*MIN_ROTATE;
                cvResetImageROI(img64F);
                transformImage64F = cvCreateImage(cvGetSize(img64F),
                    IPL_DEPTH_64F,1);
                /* NOTE(review): takes the address of a temporary
                   (cvPoint2D64f(...)) — non-standard C++; confirm the
                   compiler extension in use. */
                transformRotate(img64F,transformImage64F,
                    &cvPoint2D64f(xr+initialSize/2,yr+initialSize/2),rotate*M_PI/180);
                cvSetImageROI(transformImage64F,
                    cvRect(xr,yr,initialSize,initialSize));
                cvCopy(transformImage64F,croppedImage64F);
                cvReleaseImage(&transformImage64F);
                break;
            default:
                /**Translation**/
                if (cvRandInt(&rg)%2){
                    if (cvRandInt(&rg)%2){
                        translateX = cvRandReal(&rg)*MAX_TRANSLATE*
                            initialSize/CROPPED_WIDTH;
                        translateY = cvRandReal(&rg)*MAX_TRANSLATE*
                            initialSize/CROPPED_HEIGHT;
                    }
                    else{
                        translateX = cvRandReal(&rg)*MIN_TRANSLATE*
                            initialSize/CROPPED_WIDTH;
                        translateY = cvRandReal(&rg)*MIN_TRANSLATE*
                            initialSize/CROPPED_HEIGHT;
                    }
                }
                else{
                    if (cvRandInt(&rg)%2){
                        translateX = cvRandReal(&rg)*MAX_TRANSLATE*
                            initialSize/CROPPED_WIDTH;
                        translateY = cvRandReal(&rg)*MIN_TRANSLATE*
                            initialSize/CROPPED_HEIGHT;
                    }
                    else{
                        translateX = cvRandReal(&rg)*MIN_TRANSLATE*
                            initialSize/CROPPED_WIDTH;
                        translateY = cvRandReal(&rg)*MAX_TRANSLATE*
                            initialSize/CROPPED_HEIGHT;
                    }
                }
                cvSetImageROI(img64F,cvRect(cvRound(xr+translateX),
                    cvRound(yr+translateY),initialSize,initialSize));
                cvCopy(img64F,croppedImage64F);
            }
        }
        cvReleaseImage(&img64F);
        cvReleaseMat(&coords[i]);
    }

    cvReleaseImage(&finalImage8U);
    cvReleaseImage(&finalImage32F);
    cvReleaseImage(&croppedImage32F);
    cvReleaseImage(&croppedImage64F);
    cvReleaseMat(&stdCoords);
    cvReleaseMat(&testMat);
    cvReleaseMat(&avgMat);
    cvReleaseMat(&temp2);
}
/*
 * saveSnapshot - write the current debug image to `filename`.
 * Only attempts the write when debug mode is enabled.
 * Returns true when debug mode is on AND the save failed; false otherwise
 * (including when debug mode is off, where nothing is written).
 */
bool LegsDetector::saveSnapshot(const char* filename)
{
    if (!_debug)
        return false;
    // cvSaveImage returns nonzero on success, so 0 here means failure.
    return cvSaveImage(filename, _debugImage) == 0;
}
/*
 * main - steganography driver: embeds a watermark image and an audio file
 * into a cover image, writes the byte count of the audio into the first
 * four pixels' low bits, and saves/displays the result.
 *
 * argv[1] = cover image, argv[2] = watermark image, argv[3] = audio file.
 * Uses globals `data`, `data2` (pixel pointers) and `count1` (bytes of
 * audio embedded, set by insert_audio) declared elsewhere in this file.
 */
int main(int argc, char *argv[])
{
    IplImage *img = 0, *img2=0 ;
    int height,width,step,channels;
    int i,j,k;

    if(argc<4){
        printf("Usage: ./a.out <image-file-name> <watermarker image> <audio file>\n");
        exit(0);
    }

    // load the cover image
    img=cvLoadImage(argv[1]);
    if(!img){
        printf("Could not load image file: %s\n",argv[1]);
        exit(0);
    }

    // Load Watermark Image
    img2=cvLoadImage(argv[2]);
    if(!img2){
        printf("Could not load image file: %s\n",argv[2]);
        exit(0);
    }

    height = img->height;
    width = img->width;
    step = img->widthStep;
    channels = img->nChannels;
    int nchannels = img->nChannels;
    data = (uchar *)img->imageData;       /* global raw pixel pointer (cover) */

    int height2=img2->height;
    int width2=img2->width;
    int step2=img2->widthStep;
    int channels2=img2->nChannels;
    data2 = (uchar *)img2->imageData;     /* global raw pixel pointer (watermark) */

    // Inserting Watermark
    insert_watermark(img,img2);

    // Read the audio file and write it into the image
    FILE *fp=fopen(argv[3],"r");
    if(fp==NULL){
        printf("Could not load audio file: %s\n",argv[3]);
        exit(0);
    }
    insert_audio(fp,img,img2);
    //printf("%d row=%d col=%d \n",count1,row_count,col_count);

    /* Split count1 (number of audio bytes embedded) into four little-endian
       bytes via cumulative remainders. */
    int a1,a2,a3,a4;
    a1=count1%256;
    a2=count1%65536;
    a3=count1%16777216;
    /* NOTE(review): 4294967296 (2^32) does not fit in a 32-bit int constant;
       whether this behaves as intended depends on count1's declared type —
       confirm against the global's declaration. */
    a4=count1%4294967296;
    //printf("%d %d %d %d\n",a1,(a2-a1)>>8,(a3-a2)>>16,(a4-a3)>>24);
    int size[4]={0};
    size[0]=a1;
    size[1]=(a2-a1)>>8;
    size[2]=(a3-a2)>>16;
    size[3]=(a4-a3)>>24;
    printf("bytes=%d\n", size[0] | (size[1]<<8) | (size[2]<<16) | (size[3]<<24));

    int val1,val2;
    /* Insert the header of the audio: pack each size byte into the low bits
       of one pixel — 2 bits in channel 0, 3 bits each in channels 1 and 2. */
    for(i=0;i<4;i++) {
        a1=size[i]%4;
        a2=size[i]%32;
        a3=size[i]%256;
        val1=(a2-a1)>>2;
        val2=(a3-a2)>>5;
        /* clear the target low bits ... */
        data[0+i*channels+0]= (data[0+i*channels+0] &252);
        data[0+i*channels+1]= (data[0+i*channels+1] &248);
        data[0+i*channels+2]= (data[0+i*channels+2] &248);
        /* ... then OR the payload bits in */
        data[0+i*channels+0]= (data[0+i*channels+0] |a1);
        data[0+i*channels+1]= (data[0+i*channels+1] |val1);
        data[0+i*channels+2]= (data[0+i*channels+2] |val2);
    }

    cvSaveImage("new_image.png", img );

    // create a window
    cvNamedWindow("mainWin", CV_WINDOW_AUTOSIZE);
    cvMoveWindow("mainWin", 100, 100);

    // show the image
    cvShowImage("mainWin", img );

    // wait for a key
    cvWaitKey(0);

    // release the image
    cvReleaseImage(&img );
    return 0;
}
/*
 * process_image - per-frame chessboard-calibration callback.
 *
 * When a new frame is available (image_in->publish_count > 0): converts the
 * incoming raster to BGR, finds and refines 12x12 chessboard corners,
 * optionally saves a calibration snapshot plus the pan/tilt/zoom/focus
 * position, overlays status text, publishes the (optionally undistorted)
 * frame on image_out, and when `centering` is set nudges the pan/tilt unit
 * toward the board's center of mass.
 *
 * Operates entirely on globals declared elsewhere (image_in/out, codec_in/out,
 * cvimage_*, observe, control, take_pic, centering, undistort, img_cnt,
 * dir_name, last_corners, intrinsic_matrix, distortion_coeffs).
 */
void process_image()
{
    // std::cout << "Checking publish count: " << image_in->publish_count << std::endl;
    // image_in->lock_atom();
    if (image_in->publish_count > 0)
    {
        /* Wrap the codec's raster in cvimage_in (704-wide, 3 bytes/pixel)
           and swap R/B into the working BGR buffer. */
        cvSetData(cvimage_in, codec_in->get_raster(), 3*704);
        cvConvertImage(cvimage_in, cvimage_bgr, CV_CVTIMG_SWAP_RB);
        // image_in->unlock_atom();

        CvSize board_sz = cvSize(12, 12);
        CvPoint2D32f* corners = new CvPoint2D32f[12*12];
        int corner_count = 0;

        //This function has a memory leak in the current version of opencv!
        int found = cvFindChessboardCorners(cvimage_bgr, board_sz, corners, &corner_count,
                                            CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);

        /* Sub-pixel refinement needs a grayscale copy. */
        IplImage* gray = cvCreateImage(cvSize(cvimage_bgr->width, cvimage_bgr->height), IPL_DEPTH_8U, 1);
        cvCvtColor(cvimage_bgr, gray, CV_BGR2GRAY);
        cvFindCornerSubPix(gray, corners, corner_count, cvSize(5, 5), cvSize(-1, -1),
                           cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 10, 0.01f ));
        cvReleaseImage(&gray);

        /* Snapshot only when requested AND the full 12x12 board was found. */
        if (take_pic && corner_count == 144)
        {
            std::stringstream ss;
            img_cnt++;
            ss << dir_name << "/Image" << img_cnt << ".jpg";
            // std::ofstream imgfile(ss.str().c_str());
            // imgfile.write((char*)image_in->jpeg_buffer, image_in->compressed_size);
            // imgfile.close();
            cvSaveImage(ss.str().c_str(), cvimage_bgr);
            ss.str("");
            ss << dir_name << "/Position" << img_cnt << ".txt";
            std::ofstream posfile(ss.str().c_str());
            observe->lock_atom();
            posfile << "P: " << observe->pan_val << std::endl
                    << "T: " << observe->tilt_val << std::endl
                    << "Z: " << observe->lens_zoom_val << std::endl
                    << "F: " << observe->lens_focus_val;
            observe->unlock_atom();
            posfile.close();
            take_pic = false;
        }

        /* Largest per-corner motion since the previous frame (also updates
           the last_corners cache). */
        float maxdiff = 0;
        for(int c=0; c<12*12; c++)
        {
            float diff = sqrt( pow(corners[c].x - last_corners[c].x, 2.0) +
                               pow(corners[c].y - last_corners[c].y, 2.0));
            last_corners[c].x = corners[c].x;
            last_corners[c].y = corners[c].y;
            if (diff > maxdiff) {
                maxdiff = diff;
            }
        }
        printf("Max diff: %g\n", maxdiff);

        cvDrawChessboardCorners(cvimage_bgr, board_sz, corners, corner_count, found);

        if (undistort) {
            cvUndistort2(cvimage_bgr, cvimage_undistort, intrinsic_matrix, distortion_coeffs);
        } else {
            cvCopy(cvimage_bgr, cvimage_undistort);
        }

        /* Overlay current PTZF state and the corner count. */
        CvFont font;
        cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 0.8, 0.8, 0, 2);
        std::stringstream ss;
        observe->lock_atom();
        ss << "P: " << observe->pan_val;
        ss << " T: " << observe->tilt_val;
        ss << " Z: " << observe->lens_zoom_val;
        ss << " F: " << observe->lens_focus_val;
        observe->unlock_atom();
        cvPutText(cvimage_undistort, ss.str().c_str(), cvPoint(15,30), &font, CV_RGB(255,0,0));
        ss.str("");
        ss << "Found " << corner_count << " corners";
        if (centering) {
            ss << " -- Autocentering";
        }
        cvPutText(cvimage_undistort, ss.str().c_str(), cvPoint(15,60), &font, CV_RGB(255,0,0));

        /* Publish the annotated frame. */
        image_out->width = 704;
        image_out->height = 480;
        image_out->compression = "raw";
        image_out->colorspace = "rgb24";
        // codec_out->realloc_raster_if_needed();
        cvSetData(cvimage_out, codec_out->get_raster(), 3*image_out->width);
        cvConvertImage(cvimage_undistort, cvimage_out, CV_CVTIMG_SWAP_RB);
        codec_out->set_flow_data();
        image_out->publish();

        /* Auto-centering: steer pan/tilt toward the corners' center of mass
           when it strays more than 10px from (354, 240). */
        CvPoint2D32f COM = cvPoint2D32f(0,0);
        if (centering && corner_count > 20)
        {
            //average corners:
            for (int i = 0; i < corner_count; i++)
            {
                COM.x += corners[i].x / corner_count;
                COM.y += corners[i].y / corner_count;
            }
            if ( (fabs(COM.x - 354.0) > 10) || (fabs(COM.y - 240.0) > 10) )
            {
                float rel_pan,rel_tilt;
                rel_pan = (COM.x - 354.0) * .001;    /* proportional gain */
                rel_tilt = -(COM.y - 240.0) * .001;
                control->pan_val = rel_pan;
                control->pan_rel = true;
                control->pan_valid = true;
                control->tilt_val = rel_tilt;
                control->tilt_rel = true;
                control->tilt_valid = true;
                control->publish();
            }
        }
        delete[] corners;
    } else {
        // image_in->unlock_atom();
    }
}
int mainStaticMatch() { time_t start,end1,end2,end3,end4,end5; start = clock(); IplImage *img1, *img2; img1 = cvLoadImage("../data/1.JPG"); img2 = cvLoadImage("../data/2.JPG"); end1 = clock(); IpVec ipts1, ipts2; surfDetDes(img1,ipts1,false,4,4,2,0.0008f); surfDetDes(img2,ipts2,false,4,4,2,0.0008f); std::cout << "im1" << std::endl; std::cout << "Size:" << ipts1.size() << std::endl; std::cout << "im2" << std::endl; std::cout << "Size:" << ipts2.size() << std::endl; end2 = clock(); IpPairVec matches; getMatches(ipts1,ipts2,matches); end3 = clock(); for (unsigned int i = 0; i < matches.size(); ++i) { drawPoint(img1,matches[i].first); drawPoint(img2,matches[i].second); const int & w = img1->width; cvLine(img1,cvPoint(matches[i].first.x,matches[i].first.y),cvPoint(matches[i].second.x+w,matches[i].second.y), cvScalar(255,255,255),1); cvLine(img2,cvPoint(matches[i].first.x-w,matches[i].first.y),cvPoint(matches[i].second.x,matches[i].second.y), cvScalar(255,255,255),1); } std::cout << "Matches: " << matches.size() << std::endl; /* cvNamedWindow("1", CV_WINDOW_AUTOSIZE ); cvNamedWindow("2", CV_WINDOW_AUTOSIZE ); cvShowImage("1", img1); cvShowImage("2", img2); cvWaitKey(0); */ end4 = clock(); // cvSaveImage("result_gpu1.jpg",img1); // cvSaveImage("result_gpu2.jpg",img2); // Stitch two images IplImage *img = cvCreateImage(cvSize(img1->width + img2->width, img1->height),img1->depth,img1->nChannels); cvSetImageROI( img, cvRect( 0, 0, img1->width, img1->height ) ); cvCopy(img1, img); cvSetImageROI( img, cvRect(img1->width,0, img2->width, img2->height) ); cvCopy(img2, img); cvResetImageROI(img); cvSaveImage("result_gpu.jpg",img); end5 = clock(); double dif1 = (double)(end1 - start) / CLOCKS_PER_SEC; double dif2 = (double)(end2 - end1) / CLOCKS_PER_SEC; double dif3 = (double)(end3 - end2) / CLOCKS_PER_SEC; double dif4 = (double)(end4 - end3) / CLOCKS_PER_SEC; double dif5 = (double)(end5 - end4) / CLOCKS_PER_SEC; double total = (double)(end5 - start) / CLOCKS_PER_SEC; 
std::cout.setf(std::ios::fixed,std::ios::floatfield); std::cout.precision(5); std::cout << "Time(load):" << dif1 << std::endl; std::cout << "Time(descriptor):" << dif2 << std::endl; std::cout << "Time(match):" << dif3 << std::endl; std::cout << "Time(plot):" << dif4 << std::endl; std::cout << "Time(save):" << dif5 << std::endl; std::cout << "Time(Total):" << total << std::endl; return 0; }
void *ControlThread(void *unused) { int i=0; char fileName[30]; NvMediaTime pt1 ={0}, pt2 = {0}; NvU64 ptime1, ptime2; struct timespec; IplImage* imgOrigin; IplImage* imgCanny; // cvCreateImage imgOrigin = cvCreateImage(cvSize(RESIZE_WIDTH, RESIZE_HEIGHT), IPL_DEPTH_8U, 3); imgCanny = cvCreateImage(cvGetSize(imgOrigin), IPL_DEPTH_8U, 1); int angle, speed; IplImage* imgOrigin; IplImage* imgResult; unsigned char status; unsigned int gain; CarControlInit(); PositionControlOnOff_Write(UNCONTROL); SpeedControlOnOff_Write(1); //speed controller gain set //P-gain gain = SpeedPIDProportional_Read(); // default value = 10, range : 1~50 printf("SpeedPIDProportional_Read() = %d \n", gain); gain = 20; SpeedPIDProportional_Write(gain); //I-gain gain = SpeedPIDIntegral_Read(); // default value = 10, range : 1~50 printf("SpeedPIDIntegral_Read() = %d \n", gain); gain = 20; SpeedPIDIntegral_Write(gain); //D-gain gain = SpeedPIDDifferential_Read(); // default value = 10, range : 1~50 printf("SpeedPIDDefferential_Read() = %d \n", gain); gain = 20; SpeedPIDDifferential_Write(gain); angle = 1460; SteeringServoControl_Write(angle); // cvCreateImage imgOrigin = cvCreateImage(cvSize(RESIZE_WIDTH, RESIZE_HEIGHT), IPL_DEPTH_8U, 3); imgResult = cvCreateImage(cvGetSize(imgOrigin), IPL_DEPTH_8U, 1); int flag = 1; while(1) { pthread_mutex_lock(&mutex); pthread_cond_wait(&cond, &mutex); GetTime(&pt1); ptime1 = (NvU64)pt1.tv_sec * 1000000000LL + (NvU64)pt1.tv_nsec; Frame2Ipl(imgOrigin); // save image to IplImage structure & resize image from 720x480 to 320x240 pthread_mutex_unlock(&mutex); cvCanny(imgOrigin, imgCanny, 100, 100, 3); sprintf(fileName, "captureImage/imgCanny%d.png", i); cvSaveImage(fileName , imgCanny, 0); Frame2Ipl(imgOrigin, imgResult); // save image to IplImage structure & resize image from 720x480 to 320x240 pthread_mutex_unlock(&mutex); //cvCanny(imgOrigin, imgCanny, 100, 100, 3); sprintf(fileName, "captureImage/imgyuv%d.png", i); cvSaveImage(fileName , imgOrigin, 0); 
//sprintf(fileName, "captureImage/imgOrigin%d.png", i); //cvSaveImage(fileName, imgOrigin, 0); // TODO : control steering angle based on captured image --------------- //speed set speed = DesireSpeed_Read(); printf("DesireSpeed_Read() = %d \n", speed); //speed = -10; //DesireSpeed_Write(speed); if(flag == 1){ if(greenlight>1000) { printf("right go\n"); Winker_Write(LEFT_ON); usleep(1000000); //Winker_Write(ALL_OFF); angle = 1400; SteeringServoControl_Write(angle); speed = 10; DesireSpeed_Write(speed); speed = DesireSpeed_Read(); printf("DesireSpeed_Read() = %d \n", speed); sleep(1); flag = 0; } else { printf("left go\n"); Winker_Write(RIGHT_ON); usleep(10000); Winker_Write(ALL_OFF); speed = 20; DesireSpeed_Write(speed); usleep(1300000); angle = 1950; SteeringServoControl_Write(angle); usleep(5000000); angle = 1460; SteeringServoControl_Write(angle); usleep(1000000); speed = 0; DesireSpeed_Write(speed); flag = 0; } } // --------------------------------------------------------------------- GetTime(&pt2); ptime2 = (NvU64)pt2.tv_sec * 1000000000LL + (NvU64)pt2.tv_nsec; printf("--------------------------------operation time=%llu.%09llu[s]\n", (ptime2-ptime1)/1000000000LL, (ptime2-ptime1)%1000000000LL); i++; } }
void FindElevatorButtons::fitGridToDetections(int nPixels) { gridParams.clear(); obsDetections.clear(); emg->computeGridParams(svlDetections, gridParams, obsDetections, nPixels, imageName, imageFile); // nPixels = 50; // Adjust top left coord of grid. for (size_t i=0; i<gridParams.size(); i++) { gridParams[i].gx += gridParams[i].dx; gridParams[i].gy += gridParams[i].dy; } // Display grid output by EM algorithm on image and save to file. IplImage* em_image = cvCloneImage(source_image); CvPoint pt1, pt2; int line_width = 4; // Assume no more than 5 grids. vector<CvScalar> colors; colors.push_back(CV_RGB(255, 0, 0)); // red colors.push_back(CV_RGB(255, 153, 18)); // orange colors.push_back(CV_RGB(155, 48, 255)); // purple colors.push_back(CV_RGB(0, 0 ,255)); // blue colors.push_back(CV_RGB(0, 255, 0)); // green // Draw vertical lines. for (size_t i=0; i<gridParams.size(); i++) { cout << "Drawing grid with parameters: " << endl; cout << "gx: " << gridParams[i].gx << endl; cout << "gy: " << gridParams[i].gy << endl; cout << "dx: " << gridParams[i].dx << endl; cout << "dy: " << gridParams[i].dy << endl; cout << "ncols: " << gridParams[i].ncols << endl; cout << "nrows: " << gridParams[i].nrows << endl; // Draw horizontal lines. for (int row=0; row<=gridParams[i].nrows; row++) { for (int col=0; col<gridParams[i].ncols; col++) { pt1.x = int(gridParams[i].gx + gridParams[i].dx*(col-0.75)); pt1.y = int(gridParams[i].gy + gridParams[i].dy*(row-0.5)); pt2.x = int(gridParams[i].gx + gridParams[i].dx*(col+0.25)); pt2.y = pt1.y; if (DEBUG > 1) { cout << "pt1: " << pt1.x << "," << pt1.y << endl; cout << "pt2: " << pt2.x << "," << pt2.y << endl; } if (nPixels > 500000) cvLine(em_image, pt1, pt2, colors[i], line_width, 8); else cvLine(em_image, pt1, pt2, colors[i], line_width, 2); } } // Draw vertical lines. 
for (int col=0; col<=gridParams[i].ncols; col++) { for (int row=0; row<gridParams[i].nrows; row++) { pt1.x = int(gridParams[i].gx + gridParams[i].dx*(col-0.75)); pt1.y = int(gridParams[i].gy + gridParams[i].dy*(row-0.5)); pt2.x = pt1.x; pt2.y = int(gridParams[i].gy + gridParams[i].dy*(row+0.5)); if (DEBUG > 1) { cout << "pt1: " << pt1.x << "," << pt1.y << endl; cout << "pt2: " << pt2.x << "," << pt2.y << endl; } if (nPixels > 500000) cvLine(em_image, pt1, pt2, colors[i], line_width, 8); else cvLine(em_image, pt1, pt2, colors[i], line_width, 2); } } // display button locations from EM algorithm for (size_t k=0; k<obsDetections[i].size(); k++) { if (obsDetections[i][k].isButton) { if (nPixels > 500000) { cvCircle(em_image, cvPoint(int(obsDetections[i][k].x), int(obsDetections[i][k].y)), 15, colors[i], -1); } else { cvCircle(em_image, cvPoint(int(obsDetections[i][k].x), int(obsDetections[i][k].y)), 3, colors[i], -1); } } } } string debugFile = find_button_pkg_path + "/Data/debug/" + imageName + "_em.jpg"; cvSaveImage(debugFile.c_str(), em_image); }
int main(int argc, char **argv) { IplImage *Iimg = 0, *Oimg = 0; unsigned char *Idata, *Odata; int height, width, step; int i, j; int Wsize, wh, ww; if(argc != 3) { printf("Usage: ./sauvolaBinarirzation <input_image> <output_image>\n"); exit(-1); } Iimg = (IplImage *) cvLoadImage(argv[1], CV_LOAD_IMAGE_GRAYSCALE); if(!Iimg) { printf("Sorry! Loading image failed\nTry again\n"); exit(-1); } Oimg = cvCloneImage(Iimg); height = Iimg->height; width = Iimg->width; step = Iimg->widthStep; Idata = (unsigned char *) Iimg->imageData; Odata = (unsigned char *) Oimg->imageData; printf("Image size is %d X %d\nEnter window size: ",width, height); scanf("%d", &Wsize); for(i = 0; i < height; i += Wsize) { if(i + Wsize <= height) wh = Wsize; else wh = height % Wsize; for(j = 0; j < width; j += Wsize) { if(j + Wsize <= width) ww = Wsize; else ww = width % Wsize; sauvola(Idata + i*step + j, Odata + i*step + j, step, wh, ww); } } cvSaveImage(argv[2], Oimg); /* cvNamedWindow("input", CV_WINDOW_AUTOSIZE); cvMoveWindow("input", 100, 100); cvShowImage("input", Iimg); cvNamedWindow("output", 200); cvMoveWindow("output", 500, 100); cvShowImage("output", Oimg); cvWaitKey(0); */ cvReleaseImage(&Iimg); cvReleaseImage(&Oimg); return 0; }
int main(void) { IplImage *src=NULL; if (0){ src = cvCreateImageHeader(cvSize(4,4),IPL_DEPTH_8U,1); char rawdata[4][4] = { {0, 0, 1, 1}, {0, 0, 1, 1}, {0, 2, 2, 2}, {2, 2, 3, 3}}; src->imageData = (char*)(&rawdata); }else{ src = cvLoadImage("test.png",0); } CvGLCM* glcm; // glcm = cvCreateGLCM(src, 1, NULL, 4, CV_GLCM_OPTIMIZATION_LUT); glcm = cvCreateGLCM(src, 1, NULL, 4, CV_GLCM_OPTIMIZATION_NONE); cvCreateGLCMDescriptors(glcm, CV_GLCMDESC_OPTIMIZATION_ALLOWDOUBLENEST); //#define CV_GLCMDESC_ENTROPY 0 //#define CV_GLCMDESC_ENERGY 1 //#define CV_GLCMDESC_HOMOGENITY 2 //#define CV_GLCMDESC_CONTRAST 3 //#define CV_GLCMDESC_CLUSTERTENDENCY 4 //#define CV_GLCMDESC_CLUSTERSHADE 5 //#define CV_GLCMDESC_CORRELATION 6 //#define CV_GLCMDESC_CORRELATIONINFO1 7 //#define CV_GLCMDESC_CORRELATIONINFO2 8 //#define CV_GLCMDESC_MAXIMUMPROBABILITY 9 for (int step=0; step<4; step++){ for (int i=0; i<10; i++){ printf("%.3f,", cvGetGLCMDescriptor(glcm, step, i)); } printf("\n"); } IplImage *d0org = cvCreateImage(cvSize(256,256),IPL_DEPTH_32F,1); cvResize(cvCreateGLCMImage(glcm,0),d0org,CV_INTER_NN); IplImage *d0 = cvCreateImage(cvGetSize(d0org),IPL_DEPTH_8U,1); cvConvertScaleAbs(d0org,d0,255,0); cvNormalize(d0,d0,0,255,CV_MINMAX); cvSaveImage("d0.png",d0); IplImage *d1org = cvCreateImage(cvSize(256,256),IPL_DEPTH_32F,1); cvResize(cvCreateGLCMImage(glcm,1),d1org,CV_INTER_NN); IplImage *d1 = cvCreateImage(cvGetSize(d1org),IPL_DEPTH_8U,1); cvConvertScaleAbs(d1org,d1,255,0); cvNormalize(d1,d1,0,255,CV_MINMAX); cvSaveImage("d1.png",d1); IplImage *d2org = cvCreateImage(cvSize(256,256),IPL_DEPTH_32F,1); cvResize(cvCreateGLCMImage(glcm,2),d2org,CV_INTER_NN); IplImage *d2 = cvCreateImage(cvGetSize(d2org),IPL_DEPTH_8U,1); cvConvertScaleAbs(d2org,d2,255,0); cvNormalize(d2,d2,0,255,CV_MINMAX); cvSaveImage("d2.png",d2); IplImage *d3org = cvCreateImage(cvSize(256,256),IPL_DEPTH_32F,1); cvResize(cvCreateGLCMImage(glcm,3),d3org,CV_INTER_NN); IplImage *d3 = 
cvCreateImage(cvGetSize(d3org),IPL_DEPTH_8U,1); cvConvertScaleAbs(d3org,d3,255,0); cvNormalize(d3,d3,0,255,CV_MINMAX); cvSaveImage("d3.png",d3); cvNamedWindow("D0",1); cvNamedWindow("D1",1); cvNamedWindow("D2",1); cvNamedWindow("D3",1); cvShowImage("D0",d0); cvShowImage("D1",d1); cvShowImage("D2",d2); cvShowImage("D3",d3); cvWaitKey(0); cvReleaseGLCM(glcm,CV_GLCM_ALL); return 0; }
// DIBR (depth-image-based rendering) demo: load a BMP, build a coarse
// block-based depth map from 4x4 color averages with a flood-fill-like flag
// pass, low-pass the depth, then shift pixels horizontally by a
// depth-dependent disparity to synthesize the output view.  Shows/saves the
// depth image ("ori") and the rendered result ("DIBR").
//
// NOTE(review): the 4x4 averaging, the difference pass and the low-pass all
// index R[i+3][j+3], avg_R[i+4][j], depth_out[i-1][j-1] etc. without bounds
// checks, so border rows/columns read outside [0,height)x[0,width) unless
// the global arrays are padded -- verify the global array dimensions.
int main(int argc, char** argv)
{
    FILE *fp;
    IplImage* frame_in = 0; // declare IplImage pointer
    // open display windows
    cvNamedWindow( "ori", 1 );
    cvNamedWindow( "DIBR", 1 );
    ////////// data input
    frame_in = cvLoadImage("D:\\3-2(d).bmp");
    /////// query image dimensions
    int height = frame_in->height;
    int width = frame_in->width;
    int step = frame_in->widthStep/sizeof(uchar);//step = frame_in->widthStep;
    printf("h = %d w = %d s = %d\n",height,width,step);
    ///// scratch registers
    int i,j,k,l;
    int avg_reg = 0,diff_bom,diff_right,diff_top,diff_li;
    int reg_A,reg_B,reg_C,reg_D;
    ///// allocate CV image buffers
    IplImage* frame_DIBR = cvCreateImage(cvSize(width,height),IPL_DEPTH_8U,3);
    IplImage* frame_gray = cvCreateImage(cvSize(width,height),IPL_DEPTH_8U,1);
    IplImage* frame_avg = cvCreateImage(cvSize(width,height),IPL_DEPTH_8U,3);
    IplImage* frame_reg = cvCreateImage(cvSize(width,height),IPL_DEPTH_8U,3);
    IplImage* frame_out = cvCreateImage(cvSize(width,height),IPL_DEPTH_8U,3);

    // Split the input into the global R/G/B planes, compute luma Y, seed the
    // row-proportional background depth (depth_floor) and clear the FF flags.
    for( i=0 ; i < height ; i++ ){
        for( j=0 ; j < width ; j++ ){
            //Y[i][j] = frame_gray->imageData[i*width+j];
            B[i][j] = frame_in->imageData[i*step+j*3+0];
            G[i][j] = frame_in->imageData[i*step+j*3+1];
            R[i][j] = frame_in->imageData[i*step+j*3+2];
            Y[i][j] = R[i][j]*(0.299) + G[i][j]*(0.587) + B[i][j]*(0.114);
            /*if (i>=0) Y[i][j]=10;
            if (i>=12) Y[i][j]=100;
            if (i>=50) Y[i][j]=200; */
            depth_floor[i][j] = (int)floor((double)((back_TH*i/height)));
            FF[i][j] = 0;
        }//end j
    }//end i

    ///////////////// algorithm
    // Sum each 4x4 block's R/G/B into its top-left cell, then replicate that
    // sum to all 16 cells so later lookups can use any in-block coordinate.
    for( i=0 ; i < height ; i++ ){
        for( j=0 ; j < width ; j++ ){
            if ((i%4)==0 && (j%4)==0){
                avg_R[i][j] = R[i][j]+R[i][j+1]+R[i][j+2]+R[i][j+3]+
                    R[i+1][j]+R[i+1][j+1]+R[i+1][j+2]+R[i+1][j+3]+
                    R[i+2][j]+R[i+2][j+1]+R[i+2][j+2]+R[i+2][j+3]+
                    R[i+3][j]+R[i+3][j+1]+R[i+3][j+2]+R[i+3][j+3];
                avg_G[i][j] = G[i][j]+G[i][j+1]+G[i][j+2]+G[i][j+3]+
                    G[i+1][j]+G[i+1][j+1]+G[i+1][j+2]+G[i+1][j+3]+
                    G[i+2][j]+G[i+2][j+1]+G[i+2][j+2]+G[i+2][j+3]+
                    G[i+3][j]+G[i+3][j+1]+G[i+3][j+2]+G[i+3][j+3];
                avg_B[i][j] = B[i][j]+B[i][j+1]+B[i][j+2]+B[i][j+3]+
                    B[i+1][j]+B[i+1][j+1]+B[i+1][j+2]+B[i+1][j+3]+
                    B[i+2][j]+B[i+2][j+1]+B[i+2][j+2]+B[i+2][j+3]+
                    B[i+3][j]+B[i+3][j+1]+B[i+3][j+2]+B[i+3][j+3];
                for( k=0 ; k < 4 ; k++ )
                    for( l=0 ; l < 4 ; l++ ){
                        avg_R[i+k][j+l] = avg_R[i][j];
                        avg_G[i+k][j+l] = avg_G[i][j];
                        avg_B[i+k][j+l] = avg_B[i][j];
                    }
            }
        }//end j
    }//end i

    // Per-4x4-block: compare a block's summed color against its bottom and
    // right neighbors; where they are similar (<= TH_diff) propagate the
    // depth value and mark blocks done via the FF flags (0 = unset, 255 = set).
    for( i=0 ; i < height ; i=i+4 ){
        for( j=0 ; j < width ; j=j+4 ){
            if ((i%4)==0 && (j%4)==0){
                //diff_top = abs(avg[i][j]-avg[i-4][j]);
                //diff_li = abs(avg[i][j]-avg[i][j-4]);
                diff_bom = abs(avg_R[i][j]-avg_R[i+4][j])+abs(avg_G[i][j]-avg_G[i+4][j])+abs(avg_B[i][j]-avg_B[i+4][j]);
                diff_right = abs(avg_R[i][j]-avg_R[i][j+4])+abs(avg_G[i][j]-avg_G[i][j+4])+abs(avg_B[i][j]-avg_B[i][j+4]);
                // printf("[%d][%d] avg = %d avg_right = %d avg_bom = %d bom = %d right = %d\n",i+k,j+l,avg[i][j],avg[i][j+4],avg[i+4][j],diff_bom,diff_right);
                // printf("FF = %d FF_R = %d FF_B = %d\n",FF[i][j],FF[i][j+4],FF[i+4][j]);
            }
            /*printf("top = %d li = %d\n",diff_top,diff_li);
            _getch();*/
            for( k=0 ; k <= 3 ; k++ )
                for( l=0 ; l <= 3 ; l++ ){
                    // printf("[%d][%d] bom = %d right = %d\n",i+k,j+l,diff_bom,diff_right);
                    /*if (i == 0 ){  // first row all zero
                        depth_out[i+k][j+l] = 0;
                        FF[i+k][j+l] = 1;
                        printf("1\n");
                    }else*/
                    if (diff_bom<=TH_diff && diff_right<=TH_diff && FF[i+4][j] == 0 && FF[i][j+4] == 0 ){
                        // bottom and right both unset: assign depth directly
                        if (FF[i][j] == 0){
                            depth_out[i+k][j+l] = depth_floor[i][j] ;
                            if ((k%4)==3 && (l%4)==3){
                                FF[i][j] = 255;
                                //printf(" 0 ");
                                //_getch();
                            }
                        }
                        depth_out[i+k+4][j+l] = depth_out[i][j] ;
                        depth_out[i+k][j+l+4] = depth_out[i][j] ;
                        if ((k%4)==3 && (l%4)==3){
                            FF[i+4][j] = 255;
                            FF[i][j+4] = 255;
                            // printf(" 2 ");
                            //_getch();
                        }
                    }else if (diff_right<=TH_diff && FF[i][j] == 0 && FF[i][j+4] == 255 ){
                        // origin unset, right block set: copy right -> origin
                        depth_out[i+k][j+l] = depth_out[i][j+4];
                        //printf("3");
                        //printf("FF = %d FF_R = %d\n",FF[i][j],FF[i][j+4]);
                        // _getch();
                    }else if (diff_bom<=TH_diff && FF[i][j] == 0 && FF[i+4][j] == 255 ){
                        // origin unset, bottom block set: copy bottom -> origin
                        depth_out[i+k][j+l] = depth_out[i+4][j];
                        //printf("4\n");
                    }else if (diff_right<=TH_diff && FF[i][j+4] == 0 ){
                        // assign the right-hand block
                        if (FF[i][j+4] == 0)
                            depth_out[i+k][j+l] = depth_floor[i][j];
                        depth_out[i+k][j+l+4] = depth_out[i][j];
                        if ((k%4)==3 && (l%4)==3){
                            FF[i][j+4] = 255;
                            //printf("5\n");
                        }
                    }else if (diff_bom<=TH_diff && FF[i+4][j] == 0 ){
                        // assign the bottom block
                        if (FF[i][j] == 0)
                            depth_out[i+k][j+l] = depth_floor[i][j];
                        depth_out[i+k+4][j+l] = depth_out[i][j];
                        if ((k%4)==3 && (l%4)==3){
                            FF[i+4][j] = 255;
                            //printf("6\n");
                        }
                    }else if (depth_out[i+k][j+l] <= 0 && FF[i][j] == 0){
                        // fallback: never assigned, use the background floor
                        depth_out[i+k][j+l]=depth_floor[i][j];
                    }
                }
            // printf("[%d][%d] bom = %d right = %d",i+k,j+l,diff_bom,diff_right);
            //_getch();
        }//end j
    }//end i

    // 3x3 box low-pass over the depth map, clamped to [0,255].
    // NOTE(review): k,l = -1 reads depth_out[-1][...] on the borders.
    for( i=0 ; i < height ; i++ ){
        for( j=0 ; j < width ; j++ ){
            ////lowpass
            reg_A = 0;reg_B = 0;
            for( k=-1 ; k <= 1 ; k++ )
                for( l=-1 ; l <= 1 ; l++ ){
                    reg_A += depth_out[i+k][j+l];
                    reg_B++;
                }
            reg_C = reg_A/reg_B;
            if (reg_C<=0) reg_C = 0;
            else if (reg_C>=255) reg_C=255;
            depth_out[i][j] = (unsigned char)reg_C;
        }
    }

    /////DIBR
    // Copy each pixel through, then re-project: the R channel shifts left and
    // the G/B channels shift right by a disparity derived from the depth.
    for( i=0 ; i < height ; i++ ){
        for( j=0 ; j < width ; j++ ){
            int motion = (int)((10*depth_out[i][j]/(85+depth_out[i][j])));
            frame_out->imageData[i*step+(j)*3+2] = frame_in->imageData[i*step+j*3+2];
            frame_out->imageData[i*step+(j)*3+1] = frame_in->imageData[i*step+j*3+1];
            frame_out->imageData[i*step+(j)*3+0] = frame_in->imageData[i*step+j*3+0];
            if ((j-motion)>0){
                frame_out->imageData[i*step+(j-motion)*3+2] = frame_in->imageData[i*step+j*3+2];
            }
            if ((j+motion)<width){
                frame_out->imageData[i*step+(j+motion)*3+0] = frame_in->imageData[i*step+j*3+0];
                frame_out->imageData[i*step+(j+motion)*3+1] = frame_in->imageData[i*step+j*3+1];
            }
        }//end j
    }//end i

    /////////////// output
    // Pack the depth maps into displayable 3-channel images.
    for( i=0 ; i < height ; i++ )
        for( j=0 ; j < width ; j++ ){
            //if (line[i] == 255){
            frame_avg->imageData[i*step+j*3+0] = depth_out[i][j];//B
            frame_avg->imageData[i*step+j*3+1] = depth_out[i][j];//G
            frame_avg->imageData[i*step+j*3+2] = depth_out[i][j];//R
            frame_reg->imageData[i*step+j*3+0] = depth_outB[i][j];//B
            frame_reg->imageData[i*step+j*3+1] = depth_outB[i][j];//G
            frame_reg->imageData[i*step+j*3+2] = depth_outB[i][j];//R
        }

    //fp=fopen("D:/tt.raw","wb"); // raw dump (disabled)
    //fwrite(avg,height,width,fp);
    //fclose(fp);
    cvShowImage( "ori", frame_avg );
    cvShowImage( "DIBR", frame_out );
    // save the rendered view
    cvSaveImage("D:\\3-4.jpg",frame_out);
    cvWaitKey(0);
    cvDestroyWindow( "ori" );// destroy window
    cvDestroyWindow( "DIBR" );// destroy window
    return 0;
}
BOOL CFaceProcess::OnSavePic() { // TODO: Add your control notification handler code here KillTimer(timer); if (!m_Video){ // {AfxMessageBox("先打开摄像头"); return false; } // m_GrabFrame=cvQueryFrame(m_Video); // if (!m_GrabFrame) // {AfxMessageBox("截取视屏帧失败,请重试!"); return false; } //static char countsnap='1'; if( !cvGrabFrame( m_Video)) return FALSE; m_GrabFrame = cvRetrieveFrame(m_Video ); if( !m_GrabFrame) return FALSE; if( !m_SaveFrame) m_SaveFrame = cvCreateImage( cvSize(m_GrabFrame->width,m_GrabFrame->height), IPL_DEPTH_8U, m_GrabFrame->nChannels ); if( m_GrabFrame->origin == IPL_ORIGIN_TL ) cvCopy( m_GrabFrame, m_SaveFrame, 0 ); else cvFlip( m_GrabFrame, m_SaveFrame, 0 ); }//if (!m_Video){ static int countsnap=1; CString m_name1=GetImageName(path,countsnap); /*********加上后缀名称,加上存放相对路径*********************/ CString headPath="ImageLab/"; m_name1=headPath+m_name1+".pgm"; const char* pszStr1 = m_name1.GetBuffer(m_name1.GetLength()); //AfxMessageBox(m_name); IplImage * m_snap1=cvCreateImage(cvGetSize(m_SaveFrame),m_SaveFrame->depth,m_SaveFrame->nChannels); cvCopy(m_SaveFrame,m_snap1,NULL); //m_snap1->origin=1;//等于0保存倒立图向 IplImage *faceImage1=0; IplImage *faceGray1=0; //检测人脸 try{ if(faceDetector.detect_and_draw(m_snap1)){ faceImage1=faceDetector.getFaceImage(); // faceImage1->origin=1; //化简图片 if(faceSimplifier.Simplify(faceImage1)){ faceGray1=faceSimplifier.getFaceImage(); //faceGray1->origin=1;//等于0保存倒立图向 cvSaveImage(pszStr1,faceGray1); //把图像写入文件 countsnap++; } } SetTimer(timer,1,NULL) ; }catch(...) { SetTimer(timer,1,NULL) ; AfxMessageBox("保存图片失败--OnSavePic!!"); return false; } if(countsnap>3) GetDlgItem(IDC_OK)->EnableWindow(TRUE);//使控件有效 if(m_snap1) cvReleaseImage(&m_snap1); if(faceImage1) cvReleaseImage(&faceImage1); if(faceGray1) cvReleaseImage(&faceGray1); return true; }
int main( int argc, char** argv ){ CvCapture* capture = NULL; IplImage* src = NULL; IplImage* src2 = NULL; IplImage* gray = NULL; IplImage* output = NULL; CvMat* cornerPoints; CvMat* objectPoints; CvMat pointsNumMat; CvPoint2D32f* points; int pointsNum[1]; ChessBoard chess; int pointsPerScene; int detectedPointsNum; int allPointsFound; int i, j; char key; int camID; char* windowName = "extrinsic calibration"; capture = cvCreateCameraCapture(0); if(!capture) { fprintf(stderr, "ERROR: capture is NULL \n"); return(-1); } chess.dx = CHESS_ROW_DX; chess.dy = CHESS_COL_DY; chess.patternSize.width = CHESS_ROW_NUM; chess.patternSize.height = CHESS_COL_NUM; pointsPerScene = chess.patternSize.width * chess.patternSize.height; cornerPoints = cvCreateMat(pointsPerScene, 2, CV_32F); objectPoints = cvCreateMat(pointsPerScene, 3, CV_32F); pointsNum[0] = pointsPerScene; pointsNumMat = cvMat(1, 1, CV_32S, pointsNum); points = (CvPoint2D32f*)malloc( sizeof(CvPoint2D32f) * pointsPerScene ) ; src = cvQueryFrame(capture); if(src == NULL){ fprintf(stderr, "Could not grab and retrieve frame...\n"); return(-1); } src2 = cvCreateImage(cvSize(src->width, src->height), src->depth, 3); output = cvCreateImage(cvSize(src->width, src->height), src->depth, 3); cvCopy( src, src2, NULL ); gray = cvCreateImage(cvSize(src2->width, src2->height), src2->depth, 1); cvNamedWindow( windowName, CV_WINDOW_AUTOSIZE ); while( 1 ){ src = cvQueryFrame(capture); if( !src ) { break; } cvCopy( src, src2, NULL ); cvCopy( src2, output, NULL ); cvCvtColor(src2, gray, CV_BGR2GRAY); if( cvFindChessboardCorners( gray, chess.patternSize, points, &detectedPointsNum, CV_CALIB_CB_ADAPTIVE_THRESH ) ){ cvFindCornerSubPix(gray, points, detectedPointsNum, cvSize(5, 5), cvSize(-1, -1), cvTermCriteria(CV_TERMCRIT_ITER, 100, 0.1)); allPointsFound = 1; } else { allPointsFound = 0; } cvDrawChessboardCorners( src2, chess.patternSize, points, detectedPointsNum, allPointsFound ); cvShowImage(windowName, src2); key = cvWaitKey( 20 ); 
if(key == RETURN && allPointsFound ){ store2DCoordinates( cornerPoints, points, chess, 0 ); store3DCoordinates( objectPoints, chess, 0 ); calibrateCamera("intrinsic_param_ref.txt", "extrinsic_param.txt", cornerPoints, objectPoints ); cvSaveImage( "board.jpg", output, 0 ); break; } else if(key == ESCAPE) { break; } } cvDestroyWindow( windowName ); cvReleaseCapture(&capture); free(points); cvReleaseMat(&cornerPoints); cvReleaseMat(&objectPoints); cvReleaseImage(&gray); cvReleaseImage(&src2); return(0); }
int main() { bool salir=FALSE; do { IplImage *im; char eleccion; bool j=TRUE; //Panel printf("Elija la imagen que quiere cargar\n"); printf("Imagenes del programa:\n\n" "A=2_bolas\n" "B=3_bolas\n" "C=4_bolas\n" "D=6_bolas\n" "E=bola_azul\n" "F=bola_roja\n" "G=bolas_cortadas\n" "H=bola_amarilla_blanca\n" "I=bola_amarilla_blanca_+intensidad\n" "J=bola_amarilla1\n" "K=bolas_cortadas_+intensidad\n" "L=bolas_juntas\n" "M=cambio_angulo_iluminacion\n" "N=bolas_pegadas_1\n" "O=bolas_pegadas_2\n" "P=bolas_pegadas_3\n" "Q=bolas_pegadas_4\n" "R=bolas_pegadas_4_+intensidad\n" "S=bolas_pegadas_rotas\n" "T=bolas_pegadas_rotas_2\n" ); printf("X=SALIR\n\n"); while(j==TRUE) { scanf("%c",&eleccion); switch(eleccion) { case 'A':{ char NombreImagen[]="2_bolas.jpg"; im=cvLoadImage(NombreImagen, -1); j=FALSE;} break; case 'B': {char NombreImagen[]="3_bolas.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'C': { char NombreImagen[]="4_bolas.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'D': { char NombreImagen[]="6_bolas.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'E': { char NombreImagen[]="bola_azul.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'F': {char NombreImagen[]="bola_roja.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'G': {char NombreImagen[]="bolas_cortadas.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'H': {char NombreImagen[]="bola_amarilla_blanca.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'I': { char NombreImagen[]="bola_amarilla_blanca_+intensidad.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'J': { char NombreImagen[]="bola_amarilla1.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'K': { char NombreImagen[]="bolas_cortadas_+intensidad.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'L': { char NombreImagen[]="bolas_juntas.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'M': {char 
NombreImagen[]="cambio_angulo_iluminacion.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'N': {char NombreImagen[]="bolas_pegadas_1.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'O': {char NombreImagen[]="bolas_pegadas_2.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'P': {char NombreImagen[]="bolas_pegadas_3.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'Q': {char NombreImagen[]="bolas_pegadas_4.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'R': {char NombreImagen[]="bolas_pegadas_4_+intensidad.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'S': {char NombreImagen[]="bolas_pegadas_rotas.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'T': {char NombreImagen[]="bolas_pegadas_rotas_2.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;} break; case 'X': {salir=TRUE; return 0;} break; default:{ printf("Eleccion incorrecta, vuelva a elegir una opcion\n"); j=TRUE; } } } //-------------------------------------------------------------------------------------------------------------------------------------------------------------------------- //OBTENER UNA IMAGEN BINARIA SÓLO CON BOLAS AZULES Y OTRA SÓLO CON BOLAS ROJAS IplImage *Imagen_RGB; IplImage *Imagen_umbr; IplImage *Imagen_umbr_2; CvSize Dimensiones; //umbrales de la imagenS y la imagenH. 
En esta parte no utilizo la función MinMax porque me sale mejor poniendo unos umbrales fijos int umbral1=150; int umbral2=100; //pasamos de BGR a RGB Dimensiones= cvGetSize(im); Imagen_RGB=cvCreateImage(Dimensiones,IPL_DEPTH_8U,3); cvCvtColor(im,Imagen_RGB,CV_BGR2RGB); IplImage *ImagenHSV; IplImage *ImagenH,*ImagenS,*ImagenV; //pasamos de RGB a HSV ImagenHSV=cvCreateImage(Dimensiones,IPL_DEPTH_8U,3); cvCvtColor(Imagen_RGB,ImagenHSV,CV_RGB2HSV); //Extraemos de la imagen HSV sus tres componentes: H, S y V ImagenH=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1); ImagenS=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1); ImagenV=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1); cvSplit(ImagenHSV,ImagenH,ImagenS,ImagenV,0); //imagenes binarias para umbralizar Sy H Imagen_umbr=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1); Imagen_umbr_2=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1); //umbralizacion. cvThreshold(ImagenS,Imagen_umbr,umbral1,255,CV_THRESH_BINARY); cvThreshold(ImagenH,Imagen_umbr_2,umbral2,255,CV_THRESH_BINARY_INV); //Descompongo la imagen en R,G y B IplImage *ImagenR=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1); IplImage *ImagenG=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1); IplImage *ImagenB=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1); cvSplit(Imagen_RGB,ImagenR,ImagenG,ImagenB,0); //A partir de aquí hago una serie de transformaciones morfológicas para separar en imágenes binarias las bolas azules de las rojas. //creo elemento estructurante IplConvKernel* element = 0; const int element_shape =CV_SHAPE_ELLIPSE; int pos=1; element= cvCreateStructuringElementEx(pos*2+1,pos*2+1,pos,pos, element_shape,0); IplImage * temp= cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1); IplImage *temp2=cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1); IplImage *resta=cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1); //con esto obtengo todas las bolas binarizadas cvMorphologyEx(Imagen_umbr,temp,temp, NULL,CV_MOP_TOPHAT,2); //tophat. Me detecta sólo las sombras de las bolas. 
Mi iluminación iene de arriba. //cvMorphologyEx(Imagen_umbr,temp,temp, NULL,CV_MOP_BLACKHAT,2); Esto podria aplicarlo si las sombras se crearan en el lado contrario cvAbsDiff (Imagen_umbr, temp ,temp); //resto la original - el tophat cvMorphologyEx(temp,temp,temp, NULL,CV_MOP_CLOSE,6); //aplico el cierre //Con esto obtengo las bolas azules binarizadas cvMorphologyEx(Imagen_umbr_2,temp2,temp2, NULL,CV_MOP_TOPHAT,1); //tophat //cvMorphologyEx(Imagen_umbr,temp,temp, NULL,CV_MOP_BLACKHAT,2); cvAbsDiff (Imagen_umbr_2, temp2 ,temp2); //resto la original - el tophat cvMorphologyEx(temp2,temp2,temp2, NULL,CV_MOP_CLOSE,6); //aplico el cierre //Dilato y erosiono el mismo número de veces, para que las bolas me queden mas o menos del mismo tamaño. Además lo hago muchas veces(15), para eliminar los //máximos defectos posibles debido a sombras y cambios y contrastes debido a la iluminación cvDilate(temp2,temp2,element,15); cvErode(temp2,temp2,element,15); cvAbsDiff (temp2, temp ,resta); // Resto la imagen de todas las bolas -la imagen de las bolas azules, dilato mcuhas veces y erosiono muchas veces, //y finalmente solo me quedan las rojas cvDilate(resta,resta,element,15);//dilato cvErode(resta,resta,element,15);//erosiono //Puede que algun contorno no deseado aún permanezca en la imagen binaria. Como aplico las mismas transformaciones morfológicas a las dos imágenes binarias //tendré el mismo defecto en las dos imagenes, así que obtengo una imagen sólo los defectos, y después resto los defectos a las dos imágenes. 
IplImage * temp3= cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1); IplImage * temp4= cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1); IplImage * Im_defectos_comunes= cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1); IplImage * Im_bolas_azules= cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1); IplImage * Im_bolas_rojas= cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1); cvThreshold(temp2,temp3,umbral2,255,CV_THRESH_BINARY_INV);//invierto las bolas rojas cvThreshold(resta,temp4,umbral2,255,CV_THRESH_BINARY_INV);//invierto las bolas azules cvAnd(temp3,temp4,Im_defectos_comunes,NULL);//multiplico las dos imagenes, la imagen que obtengo solo aparecen los defectos comunes cvAbsDiff (temp2,Im_defectos_comunes,Im_bolas_azules);//resto los defectos a las bolas azules cvAbsDiff (resta, Im_defectos_comunes ,Im_bolas_rojas);//resto los defectos a las bolas rojas //Ya tengo una imagen binaria sólo con las bolas azules y otra sólo con las rojas. //------------------------------------------------------------------------------------------------------------------------------------------------------------------------- //CALCULAR HISTOGRAMA DE LA IMAGEN G //Nueva imagen para dibujar el histograma IplImage *histImage; //Variables para el histograma int hist_size=256; int NivelGris; float NumPixels; //Estructura histograma para guardar la informacion CvHistogram *hist; //Nueva imagen para dibujar el histograma histImage = cvCreateImage(cvSize(256,256), 8, 1); //Estructura histograma para guardar la informacion hist = cvCreateHist(1, &hist_size, CV_HIST_ARRAY,NULL, 1); //calcular el histograma. 
Lo hago con la imagenG, ya que hay más contraste que en la imagen en escala de grises, pero también funcionaria con la imagen de escala de grises cvCalcHist(&ImagenG,hist,0,NULL); cvSetZero(histImage); long Histograma[256]; //dibujo el histograma for(NivelGris=0;NivelGris<hist_size;++NivelGris) { NumPixels=cvQueryHistValue_1D(hist,NivelGris)/15; cvLine(histImage,cvPoint(NivelGris,256),cvPoint(NivelGris,256-NumPixels),CV_RGB(255,255,255),1,8,0); Histograma[NivelGris]=NumPixels;//meto en un array el numero de pixels para cada nivel de gris } cvReleaseHist(&hist); cvSaveImage("Histograma.jpg",histImage,0); //------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ //UMBRALIZACIÓN DE LA IMAGEN G IplImage *imagen_bin; CvMemStorage *Memoria; CvSeq *Contorno, *Primer_Contorno; int Nc; //imagen=cvLoadImage("herramientas.tif",CV_LOAD_IMAGE_GRAYSCALE); imagen_bin=cvCreateImage(cvGetSize(ImagenG),8,1); //imagen_color=cvCreateImage(cvGetSize(ImagenG),8,3); //umbralizar la ImagenG int umbral; umbral=MinMax(Histograma); //Para algunas imagenes, debido a que tienen mas iluminacion o se introducen otros objetos como la mano, en el histograma las gausianas se juntan mucho o solo aparece //una. 
En este caso la función MinMAx() calcula un umbral muy alto y hace que no se detecten los contornos de algunas bolas, asi que establezco un umbral máximo if(umbral>100) { umbral=100; } cvLine(histImage,cvPoint(umbral,256),cvPoint(umbral,0),CV_RGB(255,255,255),1,8,0);//uDibujo el umbral en el histograma cvThreshold(ImagenG,imagen_bin,umbral,255,CV_THRESH_BINARY_INV);//Binarizo la imagen G cvMorphologyEx(imagen_bin,imagen_bin,imagen_bin, NULL,CV_MOP_CLOSE,6);//Alplico cierre para eliminar los cambios de contraste en el interior de las bolas //debido al reflejo al reflejo de la luz //--------------------------------------------------------------------------------------------------------------------------------------------------------------------- // CÁLCULO DE CONTORNOS, ÁREAS, PERÍMETROS, CAJAS Y CENTROS DE CAJA EN LA IMAGEN G. IplConvKernel* element_2 = 0; const int element_shape_2 =CV_SHAPE_ELLIPSE; int pos_2=1; element_2= cvCreateStructuringElementEx(pos_2*2+1,pos_2*2+1,pos_2,pos_2, element_shape_2,0); Memoria=cvCreateMemStorage(); bool k=FALSE; int n=0; bool pelotas_juntas=FALSE; int i; double *perimetro; double *area; CvBox2D *BoundBox; CvPoint *centro; int bolas_rotas_azules=0; int bolas_rotas_rojas=0; CvScalar s3; Nc=cvFindContours(imagen_bin,Memoria,&Primer_Contorno,sizeof(CvContour),CV_RETR_EXTERNAL); perimetro=(double*)malloc(Nc*sizeof(double)); area=(double*)malloc(Nc*sizeof(double)); BoundBox=(CvBox2D*)malloc(Nc*sizeof(CvBox2D)); centro=(CvPoint*)malloc(Nc*sizeof(CvPoint)); for(i=0,Contorno=Primer_Contorno;Contorno!=NULL;Contorno=Contorno->h_next,++i) { area[i]=cvContourArea(Contorno,CV_WHOLE_SEQ); perimetro[i]=cvArcLength(Contorno,CV_WHOLE_SEQ,1); BoundBox[i]=cvMinAreaRect2(Contorno,NULL); } for(i=0;i<Nc;++i) { centro[i] = cvPoint( BoundBox[i].center.x,BoundBox[i].center.y); } //---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 
//DETECTAR BOLAS ROTAS IplImage * inv_bolas_azules, *inv_bolas_rojas; CvMemStorage *storage_2; CvMemStorage *storage_3; CvSeq *Contorno_2, *Primer_Contorno_2; CvSeq *Contorno_3, *Primer_Contorno_3; int Nc_2; int Nc_3; double *area_2; double *area_3; CvBox2D *BoundBox_2; CvBox2D *BoundBox_3; CvPoint *centro_2; CvPoint *centro_3; inv_bolas_azules=cvCreateImage(cvGetSize(Im_bolas_azules),8,1); inv_bolas_rojas=cvCreateImage(cvGetSize(Im_bolas_rojas),8,1); cvThreshold(Im_bolas_azules,inv_bolas_azules,128,255,CV_THRESH_BINARY_INV); cvThreshold(Im_bolas_rojas,inv_bolas_rojas,128,255,CV_THRESH_BINARY_INV); storage_2=cvCreateMemStorage(); storage_3=cvCreateMemStorage(); //detecto las bolas rotas azules Nc_2=cvFindContours(inv_bolas_azules,storage_2,&Primer_Contorno_2,sizeof(CvContour),CV_RETR_EXTERNAL); //Encuentro cotornos en la imagen binaria donde sólo aparecen //las bolas azules area_2=(double*)malloc(Nc_2*sizeof(double));//tamaño del vector area BoundBox_2=(CvBox2D*)malloc(Nc_2*sizeof(CvBox2D));//tamaño del vector BoundBox_2 centro_2=(CvPoint*)malloc(Nc_2*sizeof(CvPoint));//tamaño del vector centro_2 for(i=0,Contorno_2=Primer_Contorno_2;Contorno_2!=NULL;Contorno_2=Contorno_2->h_next,++i) { area_2[i]=cvContourArea(Contorno_2,CV_WHOLE_SEQ);//Hallo el area de cada contorno BoundBox_2[i]=cvMinAreaRect2(Contorno_2,NULL);//Hallo las caja de cada contorno } for(i=0;i<Nc_2;++i) { centro_2[i] = cvPoint( BoundBox[i].center.x,BoundBox[i].center.y);// Hallo el centro de cada contorno } //Para cada contorno, si su area es menor que 2500, es que se trata de una bola rota for(i=0;i<Nc_2;++i) { if(area_2[i]<2500) { bolas_rotas_azules++; DibujarBox2D(im,BoundBox_2[i]); printf("Bola rota azul en:\n x=%d\n y=%d\n",centro[i].y,centro[i].x); } } //Detecto las bolas rotas rojas // Es el mismo procedimiento que para detectar las bolas rotas azules, pero encontrando contornos en la imagen binaria donde solo aparecen las bolas rojas 
Nc_3=cvFindContours(inv_bolas_rojas,storage_3,&Primer_Contorno_3,sizeof(CvContour),CV_RETR_EXTERNAL); area_3=(double*)malloc(Nc_3*sizeof(double)); BoundBox_3=(CvBox2D*)malloc(Nc_3*sizeof(CvBox2D)); centro_3=(CvPoint*)malloc(Nc*sizeof(CvPoint)); for(i=0,Contorno_3=Primer_Contorno_3;Contorno_3!=NULL;Contorno_3=Contorno_3->h_next,++i) { area_3[i]=cvContourArea(Contorno_3,CV_WHOLE_SEQ); BoundBox_3[i]=cvMinAreaRect2(Contorno_3,NULL); } for(i=0;i<Nc_3;++i) { centro_3[i] = cvPoint( BoundBox[i].center.x,BoundBox[i].center.y); } for(i=0;i<Nc_3;++i) { if(area_3[i]<2000) { bolas_rotas_rojas++; DibujarBox2D(im,BoundBox_3[i]); printf("Bola rota roja en:\n x=%d\n y=%d\n",centro[i].y,centro[i].x); } } //--------------------------------------------------------------------------------------------------------------------------------------------------------------------------- //CASO DE LAS BOLAS JUNTAS // En el caso de que haya dos o más bolas juntas, el programa encuentra un contorno con el área de todas las bolas que están juntas. Para solucionar este problema //utilizo el perímetro de los contornos. Elijo un valor umbral para el perímetro en el que me aseguro que se han separado todas las bolas. Así, si existe un perímetro //mayor al umbral, erosiono la imagen hasta que todos los perímetros sean menores que ese umbral. // Para detectar si hay bolas juntas, compruebo si existe algún controno que tenga el área mayor que el de una bola . 
for(i=0;i<Nc;++i) { if(area[i]>4000)//si existe el área de un contorno mayor al área de una bola { k=TRUE; pelotas_juntas=TRUE; } } while(k==TRUE)// Se mete en este bucle si ha encontrado algun área mayor que el de una bola { k=FALSE; Nc=cvFindContours(imagen_bin,Memoria,&Primer_Contorno,sizeof(CvContour),CV_RETR_EXTERNAL); perimetro=(double*)malloc(Nc*sizeof(double)); area=(double*)malloc(Nc*sizeof(double)); BoundBox=(CvBox2D*)malloc(Nc*sizeof(CvBox2D)); centro=(CvPoint*)malloc(Nc*sizeof(CvPoint)); for(i=0,Contorno=Primer_Contorno;Contorno!=NULL;Contorno=Contorno->h_next,++i) { area[i]=cvContourArea(Contorno,CV_WHOLE_SEQ); perimetro[i]=cvArcLength(Contorno,CV_WHOLE_SEQ,1); BoundBox[i]=cvMinAreaRect2(Contorno,NULL); } for(i=0;i<Nc;++i) { centro[i] = cvPoint( BoundBox[i].center.x,BoundBox[i].center.y); } for(i=0;i<Nc;++i) { if(perimetro[i]>100) { k=TRUE; cvErode(imagen_bin,imagen_bin,element_2,1); } } } //------------------------------------------------------------------------------------------------------------------------------------------------------------ //CONOCER EL NÚMERO DE BOLAS DE CADA COLOR Y SUS RESPECTIVAS POSICIONES int bolas_azules=0; int bolas_rojas=0; int mano=0; double radio=0.0; CvScalar s; CvScalar s2; //Diferenciar bolas en el caso de que no haya bolas juntas if( pelotas_juntas==FALSE) { //Bolas azules for(i=0;i<Nc;++i)//bucle para todods los contornos { s=cvGet2D(Im_bolas_azules,centro[i].y,centro[i].x);//Cojo los centros y compruebo de qué color es el pixel en la imagen de bolas azules if(s.val[0]==0)// si es 0,es que puede haber una bola azul o una bola rota azul { if(area[i]>2000 && area[i]<4000)//bola azul { bolas_azules++; radio=sqrt(area[i]/3.14); cvCircle( im, centro[i], cvRound( radio ), CV_RGB(0x00,0xff,0xff)); printf("Bola azul en:\n x=%d\n y=%d\n",centro[i].y,centro[i].x); } } } //Bolas rojas for(i=0;i<Nc;++i)//bucle para todos los contornos { s2=cvGet2D(Im_bolas_rojas,centro[i].y,centro[i].x);//Cojo el centro y compruebo de qué 
color es el pixel en la imagen con bolas rojas if(s2.val[0]==0)// si es 0,es que puede haber bola roja o bola rota roja { if(area[i]>2000 && area[i]<4000)//bola roja { bolas_rojas++; radio=sqrt(area[i]/3.14); cvCircle( im, centro[i], cvRound( radio ), CV_RGB(0xff,0x00,0x00)); printf("Bola roja en:\n x=%d\n y=%d\n",centro[i].y,centro[i].x); } } } } if( pelotas_juntas==TRUE) { float radio=30;//Como en el caso de qhe haya bolas juntas erosiono la imagen hasta separlas, no tengo las áreas reales de las bolas, así que //estipulo un radio aproximado . //Bolas azules for(i=0;i<Nc;++i) { s=cvGet2D(Im_bolas_azules,centro[i].y,centro[i].x);//Cojo los centros y compruebo de qué color es el pixel en la imagen con bolas azules if(s.val[0]==0)// si es 0,es que hay bola azul. En este caso no existe la posibilidad de que haya bolas rotas porque al erosionar solo permanecen los contornos //con un perímetro mayor al de una bola. El perímetro de una bola rota siempre será menor { cvCircle( im, centro[i], cvRound( radio ), CV_RGB(0x00,0xff,0xff)); bolas_azules++; printf("Bola azul en:\n x=%d\n y=%d\n",centro[i].y,centro[i].x); } } //Bolas rojas for(i=0;i<Nc;++i)//bucle para todos los contornos { s2=cvGet2D(Im_bolas_rojas,centro[i].y,centro[i].x);//Cojo el centro y compruebo de qué color es el pixel en la imagen con bolas rojas if(s2.val[0]==0)// si es 0,es que hay una bola roja { cvCircle( im, centro[i], cvRound( radio ), CV_RGB(0xff,0x00,0x00)); bolas_rojas++; printf("Bola roja en:\n x=%d\n y=%d\n",centro[i].y,centro[i].x); } } } printf("bolas azules:%d\n",bolas_azules); printf("bolas rotas azules:%d\n", bolas_rotas_azules); printf("bolas rojas:%d\n",bolas_rojas); printf("bolas rotas rojas:%d\n\n",bolas_rotas_rojas); printf("ORDENAR AL ROBOT\n\n\n"); if(bolas_rotas_azules>0) { printf("METER BOLAS AZULES DEFECTUOSAS EN CAJA DE BOLAS AZULES DEFECTUOSAS\n\n"); } if(bolas_rotas_rojas>0) { printf("METER BOLAS ROJAS DEFECTUOSAS EN CAJA DE BOLAS ROJAS DEFECTUOSAS\n\n"); } if(bolas_azules>0 
|| bolas_rojas>0) { printf("EMPAQUETAR BOLAS\n\n"); } //---------------------------------------------------------------------------------------------------------------------------------------------------------------------- cvWaitKey(0); //-------------------------------------------------------------------------------------------------------------------------------------------------------------------- //PANTALLA cvNamedWindow("Original", CV_WINDOW_AUTOSIZE); cvShowImage("Original", im ); //cvNamedWindow("imagen_bin", CV_WINDOW_AUTOSIZE); //cvShowImage("imagen_bin", imagen_bin ); //Mostrar el plano de color rojo, verde y azul //cvNamedWindow("R", CV_WINDOW_AUTOSIZE); //cvShowImage("R",ImagenR); //cvNamedWindow("G", CV_WINDOW_AUTOSIZE); //cvShowImage("G",inv_bolas_azules); //cvNamedWindow("B", CV_WINDOW_AUTOSIZE); //cvShowImage("B",inv_bolas_rojas); cvNamedWindow("bolas_azules", CV_WINDOW_AUTOSIZE); cvShowImage("bolas_azules",Im_bolas_azules); cvNamedWindow("bolas_rojas", CV_WINDOW_AUTOSIZE); cvShowImage("bolas_rojas",Im_bolas_rojas); //Mostrar la imagen cvNamedWindow("Histograma de G", CV_WINDOW_AUTOSIZE); cvShowImage("Histograma de G", histImage ); cvWaitKey(0); //--------------------------------------------------------------------------------------------------------------------------------------------------------------- //LIBERAR MEMORIA cvDestroyAllWindows(); cvReleaseImage(&ImagenR); cvReleaseImage(&ImagenG); cvReleaseImage(&ImagenB); cvReleaseImage(&imagen_bin); cvReleaseImage(&histImage); cvReleaseImage(&im); cvReleaseImage(&Imagen_RGB); cvReleaseImage(&Imagen_umbr); cvReleaseImage(&Imagen_umbr_2); cvReleaseImage(&ImagenHSV); cvReleaseImage(&ImagenH); cvReleaseImage(&ImagenS); cvReleaseImage(&ImagenV); cvReleaseImage(&temp); cvReleaseImage(&temp2); cvReleaseImage(&temp3); cvReleaseImage(&temp4); cvReleaseImage(&Im_defectos_comunes); cvReleaseImage(&Im_bolas_azules); cvReleaseImage(&Im_bolas_rojas); cvReleaseImage(&inv_bolas_rojas); 
cvReleaseImage(&inv_bolas_azules); }while(salir==FALSE); return 0; }
//main function int main() { //declare variables for image IplImage * input1; IplImage * output1; char nameim[100]="../project/dbase/males/1/mvc-001f.jpg"; CvSize s1= {48,48}; output1=cvCreateImage(s1,IPL_DEPTH_8U,3); CvSVM SVM; float a; SVM.load("../project/temp/SVM_hap_neu_sad.txt"); FILE *fp; float feat[18432]; char str[50]="./gabor ../project/temp/temp1.jpg "; IplImage * happy; IplImage * sad; IplImage * neutral; IplImage * temp; CvSize s2= {400,400}; happy=cvCreateImage(s2,IPL_DEPTH_8U,3); sad=cvCreateImage(s2,IPL_DEPTH_8U,3); neutral=cvCreateImage(s2,IPL_DEPTH_8U,3); temp = cvLoadImage("../project/data/Images/happy.jpeg", CV_LOAD_IMAGE_UNCHANGED); cvResize(temp,happy); temp = cvLoadImage("../project/data/Images/sad.jpeg", CV_LOAD_IMAGE_UNCHANGED); cvResize(temp,sad); temp = cvLoadImage("../project/data/Images/neutral.jpeg", CV_LOAD_IMAGE_UNCHANGED); cvResize(temp,neutral); CvCapture *capture=cvCreateCameraCapture(0); if(capture!=NULL) //camera has begun starting itself for(;;) { input1=cvQueryFrame(capture);//take current image in camera and give it to input pointer //get input from camera (input) //input1 = cvLoadImage(nameim, CV_LOAD_IMAGE_UNCHANGED); face_detect_crop(input1,output1); cvSaveImage("../project/temp/temp1.jpg",output1); //_______________________________________________________________// fp=popen(str,"r"); for(int i=0; i<18432; i++) { fscanf(fp,"%f",&feat[i]); //std::cout<<feat[i]<<" "; } pclose(fp); //_______________________________________________________________// cvNamedWindow("Emotion", 1); cv::Mat testmat(1, 18432, CV_32FC1, feat); a=SVM.predict(testmat); if( a<1.1 && a>0.9) { std::cout<<"happy\n"; cvShowImage("Emotion",happy); if( cv::waitKey( 10 ) >= 0 )break; } else if(a>-1.1 && a<-0.9) { std::cout<<"sad\n"; cvShowImage("Emotion",sad); if( cv::waitKey( 10 ) >= 0 )break; } else { std::cout<<"neutral\n"; cvShowImage("Emotion",neutral); if( cv::waitKey( 10 ) >= 0 )break; } cvNamedWindow("O-O", 1); cvShowImage("O-O",input1); if( 
cv::waitKey( 10 ) >= 0 )break; } cvReleaseCapture( &capture ); return 0; }
int main(int argc, char*argv[]) { int device; cvNamedWindow(camwindow,CV_WINDOW_AUTOSIZE); CvCapture* capture; CvMat* intrinsic ; CvMat* distortion; if (strcmp(argv[1],"-nocalib") == 0 && (argc == 4)){ MODE = 1; H = (CvMat*)cvLoad(argv[2],NULL,NULL,NULL); device = atoi(argv[3]); capture = cvCaptureFromCAM( device) ; Z=28; printf("\nUsage:\nReset: 'r'\nCrop-ROI: 'c'\nZoom: 'u' +/- 'd'\nSave: 's'\n Quit: 'q' | ESC key\n"); } else if ((strcmp(argv[1],"-calib") == 0) && (argc == 7) ) { MODE = 2; board_w = atoi(argv[2]); board_h = atoi(argv[3]); intrinsic = (CvMat*)cvLoad(argv[4],NULL,NULL,NULL); distortion = (CvMat*)cvLoad(argv[5],NULL,NULL,NULL); device = atoi(argv[6]); capture = cvCaptureFromCAM( device) ; printf("\nUsage:\nZoom: 'u' +/- 'd'\nBird-I-View: 't'\n Quit: 'q' | ESC key\n"); }else { printf("Error:Wrong numbers of input parameters\n"); printf("* if -option == -nocalib then only first 2 parameters are required \ \n Homography matrix \ \n usb-camera device driver \ * if -option == -calib then only 5 addition parameter are required \ \n #inner checkerboard corners generally it is 7x7 \ \n Intrinsic (xml) from Camera Calibration \ \n Distortion (xml) from Camera Calibration \ \n usb-camera device driver\n"); return -1; } if (capture == NULL ){ perror("\nFailure to access camera device\n"); return -1; } CvSize board_sz = cvSize( board_w, board_h ); int board_n = board_w*board_h; int frame=0, found = 0,corner_count = 0; CvPoint2D32f corners[board_n]; cvNamedWindow(BVwindow, CV_WINDOW_AUTOSIZE); cvSetMouseCallback(BVwindow, on_mouse, 0); CvMat stub; IplImage *image = cvQueryFrame( capture ); IplImage *gray_image = cvCreateImage(cvGetSize(image),8,1);//subpixel frame++; //Bird Eye View with ROI birdsview_image = cvCreateImage( cvGetSize(image), image->depth,3 ); Mbvimg = cvGetMat(birdsview_image,&stub,NULL,0); while((MODE == 1 )){ // Capture bird's view image every 10 frames if (frame % board_dt == 0) { cvWarpPerspective( image, birdsview_image, H, CV_INTER_LINEAR 
| CV_WARP_FILL_OUTLIERS |CV_WARP_INVERSE_MAP , cvScalarAll(0) ); cvShowImage( BVwindow, birdsview_image ); updateImage(); frame=1; } char key = (char) cvWaitKey(2); switch( (char) key ) { case 'r': reset(); if (frame % board_dt != 0) // sychronized updated updateImage(); break; case 'c': BirdEyeROI(); break; case 'u': Z+=0.5; CV_MAT_ELEM(*H,float,2,2) = Z; printf("\n%f",Z); break; case 'd': Z-=0.5; CV_MAT_ELEM(*H,float,2,2) = Z; printf("\n%f",Z); break; case 's': cvSaveImage("birdviewROIimg.bmp",birdsview_image,0); printf("\nImage Saved! Name: birdviewROIimg\n"); break; case 27: case 'q': return 0; break; } cvShowImage(camwindow, image); //overlay points in web cam stream window image = cvQueryFrame(capture); //Get next image frame++; } //Bird Eye View to extract Homography matrix while((MODE == 2 )){ //Skip every board_dt frames to allow user to move chessboard if (frame % board_dt == 0) { found = cvFindChessboardCorners(image,board_sz,corners,&corner_count,CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS); if (found){ cvCvtColor( image,gray_image,CV_BGR2GRAY ); cvFindCornerSubPix( gray_image,corners,corner_count,cvSize(11,11),cvSize(-1,-1),cvTermCriteria( CV_TERMCRIT_EPS |CV_TERMCRIT_ITER,30,0.1)); frame=1; } } char key = (char)cvWaitKey(2); switch (key) { case 'u': Z +=0.5; printf("\n%f",Z); break; case 'd': Z -=0.5; printf("\n%f",Z); break; case 't': BirdsIview(intrinsic,distortion,cvCloneImage(image),corners); break; case 27: case 'q': if (H != NULL){ cvSave("H.xml",H,NULL,NULL,cvAttrList(0,0)); printf("\nHomography Saved! Name: H.xml\n"); } return 0; break; } cvDrawChessboardCorners(image, board_sz, corners, corner_count,found); cvShowImage(camwindow, image); //overlay points in web cam stream window frame++; image = cvQueryFrame(capture); //Get next image } return 0; }
int main(int argc, char *argv[]) { int numtasks, rank, rc, dest, source, count, tag=1; int number_of_processes; char inmsg[10], outmsg[10]; // original image IplImage* img = 0; uchar *img_local_data = 0; if(argc<2) { printf("Usage: main <image-file-name>\n\7"); exit(0); } // load an image // Initialize MPI and get important variable. MPI_Status Stat; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &number_of_processes); img=cvLoadImage(argv[1], CV_LOAD_IMAGE_COLOR); if(!img) { printf("Could not load image file: %s\n",argv[1]); exit(0); } // Image size int imageSize = img->height*img->widthStep; // Calculates the amount of work of each process int workload = img->height / number_of_processes; int rec_size = workload * img->widthStep; if(rank == 0) { // Array that contains the final imageData char *result = malloc(rec_size); char *extra = 0; int total_work = workload * number_of_processes; // The first process always calculates the first part of the matrix applySmooth(img, result, 0, workload); if (total_work < img->height) { extra = malloc((img->height - total_work) * img->widthStep); applySmooth(img, extra, total_work, img->height); memcpy(img->imageData + rec_size * number_of_processes, extra, (img->height - total_work) * img->widthStep); } // Collect all the processed data MPI_Gather(result, rec_size, MPI_CHAR, img->imageData, rec_size, MPI_CHAR, 0, MPI_COMM_WORLD); cvSaveImage("result/result.jpg", img, 0); cvReleaseImage(&img); } else { // Each process knows where to start int start = rank*workload; char * result = malloc(rec_size); applySmooth(img, result, start, start+workload); // The offset is important to avoid any problems while building the // image MPI_Gather(result, rec_size, MPI_CHAR, NULL , rec_size, MPI_CHAR, 0, MPI_COMM_WORLD); } MPI_Finalize(); return 0; }
void save_image(void* image, char* file){ cvSaveImage( file, (IplImage*)image, NULL); }
/** * Save the image in referred path. */ void ImageSaver::save() { cvSaveImage(path.c_str(), this->image); }